Use compute_node consistently in ResourceTracker
This is the second patch set in a series refactoring code that uses
ResourceTracker.compute_node so that it can later be changed to a
ComputeNode object. Note that in this patch compute_node is still a
dict.

The methods that calculate resource usage in the ResourceTracker take a
parameter called (variously) resources, values or stats. Sometimes they
are called with self.compute_node as the value for this parameter, and
sometimes with a dict obtained from the virt driver by the
get_available_resources() method. The results of the calculations are
always copied into self.compute_node. This mixing of data structures
won't work if self.compute_node is converted to a ComputeNode object
instead of a dict.

The previous patch in this series initialises self.compute_node at the
start of _update_available_resources() so it has a value at the outset.
This patch copies the resources data into self.compute_node and uses
only that data structure in the methods that calculate resource usage.
As a consequence it is no longer necessary to pass the data in as a
parameter to those methods, or to copy the result into
self.compute_node at the end of the calculations. This step will allow
us, in following patches, to convert the code in the update methods to
object notation when self.compute_node is changed to be an object
instead of a dict.

Minor changes included are the removal of the final copy into
compute_node and moving initialisation of the stats plugin into
_init_compute_node().

Change-Id: Ia79b34aa9e6aad5cabe63bb7545c545b952b2b3a
commit 414bdd21cd
parent e8707aa3b5
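
To illustrate the pattern this patch moves to, here is a minimal sketch
(simplified, hypothetical stand-in names, not Nova's actual code): the
driver-supplied resources dict is copied into self.compute_node once,
and the usage-calculation methods then read and write self.compute_node
directly instead of taking a resources/values/stats parameter and
copying the results back:

    # Illustrative sketch only; TrackerSketch and its fields are
    # hypothetical stand-ins for ResourceTracker.
    class TrackerSketch(object):
        def __init__(self):
            # still a plain dict in this patch; a ComputeNode object later
            self.compute_node = {}

        def _copy_resources(self, resources):
            # initialise compute_node from the virt driver's resource dict
            self.compute_node.update(resources)

        def _update_usage(self, usage, sign=1):
            # before: _update_usage(self, context, resources, usage, sign=1)
            # now mutates self.compute_node in place; no copy-back step
            self.compute_node['memory_mb_used'] += (
                sign * usage.get('memory_mb', 0))
            self.compute_node['free_ram_mb'] = (
                self.compute_node['memory_mb'] -
                self.compute_node['memory_mb_used'])

    rt = TrackerSketch()
    rt._copy_resources({'memory_mb': 2048, 'memory_mb_used': 512})
    rt._update_usage({'memory_mb': 256})           # claim an instance
    rt._update_usage({'memory_mb': 256}, sign=-1)  # release it again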
@@ -135,12 +135,11 @@ class ResourceTracker(object):
             instance_ref['numa_topology'] = claim.claimed_numa_topology

             # Mark resources in-use and update stats
-            self._update_usage_from_instance(context, self.compute_node,
-                                             instance_ref)
+            self._update_usage_from_instance(context, instance_ref)

             elevated = context.elevated()
             # persist changes to the compute node:
-            self._update(elevated, self.compute_node)
+            self._update(elevated)

             return claim

@@ -185,9 +184,9 @@ class ResourceTracker(object):
         # Mark the resources in-use for the resize landing on this
         # compute host:
         self._update_usage_from_migration(context, instance_ref, image_meta,
-                                          self.compute_node, migration)
+                                          migration)
         elevated = context.elevated()
-        self._update(elevated, self.compute_node)
+        self._update(elevated)

         return claim

@@ -229,9 +228,9 @@ class ResourceTracker(object):
         # flag the instance as deleted to revert the resource usage
         # and associated stats:
         instance['vm_state'] = vm_states.DELETED
-        self._update_usage_from_instance(context, self.compute_node, instance)
+        self._update_usage_from_instance(context, instance)

-        self._update(context.elevated(), self.compute_node)
+        self._update(context.elevated())

     @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
     def drop_resize_claim(self, context, instance, instance_type=None,

@@ -258,10 +257,10 @@ class ResourceTracker(object):
                 self.pci_tracker.update_pci_for_migration(context,
                                                           instance,
                                                           sign=-1)
-            self._update_usage(context, self.compute_node, usage, sign=-1)
+            self._update_usage(usage, sign=-1)

             ctxt = context.elevated()
-            self._update(ctxt, self.compute_node)
+            self._update(ctxt)

     @utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
     def update_usage(self, context, instance):

@@ -276,9 +275,8 @@ class ResourceTracker(object):
         # don't update usage for this instance unless it submitted a resource
         # claim first:
         if uuid in self.tracked_instances:
-            self._update_usage_from_instance(context, self.compute_node,
-                                             instance)
-            self._update(context.elevated(), self.compute_node)
+            self._update_usage_from_instance(context, instance)
+            self._update(context.elevated())

     @property
     def disabled(self):

@@ -299,9 +297,10 @@ class ResourceTracker(object):
         :param resources: initial values
         """

-        # if there is already a compute node we don't
-        # need to do anything
+        # if there is already a compute node just use resources
+        # to initialize
         if self.compute_node:
+            self._copy_resources(resources)
             return

         # TODO(pmurray): this lookup should be removed when the service_id
@@ -313,14 +312,21 @@ class ResourceTracker(object):
             return

         # now try to get the compute node record from the
-        # database. If we get one we are done.
+        # database. If we get one we use resources to initialize
         self.compute_node = self._get_compute_node(context)
         if self.compute_node:
+            self._copy_resources(resources)
             return

         # there was no local copy and none in the database
         # so we need to create a new compute node. This needs
-        # to be initialised with resource values.
+        # initial values for the database.
+        #
+        # TODO(pmurray) this section will be cleaned up when we
+        # use the ComputeNode object. Here it is the conductor call
+        # to compute_node_create() that sets up the compute_node
+        # dict. That will change to create the compute_node, initialize
+        # it and then save.
         cn = {}
         cn.update(resources)
         # TODO(pmurray) service_id is deprecated but is still a required field.

@@ -342,6 +348,23 @@ class ResourceTracker(object):
                          '%(host)s:%(node)s'),
                      {'host': self.host, 'node': self.nodename})

+        # now we have created a compute node we can copy resources
+        # NOTE(pmurray): this has an unnecessary copy until the above
+        # is cleaned up.
+        self._copy_resources(resources)
+
+    def _copy_resources(self, resources):
+        """Copy resource values to initialise compute_node and related
+        data structures.
+        """
+        # purge old stats and init with anything passed in by the driver
+        self.stats.clear()
+        self.stats.digest_stats(resources.get('stats'))
+
+        # now copy rest to compute_node
+        self.compute_node.update(resources)
+        self.compute_node.pop('pci_passthrough_devices', None)
+
     def _get_host_metrics(self, context, nodename):
         """Get the metrics from monitors and
         notify information to message bus.
@@ -436,18 +459,18 @@ class ResourceTracker(object):
                                                     'numa_topology'])

         # Now calculate usage based on instance utilization:
-        self._update_usage_from_instances(context, resources, instances)
+        self._update_usage_from_instances(context, instances)

         # Grab all in-progress migrations:
         migrations = objects.MigrationList.get_in_progress_by_host_and_node(
             context, self.host, self.nodename)

-        self._update_usage_from_migrations(context, resources, migrations)
+        self._update_usage_from_migrations(context, migrations)

         # Detect and account for orphaned instances that may exist on the
         # hypervisor, but are not in the DB:
         orphans = self._find_orphaned_instances()
-        self._update_usage_from_orphans(context, resources, orphans)
+        self._update_usage_from_orphans(orphans)

         # NOTE(yjiang5): Because pci device tracker status is not cleared in
         # this periodic task, and also because the resource tracker is not

@@ -455,14 +478,14 @@ class ResourceTracker(object):
         # from deleted instances.
         if self.pci_tracker:
             self.pci_tracker.clean_usage(instances, migrations, orphans)
-            resources['pci_device_pools'] = self.pci_tracker.stats
+            self.compute_node['pci_device_pools'] = self.pci_tracker.stats
         else:
-            resources['pci_device_pools'] = []
+            self.compute_node['pci_device_pools'] = []

-        self._report_final_resource_view(resources)
+        self._report_final_resource_view()

         metrics = self._get_host_metrics(context, self.nodename)
-        resources['metrics'] = jsonutils.dumps(metrics)
+        self.compute_node['metrics'] = jsonutils.dumps(metrics)

         # TODO(sbauza): Juno compute nodes are missing the host field and
         # the Juno ResourceTracker does not set this field, even if

@@ -471,9 +494,9 @@ class ResourceTracker(object):
         # to add this field in the resources dict until the RT is using
         # the ComputeNode.save() method for populating the table.
         # tl;dr: To be removed once RT is using ComputeNode.save()
-        resources['host'] = self.host
+        self.compute_node['host'] = self.host

-        self._update(context, resources)
+        self._update(context)
         LOG.info(_LI('Compute_service record updated for %(host)s:%(node)s'),
                  {'host': self.host, 'node': self.nodename})

@@ -539,23 +562,24 @@ class ResourceTracker(object):
                   'free_vcpus': free_vcpus,
                   'pci_devices': pci_devices})

-    def _report_final_resource_view(self, resources):
+    def _report_final_resource_view(self):
         """Report final calculate of physical memory, used virtual memory,
         disk, usable vCPUs, used virtual CPUs and PCI devices,
         including instance calculations and in-progress resource claims. These
         values will be exposed via the compute node table to the scheduler.
         """
-        vcpus = resources['vcpus']
+        vcpus = self.compute_node['vcpus']
         if vcpus:
             tcpu = vcpus
-            ucpu = resources['vcpus_used']
+            ucpu = self.compute_node['vcpus_used']
             LOG.info(_LI("Total usable vcpus: %(tcpu)s, "
                          "total allocated vcpus: %(ucpu)s"),
-                     {'tcpu': vcpus, 'ucpu': resources['vcpus_used']})
+                     {'tcpu': vcpus,
+                      'ucpu': ucpu})
         else:
             tcpu = 0
             ucpu = 0
-        pci_device_pools = resources.get('pci_device_pools')
+        pci_device_pools = self.compute_node.get('pci_device_pools')
         LOG.info(_LI("Final resource view: "
                      "name=%(node)s "
                      "phys_ram=%(phys_ram)sMB "
@@ -566,38 +590,36 @@ class ResourceTracker(object):
                      "used_vcpus=%(used_vcpus)s "
                      "pci_stats=%(pci_stats)s"),
                  {'node': self.nodename,
-                  'phys_ram': resources['memory_mb'],
-                  'used_ram': resources['memory_mb_used'],
-                  'phys_disk': resources['local_gb'],
-                  'used_disk': resources['local_gb_used'],
+                  'phys_ram': self.compute_node['memory_mb'],
+                  'used_ram': self.compute_node['memory_mb_used'],
+                  'phys_disk': self.compute_node['local_gb'],
+                  'used_disk': self.compute_node['local_gb_used'],
                   'total_vcpus': tcpu,
                   'used_vcpus': ucpu,
                   'pci_stats': pci_device_pools})

-    def _resource_change(self, resources):
+    def _resource_change(self):
         """Check to see if any resouces have changed."""
-        if cmp(resources, self.old_resources) != 0:
-            self.old_resources = copy.deepcopy(resources)
+        if cmp(self.compute_node, self.old_resources) != 0:
+            self.old_resources = copy.deepcopy(self.compute_node)
             return True
         return False

-    def _update(self, context, values):
+    def _update(self, context):
         """Update partial stats locally and populate them to Scheduler."""
-        self._write_ext_resources(values)
-        values['stats'] = jsonutils.dumps(values['stats'])
+        self._write_ext_resources(self.compute_node)
+        # NOTE(pmurray): the stats field is stored as a json string. The
+        # json conversion will be done automatically by the ComputeNode object
+        # so this can be removed when using ComputeNode.
+        self.compute_node['stats'] = jsonutils.dumps(
+            self.compute_node['stats'])

-        if not self._resource_change(values):
+        if not self._resource_change():
             return
-        if "service" in self.compute_node:
-            del self.compute_node['service']
-        # NOTE(sbauza): Now the DB update is asynchronous, we need to locally
-        # update the values
-        self.compute_node.update(values)
         # Persist the stats to the Scheduler
-        self._update_resource_stats(context, values)
+        self._update_resource_stats(context, self.compute_node)
         if self.pci_tracker:
             self.pci_tracker.save(context)

@@ -607,33 +629,36 @@ class ResourceTracker(object):
         self.scheduler_client.update_resource_stats(
             context, (self.host, self.nodename), stats)

-    def _update_usage(self, context, resources, usage, sign=1):
+    def _update_usage(self, usage, sign=1):
         mem_usage = usage['memory_mb']

         overhead = self.driver.estimate_instance_overhead(usage)
         mem_usage += overhead['memory_mb']

-        resources['memory_mb_used'] += sign * mem_usage
-        resources['local_gb_used'] += sign * usage.get('root_gb', 0)
-        resources['local_gb_used'] += sign * usage.get('ephemeral_gb', 0)
+        self.compute_node['memory_mb_used'] += sign * mem_usage
+        self.compute_node['local_gb_used'] += sign * usage.get('root_gb', 0)
+        self.compute_node['local_gb_used'] += (
+            sign * usage.get('ephemeral_gb', 0))

         # free ram and disk may be negative, depending on policy:
-        resources['free_ram_mb'] = (resources['memory_mb'] -
-                                    resources['memory_mb_used'])
-        resources['free_disk_gb'] = (resources['local_gb'] -
-                                     resources['local_gb_used'])
+        self.compute_node['free_ram_mb'] = (
+            self.compute_node['memory_mb'] -
+            self.compute_node['memory_mb_used'])
+        self.compute_node['free_disk_gb'] = (
+            self.compute_node['local_gb'] -
+            self.compute_node['local_gb_used'])

-        resources['running_vms'] = self.stats.num_instances
+        self.compute_node['running_vms'] = self.stats.num_instances
         self.ext_resources_handler.update_from_instance(usage, sign)

         # Calculate the numa usage
         free = sign == -1
         updated_numa_topology = hardware.get_host_numa_usage_from_instance(
-            resources, usage, free)
-        resources['numa_topology'] = updated_numa_topology
+            self.compute_node, usage, free)
+        self.compute_node['numa_topology'] = updated_numa_topology

     def _update_usage_from_migration(self, context, instance, image_meta,
-                                     resources, migration):
+                                     migration):
         """Update usage for a single migration. The record may
         represent an incoming or outbound migration.
         """
@@ -677,7 +702,7 @@ class ResourceTracker(object):
                                         instance['system_metadata'])

         if itype:
-            host_topology = resources.get('numa_topology')
+            host_topology = self.compute_node.get('numa_topology')
             if host_topology:
                 host_topology = objects.NUMATopology.obj_from_db_obj(
                     host_topology)

@@ -689,14 +714,14 @@ class ResourceTracker(object):
                 itype, numa_topology=numa_topology)
             if self.pci_tracker:
                 self.pci_tracker.update_pci_for_migration(context, instance)
-            self._update_usage(context, resources, usage)
+            self._update_usage(usage)
             if self.pci_tracker:
-                resources['pci_device_pools'] = self.pci_tracker.stats
+                self.compute_node['pci_device_pools'] = self.pci_tracker.stats
             else:
-                resources['pci_device_pools'] = []
+                self.compute_node['pci_device_pools'] = []
             self.tracked_migrations[uuid] = (migration, itype)

-    def _update_usage_from_migrations(self, context, resources, migrations):
+    def _update_usage_from_migrations(self, context, migrations):

         self.tracked_migrations.clear()

@@ -729,13 +754,13 @@ class ResourceTracker(object):
             instance = migration.instance
             try:
                 self._update_usage_from_migration(context, instance, None,
-                                                  resources, migration)
+                                                  migration)
             except exception.FlavorNotFound:
                 LOG.warning(_LW("Flavor could not be found, skipping "
                                 "migration."), instance_uuid=uuid)
                 continue

-    def _update_usage_from_instance(self, context, resources, instance):
+    def _update_usage_from_instance(self, context, instance):
         """Update usage for a single instance."""

         uuid = instance['uuid']

@@ -759,15 +784,15 @@ class ResourceTracker(object):
         # if it's a new or deleted instance:
         if is_new_instance or is_deleted_instance:
             # new instance, update compute node resource usage:
-            self._update_usage(context, resources, instance, sign=sign)
+            self._update_usage(instance, sign=sign)

-        resources['current_workload'] = self.stats.calculate_workload()
+        self.compute_node['current_workload'] = self.stats.calculate_workload()
         if self.pci_tracker:
-            resources['pci_device_pools'] = self.pci_tracker.stats
+            self.compute_node['pci_device_pools'] = self.pci_tracker.stats
         else:
-            resources['pci_device_pools'] = []
+            self.compute_node['pci_device_pools'] = []

-    def _update_usage_from_instances(self, context, resources, instances):
+    def _update_usage_from_instances(self, context, instances):
         """Calculate resource usage based on instance utilization. This is
         different than the hypervisor's view as it will account for all
         instances assigned to the local compute host, even if they are not

@@ -775,26 +800,25 @@ class ResourceTracker(object):
         """
         self.tracked_instances.clear()

-        # purge old stats and init with anything passed in by the driver
-        self.stats.clear()
-        self.stats.digest_stats(resources.get('stats'))
-
         # set some initial values, reserve room for host/hypervisor:
-        resources['local_gb_used'] = CONF.reserved_host_disk_mb / 1024
-        resources['memory_mb_used'] = CONF.reserved_host_memory_mb
-        resources['free_ram_mb'] = (resources['memory_mb'] -
-                                    resources['memory_mb_used'])
-        resources['free_disk_gb'] = (resources['local_gb'] -
-                                     resources['local_gb_used'])
-        resources['current_workload'] = 0
-        resources['running_vms'] = 0
+        self.compute_node['local_gb_used'] = CONF.reserved_host_disk_mb / 1024
+        self.compute_node['memory_mb_used'] = CONF.reserved_host_memory_mb
+        self.compute_node['free_ram_mb'] = (
+            self.compute_node['memory_mb'] -
+            self.compute_node['memory_mb_used'])
+        self.compute_node['free_disk_gb'] = (
+            self.compute_node['local_gb'] -
+            self.compute_node['local_gb_used'])
+        self.compute_node['current_workload'] = 0
+        self.compute_node['running_vms'] = 0

         # Reset values for extended resources
-        self.ext_resources_handler.reset_resources(resources, self.driver)
+        self.ext_resources_handler.reset_resources(self.compute_node,
+                                                   self.driver)

         for instance in instances:
             if instance.vm_state != vm_states.DELETED:
-                self._update_usage_from_instance(context, resources, instance)
+                self._update_usage_from_instance(context, instance)

     def _find_orphaned_instances(self):
         """Given the set of instances and migrations already account for

@@ -817,7 +841,7 @@ class ResourceTracker(object):

         return orphans

-    def _update_usage_from_orphans(self, context, resources, orphans):
+    def _update_usage_from_orphans(self, orphans):
         """Include orphaned instances in usage."""
         for orphan in orphans:
             memory_mb = orphan['memory_mb']

@@ -828,7 +852,7 @@ class ResourceTracker(object):

             # just record memory usage for the orphan
             usage = {'memory_mb': memory_mb}
-            self._update_usage(context, resources, usage)
+            self._update_usage(usage)

     def _verify_resources(self, resources):
         resource_keys = ["vcpus", "memory_mb", "local_gb", "cpu_info",
@@ -25,9 +25,8 @@ class FakeResourceTracker(resource_tracker.ResourceTracker):
         self.compute_node = values
         self.compute_node['id'] = 1

-    def _update(self, context, values, prune_stats=False):
-        self._write_ext_resources(values)
-        self.compute_node.update(values)
+    def _update(self, context):
+        self._write_ext_resources(self.compute_node)

     def _get_service(self, context):
         return objects.Service(id=1)
@@ -15,6 +15,7 @@

 """Tests for compute resource tracking."""

+import copy
 import uuid

 import mock

@@ -732,17 +733,21 @@ class SchedulerClientTrackerTestCase(BaseTrackerTestCase):
             side_effect=self._fake_compute_node_update)

     def test_update_resource(self):
-        self.tracker._write_ext_resources = mock.Mock()
-        values = {'stats': {}, 'foo': 'bar', 'baz_count': 0}
-        self.tracker._update(self.context, values)
-
-        expected = {'stats': '{}', 'foo': 'bar', 'baz_count': 0,
-                    'id': 1}
+        # change a compute node value to simulate a change
+        self.tracker.compute_node['local_gb_used'] += 1
+        expected = copy.deepcopy(self.tracker.compute_node)
+        self.tracker._update(self.context)
         self.tracker.scheduler_client.update_resource_stats.\
             assert_called_once_with(self.context,
                                     ("fakehost", "fakenode"),
                                     expected)

+    def test_no_update_resource(self):
+        self.tracker._update(self.context)
+        update = self.tracker.scheduler_client.update_resource_stats
+        self.assertFalse(update.called, "update_resource_stats should not be "
+                         "called when there is no change")
+

 class TrackerPciStatsTestCase(BaseTrackerTestCase):
@@ -392,7 +392,8 @@ class TestUpdateAvailableResources(BaseTestCase):
         migr_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
                                           'fake-node')

-        expected_resources = {
+        expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
+        expected_resources.update({
             # host is added in update_available_resources()
             # before calling _update()
             'host': 'fake-host',

@@ -414,9 +415,9 @@ class TestUpdateAvailableResources(BaseTestCase):
             'current_workload': 0,
             'vcpus': 4,
             'running_vms': 0
-        }
-        update_mock.assert_called_once_with(mock.sentinel.ctx,
-                                            expected_resources)
+        })
+        update_mock.assert_called_once_with(mock.sentinel.ctx)
+        self.assertEqual(expected_resources, self.rt.compute_node)

     @mock.patch('nova.objects.Service.get_by_compute_host')
     @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')

@@ -437,7 +438,8 @@ class TestUpdateAvailableResources(BaseTestCase):

         get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
                                             'fake-node')
-        expected_resources = {
+        expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
+        expected_resources.update({
             # host is added in update_available_resources()
             # before calling _update()
             'host': 'fake-host',

@@ -459,9 +461,9 @@ class TestUpdateAvailableResources(BaseTestCase):
             'current_workload': 0,
             'vcpus': 4,
             'running_vms': 0
-        }
-        update_mock.assert_called_once_with(mock.sentinel.ctx,
-                                            expected_resources)
+        })
+        update_mock.assert_called_once_with(mock.sentinel.ctx)
+        self.assertEqual(expected_resources, self.rt.compute_node)

     @mock.patch('nova.objects.Service.get_by_compute_host')
     @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')

@@ -482,7 +484,8 @@ class TestUpdateAvailableResources(BaseTestCase):

         get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
                                             'fake-node')
-        expected_resources = {
+        expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
+        expected_resources.update({
             # host is added in update_available_resources()
             # before calling _update()
             'host': 'fake-host',

@@ -515,9 +518,9 @@ class TestUpdateAvailableResources(BaseTestCase):
             'current_workload': 0,
             'vcpus': 4,
             'running_vms': 1  # One active instance
-        }
-        update_mock.assert_called_once_with(mock.sentinel.ctx,
-                                            expected_resources)
+        })
+        update_mock.assert_called_once_with(mock.sentinel.ctx)
+        self.assertEqual(expected_resources, self.rt.compute_node)

     @mock.patch('nova.objects.Service.get_by_compute_host')
     @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
@@ -558,7 +561,8 @@ class TestUpdateAvailableResources(BaseTestCase):

         get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
                                             'fake-node')
-        expected_resources = {
+        expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
+        expected_resources.update({
             # host is added in update_available_resources()
             # before calling _update()
             'host': 'fake-host',

@@ -582,9 +586,9 @@ class TestUpdateAvailableResources(BaseTestCase):
             # Yep, for some reason, orphaned instances are not counted
             # as running VMs...
             'running_vms': 0
-        }
-        update_mock.assert_called_once_with(mock.sentinel.ctx,
-                                            expected_resources)
+        })
+        update_mock.assert_called_once_with(mock.sentinel.ctx)
+        self.assertEqual(expected_resources, self.rt.compute_node)

     @mock.patch('nova.objects.Service.get_by_compute_host')
     @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')

@@ -622,7 +626,8 @@ class TestUpdateAvailableResources(BaseTestCase):

         get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
                                             'fake-node')
-        expected_resources = {
+        expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
+        expected_resources.update({
             # host is added in update_available_resources()
             # before calling _update()
             'host': 'fake-host',

@@ -644,9 +649,9 @@ class TestUpdateAvailableResources(BaseTestCase):
             'current_workload': 0,
             'vcpus': 4,
             'running_vms': 0
-        }
-        update_mock.assert_called_once_with(mock.sentinel.ctx,
-                                            expected_resources)
+        })
+        update_mock.assert_called_once_with(mock.sentinel.ctx)
+        self.assertEqual(expected_resources, self.rt.compute_node)

     @mock.patch('nova.objects.Service.get_by_compute_host')
     @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')

@@ -680,7 +685,8 @@ class TestUpdateAvailableResources(BaseTestCase):

         get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
                                             'fake-node')
-        expected_resources = {
+        expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
+        expected_resources.update({
             # host is added in update_available_resources()
             # before calling _update()
             'host': 'fake-host',

@@ -702,9 +708,9 @@ class TestUpdateAvailableResources(BaseTestCase):
             'current_workload': 0,
             'vcpus': 4,
             'running_vms': 0
-        }
-        update_mock.assert_called_once_with(mock.sentinel.ctx,
-                                            expected_resources)
+        })
+        update_mock.assert_called_once_with(mock.sentinel.ctx)
+        self.assertEqual(expected_resources, self.rt.compute_node)

     @mock.patch('nova.objects.Service.get_by_compute_host')
     @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
@@ -742,7 +748,8 @@ class TestUpdateAvailableResources(BaseTestCase):

         get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
                                             'fake-node')
-        expected_resources = {
+        expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
+        expected_resources.update({
             # host is added in update_available_resources()
             # before calling _update()
             'host': 'fake-host',

@@ -768,9 +775,9 @@ class TestUpdateAvailableResources(BaseTestCase):
             'current_workload': 1,  # One migrating instance...
             'vcpus': 4,
             'running_vms': 2
-        }
-        update_mock.assert_called_once_with(mock.sentinel.ctx,
-                                            expected_resources)
+        })
+        update_mock.assert_called_once_with(mock.sentinel.ctx)
+        self.assertEqual(expected_resources, self.rt.compute_node)


 class TestInitComputeNode(BaseTestCase):
@@ -898,7 +905,6 @@ class TestUpdateComputeNode(BaseTestCase):
     @mock.patch('nova.objects.Service.get_by_compute_host')
     def test_existing_compute_node_updated_same_resources(self, service_mock):
         self._setup_rt()
-        self.rt.compute_node = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])

         capi = self.cond_api_mock
         create_node_mock = capi.compute_node_create

@@ -910,6 +916,7 @@ class TestUpdateComputeNode(BaseTestCase):
             # host is added in update_available_resources()
             # before calling _update()
             'host': 'fake-host',
+            'id': 1,
             'host_ip': 'fake-ip',
             'numa_topology': None,
             'metrics': '[]',

@@ -930,7 +937,8 @@ class TestUpdateComputeNode(BaseTestCase):
             'running_vms': 0
         }
         orig_resources = copy.deepcopy(resources)
-        self.rt._update(mock.sentinel.ctx, resources)
+        self.rt.compute_node = copy.deepcopy(orig_resources)
+        self.rt._update(mock.sentinel.ctx)

         self.assertFalse(self.rt.disabled)
         self.assertFalse(service_mock.called)

@@ -943,13 +951,12 @@ class TestUpdateComputeNode(BaseTestCase):
         # (unchanged) resources for the compute node
         self.sched_client_mock.reset_mock()
         urs_mock = self.sched_client_mock.update_resource_stats
-        self.rt._update(mock.sentinel.ctx, orig_resources)
+        self.rt._update(mock.sentinel.ctx)
         self.assertFalse(urs_mock.called)

     @mock.patch('nova.objects.Service.get_by_compute_host')
     def test_existing_compute_node_updated_new_resources(self, service_mock):
         self._setup_rt()
-        self.rt.compute_node = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])

         capi = self.cond_api_mock
         create_node_mock = capi.compute_node_create
@@ -962,6 +969,7 @@ class TestUpdateComputeNode(BaseTestCase):
             # host is added in update_available_resources()
             # before calling _update()
             'host': 'fake-host',
+            'id': 1,
             'host_ip': 'fake-ip',
             'numa_topology': None,
             'metrics': '[]',

@@ -985,13 +993,14 @@ class TestUpdateComputeNode(BaseTestCase):
         expected_resources['id'] = 1
         expected_resources['stats'] = '{}'

-        self.rt.ext_resources_handler.reset_resources(resources,
+        self.rt.compute_node = copy.deepcopy(resources)
+        self.rt.ext_resources_handler.reset_resources(self.rt.compute_node,
                                                       self.rt.driver)
         # This emulates the behavior that occurs in the
         # RT.update_available_resource() method, which updates resource
         # information in the ERT differently than all other resources.
         self.rt.ext_resources_handler.update_from_instance(dict(vcpus=2))
-        self.rt._update(mock.sentinel.ctx, resources)
+        self.rt._update(mock.sentinel.ctx)

         self.assertFalse(self.rt.disabled)
         self.assertFalse(service_mock.called)
@@ -1070,7 +1079,8 @@ class TestInstanceClaim(BaseTestCase):
         })
         with mock.patch.object(self.rt, '_update') as update_mock:
             self.rt.instance_claim(self.ctx, self.instance, None)
-            update_mock.assert_called_once_with(self.elevated, expected)
+            update_mock.assert_called_once_with(self.elevated)
+            self.assertEqual(expected, self.rt.compute_node)

     @mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
     @mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')

@@ -1110,8 +1120,8 @@ class TestInstanceClaim(BaseTestCase):
                     cell.cpu_usage += 1
         with mock.patch.object(self.rt, '_update') as update_mock:
             self.rt.instance_claim(self.ctx, self.instance, limits)
-            self.assertTrue(update_mock.called)
-            updated_compute_node = update_mock.call_args[0][1]
+            update_mock.assert_called_once_with(self.ctx.elevated())
+            updated_compute_node = self.rt.compute_node
             new_numa = updated_compute_node['numa_topology']
             new_numa = objects.NUMATopology.obj_from_db_obj(new_numa)
             self.assertEqualNUMAHostTopology(expected_numa, new_numa)
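
For reference, a minimal sketch of the change-detection flow _update()
follows after this patch, matching the behaviour asserted by
test_update_resource and test_no_update_resource above. Names here are
hypothetical stand-ins; the real code compares with Python 2's cmp() and
pushes through the scheduler client:

    import copy
    import json

    # Illustrative sketch only; UpdateFlowSketch and 'pushed' are
    # hypothetical stand-ins for ResourceTracker and the scheduler client.
    class UpdateFlowSketch(object):
        def __init__(self, compute_node):
            self.compute_node = compute_node  # plain dict for now
            self.old_resources = {}
            self.pushed = []

        def _resource_change(self):
            # snapshot-compare against the last state sent to the scheduler
            if self.compute_node != self.old_resources:
                self.old_resources = copy.deepcopy(self.compute_node)
                return True
            return False

        def _update(self):
            # stats is still serialised by hand until the ComputeNode
            # object takes over the JSON conversion
            if isinstance(self.compute_node.get('stats'), dict):
                self.compute_node['stats'] = json.dumps(
                    self.compute_node['stats'])
            if not self._resource_change():
                return  # unchanged: skip the scheduler update
            self.pushed.append(copy.deepcopy(self.compute_node))

    rt = UpdateFlowSketch({'stats': {}, 'memory_mb_used': 512})
    rt._update()   # first call pushes the full node state
    rt._update()   # nothing changed, so nothing is pushed
    assert len(rt.pushed) == 1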