Convert RT compute_node to be a ComputeNode object

This patch converts the ResourceTracker compute_node property
to a ComputeNode object. A number of ComputeNode fields map their
values to the db format automatically, so some of the code that
builds json strings goes away with this change.
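
As an illustration (a condensed before/after sketch assembled from the
patch hunks, assuming a Nova tree where nova.objects and
oslo_serialization's jsonutils are importable), the stats field shows
the difference: the dict-based tracker had to json-encode it by hand,
while the ComputeNode object maps its fields to the db format itself
on create()/save():

    # before: build a plain dict and pre-serialize json columns by hand
    cn = {}
    cn.update(resources)
    cn['stats'] = jsonutils.dumps(cn['stats'])
    self.compute_node = self.conductor_api.compute_node_create(context, cn)

    # after: assign native values; the object's field types handle the
    # db representation (stats values, for example, are coerced to strings)
    self.compute_node = objects.ComputeNode(context)
    self.compute_node.service_id = service.id
    self.compute_node.host = self.host
    self._copy_resources(resources)
    self.compute_node.create()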

The scheduler client report code is simplified by this change,
since it is now handed a ComputeNode object directly.
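
Concretely, the tracker no longer copies its resource dict, patches in
the id and pre-serializes stats before reporting; it hands the
ComputeNode object straight to the client (a sketch of the call-site
change; the replacement body of the report client is not fully visible
in the truncated hunk below, so the final save() step is an
assumption):

    # before: copy the dict, carry the id, report keyed by (host, node)
    stats = values.copy()
    stats['id'] = self.compute_node['id']
    self.scheduler_client.update_resource_stats(
        context, (self.host, self.nodename), stats)

    # after: pass the object itself; the report client can then persist
    # it directly, e.g. with compute_node.save() (assumed)
    self.scheduler_client.update_resource_stats(self.compute_node)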

Note that this change naturally required modifying a number of
tests in test_tracker, test_resource_tracker and test_client to
cater for objects instead of dicts. Some of these tests used
incorrect values or arbitrary key names that do not exist as
ComputeNode fields, so they had to be corrected to conform to the
type checking of the ComputeNode object.
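
The typical adaptation in those tests (a sketch of the pattern visible
in the hunks below, assuming obj_equal_prims from nova.objects.base) is
to compare object primitives instead of dicts and to allow for field
coercion, e.g. values in the stats field being turned into strings:

    # dict comparison no longer applies once compute_node is an object
    self.assertTrue(obj_base.obj_equal_prims(expected_resources,
                                             self.rt.compute_node))

    # the stats field coerces its values to strings
    FAKE_VIRT_STATS = {'virt_stat': 10}
    FAKE_VIRT_STATS_COERCED = {'virt_stat': '10'}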

part of blueprint make-resource-tracker-use-objects

Change-Id: I2279f01ad55083c31c663242a2a60a48191e88c3
Paul Murray 2015-03-03 18:52:35 +00:00
parent 78f2a98b50
commit 2ef014eb31
6 changed files with 342 additions and 441 deletions


@ -85,7 +85,7 @@ class ResourceTracker(object):
self.monitors = monitor_handler.choose_monitors(self)
self.ext_resources_handler = \
ext_resources.ResourceHandler(CONF.compute_resources)
self.old_resources = {}
self.old_resources = objects.ComputeNode()
self.scheduler_client = scheduler_client.SchedulerClient()
@utils.synchronized(COMPUTE_RESOURCE_SEMAPHORE)
@ -322,39 +322,18 @@ class ResourceTracker(object):
# there was no local copy and none in the database
# so we need to create a new compute node. This needs
# initial values for the database.
#
# TODO(pmurray) this section will be cleaned up when we
# use the ComputeNode object. Here it is the conductor call
# to compute_node_create() that sets up the compute_node
# dict. That will change to create the compute_node, initialize
# it and then save.
cn = {}
cn.update(resources)
# to be initialised with resource values.
self.compute_node = objects.ComputeNode(context)
# TODO(pmurray) service_id is deprecated but is still a required field.
# This should be removed when the field is changed.
cn['service_id'] = service.id
cn['host'] = self.host
# initialize load stats from existing instances:
self._write_ext_resources(cn)
# NOTE(pmurray): the stats field is stored as a json string. The
# json conversion will be done automatically by the ComputeNode object
# so this can be removed when using ComputeNode.
cn['stats'] = jsonutils.dumps(cn['stats'])
# pci_passthrough_devices may be in resources but are not
# stored in compute nodes
cn.pop('pci_passthrough_devices', None)
self.compute_node = self.conductor_api.compute_node_create(context, cn)
self.compute_node.service_id = service.id
self.compute_node.host = self.host
self._copy_resources(resources)
self.compute_node.create()
LOG.info(_LI('Compute_service record created for '
'%(host)s:%(node)s'),
{'host': self.host, 'node': self.nodename})
# now we have created a compute node we can copy resources
# NOTE(pmurray): this has an unnecessary copy until the above
# is cleaned up.
self._copy_resources(resources)
def _copy_resources(self, resources):
"""Copy resource values to initialise compute_node and related
data structures.
@ -363,9 +342,8 @@ class ResourceTracker(object):
self.stats.clear()
self.stats.digest_stats(resources.get('stats'))
# now copy reset to compute_node
self.compute_node.update(resources)
self.compute_node.pop('pci_passthrough_devices', None)
# now copy rest to compute_node
self.compute_node.update_from_virt_driver(resources)
def _get_host_metrics(self, context, nodename):
"""Get the metrics from monitors and
@ -487,24 +465,19 @@ class ResourceTracker(object):
# from deleted instances.
if self.pci_tracker:
self.pci_tracker.clean_usage(instances, migrations, orphans)
self.compute_node['pci_device_pools'] = self.pci_tracker.stats
dev_pools_obj = self.pci_tracker.stats.to_device_pools_obj()
self.compute_node.pci_device_pools = dev_pools_obj
else:
self.compute_node['pci_device_pools'] = []
self.compute_node.pci_device_pools = objects.PciDevicePoolList()
self._report_final_resource_view()
metrics = self._get_host_metrics(context, self.nodename)
self.compute_node['metrics'] = jsonutils.dumps(metrics)
# TODO(sbauza): Juno compute nodes are missing the host field and
# the Juno ResourceTracker does not set this field, even if
# the ComputeNode object can show it.
# Unfortunately, as we're not yet using ComputeNode.save(), we need
# to add this field in the resources dict until the RT is using
# the ComputeNode.save() method for populating the table.
# tl;dr: To be removed once RT is using ComputeNode.save()
self.compute_node['host'] = self.host
# TODO(pmurray): metrics should not be a json string in ComputeNode,
# but it is. This should be changed in ComputeNode
self.compute_node.metrics = jsonutils.dumps(metrics)
# update the compute_node
self._update(context)
LOG.info(_LI('Compute_service record updated for %(host)s:%(node)s'),
{'host': self.host, 'node': self.nodename})
@ -512,16 +485,14 @@ class ResourceTracker(object):
def _get_compute_node(self, context):
"""Returns compute node for the host and nodename."""
try:
compute = objects.ComputeNode.get_by_host_and_nodename(
return objects.ComputeNode.get_by_host_and_nodename(
context, self.host, self.nodename)
return obj_base.obj_to_primitive(compute)
except exception.NotFound:
LOG.warning(_LW("No compute node record for %(host)s:%(node)s"),
{'host': self.host, 'node': self.nodename})
def _write_ext_resources(self, resources):
resources['stats'] = {}
resources['stats'].update(self.stats)
resources.stats = copy.deepcopy(self.stats)
self.ext_resources_handler.write_resources(resources)
def _get_service(self, context):
@ -577,10 +548,10 @@ class ResourceTracker(object):
including instance calculations and in-progress resource claims. These
values will be exposed via the compute node table to the scheduler.
"""
vcpus = self.compute_node['vcpus']
vcpus = self.compute_node.vcpus
if vcpus:
tcpu = vcpus
ucpu = self.compute_node['vcpus_used']
ucpu = self.compute_node.vcpus_used
LOG.info(_LI("Total usable vcpus: %(tcpu)s, "
"total allocated vcpus: %(ucpu)s"),
{'tcpu': vcpus,
@ -588,7 +559,7 @@ class ResourceTracker(object):
else:
tcpu = 0
ucpu = 0
pci_device_pools = self.compute_node.get('pci_device_pools')
pci_stats = self.compute_node.pci_device_pools
LOG.info(_LI("Final resource view: "
"name=%(node)s "
"phys_ram=%(phys_ram)sMB "
@ -599,17 +570,17 @@ class ResourceTracker(object):
"used_vcpus=%(used_vcpus)s "
"pci_stats=%(pci_stats)s"),
{'node': self.nodename,
'phys_ram': self.compute_node['memory_mb'],
'used_ram': self.compute_node['memory_mb_used'],
'phys_disk': self.compute_node['local_gb'],
'used_disk': self.compute_node['local_gb_used'],
'phys_ram': self.compute_node.memory_mb,
'used_ram': self.compute_node.memory_mb_used,
'phys_disk': self.compute_node.local_gb,
'used_disk': self.compute_node.local_gb_used,
'total_vcpus': tcpu,
'used_vcpus': ucpu,
'pci_stats': pci_device_pools})
'pci_stats': pci_stats})
def _resource_change(self):
"""Check to see if any resouces have changed."""
if cmp(self.compute_node, self.old_resources) != 0:
"""Check to see if any resources have changed."""
if not obj_base.obj_equal_prims(self.compute_node, self.old_resources):
self.old_resources = copy.deepcopy(self.compute_node)
return True
return False
@ -617,54 +588,37 @@ class ResourceTracker(object):
def _update(self, context):
"""Update partial stats locally and populate them to Scheduler."""
self._write_ext_resources(self.compute_node)
# NOTE(pmurray): the stats field is stored as a json string. The
# json conversion will be done automatically by the ComputeNode object
# so this can be removed when using ComputeNode.
self.compute_node['stats'] = jsonutils.dumps(
self.compute_node['stats'])
if not self._resource_change():
return
if "service" in self.compute_node:
del self.compute_node['service']
# Persist the stats to the Scheduler
self._update_resource_stats(context, self.compute_node)
self.scheduler_client.update_resource_stats(self.compute_node)
if self.pci_tracker:
self.pci_tracker.save(context)
def _update_resource_stats(self, context, values):
stats = values.copy()
stats['id'] = self.compute_node['id']
self.scheduler_client.update_resource_stats(
context, (self.host, self.nodename), stats)
def _update_usage(self, usage, sign=1):
mem_usage = usage['memory_mb']
overhead = self.driver.estimate_instance_overhead(usage)
mem_usage += overhead['memory_mb']
self.compute_node['memory_mb_used'] += sign * mem_usage
self.compute_node['local_gb_used'] += sign * usage.get('root_gb', 0)
self.compute_node['local_gb_used'] += (
sign * usage.get('ephemeral_gb', 0))
self.compute_node.memory_mb_used += sign * mem_usage
self.compute_node.local_gb_used += sign * usage.get('root_gb', 0)
self.compute_node.local_gb_used += sign * usage.get('ephemeral_gb', 0)
# free ram and disk may be negative, depending on policy:
self.compute_node['free_ram_mb'] = (
self.compute_node['memory_mb'] -
self.compute_node['memory_mb_used'])
self.compute_node['free_disk_gb'] = (
self.compute_node['local_gb'] -
self.compute_node['local_gb_used'])
self.compute_node.free_ram_mb = (self.compute_node.memory_mb -
self.compute_node.memory_mb_used)
self.compute_node.free_disk_gb = (self.compute_node.local_gb -
self.compute_node.local_gb_used)
self.compute_node['running_vms'] = self.stats.num_instances
self.compute_node.running_vms = self.stats.num_instances
self.ext_resources_handler.update_from_instance(usage, sign)
# Calculate the numa usage
free = sign == -1
updated_numa_topology = hardware.get_host_numa_usage_from_instance(
self.compute_node, usage, free)
self.compute_node['numa_topology'] = updated_numa_topology
self.compute_node.numa_topology = updated_numa_topology
def _update_usage_from_migration(self, context, instance, image_meta,
migration):
@ -725,9 +679,11 @@ class ResourceTracker(object):
self.pci_tracker.update_pci_for_migration(context, instance)
self._update_usage(usage)
if self.pci_tracker:
self.compute_node['pci_device_pools'] = self.pci_tracker.stats
obj = self.pci_tracker.stats.to_device_pools_obj()
self.compute_node.pci_device_pools = obj
else:
self.compute_node['pci_device_pools'] = []
obj = objects.PciDevicePoolList()
self.compute_node.pci_device_pools = obj
self.tracked_migrations[uuid] = (migration, itype)
def _update_usage_from_migrations(self, context, migrations):
@ -795,11 +751,12 @@ class ResourceTracker(object):
# new instance, update compute node resource usage:
self._update_usage(instance, sign=sign)
self.compute_node['current_workload'] = self.stats.calculate_workload()
self.compute_node.current_workload = self.stats.calculate_workload()
if self.pci_tracker:
self.compute_node['pci_device_pools'] = self.pci_tracker.stats
obj = self.pci_tracker.stats.to_device_pools_obj()
self.compute_node.pci_device_pools = obj
else:
self.compute_node['pci_device_pools'] = []
self.compute_node.pci_device_pools = objects.PciDevicePoolList()
def _update_usage_from_instances(self, context, instances):
"""Calculate resource usage based on instance utilization. This is
@ -810,16 +767,14 @@ class ResourceTracker(object):
self.tracked_instances.clear()
# set some initial values, reserve room for host/hypervisor:
self.compute_node['local_gb_used'] = CONF.reserved_host_disk_mb / 1024
self.compute_node['memory_mb_used'] = CONF.reserved_host_memory_mb
self.compute_node['free_ram_mb'] = (
self.compute_node['memory_mb'] -
self.compute_node['memory_mb_used'])
self.compute_node['free_disk_gb'] = (
self.compute_node['local_gb'] -
self.compute_node['local_gb_used'])
self.compute_node['current_workload'] = 0
self.compute_node['running_vms'] = 0
self.compute_node.local_gb_used = CONF.reserved_host_disk_mb / 1024
self.compute_node.memory_mb_used = CONF.reserved_host_memory_mb
self.compute_node.free_ram_mb = (self.compute_node.memory_mb -
self.compute_node.memory_mb_used)
self.compute_node.free_disk_gb = (self.compute_node.local_gb -
self.compute_node.local_gb_used)
self.compute_node.current_workload = 0
self.compute_node.running_vms = 0
# Reset values for extended resources
self.ext_resources_handler.reset_resources(self.compute_node,


@ -57,8 +57,8 @@ class SchedulerClient(object):
def delete_aggregate(self, context, aggregate):
self.queryclient.delete_aggregate(context, aggregate)
def update_resource_stats(self, context, name, stats):
self.reportclient.update_resource_stats(context, name, stats)
def update_resource_stats(self, compute_node):
self.reportclient.update_resource_stats(compute_node)
def update_instance_info(self, context, host_name, instance_info):
self.queryclient.update_instance_info(context, host_name,


@ -13,62 +13,13 @@
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from oslo_serialization import jsonutils
from nova import exception
from nova.i18n import _LI
from nova import objects
LOG = logging.getLogger(__name__)
class SchedulerReportClient(object):
"""Client class for updating the scheduler."""
def update_resource_stats(self, context, name, stats):
"""Creates or updates stats for the desired service.
def update_resource_stats(self, compute_node):
"""Creates or updates stats for the supplied compute node.
:param context: local context
:param name: name of resource to update
:type name: immutable (str or tuple)
:param stats: updated stats to send to scheduler
:type stats: dict
:param compute_node: updated nova.objects.ComputeNode to report
"""
if 'id' in stats:
compute_node_id = stats['id']
updates = stats.copy()
del updates['id']
else:
raise exception.ComputeHostNotCreated(name=str(name))
if 'stats' in updates:
# NOTE(danms): This is currently pre-serialized for us,
# which we don't want if we're using the object. So,
# fix it here, and follow up with removing this when the
# RT is converted to proper objects.
updates['stats'] = jsonutils.loads(updates['stats'])
compute_node = objects.ComputeNode(context=context,
id=compute_node_id)
compute_node.obj_reset_changes()
for k, v in updates.items():
if k == 'pci_device_pools':
# NOTE(danms): Since the updates are actually the result of
# a obj_to_primitive() on some real objects, we need to convert
# back to a real object (not from_dict() or _from_db_object(),
# which expect a db-formatted object) but just an attr-based
# reconstruction. When we start getting a ComputeNode from
# scheduler this "bandage" can go away.
if v:
devpools = [objects.PciDevicePool.from_dict(x) for x in v]
else:
devpools = []
compute_node.pci_device_pools = objects.PciDevicePoolList(
objects=devpools)
else:
setattr(compute_node, k, v)
compute_node.save()
LOG.info(_LI('Compute_service record updated for '
'%s') % str(name))


@ -16,6 +16,7 @@
"""Tests for compute resource tracking."""
import copy
import six
import uuid
import mock
@ -31,6 +32,7 @@ from nova import db
from nova import exception
from nova import objects
from nova.objects import base as obj_base
from nova.objects import pci_device_pool
from nova import rpc
from nova import test
from nova.tests.unit.compute.monitors import test_monitors
@ -57,6 +59,7 @@ EPHEMERAL_GB = 1
FAKE_VIRT_LOCAL_GB = ROOT_GB + EPHEMERAL_GB
FAKE_VIRT_VCPUS = 1
FAKE_VIRT_STATS = {'virt_stat': 10}
FAKE_VIRT_STATS_COERCED = {'virt_stat': '10'}
FAKE_VIRT_STATS_JSON = jsonutils.dumps(FAKE_VIRT_STATS)
RESOURCE_NAMES = ['vcpu']
CONF = cfg.CONF
@ -227,6 +230,7 @@ class BaseTestCase(test.TestCase):
self.update_call_count = 0
def _create_compute_node(self, values=None):
# This creates a db representation of a compute_node.
compute = {
"id": 1,
"service_id": 1,
@ -259,6 +263,16 @@ class BaseTestCase(test.TestCase):
compute.update(values)
return compute
def _create_compute_node_obj(self, context):
# Use the db representation of a compute node returned
# by _create_compute_node() to create an equivalent compute
# node object.
compute = self._create_compute_node()
compute_obj = objects.ComputeNode()
compute_obj = objects.ComputeNode._from_db_object(
context, compute_obj, compute)
return compute_obj
def _create_service(self, host="fakehost", compute=None):
if compute:
compute = [compute]
@ -411,7 +425,7 @@ class BaseTestCase(test.TestCase):
driver = self._driver()
tracker = resource_tracker.ResourceTracker(host, driver, node)
tracker.compute_node = self._create_compute_node()
tracker.compute_node = self._create_compute_node_obj(self.context)
tracker.ext_resources_handler = \
resources.ResourceHandler(RESOURCE_NAMES, True)
return tracker
@ -556,8 +570,6 @@ class BaseTrackerTestCase(BaseTestCase):
patcher = pci_fakes.fake_pci_whitelist()
self.addCleanup(patcher.stop)
self.stubs.Set(self.tracker.scheduler_client, 'update_resource_stats',
self._fake_compute_node_update)
self._init_tracker()
self.limits = self._limits()
@ -641,6 +653,19 @@ class BaseTrackerTestCase(BaseTestCase):
"%(expected)s, but got: %(got)s" %
{'expected': expected, 'got': got})
def assertEqualPciDevicePool(self, expected, observed):
self.assertEqual(expected.product_id, observed.product_id)
self.assertEqual(expected.vendor_id, observed.vendor_id)
self.assertEqual(expected.tags, observed.tags)
self.assertEqual(expected.count, observed.count)
def assertEqualPciDevicePoolList(self, expected, observed):
ex_objs = expected.objects
ob_objs = observed.objects
self.assertEqual(len(ex_objs), len(ob_objs))
for i in range(len(ex_objs)):
self.assertEqualPciDevicePool(ex_objs[i], ob_objs[i])
def _assert(self, value, field, tracker=None):
if tracker is None:
@ -663,12 +688,12 @@ class TrackerTestCase(BaseTrackerTestCase):
def test_free_ram_resource_value(self):
driver = FakeVirtDriver()
mem_free = driver.memory_mb - driver.memory_mb_used
self.assertEqual(mem_free, self.tracker.compute_node['free_ram_mb'])
self.assertEqual(mem_free, self.tracker.compute_node.free_ram_mb)
def test_free_disk_resource_value(self):
driver = FakeVirtDriver()
mem_free = driver.local_gb - driver.local_gb_used
self.assertEqual(mem_free, self.tracker.compute_node['free_disk_gb'])
self.assertEqual(mem_free, self.tracker.compute_node.free_disk_gb)
def test_update_compute_node(self):
self.assertFalse(self.tracker.disabled)
@ -687,9 +712,10 @@ class TrackerTestCase(BaseTrackerTestCase):
self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb')
self.assertFalse(self.tracker.disabled)
self.assertEqual(0, self.tracker.compute_node['current_workload'])
self.assertEqual(driver.pci_stats,
self.tracker.compute_node['pci_device_pools'])
self.assertEqual(0, self.tracker.compute_node.current_workload)
expected = pci_device_pool.from_pci_stats(driver.pci_stats)
self.assertEqual(expected,
self.tracker.compute_node.pci_device_pools)
def test_set_instance_host_and_node(self):
inst = objects.Instance()
@ -705,22 +731,30 @@ class SchedulerClientTrackerTestCase(BaseTrackerTestCase):
def setUp(self):
super(SchedulerClientTrackerTestCase, self).setUp()
self.tracker.scheduler_client.update_resource_stats = mock.Mock(
side_effect=self._fake_compute_node_update)
self.tracker.scheduler_client.update_resource_stats = mock.Mock()
def test_update_resource(self):
# change a compute node value to simulate a change
self.tracker.compute_node['local_gb_used'] += 1
expected = copy.deepcopy(self.tracker.compute_node)
# NOTE(pmurray): we are not doing a full pass through the resource
# trackers update path, so safest to do two updates and look for
# differences then to rely on the initial state being the same
# as an update
urs_mock = self.tracker.scheduler_client.update_resource_stats
self.tracker._update(self.context)
self.tracker.scheduler_client.update_resource_stats.\
assert_called_once_with(self.context,
("fakehost", "fakenode"),
expected)
urs_mock.reset_mock()
# change a compute node value to simulate a change
self.tracker.compute_node.local_gb_used += 1
self.tracker._update(self.context)
urs_mock.assert_called_once_with(self.tracker.compute_node)
def test_no_update_resource(self):
# NOTE(pmurray): we are not doing a full pass through the resource
# trackers update path, so safest to do two updates and look for
# differences then to rely on the initial state being the same
# as an update
self.tracker._update(self.context)
update = self.tracker.scheduler_client.update_resource_stats
update.reset_mock()
self.tracker._update(self.context)
self.assertFalse(update.called, "update_resource_stats should not be "
"called when there is no change")
@ -744,13 +778,10 @@ class TrackerPciStatsTestCase(BaseTrackerTestCase):
self._assert(FAKE_VIRT_MEMORY_MB, 'free_ram_mb')
self._assert(FAKE_VIRT_LOCAL_GB, 'free_disk_gb')
self.assertFalse(self.tracker.disabled)
self.assertEqual(0, self.tracker.compute_node['current_workload'])
# NOTE(danms): PciDeviceStats only supports iteration, so we have to
# listify it before we can examine the contents by index.
pools = list(self.tracker.compute_node['pci_device_pools'])
self.assertEqual(driver.pci_stats[0]['product_id'],
pools[0]['product_id'])
self.assertEqual(0, self.tracker.compute_node.current_workload)
expected_pools = pci_device_pool.from_pci_stats(driver.pci_stats)
observed_pools = self.tracker.compute_node.pci_device_pools
self.assertEqualPciDevicePoolList(expected_pools, observed_pools)
def _driver(self):
return FakeVirtDriver(pci_support=True)
@ -758,18 +789,11 @@ class TrackerPciStatsTestCase(BaseTrackerTestCase):
class TrackerExtraResourcesTestCase(BaseTrackerTestCase):
def setUp(self):
super(TrackerExtraResourcesTestCase, self).setUp()
self.driver = self._driver()
def _driver(self):
return FakeVirtDriver()
def test_set_empty_ext_resources(self):
resources = self.driver.get_available_resource(self.tracker.nodename)
self.assertNotIn('stats', resources)
resources = self._create_compute_node_obj(self.context)
del resources.stats
self.tracker._write_ext_resources(resources)
self.assertIn('stats', resources)
self.assertEqual({}, resources.stats)
def test_set_extra_resources(self):
def fake_write_resources(resources):
@ -780,12 +804,13 @@ class TrackerExtraResourcesTestCase(BaseTrackerTestCase):
'write_resources',
fake_write_resources)
resources = self.driver.get_available_resource(self.tracker.nodename)
resources = self._create_compute_node_obj(self.context)
del resources.stats
self.tracker._write_ext_resources(resources)
expected = {"resA": "123", "resB": 12}
expected = {"resA": "123", "resB": "12"}
self.assertEqual(sorted(expected),
sorted(resources['stats']))
sorted(resources.stats))
class InstanceClaimTestCase(BaseTrackerTestCase):
@ -910,13 +935,13 @@ class InstanceClaimTestCase(BaseTrackerTestCase):
with mock.patch.object(instance, 'save'):
self.tracker.instance_claim(self.context, instance, limits)
self.assertEqual(memory_mb + FAKE_VIRT_MEMORY_OVERHEAD,
self.tracker.compute_node['memory_mb_used'])
self.tracker.compute_node.memory_mb_used)
self.assertEqualNUMAHostTopology(
claim_topology,
objects.NUMATopology.obj_from_db_obj(
self.compute['numa_topology']))
self.assertEqual(root_gb * 2,
self.tracker.compute_node['local_gb_used'])
self.tracker.compute_node.local_gb_used)
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
@ -938,11 +963,11 @@ class InstanceClaimTestCase(BaseTrackerTestCase):
pass
self.assertEqual(2 * (flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD),
self.tracker.compute_node['memory_mb_used'])
self.tracker.compute_node.memory_mb_used)
self.assertEqual(2 * (flavor['root_gb'] + flavor['ephemeral_gb']),
self.tracker.compute_node['local_gb_used'])
self.tracker.compute_node.local_gb_used)
self.assertEqual(2 * flavor['vcpus'],
self.tracker.compute_node['vcpus_used'])
self.tracker.compute_node.vcpus_used)
self.assertEqualNUMAHostTopology(
claim_topology,
@ -962,8 +987,8 @@ class InstanceClaimTestCase(BaseTrackerTestCase):
except test.TestingException:
pass
self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
self.assertEqual(0, self.tracker.compute_node.memory_mb_used)
self.assertEqual(0, self.tracker.compute_node.local_gb_used)
self.assertEqual(0, self.compute['memory_mb_used'])
self.assertEqual(0, self.compute['local_gb_used'])
self.assertEqualNUMAHostTopology(
@ -986,9 +1011,9 @@ class InstanceClaimTestCase(BaseTrackerTestCase):
with self.tracker.instance_claim(self.context, instance):
# <insert exciting things that utilize resources>
self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
self.tracker.compute_node['memory_mb_used'])
self.tracker.compute_node.memory_mb_used)
self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
self.tracker.compute_node['local_gb_used'])
self.tracker.compute_node.local_gb_used)
self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
self.compute['memory_mb_used'])
self.assertEqualNUMAHostTopology(
@ -1003,9 +1028,9 @@ class InstanceClaimTestCase(BaseTrackerTestCase):
mock_get_all.return_value = [instance]
self.tracker.update_available_resource(self.context)
self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
self.tracker.compute_node['memory_mb_used'])
self.tracker.compute_node.memory_mb_used)
self.assertEqual(flavor['root_gb'] + flavor['ephemeral_gb'],
self.tracker.compute_node['local_gb_used'])
self.tracker.compute_node.local_gb_used)
self.assertEqual(flavor['memory_mb'] + FAKE_VIRT_MEMORY_OVERHEAD,
self.compute['memory_mb_used'])
self.assertEqualNUMAHostTopology(
@ -1023,49 +1048,49 @@ class InstanceClaimTestCase(BaseTrackerTestCase):
with self.tracker.instance_claim(self.context, instance):
pass
self.assertEqual(1, self.tracker.compute_node['current_workload'])
self.assertEqual(1, self.tracker.compute_node.current_workload)
instance['vm_state'] = vm_states.ACTIVE
instance['task_state'] = None
instance['host'] = 'fakehost'
self.tracker.update_usage(self.context, instance)
self.assertEqual(0, self.tracker.compute_node['current_workload'])
self.assertEqual(0, self.tracker.compute_node.current_workload)
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.Instance.save')
def test_cpu_stats(self, mock_save, mock_get):
limits = {'disk_gb': 100, 'memory_mb': 100}
self.assertEqual(0, self.tracker.compute_node['vcpus_used'])
self.assertEqual(0, self.tracker.compute_node.vcpus_used)
vcpus = 1
instance = self._fake_instance_obj(vcpus=vcpus)
# should not do anything until a claim is made:
self.tracker.update_usage(self.context, instance)
self.assertEqual(0, self.tracker.compute_node['vcpus_used'])
self.assertEqual(0, self.tracker.compute_node.vcpus_used)
with self.tracker.instance_claim(self.context, instance, limits):
pass
self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used'])
self.assertEqual(vcpus, self.tracker.compute_node.vcpus_used)
# instance state can change without modifying vcpus in use:
instance['task_state'] = task_states.SCHEDULING
self.tracker.update_usage(self.context, instance)
self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used'])
self.assertEqual(vcpus, self.tracker.compute_node.vcpus_used)
add_vcpus = 10
vcpus += add_vcpus
instance = self._fake_instance_obj(vcpus=add_vcpus)
with self.tracker.instance_claim(self.context, instance, limits):
pass
self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used'])
self.assertEqual(vcpus, self.tracker.compute_node.vcpus_used)
instance['vm_state'] = vm_states.DELETED
self.tracker.update_usage(self.context, instance)
vcpus -= add_vcpus
self.assertEqual(vcpus, self.tracker.compute_node['vcpus_used'])
self.assertEqual(vcpus, self.tracker.compute_node.vcpus_used)
def test_skip_deleted_instances(self):
# ensure that the audit process skips instances that have vm_state
@ -1073,8 +1098,8 @@ class InstanceClaimTestCase(BaseTrackerTestCase):
self._fake_instance_obj(vm_state=vm_states.DELETED, host=self.host)
self.tracker.update_available_resource(self.context)
self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
self.assertEqual(0, self.tracker.compute_node.memory_mb_used)
self.assertEqual(0, self.tracker.compute_node.local_gb_used)
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
def test_deleted_instances_with_migrations(self, mock_migration_list):
@ -1083,8 +1108,8 @@ class InstanceClaimTestCase(BaseTrackerTestCase):
instance_uuid='invalid')
mock_migration_list.return_value = [migration]
self.tracker.update_available_resource(self.context)
self.assertEqual(0, self.tracker.compute_node['memory_mb_used'])
self.assertEqual(0, self.tracker.compute_node['local_gb_used'])
self.assertEqual(0, self.tracker.compute_node.memory_mb_used)
self.assertEqual(0, self.tracker.compute_node.local_gb_used)
mock_migration_list.assert_called_once_with(self.context,
"fakehost",
"fakenode")
@ -1237,7 +1262,7 @@ class OrphanTestCase(BaseTrackerTestCase):
def test_usage(self):
self.assertEqual(2 * FAKE_VIRT_MEMORY_WITH_OVERHEAD,
self.tracker.compute_node['memory_mb_used'])
self.tracker.compute_node.memory_mb_used)
def test_find(self):
# create one legit instance and verify the 2 orphans remain
@ -1369,30 +1394,28 @@ class StatsDictTestCase(BaseTrackerTestCase):
def _driver(self):
return FakeVirtDriver(stats=FAKE_VIRT_STATS)
def _get_stats(self):
return jsonutils.loads(self.tracker.compute_node['stats'])
def test_virt_stats(self):
# start with virt driver stats
stats = self._get_stats()
self.assertEqual(FAKE_VIRT_STATS, stats)
stats = self.tracker.compute_node.stats
self.assertEqual(FAKE_VIRT_STATS_COERCED, stats)
# adding an instance should keep virt driver stats
self._fake_instance_obj(vm_state=vm_states.ACTIVE, host=self.host)
self.tracker.update_available_resource(self.context)
stats = self._get_stats()
expected_stats = {}
expected_stats.update(FAKE_VIRT_STATS)
expected_stats.update(self.tracker.stats)
stats = self.tracker.compute_node.stats
# compute node stats are coerced to strings
expected_stats = copy.deepcopy(FAKE_VIRT_STATS_COERCED)
for k, v in self.tracker.stats.iteritems():
expected_stats[k] = six.text_type(v)
self.assertEqual(expected_stats, stats)
# removing the instances should keep only virt driver stats
self._instances = {}
self.tracker.update_available_resource(self.context)
stats = self._get_stats()
self.assertEqual(FAKE_VIRT_STATS, stats)
stats = self.tracker.compute_node.stats
self.assertEqual(FAKE_VIRT_STATS_COERCED, stats)
class StatsJsonTestCase(BaseTrackerTestCase):
@ -1402,30 +1425,28 @@ class StatsJsonTestCase(BaseTrackerTestCase):
def _driver(self):
return FakeVirtDriver(stats=FAKE_VIRT_STATS_JSON)
def _get_stats(self):
return jsonutils.loads(self.tracker.compute_node['stats'])
def test_virt_stats(self):
# start with virt driver stats
stats = self._get_stats()
self.assertEqual(FAKE_VIRT_STATS, stats)
stats = self.tracker.compute_node.stats
self.assertEqual(FAKE_VIRT_STATS_COERCED, stats)
# adding an instance should keep virt driver stats
# and add rt stats
self._fake_instance_obj(vm_state=vm_states.ACTIVE, host=self.host)
self.tracker.update_available_resource(self.context)
stats = self._get_stats()
expected_stats = {}
expected_stats.update(FAKE_VIRT_STATS)
expected_stats.update(self.tracker.stats)
stats = self.tracker.compute_node.stats
# compute node stats are coerced to strings
expected_stats = copy.deepcopy(FAKE_VIRT_STATS_COERCED)
for k, v in self.tracker.stats.iteritems():
expected_stats[k] = six.text_type(v)
self.assertEqual(expected_stats, stats)
# removing the instances should keep only virt driver stats
self._instances = {}
self.tracker.update_available_resource(self.context)
stats = self._get_stats()
self.assertEqual(FAKE_VIRT_STATS, stats)
stats = self.tracker.compute_node.stats
self.assertEqual(FAKE_VIRT_STATS_COERCED, stats)
class StatsInvalidJsonTestCase(BaseTrackerTestCase):


@ -14,16 +14,19 @@ import contextlib
import copy
import mock
from oslo_serialization import jsonutils
from oslo_utils import units
from nova.compute import arch
from nova.compute import claims
from nova.compute import hv_type
from nova.compute import power_state
from nova.compute import resource_tracker
from nova.compute import task_states
from nova.compute import vm_mode
from nova.compute import vm_states
from nova import exception as exc
from nova import objects
from nova.objects import base as obj_base
from nova import test
_VIRT_DRIVER_AVAIL_RESOURCES = {
@ -41,39 +44,40 @@ _VIRT_DRIVER_AVAIL_RESOURCES = {
}
_COMPUTE_NODE_FIXTURES = [
{
'id': 1,
objects.ComputeNode(
id=1,
# NOTE(jaypipes): Will be removed with the
# detach-compute-node-from-service blueprint
# implementation.
'service_id': 1,
'host': 'fake-host',
'service': None,
'vcpus': _VIRT_DRIVER_AVAIL_RESOURCES['vcpus'],
'memory_mb': _VIRT_DRIVER_AVAIL_RESOURCES['memory_mb'],
'local_gb': _VIRT_DRIVER_AVAIL_RESOURCES['local_gb'],
'vcpus_used': _VIRT_DRIVER_AVAIL_RESOURCES['vcpus_used'],
'memory_mb_used': _VIRT_DRIVER_AVAIL_RESOURCES['memory_mb_used'],
'local_gb_used': _VIRT_DRIVER_AVAIL_RESOURCES['local_gb_used'],
'hypervisor_type': 'fake',
'hypervisor_version': 0,
'hypervisor_hostname': 'fake-host',
'free_ram_mb': (_VIRT_DRIVER_AVAIL_RESOURCES['memory_mb'] -
_VIRT_DRIVER_AVAIL_RESOURCES['memory_mb_used']),
'free_disk_gb': (_VIRT_DRIVER_AVAIL_RESOURCES['local_gb'] -
_VIRT_DRIVER_AVAIL_RESOURCES['local_gb_used']),
'current_workload': 0,
'running_vms': 0,
'cpu_info': '{}',
'disk_available_least': 0,
'host_ip': 'fake-ip',
'supported_instances': None,
'metrics': None,
'pci_stats': None,
'extra_resources': None,
'stats': '{}',
'numa_topology': None
},
service_id=1,
host='fake-host',
vcpus=_VIRT_DRIVER_AVAIL_RESOURCES['vcpus'],
memory_mb=_VIRT_DRIVER_AVAIL_RESOURCES['memory_mb'],
local_gb=_VIRT_DRIVER_AVAIL_RESOURCES['local_gb'],
vcpus_used=_VIRT_DRIVER_AVAIL_RESOURCES['vcpus_used'],
memory_mb_used=_VIRT_DRIVER_AVAIL_RESOURCES['memory_mb_used'],
local_gb_used=_VIRT_DRIVER_AVAIL_RESOURCES['local_gb_used'],
hypervisor_type='fake',
hypervisor_version=0,
hypervisor_hostname='fake-host',
free_ram_mb=(_VIRT_DRIVER_AVAIL_RESOURCES['memory_mb'] -
_VIRT_DRIVER_AVAIL_RESOURCES['memory_mb_used']),
free_disk_gb=(_VIRT_DRIVER_AVAIL_RESOURCES['local_gb'] -
_VIRT_DRIVER_AVAIL_RESOURCES['local_gb_used']),
current_workload=0,
running_vms=0,
cpu_info='{}',
disk_available_least=0,
host_ip='1.1.1.1',
supported_hv_specs=[
objects.HVSpec.from_list([arch.I686, hv_type.KVM, vm_mode.HVM])
],
metrics=None,
pci_device_pools=None,
extra_resources=None,
stats={},
numa_topology=None,
),
]
_SERVICE_FIXTURE = objects.Service(
@ -354,7 +358,7 @@ class BaseTestCase(test.NoDBTestCase):
def setUp(self):
super(BaseTestCase, self).setUp()
self.rt = None
self.flags(my_ip='fake-ip')
self.flags(my_ip='1.1.1.1')
def _setup_rt(self, virt_resources=_VIRT_DRIVER_AVAIL_RESOURCES,
estimate_overhead=overhead_zero):
@ -410,7 +414,7 @@ class TestUpdateAvailableResources(BaseTestCase):
# host is added in update_available_resources()
# before calling _update()
'host': 'fake-host',
'host_ip': 'fake-ip',
'host_ip': '1.1.1.1',
'numa_topology': None,
'metrics': '[]',
'cpu_info': '',
@ -420,7 +424,7 @@ class TestUpdateAvailableResources(BaseTestCase):
'local_gb': 6,
'free_ram_mb': 512,
'memory_mb_used': 0,
'pci_device_pools': [],
'pci_device_pools': objects.PciDevicePoolList(),
'vcpus_used': 0,
'hypervisor_type': 'fake',
'local_gb_used': 0,
@ -430,7 +434,8 @@ class TestUpdateAvailableResources(BaseTestCase):
'running_vms': 0
})
update_mock.assert_called_once_with(mock.sentinel.ctx)
self.assertEqual(expected_resources, self.rt.compute_node)
self.assertTrue(obj_base.obj_equal_prims(expected_resources,
self.rt.compute_node))
@mock.patch('nova.objects.Service.get_by_compute_host')
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
@ -456,7 +461,7 @@ class TestUpdateAvailableResources(BaseTestCase):
# host is added in update_available_resources()
# before calling _update()
'host': 'fake-host',
'host_ip': 'fake-ip',
'host_ip': '1.1.1.1',
'numa_topology': None,
'metrics': '[]',
'cpu_info': '',
@ -466,7 +471,7 @@ class TestUpdateAvailableResources(BaseTestCase):
'local_gb': 6,
'free_ram_mb': 0, # 512MB avail - 512MB reserved
'memory_mb_used': 512, # 0MB used + 512MB reserved
'pci_device_pools': [],
'pci_device_pools': objects.PciDevicePoolList(),
'vcpus_used': 0,
'hypervisor_type': 'fake',
'local_gb_used': 1, # 0GB used + 1 GB reserved
@ -476,7 +481,8 @@ class TestUpdateAvailableResources(BaseTestCase):
'running_vms': 0
})
update_mock.assert_called_once_with(mock.sentinel.ctx)
self.assertEqual(expected_resources, self.rt.compute_node)
self.assertTrue(obj_base.obj_equal_prims(expected_resources,
self.rt.compute_node))
@mock.patch('nova.objects.Service.get_by_compute_host')
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
@ -502,7 +508,7 @@ class TestUpdateAvailableResources(BaseTestCase):
# host is added in update_available_resources()
# before calling _update()
'host': 'fake-host',
'host_ip': 'fake-ip',
'host_ip': '1.1.1.1',
'numa_topology': None,
'metrics': '[]',
'cpu_info': '',
@ -512,7 +518,7 @@ class TestUpdateAvailableResources(BaseTestCase):
'local_gb': 6,
'free_ram_mb': 384, # 512 - 128 used
'memory_mb_used': 128,
'pci_device_pools': [],
'pci_device_pools': objects.PciDevicePoolList(),
# NOTE(jaypipes): Due to the design of the ERT, which now is used
# track VCPUs, the actual used VCPUs isn't
# "written" to the resources dictionary that is
@ -533,7 +539,8 @@ class TestUpdateAvailableResources(BaseTestCase):
'running_vms': 1 # One active instance
})
update_mock.assert_called_once_with(mock.sentinel.ctx)
self.assertEqual(expected_resources, self.rt.compute_node)
self.assertTrue(obj_base.obj_equal_prims(expected_resources,
self.rt.compute_node))
@mock.patch('nova.objects.Service.get_by_compute_host')
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
@ -579,7 +586,7 @@ class TestUpdateAvailableResources(BaseTestCase):
# host is added in update_available_resources()
# before calling _update()
'host': 'fake-host',
'host_ip': 'fake-ip',
'host_ip': '1.1.1.1',
'numa_topology': None,
'metrics': '[]',
'cpu_info': '',
@ -589,7 +596,7 @@ class TestUpdateAvailableResources(BaseTestCase):
'local_gb': 6,
'free_ram_mb': 448, # 512 - 64 orphaned usage
'memory_mb_used': 64,
'pci_device_pools': [],
'pci_device_pools': objects.PciDevicePoolList(),
'vcpus_used': 0,
'hypervisor_type': 'fake',
'local_gb_used': 0,
@ -601,7 +608,8 @@ class TestUpdateAvailableResources(BaseTestCase):
'running_vms': 0
})
update_mock.assert_called_once_with(mock.sentinel.ctx)
self.assertEqual(expected_resources, self.rt.compute_node)
self.assertTrue(obj_base.obj_equal_prims(expected_resources,
self.rt.compute_node))
@mock.patch('nova.objects.Service.get_by_compute_host')
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
@ -644,7 +652,7 @@ class TestUpdateAvailableResources(BaseTestCase):
# host is added in update_available_resources()
# before calling _update()
'host': 'fake-host',
'host_ip': 'fake-ip',
'host_ip': '1.1.1.1',
'numa_topology': None,
'metrics': '[]',
'cpu_info': '',
@ -654,7 +662,7 @@ class TestUpdateAvailableResources(BaseTestCase):
'local_gb': 6,
'free_ram_mb': 384, # 512 total - 128 for possible revert of orig
'memory_mb_used': 128, # 128 possible revert amount
'pci_device_pools': [],
'pci_device_pools': objects.PciDevicePoolList(),
'vcpus_used': 0,
'hypervisor_type': 'fake',
'local_gb_used': 1,
@ -664,7 +672,8 @@ class TestUpdateAvailableResources(BaseTestCase):
'running_vms': 0
})
update_mock.assert_called_once_with(mock.sentinel.ctx)
self.assertEqual(expected_resources, self.rt.compute_node)
self.assertTrue(obj_base.obj_equal_prims(expected_resources,
self.rt.compute_node))
@mock.patch('nova.objects.Service.get_by_compute_host')
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
@ -703,7 +712,7 @@ class TestUpdateAvailableResources(BaseTestCase):
# host is added in update_available_resources()
# before calling _update()
'host': 'fake-host',
'host_ip': 'fake-ip',
'host_ip': '1.1.1.1',
'numa_topology': None,
'metrics': '[]',
'cpu_info': '',
@ -713,7 +722,7 @@ class TestUpdateAvailableResources(BaseTestCase):
'local_gb': 6,
'free_ram_mb': 256, # 512 total - 256 for possible confirm of new
'memory_mb_used': 256, # 256 possible confirmed amount
'pci_device_pools': [],
'pci_device_pools': objects.PciDevicePoolList(),
'vcpus_used': 0, # See NOTE(jaypipes) above about why this is 0
'hypervisor_type': 'fake',
'local_gb_used': 5,
@ -723,7 +732,8 @@ class TestUpdateAvailableResources(BaseTestCase):
'running_vms': 0
})
update_mock.assert_called_once_with(mock.sentinel.ctx)
self.assertEqual(expected_resources, self.rt.compute_node)
self.assertTrue(obj_base.obj_equal_prims(expected_resources,
self.rt.compute_node))
@mock.patch('nova.objects.Service.get_by_compute_host')
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
@ -766,7 +776,7 @@ class TestUpdateAvailableResources(BaseTestCase):
# host is added in update_available_resources()
# before calling _update()
'host': 'fake-host',
'host_ip': 'fake-ip',
'host_ip': '1.1.1.1',
'numa_topology': None,
'metrics': '[]',
'cpu_info': '',
@ -778,7 +788,7 @@ class TestUpdateAvailableResources(BaseTestCase):
# 512 total - 128 existing - 256 new flav - 128 old flav
'free_ram_mb': 0,
'memory_mb_used': 512, # 128 exist + 256 new flav + 128 old flav
'pci_device_pools': [],
'pci_device_pools': objects.PciDevicePoolList(),
# See NOTE(jaypipes) above for reason why this isn't accurate until
# _update() is called.
'vcpus_used': 0,
@ -790,7 +800,8 @@ class TestUpdateAvailableResources(BaseTestCase):
'running_vms': 2
})
update_mock.assert_called_once_with(mock.sentinel.ctx)
self.assertEqual(expected_resources, self.rt.compute_node)
self.assertTrue(obj_base.obj_equal_prims(expected_resources,
self.rt.compute_node))
class TestInitComputeNode(BaseTestCase):
@ -848,25 +859,19 @@ class TestInitComputeNode(BaseTestCase):
self.assertFalse(create_mock.called)
self.assertFalse(self.rt.disabled)
@mock.patch('nova.objects.ComputeNode.create')
@mock.patch('nova.objects.Service.get_by_compute_host')
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
def test_compute_node_created_on_empty(self, get_mock, service_mock):
def test_compute_node_created_on_empty(self, get_mock, service_mock,
create_mock):
self._setup_rt()
def fake_create_node(_ctx, resources):
res = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
res.update(resources)
return res
capi = self.cond_api_mock
create_node_mock = capi.compute_node_create
create_node_mock.side_effect = fake_create_node
service_obj = _SERVICE_FIXTURE
service_mock.return_value = service_obj
get_mock.side_effect = exc.NotFound
resources = {
'host_ip': 'fake-ip',
'host_ip': '1.1.1.1',
'numa_topology': None,
'metrics': '[]',
'cpu_info': '',
@ -886,22 +891,28 @@ class TestInitComputeNode(BaseTestCase):
'running_vms': 0,
'pci_passthrough_devices': '[]'
}
# We need to do this because _update() actually modifies
# the supplied dictionary :(
expected_resources = copy.deepcopy(resources)
# NOTE(pmurray): This will go away when the ComputeNode object is used
expected_resources['stats'] = '{}'
# NOTE(pmurray): no initial values are calculated before the initial
# creation. vcpus is derived from ERT resources, so this means its
# value will be 0
expected_resources['vcpus'] = 0
# NOTE(jaypipes): This will go away once
# detach-compute-node-from-service blueprint is done
expected_resources['service_id'] = 1
# NOTE(sbauza): ResourceTracker adds host field
expected_resources['host'] = 'fake-host'
# pci_passthrough_devices should is not held in compute nodes
del expected_resources['pci_passthrough_devices']
# The expected compute represents the initial values used
# when creating a compute node.
expected_compute = objects.ComputeNode(
host_ip=resources['host_ip'],
vcpus=resources['vcpus'],
memory_mb=resources['memory_mb'],
local_gb=resources['local_gb'],
cpu_info=resources['cpu_info'],
vcpus_used=resources['vcpus_used'],
memory_mb_used=resources['memory_mb_used'],
local_gb_used=resources['local_gb_used'],
numa_topology=resources['numa_topology'],
hypervisor_type=resources['hypervisor_type'],
hypervisor_version=resources['hypervisor_version'],
hypervisor_hostname=resources['hypervisor_hostname'],
# NOTE(jaypipes): This will go away once
# detach-compute-node-from-service blueprint
# is done
service_id=1,
# NOTE(sbauza): ResourceTracker adds host field
host='fake-host',
)
self.rt._init_compute_node(mock.sentinel.ctx, resources)
@ -909,8 +920,9 @@ class TestInitComputeNode(BaseTestCase):
service_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host')
get_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
'fake-node')
create_node_mock.assert_called_once_with(mock.sentinel.ctx,
expected_resources)
create_mock.assert_called_once_with()
self.assertTrue(obj_base.obj_equal_prims(expected_compute,
self.rt.compute_node))
class TestUpdateComputeNode(BaseTestCase):
@ -919,43 +931,34 @@ class TestUpdateComputeNode(BaseTestCase):
def test_existing_compute_node_updated_same_resources(self, service_mock):
self._setup_rt()
capi = self.cond_api_mock
create_node_mock = capi.compute_node_create
# This is the same set of resources as the fixture, deliberately. We
# are checking below to see that update_resource_stats() is not
# needlessly called when the resources don't actually change.
resources = {
# host is added in update_available_resources()
# before calling _update()
'host': 'fake-host',
'id': 1,
'host_ip': 'fake-ip',
'numa_topology': None,
'metrics': '[]',
'cpu_info': '',
'hypervisor_hostname': 'fakehost',
'free_disk_gb': 6,
'hypervisor_version': 0,
'local_gb': 6,
'free_ram_mb': 512,
'memory_mb_used': 0,
'pci_device_pools': [],
'vcpus_used': 0,
'hypervisor_type': 'fake',
'local_gb_used': 0,
'memory_mb': 512,
'current_workload': 0,
'vcpus': 4,
'running_vms': 0
}
orig_resources = copy.deepcopy(resources)
self.rt.compute_node = copy.deepcopy(orig_resources)
compute = objects.ComputeNode(
host_ip='1.1.1.1',
numa_topology=None,
metrics='[]',
cpu_info='',
hypervisor_hostname='fakehost',
free_disk_gb=6,
hypervisor_version=0,
local_gb=6,
free_ram_mb=512,
memory_mb_used=0,
pci_device_pools=objects.PciDevicePoolList(),
vcpus_used=0,
hypervisor_type='fake',
local_gb_used=0,
memory_mb=512,
current_workload=0,
vcpus=4,
running_vms=0
)
self.rt.compute_node = compute
self.rt._update(mock.sentinel.ctx)
self.assertFalse(self.rt.disabled)
self.assertFalse(service_mock.called)
self.assertFalse(create_node_mock.called)
# The above call to _update() will populate the
# RT.old_resources collection with the resources. Here, we check that
@ -971,42 +974,37 @@ class TestUpdateComputeNode(BaseTestCase):
def test_existing_compute_node_updated_new_resources(self, service_mock):
self._setup_rt()
capi = self.cond_api_mock
create_node_mock = capi.compute_node_create
# Deliberately changing local_gb_used, vcpus_used, and memory_mb_used
# below to be different from the compute node fixture's base usages.
# We want to check that the code paths update the stored compute node
# usage records with what is supplied to _update().
resources = {
# host is added in update_available_resources()
# before calling _update()
'host': 'fake-host',
'id': 1,
'host_ip': 'fake-ip',
'numa_topology': None,
'metrics': '[]',
'cpu_info': '',
'hypervisor_hostname': 'fakehost',
'free_disk_gb': 2,
'hypervisor_version': 0,
'local_gb': 6,
'free_ram_mb': 384,
'memory_mb_used': 128,
'pci_device_pools': [],
'vcpus_used': 2,
'hypervisor_type': 'fake',
'local_gb_used': 4,
'memory_mb': 512,
'current_workload': 0,
'vcpus': 4,
'running_vms': 0
}
expected_resources = copy.deepcopy(resources)
expected_resources['id'] = 1
expected_resources['stats'] = '{}'
compute = objects.ComputeNode(
host='fake-host',
host_ip='1.1.1.1',
numa_topology=None,
metrics='[]',
cpu_info='',
hypervisor_hostname='fakehost',
free_disk_gb=2,
hypervisor_version=0,
local_gb=6,
free_ram_mb=384,
memory_mb_used=128,
pci_device_pools=objects.PciDevicePoolList(),
vcpus_used=2,
hypervisor_type='fake',
local_gb_used=4,
memory_mb=512,
current_workload=0,
vcpus=4,
running_vms=0
)
expected_resources = copy.deepcopy(compute)
expected_resources.stats = {}
expected_resources.vcpus = 4
expected_resources.vcpus_used = 2
self.rt.compute_node = copy.deepcopy(resources)
self.rt.compute_node = compute
self.rt.ext_resources_handler.reset_resources(self.rt.compute_node,
self.rt.driver)
# This emulates the behavior that occurs in the
@ -1017,11 +1015,8 @@ class TestUpdateComputeNode(BaseTestCase):
self.assertFalse(self.rt.disabled)
self.assertFalse(service_mock.called)
self.assertFalse(create_node_mock.called)
urs_mock = self.sched_client_mock.update_resource_stats
urs_mock.assert_called_once_with(mock.sentinel.ctx,
('fake-host', 'fake-node'),
expected_resources)
urs_mock.assert_called_once_with(self.rt.compute_node)
class TestInstanceClaim(BaseTestCase):
@ -1090,13 +1085,14 @@ class TestInstanceClaim(BaseTestCase):
"free_ram_mb": expected['memory_mb'] - self.instance.memory_mb,
'running_vms': 1,
# 'vcpus_used': 0, # vcpus are not claimed
'pci_device_pools': [],
'pci_device_pools': objects.PciDevicePoolList(),
})
with mock.patch.object(self.rt, '_update') as update_mock:
with mock.patch.object(self.instance, 'save'):
self.rt.instance_claim(self.ctx, self.instance, None)
update_mock.assert_called_once_with(self.elevated)
self.assertEqual(expected, self.rt.compute_node)
self.assertTrue(obj_base.obj_equal_prims(expected,
self.rt.compute_node))
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance_uuid')
@mock.patch('nova.objects.MigrationList.get_in_progress_by_host_and_node')
@ -1205,11 +1201,11 @@ class TestResizeClaim(BaseTestCase):
def adjust_expected(self, expected, flavor):
disk_used = flavor['root_gb'] + flavor['ephemeral_gb']
expected['free_disk_gb'] -= disk_used
expected['local_gb_used'] += disk_used
expected['free_ram_mb'] -= flavor['memory_mb']
expected['memory_mb_used'] += flavor['memory_mb']
expected['vcpus_used'] += flavor['vcpus']
expected.free_disk_gb -= disk_used
expected.local_gb_used += disk_used
expected.free_ram_mb -= flavor['memory_mb']
expected.memory_mb_used += flavor['memory_mb']
expected.vcpus_used += flavor['vcpus']
@mock.patch('nova.objects.Flavor.get_by_id')
def test_claim(self, flavor_mock, pci_mock, inst_list_mock, inst_by_uuid,
@ -1231,7 +1227,8 @@ class TestResizeClaim(BaseTestCase):
self.ctx, self.instance, self.flavor, None)
self.assertIsInstance(claim, claims.ResizeClaim)
self.assertEqual(expected, self.rt.compute_node)
self.assertTrue(obj_base.obj_equal_prims(expected,
self.rt.compute_node))
def test_same_host(self, pci_mock, inst_list_mock, inst_by_uuid,
migr_mock):
@ -1257,7 +1254,8 @@ class TestResizeClaim(BaseTestCase):
self.assertNotEqual(expected, self.rt.compute_node)
claim.abort()
self.assertEqual(expected, self.rt.compute_node)
self.assertTrue(obj_base.obj_equal_prims(expected,
self.rt.compute_node))
def test_revert_reserve_source(
self, pci_mock, inst_list_mock, inst_by_uuid, migr_mock):
@ -1286,42 +1284,39 @@ class TestResizeClaim(BaseTestCase):
# Register the instance with dst_rt
expected = copy.deepcopy(dst_rt.compute_node)
del expected['stats']
with mock.patch.object(dst_instance, 'save'):
dst_rt.instance_claim(self.ctx, dst_instance)
self.adjust_expected(expected, new_itype)
expected_stats = {'num_task_resize_migrating': 1,
expected.stats = {'num_task_resize_migrating': 1,
'io_workload': 1,
'num_instances': 1,
'num_proj_fake-project': 1,
'num_vm_active': 1,
'num_os_type_fake-os': 1}
expected['current_workload'] = 1
expected['running_vms'] = 1
actual_stats = dst_rt.compute_node.pop('stats')
actual_stats = jsonutils.loads(actual_stats)
self.assertEqual(expected_stats, actual_stats)
self.assertEqual(expected, dst_rt.compute_node)
expected.current_workload = 1
expected.running_vms = 1
self.assertTrue(obj_base.obj_equal_prims(expected,
dst_rt.compute_node))
# Provide the migration via a mock, then audit dst_rt to check that
# the instance + migration resources are not double-counted
self.audit(dst_rt, [dst_instance], [dst_migr], dst_instance)
actual_stats = dst_rt.compute_node.pop('stats')
actual_stats = jsonutils.loads(actual_stats)
self.assertEqual(expected_stats, actual_stats)
self.assertEqual(expected, dst_rt.compute_node)
self.assertTrue(obj_base.obj_equal_prims(expected,
dst_rt.compute_node))
# Audit src_rt with src_migr
expected = copy.deepcopy(src_rt.compute_node)
self.adjust_expected(expected, old_itype)
self.audit(src_rt, [], [src_migr], src_instance)
self.assertEqual(expected, src_rt.compute_node)
self.assertTrue(obj_base.obj_equal_prims(expected,
src_rt.compute_node))
# Flag the instance as reverting and re-audit
src_instance['vm_state'] = vm_states.RESIZED
src_instance['task_state'] = task_states.RESIZE_REVERTING
self.audit(src_rt, [], [src_migr], src_instance)
self.assertEqual(expected, src_rt.compute_node)
self.assertTrue(obj_base.obj_equal_prims(expected,
src_rt.compute_node))
def test_dupe_filter(self, pci_mock, inst_list_mock, inst_by_uuid,
migr_mock):


@ -17,8 +17,8 @@ import mock
import oslo_messaging as messaging
from nova import context
from nova import exception
from nova import objects
from nova.objects import pci_device_pool
from nova.scheduler import client as scheduler_client
from nova.scheduler.client import query as scheduler_query_client
from nova.scheduler.client import report as scheduler_report_client
@ -37,38 +37,18 @@ class SchedulerReportClientTestCase(test.NoDBTestCase):
self.client = scheduler_report_client.SchedulerReportClient()
@mock.patch.object(objects.ComputeNode, '__new__')
def test_update_compute_node_works(self, mock_cn):
stats = {"id": 1, "foo": "bar",
"pci_device_pools": [{"vendor_id": "foo",
"product_id": "foo",
"count": 1,
"a": "b"}]}
self.client.update_resource_stats(self.context,
('fakehost', 'fakenode'),
stats)
mock_cn.assert_called_once_with(objects.ComputeNode,
context=self.context,
id=1)
cn = mock_cn()
cn.obj_reset_changes.assert_called_once_with()
self.assertEqual("b", cn.pci_device_pools[0].tags["a"])
cn.save.assert_called_once_with()
self.assertEqual('bar', cn.foo)
def test_update_compute_node_raises(self):
stats = {"foo": "bar"}
self.assertRaises(exception.ComputeHostNotCreated,
self.client.update_resource_stats,
self.context, ('fakehost', 'fakenode'), stats)
@mock.patch('nova.objects.ComputeNode.save')
def test_update_resource_stats_pci_device_pools_none(self, mock_save):
stats = {"id": 1, "foo": "bar",
"pci_device_pools": None}
self.client.update_resource_stats(self.context,
('fakehost', 'fakenode'),
stats)
@mock.patch.object(objects.ComputeNode, 'save')
def test_update_resource_stats_saves(self, mock_save):
cn = objects.ComputeNode()
cn.host = 'fakehost'
cn.hypervisor_hostname = 'fakenode'
cn.pci_device_pools = pci_device_pool.from_pci_stats(
[{"vendor_id": "foo",
"product_id": "foo",
"count": 1,
"a": "b"}])
self.client.update_resource_stats(cn)
mock_save.assert_called_once_with()
class SchedulerQueryClientTestCase(test.NoDBTestCase):
@ -178,8 +158,7 @@ class SchedulerClientTestCase(test.NoDBTestCase):
def test_update_resource_stats(self, mock_update_resource_stats):
self.assertIsNone(self.client.reportclient.instance)
self.client.update_resource_stats('ctxt', 'fake_name', 'fake_stats')
self.client.update_resource_stats(mock.sentinel.cn)
self.assertIsNotNone(self.client.reportclient.instance)
mock_update_resource_stats.assert_called_once_with(
'ctxt', 'fake_name', 'fake_stats')
mock_update_resource_stats.assert_called_once_with(mock.sentinel.cn)