Remove DictCompat from ComputeNode

This change removes NovaObjectDictCompat from the ComputeNode object.
The only changes needed were converting dict-style access to either
attribute (dot) syntax or getattr()/setattr(). In cases where the dict
method update() was used, the update dictionary is left unchanged but is
now passed to a new helper function that applies the given kwargs with
setattr() (see the sketch below).

Change-Id: Iba5303d23492db55f46df8b8f6b3dbae5b4971c1
Partially-Implements: bp rm-object-dict-compat-newton
commit 8806ac9e41 (parent b5e65b4356)
Author: Ryan Rossiter
Date: 2016-01-08 23:08:21 +00:00
24 changed files with 158 additions and 132 deletions
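As a quick reference for reviewers, here is a minimal sketch of the conversion pattern; the before/after lines are illustrative, and the helper mirrors the _update_compute_node() added to the resource tracker tests in this change:

    # Before: dict-style access via NovaObjectDictCompat
    #   total = compute_node['memory_mb']
    #   compute_node['stats'] = jsonutils.loads(stats)
    #
    # After: plain attribute access on the ComputeNode object
    #   total = compute_node.memory_mb
    #   compute_node.stats = jsonutils.loads(stats)

    def _update_compute_node(node, **kwargs):
        """Apply a dict of updates to a ComputeNode with setattr()."""
        for key, value in kwargs.items():
            setattr(node, key, value)

In the tests, update dictionaries that previously went through compute_node.update(vals) are now applied with _update_compute_node(compute_node, **vals).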


@ -206,17 +206,17 @@ class HostController(wsgi.Controller):
def _get_total_resources(host_name, compute_node):
return {'resource': {'host': host_name,
'project': '(total)',
'cpu': compute_node['vcpus'],
'memory_mb': compute_node['memory_mb'],
'disk_gb': compute_node['local_gb']}}
'cpu': compute_node.vcpus,
'memory_mb': compute_node.memory_mb,
'disk_gb': compute_node.local_gb}}
@staticmethod
def _get_used_now_resources(host_name, compute_node):
return {'resource': {'host': host_name,
'project': '(used_now)',
'cpu': compute_node['vcpus_used'],
'memory_mb': compute_node['memory_mb_used'],
'disk_gb': compute_node['local_gb_used']}}
'cpu': compute_node.vcpus_used,
'memory_mb': compute_node.memory_mb_used,
'disk_gb': compute_node.local_gb_used}}
@staticmethod
def _get_resource_totals_from_instances(host_name, instances):


@ -56,7 +56,7 @@ class HypervisorsController(wsgi.Controller):
'free_ram_mb', 'free_disk_gb', 'current_workload',
'running_vms', 'cpu_info', 'disk_available_least',
'host_ip'):
hyp_dict[field] = hypervisor[field]
hyp_dict[field] = getattr(hypervisor, field)
hyp_dict['service'] = {
'id': service.id,


@ -234,17 +234,17 @@ class HostController(object):
def _get_total_resources(host_name, compute_node):
return {'resource': {'host': host_name,
'project': '(total)',
'cpu': compute_node['vcpus'],
'memory_mb': compute_node['memory_mb'],
'disk_gb': compute_node['local_gb']}}
'cpu': compute_node.vcpus,
'memory_mb': compute_node.memory_mb,
'disk_gb': compute_node.local_gb}}
@staticmethod
def _get_used_now_resources(host_name, compute_node):
return {'resource': {'host': host_name,
'project': '(used_now)',
'cpu': compute_node['vcpus_used'],
'memory_mb': compute_node['memory_mb_used'],
'disk_gb': compute_node['local_gb_used']}}
'cpu': compute_node.vcpus_used,
'memory_mb': compute_node.memory_mb_used,
'disk_gb': compute_node.local_gb_used}}
@staticmethod
def _get_resource_totals_from_instances(host_name, instances):


@ -61,7 +61,7 @@ class HypervisorsController(object):
if ext_loaded:
fields += ('host_ip',)
for field in fields:
hyp_dict[field] = hypervisor[field]
hyp_dict[field] = getattr(hypervisor, field)
hyp_dict['service'] = {
'id': service.id,


@ -104,7 +104,7 @@ class PciController(wsgi.Controller):
results = []
for node in compute_nodes:
pci_devs = objects.PciDeviceList.get_by_compute_node(
context, node['id'])
context, node.id)
results.extend([self._view_pcidevice(dev, detail)
for dev in pci_devs])
return results


@ -275,11 +275,11 @@ class CellStateManager(base.Base):
continue
chost = compute_hosts[host]
chost['free_ram_mb'] += compute['free_ram_mb']
free_disk = compute['free_disk_gb'] * 1024
chost['free_ram_mb'] += compute.free_ram_mb
free_disk = compute.free_disk_gb * 1024
chost['free_disk_mb'] += free_disk
chost['total_ram_mb'] += compute['memory_mb']
total_disk = compute['local_gb'] * 1024
chost['total_ram_mb'] += compute.memory_mb
total_disk = compute.local_gb * 1024
chost['total_disk_mb'] += total_disk
_get_compute_hosts()


@ -161,8 +161,8 @@ class Claim(NopClaim):
def _test_memory(self, resources, limit):
type_ = _("memory")
unit = "MB"
total = resources['memory_mb']
used = resources['memory_mb_used']
total = resources.memory_mb
used = resources.memory_mb_used
requested = self.memory_mb
return self._test(type_, unit, total, used, requested, limit)
@ -170,8 +170,8 @@ class Claim(NopClaim):
def _test_disk(self, resources, limit):
type_ = _("disk")
unit = "GB"
total = resources['local_gb']
used = resources['local_gb_used']
total = resources.local_gb
used = resources.local_gb_used
requested = self.disk_gb
return self._test(type_, unit, total, used, requested, limit)
@ -179,8 +179,8 @@ class Claim(NopClaim):
def _test_vcpus(self, resources, limit):
type_ = _("vcpu")
unit = "VCPU"
total = resources['vcpus']
used = resources['vcpus_used']
total = resources.vcpus
used = resources.vcpus_used
requested = self.vcpus
return self._test(type_, unit, total, used, requested, limit)
@ -199,7 +199,8 @@ class Claim(NopClaim):
self.instance, limits)
def _test_numa_topology(self, resources, limit):
host_topology = resources.get('numa_topology')
host_topology = (resources.numa_topology
if 'numa_topology' in resources else None)
requested_topology = self.numa_topology
if host_topology:
host_topology = objects.NUMATopology.obj_from_db_obj(


@ -548,7 +548,7 @@ class ResourceTracker(object):
if 'pci_passthrough_devices' in resources:
# TODO(jaypipes): Move this into _init_compute_node()
if not self.pci_tracker:
n_id = self.compute_node['id'] if self.compute_node else None
n_id = self.compute_node.id if self.compute_node else None
self.pci_tracker = pci_manager.PciDevTracker(context,
node_id=n_id)
dev_json = resources.pop('pci_passthrough_devices')


@ -141,13 +141,13 @@ class LiveMigrationTask(base.TaskBase):
source_info = self._get_compute_info(self.source)
destination_info = self._get_compute_info(destination)
source_type = source_info['hypervisor_type']
destination_type = destination_info['hypervisor_type']
source_type = source_info.hypervisor_type
destination_type = destination_info.hypervisor_type
if source_type != destination_type:
raise exception.InvalidHypervisorType()
source_version = source_info['hypervisor_version']
destination_version = destination_info['hypervisor_version']
source_version = source_info.hypervisor_version
destination_version = destination_info.hypervisor_version
if source_version > destination_version:
raise exception.DestinationHypervisorTooOld()


@ -32,10 +32,8 @@ CONF.import_opt('disk_allocation_ratio', 'nova.compute.resource_tracker')
LOG = logging.getLogger(__name__)
# TODO(berrange): Remove NovaObjectDictCompat
@base.NovaObjectRegistry.register
class ComputeNode(base.NovaPersistentObject, base.NovaObject,
base.NovaObjectDictCompat):
class ComputeNode(base.NovaPersistentObject, base.NovaObject):
# Version 1.0: Initial version
# Version 1.1: Added get_by_service_id()
# Version 1.2: String attributes updated to support unicode
@ -148,19 +146,19 @@ class ComputeNode(base.NovaPersistentObject, base.NovaObject,
service = objects.Service.get_by_id(
compute._context, db_compute['service_id'])
except exception.ServiceNotFound:
compute['host'] = None
compute.host = None
return
try:
compute['host'] = service.host
compute.host = service.host
except (AttributeError, exception.OrphanedObjectError):
# Host can be nullable in Service
compute['host'] = None
compute.host = None
elif 'host' in db_compute and db_compute['host'] is not None:
# New-style DB having host as a field
compute['host'] = db_compute['host']
compute.host = db_compute['host']
else:
# We assume it should not happen but in case, let's set it to None
compute['host'] = None
compute.host = None
@staticmethod
def _from_db_object(context, compute, db_compute):
@ -205,18 +203,18 @@ class ComputeNode(base.NovaPersistentObject, base.NovaObject,
if value == 0.0 and key == 'disk_allocation_ratio':
# It's not specified either on the controller
value = 1.0
compute[key] = value
setattr(compute, key, value)
stats = db_compute['stats']
if stats:
compute['stats'] = jsonutils.loads(stats)
compute.stats = jsonutils.loads(stats)
sup_insts = db_compute.get('supported_instances')
if sup_insts:
hv_specs = jsonutils.loads(sup_insts)
hv_specs = [objects.HVSpec.from_list(hv_spec)
for hv_spec in hv_specs]
compute['supported_hv_specs'] = hv_specs
compute.supported_hv_specs = hv_specs
pci_stats = db_compute.get('pci_stats')
if pci_stats is not None:
@ -354,7 +352,7 @@ class ComputeNode(base.NovaPersistentObject, base.NovaObject,
"disk_available_least", "host_ip"]
for key in keys:
if key in resources:
self[key] = resources[key]
setattr(self, key, resources[key])
# supported_instances has a different name in compute_node
if 'supported_instances' in resources:


@ -81,7 +81,9 @@ class IronicHostManager(host_manager.HostManager):
def host_state_cls(self, host, node, **kwargs):
"""Factory function/property to create a new HostState."""
compute = kwargs.get('compute')
if compute and compute.get('hypervisor_type') == hv_type.IRONIC:
get_ht = lambda c: (c.hypervisor_type if 'hypervisor_type' in c
else None)
if compute and get_ht(compute) == hv_type.IRONIC:
return IronicNodeState(host, node)
else:
return host_manager.HostState(host, node)


@ -155,7 +155,7 @@ class HypervisorsCellsSampleJsonTests(api_sample_base.ApiSampleTestBaseV21):
'nova.compute.cells_api.HostAPI.get_host_uptime',
fake_get_host_uptime)
hypervisor_id = fake_hypervisor['id']
hypervisor_id = fake_hypervisor.id
response = self._do_get('os-hypervisors/%s/uptime' % hypervisor_id)
subs = {'hypervisor_id': str(hypervisor_id)}
self._verify_response('hypervisors-uptime-resp', subs, response, 200)


@ -146,7 +146,7 @@ def fake_compute_node_statistics(context):
if key == 'count':
result[key] += 1
else:
result[key] += hyper[key]
result[key] += getattr(hyper, key)
return result


@ -884,10 +884,7 @@ class CellsTargetedMethodsTestCase(test.NoDBTestCase):
'fake_host', binary, params_to_update)
result = response.value_or_raise()
self.assertIsInstance(result, objects.Service)
# NOTE(sbauza): As NovaObjects can't be comparated directly, we need to
# check the fields by primitiving them first
self.assertEqual(jsonutils.to_primitive(fake_service),
jsonutils.to_primitive(result))
self.assertTrue(objects_base.obj_equal_prims(fake_service, result))
def test_proxy_rpc_to_manager_call(self):
fake_topic = 'fake-topic'


@ -143,7 +143,7 @@ class ClaimTestCase(test.NoDBTestCase):
}
if values:
resources.update(values)
return resources
return objects.ComputeNode(**resources)
def test_memory_unlimited(self):
self._claim(memory_mb=99999999)


@ -1445,7 +1445,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.build_and_run_instance(self.context, instance, {}, {},
filter_properties,
block_device_mapping=[])
self.assertEqual(999999999999, self.rt.compute_node['memory_mb_used'])
self.assertEqual(999999999999, self.rt.compute_node.memory_mb_used)
def test_create_instance_unlimited_disk(self):
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
@ -1465,22 +1465,22 @@ class ComputeTestCase(BaseTestCase):
instance = self._create_fake_instance_obj(params)
self.compute.build_and_run_instance(self.context, instance, {}, {},
{}, block_device_mapping=[], limits=limits)
self.assertEqual(1024, self.rt.compute_node['memory_mb_used'])
self.assertEqual(256, self.rt.compute_node['local_gb_used'])
self.assertEqual(1024, self.rt.compute_node.memory_mb_used)
self.assertEqual(256, self.rt.compute_node.local_gb_used)
params = {"memory_mb": 2048, "root_gb": 256, "ephemeral_gb": 256}
instance = self._create_fake_instance_obj(params)
self.compute.build_and_run_instance(self.context, instance, {}, {},
{}, block_device_mapping=[], limits=limits)
self.assertEqual(3072, self.rt.compute_node['memory_mb_used'])
self.assertEqual(768, self.rt.compute_node['local_gb_used'])
self.assertEqual(3072, self.rt.compute_node.memory_mb_used)
self.assertEqual(768, self.rt.compute_node.local_gb_used)
params = {"memory_mb": 8192, "root_gb": 8192, "ephemeral_gb": 8192}
instance = self._create_fake_instance_obj(params)
self.compute.build_and_run_instance(self.context, instance,
{}, {}, {}, block_device_mapping=[], limits=limits)
self.assertEqual(3072, self.rt.compute_node['memory_mb_used'])
self.assertEqual(768, self.rt.compute_node['local_gb_used'])
self.assertEqual(3072, self.rt.compute_node.memory_mb_used)
self.assertEqual(768, self.rt.compute_node.local_gb_used)
def test_create_multiple_instance_with_neutron_port(self):
instance_type = flavors.get_default_flavor()
@ -1522,7 +1522,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.build_and_run_instance(self.context, instance, {}, {},
filter_properties, block_device_mapping=[])
self.assertEqual(instance_mb, self.rt.compute_node['memory_mb_used'])
self.assertEqual(instance_mb, self.rt.compute_node.memory_mb_used)
def test_create_instance_with_oversubscribed_ram_fail(self):
"""Test passing of oversubscribed ram policy from the scheduler, but
@ -1569,7 +1569,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.build_and_run_instance(self.context, instance, {}, {},
filter_properties, block_device_mapping=[])
self.assertEqual(2, self.rt.compute_node['vcpus_used'])
self.assertEqual(2, self.rt.compute_node.vcpus_used)
# create one more instance:
params = {"memory_mb": 10, "root_gb": 1,
@ -1578,14 +1578,14 @@ class ComputeTestCase(BaseTestCase):
self.compute.build_and_run_instance(self.context, instance, {}, {},
filter_properties, block_device_mapping=[])
self.assertEqual(3, self.rt.compute_node['vcpus_used'])
self.assertEqual(3, self.rt.compute_node.vcpus_used)
# delete the instance:
instance['vm_state'] = vm_states.DELETED
self.rt.update_usage(self.context,
instance=instance)
self.assertEqual(2, self.rt.compute_node['vcpus_used'])
self.assertEqual(2, self.rt.compute_node.vcpus_used)
# now oversubscribe vcpus and fail:
params = {"memory_mb": 10, "root_gb": 1,
@ -1620,7 +1620,7 @@ class ComputeTestCase(BaseTestCase):
self.compute.build_and_run_instance(self.context, instance, {}, {},
filter_properties, block_device_mapping=[])
self.assertEqual(instance_gb, self.rt.compute_node['local_gb_used'])
self.assertEqual(instance_gb, self.rt.compute_node.local_gb_used)
def test_create_instance_with_oversubscribed_disk_fail(self):
"""Test passing of oversubscribed disk policy from the scheduler, but


@ -168,6 +168,6 @@ class MultiNodeComputeTestCase(BaseTestCase):
# Verify B gets deleted since now only A is reported by driver
self.assertEqual(len(fake_compute_nodes), 1)
self.assertEqual(fake_compute_nodes[0]['hypervisor_hostname'], 'A')
self.assertEqual(fake_compute_nodes[0].hypervisor_hostname, 'A')
self.assertEqual(sorted(self.compute._resource_tracker_dict.keys()),
['A'])


@ -686,7 +686,7 @@ class BaseTrackerTestCase(BaseTestCase):
if field not in tracker.compute_node:
raise test.TestingException(
"'%(field)s' not in compute node." % {'field': field})
x = tracker.compute_node[field]
x = getattr(tracker.compute_node, field)
if field == 'numa_topology':
self.assertEqualNUMAHostTopology(
@ -822,8 +822,8 @@ class TrackerExtraResourcesTestCase(BaseTrackerTestCase):
def test_set_extra_resources(self):
def fake_write_resources(resources):
resources['stats']['resA'] = '123'
resources['stats']['resB'] = 12
resources.stats['resA'] = '123'
resources.stats['resB'] = 12
self.stubs.Set(self.tracker.ext_resources_handler,
'write_resources',
@ -1041,9 +1041,9 @@ class InstanceClaimTestCase(BaseTrackerTestCase):
'migration_context'],
mock_instance_list.call_args_list[0][1]['expected_attrs'])
self.assertEqual(FAKE_VIRT_MEMORY_MB + FAKE_VIRT_MEMORY_OVERHEAD,
self.tracker.compute_node['memory_mb_used'])
self.tracker.compute_node.memory_mb_used)
self.assertEqual(ROOT_GB + EPHEMERAL_GB,
self.tracker.compute_node['local_gb_used'])
self.tracker.compute_node.local_gb_used)
mock_migration_list.assert_called_once_with(self.context,
"fakehost",
"fakenode")
@ -1294,7 +1294,7 @@ class TrackerPeriodicTestCase(BaseTrackerTestCase):
@mock.patch.object(self.tracker, '_verify_resources')
@mock.patch.object(self.tracker, '_report_hypervisor_resource_view')
def _test(mock_rhrv, mock_vr, mock_uar, mock_driver):
resources = {'there is someone in my head': 'but it\'s not me'}
resources = self._create_compute_node()
mock_driver.get_available_resource.return_value = resources
self.tracker.update_available_resource(self.context)
mock_uar.assert_called_once_with(self.context, resources)


@ -39,7 +39,7 @@ def _fake_resources():
'vcpus': 2,
'vcpus_used': 0
}
return resources
return objects.ComputeNode(**resources)
class ShelveComputeManagerTestCase(test_compute.BaseTestCase):


@ -463,7 +463,7 @@ class TestUpdateAvailableResources(BaseTestCase):
'fake-node')
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
expected_resources.update({
vals = {
# host is added in update_available_resources()
# before calling _update()
'host': 'fake-host',
@ -485,7 +485,8 @@ class TestUpdateAvailableResources(BaseTestCase):
'current_workload': 0,
'vcpus': 4,
'running_vms': 0
})
}
_update_compute_node(expected_resources, **vals)
update_mock.assert_called_once_with(mock.sentinel.ctx)
self.assertTrue(obj_base.obj_equal_prims(expected_resources,
self.rt.compute_node))
@ -508,7 +509,7 @@ class TestUpdateAvailableResources(BaseTestCase):
get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
'fake-node')
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
expected_resources.update({
vals = {
# host is added in update_available_resources()
# before calling _update()
'host': 'fake-host',
@ -530,7 +531,8 @@ class TestUpdateAvailableResources(BaseTestCase):
'current_workload': 0,
'vcpus': 4,
'running_vms': 0
})
}
_update_compute_node(expected_resources, **vals)
update_mock.assert_called_once_with(mock.sentinel.ctx)
self.assertTrue(obj_base.obj_equal_prims(expected_resources,
self.rt.compute_node))
@ -560,7 +562,7 @@ class TestUpdateAvailableResources(BaseTestCase):
get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
'fake-node')
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
expected_resources.update({
vals = {
# host is added in update_available_resources()
# before calling _update()
'host': 'fake-host',
@ -582,7 +584,8 @@ class TestUpdateAvailableResources(BaseTestCase):
'current_workload': 0,
'vcpus': 4,
'running_vms': 1 # One active instance
})
}
_update_compute_node(expected_resources, **vals)
update_mock.assert_called_once_with(mock.sentinel.ctx)
self.assertTrue(obj_base.obj_equal_prims(expected_resources,
self.rt.compute_node))
@ -630,7 +633,7 @@ class TestUpdateAvailableResources(BaseTestCase):
get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
'fake-node')
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
expected_resources.update({
vals = {
# host is added in update_available_resources()
# before calling _update()
'host': 'fake-host',
@ -654,7 +657,8 @@ class TestUpdateAvailableResources(BaseTestCase):
# Yep, for some reason, orphaned instances are not counted
# as running VMs...
'running_vms': 0
})
}
_update_compute_node(expected_resources, **vals)
update_mock.assert_called_once_with(mock.sentinel.ctx)
self.assertTrue(obj_base.obj_equal_prims(expected_resources,
self.rt.compute_node))
@ -702,7 +706,7 @@ class TestUpdateAvailableResources(BaseTestCase):
get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
'fake-node')
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
expected_resources.update({
vals = {
# host is added in update_available_resources()
# before calling _update()
'host': 'fake-host',
@ -724,7 +728,8 @@ class TestUpdateAvailableResources(BaseTestCase):
'current_workload': 0,
'vcpus': 4,
'running_vms': 0
})
}
_update_compute_node(expected_resources, **vals)
update_mock.assert_called_once_with(mock.sentinel.ctx)
self.assertTrue(obj_base.obj_equal_prims(expected_resources,
self.rt.compute_node))
@ -769,7 +774,7 @@ class TestUpdateAvailableResources(BaseTestCase):
get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
'fake-node')
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
expected_resources.update({
vals = {
# host is added in update_available_resources()
# before calling _update()
'host': 'fake-host',
@ -791,7 +796,8 @@ class TestUpdateAvailableResources(BaseTestCase):
'current_workload': 0,
'vcpus': 4,
'running_vms': 0
})
}
_update_compute_node(expected_resources, **vals)
update_mock.assert_called_once_with(mock.sentinel.ctx)
self.assertTrue(obj_base.obj_equal_prims(expected_resources,
self.rt.compute_node))
@ -833,7 +839,7 @@ class TestUpdateAvailableResources(BaseTestCase):
get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
'fake-node')
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
expected_resources.update({
vals = {
# host is added in update_available_resources()
# before calling _update()
'host': 'fake-host',
@ -855,7 +861,8 @@ class TestUpdateAvailableResources(BaseTestCase):
'current_workload': 0,
'vcpus': 4,
'running_vms': 0
})
}
_update_compute_node(expected_resources, **vals)
update_mock.assert_called_once_with(mock.sentinel.ctx)
self.assertTrue(obj_base.obj_equal_prims(expected_resources,
self.rt.compute_node))
@ -906,7 +913,7 @@ class TestUpdateAvailableResources(BaseTestCase):
get_cn_mock.assert_called_once_with(mock.sentinel.ctx, 'fake-host',
'fake-node')
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
expected_resources.update({
vals = {
# host is added in update_available_resources()
# before calling _update()
'host': 'fake-host',
@ -930,7 +937,8 @@ class TestUpdateAvailableResources(BaseTestCase):
'current_workload': 1, # One migrating instance...
'vcpus': 4,
'running_vms': 2
})
}
_update_compute_node(expected_resources, **vals)
update_mock.assert_called_once_with(mock.sentinel.ctx)
self.assertTrue(obj_base.obj_equal_prims(expected_resources,
self.rt.compute_node))
@ -1211,15 +1219,16 @@ class TestInstanceClaim(BaseTestCase):
self.rt.compute_node))
disk_used = self.instance.root_gb + self.instance.ephemeral_gb
expected.update({
vals = {
'local_gb_used': disk_used,
'memory_mb_used': self.instance.memory_mb,
'free_disk_gb': expected['local_gb'] - disk_used,
"free_ram_mb": expected['memory_mb'] - self.instance.memory_mb,
'free_disk_gb': expected.local_gb - disk_used,
"free_ram_mb": expected.memory_mb - self.instance.memory_mb,
'running_vms': 1,
'vcpus_used': 1,
'pci_device_pools': objects.PciDevicePoolList(),
})
}
_update_compute_node(expected, **vals)
with mock.patch.object(self.rt, '_update') as update_mock:
with mock.patch.object(self.instance, 'save'):
self.rt.instance_claim(self.ctx, self.instance, None)
@ -1236,15 +1245,16 @@ class TestInstanceClaim(BaseTestCase):
expected = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
disk_used = self.instance.root_gb + self.instance.ephemeral_gb
expected.update({
vals = {
'local_gb_used': disk_used,
'memory_mb_used': self.instance.memory_mb,
'free_disk_gb': expected['local_gb'] - disk_used,
"free_ram_mb": expected['memory_mb'] - self.instance.memory_mb,
'free_disk_gb': expected.local_gb - disk_used,
"free_ram_mb": expected.memory_mb - self.instance.memory_mb,
'running_vms': 1,
'vcpus_used': 1,
'pci_device_pools': objects.PciDevicePoolList(),
})
}
_update_compute_node(expected, **vals)
with mock.patch.object(self.rt, '_update') as update_mock:
with mock.patch.object(self.instance, 'save'):
self.rt.instance_claim(self.ctx, self.instance, None)
@ -1253,7 +1263,7 @@ class TestInstanceClaim(BaseTestCase):
self.rt.compute_node))
expected_updated = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
expected_updated['pci_device_pools'] = objects.PciDevicePoolList()
expected_updated.pci_device_pools = objects.PciDevicePoolList()
self.instance.vm_state = vm_states.SHELVED_OFFLOADED
with mock.patch.object(self.rt, '_update') as update_mock:
@ -1270,15 +1280,16 @@ class TestInstanceClaim(BaseTestCase):
disk_used = self.instance.root_gb + self.instance.ephemeral_gb
expected = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
expected.update({
vals = {
'local_gb_used': disk_used,
'memory_mb_used': self.instance.memory_mb,
'free_disk_gb': expected['local_gb'] - disk_used,
"free_ram_mb": expected['memory_mb'] - self.instance.memory_mb,
'free_disk_gb': expected.local_gb - disk_used,
"free_ram_mb": expected.memory_mb - self.instance.memory_mb,
'running_vms': 1,
'vcpus_used': 1,
'pci_device_pools': objects.PciDevicePoolList(),
})
}
_update_compute_node(expected, **vals)
with mock.patch.object(self.rt, '_update') as update_mock:
with mock.patch.object(self.instance, 'save'):
self.rt.instance_claim(self.ctx, self.instance, None)
@ -1311,15 +1322,16 @@ class TestInstanceClaim(BaseTestCase):
disk_used = self.instance.root_gb + self.instance.ephemeral_gb
expected = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
expected.update({
vals = {
'local_gb_used': disk_used,
'memory_mb_used': self.instance.memory_mb,
'free_disk_gb': expected['local_gb'] - disk_used,
"free_ram_mb": expected['memory_mb'] - self.instance.memory_mb,
'free_disk_gb': expected.local_gb - disk_used,
"free_ram_mb": expected.memory_mb - self.instance.memory_mb,
'running_vms': 1,
'vcpus_used': 1,
'pci_device_pools': pci_pools
})
}
_update_compute_node(expected, **vals)
with mock.patch.object(self.rt, '_update') as update_mock:
with mock.patch.object(self.instance, 'save'):
self.rt.instance_claim(self.ctx, self.instance, None)
@ -1411,9 +1423,9 @@ class TestInstanceClaim(BaseTestCase):
pci_mock.return_value = objects.InstancePCIRequests(requests=[])
good_limits = {
'memory_mb': _COMPUTE_NODE_FIXTURES[0]['memory_mb'],
'disk_gb': _COMPUTE_NODE_FIXTURES[0]['local_gb'],
'vcpu': _COMPUTE_NODE_FIXTURES[0]['vcpus'],
'memory_mb': _COMPUTE_NODE_FIXTURES[0].memory_mb,
'disk_gb': _COMPUTE_NODE_FIXTURES[0].local_gb,
'vcpu': _COMPUTE_NODE_FIXTURES[0].vcpus,
}
for key in good_limits.keys():
bad_limits = copy.deepcopy(good_limits)
@ -1432,7 +1444,7 @@ class TestInstanceClaim(BaseTestCase):
self.instance.numa_topology = _INSTANCE_NUMA_TOPOLOGIES['2mb']
host_topology = _NUMA_HOST_TOPOLOGIES['2mb']
self.rt.compute_node['numa_topology'] = host_topology._to_json()
self.rt.compute_node.numa_topology = host_topology._to_json()
limits = {'numa_topology': _NUMA_LIMIT_TOPOLOGIES['2mb']}
expected_numa = copy.deepcopy(host_topology)
@ -1444,7 +1456,7 @@ class TestInstanceClaim(BaseTestCase):
self.rt.instance_claim(self.ctx, self.instance, limits)
update_mock.assert_called_once_with(self.ctx.elevated())
updated_compute_node = self.rt.compute_node
new_numa = updated_compute_node['numa_topology']
new_numa = updated_compute_node.numa_topology
new_numa = objects.NUMATopology.obj_from_db_obj(new_numa)
self.assertEqualNUMAHostTopology(expected_numa, new_numa)
@ -1747,3 +1759,8 @@ class TestInstanceInResizeState(test.NoDBTestCase):
instance = objects.Instance(vm_state=vm_states.RESIZED,
task_state=task_states.RESIZE_FINISH)
self.assertTrue(resource_tracker._instance_in_resize_state(instance))
def _update_compute_node(node, **kwargs):
for key, value in kwargs.items():
setattr(node, key, value)


@ -219,12 +219,13 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
self.task._check_host_is_up(self.destination)
self.task._check_destination_has_enough_memory()
self.task._get_compute_info(self.instance_host).AndReturn({
"hypervisor_type": "b"
})
self.task._get_compute_info(self.destination).AndReturn({
"hypervisor_type": "a"
})
self.task._get_compute_info(self.instance_host).AndReturn(
objects.ComputeNode(hypervisor_type='b')
)
self.task._get_compute_info(self.destination).AndReturn(
objects.ComputeNode(hypervisor_type='a')
)
self.mox.ReplayAll()
self.assertRaises(exception.InvalidHypervisorType,
@ -238,14 +239,14 @@ class LiveMigrationTaskTestCase(test.NoDBTestCase):
self.task._check_host_is_up(self.destination)
self.task._check_destination_has_enough_memory()
self.task._get_compute_info(self.instance_host).AndReturn({
"hypervisor_type": "a",
"hypervisor_version": 7
})
self.task._get_compute_info(self.destination).AndReturn({
"hypervisor_type": "a",
"hypervisor_version": 6
})
host1 = {'hypervisor_type': 'a', 'hypervisor_version': 7}
self.task._get_compute_info(self.instance_host).AndReturn(
objects.ComputeNode(**host1)
)
host2 = {'hypervisor_type': 'a', 'hypervisor_version': 6}
self.task._get_compute_info(self.destination).AndReturn(
objects.ComputeNode(**host2)
)
self.mox.ReplayAll()
self.assertRaises(exception.DestinationHypervisorTooOld,


@ -470,8 +470,8 @@ class HostManagerTestCase(test.NoDBTestCase):
# Check that .service is set properly
for i in range(4):
compute_node = fakes.COMPUTE_NODES[i]
host = compute_node['host']
node = compute_node['hypervisor_hostname']
host = compute_node.host
node = compute_node.hypervisor_hostname
state_key = (host, node)
self.assertEqual(host_states_map[state_key].service,
obj_base.obj_to_primitive(fakes.get_service_by_host(host)))
@ -792,8 +792,10 @@ class HostManagerChangedNodesTestCase(test.NoDBTestCase):
def test_get_all_host_states_after_delete_one(self, mock_get_by_host,
mock_get_all,
mock_get_by_binary):
getter = (lambda n: n.hypervisor_hostname
if 'hypervisor_hostname' in n else None)
running_nodes = [n for n in fakes.COMPUTE_NODES
if n.get('hypervisor_hostname') != 'node4']
if getter(n) != 'node4']
mock_get_by_host.return_value = objects.InstanceList()
mock_get_all.side_effect = [fakes.COMPUTE_NODES, running_nodes]


@ -122,7 +122,7 @@ class IronicHostManagerChangedNodesTestCase(test.NoDBTestCase):
@mock.patch.object(ironic_host_manager.IronicNodeState, '__init__')
def test_create_ironic_node_state(self, init_mock):
init_mock.return_value = None
compute = {'hypervisor_type': 'ironic'}
compute = objects.ComputeNode(**{'hypervisor_type': 'ironic'})
host_state = self.host_manager.host_state_cls('fake-host', 'fake-node',
compute=compute)
self.assertIs(ironic_host_manager.IronicNodeState, type(host_state))
@ -130,17 +130,25 @@ class IronicHostManagerChangedNodesTestCase(test.NoDBTestCase):
@mock.patch.object(host_manager.HostState, '__init__')
def test_create_non_ironic_host_state(self, init_mock):
init_mock.return_value = None
compute = {'cpu_info': 'other cpu'}
compute = objects.ComputeNode(**{'cpu_info': 'other cpu'})
host_state = self.host_manager.host_state_cls('fake-host', 'fake-node',
compute=compute)
self.assertIs(host_manager.HostState, type(host_state))
@mock.patch.object(host_manager.HostState, '__init__')
def test_create_host_state_null_compute(self, init_mock):
init_mock.return_value = None
host_state = self.host_manager.host_state_cls('fake-host', 'fake-node')
self.assertIs(host_manager.HostState, type(host_state))
@mock.patch('nova.objects.ServiceList.get_by_binary')
@mock.patch('nova.objects.ComputeNodeList.get_all')
def test_get_all_host_states_after_delete_one(self, mock_get_all,
mock_get_by_binary):
getter = (lambda n: n.hypervisor_hostname
if 'hypervisor_hostname' in n else None)
running_nodes = [n for n in ironic_fakes.COMPUTE_NODES
if n.get('hypervisor_hostname') != 'node4uuid']
if getter(n) != 'node4uuid']
mock_get_all.side_effect = [
ironic_fakes.COMPUTE_NODES, running_nodes]
@ -187,7 +195,7 @@ class IronicHostManagerChangedNodesTestCase(test.NoDBTestCase):
self.assertEqual(10240, host.free_disk_mb)
self.assertEqual(1, host.vcpus_total)
self.assertEqual(0, host.vcpus_used)
self.assertEqual(self.compute_node['stats'], host.stats)
self.assertEqual(self.compute_node.stats, host.stats)
self.assertEqual('ironic', host.hypervisor_type)
self.assertEqual(1, host.hypervisor_version)
self.assertEqual('fake_host', host.hypervisor_hostname)


@ -4593,7 +4593,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
label='fake',
status=fields.PciDeviceStatus.ALLOCATED,
address='0000:00:00.1',
compute_id=compute_ref['id'],
compute_id=compute_ref.id,
instance_uuid=instance.uuid,
request_id=None,
extra_info={})
@ -4636,7 +4636,7 @@ class LibvirtConnTestCase(test.NoDBTestCase):
label='fake',
status=fields.PciDeviceStatus.ALLOCATED,
address='0000:00:00.2',
compute_id=compute_ref['id'],
compute_id=compute_ref.id,
instance_uuid=instance.uuid,
request_id=None,
extra_info={})