Change how HostManager retrieves service information

As we want to separate the compute node from its service information, fetch the
services with ServiceList.get_by_topic and look them up by host, instead of
using the service field of the ComputeNode object (a short sketch of the new
lookup pattern follows the commit metadata below).

Partially-Implements: blueprint detach-service-from-computenode

Change-Id: Ie81760fdb5b0c892de0f9ca6355c39c7bd67193f
Author: Sylvain Bauza
Date:   2015-02-12 14:46:56 +01:00
parent e8bbf9e903
commit 5f6306985f
6 changed files with 79 additions and 19 deletions
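
The core of the change is the HostManager hunk below: get_all_host_states now issues a single ServiceList.get_by_topic query, keys the result by host, and looks each compute node's host up in that dict instead of touching compute.service. The following is a minimal sketch of that lookup pattern against a Nova tree of this era; the two helper functions are illustrative names only and are not part of the change itself:

from oslo_config import cfg

from nova import objects

CONF = cfg.CONF
# compute_topic is defined in nova.compute.rpcapi; the ironic host manager
# test in this change imports it the same way.
CONF.import_opt('compute_topic', 'nova.compute.rpcapi')


def get_services_by_host(context):
    # Illustrative helper: one ServiceList query per scheduling pass,
    # keyed by host name, so per-node service lookups become dict reads.
    return {service.host: service
            for service in objects.ServiceList.get_by_topic(
                context, CONF.compute_topic)}


def service_for_node(service_refs, compute):
    # Illustrative helper: returns None when a compute node has no matching
    # service record; get_all_host_states logs a warning and skips such nodes.
    return service_refs.get(compute.host)

This trades the per-node lazy load of ComputeNode.service for one list query per pass, which is also why the test fakes below drop their _cached_service fields and instead gain a SERVICES list plus a get_service_by_host() helper.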


@@ -416,11 +416,15 @@ class HostManager(object):
in HostState are pre-populated and adjusted based on data in the db.
"""
service_refs = {service.host: service
for service in objects.ServiceList.get_by_topic(
context, CONF.compute_topic)}
# Get resource usage across the available compute nodes:
compute_nodes = objects.ComputeNodeList.get_all(context)
seen_nodes = set()
for compute in compute_nodes:
service = compute.service
service = service_refs.get(compute.host)
if not service:
LOG.warning(_LW(
"No service record found for host %(host)s "


@@ -34,7 +34,6 @@ COMPUTE_NODES = [
id=1, local_gb=1024, memory_mb=1024, vcpus=1,
disk_available_least=None, free_ram_mb=512, vcpus_used=1,
free_disk_gb=512, local_gb_used=0, updated_at=None,
_cached_service=objects.Service(host='host1', disabled=False),
host='host1', hypervisor_hostname='node1', host_ip='127.0.0.1',
hypervisor_version=0, numa_topology=None,
hypervisor_type='foo', supported_hv_specs=[],
@@ -43,7 +42,6 @@ COMPUTE_NODES = [
id=2, local_gb=2048, memory_mb=2048, vcpus=2,
disk_available_least=1024, free_ram_mb=1024, vcpus_used=2,
free_disk_gb=1024, local_gb_used=0, updated_at=None,
_cached_service=objects.Service(host='host2', disabled=True),
host='host2', hypervisor_hostname='node2', host_ip='127.0.0.1',
hypervisor_version=0, numa_topology=None,
hypervisor_type='foo', supported_hv_specs=[],
@@ -52,7 +50,6 @@ COMPUTE_NODES = [
id=3, local_gb=4096, memory_mb=4096, vcpus=4,
disk_available_least=3333, free_ram_mb=3072, vcpus_used=1,
free_disk_gb=3072, local_gb_used=0, updated_at=None,
_cached_service=objects.Service(host='host3', disabled=False),
host='host3', hypervisor_hostname='node3', host_ip='127.0.0.1',
hypervisor_version=0, numa_topology=NUMA_TOPOLOGY._to_json(),
hypervisor_type='foo', supported_hv_specs=[],
@@ -61,17 +58,28 @@ COMPUTE_NODES = [
id=4, local_gb=8192, memory_mb=8192, vcpus=8,
disk_available_least=8192, free_ram_mb=8192, vcpus_used=0,
free_disk_gb=8888, local_gb_used=0, updated_at=None,
_cached_service=objects.Service(host='host4', disabled=False),
host='host4', hypervisor_hostname='node4', host_ip='127.0.0.1',
hypervisor_version=0, numa_topology=None,
hypervisor_type='foo', supported_hv_specs=[],
pci_device_pools=None, cpu_info=None, stats=None, metrics=None),
# Broken entry
objects.ComputeNode(
id=5, local_gb=1024, memory_mb=1024, vcpus=1, _cached_service=None,
id=5, local_gb=1024, memory_mb=1024, vcpus=1,
host='fake', hypervisor_hostname='fake-hyp'),
]
SERVICES = [
objects.Service(host='host1', disabled=False),
objects.Service(host='host2', disabled=True),
objects.Service(host='host3', disabled=False),
objects.Service(host='host4', disabled=False),
]
def get_service_by_host(host):
services = [service for service in SERVICES if service.host == host]
return services[0]
class FakeHostState(host_manager.HostState):
def __init__(self, host, node, attribute_dict):


@@ -24,7 +24,6 @@ COMPUTE_NODES = [
id=1, local_gb=10, memory_mb=1024, vcpus=1,
vcpus_used=0, local_gb_used=0, memory_mb_used=0,
updated_at=None, cpu_info='baremetal cpu',
_cached_service=objects.Service(host='host1', disabled=False),
host='host1',
hypervisor_hostname='node1uuid', host_ip='127.0.0.1',
hypervisor_version=1, hypervisor_type='ironic',
@@ -38,7 +37,6 @@ COMPUTE_NODES = [
id=2, local_gb=20, memory_mb=2048, vcpus=1,
vcpus_used=0, local_gb_used=0, memory_mb_used=0,
updated_at=None, cpu_info='baremetal cpu',
_cached_service=objects.Service(host='host2', disabled=True),
host='host2',
hypervisor_hostname='node2uuid', host_ip='127.0.0.1',
hypervisor_version=1, hypervisor_type='ironic',
@@ -52,7 +50,6 @@ COMPUTE_NODES = [
id=3, local_gb=30, memory_mb=3072, vcpus=1,
vcpus_used=0, local_gb_used=0, memory_mb_used=0,
updated_at=None, cpu_info='baremetal cpu',
_cached_service=objects.Service(host='host3', disabled=False),
host='host3',
hypervisor_hostname='node3uuid', host_ip='127.0.0.1',
hypervisor_version=1, hypervisor_type='ironic',
@@ -66,7 +63,6 @@ COMPUTE_NODES = [
id=4, local_gb=40, memory_mb=4096, vcpus=1,
vcpus_used=0, local_gb_used=0, memory_mb_used=0,
updated_at=None, cpu_info='baremetal cpu',
_cached_service=objects.Service(host='host4', disabled=False),
host='host4',
hypervisor_hostname='node4uuid', host_ip='127.0.0.1',
hypervisor_version=1, hypervisor_type='ironic',
@@ -80,7 +76,6 @@ COMPUTE_NODES = [
objects.ComputeNode(
id=5, local_gb=50, memory_mb=5120, vcpus=1,
host='fake', cpu_info='baremetal cpu',
_cached_service=None,
stats=dict(ironic_driver=
"nova.virt.ironic.driver.IronicDriver",
cpu_arch='i386'),
@@ -89,3 +84,15 @@ COMPUTE_NODES = [
free_disk_gb=50, free_ram_mb=5120,
hypervisor_hostname='fake-hyp'),
]
SERVICES = [
objects.Service(host='host1', disabled=False),
objects.Service(host='host2', disabled=True),
objects.Service(host='host3', disabled=False),
objects.Service(host='host4', disabled=False),
]
def get_service_by_host(host):
services = [service for service in SERVICES if service.host == host]
return services[0]


@@ -36,12 +36,15 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
driver_cls = filter_scheduler.FilterScheduler
@mock.patch('nova.objects.ServiceList.get_by_topic',
return_value=fakes.SERVICES)
@mock.patch('nova.objects.ComputeNodeList.get_all',
return_value=fakes.COMPUTE_NODES)
@mock.patch('nova.db.instance_extra_get_by_instance_uuid',
return_value={'numa_topology': None,
'pci_requests': None})
def test_schedule_happy_day(self, mock_get_extra, mock_get_all):
def test_schedule_happy_day(self, mock_get_extra, mock_get_all,
mock_get_by_topic):
"""Make sure there's nothing glaringly wrong with _schedule()
by doing a happy day pass through.
"""
@@ -111,12 +114,15 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.assertEqual({'vcpus': 5}, host_state.limits)
@mock.patch('nova.objects.ServiceList.get_by_topic',
return_value=fakes.SERVICES)
@mock.patch('nova.objects.ComputeNodeList.get_all',
return_value=fakes.COMPUTE_NODES)
@mock.patch('nova.db.instance_extra_get_by_instance_uuid',
return_value={'numa_topology': None,
'pci_requests': None})
def test_schedule_host_pool(self, mock_get_extra, mock_get_all):
def test_schedule_host_pool(self, mock_get_extra, mock_get_all,
mock_get_by_topic):
"""Make sure the scheduler_host_subset_size property works properly."""
self.flags(scheduler_host_subset_size=2)
@@ -141,12 +147,15 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
# one host should be chosen
self.assertEqual(len(hosts), 1)
@mock.patch('nova.objects.ServiceList.get_by_topic',
return_value=fakes.SERVICES)
@mock.patch('nova.objects.ComputeNodeList.get_all',
return_value=fakes.COMPUTE_NODES)
@mock.patch('nova.db.instance_extra_get_by_instance_uuid',
return_value={'numa_topology': None,
'pci_requests': None})
def test_schedule_large_host_pool(self, mock_get_extra, mock_get_all):
def test_schedule_large_host_pool(self, mock_get_extra, mock_get_all,
mock_get_by_topic):
"""Hosts should still be chosen if pool size
is larger than number of filtered hosts.
"""
@@ -172,12 +181,15 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
# one host should be chosen
self.assertEqual(len(hosts), 1)
@mock.patch('nova.objects.ServiceList.get_by_topic',
return_value=fakes.SERVICES)
@mock.patch('nova.objects.ComputeNodeList.get_all',
return_value=fakes.COMPUTE_NODES)
@mock.patch('nova.db.instance_extra_get_by_instance_uuid',
return_value={'numa_topology': None,
'pci_requests': None})
def test_schedule_chooses_best_host(self, mock_get_extra, mock_get_all):
def test_schedule_chooses_best_host(self, mock_get_extra, mock_get_all,
mock_get_by_topic):
"""If scheduler_host_subset_size is 1, the largest host with greatest
weight should be returned.
"""
@@ -218,12 +230,15 @@ class FilterSchedulerTestCase(test_scheduler.SchedulerTestCase):
self.assertEqual(50, hosts[0].weight)
@mock.patch('nova.objects.ServiceList.get_by_topic',
return_value=fakes.SERVICES)
@mock.patch('nova.objects.ComputeNodeList.get_all',
return_value=fakes.COMPUTE_NODES)
@mock.patch('nova.db.instance_extra_get_by_instance_uuid',
return_value={'numa_topology': None,
'pci_requests': None})
def test_select_destinations(self, mock_get_extra, mock_get_all):
def test_select_destinations(self, mock_get_extra, mock_get_all,
mock_get_by_topic):
"""select_destinations is basically a wrapper around _schedule().
Similar to the _schedule tests, this just does a happy path test to


@@ -281,6 +281,8 @@ class HostManagerTestCase(test.NoDBTestCase):
self.mox.StubOutWithMock(objects.ComputeNodeList, 'get_all')
self.mox.StubOutWithMock(host_manager.LOG, 'warning')
objects.ServiceList.get_by_topic(
context, CONF.compute_topic).AndReturn(fakes.SERVICES)
objects.ComputeNodeList.get_all(context).AndReturn(fakes.COMPUTE_NODES)
# node 3 host physical disk space is greater than database
host_manager.LOG.warning("Host %(hostname)s has more disk space "
@@ -292,7 +294,6 @@ class HostManagerTestCase(test.NoDBTestCase):
host_manager.LOG.warning("No service record found for host %(host)s "
"on %(topic)s topic",
{'host': 'fake', 'topic': CONF.compute_topic})
self.mox.ReplayAll()
self.host_manager.get_all_host_states(context)
host_states_map = self.host_manager.host_state_map
@@ -305,7 +306,7 @@ class HostManagerTestCase(test.NoDBTestCase):
node = compute_node['hypervisor_hostname']
state_key = (host, node)
self.assertEqual(host_states_map[state_key].service,
obj_base.obj_to_primitive(compute_node['service']))
obj_base.obj_to_primitive(fakes.get_service_by_host(host)))
self.assertEqual(host_states_map[('host1', 'node1')].free_ram_mb,
512)
# 511GB
@@ -351,6 +352,8 @@ class HostManagerChangedNodesTestCase(test.NoDBTestCase):
self.mox.StubOutWithMock(objects.ServiceList, 'get_by_topic')
self.mox.StubOutWithMock(objects.ComputeNodeList, 'get_all')
objects.ServiceList.get_by_topic(
context, CONF.compute_topic).AndReturn(fakes.SERVICES)
objects.ComputeNodeList.get_all(context).AndReturn(fakes.COMPUTE_NODES)
self.mox.ReplayAll()
@@ -364,10 +367,14 @@ class HostManagerChangedNodesTestCase(test.NoDBTestCase):
self.mox.StubOutWithMock(objects.ServiceList, 'get_by_topic')
self.mox.StubOutWithMock(objects.ComputeNodeList, 'get_all')
# all nodes active for first call
objects.ServiceList.get_by_topic(
context, CONF.compute_topic).AndReturn(fakes.SERVICES)
objects.ComputeNodeList.get_all(context).AndReturn(fakes.COMPUTE_NODES)
# remove node4 for second call
running_nodes = [n for n in fakes.COMPUTE_NODES
if n.get('hypervisor_hostname') != 'node4']
objects.ServiceList.get_by_topic(
context, CONF.compute_topic).AndReturn(fakes.SERVICES)
objects.ComputeNodeList.get_all(context).AndReturn(running_nodes)
self.mox.ReplayAll()
@@ -382,8 +389,12 @@ class HostManagerChangedNodesTestCase(test.NoDBTestCase):
self.mox.StubOutWithMock(objects.ServiceList, 'get_by_topic')
self.mox.StubOutWithMock(objects.ComputeNodeList, 'get_all')
# all nodes active for first call
objects.ServiceList.get_by_topic(
context, CONF.compute_topic).AndReturn(fakes.SERVICES)
objects.ComputeNodeList.get_all(context).AndReturn(fakes.COMPUTE_NODES)
# remove all nodes for second call
objects.ServiceList.get_by_topic(
context, CONF.compute_topic).AndReturn(fakes.SERVICES)
objects.ComputeNodeList.get_all(context).AndReturn([])
self.mox.ReplayAll()


@@ -18,6 +18,7 @@ Tests For IronicHostManager
"""
import mock
from oslo_config import cfg
from nova import exception
from nova import objects
@@ -28,6 +29,9 @@ from nova.scheduler import ironic_host_manager
from nova import test
from nova.tests.unit.scheduler import ironic_fakes
CONF = cfg.CONF
CONF.import_opt('compute_topic', 'nova.compute.rpcapi')
class FakeFilterClass1(filters.BaseHostFilter):
def host_passes(self, host_state, filter_properties):
@@ -62,7 +66,10 @@ class IronicHostManagerTestCase(test.NoDBTestCase):
# Ensure .service is set and we have the values we expect to.
context = 'fake_context'
self.mox.StubOutWithMock(objects.ServiceList, 'get_by_topic')
self.mox.StubOutWithMock(objects.ComputeNodeList, 'get_all')
objects.ServiceList.get_by_topic(
context, CONF.compute_topic).AndReturn(ironic_fakes.SERVICES)
objects.ComputeNodeList.get_all(context).AndReturn(
ironic_fakes.COMPUTE_NODES)
self.mox.ReplayAll()
@@ -78,7 +85,7 @@ class IronicHostManagerTestCase(test.NoDBTestCase):
state_key = (host, node)
self.assertEqual(host_states_map[state_key].service,
obj_base.obj_to_primitive(
compute_node.service))
ironic_fakes.get_service_by_host(host)))
self.assertEqual(compute_node.stats,
host_states_map[state_key].stats)
self.assertEqual(compute_node.free_ram_mb,
@@ -131,11 +138,15 @@ class IronicHostManagerChangedNodesTestCase(test.NoDBTestCase):
self.mox.StubOutWithMock(objects.ServiceList, 'get_by_topic')
self.mox.StubOutWithMock(objects.ComputeNodeList, 'get_all')
# all nodes active for first call
objects.ServiceList.get_by_topic(
context, CONF.compute_topic).AndReturn(ironic_fakes.SERVICES)
objects.ComputeNodeList.get_all(context).AndReturn(
ironic_fakes.COMPUTE_NODES)
# remove node4 for second call
running_nodes = [n for n in ironic_fakes.COMPUTE_NODES
if n.get('hypervisor_hostname') != 'node4uuid']
objects.ServiceList.get_by_topic(
context, CONF.compute_topic).AndReturn(ironic_fakes.SERVICES)
objects.ComputeNodeList.get_all(context).AndReturn(running_nodes)
self.mox.ReplayAll()
@@ -150,9 +161,13 @@ class IronicHostManagerChangedNodesTestCase(test.NoDBTestCase):
self.mox.StubOutWithMock(objects.ServiceList, 'get_by_topic')
self.mox.StubOutWithMock(objects.ComputeNodeList, 'get_all')
# all nodes active for first call
objects.ServiceList.get_by_topic(
context, CONF.compute_topic).AndReturn(ironic_fakes.SERVICES)
objects.ComputeNodeList.get_all(context).AndReturn(
ironic_fakes.COMPUTE_NODES)
# remove all nodes for second call
objects.ServiceList.get_by_topic(
context, CONF.compute_topic).AndReturn(ironic_fakes.SERVICES)
objects.ComputeNodeList.get_all(context).AndReturn([])
self.mox.ReplayAll()