Instantiate aggregates information when HostManager is starting
In order to avoid having the scheduler continually query aggregate information from the Nova database, have the scheduler read all aggregate information once at service startup. A later patch will add scheduler RPC API calls to update the scheduler about any changes to an aggregate's settings.

Partially-Implements: blueprint isolate-scheduler-db
Change-Id: Ic5298a85656800e8eae4840ff83e51e6503c8a10
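The pattern the patch introduces is simple: load every aggregate once, index it two ways (by ID and by host name), and answer per-host lookups from memory. The following standalone sketch reproduces that idea outside of Nova; the Aggregate stand-in class and the sample data are illustrative only and not part of the patch.

    import collections

    class Aggregate(object):
        """Stand-in for nova.objects.Aggregate, for illustration only."""
        def __init__(self, id, hosts):
            self.id = id
            self.hosts = hosts

    def build_aggregate_caches(aggregates):
        # Dict of aggregates keyed by their ID.
        aggs_by_id = {}
        # Dict of sets of aggregate IDs keyed by host name.
        host_aggregates_map = collections.defaultdict(set)
        for agg in aggregates:
            aggs_by_id[agg.id] = agg
            for host in agg.hosts:
                host_aggregates_map[host].add(agg.id)
        return aggs_by_id, host_aggregates_map

    # Load once (in the patch this happens in HostManager.__init__), then every
    # per-request lookup is plain dictionary access instead of a DB query.
    aggs_by_id, host_map = build_aggregate_caches(
        [Aggregate(1, ['host1', 'host2']), Aggregate(2, ['host2'])])
    host2_aggs = [aggs_by_id[agg_id] for agg_id in host_map['host2']]
    assert sorted(agg.id for agg in host2_aggs) == [1, 2]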
@@ -28,6 +28,7 @@ from oslo_utils import timeutils
 from nova.compute import task_states
 from nova.compute import vm_states
+from nova import context as ctxt_mod
 from nova import exception
 from nova.i18n import _, _LI, _LW
 from nova import objects
@@ -145,6 +146,9 @@ class HostState(object):
         # Generic metrics from compute nodes
         self.metrics = {}
 
+        # List of aggregates the host belongs to
+        self.aggregates = []
+
         self.updated = None
         if compute:
             self.update_from_compute_node(compute)
@@ -309,6 +313,20 @@ class HostManager(object):
         weigher_classes = self.weight_handler.get_matching_classes(
                 CONF.scheduler_weight_classes)
         self.weighers = [cls() for cls in weigher_classes]
+        # Dict of aggregates keyed by their ID
+        self.aggs_by_id = {}
+        # Dict of set of aggregate IDs keyed by the name of the host belonging
+        # to those aggregates
+        self.host_aggregates_map = collections.defaultdict(set)
+        self._init_aggregates()
+
+    def _init_aggregates(self):
+        elevated = ctxt_mod.get_admin_context()
+        aggs = objects.AggregateList.get_all(elevated)
+        for agg in aggs:
+            self.aggs_by_id[agg.id] = agg
+            for host in agg.hosts:
+                self.host_aggregates_map[host].add(agg.id)
 
     def _choose_host_filters(self, filter_cls_names):
         """Since the caller may specify which filters to use we need
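The commit message defers cache refreshing to a later patch that will add scheduler RPC API calls. Purely as a hypothetical sketch of how such an update could be folded into the two caches built by _init_aggregates (nothing like this exists in the patch itself):

    import collections

    # Illustrative stand-in for nova.objects.Aggregate, not the real class.
    Aggregate = collections.namedtuple('Aggregate', ['id', 'hosts'])

    def apply_aggregate_update(aggs_by_id, host_aggregates_map, aggregate):
        """Hypothetical sketch: fold one updated aggregate into the caches."""
        aggs_by_id[aggregate.id] = aggregate
        # Drop the aggregate ID from hosts that are no longer members ...
        for host, agg_ids in host_aggregates_map.items():
            if aggregate.id in agg_ids and host not in aggregate.hosts:
                agg_ids.discard(aggregate.id)
        # ... and record it for the hosts that are members now.
        for host in aggregate.hosts:
            host_aggregates_map[host].add(aggregate.id)

    # Example: 'host1' leaves aggregate 1 and 'host3' joins it.
    aggs_by_id = {1: Aggregate(1, ['host1', 'host2'])}
    host_map = collections.defaultdict(set,
                                       {'host1': set([1]), 'host2': set([1])})
    apply_aggregate_update(aggs_by_id, host_map,
                           Aggregate(1, ['host2', 'host3']))
    assert host_map['host1'] == set() and host_map['host3'] == set([1])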
@@ -446,6 +464,12 @@ class HostManager(object):
             else:
                 host_state = self.host_state_cls(host, node, compute=compute)
                 self.host_state_map[state_key] = host_state
+            # We force to update the aggregates info each time a new request
+            # comes in, because some changes on the aggregates could have been
+            # happening after setting this field for the first time
+            host_state.aggregates = [self.aggs_by_id[agg_id] for agg_id in
+                                     self.host_aggregates_map[
+                                         host_state.host]]
             host_state.update_service(dict(service.iteritems()))
             seen_nodes.add(state_key)
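Attaching the cached aggregates to each HostState means code downstream in the scheduling path can read host_state.aggregates without touching the database. The snippet below is only an illustration of such a consumer; the 'ssd' metadata key, the fake classes, and the helper are made up for the example and are not part of this patch or of Nova's filters.

    class FakeAggregate(object):
        """Carries just the fields this example reads."""
        def __init__(self, metadata):
            self.metadata = metadata

    class FakeHostState(object):
        def __init__(self, aggregates):
            self.aggregates = aggregates

    def host_allows_ssd_flavors(host_state):
        # Works purely from the aggregates cached on the host state,
        # so no database access is needed in the hot scheduling path.
        return any(agg.metadata.get('ssd') == 'true'
                   for agg in host_state.aggregates)

    state = FakeHostState([FakeAggregate({'ssd': 'true'})])
    assert host_allows_ssd_flavors(state)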
@@ -16,6 +16,8 @@
 Tests For HostManager
 """
 
+import collections
+
 import mock
 from oslo_config import cfg
 from oslo_serialization import jsonutils
@@ -56,7 +58,8 @@ class HostManagerTestCase(test.NoDBTestCase):
                 cls in ['FakeFilterClass1',
                         'FakeFilterClass2']])
         self.flags(scheduler_default_filters=['FakeFilterClass1'])
-        self.host_manager = host_manager.HostManager()
+        with mock.patch.object(host_manager.HostManager, '_init_aggregates'):
+            self.host_manager = host_manager.HostManager()
         self.fake_hosts = [host_manager.HostState('fake_host%s' % x,
                 'fake-node') for x in xrange(1, 5)]
         self.fake_hosts += [host_manager.HostState('fake_multihost',
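Because HostManager.__init__ now calls objects.AggregateList.get_all(), every NoDBTestCase that builds a manager wraps construction in mock.patch.object to stub _init_aggregates, a pattern the remaining hunks repeat for the ironic and scheduler test cases. A minimal standalone illustration of that pattern, using a toy class rather than the real HostManager and the external mock library the tests already import:

    import mock

    class Manager(object):
        """Toy analogue of HostManager: construction triggers a DB lookup."""
        def __init__(self):
            self._init_aggregates()

        def _init_aggregates(self):
            raise RuntimeError('no database access allowed in a NoDB test')

    # Stubbing the initializer hook lets the object be built without a DB,
    # which is what the setUp() changes in this patch do.
    with mock.patch.object(Manager, '_init_aggregates'):
        manager = Manager()
    assert isinstance(manager, Manager)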
@@ -67,6 +70,30 @@ class HostManagerTestCase(test.NoDBTestCase):
         self.assertEqual(1, len(default_filters))
         self.assertIsInstance(default_filters[0], FakeFilterClass1)
 
+    @mock.patch.object(objects.AggregateList, 'get_all')
+    def test_init_aggregates_no_aggs(self, agg_get_all):
+        agg_get_all.return_value = []
+        self.host_manager = host_manager.HostManager()
+        self.assertEqual({}, self.host_manager.aggs_by_id)
+        self.assertEqual({}, self.host_manager.host_aggregates_map)
+
+    @mock.patch.object(objects.AggregateList, 'get_all')
+    def test_init_aggregates_one_agg_no_hosts(self, agg_get_all):
+        fake_agg = objects.Aggregate(id=1, hosts=[])
+        agg_get_all.return_value = [fake_agg]
+        self.host_manager = host_manager.HostManager()
+        self.assertEqual({1: fake_agg}, self.host_manager.aggs_by_id)
+        self.assertEqual({}, self.host_manager.host_aggregates_map)
+
+    @mock.patch.object(objects.AggregateList, 'get_all')
+    def test_init_aggregates_one_agg_with_hosts(self, agg_get_all):
+        fake_agg = objects.Aggregate(id=1, hosts=['fake-host'])
+        agg_get_all.return_value = [fake_agg]
+        self.host_manager = host_manager.HostManager()
+        self.assertEqual({1: fake_agg}, self.host_manager.aggs_by_id)
+        self.assertEqual({'fake-host': set([1])},
+                         self.host_manager.host_aggregates_map)
+
     def test_choose_host_filters_not_found(self):
         self.assertRaises(exception.SchedulerHostFilterNotFound,
                           self.host_manager._choose_host_filters,
@@ -312,13 +339,67 @@ class HostManagerTestCase(test.NoDBTestCase):
         self.assertEqual(host_states_map[('host4', 'node4')].free_disk_mb,
                          8388608)
 
+    @mock.patch.object(host_manager.HostState, 'update_from_compute_node')
+    @mock.patch.object(objects.ComputeNodeList, 'get_all')
+    @mock.patch.object(objects.ServiceList, 'get_by_topic')
+    def test_get_all_host_states_with_no_aggs(self, svc_get_by_topic,
+                                              cn_get_all, update_from_cn):
+        svc_get_by_topic.return_value = [objects.Service(host='fake')]
+        cn_get_all.return_value = [
+            objects.ComputeNode(host='fake', hypervisor_hostname='fake')]
+
+        self.host_manager.host_aggregates_map = collections.defaultdict(set)
+
+        self.host_manager.get_all_host_states('fake-context')
+        host_state = self.host_manager.host_state_map[('fake', 'fake')]
+        self.assertEqual([], host_state.aggregates)
+
+    @mock.patch.object(host_manager.HostState, 'update_from_compute_node')
+    @mock.patch.object(objects.ComputeNodeList, 'get_all')
+    @mock.patch.object(objects.ServiceList, 'get_by_topic')
+    def test_get_all_host_states_with_matching_aggs(self, svc_get_by_topic,
+                                                    cn_get_all,
+                                                    update_from_cn):
+        svc_get_by_topic.return_value = [objects.Service(host='fake')]
+        cn_get_all.return_value = [
+            objects.ComputeNode(host='fake', hypervisor_hostname='fake')]
+        fake_agg = objects.Aggregate(id=1)
+        self.host_manager.host_aggregates_map = collections.defaultdict(
+            set, {'fake': set([1])})
+        self.host_manager.aggs_by_id = {1: fake_agg}
+
+        self.host_manager.get_all_host_states('fake-context')
+        host_state = self.host_manager.host_state_map[('fake', 'fake')]
+        self.assertEqual([fake_agg], host_state.aggregates)
+
+    @mock.patch.object(host_manager.HostState, 'update_from_compute_node')
+    @mock.patch.object(objects.ComputeNodeList, 'get_all')
+    @mock.patch.object(objects.ServiceList, 'get_by_topic')
+    def test_get_all_host_states_with_not_matching_aggs(self, svc_get_by_topic,
+                                                        cn_get_all,
+                                                        update_from_cn):
+        svc_get_by_topic.return_value = [objects.Service(host='fake'),
+                                         objects.Service(host='other')]
+        cn_get_all.return_value = [
+            objects.ComputeNode(host='fake', hypervisor_hostname='fake'),
+            objects.ComputeNode(host='other', hypervisor_hostname='other')]
+        fake_agg = objects.Aggregate(id=1)
+        self.host_manager.host_aggregates_map = collections.defaultdict(
+            set, {'other': set([1])})
+        self.host_manager.aggs_by_id = {1: fake_agg}
+
+        self.host_manager.get_all_host_states('fake-context')
+        host_state = self.host_manager.host_state_map[('fake', 'fake')]
+        self.assertEqual([], host_state.aggregates)
+
 
 class HostManagerChangedNodesTestCase(test.NoDBTestCase):
     """Test case for HostManager class."""
 
     def setUp(self):
         super(HostManagerChangedNodesTestCase, self).setUp()
-        self.host_manager = host_manager.HostManager()
+        with mock.patch.object(host_manager.HostManager, '_init_aggregates'):
+            self.host_manager = host_manager.HostManager()
         self.fake_hosts = [
             host_manager.HostState('host1', 'node1'),
             host_manager.HostState('host2', 'node2'),
@@ -48,9 +48,11 @@ class IronicHostManagerTestCase(test.NoDBTestCase):
 
     def setUp(self):
         super(IronicHostManagerTestCase, self).setUp()
-        self.host_manager = ironic_host_manager.IronicHostManager()
+        with mock.patch.object(host_manager.HostManager, '_init_aggregates'):
+            self.host_manager = ironic_host_manager.IronicHostManager()
 
-    def test_manager_public_api_signatures(self):
+    @mock.patch.object(host_manager.HostManager, '_init_aggregates')
+    def test_manager_public_api_signatures(self, mock_init_aggs):
         self.assertPublicAPISignatures(host_manager.HostManager(),
                                        self.host_manager)
@@ -99,7 +101,8 @@ class IronicHostManagerChangedNodesTestCase(test.NoDBTestCase):
 
     def setUp(self):
         super(IronicHostManagerChangedNodesTestCase, self).setUp()
-        self.host_manager = ironic_host_manager.IronicHostManager()
+        with mock.patch.object(host_manager.HostManager, '_init_aggregates'):
+            self.host_manager = ironic_host_manager.IronicHostManager()
         ironic_driver = "nova.virt.ironic.driver.IronicDriver"
         supported_instances = [
             objects.HVSpec.from_list(["i386", "baremetal", "baremetal"])]
@@ -233,7 +236,8 @@ class IronicHostManagerTestFilters(test.NoDBTestCase):
                 cls in ['FakeFilterClass1',
                         'FakeFilterClass2']])
         self.flags(scheduler_default_filters=['FakeFilterClass1'])
-        self.host_manager = ironic_host_manager.IronicHostManager()
+        with mock.patch.object(host_manager.HostManager, '_init_aggregates'):
+            self.host_manager = ironic_host_manager.IronicHostManager()
         self.fake_hosts = [ironic_host_manager.IronicNodeState(
                 'fake_host%s' % x, 'fake-node') for x in range(1, 5)]
         self.fake_hosts += [ironic_host_manager.IronicNodeState(
@@ -22,6 +22,7 @@ import mock
 from nova import context
 from nova import db
 from nova.scheduler import driver
+from nova.scheduler import host_manager
 from nova.scheduler import manager
 from nova import servicegroup
 from nova import test
@@ -38,7 +39,8 @@ class SchedulerManagerTestCase(test.NoDBTestCase):
     def setUp(self):
         super(SchedulerManagerTestCase, self).setUp()
         self.flags(scheduler_driver=self.driver_cls_name)
-        self.manager = self.manager_cls()
+        with mock.patch.object(host_manager.HostManager, '_init_aggregates'):
+            self.manager = self.manager_cls()
         self.context = context.RequestContext('fake_user', 'fake_project')
         self.topic = 'fake_topic'
         self.fake_args = (1, 2, 3)
@@ -78,7 +80,8 @@ class SchedulerTestCase(test.NoDBTestCase):
 
     def setUp(self):
         super(SchedulerTestCase, self).setUp()
-        self.driver = self.driver_cls()
+        with mock.patch.object(host_manager.HostManager, '_init_aggregates'):
+            self.driver = self.driver_cls()
         self.context = context.RequestContext('fake_user', 'fake_project')
         self.topic = 'fake_topic'
         self.servicegroup_api = servicegroup.API()