Merge "Convert HostMapping.cells to a dict" into stable/stein

This commit is contained in:
Zuul 2019-08-29 13:15:22 +00:00 committed by Gerrit Code Review
commit 64bbf64783
2 changed files with 26 additions and 22 deletions

View File

@@ -427,7 +427,7 @@ class HostManager(object):
count = 0 count = 0
if not computes_by_cell: if not computes_by_cell:
computes_by_cell = {} computes_by_cell = {}
for cell in self.cells: for cell in self.cells.values():
with context_module.target_cell(context, cell) as cctxt: with context_module.target_cell(context, cell) as cctxt:
cell_cns = objects.ComputeNodeList.get_all( cell_cns = objects.ComputeNodeList.get_all(
cctxt).objects cctxt).objects
@@ -654,24 +654,26 @@ class HostManager(object):
temp_cells.objects.remove(c) temp_cells.objects.remove(c)
# once its done break for optimization # once its done break for optimization
break break
# NOTE(danms, tssurya): global list of cells cached which # NOTE(danms, tssurya): global dict, keyed by cell uuid, of cells
# will be refreshed every time a SIGHUP is sent to the scheduler. # cached which will be refreshed every time a SIGHUP is sent to the
self.cells = temp_cells # scheduler.
self.cells = {cell.uuid: cell for cell in temp_cells}
LOG.debug('Found %(count)i cells: %(cells)s', LOG.debug('Found %(count)i cells: %(cells)s',
{'count': len(self.cells), {'count': len(self.cells),
'cells': ', '.join([c.uuid for c in self.cells])}) 'cells': ', '.join(self.cells)})
# NOTE(tssurya): Global cache of only the enabled cells. This way # NOTE(tssurya): Global cache of only the enabled cells. This way
# scheduling is limited only to the enabled cells. However this # scheduling is limited only to the enabled cells. However this
# cache will be refreshed every time a cell is disabled or enabled # cache will be refreshed every time a cell is disabled or enabled
# or when a new cell is created as long as a SIGHUP signal is sent # or when a new cell is created as long as a SIGHUP signal is sent
# to the scheduler. # to the scheduler.
self.enabled_cells = [c for c in self.cells if not c.disabled] self.enabled_cells = [c for c in temp_cells if not c.disabled]
# Filtering the disabled cells only for logging purposes. # Filtering the disabled cells only for logging purposes.
disabled_cells = [c for c in self.cells if c.disabled] if LOG.isEnabledFor(logging.DEBUG):
LOG.debug('Found %(count)i disabled cells: %(cells)s', disabled_cells = [c for c in temp_cells if c.disabled]
{'count': len(disabled_cells), LOG.debug('Found %(count)i disabled cells: %(cells)s',
'cells': ', '.join( {'count': len(disabled_cells),
[c.identity for c in disabled_cells])}) 'cells': ', '.join(
[c.identity for c in disabled_cells])})
def get_host_states_by_uuids(self, context, compute_uuids, spec_obj): def get_host_states_by_uuids(self, context, compute_uuids, spec_obj):

View File

@@ -108,10 +108,12 @@ class HostManagerTestCase(test.NoDBTestCase):
return_value=cells) as mock_cm: return_value=cells) as mock_cm:
self.host_manager.refresh_cells_caches() self.host_manager.refresh_cells_caches()
mock_cm.assert_called_once() mock_cm.assert_called_once()
# Cell2 is not in the enabled list.
self.assertEqual(2, len(self.host_manager.enabled_cells)) self.assertEqual(2, len(self.host_manager.enabled_cells))
self.assertEqual(cell_uuid3, self.host_manager.enabled_cells[1].uuid) self.assertEqual(cell_uuid3, self.host_manager.enabled_cells[1].uuid)
# But it is still in the full list.
self.assertEqual(3, len(self.host_manager.cells)) self.assertEqual(3, len(self.host_manager.cells))
self.assertEqual(cell_uuid2, self.host_manager.cells[1].uuid) self.assertIn(cell_uuid2, self.host_manager.cells)
def test_refresh_cells_caches_except_cell0(self): def test_refresh_cells_caches_except_cell0(self):
ctxt = nova_context.RequestContext('fake-user', 'fake_project') ctxt = nova_context.RequestContext('fake-user', 'fake_project')
@@ -546,7 +548,7 @@ class HostManagerTestCase(test.NoDBTestCase):
mock_get_by_binary.return_value = fakes.SERVICES mock_get_by_binary.return_value = fakes.SERVICES
context = 'fake_context' context = 'fake_context'
compute_nodes, services = self.host_manager._get_computes_for_cells( compute_nodes, services = self.host_manager._get_computes_for_cells(
context, self.host_manager.cells) context, self.host_manager.enabled_cells)
# _get_host_states returns a generator, so make a map from it # _get_host_states returns a generator, so make a map from it
host_states_map = {(state.host, state.nodename): state for state in host_states_map = {(state.host, state.nodename): state for state in
@@ -612,7 +614,7 @@ class HostManagerTestCase(test.NoDBTestCase):
context = nova_context.get_admin_context() context = nova_context.get_admin_context()
compute_nodes, services = self.host_manager._get_computes_for_cells( compute_nodes, services = self.host_manager._get_computes_for_cells(
context, self.host_manager.cells) context, self.host_manager.enabled_cells)
hosts = self.host_manager._get_host_states( hosts = self.host_manager._get_host_states(
context, compute_nodes, services) context, compute_nodes, services)
@@ -641,7 +643,7 @@ class HostManagerTestCase(test.NoDBTestCase):
context = nova_context.get_admin_context() context = nova_context.get_admin_context()
compute_nodes, services = self.host_manager._get_computes_for_cells( compute_nodes, services = self.host_manager._get_computes_for_cells(
context, self.host_manager.cells) context, self.host_manager.enabled_cells)
hosts = self.host_manager._get_host_states( hosts = self.host_manager._get_host_states(
context, compute_nodes, services) context, compute_nodes, services)
@@ -672,7 +674,7 @@ class HostManagerTestCase(test.NoDBTestCase):
context = nova_context.get_admin_context() context = nova_context.get_admin_context()
compute_nodes, services = self.host_manager._get_computes_for_cells( compute_nodes, services = self.host_manager._get_computes_for_cells(
context, self.host_manager.cells) context, self.host_manager.enabled_cells)
hosts = self.host_manager._get_host_states( hosts = self.host_manager._get_host_states(
context, compute_nodes, services) context, compute_nodes, services)
@@ -718,7 +720,7 @@ class HostManagerTestCase(test.NoDBTestCase):
context = nova_context.get_admin_context() context = nova_context.get_admin_context()
compute_nodes, services = self.host_manager._get_computes_for_cells( compute_nodes, services = self.host_manager._get_computes_for_cells(
context, self.host_manager.cells) context, self.host_manager.enabled_cells)
self.host_manager._get_host_states(context, compute_nodes, services) self.host_manager._get_host_states(context, compute_nodes, services)
@@ -1077,7 +1079,7 @@ class HostManagerChangedNodesTestCase(test.NoDBTestCase):
context = 'fake_context' context = 'fake_context'
compute_nodes, services = self.host_manager._get_computes_for_cells( compute_nodes, services = self.host_manager._get_computes_for_cells(
context, self.host_manager.cells) context, self.host_manager.enabled_cells)
# _get_host_states returns a generator, so make a map from it # _get_host_states returns a generator, so make a map from it
host_states_map = {(state.host, state.nodename): state for state in host_states_map = {(state.host, state.nodename): state for state in
@@ -1103,7 +1105,7 @@ class HostManagerChangedNodesTestCase(test.NoDBTestCase):
# first call: all nodes # first call: all nodes
compute_nodes, services = self.host_manager._get_computes_for_cells( compute_nodes, services = self.host_manager._get_computes_for_cells(
context, self.host_manager.cells) context, self.host_manager.enabled_cells)
hosts = self.host_manager._get_host_states( hosts = self.host_manager._get_host_states(
context, compute_nodes, services) context, compute_nodes, services)
# _get_host_states returns a generator, so make a map from it # _get_host_states returns a generator, so make a map from it
@@ -1113,7 +1115,7 @@ class HostManagerChangedNodesTestCase(test.NoDBTestCase):
# second call: just running nodes # second call: just running nodes
compute_nodes, services = self.host_manager._get_computes_for_cells( compute_nodes, services = self.host_manager._get_computes_for_cells(
context, self.host_manager.cells) context, self.host_manager.enabled_cells)
hosts = self.host_manager._get_host_states( hosts = self.host_manager._get_host_states(
context, compute_nodes, services) context, compute_nodes, services)
host_states_map = {(state.host, state.nodename): state for state in host_states_map = {(state.host, state.nodename): state for state in
@@ -1133,7 +1135,7 @@ class HostManagerChangedNodesTestCase(test.NoDBTestCase):
# first call: all nodes # first call: all nodes
compute_nodes, services = self.host_manager._get_computes_for_cells( compute_nodes, services = self.host_manager._get_computes_for_cells(
context, self.host_manager.cells) context, self.host_manager.enabled_cells)
hosts = self.host_manager._get_host_states( hosts = self.host_manager._get_host_states(
context, compute_nodes, services) context, compute_nodes, services)
# _get_host_states returns a generator, so make a map from it # _get_host_states returns a generator, so make a map from it
@@ -1143,7 +1145,7 @@ class HostManagerChangedNodesTestCase(test.NoDBTestCase):
# second call: no nodes # second call: no nodes
compute_nodes, services = self.host_manager._get_computes_for_cells( compute_nodes, services = self.host_manager._get_computes_for_cells(
context, self.host_manager.cells) context, self.host_manager.enabled_cells)
hosts = self.host_manager._get_host_states( hosts = self.host_manager._get_host_states(
context, compute_nodes, services) context, compute_nodes, services)
host_states_map = {(state.host, state.nodename): state for state in host_states_map = {(state.host, state.nodename): state for state in