Fix wrong variable use when updating resource stats

The code path for updating the nodepool resource stats was still
assuming a full Project instance that we no longer have when requesting
a hold of a node set.

Change-Id: I03a11bc21ae519229fff05b6bff7b9dbb4ae9253
This commit is contained in:
Simon Westphahl 2021-08-20 11:35:56 +02:00 committed by James E. Blair
parent dfc298c40a
commit 919c5a3654
2 changed files with 25 additions and 3 deletions

View File

@ -1922,6 +1922,13 @@ class TestScheduler(ZuulTestCase):
client = zuul.rpcclient.RPCClient('127.0.0.1',
self.gearman_server.port)
self.addCleanup(client.shutdown)
# Set resources so we can examine the code path for updating
# the stats on autohold.
self.fake_nodepool.resources = {
'cores': 2,
'ram': 1024,
'instances': 1,
}
r = client.autohold('tenant-one', 'org/project', 'project-test2',
"", "", "reason text", 1)
self.assertTrue(r)
@ -1959,17 +1966,24 @@ class TestScheduler(ZuulTestCase):
break
self.assertIsNone(held_node)
self.hold_jobs_in_queue = True
# Now test that failed jobs are autoheld
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')
self.executor_server.failJob('project-test2', B)
self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1))
self.waitUntilSettled()
build = list(self.scheds.first.sched.executor.builds.values())[0]
self.hold_jobs_in_queue = False
self.executor_api.release()
self.waitUntilSettled()
self.assertEqual(B.data['status'], 'NEW')
self.assertEqual(B.reported, 1)
# project-test2
self.assertEqual(self.history[1].result, 'FAILURE')
self.assertTrue(build.held)
# Check nodepool for a held node
held_node = None
@ -2022,6 +2036,15 @@ class TestScheduler(ZuulTestCase):
self.assertEqual(3, len(node_states))
self.assertEqual([zuul.model.STATE_USED] * 3, node_states)
# The resources should be reported
self.assertReportedStat(
'zuul.nodepool.resources.tenant.tenant-one.ram',
value='1024', kind='g')
self.assertReportedStat(
'zuul.nodepool.resources.project.'
'review_example_com/org/project.ram',
value='1024', kind='g')
@simple_layout('layouts/autohold.yaml')
def test_autohold_info(self):
client = zuul.rpcclient.RPCClient('127.0.0.1',

View File

@ -256,16 +256,15 @@ class Nodepool(object):
# When holding a nodeset we need to update the gauges to avoid
# leaking resources
if tenant and project and resources:
project_name = project.canonical_name
subtract_resources(
self.current_resources_by_tenant[tenant], resources)
subtract_resources(
self.current_resources_by_project[project_name], resources)
self.current_resources_by_project[project], resources)
self.emitStatsResources()
if duration:
self.emitStatsResourceCounters(
tenant, project_name, resources, duration)
tenant, project, resources, duration)
# TODO (felix): Switch back to use a build object here rather than the
# ansible_job once it's available via ZK.