Merge "Add required reason for hold" into feature/zuulv3

This commit is contained in:
Jenkins 2017-07-31 20:36:25 +00:00 committed by Gerrit Code Review
commit a1e39d0676
8 changed files with 16 additions and 6 deletions

View File

@@ -28,7 +28,7 @@ Autohold
Example::
zuul autohold --tenant openstack --project example_project --job example_job --count 1
zuul autohold --tenant openstack --project example_project --job example_job --reason "reason text" --count 1
Enqueue
^^^^^^^

View File

@@ -1441,7 +1441,8 @@ class TestScheduler(ZuulTestCase):
client = zuul.rpcclient.RPCClient('127.0.0.1',
self.gearman_server.port)
self.addCleanup(client.shutdown)
r = client.autohold('tenant-one', 'org/project', 'project-test2', 1)
r = client.autohold('tenant-one', 'org/project', 'project-test2',
"reason text", 1)
self.assertTrue(r)
self.executor_server.failJob('project-test2', A)
@@ -1469,6 +1470,7 @@ class TestScheduler(ZuulTestCase):
'review.example.com/org/project',
'project-test2'])
)
self.assertEqual(held_node['hold_reason'], "reason text")
# Another failed change should not hold any more nodes
B = self.fake_gerrit.addFakeChange('org/project', 'master', 'B')

View File

@@ -54,6 +54,8 @@ class Client(zuul.cmd.ZuulApp):
required=True)
cmd_autohold.add_argument('--job', help='job name',
required=True)
cmd_autohold.add_argument('--reason', help='reason for the hold',
required=True)
cmd_autohold.add_argument('--count',
help='number of job runs (default: 1)',
required=False, type=int, default=1)
@@ -156,6 +158,7 @@ class Client(zuul.cmd.ZuulApp):
r = client.autohold(tenant_name=self.args.tenant,
project_name=self.args.project,
job_name=self.args.job,
reason=self.args.reason,
count=self.args.count)
return r

View File

@@ -357,6 +357,7 @@ class Node(object):
self.id = None
self.lock = None
self.hold_job = None
self.hold_reason = None
# Attributes from Nodepool
self._state = 'unknown'
self.state_time = time.time()
@@ -398,6 +399,7 @@ class Node(object):
d = {}
d['state'] = self.state
d['hold_job'] = self.hold_job
d['hold_reason'] = self.hold_reason
for k in self._keys:
d[k] = getattr(self, k)
return d

View File

@@ -55,12 +55,13 @@ class Nodepool(object):
if autohold_key not in self.sched.autohold_requests:
return
hold_iterations = self.sched.autohold_requests[autohold_key]
(hold_iterations, reason) = self.sched.autohold_requests[autohold_key]
nodes = nodeset.getNodes()
for node in nodes:
node.state = model.STATE_HOLD
node.hold_job = " ".join(autohold_key)
node.hold_reason = reason
self.sched.zk.storeNode(node)
# We remove the autohold when the number of nodes in hold

View File

@@ -48,10 +48,11 @@ class RPCClient(object):
self.log.debug("Job complete, success: %s" % (not job.failure))
return job
def autohold(self, tenant_name, project_name, job_name, count):
def autohold(self, tenant_name, project_name, job_name, reason, count):
data = {'tenant_name': tenant_name,
'project_name': project_name,
'job_name': job_name,
'reason': reason,
'count': count}
return not self.submitJob('zuul:autohold', data).failure

View File

@@ -111,6 +111,7 @@ class RPCListener(object):
return
params['job_name'] = args['job_name']
params['reason'] = args['reason']
if args['count'] < 0:
error = "Invalid count: %d" % args['count']

View File

@@ -350,14 +350,14 @@ class Scheduler(threading.Thread):
self.last_reconfigured = int(time.time())
# TODOv3(jeblair): reconfigure time should be per-tenant
def autohold(self, tenant_name, project_name, job_name, count):
def autohold(self, tenant_name, project_name, job_name, reason, count):
key = (tenant_name, project_name, job_name)
if count == 0 and key in self.autohold_requests:
self.log.debug("Removing autohold for %s", key)
del self.autohold_requests[key]
else:
self.log.debug("Autohold requested for %s", key)
self.autohold_requests[key] = count
self.autohold_requests[key] = (count, reason)
def promote(self, tenant_name, pipeline_name, change_ids):
event = PromoteEvent(tenant_name, pipeline_name, change_ids)