Merge "Allow dependent jobs to force dependencies to run"

This commit is contained in:
Zuul 2024-12-09 07:29:45 +00:00 committed by Gerrit Code Review
commit c3d1c027a6
4 changed files with 141 additions and 30 deletions

View File

@ -0,0 +1,15 @@
---
features:
- |
Zuul will now run any job that is a hard dependency of a job that
is set to run.
Previously if job B had a hard dependency on job A, and job A did
not run due to a file matcher, Zuul would report an error. Now it
will instead ignore the file matcher on job A as long as job B is
to be run.
This will allow for simpler file matchers, more intuitive
behavior, and enable a new pattern for job graphs where job A
might have a file matcher that never matches, but job B will cause
it to run regardless.

View File

@ -912,6 +912,10 @@ class FakeFrozenJob(model.Job):
self.uuid = uuid.uuid4().hex
self.ref = 'fake reference'
self.all_refs = [self.ref]
self.matches_change = True
def _set(self, **kw):
self.__dict__.update(kw)
class TestGraph(BaseTestCase):
@ -943,7 +947,7 @@ class TestGraph(BaseTestCase):
with testtools.ExpectedException(
Exception,
"Dependency cycle detected in job jobX"):
graph.freezeDependencies()
graph.freezeDependencies(self.log)
# Disallow circular dependencies
graph = setup_graph()
@ -954,7 +958,7 @@ class TestGraph(BaseTestCase):
with testtools.ExpectedException(
Exception,
"Dependency cycle detected in job job3"):
graph.freezeDependencies()
graph.freezeDependencies(self.log)
graph = setup_graph()
jobs[3].dependencies = frozenset([model.JobDependency(jobs[5].name)])
@ -967,7 +971,7 @@ class TestGraph(BaseTestCase):
with testtools.ExpectedException(
Exception,
"Dependency cycle detected in job job3"):
graph.freezeDependencies()
graph.freezeDependencies(self.log)
graph = setup_graph()
jobs[3].dependencies = frozenset([model.JobDependency(jobs[2].name)])
@ -978,7 +982,7 @@ class TestGraph(BaseTestCase):
graph.addJob(jobs[5])
jobs[6].dependencies = frozenset([model.JobDependency(jobs[2].name)])
graph.addJob(jobs[6])
graph.freezeDependencies()
graph.freezeDependencies(self.log)
def test_job_graph_allows_soft_dependencies(self):
parent = FakeFrozenJob('parent')
@ -990,14 +994,14 @@ class TestGraph(BaseTestCase):
graph = model.JobGraph({})
graph.addJob(parent)
graph.addJob(child)
graph.freezeDependencies()
graph.freezeDependencies(self.log)
self.assertEqual(graph.getParentJobsRecursively(child),
[parent])
# Skip the parent
graph = model.JobGraph({})
graph.addJob(child)
graph.freezeDependencies()
graph.freezeDependencies(self.log)
self.assertEqual(graph.getParentJobsRecursively(child), [])
def test_job_graph_allows_soft_dependencies4(self):
@ -1018,7 +1022,7 @@ class TestGraph(BaseTestCase):
for j in parents:
graph.addJob(j)
graph.addJob(child)
graph.freezeDependencies()
graph.freezeDependencies(self.log)
self.assertEqual(set(graph.getParentJobsRecursively(child)),
set(parents))
@ -1028,7 +1032,7 @@ class TestGraph(BaseTestCase):
if j is not parents[0]:
graph.addJob(j)
graph.addJob(child)
graph.freezeDependencies()
graph.freezeDependencies(self.log)
self.assertEqual(set(graph.getParentJobsRecursively(child)),
set(parents) -
set([parents[0], parents[2], parents[3]]))
@ -1039,7 +1043,7 @@ class TestGraph(BaseTestCase):
if j is not parents[3]:
graph.addJob(j)
graph.addJob(child)
graph.freezeDependencies()
graph.freezeDependencies(self.log)
self.assertEqual(set(graph.getParentJobsRecursively(child)),
set(parents) - set([parents[3]]))
@ -1068,7 +1072,7 @@ class TestGraph(BaseTestCase):
graph.addJob(child)
# We don't expect this to raise an exception
graph.freezeDependencies()
graph.freezeDependencies(self.log)
class TestTenant(BaseTestCase):

View File

@ -7100,6 +7100,42 @@ class TestJobUpdateFileMatcher(ZuulTestCase):
self.assertHistory([])
def test_job_dependencies_update(self):
    "Test that a dependent job will run when depending job updates"
    # Proposed in-repo config: existing-files keeps its README.txt
    # file matcher unchanged, existing-irr is changed and declares a
    # hard dependency on existing-files.
    # NOTE(review): the YAML indentation inside this literal appears
    # flattened by diff extraction — confirm nesting against the
    # upstream file before relying on it.
    in_repo_conf = textwrap.dedent(
        """
# Same
- job:
name: existing-files
files:
- README.txt
# Changed
- job:
name: existing-irr
files:
- README.txt
- project:
name: org/project
check:
jobs:
- existing-files
- existing-irr:
dependencies: [existing-files]
""")
    # Push the config update as a new change and let Zuul process
    # the resulting patchset-created event.
    file_dict = {'zuul.d/existing.yaml': in_repo_conf}
    A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A',
                                       files=file_dict)
    self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
    self.waitUntilSettled()
    # Both jobs must run: existing-irr because its config changed,
    # and existing-files because it is a hard dependency of
    # existing-irr (even though the change touches no README.txt).
    self.assertHistory([
        dict(name='existing-files', result='SUCCESS', changes='1,1'),
        dict(name='existing-irr', result='SUCCESS', changes='1,1'),
    ], ordered=False)
def test_new_job(self):
"Test matchers are overridden when creating a new job"
in_repo_conf = textwrap.dedent(

View File

@ -3108,6 +3108,8 @@ class FrozenJob(zkobject.ZKObject):
ref=None,
other_refs=[],
image_build_name=None,
# Not serialized
matches_change=True,
)
def __repr__(self):
@ -3649,8 +3651,12 @@ class Job(ConfigObject):
"""
project_canonical_names = set()
project_canonical_names.update(self.required_projects.keys())
if self.run:
run = [self.run[0]]
else:
run = []
project_canonical_names.update(self._projectsFromPlaybooks(
itertools.chain(self.pre_run, [self.run[0]], self.post_run,
itertools.chain(self.pre_run, run, self.post_run,
self.cleanup_run), with_implicit=True))
project_canonical_names.update({
iv.project_canonical_name
@ -4624,7 +4630,7 @@ class JobGraph(object):
uuids_to_iterate.add((u, current_dependent_uuids[u]['soft']))
return [self.getJobFromUuid(u) for u in all_dependent_uuids]
def freezeDependencies(self, layout=None):
def freezeDependencies(self, log, layout=None):
for dependent_uuid, parents in self._dependencies.items():
dependencies = self.job_dependencies.setdefault(dependent_uuid, {})
for parent_name, parent_soft in parents.items():
@ -4646,6 +4652,14 @@ class JobGraph(object):
raise JobConfigurationError(
"Job %s depends on %s which was not run." %
(dependent_job.name, parent_name))
if not parent_soft:
# If this is a hard dependency, then tell the
# parent to ignore file matchers.
if not parent_job.matches_change:
log.debug(
"Forcing non-matching hard dependency "
"%s to run for %s", parent_job, dependent_job)
parent_job._set(matches_change=True)
dependencies[parent_job.uuid] = dict(soft=parent_soft)
dependents = self.job_dependents.setdefault(
parent_job.uuid, {})
@ -4686,6 +4700,35 @@ class JobGraph(object):
return self.project_metadata[name]
return None
def removeNonMatchingJobs(self, log):
    """Prune jobs excluded by file matchers, honoring hard dependencies.

    First walk the recorded dependency edges and force any parent
    that is a hard (non-soft) dependency of some job to run by
    setting its ``matches_change`` flag, even if its own file
    matcher did not match.  Then remove every remaining job whose
    ``matches_change`` flag is still False.

    :param log: a logger used for debug output about forced and
        removed jobs.
    """
    # This is similar to what we do in freezeDependencies
    for dependent_uuid, parents in self._dependencies.items():
        for parent_name, parent_soft in parents.items():
            dependent_job = self._job_map[dependent_uuid]
            # We typically depend on jobs with the same ref, but
            # if we have been deduplicated, then we depend on
            # every job-ref for the given parent job.
            for ref in dependent_job.all_refs:
                parent_job = self.getJob(parent_name, ref)
                if parent_job is None:
                    # The parent may legitimately be absent for
                    # this ref; freezeDependencies handles missing
                    # hard parents later.
                    continue
                if not parent_soft:
                    # If this is a hard dependency, then tell the
                    # parent to ignore file matchers.
                    if not parent_job.matches_change:
                        log.debug(
                            "Forcing non-matching hard dependency "
                            "%s to run for %s", parent_job, dependent_job)
                        parent_job._set(matches_change=True)
    # After removing duplicates and walking the dependency graph,
    # remove any jobs that we shouldn't run because of file
    # matchers.
    # Iterate over a snapshot since _removeJob mutates _job_map.
    for job in list(self._job_map.values()):
        if not job.matches_change:
            log.debug("Removing non-matching job %s", job)
            self._removeJob(job)
def deduplicateJobs(self, log, item):
# Jobs are deduplicated before they start, so returned data
# are not considered at all.
@ -9271,7 +9314,7 @@ class Layout(object):
def extendJobGraph(self, context, item, change, ppc, job_graph,
skip_file_matcher, redact_secrets_and_keys,
debug_messages):
debug_messages, pending_errors):
log = item.annotateLogger(self.log)
semaphore_handler = item.pipeline.tenant.semaphore_handler
job_list = ppc.job_list
@ -9370,6 +9413,7 @@ class Layout(object):
final_job, change, self)
else:
matched_files = True
matches_change = True
if not matched_files:
if updates_job_config:
# Log the reason we're ignoring the file matcher
@ -9386,31 +9430,37 @@ class Layout(object):
add_debug_line(debug_messages,
"Job {jobname} did not match files".
format(jobname=jobname), indent=2)
continue
# A decision not to run based on a file matcher
# can be overridden later, so we just note our
# initial decision here.
matches_change = False
frozen_job = final_job.freezeJob(
context, self.tenant, self, item, change,
redact_secrets_and_keys)
frozen_job._set(matches_change=matches_change)
job_graph.addJob(frozen_job)
# These are only errors if we actually decide to run the job
if final_job.abstract:
raise JobConfigurationError(
pending_errors[frozen_job.uuid] = JobConfigurationError(
"Job %s is abstract and may not be directly run" %
(final_job.name,))
if (not final_job.ignore_allowed_projects and
final_job.allowed_projects is not None and
change.project.name not in final_job.allowed_projects):
raise JobConfigurationError(
elif (not final_job.ignore_allowed_projects and
final_job.allowed_projects is not None and
change.project.name not in final_job.allowed_projects):
pending_errors[frozen_job.uuid] = JobConfigurationError(
"Project %s is not allowed to run job %s" %
(change.project.name, final_job.name))
if ((not pipeline.post_review) and final_job.post_review):
raise JobConfigurationError(
elif ((not pipeline.post_review) and final_job.post_review):
pending_errors[frozen_job.uuid] = JobConfigurationError(
"Pre-review pipeline %s does not allow "
"post-review job %s" % (
pipeline.name, final_job.name))
if not final_job.run:
raise JobConfigurationError(
elif not final_job.run:
pending_errors[frozen_job.uuid] = JobConfigurationError(
"Job %s does not specify a run playbook" % (
final_job.name,))
job_graph.addJob(final_job.freezeJob(
context, self.tenant, self, item, change,
redact_secrets_and_keys))
def createJobGraph(self, context, item,
skip_file_matcher,
redact_secrets_and_keys,
@ -9429,6 +9479,7 @@ class Layout(object):
else:
job_map = item.current_build_set.jobs
job_graph = JobGraph(job_map)
pending_errors = {}
for change in item.changes:
ppc = self.getProjectPipelineConfig(item, change)
if not ppc:
@ -9439,14 +9490,19 @@ class Layout(object):
debug_messages.extend(ppc.debug_messages)
self.extendJobGraph(
context, item, change, ppc, job_graph, skip_file_matcher,
redact_secrets_and_keys, debug_messages)
redact_secrets_and_keys, debug_messages, pending_errors)
if ppc.fail_fast is not None:
# Any explicit setting of fail_fast takes effect,
# last one wins.
fail_fast = ppc.fail_fast
job_graph.deduplicateJobs(self.log, item)
job_graph.freezeDependencies(self)
log = item.annotateLogger(self.log)
job_graph.deduplicateJobs(log, item)
job_graph.removeNonMatchingJobs(log)
job_graph.freezeDependencies(log, self)
for job in job_map.values():
if job.uuid in pending_errors:
raise pending_errors[job.uuid]
# Copy project metadata to job_graph since this must be independent
# of the layout as we need it in order to prepare the context for