diff --git a/doc/source/developer/model-changelog.rst b/doc/source/developer/model-changelog.rst index b6828ca0bf..a9c7531c60 100644 --- a/doc/source/developer/model-changelog.rst +++ b/doc/source/developer/model-changelog.rst @@ -192,3 +192,9 @@ Version 25 :Prior Zuul version: 9.3.0 :Description: Add job_uuid to BuildRequests and BuildResultEvents. Affects schedulers and executors. + +Version 26 +---------- +:Prior Zuul version: 9.5.0 +:Description: Refactor circular dependencies. + Affects schedulers and executors. diff --git a/releasenotes/notes/bundle-refactor-b2a02cabcfda0f5f.yaml b/releasenotes/notes/bundle-refactor-b2a02cabcfda0f5f.yaml new file mode 100644 index 0000000000..0305c11acc --- /dev/null +++ b/releasenotes/notes/bundle-refactor-b2a02cabcfda0f5f.yaml @@ -0,0 +1,60 @@ +--- +prelude: > + This release includes a significant refactoring of the internal + handling of circular dependencies. This requires some changes for + consumers of Zuul output (via some reporters or the REST API) and + requires special care during upgrades. In the case of a + dependency cycle between changes, Zuul pipeline queue items will + now represent multiple changes rather than a single change. This + allows for more intuitive behavior and information display, as well + as better handling of job deduplication. +upgrade: + - | + Zuul cannot be upgraded to this version while it is running. To upgrade: + + * Stop all Zuul components running the previous version + (stopping Nodepool is optional). + + * On a scheduler machine or image (with the scheduler stopped) + and with the new version of Zuul installed, run the command: + + zuul-admin delete-state --keep-config-cache + + This will delete all of the pipeline state from ZooKeeper, but + it will retain the configuration cache (which contains all of + the project configuration from zuul.yaml files). Retaining the + cache will speed up the startup process. + + * Start all Zuul components on the new version. + - The MQTT reporter now includes a job_uuid field to correlate retry + builds with final builds. +deprecations: + - | + The syntax of string substitution in pipeline reporter messages + has changed. Since queue items may now represent more than one + change, the `{change}` substitution in messages is deprecated and + will be removed in a future version. To maintain backwards + compatibility, it currently refers to the arbitrary first change + in the list of changes for a queue item. Please upgrade your + usage to use the new `{changes}` substitution, which is a list. + - | + The syntax of string substitution in SMTP reporter messages + has changed. Since queue items may now represent more than one + change, the `{change}` substitution in messages is deprecated and + will be removed in a future version. To maintain backwards + compatibility, it currently refers to the arbitrary first change + in the list of changes for a queue item. Please upgrade your + usage to use the new `{changes}` substitution, which is a list. + - | + The MQTT and Elasticsearch reporters now include a `changes` field + which is a list of dictionaries representing the changes included + in an item. The corresponding scalar fields describing what was + previously the only change associated with an item remain for + backwards compatibility and refer to the arbitrary first change in + the list of changes for a queue item. These scalar values will be + removed in a future version of Zuul. Please upgrade your usage to + use the new `changes` entries.
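As a rough illustration of the reporter migration described in the notes above: the `{change}`/`{changes}` substitutions use Python string formatting, but the report structure shown here is a minimal sketch with assumed field names, not Zuul's exact schema.

```python
# Sketch of the reporter migration described in the release notes above.
# The report dict below is illustrative; its key names are assumptions.

changes = ["1,1", "2,1"]  # a queue item now carries a list of changes

# Deprecated: '{change}' refers to the arbitrary first change.
old_msg = "Build succeeded ({change}).".format(change=changes[0])

# Preferred: '{changes}' substitutes the whole list.
new_msg = "Build succeeded ({changes}).".format(changes=changes)

print(old_msg)  # Build succeeded (1,1).
print(new_msg)  # Build succeeded (['1,1', '2,1']).

# New list-valued 'changes' field in MQTT/Elasticsearch reports,
# alongside the deprecated scalar fields:
report = {
    "change": 1,       # deprecated scalar: arbitrary first change
    "patchset": 1,     # deprecated scalar
    "changes": [       # new: one entry per change in the queue item
        {"project": "org/project", "change": 1, "patchset": 1},
        {"project": "org/project1", "change": 2, "patchset": 1},
    ],
}
```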
+ - | + The `zuul.bundle_id` variable is deprecated and will be removed in + a future version. For backwards compatibility, it currently + duplicates the item uuid. diff --git a/tests/base.py b/tests/base.py index a34d3977ac..aafff55c4f 100644 --- a/tests/base.py +++ b/tests/base.py @@ -881,32 +881,32 @@ class FakeGerritChange(object): if 'approved' not in label: label['approved'] = app['by'] revisions = {} - rev = self.patchsets[-1] - num = len(self.patchsets) - files = {} - for f in rev['files']: - if f['file'] == '/COMMIT_MSG': - continue - files[f['file']] = {"status": f['type'][0]} # ADDED -> A - parent = '0000000000000000000000000000000000000000' - if self.depends_on_change: - parent = self.depends_on_change.patchsets[ - self.depends_on_patchset - 1]['revision'] - revisions[rev['revision']] = { - "kind": "REWORK", - "_number": num, - "created": rev['createdOn'], - "uploader": rev['uploader'], - "ref": rev['ref'], - "commit": { - "subject": self.subject, - "message": self.data['commitMessage'], - "parents": [{ - "commit": parent, - }] - }, - "files": files - } + for i, rev in enumerate(self.patchsets): + num = i + 1 + files = {} + for f in rev['files']: + if f['file'] == '/COMMIT_MSG': + continue + files[f['file']] = {"status": f['type'][0]} # ADDED -> A + parent = '0000000000000000000000000000000000000000' + if self.depends_on_change: + parent = self.depends_on_change.patchsets[ + self.depends_on_patchset - 1]['revision'] + revisions[rev['revision']] = { + "kind": "REWORK", + "_number": num, + "created": rev['createdOn'], + "uploader": rev['uploader'], + "ref": rev['ref'], + "commit": { + "subject": self.subject, + "message": self.data['commitMessage'], + "parents": [{ + "commit": parent, + }] + }, + "files": files + } data = { "id": self.project + '~' + self.branch + '~' + self.data['id'], "project": self.project, @@ -1462,13 +1462,14 @@ class FakeGerritConnection(gerritconnection.GerritConnection): } return event - def review(self, item, message, submit, labels, checks_api, file_comments, - phase1, phase2, zuul_event_id=None): + def review(self, item, change, message, submit, labels, + checks_api, file_comments, phase1, phase2, + zuul_event_id=None): if self.web_server: return super(FakeGerritConnection, self).review( - item, message, submit, labels, checks_api, file_comments, - phase1, phase2, zuul_event_id) - self._test_handle_review(int(item.change.number), message, submit, + item, change, message, submit, labels, checks_api, + file_comments, phase1, phase2, zuul_event_id) + self._test_handle_review(int(change.number), message, submit, labels, phase1, phase2) def _test_get_submitted_together(self, change): @@ -3577,9 +3578,11 @@ class TestingExecutorApi(HoldableExecutorApi): self._test_build_request_job_map = {} if build_request.uuid in self._test_build_request_job_map: return self._test_build_request_job_map[build_request.uuid] - job_name = build_request.job_name + + params = self.getParams(build_request) + job_name = params['zuul']['job'] self._test_build_request_job_map[build_request.uuid] = job_name - return build_request.job_name + return job_name def release(self, what=None): """ diff --git a/tests/fakegithub.py b/tests/fakegithub.py index 533483786f..52681b0069 100644 --- a/tests/fakegithub.py +++ b/tests/fakegithub.py @@ -691,7 +691,7 @@ class FakeGithubSession(object): if commit is None: commit = FakeCommit(head_sha) repo._commits[head_sha] = commit - repo.check_run_counter += 1 + repo.check_run_counter += 1 check_run = commit.set_check_run( str(repo.check_run_counter),
json['name'], diff --git a/tests/unit/test_circular_dependencies.py b/tests/unit/test_circular_dependencies.py index 5cf8831c1e..efcbb6a6f0 100644 --- a/tests/unit/test_circular_dependencies.py +++ b/tests/unit/test_circular_dependencies.py @@ -1,5 +1,5 @@ # Copyright 2019 BMW Group -# Copyright 2023 Acme Gating, LLC +# Copyright 2023-2024 Acme Gating, LLC # # This module is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by @@ -207,15 +207,15 @@ class TestGerritCircularDependencies(ZuulTestCase): self.assertHistory([ # Change A (check + gate) - dict(name="project1-job", result="SUCCESS", changes="3,1 1,1 2,1"), + dict(name="project1-job", result="SUCCESS", changes="3,1 2,1 1,1"), dict(name="project-vars-job", result="SUCCESS", - changes="3,1 1,1 2,1"), - dict(name="project1-job", result="SUCCESS", changes="3,1 1,1 2,1"), + changes="3,1 2,1 1,1"), + dict(name="project1-job", result="SUCCESS", changes="3,1 2,1 1,1"), dict(name="project-vars-job", result="SUCCESS", - changes="3,1 1,1 2,1"), + changes="3,1 2,1 1,1"), # Change B (check + gate) dict(name="project-job", result="SUCCESS", changes="3,1 2,1 1,1"), - dict(name="project-job", result="SUCCESS", changes="3,1 1,1 2,1"), + dict(name="project-job", result="SUCCESS", changes="3,1 2,1 1,1"), # Change C (check + gate) dict(name="project2-job", result="SUCCESS", changes="3,1"), dict(name="project2-job", result="SUCCESS", changes="3,1"), @@ -412,7 +412,8 @@ class TestGerritCircularDependencies(ZuulTestCase): queue_change_numbers = [] for queue in tenant.layout.pipelines["gate"].queues: for item in queue.queue: - queue_change_numbers.append(item.change.number) + for change in item.changes: + queue_change_numbers.append(change.number) self.assertEqual(queue_change_numbers, ['2', '3', '1']) self.executor_server.hold_jobs_in_build = False @@ -539,8 +540,8 @@ class TestGerritCircularDependencies(ZuulTestCase): self.assertEqual(A.reported, 3) self.assertEqual(B.reported, 3) - self.assertIn("bundle", A.messages[-1]) - self.assertIn("bundle", B.messages[-1]) + self.assertIn("cycle that failed", A.messages[-1]) + self.assertIn("cycle that failed", B.messages[-1]) self.assertEqual(A.data["status"], "NEW") self.assertEqual(B.data["status"], "NEW") @@ -578,8 +579,8 @@ class TestGerritCircularDependencies(ZuulTestCase): self.assertEqual(A.reported, 2) self.assertEqual(B.reported, 2) - self.assertIn("bundle", A.messages[-1]) - self.assertIn("bundle", B.messages[-1]) + self.assertIn("cycle that failed", A.messages[-1]) + self.assertIn("cycle that failed", B.messages[-1]) self.assertEqual(A.data["status"], "NEW") self.assertEqual(B.data["status"], "NEW") @@ -684,13 +685,13 @@ class TestGerritCircularDependencies(ZuulTestCase): self.assertFalse(re.search('Change .*? can not be merged', A.messages[-1])) - self.assertIn("bundle that failed.", B.messages[-1]) + self.assertIn("cycle that failed.", B.messages[-1]) self.assertFalse(re.search('Change http://localhost:.*? is needed', B.messages[-1])) self.assertFalse(re.search('Change .*? can not be merged', B.messages[-1])) - self.assertIn("bundle that failed.", C.messages[-1]) + self.assertIn("cycle that failed.", C.messages[-1]) self.assertFalse(re.search('Change http://localhost:.*? is needed', C.messages[-1])) self.assertFalse(re.search('Change .*? 
can not be merged', @@ -826,8 +827,9 @@ class TestGerritCircularDependencies(ZuulTestCase): self.executor_server.release() self.waitUntilSettled() - self.assertEqual(A.reported, 3) - self.assertEqual(B.reported, 3) + # Start, Dequeue (cycle abandoned), Start, Success + self.assertEqual(A.reported, 4) + self.assertEqual(B.reported, 4) self.assertEqual(A.data["status"], "MERGED") self.assertEqual(B.data["status"], "MERGED") @@ -865,16 +867,15 @@ class TestGerritCircularDependencies(ZuulTestCase): self.assertEqual(B.reported, 3) self.assertEqual(A.patchsets[-1]["approvals"][-1]["value"], "-2") self.assertEqual(B.patchsets[-1]["approvals"][-1]["value"], "-2") - self.assertIn("bundle", A.messages[-1]) - self.assertIn("bundle", B.messages[-1]) - self.assertEqual(A.data["status"], "NEW") + self.assertIn("cycle that failed", A.messages[-1]) + self.assertIn("cycle that failed", B.messages[-1]) + self.assertEqual(A.data["status"], "MERGED") self.assertEqual(B.data["status"], "NEW") - buildsets = {bs.refs[0].change: bs for bs in - self.scheds.first.connections.connections[ - 'database'].getBuildsets()} - self.assertEqual(buildsets[2].result, 'MERGE_FAILURE') - self.assertEqual(buildsets[1].result, 'FAILURE') + buildsets = self.scheds.first.connections.connections[ + 'database'].getBuildsets() + self.assertEqual(len(buildsets), 1) + self.assertEqual(buildsets[0].result, 'MERGE_FAILURE') def test_cycle_reporting_partial_failure(self): A = self.fake_gerrit.addFakeChange("org/project", "master", "A") @@ -898,8 +899,8 @@ class TestGerritCircularDependencies(ZuulTestCase): self.assertEqual(A.reported, 3) self.assertEqual(B.reported, 3) - self.assertIn("bundle", A.messages[-1]) - self.assertIn("bundle", B.messages[-1]) + self.assertIn("cycle that failed", A.messages[-1]) + self.assertIn("cycle that failed", B.messages[-1]) self.assertEqual(A.data["status"], "NEW") self.assertEqual(B.data["status"], "MERGED") @@ -946,7 +947,7 @@ class TestGerritCircularDependencies(ZuulTestCase): self.assertEqual(B.data["status"], "MERGED") self.assertEqual(C.data["status"], "NEW") - def test_independent_bundle_items(self): + def test_independent_cycle_items(self): self.executor_server.hold_jobs_in_build = True A = self.fake_gerrit.addFakeChange("org/project", "master", "A") B = self.fake_gerrit.addFakeChange("org/project", "master", "B") @@ -964,11 +965,11 @@ class TestGerritCircularDependencies(ZuulTestCase): self.waitUntilSettled() tenant = self.scheds.first.sched.abide.tenants.get("tenant-one") - for queue in tenant.layout.pipelines["check"].queues: - for item in queue.queue: - self.assertIn(item, item.bundle.items) - self.assertEqual(len(item.bundle.items), 2) + self.assertEqual(len(tenant.layout.pipelines["check"].queues), 1) + queue = tenant.layout.pipelines["check"].queues[0].queue + self.assertEqual(len(queue), 1) + self.assertEqual(len(self.builds), 2) for build in self.builds: self.assertTrue(build.hasChanges(A, B)) @@ -1058,6 +1059,30 @@ class TestGerritCircularDependencies(ZuulTestCase): self.assertEqual(A.data["status"], "MERGED") self.assertEqual(B.data["status"], "MERGED") + def test_cycle_git_dependency2(self): + # Reverse the enqueue order to make sure both cases are + # tested. 
+ A = self.fake_gerrit.addFakeChange("org/project", "master", "A") + B = self.fake_gerrit.addFakeChange("org/project", "master", "B") + A.addApproval("Code-Review", 2) + B.addApproval("Code-Review", 2) + A.addApproval("Approved", 1) + + # A -> B (via commit-depends) + A.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format( + A.subject, B.data["url"] + ) + # B -> A (via parent-child dependency) + B.setDependsOn(A, 1) + + self.fake_gerrit.addEvent(B.addApproval("Approved", 1)) + self.waitUntilSettled() + + self.assertEqual(A.reported, 2) + self.assertEqual(B.reported, 2) + self.assertEqual(A.data["status"], "MERGED") + self.assertEqual(B.data["status"], "MERGED") + def test_cycle_git_dependency_failure(self): A = self.fake_gerrit.addFakeChange("org/project", "master", "A") B = self.fake_gerrit.addFakeChange("org/project", "master", "B") @@ -1107,7 +1132,10 @@ class TestGerritCircularDependencies(ZuulTestCase): self.assertEqual(len(A.patchsets[-1]["approvals"]), 1) self.assertEqual(A.patchsets[-1]["approvals"][0]["type"], "Verified") - self.assertEqual(A.patchsets[-1]["approvals"][0]["value"], "1") + self.assertEqual(A.patchsets[-1]["approvals"][0]["value"], "-1") + self.assertEqual(len(B.patchsets[-1]["approvals"]), 1) + self.assertEqual(B.patchsets[-1]["approvals"][0]["type"], "Verified") + self.assertEqual(B.patchsets[-1]["approvals"][0]["value"], "-1") def test_cycle_merge_conflict(self): self.hold_merge_jobs_in_queue = True @@ -1142,7 +1170,7 @@ class TestGerritCircularDependencies(ZuulTestCase): self.merger_api.release() self.waitUntilSettled() - self.assertEqual(A.reported, 0) + self.assertEqual(A.reported, 1) self.assertEqual(B.reported, 1) self.assertEqual(A.data["status"], "NEW") self.assertEqual(B.data["status"], "NEW") @@ -1183,10 +1211,6 @@ class TestGerritCircularDependencies(ZuulTestCase): self.assertEqual(len(A.patchsets[-1]["approvals"]), 1) self.assertEqual(A.patchsets[-1]["approvals"][0]["type"], "Verified") self.assertEqual(A.patchsets[-1]["approvals"][0]["value"], "1") - - self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1)) - self.waitUntilSettled() - self.assertEqual(len(B.patchsets[-1]["approvals"]), 1) self.assertEqual(B.patchsets[-1]["approvals"][0]["type"], "Verified") self.assertEqual(B.patchsets[-1]["approvals"][0]["value"], "1") @@ -1245,7 +1269,7 @@ class TestGerritCircularDependencies(ZuulTestCase): self.waitUntilSettled() vars_builds = [b for b in self.builds if b.name == "project-vars-job"] - self.assertEqual(len(vars_builds), 1) + self.assertEqual(len(vars_builds), 3) self.assertEqual(vars_builds[0].job.variables["test_var"], "pass") self.executor_server.release() @@ -1255,30 +1279,10 @@ class TestGerritCircularDependencies(ZuulTestCase): self.assertEqual(A.patchsets[-1]["approvals"][0]["type"], "Verified") self.assertEqual(A.patchsets[-1]["approvals"][0]["value"], "1") - self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1)) - self.waitUntilSettled() - - vars_builds = [b for b in self.builds if b.name == "project-vars-job"] - self.assertEqual(len(vars_builds), 1) - self.assertEqual(vars_builds[0].job.variables["test_var"], "pass") - - self.executor_server.release() - self.waitUntilSettled() - self.assertEqual(len(B.patchsets[-1]["approvals"]), 1) self.assertEqual(B.patchsets[-1]["approvals"][0]["type"], "Verified") self.assertEqual(B.patchsets[-1]["approvals"][0]["value"], "1") - self.fake_gerrit.addEvent(C.getPatchsetCreatedEvent(1)) - self.waitUntilSettled() - - vars_builds = [b for b in self.builds if b.name == "project-vars-job"] - 
self.assertEqual(len(vars_builds), 1) - self.assertEqual(vars_builds[0].job.variables["test_var"], "pass") - - self.executor_server.release() - self.waitUntilSettled() - self.assertEqual(len(C.patchsets[-1]["approvals"]), 1) self.assertEqual(C.patchsets[-1]["approvals"][0]["type"], "Verified") self.assertEqual(C.patchsets[-1]["approvals"][0]["value"], "1") @@ -1309,7 +1313,7 @@ class TestGerritCircularDependencies(ZuulTestCase): self.assertEqual(C.data["status"], "MERGED") def test_circular_config_change_single_merge_job(self): - """Regression tests to make sure that a bundle with non-live + """Regression tests to make sure that a cycle with non-live config changes only spawns one merge job (so that we avoid problems with multiple jobs arriving in the wrong order).""" @@ -1347,7 +1351,7 @@ class TestGerritCircularDependencies(ZuulTestCase): self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1)) self.waitUntilSettled() - # Assert that there is a single merge job for the bundle. + # Assert that there is a single merge job for the cycle. self.assertEqual(len(self.merger_api.queued()), 1) self.hold_merge_jobs_in_queue = False @@ -1373,7 +1377,7 @@ class TestGerritCircularDependencies(ZuulTestCase): self.executor_server.hold_jobs_in_build = True - # bundle_id should be in check build of A + # bundle_id should be in check build of A,B self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1)) self.waitUntilSettled() var_zuul_items = self.builds[0].parameters["zuul"]["items"] @@ -1388,19 +1392,6 @@ class TestGerritCircularDependencies(ZuulTestCase): self.assertEqual(len(A.patchsets[-1]["approvals"]), 1) self.assertEqual(A.patchsets[-1]["approvals"][0]["type"], "Verified") self.assertEqual(A.patchsets[-1]["approvals"][0]["value"], "1") - - # bundle_id should be in check build of B - self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1)) - self.waitUntilSettled() - var_zuul_items = self.builds[0].parameters["zuul"]["items"] - self.assertEqual(len(var_zuul_items), 2) - self.assertIn("bundle_id", var_zuul_items[0]) - bundle_id_0 = var_zuul_items[0]["bundle_id"] - self.assertIn("bundle_id", var_zuul_items[1]) - bundle_id_1 = var_zuul_items[1]["bundle_id"] - self.assertEqual(bundle_id_0, bundle_id_1) - self.executor_server.release() - self.waitUntilSettled() self.assertEqual(len(B.patchsets[-1]["approvals"]), 1) self.assertEqual(B.patchsets[-1]["approvals"][0]["type"], "Verified") self.assertEqual(B.patchsets[-1]["approvals"][0]["value"], "1") @@ -1464,7 +1455,7 @@ class TestGerritCircularDependencies(ZuulTestCase): - project-vars-job """) } - # Change zuul config so the bundle is considered updating config + # Change zuul config so the cycle is considered updating config A = self.fake_gerrit.addFakeChange("org/project2", "master", "A", files=org_project_files) B = self.fake_gerrit.addFakeChange("org/project1", "master", "B") @@ -1532,9 +1523,10 @@ class TestGerritCircularDependencies(ZuulTestCase): self.fake_gerrit.addEvent(A.addApproval("Approved", 1)) self.waitUntilSettled() - self.assertEqual(A.reported, 3) - self.assertEqual(B.reported, 3) - self.assertEqual(C.reported, 6) + # Two failures, and one success (start and end) + self.assertEqual(A.reported, 4) + self.assertEqual(B.reported, 4) + self.assertEqual(C.reported, 4) self.assertEqual(A.data["status"], "MERGED") self.assertEqual(B.data["status"], "MERGED") self.assertEqual(C.data["status"], "MERGED") @@ -1828,41 +1820,18 @@ class TestGerritCircularDependencies(ZuulTestCase): # dict(name="common-job", result="SUCCESS", changes="2,1 1,1"), ], 
ordered=False) - @simple_layout('layouts/job-dedup-child-of-diff-parent.yaml') - def test_job_deduplication_child_of_diff_parent(self): - # This will never happen in practice, but it's theoretically - # possible, so we have a test to codify and exercise the - # behavior. - - # The common job is forced to not deduplicate, but since there - # is no return data, the inputs to child-job are identical, so - # child-job is deduplicated. In practice, there will always - # be different return data so this is unlikely to happen. - - # The child job uses auto deduplication. - self._test_job_deduplication() - self.assertHistory([ - dict(name="common-job", result="SUCCESS", changes="2,1 1,1", - ref='refs/changes/02/2/1'), - dict(name="common-job", result="SUCCESS", changes="2,1 1,1", - ref='refs/changes/01/1/1'), - dict(name="child-job", result="SUCCESS", changes="2,1 1,1"), - ], ordered=False) - @simple_layout('layouts/job-dedup-child-of-diff-parent.yaml') def test_job_deduplication_child_of_diff_parent_diff_data(self): - # This is the more realistic test of the above, where we - # return different data from the non-deduplicated parent job, - # which should still causes the child job to be deduplicated. - - # The child job uses auto deduplication. + # The common job is forced to not deduplicate, and the child + # job is deduplicated. The child job treats each of the + # common jobs as a parent. self.executor_server.returnData( 'common-job', 'refs/changes/02/2/1', {'foo': 'a'} ) self.executor_server.returnData( 'common-job', 'refs/changes/01/1/1', - {'foo': 'b'} + {'bar': 'b'} ) self._test_job_deduplication() self.assertHistory([ @@ -1873,6 +1842,9 @@ class TestGerritCircularDependencies(ZuulTestCase): dict(name="child-job", result="SUCCESS", changes="2,1 1,1", ref='refs/changes/02/2/1'), ], ordered=False) + job = self.getJobFromHistory('child-job') + self.assertEqual({'foo': 'a', 'bar': 'b'}, + job.parameters['parent_data']) @simple_layout('layouts/job-dedup-paused-parent.yaml') def test_job_deduplication_paused_parent(self): @@ -1953,8 +1925,13 @@ class TestGerritCircularDependencies(ZuulTestCase): self.waitUntilSettled() # Fail the node request and unpause + tenant = self.scheds.first.sched.abide.tenants.get('tenant-one') + pipeline = tenant.layout.pipelines['gate'] + items = pipeline.getAllItems() + job = items[0].current_build_set.job_graph.getJob( + 'common-job', items[0].changes[0].cache_key) for req in self.fake_nodepool.getNodeRequests(): - if req['requestor_data']['job_name'] == 'common-job': + if req['requestor_data']['job_uuid'] == job.uuid: self.fake_nodepool.addFailRequest(req) self.fake_nodepool.unpause() @@ -1962,7 +1939,15 @@ class TestGerritCircularDependencies(ZuulTestCase): self.assertEqual(A.data['status'], 'NEW') self.assertEqual(B.data['status'], 'NEW') - self.assertHistory([]) + # This would previously fail both items in the bundle as soon + # as the bundle as a whole started failing. The new/current + # behavior is more like non-bundle items, in that the item + # will continue running jobs even after one job fails. 
+ # common-job does not appear in history due to the node failure + self.assertHistory([ + dict(name="project1-job", result="SUCCESS", changes="2,1 1,1"), + dict(name="project2-job", result="SUCCESS", changes="2,1 1,1"), + ], ordered=False) self.assertEqual(len(self.fake_nodepool.history), 3) @simple_layout('layouts/job-dedup-auto-shared.yaml') @@ -2046,14 +2031,6 @@ class TestGerritCircularDependencies(ZuulTestCase): raise Exception("Unable to find build") build.should_retry = True - # Store a reference to the queue items so we can inspect their - # internal attributes later to double check the retry build - # count is correct. - tenant = self.scheds.first.sched.abide.tenants.get('tenant-one') - pipeline = tenant.layout.pipelines['gate'] - items = pipeline.getAllItems() - self.assertEqual(len(items), 2) - self.executor_server.release('project1-job') self.executor_server.release('project2-job') self.waitUntilSettled() @@ -2071,10 +2048,6 @@ class TestGerritCircularDependencies(ZuulTestCase): dict(name="common-job", result="SUCCESS", changes="2,1 1,1"), ], ordered=False) self.assertEqual(len(self.fake_nodepool.history), 5) - self.assertEqual(items[0].change.project.name, 'org/project2') - self.assertEqual(len(items[0].current_build_set.retry_builds), 0) - self.assertEqual(items[1].change.project.name, 'org/project1') - self.assertEqual(len(items[1].current_build_set.retry_builds), 1) @simple_layout('layouts/job-dedup-auto-shared.yaml') def test_job_deduplication_multi_scheduler(self): @@ -2258,9 +2231,10 @@ class TestGerritCircularDependencies(ZuulTestCase): # It's tricky to get info about a noop build, but the jobs in # the report have the build UUID, so we make sure it's # different. - a_noop = [l for l in A.messages[-1].split('\n') if 'noop' in l][0] - b_noop = [l for l in B.messages[-1].split('\n') if 'noop' in l][0] - self.assertNotEqual(a_noop, b_noop) + a_noop = [l for l in A.messages[-1].split('\n') if 'noop' in l] + b_noop = [l for l in B.messages[-1].split('\n') if 'noop' in l] + self.assertEqual(a_noop, b_noop) + self.assertNotEqual(a_noop[0], a_noop[1]) @simple_layout('layouts/job-dedup-retry.yaml') def test_job_deduplication_retry(self): @@ -2516,7 +2490,7 @@ class TestGerritCircularDependencies(ZuulTestCase): self.assertHistory([ dict(name="project1-job", result="SUCCESS", changes="2,1 1,1"), dict(name="common-job", result="SUCCESS", changes="2,1 1,1"), - dict(name="project2-job", result="SUCCESS", changes="1,1 2,1"), + dict(name="project2-job", result="SUCCESS", changes="2,1 1,1"), # This is deduplicated # dict(name="common-job", result="SUCCESS", changes="2,1 1,1"), ], ordered=False) @@ -2529,9 +2503,9 @@ class TestGerritCircularDependencies(ZuulTestCase): dict(name="project1-job", result="SUCCESS", changes="2,1 1,1"), dict(name="common-job", result="SUCCESS", changes="2,1 1,1", ref='refs/changes/01/1/1'), - dict(name="project2-job", result="SUCCESS", changes="1,1 2,1"), + dict(name="project2-job", result="SUCCESS", changes="2,1 1,1"), # This is not deduplicated - dict(name="common-job", result="SUCCESS", changes="1,1 2,1", + dict(name="common-job", result="SUCCESS", changes="2,1 1,1", ref='refs/changes/02/2/1'), ], ordered=False) self._assert_job_deduplication_check() @@ -2542,7 +2516,7 @@ class TestGerritCircularDependencies(ZuulTestCase): self.assertHistory([ dict(name="project1-job", result="SUCCESS", changes="2,1 1,1"), dict(name="common-job", result="SUCCESS", changes="2,1 1,1"), - dict(name="project2-job", result="SUCCESS", changes="1,1 2,1"), + 
dict(name="project2-job", result="SUCCESS", changes="2,1 1,1"), # This is deduplicated # dict(name="common-job", result="SUCCESS", changes="2,1 1,1"), ], ordered=False) @@ -2553,9 +2527,9 @@ class TestGerritCircularDependencies(ZuulTestCase): self._test_job_deduplication_check() self.assertHistory([ dict(name="project1-job", result="SUCCESS", changes="2,1 1,1"), - dict(name="common-job", result="SUCCESS", changes="1,1 2,1", + dict(name="common-job", result="SUCCESS", changes="2,1 1,1", ref='refs/changes/02/2/1'), - dict(name="project2-job", result="SUCCESS", changes="1,1 2,1"), + dict(name="project2-job", result="SUCCESS", changes="2,1 1,1"), # This is not deduplicated, though it would be under auto dict(name="common-job", result="SUCCESS", changes="2,1 1,1", ref='refs/changes/01/1/1'), @@ -2570,7 +2544,7 @@ class TestGerritCircularDependencies(ZuulTestCase): self.assertHistory([ dict(name="project1-job", result="SUCCESS", changes="2,1 1,1"), dict(name="common-job", result="SUCCESS", changes="2,1 1,1"), - dict(name="project2-job", result="SUCCESS", changes="1,1 2,1"), + dict(name="project2-job", result="SUCCESS", changes="2,1 1,1"), # This is deduplicated # dict(name="common-job", result="SUCCESS", changes="2,1 1,1"), ], ordered=False) @@ -2601,7 +2575,7 @@ class TestGerritCircularDependencies(ZuulTestCase): self.assertHistory([ dict(name="common-job", result="SUCCESS", changes="2,1 1,1"), dict(name="project1-job", result="SUCCESS", changes="2,1 1,1"), - dict(name="project2-job", result="SUCCESS", changes="1,1 2,1"), + dict(name="project2-job", result="SUCCESS", changes="2,1 1,1"), dict(name="child2-job", result="SUCCESS", changes="2,1 1,1"), # This is deduplicated # dict(name="common-job", result="SUCCESS", changes="2,1 1,1"), @@ -2634,7 +2608,7 @@ class TestGerritCircularDependencies(ZuulTestCase): self.assertHistory([ dict(name="common-job", result="SUCCESS", changes="2,1 1,1"), dict(name="project1-job", result="SUCCESS", changes="2,1 1,1"), - dict(name="project2-job", result="SUCCESS", changes="1,1 2,1"), + dict(name="project2-job", result="SUCCESS", changes="2,1 1,1"), dict(name="child2-job", result="SUCCESS", changes="2,1 1,1"), # This is deduplicated # dict(name="common-job", result="SUCCESS", changes="2,1 1,1"), @@ -2650,61 +2624,40 @@ class TestGerritCircularDependencies(ZuulTestCase): self.assertHistory([ dict(name="common-job", result="SUCCESS", changes="2,1 1,1"), dict(name="child1-job", result="SUCCESS", changes="2,1 1,1"), - dict(name="child2-job", result="SUCCESS", changes="1,1 2,1"), + dict(name="child2-job", result="SUCCESS", changes="2,1 1,1"), dict(name="project1-job", result="SUCCESS", changes="2,1 1,1"), - dict(name="project2-job", result="SUCCESS", changes="1,1 2,1"), + dict(name="project2-job", result="SUCCESS", changes="2,1 1,1"), # This is deduplicated # dict(name="common-job", result="SUCCESS", changes="2,1 1,1"), ], ordered=False) self._assert_job_deduplication_check() - @simple_layout('layouts/job-dedup-child-of-diff-parent.yaml') - def test_job_deduplication_check_child_of_diff_parent(self): - # This will never happen in practice, but it's theoretically - # possible, so we have a test to codify and exercise the - # behavior. - - # The common job is forced to not deduplicate, but since there - # is no return data, the inputs to child-job are identical, so - # child-job is deduplicated. In practice, there will always - # be different return data so this is unlikely to happen. - - # The child job uses auto deduplication. 
- self._test_job_deduplication_check() - self.assertHistory([ - dict(name="common-job", result="SUCCESS", changes="2,1 1,1", - ref='refs/changes/01/1/1'), - dict(name="common-job", result="SUCCESS", changes="1,1 2,1", - ref='refs/changes/02/2/1'), - dict(name="child-job", result="SUCCESS", changes="2,1 1,1"), - ], ordered=False) - self._assert_job_deduplication_check() - @simple_layout('layouts/job-dedup-child-of-diff-parent.yaml') def test_job_deduplication_check_child_of_diff_parent_diff_data(self): - # This is the more realistic test of the above, where we - # return different data from the non-deduplicated parent job, - # which should still cause the child job to be deduplicated. - - # The child job uses auto deduplication. + # The common job is forced to not deduplicate, and the child + # job is deduplicated. The child job treats each of the + # common jobs as a parent. self.executor_server.returnData( 'common-job', 'refs/changes/02/2/1', {'foo': 'a'} ) self.executor_server.returnData( 'common-job', 'refs/changes/01/1/1', - {'foo': 'b'} + {'bar': 'b'} ) self._test_job_deduplication_check() self.assertHistory([ dict(name="common-job", result="SUCCESS", changes="2,1 1,1", ref='refs/changes/01/1/1'), - dict(name="common-job", result="SUCCESS", changes="1,1 2,1", + dict(name="common-job", result="SUCCESS", changes="2,1 1,1", ref='refs/changes/02/2/1'), dict(name="child-job", result="SUCCESS", changes="2,1 1,1", - ref='refs/changes/01/1/1'), + ref='refs/changes/02/2/1'), ], ordered=False) self._assert_job_deduplication_check() + job = self.getJobFromHistory('child-job') + self.assertEqual({'foo': 'a', 'bar': 'b'}, + job.parameters['parent_data']) @simple_layout('layouts/job-dedup-paused-parent.yaml') def test_job_deduplication_check_paused_parent(self): @@ -2752,10 +2705,9 @@ class TestGerritCircularDependencies(ZuulTestCase): self.waitUntilSettled() self.assertHistory([ - dict(name="common-job", result="SUCCESS", changes="2,1 1,1", - ref='refs/changes/01/1/1'), + dict(name="common-job", result="SUCCESS", changes="2,1 1,1"), dict(name="project1-job", result="SUCCESS", changes="2,1 1,1"), - dict(name="project2-job", result="SUCCESS", changes="1,1 2,1"), + dict(name="project2-job", result="SUCCESS", changes="2,1 1,1"), ], ordered=False) self._assert_job_deduplication_check() @@ -2781,8 +2733,13 @@ class TestGerritCircularDependencies(ZuulTestCase): self.waitUntilSettled() # Fail the node request and unpause + tenant = self.scheds.first.sched.abide.tenants.get('tenant-one') + pipeline = tenant.layout.pipelines['check'] + items = pipeline.getAllItems() + job = items[0].current_build_set.job_graph.getJob( + 'common-job', items[0].changes[0].cache_key) for req in self.fake_nodepool.getNodeRequests(): - if req['requestor_data']['job_name'] == 'common-job': + if req['requestor_data']['job_uuid'] == job.uuid: self.fake_nodepool.addFailRequest(req) self.fake_nodepool.unpause() @@ -2792,7 +2749,7 @@ class TestGerritCircularDependencies(ZuulTestCase): self.assertEqual(B.data['status'], 'NEW') self.assertHistory([ dict(name="project1-job", result="SUCCESS", changes="2,1 1,1"), - dict(name="project2-job", result="SUCCESS", changes="1,1 2,1"), + dict(name="project2-job", result="SUCCESS", changes="2,1 1,1"), ], ordered=False) self.assertEqual(len(self.fake_nodepool.history), 3) @@ -2834,7 +2791,7 @@ class TestGerritCircularDependencies(ZuulTestCase): self.assertHistory([ dict(name="project1-job", result="SUCCESS", changes="2,1 1,1"), dict(name="common-job", result="FAILURE", changes="2,1 1,1"), - 
dict(name="project2-job", result="SUCCESS", changes="1,1 2,1"), + dict(name="project2-job", result="SUCCESS", changes="2,1 1,1"), # This is deduplicated # dict(name="common-job", result="SUCCESS", changes="2,1 1,1"), ], ordered=False) @@ -2843,9 +2800,9 @@ class TestGerritCircularDependencies(ZuulTestCase): @simple_layout('layouts/job-dedup-false.yaml') def test_job_deduplication_check_false_failed_job(self): # Test that if we are *not* deduplicating jobs, we don't - # duplicate the result on two different builds. - # The way we check that is to retry the common-job between two - # items, but only once, and only on one item. The other item + # duplicate the result on two different builds. The way we + # check that is to retry the common-job between two changes, + # but only once, and only on one change. The other change # should be unaffected. self.executor_server.hold_jobs_in_build = True A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A') @@ -2871,14 +2828,6 @@ class TestGerritCircularDependencies(ZuulTestCase): raise Exception("Unable to find build") build.should_retry = True - # Store a reference to the queue items so we can inspect their - # internal attributes later to double check the retry build - # count is correct. - tenant = self.scheds.first.sched.abide.tenants.get('tenant-one') - pipeline = tenant.layout.pipelines['check'] - items = pipeline.getAllItems() - self.assertEqual(len(items), 4) - self.executor_server.release('project1-job') self.executor_server.release('project2-job') self.waitUntilSettled() @@ -2889,21 +2838,16 @@ class TestGerritCircularDependencies(ZuulTestCase): self.assertEqual(A.data['status'], 'NEW') self.assertEqual(B.data['status'], 'NEW') self.assertHistory([ - dict(name="project2-job", result="SUCCESS", changes="1,1 2,1"), + dict(name="project2-job", result="SUCCESS", changes="2,1 1,1"), dict(name="project1-job", result="SUCCESS", changes="2,1 1,1"), - dict(name="common-job", result=None, changes="2,1 1,1"), - dict(name="common-job", result="SUCCESS", changes="2,1 1,1"), - dict(name="common-job", result="SUCCESS", changes="1,1 2,1"), + dict(name="common-job", result=None, changes="2,1 1,1", + ref='refs/changes/01/1/1'), + dict(name="common-job", result="SUCCESS", changes="2,1 1,1", + ref='refs/changes/01/1/1'), + dict(name="common-job", result="SUCCESS", changes="2,1 1,1", + ref='refs/changes/02/2/1'), ], ordered=False) self.assertEqual(len(self.fake_nodepool.history), 5) - self.assertEqual(items[0].change.project.name, 'org/project2') - self.assertEqual(len(items[0].current_build_set.retry_builds), 0) - self.assertEqual(items[1].change.project.name, 'org/project1') - self.assertEqual(len(items[1].current_build_set.retry_builds), 1) - self.assertEqual(items[2].change.project.name, 'org/project1') - self.assertEqual(len(items[2].current_build_set.retry_builds), 0) - self.assertEqual(items[3].change.project.name, 'org/project2') - self.assertEqual(len(items[3].current_build_set.retry_builds), 0) @simple_layout('layouts/job-dedup-auto-shared.yaml') def test_job_deduplication_check_multi_scheduler(self): @@ -2985,14 +2929,11 @@ class TestGerritCircularDependencies(ZuulTestCase): self.executor_server.release() self.waitUntilSettled(matcher=[app]) - # We expect common-job to be "un-deduplicated" due to the - # "Attempt to start build with deduplicated node request ID" error. 
self.assertHistory([ dict(name="project1-job", result="SUCCESS", changes="2,1 1,1"), dict(name="common-job", result="SUCCESS", changes="2,1 1,1"), - dict(name="project2-job", result="SUCCESS", changes="1,1 2,1"), - dict(name="common-job", result="SUCCESS", changes="1,1 2,1"), - dict(name="project2-job2", result="SUCCESS", changes="1,1 2,1"), + dict(name="project2-job", result="SUCCESS", changes="2,1 1,1"), + dict(name="project2-job2", result="SUCCESS", changes="2,1 1,1"), ], ordered=False) @simple_layout('layouts/job-dedup-multi-sched-complete.yaml') @@ -3042,13 +2983,11 @@ class TestGerritCircularDependencies(ZuulTestCase): self.executor_server.release() self.waitUntilSettled(matcher=[app]) - # We expect common-job to be "un-deduplicated". self.assertHistory([ dict(name="project1-job", result="SUCCESS", changes="2,1 1,1"), dict(name="common-job", result="SUCCESS", changes="2,1 1,1"), - dict(name="project2-job", result="SUCCESS", changes="1,1 2,1"), - dict(name="common-job", result="SUCCESS", changes="1,1 2,1"), - dict(name="project2-job2", result="SUCCESS", changes="1,1 2,1"), + dict(name="project2-job", result="SUCCESS", changes="2,1 1,1"), + dict(name="project2-job2", result="SUCCESS", changes="2,1 1,1"), ], ordered=False) @simple_layout('layouts/job-dedup-noop.yaml') @@ -3082,9 +3021,10 @@ class TestGerritCircularDependencies(ZuulTestCase): # It's tricky to get info about a noop build, but the jobs in # the report have the build UUID, so we make sure it's # different. - a_noop = [l for l in A.messages[-1].split('\n') if 'noop' in l][0] - b_noop = [l for l in B.messages[-1].split('\n') if 'noop' in l][0] - self.assertNotEqual(a_noop, b_noop) + a_noop = [l for l in A.messages[-1].split('\n') if 'noop' in l] + b_noop = [l for l in B.messages[-1].split('\n') if 'noop' in l] + self.assertEqual(a_noop, b_noop) + self.assertNotEqual(a_noop[0], a_noop[1]) @simple_layout('layouts/job-dedup-retry.yaml') def test_job_deduplication_check_retry(self): @@ -3112,7 +3052,7 @@ class TestGerritCircularDependencies(ZuulTestCase): self.assertHistory([ dict(name="project1-job", result="SUCCESS", changes="2,1 1,1"), - dict(name="project2-job", result="SUCCESS", changes="1,1 2,1"), + dict(name="project2-job", result="SUCCESS", changes="2,1 1,1"), # There should be exactly 3 runs of the job (not 6) dict(name="common-job", result=None, changes="2,1 1,1"), dict(name="common-job", result=None, changes="2,1 1,1"), @@ -3176,10 +3116,10 @@ class TestGerritCircularDependencies(ZuulTestCase): self.assertHistory([ dict(name="parent-job", result="ABORTED", changes="2,1 1,1"), dict(name="project1-job", result="ABORTED", changes="2,1 1,1"), - dict(name="project2-job", result="ABORTED", changes="1,1 2,1"), + dict(name="project2-job", result="ABORTED", changes="2,1 1,1"), dict(name="parent-job", result="SUCCESS", changes="2,1 1,1"), dict(name="project1-job", result="SUCCESS", changes="2,1 1,1"), - dict(name="project2-job", result="SUCCESS", changes="1,1 2,1"), + dict(name="project2-job", result="SUCCESS", changes="2,1 1,1"), ], ordered=False) self.assertEqual(len(self.fake_nodepool.history), 6) @@ -3232,13 +3172,13 @@ class TestGerritCircularDependencies(ZuulTestCase): self.assertHistory([ dict(name="parent-job", result="SUCCESS", changes="2,1 1,1"), dict(name="project1-job", result="SUCCESS", changes="2,1 1,1"), - dict(name="project2-job", result="SUCCESS", changes="1,1 2,1"), + dict(name="project2-job", result="SUCCESS", changes="2,1 1,1"), # Only one run of the common job since it's the same 
dict(name="common-child-job", result="SUCCESS", changes="2,1 1,1"), # The forked job depends on different parents # so it should run twice dict(name="forked-child-job", result="SUCCESS", changes="2,1 1,1"), - dict(name="forked-child-job", result="SUCCESS", changes="1,1 2,1"), + dict(name="forked-child-job", result="SUCCESS", changes="2,1 1,1"), ], ordered=False) self.assertEqual(len(self.fake_nodepool.history), 6) @@ -3292,9 +3232,9 @@ class TestGerritCircularDependencies(ZuulTestCase): self.assertHistory([ dict(name="common-job", result="SUCCESS", changes="2,1 1,1", - ref='refs/changes/01/1/1'), + ref='refs/changes/02/2/1'), dict(name="child-job", result="SUCCESS", changes="2,1 1,1", - ref='refs/changes/01/1/1'), + ref='refs/changes/02/2/1'), dict(name="project1-job", result="SUCCESS", changes="2,1 1,1", ref='refs/changes/01/1/1'), ], ordered=False) @@ -3336,7 +3276,7 @@ class TestGerritCircularDependencies(ZuulTestCase): self.assertHistory([ dict(name="project1-job", result="SUCCESS", changes="2,1 1,1"), dict(name="common-job", result="SUCCESS", changes="2,1 1,1"), - dict(name="project2-job", result="SUCCESS", changes="1,1 2,1"), + dict(name="project2-job", result="SUCCESS", changes="2,1 1,1"), # This is deduplicated # dict(name="common-job", result="SUCCESS", changes="2,1 1,1"), ], ordered=False) @@ -3470,9 +3410,47 @@ class TestGerritCircularDependencies(ZuulTestCase): self.assertEqual(G.queried, 8) self.assertHistory([ dict(name="project1-job", result="SUCCESS", - changes="7,1 6,1 5,1 4,1 1,1 2,1 3,1"), + changes="1,1 2,1 3,1 4,1 5,1 6,1 7,1", + ref="refs/changes/01/1/1"), + dict(name="project1-job", result="SUCCESS", + changes="1,1 2,1 3,1 4,1 5,1 6,1 7,1", + ref="refs/changes/02/2/1"), + dict(name="project1-job", result="SUCCESS", + changes="1,1 2,1 3,1 4,1 5,1 6,1 7,1", + ref="refs/changes/03/3/1"), + dict(name="project1-job", result="SUCCESS", + changes="1,1 2,1 3,1 4,1 5,1 6,1 7,1", + ref="refs/changes/04/4/1"), + dict(name="project1-job", result="SUCCESS", + changes="1,1 2,1 3,1 4,1 5,1 6,1 7,1", + ref="refs/changes/05/5/1"), + dict(name="project1-job", result="SUCCESS", + changes="1,1 2,1 3,1 4,1 5,1 6,1 7,1", + ref="refs/changes/06/6/1"), + dict(name="project1-job", result="SUCCESS", + changes="1,1 2,1 3,1 4,1 5,1 6,1 7,1", + ref="refs/changes/07/7/1"), dict(name="project-vars-job", result="SUCCESS", - changes="7,1 6,1 5,1 4,1 1,1 2,1 3,1"), + changes="1,1 2,1 3,1 4,1 5,1 6,1 7,1", + ref="refs/changes/01/1/1"), + dict(name="project-vars-job", result="SUCCESS", + changes="1,1 2,1 3,1 4,1 5,1 6,1 7,1", + ref="refs/changes/02/2/1"), + dict(name="project-vars-job", result="SUCCESS", + changes="1,1 2,1 3,1 4,1 5,1 6,1 7,1", + ref="refs/changes/03/3/1"), + dict(name="project-vars-job", result="SUCCESS", + changes="1,1 2,1 3,1 4,1 5,1 6,1 7,1", + ref="refs/changes/04/4/1"), + dict(name="project-vars-job", result="SUCCESS", + changes="1,1 2,1 3,1 4,1 5,1 6,1 7,1", + ref="refs/changes/05/5/1"), + dict(name="project-vars-job", result="SUCCESS", + changes="1,1 2,1 3,1 4,1 5,1 6,1 7,1", + ref="refs/changes/06/6/1"), + dict(name="project-vars-job", result="SUCCESS", + changes="1,1 2,1 3,1 4,1 5,1 6,1 7,1", + ref="refs/changes/07/7/1"), ], ordered=False) @simple_layout('layouts/submitted-together-per-branch.yaml') @@ -3623,12 +3601,20 @@ class TestGerritCircularDependencies(ZuulTestCase): self.fake_gerrit.addEvent(D.getPatchsetCreatedEvent(2)) self.waitUntilSettled() - self.assertHistory([ - dict(name="check-job", result="SUCCESS", - changes="4,2 4,1 3,1 2,2 2,1 1,1"), - 
dict(name="check-job", result="SUCCESS", - changes="4,2 2,2 2,1 1,1 4,1 3,1"), - ], ordered=False) + # This used to run jobs with some very unlikely changes + # merged. That passed the test because we didn't actually get + # a ref for the outdated commits, so when the merger merged + # them, that was a noop. Since we now have correct refs for + # the outdated changes, we hit a merge conflict, which is a + # reasonable and likely error. It's not likely that we can + # make this test run a job with correct data. + # self.assertHistory([ + # dict(name="check-job", result="SUCCESS", + # changes="4,2 4,1 3,1 2,2 2,1 1,1"), + # dict(name="check-job", result="SUCCESS", + # changes="4,2 2,2 2,1 1,1 4,1 3,1"), + # ], ordered=False) + self.assertHistory([], ordered=False) @simple_layout('layouts/deps-by-topic.yaml') def test_deps_by_topic_new_patchset(self): @@ -3653,8 +3639,8 @@ class TestGerritCircularDependencies(ZuulTestCase): self.assertHistory([ dict(name="check-job", result="SUCCESS", changes="2,1 1,1"), - dict(name="check-job", result="SUCCESS", changes="1,1 2,1"), - ], ordered=False) + dict(name="check-job", result="SUCCESS", changes="2,1 1,1"), + ]) A.addPatchset() self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(2)) @@ -3663,9 +3649,10 @@ class TestGerritCircularDependencies(ZuulTestCase): self.assertHistory([ # Original check run dict(name="check-job", result="SUCCESS", changes="2,1 1,1"), - dict(name="check-job", result="SUCCESS", changes="1,1 2,1"), + dict(name="check-job", result="SUCCESS", changes="2,1 1,1"), # Second check run dict(name="check-job", result="SUCCESS", changes="2,1 1,2"), + dict(name="check-job", result="SUCCESS", changes="2,1 1,2"), ], ordered=False) def test_deps_by_topic_multi_tenant(self): @@ -3711,7 +3698,7 @@ class TestGerritCircularDependencies(ZuulTestCase): dict(name="project5-job-t1", result="SUCCESS", changes="1,1"), dict(name="project6-job-t1", result="SUCCESS", changes="2,1"), dict(name="project5-job-t2", result="SUCCESS", changes="2,1 1,1"), - dict(name="project6-job-t2", result="SUCCESS", changes="1,1 2,1"), + dict(name="project6-job-t2", result="SUCCESS", changes="2,1 1,1"), # Gate dict(name="project5-job-t2", result="SUCCESS", changes="1,1 2,1"), dict(name="project6-job-t2", result="SUCCESS", changes="1,1 2,1"), @@ -3728,7 +3715,7 @@ class TestGerritCircularDependencies(ZuulTestCase): # The first change: A = self.fake_gerrit.addFakeChange("org/project", "master", "A") self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1)) - self.waitUntilSettled() + self.waitUntilSettled("Stage 1") # Now that it has been uploaded, upload the second change and # point it at the first. 
@@ -3738,7 +3725,7 @@ class TestGerritCircularDependencies(ZuulTestCase): B.subject, A.data["url"] ) self.fake_gerrit.addEvent(B.getPatchsetCreatedEvent(1)) - self.waitUntilSettled() + self.waitUntilSettled("Stage 2") # Now that the second change is known, update the first change # B <-> A @@ -3747,16 +3734,16 @@ class TestGerritCircularDependencies(ZuulTestCase): A.subject, B.data["url"] ) self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(2)) - self.waitUntilSettled() + self.waitUntilSettled("Stage 3") self.executor_server.hold_jobs_in_build = False self.executor_server.release() - self.waitUntilSettled() + self.waitUntilSettled("Stage 4") self.assertHistory([ dict(name="project-job", result="ABORTED", changes="1,1"), dict(name="project-job", result="ABORTED", changes="1,1 2,1"), - dict(name="project-job", result="SUCCESS", changes="1,2 2,1"), + dict(name="project-job", result="SUCCESS", changes="2,1 1,2"), dict(name="project-job", result="SUCCESS", changes="2,1 1,2"), ], ordered=False) @@ -3788,8 +3775,10 @@ class TestGerritCircularDependencies(ZuulTestCase): self.assertHistory([ dict(name="check-job", result="ABORTED", changes="1,1"), - dict(name="check-job", result="SUCCESS", changes="2,1 1,1"), - dict(name="check-job", result="SUCCESS", changes="1,1 2,1"), + dict(name="check-job", result="SUCCESS", changes="1,1 2,1", + ref="refs/changes/01/1/1"), + dict(name="check-job", result="SUCCESS", changes="1,1 2,1", + ref="refs/changes/02/2/1"), ], ordered=False) @simple_layout('layouts/deps-by-topic.yaml') @@ -3827,9 +3816,16 @@ class TestGerritCircularDependencies(ZuulTestCase): self.assertEqual(B.data["status"], "NEW") self.assertEqual(C.data["status"], "NEW") self.assertHistory([ - dict(name="gate-job", result="ABORTED", changes="1,1 2,1"), - dict(name="gate-job", result="ABORTED", changes="1,1 2,1"), - dict(name="check-job", result="SUCCESS", changes="2,1 1,1 3,1"), + dict(name="gate-job", result="ABORTED", changes="1,1 2,1", + ref="refs/changes/01/1/1"), + dict(name="gate-job", result="ABORTED", changes="1,1 2,1", + ref="refs/changes/02/2/1"), + dict(name="check-job", result="SUCCESS", changes="2,1 1,1 3,1", + ref="refs/changes/02/2/1"), + dict(name="check-job", result="SUCCESS", changes="2,1 1,1 3,1", + ref="refs/changes/01/1/1"), + # check-job for change 3 is deduplicated into change 1 + # because they are the same repo. ], ordered=False) def test_dependency_refresh_config_error(self): @@ -3891,6 +3887,7 @@ class TestGithubCircularDependencies(ZuulTestCase): scheduler_count = 1 def test_cycle_not_ready(self): + # C is missing the approved label A = self.fake_github.openFakePullRequest("gh/project", "master", "A") B = self.fake_github.openFakePullRequest("gh/project1", "master", "B") C = self.fake_github.openFakePullRequest("gh/project1", "master", "C") @@ -4059,7 +4056,7 @@ class TestGithubCircularDependencies(ZuulTestCase): self.assertFalse(A.is_merged) self.assertFalse(B.is_merged) - self.assertIn("part of a bundle that can not merge", + self.assertIn("failed to merge", A.comments[-1]) self.assertTrue( re.search("Change https://github.com/gh/project/pull/1 " @@ -4068,7 +4065,7 @@ class TestGithubCircularDependencies(ZuulTestCase): self.assertFalse(re.search('Change .*? 
is needed', A.comments[-1])) - self.assertIn("part of a bundle that can not merge", + self.assertIn("failed to merge", B.comments[-1]) self.assertTrue( re.search("Change https://github.com/gh/project/pull/1 " @@ -4088,7 +4085,7 @@ class TestGithubCircularDependencies(ZuulTestCase): # The first change: A = self.fake_github.openFakePullRequest("gh/project", "master", "A") self.fake_github.emitEvent(A.getPullRequestOpenedEvent()) - self.waitUntilSettled() + self.waitUntilSettled("Stage 1") # Now that it has been uploaded, upload the second change and # point it at the first. @@ -4098,7 +4095,7 @@ class TestGithubCircularDependencies(ZuulTestCase): B.subject, A.url ) self.fake_github.emitEvent(B.getPullRequestOpenedEvent()) - self.waitUntilSettled() + self.waitUntilSettled("Stage 2") # Now that the second change is known, update the first change # B <-> A @@ -4107,11 +4104,11 @@ class TestGithubCircularDependencies(ZuulTestCase): ) self.fake_github.emitEvent(A.getPullRequestEditedEvent(A.subject)) - self.waitUntilSettled() + self.waitUntilSettled("Stage 3") self.executor_server.hold_jobs_in_build = False self.executor_server.release() - self.waitUntilSettled() + self.waitUntilSettled("Stage 4") self.assertHistory([ dict(name="project-job", result="ABORTED", @@ -4120,7 +4117,13 @@ class TestGithubCircularDependencies(ZuulTestCase): dict(name="project-job", result="ABORTED", changes=f"{A.number},{A.head_sha} {B.number},{B.head_sha}"), dict(name="project-job", result="SUCCESS", - changes=f"{B.number},{B.head_sha} {A.number},{A.head_sha}"), + changes=f"{B.number},{B.head_sha} {A.number},{A.head_sha}", + ref="refs/pull/1/head", + ), + dict(name="project-job", result="SUCCESS", + changes=f"{B.number},{B.head_sha} {A.number},{A.head_sha}", + ref="refs/pull/2/head", + ), ], ordered=False) @@ -4184,7 +4187,11 @@ class TestGithubAppCircularDependencies(ZuulGithubAppTestCase): dict(name="project-job", result="ABORTED", changes=f"{A.number},{A.head_sha} {B.number},{B.head_sha}"), dict(name="project-job", result="SUCCESS", - changes=f"{B.number},{B.head_sha} {A.number},{A.head_sha}"), + changes=f"{B.number},{B.head_sha} {A.number},{A.head_sha}", + ref="refs/pull/1/head"), + dict(name="project-job", result="SUCCESS", + changes=f"{B.number},{B.head_sha} {A.number},{A.head_sha}", + ref="refs/pull/2/head"), ], ordered=False) # TODO: We shouldn't need this in the future, but for now, # verify that since we are issuing two check runs, they both @@ -4195,6 +4202,15 @@ class TestGithubAppCircularDependencies(ZuulGithubAppTestCase): self.assertEqual(check_run["status"], "completed") check_run = check_runs[1] self.assertEqual(check_run["status"], "completed") + self.assertNotEqual(check_runs[0]["id"], check_runs[1]["id"]) + + check_runs = self.fake_github.getCommitChecks("gh/project", B.head_sha) + self.assertEqual(len(check_runs), 2) + check_run = check_runs[0] + self.assertEqual(check_run["status"], "completed") + check_run = check_runs[1] + self.assertEqual(check_run["status"], "completed") + self.assertNotEqual(check_runs[0]["id"], check_runs[1]["id"]) @skip("Disabled due to safety check") @simple_layout('layouts/dependency_removal_gate.yaml', driver='github') @@ -4415,13 +4431,12 @@ class TestGithubAppCircularDependencies(ZuulGithubAppTestCase): self.fake_github.emitEvent(C.addLabel("approved")) self.waitUntilSettled() expected_cycle = {A.number, B.number, C.number, E.number} - self.assertEqual(len(list(pipeline.getAllItems())), 4) + self.assertEqual(len(list(pipeline.getAllItems())), 1) for item in 
pipeline.getAllItems(): - cycle = {i.change.number for i in item.bundle.items} + cycle = {c.number for c in item.changes} self.assertEqual(expected_cycle, cycle) - # Now we remove the dependency on E. That ends up removing - # all the changes. + # Now we remove the dependency on E. This re-enqueues ABC and E. # A->C, B->A, C->B A.body = "{}\n\nDepends-On: {}\n".format( @@ -4429,7 +4444,7 @@ class TestGithubAppCircularDependencies(ZuulGithubAppTestCase): ) self.fake_github.emitEvent(A.getPullRequestEditedEvent(A.body)) self.waitUntilSettled() - self.assertEqual(len(list(pipeline.getAllItems())), 0) + self.assertEqual(len(list(pipeline.getAllItems())), 2) # Now remove all dependencies for the three remaining changes, # and also E so that it doesn't get pulled back in as an @@ -4447,7 +4462,7 @@ class TestGithubAppCircularDependencies(ZuulGithubAppTestCase): self.waitUntilSettled() self.assertEqual(len(list(pipeline.getAllItems())), 3) for item in pipeline.getAllItems(): - self.assertIsNone(item.bundle) + self.assertEqual(len(item.changes), 1) # Remove the first change from the queue by forcing a # dependency on D. @@ -4458,10 +4473,10 @@ class TestGithubAppCircularDependencies(ZuulGithubAppTestCase): self.waitUntilSettled() self.assertEqual(len(list(pipeline.getAllItems())), 2) for item in pipeline.getAllItems(): - self.assertIsNone(item.bundle) + self.assertEqual(len(item.changes), 1) - # B and C are still in the queue. Attempt to put them - # back into a bundle. (We can't; they will be removed) + # B and C are still in the queue. Put them + # back into a bundle. C.body = "{}\n\nDepends-On: {}\n".format( C.subject, B.url ) @@ -4472,15 +4487,22 @@ class TestGithubAppCircularDependencies(ZuulGithubAppTestCase): ) self.fake_github.emitEvent(B.getPullRequestEditedEvent(B.body)) self.waitUntilSettled() - self.assertEqual(len(list(pipeline.getAllItems())), 0) + expected_cycle = {B.number, C.number} + self.assertEqual(len(list(pipeline.getAllItems())), 1) + for item in pipeline.getAllItems(): + cycle = {c.number for c in item.changes} + self.assertEqual(expected_cycle, cycle) # All done. self.executor_server.hold_jobs_in_build = False self.executor_server.release() self.waitUntilSettled() - for build in self.history: + for build in self.history[:-3]: self.assertEqual(build.result, 'ABORTED') + # Changes B, C in the end + for build in self.history[-3:]: + self.assertEqual(build.result, 'SUCCESS') @skip("Disabled due to safety check") @simple_layout('layouts/dependency_removal_gate.yaml', driver='github') @@ -4566,7 +4588,7 @@ class TestGithubAppCircularDependencies(ZuulGithubAppTestCase): self.assertEqual(len(list(pipeline.getAllItems())), 1) for item in pipeline.getAllItems(): - self.assertIsNone(item.bundle) + self.assertEqual(len(item.changes), 1) B.body = "{}\n\nDepends-On: {}\n".format( B.subject, A.url @@ -4579,29 +4601,30 @@ class TestGithubAppCircularDependencies(ZuulGithubAppTestCase): ) self.fake_github.emitEvent(A.getPullRequestEditedEvent(A.body)) self.waitUntilSettled() - self.assertEqual(len(list(pipeline.getAllItems())), 0) + self.assertEqual(len(list(pipeline.getAllItems())), 1) + expected_cycle = {A.number, B.number} + for item in pipeline.getAllItems(): + cycle = {c.number for c in item.changes} + self.assertEqual(expected_cycle, cycle) # All done. 
self.executor_server.hold_jobs_in_build = False self.executor_server.release() self.waitUntilSettled() - for build in self.history: + for build in self.history[:-3]: self.assertEqual(build.result, 'ABORTED') + # Changes A, B in the end + for build in self.history[-3:]: + self.assertEqual(build.result, 'SUCCESS') - def assertQueueBundles(self, pipeline, queue_index, bundles): + def assertQueueCycles(self, pipeline, queue_index, bundles): queue = pipeline.queues[queue_index] self.assertEqual(len(queue.queue), len(bundles)) for x, item in enumerate(queue.queue): - if item.bundle is None: - cycle = None - else: - cycle = {i.change.number for i in item.bundle.items} - if bundles[x] is None: - expected_cycle = None - else: - expected_cycle = {c.number for c in bundles[x]} + cycle = {c.number for c in item.changes} + expected_cycle = {c.number for c in bundles[x]} self.assertEqual(expected_cycle, cycle) @skip("Disabled due to safety check") @@ -4646,7 +4669,7 @@ class TestGithubAppCircularDependencies(ZuulGithubAppTestCase): self.waitUntilSettled() abce = [A, B, C, E] self.assertEqual(len(pipeline.queues), 1) - self.assertQueueBundles(pipeline, 0, [abce, abce, abce, abce]) + self.assertQueueCycles(pipeline, 0, [abce, abce, abce, abce]) # Now we remove the dependency on E. @@ -4659,9 +4682,9 @@ class TestGithubAppCircularDependencies(ZuulGithubAppTestCase): abc = [A, B, C] # A,B,C - self.assertQueueBundles(pipeline, 0, [abc, abc, abc]) + self.assertQueueCycles(pipeline, 0, [abc, abc, abc]) # B,C,A - self.assertQueueBundles(pipeline, 1, [abc, abc, abc]) + self.assertQueueCycles(pipeline, 1, [abc, abc, abc]) # Now remove all dependencies for the three remaining changes. A.body = A.subject @@ -4677,9 +4700,9 @@ class TestGithubAppCircularDependencies(ZuulGithubAppTestCase): # A, B, C individually (not in that order) self.assertEqual(len(pipeline.queues), 3) - self.assertQueueBundles(pipeline, 0, [None]) - self.assertQueueBundles(pipeline, 1, [None]) - self.assertQueueBundles(pipeline, 2, [None]) + self.assertQueueCycles(pipeline, 0, [None]) + self.assertQueueCycles(pipeline, 1, [None]) + self.assertQueueCycles(pipeline, 2, [None]) # Remove the first change from the queue by forcing a # dependency on D. @@ -4689,8 +4712,8 @@ class TestGithubAppCircularDependencies(ZuulGithubAppTestCase): self.fake_github.emitEvent(A.getPullRequestEditedEvent(A.body)) self.waitUntilSettled() self.assertEqual(len(pipeline.queues), 2) - self.assertQueueBundles(pipeline, 0, [None]) - self.assertQueueBundles(pipeline, 1, [None]) + self.assertQueueCycles(pipeline, 0, [None]) + self.assertQueueCycles(pipeline, 1, [None]) # Verify that we can put B and C into a bundle. C.body = "{}\n\nDepends-On: {}\n".format( @@ -4705,8 +4728,8 @@ class TestGithubAppCircularDependencies(ZuulGithubAppTestCase): self.waitUntilSettled() self.assertEqual(len(pipeline.queues), 2) bc = [B, C] - self.assertQueueBundles(pipeline, 0, [bc, bc]) - self.assertQueueBundles(pipeline, 1, [bc, bc]) + self.assertQueueCycles(pipeline, 0, [bc, bc]) + self.assertQueueCycles(pipeline, 1, [bc, bc]) # All done. self.executor_server.hold_jobs_in_build = False @@ -4803,7 +4826,7 @@ class TestGithubAppCircularDependencies(ZuulGithubAppTestCase): self.waitUntilSettled() abce = [A, B, C, E] self.assertEqual(len(pipeline.queues), 1) - self.assertQueueBundles(pipeline, 0, [abce, abce, abce, abce]) + self.assertQueueCycles(pipeline, 0, [abce]) # Now we remove the dependency on E. 
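With this refactor every queue item carries a list of changes, so the renamed assertQueueCycles helper above drops the old None-versus-bundle branching and reduces to one set comparison per item. A rough standalone sketch of the invariant it asserts, reusing the pipeline and fake changes from the surrounding tests:

    # Each queue item now represents a whole dependency cycle; the set of
    # change numbers in item.changes must equal the expected cycle.
    expected = [[A, B, C], [E]]  # e.g. one item for the ABC cycle, then E alone
    queue = pipeline.queues[0]
    assert len(queue.queue) == len(expected)
    for item, cycle in zip(queue.queue, expected):
        assert {c.number for c in item.changes} == {c.number for c in cycle}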
@@ -4815,8 +4838,8 @@ class TestGithubAppCircularDependencies(ZuulGithubAppTestCase): self.waitUntilSettled() abc = [A, B, C] - # B,C,A - self.assertQueueBundles(pipeline, 0, [abc, abc, abc]) + # ABC, E + self.assertQueueCycles(pipeline, 0, [abc, [E]]) # Now remove all dependencies for the three remaining changes. A.body = A.subject @@ -4830,10 +4853,13 @@ class TestGithubAppCircularDependencies(ZuulGithubAppTestCase): self.fake_github.emitEvent(C.getPullRequestEditedEvent(C.body)) self.waitUntilSettled() - # B, C individually (not in that order) - self.assertEqual(len(pipeline.queues), 2) - self.assertQueueBundles(pipeline, 0, [None]) - self.assertQueueBundles(pipeline, 1, [None]) + # A, B, C individually + # ABC E, A, B, C + self.assertEqual(len(pipeline.queues), 4) + self.assertQueueCycles(pipeline, 0, [abc, [E]]) + self.assertQueueCycles(pipeline, 1, [[A]]) + self.assertQueueCycles(pipeline, 2, [[B]]) + self.assertQueueCycles(pipeline, 3, [[C]]) # Verify that we can put B and C into a bundle. C.body = "{}\n\nDepends-On: {}\n".format( @@ -4846,19 +4872,21 @@ class TestGithubAppCircularDependencies(ZuulGithubAppTestCase): ) self.fake_github.emitEvent(B.getPullRequestEditedEvent(B.body)) self.waitUntilSettled() - self.assertEqual(len(pipeline.queues), 1) + self.assertEqual(len(pipeline.queues), 3) bc = [B, C] - self.assertQueueBundles(pipeline, 0, [bc, bc]) + self.assertQueueCycles(pipeline, 0, [abc, [E]]) + self.assertQueueCycles(pipeline, 1, [[A]]) + self.assertQueueCycles(pipeline, 2, [bc]) # All done. self.executor_server.hold_jobs_in_build = False self.executor_server.release() self.waitUntilSettled() - for build in self.history[:-2]: + for build in self.history[:-7]: self.assertEqual(build.result, 'ABORTED') - # A single change in the end - for build in self.history[-2:]: + # Changes A, B, C in the end + for build in self.history[-7:]: self.assertEqual(build.result, 'SUCCESS') @skip("Disabled due to safety check") @@ -4896,8 +4924,8 @@ class TestGithubAppCircularDependencies(ZuulGithubAppTestCase): self.assertEqual(len(list(pipeline.getAllItems())), 4) self.assertEqual(len(pipeline.queues), 2) ab = [A, B] - self.assertQueueBundles(pipeline, 0, [ab, ab]) - self.assertQueueBundles(pipeline, 1, [ab, ab]) + self.assertQueueCycles(pipeline, 0, [ab, ab]) + self.assertQueueCycles(pipeline, 1, [ab, ab]) expected_cycle = {A.number, B.number} for item in pipeline.getAllItems(): @@ -4944,10 +4972,9 @@ class TestGithubAppCircularDependencies(ZuulGithubAppTestCase): self.fake_github.emitEvent(A.getPullRequestOpenedEvent()) self.waitUntilSettled() - expected_cycle = {A.number} self.assertEqual(len(list(pipeline.getAllItems())), 1) for item in pipeline.getAllItems(): - self.assertIsNone(item.bundle) + self.assertEqual(len(item.changes), 1) B.body = "{}\n\nDepends-On: {}\n".format( B.subject, A.url @@ -4960,22 +4987,17 @@ class TestGithubAppCircularDependencies(ZuulGithubAppTestCase): ) self.fake_github.emitEvent(A.getPullRequestEditedEvent(A.body)) self.waitUntilSettled() - self.assertEqual(len(list(pipeline.getAllItems())), 2) + self.assertEqual(len(list(pipeline.getAllItems())), 1) self.assertEqual(len(pipeline.queues), 1) ab = [A, B] - self.assertQueueBundles(pipeline, 0, [ab, ab]) - - expected_cycle = {A.number, B.number} - for item in pipeline.getAllItems(): - cycle = {i.change.number for i in item.bundle.items} - self.assertEqual(expected_cycle, cycle) + self.assertQueueCycles(pipeline, 0, [ab]) # All done. 
self.executor_server.hold_jobs_in_build = False self.executor_server.release() self.waitUntilSettled() - for build in self.history[:-2]: + for build in self.history[:-4]: self.assertEqual(build.result, 'ABORTED') # A single change in the end for build in self.history[-2:]: diff --git a/tests/unit/test_executor.py b/tests/unit/test_executor.py index 64e6119de2..7449821f36 100644 --- a/tests/unit/test_executor.py +++ b/tests/unit/test_executor.py @@ -1165,7 +1165,7 @@ class TestExecutorFailure(ZuulTestCase): self.waitUntilSettled() job = items[0].current_build_set.job_graph.getJob( - 'project-merge', items[0].change.cache_key) + 'project-merge', items[0].changes[0].cache_key) build_retries = items[0].current_build_set.getRetryBuildsForJob(job) self.assertEqual(len(build_retries), 1) self.assertIsNotNone(build_retries[0].error_detail) diff --git a/tests/unit/test_model.py b/tests/unit/test_model.py index 74a5d7e9a9..16ffacdc7a 100644 --- a/tests/unit/test_model.py +++ b/tests/unit/test_model.py @@ -232,7 +232,7 @@ class TestJob(BaseTestCase): change = model.Change(self.project) change.branch = 'master' change.cache_stat = Dummy(key=Dummy(reference=uuid.uuid4().hex)) - item = self.queue.enqueueChange(change, None) + item = self.queue.enqueueChanges([change], None) self.assertTrue(base.changeMatchesBranch(change)) self.assertTrue(python27.changeMatchesBranch(change)) @@ -249,7 +249,7 @@ class TestJob(BaseTestCase): change.branch = 'stable/diablo' change.cache_stat = Dummy(key=Dummy(reference=uuid.uuid4().hex)) - item = self.queue.enqueueChange(change, None) + item = self.queue.enqueueChanges([change], None) self.assertTrue(base.changeMatchesBranch(change)) self.assertTrue(python27.changeMatchesBranch(change)) @@ -300,7 +300,7 @@ class TestJob(BaseTestCase): change.branch = 'master' change.cache_stat = Dummy(key=Dummy(reference=uuid.uuid4().hex)) change.files = ['/COMMIT_MSG', 'ignored-file'] - item = self.queue.enqueueChange(change, None) + item = self.queue.enqueueChanges([change], None) self.assertTrue(base.changeMatchesFiles(change)) self.assertFalse(python27.changeMatchesFiles(change)) @@ -375,7 +375,7 @@ class TestJob(BaseTestCase): # Test master change.branch = 'master' change.cache_stat = Dummy(key=Dummy(reference=uuid.uuid4().hex)) - item = self.queue.enqueueChange(change, None) + item = self.queue.enqueueChanges([change], None) with testtools.ExpectedException( Exception, "Pre-review pipeline gate does not allow post-review job"): @@ -453,7 +453,7 @@ class TestJob(BaseTestCase): change = model.Change(self.project) change.branch = 'master' change.cache_stat = Dummy(key=Dummy(reference=uuid.uuid4().hex)) - item = self.queue.enqueueChange(change, None) + item = self.queue.enqueueChanges([change], None) self.assertTrue(base.changeMatchesBranch(change)) self.assertTrue(python27.changeMatchesBranch(change)) @@ -488,6 +488,7 @@ class FakeFrozenJob(model.Job): super().__init__(name) self.uuid = uuid.uuid4().hex self.ref = 'fake reference' + self.all_refs = [self.ref] class TestGraph(BaseTestCase): diff --git a/tests/unit/test_model_upgrade.py b/tests/unit/test_model_upgrade.py index ee44452f42..61ab93970f 100644 --- a/tests/unit/test_model_upgrade.py +++ b/tests/unit/test_model_upgrade.py @@ -465,53 +465,6 @@ class TestGithubModelUpgrade(ZuulTestCase): config_file = 'zuul-github-driver.conf' scheduler_count = 1 - @model_version(3) - @simple_layout('layouts/gate-github.yaml', driver='github') - def test_status_checks_removal(self): - # This tests the old behavior -- that changes are not 
dequeued - # once their required status checks are removed -- since the - # new behavior requires a flag in ZK. - # Contrast with test_status_checks_removal. - github = self.fake_github.getGithubClient() - repo = github.repo_from_project('org/project') - repo._set_branch_protection( - 'master', contexts=['something/check', 'tenant-one/gate']) - - A = self.fake_github.openFakePullRequest('org/project', 'master', 'A') - self.fake_github.emitEvent(A.getPullRequestOpenedEvent()) - self.waitUntilSettled() - - self.executor_server.hold_jobs_in_build = True - # Since the required status 'something/check' is not fulfilled, - # no job is expected - self.assertEqual(0, len(self.history)) - - # Set the required status 'something/check' - repo.create_status(A.head_sha, 'success', 'example.com', 'description', - 'something/check') - - self.fake_github.emitEvent(A.getPullRequestOpenedEvent()) - self.waitUntilSettled() - - # Remove it and verify the change is not dequeued (old behavior). - repo.create_status(A.head_sha, 'failed', 'example.com', 'description', - 'something/check') - self.fake_github.emitEvent(A.getCommitStatusEvent('something/check', - state='failed', - user='foo')) - self.waitUntilSettled() - - self.executor_server.hold_jobs_in_build = False - self.executor_server.release() - self.waitUntilSettled() - - # the change should have entered the gate - self.assertHistory([ - dict(name='project-test1', result='SUCCESS'), - dict(name='project-test2', result='SUCCESS'), - ], ordered=False) - self.assertTrue(A.is_merged) - @model_version(10) @simple_layout('layouts/github-merge-mode.yaml', driver='github') def test_merge_method_syntax_check(self): @@ -703,48 +656,6 @@ class TestDefaultBranchUpgrade(ZuulTestCase): self.assertEqual('foobar', md.default_branch) -class TestDeduplication(ZuulTestCase): - config_file = "zuul-gerrit-github.conf" - tenant_config_file = "config/circular-dependencies/main.yaml" - scheduler_count = 1 - - def _test_job_deduplication(self): - A = self.fake_gerrit.addFakeChange('org/project1', 'master', 'A') - B = self.fake_gerrit.addFakeChange('org/project2', 'master', 'B') - - # A <-> B - A.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format( - A.subject, B.data["url"] - ) - B.data["commitMessage"] = "{}\n\nDepends-On: {}\n".format( - B.subject, A.data["url"] - ) - - A.addApproval('Code-Review', 2) - B.addApproval('Code-Review', 2) - - self.fake_gerrit.addEvent(A.addApproval('Approved', 1)) - self.fake_gerrit.addEvent(B.addApproval('Approved', 1)) - - self.waitUntilSettled() - - self.assertEqual(A.data['status'], 'MERGED') - self.assertEqual(B.data['status'], 'MERGED') - - @simple_layout('layouts/job-dedup-auto-shared.yaml') - @model_version(7) - def test_job_deduplication_auto_shared(self): - self._test_job_deduplication() - self.assertHistory([ - dict(name="project1-job", result="SUCCESS", changes="2,1 1,1"), - dict(name="common-job", result="SUCCESS", changes="2,1 1,1"), - dict(name="project2-job", result="SUCCESS", changes="2,1 1,1"), - # This would be deduplicated - dict(name="common-job", result="SUCCESS", changes="2,1 1,1"), - ], ordered=False) - self.assertEqual(len(self.fake_nodepool.history), 4) - - class TestDataReturn(AnsibleZuulTestCase): tenant_config_file = 'config/data-return/main.yaml' diff --git a/tests/unit/test_scheduler.py b/tests/unit/test_scheduler.py index 37b0f7f4c1..a2fd23062d 100644 --- a/tests/unit/test_scheduler.py +++ b/tests/unit/test_scheduler.py @@ -1107,7 +1107,7 @@ class TestScheduler(ZuulTestCase): self.assertEqual(len(queue), 1) 
self.assertEqual(queue[0].zone, None) params = self.executor_server.executor_api.getParams(queue[0]) - self.assertEqual(queue[0].job_name, 'project-merge') + self.assertEqual(params['zuul']['job'], 'project-merge') self.assertEqual(params['items'][0]['number'], '%d' % A.number) self.executor_api.release('.*-merge') @@ -1121,12 +1121,14 @@ class TestScheduler(ZuulTestCase): self.assertEqual(len(self.builds), 0) self.assertEqual(len(queue), 6) - self.assertEqual(queue[0].job_name, 'project-test1') - self.assertEqual(queue[1].job_name, 'project-test2') - self.assertEqual(queue[2].job_name, 'project-test1') - self.assertEqual(queue[3].job_name, 'project-test2') - self.assertEqual(queue[4].job_name, 'project-test1') - self.assertEqual(queue[5].job_name, 'project-test2') + params = [self.executor_server.executor_api.getParams(x) + for x in queue] + self.assertEqual(params[0]['zuul']['job'], 'project-test1') + self.assertEqual(params[1]['zuul']['job'], 'project-test2') + self.assertEqual(params[2]['zuul']['job'], 'project-test1') + self.assertEqual(params[3]['zuul']['job'], 'project-test2') + self.assertEqual(params[4]['zuul']['job'], 'project-test1') + self.assertEqual(params[5]['zuul']['job'], 'project-test2') self.executor_api.release(queue[0]) self.waitUntilSettled() @@ -2935,16 +2937,16 @@ class TestScheduler(ZuulTestCase): items = check_pipeline.getAllItems() self.assertEqual(len(items), 3) - self.assertEqual(items[0].change.number, '1') - self.assertEqual(items[0].change.patchset, '1') + self.assertEqual(items[0].changes[0].number, '1') + self.assertEqual(items[0].changes[0].patchset, '1') self.assertFalse(items[0].live) - self.assertEqual(items[1].change.number, '2') - self.assertEqual(items[1].change.patchset, '1') + self.assertEqual(items[1].changes[0].number, '2') + self.assertEqual(items[1].changes[0].patchset, '1') self.assertTrue(items[1].live) - self.assertEqual(items[2].change.number, '1') - self.assertEqual(items[2].change.patchset, '1') + self.assertEqual(items[2].changes[0].number, '1') + self.assertEqual(items[2].changes[0].patchset, '1') self.assertTrue(items[2].live) # Add a new patchset to A @@ -2957,16 +2959,16 @@ class TestScheduler(ZuulTestCase): items = check_pipeline.getAllItems() self.assertEqual(len(items), 3) - self.assertEqual(items[0].change.number, '1') - self.assertEqual(items[0].change.patchset, '1') + self.assertEqual(items[0].changes[0].number, '1') + self.assertEqual(items[0].changes[0].patchset, '1') self.assertFalse(items[0].live) - self.assertEqual(items[1].change.number, '2') - self.assertEqual(items[1].change.patchset, '1') + self.assertEqual(items[1].changes[0].number, '2') + self.assertEqual(items[1].changes[0].patchset, '1') self.assertTrue(items[1].live) - self.assertEqual(items[2].change.number, '1') - self.assertEqual(items[2].change.patchset, '2') + self.assertEqual(items[2].changes[0].number, '1') + self.assertEqual(items[2].changes[0].patchset, '2') self.assertTrue(items[2].live) # Add a new patchset to B @@ -2979,16 +2981,16 @@ class TestScheduler(ZuulTestCase): items = check_pipeline.getAllItems() self.assertEqual(len(items), 3) - self.assertEqual(items[0].change.number, '1') - self.assertEqual(items[0].change.patchset, '2') + self.assertEqual(items[0].changes[0].number, '1') + self.assertEqual(items[0].changes[0].patchset, '2') self.assertTrue(items[0].live) - self.assertEqual(items[1].change.number, '1') - self.assertEqual(items[1].change.patchset, '1') + self.assertEqual(items[1].changes[0].number, '1') + 
self.assertEqual(items[1].changes[0].patchset, '1') self.assertFalse(items[1].live) - self.assertEqual(items[2].change.number, '2') - self.assertEqual(items[2].change.patchset, '2') + self.assertEqual(items[2].changes[0].number, '2') + self.assertEqual(items[2].changes[0].patchset, '2') self.assertTrue(items[2].live) self.builds[0].release() @@ -3055,13 +3057,13 @@ class TestScheduler(ZuulTestCase): items = check_pipeline.getAllItems() self.assertEqual(len(items), 3) - self.assertEqual(items[0].change.number, '1') + self.assertEqual(items[0].changes[0].number, '1') self.assertFalse(items[0].live) - self.assertEqual(items[1].change.number, '2') + self.assertEqual(items[1].changes[0].number, '2') self.assertTrue(items[1].live) - self.assertEqual(items[2].change.number, '1') + self.assertEqual(items[2].changes[0].number, '1') self.assertTrue(items[2].live) # Abandon A @@ -3073,10 +3075,10 @@ class TestScheduler(ZuulTestCase): items = check_pipeline.getAllItems() self.assertEqual(len(items), 2) - self.assertEqual(items[0].change.number, '1') + self.assertEqual(items[0].changes[0].number, '1') self.assertFalse(items[0].live) - self.assertEqual(items[1].change.number, '2') + self.assertEqual(items[1].changes[0].number, '2') self.assertTrue(items[1].live) self.executor_server.hold_jobs_in_build = False @@ -4589,8 +4591,9 @@ class TestScheduler(ZuulTestCase): first = pipeline_status['change_queues'][0]['heads'][0][0] second = pipeline_status['change_queues'][1]['heads'][0][0] - self.assertIn(first['ref'], ['refs/heads/master', 'refs/heads/stable']) - self.assertIn(second['ref'], + self.assertIn(first['changes'][0]['ref'], + ['refs/heads/master', 'refs/heads/stable']) + self.assertIn(second['changes'][0]['ref'], ['refs/heads/master', 'refs/heads/stable']) self.executor_server.hold_jobs_in_build = False @@ -5799,7 +5802,6 @@ For CI problems and help debugging, contact ci@example.org""" build_set = items[0].current_build_set job = list(filter(lambda j: j.name == 'project-test1', items[0].getJobs()))[0] - build_set.job_graph.getJobFromName(job) for x in range(3): # We should have x+1 retried builds for project-test1 @@ -8311,8 +8313,8 @@ class TestSemaphore(ZuulTestCase): 1) items = check_pipeline.getAllItems() - self.assertEqual(items[0].change.number, '1') - self.assertEqual(items[0].change.patchset, '2') + self.assertEqual(items[0].changes[0].number, '1') + self.assertEqual(items[0].changes[0].patchset, '2') self.assertTrue(items[0].live) self.executor_server.hold_jobs_in_build = False @@ -8389,7 +8391,8 @@ class TestSemaphore(ZuulTestCase): # Save some variables for later use while the job is running check_pipeline = tenant.layout.pipelines['check'] item = check_pipeline.getAllItems()[0] - job = item.getJob('semaphore-one-test1') + job = list(filter(lambda j: j.name == 'semaphore-one-test1', + item.getJobs()))[0] tenant.semaphore_handler.cleanupLeaks() diff --git a/tests/unit/test_sos.py b/tests/unit/test_sos.py index 8085ba8d4a..b217361481 100644 --- a/tests/unit/test_sos.py +++ b/tests/unit/test_sos.py @@ -717,7 +717,12 @@ class TestSOSCircularDependencies(ZuulTestCase): self.assertEqual(len(self.builds), 4) builds = self.builds[:] self.executor_server.failJob('job1', A) + # Since it's one queue item for the two changes, all 4 + # builds need to complete. 
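    # A loop is an equivalent sketch of the four release calls below:
    #     for build in builds:
    #         build.release()
    # Because the single queue item spans both changes, Zuul reports only
    # after the item's entire build set has completed.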
builds[0].release() + builds[1].release() + builds[2].release() + builds[3].release() app.sched.wake_event.set() self.waitUntilSettled(matcher=[app]) self.assertEqual(A.reported, 2) diff --git a/tests/unit/test_timer_driver.py b/tests/unit/test_timer_driver.py index 0f03fbb8b3..4746f1d0b9 100644 --- a/tests/unit/test_timer_driver.py +++ b/tests/unit/test_timer_driver.py @@ -79,7 +79,7 @@ class TestTimerAlwaysDynamicBranches(ZuulTestCase): self.assertEqual(len(pipeline.queues), 2) for queue in pipeline.queues: item = queue.queue[0] - self.assertIn(item.change.branch, ['master', 'stable']) + self.assertIn(item.changes[0].branch, ['master', 'stable']) self.executor_server.hold_jobs_in_build = False diff --git a/tests/unit/test_tracing.py b/tests/unit/test_tracing.py index 26b048fe9d..8d5d080944 100644 --- a/tests/unit/test_tracing.py +++ b/tests/unit/test_tracing.py @@ -23,7 +23,11 @@ from opentelemetry import trace def attributes_to_dict(attrlist): ret = {} for attr in attrlist: - ret[attr.key] = attr.value.string_value + if attr.value.string_value: + ret[attr.key] = attr.value.string_value + else: + ret[attr.key] = [v.string_value + for v in attr.value.array_value.values] return ret @@ -247,8 +251,8 @@ class TestTracing(ZuulTestCase): jobexec.span_id) item_attrs = attributes_to_dict(item.attributes) - self.assertTrue(item_attrs['ref_number'] == "1") - self.assertTrue(item_attrs['ref_patchset'] == "1") + self.assertTrue(item_attrs['ref_number'] == ["1"]) + self.assertTrue(item_attrs['ref_patchset'] == ["1"]) self.assertTrue('zuul_event_id' in item_attrs) def getSpan(self, name): diff --git a/tests/unit/test_v3.py b/tests/unit/test_v3.py index 4e9d780499..fe97468478 100644 --- a/tests/unit/test_v3.py +++ b/tests/unit/test_v3.py @@ -1730,8 +1730,8 @@ class TestInRepoConfig(ZuulTestCase): self.waitUntilSettled() items = check_pipeline.getAllItems() - self.assertEqual(items[0].change.number, '1') - self.assertEqual(items[0].change.patchset, '1') + self.assertEqual(items[0].changes[0].number, '1') + self.assertEqual(items[0].changes[0].patchset, '1') self.assertTrue(items[0].live) in_repo_conf = textwrap.dedent( @@ -1760,8 +1760,8 @@ class TestInRepoConfig(ZuulTestCase): self.waitUntilSettled() items = check_pipeline.getAllItems() - self.assertEqual(items[0].change.number, '1') - self.assertEqual(items[0].change.patchset, '2') + self.assertEqual(items[0].changes[0].number, '1') + self.assertEqual(items[0].changes[0].patchset, '2') self.assertTrue(items[0].live) self.executor_server.hold_jobs_in_build = False @@ -3438,9 +3438,9 @@ class TestExtraConfigInDependent(ZuulTestCase): # Jobs in both changes should be success self.assertHistory([ dict(name='project2-private-extra-file', result='SUCCESS', - changes='3,1 1,1 2,1'), + changes='3,1 2,1 1,1'), dict(name='project2-private-extra-dir', result='SUCCESS', - changes='3,1 1,1 2,1'), + changes='3,1 2,1 1,1'), dict(name='project-test1', result='SUCCESS', changes='3,1 2,1 1,1'), dict(name='project3-private-extra-file', result='SUCCESS', @@ -3987,8 +3987,8 @@ class TestInRepoJoin(ZuulTestCase): self.waitUntilSettled() items = gate_pipeline.getAllItems() - self.assertEqual(items[0].change.number, '1') - self.assertEqual(items[0].change.patchset, '1') + self.assertEqual(items[0].changes[0].number, '1') + self.assertEqual(items[0].changes[0].patchset, '1') self.assertTrue(items[0].live) self.executor_server.hold_jobs_in_build = False diff --git a/tests/unit/test_web.py b/tests/unit/test_web.py index b5ce626667..c23e4ef324 100644 --- a/tests/unit/test_web.py 
+++ b/tests/unit/test_web.py @@ -173,13 +173,14 @@ class TestWeb(BaseTestWeb): # information is missing. self.assertIsNone(q['branch']) for head in q['heads']: - for change in head: + for item in head: self.assertIn( 'review.example.com/org/project', - change['project_canonical']) - self.assertTrue(change['active']) + item['changes'][0]['project_canonical']) + self.assertTrue(item['active']) + change = item['changes'][0] self.assertIn(change['id'], ('1,1', '2,1', '3,1')) - for job in change['jobs']: + for job in item['jobs']: status_jobs.append(job) self.assertEqual('project-merge', status_jobs[0]['name']) # TODO(mordred) pull uuids from self.builds @@ -334,12 +335,13 @@ class TestWeb(BaseTestWeb): data = self.get_url("api/tenant/tenant-one/status/change/1,1").json() self.assertEqual(1, len(data), data) - self.assertEqual("org/project", data[0]['project']) + self.assertEqual("org/project", data[0]['changes'][0]['project']) data = self.get_url("api/tenant/tenant-one/status/change/2,1").json() self.assertEqual(1, len(data), data) - self.assertEqual("org/project1", data[0]['project'], data) + self.assertEqual("org/project1", data[0]['changes'][0]['project'], + data) @simple_layout('layouts/nodeset-alternatives.yaml') def test_web_find_job_nodeset_alternatives(self): @@ -1966,7 +1968,10 @@ class TestBuildInfo(BaseTestWeb): buildsets = self.get_url("api/tenant/tenant-one/buildsets").json() self.assertEqual(2, len(buildsets)) - project_bs = [x for x in buildsets if x["project"] == "org/project"][0] + project_bs = [ + x for x in buildsets + if x["refs"][0]["project"] == "org/project" + ][0] buildset = self.get_url( "api/tenant/tenant-one/buildset/%s" % project_bs['uuid']).json() @@ -2070,7 +2075,10 @@ class TestArtifacts(BaseTestWeb, AnsibleZuulTestCase): self.waitUntilSettled() buildsets = self.get_url("api/tenant/tenant-one/buildsets").json() - project_bs = [x for x in buildsets if x["project"] == "org/project"][0] + project_bs = [ + x for x in buildsets + if x["refs"][0]["project"] == "org/project" + ][0] buildset = self.get_url( "api/tenant/tenant-one/buildset/%s" % project_bs['uuid']).json() self.assertEqual(3, len(buildset["builds"])) @@ -2672,7 +2680,7 @@ class TestTenantScopedWebApi(BaseTestWeb): items = tenant.layout.pipelines['gate'].getAllItems() enqueue_times = {} for item in items: - enqueue_times[str(item.change)] = item.enqueue_time + enqueue_times[str(item.changes[0])] = item.enqueue_time # REST API args = {'pipeline': 'gate', @@ -2699,7 +2707,7 @@ class TestTenantScopedWebApi(BaseTestWeb): items = tenant.layout.pipelines['gate'].getAllItems() for item in items: self.assertEqual( - enqueue_times[str(item.change)], item.enqueue_time) + enqueue_times[str(item.changes[0])], item.enqueue_time) self.waitUntilSettled() self.executor_server.release('.*-merge') @@ -2761,7 +2769,7 @@ class TestTenantScopedWebApi(BaseTestWeb): items = tenant.layout.pipelines['gate'].getAllItems() enqueue_times = {} for item in items: - enqueue_times[str(item.change)] = item.enqueue_time + enqueue_times[str(item.changes[0])] = item.enqueue_time # REST API args = {'pipeline': 'gate', @@ -2788,7 +2796,7 @@ class TestTenantScopedWebApi(BaseTestWeb): items = tenant.layout.pipelines['gate'].getAllItems() for item in items: self.assertEqual( - enqueue_times[str(item.change)], item.enqueue_time) + enqueue_times[str(item.changes[0])], item.enqueue_time) self.waitUntilSettled() self.executor_server.release('.*-merge') @@ -2853,7 +2861,7 @@ class TestTenantScopedWebApi(BaseTestWeb): if i.live] enqueue_times = {} for 
item in items: - enqueue_times[str(item.change)] = item.enqueue_time + enqueue_times[str(item.changes[0])] = item.enqueue_time # REST API args = {'pipeline': 'check', @@ -2882,12 +2890,12 @@ class TestTenantScopedWebApi(BaseTestWeb): if i.live] for item in items: self.assertEqual( - enqueue_times[str(item.change)], item.enqueue_time) + enqueue_times[str(item.changes[0])], item.enqueue_time) # We can't reliably test for side effects in the check # pipeline since the change queues are independent, so we # directly examine the queues. - queue_items = [(item.change.number, item.live) for item in + queue_items = [(item.changes[0].number, item.live) for item in tenant.layout.pipelines['check'].getAllItems()] expected = [('1', False), ('2', True), @@ -3555,7 +3563,7 @@ class TestCLIViaWebApi(BaseTestWeb): items = tenant.layout.pipelines['gate'].getAllItems() enqueue_times = {} for item in items: - enqueue_times[str(item.change)] = item.enqueue_time + enqueue_times[str(item.changes[0])] = item.enqueue_time # Promote B and C using the cli authz = {'iss': 'zuul_operator', @@ -3581,7 +3589,7 @@ class TestCLIViaWebApi(BaseTestWeb): items = tenant.layout.pipelines['gate'].getAllItems() for item in items: self.assertEqual( - enqueue_times[str(item.change)], item.enqueue_time) + enqueue_times[str(item.changes[0])], item.enqueue_time) self.waitUntilSettled() self.executor_server.release('.*-merge') diff --git a/tests/zuul_client/test_zuulclient.py b/tests/zuul_client/test_zuulclient.py index 0487015b38..4e2ecf7a25 100644 --- a/tests/zuul_client/test_zuulclient.py +++ b/tests/zuul_client/test_zuulclient.py @@ -356,7 +356,7 @@ class TestZuulClientAdmin(BaseTestWeb): items = tenant.layout.pipelines['gate'].getAllItems() enqueue_times = {} for item in items: - enqueue_times[str(item.change)] = item.enqueue_time + enqueue_times[str(item.changes[0])] = item.enqueue_time # Promote B and C using the cli authz = {'iss': 'zuul_operator', @@ -382,7 +382,7 @@ class TestZuulClientAdmin(BaseTestWeb): items = tenant.layout.pipelines['gate'].getAllItems() for item in items: self.assertEqual( - enqueue_times[str(item.change)], item.enqueue_time) + enqueue_times[str(item.changes[0])], item.enqueue_time) self.waitUntilSettled() self.executor_server.release('.*-merge') diff --git a/zuul/driver/elasticsearch/reporter.py b/zuul/driver/elasticsearch/reporter.py index cab00d8392..c384b8598e 100644 --- a/zuul/driver/elasticsearch/reporter.py +++ b/zuul/driver/elasticsearch/reporter.py @@ -1,4 +1,5 @@ # Copyright 2019 Red Hat, Inc. +# Copyright 2024 Acme Gating, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain @@ -37,20 +38,34 @@ class ElasticsearchReporter(BaseReporter): docs = [] index = '%s.%s-%s' % (self.index, item.pipeline.tenant.name, time.strftime("%Y.%m.%d")) + changes = [ + { + "project": change.project.name, + "change": getattr(change, 'number', None), + "patchset": getattr(change, 'patchset', None), + "ref": getattr(change, 'ref', ''), + "oldrev": getattr(change, 'oldrev', ''), + "newrev": getattr(change, 'newrev', ''), + "branch": getattr(change, 'branch', ''), + "ref_url": change.url, + } + for change in item.changes + ] buildset_doc = { "uuid": item.current_build_set.uuid, "build_type": "buildset", "tenant": item.pipeline.tenant.name, "pipeline": item.pipeline.name, - "project": item.change.project.name, - "change": getattr(item.change, 'number', None), - "patchset": getattr(item.change, 'patchset', None), - "ref": getattr(item.change, 'ref', ''), - "oldrev": getattr(item.change, 'oldrev', ''), - "newrev": getattr(item.change, 'newrev', ''), - "branch": getattr(item.change, 'branch', ''), + "changes": changes, + "project": item.changes[0].project.name, + "change": getattr(item.changes[0], 'number', None), + "patchset": getattr(item.changes[0], 'patchset', None), + "ref": getattr(item.changes[0], 'ref', ''), + "oldrev": getattr(item.changes[0], 'oldrev', ''), + "newrev": getattr(item.changes[0], 'newrev', ''), + "branch": getattr(item.changes[0], 'branch', ''), "zuul_ref": item.current_build_set.ref, - "ref_url": item.change.url, + "ref_url": item.changes[0].url, "result": item.current_build_set.result, "message": self._formatItemReport(item, with_jobs=False) } @@ -80,8 +95,21 @@ class ElasticsearchReporter(BaseReporter): buildset_doc['duration'] = ( buildset_doc['end_time'] - buildset_doc['start_time']) + change = item.getChangeForJob(build.job) + change_doc = { + "project": change.project.name, + "change": getattr(change, 'number', None), + "patchset": getattr(change, 'patchset', None), + "ref": getattr(change, 'ref', ''), + "oldrev": getattr(change, 'oldrev', ''), + "newrev": getattr(change, 'newrev', ''), + "branch": getattr(change, 'branch', ''), + "ref_url": change.url, + } + build_doc = { "uuid": build.uuid, + "change": change_doc, "build_type": "build", "buildset_uuid": buildset_doc['uuid'], "job_name": build.job.name, diff --git a/zuul/driver/gerrit/gerritconnection.py b/zuul/driver/gerrit/gerritconnection.py index a0919f5d35..8d0fd871e1 100644 --- a/zuul/driver/gerrit/gerritconnection.py +++ b/zuul/driver/gerrit/gerritconnection.py @@ -1,6 +1,6 @@ # Copyright 2011 OpenStack, LLC. # Copyright 2012 Hewlett-Packard Development Company, L.P. -# Copyright 2023 Acme Gating, LLC +# Copyright 2023-2024 Acme Gating, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain @@ -1165,24 +1165,23 @@ class GerritConnection(ZKChangeCacheMixin, ZKBranchCacheMixin, BaseConnection): } self.event_queue.put(event) - def review(self, item, message, submit, labels, checks_api, + def review(self, item, change, message, submit, labels, checks_api, file_comments, phase1, phase2, zuul_event_id=None): if self.session: meth = self.review_http else: meth = self.review_ssh - return meth(item, message, submit, labels, checks_api, + return meth(item, change, message, submit, labels, checks_api, file_comments, phase1, phase2, zuul_event_id=zuul_event_id) - def review_ssh(self, item, message, submit, labels, checks_api, + def review_ssh(self, item, change, message, submit, labels, checks_api, file_comments, phase1, phase2, zuul_event_id=None): log = get_annotated_logger(self.log, zuul_event_id) if checks_api: log.error("Zuul is configured to report to the checks API, " "but no HTTP password is present for the connection " "in the configuration file.") - change = item.change project = change.project.name cmd = 'gerrit review --project %s' % project if phase1: @@ -1208,8 +1207,7 @@ class GerritConnection(ZKChangeCacheMixin, ZKBranchCacheMixin, BaseConnection): out, err = self._ssh(cmd, zuul_event_id=zuul_event_id) return err - def report_checks(self, log, item, changeid, checkinfo): - change = item.change + def report_checks(self, log, item, change, changeid, checkinfo): checkinfo = checkinfo.copy() uuid = checkinfo.pop('uuid', None) scheme = checkinfo.pop('scheme', None) @@ -1254,10 +1252,9 @@ class GerritConnection(ZKChangeCacheMixin, ZKBranchCacheMixin, BaseConnection): "attempt %s: %s", x, e) time.sleep(x * self.submit_retry_backoff) - def review_http(self, item, message, submit, labels, + def review_http(self, item, change, message, submit, labels, checks_api, file_comments, phase1, phase2, zuul_event_id=None): - change = item.change changeid = "%s~%s~%s" % ( urllib.parse.quote(str(change.project), safe=''), urllib.parse.quote(str(change.branch), safe=''), @@ -1293,7 +1290,7 @@ class GerritConnection(ZKChangeCacheMixin, ZKBranchCacheMixin, BaseConnection): if self.version >= (2, 13, 0): data['tag'] = 'autogenerated:zuul:%s' % (item.pipeline.name) if checks_api: - self.report_checks(log, item, changeid, checks_api) + self.report_checks(log, item, change, changeid, checks_api) if (message or data.get('labels') or data.get('comments') or data.get('robot_comments')): for x in range(1, 4): @@ -1356,7 +1353,7 @@ class GerritConnection(ZKChangeCacheMixin, ZKBranchCacheMixin, BaseConnection): def queryChangeHTTP(self, number, event=None): query = ('changes/%s?o=DETAILED_ACCOUNTS&o=CURRENT_REVISION&' 'o=CURRENT_COMMIT&o=CURRENT_FILES&o=LABELS&' - 'o=DETAILED_LABELS' % (number,)) + 'o=DETAILED_LABELS&o=ALL_REVISIONS' % (number,)) if self.version >= (3, 5, 0): query += '&o=SUBMIT_REQUIREMENTS' data = self.get(query) diff --git a/zuul/driver/gerrit/gerritmodel.py b/zuul/driver/gerrit/gerritmodel.py index 42afe44861..54ab35efad 100644 --- a/zuul/driver/gerrit/gerritmodel.py +++ b/zuul/driver/gerrit/gerritmodel.py @@ -160,9 +160,12 @@ class GerritChange(Change): '%s/c/%s/+/%s' % (baseurl, self.project.name, self.number), ] + for rev_commit, revision in data['revisions'].items(): + if str(revision['_number']) == self.patchset: + self.ref = revision['ref'] + self.commit = rev_commit + if str(current_revision['_number']) == self.patchset: - self.ref = current_revision['ref'] - self.commit = data['current_revision'] self.is_current_patchset = True else: self.is_current_patchset = 
False diff --git a/zuul/driver/gerrit/gerritreporter.py b/zuul/driver/gerrit/gerritreporter.py index c38a9484ac..525c1f8823 100644 --- a/zuul/driver/gerrit/gerritreporter.py +++ b/zuul/driver/gerrit/gerritreporter.py @@ -1,4 +1,5 @@ # Copyright 2013 Rackspace Australia +# Copyright 2024 Acme Gating, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -43,44 +44,44 @@ class GerritReporter(BaseReporter): """Send a message to gerrit.""" log = get_annotated_logger(self.log, item.event) + ret = [] + for change in item.changes: + err = self._reportChange(item, change, log, phase1, phase2) + if err: + ret.append(err) + return ret + + def _reportChange(self, item, change, log, phase1=True, phase2=True): + """Send a message to gerrit.""" # If the source is not GerritSource we cannot report anything here. - if not isinstance(item.change.project.source, GerritSource): + if not isinstance(change.project.source, GerritSource): return # We can only report changes, not plain branches - if not isinstance(item.change, Change): + if not isinstance(change, Change): return # For supporting several Gerrit connections we also must filter by # the canonical hostname. - if item.change.project.source.connection.canonical_hostname != \ + if change.project.source.connection.canonical_hostname != \ self.connection.canonical_hostname: - log.debug("Not reporting %s as this Gerrit reporter " - "is for %s and the change is from %s", - item, self.connection.canonical_hostname, - item.change.project.source.connection.canonical_hostname) return - comments = self.getFileComments(item) + comments = self.getFileComments(item, change) if self._create_comment: message = self._formatItemReport(item) else: message = '' log.debug("Report change %s, params %s, message: %s, comments: %s", - item.change, self.config, message, comments) - if phase2 and self._submit and not hasattr(item.change, '_ref_sha'): + change, self.config, message, comments) + if phase2 and self._submit and not hasattr(change, '_ref_sha'): # If we're starting to submit a bundle, save the current # ref sha for every item in the bundle. - changes = set([item.change]) - if item.bundle: - for i in item.bundle.items: - changes.add(i.change) - # Store a dict of project,branch -> sha so that if we have # duplicate project/branches, we only query once.
ref_shas = {} - for other_change in changes: + for other_change in item.changes: if not isinstance(other_change, GerritChange): continue key = (other_change.project, other_change.branch) @@ -92,9 +93,10 @@ class GerritReporter(BaseReporter): ref_shas[key] = ref_sha other_change._ref_sha = ref_sha - return self.connection.review(item, message, self._submit, - self._labels, self._checks_api, - comments, phase1, phase2, + return self.connection.review(item, change, message, + self._submit, self._labels, + self._checks_api, comments, + phase1, phase2, zuul_event_id=item.event) def getSubmitAllowNeeds(self): diff --git a/zuul/driver/git/gitconnection.py b/zuul/driver/git/gitconnection.py index 4e477558bf..fd81b0478f 100644 --- a/zuul/driver/git/gitconnection.py +++ b/zuul/driver/git/gitconnection.py @@ -78,7 +78,7 @@ class GitConnection(ZKChangeCacheMixin, BaseConnection): self.projects[project.name] = project def getChangeFilesUpdated(self, project_name, branch, tosha): - job = self.sched.merger.getFilesChanges( + job = self.sched.merger.getFilesChangesRaw( self.connection_name, project_name, branch, tosha, needs_result=True) self.log.debug("Waiting for fileschanges job %s" % job) @@ -86,8 +86,8 @@ class GitConnection(ZKChangeCacheMixin, BaseConnection): if not job.updated: raise Exception("Fileschanges job %s failed" % job) self.log.debug("Fileschanges job %s got changes on files %s" % - (job, job.files)) - return job.files + (job, job.files[0])) + return job.files[0] def lsRemote(self, project): refs = {} diff --git a/zuul/driver/github/githubreporter.py b/zuul/driver/github/githubreporter.py index c1af9096eb..c77c4aea01 100644 --- a/zuul/driver/github/githubreporter.py +++ b/zuul/driver/github/githubreporter.py @@ -1,4 +1,5 @@ # Copyright 2015 Puppet Labs +# Copyright 2024 Acme Gating, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -58,37 +59,48 @@ class GithubReporter(BaseReporter): self.context = "{}/{}".format(pipeline.tenant.name, pipeline.name) def report(self, item, phase1=True, phase2=True): + """Report on an event.""" + log = get_annotated_logger(self.log, item.event) + + ret = [] + for change in item.changes: + err = self._reportChange(item, change, log, phase1, phase2) + if err: + ret.append(err) + return ret + + def _reportChange(self, item, change, log, phase1=True, phase2=True): """Report on an event.""" # If the source is not GithubSource we cannot report anything here. - if not isinstance(item.change.project.source, GithubSource): + if not isinstance(change.project.source, GithubSource): return # For supporting several Github connections we also must filter by # the canonical hostname. - if item.change.project.source.connection.canonical_hostname != \ + if change.project.source.connection.canonical_hostname != \ self.connection.canonical_hostname: return # order is important for github branch protection. 
# A status should be set before a merge attempt if phase1 and self._commit_status is not None: - if (hasattr(item.change, 'patchset') and - item.change.patchset is not None): - self.setCommitStatus(item) - elif (hasattr(item.change, 'newrev') and - item.change.newrev is not None): - self.setCommitStatus(item) + if (hasattr(change, 'patchset') and + change.patchset is not None): + self.setCommitStatus(item, change) + elif (hasattr(change, 'newrev') and + change.newrev is not None): + self.setCommitStatus(item, change) # Comments, labels, and merges can only be performed on pull requests. # If the change is not a pull request (e.g. a push) skip them. - if hasattr(item.change, 'number'): + if hasattr(change, 'number'): errors_received = False if phase1: if self._labels or self._unlabels: - self.setLabels(item) + self.setLabels(item, change) if self._review: - self.addReview(item) + self.addReview(item, change) if self._check: - check_errors = self.updateCheck(item) + check_errors = self.updateCheck(item, change) # TODO (felix): We could use this mechanism to # also report back errors from label and review # actions @@ -98,12 +110,12 @@ class GithubReporter(BaseReporter): ) errors_received = True if self._create_comment or errors_received: - self.addPullComment(item) + self.addPullComment(item, change) if phase2 and self._merge: try: - self.mergePull(item) + self.mergePull(item, change) except Exception as e: - self.addPullComment(item, str(e)) + self.addPullComment(item, change, str(e)) def _formatJobResult(self, job_fields): # We select different emojis to represents build results: @@ -145,24 +157,24 @@ class GithubReporter(BaseReporter): ret += 'Skipped %i %s\n' % (skipped, jobtext) return ret - def addPullComment(self, item, comment=None): + def addPullComment(self, item, change, comment=None): log = get_annotated_logger(self.log, item.event) message = comment or self._formatItemReport(item) - project = item.change.project.name - pr_number = item.change.number + project = change.project.name + pr_number = change.number log.debug('Reporting change %s, params %s, message: %s', - item.change, self.config, message) + change, self.config, message) self.connection.commentPull(project, pr_number, message, zuul_event_id=item.event) - def setCommitStatus(self, item): + def setCommitStatus(self, item, change): log = get_annotated_logger(self.log, item.event) - project = item.change.project.name - if hasattr(item.change, 'patchset'): - sha = item.change.patchset - elif hasattr(item.change, 'newrev'): - sha = item.change.newrev + project = change.project.name + if hasattr(change, 'patchset'): + sha = change.patchset + elif hasattr(change, 'newrev'): + sha = change.newrev state = self._commit_status url = item.formatStatusUrl() @@ -180,27 +192,27 @@ class GithubReporter(BaseReporter): log.debug( 'Reporting change %s, params %s, ' 'context: %s, state: %s, description: %s, url: %s', - item.change, self.config, self.context, state, description, url) + change, self.config, self.context, state, description, url) self.connection.setCommitStatus( project, sha, state, url, description, self.context, zuul_event_id=item.event) - def mergePull(self, item): + def mergePull(self, item, change): log = get_annotated_logger(self.log, item.event) - merge_mode = item.current_build_set.getMergeMode() + merge_mode = item.current_build_set.getMergeMode(change) if merge_mode not in self.merge_modes: mode = model.get_merge_mode_name(merge_mode) self.log.warning('Merge mode %s not supported by Github', mode) raise 
MergeFailure('Merge mode %s not supported by Github' % mode) - project = item.change.project.name - pr_number = item.change.number - sha = item.change.patchset + project = change.project.name + pr_number = change.number + sha = change.patchset log.debug('Reporting change %s, params %s, merging via API', - item.change, self.config) - message = self._formatMergeMessage(item.change, merge_mode) + change, self.config) + message = self._formatMergeMessage(change, merge_mode) merge_mode = self.merge_modes[merge_mode] for i in [1, 2]: @@ -208,26 +220,26 @@ self.connection.mergePull(project, pr_number, message, sha=sha, method=merge_mode, zuul_event_id=item.event) - self.connection.updateChangeAttributes(item.change, + self.connection.updateChangeAttributes(change, is_merged=True) return except MergeFailure as e: log.exception('Merge attempt of change %s %s/2 failed.', - item.change, i, exc_info=True) + change, i, exc_info=True) error_message = str(e) if i == 1: time.sleep(2) log.warning('Merge of change %s failed after 2 attempts, giving up', - item.change) + change) raise MergeFailure(error_message) - def addReview(self, item): + def addReview(self, item, change): log = get_annotated_logger(self.log, item.event) - project = item.change.project.name - pr_number = item.change.number - sha = item.change.patchset + project = change.project.name + pr_number = change.number + sha = change.patchset log.debug('Reporting change %s, params %s, review:\n%s', - item.change, self.config, self._review) + change, self.config, self._review) self.connection.reviewPull( project, pr_number, @@ -239,12 +251,12 @@ self.connection.unlabelPull(project, pr_number, label, zuul_event_id=item.event) - def updateCheck(self, item): + def updateCheck(self, item, change): log = get_annotated_logger(self.log, item.event) message = self._formatItemReport(item) - project = item.change.project.name - pr_number = item.change.number - sha = item.change.patchset + project = change.project.name + pr_number = change.number + sha = change.patchset status = self._check # We declare an item as completed if it either has a result @@ -260,13 +272,13 @@ log.debug( "Updating check for change %s, params %s, context %s, message: %s", - item.change, self.config, self.context, message + change, self.config, self.context, message ) details_url = item.formatStatusUrl() # Check for inline comments that can be reported via checks API - file_comments = self.getFileComments(item) + file_comments = self.getFileComments(item, change) # Github allows an external id to be added to a check run. We can use # this to identify the check run in any custom actions we define.
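Because a check run is now created for each change in a cycle, the external id that Zuul attaches to a check run identifies the specific change it reports on. A minimal sketch of how a custom action handler could decode it; the handler name and payload access are assumptions, while the keys (tenant, pipeline, change) are the ones Zuul serializes in the hunk below:

    import json

    def on_check_run_action(check_run_payload):
        # Zuul stores a JSON object with tenant, pipeline, and change
        # number in the check run's external_id field.
        info = json.loads(check_run_payload["external_id"])
        return info["tenant"], info["pipeline"], info["change"]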
@@ -279,11 +291,13 @@ class GithubReporter(BaseReporter): { "tenant": item.pipeline.tenant.name, "pipeline": item.pipeline.name, - "change": item.change.number, + "change": change.number, } ) state = item.dynamic_state[self.connection.connection_name] + check_run_ids = state.setdefault('check_run_ids', {}) + check_run_id = check_run_ids.get(change.cache_key) check_run_id, errors = self.connection.updateCheck( project, pr_number, @@ -296,27 +310,27 @@ class GithubReporter(BaseReporter): file_comments, external_id, zuul_event_id=item.event, - check_run_id=state.get('check_run_id') + check_run_id=check_run_id, ) if check_run_id: - state['check_run_id'] = check_run_id + check_run_ids[change.cache_key] = check_run_id return errors - def setLabels(self, item): + def setLabels(self, item, change): log = get_annotated_logger(self.log, item.event) - project = item.change.project.name - pr_number = item.change.number + project = change.project.name + pr_number = change.number if self._labels: log.debug('Reporting change %s, params %s, labels:\n%s', - item.change, self.config, self._labels) + change, self.config, self._labels) for label in self._labels: self.connection.labelPull(project, pr_number, label, zuul_event_id=item.event) if self._unlabels: log.debug('Reporting change %s, params %s, unlabels:\n%s', - item.change, self.config, self._unlabels) + change, self.config, self._unlabels) for label in self._unlabels: self.connection.unlabelPull(project, pr_number, label, zuul_event_id=item.event) diff --git a/zuul/driver/gitlab/gitlabreporter.py b/zuul/driver/gitlab/gitlabreporter.py index b909bc3d70..718a5a494f 100644 --- a/zuul/driver/gitlab/gitlabreporter.py +++ b/zuul/driver/gitlab/gitlabreporter.py @@ -1,4 +1,5 @@ # Copyright 2019 Red Hat, Inc. +# Copyright 2024 Acme Gating, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain @@ -51,62 +52,68 @@ class GitlabReporter(BaseReporter): def report(self, item, phase1=True, phase2=True): """Report on an event.""" - if not isinstance(item.change.project.source, GitlabSource): + for change in item.changes: + self._reportChange(item, change, phase1, phase2) + return [] + + def _reportChange(self, item, change, phase1=True, phase2=True): + """Report on an event.""" + if not isinstance(change.project.source, GitlabSource): return - if item.change.project.source.connection.canonical_hostname != \ + if change.project.source.connection.canonical_hostname != \ self.connection.canonical_hostname: return - if hasattr(item.change, 'number'): + if hasattr(change, 'number'): if phase1: if self._create_comment: - self.addMRComment(item) + self.addMRComment(item, change) if self._approval is not None: - self.setApproval(item) + self.setApproval(item, change) if self._labels or self._unlabels: - self.setLabels(item) + self.setLabels(item, change) if phase2 and self._merge: - self.mergeMR(item) - if not item.change.is_merged: + self.mergeMR(item, change) + if not change.is_merged: msg = self._formatItemReportMergeConflict(item) - self.addMRComment(item, msg) + self.addMRComment(item, change, msg) - def addMRComment(self, item, comment=None): + def addMRComment(self, item, change, comment=None): log = get_annotated_logger(self.log, item.event) message = comment or self._formatItemReport(item) - project = item.change.project.name - mr_number = item.change.number + project = change.project.name + mr_number = change.number log.debug('Reporting change %s, params %s, message: %s', - item.change, self.config, message) + change, self.config, message) self.connection.commentMR(project, mr_number, message, event=item.event) - def setApproval(self, item): + def setApproval(self, item, change): log = get_annotated_logger(self.log, item.event) - project = item.change.project.name - mr_number = item.change.number - patchset = item.change.patchset + project = change.project.name + mr_number = change.number + patchset = change.patchset log.debug('Reporting change %s, params %s, approval: %s', - item.change, self.config, self._approval) + change, self.config, self._approval) self.connection.approveMR(project, mr_number, patchset, self._approval, event=item.event) - def setLabels(self, item): + def setLabels(self, item, change): log = get_annotated_logger(self.log, item.event) - project = item.change.project.name - mr_number = item.change.number + project = change.project.name + mr_number = change.number log.debug('Reporting change %s, params %s, labels: %s, unlabels: %s', - item.change, self.config, self._labels, self._unlabels) + change, self.config, self._labels, self._unlabels) self.connection.updateMRLabels(project, mr_number, self._labels, self._unlabels, zuul_event_id=item.event) - def mergeMR(self, item): - project = item.change.project.name - mr_number = item.change.number + def mergeMR(self, item, change): + project = change.project.name + mr_number = change.number - merge_mode = item.current_build_set.getMergeMode() + merge_mode = item.current_build_set.getMergeMode(change) if merge_mode not in self.merge_modes: mode = model.get_merge_mode_name(merge_mode) @@ -118,17 +125,17 @@ class GitlabReporter(BaseReporter): for i in [1, 2]: try: self.connection.mergeMR(project, mr_number, merge_mode) - item.change.is_merged = True + change.is_merged = True return except MergeFailure: self.log.exception( 'Merge attempt of change %s %s/2 failed.' 
% - (item.change, i), exc_info=True) + (change, i), exc_info=True) if i == 1: time.sleep(2) self.log.warning( 'Merge of change %s failed after 2 attempts, giving up' % - item.change) + change) def getSubmitAllowNeeds(self): return [] diff --git a/zuul/driver/mqtt/mqttreporter.py b/zuul/driver/mqtt/mqttreporter.py index 6765f32a73..2a6e582bae 100644 --- a/zuul/driver/mqtt/mqttreporter.py +++ b/zuul/driver/mqtt/mqttreporter.py @@ -1,4 +1,5 @@ # Copyright 2017 Red Hat, Inc. +# Copyright 2024 Acme Gating, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -32,21 +33,35 @@ class MQTTReporter(BaseReporter): return include_returned_data = self.config.get('include-returned-data') log = get_annotated_logger(self.log, item.event) - log.debug("Report change %s, params %s", item.change, self.config) + log.debug("Report %s, params %s", item, self.config) + changes = [ + { + 'project': change.project.name, + 'branch': getattr(change, 'branch', ''), + 'change_url': change.url, + 'change': getattr(change, 'number', ''), + 'patchset': getattr(change, 'patchset', ''), + 'commit_id': getattr(change, 'commit_id', ''), + 'owner': getattr(change, 'owner', ''), + 'ref': getattr(change, 'ref', ''), + } + for change in item.changes + ] message = { 'timestamp': time.time(), 'action': self._action, 'tenant': item.pipeline.tenant.name, 'zuul_ref': item.current_build_set.ref, 'pipeline': item.pipeline.name, - 'project': item.change.project.name, - 'branch': getattr(item.change, 'branch', ''), - 'change_url': item.change.url, - 'change': getattr(item.change, 'number', ''), - 'patchset': getattr(item.change, 'patchset', ''), - 'commit_id': getattr(item.change, 'commit_id', ''), - 'owner': getattr(item.change, 'owner', ''), - 'ref': getattr(item.change, 'ref', ''), + 'changes': changes, + 'project': item.changes[0].project.name, + 'branch': getattr(item.changes[0], 'branch', ''), + 'change_url': item.changes[0].url, + 'change': getattr(item.changes[0], 'number', ''), + 'patchset': getattr(item.changes[0], 'patchset', ''), + 'commit_id': getattr(item.changes[0], 'commit_id', ''), + 'owner': getattr(item.changes[0], 'owner', ''), + 'ref': getattr(item.changes[0], 'ref', ''), 'message': self._formatItemReport( item, with_jobs=False), 'trigger_time': item.event.timestamp, @@ -63,13 +78,26 @@ for job in item.getJobs(): job_informations = { 'job_name': job.name, + 'job_uuid': job.uuid, 'voting': job.voting, } build = item.current_build_set.getBuild(job) if build: # Report build data if available (result, web_url) = item.formatJobResult(job) + change = item.getChangeForJob(job) + change_info = { + 'project': change.project.name, + 'branch': getattr(change, 'branch', ''), + 'change_url': change.url, + 'change': getattr(change, 'number', ''), + 'patchset': getattr(change, 'patchset', ''), + 'commit_id': getattr(change, 'commit_id', ''), + 'owner': getattr(change, 'owner', ''), + 'ref': getattr(change, 'ref', ''), + } job_informations.update({ + 'change': change_info, 'uuid': build.uuid, 'start_time': build.start_time, 'end_time': build.end_time, @@ -90,16 +118,17 @@ # Report build data of retried builds if available retry_builds = item.current_build_set.getRetryBuildsForJob( job) - for build in retry_builds: - (result, web_url) = item.formatJobResult(job, build) + for retry_build in retry_builds: + (result, web_url) = item.formatJobResult(job, retry_build) retry_build_information = { 'job_name': job.name, 'job_uuid':
job.uuid, 'voting': job.voting, - 'uuid': build.uuid, - 'start_time': build.start_time, - 'end_time': build.end_time, - 'execute_time': build.execute_time, - 'log_url': build.log_url, + 'uuid': retry_build.uuid, + 'start_time': retry_build.start_time, + 'end_time': retry_build.end_time, + 'execute_time': retry_build.execute_time, + 'log_url': retry_build.log_url, 'web_url': web_url, 'result': result, } @@ -112,11 +141,12 @@ class MQTTReporter(BaseReporter): topic = self.config['topic'].format( tenant=item.pipeline.tenant.name, pipeline=item.pipeline.name, - project=item.change.project.name, - branch=getattr(item.change, 'branch', None), - change=getattr(item.change, 'number', None), - patchset=getattr(item.change, 'patchset', None), - ref=getattr(item.change, 'ref', None)) + changes=changes, + project=item.changes[0].project.name, + branch=getattr(item.changes[0], 'branch', None), + change=getattr(item.changes[0], 'number', None), + patchset=getattr(item.changes[0], 'patchset', None), + ref=getattr(item.changes[0], 'ref', None)) except Exception: log.exception("Error while formatting MQTT topic %s:", self.config['topic']) diff --git a/zuul/driver/pagure/pagurereporter.py b/zuul/driver/pagure/pagurereporter.py index b380357529..c420b4aae1 100644 --- a/zuul/driver/pagure/pagurereporter.py +++ b/zuul/driver/pagure/pagurereporter.py @@ -1,4 +1,5 @@ # Copyright 2018 Red Hat, Inc. +# Copyright 2024 Acme Gating, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -36,33 +37,39 @@ class PagureReporter(BaseReporter): def report(self, item, phase1=True, phase2=True): """Report on an event.""" + for change in item.changes: + self._reportChange(item, change, phase1, phase2) + return [] + + def _reportChange(self, item, change, phase1=True, phase2=True): + """Report on an event.""" # If the source is not PagureSource we cannot report anything here. - if not isinstance(item.change.project.source, PagureSource): + if not isinstance(change.project.source, PagureSource): return # For supporting several Pagure connections we also must filter by # the canonical hostname. 
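+        # (Reporting now happens once per change in the queue item,
+        # so this filter is applied to each change individually.)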
- if item.change.project.source.connection.canonical_hostname != \ + if change.project.source.connection.canonical_hostname != \ self.connection.canonical_hostname: return if phase1: if self._commit_status is not None: - if (hasattr(item.change, 'patchset') and - item.change.patchset is not None): - self.setCommitStatus(item) - elif (hasattr(item.change, 'newrev') and - item.change.newrev is not None): - self.setCommitStatus(item) - if hasattr(item.change, 'number'): + if (hasattr(change, 'patchset') and + change.patchset is not None): + self.setCommitStatus(item, change) + elif (hasattr(change, 'newrev') and + change.newrev is not None): + self.setCommitStatus(item, change) + if hasattr(change, 'number'): if self._create_comment: - self.addPullComment(item) + self.addPullComment(item, change) if phase2 and self._merge: - self.mergePull(item) - if not item.change.is_merged: + self.mergePull(item, change) + if not change.is_merged: msg = self._formatItemReportMergeConflict(item) - self.addPullComment(item, msg) + self.addPullComment(item, change, msg) def _formatItemReportJobs(self, item): # Return the list of jobs portion of the report @@ -75,23 +82,23 @@ class PagureReporter(BaseReporter): ret += 'Skipped %i %s\n' % (skipped, jobtext) return ret - def addPullComment(self, item, comment=None): + def addPullComment(self, item, change, comment=None): message = comment or self._formatItemReport(item) - project = item.change.project.name - pr_number = item.change.number + project = change.project.name + pr_number = change.number self.log.debug( 'Reporting change %s, params %s, message: %s' % - (item.change, self.config, message)) + (change, self.config, message)) self.connection.commentPull(project, pr_number, message) - def setCommitStatus(self, item): - project = item.change.project.name - if hasattr(item.change, 'patchset'): - sha = item.change.patchset - elif hasattr(item.change, 'newrev'): - sha = item.change.newrev + def setCommitStatus(self, item, change): + project = change.project.name + if hasattr(change, 'patchset'): + sha = change.patchset + elif hasattr(change, 'newrev'): + sha = change.newrev state = self._commit_status - change_number = item.change.number + change_number = change.number url_pattern = self.config.get('status-url') sched_config = self.connection.sched.config @@ -106,30 +113,30 @@ class PagureReporter(BaseReporter): self.log.debug( 'Reporting change %s, params %s, ' 'context: %s, state: %s, description: %s, url: %s' % - (item.change, self.config, + (change, self.config, self.context, state, description, url)) self.connection.setCommitStatus( project, change_number, state, url, description, self.context) - def mergePull(self, item): - project = item.change.project.name - pr_number = item.change.number + def mergePull(self, item, change): + project = change.project.name + pr_number = change.number for i in [1, 2]: try: self.connection.mergePull(project, pr_number) - item.change.is_merged = True + change.is_merged = True return except MergeFailure: self.log.exception( 'Merge attempt of change %s %s/2 failed.' 
% - (item.change, i), exc_info=True) + (change, i), exc_info=True) if i == 1: time.sleep(2) self.log.warning( 'Merge of change %s failed after 2 attempts, giving up' % - item.change) + change) def getSubmitAllowNeeds(self): return [] diff --git a/zuul/driver/smtp/smtpreporter.py b/zuul/driver/smtp/smtpreporter.py index a5d8938c11..abc292be7b 100644 --- a/zuul/driver/smtp/smtpreporter.py +++ b/zuul/driver/smtp/smtpreporter.py @@ -1,4 +1,5 @@ # Copyright 2013 Rackspace Australia +# Copyright 2024 Acme Gating, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -32,8 +33,8 @@ class SMTPReporter(BaseReporter): log = get_annotated_logger(self.log, item.event) message = self._formatItemReport(item) - log.debug("Report change %s, params %s, message: %s", - item.change, self.config, message) + log.debug("Report %s, params %s, message: %s", + item, self.config, message) from_email = self.config['from'] \ if 'from' in self.config else None @@ -42,13 +43,17 @@ class SMTPReporter(BaseReporter): if 'subject' in self.config: subject = self.config['subject'].format( - change=item.change, pipeline=item.pipeline.getSafeAttributes()) + change=item.changes[0], + changes=item.changes, + pipeline=item.pipeline.getSafeAttributes()) else: - subject = "Report for change {change} against {ref}".format( - change=item.change, ref=item.change.ref) + subject = "Report for changes {changes} against {ref}".format( + changes=' '.join([str(c) for c in item.changes]), + ref=' '.join([c.ref for c in item.changes])) self.connection.sendMail(subject, message, from_email, to_email, zuul_event_id=item.event) + return [] def getSchema(): diff --git a/zuul/driver/sql/sqlconnection.py b/zuul/driver/sql/sqlconnection.py index 9a15a369e7..9cddb12ffa 100644 --- a/zuul/driver/sql/sqlconnection.py +++ b/zuul/driver/sql/sqlconnection.py @@ -246,10 +246,13 @@ class DatabaseSession(object): # joinedload). q = self.session().query(self.connection.buildModel).\ join(self.connection.buildSetModel).\ + join(self.connection.refModel).\ outerjoin(self.connection.providesModel).\ - options(orm.contains_eager(self.connection.buildModel.buildset), + options(orm.contains_eager(self.connection.buildModel.buildset). + subqueryload(self.connection.buildSetModel.refs), orm.selectinload(self.connection.buildModel.provides), - orm.selectinload(self.connection.buildModel.artifacts)) + orm.selectinload(self.connection.buildModel.artifacts), + orm.selectinload(self.connection.buildModel.ref)) q = self.listFilter(q, buildset_table.c.tenant, tenant) q = self.listFilter(q, build_table.c.uuid, uuid) @@ -428,7 +431,9 @@ class DatabaseSession(object): options(orm.joinedload(self.connection.buildSetModel.builds). subqueryload(self.connection.buildModel.artifacts)).\ options(orm.joinedload(self.connection.buildSetModel.builds). - subqueryload(self.connection.buildModel.provides)) + subqueryload(self.connection.buildModel.provides)).\ + options(orm.joinedload(self.connection.buildSetModel.builds). 
+ subqueryload(self.connection.buildModel.ref)) q = self.listFilter(q, buildset_table.c.tenant, tenant) q = self.listFilter(q, buildset_table.c.uuid, uuid) @@ -799,6 +804,11 @@ class SQLConnection(BaseConnection): with self.getSession() as db: return db.getBuilds(*args, **kw) + def getBuild(self, *args, **kw): + """Return a Build object""" + with self.getSession() as db: + return db.getBuild(*args, **kw) + def getBuildsets(self, *args, **kw): """Return a list of BuildSet objects""" with self.getSession() as db: diff --git a/zuul/driver/sql/sqlreporter.py b/zuul/driver/sql/sqlreporter.py index d4627421e4..a7d7e7edfa 100644 --- a/zuul/driver/sql/sqlreporter.py +++ b/zuul/driver/sql/sqlreporter.py @@ -1,4 +1,5 @@ # Copyright 2015 Rackspace Australia +# Copyright 2024 Acme Gating, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -54,16 +55,6 @@ class SQLReporter(BaseReporter): event_timestamp = datetime.datetime.fromtimestamp( item.event.timestamp, tz=datetime.timezone.utc) - ref = db.getOrCreateRef( - project=item.change.project.name, - change=getattr(item.change, 'number', None), - patchset=getattr(item.change, 'patchset', None), - ref_url=item.change.url, - ref=getattr(item.change, 'ref', ''), - oldrev=getattr(item.change, 'oldrev', ''), - newrev=getattr(item.change, 'newrev', ''), - branch=getattr(item.change, 'branch', ''), - ) db_buildset = db.createBuildSet( uuid=buildset.uuid, tenant=item.pipeline.tenant.name, @@ -72,7 +63,18 @@ class SQLReporter(BaseReporter): event_timestamp=event_timestamp, updated=datetime.datetime.utcnow(), ) - db_buildset.refs.append(ref) + for change in item.changes: + ref = db.getOrCreateRef( + project=change.project.name, + change=getattr(change, 'number', None), + patchset=getattr(change, 'patchset', None), + ref_url=change.url, + ref=getattr(change, 'ref', ''), + oldrev=getattr(change, 'oldrev', ''), + newrev=getattr(change, 'newrev', ''), + branch=getattr(change, 'branch', ''), + ) + db_buildset.refs.append(ref) return db_buildset def reportBuildsetStart(self, buildset): @@ -200,15 +202,16 @@ class SQLReporter(BaseReporter): if db_buildset.first_build_start_time is None: db_buildset.first_build_start_time = start item = buildset.item + change = item.getChangeForJob(build.job) ref = db.getOrCreateRef( - project=item.change.project.name, - change=getattr(item.change, 'number', None), - patchset=getattr(item.change, 'patchset', None), - ref_url=item.change.url, - ref=getattr(item.change, 'ref', ''), - oldrev=getattr(item.change, 'oldrev', ''), - newrev=getattr(item.change, 'newrev', ''), - branch=getattr(item.change, 'branch', ''), + project=change.project.name, + change=getattr(change, 'number', None), + patchset=getattr(change, 'patchset', None), + ref_url=change.url, + ref=getattr(change, 'ref', ''), + oldrev=getattr(change, 'oldrev', ''), + newrev=getattr(change, 'newrev', ''), + branch=getattr(change, 'branch', ''), ) db_build = db_buildset.createBuild( diff --git a/zuul/executor/client.py b/zuul/executor/client.py index f8f008dbaa..51e4ed2cfb 100644 --- a/zuul/executor/client.py +++ b/zuul/executor/client.py @@ -56,9 +56,9 @@ class ExecutorClient(object): tracer = trace.get_tracer("zuul") uuid = str(uuid4().hex) log.info( - "Execute job %s (uuid: %s) on nodes %s for change %s " + "Execute job %s (uuid: %s) on nodes %s for %s " "with dependent changes %s", - job, uuid, nodes, item.change, dependent_changes) + job, uuid, nodes, item, 
dependent_changes) params = zuul.executor.common.construct_build_params( uuid, self.sched.connections, @@ -93,7 +93,7 @@ class ExecutorClient(object): if job.name == 'noop': data = {"start_time": time.time()} started_event = BuildStartedEvent( - build.uuid, build.build_set.uuid, job.name, job._job_id, + build.uuid, build.build_set.uuid, job.uuid, None, data, zuul_event_id=build.zuul_event_id) self.result_events[pipeline.tenant.name][pipeline.name].put( started_event @@ -101,7 +101,7 @@ class ExecutorClient(object): result = {"result": "SUCCESS", "end_time": time.time()} completed_event = BuildCompletedEvent( - build.uuid, build.build_set.uuid, job.name, job._job_id, + build.uuid, build.build_set.uuid, job.uuid, None, result, zuul_event_id=build.zuul_event_id) self.result_events[pipeline.tenant.name][pipeline.name].put( completed_event @@ -134,7 +134,7 @@ class ExecutorClient(object): f"{req_id}") data = {"start_time": time.time()} started_event = BuildStartedEvent( - build.uuid, build.build_set.uuid, job.name, job._job_id, + build.uuid, build.build_set.uuid, job.uuid, None, data, zuul_event_id=build.zuul_event_id) self.result_events[pipeline.tenant.name][pipeline.name].put( started_event @@ -142,7 +142,7 @@ class ExecutorClient(object): result = {"result": None, "end_time": time.time()} completed_event = BuildCompletedEvent( - build.uuid, build.build_set.uuid, job.name, job._job_id, + build.uuid, build.build_set.uuid, job.uuid, None, result, zuul_event_id=build.zuul_event_id) self.result_events[pipeline.tenant.name][pipeline.name].put( completed_event @@ -173,8 +173,7 @@ class ExecutorClient(object): request = BuildRequest( uuid=uuid, build_set_uuid=build.build_set.uuid, - job_name=job.name, - job_uuid=job._job_id, + job_uuid=job.uuid, tenant_name=build.build_set.item.pipeline.tenant.name, pipeline_name=build.build_set.item.pipeline.name, zone=executor_zone, @@ -225,7 +224,7 @@ class ExecutorClient(object): pipeline_name = build.build_set.item.pipeline.name event = BuildCompletedEvent( build_request.uuid, build_request.build_set_uuid, - build_request.job_name, build_request.job_uuid, + build_request.job_uuid, build_request.path, result) self.result_events[tenant_name][pipeline_name].put(event) finally: @@ -312,7 +311,7 @@ class ExecutorClient(object): event = BuildCompletedEvent( build_request.uuid, build_request.build_set_uuid, - build_request.job_name, build_request.job_uuid, + build_request.job_uuid, build_request.path, result) self.result_events[build_request.tenant_name][ build_request.pipeline_name].put(event) diff --git a/zuul/executor/common.py b/zuul/executor/common.py index ba25b0164b..6d11508745 100644 --- a/zuul/executor/common.py +++ b/zuul/executor/common.py @@ -30,22 +30,23 @@ def construct_build_params(uuid, connections, job, item, pipeline, environment - for example, a local runner. 
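+
+    With the circular dependency refactor, a queue item may contain
+    several changes; the parameters here are computed for the change
+    associated with the given job (item.getChangeForJob(job)).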
""" tenant = pipeline.tenant + change = item.getChangeForJob(job) project = dict( - name=item.change.project.name, - short_name=item.change.project.name.split('/')[-1], - canonical_hostname=item.change.project.canonical_hostname, - canonical_name=item.change.project.canonical_name, + name=change.project.name, + short_name=change.project.name.split('/')[-1], + canonical_hostname=change.project.canonical_hostname, + canonical_name=change.project.canonical_name, src_dir=os.path.join('src', strings.workspace_project_path( - item.change.project.canonical_hostname, - item.change.project.name, + change.project.canonical_hostname, + change.project.name, job.workspace_scheme)), ) zuul_params = dict( build=uuid, buildset=item.current_build_set.uuid, - ref=item.change.ref, + ref=change.ref, pipeline=pipeline.name, post_review=pipeline.post_review, job=job.name, @@ -54,30 +55,30 @@ def construct_build_params(uuid, connections, job, item, pipeline, event_id=item.event.zuul_event_id if item.event else None, jobtags=sorted(job.tags), ) - if hasattr(item.change, 'branch'): - zuul_params['branch'] = item.change.branch - if hasattr(item.change, 'tag'): - zuul_params['tag'] = item.change.tag - if hasattr(item.change, 'number'): - zuul_params['change'] = str(item.change.number) - if hasattr(item.change, 'url'): - zuul_params['change_url'] = item.change.url - if hasattr(item.change, 'patchset'): - zuul_params['patchset'] = str(item.change.patchset) - if hasattr(item.change, 'message'): - zuul_params['message'] = strings.b64encode(item.change.message) - zuul_params['change_message'] = item.change.message + if hasattr(change, 'branch'): + zuul_params['branch'] = change.branch + if hasattr(change, 'tag'): + zuul_params['tag'] = change.tag + if hasattr(change, 'number'): + zuul_params['change'] = str(change.number) + if hasattr(change, 'url'): + zuul_params['change_url'] = change.url + if hasattr(change, 'patchset'): + zuul_params['patchset'] = str(change.patchset) + if hasattr(change, 'message'): + zuul_params['message'] = strings.b64encode(change.message) + zuul_params['change_message'] = change.message commit_id = None - if (hasattr(item.change, 'oldrev') and item.change.oldrev - and item.change.oldrev != '0' * 40): - zuul_params['oldrev'] = item.change.oldrev - commit_id = item.change.oldrev - if (hasattr(item.change, 'newrev') and item.change.newrev - and item.change.newrev != '0' * 40): - zuul_params['newrev'] = item.change.newrev - commit_id = item.change.newrev - if hasattr(item.change, 'commit_id'): - commit_id = item.change.commit_id + if (hasattr(change, 'oldrev') and change.oldrev + and change.oldrev != '0' * 40): + zuul_params['oldrev'] = change.oldrev + commit_id = change.oldrev + if (hasattr(change, 'newrev') and change.newrev + and change.newrev != '0' * 40): + zuul_params['newrev'] = change.newrev + commit_id = change.newrev + if hasattr(change, 'commit_id'): + commit_id = change.commit_id if commit_id: zuul_params['commit_id'] = commit_id @@ -101,8 +102,8 @@ def construct_build_params(uuid, connections, job, item, pipeline, params['job_ref'] = job.getPath() params['items'] = merger_items params['projects'] = [] - if hasattr(item.change, 'branch'): - params['branch'] = item.change.branch + if hasattr(change, 'branch'): + params['branch'] = change.branch else: params['branch'] = None merge_rs = item.current_build_set.merge_repo_state @@ -116,8 +117,8 @@ def construct_build_params(uuid, connections, job, item, pipeline, params['ssh_keys'].append("REDACTED") else: params['ssh_keys'].append(dict( - 
connection_name=item.change.project.connection_name, - project_name=item.change.project.name)) + connection_name=change.project.connection_name, + project_name=change.project.name)) params['zuul'] = zuul_params projects = set() required_projects = set() diff --git a/zuul/executor/server.py b/zuul/executor/server.py index 23b894acfa..31ebd28d77 100644 --- a/zuul/executor/server.py +++ b/zuul/executor/server.py @@ -4196,7 +4196,7 @@ class ExecutorServer(BaseMergeServer): event = BuildStartedEvent( build_request.uuid, build_request.build_set_uuid, - build_request.job_name, build_request.job_uuid, + build_request.job_uuid, build_request.path, data, build_request.event_id) self.result_events[build_request.tenant_name][ build_request.pipeline_name].put(event) @@ -4204,7 +4204,7 @@ class ExecutorServer(BaseMergeServer): def updateBuildStatus(self, build_request, data): event = BuildStatusEvent( build_request.uuid, build_request.build_set_uuid, - build_request.job_name, build_request.job_uuid, + build_request.job_uuid, build_request.path, data, build_request.event_id) self.result_events[build_request.tenant_name][ build_request.pipeline_name].put(event) @@ -4219,7 +4219,7 @@ class ExecutorServer(BaseMergeServer): event = BuildPausedEvent( build_request.uuid, build_request.build_set_uuid, - build_request.job_name, build_request.job_uuid, + build_request.job_uuid, build_request.path, data, build_request.event_id) self.result_events[build_request.tenant_name][ build_request.pipeline_name].put(event) @@ -4286,7 +4286,7 @@ class ExecutorServer(BaseMergeServer): updater = self.executor_api.getRequestUpdater(build_request) event = BuildCompletedEvent( build_request.uuid, build_request.build_set_uuid, - build_request.job_name, build_request.job_uuid, + build_request.job_uuid, build_request.path, result, build_request.event_id) build_request.state = BuildRequest.COMPLETED updated = False diff --git a/zuul/manager/__init__.py b/zuul/manager/__init__.py index 42bb56cfcb..773ccf4439 100644 --- a/zuul/manager/__init__.py +++ b/zuul/manager/__init__.py @@ -1,3 +1,5 @@ +# Copyright 2021-2024 Acme Gating, LLC +# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at @@ -25,11 +27,10 @@ from zuul.lib.logutil import get_annotated_logger from zuul.lib.tarjan import strongly_connected_components import zuul.lib.tracing as tracing from zuul.model import ( - Change, DequeueEvent, PipelineState, PipelineChangeList, QueueItem, + Change, PipelineState, PipelineChangeList, QueueItem, filter_severity ) from zuul.zk.change_cache import ChangeKey -from zuul.zk.components import COMPONENT_REGISTRY from zuul.zk.exceptions import LockException from zuul.zk.locks import pipeline_lock @@ -204,12 +205,17 @@ class PipelineManager(metaclass=ABCMeta): event, change, ef, self, str(match_result))) return False - def getNodePriority(self, item): - queue = self.pipeline.getRelativePriorityQueue(item.change.project) - items = self.pipeline.getAllItems() - items = [i for i in items - if i.change.project in queue and - i.live] + def getNodePriority(self, item, change): + queue_projects = set(self.pipeline.getRelativePriorityQueue( + change.project)) + items = [] + for i in self.pipeline.getAllItems(): + if not i.live: + continue + item_projects = set([ + c.project for c in i.changes]) + if item_projects.intersection(queue_projects): + items.append(i) index = items.index(item) # Quantize on a logarithmic scale so that we don't constantly # needlessly adjust thousands of node requests. @@ -260,10 +266,12 @@ class PipelineManager(metaclass=ABCMeta): if item.layout_uuid: active_layout_uuids.add(item.layout_uuid) - if isinstance(item.change, model.Change): - referenced_change_keys.update(item.change.getNeedsChanges( - self.useDependenciesByTopic(item.change.project))) - referenced_change_keys.update(item.change.getNeededByChanges()) + for change in item.changes: + if isinstance(change, model.Change): + referenced_change_keys.update(change.getNeedsChanges( + self.useDependenciesByTopic(change.project))) + referenced_change_keys.update( + change.getNeededByChanges()) # Clean up unused layouts in the cache unused_layouts = set(self._layout_cache.keys()) - active_layout_uuids @@ -283,8 +291,11 @@ class PipelineManager(metaclass=ABCMeta): def isChangeAlreadyInPipeline(self, change): # Checks live items in the pipeline for item in self.pipeline.getAllItems(): - if item.live and change.equals(item.change): - return True + if not item.live: + continue + for c in item.changes: + if change.equals(c): + return True return False def isChangeRelevantToPipeline(self, change): @@ -304,8 +315,9 @@ class PipelineManager(metaclass=ABCMeta): def isChangeAlreadyInQueue(self, change, change_queue): # Checks any item in the specified change queue for item in change_queue.queue: - if change.equals(item.change): - return True + for c in item.changes: + if change.equals(c): + return True return False def refreshDeps(self, change, event): @@ -314,14 +326,15 @@ class PipelineManager(metaclass=ABCMeta): to_refresh = set() for item in self.pipeline.getAllItems(): - if not isinstance(item.change, model.Change): - continue - if item.change.equals(change): - to_refresh.add(item.change) - for dep_change_ref in item.change.commit_needs_changes: - dep_change_key = ChangeKey.fromReference(dep_change_ref) - if dep_change_key.isSameChange(change.cache_stat.key): - to_refresh.add(item.change) + for item_change in item.changes: + if not isinstance(item_change, model.Change): + continue + if item_change.equals(change): + to_refresh.add(item_change) + for dep_change_ref in item_change.commit_needs_changes: + dep_change_key = ChangeKey.fromReference(dep_change_ref) + if 
dep_change_key.isSameChange(change.cache_stat.key): + to_refresh.add(item_change) for existing_change in to_refresh: self.updateCommitDependencies(existing_change, event) @@ -346,7 +359,7 @@ class PipelineManager(metaclass=ABCMeta): def reportNormalBuildsetEnd(self, build_set, action, final, result=None): # Report a buildset end if there are jobs or errors - if ((build_set.job_graph and len(build_set.job_graph.jobs) > 0) or + if ((build_set.job_graph and len(build_set.job_graph.job_uuids) > 0) or build_set.has_blocking_errors or build_set.unable_to_merge): self.sql.reportBuildsetEnd(build_set, action, @@ -383,14 +396,16 @@ class PipelineManager(metaclass=ABCMeta): try: ret = reporter.report(item, phase1=phase1, phase2=phase2) if ret: - report_errors.append(ret) + for r in ret: + if r: + report_errors.append(r) except Exception as e: item.setReportedResult('ERROR') log.exception("Exception while reporting") report_errors.append(str(e)) return report_errors - def isChangeReadyToBeEnqueued(self, change, event): + def areChangesReadyToBeEnqueued(self, changes, event): return True def enqueueChangesAhead(self, change, event, quiet, ignore_requirements, @@ -403,7 +418,7 @@ class PipelineManager(metaclass=ABCMeta): dependency_graph=None): return True - def getMissingNeededChanges(self, change, change_queue, event, + def getMissingNeededChanges(self, changes, change_queue, event, dependency_graph=None): """Check that all needed changes are ahead in the queue. @@ -414,8 +429,8 @@ class PipelineManager(metaclass=ABCMeta): """ return False, [] - def getFailingDependentItems(self, item, nnfi): - return None + def getFailingDependentItems(self, item): + return [] def getItemForChange(self, change, change_queue=None): if change_queue is not None: @@ -424,16 +439,18 @@ class PipelineManager(metaclass=ABCMeta): items = self.pipeline.getAllItems() for item in items: - if item.change.equals(change): - return item + for c in item.changes: + if change.equals(c): + return item return None def findOldVersionOfChangeAlreadyInQueue(self, change): for item in self.pipeline.getAllItems(): if not item.live: continue - if change.isUpdateOf(item.change): - return item + for item_change in item.changes: + if change.isUpdateOf(item_change): + return item return None def removeOldVersionsOfChange(self, change, event): @@ -442,18 +459,66 @@ class PipelineManager(metaclass=ABCMeta): old_item = self.findOldVersionOfChangeAlreadyInQueue(change) if old_item: log = get_annotated_logger(self.log, event) - log.debug("Change %s is a new version of %s, removing %s", - change, old_item.change, old_item) + log.debug("Change %s is a new version, removing %s", + change, old_item) self.removeItem(old_item) def removeAbandonedChange(self, change, event): log = get_annotated_logger(self.log, event) - log.debug("Change %s abandoned, removing." % change) + log.debug("Change %s abandoned, removing", change) for item in self.pipeline.getAllItems(): if not item.live: continue - if item.change.equals(change): - self.removeItem(item) + for item_change in item.changes: + if item_change.equals(change): + if len(item.changes) > 1: + msg = ("Dependency cycle change " + f"{change.url} abandoned.") + item.setDequeuedNeedingChange(msg) + try: + self.reportItem(item) + except exceptions.MergeFailure: + pass + self.removeItem(item) + + def reEnqueueIfDepsPresent(self, item, needs_changes, log, + skip_presence_check=True): + # This item is about to be dequeued because it's missing + # changes. Try to re-enqueue it before dequeing. 
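+        # (Re-enqueueing can succeed when the missing changes have
+        # been replaced by updated versions that are already in the
+        # pipeline.)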
+        # Return whether the dequeue should be quiet.
+        if not item.live:
+            return False
+        if not all(self.isChangeAlreadyInPipeline(c) for c in needs_changes):
+            return False
+        # We enqueue only the first change in the item, presuming
+        # that the remaining changes will be pulled in as
+        # appropriate. Because we skip the presence check, we
+        # can't enqueue all of the items changes directly since we
+        # would end up with new items for every change. We only
+        # want one new item.
+        change = item.changes[0]
+        # Check if there is another live item with the change already.
+        for other_item in self.pipeline.getAllItems():
+            if other_item is item:
+                continue
+            if not other_item.live:
+                continue
+            for item_change in other_item.changes:
+                if item_change.equals(change):
+                    return True
+        # Try enqueue, if that succeeds, keep this dequeue quiet
+        try:
+            log.info("Attempting re-enqueue of %s", item)
+            return self.addChange(
+                change, item.event,
+                enqueue_time=item.enqueue_time,
+                quiet=True,
+                skip_presence_check=skip_presence_check)
+        except Exception:
+            log.exception("Unable to re-enqueue %s "
+                          "which is missing dependencies",
+                          item)
+            return False
 
     @abstractmethod
     def getChangeQueue(self, change, event, existing=None):
@@ -461,11 +526,11 @@
     def reEnqueueItem(self, item, last_head, old_item_ahead,
                       item_ahead_valid):
         log = get_annotated_logger(self.log, item.event)
-        with self.getChangeQueue(item.change, item.event,
+        with self.getChangeQueue(item.changes[0], item.event,
                                  last_head.queue) as change_queue:
             if change_queue:
-                log.debug("Re-enqueing change %s in queue %s",
-                          item.change, change_queue)
+                log.debug("Re-enqueing %s in queue %s",
+                          item, change_queue)
                 change_queue.enqueueItem(item)
 
                 # If the old item ahead was re-enqued, this value will
@@ -520,7 +585,7 @@
                 return True
         else:
             log.error("Unable to find change queue for project %s",
-                      item.change.project)
+                      item.changes[0].project)
             return False
 
     def addChange(self, change, event, quiet=False, enqueue_time=None,
@@ -553,30 +618,8 @@
                       change, self.pipeline)
             return False
 
-        if not ignore_requirements:
-            for f in self.ref_filters:
-                if f.connection_name != change.project.connection_name:
-                    log.debug("Filter %s skipped for change %s due "
-                              "to mismatched connections" % (f, change))
-                    continue
-                match_result = f.matches(change)
-                if not match_result:
-                    log.debug("Change %s does not match pipeline "
-                              "requirement %s because %s" % (
-                                  change, f, str(match_result)))
-                    return False
-
-        if not self.isChangeReadyToBeEnqueued(change, event):
-            log.debug("Change %s is not ready to be enqueued, ignoring" %
-                      change)
-            return False
-
-        # We know this change isn't in this pipeline, but it may be in
-        # others. If it is, then presumably its commit_needs are up
-        # to date and this is a noop; otherwise, we need to refresh
-        # them anyway.
- if isinstance(change, model.Change): - self.updateCommitDependencies(change, event) + self.getDependencyGraph(change, dependency_graph, event, + update_deps=True) with self.getChangeQueue(change, event, change_queue) as change_queue: if not change_queue: @@ -585,14 +628,54 @@ class PipelineManager(metaclass=ABCMeta): (change, change.project)) return False + cycle = [] + if isinstance(change, model.Change): + cycle = self.cycleForChange(change, dependency_graph, event) + cycle = self.sortCycleByGitDepends(cycle) + if not cycle: + cycle = [change] + + if not ignore_requirements: + for f in self.ref_filters: + for cycle_change in cycle: + if (f.connection_name != + cycle_change.project.connection_name): + log.debug("Filter %s skipped for change %s due " + "to mismatched connections", + f, cycle_change) + continue + match_result = f.matches(cycle_change) + if not match_result: + log.debug("Change %s does not match pipeline " + "requirement %s because %s", + cycle_change, f, str(match_result)) + return False + + if not self.areChangesReadyToBeEnqueued(cycle, event): + log.debug("Cycle %s is not ready to be enqueued, ignoring" % + cycle) + return False + + if len(cycle) > 1: + for cycle_change in cycle: + if not self.canProcessCycle(cycle_change.project): + log.info("Dequeing change %s since the project " + "does not allow circular dependencies", + cycle_change) + warnings = ["Dependency cycle detected and project " + f"{cycle_change.project.name} " + "doesn't allow circular dependencies"] + self._reportNonEqueuedItem( + change_queue, change, event, warnings) + return False + warnings = [] - if not self.enqueueChangesAhead(change, event, quiet, - ignore_requirements, - change_queue, history=history, - dependency_graph=dependency_graph, - warnings=warnings): - self.dequeueIncompleteCycle(change, dependency_graph, event, - change_queue) + if not self.enqueueChangesAhead( + cycle, event, quiet, + ignore_requirements, + change_queue, history=history, + dependency_graph=dependency_graph, + warnings=warnings): log.debug("Failed to enqueue changes ahead of %s" % change) if warnings: self._reportNonEqueuedItem(change_queue, change, @@ -607,21 +690,8 @@ class PipelineManager(metaclass=ABCMeta): change) return True - cycle = [] - if isinstance(change, model.Change): - cycle = self.cycleForChange(change, dependency_graph, event) - if cycle and not self.canProcessCycle(change.project): - log.info("Dequeing change %s since the project " - "does not allow circular dependencies", change) - warnings = ["Dependency cycle detected and project " - f"{change.project.name} doesn't allow " - "circular dependencies"] - self._reportNonEqueuedItem(change_queue, - cycle[-1], event, warnings) - return False - - log.info("Adding change %s to queue %s in %s" % - (change, change_queue, self.pipeline)) + log.info("Adding %s to queue %s in %s" % + (cycle, change_queue, self.pipeline)) if enqueue_time is None: enqueue_time = time.time() @@ -632,10 +702,9 @@ class PipelineManager(metaclass=ABCMeta): span_info = tracing.startSavedSpan( 'QueueItem', start_time=enqueue_time, links=[link]) - item = change_queue.enqueueChange(change, event, - span_info=span_info, - enqueue_time=enqueue_time) - self.updateBundle(item, change_queue, cycle) + item = change_queue.enqueueChanges(cycle, event, + span_info=span_info, + enqueue_time=enqueue_time) with item.activeContext(self.current_context): if enqueue_time: @@ -647,25 +716,18 @@ class PipelineManager(metaclass=ABCMeta): if item.live: self.reportEnqueue(item) - # Items in a dependency 
cycle are expected to be enqueued after - # each other. To prevent non-cycle items from being enqueued - # between items of the same cycle, enqueue items behind each item - # in the cycle once all items in the cycle are enqueued. - if all([self.isChangeAlreadyInQueue(c, change_queue) - for c in cycle]): - if cycle: - self.log.debug("Cycle complete, enqueing changes behind") - for c in cycle or [change]: - self.enqueueChangesBehind(c, event, quiet, - ignore_requirements, - change_queue, history, - dependency_graph) + for c in cycle: + self.enqueueChangesBehind(c, event, quiet, + ignore_requirements, + change_queue, history, + dependency_graph) zuul_driver = self.sched.connections.drivers['zuul'] tenant = self.pipeline.tenant with trace.use_span(tracing.restoreSpan(item.span_info)): - zuul_driver.onChangeEnqueued( - tenant, item.change, self.pipeline, event) + for c in item.changes: + zuul_driver.onChangeEnqueued( + tenant, c, self.pipeline, event) self.dequeueSupercededItems(item) return True @@ -673,7 +735,7 @@ class PipelineManager(metaclass=ABCMeta): # Enqueue an item which otherwise can not be enqueued in order # to report a message to the user. actions = self.pipeline.failure_actions - ci = change_queue.enqueueChange(change, event) + ci = change_queue.enqueueChanges([change], event) try: for w in warnings: ci.warning(w) @@ -682,7 +744,8 @@ class PipelineManager(metaclass=ABCMeta): # Only report the item if the project is in the current # pipeline. Otherwise the change could be spammed by # reports from unrelated pipelines. - if self.pipeline.tenant.layout.getProjectPipelineConfig(ci): + if self.pipeline.tenant.layout.getProjectPipelineConfig( + ci, change): self.sendReport(actions, ci) finally: # Ensure that the item is dequeued in any case. Otherwise we @@ -712,6 +775,22 @@ class PipelineManager(metaclass=ABCMeta): return scc return [] + def sortCycleByGitDepends(self, cycle): + new_cycle = [] + cycle = list(cycle) + while cycle: + self._sortCycleByGitDepends(cycle[0], cycle, new_cycle) + return new_cycle + + def _sortCycleByGitDepends(self, change, cycle, new_cycle): + cycle.remove(change) + for needed_change in self.resolveChangeReferences( + change.git_needs_changes): + if needed_change not in cycle: + continue + self._sortCycleByGitDepends(needed_change, cycle, new_cycle) + new_cycle.append(change) + def getCycleDependencies(self, change, dependency_graph, event): cycle = self.cycleForChange(change, dependency_graph, event) return set( @@ -720,7 +799,11 @@ class PipelineManager(metaclass=ABCMeta): ) - set(cycle) def getDependencyGraph(self, change, dependency_graph, event, - history=None): + update_deps=False, + history=None, quiet=False, indent=''): + log = get_annotated_logger(self.log, event) + if not quiet: + log.debug("%sChecking for changes needed by %s:", indent, change) if self.pipeline.ignore_dependencies: return if not isinstance(change, model.Change): @@ -731,17 +814,36 @@ class PipelineManager(metaclass=ABCMeta): if history is None: history = set() history.add(change) + if update_deps: + self.updateCommitDependencies(change, event) for needed_change in self.resolveChangeReferences( change.getNeedsChanges( self.useDependenciesByTopic(change.project))): + if not quiet: + log.debug("%sChange %s needs change %s:", + indent, change, needed_change) if needed_change.is_merged: + if not quiet: + log.debug("%sNeeded change is merged", indent) continue + if (self.pipeline.tenant.max_dependencies is not None and + (len(dependency_graph) > + 
self.pipeline.tenant.max_dependencies)): + log.debug("%sDependency graph for change %s is too large", + indent, change) + raise Exception("Dependency graph is too large") + node = dependency_graph.setdefault(change, []) - node.append(needed_change) + if needed_change not in node: + if not quiet: + log.debug("%sAdding change %s to dependency graph for " + "change %s", indent, needed_change, change) + node.append(needed_change) if needed_change not in history: self.getDependencyGraph(needed_change, dependency_graph, - event, history) + event, update_deps, history, + quiet, indent + ' ') def getQueueConfig(self, project): layout = self.pipeline.tenant.layout @@ -774,56 +876,15 @@ class PipelineManager(metaclass=ABCMeta): return queue_config.dependencies_by_topic - def getNonMergeableCycleChanges(self, bundle): + def getNonMergeableCycleChanges(self, item): """Return changes in the cycle that do not fulfill the pipeline's ready criteria.""" return [] - def updateBundle(self, item, change_queue, cycle): - if not cycle: - return - - log = get_annotated_logger(self.log, item.event) - item.updateAttributes(self.current_context, bundle=model.Bundle()) - - # Try to find already enqueued items of this cycle, so we use - # the same bundle - for needed_change in (c for c in cycle if c is not item.change): - needed_item = self.getItemForChange(needed_change, change_queue) - if not needed_item: - continue - # Use a common bundle for the cycle - item.updateAttributes(self.current_context, - bundle=needed_item.bundle) - break - - log.info("Adding cycle item %s to bundle %s", item, item.bundle) - bundle = item.bundle - bundle.add_item(item) - - # Write out the updated bundle info to Zookeeper for all items - # since it may have mutated since our last write. - for bundle_item in bundle.items: - bundle_item.updateAttributes(self.current_context, - bundle=bundle) - - def dequeueIncompleteCycle(self, change, dependency_graph, event, - change_queue): - log = get_annotated_logger(self.log, event) - cycle = self.cycleForChange(change, dependency_graph, event) - enqueued_cycle_items = [i for i in (self.getItemForChange(c, - change_queue) - for c in cycle) if i is not None] - if enqueued_cycle_items: - log.info("Dequeuing incomplete cycle items: %s", - enqueued_cycle_items) - for cycle_item in enqueued_cycle_items: - self.dequeueItem(cycle_item) - def dequeueItem(self, item, quiet=False): log = get_annotated_logger(self.log, item.event) - log.debug("Removing change %s from queue", item.change) + log.debug("Removing %s from queue", item) # In case a item is dequeued that doesn't have a result yet # (success/failed/...) we report it as dequeued. # Without this check, all items with a valid result would be reported @@ -843,8 +904,9 @@ class PipelineManager(metaclass=ABCMeta): 'zuul_tenant': self.pipeline.tenant.name, 'zuul_pipeline': self.pipeline.name, } - for k, v in item.change.getSafeAttributes().toDict().items(): - span_attrs['ref_' + k] = v + for change in item.changes: + for k, v in change.getSafeAttributes().toDict().items(): + span_attrs.setdefault(f'ref_{k}', []).append(v) tracing.endSavedSpan(item.current_build_set.span_info) tracing.endSavedSpan(item.span_info, attributes=span_attrs) @@ -853,68 +915,34 @@ class PipelineManager(metaclass=ABCMeta): log = get_annotated_logger(self.log, item.event) # Remove an item from the queue, probably because it has been # superseded by another change. 
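+        # (A queue item now carries every change of a dependency
+        # cycle, so there is no separate bundle of items to clean up
+        # behind it.)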
- log.debug("Canceling builds behind change: %s " - "because it is being removed.", item.change) + log.debug("Canceling builds behind item: %s " + "because it is being removed.", item) self.cancelJobs(item) self.dequeueItem(item) self.reportStats(item) - if item.bundle is None: - return - - log.debug("Dequeueing items in bundle %s", item.bundle) - bundle_iter = (i for i in item.bundle.items if i is not item) - for bundle_item in bundle_iter: - self.cancelJobs(bundle_item) - self.dequeueItem(bundle_item) - self.reportStats(bundle_item) - def dequeueSupercededItems(self, item): - change_id = ( - item.change._id() if isinstance(item.change, Change) - else None - ) for other_name in self.pipeline.supercedes: other_pipeline = self.pipeline.tenant.layout.pipelines.get( other_name) if not other_pipeline: continue - # MODEL_API: >2 - if COMPONENT_REGISTRY.model_api > 2: + for change in item.changes: + change_id = ( + change._id() if isinstance(change, Change) + else None + ) event = model.SupercedeEvent( other_pipeline.tenant.name, other_pipeline.name, - item.change.project.canonical_hostname, - item.change.project.name, + change.project.canonical_hostname, + change.project.name, change_id, - item.change.ref) + change.ref) self.sched.pipeline_trigger_events[ self.pipeline.tenant.name][other_pipeline.name ].put_supercede(event) - else: - # Note: Iterating over the pipelines w/o locking and - # refreshing them is wrong and only kept for backward - # compatibility. - found = None - for other_item in other_pipeline.getAllItems(): - if (other_item.live - and other_item.change.equals(item.change)): - found = other_item - break - if found: - self.log.info("Item %s is superceded by %s, dequeuing", - found, item) - event = DequeueEvent( - other_pipeline.tenant.name, - other_pipeline.name, - item.change.project.canonical_hostname, - item.change.project.name, - change_id, - item.change.ref) - self.sched.pipeline_management_events[ - self.pipeline.tenant.name][other_pipeline.name].put( - event, needs_result=False) def updateCommitDependencies(self, change, event): log = get_annotated_logger(self.log, event) @@ -983,14 +1011,15 @@ class PipelineManager(metaclass=ABCMeta): if not jobs: return False build_set = item.current_build_set - log.debug("Requesting nodes for change %s", item.change) - if self.sched.globals.use_relative_priority: - relative_priority = item.getNodePriority() - else: - relative_priority = 0 + log.debug("Requesting nodes for %s", item) parent_span = tracing.restoreSpan(build_set.span_info) with trace.use_span(parent_span): for job in jobs: + if self.sched.globals.use_relative_priority: + relative_priority = self.getNodePriority( + item, item.getChangeForJob(job)) + else: + relative_priority = 0 self._makeNodepoolRequest( log, build_set, job, relative_priority) return True @@ -1010,7 +1039,7 @@ class PipelineManager(metaclass=ABCMeta): build_set.setJobNodeRequestID(job, req.id) if req.fulfilled: nodeset = self.sched.nodepool.getNodeSet(req, job.nodeset) - job = build_set.item.getJob(req._job_id) + job = build_set.item.getJob(req.job_uuid) build_set.jobNodeRequestComplete(job, nodeset) else: job.setWaitingStatus(f'node request: {req.id}') @@ -1040,10 +1069,10 @@ class PipelineManager(metaclass=ABCMeta): def _executeJobs(self, item, jobs): log = get_annotated_logger(self.log, item.event) - log.debug("Executing jobs for change %s", item.change) + log.debug("Executing jobs for %s", item) build_set = item.current_build_set for job in jobs: - log.debug("Found job %s for change %s", job, 
item.change) + log.debug("Found job %s for %s", job, item) try: zone = build_set.getJobNodeExecutorZone(job) nodes = build_set.getJobNodeList(job) @@ -1054,7 +1083,7 @@ class PipelineManager(metaclass=ABCMeta): job.setWaitingStatus('executor') except Exception: log.exception("Exception while executing job %s " - "for change %s:", job, item.change) + "for %s:", job, item) try: # If we hit an exception we don't have a build in the # current item so a potentially aquired semaphore must be @@ -1080,7 +1109,7 @@ class PipelineManager(metaclass=ABCMeta): def cancelJobs(self, item, prime=True): log = get_annotated_logger(self.log, item.event) - log.debug("Cancel jobs for change %s", item.change) + log.debug("Cancel jobs for %s", item) canceled = False old_build_set = item.current_build_set jobs_to_cancel = item.getJobs() @@ -1088,11 +1117,7 @@ class PipelineManager(metaclass=ABCMeta): for job in jobs_to_cancel: self.sched.cancelJob(old_build_set, job, final=True) - # Don't reset builds for a failing bundle when it has already started - # reporting, to keep available build results. Those items will be - # reported immediately afterwards during queue processing. - if (prime and item.current_build_set.ref and not - item.didBundleStartReporting()): + if (prime and item.current_build_set.ref): # Force a dequeued result here because we haven't actually # reported the item, but we are done with this buildset. self.reportNormalBuildsetEnd( @@ -1102,8 +1127,8 @@ class PipelineManager(metaclass=ABCMeta): item.resetAllBuilds() for item_behind in item.items_behind: - log.debug("Canceling jobs for change %s, behind change %s", - item_behind.change, item.change) + log.debug("Canceling jobs for %s, behind %s", + item_behind, item) if self.cancelJobs(item_behind, prime=prime): canceled = True return canceled @@ -1128,9 +1153,15 @@ class PipelineManager(metaclass=ABCMeta): severity_error = False for err in layout.loading_errors.errors: econtext = err.key.context - if ((err.key not in parent_error_keys) or - (econtext.project_name == item.change.project.name and - econtext.branch == item.change.branch)): + matches_project_branch = False + if econtext: + for change in item.changes: + if (econtext.project_name == change.project.name and + econtext.branch == change.branch): + matches_project_branch = True + break + if (err.key not in parent_error_keys or + matches_project_branch): relevant_errors.append(err) if err.severity == model.SEVERITY_ERROR: severity_error = True @@ -1231,7 +1262,7 @@ class PipelineManager(metaclass=ABCMeta): # Untrusted layout is broken and trusted is broken or not set elif untrusted_layout and untrusted_errors: # Find a layout loading error that match - # the current item.change and only report + # the current item.changes and only report # if one is found. relevant_errors, severity_error = self._findRelevantErrors( item, untrusted_layout) @@ -1246,7 +1277,7 @@ class PipelineManager(metaclass=ABCMeta): # Trusted layout is broken elif trusted_layout and trusted_errors: # Find a layout loading error that match - # the current item.change and only report + # the current item.changes and only report # if one is found. 
relevant_errors, severity_error = self._findRelevantErrors( item, trusted_layout) @@ -1273,10 +1304,11 @@ class PipelineManager(metaclass=ABCMeta): # Get the branches of the item and all items ahead project_branches = {} while item: - if hasattr(item.change, 'branch'): - this_project_branches = project_branches.setdefault( - item.change.project.canonical_name, set()) - this_project_branches.add(item.change.branch) + for change in item.changes: + if hasattr(change, 'branch'): + this_project_branches = project_branches.setdefault( + change.project.canonical_name, set()) + this_project_branches.add(change.branch) item = item.item_ahead return project_branches @@ -1315,20 +1347,8 @@ class PipelineManager(metaclass=ABCMeta): # We're probably waiting on a merge job for the item ahead. return None - # If the current change does not update the layout, use its parent. - # If the bundle doesn't update the config or the bundle updates the - # config but the current change's project is not part of the tenant - # (e.g. when dealing w/ cross-tenant cycles), use the parent layout. - if not ( - item.change.updatesConfig(item.pipeline.tenant) or - ( - item.bundle - and item.bundle.updatesConfig(item.pipeline.tenant) - and item.pipeline.tenant.getProject( - item.change.project.canonical_name - )[1] is not None - ) - ): + # If the current item does not update the layout, use its parent. + if not item.updatesConfig(): return self.getFallbackLayout(item) # Else this item updates the config, # ask the merger for the result. @@ -1338,7 +1358,7 @@ class PipelineManager(metaclass=ABCMeta): if build_set.unable_to_merge: return self.getFallbackLayout(item) - log.debug("Preparing dynamic layout for: %s" % item.change) + log.debug("Preparing dynamic layout for: %s" % item) start = time.time() layout = self._loadDynamicLayout(item) self.reportPipelineTiming('layout_generation_time', start) @@ -1356,8 +1376,9 @@ class PipelineManager(metaclass=ABCMeta): tenant.getProjectBranches(project.canonical_name)) # Additionally add all target branches of all involved items. - branches.update(item.change.branch for item in items - if hasattr(item.change, 'branch')) + branches.update(change.branch for item in items + for change in item.changes + if hasattr(change, 'branch')) # Make sure override-checkout targets are part of the repo state for item in items: @@ -1379,13 +1400,7 @@ class PipelineManager(metaclass=ABCMeta): def scheduleMerge(self, item, files=None, dirs=None): log = item.annotateLogger(self.log) - build_set = item.bundle_build_set - if build_set.item is not item: - # This is a bundle, but this isn't the bundle buildset. - # Just say we're not ready and let the real one get - # scheduled later. - return False - + build_set = item.current_build_set log.debug("Scheduling merge for item %s (files: %s, dirs: %s)" % (item, files, dirs)) @@ -1394,16 +1409,16 @@ class PipelineManager(metaclass=ABCMeta): # change that is tested. 
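+        # (Repo state branches are collected across all changes of
+        # each item, since an item may now contain multiple changes.)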
tenant = item.pipeline.tenant items = list(item.items_ahead) + [item] - if item.bundle: - items.extend(item.bundle.items) projects = { - item.change.project for item in items - if tenant.getProject(item.change.project.canonical_name)[1] + change.project for i in items for change in i.changes + if tenant.getProject(change.project.canonical_name)[1] } branches = self._branchesForRepoState(projects=projects, tenant=tenant, items=items) - if isinstance(item.change, model.Change): + # Using the first change as representative of whether this + # pipeline is handling changes or refs + if isinstance(item.changes[0], model.Change): self.sched.merger.mergeChanges(build_set.merger_items, build_set, files, dirs, precedence=self.pipeline.precedence, @@ -1423,16 +1438,8 @@ class PipelineManager(metaclass=ABCMeta): log = item.annotateLogger(self.log) log.debug("Scheduling fileschanged for item %s", item) build_set = item.current_build_set - - # if base_sha is not available, fallback to branch - to_sha = getattr(item.change, "base_sha", None) - if to_sha is None: - to_sha = getattr(item.change, "branch", None) - - self.sched.merger.getFilesChanges( - item.change.project.connection_name, item.change.project.name, - item.change.ref, to_sha, build_set=build_set, - event=item.event) + self.sched.merger.getFilesChanges(item.changes, build_set=build_set, + event=item.event) build_set.updateAttributes(self.current_context, files_state=build_set.PENDING) return False @@ -1501,12 +1508,11 @@ class PipelineManager(metaclass=ABCMeta): repo_state_state=item.current_build_set.PENDING) return True - def prepareItem(self, item: QueueItem) -> bool: + def prepareItem(self, item): build_set = item.current_build_set tenant = item.pipeline.tenant # We always need to set the configuration of the item if it # isn't already set. - tpc = tenant.project_configs.get(item.change.project.canonical_name) if not build_set.ref: build_set.setConfiguration(self.current_context) @@ -1527,22 +1533,20 @@ class PipelineManager(metaclass=ABCMeta): ready = True # If the project is in this tenant, fetch missing files so we # know if it updates the config. - if tpc: + in_tenant = False + for c in item.changes: + if tenant.project_configs.get(c.project.canonical_name): + in_tenant = True + break + if in_tenant: if build_set.files_state == build_set.NEW: ready = self.scheduleFilesChanges(item) if build_set.files_state == build_set.PENDING: ready = False # If this change alters config or is live, schedule merge and # build a layout. - # If we are dealing w/ a bundle and the bundle updates config we also - # have to merge since a config change in any of the bundle's items - # applies to all items. This is, unless the current item is not part - # of this tenant (e.g. cross-tenant cycle). if build_set.merge_state == build_set.NEW: - if item.live or item.change.updatesConfig(tenant) or ( - item.bundle and - item.bundle.updatesConfig(tenant) and tpc is not None - ): + if item.live or item.updatesConfig(): # Collect extra config files and dirs of required changes. 
extra_config_files = set() extra_config_dirs = set() @@ -1611,7 +1615,7 @@ class PipelineManager(metaclass=ABCMeta): (str(e))) return False if (item.current_build_set.job_graph and - len(item.current_build_set.job_graph.jobs) > 0): + len(item.current_build_set.job_graph.job_uuids) > 0): self.sql.reportBuildsetStart(build_set) # At this point we know all frozen jobs and their repos so update the @@ -1620,12 +1624,8 @@ class PipelineManager(metaclass=ABCMeta): self.scheduleGlobalRepoState(item) if build_set.repo_state_state == build_set.PENDING: return False + item.updateJobParentData() - while item.deduplicateJobs(log): - # If we deduplicated a build, then we may need to apply - # its parent data to child jobs, so keep running this - # until we stop deduplicating builds. - pass return True def _processOneItem(self, item, nnfi): @@ -1640,24 +1640,20 @@ class PipelineManager(metaclass=ABCMeta): item_ahead = None change_queue = item.queue - if COMPONENT_REGISTRY.model_api > 3: - # This sets a QueueItem flag which is only understood by - # api 4. - meets_reqs = self.isChangeReadyToBeEnqueued( - item.change, item.event) - else: - meets_reqs = True + meets_reqs = self.areChangesReadyToBeEnqueued(item.changes, item.event) + dependency_graph = collections.OrderedDict() + self.getDependencyGraph(item.changes[0], dependency_graph, item.event, + quiet=True) abort, needs_changes = self.getMissingNeededChanges( - item.change, change_queue, item.event) + item.changes, change_queue, item.event, + dependency_graph=dependency_graph) if not (meets_reqs and not needs_changes): # It's not okay to enqueue this change, we should remove it. - log.info("Dequeuing change %s because " - "it can no longer merge" % item.change) + log.info("Dequeuing %s because " + "it can no longer merge" % item) self.cancelJobs(item) quiet_dequeue = False - if item.isBundleFailing(): - item.setDequeuedBundleFailing('Bundle is failing') - elif not meets_reqs: + if not meets_reqs: item.setDequeuedMissingRequirements() else: clist = ', '.join([c.url for c in needs_changes]) @@ -1671,67 +1667,47 @@ class PipelineManager(metaclass=ABCMeta): # just added updated versions of them, possibly # updating a cycle. In that case, attempt to # re-enqueue this change with the updated deps. 
-            if (item.live and
-                    all([self.isChangeAlreadyInPipeline(c)
-                         for c in needs_changes])):
-                # Try enqueue, if that succeeds, keep this dequeue quiet
-                try:
-                    log.info("Attempting re-enqueue of change %s",
-                             item.change)
-                    quiet_dequeue = self.addChange(
-                        item.change, item.event,
-                        enqueue_time=item.enqueue_time,
-                        quiet=True,
-                        skip_presence_check=True)
-                except Exception:
-                    log.exception("Unable to re-enqueue change %s "
-                                  "which is missing dependencies",
-                                  item.change)
+            quiet_dequeue = self.reEnqueueIfDepsPresent(
+                item, needs_changes, log)
             if item.live and not quiet_dequeue:
                 try:
                     self.reportItem(item)
                 except exceptions.MergeFailure:
                     pass
             self.dequeueItem(item, quiet_dequeue)
             return (True, nnfi)
 
-        # Safety check: verify that the bundle dependency graph is correct
-        dependency_graph = collections.OrderedDict()
-        self.getDependencyGraph(item.change, dependency_graph, item.event)
+        # Safety check: verify that the cycle dependency graph is correct
         cycle = self.cycleForChange(
-            item.change, dependency_graph, item.event, debug=False)
-        if item.bundle:
-            bundle_cycle = {i.change for i in item.bundle.items}
-        else:
-            bundle_cycle = set()
-        if (item.live and
-                set(cycle) != bundle_cycle and
-                not item.didBundleStartReporting()):
-            log.info("Item bundle has changed: %s, now: %s, was: %s", item,
-                     set(cycle), bundle_cycle)
+            item.changes[0], dependency_graph, item.event, debug=False)
+        cycle = cycle or [item.changes[0]]
+        item_cycle = set(item.changes)
+        if (item.live and set(cycle) != item_cycle):
+            log.info("Item cycle has changed: %s, now: %s, was: %s", item,
+                     set(cycle), item_cycle)
             self.removeItem(item)
+            if item.live:
+                self.reEnqueueIfDepsPresent(item, needs_changes, log,
+                                            skip_presence_check=False)
             return (True, nnfi)
 
         actionable = change_queue.isActionable(item)
         item.updateAttributes(self.current_context, active=actionable)
 
-        dep_items = self.getFailingDependentItems(item, nnfi)
+        dep_items = self.getFailingDependentItems(item)
         if dep_items:
             failing_reasons.append('a needed change is failing')
             self.cancelJobs(item, prime=False)
         else:
-            item_ahead_merged = False
-            if (item_ahead and
-                hasattr(item_ahead.change, 'is_merged') and
-                item_ahead.change.is_merged):
-                item_ahead_merged = True
+            item_ahead_merged = (item_ahead.areAllChangesMerged()
+                                 if item_ahead else False)
             if (item_ahead != nnfi and not item_ahead_merged):
                 # Our current base is different than what we expected,
                 # and it's not because our current base merged. Something
                 # ahead must have failed.
-                log.info("Resetting builds for change %s because the "
+                log.info("Resetting builds for changes %s because the "
                          "item ahead, %s, is not the nearest non-failing "
-                         "item, %s" % (item.change, item_ahead, nnfi))
+                         "item, %s" % (item.changes, item_ahead, nnfi))
                 change_queue.moveItem(item, nnfi)
                 changed = True
                 self.cancelJobs(item)
@@ -1739,10 +1715,12 @@
         ready = self.prepareItem(item)
         # Starting jobs reporting should only be done once if there are
         # jobs to run for this item.
- if ready and len(self.pipeline.start_actions) > 0 \ - and len(item.current_build_set.job_graph.jobs) > 0 \ - and not item.reported_start \ - and not item.quiet: + if (ready + and len(self.pipeline.start_actions) > 0 + and len(item.current_build_set.job_graph.job_uuids) > 0 + and not item.reported_start + and not item.quiet + ): self.reportStart(item) item.updateAttributes(self.current_context, reported_start=True) @@ -1752,12 +1730,6 @@ class PipelineManager(metaclass=ABCMeta): failing_reasons.append("it has an invalid configuration") if ready and self.provisionNodes(item): changed = True - if ready and item.bundle and item.didBundleFinish(): - # Since the bundle finished we need to check if any item - # can report. If that's the case we need to process the - # queue again. - changed = changed or any( - i.item_ahead is None for i in item.bundle.items) if ready and self.executeJobs(item): changed = True @@ -1769,61 +1741,47 @@ class PipelineManager(metaclass=ABCMeta): changed = dequeued = True can_report = not item_ahead and item.areAllJobsComplete() and item.live - if can_report and item.bundle: - can_report = can_report and ( - item.isBundleFailing() or item.didBundleFinish() - ) + is_cycle = len(item.changes) > 1 + if can_report and is_cycle: # Before starting to merge the cycle items, make sure they # can still be merged, to reduce the chance of a partial merge. - if can_report and not item.bundle.started_reporting: - non_mergeable_cycle_changes = self.getNonMergeableCycleChanges( - item.bundle) - if non_mergeable_cycle_changes: - clist = ', '.join([ - c.url for c in non_mergeable_cycle_changes]) - if len(non_mergeable_cycle_changes) > 1: - msg = f'Changes {clist} can not be merged.' - else: - msg = f'Change {clist} can not be merged.' - item.bundle.cannot_merge = msg - failing_reasons.append("cycle can not be merged") - log.debug( - "Dequeuing item %s because cycle can no longer merge", - item - ) - item.bundle.started_reporting = can_report + non_mergeable_cycle_changes = self.getNonMergeableCycleChanges( + item) + if non_mergeable_cycle_changes: + clist = ', '.join([ + c.url for c in non_mergeable_cycle_changes]) + if len(non_mergeable_cycle_changes) > 1: + msg = f'Changes {clist} can not be merged.' + else: + msg = f'Change {clist} can not be merged.' + item.setDequeuedNeedingChange(msg) + failing_reasons.append("cycle can not be merged") + log.debug( + "Dequeuing item %s because cycle can no longer merge", + item + ) + try: + self.reportItem(item) + except exceptions.MergeFailure: + pass + self.dequeueItem(item) + return (True, nnfi) if can_report: - # If we're starting to report a successful bundle, enable - # two-phase reporting. Report the first phase for every item - # in the bundle, then the second. 
- phase1 = True - phase2 = True - if (self.changes_merge - and item.bundle - and (not item.cannotMergeBundle()) - and (not item.isBundleFailing())): - for i in item.bundle.items: - if not i.reported: - self.log.debug("Report phase1 for bundle item %s", i) - self.reportItem(i, phase1=True, phase2=False) - phase1 = False - + succeeded = item.didAllJobsSucceed() try: - self.reportItem(item, phase1=phase1, phase2=phase2) + self.reportItem(item) except exceptions.MergeFailure: failing_reasons.append("it did not merge") for item_behind in item.items_behind: - log.info("Resetting builds for change %s because the " + log.info("Resetting builds for %s because the " "item ahead, %s, failed to merge" % - (item_behind.change, item)) + (item_behind, item)) self.cancelJobs(item_behind) # Only re-report items in the cycle when we encounter a merge - # failure for a successful bundle. - if (item.bundle and not ( - item.isBundleFailing() or item.cannotMergeBundle())): - item.bundle.failed_reporting = True - self.reportProcessedBundleItems(item) + # failure for a successful cycle. + if is_cycle and succeeded: + self.sendReport(self.pipeline.failure_actions, item) self.dequeueItem(item) changed = dequeued = True elif not failing_reasons and item.live: @@ -1836,8 +1794,8 @@ class PipelineManager(metaclass=ABCMeta): (item, failing_reasons)) if (item.live and not dequeued and self.sched.globals.use_relative_priority): - priority = item.getNodePriority() - for _, request_id in item.current_build_set.getNodeRequests(): + for job, request_id in \ + item.current_build_set.getNodeRequests(): node_request = self.sched.nodepool.zk_nodepool.getNodeRequest( request_id, cached=True) if not node_request: @@ -1846,29 +1804,14 @@ class PipelineManager(metaclass=ABCMeta): # If the node request was locked and accepted by a # provider, we can no longer update the relative priority. continue + priority = self.getNodePriority( + item, + item.getChangeForJob(job)) if node_request.relative_priority != priority: self.sched.nodepool.reviseRequest( node_request, priority) return (changed, nnfi) - def reportProcessedBundleItems(self, item): - """Report failure to already reported bundle items. - - In case we encounter e.g. a merge failure when we already successfully - reported some items, we need to go back and report again. - """ - reported_items = [i for i in item.bundle.items if i.reported] - - actions = self.pipeline.failure_actions - for ri in reported_items: - self.sendReport(actions, ri) - if ri is not item: - # Don't override the reported sql result for the item - # that "really" failed. 
- ri.setReportedResult('FAILURE') - self.reportNormalBuildsetEnd(ri.current_build_set, - 'failure', final=True) - def processQueue(self): # Do whatever needs to be done for each change in the queue self.log.debug("Starting queue processor: %s" % self.pipeline.name) @@ -1883,7 +1826,8 @@ class PipelineManager(metaclass=ABCMeta): if item_changed: queue_changed = True self.reportStats(item) - change_keys.add(item.change.cache_stat.key) + for change in item.changes: + change_keys.add(change.cache_stat.key) if queue_changed: changed = True status = '' @@ -1921,36 +1865,14 @@ class PipelineManager(metaclass=ABCMeta): def onBuildPaused(self, build): log = get_annotated_logger(self.log, build.zuul_event_id) item = build.build_set.item - log.debug("Build %s of %s paused", build, item.change) - for item_with_build in self._getItemsWithBuild(build): - item_with_build.setResult(build) + log.debug("Build %s of %s paused", build, item) + item.setResult(build) # We need to resume builds because we could either have no children # or have children that are already skipped. self._resumeBuilds(build.build_set) return True - def _legacyGetJob(self, item, job): - # TODO (model_api<21): The "this_job" indirection can be - # removed when the circular dependency refactor is complete. - # Until then, we can't assume that the build (and therefore - # job) is within the current buildset (if it has been - # deduplicated). - try: - this_uuid = item.current_build_set.job_graph.getUuidForJobName( - job.name) - except ValueError: - # This doesn't currently raise a ValueError, it just - # returns None, but that could easily change during - # refactoring so let's go ahead and handle both. - this_uuid = None - if this_uuid is None: - # This is the model_api < 18 case, we're going to end up - # looking up dependencies by name anyway so the specific - # frozen job object doesn't matter. - return job - return item.current_build_set.job_graph.getJobFromUuid(this_uuid) - def _resumeBuilds(self, build_set): """ Resumes all paused builds of a buildset that may be resumed. @@ -1960,13 +1882,12 @@ class PipelineManager(metaclass=ABCMeta): continue # check if all child jobs are finished child_builds = [] - for item in self._getItemsWithBuild(build): - job_graph = item.current_build_set.job_graph - _this_job = self._legacyGetJob(item, build.job) - child_builds += [ - item.current_build_set.getBuild(x) - for x in job_graph.getDependentJobsRecursively( - _this_job)] + item = build.build_set.item + job_graph = item.current_build_set.job_graph + child_builds += [ + item.current_build_set.getBuild(x) + for x in job_graph.getDependentJobsRecursively( + build.job)] all_completed = True for child_build in child_builds: if not child_build or not child_build.result: @@ -1985,8 +1906,7 @@ class PipelineManager(metaclass=ABCMeta): def _resetDependentBuilds(self, build_set, build): job_graph = build_set.job_graph - _this_job = self._legacyGetJob(build_set.item, build.job) - for job in job_graph.getDependentJobsRecursively(_this_job): + for job in job_graph.getDependentJobsRecursively(build.job): self.sched.cancelJob(build_set, job) build = build_set.getBuild(job) if build: @@ -2005,77 +1925,68 @@ class PipelineManager(metaclass=ABCMeta): if not build or not build.result: self.sched.cancelJob(build_set, job, final=True) - def _getItemsWithBuild(self, build): - # If the build was for deduplicated jobs, apply the results to - # all the items that use this build. 
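With deduplicated builds now shared through a single queue item, the resume check collapses to the item's own job graph. A minimal sketch of the rule, with object shapes assumed from the surrounding code:

    def may_resume(build):
        item = build.build_set.item
        job_graph = item.current_build_set.job_graph
        for job in job_graph.getDependentJobsRecursively(build.job):
            child = item.current_build_set.getBuild(job)
            if not child or not child.result:
                return False  # a dependent build has not finished yet
        return True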
- item = build.build_set.item - build_in_items = [item] - - for bundle in item.findDuplicateBundles(): - for other_item in bundle.items: - if other_item in build_in_items: - continue - if build in other_item.current_build_set.getBuilds(): - build_in_items.append(other_item) - - return build_in_items - def onBuildCompleted(self, build): log = get_annotated_logger(self.log, build.zuul_event_id) item = build.build_set.item - log.debug("Build %s of %s completed" % (build, item.change)) + log.debug("Build %s of %s completed", build, item) event_queue = self.sched.pipeline_result_events[ item.pipeline.tenant.name][item.pipeline.name] item.pipeline.tenant.semaphore_handler.release( event_queue, item, build.job) - if item.getJob(build.job.name) is None: + # MODEL_API < 25 + if item.getJob(build.job.uuid or build.job.name) is None: log.info("Build %s no longer in job graph for item %s", build, item) return - for item in self._getItemsWithBuild(build): - # We don't care about some actions below if this build - # isn't in the current buildset, so determine that before - # it is potentially removed with setResult. - if build not in item.current_build_set.getBuilds(): - current = False - else: - current = True - item.setResult(build) - log.debug("Item %s status is now:\n %s", item, item.formatStatus()) + item = build.build_set.item + # We don't care about some actions below if this build + # isn't in the current buildset, so determine that before + # it is potentially removed with setResult. + if build not in item.current_build_set.getBuilds(): + current = False + else: + current = True + item.setResult(build) + log.debug("Item %s status is now:\n %s", item, item.formatStatus()) - if not current: - continue - build_set = item.current_build_set + if not current: + return + build_set = item.current_build_set - if build.retry: - if build_set.getJobNodeSetInfo(build.job): - build_set.removeJobNodeSetInfo(build.job) + if build.retry: + if build_set.getJobNodeSetInfo(build.job): + build_set.removeJobNodeSetInfo(build.job) - # in case this was a paused build we need to retry all - # child jobs - self._resetDependentBuilds(build_set, build) + # in case this was a paused build we need to retry all + # child jobs + self._resetDependentBuilds(build_set, build) - self._resumeBuilds(build_set) + self._resumeBuilds(build_set) - if (build_set.fail_fast and - build.failed and build.job.voting and not build.retry): - # If fail-fast is set and the build is not successful - # cancel all remaining jobs. - log.debug("Build %s failed and fail-fast enabled, canceling " - "running builds", build) - self._cancelRunningBuilds(build_set) + if (build_set.fail_fast and + build.failed and build.job.voting and not build.retry): + # If fail-fast is set and the build is not successful + # cancel all remaining jobs. 
+ log.debug("Build %s failed and fail-fast enabled, canceling " + "running builds", build) + self._cancelRunningBuilds(build_set) return True def onFilesChangesCompleted(self, event, build_set): item = build_set.item - source = self.sched.connections.getSource( - item.change.project.connection_name) - source.setChangeAttributes(item.change, files=event.files) + for i, change in enumerate(item.changes): + source = self.sched.connections.getSource( + change.project.connection_name) + if event.files: + change_files = event.files[i] + else: + change_files = None + source.setChangeAttributes(change, files=change_files) build_set.updateAttributes(self.current_context, files_state=build_set.COMPLETE) if build_set.merge_state == build_set.COMPLETE: @@ -2107,36 +2018,33 @@ class PipelineManager(metaclass=ABCMeta): def _onMergeCompleted(self, event, build_set): item = build_set.item log = get_annotated_logger(self.log, item.event) - source = self.sched.connections.getSource( - item.change.project.connection_name) - if isinstance(item.change, model.Tag): + if isinstance(item.changes[0], model.Tag): + # Since this is only used for Tag items, we know that + # circular dependencies are not in play and there is only + # one change in the list of changes. + if len(item.changes) > 1: + raise Exception("Tag item with more than one change") + change = item.changes[0] + source = self.sched.connections.getSource( + change.project.connection_name) source.setChangeAttributes( - item.change, containing_branches=event.item_in_branches) - for other_item in item.bundle_items: - other_build_set = other_item.current_build_set - with other_build_set.activeContext(self.current_context): - other_build_set.setMergeRepoState(event.repo_state) - other_build_set.merge_state = other_build_set.COMPLETE - if event.merged: - other_build_set.commit = event.commit - other_build_set.setFiles(event.files) - elif event.updated: - other_build_set.commit = ( - item.change.newrev or - '0000000000000000000000000000000000000000') - if not other_build_set.commit: - log.info("Unable to merge change %s" % item.change) - item.setUnableToMerge(event.errors) + change, containing_branches=event.item_in_branches) + with build_set.activeContext(self.current_context): + build_set.setMergeRepoState(event.repo_state) + build_set.merge_state = build_set.COMPLETE + if event.merged: + build_set.setFiles(event.files) + if not (event.merged or event.updated): + log.info("Unable to merge %s" % item) + item.setUnableToMerge(event.errors) def _onGlobalRepoStateCompleted(self, event, build_set): item = build_set.item if not event.updated: - self.log.info("Unable to get global repo state for change %s" - % item.change) + self.log.info("Unable to get global repo state for %s", item) item.setUnableToMerge(event.errors) else: - self.log.info("Received global repo state for change %s" - % item.change) + self.log.info("Received global repo state for %s", item) with build_set.activeContext(self.current_context): build_set.setExtraRepoState(event.repo_state) build_set.repo_state_state = build_set.COMPLETE @@ -2158,7 +2066,9 @@ class PipelineManager(metaclass=ABCMeta): # Make a new request if self.sched.globals.use_relative_priority: - relative_priority = build_set.item.getNodePriority() + relative_priority = self.getNodePriority( + build_set.item, + build_set.item.getChangeForJob(job)) else: relative_priority = 0 log = build_set.item.annotateLogger(self.log) @@ -2169,7 +2079,7 @@ class PipelineManager(metaclass=ABCMeta): log = get_annotated_logger(self.log, 
request.event_id) self.reportPipelineTiming('node_request_time', request.created_time) - job = build_set.item.getJob(request._job_id) + job = build_set.item.getJob(request.job_uuid) # First see if we need to retry the request if not request.fulfilled: log.info("Node request %s: failure for %s", @@ -2195,29 +2105,44 @@ "with nodes %s", request, job.name, build_set.item, request.nodes) - def reportItem(self, item, phase1=True, phase2=True): + def reportItem(self, item): log = get_annotated_logger(self.log, item.event) action = None + phase1 = True + phase2 = True + is_cycle = len(item.changes) > 1 + succeeded = item.didAllJobsSucceed() already_reported = item.reported - if phase2 and not phase1: - already_reported = False + if (self.changes_merge + and is_cycle + and succeeded): + phase2 = False if not already_reported: - action, reported = self._reportItem(item, phase1, phase2) + action, reported = self._reportItem( + item, phase1=phase1, phase2=phase2) + if phase2 is False: + phase1 = False + phase2 = True + action, reported = self._reportItem( + item, phase1=phase1, phase2=phase2) item.updateAttributes(self.current_context, reported=reported) + if not phase2: return if self.changes_merge: - succeeded = item.didAllJobsSucceed() and not item.isBundleFailing() merged = item.reported - source = item.change.project.source if merged: - merged = source.isMerged(item.change, item.change.branch) + for change in item.changes: + source = change.project.source + merged = source.isMerged(change, change.branch) + if not merged: + break if action: if action == 'success' and not merged: log.debug("Overriding result for %s to merge failure", - item.change) + item) action = 'merge-failure' item.setReportedResult('MERGE_FAILURE') self.reportNormalBuildsetEnd(item.current_build_set, @@ -2225,26 +2150,26 @@ change_queue = item.queue if not (succeeded and merged): if (not item.current_build_set.job_graph or - not item.current_build_set.job_graph.jobs): + not item.current_build_set.job_graph.job_uuids): error_reason = "did not have any jobs configured" elif not succeeded: error_reason = "failed tests" else: error_reason = "failed to merge" - log.info("Reported change %s did not merge because it %s, " + log.info("Changes for %s did not merge because the item %s, " "status: all-succeeded: %s, merged: %s", - item.change, error_reason, succeeded, merged) + item, error_reason, succeeded, merged) if not succeeded: change_queue.decreaseWindowSize() log.debug("%s window size decreased to %s", change_queue, change_queue.window) raise exceptions.MergeFailure( - "Change %s failed to merge" % item.change) + "Changes for %s failed to merge" % item) else: self.reportNormalBuildsetEnd(item.current_build_set, action, final=True) - log.info("Reported change %s status: all-succeeded: %s, " - "merged: %s", item.change, succeeded, merged) + log.info("Reported %s status: all-succeeded: %s, " + "merged: %s", item, succeeded, merged) change_queue.increaseWindowSize() log.debug("%s window size increased to %s", change_queue, change_queue.window) @@ -2252,15 +2177,17 @@ zuul_driver = self.sched.connections.drivers['zuul'] tenant = self.pipeline.tenant with trace.use_span(tracing.restoreSpan(item.span_info)): - zuul_driver.onChangeMerged(tenant, item.change, source) + for change in item.changes: + source = change.project.source + zuul_driver.onChangeMerged(tenant, change, source) elif action:
self.reportNormalBuildsetEnd(item.current_build_set, action, final=True) def _reportItem(self, item, phase1, phase2): log = get_annotated_logger(self.log, item.event) - log.debug("Reporting phase1: %s phase2: %s change: %s", - phase1, phase2, item.change) + log.debug("Reporting phase1: %s phase2: %s item: %s", + phase1, phase2, item) ret = True # Means error as returned by trigger.report # In the case of failure, we may not have completed an initial @@ -2280,19 +2207,22 @@ class PipelineManager(metaclass=ABCMeta): layout = self.pipeline.tenant.layout try: - project_in_pipeline = bool( - layout.getProjectPipelineConfig(item)) + for change in item.changes: + project_in_pipeline = bool( + layout.getProjectPipelineConfig(item, change)) + if project_in_pipeline: + break except Exception: - log.exception("Invalid config for change %s", item.change) + log.exception("Invalid config for %s", item) if not project_in_pipeline: - log.debug("Project %s not in pipeline %s for change %s", - item.change.project, self.pipeline, item.change) + log.debug("Project not in pipeline %s for %s", + self.pipeline, item) project_in_pipeline = False action = 'no-jobs' actions = self.pipeline.no_jobs_actions item.setReportedResult('NO_JOBS') elif item.current_build_set.has_blocking_errors: - log.debug("Invalid config for change %s", item.change) + log.debug("Invalid config for %s", item) action = 'config-error' actions = self.pipeline.config_error_actions item.setReportedResult('CONFIG_ERROR') @@ -2313,24 +2243,11 @@ class PipelineManager(metaclass=ABCMeta): item.setReportedResult('FAILURE') elif not item.getJobs(): # We don't send empty reports with +1 - log.debug("No jobs for change %s", item.change) + log.debug("No jobs for %s", item) action = 'no-jobs' actions = self.pipeline.no_jobs_actions item.setReportedResult('NO_JOBS') - elif item.cannotMergeBundle(): - log.debug("Bundle can not be merged") - action = 'failure' - actions = self.pipeline.failure_actions - item.setReportedResult("FAILURE") - elif item.isBundleFailing(): - log.debug("Bundle is failing") - action = 'failure' - actions = self.pipeline.failure_actions - item.setReportedResult("FAILURE") - if not item.didAllJobsSucceed(): - with self.pipeline.state.activeContext(self.current_context): - self.pipeline.state.consecutive_failures += 1 - elif item.didAllJobsSucceed() and not item.isBundleFailing(): + elif item.didAllJobsSucceed(): log.debug("success %s", self.pipeline.success_actions) action = 'success' actions = self.pipeline.success_actions @@ -2368,9 +2285,13 @@ class PipelineManager(metaclass=ABCMeta): # when dequeing. 
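The reportItem change above folds the old bundle machinery into a single method: a successful multi-change item in a change-merging pipeline is reported in two passes. Schematically (return-value handling omitted; self.changes_merge and _reportItem as in the patch):

    def report_item(self, item):
        succeeded = item.didAllJobsSucceed()
        if self.changes_merge and len(item.changes) > 1 and succeeded:
            # Vote on every change first, then submit them together;
            # this narrows the window for a partial merge.
            self._reportItem(item, phase1=True, phase2=False)
            self._reportItem(item, phase1=False, phase2=True)
        else:
            self._reportItem(item, phase1=True, phase2=True)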
if item.dequeue_time: dt = (item.dequeue_time - item.enqueue_time) * 1000 + item_changes = len(item.changes) else: dt = None - items = len(self.pipeline.getAllItems()) + item_changes = 0 + changes = sum(len(i.changes) + for i in self.pipeline.getAllItems()) + # TODO(jeblair): add items keys like changes tenant = self.pipeline.tenant basekey = 'zuul.tenant.%s' % tenant.name @@ -2379,11 +2300,11 @@ class PipelineManager(metaclass=ABCMeta): # stats_counts.zuul.tenant..pipeline..total_changes # stats.gauges.zuul.tenant..pipeline..current_changes # stats.gauges.zuul.tenant..pipeline..window - self.sched.statsd.gauge(key + '.current_changes', items) + self.sched.statsd.gauge(key + '.current_changes', changes) self.sched.statsd.gauge(key + '.window', item.pipeline.window) if dt: self.sched.statsd.timing(key + '.resident_time', dt) - self.sched.statsd.incr(key + '.total_changes') + self.sched.statsd.incr(key + '.total_changes', item_changes) if item.queue and item.queue.name: queuename = (item.queue.name. replace('.', '_').replace('/', '.')) @@ -2392,30 +2313,34 @@ class PipelineManager(metaclass=ABCMeta): # stats.gauges.zuul.tenant..pipeline..queue..current_changes # stats.gauges.zuul.tenant..pipeline..queue..window queuekey = '%s.queue.%s' % (key, queuename) + queue_changes = sum(len(i.changes) for i in item.queue.queue) self.sched.statsd.gauge(queuekey + '.current_changes', - len(item.queue.queue)) + queue_changes) self.sched.statsd.gauge(queuekey + '.window', item.queue.window) if dt: self.sched.statsd.timing(queuekey + '.resident_time', dt) - self.sched.statsd.incr(queuekey + '.total_changes') - if hasattr(item.change, 'branch'): - hostname = (item.change.project.canonical_hostname. - replace('.', '_')) - projectname = (item.change.project.name. - replace('.', '_').replace('/', '.')) - projectname = projectname.replace('.', '_').replace('/', '.') - branchname = item.change.branch.replace('.', '_').replace( - '/', '.') - # stats.timers.zuul.tenant..pipeline.. - # project....resident_time - # stats_counts.zuul.tenant..pipeline.. - # project....total_changes - key += '.project.%s.%s.%s' % (hostname, projectname, - branchname) - if dt: - self.sched.statsd.timing(key + '.resident_time', dt) - self.sched.statsd.incr(key + '.total_changes') + self.sched.statsd.incr(queuekey + '.total_changes', + item_changes) + for change in item.changes: + if hasattr(change, 'branch'): + hostname = (change.project.canonical_hostname. + replace('.', '_')) + projectname = (change.project.name. + replace('.', '_').replace('/', '.')) + projectname = projectname.replace('.', '_').replace( + '/', '.') + branchname = change.branch.replace('.', '_').replace( + '/', '.') + # stats.timers.zuul.tenant..pipeline.. + # project....resident_time + # stats_counts.zuul.tenant..pipeline.. + # project....total_changes + key += '.project.%s.%s.%s' % (hostname, projectname, + branchname) + if dt: + self.sched.statsd.timing(key + '.resident_time', dt) + self.sched.statsd.incr(key + '.total_changes') if ( trigger_event and hasattr(trigger_event, 'arrived_at_scheduler_timestamp') diff --git a/zuul/manager/dependent.py b/zuul/manager/dependent.py index f9c9e06682..d9d37ebe77 100644 --- a/zuul/manager/dependent.py +++ b/zuul/manager/dependent.py @@ -1,3 +1,5 @@ +# Copyright 2024 Acme Gating, LLC +# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain # a copy of the License at @@ -43,35 +45,34 @@ class DependentPipelineManager(SharedQueuePipelineManager): window_decrease_factor=p.window_decrease_factor, name=queue_name) - def getNodePriority(self, item): - with self.getChangeQueue(item.change, item.event) as change_queue: - items = change_queue.queue - return items.index(item) + def getNodePriority(self, item, change): + return item.queue.queue.index(item) - def isChangeReadyToBeEnqueued(self, change, event): + def areChangesReadyToBeEnqueued(self, changes, event): log = get_annotated_logger(self.log, event) - source = change.project.source - if not source.canMerge(change, self.getSubmitAllowNeeds(), - event=event): - log.debug("Change %s can not merge", change) - return False + for change in changes: + source = change.project.source + if not source.canMerge(change, self.getSubmitAllowNeeds(), + event=event): + log.debug("Change %s can not merge", change) + return False return True - def getNonMergeableCycleChanges(self, bundle): + def getNonMergeableCycleChanges(self, item): """Return changes in the cycle that do not fulfill the pipeline's ready criteria.""" changes = [] - for item in bundle.items: - source = item.change.project.source + for change in item.changes: + source = change.project.source if not source.canMerge( - item.change, + change, self.getSubmitAllowNeeds(), event=item.event, allow_refresh=True, ): log = get_annotated_logger(self.log, item.event) - log.debug("Change %s can no longer be merged", item.change) - changes.append(item.change) + log.debug("Change %s can no longer be merged", change) + changes.append(change) return changes def enqueueChangesBehind(self, change, event, quiet, ignore_requirements, @@ -142,29 +143,26 @@ class DependentPipelineManager(SharedQueuePipelineManager): change_queue=change_queue, history=history, dependency_graph=dependency_graph) - def enqueueChangesAhead(self, change, event, quiet, ignore_requirements, + def enqueueChangesAhead(self, changes, event, quiet, ignore_requirements, change_queue, history=None, dependency_graph=None, warnings=None): log = get_annotated_logger(self.log, event) history = history if history is not None else [] - if hasattr(change, 'number'): - history.append(change) - else: - # Don't enqueue dependencies ahead of a non-change ref. - return True + for change in changes: + if hasattr(change, 'number'): + history.append(change) + else: + # Don't enqueue dependencies ahead of a non-change ref. + return True abort, needed_changes = self.getMissingNeededChanges( - change, change_queue, event, + changes, change_queue, event, dependency_graph=dependency_graph, warnings=warnings) if abort: return False - # Treat cycle dependencies as needed for the current change - needed_changes.extend( - self.getCycleDependencies(change, dependency_graph, event)) - if not needed_changes: return True log.debug(" Changes %s must be merged ahead of %s", @@ -183,107 +181,93 @@ class DependentPipelineManager(SharedQueuePipelineManager): return False return True - def getMissingNeededChanges(self, change, change_queue, event, + def getMissingNeededChanges(self, changes, change_queue, event, dependency_graph=None, warnings=None): log = get_annotated_logger(self.log, event) + changes_needed = [] + abort = False # Return true if okay to proceed enqueing this change, # false if the change should not be enqueued. 
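Both areChangesReadyToBeEnqueued and getNonMergeableCycleChanges apply the same per-change test across the whole cycle. Roughly, with allow_needs standing in for getSubmitAllowNeeds():

    def non_mergeable_changes(item, allow_needs):
        # Every change in the cycle must be acceptable to its own source.
        return [change for change in item.changes
                if not change.project.source.canMerge(
                    change, allow_needs, event=item.event,
                    allow_refresh=True)]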
- log.debug("Checking for changes needed by %s:" % change) - if not isinstance(change, model.Change): - log.debug(" %s does not support dependencies", type(change)) - return False, [] - if not change.getNeedsChanges( - self.useDependenciesByTopic(change.project)): - log.debug(" No changes needed") - return False, [] - changes_needed = [] - abort = False - # Ignore supplied change_queue - with self.getChangeQueue(change, event) as change_queue: - for needed_change in self.resolveChangeReferences( - change.getNeedsChanges( - self.useDependenciesByTopic(change.project))): - log.debug(" Change %s needs change %s:" % ( - change, needed_change)) - if needed_change.is_merged: - log.debug(" Needed change is merged") - continue - - if dependency_graph is not None: - log.debug(" Adding change %s to dependency graph for " - "change %s", needed_change, change) - node = dependency_graph.setdefault(change, []) - node.append(needed_change) - - if (self.pipeline.tenant.max_dependencies is not None and - dependency_graph is not None and - (len(dependency_graph) > - self.pipeline.tenant.max_dependencies)): - log.debug(" Dependency graph for change %s is too large", - change) - return True, [] - - with self.getChangeQueue(needed_change, - event) as needed_change_queue: - if needed_change_queue != change_queue: - msg = ("Change %s in project %s does not " - "share a change queue with %s " - "in project %s" % - (needed_change.number, - needed_change.project, - change.number, - change.project)) - log.debug(" " + msg) - if warnings is not None: - warnings.append(msg) + for change in changes: + log.debug("Checking for changes needed by %s:" % change) + if not isinstance(change, model.Change): + log.debug(" %s does not support dependencies", type(change)) + continue + needed_changes = dependency_graph.get(change) + if not needed_changes: + log.debug(" No changes needed") + continue + # Ignore supplied change_queue + with self.getChangeQueue(change, event) as change_queue: + for needed_change in needed_changes: + log.debug(" Change %s needs change %s:" % ( + change, needed_change)) + if needed_change.is_merged: + log.debug(" Needed change is merged") + continue + with self.getChangeQueue(needed_change, + event) as needed_change_queue: + if needed_change_queue != change_queue: + msg = ("Change %s in project %s does not " + "share a change queue with %s " + "in project %s" % + (needed_change.number, + needed_change.project, + change.number, + change.project)) + log.debug(" " + msg) + if warnings is not None: + warnings.append(msg) + changes_needed.append(needed_change) + abort = True + if not needed_change.is_current_patchset: + log.debug(" Needed change is not " + "the current patchset") changes_needed.append(needed_change) abort = True - if not needed_change.is_current_patchset: - log.debug(" Needed change is not the current patchset") - changes_needed.append(needed_change) - abort = True - if self.isChangeAlreadyInQueue(needed_change, change_queue): - log.debug(" Needed change is already ahead in the queue") - continue - if needed_change.project.source.canMerge( - needed_change, self.getSubmitAllowNeeds(), - event=event): - log.debug(" Change %s is needed", needed_change) - if needed_change not in changes_needed: - changes_needed.append(needed_change) + if needed_change in changes: + log.debug(" Needed change is in cycle") continue - # The needed change can't be merged. 
- log.debug(" Change %s is needed but can not be merged", - needed_change) - changes_needed.append(needed_change) - abort = True + if self.isChangeAlreadyInQueue( + needed_change, change_queue): + log.debug(" Needed change is already " + "ahead in the queue") + continue + if needed_change.project.source.canMerge( + needed_change, self.getSubmitAllowNeeds(), + event=event): + log.debug(" Change %s is needed", needed_change) + if needed_change not in changes_needed: + changes_needed.append(needed_change) + continue + else: + # The needed change can't be merged. + log.debug(" Change %s is needed " + "but can not be merged", + needed_change) + changes_needed.append(needed_change) + abort = True return abort, changes_needed - def getFailingDependentItems(self, item, nnfi): - if not isinstance(item.change, model.Change): - return None - if not item.change.getNeedsChanges( - self.useDependenciesByTopic(item.change.project)): - return None + def getFailingDependentItems(self, item): failing_items = set() - for needed_change in self.resolveChangeReferences( - item.change.getNeedsChanges( - self.useDependenciesByTopic(item.change.project))): - needed_item = self.getItemForChange(needed_change) - if not needed_item: + for change in item.changes: + if not isinstance(change, model.Change): continue - if needed_item.current_build_set.failing_reasons: - failing_items.add(needed_item) - # Only look at the bundle if the item ahead is the nearest non-failing - # item. This is important in order to correctly reset the bundle items - # in case of a failure. - if item.item_ahead == nnfi and item.isBundleFailing(): - failing_items.update(item.bundle.items) - failing_items.remove(item) - if failing_items: - return failing_items - return None + needs_changes = change.getNeedsChanges( + self.useDependenciesByTopic(change.project)) + if not needs_changes: + continue + for needed_change in self.resolveChangeReferences(needs_changes): + needed_item = self.getItemForChange(needed_change) + if not needed_item: + continue + if needed_item is item: + continue + if needed_item.current_build_set.failing_reasons: + failing_items.add(needed_item) + return failing_items def dequeueItem(self, item, quiet=False): super(DependentPipelineManager, self).dequeueItem(item, quiet) diff --git a/zuul/manager/independent.py b/zuul/manager/independent.py index 3710de75cb..ad1d0bfcb2 100644 --- a/zuul/manager/independent.py +++ b/zuul/manager/independent.py @@ -1,3 +1,5 @@ +# Copyright 2021-2024 Acme Gating, LLC +# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at @@ -37,28 +39,25 @@ class IndependentPipelineManager(PipelineManager): log.debug("Dynamically created queue %s", change_queue) return DynamicChangeQueueContextManager(change_queue) - def enqueueChangesAhead(self, change, event, quiet, ignore_requirements, + def enqueueChangesAhead(self, changes, event, quiet, ignore_requirements, change_queue, history=None, dependency_graph=None, warnings=None): log = get_annotated_logger(self.log, event) history = history if history is not None else [] - if hasattr(change, 'number'): - history.append(change) - else: - # Don't enqueue dependencies ahead of a non-change ref. - return True + for change in changes: + if hasattr(change, 'number'): + history.append(change) + else: + # Don't enqueue dependencies ahead of a non-change ref. 
+ return True abort, needed_changes = self.getMissingNeededChanges( - change, change_queue, event, + changes, change_queue, event, dependency_graph=dependency_graph) if abort: return False - # Treat cycle dependencies as needed for the current change - needed_changes.extend( - self.getCycleDependencies(change, dependency_graph, event)) - if not needed_changes: return True log.debug(" Changes %s must be merged ahead of %s" % ( @@ -80,55 +79,43 @@ return False return True - def getMissingNeededChanges(self, change, change_queue, event, + def getMissingNeededChanges(self, changes, change_queue, event, dependency_graph=None): log = get_annotated_logger(self.log, event) if self.pipeline.ignore_dependencies: return False, [] - log.debug("Checking for changes needed by %s:" % change) - # Return true if okay to proceed enqueing this change, - # false if the change should not be enqueued. - if not isinstance(change, model.Change): - log.debug(" %s does not support dependencies" % type(change)) - return False, [] - if not change.getNeedsChanges( - self.useDependenciesByTopic(change.project)): - log.debug(" No changes needed") - return False, [] changes_needed = [] abort = False - for needed_change in self.resolveChangeReferences( - change.getNeedsChanges( - self.useDependenciesByTopic(change.project))): - log.debug(" Change %s needs change %s:" % ( - change, needed_change)) - if needed_change.is_merged: - log.debug(" Needed change is merged") + for change in changes: + log.debug("Checking for changes needed by %s:" % change) + # Return true if okay to proceed enqueuing this change, + # false if the change should not be enqueued. + if not isinstance(change, model.Change): + log.debug(" %s does not support dependencies" % type(change)) continue - - if dependency_graph is not None: - log.debug(" Adding change %s to dependency graph for " - "change %s", needed_change, change) - node = dependency_graph.setdefault(change, []) - node.append(needed_change) - - if (self.pipeline.tenant.max_dependencies is not None and - dependency_graph is not None and - len(dependency_graph) > self.pipeline.tenant.max_dependencies): - log.debug(" Dependency graph for change %s is too large", - change) - return True, [] - - if self.isChangeAlreadyInQueue(needed_change, change_queue): - log.debug(" Needed change is already ahead in the queue") + needed_changes = dependency_graph.get(change) + if not needed_changes: + log.debug(" No changes needed") continue - log.debug(" Change %s is needed" % needed_change) - if needed_change not in changes_needed: - changes_needed.append(needed_change) - continue - # This differs from the dependent pipeline check in not - # verifying that the dependent change is mergable. + for needed_change in needed_changes: + log.debug(" Change %s needs change %s:" % ( + change, needed_change)) + if needed_change.is_merged: + log.debug(" Needed change is merged") + continue + if needed_change in changes: + log.debug(" Needed change is in cycle") + continue + if self.isChangeAlreadyInQueue(needed_change, change_queue): + log.debug(" Needed change is already ahead in the queue") + continue + log.debug(" Change %s is needed" % needed_change) + if needed_change not in changes_needed: + changes_needed.append(needed_change) + continue + # This differs from the dependent pipeline check in not + # verifying that the dependent change is mergeable.
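The dependent and independent variants above now share one shape: dependencies are resolved once into dependency_graph at enqueue time, and the managers only read it. Stripped of the queue and mergeability checks, the core loop is (is_already_ahead stands in for isChangeAlreadyInQueue):

    def missing_needed_changes(changes, dependency_graph, is_already_ahead):
        needed = []
        for change in changes:
            for needed_change in dependency_graph.get(change, []):
                if needed_change.is_merged:
                    continue  # nothing left to wait for
                if needed_change in changes:
                    continue  # part of the same cycle; enqueued together
                if is_already_ahead(needed_change):
                    continue  # already in the queue ahead of this item
                needed.append(needed_change)
        return needed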
return abort, changes_needed def dequeueItem(self, item, quiet=False): diff --git a/zuul/manager/supercedent.py b/zuul/manager/supercedent.py index 23d36848e3..c6409f0628 100644 --- a/zuul/manager/supercedent.py +++ b/zuul/manager/supercedent.py @@ -1,3 +1,5 @@ +# Copyright 2021, 2023-2024 Acme Gating, LLC +# # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at @@ -32,11 +34,11 @@ class SupercedentPipelineManager(PipelineManager): # Don't use Pipeline.getQueue to find an existing queue # because we're matching project and (branch or ref). for queue in self.pipeline.queues: - if (queue.queue[-1].change.project == change.project and + if (queue.queue[-1].changes[0].project == change.project and ((hasattr(change, 'branch') and - hasattr(queue.queue[-1].change, 'branch') and - queue.queue[-1].change.branch == change.branch) or - queue.queue[-1].change.ref == change.ref)): + hasattr(queue.queue[-1].changes[0], 'branch') and + queue.queue[-1].changes[0].branch == change.branch) or + queue.queue[-1].changes[0].ref == change.ref)): log.debug("Found existing queue %s", queue) return DynamicChangeQueueContextManager(queue) change_queue = model.ChangeQueue.new( @@ -66,6 +68,13 @@ class SupercedentPipelineManager(PipelineManager): (item, queue.queue[-1])) self.removeItem(item) + def cycleForChange(self, *args, **kw): + ret = super().cycleForChange(*args, **kw) + if len(ret) > 1: + raise Exception("Dependency cycles not supported " + "in supercedent pipelines") + return ret + def addChange(self, *args, **kw): ret = super(SupercedentPipelineManager, self).addChange( *args, **kw) diff --git a/zuul/merger/client.py b/zuul/merger/client.py index 48b56e1c71..875c330623 100644 --- a/zuul/merger/client.py +++ b/zuul/merger/client.py @@ -1,4 +1,5 @@ # Copyright 2014 OpenStack Foundation +# Copyright 2021-2022, 2024 Acme Gating, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. 
You may obtain @@ -138,13 +139,43 @@ class MergeClient(object): ) return job - def getFilesChanges(self, connection_name, project_name, branch, - tosha=None, precedence=PRECEDENCE_HIGH, - build_set=None, needs_result=False, event=None): - data = dict(connection=connection_name, - project=project_name, - branch=branch, - tosha=tosha) + def getFilesChanges(self, changes, precedence=PRECEDENCE_HIGH, + build_set=None, needs_result=False, + event=None): + changes_data = [] + for change in changes: + # if base_sha is not available, fall back to the branch + tosha = getattr(change, "base_sha", None) + if tosha is None: + tosha = getattr(change, "branch", None) + changes_data.append(dict( + connection=change.project.connection_name, + project=change.project.name, + branch=change.ref, + tosha=tosha, + )) + data = dict(changes=changes_data) + job = self.submitJob( + MergeRequest.FILES_CHANGES, + data, + build_set, + precedence, + needs_result=needs_result, + event=event, + ) + return job + + def getFilesChangesRaw(self, connection_name, project_name, branch, tosha, + precedence=PRECEDENCE_HIGH, + build_set=None, needs_result=False, + event=None): + changes_data = [dict( + connection=connection_name, + project=project_name, + branch=branch, + tosha=tosha, + )] + data = dict(changes=changes_data) job = self.submitJob( MergeRequest.FILES_CHANGES, data, diff --git a/zuul/merger/server.py b/zuul/merger/server.py index ec01e75b60..2ca7fb1c2e 100644 --- a/zuul/merger/server.py +++ b/zuul/merger/server.py @@ -1,5 +1,5 @@ # Copyright 2014 OpenStack Foundation -# Copyright 2021-2022 Acme Gating, LLC +# Copyright 2021-2022, 2024 Acme Gating, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License.
You may obtain @@ -334,25 +334,36 @@ class BaseMergeServer(metaclass=ABCMeta): self.log.debug("Got fileschanges job: %s", merge_request.uuid) zuul_event_id = merge_request.event_id - connection_name = args['connection'] - project_name = args['project'] + # MODEL_API < 26: + changes = args.get('changes') + old_format = False + if changes is None: + changes = [args] + old_format = True - lock = self.repo_locks.getRepoLock(connection_name, project_name) - try: - self._update(connection_name, project_name, - zuul_event_id=zuul_event_id) - with lock: - files = self.merger.getFilesChanges( - connection_name, project_name, - args['branch'], args['tosha'], - zuul_event_id=zuul_event_id) - except Exception: - result = dict(update=False) + results = [] + for change in changes: + connection_name = change['connection'] + project_name = change['project'] + + lock = self.repo_locks.getRepoLock(connection_name, project_name) + try: + self._update(connection_name, project_name, + zuul_event_id=zuul_event_id) + with lock: + files = self.merger.getFilesChanges( + connection_name, project_name, + change['branch'], change['tosha'], + zuul_event_id=zuul_event_id) + results.append(files) + except Exception: + return dict(updated=False) + + if old_format: + # MODEL_API < 26: + return dict(updated=True, files=results[0]) else: - result = dict(updated=True, files=files) - - result['zuul_event_id'] = zuul_event_id - return result + return dict(updated=True, files=results) def completeMergeJob(self, merge_request, result): log = get_annotated_logger(self.log, merge_request.event_id) diff --git a/zuul/model.py b/zuul/model.py index 42e8ae7ce9..d6871b3073 100644 --- a/zuul/model.py +++ b/zuul/model.py @@ -1,5 +1,5 @@ # Copyright 2012 Hewlett-Packard Development Company, L.P. -# Copyright 2021-2023 Acme Gating, LLC +# Copyright 2021-2024 Acme Gating, LLC # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain @@ -904,75 +904,12 @@ class PipelineState(zkobject.ZKObject): "queues": queues, "old_queues": old_queues, }) - if context.build_references: - self._fixBuildReferences(data, context) - context.build_references = False return data - def _fixBuildReferences(self, data, context): - # Reconcile duplicate builds; if we find any BuildReference - # objects, look up the actual builds and replace - log = context.log - build_map = {} - to_replace_dicts = [] - to_replace_lists = [] - for queue in data['queues'] + data['old_queues']: - for item in queue.queue: - buildset = item.current_build_set - for build_job, build in buildset.builds.items(): - if isinstance(build, BuildReference): - to_replace_dicts.append((item, - buildset, - buildset.builds, - build_job, - build._path)) - else: - build_map[build.getPath()] = build - for job_name, build_list in buildset.retry_builds.items(): - for build in build_list: - if isinstance(build, BuildReference): - to_replace_lists.append((item, - None, - build_list, - build, - build._path)) - else: - build_map[build.getPath()] = build - for (item, buildset, build_dict, build_job, build_path - ) in to_replace_dicts: - orig_build = build_map.get(build_path) - if orig_build: - build_dict[build_job] = orig_build - else: - log.warning("Unable to find deduplicated build %s for %s", - build_path, item) - del build_dict[build_job] - # We're not going to be able to use the results of - # this deduplication, which means we're going to try - # to launch the job again. 
To make sure that happens - # cleanly, go ahead and remove any nodeset information - # that we copied when we thought we were going to - # deduplicate it. - buildset.nodeset_info.pop(build_job, None) - buildset.node_requests.pop(build_job, None) - for (item, buildset, build_list, build, build_path - ) in to_replace_lists: - idx = build_list.index(build) - orig_build = build_map.get(build_path) - if orig_build: - build_list[idx] = build_map[build_path] - else: - log.warning("Unable to find deduplicated build %s for %s", - build_path, item) - del build_list[idx] - def _getKnownItems(self): items = [] for queue in (*self.old_queues, *self.queues): items.extend(queue.queue) - for item in queue.queue: - if item.bundle: - items.extend(item.bundle.items) return items def cleanup(self, context): @@ -1214,10 +1151,6 @@ class ChangeQueue(zkobject.ZKObject): existing_items = {} for item in self.queue: existing_items[item.getPath()] = item - if item.bundle: - existing_items.update({ - i.getPath(): i for i in item.bundle.items - }) items_by_path = OrderedDict() # This is a tuple of (x, Future), where x is None if no action @@ -1254,15 +1187,6 @@ class ChangeQueue(zkobject.ZKObject): items_behind=[items_by_path[p] for p in item._items_behind if p in items_by_path]) - bundle_by_uuid = {} - for item in items_by_path.values(): - if not item.bundle: - continue - bundle_data = item.bundle - item._set(bundle=bundle_by_uuid.setdefault( - bundle_data["uuid"], - Bundle.deserialize(context, self, items_by_path, bundle_data))) - data.update({ "_jobs": set(data["_jobs"]), "queue": list(items_by_path.values()), @@ -1307,12 +1231,13 @@ class ChangeQueue(zkobject.ZKObject): def matches(self, project_cname, branch): return (project_cname, branch) in self.project_branches - def enqueueChange(self, change, event, span_info=None, enqueue_time=None): + def enqueueChanges(self, changes, event, span_info=None, + enqueue_time=None): if enqueue_time is None: enqueue_time = time.time() item = QueueItem.new(self.zk_context, queue=self, - change=change, + changes=changes, event=event, span_info=span_info, enqueue_time=enqueue_time) @@ -1340,20 +1265,10 @@ class ChangeQueue(zkobject.ZKObject): item_behind.updateAttributes(self.zk_context, item_ahead=item.item_ahead) - if item.bundle: - # This item may have builds referenced by other items in - # the bundle, or even other bundles (in the case of - # independent pipelines). Rather than trying to figure - # that out here, we will just let PipelineState.cleanup - # handle garbage collecting these items when done. - item.updateAttributes( - self.zk_context, item_ahead=None, items_behind=[], - dequeue_time=time.time()) - else: - item.delete(self.zk_context) - # We use the dequeue time for stats reporting, but the queue - # item will no longer be in Zookeeper at this point. - item._set(dequeue_time=time.time()) + item.delete(self.zk_context) + # We use the dequeue time for stats reporting, but the queue + # item will no longer be in Zookeeper at this point. 
+ item._set(dequeue_time=time.time()) def moveItem(self, item, item_ahead): if item.item_ahead == item_ahead: @@ -1380,13 +1295,7 @@ class ChangeQueue(zkobject.ZKObject): def isActionable(self, item): if not self.window: return True - # Ignore done items waiting for bundle dependencies to finish - num_waiting_items = len([ - i for i in self.queue - if i.bundle and i.areAllJobsComplete() - ]) - window = self.window + num_waiting_items - return item in self.queue[:window] + return item in self.queue[:self.window] def increaseWindowSize(self): if not self.window: @@ -1746,14 +1655,12 @@ class NodeRequest(object): """A request for a set of nodes.""" def __init__(self, requestor, build_set_uuid, tenant_name, pipeline_name, - job_name, job_uuid, labels, provider, relative_priority, + job_uuid, labels, provider, relative_priority, event_id=None, span_info=None): self.requestor = requestor self.build_set_uuid = build_set_uuid self.tenant_name = tenant_name self.pipeline_name = pipeline_name - # MODEL_API < 24 - self.job_name = job_name self.job_uuid = job_uuid self.labels = labels self.nodes = [] @@ -1799,12 +1706,6 @@ class NodeRequest(object): self._state = value self.state_time = time.time() - @property - def _job_id(self): - # MODEL_API < 24 - # Remove this after circular dep refactor - return self.job_uuid or self.job_name - def __repr__(self): return '' % (self.id, self.labels) @@ -1823,11 +1724,9 @@ class NodeRequest(object): "build_set_uuid": self.build_set_uuid, "tenant_name": self.tenant_name, "pipeline_name": self.pipeline_name, - "job_name": self.job_name, + "job_uuid": self.job_uuid, "span_info": self.span_info, } - if (COMPONENT_REGISTRY.model_api >= 24): - d["requestor_data"]['job_uuid'] = self.job_uuid d.setdefault('node_types', self.labels) d.setdefault('requestor', self.requestor) d.setdefault('created_time', self.created_time) @@ -1871,7 +1770,6 @@ class NodeRequest(object): build_set_uuid=requestor_data.get("build_set_uuid"), tenant_name=requestor_data.get("tenant_name"), pipeline_name=requestor_data.get("pipeline_name"), - job_name=requestor_data.get("job_name"), job_uuid=requestor_data.get("job_uuid"), labels=data["node_types"], provider=data["provider"], @@ -2304,13 +2202,14 @@ class JobData(zkobject.ShardedZKObject): return self._path @classmethod - def new(klass, context, **kw): + def new(klass, context, create=True, **kw): """Create a new instance and save it in ZooKeeper""" obj = klass() kw['hash'] = JobData.getHash(kw['data']) obj._set(**kw) - data = obj._trySerialize(context) - obj._save(context, data, create=True) + if create: + data = obj._trySerialize(context) + obj._save(context, data, create=True) return obj @staticmethod @@ -2398,7 +2297,9 @@ class FrozenJob(zkobject.ZKObject): def __init__(self): super().__init__() - self._set(_ready_to_run=False) + self._set(_ready_to_run=False, + ref=None, + other_refs=[]) def __repr__(self): name = getattr(self, 'name', '') @@ -2426,35 +2327,40 @@ class FrozenJob(zkobject.ZKObject): @classmethod def new(klass, context, **kw): - obj = klass() - if (COMPONENT_REGISTRY.model_api < 19): - obj._set(uuid=None) - else: - obj._set(uuid=uuid4().hex) + raise NotImplementedError() + + @classmethod + def createInMemory(klass, **kw): + obj = klass() + obj._set(uuid=uuid4().hex) - # Convert these to JobData after creation. - job_data_vars = {} for k in klass.job_data_attributes: v = kw.pop(k, None) - if v: - # If the value is long, we need to make this a JobData; - # otherwise we can use the dict as-is. 
- if (len(json_dumps(v, sort_keys=True).encode('utf8')) > - klass.MAX_DATA_LEN): - job_data_vars[k] = v - v = None kw['_' + k] = v obj._set(**kw) - data = obj._trySerialize(context) - obj._save(context, data, create=True) + return obj + + def internalCreate(self, context): + # Convert these to JobData after creation. + job_data_vars = [] + for k in self.job_data_attributes: + v = getattr(self, '_' + k) + if v: + # If the value is long, we need to make this a + # JobData; otherwise we can use the value as-is. + # TODO(jeblair): if we apply the same createInMemory + # approach to JobData creation, we can avoid this + # serialization test as well as rewriting the + # frozenjob object below. + v = self._makeJobData(context, k, v, create=False) + self._set(**{'_' + k: v}) + if isinstance(v, JobData): + job_data_vars.append(v) + super().internalCreate(context) # If we need to make any JobData entries, do that now. - update_kw = {} - for (k, v) in job_data_vars.items(): - update_kw['_' + k] = obj._makeJobData(context, k, v) - if update_kw: - obj.updateAttributes(context, **update_kw) - return obj + for v in job_data_vars: + v.internalCreate(context) def isBase(self): return self.parent is None @@ -2467,14 +2373,11 @@ class FrozenJob(zkobject.ZKObject): return f"{parent_path}/job/{safe_id}" def getPath(self): - # MODEL_API < 19 - job_id = self.uuid or self.name - return self.jobPath(job_id, self.buildset.getPath()) + return self.jobPath(self.uuid, self.buildset.getPath()) - # MODEL_API < 19 @property - def _job_id(self): - return self.uuid or self.name + def all_refs(self): + return [self.ref, *self.other_refs] def serialize(self, context): # Ensure that any special handling in this method is matched @@ -2516,6 +2419,9 @@ class FrozenJob(zkobject.ZKObject): if (COMPONENT_REGISTRY.model_api < 9): data['nodeset'] = data['nodeset_alternatives'][0] + data['ref'] = self.ref + data['other_refs'] = self.other_refs + # Use json_dumps to strip any ZuulMark entries return json_dumps(data, sort_keys=True).encode("utf8") @@ -2546,12 +2452,6 @@ class FrozenJob(zkobject.ZKObject): # MODEL_API < 19 data.setdefault("uuid", None) - # MODEL_API < 22 - if 'ref' not in data and hasattr(self, 'buildset'): - # buildset is provided on the scheduler, but not on the - # executor; but we don't need the ref on the executor. 
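The createInMemory/internalCreate split lets the scheduler freeze every job, deduplicate across the refs of a cycle, and only then write the survivors to ZooKeeper. A sketch of the intended sequence (deduplicate_jobs and jobs_with_changes are placeholders, not names from this patch):

    frozen_jobs = [job.freezeJob(context, tenant, layout, item, change,
                                 redact_secrets_and_keys=False)
                   for job, change in jobs_with_changes]  # in memory only
    deduplicate_jobs(frozen_jobs)  # may merge jobs serving several refs
    for frozen_job in frozen_jobs:
        frozen_job.internalCreate(context)  # the only ZooKeeper write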
- data['ref'] = self.buildset.item.change.cache_key - if hasattr(self, 'nodeset_alternatives'): alts = self.nodeset_alternatives else: @@ -2700,7 +2600,7 @@ class FrozenJob(zkobject.ZKObject): artifact_data = artifact_data[:] for a in artifacts: # Change here may be any ref type (tag, change, etc) - ref = other_build.build_set.item.change + ref = other_build.build_set.item.getChangeForJob(other_build.job) a.update({'project': ref.project.name, 'job': other_build.job.name}) # Change is a Branch @@ -2720,12 +2620,12 @@ class FrozenJob(zkobject.ZKObject): artifact_data.append(a) return parent_data, secret_parent_data, artifact_data - def _makeJobData(self, context, name, data): + def _makeJobData(self, context, name, data, create=True): # If the data is large, store it in another object if (len(json_dumps(data, sort_keys=True).encode('utf8')) > self.MAX_DATA_LEN): return JobData.new( - context, _path=self.getPath() + '/' + name, + context, create=create, _path=self.getPath() + '/' + name, data=data) # Otherwise we can store it as a local dict return data @@ -3035,7 +2935,7 @@ class Job(ConfigObject): ns = nodeset return ns.flattenAlternatives(layout) - def freezeJob(self, context, tenant, layout, item, + def freezeJob(self, context, tenant, layout, item, change, redact_secrets_and_keys): buildset = item.current_build_set kw = {} @@ -3087,10 +2987,14 @@ class Job(ConfigObject): kw['dependencies'] = frozenset(kw['dependencies']) kw['semaphores'] = list(kw['semaphores']) kw['failure_output'] = list(kw['failure_output']) - kw['ref'] = item.change.cache_key + kw['ref'] = change.cache_key # Don't add buildset to attributes since it's not serialized kw['buildset'] = buildset - return FrozenJob.new(context, **kw) + # This creates the frozen job in memory but does not write it + # to ZK yet. We may end up combining the job with other jobs + # before we finalize the job graph. We will write all + # remaining jobs to zk at that point. + return FrozenJob.createInMemory(**kw) def getConfigHash(self, tenant): # Make a hash of the job configuration for determining whether @@ -3662,8 +3566,6 @@ class JobGraph(object): # BuildSet (either the real list of jobs, or a cached list of # "old" jobs for comparison). self._job_map = job_map - # An ordered list of jobs - self.jobs = [] # An ordered list of job UUIDs self.job_uuids = [] # dependent_job_uuid -> dict(parent_job_name -> soft) @@ -3681,19 +3583,12 @@ class JobGraph(object): # Dict of {job_uuid: {child_uuid: {soft: bool}}} self.job_dependents = {} self.project_metadata = {} - # A temporary model version to help with the circular dep refactor - self.model_version = 0 - # Store the model version at the time this object was instantiated - # so we don't change behavior while freezing. - if COMPONENT_REGISTRY.model_api >= 22: - self.model_version = 22 def __repr__(self): - return '' % (self.jobs) + return '' % (self.job_uuids) def toDict(self): data = { - "jobs": self.jobs, "job_uuids": self.job_uuids, "dependencies": self._dependencies, "job_dependencies": self.job_dependencies, @@ -3701,123 +3596,59 @@ class JobGraph(object): "project_metadata": { k: v.toDict() for (k, v) in self.project_metadata.items() }, - "model_version": self.model_version, } return data @classmethod def fromDict(klass, data, job_map): self = klass(job_map) - self.model_version = data.get('model_version', self.model_version) - self.jobs = data['jobs'] - # MODEL_API < 19: if job uuids is not set, we default the - # UUID for all jobs to None. 
- self.job_uuids = data.get('job_uuids', [None] * len(self.jobs)) + self.job_uuids = data['job_uuids'] self._dependencies = data['dependencies'] self.project_metadata = { k: ProjectMetadata.fromDict(v) for (k, v) in data['project_metadata'].items() } - # MODEL_API < 21 - self.job_dependencies = data.get('job_dependencies', {}) - self.job_dependents = data.get('job_dependents', {}) + self.job_dependencies = data['job_dependencies'] + self.job_dependents = data['job_dependents'] return self def addJob(self, job): # A graph must be created after the job list is frozen, # therefore we should only get one job with the same name. - if (self.model_version < 22): - job_id = job.name - else: - job_id = job.uuid - self._job_map[job_id] = job - self.jobs.append(job.name) + self._job_map[job.uuid] = job self.job_uuids.append(job.uuid) # Append the dependency information - self._dependencies.setdefault(job_id, {}) + self._dependencies.setdefault(job.uuid, {}) for dependency in job.dependencies: - self._dependencies[job_id][dependency.name] = dependency.soft + self._dependencies[job.uuid][dependency.name] = dependency.soft + + def _removeJob(self, job): + # This should only be called internally during deduplication + del self._job_map[job.uuid] + self.job_uuids.remove(job.uuid) + # Remove the dependency information + del self._dependencies[job.uuid] def getJobs(self): # Report in the order of layout cfg - if (self.model_version < 22): - return [self._job_map[x] for x in self.jobs] - else: - return [self._job_map[x] for x in self.job_uuids] - - def getJobIds(self): - if (self.model_version < 22): - return self.jobs - else: - return self.job_uuids - - def getUuidForJobName(self, job_name): - return self.job_uuids[self.jobs.index(job_name)] - - def getUuidForJobId(self, job_id): - if (self.model_version < 22): - return self.job_uuids[self.jobs.index(job_id)] - return job_id - - def getNameForJobId(self, job_id): - if (self.model_version < 22): - return job_id - return self.jobs[self.job_uuids.index(job_id)] + return [self._job_map[x] for x in self.job_uuids] def getJobFromUuid(self, job_uuid): - if (self.model_version < 22): - index = self.job_uuids.index(job_uuid) - name = self.jobs[index] - return self._job_map[name] - else: - return self._job_map[job_uuid] - - def getJobFromName(self, job_name): - # TODO: this must be removed by completion of circular - # dependency refactor. - if (self.model_version < 22): - return self._job_map.get(job_name) - else: - try: - index = self.jobs.index(job_name) - except ValueError: - return None - uuid = self.job_uuids[index] - return self._job_map[uuid] + return self._job_map[job_uuid] def getJob(self, name, ref): for job in self._job_map.values(): - if job.name == name and job.ref == ref: + if job.name == name and ref in job.all_refs: return job def getDirectDependentJobs(self, job): - # First, are we able to support the new method?
- if self._dependencies and not self.job_dependencies: - # MODEL_API < 21 - return [self._job_map[name] - for name in self._legacyGetDirectDependentJobNames( - job.name, skip_soft=False)] ret = [] for dependent_uuid, dependent_data in \ self.job_dependents.get(job.uuid, {}).items(): ret.append(self.getJobFromUuid(dependent_uuid)) return ret - # MODEL_API < 21 - def _legacyGetDirectDependentJobNames(self, parent_job, skip_soft=False): - ret = set() - for dependent_name, parents in self._dependencies.items(): - part = parent_job in parents \ - and (not skip_soft or not parents[parent_job]) - if part: - ret.add(dependent_name) - return ret - def getDependentJobsRecursively(self, job, skip_soft=False): - if self._dependencies and not self.job_dependents: - # MODEL_API < 21 - return self._legacyGetDependentJobsRecursively(job.name, - skip_soft=skip_soft) all_dependent_uuids = set() uuids_to_iterate = set([(job.uuid, False)]) while len(uuids_to_iterate) > 0: @@ -3837,73 +3668,38 @@ class JobGraph(object): uuids_to_iterate.add((u, current_dependent_uuids[u]['soft'])) return [self.getJobFromUuid(u) for u in all_dependent_uuids] - # MODEL_API < 21 - def _legacyGetDependentJobsRecursively(self, parent_job, skip_soft=False): - all_dependent_jobs = set() - jobs_to_iterate = set([parent_job]) - while len(jobs_to_iterate) > 0: - current_job = jobs_to_iterate.pop() - current_dependent_jobs = self._legacyGetDirectDependentJobNames( - current_job, skip_soft) - new_dependent_jobs = current_dependent_jobs - all_dependent_jobs - jobs_to_iterate |= new_dependent_jobs - all_dependent_jobs |= new_dependent_jobs - return [self._job_map[name] for name in all_dependent_jobs] - - def _legacyCheckDependencies(self, layout=None): - for dependent_name, parents in self._dependencies.items(): - # For the side effect of verifying no cycles - self._legacyGetParentJobNamesRecursively(dependent_name) - if layout: - for dependent_name, parents in self._dependencies.items(): - for parent_name, parent_soft in parents.items(): - # If the caller spplied a layout, verify that the - # job exists to provide a helpful error message. - # Called for exception side effect: - layout.getJob(parent_name) - def freezeDependencies(self, layout=None): - if (COMPONENT_REGISTRY.model_api < 21): - return self._legacyCheckDependencies(layout) - for dependent_id, parents in self._dependencies.items(): - dependent_uuid = self.getUuidForJobId(dependent_id) - if dependent_uuid is None: - # MODEL_API < 21 - self.job_dependencies = {} - self.job_dependents = {} - return self._legacyCheckDependencies(layout) + for dependent_uuid, parents in self._dependencies.items(): dependencies = self.job_dependencies.setdefault(dependent_uuid, {}) for parent_name, parent_soft in parents.items(): - dependent_job = self._job_map[dependent_id] - # We typically depend on jobs with the same ref (but - # this could later be modified by deduplication). - parent_job = self.getJob(parent_name, dependent_job.ref) - if parent_job is None: - if parent_soft: - if layout: - # If the caller spplied a layout, verify that the - # job exists to provide a helpful error message. - # Called for exception side effect: - layout.getJob(parent_name) - continue - raise Exception( - "Job %s depends on %s which was not run." 
% - (dependent_job.name, parent_name)) - dependencies[parent_job.uuid] = dict(soft=parent_soft) - dependents = self.job_dependents.setdefault( - parent_job.uuid, {}) - dependents[dependent_uuid] = dict(soft=parent_soft) - for dependent_id, parents in self._dependencies.items(): - dependent_job = self._job_map[dependent_id] + dependent_job = self._job_map[dependent_uuid] + # We typically depend on jobs with the same ref, but + # if we have been deduplicated, then we depend on + # every job-ref for the given parent job. + for ref in dependent_job.all_refs: + parent_job = self.getJob(parent_name, ref) + if parent_job is None: + if parent_soft: + if layout: + # If the caller supplied a layout, + # verify that the job exists to + # provide a helpful error message. + # Called for exception side effect: + layout.getJob(parent_name) + continue + raise Exception( + "Job %s depends on %s which was not run." % + (dependent_job.name, parent_name)) + dependencies[parent_job.uuid] = dict(soft=parent_soft) + dependents = self.job_dependents.setdefault( + parent_job.uuid, {}) + dependents[dependent_uuid] = dict(soft=parent_soft) + for dependent_uuid, parents in self._dependencies.items(): + dependent_job = self._job_map[dependent_uuid] # For the side effect of verifying no cycles self.getParentJobsRecursively(dependent_job) def getParentJobsRecursively(self, job, skip_soft=False): - if self._dependencies and not self.job_dependencies: - # MODEL_API < 21 - return [self._job_map[name] for name in - self._legacyGetParentJobNamesRecursively( - job.name, skip_soft=skip_soft)] all_dependency_uuids = set() uuids_to_iterate = set([(job.uuid, False)]) ancestor_uuids = set() @@ -3930,41 +3726,47 @@ class JobGraph(object): uuids_to_iterate.add((u, current_dependency_uuids[u]['soft'])) return [self.getJobFromUuid(u) for u in all_dependency_uuids] - # MODEL_API < 21 - def _legacyGetParentJobNamesRecursively(self, dependent_job, - skip_soft=False): - all_parent_jobs = set() - jobs_to_iterate = set([(dependent_job, False)]) - ancestor_jobs = set() - while len(jobs_to_iterate) > 0: - (current_job, current_soft) = jobs_to_iterate.pop() - if current_job in ancestor_jobs: - raise Exception("Dependency cycle detected in job %s" % - current_job) - ancestor_jobs.add(current_job) - current_parent_jobs = self._dependencies.get(current_job) - if skip_soft: - hard_parent_jobs = \ - {d: s for d, s in current_parent_jobs.items() if not s} - current_parent_jobs = hard_parent_jobs - if current_parent_jobs is None: - if current_soft: - current_parent_jobs = {} - else: - raise Exception("Job %s depends on %s which was not run." % - (dependent_job, current_job)) - elif dependent_job != current_job: - all_parent_jobs.add(current_job) - new_parent_jobs = set(current_parent_jobs.keys()) - all_parent_jobs - for j in new_parent_jobs: - jobs_to_iterate.add((j, current_parent_jobs[j])) - return all_parent_jobs - def getProjectMetadata(self, name): if name in self.project_metadata: return self.project_metadata[name] return None + def deduplicateJobs(self, log): + # Jobs are deduplicated before they start, so returned data + # are not considered at all. + # + # If a to-be-deduplicated job depends on a non-deduplicated + # job, it will treat each (job, ref) instance as a parent. + # + # Otherwise, each job will depend only on jobs for the same + # ref.
+ job_list = list(self._job_map.values()) + while job_list: + job = job_list.pop(0) + if job.deduplicate is False: + continue + for other_job in job_list[:]: + if other_job.deduplicate is False: + continue + if not other_job.isEqual(job): + continue + job_change = job.buildset.item.getChangeForJob(job) + other_job_change = other_job.buildset.item.getChangeForJob( + other_job) + if job.deduplicate == 'auto': + # Deduplicate if there are required projects + # or the item project is the same. + if (not job.required_projects and + job_change.project != + other_job_change.project): + continue + # Deduplicate! + log.info("Deduplicating %s for %s into %s for %s", + other_job, other_job_change, job, job_change) + job.other_refs.append(other_job.ref) + self._removeJob(other_job) + job_list.remove(other_job) + @total_ordering class JobRequest: @@ -4119,15 +3921,13 @@ class BuildRequest(JobRequest): ALL_STATES = JobRequest.ALL_STATES + (PAUSED,) - def __init__(self, uuid, zone, build_set_uuid, job_name, job_uuid, + def __init__(self, uuid, zone, build_set_uuid, job_uuid, tenant_name, pipeline_name, event_id, precedence=None, state=None, result_path=None, span_context=None): super().__init__(uuid, precedence, state, result_path, span_context) self.zone = zone self.build_set_uuid = build_set_uuid - # MODEL_API < 25 - self.job_name = job_name self.job_uuid = job_uuid self.tenant_name = tenant_name self.pipeline_name = pipeline_name @@ -4137,25 +3937,17 @@ class BuildRequest(JobRequest): # build the url for the live log stream. self.worker_info = None - @property - def _job_id(self): - # MODEL_API < 25 - # Remove this after circular dep refactor - return self.job_uuid or self.job_name - def toDict(self): d = super().toDict() d.update({ "zone": self.zone, "build_set_uuid": self.build_set_uuid, - "job_name": self.job_name, + "job_uuid": self.job_uuid, "tenant_name": self.tenant_name, "pipeline_name": self.pipeline_name, "event_id": self.event_id, "worker_info": self.worker_info, }) - if (COMPONENT_REGISTRY.model_api >= 25): - d['job_uuid'] = self.job_uuid return d @classmethod @@ -4164,8 +3956,7 @@ class BuildRequest(JobRequest): data["uuid"], data["zone"], data["build_set_uuid"], - data["job_name"], - data.get("job_uuid"), + data["job_uuid"], data["tenant_name"], data["pipeline_name"], data["event_id"], @@ -4181,7 +3972,7 @@ class BuildRequest(JobRequest): def __repr__(self): return ( - f"" ) @@ -4537,16 +4328,12 @@ class BuildSet(zkobject.ZKObject): def __init__(self): super().__init__() - model_version = 0 - if COMPONENT_REGISTRY.model_api >= 23: - model_version = 23 self._set( item=None, builds={}, retry_builds={}, result=None, uuid=uuid4().hex, - commit=None, dependent_changes=None, merger_items=None, unable_to_merge=False, @@ -4571,14 +4358,11 @@ class BuildSet(zkobject.ZKObject): fail_fast=False, job_graph=None, jobs={}, - deduplicated_jobs=[], job_versions={}, build_versions={}, # Cached job graph of previous layout; not serialized _old_job_graph=None, _old_jobs={}, - # A temporary model version to help with the circular dep refactor - model_version=model_version, ) def setFiles(self, items): @@ -4667,7 +4451,6 @@ class BuildSet(zkobject.ZKObject): for j, l in self.retry_builds.items()}, "result": self.result, "uuid": self.uuid, - "commit": self.commit, "dependent_changes": self.dependent_changes, "merger_items": self.merger_items, "unable_to_merge": self.unable_to_merge, @@ -4697,18 +4480,10 @@ class BuildSet(zkobject.ZKObject): "repo_state_request_time": self.repo_state_request_time, 
"job_versions": self.job_versions, "build_versions": self.build_versions, - "model_version": self.model_version, # jobs (serialize as separate objects) } return json.dumps(data, sort_keys=True).encode("utf8") - def _isMyBuild(self, build_path): - parts = build_path.split('/') - buildset_uuid = parts[-5] - if buildset_uuid == self.uuid: - return True - return False - def deserialize(self, raw, context): data = super().deserialize(raw, context) # Set our UUID so that getPath() returns the correct path for @@ -4775,7 +4550,7 @@ class BuildSet(zkobject.ZKObject): existing_retry_builds = {b.getPath(): b for bl in self.retry_builds.values() for b in bl} - # This is a tuple of (kind, job_id, job_build_key, Future), + # This is a tuple of (kind, job_uuid, Future), # where kind is None if no action needs to be taken, or a # string to indicate which kind of job it was. This structure # allows us to execute async ZK reads and perform local data @@ -4786,88 +4561,65 @@ class BuildSet(zkobject.ZKObject): build_versions = data.get('build_versions', {}) # jobs (deserialize as separate objects) if job_graph := data['job_graph']: - for job_id in job_graph.getJobIds(): + for job_uuid in job_graph.job_uuids: # If we have a current build before refreshing, we may # be able to skip refreshing some items since they # will not have changed. - - # TODO: after circular dependency refactor, we can - # just use job_id (or job.uuid) for everything. Until - # then, the index for the job graph and the index for - # the build dict may each either be name or id. - job_name = job_graph.getNameForJobId(job_id) - if self.model_version < 23: - job_build_key = job_graph.getNameForJobId(job_id) - else: - job_build_key = job_graph.getUuidForJobId(job_id) - build_path = data["builds"].get(job_build_key) - old_build = self.builds.get(job_build_key) + build_path = data["builds"].get(job_uuid) + old_build = self.builds.get(job_uuid) old_build_exists = (old_build and old_build.getPath() == build_path) - if job_id in self.jobs: - job = self.jobs[job_id] + if job_uuid in self.jobs: + job = self.jobs[job_uuid] if ((not old_build_exists) or self.shouldRefreshJob(job, job_versions)): - tpe_jobs.append((None, job_id, job_build_key, + tpe_jobs.append((None, job_uuid, tpe.submit(job.refresh, context))) else: - job_uuid = job_graph.getUuidForJobId(job_id) - # MODEL_API < 19; use job_name if job_uuid is None - job_path = FrozenJob.jobPath( - job_uuid or job_name, self.getPath()) - tpe_jobs.append(('job', job_id, job_build_key, tpe.submit( + job_path = FrozenJob.jobPath(job_uuid, self.getPath()) + tpe_jobs.append(('job', job_uuid, tpe.submit( FrozenJob.fromZK, context, job_path, buildset=self))) if build_path: - build = self.builds.get(job_build_key) - builds[job_build_key] = build + build = self.builds.get(job_uuid) + builds[job_uuid] = build if build and build.getPath() == build_path: if self.shouldRefreshBuild(build, build_versions): tpe_jobs.append(( - None, job_id, job_build_key, tpe.submit( + None, job_uuid, tpe.submit( build.refresh, context))) else: - if not self._isMyBuild(build_path): - build = BuildReference(build_path) - context.build_references = True - builds[job_build_key] = build - else: - tpe_jobs.append(( - 'build', job_id, job_build_key, tpe.submit( - Build.fromZK, context, build_path, - build_set=self))) + tpe_jobs.append(( + 'build', job_uuid, tpe.submit( + Build.fromZK, context, build_path, + build_set=self))) - for retry_path in data["retry_builds"].get(job_build_key, []): + for retry_path in 
data["retry_builds"].get(job_uuid, []): retry_build = existing_retry_builds.get(retry_path) if retry_build and retry_build.getPath() == retry_path: # Retry builds never change. - retry_builds[job_build_key].append(retry_build) + retry_builds[job_uuid].append(retry_build) else: - if not self._isMyBuild(retry_path): - retry_build = BuildReference(retry_path) - context.build_references = True - retry_builds[job_build_key].append(retry_build) - else: - tpe_jobs.append(( - 'retry', job_id, job_build_key, tpe.submit( - Build.fromZK, context, retry_path, - build_set=self))) + tpe_jobs.append(( + 'retry', job_uuid, tpe.submit( + Build.fromZK, context, retry_path, + build_set=self))) - for (kind, job_id, job_build_key, future) in tpe_jobs: + for (kind, job_uuid, future) in tpe_jobs: result = future.result() if kind == 'job': - self.jobs[job_id] = result + self.jobs[job_uuid] = result elif kind == 'build': # We normally set the job on the constructor, but we # may not have had it in time. At this point though, # the job future is guaranteed to have completed, so # we can look it up now. - result._set(job=self.jobs[job_id]) - builds[job_build_key] = result + result._set(job=self.jobs[job_uuid]) + builds[job_uuid] = result elif kind == 'retry': - result._set(job=self.jobs[job_id]) - retry_builds[job_build_key].append(result) + result._set(job=self.jobs[job_uuid]) + retry_builds[job_uuid].append(result) data.update({ "builds": builds, @@ -4901,7 +4653,7 @@ class BuildSet(zkobject.ZKObject): version = job.getZKVersion() if version is not None: - self.job_versions[self._getJobId(job)] = version + 1 + self.job_versions[job.uuid] = version + 1 self.updateAttributes(context, job_versions=self.job_versions) def shouldRefreshBuild(self, build, build_versions): @@ -4917,7 +4669,7 @@ class BuildSet(zkobject.ZKObject): if (COMPONENT_REGISTRY.model_api < 12): return True current = job.getZKVersion() - expected = job_versions.get(self._getJobId(job), 0) + expected = job_versions.get(job.uuid, 0) return expected != current @property @@ -4939,27 +4691,26 @@ class BuildSet(zkobject.ZKObject): # so we don't know what the other changes ahead will be # until jobs start. if self.dependent_changes is None: - items = [] - if self.item.bundle: - items.extend(reversed(self.item.bundle.items)) - else: - items.append(self.item) - + items = [self.item] items.extend(i for i in self.item.items_ahead if i not in items) items.reverse() - self.dependent_changes = [self._toChangeDict(i) for i in items] - self.merger_items = [i.makeMergerItem() for i in items] + self.dependent_changes = [ + self._toChangeDict(i, c) for i in items for c in i.changes + ] + self.merger_items = [ + i.makeMergerItem(c) for i in items for c in i.changes + ] self.configured = True self.configured_time = time.time() - def _toChangeDict(self, item): + def _toChangeDict(self, item, change): # Inject bundle_id to dict if available, this can be used to decide # if changes belongs to the same bunbdle - change_dict = item.change.toDict() - if item.bundle: - change_dict['bundle_id'] = item.bundle.uuid + change_dict = change.toDict() + if len(item.changes) > 1: + change_dict['bundle_id'] = item.uuid return change_dict def getStateName(self, state_num): @@ -4971,48 +4722,22 @@ class BuildSet(zkobject.ZKObject): # refactor is complete at which point the build and its linked # job should be 1:1. 
with self.activeContext(self.item.pipeline.manager.current_context): - job_id = self._getJobId(job) - self.builds[job_id] = build - if job_id not in self.tries: - self.tries[job_id] = 1 + self.builds[job.uuid] = build + if job.uuid not in self.tries: + self.tries[job.uuid] = 1 def addRetryBuild(self, build): with self.activeContext(self.item.pipeline.manager.current_context): self.retry_builds.setdefault( - self._getJobId(build.job), []).append(build) + build.job.uuid, []).append(build) def removeBuild(self, build): - # Temporarily for circular dependency refactoring, we remove - # all builds with the same job name as the supplied build (in - # case they have been deduplicated). - job_id = None - for my_job_id, my_build in self.builds.items(): - if my_build.job.name == build.job.name: - job_id = my_job_id - if job_id is None: - return with self.activeContext(self.item.pipeline.manager.current_context): - self.tries[job_id] += 1 - del self.builds[job_id] - - # MODEL_API < 23 - def _getJobId(self, job): - if self.model_version < 23: - return job.name - return job.uuid - - def _getJobById(self, job_id): - if self.model_version < 23: - for job in self.job_graph.getJobs(): - if job.name == job_id: - return job - for job in self.job_graph.getJobs(): - if job.uuid == job_id: - return job - return None + self.tries[build.job.uuid] += 1 + del self.builds[build.job.uuid] def getBuild(self, job): - return self.builds.get(self._getJobId(job)) + return self.builds.get(job.uuid) def getBuilds(self): builds = list(self.builds.values()) @@ -5020,13 +4745,11 @@ class BuildSet(zkobject.ZKObject): return builds def getRetryBuildsForJob(self, job): - job_id = self._getJobId(job) - return self.retry_builds.get(job_id, []) + return self.retry_builds.get(job.uuid, []) def getJobNodeSetInfo(self, job): # Return None if not provisioned; dict of info about nodes otherwise - job_id = self._getJobId(job) - return self.nodeset_info.get(job_id) + return self.nodeset_info.get(job.uuid) def getJobNodeProvider(self, job): info = self.getJobNodeSetInfo(job) @@ -5044,58 +4767,32 @@ class BuildSet(zkobject.ZKObject): return info.get('nodes') def removeJobNodeSetInfo(self, job): - job_id = self._getJobId(job) - if job_id not in self.nodeset_info: + if job.uuid not in self.nodeset_info: raise Exception("No job nodeset for %s" % (job.name)) with self.activeContext(self.item.pipeline.manager.current_context): - del self.nodeset_info[job_id] + del self.nodeset_info[job.uuid] def setJobNodeRequestID(self, job, request_id): - job_id = self._getJobId(job) - if job_id in self.node_requests: + if job.uuid in self.node_requests: raise Exception("Prior node request for %s" % (job.name)) with self.activeContext(self.item.pipeline.manager.current_context): - self.node_requests[job_id] = request_id + self.node_requests[job.uuid] = request_id - def getJobNodeRequestID(self, job, ignore_deduplicate=False): - job_id = self._getJobId(job) - r = self.node_requests.get(job_id) - if ignore_deduplicate and isinstance(r, dict): - return None - return r + def getJobNodeRequestID(self, job): + return self.node_requests.get(job.uuid) def getNodeRequests(self): - # This ignores deduplicated node requests - for job_id, request in self.node_requests.items(): - if isinstance(request, dict): - continue - yield self._getJobById(job_id), request + for job_uuid, request in self.node_requests.items(): + yield self.job_graph.getJobFromUuid(job_uuid), request def removeJobNodeRequestID(self, job): - job_id = self._getJobId(job) - if job_id in self.node_requests: 
+ if job.uuid in self.node_requests: with self.activeContext( self.item.pipeline.manager.current_context): - del self.node_requests[job_id] - - def setJobNodeRequestDuplicate(self, job, other_item): - job_id = self._getJobId(job) - with self.activeContext( - self.item.pipeline.manager.current_context): - self.node_requests[job_id] = { - 'deduplicated_item': other_item.uuid} - - def setJobNodeSetInfoDuplicate(self, job, other_item): - # Nothing uses this value yet; we just need an entry in the - # nodset_info dict. - job_id = self._getJobId(job) - with self.activeContext(self.item.pipeline.manager.current_context): - self.nodeset_info[job_id] = { - 'deduplicated_item': other_item.uuid} + del self.node_requests[job.uuid] def jobNodeRequestComplete(self, job, nodeset): - job_id = self._getJobId(job) - if job_id in self.nodeset_info: + if job.uuid in self.nodeset_info: raise Exception("Prior node request for %s" % (job.name)) info = {} if nodeset.nodes: @@ -5107,13 +4804,12 @@ class BuildSet(zkobject.ZKObject): info['provider'] = node.provider info['nodes'] = [n.id for n in nodeset.getNodes()] with self.activeContext(self.item.pipeline.manager.current_context): - self.nodeset_info[job_id] = info + self.nodeset_info[job.uuid] = info def getTries(self, job): - job_id = self._getJobId(job) - return self.tries.get(job_id, 0) + return self.tries.get(job.uuid, 0) - def getMergeMode(self): + def getMergeMode(self, change): # We may be called before this build set has a shadow layout # (ie, we are called to perform the merge to create that # layout). It's possible that the change we are merging will @@ -5123,7 +4819,7 @@ class BuildSet(zkobject.ZKObject): # or if that fails, the current live layout, or if that fails, # use the default: merge-resolve. item = self.item - project = self.item.change.project + project = change.project project_metadata = None while item: if item.current_build_set.job_graph: @@ -5194,7 +4890,7 @@ class QueueItem(zkobject.ZKObject): self._set( uuid=uuid4().hex, queue=None, - change=None, # a ref + changes=[], # a list of refs dequeued_needing_change=None, dequeued_missing_requirements=False, current_build_set=None, @@ -5217,11 +4913,6 @@ class QueueItem(zkobject.ZKObject): # Additional container for connection specifig information to be # used by reporters throughout the lifecycle dynamic_state=defaultdict(dict), - - # A bundle holds other queue items that have to be successful - # for the current queue item to succeed - bundle=None, - dequeued_bundle_failing=False ) @property @@ -5230,20 +4921,6 @@ class QueueItem(zkobject.ZKObject): return self.queue.pipeline return None - @property - def bundle_build_set(self): - if self.bundle: - for item in self.bundle.items: - if item.live: - return item.current_build_set - return self.current_build_set - - @property - def bundle_items(self): - if self.bundle: - return self.bundle.items - return [self] - @classmethod def new(klass, context, **kw): obj = klass() @@ -5256,10 +4933,10 @@ class QueueItem(zkobject.ZKObject): # Skip the initial merge for branch/ref items as we don't need it in # order to build a job graph. The merger items will be included as # part of the extra repo state if there are jobs to run. 
- merge_state = (BuildSet.NEW if isinstance(obj.change, (Change, Tag)) - else BuildSet.COMPLETE) - files_state = (BuildSet.COMPLETE if obj.change.files is not None - else BuildSet.NEW) + should_merge = any(isinstance(o, (Change, Tag)) for o in obj.changes) + merge_state = (BuildSet.NEW if should_merge else BuildSet.COMPLETE) + should_files = any(o.files is None for o in obj.changes) + files_state = (BuildSet.NEW if should_files else BuildSet.COMPLETE) with trace.use_span(tracing.restoreSpan(obj.span_info)): buildset_span_info = tracing.startSavedSpan("BuildSet") @@ -5303,7 +4980,7 @@ class QueueItem(zkobject.ZKObject): # TODO: we need to also store some info about the change in # Zookeeper in order to show the change info on the status page. # This needs change cache and the API to resolve change by key. - "change": self.change.cache_key, + "changes": [c.cache_key for c in self.changes], "dequeued_needing_change": self.dequeued_needing_change, "dequeued_missing_requirements": self.dequeued_missing_requirements, @@ -5326,8 +5003,6 @@ class QueueItem(zkobject.ZKObject): "data": self.event.toDict(), }, "dynamic_state": self.dynamic_state, - "bundle": self.bundle and self.bundle.serialize(), - "dequeued_bundle_failing": self.dequeued_bundle_failing, "first_job_start_time": self.first_job_start_time, } return json.dumps(data, sort_keys=True).encode("utf8") @@ -5356,12 +5031,8 @@ class QueueItem(zkobject.ZKObject): f"Event type {event_type} not deserializable") event = event_class.fromDict(data["event"]["data"]) - change = self.pipeline.manager.resolveChangeReferences( - [data["change"]])[0] - # MODEL_API < 22: This can be removed once we remove the - # backwards-compat setting of FrozenJob.ref - self._set(change=change) - + changes = self.pipeline.manager.resolveChangeReferences( + data["changes"]) build_set = self.current_build_set if build_set and build_set.getPath() == data["current_build_set"]: build_set.refresh(context) @@ -5372,7 +5043,7 @@ class QueueItem(zkobject.ZKObject): data.update({ "event": event, - "change": change, + "changes": changes, "log": get_annotated_logger(self.log, event), "dynamic_state": defaultdict(dict, data["dynamic_state"]), "current_build_set": build_set, @@ -5395,13 +5066,13 @@ class QueueItem(zkobject.ZKObject): else: live = 'non-live' return '' % ( - self.uuid, live, self.change, pipeline) + self.uuid, live, self.changes, pipeline) def resetAllBuilds(self): context = self.pipeline.manager.current_context old_build_set = self.current_build_set - files_state = (BuildSet.COMPLETE if self.change.files is not None - else BuildSet.NEW) + have_all_files = all(c.files is not None for c in self.changes) + files_state = (BuildSet.COMPLETE if have_all_files else BuildSet.NEW) with trace.use_span(tracing.restoreSpan(self.span_info)): old_buildset_span = tracing.restoreSpan(old_build_set.span_info) @@ -5443,43 +5114,28 @@ class QueueItem(zkobject.ZKObject): self.current_build_set.warning_messages.append(msg) self.log.info(msg) + def getChangeForJob(self, job): + for change in self.changes: + if change.cache_key == job.ref: + return change + return None + def freezeJobGraph(self, layout, context, skip_file_matcher, redact_secrets_and_keys): - """Find or create actual matching jobs for this item's change and + """Find or create actual matching jobs for this item's changes and store the resulting job tree.""" - # TODO: move this and related methods to BuildSet - ppc = layout.getProjectPipelineConfig(self) try: - if ppc and ppc.debug: - debug_messages = 
ppc.debug_messages.copy() - else: - debug_messages = None - job_graph = layout.createJobGraph( - context, self, ppc, skip_file_matcher, redact_secrets_and_keys, - debug_messages) + results = layout.createJobGraph(context, self, skip_file_matcher, + redact_secrets_and_keys) + job_graph = results['job_graph'] - # Copy project metadata to job_graph since this must be independent - # of the layout as we need it in order to prepare the context for - # job execution. - # The layout might be no longer available at this point, as the - # scheduler submitting the job can be different from the one that - # created the layout. - job_graph.project_metadata = layout.project_metadata - - if debug_messages is None: - debug_messages = self.current_build_set.debug_messages - - if ppc: - fail_fast = ppc.fail_fast - else: - fail_fast = self.current_build_set.fail_fast - self.current_build_set.updateAttributes( - context, job_graph=job_graph, - fail_fast=fail_fast, - debug_messages=debug_messages) + # Write the jobs out to ZK + for frozen_job in job_graph._job_map.values(): + frozen_job.internalCreate(context) + self.current_build_set.updateAttributes(context, **results) except Exception: self.current_build_set.updateAttributes( context, job_graph=None, _old_job_graph=None) @@ -5494,15 +5150,8 @@ class QueueItem(zkobject.ZKObject): return [] return self.current_build_set.job_graph.getJobs() - def getJob(self, job_id): - # MODEL_API < 24 - job_graph = self.current_build_set.job_graph - try: - job = job_graph.getJobFromUuid(job_id) - if job is not None: - return job - except (KeyError, ValueError): - return job_graph.getJobFromName(job_id) + def getJob(self, job_uuid): + return self.current_build_set.job_graph.getJobFromUuid(job_uuid) @property def items_ahead(self): @@ -5511,6 +5160,12 @@ class QueueItem(zkobject.ZKObject): yield item_ahead item_ahead = item_ahead.item_ahead + def areAllChangesMerged(self): + for change in self.changes: + if not getattr(change, 'is_merged', True): + return False + return True + def haveAllJobsStarted(self): if not self.hasJobGraph(): return False @@ -5586,36 +5241,6 @@ class QueueItem(zkobject.ZKObject): return True return False - def isBundleFailing(self): - if self.bundle: - # We are only checking other items that share the same change - # queue, since we don't need to wait for changes in other change - # queues. - return self.bundle.failed_reporting or any( - i.hasAnyJobFailed() or i.didMergerFail() - for i in self.bundle.items - if i.live and i.queue == self.queue) - return False - - def didBundleFinish(self): - if self.bundle: - # We are only checking other items that share the same change - # queue, since we don't need to wait for changes in other change - # queues. 
- return all(i.areAllJobsComplete() for i in self.bundle.items if - i.live and i.queue == self.queue) - return True - - def didBundleStartReporting(self): - if self.bundle: - return self.bundle.started_reporting - return False - - def cannotMergeBundle(self): - if self.bundle: - return bool(self.bundle.cannot_merge) - return False - def didMergerFail(self): return self.current_build_set.unable_to_merge @@ -5632,39 +5257,45 @@ return self.dequeued_missing_requirements def includesConfigUpdates(self): + """Returns whether the changes include updates to the + trusted and untrusted configs""" includes_trusted = False includes_untrusted = False tenant = self.pipeline.tenant item = self - if item.bundle: - # Check all items in the bundle for config updates - for bundle_item in item.bundle.items: - if bundle_item.change.updatesConfig(tenant): - trusted, project = tenant.getProject( - bundle_item.change.project.canonical_name) + while item: + for change in item.changes: + if change.updatesConfig(tenant): + (trusted, project) = tenant.getProject( + change.project.canonical_name) if trusted: includes_trusted = True else: includes_untrusted = True if includes_trusted and includes_untrusted: # We're done early - return includes_trusted, includes_untrusted - - while item: - if item.change.updatesConfig(tenant): - (trusted, project) = tenant.getProject( - item.change.project.canonical_name) - if trusted: - includes_trusted = True - else: - includes_untrusted = True - if includes_trusted and includes_untrusted: - # We're done early - return (includes_trusted, includes_untrusted) + return (includes_trusted, includes_untrusted) item = item.item_ahead return (includes_trusted, includes_untrusted) + def updatesConfig(self): + """Returns whether the changes update the config""" + for change in self.changes: + if change.updatesConfig(self.pipeline.tenant): + tenant_project = self.pipeline.tenant.getProject( + change.project.canonical_name + )[1] + # If the cycle doesn't update the config or a change + # in the cycle updates the config but that + # change's project is not part of the tenant + # (e.g. when dealing w/ cross-tenant cycles), return + # False. + if tenant_project is None: + continue + return True + return False + def isHoldingFollowingChanges(self): if not self.live: return False @@ -5691,14 +5322,17 @@ if requirements_tuple not in self._cached_sql_results: conn = self.pipeline.manager.sched.connections.getSqlConnection() if conn: - builds = conn.getBuilds( - tenant=self.pipeline.tenant.name, - project=self.change.project.name, - pipeline=self.pipeline.name, - change=self.change.number, - branch=self.change.branch, - patchset=self.change.patchset, - provides=requirements_tuple) + for change in self.changes: + builds = conn.getBuilds( + tenant=self.pipeline.tenant.name, + project=change.project.name, + pipeline=self.pipeline.name, + change=change.number, + branch=change.branch, + patchset=change.patchset, + provides=requirements_tuple) + if builds: + break else: builds = [] # Just look at the most recent buildset.
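The hunks above replace the queue item's single `change` with a `changes` list and tie each frozen job back to its originating change through the `ref` cache key (see `getChangeForJob` and `FrozenJob.all_refs` earlier in this diff). The following is a minimal, runnable sketch of that lookup using toy stand-ins, not the real Zuul model classes; the cache key values are hypothetical:

# Toy stand-ins for the Zuul model; only the fields used by the
# ref-based lookup are modeled here.
class Change:
    def __init__(self, cache_key):
        self.cache_key = cache_key


class FrozenJob:
    def __init__(self, ref, other_refs=None):
        self.ref = ref                      # cache key of the originating change
        self.other_refs = other_refs or []  # extended by deduplication

    @property
    def all_refs(self):
        return [self.ref, *self.other_refs]


class QueueItem:
    def __init__(self, changes):
        self.changes = changes  # a whole dependency cycle is one item

    def getChangeForJob(self, job):
        # A job belongs to the change whose cache key matches its ref.
        for change in self.changes:
            if change.cache_key == job.ref:
                return change
        return None


change_a = Change("gerrit/1000/1")
change_b = Change("gerrit/2000/1")
item = QueueItem([change_a, change_b])
job = FrozenJob(ref="gerrit/2000/1")
assert item.getChangeForJob(job) is change_b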
@@ -5751,7 +5385,7 @@ class QueueItem(zkobject.ZKObject): item = None found = False for item in self.pipeline.getAllItems(): - if item.live and item.change == self.change: + if item.live and set(item.changes) == set(self.changes): found = True break if found: @@ -5780,9 +5414,10 @@ class QueueItem(zkobject.ZKObject): build.result_data, logger=self.log) for a in artifacts: - a.update({'project': self.change.project.name, - 'change': self.change.number, - 'patchset': self.change.patchset, + change = self.getChangeForJob(_job) + a.update({'project': change.project.name, + 'change': change.number, + 'patchset': change.patchset, 'job': build.job.name}) self.log.debug( "Found live artifacts: %s", repr(artifacts)) @@ -5818,86 +5453,6 @@ class QueueItem(zkobject.ZKObject): self.setResult(fakebuild) return False - def findDuplicateBundles(self): - """ - Find other bundles in the pipeline that are equivalent to ours. - """ - if not self.bundle: - return [] - - if len([i for i in self.bundle.items if i.live]) > 1: - # We are in a queue that has multiple live items, so we - # will only check our own bundle. - return [self.bundle] - - ret = [] - for item in self.queue.pipeline.getAllItems(): - if not item.live: - continue - if item is self: - continue - if not item.bundle: - continue - other_bundle_changes = {i.change for i in item.bundle.items} - this_bundle_changes = {i.change for i in self.bundle.items} - if other_bundle_changes != this_bundle_changes: - continue - other_item_queue = {i.change for i in item.queue.queue} - this_item_queue = {i.change for i in self.queue.queue} - if other_item_queue != this_item_queue: - continue - if item.bundle not in ret: - ret.append(item.bundle) - return ret - - def findDuplicateJob(self, job, other_bundles): - """ - If another item in the bundle has a duplicate job, - return the other item - """ - # A note on some of the checks below: - # - # A possible difference between jobs could be the dependent - # job tree under this one. Because that is passed to the job - # as zuul.child_jobs, that could be a different input to the - # job, and therefore produce a different output. However, a - # very common pattern is to build a common artifact in a - # parent job and then do something different with it in a - # child job. The utility of automatic deduplication in that - # case is very compelling, so we do not check child_jobs when - # deduplicating. Users can set deduplicate:false if that - # behavior is important. - # - # Theoretically, it would be okay to deduplicate a job with - # different parents as long as the inputs are the same. But - # that won't happen because the job's dependencies are checked - # in isEqual. Similarly, it would be okay to deduplicate the - # same job with a deduplicated parent as long as the returned - # data are the same. However, in practice, that will never be - # the case since all Zuul jobs return artifacts (the - # manifest), and those will be different. No special handling - # is done here, that is a natural consequence of parent_data - # being different. - - if not self.bundle: - return None - if job.deduplicate is False: - return None - for other_bundle in other_bundles: - for other_item in other_bundle.items: - if other_item is self: - continue - for other_job in other_item.getJobs(): - if other_job.isEqual(job): - if job.deduplicate == 'auto': - # Deduplicate if there are required projects - # or the item project is the same. 
- if (not job.required_projects and - self.change.project != - other_item.change.project): - continue - return other_item - def updateJobParentData(self): job_graph = self.current_build_set.job_graph failed_job_ids = set() # Jobs that run and failed @@ -5911,11 +5466,11 @@ class QueueItem(zkobject.ZKObject): if build.result == 'SUCCESS' or build.paused: pass elif build.result == 'SKIPPED': - ignored_job_ids.add(job._job_id) + ignored_job_ids.add(job.uuid) else: # elif build.result in ('FAILURE', 'CANCELED', ...): - failed_job_ids.add(job._job_id) + failed_job_ids.add(job.uuid) else: - unexecuted_job_ids.add(job._job_id) + unexecuted_job_ids.add(job.uuid) jobs_not_started.add(job) for job in job_graph.getJobs(): @@ -5926,17 +5481,17 @@ class QueueItem(zkobject.ZKObject): all_parent_jobs_successful = True parent_builds_with_data = {} for parent_job in job_graph.getParentJobsRecursively(job): - if parent_job._job_id in unexecuted_job_ids \ - or parent_job._job_id in failed_job_ids: + if parent_job.uuid in unexecuted_job_ids \ + or parent_job.uuid in failed_job_ids: all_parent_jobs_successful = False break parent_build = self.current_build_set.getBuild(parent_job) if parent_build.result_data: - parent_builds_with_data[parent_job._job_id] = parent_build + parent_builds_with_data[parent_job.uuid] = parent_build for parent_job in job_graph.getParentJobsRecursively( job, skip_soft=True): - if parent_job._job_id in ignored_job_ids: + if parent_job.uuid in ignored_job_ids: all_parent_jobs_successful = False break @@ -5956,7 +5511,7 @@ class QueueItem(zkobject.ZKObject): new_artifact_data = job.artifact_data or [] for parent_job in job_graph.getJobs(): parent_build = parent_builds_with_data.get( - parent_job._job_id) + parent_job.uuid) if parent_build: (new_parent_data, new_secret_parent_data, @@ -5982,7 +5537,7 @@ class QueueItem(zkobject.ZKObject): for parent_job in job_graph.getParentJobsRecursively(job): parent_build = self.current_build_set.getBuild(parent_job) if parent_build and parent_build.result_data: - parent_builds_with_data[parent_job._job_id] = parent_build + parent_builds_with_data[parent_job.uuid] = parent_build parent_data = {} secret_parent_data = {} @@ -5994,7 +5549,7 @@ class QueueItem(zkobject.ZKObject): # in sorted config order) and apply parent data of the jobs we # already found. for parent_job in job_graph.getJobs(): - parent_build = parent_builds_with_data.get(parent_job._job_id) + parent_build = parent_builds_with_data.get(parent_job.uuid) if not parent_build: continue (parent_data, secret_parent_data, artifact_data @@ -6005,87 +5560,6 @@ class QueueItem(zkobject.ZKObject): parent_build) return parent_data, secret_parent_data, artifact_data - def deduplicateJobs(self, log): - """Sync node request and build info with deduplicated jobs - - Returns a boolean indicating whether a build was deduplicated. - """ - deduplicated = False - if not self.live: - return False - if not self.current_build_set.job_graph: - return False - if self.item_ahead: - # Only run jobs if any 'hold' jobs on the change ahead - # have completed successfully. 
- if self.item_ahead.isHoldingFollowingChanges(): - return False - - self.updateJobParentData() - - if COMPONENT_REGISTRY.model_api < 8: - return False - - if not self.bundle: - return False - - build_set = self.current_build_set - job_graph = build_set.job_graph - other_bundles = self.findDuplicateBundles() - for job in job_graph.getJobs(): - this_request = build_set.getJobNodeRequestID(job) - this_nodeset = build_set.getJobNodeSetInfo(job) - this_build = build_set.getBuild(job) - - if this_build: - # Nothing more possible for this job - continue - - other_item = self.findDuplicateJob(job, other_bundles) - if not other_item: - continue - other_build_set = other_item.current_build_set - # TODO: this lookup of the other job by name will be - # refactored out as part of the circular dependency - # refactor. - other_job = other_build_set.job_graph.getJobFromName(job.name) - - # Handle node requests - other_request = other_build_set.getJobNodeRequestID(other_job) - if (isinstance(other_request, dict) and - other_request.get('deduplicated_item') == self.uuid): - # We're the original, but we're probably in the middle - # of a retry - return False - if other_request is not None and this_request is None: - log.info("Deduplicating request of bundle job %s for item %s " - "with item %s", job, self, other_item) - build_set.setJobNodeRequestDuplicate(job, other_item) - job._set(_ready_to_run=False) - - # Handle provisioned nodes - other_nodeset = other_build_set.getJobNodeSetInfo(other_job) - if (isinstance(other_nodeset, dict) and - other_nodeset.get('deduplicated_item') == self.uuid): - # We're the original, but we're probably in the middle - # of a retry - return False - if other_nodeset is not None and this_nodeset is None: - log.info("Deduplicating nodeset of bundle job %s for item %s " - "with item %s", job, self, other_item) - build_set.setJobNodeSetInfoDuplicate(job, other_item) - job._set(_ready_to_run=False) - - # Handle builds - other_build = other_build_set.getBuild(other_job) - if other_build and not this_build: - log.info("Deduplicating build of bundle job %s for item %s " - "with item %s", job, self, other_item) - self.addBuild(job, other_build) - job._set(_ready_to_run=False) - deduplicated = True - return deduplicated - def findJobsToRun(self, semaphore_handler): torun = [] if not self.live: @@ -6133,11 +5607,11 @@ class QueueItem(zkobject.ZKObject): if build and (build.result == 'SUCCESS' or build.paused): pass elif build and build.result == 'SKIPPED': - ignored_job_ids.add(job._job_id) + ignored_job_ids.add(job.uuid) elif build and build.result in ('FAILURE', 'CANCELED'): - failed_job_ids.add(job._job_id) + failed_job_ids.add(job.uuid) else: - unexecuted_job_ids.add(job._job_id) + unexecuted_job_ids.add(job.uuid) nodeset = build_set.getJobNodeSetInfo(job) if nodeset is None: req_id = build_set.getJobNodeRequestID(job) @@ -6166,11 +5640,11 @@ class QueueItem(zkobject.ZKObject): all_dep_jobs_successful = True # Every parent job (dependency), whether soft or hard: all_dep_job_ids = set( - [x._job_id for x in + [x.uuid for x in job_graph.getParentJobsRecursively(job)]) # Only the hard deps: hard_dep_job_ids = set( - [x._job_id for x in job_graph.getParentJobsRecursively( + [x.uuid for x in job_graph.getParentJobsRecursively( job, skip_soft=True)]) # Any dep that hasn't finished (or started) running unexecuted_dep_job_ids = unexecuted_job_ids & all_dep_job_ids @@ -6187,7 +5661,7 @@ class QueueItem(zkobject.ZKObject): failed_dep_job_ids | ignored_hard_dep_job_ids) if required_dep_job_ids: 
- deps = [build_set._getJobById(i).name + deps = [self.getJob(i).name for i in required_dep_job_ids] job.setWaitingStatus('dependencies: {}'.format( ', '.join(deps))) @@ -6295,12 +5769,6 @@ class QueueItem(zkobject.ZKObject): dequeued_missing_requirements=True) self._setAllJobsSkipped('Missing pipeline requirements') - def setDequeuedBundleFailing(self, msg): - self.updateAttributes( - self.pipeline.manager.current_context, - dequeued_bundle_failing=True) - self._setMissingJobsSkipped(msg) - def setUnableToMerge(self, errors=None): with self.current_build_set.activeContext( self.pipeline.manager.current_context): @@ -6358,16 +5826,17 @@ class QueueItem(zkobject.ZKObject): tenant=self.pipeline.tenant.name, final=True) - def getNodePriority(self): - return self.pipeline.manager.getNodePriority(self) - def formatUrlPattern(self, url_pattern, job=None, build=None): url = None # Produce safe versions of objects which may be useful in # result formatting, but don't allow users to crawl through # the entire data structure where they might be able to access # secrets, etc. - safe_change = self.change.getSafeAttributes() + if job: + change = self.getChangeForJob(job) + safe_change = change.getSafeAttributes() + else: + safe_change = self.changes[0].getSafeAttributes() safe_pipeline = self.pipeline.getSafeAttributes() safe_tenant = self.pipeline.tenant.getSafeAttributes() safe_buildset = self.current_build_set.getSafeAttributes() @@ -6437,37 +5906,43 @@ class QueueItem(zkobject.ZKObject): ret = {} ret['active'] = self.active ret['live'] = self.live - if hasattr(self.change, 'url') and self.change.url is not None: - ret['url'] = self.change.url - else: - ret['url'] = None - if hasattr(self.change, 'ref') and self.change.ref is not None: - ret['ref'] = self.change.ref - else: - ret['ref'] = None - ret['id'] = self.change._id() + changes = [] + for change in self.changes: + ret_change = {} + if hasattr(change, 'url') and change.url is not None: + ret_change['url'] = change.url + else: + ret_change['url'] = None + if hasattr(change, 'ref') and change.ref is not None: + ret_change['ref'] = change.ref + else: + ret_change['ref'] = None + if change.project: + ret_change['project'] = change.project.name + ret_change['project_canonical'] = change.project.canonical_name + else: + # For cross-project dependencies with the depends-on + # project not known to zuul, the project is None + # Set it to a static value + ret_change['project'] = "Unknown Project" + ret_change['project_canonical'] = "Unknown Project" + if hasattr(change, 'owner'): + ret_change['owner'] = change.owner + else: + ret_change['owner'] = None + ret_change['id'] = change._id() + changes.append(ret_change) + ret['id'] = self.uuid + ret['changes'] = changes if self.item_ahead: - ret['item_ahead'] = self.item_ahead.change._id() + ret['item_ahead'] = self.item_ahead.uuid else: ret['item_ahead'] = None - ret['items_behind'] = [i.change._id() for i in self.items_behind] + ret['items_behind'] = [i.uuid for i in self.items_behind] ret['failing_reasons'] = self.current_build_set.failing_reasons ret['zuul_ref'] = self.current_build_set.ref - if self.change.project: - ret['project'] = self.change.project.name - ret['project_canonical'] = self.change.project.canonical_name - else: - # For cross-project dependencies with the depends-on - # project not known to zuul, the project is None - # Set it to a static value - ret['project'] = "Unknown Project" - ret['project_canonical'] = "Unknown Project" ret['enqueue_time'] = int(self.enqueue_time * 1000) 
ret['jobs'] = [] - if hasattr(self.change, 'owner'): - ret['owner'] = self.change.owner - else: - ret['owner'] = None max_remaining = 0 for job in self.getJobs(): now = time.time() @@ -6538,21 +6013,12 @@ ret['remaining_time'] = None return ret - def formatStatus(self, indent=0, html=False): + def formatStatus(self, indent=0): indent_str = ' ' * indent - ret = '' - if html and getattr(self.change, 'url', None) is not None: - ret += '%sProject %s change <a href="%s">%s</a>\n' % ( - indent_str, - self.change.project.name, - self.change.url, - self.change._id()) - else: - ret += '%sProject %s change %s based on %s\n' % ( - indent_str, - self.change.project.name, - self.change._id(), - self.item_ahead) + ret = '%s%s based on %s\n' % ( + indent_str, + self, + self.item_ahead) for job in self.getJobs(): build = self.current_build_set.getBuild(job) if build: @@ -6564,18 +6030,11 @@ voting = ' (non-voting)' else: voting = '' - if html: - if build: - url = build.url - else: - url = None - if url is not None: - job_name = '<a href="%s">%s</a>' % (url, job_name) ret += '%s %s: %s%s' % (indent_str, job_name, result, voting) ret += '\n' return ret - def makeMergerItem(self): + def makeMergerItem(self, change): # Create a dictionary with all info about the item needed by # the merger. number = None @@ -6583,23 +6042,23 @@ oldrev = None newrev = None branch = None - if hasattr(self.change, 'number'): - number = self.change.number - patchset = self.change.patchset - if hasattr(self.change, 'newrev'): - oldrev = self.change.oldrev - newrev = self.change.newrev - if hasattr(self.change, 'branch'): - branch = self.change.branch + if hasattr(change, 'number'): + number = change.number + patchset = change.patchset + if hasattr(change, 'newrev'): + oldrev = change.oldrev + newrev = change.newrev + if hasattr(change, 'branch'): + branch = change.branch - source = self.change.project.source + source = change.project.source connection_name = source.connection.connection_name - project = self.change.project + project = change.project return dict(project=project.name, connection=connection_name, - merge_mode=self.current_build_set.getMergeMode(), - ref=self.change.ref, + merge_mode=self.current_build_set.getMergeMode(change), + ref=change.ref, branch=branch, buildset_uuid=self.current_build_set.uuid, number=number, @@ -6608,7 +6067,7 @@ newrev=newrev, ) - def updatesJobConfig(self, job, layout): + def updatesJobConfig(self, job, change, layout): log = self.annotateLogger(self.log) layout_ahead = None if self.pipeline.manager: @@ -6618,15 +6077,14 @@ # would be if the layout had not changed. if self.current_build_set._old_job_graph is None: try: - ppc = layout_ahead.getProjectPipelineConfig(self) log.debug("Creating job graph for config change detection") + results = layout_ahead.createJobGraph( + None, self, + skip_file_matcher=True, + redact_secrets_and_keys=False, + old=True) self.current_build_set._set( - _old_job_graph=layout_ahead.createJobGraph( - None, self, ppc, - skip_file_matcher=True, - redact_secrets_and_keys=False, - debug_messages=None, - old=True)) + _old_job_graph=results['job_graph']) log.debug("Done creating job graph for " "config change detection") except Exception: @@ -6637,8 +6095,8 @@ # which jobs have changed, so rather than run them # all, just rely on the file matchers as-is.
return False - old_job = self.current_build_set._old_job_graph.getJobFromName( - job.name) + old_job = self.current_build_set._old_job_graph.getJob( + job.name, change.cache_key) if old_job is None: log.debug("Found a newly created job") return True # A newly created job @@ -6663,48 +6121,6 @@ class QueueItem(zkobject.ZKObject): return keys -class Bundle: - """Identifies a collection of changes that must be treated as one unit.""" - - def __init__(self, uuid=None): - self.uuid = uuid or uuid4().hex - self.items = [] - self.started_reporting = False - self.failed_reporting = False - self.cannot_merge = None - - def __repr__(self): - return '= 25): - d['job_uuid'] = self.job_uuid return d @classmethod def fromDict(cls, data): return cls( data.get("build_uuid"), data.get("build_set_uuid"), - data.get("job_name"), data.get("job_uuid"), + data.get("job_uuid"), data.get("build_request_ref"), data.get("data"), data.get("zuul_event_id")) def __repr__(self): return ( f"<{self.__class__.__name__} build={self.build_uuid} " - f"job={self.job_name}>" + f"job={self.job_uuid}>" ) @@ -7576,7 +6981,6 @@ class NodesProvisionedEvent(ResultEvent): """Nodes have been provisioned for a build_set :arg int request_id: The id of the fulfilled node request. - :arg str job_name: The name of the job this node request belongs to. :arg str build_set_uuid: UUID of the buildset this node request belongs to """ @@ -8649,7 +8053,7 @@ class Layout(object): return self.project_metadata[name] return None - def getProjectPipelineConfig(self, item): + def getProjectPipelineConfig(self, item, change): log = item.annotateLogger(self.log) # Create a project-pipeline config for the given item, taking # its branch (if any) into consideration. If the project does @@ -8662,8 +8066,8 @@ class Layout(object): # item). 
ppc = ProjectPipelineConfig() project_in_pipeline = False - for pc in self.getProjectConfigs(item.change.project.canonical_name): - if not pc.changeMatches(item.change): + for pc in self.getProjectConfigs(change.project.canonical_name): + if not pc.changeMatches(change): msg = "Project %s did not match" % (pc,) ppc.addDebug(msg) log.debug("%s item %s", msg, item) @@ -8676,7 +8080,7 @@ class Layout(object): for template in templates: template_ppc = template.pipelines.get(item.pipeline.name) if template_ppc: - if not template.changeMatches(item.change): + if not template.changeMatches(change): msg = "Project template %s did not match" % ( template,) ppc.addDebug(msg) @@ -8803,18 +8207,17 @@ class Layout(object): raise NoMatchingParentError() return jobs - def _createJobGraph(self, context, item, ppc, job_graph, - skip_file_matcher, redact_secrets_and_keys, - debug_messages): + def extendJobGraph(self, context, item, change, ppc, job_graph, + skip_file_matcher, redact_secrets_and_keys, + debug_messages): log = item.annotateLogger(self.log) semaphore_handler = item.pipeline.tenant.semaphore_handler job_list = ppc.job_list - change = item.change pipeline = item.pipeline add_debug_line(debug_messages, "Freezing job graph") for jobname in job_list.jobs: # This is the final job we are constructing - frozen_job = None + final_job = None log.debug("Collecting jobs %s for %s", jobname, change) add_debug_line(debug_messages, "Freezing job {jobname}".format( @@ -8843,22 +8246,22 @@ class Layout(object): jobname=jobname), indent=2) continue for variant in variants: - if frozen_job is None: - frozen_job = variant.copy() - frozen_job.setBase(self, semaphore_handler) + if final_job is None: + final_job = variant.copy() + final_job.setBase(self, semaphore_handler) else: - frozen_job.applyVariant(variant, self, semaphore_handler) - frozen_job.name = variant.name - frozen_job.name = jobname + final_job.applyVariant(variant, self, semaphore_handler) + final_job.name = variant.name + final_job.name = jobname # Now merge variables set from this parent ppc # (i.e. project+templates) directly into the job vars - frozen_job.updateProjectVariables(ppc.variables) + final_job.updateProjectVariables(ppc.variables) # If the job does not specify an ansible version default to the # tenant default. 
                 continue
             for variant in variants:
-                if frozen_job is None:
-                    frozen_job = variant.copy()
-                    frozen_job.setBase(self, semaphore_handler)
+                if final_job is None:
+                    final_job = variant.copy()
+                    final_job.setBase(self, semaphore_handler)
                 else:
-                    frozen_job.applyVariant(variant, self, semaphore_handler)
-                frozen_job.name = variant.name
-            frozen_job.name = jobname
+                    final_job.applyVariant(variant, self, semaphore_handler)
+                final_job.name = variant.name
+            final_job.name = jobname
 
             # Now merge variables set from this parent ppc
             # (i.e. project+templates) directly into the job vars
-            frozen_job.updateProjectVariables(ppc.variables)
+            final_job.updateProjectVariables(ppc.variables)
 
             # If the job does not specify an ansible version default to the
             # tenant default.
-            if not frozen_job.ansible_version:
-                frozen_job.ansible_version = \
+            if not final_job.ansible_version:
+                final_job.ansible_version = \
                     self.tenant.default_ansible_version
 
             log.debug("Froze job %s for %s", jobname, change)
@@ -8867,7 +8270,7 @@
             matched = False
             for variant in job_list.jobs[jobname]:
                 if variant.changeMatchesBranch(change):
-                    frozen_job.applyVariant(variant, self, semaphore_handler)
+                    final_job.applyVariant(variant, self, semaphore_handler)
                     matched = True
                     log.debug("Pipeline variant %s matched %s",
                               repr(variant), change)
@@ -8889,11 +8292,11 @@
                 continue
             updates_job_config = False
             if not skip_file_matcher and \
-               not frozen_job.changeMatchesFiles(change):
+               not final_job.changeMatchesFiles(change):
                 matched_files = False
-                if frozen_job.match_on_config_updates:
+                if final_job.match_on_config_updates:
                     updates_job_config = item.updatesJobConfig(
-                        frozen_job, self)
+                        final_job, change, self)
             else:
                 matched_files = True
             if not matched_files:
@@ -8901,43 +8304,49 @@
                     # Log the reason we're ignoring the file matcher
                     log.debug("The configuration of job %s is "
                               "changed by %s; ignoring file matcher",
-                              repr(frozen_job), change)
+                              repr(final_job), change)
                     add_debug_line(debug_messages,
                                    "The configuration of job {jobname} is "
                                    "changed; ignoring file matcher".
                                    format(jobname=jobname), indent=2)
                 else:
                     log.debug("Job %s did not match files in %s",
-                              repr(frozen_job), change)
+                              repr(final_job), change)
                     add_debug_line(debug_messages,
                                    "Job {jobname} did not match files".
                                    format(jobname=jobname), indent=2)
                     continue
-            if frozen_job.abstract:
+            if final_job.abstract:
                 raise Exception("Job %s is abstract and may not be "
                                 "directly run" %
-                                (frozen_job.name,))
-            if (not frozen_job.ignore_allowed_projects and
-                frozen_job.allowed_projects is not None and
-                change.project.name not in frozen_job.allowed_projects):
+                                (final_job.name,))
+            if (not final_job.ignore_allowed_projects and
+                final_job.allowed_projects is not None and
+                change.project.name not in final_job.allowed_projects):
                 raise Exception("Project %s is not allowed to run job %s" %
-                                (change.project.name, frozen_job.name))
-            if ((not pipeline.post_review) and frozen_job.post_review):
+                                (change.project.name, final_job.name))
+            if ((not pipeline.post_review) and final_job.post_review):
                 raise Exception("Pre-review pipeline %s does not allow "
                                 "post-review job %s" % (
-                                    pipeline.name, frozen_job.name))
-            if not frozen_job.run:
+                                    pipeline.name, final_job.name))
+            if not final_job.run:
                 raise Exception("Job %s does not specify a run playbook" % (
-                    frozen_job.name,))
+                    final_job.name,))
 
-            job_graph.addJob(frozen_job.freezeJob(
-                context, self.tenant, self, item,
+            job_graph.addJob(final_job.freezeJob(
+                context, self.tenant, self, item, change,
                 redact_secrets_and_keys))
 
-    def createJobGraph(self, context, item, ppc, skip_file_matcher,
-                       redact_secrets_and_keys, debug_messages, old=False):
-        # NOTE(pabelanger): It is possible for a foreign project not to have a
-        # configured pipeline, if so return an empty JobGraph.
+    def createJobGraph(self, context, item,
+                       skip_file_matcher,
+                       redact_secrets_and_keys,
+                       old=False):
+        """Find or create actual matching jobs for this item's changes and
+        store the resulting job tree."""
+
+        enable_debug = False
+        fail_fast = item.current_build_set.fail_fast
+        debug_messages = []
         if old:
             job_map = item.current_build_set._old_jobs
             if context is not None:
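The file-matcher handling above is easy to misread in diff form: a job whose files matcher does not match still runs when the change modifies that job's own configuration. A plain-Python restatement of the decision (the helper names here are invented for illustration):

    def job_matches_files(job, change):
        # Invented helper: stands in for Job.changeMatchesFiles().
        return any(f.startswith(job["files"]) for f in change["files"])

    def should_run(job, change, skip_file_matcher, updates_job_config):
        # skip_file_matcher is set, e.g., by the web UI's freeze-jobs view.
        if skip_file_matcher:
            return True
        if job_matches_files(job, change):
            return True
        # A change that alters this job's own configuration still
        # triggers the job even when its file matcher does not match.
        return job["match_on_config_updates"] and updates_job_config

    job = {"files": "src/", "match_on_config_updates": True}
    assert should_run(job, {"files": ["docs/index.rst"]}, False, True)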
@@ -8946,12 +8355,41 @@
         else:
             job_map = item.current_build_set.jobs
         job_graph = JobGraph(job_map)
-        if ppc:
-            self._createJobGraph(context, item, ppc, job_graph,
-                                 skip_file_matcher, redact_secrets_and_keys,
-                                 debug_messages)
+        for change in item.changes:
+            ppc = self.getProjectPipelineConfig(item, change)
+            if not ppc:
+                continue
+            if ppc.debug:
+                # Any ppc that sets debug=True enables debugging
+                enable_debug = True
+                debug_messages.extend(ppc.debug_messages)
+            self.extendJobGraph(
+                context, item, change, ppc, job_graph, skip_file_matcher,
+                redact_secrets_and_keys, debug_messages)
+            if ppc.fail_fast is not None:
+                # Any explicit setting of fail_fast takes effect,
+                # last one wins.
+                fail_fast = ppc.fail_fast
+
+        job_graph.deduplicateJobs(self.log)
         job_graph.freezeDependencies(self)
-        return job_graph
+
+        # Copy project metadata to job_graph since this must be independent
+        # of the layout as we need it in order to prepare the context for
+        # job execution.
+        # The layout might be no longer available at this point, as the
+        # scheduler submitting the job can be different from the one that
+        # created the layout.
+        job_graph.project_metadata = self.project_metadata
+
+        if not enable_debug:
+            debug_messages = item.current_build_set.debug_messages
+
+        return dict(
+            debug_messages=debug_messages,
+            fail_fast=fail_fast,
+            job_graph=job_graph,
+        )
 
 
 class Semaphore(ConfigObject):
diff --git a/zuul/model_api.py b/zuul/model_api.py
index 54d75eebaa..24f7a91c59 100644
--- a/zuul/model_api.py
+++ b/zuul/model_api.py
@@ -14,4 +14,4 @@
 
 # When making ZK schema changes, increment this and add a record to
 # doc/source/developer/model-changelog.rst
-MODEL_API = 25
+MODEL_API = 26
diff --git a/zuul/nodepool.py b/zuul/nodepool.py
index d31c1c2a5e..1c8894eded 100644
--- a/zuul/nodepool.py
+++ b/zuul/nodepool.py
@@ -191,7 +191,7 @@ class Nodepool(object):
         else:
             event_id = None
         req = model.NodeRequest(self.system_id, build_set_uuid, tenant_name,
-                                pipeline_name, job.name, job._job_id, labels,
+                                pipeline_name, job.uuid, labels,
                                 provider, relative_priority, event_id)
 
         if job.nodeset.nodes:
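The NodeRequest above drops job.name and job._job_id in favor of the frozen job's uuid. With one item carrying several changes, two frozen jobs may legitimately share a name, so only the uuid is a collision-free request key. A toy demonstration (plain Python, not Zuul's NodeRequest class):

    import uuid

    class FrozenJobSketch:
        def __init__(self, name):
            self.name = name
            self.uuid = uuid.uuid4().hex   # unique per frozen job

    # Two changes in one item can each freeze a job named "unit-tests".
    jobs = [FrozenJobSketch("unit-tests"), FrozenJobSketch("unit-tests")]
    requests = {job.uuid: "node-request" for job in jobs}
    assert len(requests) == 2   # keying by job.name would have collided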
diff --git a/zuul/reporter/__init__.py b/zuul/reporter/__init__.py
index 4c18edffc7..fa0dc81127 100644
--- a/zuul/reporter/__init__.py
+++ b/zuul/reporter/__init__.py
@@ -1,4 +1,5 @@
 # Copyright 2014 Rackspace Australia
+# Copyright 2021-2024 Acme Gating, LLC
 #
 # Licensed under the Apache License, Version 2.0 (the "License"); you may
 # not use this file except in compliance with the License. You may obtain
@@ -60,13 +61,14 @@ class BaseReporter(object, metaclass=abc.ABCMeta):
     def postConfig(self):
         """Run tasks after configuration is reloaded"""
 
-    def addConfigurationErrorComments(self, item, comments):
+    def addConfigurationErrorComments(self, item, change, comments):
         """Add file comments for configuration errors.
 
         Updates the comments dictionary with additional file comments
-        for any relevant configuration errors for this item's change.
+        for any relevant configuration errors for the specified change.
 
         :arg QueueItem item: The queue item
+        :arg Ref change: One of the item's changes to check
         :arg dict comments: a file comments dictionary
 
         """
@@ -77,13 +79,13 @@ class BaseReporter(object, metaclass=abc.ABCMeta):
             if not (context and mark and err.short_error):
                 continue
             if context.project_canonical_name != \
-               item.change.project.canonical_name:
+               change.project.canonical_name:
                 continue
-            if not hasattr(item.change, 'branch'):
+            if not hasattr(change, 'branch'):
                 continue
-            if context.branch != item.change.branch:
+            if context.branch != change.branch:
                 continue
-            if context.path not in item.change.files:
+            if context.path not in change.files:
                 continue
             existing_comments = comments.setdefault(context.path, [])
             existing_comments.append(dict(line=mark.end_line,
@@ -94,36 +96,40 @@ class BaseReporter(object, metaclass=abc.ABCMeta):
                                           end_line=mark.end_line,
                                           end_character=mark.end_column)))
 
-    def _getFileComments(self, item):
+    def _getFileComments(self, item, change):
         """Get the file comments from the zuul_return value"""
         ret = {}
         for build in item.current_build_set.getBuilds():
             fc = build.result_data.get("zuul", {}).get("file_comments")
             if not fc:
                 continue
+            # Only consider comments for this change
+            if change.cache_key not in build.job.all_refs:
+                continue
             for fn, comments in fc.items():
                 existing_comments = ret.setdefault(fn, [])
                 existing_comments.extend(comments)
-        self.addConfigurationErrorComments(item, ret)
+        self.addConfigurationErrorComments(item, change, ret)
         return ret
 
-    def getFileComments(self, item):
-        comments = self._getFileComments(item)
-        self.filterComments(item, comments)
+    def getFileComments(self, item, change):
+        comments = self._getFileComments(item, change)
+        self.filterComments(item, change, comments)
         return comments
 
-    def filterComments(self, item, comments):
+    def filterComments(self, item, change, comments):
         """Filter comments for files in change
 
         Remove any comments for files which do not appear in the
-        item's change.  Leave warning messages if this happens.
+        specified change.  Leave warning messages if this happens.
 
         :arg QueueItem item: The queue item
+        :arg Change change: The change
         :arg dict comments: a file comments dictionary (modified in place)
 
         """
         for fn in list(comments.keys()):
-            if fn not in item.change.files:
+            if fn not in change.files:
                 del comments[fn]
                 item.warning("Comments left for invalid file %s" % (fn,))
@@ -172,7 +178,8 @@ class BaseReporter(object, metaclass=abc.ABCMeta):
 
         return item.pipeline.enqueue_message.format(
             pipeline=item.pipeline.getSafeAttributes(),
-            change=item.change.getSafeAttributes(),
+            change=item.changes[0].getSafeAttributes(),
+            changes=[c.getSafeAttributes() for c in item.changes],
             status_url=status_url)
 
     def _formatItemReportStart(self, item, with_jobs=True):
@@ -182,7 +189,8 @@ class BaseReporter(object, metaclass=abc.ABCMeta):
 
         return item.pipeline.start_message.format(
             pipeline=item.pipeline.getSafeAttributes(),
-            change=item.change.getSafeAttributes(),
+            change=item.changes[0].getSafeAttributes(),
+            changes=[c.getSafeAttributes() for c in item.changes],
             status_url=status_url)
 
     def _formatItemReportSuccess(self, item, with_jobs=True):
@@ -195,23 +203,23 @@ class BaseReporter(object, metaclass=abc.ABCMeta):
         return msg
 
     def _formatItemReportFailure(self, item, with_jobs=True):
-        if item.cannotMergeBundle():
-            msg = 'This change is part of a bundle that can not merge.\n'
-            if isinstance(item.bundle.cannot_merge, str):
-                msg += '\n' + item.bundle.cannot_merge + '\n'
-        elif item.dequeued_needing_change:
-            msg = 'This change depends on a change that failed to merge.\n'
+        if len(item.changes) > 1:
+            change_text = 'These changes'
+        else:
+            change_text = 'This change'
+        if item.dequeued_needing_change:
+            msg = f'{change_text} depends on a change that failed to merge.\n'
             if isinstance(item.dequeued_needing_change, str):
                 msg += '\n' + item.dequeued_needing_change + '\n'
         elif item.dequeued_missing_requirements:
-            msg = ('This change is unable to merge '
+            msg = (f'{change_text} is unable to merge '
                    'due to a missing merge requirement.\n')
-        elif item.isBundleFailing():
-            msg = 'This change is part of a bundle that failed.\n'
+        elif len(item.changes) > 1:
+            msg = f'{change_text} is part of a dependency cycle that failed.\n'
             if with_jobs:
                 msg = '{}\n\n{}'.format(msg, self._formatItemReportJobs(item))
             msg = "{}\n\n{}".format(
-                msg, self._formatItemReportOtherBundleItems(item))
+                msg, self._formatItemReportOtherChanges(item))
         elif item.didMergerFail():
             msg = item.pipeline.merge_conflict_message
         elif item.current_build_set.has_blocking_errors:
@@ -247,7 +255,8 @@ class BaseReporter(object, metaclass=abc.ABCMeta):
 
         return item.pipeline.no_jobs_message.format(
             pipeline=item.pipeline.getSafeAttributes(),
-            change=item.change.getSafeAttributes(),
+            change=item.changes[0].getSafeAttributes(),
+            changes=[c.getSafeAttributes() for c in item.changes],
             status_url=status_url)
 
     def _formatItemReportDisabled(self, item, with_jobs=True):
@@ -264,13 +273,9 @@ class BaseReporter(object, metaclass=abc.ABCMeta):
             msg += '\n\n' + self._formatItemReportJobs(item)
         return msg
 
-    def _formatItemReportOtherBundleItems(self, item):
-        related_changes = item.pipeline.manager.resolveChangeReferences(
-            item.change.getNeedsChanges(
-                item.pipeline.manager.useDependenciesByTopic(
-                    item.change.project)))
+    def _formatItemReportOtherChanges(self, item):
         return "Related changes:\n{}\n".format("\n".join(
-            f'  - {c.url}' for c in related_changes if c is not item.change))
+            f'  - {c.url}' for c in item.changes))
 
     def _getItemReportJobsFields(self, item):
         # Extract the report elements from an item
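These reporter changes are what the deprecation notes in this release describe: {change} now refers to the arbitrary first change of the item, while {changes} carries the whole list. A quick str.format illustration using dict stand-ins for the getSafeAttributes() results (the real objects also support attribute access):

    changes = [{"number": 1000, "patchset": "1"},
               {"number": 1001, "patchset": "1"}]
    # Deprecated form: names only the (arbitrary) first change.
    legacy = "Build started for {change[number]},{change[patchset]}."
    print(legacy.format(change=changes[0], changes=changes))
    # New form: the whole dependency cycle is available.
    current = "Build started for {changes}."
    print(current.format(change=changes[0], changes=changes))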
diff --git a/zuul/scheduler.py b/zuul/scheduler.py
index a0ed78e571..c0ba56c972 100644
--- a/zuul/scheduler.py
+++ b/zuul/scheduler.py
@@ -905,13 +905,15 @@ class Scheduler(threading.Thread):
         try:
             if self.statsd and build.pipeline:
                 tenant = build.pipeline.tenant
-                jobname = build.job.name.replace('.', '_').replace('/', '_')
-                hostname = (build.build_set.item.change.project.
+                item = build.build_set.item
+                job = build.job
+                change = item.getChangeForJob(job)
+                jobname = job.name.replace('.', '_').replace('/', '_')
+                hostname = (change.project.
                             canonical_hostname.replace('.', '_'))
-                projectname = (build.build_set.item.change.project.name.
+                projectname = (change.project.name.
                                replace('.', '_').replace('/', '_'))
-                branchname = (getattr(build.build_set.item.change,
-                              'branch', '').
+                branchname = (getattr(change, 'branch', '').
                               replace('.', '_').replace('/', '_'))
                 basekey = 'zuul.tenant.%s' % tenant.name
                 pipekey = '%s.pipeline.%s' % (basekey, build.pipeline.name)
@@ -1611,8 +1613,8 @@ class Scheduler(threading.Thread):
             log.info("Tenant reconfiguration complete for %s (duration: %s "
                      "seconds)", event.tenant_name, duration)
 
-    def _reenqueueGetProject(self, tenant, item):
-        project = item.change.project
+    def _reenqueueGetProject(self, tenant, item, change):
+        project = change.project
         # Attempt to get the same project as the one passed in.  If
         # the project is now found on a different connection or if it
         # is no longer available (due to a connection being removed),
@@ -1644,12 +1646,13 @@ class Scheduler(threading.Thread):
             if child is item:
                 return None
             if child and child.live:
-                (child_trusted, child_project) = tenant.getProject(
-                    child.change.project.canonical_name)
-                if child_project:
-                    source = child_project.source
-                    new_project = source.getProject(project.name)
-                    return new_project
+                for child_change in child.changes:
+                    (child_trusted, child_project) = tenant.getProject(
+                        child_change.project.canonical_name)
+                    if child_project:
+                        source = child_project.source
+                        new_project = source.getProject(project.name)
+                        return new_project
         return None
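The statsd hunk above shows the general pattern for build-to-change resolution after this refactor: a build's change is looked up through its job with item.getChangeForJob(job) rather than read from a single item.change. A toy model of that mapping (getChangeForJob is the real method name; the data layout below is invented):

    class ItemSketch:
        def __init__(self, changes):
            # Index the item's changes by their cache keys.
            self._by_key = {c["cache_key"]: c for c in changes}
            self.changes = changes

        def getChangeForJob(self, job):
            # Each frozen job records the refs (changes) it runs for.
            for key in job["all_refs"]:
                if key in self._by_key:
                    return self._by_key[key]
            return None

    item = ItemSketch([{"cache_key": "k1", "project": "org/a"},
                       {"cache_key": "k2", "project": "org/b"}])
    assert item.getChangeForJob({"all_refs": ["k2"]})["project"] == "org/b"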
@@ -1679,8 +1682,8 @@
                 for item in shared_queue.queue:
                     # If the old item ahead made it in, re-enqueue
                     # this one behind it.
-                    new_project = self._reenqueueGetProject(
-                        tenant, item)
+                    new_projects = [self._reenqueueGetProject(
+                        tenant, item, change) for change in item.changes]
                     if item.item_ahead in items_to_remove:
                         old_item_ahead = None
                         item_ahead_valid = False
@@ -1691,8 +1694,9 @@
                         item.item_ahead = None
                         item.items_behind = []
                         reenqueued = False
-                        if new_project:
-                            item.change.project = new_project
+                        if all(new_projects):
+                            for change_index, change in enumerate(item.changes):
+                                change.project = new_projects[change_index]
                             item.queue = None
                             if not old_item_ahead or not last_head:
                                 last_head = item
@@ -1945,12 +1949,13 @@
                 for item in shared_queue.queue:
                     if not item.live:
                         continue
-                    if (item.change.number == number and
-                            item.change.patchset == patchset):
-                        promote_operations.setdefault(
-                            shared_queue, []).append(item)
-                        found = True
-                        break
+                    for item_change in item.changes:
+                        if (item_change.number == number and
+                                item_change.patchset == patchset):
+                            promote_operations.setdefault(
+                                shared_queue, []).append(item)
+                            found = True
+                            break
                     if found:
                         break
                 if not found:
@@ -1981,11 +1986,12 @@
                 pipeline.manager.dequeueItem(item)
 
             for item in items_to_enqueue:
-                pipeline.manager.addChange(
-                    item.change, item.event,
-                    enqueue_time=item.enqueue_time,
-                    quiet=True,
-                    ignore_requirements=True)
+                for item_change in item.changes:
+                    pipeline.manager.addChange(
+                        item_change, item.event,
+                        enqueue_time=item.enqueue_time,
+                        quiet=True,
+                        ignore_requirements=True)
             # Regardless, move this shared change queue to the head.
             pipeline.promoteQueue(change_queue)
 
@@ -2011,14 +2017,15 @@
                           % (item, project.name))
         for shared_queue in pipeline.queues:
             for item in shared_queue.queue:
-                if item.change.project != change.project:
-                    continue
-                if (isinstance(item.change, Change) and
-                    item.change.number == change.number and
-                    item.change.patchset == change.patchset) or\
-                   (item.change.ref == change.ref):
-                    pipeline.manager.removeItem(item)
-                    return
+                for item_change in item.changes:
+                    if item_change.project != change.project:
+                        continue
+                    if (isinstance(item_change, Change) and
+                        item_change.number == change.number and
+                        item_change.patchset == change.patchset) or\
+                       (item_change.ref == change.ref):
+                        pipeline.manager.removeItem(item)
+                        return
         raise Exception("Unable to find shared change queue for %s:%s" %
                         (event.project_name,
                          event.change or event.ref))
@@ -2059,18 +2066,19 @@
         change = project.source.getChange(change_key, event=event)
         for shared_queue in pipeline.queues:
            for item in shared_queue.queue:
-                if item.change.project != change.project:
-                    continue
                 if not item.live:
                     continue
-                if ((isinstance(item.change, Change)
-                     and item.change.number == change.number
-                     and item.change.patchset == change.patchset
-                     ) or (item.change.ref == change.ref)):
-                    log = get_annotated_logger(self.log, item.event)
-                    log.info("Item %s is superceded, dequeuing", item)
-                    pipeline.manager.removeItem(item)
-                    return
+                for item_change in item.changes:
+                    if item_change.project != change.project:
+                        continue
+                    if ((isinstance(item_change, Change)
+                         and item_change.number == change.number
+                         and item_change.patchset == change.patchset
+                         ) or (item_change.ref == change.ref)):
+                        log = get_annotated_logger(self.log, item.event)
+                        log.info("Item %s is superseded, dequeuing", item)
+                        pipeline.manager.removeItem(item)
+                        return
 
     def _doSemaphoreReleaseEvent(self, event, pipeline):
         tenant = pipeline.tenant
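The promote, dequeue, and supersede paths above all repeat the same scan: an event matches an item if it matches any of the item's changes. A minimal standalone version of that predicate, with dicts standing in for Change/Ref objects (the real code also branches on the Change class):

    def item_matches(item_changes, target):
        for item_change in item_changes:
            if item_change.get("project") != target.get("project"):
                continue
            if (target.get("number") is not None
                    and item_change.get("number") == target.get("number")
                    and item_change.get("patchset") == target.get("patchset")):
                return True
            if (target.get("ref") is not None
                    and item_change.get("ref") == target.get("ref")):
                return True
        return False

    cycle = [{"project": "org/a", "number": 1000, "patchset": "1"},
             {"project": "org/b", "number": 1001, "patchset": "1"}]
    assert item_matches(cycle, {"project": "org/b", "number": 1001,
                                "patchset": "1"})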
@@ -2791,7 +2799,7 @@ class Scheduler(threading.Thread):
         if not build_set:
             return
 
-        job = build_set.item.getJob(event._job_id)
+        job = build_set.item.getJob(event.job_uuid)
         build = build_set.getBuild(job)
         # Verify that the build uuid matches the one of the result
         if not build:
@@ -2824,12 +2832,14 @@
         log = get_annotated_logger(
             self.log, build.zuul_event_id, build=build.uuid)
         try:
-            change = build.build_set.item.change
+            item = build.build_set.item
+            job = build.job
+            change = item.getChangeForJob(job)
             estimate = self.times.getEstimatedTime(
                 pipeline.tenant.name,
                 change.project.name,
                 getattr(change, 'branch', None),
-                build.job.name)
+                job.name)
             if not estimate:
                 estimate = 0.0
             build.estimated_time = estimate
@@ -2884,11 +2894,8 @@
             # resources.
             build = Build()
             job = DummyFrozenJob()
-            job.name = event.job_name
             job.uuid = event.job_uuid
             job.provides = []
-            # MODEL_API < 25
-            job._job_id = job.uuid or job.name
             build._set(
                 job=job,
                 uuid=event.build_uuid,
@@ -2997,8 +3004,7 @@
             # In case the build didn't show up on any executor, the node
             # request does still exist, so we have to make sure it is
             # removed from ZK.
-            request_id = build.build_set.getJobNodeRequestID(
-                build.job, ignore_deduplicate=True)
+            request_id = build.build_set.getJobNodeRequestID(build.job)
             if request_id:
                 self.nodepool.deleteNodeRequest(
                     request_id, event_id=build.zuul_event_id)
@@ -3058,11 +3064,11 @@
             return
 
         log = get_annotated_logger(self.log, request.event_id)
-        job = build_set.item.getJob(request._job_id)
+        job = build_set.item.getJob(request.job_uuid)
         if job is None:
             log.warning("Item %s does not contain job %s "
                         "for node request %s",
-                        build_set.item, request._job_id, request)
+                        build_set.item, request.job_uuid, request)
             return
 
         # If the request failed, we must directly delete it as the nodes will
@@ -3073,7 +3079,7 @@
 
         nodeset = self.nodepool.getNodeSet(request, job.nodeset)
 
-        job = build_set.item.getJob(request._job_id)
+        job = build_set.item.getJob(request.job_uuid)
         if build_set.getJobNodeSetInfo(job) is None:
             pipeline.manager.onNodesProvisioned(request, nodeset, build_set)
         else:
@@ -3111,8 +3117,8 @@
                     self.executor.cancel(build)
                 except Exception:
                     log.exception(
-                        "Exception while canceling build %s for change %s",
-                        build, item.change)
+                        "Exception while canceling build %s for %s",
+                        build, item)
 
             # In the unlikely case that a build is removed and
             # later added back, make sure we clear out the nodeset
diff --git a/zuul/web/__init__.py b/zuul/web/__init__.py
index a0c175a643..21b4b565cf 100755
--- a/zuul/web/__init__.py
+++ b/zuul/web/__init__.py
@@ -1,5 +1,5 @@
 # Copyright (c) 2017 Red Hat
-# Copyright 2021-2023 Acme Gating, LLC
+# Copyright 2021-2024 Acme Gating, LLC
 #
 # Licensed under the Apache License, Version 2.0 (the "License");
 # you may not use this file except in compliance with the License.
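With MODEL_API 26 the scheduler hunks above address builds and node requests purely by job_uuid; the MODEL_API < 25 job-name fallback is gone. A condensed illustration of the uuid-keyed lookup (dict stand-ins, not Zuul's ZooKeeper-backed objects):

    class BuildSetSketch:
        def __init__(self, jobs):
            self._jobs_by_uuid = {j["uuid"]: j for j in jobs}

        def getJob(self, job_uuid):
            return self._jobs_by_uuid.get(job_uuid)

    build_set = BuildSetSketch([{"uuid": "u1", "name": "unit-tests"}])
    event = {"job_uuid": "u1"}   # events no longer carry a job_name
    assert build_set.getJob(event["job_uuid"])["name"] == "unit-tests"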
@@ -385,9 +385,14 @@ class ChangeFilter(object):
         for pipeline in payload['pipelines']:
             for change_queue in pipeline.get('change_queues', []):
                 for head in change_queue['heads']:
-                    for change in head:
-                        if self.wantChange(change):
-                            status.append(copy.deepcopy(change))
+                    for item in head:
+                        want_item = False
+                        for change in item['changes']:
+                            if self.wantChange(change):
+                                want_item = True
+                                break
+                        if want_item:
+                            status.append(copy.deepcopy(item))
         return status
 
     def wantChange(self, change):
@@ -1455,7 +1460,19 @@ class ZuulWebAPI(object):
             return my_datetime.strftime('%Y-%m-%dT%H:%M:%S')
         return None
 
-    def buildToDict(self, build, buildset=None):
+    def refToDict(self, ref):
+        return {
+            'project': ref.project,
+            'branch': ref.branch,
+            'change': ref.change,
+            'patchset': ref.patchset,
+            'ref': ref.ref,
+            'oldrev': ref.oldrev,
+            'newrev': ref.newrev,
+            'ref_url': ref.ref_url,
+        }
+
+    def buildToDict(self, build, buildset=None, skip_refs=False):
         start_time = self._datetimeToString(build.start_time)
         end_time = self._datetimeToString(build.end_time)
         if build.start_time and build.end_time:
@@ -1480,28 +1497,25 @@ class ZuulWebAPI(object):
             'final': build.final,
             'artifacts': [],
             'provides': [],
+            'ref': self.refToDict(build.ref),
         }
-
-        # TODO: This should not be conditional in the future, when we
-        # can have multiple refs for a buildset.
         if buildset:
+            # We enter this branch if we're returning top-level build
+            # objects (i.e., not builds under a buildset).
             event_timestamp = self._datetimeToString(buildset.event_timestamp)
             ret.update({
-                'project': build.ref.project,
-                'branch': build.ref.branch,
                 'pipeline': buildset.pipeline,
-                'change': build.ref.change,
-                'patchset': build.ref.patchset,
-                'ref': build.ref.ref,
-                'oldrev': build.ref.oldrev,
-                'newrev': build.ref.newrev,
-                'ref_url': build.ref.ref_url,
                 'event_id': buildset.event_id,
                 'event_timestamp': event_timestamp,
                 'buildset': {
                     'uuid': buildset.uuid,
                 },
             })
+            if not skip_refs:
+                ret['buildset']['refs'] = [
+                    self.refToDict(ref)
+                    for ref in buildset.refs
+                ]
 
         for artifact in build.artifacts:
             art = {
@@ -1560,7 +1574,8 @@ class ZuulWebAPI(object):
             idx_max=_idx_max, exclude_result=exclude_result,
             query_timeout=self.query_timeout)
-        return [self.buildToDict(b, b.buildset) for b in builds]
+        return [self.buildToDict(b, b.buildset, skip_refs=True)
+                for b in builds]
 
     @cherrypy.expose
     @cherrypy.tools.save_params()
@@ -1570,10 +1585,10 @@
     def build(self, tenant_name, tenant, auth, uuid):
         connection = self._get_connection()
 
-        data = connection.getBuilds(tenant=tenant_name, uuid=uuid, limit=1)
+        data = connection.getBuild(tenant_name, uuid)
         if not data:
             raise cherrypy.HTTPError(404, "Build not found")
-        data = self.buildToDict(data[0], data[0].buildset)
+        data = self.buildToDict(data, data.buildset)
         return data
 
     def buildTimeToDict(self, build):
@@ -1646,19 +1661,15 @@ class ZuulWebAPI(object):
             'uuid': buildset.uuid,
             'result': buildset.result,
             'message': buildset.message,
-            'project': buildset.refs[0].project,
-            'branch': buildset.refs[0].branch,
             'pipeline': buildset.pipeline,
-            'change': buildset.refs[0].change,
-            'patchset': buildset.refs[0].patchset,
-            'ref': buildset.refs[0].ref,
-            'oldrev': buildset.refs[0].oldrev,
-            'newrev': buildset.refs[0].newrev,
-            'ref_url': buildset.refs[0].ref_url,
             'event_id': buildset.event_id,
             'event_timestamp': event_timestamp,
             'first_build_start_time': start,
             'last_build_end_time': end,
+            'refs': [
+                self.refToDict(ref)
+                for ref in buildset.refs
+            ],
         }
         if builds:
             ret['builds'] = []
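These web hunks are the REST API side of the change described in the release notes: the per-ref scalar fields on buildsets are retained only for backward compatibility, and a new "refs" list carries one entry per change in the item. A sketch of consuming the new shape (field values here are illustrative):

    import json

    buildset = json.loads("""
    {
      "uuid": "abc123",
      "pipeline": "gate",
      "refs": [
        {"project": "org/a", "change": 1000, "patchset": "1"},
        {"project": "org/b", "change": 1001, "patchset": "1"}
      ]
    }
    """)
    # Old consumers read buildset["project"]; new consumers iterate refs:
    for ref in buildset["refs"]:
        print(ref["project"], ref["change"])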
@@ -1798,7 +1809,7 @@ class ZuulWebAPI(object):
     @cherrypy.tools.check_tenant_auth()
     def project_freeze_jobs(self, tenant_name, tenant, auth, pipeline_name,
                             project_name, branch_name):
-        item = self._freeze_jobs(
+        item, change = self._freeze_jobs(
             tenant, pipeline_name, project_name, branch_name)
 
         output = []
@@ -1822,9 +1833,10 @@
                            job_name):
         # TODO(jhesketh): Allow a canonical change/item to be passed in which
         # would return the job with any in-change modifications.
-        item = self._freeze_jobs(
+        item, change = self._freeze_jobs(
             tenant, pipeline_name, project_name, branch_name)
-        job = item.current_build_set.job_graph.getJobFromName(job_name)
+        job = item.current_build_set.job_graph.getJob(
+            job_name, change.cache_key)
         if not job:
             raise cherrypy.HTTPError(404)
 
@@ -1873,12 +1885,12 @@
             change.cache_stat = FakeCacheKey()
             with LocalZKContext(self.log) as context:
                 queue = ChangeQueue.new(context, pipeline=pipeline)
-                item = QueueItem.new(context, queue=queue, change=change)
+                item = QueueItem.new(context, queue=queue, changes=[change])
                 item.freezeJobGraph(tenant.layout, context,
                                     skip_file_matcher=True,
                                     redact_secrets_and_keys=True)
-            return item
+            return item, change
 
 
 class StaticHandler(object):
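Note that even the single-change freeze endpoint above now builds its QueueItem with changes=[change]: a lone change is simply a one-element cycle. A toy constructor making the shape explicit (QueueItem.new is real Zuul API; the class below is not its implementation):

    class QueueItemSketch:
        def __init__(self, changes):
            if not isinstance(changes, list):
                raise TypeError("changes must be a list, even for one change")
            self.changes = changes

    item = QueueItemSketch(changes=[{"number": 1000, "patchset": "1"}])
    assert len(item.changes) == 1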