The Gatekeeper, or a project gating system

# Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import abc
from collections import OrderedDict
import copy
import json
import logging
import os
import re2
import struct
import time
from uuid import uuid4
import urllib.parse
import textwrap
import types
import itertools

import jsonpath_rw

from zuul import change_matcher
from zuul.lib.config import get_default
from zuul.lib.artifacts import get_artifacts_from_result_data
from zuul.lib.logutil import get_annotated_logger

MERGER_MERGE = 1          # "git merge"
MERGER_MERGE_RESOLVE = 2  # "git merge -s resolve"
MERGER_CHERRY_PICK = 3    # "git cherry-pick"
MERGER_SQUASH_MERGE = 4   # "git merge --squash"

MERGER_MAP = {
    'merge': MERGER_MERGE,
    'merge-resolve': MERGER_MERGE_RESOLVE,
    'cherry-pick': MERGER_CHERRY_PICK,
    'squash-merge': MERGER_SQUASH_MERGE,
}

PRECEDENCE_NORMAL = 0
PRECEDENCE_LOW = 1
PRECEDENCE_HIGH = 2

PRECEDENCE_MAP = {
    None: PRECEDENCE_NORMAL,
    'low': PRECEDENCE_LOW,
    'normal': PRECEDENCE_NORMAL,
    'high': PRECEDENCE_HIGH,
}

PRIORITY_MAP = {
    PRECEDENCE_NORMAL: 200,
    PRECEDENCE_LOW: 300,
    PRECEDENCE_HIGH: 100,
}

# Request states
STATE_REQUESTED = 'requested'
STATE_FULFILLED = 'fulfilled'
STATE_FAILED = 'failed'

REQUEST_STATES = set([STATE_REQUESTED,
                      STATE_FULFILLED,
                      STATE_FAILED])

# Node states
STATE_BUILDING = 'building'
STATE_TESTING = 'testing'
STATE_READY = 'ready'
STATE_IN_USE = 'in-use'
STATE_USED = 'used'
STATE_HOLD = 'hold'
STATE_DELETING = 'deleting'

NODE_STATES = set([STATE_BUILDING,
                   STATE_TESTING,
                   STATE_READY,
                   STATE_IN_USE,
                   STATE_USED,
                   STATE_HOLD,
                   STATE_DELETING])
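
# Illustrative note: the PRIORITY_MAP values above are node request
# priorities, where a numerically lower value is served sooner, so
# 'high' precedence maps to the lowest number:
#
#   PRIORITY_MAP[PRECEDENCE_MAP['high']]  -> 100
#   PRIORITY_MAP[PRECEDENCE_MAP[None]]    -> 200 (unset -> normal)
#   PRIORITY_MAP[PRECEDENCE_MAP['low']]   -> 300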


class ConfigurationErrorKey(object):
    """A class which attempts to uniquely identify configuration errors
    based on their file location. It's not perfect, but it's usually
    sufficient to determine whether we should show an error to a user.
    """

    def __init__(self, context, mark, error_text):
        self.context = context
        self.mark = mark
        self.error_text = error_text
        elements = []
        if context:
            elements.extend([
                context.project.canonical_name,
                context.branch,
                context.path,
            ])
        else:
            elements.extend([None, None, None])
        if mark:
            elements.extend([
                mark.line,
                mark.snippet,
            ])
        else:
            elements.extend([None, None])
        elements.append(error_text)
        self._hash = hash('|'.join([str(x) for x in elements]))

    def __hash__(self):
        return self._hash

    def __ne__(self, other):
        return not self.__eq__(other)

    def __eq__(self, other):
        if not isinstance(other, ConfigurationErrorKey):
            return False
        return (self.context == other.context and
                self.mark.line == other.mark.line and
                self.mark.snippet == other.mark.snippet and
                self.error_text == other.error_text)


class ConfigurationError(object):
    """A configuration error"""

    def __init__(self, context, mark, error, short_error=None):
        self.error = str(error)
        self.short_error = short_error
        self.key = ConfigurationErrorKey(context, mark, self.error)


class LoadingErrors(object):
    """A configuration error accumulator attached to a layout object
    """

    def __init__(self):
        self.errors = []
        self.error_keys = set()

    def addError(self, context, mark, error, short_error=None):
        e = ConfigurationError(context, mark, error, short_error)
        self.errors.append(e)
        self.error_keys.add(e.key)

    def __getitem__(self, index):
        return self.errors[index]

    def __len__(self):
        return len(self.errors)


class NoMatchingParentError(Exception):
    """A job referenced a parent, but that parent had no variants which
    matched the current change."""
    pass


class TemplateNotFoundError(Exception):
    """A project referenced a template that does not exist."""
    pass


class RequirementsError(Exception):
    """A job's requirements were not met."""
    pass


class Attributes(object):
    """A class to hold attributes for string formatting."""

    def __init__(self, **kw):
        setattr(self, '__dict__', kw)


class Freezable(object):
    """A mix-in class so that an object can be made immutable"""

    def __init__(self):
        super(Freezable, self).__setattr__('_frozen', False)

    def freeze(self):
        """Make this object immutable"""
        def _freezelist(l):
            for i, v in enumerate(l):
                if isinstance(v, Freezable):
                    if not v._frozen:
                        v.freeze()
                elif isinstance(v, dict):
                    l[i] = _freezedict(v)
                elif isinstance(v, list):
                    l[i] = _freezelist(v)
            return tuple(l)

        def _freezedict(d):
            for k, v in list(d.items()):
                if isinstance(v, Freezable):
                    if not v._frozen:
                        v.freeze()
                elif isinstance(v, dict):
                    d[k] = _freezedict(v)
                elif isinstance(v, list):
                    d[k] = _freezelist(v)
            return types.MappingProxyType(d)

        # Ignore the return value from _freezedict because __dict__
        # can't be a mappingproxy.
        _freezedict(self.__dict__)
        self._frozen = True

    def __setattr__(self, name, value):
        if self._frozen:
            raise Exception("Unable to modify frozen object %s" %
                            (repr(self),))
        super(Freezable, self).__setattr__(name, value)
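
# Minimal usage sketch of Freezable (illustrative; 'Config' is a
# hypothetical subclass):
#
#   >>> class Config(Freezable):
#   ...     def __init__(self):
#   ...         super().__init__()
#   ...         self.opts = {'retries': [1, 2]}
#   >>> c = Config()
#   >>> c.freeze()
#   >>> c.opts['retries']   # nested list frozen to a tuple
#   (1, 2)
#   >>> c.opts = {}         # any further assignment raises
#   Traceback (most recent call last):
#   ...
#   Exception: Unable to modify frozen object ...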


class ConfigObject(Freezable):
    def __init__(self):
        super().__init__()
        self.source_context = None
        self.start_mark = None


class Pipeline(object):
    """A configuration that ties together triggers, reporters and managers

    Trigger
        A description of which events should be processed

    Manager
        Responsible for enqueueing and dequeueing Changes

    Reporter
        Communicates success and failure results somewhere
    """

    def __init__(self, name, tenant):
        self.name = name
        # Note that pipelines are not portable across tenants (new
        # pipeline objects must be made when a tenant is
        # reconfigured). A pipeline requires a tenant in order to
        # reach the currently active layout for that tenant.
        self.tenant = tenant
        self.source_context = None
        self.start_mark = None
        self.description = None
        self.failure_message = None
        self.merge_failure_message = None
        self.success_message = None
        self.footer_message = None
        self.enqueue_message = None
        self.start_message = None
        self.post_review = False
        self.dequeue_on_new_patchset = True
        self.ignore_dependencies = False
        self.manager = None
        self.queues = []
        self.relative_priority_queues = {}
        self.precedence = PRECEDENCE_NORMAL
        self.triggers = []
        self.enqueue_actions = []
        self.start_actions = []
        self.success_actions = []
        self.failure_actions = []
        self.merge_failure_actions = []
        self.no_jobs_actions = []
        self.disabled_actions = []
        self.disable_at = None
        self._consecutive_failures = 0
        self._disabled = False
        self.window = None
        self.window_floor = None
        self.window_increase_type = None
        self.window_increase_factor = None
        self.window_decrease_type = None
        self.window_decrease_factor = None

    @property
    def actions(self):
        return (
            self.enqueue_actions +
            self.start_actions +
            self.success_actions +
            self.failure_actions +
            self.merge_failure_actions +
            self.no_jobs_actions +
            self.disabled_actions
        )

    def __repr__(self):
        return '<Pipeline %s>' % self.name

    def getSafeAttributes(self):
        return Attributes(name=self.name)

    def validateReferences(self, layout):
        # Verify that references to other objects in the layout are
        # valid.
        for pipeline in self.supercedes:
            if not layout.pipelines.get(pipeline):
                raise Exception(
                    'The pipeline "{this}" supercedes an unknown pipeline '
                    '{other}.'.format(
                        this=self.name,
                        other=pipeline))

    def setManager(self, manager):
        self.manager = manager

    def addQueue(self, queue):
        self.queues.append(queue)

    def getQueue(self, project):
        for queue in self.queues:
            if project in queue.projects:
                return queue
        return None

    def getRelativePriorityQueue(self, project):
        for queue in self.relative_priority_queues.values():
            if project in queue:
                return queue
        return [project]

    def removeQueue(self, queue):
        if queue in self.queues:
            self.queues.remove(queue)

    def getChangesInQueue(self):
        changes = []
        for shared_queue in self.queues:
            changes.extend([x.change for x in shared_queue.queue])
        return changes

    def getAllItems(self):
        items = []
        for shared_queue in self.queues:
            items.extend(shared_queue.queue)
        return items

    def formatStatusJSON(self, websocket_url=None):
        j_pipeline = dict(name=self.name,
                          description=self.description)
        j_queues = []
        j_pipeline['change_queues'] = j_queues
        for queue in self.queues:
            j_queue = dict(name=queue.name)
            j_queues.append(j_queue)
            j_queue['heads'] = []
            j_queue['window'] = queue.window

            j_changes = []
            for e in queue.queue:
                if not e.item_ahead:
                    if j_changes:
                        j_queue['heads'].append(j_changes)
                    j_changes = []
                j_changes.append(e.formatJSON(websocket_url))
                if (len(j_changes) > 1 and
                        (j_changes[-2]['remaining_time'] is not None) and
                        (j_changes[-1]['remaining_time'] is not None)):
                    j_changes[-1]['remaining_time'] = max(
                        j_changes[-2]['remaining_time'],
                        j_changes[-1]['remaining_time'])
            if j_changes:
                j_queue['heads'].append(j_changes)
        return j_pipeline
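
# Shape of the status JSON produced by formatStatusJSON (illustrative
# values; the item dicts come from QueueItem.formatJSON):
#
#   {'name': 'gate',
#    'description': '...',
#    'change_queues': [{'name': 'integrated',
#                       'window': 20,
#                       'heads': [[item, item], [item]]}]}
#
# Each element of 'heads' is one chain of items; a new chain starts at
# every item that has no item_ahead.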


class ChangeQueue(object):
    """A ChangeQueue contains Changes to be processed for related projects.

    A Pipeline with a DependentPipelineManager has multiple parallel
    ChangeQueues shared by different projects. For instance, there may be
    a ChangeQueue shared by interrelated projects foo and bar, and a
    second queue for independent project baz.

    A Pipeline with an IndependentPipelineManager puts every Change into
    its own ChangeQueue.

    The ChangeQueue Window is inspired by TCP windows and controls how many
    Changes in a given ChangeQueue will be considered active and ready to
    be processed. If a Change succeeds, the Window is increased by
    `window_increase_factor`. If a Change fails, the Window is decreased by
    `window_decrease_factor`.

    A ChangeQueue may be a dynamically created queue, which may be removed
    from a DependentPipelineManager once empty.
    """

    def __init__(self, pipeline, window=0, window_floor=1,
                 window_increase_type='linear', window_increase_factor=1,
                 window_decrease_type='exponential', window_decrease_factor=2,
                 name=None, dynamic=False):
        self.pipeline = pipeline
        if name:
            self.name = name
        else:
            self.name = ''
        self.projects = []
        self._jobs = set()
        self.queue = []
        self.window = window
        self.window_floor = window_floor
        self.window_increase_type = window_increase_type
        self.window_increase_factor = window_increase_factor
        self.window_decrease_type = window_decrease_type
        self.window_decrease_factor = window_decrease_factor
        self.dynamic = dynamic

    def __repr__(self):
        return '<ChangeQueue %s: %s>' % (self.pipeline.name, self.name)

    def getJobs(self):
        return self._jobs

    def addProject(self, project):
        if project not in self.projects:
            self.projects.append(project)

            if not self.name:
                self.name = project.name

    def enqueueChange(self, change, event):
        item = QueueItem(self, change, event)
        self.enqueueItem(item)
        item.enqueue_time = time.time()
        return item

    def enqueueItem(self, item):
        item.pipeline = self.pipeline
        item.queue = self
        if self.queue:
            item.item_ahead = self.queue[-1]
            item.item_ahead.items_behind.append(item)
        self.queue.append(item)

    def dequeueItem(self, item):
        if item in self.queue:
            self.queue.remove(item)
        if item.item_ahead:
            item.item_ahead.items_behind.remove(item)
        for item_behind in item.items_behind:
            if item.item_ahead:
                item.item_ahead.items_behind.append(item_behind)
            item_behind.item_ahead = item.item_ahead
        item.item_ahead = None
        item.items_behind = []
        item.dequeue_time = time.time()

    def moveItem(self, item, item_ahead):
        if item.item_ahead == item_ahead:
            return False
        # Remove from current location
        if item.item_ahead:
            item.item_ahead.items_behind.remove(item)
        for item_behind in item.items_behind:
            if item.item_ahead:
                item.item_ahead.items_behind.append(item_behind)
            item_behind.item_ahead = item.item_ahead
        # Add to new location
        item.item_ahead = item_ahead
        item.items_behind = []
        if item.item_ahead:
            item.item_ahead.items_behind.append(item)
        return True

    def isActionable(self, item):
        if self.window:
            return item in self.queue[:self.window]
        else:
            return True

    def increaseWindowSize(self):
        if self.window:
            if self.window_increase_type == 'linear':
                self.window += self.window_increase_factor
            elif self.window_increase_type == 'exponential':
                self.window *= self.window_increase_factor

    def decreaseWindowSize(self):
        if self.window:
            if self.window_decrease_type == 'linear':
                self.window = max(
                    self.window_floor,
                    self.window - self.window_decrease_factor)
            elif self.window_decrease_type == 'exponential':
                self.window = max(
                    self.window_floor,
                    int(self.window / self.window_decrease_factor))
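
# Worked example (illustrative) of the window arithmetic above: with an
# active window of 20, window_floor=1, and the default linear increase
# by 1 / exponential decrease by 2, a success gives 20 -> 21 and a
# subsequent failure gives 21 -> max(1, int(21 / 2)) == 10.  A window of
# 0 disables the mechanism entirely.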


class Project(object):
    """A Project represents a git repository such as openstack/nova."""

    # NOTE: Projects should only be instantiated via a Source object
    # so that they are associated with and cached by their Connection.
    # This makes a Project instance a unique identifier for a given
    # project from a given source.

    def __init__(self, name, source, foreign=False):
        self.name = name
        self.source = source
        self.connection_name = source.connection.connection_name
        self.canonical_hostname = source.canonical_hostname
        self.canonical_name = source.canonical_hostname + '/' + name
        # foreign projects are those referenced in dependencies of
        # layout projects; this should matter when deciding whether to
        # enqueue their changes
        # TODOv3 (jeblair): re-add support for foreign projects if needed
        self.foreign = foreign

    def __str__(self):
        return self.name

    def __repr__(self):
        return '<Project %s>' % (self.name)

    def getSafeAttributes(self):
        return Attributes(name=self.name)

    def toDict(self):
        d = {}
        d['name'] = self.name
        d['connection_name'] = self.connection_name
        d['canonical_name'] = self.canonical_name
        return d


class Node(ConfigObject):
    """A single node for use by a job.

    This may represent a request for a node, or an actual node
    provided by Nodepool.
    """

    def __init__(self, name, label):
        super(Node, self).__init__()
        self.name = name
        self.label = label
        self.id = None
        self.lock = None
        self.hold_job = None
        self.comment = None
        # Attributes from Nodepool
        self._state = 'unknown'
        self.state_time = time.time()
        self.host_id = None
        self.interface_ip = None
        self.public_ipv4 = None
        self.private_ipv4 = None
        self.public_ipv6 = None
        self.connection_port = 22
        self.connection_type = None
        self._keys = []
        self.az = None
        self.provider = None
        self.region = None
        self.username = None
        self.hold_expiration = None
        self.resources = None

    @property
    def state(self):
        return self._state

    @state.setter
    def state(self, value):
        if value not in NODE_STATES:
            raise TypeError("'%s' is not a valid state" % value)
        self._state = value
        self.state_time = time.time()

    def __repr__(self):
        return '<Node %s %s:%s>' % (self.id, self.name, self.label)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __eq__(self, other):
        if not isinstance(other, Node):
            return False
        return (self.name == other.name and
                self.label == other.label and
                self.id == other.id)

    def toDict(self, internal_attributes=False):
        d = {}
        d['state'] = self.state
        d['hold_job'] = self.hold_job
        d['comment'] = self.comment
        for k in self._keys:
            d[k] = getattr(self, k)
        if internal_attributes:
            # These attributes are only useful for the rpc serialization
            d['name'] = self.name[0]
            d['aliases'] = self.name[1:]
            d['label'] = self.label
        return d

    def updateFromDict(self, data):
        self._state = data['state']
        keys = []
        for k, v in data.items():
            if k == 'state':
                continue
            keys.append(k)
            setattr(self, k, v)
        self._keys = keys


class Group(ConfigObject):
    """A logical group of nodes for use by a job.

    A Group is a named set of node names that will be provided to
    jobs in the inventory to describe logical units where some subset of
    tasks run.
    """

    def __init__(self, name, nodes):
        super(Group, self).__init__()
        self.name = name
        self.nodes = nodes

    def __repr__(self):
        return '<Group %s %s>' % (self.name, str(self.nodes))

    def __ne__(self, other):
        return not self.__eq__(other)

    def __eq__(self, other):
        if not isinstance(other, Group):
            return False
        return (self.name == other.name and
                self.nodes == other.nodes)

    def toDict(self):
        return {
            'name': self.name,
            'nodes': self.nodes
        }


class NodeSet(ConfigObject):
    """A set of nodes.

    In configuration, NodeSets are attributes of Jobs indicating that
    a Job requires nodes matching this description.

    They may appear as top-level configuration objects and be named,
    or they may appear anonymously in in-line job definitions.
    """

    def __init__(self, name=None):
        super(NodeSet, self).__init__()
        self.name = name or ''
        self.nodes = OrderedDict()
        self.groups = OrderedDict()

    def __ne__(self, other):
        return not self.__eq__(other)

    def __eq__(self, other):
        if not isinstance(other, NodeSet):
            return False
        return (self.name == other.name and
                self.nodes == other.nodes)

    def toDict(self):
        d = {}
        d['name'] = self.name
        d['nodes'] = []
        for node in self.nodes.values():
            d['nodes'].append(node.toDict(internal_attributes=True))
        d['groups'] = []
        for group in self.groups.values():
            d['groups'].append(group.toDict())
        return d

    def copy(self):
        n = NodeSet(self.name)
        for name, node in self.nodes.items():
            n.addNode(Node(node.name, node.label))
        for name, group in self.groups.items():
            n.addGroup(Group(group.name, group.nodes[:]))
        return n

    def addNode(self, node):
        for name in node.name:
            if name in self.nodes:
                raise Exception("Duplicate node in %s" % (self,))
        self.nodes[tuple(node.name)] = node

    def getNodes(self):
        return list(self.nodes.values())

    def addGroup(self, group):
        if group.name in self.groups:
            raise Exception("Duplicate group in %s" % (self,))
        self.groups[group.name] = group

    def getGroups(self):
        return list(self.groups.values())

    def __repr__(self):
        if self.name:
            name = self.name + ' '
        else:
            name = ''
        return '<NodeSet %s%s>' % (name, list(self.nodes.values()))

    def __len__(self):
        return len(self.nodes)
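
# Usage sketch (illustrative; the label names are hypothetical).  Node
# names are lists so a node may carry aliases; the full tuple of names
# keys the OrderedDict:
#
#   >>> ns = NodeSet('two-node')
#   >>> ns.addNode(Node(['controller'], 'ubuntu-bionic'))
#   >>> ns.addNode(Node(['compute'], 'ubuntu-bionic'))
#   >>> ns.addGroup(Group('switch', ['controller']))
#   >>> len(ns)
#   2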


class NodeRequest(object):
    """A request for a set of nodes."""

    def __init__(self, requestor, build_set, job, nodeset, relative_priority,
                 event=None):
        self.requestor = requestor
        self.build_set = build_set
        self.job = job
        self.nodeset = nodeset
        self._state = STATE_REQUESTED
        self.requested_time = time.time()
        self.state_time = time.time()
        self.created_time = None
        self.stat = None
        self.uid = uuid4().hex
        self.relative_priority = relative_priority
        self.provider = self._getPausedParentProvider()
        self.id = None
        self._zk_data = {}  # Data that we read back from ZK
        if event is not None:
            self.event_id = event.zuul_event_id
        else:
            self.event_id = None
        # Zuul internal flags (not stored in ZK so they are not
        # overwritten).
        self.failed = False
        self.canceled = False

    def _getPausedParent(self):
        if self.build_set:
            job_graph = self.build_set.item.job_graph
            if job_graph:
                for parent in job_graph.getParentJobsRecursively(
                        self.job.name):
                    build = self.build_set.getBuild(parent.name)
                    if build.paused:
                        return build
        return None

    def _getPausedParentProvider(self):
        build = self._getPausedParent()
        if build:
            nodeset = self.build_set.getJobNodeSet(build.job.name)
            if nodeset and nodeset.nodes:
                return list(nodeset.nodes.values())[0].provider
        return None

    @property
    def priority(self):
        precedence_adjustment = 0
        if self.build_set:
            precedence = self.build_set.item.pipeline.precedence
            if self._getPausedParent():
                precedence_adjustment = -1
        else:
            precedence = PRECEDENCE_NORMAL
        initial_precedence = PRIORITY_MAP[precedence]
        return max(0, initial_precedence + precedence_adjustment)
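
    # Worked example (illustrative): a request from a high-precedence
    # pipeline starts at PRIORITY_MAP[PRECEDENCE_HIGH] == 100; if a
    # paused parent build is holding nodes, the -1 adjustment yields
    # 99, slightly boosting the child job that will reuse them.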

    @property
    def fulfilled(self):
        return (self._state == STATE_FULFILLED) and not self.failed

    @property
    def state(self):
        return self._state

    @state.setter
    def state(self, value):
        if value not in REQUEST_STATES:
            raise TypeError("'%s' is not a valid state" % value)
        self._state = value
        self.state_time = time.time()

    def __repr__(self):
        return '<NodeRequest %s %s>' % (self.id, self.nodeset)

    def toDict(self):
        # Start with any previously read data
        d = self._zk_data.copy()
        nodes = [n.label for n in self.nodeset.getNodes()]
        # These are immutable once set
        d.setdefault('node_types', nodes)
        d.setdefault('requestor', self.requestor)
        d.setdefault('created_time', self.created_time)
        d.setdefault('provider', self.provider)
        # We might change these
        d['state'] = self.state
        d['state_time'] = self.state_time
        d['relative_priority'] = self.relative_priority
        d['event_id'] = self.event_id
        return d

    def updateFromDict(self, data):
        self._zk_data = data
        self._state = data['state']
        self.state_time = data['state_time']
        self.relative_priority = data.get('relative_priority', 0)


class Secret(ConfigObject):
    """A collection of private data.

    In configuration, Secrets are collections of private data in
    key-value pair format. They are defined as top-level
    configuration objects and then referenced by Jobs.
    """

    def __init__(self, name, source_context):
        super(Secret, self).__init__()
        self.name = name
        self.source_context = source_context
        # The secret data may or may not be encrypted. This attribute
        # is named 'secret_data' to make it easy to search for and
        # spot where it is directly used.
        self.secret_data = {}

    def __ne__(self, other):
        return not self.__eq__(other)

    def __eq__(self, other):
        if not isinstance(other, Secret):
            return False
        return (self.name == other.name and
                self.source_context == other.source_context and
                self.secret_data == other.secret_data)

    def areDataEqual(self, other):
        return (self.secret_data == other.secret_data)

    def __repr__(self):
        return '<Secret %s>' % (self.name,)

    def _decrypt(self, private_key, secret_data):
        # recursive function to decrypt data
        if hasattr(secret_data, 'decrypt'):
            return secret_data.decrypt(private_key)

        if isinstance(secret_data, (dict, types.MappingProxyType)):
            decrypted_secret_data = {}
            for k, v in secret_data.items():
                decrypted_secret_data[k] = self._decrypt(private_key, v)
            return decrypted_secret_data

        if isinstance(secret_data, (list, tuple)):
            decrypted_secret_data = []
            for v in secret_data:
                decrypted_secret_data.append(self._decrypt(private_key, v))
            return decrypted_secret_data

        return secret_data

    def decrypt(self, private_key):
        """Return a copy of this secret with any encrypted data decrypted.
        Note that the original remains encrypted."""
        r = Secret(self.name, self.source_context)
        r.secret_data = self._decrypt(private_key, self.secret_data)
        return r
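
# Illustrative behavior of the recursive _decrypt above (the values are
# made up): any object with a .decrypt() method, such as an encrypted
# YAML value, is decrypted; dicts, lists and tuples are walked; plain
# values pass through unchanged.
#
#   {'password': <encrypted>, 'hosts': ['a', 'b']}
#     -> {'password': 'hunter2', 'hosts': ['a', 'b']}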


class SecretUse(ConfigObject):
    """A use of a secret in a Job"""

    def __init__(self, name, alias):
        super(SecretUse, self).__init__()
        self.name = name
        self.alias = alias
        self.pass_to_parent = False


class ProjectContext(ConfigObject):

    def __init__(self, project):
        super().__init__()
        self.project = project
        self.branch = None
        self.path = None

    def __str__(self):
        return self.project.name

    def toDict(self):
        return dict(
            project=self.project.name,
        )


class SourceContext(ConfigObject):
    """A reference to the branch of a project in configuration.

    Jobs and playbooks reference this to keep track of where they
    originate."""

    def __init__(self, project, branch, path, trusted):
        super(SourceContext, self).__init__()
        self.project = project
        self.branch = branch
        self.path = path
        self.trusted = trusted
        self.implied_branch_matchers = None
        self.implied_branches = None

    def __str__(self):
        return '%s/%s@%s' % (self.project, self.path, self.branch)

    def __repr__(self):
        return '<SourceContext %s trusted:%s>' % (str(self),
                                                  self.trusted)

    def __deepcopy__(self, memo):
        return self.copy()

    def copy(self):
        return self.__class__(self.project, self.branch, self.path,
                              self.trusted)

    def isSameProject(self, other):
        if not isinstance(other, SourceContext):
            return False
        return (self.project == other.project and
                self.trusted == other.trusted)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __eq__(self, other):
        if not isinstance(other, SourceContext):
            return False
        return (self.project == other.project and
                self.branch == other.branch and
                self.path == other.path and
                self.trusted == other.trusted)

    def toDict(self):
        return dict(
            project=self.project.name,
            branch=self.branch,
            path=self.path,
        )


class PlaybookContext(ConfigObject):
    """A reference to a playbook in the context of a project.

    Jobs refer to objects of this class for their main, pre, and post
    playbooks so that we can keep track of which repos and security
    contexts are needed in order to run them.

    We also keep a list of roles so that playbooks only run with the
    roles which were defined at the point the playbook was defined.
    """

    def __init__(self, source_context, path, roles, secrets):
        super(PlaybookContext, self).__init__()
        self.source_context = source_context
        self.path = path
        self.roles = roles
        self.secrets = secrets
        self.decrypted_secrets = ()

    def __repr__(self):
        return '<PlaybookContext %s %s>' % (self.source_context,
                                            self.path)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __eq__(self, other):
        if not isinstance(other, PlaybookContext):
            return False
        return (self.source_context == other.source_context and
                self.path == other.path and
                self.roles == other.roles and
                self.secrets == other.secrets)

    def copy(self):
        r = PlaybookContext(self.source_context,
                            self.path,
                            self.roles,
                            self.secrets)
        return r

    def validateReferences(self, layout):
        # Verify that references to other objects in the layout are
        # valid.
        for secret_use in self.secrets:
            secret = layout.secrets.get(secret_use.name)
            if secret is None:
                raise Exception(
                    'The secret "{name}" was not found.'.format(
                        name=secret_use.name))
            if secret_use.alias == 'zuul' or secret_use.alias == 'nodepool':
                raise Exception('Secrets named "zuul" or "nodepool" '
                                'are not allowed.')
            if not secret.source_context.isSameProject(self.source_context):
                raise Exception(
                    "Unable to use secret {name}. Secrets must be "
                    "defined in the same project in which they "
                    "are used".format(
                        name=secret_use.name))
            # Decrypt a copy of the secret to verify it can be done
            secret.decrypt(self.source_context.project.private_secrets_key)

    def freezeSecrets(self, layout):
        secrets = []
        for secret_use in self.secrets:
            secret = layout.secrets.get(secret_use.name)
            decrypted_secret = secret.decrypt(
                self.source_context.project.private_secrets_key)
            decrypted_secret.name = secret_use.alias
            secrets.append(decrypted_secret)
        self.decrypted_secrets = tuple(secrets)

    def addSecrets(self, decrypted_secrets):
        current_names = set([s.name for s in self.decrypted_secrets])
        new_secrets = [s for s in decrypted_secrets
                       if s.name not in current_names]
        self.decrypted_secrets = self.decrypted_secrets + tuple(new_secrets)

    def toDict(self):
        # Render to a dict to use in passing json to the executor
        secrets = {}
        for secret in self.decrypted_secrets:
            secrets[secret.name] = secret.secret_data
        return dict(
            connection=self.source_context.project.connection_name,
            project=self.source_context.project.name,
            branch=self.source_context.branch,
            trusted=self.source_context.trusted,
            roles=[r.toDict() for r in self.roles],
            secrets=secrets,
            path=self.path)

    def toSchemaDict(self):
        # Render to a dict to use in REST api
        d = {
            'path': self.path,
            'roles': list(map(lambda x: x.toDict(), self.roles)),
            'secrets': [{'name': secret.name, 'alias': secret.alias}
                        for secret in self.secrets],
        }
        if self.source_context:
            d['source_context'] = self.source_context.toDict()
        else:
            d['source_context'] = None
        return d


class Role(ConfigObject, metaclass=abc.ABCMeta):
    """A reference to an ansible role."""

    def __init__(self, target_name):
        super(Role, self).__init__()
        self.target_name = target_name

    @abc.abstractmethod
    def __repr__(self):
        pass

    def __ne__(self, other):
        return not self.__eq__(other)

    @abc.abstractmethod
    def __eq__(self, other):
        if not isinstance(other, Role):
            return False
        return (self.target_name == other.target_name)

    @abc.abstractmethod
    def toDict(self):
        # Render to a dict to use in passing json to the executor
        return dict(target_name=self.target_name)


class ZuulRole(Role):
    """A reference to an ansible role in a Zuul project."""

    def __init__(self, target_name, project_canonical_name, implicit=False):
        super(ZuulRole, self).__init__(target_name)
        self.project_canonical_name = project_canonical_name
        self.implicit = implicit

    def __repr__(self):
        return '<ZuulRole %s %s>' % (self.project_canonical_name,
                                     self.target_name)

    __hash__ = object.__hash__

    def __eq__(self, other):
        if not isinstance(other, ZuulRole):
            return False
        # Implicit is not consulted for equality so that we can handle
        # implicit to explicit conversions.
        return (super(ZuulRole, self).__eq__(other) and
                self.project_canonical_name == other.project_canonical_name)

    def toDict(self):
        # Render to a dict to use in passing json to the executor
        d = super(ZuulRole, self).toDict()
        d['type'] = 'zuul'
        d['project_canonical_name'] = self.project_canonical_name
        d['implicit'] = self.implicit
        return d


class Job(ConfigObject):
    """A Job represents the definition of actions to perform.

    A Job is an abstract configuration concept. It describes what,
    where, and under what circumstances something should be run
    (contrast this with Build which is a concrete single execution of
    a Job).

    NB: Do not modify attributes of this class, set them directly
    (e.g., "job.run = ..." rather than "job.run.append(...)").
    """
    BASE_JOB_MARKER = object()

    def __init__(self, name):
        super(Job, self).__init__()
        # These attributes may override even the final form of a job
        # in the context of a project-pipeline. They can not affect
        # the execution of the job, but only whether the job is run
        # and how it is reported.
        self.context_attributes = dict(
            voting=True,
            hold_following_changes=False,
            failure_message=None,
            success_message=None,
            failure_url=None,
            success_url=None,
            branch_matcher=None,
            file_matcher=None,
            irrelevant_file_matcher=None,  # skip-if
            match_on_config_updates=True,
            tags=frozenset(),
            provides=frozenset(),
            requires=frozenset(),
            dependencies=frozenset(),
            ignore_allowed_projects=None,  # internal, but inherited
                                           # in the usual manner
        )

        # These attributes affect how the job is actually run and more
        # care must be taken when overriding them. If a job is
        # declared "final", these may not be overridden in a
        # project-pipeline.
        self.execution_attributes = dict(
            parent=None,
            timeout=None,
            post_timeout=None,
            variables={},
            extra_variables={},
            host_variables={},
            group_variables={},
            nodeset=NodeSet(),
            workspace=None,
            pre_run=(),
            post_run=(),
            cleanup_run=(),
            run=(),
            ansible_version=None,
            semaphore=None,
            attempts=3,
            final=False,
            abstract=False,
            protected=None,
            roles=(),
            required_projects={},
            allowed_projects=None,
            override_branch=None,
            override_checkout=None,
            post_review=None,
        )

        # These are generally internal attributes which are not
        # accessible via configuration.
        self.other_attributes = dict(
            name=None,
            source_context=None,
            start_mark=None,
            inheritance_path=(),
            parent_data=None,
            artifact_data=None,
            description=None,
            variant_description=None,
            protected_origin=None,
            _branches=(),
            _implied_branch=None,
            _files=(),
            _irrelevant_files=(),
            secrets=(),  # secrets aren't inheritable
            queued=False,
        )

        self.inheritable_attributes = {}
        self.inheritable_attributes.update(self.context_attributes)
        self.inheritable_attributes.update(self.execution_attributes)
        self.attributes = {}
        self.attributes.update(self.inheritable_attributes)
        self.attributes.update(self.other_attributes)

        self.name = name

    def toDict(self, tenant):
        '''
        Convert a Job object's attributes to a dictionary.
        '''
        d = {}
        d['name'] = self.name
        d['branches'] = self._branches
        d['files'] = self._files
        d['irrelevant_files'] = self._irrelevant_files
        d['variant_description'] = self.variant_description
        d['implied_branch'] = self._implied_branch
        if self.source_context:
            d['source_context'] = self.source_context.toDict()
        else:
            d['source_context'] = None
        d['description'] = self.description
        d['required_projects'] = []
        for project in self.required_projects.values():
            d['required_projects'].append(project.toDict())
        if self.semaphore:
            # For now just leave the semaphore name here until we really
            # need more information in zuul-web about this
            d['semaphore'] = self.semaphore.name
        else:
            d['semaphore'] = None
        d['variables'] = self.variables
        d['final'] = self.final
        d['abstract'] = self.abstract
        d['protected'] = self.protected
        d['voting'] = self.voting
        d['timeout'] = self.timeout
        d['tags'] = list(self.tags)
        d['provides'] = list(self.provides)
        d['requires'] = list(self.requires)
        d['dependencies'] = list(map(lambda x: x.toDict(), self.dependencies))
        d['attempts'] = self.attempts
        d['roles'] = list(map(lambda x: x.toDict(), self.roles))
        d['run'] = list(map(lambda x: x.toSchemaDict(), self.run))
        d['pre_run'] = list(map(lambda x: x.toSchemaDict(), self.pre_run))
        d['post_run'] = list(map(lambda x: x.toSchemaDict(), self.post_run))
        d['cleanup_run'] = list(map(lambda x: x.toSchemaDict(),
                                    self.cleanup_run))
        d['post_review'] = self.post_review
        d['match_on_config_updates'] = self.match_on_config_updates
        if self.isBase():
            d['parent'] = None
        elif self.parent:
            d['parent'] = self.parent
        else:
            d['parent'] = tenant.default_base_job
        if isinstance(self.nodeset, str):
            ns = tenant.layout.nodesets.get(self.nodeset)
        else:
            ns = self.nodeset
        if ns:
            d['nodeset'] = ns.toDict()
        if self.ansible_version:
            d['ansible_version'] = self.ansible_version
        else:
            d['ansible_version'] = None
        return d

    def __ne__(self, other):
        return not self.__eq__(other)

    def __eq__(self, other):
        # Compare the name and all inheritable attributes to determine
        # whether two jobs with the same name are identically
        # configured. Useful upon reconfiguration.
        if not isinstance(other, Job):
            return False
        if self.name != other.name:
            return False
        for k, v in self.attributes.items():
            if getattr(self, k) != getattr(other, k):
                return False
        return True

    __hash__ = object.__hash__

    def __str__(self):
        return self.name

    def __repr__(self):
        ln = 0
        if self.start_mark:
            ln = self.start_mark.line + 1
        return '<Job %s branches: %s source: %s#%s>' % (
            self.name,
            self.branch_matcher,
            self.source_context,
            ln)

    def __getattr__(self, name):
        v = self.__dict__.get(name)
        if v is None:
            return self.attributes[name]
        return v

    def _get(self, name):
        return self.__dict__.get(name)

    def getSafeAttributes(self):
        return Attributes(name=self.name)

    def isBase(self):
        return self.parent is self.BASE_JOB_MARKER

    def setBase(self, layout):
        self.inheritance_path = self.inheritance_path + (repr(self),)
        if self._get('run') is not None:
            self.run = self.freezePlaybooks(self.run, layout)
        if self._get('pre_run') is not None:
            self.pre_run = self.freezePlaybooks(self.pre_run, layout)
        if self._get('post_run') is not None:
            self.post_run = self.freezePlaybooks(self.post_run, layout)
        if self._get('cleanup_run') is not None:
            self.cleanup_run = self.freezePlaybooks(self.cleanup_run, layout)

    def getNodeSet(self, layout):
        if isinstance(self.nodeset, str):
            # This references an existing named nodeset in the layout.
            ns = layout.nodesets.get(self.nodeset)
            if ns is None:
                raise Exception(
                    'The nodeset "{nodeset}" was not found.'.format(
                        nodeset=self.nodeset))
            return ns
        return self.nodeset

    def validateReferences(self, layout):
        # Verify that references to other objects in the layout are
        # valid.
        if not self.isBase() and self.parent:
            layout.getJob(self.parent)

        ns = self.getNodeSet(layout)
        if layout.tenant.max_nodes_per_job != -1 and \
           len(ns) > layout.tenant.max_nodes_per_job:
            raise Exception(
                'The job "{job}" exceeds tenant '
                'max-nodes-per-job {maxnodes}.'.format(
                    job=self.name,
                    maxnodes=layout.tenant.max_nodes_per_job))

        for pb in self.pre_run + self.run + self.post_run + self.cleanup_run:
            pb.validateReferences(layout)

    def addRoles(self, roles):
        newroles = []
        # Start with a copy of the existing roles, but if any of them
        # are implicit roles which are identified as explicit in the
        # new roles list, replace them with the explicit version.
        changed = False
        for existing_role in self.roles:
            if existing_role in roles:
                new_role = roles[roles.index(existing_role)]
            else:
                new_role = None
            if (new_role and
                isinstance(new_role, ZuulRole) and
                isinstance(existing_role, ZuulRole) and
                existing_role.implicit and not new_role.implicit):
                newroles.append(new_role)
                changed = True
            else:
                newroles.append(existing_role)
        # Now add the new roles.
        for role in reversed(roles):
            if role not in newroles:
                newroles.insert(0, role)
                changed = True
        if changed:
            self.roles = tuple(newroles)

    def getBranches(self):
        # Return the raw branch list that match this job
        return self._branches

    def setBranchMatcher(self, branches):
        # Set the branch matcher to match any of the supplied branches
        self._branches = branches
        matchers = []
        for branch in branches:
            matchers.append(change_matcher.BranchMatcher(branch))
        self.branch_matcher = change_matcher.MatchAny(matchers)

    def setFileMatcher(self, files):
        # Set the file matcher to match any of the change files
        self._files = files
        matchers = []
        for fn in files:
            matchers.append(change_matcher.FileMatcher(fn))
        self.file_matcher = change_matcher.MatchAnyFiles(matchers)

    def setIrrelevantFileMatcher(self, irrelevant_files):
        # Set the irrelevant file matcher to match any of the change files
        self._irrelevant_files = irrelevant_files
        matchers = []
        for fn in irrelevant_files:
            matchers.append(change_matcher.FileMatcher(fn))
        self.irrelevant_file_matcher = change_matcher.MatchAllFiles(matchers)

    def updateVariables(self, other_vars, other_extra_vars, other_host_vars,
                        other_group_vars):
        if other_vars is not None:
            self.variables = Job._deepUpdate(self.variables, other_vars)
        if other_extra_vars is not None:
            self.extra_variables = Job._deepUpdate(
                self.extra_variables, other_extra_vars)
        if other_host_vars is not None:
            self.host_variables = Job._deepUpdate(
                self.host_variables, other_host_vars)
        if other_group_vars is not None:
            self.group_variables = Job._deepUpdate(
                self.group_variables, other_group_vars)

    def updateParentData(self, other_build):
        # Update variables, but give the current values priority (used
        # for job return data which is lower precedence than defined
        # job vars).
        other_vars = other_build.result_data
        v = self.parent_data or {}
        v = Job._deepUpdate(v, other_vars)
        # To avoid running afoul of checks that jobs don't set zuul
        # variables, remove them from parent data here.
        if 'zuul' in v:
            del v['zuul']
        self.parent_data = v
        self.variables = Job._deepUpdate(self.parent_data, self.variables)

        artifact_data = self.artifact_data or []
        artifacts = get_artifacts_from_result_data(other_vars)
        for a in artifacts:
            # Change here may be any ref type (tag, change, etc)
            ref = other_build.build_set.item.change
            a.update({'project': ref.project.name,
                      'job': other_build.job.name})
            # Change is a Branch
            if hasattr(ref, 'branch'):
                a.update({'branch': ref.branch})
                if hasattr(ref, 'number') and hasattr(ref, 'patchset'):
                    a.update({'change': str(ref.number),
                              'patchset': ref.patchset})
            # Otherwise we are ref type
            else:
                a.update({'ref': ref.ref,
                          'oldrev': ref.oldrev,
                          'newrev': ref.newrev})
                if hasattr(ref, 'tag'):
                    a.update({'tag': ref.tag})
            if a not in artifact_data:
                artifact_data.append(a)
        if artifact_data:
            self.updateArtifactData(artifact_data)

    def updateArtifactData(self, artifact_data):
        self.artifact_data = artifact_data

    def updateProjectVariables(self, project_vars):
        # Merge project/template variables directly into the job
        # variables. Job variables override project variables.
        self.variables = Job._deepUpdate(project_vars, self.variables)

    def updateProjects(self, other_projects):
        required_projects = self.required_projects.copy()
        required_projects.update(other_projects)
        self.required_projects = required_projects

    @staticmethod
    def _deepUpdate(a, b):
        # Merge nested dictionaries if possible, otherwise, overwrite
        # the value in 'a' with the value in 'b'.
        ret = {}
        for k, av in a.items():
            if k not in b:
                ret[k] = av
        for k, bv in b.items():
            av = a.get(k)
            if (isinstance(av, (dict, types.MappingProxyType)) and
                isinstance(bv, (dict, types.MappingProxyType))):
                ret[k] = Job._deepUpdate(av, bv)
            else:
                ret[k] = bv
        return ret
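
    # Worked example (illustrative) of _deepUpdate: nested dicts merge
    # recursively, while scalars and lists in 'b' simply win:
    #
    #   >>> Job._deepUpdate({'a': {'x': 1}, 'b': 1},
    #   ...                 {'a': {'y': 2}, 'c': 3})
    #   {'b': 1, 'a': {'x': 1, 'y': 2}, 'c': 3}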

    def copy(self):
        job = Job(self.name)
        for k in self.attributes:
            v = self._get(k)
            if v is not None:
                # If this is a config object, it's frozen, so it's
                # safe to shallow copy.
                setattr(job, k, v)
        return job

    def freezePlaybooks(self, pblist, layout):
        """Take a list of playbooks, and return a copy of it updated with
        this job's roles.
        """
        ret = []
        for old_pb in pblist:
            pb = old_pb.copy()
            pb.roles = self.roles
            pb.freezeSecrets(layout)
            ret.append(pb)
        return tuple(ret)

    def applyVariant(self, other, layout):
        """Copy the attributes which have been set on the other job to this
        job."""
        if not isinstance(other, Job):
            raise Exception("Job unable to inherit from %s" % (other,))

        for k in self.execution_attributes:
            if (other._get(k) is not None and
                    k not in set(['final', 'abstract', 'protected'])):
                if self.final:
                    raise Exception("Unable to modify final job %s attribute "
                                    "%s=%s with variant %s" % (
                                        repr(self), k, other._get(k),
                                        repr(other)))
                if self.protected_origin:
                    # this is a protected job, check origin of job definition
                    this_origin = self.protected_origin
                    other_origin = other.source_context.project.canonical_name
                    if this_origin != other_origin:
                        raise Exception("Job %s which is defined in %s is "
                                        "protected and cannot be inherited "
                                        "from other projects."
                                        % (repr(self), this_origin))
                if k not in set(['pre_run', 'run', 'post_run', 'cleanup_run',
                                 'roles', 'variables', 'extra_variables',
                                 'host_variables', 'group_variables',
                                 'required_projects', 'allowed_projects']):
                    setattr(self, k, other._get(k))

        # Don't set final above so that we don't trip an error halfway
        # through assignment.
        if other.final != self.attributes['final']:
            self.final = other.final

        # Abstract may not be reset by a variant, it may only be
        # cleared by inheriting.
        if other.name != self.name:
            self.abstract = other.abstract
        elif other.abstract:
            self.abstract = True

        # Protected may only be set to true
        if other.protected is not None:
            # don't allow to reset protected flag
            if not other.protected and self.protected_origin:
                raise Exception("Unable to reset protected attribute of job"
                                " %s by job %s" % (
                                    repr(self), repr(other)))
            if not self.protected_origin:
                self.protected_origin = \
                    other.source_context.project.canonical_name

        # We must update roles before any playbook contexts
        if other._get('roles') is not None:
            self.addRoles(other.roles)

        # Freeze the nodeset
        self.nodeset = self.getNodeSet(layout)

        # Pass secrets to parents
        secrets_for_parents = [s for s in other.secrets if s.pass_to_parent]
        if secrets_for_parents:
            decrypted_secrets = []
            for secret_use in secrets_for_parents:
                secret = layout.secrets.get(secret_use.name)
                decrypted_secret = secret.decrypt(
                    other.source_context.project.private_secrets_key)
                decrypted_secret.name = secret_use.alias
                decrypted_secrets.append(decrypted_secret)
            # Add the secrets to any existing playbooks. If any of
            # them are in an untrusted project, then we've just given
            # a secret to a playbook which can run in dynamic config,
            # therefore it's no longer safe to run this job
            # pre-review. The only way pass-to-parent can work with
            # pre-review pipeline is if all playbooks are in the
            # trusted context.
            for pb in itertools.chain(
                    self.pre_run, self.run, self.post_run, self.cleanup_run):
                pb.addSecrets(decrypted_secrets)
                if not pb.source_context.trusted:
                    self.post_review = True

        if other._get('run') is not None:
            other_run = self.freezePlaybooks(other.run, layout)
            self.run = other_run
        if other._get('pre_run') is not None:
            other_pre_run = self.freezePlaybooks(other.pre_run, layout)
            self.pre_run = self.pre_run + other_pre_run
        if other._get('post_run') is not None:
            other_post_run = self.freezePlaybooks(other.post_run, layout)
            self.post_run = other_post_run + self.post_run
        if other._get('cleanup_run') is not None:
            other_cleanup_run = self.freezePlaybooks(other.cleanup_run, layout)
            self.cleanup_run = other_cleanup_run + self.cleanup_run
        self.updateVariables(other.variables, other.extra_variables,
                             other.host_variables, other.group_variables)
        if other._get('required_projects') is not None:
            self.updateProjects(other.required_projects)
        if (other._get('allowed_projects') is not None and
                self._get('allowed_projects') is not None):
            self.allowed_projects = frozenset(
                self.allowed_projects.intersection(
                    other.allowed_projects))
        elif other._get('allowed_projects') is not None:
            self.allowed_projects = other.allowed_projects

        for k in self.context_attributes:
            if (other._get(k) is not None and
                    k not in set(['tags', 'requires', 'provides'])):
                setattr(self, k, other._get(k))

        for k in ('tags', 'requires', 'provides'):
            if other._get(k) is not None:
                setattr(self, k, getattr(self, k).union(other._get(k)))

        self.inheritance_path = self.inheritance_path + (repr(other),)

    def changeMatchesBranch(self, change, override_branch=None):
        if override_branch is None:
            branch_change = change
        else:
            # If an override branch is supplied, create a very basic
            # change (a Ref) and set its branch to the override
            # branch.
            branch_change = Ref(change.project)
            branch_change.ref = override_branch

        if self.branch_matcher and not self.branch_matcher.matches(
                branch_change):
            return False
        return True

    def changeMatchesFiles(self, change):
        if self.file_matcher and not self.file_matcher.matches(change):
            return False

        # NB: This is a negative match.
        if (self.irrelevant_file_matcher and
                self.irrelevant_file_matcher.matches(change)):
            return False

        return True
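
# Note on the matchers above (a summary, not new behavior): because
# setFileMatcher uses MatchAnyFiles, a job runs if any changed file
# matches 'files'; because setIrrelevantFileMatcher uses MatchAllFiles,
# a job is skipped only when every changed file is irrelevant.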


class JobProject(ConfigObject):
    """A reference to a project from a job."""

    def __init__(self, project_name, override_branch=None,
                 override_checkout=None):
        super(JobProject, self).__init__()
        self.project_name = project_name
        self.override_branch = override_branch
        self.override_checkout = override_checkout

    def toDict(self):
        d = dict()
        d['project_name'] = self.project_name
        d['override_branch'] = self.override_branch
        d['override_checkout'] = self.override_checkout
        return d


class JobSemaphore(ConfigObject):
    """A reference to a semaphore from a job."""

    def __init__(self, semaphore_name, resources_first=False):
        super().__init__()
        self.name = semaphore_name
        self.resources_first = resources_first

    def toDict(self):
        d = dict()
        d['name'] = self.name
        d['resources_first'] = self.resources_first
        return d


class JobList(ConfigObject):
    """A list of jobs in a project's pipeline."""

    def __init__(self):
        super(JobList, self).__init__()
        self.jobs = OrderedDict()  # job.name -> [job, ...]

    def addJob(self, job):
        if job.name in self.jobs:
            self.jobs[job.name].append(job)
        else:
            self.jobs[job.name] = [job]

    def inheritFrom(self, other):
        for jobname, jobs in other.jobs.items():
            joblist = self.jobs.setdefault(jobname, [])
            for job in jobs:
                if job not in joblist:
                    joblist.append(job)


class JobDependency(ConfigObject):
    """A reference to another job in the project-pipeline-config."""

    def __init__(self, name, soft=False):
        super(JobDependency, self).__init__()
        self.name = name
        self.soft = soft

    def toDict(self):
        return {'name': self.name,
                'soft': self.soft}


class JobGraph(object):
    """A JobGraph represents the dependency graph between Jobs."""

    def __init__(self):
        self.jobs = OrderedDict()  # job_name -> Job
        # dependent_job_name -> dict(parent_job_name -> soft)
        self._dependencies = {}

    def __repr__(self):
        return '<JobGraph %s>' % (self.jobs)

    def addJob(self, job):
        # A graph must be created after the job list is frozen,
        # therefore we should only get one job with the same name.
        if job.name in self.jobs:
            raise Exception("Job %s already added" % (job.name,))
        self.jobs[job.name] = job
        # Append the dependency information
        self._dependencies.setdefault(job.name, {})
        try:
            for dependency in job.dependencies:
                # Make sure a circular dependency is never created
                ancestor_jobs = self._getParentJobNamesRecursively(
                    dependency.name, soft=True)
                ancestor_jobs.add(dependency.name)
                if any((job.name == anc_job) for anc_job in ancestor_jobs):
                    raise Exception("Dependency cycle detected in job %s" %
                                    (job.name,))
                self._dependencies[job.name][dependency.name] = \
                    dependency.soft
        except Exception:
            del self.jobs[job.name]
            del self._dependencies[job.name]
            raise
    def getJobs(self):
        return list(self.jobs.values())  # Report in the order of layout cfg

    def getDirectDependentJobs(self, parent_job, skip_soft=False):
        ret = set()
        for dependent_name, parents in self._dependencies.items():
            part = parent_job in parents \
                and (not skip_soft or not parents[parent_job])
            if part:
                ret.add(dependent_name)
        return ret

    def getDependentJobsRecursively(self, parent_job, skip_soft=False):
        all_dependent_jobs = set()
        jobs_to_iterate = set([parent_job])
        while len(jobs_to_iterate) > 0:
            current_job = jobs_to_iterate.pop()
            current_dependent_jobs = self.getDirectDependentJobs(current_job,
                                                                 skip_soft)
            new_dependent_jobs = current_dependent_jobs - all_dependent_jobs
            jobs_to_iterate |= new_dependent_jobs
            all_dependent_jobs |= new_dependent_jobs
        return [self.jobs[name] for name in all_dependent_jobs]

    def getParentJobsRecursively(self, dependent_job, layout=None,
                                 skip_soft=False):
        return [self.jobs[name] for name in
                self._getParentJobNamesRecursively(dependent_job,
                                                   layout=layout,
                                                   skip_soft=skip_soft)]

    def _getParentJobNamesRecursively(self, dependent_job, soft=False,
                                      layout=None, skip_soft=False):
        all_parent_jobs = set()
        jobs_to_iterate = set([(dependent_job, False)])
        while len(jobs_to_iterate) > 0:
            (current_job, current_soft) = jobs_to_iterate.pop()
            current_parent_jobs = self._dependencies.get(current_job)
            # Check for a missing job before filtering out soft
            # dependencies; filtering a None value would raise an
            # AttributeError.
            if current_parent_jobs is None:
                if soft or current_soft:
                    if layout:
                        # If the caller supplied a layout, verify that
                        # the job exists to provide a helpful error
                        # message.  Called for exception side effect:
                        layout.getJob(current_job)
                    current_parent_jobs = {}
                else:
                    raise Exception("Job %s depends on %s which was not run." %
                                    (dependent_job, current_job))
            elif dependent_job != current_job:
                all_parent_jobs.add(current_job)
            if skip_soft:
                hard_parent_jobs = \
                    {d: s for d, s in current_parent_jobs.items() if not s}
                current_parent_jobs = hard_parent_jobs
            new_parent_jobs = set(current_parent_jobs.keys()) - all_parent_jobs
            for j in new_parent_jobs:
                jobs_to_iterate.add((j, current_parent_jobs[j]))
        return all_parent_jobs
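

def _example_jobgraph_cycle_detection():
    """Illustrative sketch only, not part of Zuul's API.

    Shows how JobGraph.addJob() records dependencies and rejects a
    dependency cycle.  types.SimpleNamespace is an assumption used
    here purely to satisfy the .name/.dependencies interface that
    addJob() reads; real callers pass frozen Job objects.
    """
    graph = JobGraph()
    # Dependencies may name jobs that are not in the graph yet:
    graph.addJob(types.SimpleNamespace(
        name='unit', dependencies=[JobDependency('lint')]))
    graph.addJob(types.SimpleNamespace(name='lint', dependencies=[]))
    assert graph.getDirectDependentJobs('lint') == {'unit'}

    # A job that depends on itself closes a cycle; addJob() raises
    # and rolls back the partial insertion:
    cycle_rejected = False
    try:
        graph.addJob(types.SimpleNamespace(
            name='deploy', dependencies=[JobDependency('deploy')]))
    except Exception:
        cycle_rejected = True
    assert cycle_rejected and 'deploy' not in graph.jobs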


class Build(object):
    """A Build is an instance of a single execution of a Job.

    While a Job describes what to run, a Build describes an actual
    execution of that Job.  Each build is associated with exactly one
    Job (related builds are grouped together in a BuildSet).
    """

    def __init__(self, job, uuid, zuul_event_id=None):
        self.job = job
        self.uuid = uuid
        self.url = None
        self.result = None
        self.result_data = {}
        self.error_detail = None
        self.build_set = None
        self.execute_time = time.time()
        self.start_time = None
        self.end_time = None
        self.estimated_time = None
        self.canceled = False
        self.paused = False
        self.retry = False
        self.parameters = {}
        self.worker = Worker()
        self.node_labels = []
        self.node_name = None
        self.nodeset = None
        self.zuul_event_id = zuul_event_id

    def __repr__(self):
        return ('<Build %s of %s voting:%s on %s>' %
                (self.uuid, self.job.name, self.job.voting, self.worker))

    @property
    def failed(self):
        if self.result and self.result not in ['SUCCESS', 'SKIPPED']:
            return True
        return False

    @property
    def pipeline(self):
        return self.build_set.item.pipeline

    def getSafeAttributes(self):
        return Attributes(uuid=self.uuid,
                          result=self.result,
                          error_detail=self.error_detail,
                          result_data=self.result_data)
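

def _example_build_result_flags():
    """Illustrative sketch only, not part of Zuul's API.

    Shows how Build.failed is derived from .result.
    types.SimpleNamespace stands in for a frozen Job (an assumption;
    only .name and .voting are read by __repr__).
    """
    job = types.SimpleNamespace(name='unit-tests', voting=True)
    build = Build(job, uuid4().hex)
    assert not build.failed       # No result yet
    build.result = 'SKIPPED'
    assert not build.failed       # Skipped builds do not count as failed
    build.result = 'NODE_FAILURE'
    assert build.failed           # Any other result is a failure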


class Worker(object):
    """Information about the specific worker executing a Build."""

    def __init__(self):
        self.name = "Unknown"
        self.hostname = None
        self.log_port = None

    def updateFromData(self, data):
        """Update worker information if contained in the WORK_DATA response."""
        self.name = data.get('worker_name', self.name)
        self.hostname = data.get('worker_hostname', self.hostname)
        self.log_port = data.get('worker_log_port', self.log_port)

    def __repr__(self):
        return '<Worker %s>' % self.name
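

def _example_worker_update():
    """Illustrative sketch only: updateFromData() keeps prior values
    for keys missing from the payload.  The key names are the ones
    this method reads; the payload values are invented.
    """
    worker = Worker()
    worker.updateFromData({'worker_name': 'ze01',
                           'worker_log_port': 19885})
    assert worker.name == 'ze01'
    assert worker.hostname is None   # Not in the payload, left unchanged
    assert worker.log_port == 19885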


class RepoFiles(object):
    """RepoFiles holds config-file content for per-project job config.

    When Zuul asks a merger to prepare a future multiple-repo state
    and collect Zuul configuration files so that we can dynamically
    load our configuration, this class provides cached access to that
    data for use by the Change which updated the config files and any
    changes that follow it in a ChangeQueue.

    It is attached to a BuildSet since the content of Zuul
    configuration files can change with each new BuildSet.
    """

    def __init__(self):
        self.connections = {}

    def __repr__(self):
        return '<RepoFiles %s>' % self.connections

    def setFiles(self, items):
        self.hostnames = {}
        for item in items:
            connection = self.connections.setdefault(
                item['connection'], {})
            project = connection.setdefault(item['project'], {})
            branch = project.setdefault(item['branch'], {})
            branch.update(item['files'])

    def getFile(self, connection_name, project_name, branch, fn):
        host = self.connections.get(connection_name, {})
        return host.get(project_name, {}).get(branch, {}).get(fn)
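

def _example_repofiles_lookup():
    """Illustrative sketch only: the nested connection -> project ->
    branch -> filename mapping that setFiles() builds.  The names and
    file contents below are invented.
    """
    files = RepoFiles()
    files.setFiles([{
        'connection': 'gerrit',
        'project': 'org/project',
        'branch': 'master',
        'files': {'zuul.yaml': '- job:\n    name: example\n'},
    }])
    assert files.getFile('gerrit', 'org/project', 'master',
                         'zuul.yaml').startswith('- job:')
    # Unknown keys at any level simply return None:
    assert files.getFile('gerrit', 'org/project', 'stable',
                         'zuul.yaml') is None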


class BuildSet(object):
    """A collection of Builds for one specific potential future repository
    state.

    When Zuul executes Builds for a change, it creates a Build to
    represent each execution of each job and a BuildSet to keep track
    of all the Builds running for that Change.  When Zuul re-executes
    Builds for a Change with a different configuration, all of the
    running Builds in the BuildSet for that change are aborted, and a
    new BuildSet is created to hold the Builds for the Jobs being
    run with the new configuration.

    A BuildSet also holds the UUID used to produce the Zuul Ref that
    builders check out.
    """

    # Merge states:
    NEW = 1
    PENDING = 2
    COMPLETE = 3

    states_map = {
        1: 'NEW',
        2: 'PENDING',
        3: 'COMPLETE',
    }

    def __init__(self, item):
        self.item = item
        self.builds = {}
        self.result = None
        self.uuid = None
        self.commit = None
        self.dependent_changes = None
        self.merger_items = None
        self.unable_to_merge = False
        self.config_errors = []  # list of ConfigurationErrors
        self.failing_reasons = []
        self.debug_messages = []
        self.warning_messages = []
        self.merge_state = self.NEW
        self.nodesets = {}  # job -> nodeset
        self.node_requests = {}  # job -> reqs
        self.files = RepoFiles()
        self.repo_state = {}
        self.tries = {}
        if item.change.files is not None:
            self.files_state = self.COMPLETE
        else:
            self.files_state = self.NEW

    @property
    def ref(self):
        # NOTE(jamielennox): The concept of buildset ref is to be removed
        # and a buildset UUID identifier made available instead.  Currently
        # the ref is checked to see if the BuildSet has been configured.
        return 'Z' + self.uuid if self.uuid else None

    def __repr__(self):
        return '<BuildSet item: %s #builds: %s merge state: %s>' % (
            self.item,
            len(self.builds),
            self.getStateName(self.merge_state))

    def setConfiguration(self):
        # The change isn't enqueued until after it's created
        # so we don't know what the other changes ahead will be
        # until jobs start.
        if not self.uuid:
            self.uuid = uuid4().hex
        if self.dependent_changes is None:
            items = [self.item]
            next_item = self.item.item_ahead
            while next_item:
                items.append(next_item)
                next_item = next_item.item_ahead
            items.reverse()
            self.dependent_changes = [i.change.toDict() for i in items]
            self.merger_items = [i.makeMergerItem() for i in items]

    def getStateName(self, state_num):
        return self.states_map.get(
            state_num, 'UNKNOWN (%s)' % state_num)

    def addBuild(self, build):
        self.builds[build.job.name] = build
        if build.job.name not in self.tries:
            self.tries[build.job.name] = 1
        build.build_set = self

    def removeBuild(self, build):
        if build.job.name not in self.builds:
            return
        self.tries[build.job.name] += 1
        del self.builds[build.job.name]

    def getBuild(self, job_name):
        return self.builds.get(job_name)

    def getBuilds(self):
        keys = list(self.builds.keys())
        keys.sort()
        return [self.builds.get(x) for x in keys]

    def getJobNodeSet(self, job_name: str) -> NodeSet:
        # Return None if not provisioned; empty NodeSet if no nodes
        # required
        return self.nodesets.get(job_name)

    def removeJobNodeSet(self, job_name: str):
        if job_name not in self.nodesets:
            raise Exception("No job nodeset for %s" % (job_name))
        del self.nodesets[job_name]

    def setJobNodeRequest(self, job_name: str, req: NodeRequest):
        if job_name in self.node_requests:
            raise Exception("Prior node request for %s" % (job_name))
        self.node_requests[job_name] = req

    def getJobNodeRequest(self, job_name: str) -> NodeRequest:
        return self.node_requests.get(job_name)

    def removeJobNodeRequest(self, job_name: str):
        if job_name in self.node_requests:
            del self.node_requests[job_name]

    def jobNodeRequestComplete(self, job_name: str, nodeset: NodeSet):
        if job_name in self.nodesets:
            raise Exception("Prior node request for %s" % (job_name))
        self.nodesets[job_name] = nodeset
        del self.node_requests[job_name]

    def getTries(self, job_name):
        return self.tries.get(job_name, 0)

    def getMergeMode(self):
        # We may be called before this build set has a shadow layout
        # (ie, we are called to perform the merge to create that
        # layout).  It's possible that the change we are merging will
        # update the merge-mode for the project, but there's not much
        # we can do about that here.  Instead, do the best we can by
        # using the nearest shadow layout to determine the merge mode,
        # or if that fails, the current live layout, or if that fails,
        # use the default: merge-resolve.
        item = self.item
        layout = None
        while item:
            layout = item.layout
            if layout:
                break
            item = item.item_ahead
        if not layout:
            layout = self.item.pipeline.tenant.layout
        if layout:
            project = self.item.change.project
            project_metadata = layout.getProjectMetadata(
                project.canonical_name)
            if project_metadata:
                return project_metadata.merge_mode
        return MERGER_MERGE_RESOLVE

    def getSafeAttributes(self):
        return Attributes(uuid=self.uuid)
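

def _example_buildset_retry_counting():
    """Illustrative sketch only: addBuild()/removeBuild() maintain the
    per-job try counter.  types.SimpleNamespace stands in for the item
    and job objects (an assumption; only the attributes BuildSet reads
    are provided).
    """
    item = types.SimpleNamespace(change=types.SimpleNamespace(files=[]))
    build_set = BuildSet(item)
    job = types.SimpleNamespace(name='unit-tests', voting=True)

    build_set.addBuild(Build(job, uuid4().hex))
    assert build_set.getTries('unit-tests') == 1
    # Removing a build (e.g. for a retry) bumps the counter:
    build_set.removeBuild(build_set.getBuild('unit-tests'))
    assert build_set.getTries('unit-tests') == 2
    assert build_set.getStateName(build_set.merge_state) == 'NEW'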


class QueueItem(object):
    """Represents the position of a Change in a ChangeQueue.

    All Changes are enqueued into a ChangeQueue in a QueueItem.  The
    QueueItem holds the current `BuildSet` as well as all previous
    `BuildSets` that were produced for this `QueueItem`.
    """

    def __init__(self, queue, change, event):
        log = logging.getLogger("zuul.QueueItem")
        self.log = get_annotated_logger(log, event)
        self.pipeline = queue.pipeline
        self.queue = queue
        self.change = change  # a ref
        self.dequeued_needing_change = False
        self.current_build_set = BuildSet(self)
        self.item_ahead = None
        self.items_behind = []
        self.enqueue_time = None
        self.report_time = None
        self.dequeue_time = None
        self.reported = False
        self.reported_enqueue = False
        self.reported_start = False
        self.quiet = False
        self.active = False  # Whether an item is within an active window
        self.live = True  # Whether an item is intended to be processed at all
        self.layout = None
        self.project_pipeline_config = None
        self.job_graph = None
        self._old_job_graph = None  # Cached job graph of previous layout
        self._cached_sql_results = {}
        self.event = event  # The trigger event that led to this queue item

    def annotateLogger(self, logger):
        """Return an annotated logger with the trigger event."""
        return get_annotated_logger(logger, self.event)

    def __repr__(self):
        if self.pipeline:
            pipeline = self.pipeline.name
        else:
            pipeline = None
        return '<QueueItem 0x%x for %s in %s>' % (
            id(self), self.change, pipeline)

    def resetAllBuilds(self):
        self.current_build_set = BuildSet(self)
        self.layout = None
        self.project_pipeline_config = None
        self.job_graph = None
        self._old_job_graph = None

    def addBuild(self, build):
        self.current_build_set.addBuild(build)

    def removeBuild(self, build):
        self.current_build_set.removeBuild(build)

    def setReportedResult(self, result):
        self.report_time = time.time()
        self.current_build_set.result = result

    def debug(self, msg, indent=0):
        if (not self.project_pipeline_config or
                not self.project_pipeline_config.debug):
            return
        if indent:
            indent = ' ' * indent
        else:
            indent = ''
        self.current_build_set.debug_messages.append(indent + msg)

    def warning(self, msg):
        self.current_build_set.warning_messages.append(msg)
        self.log.info(msg)
    def freezeJobGraph(self, skip_file_matcher=False):
        """Find or create actual matching jobs for this item's change and
        store the resulting job tree."""
        ppc = self.layout.getProjectPipelineConfig(self)
        try:
            # Conditionally set self.project_pipeline_config so that
            # the debug method can consult it as we resolve the jobs.
            self.project_pipeline_config = ppc
            if ppc:
                for msg in ppc.debug_messages:
                    self.debug(msg)
            job_graph = self.layout.createJobGraph(
                self, ppc, skip_file_matcher)
            for job in job_graph.getJobs():
                # Ensure that each job's dependencies are fully
                # accessible.  This will raise an exception if not.
                job_graph.getParentJobsRecursively(job.name, self.layout)
            self.job_graph = job_graph
        except Exception:
            self.project_pipeline_config = None
            self.job_graph = None
            self._old_job_graph = None
            raise

    def hasJobGraph(self):
        """Returns True if the item has a job graph."""
        return self.job_graph is not None

    def getJobs(self):
        if not self.live or not self.job_graph:
            return []
        return self.job_graph.getJobs()

    def getJob(self, name):
        if not self.job_graph:
            return None
        return self.job_graph.jobs.get(name)

    @property
    def items_ahead(self):
        item_ahead = self.item_ahead
        while item_ahead:
            yield item_ahead
            item_ahead = item_ahead.item_ahead

    def getNonLiveItemsAhead(self):
        items = [item for item in self.items_ahead if not item.live]
        return reversed(items)
    def haveAllJobsStarted(self):
        if not self.hasJobGraph():
            return False
        for job in self.getJobs():
            build = self.current_build_set.getBuild(job.name)
            if not build or not build.start_time:
                return False
        return True

    def areAllJobsComplete(self):
        if (self.current_build_set.config_errors or
                self.current_build_set.unable_to_merge):
            return True
        if not self.hasJobGraph():
            return False
        for job in self.getJobs():
            build = self.current_build_set.getBuild(job.name)
            if not build or not build.result:
                return False
        return True

    def didAllJobsSucceed(self):
        """Check if all jobs have completed with status SUCCESS.

        Return True if all voting jobs have completed with status
        SUCCESS.  Non-voting jobs are ignored.  Skipped jobs are
        ignored, but skipping all jobs returns a failure.  Incomplete
        builds are considered a failure, hence this is unlikely to be
        useful unless all builds are complete.
        """
        if not self.hasJobGraph():
            return False

        all_jobs_skipped = True
        for job in self.getJobs():
            build = self.current_build_set.getBuild(job.name)
            if build:
                # If the build ran, record whether or not it was skipped
                # and return False if the build was voting and has an
                # unsuccessful return value
                if build.result != 'SKIPPED':
                    all_jobs_skipped = False
                if job.voting and build.result not in ['SUCCESS', 'SKIPPED']:
                    return False
            elif job.voting:
                # If the build failed to run and was voting, that is an
                # unsuccessful build.  But we don't count it against the
                # item if the job is not voting.
                return False

        # NOTE(pabelanger): We shouldn't be able to skip all jobs.
        if all_jobs_skipped:
            return False

        return True
    def hasAnyJobFailed(self):
        """Check if any jobs have finished with a non-success result.

        Return True if any job in the job graph has returned with a
        status not equal to SUCCESS or SKIPPED, else return False.
        Non-voting and in-flight jobs are ignored.
        """
        if not self.hasJobGraph():
            return False
        for job in self.getJobs():
            if not job.voting:
                continue
            build = self.current_build_set.getBuild(job.name)
            if (build and build.result and
                    build.result not in ['SUCCESS', 'SKIPPED']):
                return True
        return False

    def didMergerFail(self):
        return self.current_build_set.unable_to_merge

    def getConfigErrors(self):
        return self.current_build_set.config_errors

    def wasDequeuedNeedingChange(self):
        return self.dequeued_needing_change

    def includesConfigUpdates(self):
        includes_trusted = False
        includes_untrusted = False
        tenant = self.pipeline.tenant
        item = self
        while item:
            if item.change.updatesConfig(tenant):
                (trusted, project) = tenant.getProject(
                    item.change.project.canonical_name)
                if trusted:
                    includes_trusted = True
                else:
                    includes_untrusted = True
            if includes_trusted and includes_untrusted:
                # We're done early
                return (includes_trusted, includes_untrusted)
            item = item.item_ahead
        return (includes_trusted, includes_untrusted)

    def isHoldingFollowingChanges(self):
        if not self.live:
            return False
        if not self.hasJobGraph():
            return False
        for job in self.getJobs():
            if not job.hold_following_changes:
                continue
            build = self.current_build_set.getBuild(job.name)
            if not build:
                return True
            if build.result != 'SUCCESS':
                return True

        if not self.item_ahead:
            return False
        return self.item_ahead.isHoldingFollowingChanges()
    def _getRequirementsResultFromSQL(self, requirements):
        # This either returns data or raises an exception
        requirements_tuple = tuple(sorted(requirements))
        if requirements_tuple not in self._cached_sql_results:
            sql_driver = self.pipeline.manager.sched.connections.drivers['sql']
            conn = sql_driver.tenant_connections.get(self.pipeline.tenant.name)
            if conn:
                builds = conn.getBuilds(
                    tenant=self.pipeline.tenant.name,
                    project=self.change.project.name,
                    pipeline=self.pipeline.name,
                    change=self.change.number,
                    branch=self.change.branch,
                    patchset=self.change.patchset,
                    provides=requirements_tuple)
            else:
                builds = []
            # Just look at the most recent buildset.
            # TODO: query for a buildset instead of filtering.
            builds = [b for b in builds
                      if b.buildset.uuid == builds[0].buildset.uuid]
            self._cached_sql_results[requirements_tuple] = builds

        builds = self._cached_sql_results[requirements_tuple]
        data = []
        if not builds:
            return data

        for build in builds:
            if build.result != 'SUCCESS':
                provides = [x.name for x in build.provides]
                requirement = list(requirements.intersection(set(provides)))
                raise RequirementsError(
                    "Requirements %s not met by build %s" % (
                        requirement, build.uuid))
            else:
                for a in build.artifacts:
                    artifact = {'name': a.name,
                                'url': a.url,
                                'project': build.buildset.project,
                                'change': str(build.buildset.change),
                                'patchset': build.buildset.patchset,
                                'job': build.job_name}
                    if a.meta:
                        artifact['metadata'] = json.loads(a.meta)
                    data.append(artifact)
        return data
    def providesRequirements(self, requirements, data, recurse=True):
        # Mutates data and returns True or False depending on whether
        # the requirements are satisfied.
        if not requirements:
            return True
        if not self.live:
            # Look for this item in other queues in the pipeline.
            item = None
            found = False
            for item in self.pipeline.getAllItems():
                if item.live and item.change == self.change:
                    found = True
                    break
            if found:
                if not item.providesRequirements(requirements, data,
                                                 recurse=False):
                    return False
            else:
                # Look for this item in the SQL DB.
                data += self._getRequirementsResultFromSQL(requirements)
        if self.hasJobGraph():
            for job in self.getJobs():
                if job.provides.intersection(requirements):
                    build = self.current_build_set.getBuild(job.name)
                    if not build:
                        return False
                    if build.result and build.result != 'SUCCESS':
                        return False
                    if not build.result and not build.paused:
                        return False
                    artifacts = get_artifacts_from_result_data(
                        build.result_data,
                        logger=self.log)
                    for a in artifacts:
                        a.update({'project': self.change.project.name,
                                  'change': self.change.number,
                                  'patchset': self.change.patchset,
                                  'job': build.job.name})
                    data += artifacts
        if not self.item_ahead:
            return True
        if not recurse:
            return True
        return self.item_ahead.providesRequirements(requirements, data)

    def jobRequirementsReady(self, job):
        if not self.item_ahead:
            return True
        try:
            data = []
            ret = self.item_ahead.providesRequirements(job.requires, data)
            data.reverse()
            job.updateArtifactData(data)
        except RequirementsError as e:
            self.warning(str(e))
            fakebuild = Build(job, None)
            fakebuild.result = 'FAILURE'
            self.addBuild(fakebuild)
            self.setResult(fakebuild)
            ret = True
        return ret
    def findJobsToRun(self, semaphore_handler):
        torun = []
        if not self.live:
            return []
        if not self.job_graph:
            return []
        if self.item_ahead:
            # Only run jobs if any 'hold' jobs on the change ahead
            # have completed successfully.
            if self.item_ahead.isHoldingFollowingChanges():
                return []

        failed_job_names = set()  # Jobs that ran and failed
        ignored_job_names = set()  # Jobs that were skipped or canceled
        unexecuted_job_names = set()  # Jobs that were not started yet
        jobs_not_started = set()
        for job in self.job_graph.getJobs():
            build = self.current_build_set.getBuild(job.name)
            if build:
                if build.result == 'SUCCESS' or build.paused:
                    pass
                elif build.result == 'SKIPPED':
                    ignored_job_names.add(job.name)
                else:  # elif build.result in ('FAILURE', 'CANCELED', ...):
                    failed_job_names.add(job.name)
            else:
                unexecuted_job_names.add(job.name)
                jobs_not_started.add(job)

        # Attempt to run jobs in the order they appear in
        # configuration.
        for job in self.job_graph.getJobs():
            if job not in jobs_not_started:
                continue
            if not self.jobRequirementsReady(job):
                continue
            all_parent_jobs_successful = True
            parent_builds_with_data = {}
            for parent_job in self.job_graph.getParentJobsRecursively(
                    job.name):
                if parent_job.name in unexecuted_job_names \
                        or parent_job.name in failed_job_names:
                    all_parent_jobs_successful = False
                    break
                parent_build = self.current_build_set.getBuild(parent_job.name)
                if parent_build.result_data:
                    parent_builds_with_data[parent_job.name] = parent_build

            for parent_job in self.job_graph.getParentJobsRecursively(
                    job.name, skip_soft=True):
                if parent_job.name in ignored_job_names:
                    all_parent_jobs_successful = False
                    break

            if all_parent_jobs_successful:
                # Iterate in reverse order over all jobs of the graph
                # (which is in sorted config order) and apply parent data
                # of the jobs we already found.
                if len(parent_builds_with_data) > 0:
                    for parent_job in reversed(self.job_graph.getJobs()):
                        parent_build = parent_builds_with_data.get(
                            parent_job.name)
                        if parent_build:
                            job.updateParentData(parent_build)

                nodeset = self.current_build_set.getJobNodeSet(job.name)
                if nodeset is None:
                    # The nodes for this job are not ready, skip
                    # it for now.
                    continue
                if semaphore_handler.acquire(self, job, False):
                    # If this job needs a semaphore, either acquire it or
                    # make sure that we have it before running the job.
                    torun.append(job)
        return torun
    def findJobsToRequest(self, semaphore_handler):
        build_set = self.current_build_set
        toreq = []
        if not self.live:
            return []
        if not self.job_graph:
            return []
        if self.item_ahead:
            if self.item_ahead.isHoldingFollowingChanges():
                return []

        failed_job_names = set()  # Jobs that ran and failed
        ignored_job_names = set()  # Jobs that were skipped or canceled
        unexecuted_job_names = set()  # Jobs that were not started yet
        jobs_not_requested = set()
        for job in self.job_graph.getJobs():
            build = build_set.getBuild(job.name)
            if build and (build.result == 'SUCCESS' or build.paused):
                pass
            elif build and build.result == 'SKIPPED':
                ignored_job_names.add(job.name)
            elif build and build.result in ('FAILURE', 'CANCELED'):
                failed_job_names.add(job.name)
            else:
                unexecuted_job_names.add(job.name)
                nodeset = build_set.getJobNodeSet(job.name)
                if nodeset is None:
                    req = build_set.getJobNodeRequest(job.name)
                    if req is None:
                        jobs_not_requested.add(job)

        # Attempt to request nodes for jobs in the order jobs appear
        # in configuration.
        for job in self.job_graph.getJobs():
            if job not in jobs_not_requested:
                continue
            if not self.jobRequirementsReady(job):
                continue
            all_parent_jobs_successful = True
            for parent_job in self.job_graph.getParentJobsRecursively(
                    job.name):
                if parent_job.name in unexecuted_job_names \
                        or parent_job.name in failed_job_names:
                    all_parent_jobs_successful = False
                    break
            for parent_job in self.job_graph.getParentJobsRecursively(
                    job.name, skip_soft=True):
                if parent_job.name in ignored_job_names:
                    all_parent_jobs_successful = False
                    break
            if all_parent_jobs_successful:
                if semaphore_handler.acquire(self, job, True):
                    # If this job needs a semaphore, either acquire it or
                    # make sure that we have it before requesting the nodes.
                    toreq.append(job)
                    job.queued = True
        return toreq
    def setResult(self, build):
        if build.retry:
            self.removeBuild(build)
            return

        skipped = []

        # NOTE(pabelanger): Check successful jobs to see if zuul_return
        # includes zuul.child_jobs.
        build_result = build.result_data.get('zuul', {})
        if build.result == 'SUCCESS' and 'child_jobs' in build_result:
            zuul_return = build_result.get('child_jobs', [])
            dependent_jobs = self.job_graph.getDirectDependentJobs(
                build.job.name)

            if not zuul_return:
                # If zuul.child_jobs exists and is empty, the user
                # wants to skip all child jobs.
                to_skip = self.job_graph.getDependentJobsRecursively(
                    build.job.name, skip_soft=True)
                skipped += to_skip
            else:
                # The user supplied a list of jobs to run.
                intersect_jobs = dependent_jobs.intersection(zuul_return)

                for skip in (dependent_jobs - intersect_jobs):
                    s = self.job_graph.jobs.get(skip)
                    skipped.append(s)
                    to_skip = self.job_graph.getDependentJobsRecursively(
                        skip, skip_soft=True)
                    skipped += to_skip

        elif build.result != 'SUCCESS' and not build.paused:
            to_skip = self.job_graph.getDependentJobsRecursively(
                build.job.name)
            skipped += to_skip

        for job in skipped:
            child_build = self.current_build_set.getBuild(job.name)
            if not child_build:
                fakebuild = Build(job, None)
                fakebuild.result = 'SKIPPED'
                self.addBuild(fakebuild)

    def setNodeRequestFailure(self, job):
        fakebuild = Build(job, None)
        fakebuild.start_time = time.time()
        fakebuild.end_time = time.time()
        self.addBuild(fakebuild)
        fakebuild.result = 'NODE_FAILURE'
        self.setResult(fakebuild)
    def setDequeuedNeedingChange(self):
        self.dequeued_needing_change = True
        self._setAllJobsSkipped()

    def setUnableToMerge(self):
        self.current_build_set.unable_to_merge = True
        self._setAllJobsSkipped()

    def setConfigError(self, error):
        err = ConfigurationError(None, None, error)
        self.setConfigErrors([err])

    def setConfigErrors(self, errors):
        self.current_build_set.config_errors = errors
        self._setAllJobsSkipped()

    def _setAllJobsSkipped(self):
        for job in self.getJobs():
            fakebuild = Build(job, None)
            fakebuild.result = 'SKIPPED'
            self.addBuild(fakebuild)

    def getNodePriority(self):
        return self.pipeline.manager.getNodePriority(self)

    def formatUrlPattern(self, url_pattern, job=None, build=None):
        url = None
        # Produce safe versions of objects which may be useful in
        # result formatting, but don't allow users to crawl through
        # the entire data structure where they might be able to access
        # secrets, etc.
        safe_change = self.change.getSafeAttributes()
        safe_pipeline = self.pipeline.getSafeAttributes()
        safe_tenant = self.pipeline.tenant.getSafeAttributes()
        safe_buildset = self.current_build_set.getSafeAttributes()
        safe_job = job.getSafeAttributes() if job else {}
        safe_build = build.getSafeAttributes() if build else {}
        try:
            url = url_pattern.format(change=safe_change,
                                     pipeline=safe_pipeline,
                                     tenant=safe_tenant,
                                     buildset=safe_buildset,
                                     job=safe_job,
                                     build=safe_build)
        except KeyError as e:
            self.log.error("Error while formatting url for job %s: unknown "
                           "key %s in pattern %s"
                           % (job, e.args[0], url_pattern))
        except AttributeError as e:
            self.log.error("Error while formatting url for job %s: unknown "
                           "attribute %s in pattern %s"
                           % (job, e.args[0], url_pattern))
        except Exception:
            self.log.exception("Error while formatting url for job %s with "
                               "pattern %s:" % (job, url_pattern))

        return url
    def formatJobResult(self, job):
        if (self.pipeline.tenant.report_build_page and
                self.pipeline.tenant.web_root):
            build = self.current_build_set.getBuild(job.name)
            pattern = urllib.parse.urljoin(self.pipeline.tenant.web_root,
                                           'build/{build.uuid}')
            url = self.formatUrlPattern(pattern, job, build)
            return (build.result, url)
        else:
            return self.formatProvisionalJobResult(job)

    def formatStatusUrl(self):
        if self.current_build_set.result:
            # We have reported (or are reporting) and so we should
            # send the buildset page url
            if (self.pipeline.tenant.report_build_page and
                    self.pipeline.tenant.web_root):
                pattern = urllib.parse.urljoin(self.pipeline.tenant.web_root,
                                               'buildset/{buildset.uuid}')
                return self.formatUrlPattern(pattern)

        # We haven't reported yet (or we don't have a database), so
        # the best we can do at the moment is send the status page
        # url.  TODO: require a database, insert buildsets into it
        # when they are created, and remove this case.
        if self.pipeline.tenant.web_root:
            pattern = urllib.parse.urljoin(
                self.pipeline.tenant.web_root,
                'status/change/{change.number},{change.patchset}')
            return self.formatUrlPattern(pattern)

        # Apparently we have no web site.
        return None
    def formatProvisionalJobResult(self, job):
        build = self.current_build_set.getBuild(job.name)
        result = build.result
        pattern = None
        if result == 'SUCCESS':
            if job.success_message:
                result = job.success_message
            if job.success_url:
                pattern = job.success_url
        else:
            if job.failure_message:
                result = job.failure_message
            if job.failure_url:
                pattern = job.failure_url
        url = None  # The final URL
        default_url = build.result_data.get('zuul', {}).get('log_url')
        if pattern:
            job_url = self.formatUrlPattern(pattern, job, build)
        else:
            job_url = None
        try:
            if job_url:
                u = urllib.parse.urlparse(job_url)
                if u.scheme:
                    # The job success or failure url is absolute, so it's
                    # our final url.
                    url = job_url
                else:
                    # We have a relative job url.  Combine it with our
                    # default url.
                    if default_url:
                        url = urllib.parse.urljoin(default_url, job_url)
        except Exception:
            self.log.exception("Error while parsing url for job %s:"
                               % (job,))
        if not url:
            url = default_url or build.url or job.name
        return (result, url)
    def formatJSON(self, websocket_url=None):
        ret = {}
        ret['active'] = self.active
        ret['live'] = self.live
        if hasattr(self.change, 'url') and self.change.url is not None:
            ret['url'] = self.change.url
        else:
            ret['url'] = None
        if hasattr(self.change, 'ref') and self.change.ref is not None:
            ret['ref'] = self.change.ref
        else:
            ret['ref'] = None
        ret['id'] = self.change._id()
        if self.item_ahead:
            ret['item_ahead'] = self.item_ahead.change._id()
        else:
            ret['item_ahead'] = None
        ret['items_behind'] = [i.change._id() for i in self.items_behind]
        ret['failing_reasons'] = self.current_build_set.failing_reasons
        ret['zuul_ref'] = self.current_build_set.ref
        if self.change.project:
            ret['project'] = self.change.project.name
            ret['project_canonical'] = self.change.project.canonical_name
        else:
            # For cross-project dependencies with the depends-on
            # project not known to zuul, the project is None.
            # Set it to a static value.
            ret['project'] = "Unknown Project"
            ret['project_canonical'] = "Unknown Project"
        ret['enqueue_time'] = int(self.enqueue_time * 1000)
        ret['jobs'] = []
        if hasattr(self.change, 'owner'):
            ret['owner'] = self.change.owner
        else:
            ret['owner'] = None
        max_remaining = 0
        for job in self.getJobs():
            now = time.time()
            build = self.current_build_set.getBuild(job.name)
            elapsed = None
            remaining = None
            result = None
            build_url = None
            finger_url = None
            report_url = None
            worker = None
            if build:
                result = build.result
                finger_url = build.url
                # TODO(tobiash): add support for custom web root
                urlformat = 'stream/{build.uuid}?' \
                            'logfile=console.log'
                if websocket_url:
                    urlformat += '&websocket_url={websocket_url}'
                build_url = urlformat.format(
                    build=build, websocket_url=websocket_url)
                (unused, report_url) = self.formatProvisionalJobResult(job)
                if build.start_time:
                    if build.end_time:
                        elapsed = int((build.end_time -
                                       build.start_time) * 1000)
                        remaining = 0
                    else:
                        elapsed = int((now - build.start_time) * 1000)
                        if build.estimated_time:
                            remaining = max(
                                int(build.estimated_time * 1000) - elapsed,
                                0)
                worker = {
                    'name': build.worker.name,
                    'hostname': build.worker.hostname,
                }
            if remaining and remaining > max_remaining:
                max_remaining = remaining

            ret['jobs'].append({
                'name': job.name,
                'dependencies': [x.name for x in job.dependencies],
                'elapsed_time': elapsed,
                'remaining_time': remaining,
                'url': build_url,
                'finger_url': finger_url,
                'report_url': report_url,
                'result': result,
                'voting': job.voting,
                'uuid': build.uuid if build else None,
                'execute_time': build.execute_time if build else None,
                'start_time': build.start_time if build else None,
                'end_time': build.end_time if build else None,
                'estimated_time': build.estimated_time if build else None,
                'pipeline': build.pipeline.name if build else None,
                'canceled': build.canceled if build else None,
                'paused': build.paused if build else None,
                'retry': build.retry if build else None,
                'tries': self.current_build_set.getTries(job.name),
                'queued': job.queued,
                'node_labels': build.node_labels if build else [],
                'node_name': build.node_name if build else None,
                'worker': worker,
            })
        if self.haveAllJobsStarted():
            ret['remaining_time'] = max_remaining
        else:
            ret['remaining_time'] = None
        return ret
    def formatStatus(self, indent=0, html=False):
        indent_str = ' ' * indent
        ret = ''
        if html and getattr(self.change, 'url', None) is not None:
            ret += '%sProject %s change <a href="%s">%s</a>\n' % (
                indent_str,
                self.change.project.name,
                self.change.url,
                self.change._id())
        else:
            ret += '%sProject %s change %s based on %s\n' % (
                indent_str,
                self.change.project.name,
                self.change._id(),
                self.item_ahead)
        for job in self.getJobs():
            build = self.current_build_set.getBuild(job.name)
            if build:
                result = build.result
            else:
                result = None
            job_name = job.name
            if not job.voting:
                voting = ' (non-voting)'
            else:
                voting = ''
            if html:
                if build:
                    url = build.url
                else:
                    url = None
                if url is not None:
                    job_name = '<a href="%s">%s</a>' % (url, job_name)
            ret += '%s  %s: %s%s' % (indent_str, job_name, result, voting)
            ret += '\n'
        return ret
    def makeMergerItem(self):
        # Create a dictionary with all info about the item needed by
        # the merger.
        number = None
        patchset = None
        oldrev = None
        newrev = None
        branch = None
        if hasattr(self.change, 'number'):
            number = self.change.number
            patchset = self.change.patchset
        if hasattr(self.change, 'newrev'):
            oldrev = self.change.oldrev
            newrev = self.change.newrev
        if hasattr(self.change, 'branch'):
            branch = self.change.branch

        source = self.change.project.source
        connection_name = source.connection.connection_name
        project = self.change.project

        return dict(project=project.name,
                    connection=connection_name,
                    merge_mode=self.current_build_set.getMergeMode(),
                    ref=self.change.ref,
                    branch=branch,
                    buildset_uuid=self.current_build_set.uuid,
                    number=number,
                    patchset=patchset,
                    oldrev=oldrev,
                    newrev=newrev,
                    )
    def updatesJobConfig(self, job):
        log = self.annotateLogger(self.log)
        layout_ahead = self.pipeline.tenant.layout
        if self.item_ahead and self.item_ahead.layout:
            layout_ahead = self.item_ahead.layout
        if layout_ahead and self.layout and self.layout is not layout_ahead:
            # This change updates the layout.  Calculate the job as it
            # would be if the layout had not changed.
            if self._old_job_graph is None:
                try:
                    ppc = layout_ahead.getProjectPipelineConfig(self)
                    log.debug("Creating job graph for config change detection")
                    self._old_job_graph = layout_ahead.createJobGraph(
                        self, ppc, skip_file_matcher=True)
                    log.debug("Done creating job graph for "
                              "config change detection")
                except Exception:
                    self.log.debug(
                        "Error freezing job graph in job update check:",
                        exc_info=True)
                    # The config was broken before, we have no idea
                    # which jobs have changed, so rather than run them
                    # all, just rely on the file matchers as-is.
                    return False
            old_job = self._old_job_graph.jobs.get(job.name)
            if old_job is None:
                log.debug("Found a newly created job")
                return True  # A newly created job
            if (job.toDict(self.pipeline.tenant) !=
                    old_job.toDict(self.pipeline.tenant)):
                log.debug("Found an updated job")
                return True  # This job's configuration has changed
        return False
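

def _example_relative_url_resolution():
    """Illustrative sketch only: the urllib.parse behavior that
    formatProvisionalJobResult() relies on when a job's success or
    failure URL is relative.  The URLs below are invented.
    """
    log_url = 'https://logs.example.com/123/'
    # An absolute job URL has a scheme and is used as-is:
    assert urllib.parse.urlparse('https://docs.example.com/').scheme == 'https'
    # A relative job URL is resolved against the build's log URL:
    assert (urllib.parse.urljoin(log_url, 'testr_results.html') ==
            'https://logs.example.com/123/testr_results.html')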


class Ref(object):
    """An existing state of a Project."""

    def __init__(self, project):
        self.project = project
        self.ref = None
        self.oldrev = None
        self.newrev = None
        self.files = []

    def _id(self):
        return self.newrev

    def __repr__(self):
        rep = None
        pname = None
        if self.project and self.project.name:
            pname = self.project.name
        if self.newrev == '0000000000000000000000000000000000000000':
            rep = '<%s 0x%x %s deletes %s from %s>' % (
                type(self).__name__, id(self), pname,
                self.ref, self.oldrev)
        elif self.oldrev == '0000000000000000000000000000000000000000':
            rep = '<%s 0x%x %s creates %s on %s>' % (
                type(self).__name__, id(self), pname,
                self.ref, self.newrev)
        else:
            # Catch all
            rep = '<%s 0x%x %s %s updated %s..%s>' % (
                type(self).__name__, id(self), pname,
                self.ref, self.oldrev, self.newrev)
        return rep

    def equals(self, other):
        if (self.project == other.project
                and self.ref == other.ref
                and self.newrev == other.newrev):
            return True
        return False

    def isUpdateOf(self, other):
        return False

    def getRelatedChanges(self):
        return set()

    def updatesConfig(self, tenant):
        tpc = tenant.project_configs.get(self.project.canonical_name)
        if tpc is None:
            return False
        if self.files is None:
            # If self.files is None we don't know whether this change
            # updates the config, so assume it does; that is the safe
            # default when we don't know.
            return True
        for fn in self.files:
            if fn == 'zuul.yaml':
                return True
            if fn == '.zuul.yaml':
                return True
            if fn.startswith("zuul.d/"):
                return True
            if fn.startswith(".zuul.d/"):
                return True
            for ef in tpc.extra_config_files:
                if fn.startswith(ef):
                    return True
            for ed in tpc.extra_config_dirs:
                if fn.startswith(ed):
                    return True
        return False

    def getSafeAttributes(self):
        return Attributes(project=self.project,
                          ref=self.ref,
                          oldrev=self.oldrev,
                          newrev=self.newrev)

    def toDict(self):
        # Render to a dict to use in passing json to the executor
        d = dict()
        d['project'] = dict(
            name=self.project.name,
            short_name=self.project.name.split('/')[-1],
            canonical_hostname=self.project.canonical_hostname,
            canonical_name=self.project.canonical_name,
            src_dir=os.path.join('src', self.project.canonical_name),
        )
        return d
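

def _example_ref_updates_config():
    """Illustrative sketch only: which touched files make
    updatesConfig() report a configuration update.
    types.SimpleNamespace stands in for the tenant, project and
    TenantProjectConfig (an assumption; only the attributes this
    method reads are provided).
    """
    project = types.SimpleNamespace(canonical_name='example.com/org/project',
                                    name='org/project')
    tpc = types.SimpleNamespace(extra_config_files=(), extra_config_dirs=())
    tenant = types.SimpleNamespace(
        project_configs={'example.com/org/project': tpc})

    ref = Ref(project)
    ref.files = ['README.rst']
    assert not ref.updatesConfig(tenant)   # Ordinary files don't count
    ref.files = ['zuul.d/jobs.yaml']
    assert ref.updatesConfig(tenant)       # In-repo config files do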


class Branch(Ref):
    """An existing branch state for a Project."""

    def __init__(self, project):
        super(Branch, self).__init__(project)
        self.branch = None

    def toDict(self):
        # Render to a dict to use in passing json to the executor
        d = super(Branch, self).toDict()
        d['branch'] = self.branch
        return d


class Tag(Ref):
    """An existing tag state for a Project."""

    def __init__(self, project):
        super(Tag, self).__init__(project)
        self.tag = None


class Change(Branch):
    """A proposed new state for a Project."""

    def __init__(self, project):
        super(Change, self).__init__(project)
        self.number = None
        # The gitweb url for browsing the change
        self.url = None
        # URIs for this change which may appear in depends-on headers.
        # Note this omits the scheme; i.e., is hostname/path.
        self.uris = []
        self.patchset = None

        # Changes that the source determined are needed due to the
        # git DAG:
        self.git_needs_changes = []
        self.git_needed_by_changes = []

        # Changes that the source determined are needed by backwards
        # compatible processing of Depends-On headers (Gerrit only):
        self.compat_needs_changes = []
        self.compat_needed_by_changes = []

        # Changes that the pipeline manager determined are needed due
        # to Depends-On headers (all drivers):
        self.commit_needs_changes = None
        self.refresh_deps = False

        self.is_current_patchset = True
        self.can_merge = False
        self.is_merged = False
        self.failed_to_merge = False
        self.open = None
        self.status = None
        self.owner = None

        # This may be the commit message, or it may be a cover message
        # in the case of a PR.  Either way, it's the place where we
        # look for depends-on headers.
        self.message = None

        self.source_event = None

    def _id(self):
        return '%s,%s' % (self.number, self.patchset)

    def __repr__(self):
        pname = None
        if self.project and self.project.name:
            pname = self.project.name
        return '<Change 0x%x %s %s>' % (id(self), pname, self._id())

    def equals(self, other):
        if self.number == other.number and self.patchset == other.patchset:
            return True
        return False

    @property
    def needs_changes(self):
        return (self.git_needs_changes + self.compat_needs_changes +
                self.commit_needs_changes)

    @property
    def needed_by_changes(self):
        return (self.git_needed_by_changes + self.compat_needed_by_changes)

    def isUpdateOf(self, other):
        if (self.project == other.project and
                (hasattr(other, 'number') and self.number == other.number) and
                (hasattr(other, 'patchset') and
                 self.patchset is not None and
                 other.patchset is not None and
                 int(self.patchset) > int(other.patchset))):
            return True
        return False

    def getRelatedChanges(self):
        related = set()
        for c in self.needs_changes:
            related.add(c)
        for c in self.needed_by_changes:
            related.add(c)
            related.update(c.getRelatedChanges())
        return related

    def getSafeAttributes(self):
        return Attributes(project=self.project,
                          number=self.number,
                          patchset=self.patchset)

    def toDict(self):
        # Render to a dict to use in passing json to the executor
        d = super(Change, self).toDict()
        d['change'] = str(self.number)
        d['change_url'] = self.url
        d['patchset'] = str(self.patchset)
        return d
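

def _example_change_update_check():
    """Illustrative sketch only: isUpdateOf() is true when a change is
    a newer patchset of the same change on the same project.
    types.SimpleNamespace stands in for the project object (an
    assumption; only equality is used here).
    """
    project = types.SimpleNamespace(name='org/project')
    old = Change(project)
    old.number, old.patchset = 1234, '1'
    new = Change(project)
    new.number, new.patchset = 1234, '2'

    assert new._id() == '1234,2'
    assert new.isUpdateOf(old)
    assert not old.isUpdateOf(new)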


class TriggerEvent(object):
    """Incoming event from an external system."""

    def __init__(self):
        # TODO(jeblair): further reduce this list
        self.data = None
        # common
        self.type = None
        self.branch_updated = False
        self.branch_created = False
        self.branch_deleted = False
        self.branch_protected = True
        self.ref = None
        # For management events (eg: enqueue / promote)
        self.tenant_name = None
        self.project_hostname = None
        self.project_name = None
        self.trigger_name = None
        # Representation of the user account that performed the event.
        self.account = None
        # patchset-created, comment-added, etc.
        self.change_number = None
        self.change_url = None
        self.patch_number = None
        self.branch = None
        self.comment = None
        self.state = None
        # ref-updated
        self.oldrev = None
        self.newrev = None
        # For events that arrive with a destination pipeline (eg, from
        # an admin command, etc):
        self.forced_pipeline = None
        # For logging
        self.zuul_event_id = None
        self.timestamp = None

    @property
    def canonical_project_name(self):
        return self.project_hostname + '/' + self.project_name

    def isPatchsetCreated(self):
        return False

    def isChangeAbandoned(self):
        return False

    def _repr(self):
        flags = [str(self.type)]
        if self.project_name:
            flags.append(self.project_name)
        if self.ref:
            flags.append(self.ref)
        if self.branch_updated:
            flags.append('branch_updated')
        if self.branch_created:
            flags.append('branch_created')
        if self.branch_deleted:
            flags.append('branch_deleted')
        return ' '.join(flags)

    def __repr__(self):
        return '<%s 0x%x %s>' % (self.__class__.__name__,
                                 id(self), self._repr())


class FalseWithReason(object):
    """Event filter result"""

    def __init__(self, reason):
        self.reason = reason

    def __str__(self):
        return self.reason

    def __bool__(self):
        return False
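

def _example_false_with_reason():
    """Illustrative sketch only: FalseWithReason is falsy in boolean
    context but still carries a human-readable explanation, so filter
    code can both branch on the result and log why a match failed.
    The reason string below is invented.
    """
    result = FalseWithReason("branch 'master' does not match")
    if not result:
        # The reason survives for logging/reporting:
        assert str(result) == "branch 'master' does not match"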


class BaseFilter(ConfigObject):
    """Base Class for filtering which Changes and Events to process."""
    pass


class EventFilter(BaseFilter):
    """Allows a Pipeline to only respond to certain events."""

    def __init__(self, trigger):
        super(EventFilter, self).__init__()
        self.trigger = trigger

    def matches(self, event, ref):
        # TODO(jeblair): consider removing ref argument
        return True


class RefFilter(BaseFilter):
    """Allows a Manager to only enqueue Changes that meet certain criteria."""

    def __init__(self, connection_name):
        super(RefFilter, self).__init__()
        self.connection_name = connection_name

    def matches(self, change):
        return True


class TenantProjectConfig(object):
    """A project in the context of a tenant.

    A Project is globally unique in the system, however, when used in
    a tenant, some metadata about the project local to the tenant is
    stored in a TenantProjectConfig.
    """

    def __init__(self, project):
        self.project = project
        self.load_classes = set()
        self.shadow_projects = set()
        self.branches = []
        # The tenant's default setting of exclude_unprotected_branches will
        # be overridden by this one if not None.
        self.exclude_unprotected_branches = None
        self.parsed_branch_config = {}  # branch -> ParsedConfig
        # The list of paths to look for extra zuul config files
        self.extra_config_files = ()
        # The list of paths to look for extra zuul config dirs
        self.extra_config_dirs = ()


class ProjectPipelineConfig(ConfigObject):
    # Represents a project configuration in the context of a pipeline

    def __init__(self):
        super(ProjectPipelineConfig, self).__init__()
        self.job_list = JobList()
        self.queue_name = None
        self.debug = False
        self.debug_messages = []
        self.fail_fast = None
        self.variables = {}

    def addDebug(self, msg):
        self.debug_messages.append(msg)

    def update(self, other):
        if not isinstance(other, ProjectPipelineConfig):
            raise Exception("Unable to update from %s" % (other,))
        if self.queue_name is None:
            self.queue_name = other.queue_name
        if other.debug:
            self.debug = other.debug
        if self.fail_fast is None:
            self.fail_fast = other.fail_fast
        self.job_list.inheritFrom(other.job_list)

    def updateVariables(self, other):
        # We need to keep this separate from update() because we wish to
        # apply the project variables all the time, even if its jobs
        # only come from templates.
        self.variables = Job._deepUpdate(self.variables, other)

    def toDict(self):
        d = {}
        d['queue_name'] = self.queue_name
        return d
  2909. class ProjectConfig(ConfigObject):
  2910. # Represents a project configuration
  2911. def __init__(self, name):
  2912. super(ProjectConfig, self).__init__()
  2913. self.name = name
  2914. self.templates = []
  2915. # Pipeline name -> ProjectPipelineConfig
  2916. self.pipelines = {}
  2917. self.branch_matcher = None
  2918. self.variables = {}
  2919. # These represent the values from the config file, but should
  2920. # not be used directly; instead, use the ProjectMetadata to
  2921. # find the computed value from across all project config
  2922. # stanzas.
  2923. self.merge_mode = None
  2924. self.default_branch = None
  2925. def __repr__(self):
  2926. return '<ProjectConfig %s source: %s %s>' % (
  2927. self.name, self.source_context, self.branch_matcher)
  2928. def copy(self):
  2929. r = self.__class__(self.name)
  2930. r.source_context = self.source_context
  2931. r.start_mark = self.start_mark
  2932. r.templates = self.templates
  2933. r.pipelines = self.pipelines
  2934. r.branch_matcher = self.branch_matcher
  2935. r.variables = self.variables
  2936. r.merge_mode = self.merge_mode
  2937. r.default_branch = self.default_branch
  2938. return r
  2939. def setImpliedBranchMatchers(self, branches):
  2940. if len(branches) == 0:
  2941. self.branch_matcher = None
  2942. elif len(branches) > 1:
  2943. matchers = [change_matcher.ImpliedBranchMatcher(branch)
  2944. for branch in branches]
  2945. self.branch_matcher = change_matcher.MatchAny(matchers)
  2946. else:
  2947. self.branch_matcher = change_matcher.ImpliedBranchMatcher(
  2948. branches[0])
  2949. def changeMatches(self, change):
  2950. if self.branch_matcher and not self.branch_matcher.matches(change):
  2951. return False
  2952. return True
  2953. def toDict(self):
  2954. d = {}
  2955. d['default_branch'] = self.default_branch
  2956. if self.merge_mode:
  2957. d['merge_mode'] = list(filter(lambda x: x[1] == self.merge_mode,
  2958. MERGER_MAP.items()))[0][0]
  2959. else:
  2960. d['merge_mode'] = None
  2961. d['templates'] = self.templates
  2962. return d
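# Illustrative sketch (not part of the original file): toDict() above
# reverse-maps the MERGER_* constant back to its configuration keyword
# through MERGER_MAP. The same lookup in isolation:
def _merge_mode_name(mode):
    # e.g. _merge_mode_name(MERGER_CHERRY_PICK) -> 'cherry-pick'
    return next(name for name, value in MERGER_MAP.items()
                if value == mode)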
  2963. class ProjectMetadata(object):
  2964. """Information about a Project
  2965. A Layout holds one of these for each project it knows about.
  2966. Information about the project which is synthesized from multiple
  2967. ProjectConfig objects is stored here.
  2968. """
  2969. def __init__(self):
  2970. self.merge_mode = None
  2971. self.default_branch = None
  2972. class ConfigItemNotListError(Exception):
  2973. def __init__(self):
  2974. message = textwrap.dedent("""\
  2975. Configuration file is not a list. Each zuul.yaml configuration
  2976. file must be a list of items, for example:
  2977. - job:
  2978. name: foo
  2979. - project:
  2980. name: bar
  2981. Ensure that every item starts with "- " so that it is parsed as a
  2982. YAML list.
  2983. """)
  2984. super(ConfigItemNotListError, self).__init__(message)
  2985. class ConfigItemNotDictError(Exception):
  2986. def __init__(self):
  2987. message = textwrap.dedent("""\
  2988. Configuration item is not a dictionary. Each zuul.yaml
  2989. configuration file must be a list of dictionaries, for
  2990. example:
  2991. - job:
  2992. name: foo
  2993. - project:
  2994. name: bar
  2995. Ensure that every item in the list is a dictionary with one
  2996. key (in this example, 'job' and 'project').
  2997. """)
  2998. super(ConfigItemNotDictError, self).__init__(message)
  2999. class ConfigItemMultipleKeysError(Exception):
  3000. def __init__(self):
  3001. message = textwrap.dedent("""\
  3002. Configuration item has more than one key. Each zuul.yaml
  3003. configuration file must be a list of dictionaries with a
  3004. single key, for example:
  3005. - job:
  3006. name: foo
  3007. - project:
  3008. name: bar
  3009. Ensure that every item in the list is a dictionary with only
  3010. one key (in this example, 'job' and 'project'). This error
  3011. may be caused by insufficient indentation of the keys under
  3012. the configuration item ('name' in this example).
  3013. """)
  3014. super(ConfigItemMultipleKeysError, self).__init__(message)
  3015. class ConfigItemUnknownError(Exception):
  3016. def __init__(self):
  3017. message = textwrap.dedent("""\
  3018. Configuration item not recognized. Each zuul.yaml
  3019. configuration file must be a list of dictionaries, for
  3020. example:
  3021. - job:
  3022. name: foo
  3023. - project:
  3024. name: bar
  3025. The dictionary keys must match one of the configuration item
  3026. types recognized by zuul (for example, 'job' or 'project').
  3027. """)
  3028. super(ConfigItemUnknownError, self).__init__(message)
  3029. class UnparsedAbideConfig(object):
  3030. """A collection of yaml lists that has not yet been parsed into objects.
  3031. An Abide is a collection of tenants and access rules to those tenants.
  3032. """
  3033. def __init__(self):
  3034. self.tenants = []
  3035. self.admin_rules = []
  3036. self.known_tenants = set()
  3037. def extend(self, conf):
  3038. if isinstance(conf, UnparsedAbideConfig):
  3039. self.tenants.extend(conf.tenants)
  3040. self.admin_rules.extend(conf.admin_rules)
  3041. return
  3042. if not isinstance(conf, list):
  3043. raise ConfigItemNotListError()
  3044. for item in conf:
  3045. if not isinstance(item, dict):
  3046. raise ConfigItemNotDictError()
  3047. if len(item.keys()) > 1:
  3048. raise ConfigItemMultipleKeysError()
  3049. key, value = list(item.items())[0]
  3050. if key == 'tenant':
  3051. self.tenants.append(value)
  3052. if 'name' in value:
  3053. self.known_tenants.add(value['name'])
  3054. elif key == 'admin-rule':
  3055. self.admin_rules.append(value)
  3056. else:
  3057. raise ConfigItemUnknownError()
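# Illustrative sketch (not part of the original file): extend() accepts
# the raw YAML structure, a list of single-key dictionaries. The tenant
# and rule names are hypothetical.
def _unparsed_abide_demo():
    config = UnparsedAbideConfig()
    config.extend([
        {'tenant': {'name': 'example-tenant'}},
        {'admin-rule': {'name': 'example-rule'}},
    ])
    return config.known_tenants  # {'example-tenant'}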
  3058. class UnparsedConfig(object):
  3059. """A collection of yaml lists that has not yet been parsed into objects."""
  3060. def __init__(self):
  3061. self.pragmas = []
  3062. self.pipelines = []
  3063. self.jobs = []
  3064. self.project_templates = []
  3065. self.projects = []
  3066. self.nodesets = []
  3067. self.secrets = []
  3068. self.semaphores = []
  3069. # The list of files/dirs which this represents.
  3070. self.files_examined = set()
  3071. self.dirs_examined = set()
  3072. def copy(self, trusted=None):
  3073. # If trusted is not None, update the source context of each
  3074. # object in the copy.
  3075. r = UnparsedConfig()
  3076. # Keep a cache of all the source contexts indexed by
  3077. # project-branch-path so that we can share them across objects
  3078. source_contexts = {}
  3079. for attr in ['pragmas', 'pipelines', 'jobs', 'project_templates',
  3080. 'projects', 'nodesets', 'secrets', 'semaphores']:
  3081. # Make a deep copy of each of our attributes
  3082. old_objlist = getattr(self, attr)
  3083. new_objlist = copy.deepcopy(old_objlist)
  3084. setattr(r, attr, new_objlist)
  3085. for i, new_obj in enumerate(new_objlist):
  3086. old_obj = old_objlist[i]
  3087. key = (old_obj['_source_context'].project,
  3088. old_obj['_source_context'].branch,
  3089. old_obj['_source_context'].path)
  3090. new_sc = source_contexts.get(key)
  3091. if not new_sc:
  3092. new_sc = new_obj['_source_context']
  3093. if trusted is not None:
  3094. new_sc.trusted = trusted
  3095. source_contexts[key] = new_sc
  3096. else:
  3097. new_obj['_source_context'] = new_sc
  3098. return r
  3099. def extend(self, conf):
  3100. if isinstance(conf, UnparsedConfig):
  3101. self.pragmas.extend(conf.pragmas)
  3102. self.pipelines.extend(conf.pipelines)
  3103. self.jobs.extend(conf.jobs)
  3104. self.project_templates.extend(conf.project_templates)
  3105. self.projects.extend(conf.projects)
  3106. self.nodesets.extend(conf.nodesets)
  3107. self.secrets.extend(conf.secrets)
  3108. self.semaphores.extend(conf.semaphores)
  3109. return
  3110. if not isinstance(conf, list):
  3111. raise ConfigItemNotListError()
  3112. for item in conf:
  3113. if not isinstance(item, dict):
  3114. raise ConfigItemNotDictError()
  3115. if len(item.keys()) > 1:
  3116. raise ConfigItemMultipleKeysError()
  3117. key, value = list(item.items())[0]
  3118. if key == 'project':
  3119. self.projects.append(value)
  3120. elif key == 'job':
  3121. self.jobs.append(value)
  3122. elif key == 'project-template':
  3123. self.project_templates.append(value)
  3124. elif key == 'pipeline':
  3125. self.pipelines.append(value)
  3126. elif key == 'nodeset':
  3127. self.nodesets.append(value)
  3128. elif key == 'secret':
  3129. self.secrets.append(value)
  3130. elif key == 'semaphore':
  3131. self.semaphores.append(value)
  3132. elif key == 'pragma':
  3133. self.pragmas.append(value)
  3134. else:
  3135. raise ConfigItemUnknownError()
  3136. class ParsedConfig(object):
  3137. """A collection of parsed config objects."""
  3138. def __init__(self):
  3139. self.pragmas = []
  3140. self.pipelines = []
  3141. self.jobs = []
  3142. self.project_templates = []
  3143. self.projects = []
  3144. self.projects_by_regex = {}
  3145. self.nodesets = []
  3146. self.secrets = []
  3147. self.semaphores = []
  3148. def copy(self):
  3149. r = ParsedConfig()
  3150. r.pragmas = self.pragmas[:]
  3151. r.pipelines = self.pipelines[:]
  3152. r.jobs = self.jobs[:]
  3153. r.project_templates = self.project_templates[:]
  3154. r.projects = self.projects[:]
  3155. r.projects_by_regex = copy.copy(self.projects_by_regex)
  3156. r.nodesets = self.nodesets[:]
  3157. r.secrets = self.secrets[:]
  3158. r.semaphores = self.semaphores[:]
  3159. return r
  3160. def extend(self, conf):
  3161. if isinstance(conf, ParsedConfig):
  3162. self.pragmas.extend(conf.pragmas)
  3163. self.pipelines.extend(conf.pipelines)
  3164. self.jobs.extend(conf.jobs)
  3165. self.project_templates.extend(conf.project_templates)
  3166. self.projects.extend(conf.projects)
  3167. self.nodesets.extend(conf.nodesets)
  3168. self.secrets.extend(conf.secrets)
  3169. self.semaphores.extend(conf.semaphores)
  3170. for regex, projects in conf.projects_by_regex.items():
  3171. self.projects_by_regex.setdefault(regex, []).extend(projects)
  3172. return
  3173. else:
  3174. raise ConfigItemUnknownError()
  3175. class Layout(object):
  3176. """Holds all of the Pipelines."""
  3177. log = logging.getLogger("zuul.layout")
  3178. def __init__(self, tenant):
  3179. self.uuid = uuid4().hex
  3180. self.tenant = tenant
  3181. self.project_configs = {}
  3182. self.project_templates = {}
  3183. self.project_metadata = {}
  3184. self.pipelines = OrderedDict()
  3185. # This is a dictionary of name -> [jobs]. The first element
  3186. # of the list is the first job added with that name. It is
  3187. # the reference definition for a given job. Subsequent
  3188. # elements are aspects of that job with different matchers
  3189. # that override some attribute of the job. These aspects all
  3190. # inherit from the reference definition.
  3191. noop = Job('noop')
  3192. noop.description = 'A job that will always succeed, no operation.'
  3193. noop.parent = noop.BASE_JOB_MARKER
  3194. noop.run = (PlaybookContext(None, 'noop.yaml', [], []),)
  3195. self.jobs = {'noop': [noop]}
  3196. self.nodesets = {}
  3197. self.secrets = {}
  3198. self.semaphores = {}
  3199. self.loading_errors = LoadingErrors()
  3200. def getJob(self, name):
  3201. if name in self.jobs:
  3202. return self.jobs[name][0]
  3203. raise Exception("Job %s not defined" % (name,))
  3204. def hasJob(self, name):
  3205. return name in self.jobs
  3206. def getJobs(self, name):
  3207. return self.jobs.get(name, [])
  3208. def addJob(self, job):
  3209. # We can have multiple variants of a job all with the same
  3210. # name, but these variants must all be defined in the same repo.
  3211. prior_jobs = [j for j in self.getJobs(job.name) if
  3212. j.source_context.project !=
  3213. job.source_context.project]
  3214. # Unless the repo is permitted to shadow another. If so, and
  3215. # the job we are adding is from a repo that is permitted to
  3216. # shadow the one with the older jobs, skip adding this job.
  3217. job_project = job.source_context.project
  3218. job_tpc = self.tenant.project_configs[job_project.canonical_name]
  3219. skip_add = False
  3220. for prior_job in prior_jobs[:]:
  3221. prior_project = prior_job.source_context.project
  3222. if prior_project in job_tpc.shadow_projects:
  3223. prior_jobs.remove(prior_job)
  3224. skip_add = True
  3225. if prior_jobs:
  3226. raise Exception("Job %s in %s is not permitted to shadow "
  3227. "job %s in %s" % (
  3228. job,
  3229. job.source_context.project,
  3230. prior_jobs[0],
  3231. prior_jobs[0].source_context.project))
  3232. if skip_add:
  3233. return False
  3234. if job.name in self.jobs:
  3235. self.jobs[job.name].append(job)
  3236. else:
  3237. self.jobs[job.name] = [job]
  3238. return True
  3239. def addNodeSet(self, nodeset):
  3240. # It's ok to have duplicate nodeset definitions, but only if
  3241. # they are in different branches of the same repo, and have
  3242. # the same values.
  3243. other = self.nodesets.get(nodeset.name)
  3244. if other is not None:
  3245. if not nodeset.source_context.isSameProject(other.source_context):
  3246. raise Exception("Nodeset %s already defined in project %s" %
  3247. (nodeset.name, other.source_context.project))
  3248. if nodeset.source_context.branch == other.source_context.branch:
  3249. raise Exception("Nodeset %s already defined" % (nodeset.name,))
  3250. if nodeset != other:
  3251. raise Exception("Nodeset %s does not match existing definition"
  3252. " in branch %s" %
  3253. (nodeset.name, other.source_context.branch))
  3254. # Identical data in a different branch of the same project;
  3255. # ignore the duplicate definition
  3256. return
  3257. self.nodesets[nodeset.name] = nodeset
  3258. def addSecret(self, secret):
  3259. # It's ok to have duplicate secret definitions, but only if
  3260. # they are in different branches of the same repo, and have
  3261. # the same values.
  3262. other = self.secrets.get(secret.name)
  3263. if other is not None:
  3264. if not secret.source_context.isSameProject(other.source_context):
  3265. raise Exception("Secret %s already defined in project %s" %
  3266. (secret.name, other.source_context.project))
  3267. if secret.source_context.branch == other.source_context.branch:
  3268. raise Exception("Secret %s already defined" % (secret.name,))
  3269. if not secret.areDataEqual(other):
  3270. raise Exception("Secret %s does not match existing definition"
  3271. " in branch %s" %
  3272. (secret.name, other.source_context.branch))
  3273. # Identical data in a different branch of the same project;
  3274. # ignore the duplicate definition
  3275. return
  3276. self.secrets[secret.name] = secret
  3277. def addSemaphore(self, semaphore):
  3278. # It's ok to have duplicate semaphore definitions, but only if
  3279. # they are in different branches of the same repo, and have
  3280. # the same values.
  3281. other = self.semaphores.get(semaphore.name)
  3282. if other is not None:
  3283. if not semaphore.source_context.isSameProject(
  3284. other.source_context):
  3285. raise Exception("Semaphore %s already defined in project %s" %
  3286. (semaphore.name, other.source_context.project))
  3287. if semaphore.source_context.branch == other.source_context.branch:
  3288. raise Exception("Semaphore %s already defined" %
  3289. (semaphore.name,))
  3290. if semaphore != other:
  3291. raise Exception("Semaphore %s does not match existing"
  3292. " definition in branch %s" %
  3293. (semaphore.name, other.source_context.branch))
  3294. # Identical data in a different branch of the same project;
  3295. # ignore the duplicate definition
  3296. return
  3297. self.semaphores[semaphore.name] = semaphore
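# Illustrative sketch (not part of the original file): addNodeSet(),
# addSecret() and addSemaphore() above all apply the same duplicate
# policy; a re-definition is silently ignored only when it comes from
# a different branch of the same project and carries identical data.
# The decision, restated over plain booleans:
def _duplicate_policy(same_project, same_branch, same_data):
    if not same_project:
        return 'error: already defined in another project'
    if same_branch:
        return 'error: already defined'
    if not same_data:
        return 'error: does not match the existing definition'
    return 'ignore the duplicate definition'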
  3298. def addPipeline(self, pipeline):
  3299. if pipeline.tenant is not self.tenant:
  3300. raise Exception("Pipeline created for tenant %s "
  3301. "may not be added to %s" % (
  3302. pipeline.tenant,
  3303. self.tenant))
  3304. if pipeline.name in self.pipelines:
  3305. raise Exception(
  3306. "Pipeline %s is already defined" % pipeline.name)
  3307. self.pipelines[pipeline.name] = pipeline
  3308. def addProjectTemplate(self, project_template):
  3309. template_list = self.project_templates.get(project_template.name)
  3310. if template_list is not None:
  3311. reference = template_list[0]
  3312. if (reference.source_context.project !=
  3313. project_template.source_context.project):
  3314. raise Exception("Project template %s is already defined" %
  3315. (project_template.name,))
  3316. else:
  3317. template_list = self.project_templates.setdefault(
  3318. project_template.name, [])
  3319. template_list.append(project_template)
  3320. def getProjectTemplates(self, name):
  3321. pt = self.project_templates.get(name, None)
  3322. if pt is None:
  3323. raise TemplateNotFoundError("Project template %s not found" % name)
  3324. return pt
  3325. def addProjectConfig(self, project_config):
  3326. if project_config.name in self.project_configs:
  3327. self.project_configs[project_config.name].append(project_config)
  3328. else:
  3329. self.project_configs[project_config.name] = [project_config]
  3330. self.project_metadata[project_config.name] = ProjectMetadata()
  3331. md = self.project_metadata[project_config.name]
  3332. if md.merge_mode is None and project_config.merge_mode is not None:
  3333. md.merge_mode = project_config.merge_mode
  3334. if (md.default_branch is None and
  3335. project_config.default_branch is not None):
  3336. md.default_branch = project_config.default_branch
  3337. def getProjectConfigs(self, name):
  3338. return self.project_configs.get(name, [])
  3339. def getAllProjectConfigs(self, name):
  3340. # Get all the project configs (project and project-template
  3341. # stanzas) for a project.
  3342. try:
  3343. ret = []
  3344. for pc in self.getProjectConfigs(name):
  3345. ret.append(pc)
  3346. for template_name in pc.templates:
  3347. templates = self.getProjectTemplates(template_name)
  3348. ret.extend(templates)
  3349. return ret
  3350. except TemplateNotFoundError as e:
  3351. self.log.warning("%s for project %s" % (e, name))
  3352. return []
  3353. def getProjectMetadata(self, name):
  3354. if name in self.project_metadata:
  3355. return self.project_metadata[name]
  3356. return None
  3357. def getProjectPipelineConfig(self, item):
  3358. log = item.annotateLogger(self.log)
  3359. # Create a project-pipeline config for the given item, taking
  3360. # its branch (if any) into consideration. If the project does
  3361. # not participate in the pipeline at all (in this branch),
  3362. # return None.
  3363. # A pc for a project can appear only in a config-project
  3364. # (unbranched, always applies), or in the project itself (it
  3365. # should have an implied branch matcher and it must match the
  3366. # item).
  3367. ppc = ProjectPipelineConfig()
  3368. project_in_pipeline = False
  3369. for pc in self.getProjectConfigs(item.change.project.canonical_name):
  3370. if not pc.changeMatches(item.change):
  3371. msg = "Project %s did not match" % (pc,)
  3372. ppc.addDebug(msg)
  3373. log.debug("%s item %s", msg, item)
  3374. continue
  3375. msg = "Project %s matched" % (pc,)
  3376. ppc.addDebug(msg)
  3377. log.debug("%s item %s", msg, item)
  3378. for template_name in pc.templates:
  3379. templates = self.getProjectTemplates(template_name)
  3380. for template in templates:
  3381. template_ppc = template.pipelines.get(item.pipeline.name)
  3382. if template_ppc:
  3383. if not template.changeMatches(item.change):
  3384. msg = "Project template %s did not match" % (
  3385. template,)
  3386. ppc.addDebug(msg)
  3387. log.debug("%s item %s", msg, item)
  3388. continue
  3389. msg = "Project template %s matched" % (
  3390. template,)
  3391. ppc.addDebug(msg)
  3392. log.debug("%s item %s", msg, item)
  3393. project_in_pipeline = True
  3394. ppc.update(template_ppc)
  3395. ppc.updateVariables(template.variables)
  3396. # Now merge in project variables (they will override
  3397. # template variables; later job variables may override
  3398. # these again)
  3399. ppc.updateVariables(pc.variables)
  3400. project_ppc = pc.pipelines.get(item.pipeline.name)
  3401. if project_ppc:
  3402. project_in_pipeline = True
  3403. ppc.update(project_ppc)
  3404. if project_in_pipeline:
  3405. return ppc
  3406. return None
  3407. def _updateOverrideCheckouts(self, override_checkouts, job):
  3408. # Update the values in an override_checkouts dict with those
  3409. # in a job. Used in collectJobVariants.
  3410. if job.override_checkout:
  3411. override_checkouts[None] = job.override_checkout
  3412. for req in job.required_projects.values():
  3413. if req.override_checkout:
  3414. override_checkouts[req.project_name] = req.override_checkout
  3415. def _collectJobVariants(self, item, jobname, change, path, jobs, stack,
  3416. override_checkouts, indent):
  3417. log = item.annotateLogger(self.log)
  3418. matched = False
  3419. local_override_checkouts = override_checkouts.copy()
  3420. override_branch = None
  3421. project = None
  3422. for variant in self.getJobs(jobname):
  3423. if project is None and variant.source_context:
  3424. project = variant.source_context.project
  3425. if override_checkouts.get(None) is not None:
  3426. override_branch = override_checkouts.get(None)
  3427. override_branch = override_checkouts.get(
  3428. project.canonical_name, override_branch)
  3429. branches = self.tenant.getProjectBranches(project)
  3430. if override_branch not in branches:
  3431. override_branch = None
  3432. if not variant.changeMatchesBranch(
  3433. change,
  3434. override_branch=override_branch):
  3435. log.debug("Variant %s did not match %s", repr(variant), change)
  3436. item.debug("Variant {variant} did not match".format(
  3437. variant=repr(variant)), indent=indent)
  3438. continue
  3439. else:
  3440. log.debug("Variant %s matched %s", repr(variant), change)
  3441. item.debug("Variant {variant} matched".format(
  3442. variant=repr(variant)), indent=indent)
  3443. if not variant.isBase():
  3444. parent = variant.parent
  3445. if not jobs and parent is None:
  3446. parent = self.tenant.default_base_job
  3447. else:
  3448. parent = None
  3449. self._updateOverrideCheckouts(local_override_checkouts, variant)
  3450. if parent and parent not in path:
  3451. if parent in stack:
  3452. raise Exception("Dependency cycle in jobs: %s" % stack)
  3453. self.collectJobs(item, parent, change, path, jobs,
  3454. stack + [jobname], local_override_checkouts)
  3455. matched = True
  3456. if variant not in jobs:
  3457. jobs.append(variant)
  3458. return matched
  3459. def collectJobs(self, item, jobname, change, path=None, jobs=None,
  3460. stack=None, override_checkouts=None):
  3461. log = item.annotateLogger(self.log)
  3462. # Stack is the recursion stack of job parent names. Each time
  3463. # we go up a level, we pass down a copy with the current name
  3464. # added, so entries effectively disappear as we return.
  3465. if stack is None:
  3466. stack = []
  3467. # Jobs is the list of jobs we've accumulated.
  3468. if jobs is None:
  3469. jobs = []
  3470. # Path is the list of job names we've examined. It
  3471. # accumulates and never reduces. If more than one job has the
  3472. # same parent, this will prevent us from adding it a second
  3473. # time.
  3474. if path is None:
  3475. path = []
  3476. # Override_checkouts is a dictionary of canonical project
  3477. # names -> branch names. It is not mutated, but instead new
  3478. # copies are made and updated as we ascend the hierarchy, so
  3479. # higher levels don't affect lower levels after we descend.
  3480. # It's used to override the branch matchers for jobs.
  3481. if override_checkouts is None:
  3482. override_checkouts = {}
  3483. path.append(jobname)
  3484. matched = False
  3485. indent = len(path) + 1
  3486. msg = "Collecting job variants for {jobname}".format(jobname=jobname)
  3487. log.debug(msg)
  3488. item.debug(msg, indent=indent)
  3489. matched = self._collectJobVariants(
  3490. item, jobname, change, path, jobs, stack, override_checkouts,
  3491. indent)
  3492. if not matched:
  3493. log.debug("No matching parents for job %s and change %s",
  3494. jobname, change)
  3495. item.debug("No matching parents for {jobname}".format(
  3496. jobname=repr(jobname)), indent=indent)
  3497. raise NoMatchingParentError()
  3498. return jobs
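# Illustrative sketch (not part of the original file): collectJobs()
# above recurses through job parents, accumulating variants while
# guarding against dependency cycles. A simplified standalone walk in
# the same spirit; the 'parents' mapping is hypothetical.
def _walk_parents(parents, jobname, stack=None):
    # parents: dict of job name -> parent job name (or None).
    stack = stack or []
    if jobname in stack:
        raise Exception("Dependency cycle in jobs: %s" % stack)
    parent = parents.get(jobname)
    if parent is not None:
        return _walk_parents(parents, parent, stack + [jobname])
    return stack + [jobname]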
  3499. def _createJobGraph(self, item, ppc, job_graph, skip_file_matcher):
  3500. log = item.annotateLogger(self.log)
  3501. job_list = ppc.job_list
  3502. change = item.change
  3503. pipeline = item.pipeline
  3504. item.debug("Freezing job graph")
  3505. for jobname in job_list.jobs:
  3506. # This is the final job we are constructing
  3507. frozen_job = None
  3508. log.debug("Collecting jobs %s for %s", jobname, change)
  3509. item.debug("Freezing job {jobname}".format(
  3510. jobname=jobname), indent=1)
  3511. # Create the initial list of override_checkouts, which are
  3512. # used as we walk up the hierarchy to expand the set of
  3513. # jobs which match.
  3514. override_checkouts = {}
  3515. for variant in job_list.jobs[jobname]:
  3516. if variant.changeMatchesBranch(change):
  3517. self._updateOverrideCheckouts(override_checkouts, variant)
  3518. try:
  3519. variants = self.collectJobs(
  3520. item, jobname, change,
  3521. override_checkouts=override_checkouts)
  3522. except NoMatchingParentError:
  3523. variants = None
  3524. log.debug("Collected jobs %s for %s", jobname, change)
  3525. if not variants:
  3526. # A change must match at least one defined job variant
  3527. # (that is to say that it must match more than just
  3528. # the job that is defined in the tree).
  3529. item.debug("No matching variants for {jobname}".format(
  3530. jobname=jobname), indent=2)
  3531. continue
  3532. for variant in variants:
  3533. if frozen_job is None:
  3534. frozen_job = variant.copy()
  3535. frozen_job.setBase(item.layout)
  3536. else:
  3537. frozen_job.applyVariant(variant, item.layout)
  3538. frozen_job.name = variant.name
  3539. frozen_job.name = jobname
  3540. # Now merge variables set from this parent ppc
  3541. # (i.e. project+templates) directly into the job vars
  3542. frozen_job.updateProjectVariables(ppc.variables)
  3543. # If the job does not specify an ansible version default to the
  3544. # tenant default.
  3545. if not frozen_job.ansible_version:
  3546. frozen_job.ansible_version = \
  3547. item.layout.tenant.default_ansible_version
  3548. log.debug("Froze job %s for %s", jobname, change)
  3549. # Whether the change matches any of the project pipeline
  3550. # variants
  3551. matched = False
  3552. for variant in job_list.jobs[jobname]:
  3553. if variant.changeMatchesBranch(change):
  3554. frozen_job.applyVariant(variant, item.layout)
  3555. matched = True
  3556. log.debug("Pipeline variant %s matched %s",
  3557. repr(variant), change)
  3558. item.debug("Pipeline variant {variant} matched".format(
  3559. variant=repr(variant)), indent=2)
  3560. else:
  3561. log.debug("Pipeline variant %s did not match %s",
  3562. repr(variant), change)
  3563. item.debug("Pipeline variant {variant} did not match".
  3564. format(variant=repr(variant)), indent=2)
  3565. if not matched:
  3566. # A change must match at least one project pipeline
  3567. # job variant.
  3568. item.debug("No matching pipeline variants for {jobname}".
  3569. format(jobname=jobname), indent=2)
  3570. continue
  3571. updates_job_config = False
  3572. if not skip_file_matcher and \
  3573. not frozen_job.changeMatchesFiles(change):
  3574. matched_files = False
  3575. if frozen_job.match_on_config_updates:
  3576. updates_job_config = item.updatesJobConfig(frozen_job)
  3577. else:
  3578. matched_files = True
  3579. if not matched_files:
  3580. if updates_job_config:
  3581. # Log the reason we're ignoring the file matcher
  3582. log.debug("The configuration of job %s is "
  3583. "changed by %s; ignoring file matcher",
  3584. repr(frozen_job), change)
  3585. item.debug("The configuration of job {jobname} is "
  3586. "changed; ignoring file matcher".
  3587. format(jobname=jobname), indent=2)
  3588. else:
  3589. log.debug("Job %s did not match files in %s",
  3590. repr(frozen_job), change)
  3591. item.debug("Job {jobname} did not match files".
  3592. format(jobname=jobname), indent=2)
  3593. continue
  3594. if frozen_job.abstract:
  3595. raise Exception("Job %s is abstract and may not be "
  3596. "directly run" %
  3597. (frozen_job.name,))
  3598. if (not frozen_job.ignore_allowed_projects and
  3599. frozen_job.allowed_projects is not None and
  3600. change.project.name not in frozen_job.allowed_projects):
  3601. raise Exception("Project %s is not allowed to run job %s" %
  3602. (change.project.name, frozen_job.name))
  3603. if ((not pipeline.post_review) and frozen_job.post_review):
  3604. raise Exception("Pre-review pipeline %s does not allow "
  3605. "post-review job %s" % (
  3606. pipeline.name, frozen_job.name))
  3607. if not frozen_job.run:
  3608. raise Exception("Job %s does not specify a run playbook" % (
  3609. frozen_job.name,))
  3610. job_graph.addJob(frozen_job)
  3611. def createJobGraph(self, item, ppc, skip_file_matcher=False):
  3612. # NOTE(pabelanger): It is possible for a foreign project not to have a
  3613. # configured pipeline, if so return an empty JobGraph.
  3614. ret = JobGraph()
  3615. if ppc:
  3616. self._createJobGraph(item, ppc, ret, skip_file_matcher)
  3617. return ret
  3618. class Semaphore(ConfigObject):
  3619. def __init__(self, name, max=1):
  3620. super(Semaphore, self).__init__()
  3621. self.name = name
  3622. self.max = int(max)
  3623. def __ne__(self, other):
  3624. return not self.__eq__(other)
  3625. def __eq__(self, other):
  3626. if not isinstance(other, Semaphore):
  3627. return False
  3628. return (self.name == other.name and
  3629. self.max == other.max)
  3630. class SemaphoreHandler(object):
  3631. log = logging.getLogger("zuul.SemaphoreHandler")
  3632. def __init__(self):
  3633. self.semaphores = {}
  3634. def acquire(self, item, job, request_resources):
  3635. """
  3636. Acquires a semaphore for an item/job combination. This gets called
  3637. twice during the lifecycle of a job: first before requesting build
  3638. resources, then before running the job. Which of the two calls
  3639. actually acquires the semaphore is determined by the job.
  3640. :param item: The item
  3641. :param job: The job
  3642. :param request_resources: True if we want to acquire for the request
  3643. resources phase, False if we want to acquire
  3644. for the run phase.
  3645. """
  3646. if not job.semaphore:
  3647. return True
  3648. log = get_annotated_logger(self.log, item.event)
  3649. if job.semaphore.resources_first and request_resources:
  3650. # We're currently in the resource request phase and want to get the
  3651. # resources before locking. So we don't need to do anything here.
  3652. return True
  3653. else:
  3654. # As a safety net we want to acquire the semaphore at least in the
  3655. # run phase, so don't filter this here; re-acquiring the semaphore
  3656. # is not a problem if it has already been acquired in
  3657. # the resources phase.
  3658. pass
  3659. semaphore_key = job.semaphore.name
  3660. m = self.semaphores.get(semaphore_key)
  3661. if not m:
  3662. # The semaphore is not held, acquire it
  3663. self._acquire(semaphore_key, item, job.name, log)
  3664. return True
  3665. if (item, job.name) in m:
  3666. # This item already holds the semaphore
  3667. return True
  3668. # semaphore is there, check max
  3669. if len(m) < self._max_count(item, job.semaphore.name):
  3670. self._acquire(semaphore_key, item, job.name, log)
  3671. return True
  3672. return False
  3673. def release(self, item, job):
  3674. if not job.semaphore:
  3675. return
  3676. log = get_annotated_logger(self.log, item.event)
  3677. semaphore_key = job.semaphore.name
  3678. m = self.semaphores.get(semaphore_key)
  3679. if not m:
  3680. # The semaphore is not held, nothing to do
  3681. log.error("Semaphore cannot be released for %s "
  3682. "because the semaphore is not held", item)
  3683. return
  3684. if (item, job.name) in m:
  3685. # This item is a holder of the semaphore
  3686. self._release(semaphore_key, item, job.name, log)
  3687. return
  3688. log.error("Semaphore cannot be released for %s "
  3689. "which does not hold it", item)
  3690. def _acquire(self, semaphore_key, item, job_name, log):
  3691. log.debug("Semaphore acquire {semaphore}: job {job}, item {item}"
  3692. .format(semaphore=semaphore_key,
  3693. job=job_name,
  3694. item=item))
  3695. if semaphore_key not in self.semaphores:
  3696. self.semaphores[semaphore_key] = []
  3697. self.semaphores[semaphore_key].append((item, job_name))
  3698. def _release(self, semaphore_key, item, job_name, log):
  3699. log.debug("Semaphore release {semaphore}: job {job}, item {item}"
  3700. .format(semaphore=semaphore_key,
  3701. job=job_name,
  3702. item=item))
  3703. sem_item = (item, job_name)
  3704. if sem_item in self.semaphores[semaphore_key]:
  3705. self.semaphores[semaphore_key].remove(sem_item)
  3706. # cleanup if there is no user of the semaphore anymore
  3707. if len(self.semaphores[semaphore_key]) == 0:
  3708. del self.semaphores[semaphore_key]
  3709. @staticmethod
  3710. def _max_count(item, semaphore_name):
  3711. if not item.layout:
  3712. # This should not occur as the layout of the item must already be
  3713. # built when acquiring or releasing a semaphore for a job.
  3714. raise Exception("Item {} has no layout".format(item))
  3715. # find the right semaphore
  3716. default_semaphore = Semaphore(semaphore_name, 1)
  3717. semaphores = item.layout.semaphores
  3718. return semaphores.get(semaphore_name, default_semaphore).max
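# Illustrative sketch (not part of the original file): the handler
# above implements counting-semaphore semantics keyed on (item, job)
# tuples. The core accounting, reduced to plain data:
def _try_acquire(holders, key, holder, max_count):
    # holders: dict of semaphore name -> list of current holders.
    current = holders.setdefault(key, [])
    if holder in current:
        return True  # re-acquiring an already-held semaphore is fine
    if len(current) < max_count:
        current.append(holder)
        return True
    return False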
  3719. class Tenant(object):
  3720. def __init__(self, name):
  3721. self.name = name
  3722. self.max_nodes_per_job = 5
  3723. self.max_job_timeout = 10800
  3724. self.exclude_unprotected_branches = False
  3725. self.default_base_job = None
  3726. self.report_build_page = False
  3727. self.layout = None
  3728. # The unparsed configuration from the main zuul config for
  3729. # this tenant.
  3730. self.unparsed_config = None
  3731. # The list of projects from which we will read full
  3732. # configuration.
  3733. self.config_projects = []
  3734. # The parsed config from those projects.
  3735. self.config_projects_config = None
  3736. # The list of projects from which we will read untrusted
  3737. # in-repo configuration.
  3738. self.untrusted_projects = []
  3739. # The parsed config from those projects.
  3740. self.untrusted_projects_config = None
  3741. self.semaphore_handler = SemaphoreHandler()
  3742. # Metadata about projects for this tenant
  3743. # canonical project name -> TenantProjectConfig
  3744. self.project_configs = {}
  3745. # A mapping of project names to projects. project_name ->
  3746. # VALUE where VALUE is a further dictionary of
  3747. # canonical_hostname -> Project.
  3748. self.projects = {}
  3749. self.canonical_hostnames = set()
  3750. # The per tenant default ansible version
  3751. self.default_ansible_version = None
  3752. self.authorization_rules = []
  3753. def _addProject(self, tpc):
  3754. """Add a project to the project index
  3755. :arg TenantProjectConfig tpc: The TenantProjectConfig (with
  3756. associated project) to add.
  3757. """
  3758. project = tpc.project
  3759. self.canonical_hostnames.add(project.canonical_hostname)
  3760. hostname_dict = self.projects.setdefault(project.name, {})
  3761. if project.canonical_hostname in hostname_dict:
  3762. raise Exception("Project %s is already in project index" %
  3763. (project,))
  3764. hostname_dict[project.canonical_hostname] = project
  3765. self.project_configs[project.canonical_name] = tpc
  3766. def getProject(self, name):
  3767. """Return a project given its name.
  3768. :arg str name: The name of the project. It may be fully
  3769. qualified (E.g., "git.example.com/subpath/project") or may
  3770. contain only the project name (E.g.,
  3771. "subpath/project").
  3772. :returns: A tuple (trusted, project) or (None, None) if the
  3773. project is not found or ambiguous. The "trusted" boolean
  3774. indicates whether or not the project is trusted by this
  3775. tenant.
  3776. :rtype: (bool, Project)
  3777. """
  3778. path = name.split('/', 1)
  3779. if path[0] in self.canonical_hostnames:
  3780. hostname = path[0]
  3781. project_name = path[1]
  3782. else:
  3783. hostname = None
  3784. project_name = name
  3785. hostname_dict = self.projects.get(project_name)
  3786. project = None
  3787. if hostname_dict:
  3788. if hostname:
  3789. project = hostname_dict.get(hostname)
  3790. else:
  3791. values = list(hostname_dict.values())
  3792. if len(values) == 1:
  3793. project = values[0]
  3794. else:
  3795. raise Exception("Project name '%s' is ambiguous, "
  3796. "please fully qualify the project "
  3797. "with a hostname" % (name,))
  3798. if project is None:
  3799. return (None, None)
  3800. if project in self.config_projects:
  3801. return (True, project)
  3802. if project in self.untrusted_projects:
  3803. return (False, project)
  3804. # This should never happen:
  3805. raise Exception("Project %s is neither trusted nor untrusted" %
  3806. (project,))
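# Illustrative sketch (not part of the original file): getProject()
# above treats the first path component as a hostname only when it is
# a known canonical hostname. The same split in isolation, with a
# hypothetical hostname set:
def _split_project_name(name, canonical_hostnames):
    # _split_project_name('git.example.com/subpath/project',
    #                     {'git.example.com'})
    # -> ('git.example.com', 'subpath/project')
    path = name.split('/', 1)
    if path[0] in canonical_hostnames:
        return path[0], path[1]
    return None, name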
  3807. def getProjectsByRegex(self, regex):
  3808. """Return all projects with a full match to either project name or
  3809. canonical project name.
  3810. :arg str regex: The regex to match
  3811. :returns: A list of tuples (trusted, project) describing the found
  3812. projects. Raises an exception if the same project name is found
  3813. several times across multiple hostnames.
  3814. """
  3815. matcher = re2.compile(regex)
  3816. projects = []
  3817. result = []
  3818. for name, hostname_dict in self.projects.items():
  3819. if matcher.fullmatch(name):
  3820. # validate that this match is unambiguous
  3821. values = list(hostname_dict.values())
  3822. if len(values) > 1:
  3823. raise Exception("Project name '%s' is ambiguous, "
  3824. "please fully qualify the project "
  3825. "with a hostname. Valid hostnames "
  3826. "are %s." % (name, hostname_dict.keys()))
  3827. projects.append(values[0])
  3828. else:
  3829. # try to match canonical project names
  3830. for project in hostname_dict.values():
  3831. if matcher.fullmatch(project.canonical_name):
  3832. projects.append(project)
  3833. for project in projects:
  3834. if project in self.config_projects:
  3835. result.append((True, project))
  3836. elif project in self.untrusted_projects:
  3837. result.append((False, project))
  3838. else:
  3839. raise Exception("Project %s is neither trusted nor untrusted" %
  3840. (project,))
  3841. return result
  3842. def getProjectBranches(self, project):
  3843. """Return a project's branches (filtered by this tenant config)
  3844. :arg Project project: The project object.
  3845. :returns: A list of branch names.
  3846. :rtype: [str]
  3847. """
  3848. tpc = self.project_configs[project.canonical_name]
  3849. return tpc.branches
  3850. def getExcludeUnprotectedBranches(self, project):
  3851. # Evaluate if unprotected branches should be excluded or not. The first
  3852. # match wins. The order is project -> tenant (default is false).
  3853. project_config = self.project_configs.get(project.canonical_name)
  3854. if project_config.exclude_unprotected_branches is not None:
  3855. exclude_unprotected = project_config.exclude_unprotected_branches
  3856. else:
  3857. exclude_unprotected = self.exclude_unprotected_branches
  3858. return exclude_unprotected
  3859. def addConfigProject(self, tpc):
  3860. self.config_projects.append(tpc.project)
  3861. self._addProject(tpc)
  3862. def addUntrustedProject(self, tpc):
  3863. self.untrusted_projects.append(tpc.project)
  3864. self._addProject(tpc)
  3865. def getSafeAttributes(self):
  3866. return Attributes(name=self.name)
  3867. class UnparsedBranchCache(object):
  3868. """Cache information about a single branch"""
  3869. def __init__(self):
  3870. self.load_skipped = True
  3871. self.extra_files_searched = set()
  3872. self.extra_dirs_searched = set()
  3873. self.files = {}
  3874. def isValidFor(self, tpc):
  3875. """Return True if this has valid cache results for the extra
  3876. files/dirs in the tpc.
  3877. """
  3878. if self.load_skipped:
  3879. return False
  3880. if (set(tpc.extra_config_files) <= self.extra_files_searched and
  3881. set(tpc.extra_config_dirs) <= self.extra_dirs_searched):
  3882. return True
  3883. return False
  3884. def setValidFor(self, tpc):
  3885. self.load_skipped = False
  3886. self.extra_files_searched |= set(tpc.extra_config_files)
  3887. self.extra_dirs_searched |= set(tpc.extra_config_dirs)
  3888. def put(self, path, config):
  3889. self.files[path] = config
  3890. def get(self, tpc):
  3891. ret = UnparsedConfig()
  3892. files_list = self.files.keys()
  3893. fns1 = []
  3894. fns2 = []
  3895. fns3 = []
  3896. fns4 = []
  3897. for fn in files_list:
  3898. if fn.startswith("zuul.d/"):
  3899. fns1.append(fn)
  3900. if fn.startswith(".zuul.d/"):
  3901. fns2.append(fn)
  3902. for ef in tpc.extra_config_files:
  3903. if fn.startswith(ef):
  3904. fns3.append(fn)
  3905. for ed in tpc.extra_config_dirs:
  3906. if fn.startswith(ed):
  3907. fns4.append(fn)
  3908. fns = (["zuul.yaml"] + sorted(fns1) + [".zuul.yaml"] +
  3909. sorted(fns2) + fns3 + sorted(fns4))
  3910. for fn in fns:
  3911. data = self.files.get(fn)
  3912. if data is not None:
  3913. ret.extend(data)
  3914. return ret
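# Illustrative sketch (not part of the original file): get() above
# assembles the cached files in a fixed precedence order - zuul.yaml,
# zuul.d/, .zuul.yaml, .zuul.d/, then any tenant-configured extra
# files and dirs. A minimal round-trip, using types.SimpleNamespace
# as a stand-in for a TenantProjectConfig:
def _branch_cache_demo():
    cache = UnparsedBranchCache()
    cache.put('zuul.d/jobs.yaml', [{'job': {'name': 'later'}}])
    cache.put('zuul.yaml', [{'job': {'name': 'first'}}])
    tpc = types.SimpleNamespace(extra_config_files=(),
                                extra_config_dirs=())
    return [j['name'] for j in cache.get(tpc).jobs]  # ['first', 'later']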
  3915. class Abide(object):
  3916. def __init__(self):
  3917. self.admin_rules = OrderedDict()
  3918. self.tenants = OrderedDict()
  3919. # project -> branch -> UnparsedBranchCache
  3920. self.unparsed_project_branch_cache = {}
  3921. def hasUnparsedBranchCache(self, canonical_project_name, branch):
  3922. project_branch_cache = self.unparsed_project_branch_cache.setdefault(
  3923. canonical_project_name, {})
  3924. cache = project_branch_cache.get(branch)
  3925. if cache is None:
  3926. return False
  3927. return True
  3928. def getUnparsedBranchCache(self, canonical_project_name, branch):
  3929. project_branch_cache = self.unparsed_project_branch_cache.setdefault(
  3930. canonical_project_name, {})
  3931. cache = project_branch_cache.get(branch)
  3932. if cache is not None:
  3933. return cache
  3934. project_branch_cache[branch] = UnparsedBranchCache()
  3935. return project_branch_cache[branch]
  3936. def clearUnparsedBranchCache(self, canonical_project_name, branch=None):
  3937. if canonical_project_name in self.unparsed_project_branch_cache:
  3938. project_branch_cache = \
  3939. self.unparsed_project_branch_cache[canonical_project_name]
  3940. if branch in project_branch_cache:
  3941. del project_branch_cache[branch]
  3942. if len(project_branch_cache) == 0 or branch is None:
  3943. del self.unparsed_project_branch_cache[canonical_project_name]
  3944. class JobTimeData(object):
  3945. format = 'B10H10H10B'
  3946. version = 0
  3947. def __init__(self, path):
  3948. self.path = path
  3949. self.success_times = [0 for x in range(10)]
  3950. self.failure_times = [0 for x in range(10)]
  3951. self.results = [0 for x in range(10)]
  3952. def load(self):
  3953. if not os.path.exists(self.path):
  3954. return
  3955. with open(self.path, 'rb') as f:
  3956. data = struct.unpack(self.format, f.read())
  3957. version = data[0]
  3958. if version != self.version:
  3959. raise Exception("Unknown data version")
  3960. self.success_times = list(data[1:11])
  3961. self.failure_times = list(data[11:21])
  3962. self.results = list(data[21:31])
  3963. def save(self):
  3964. tmpfile = self.path + '.tmp'
  3965. data = [self.version]
  3966. data.extend(self.success_times)
  3967. data.extend(self.failure_times)
  3968. data.extend(self.results)
  3969. data = struct.pack(self.format, *data)
  3970. with open(tmpfile, 'wb') as f:
  3971. f.write(data)
  3972. os.rename(tmpfile, self.path)
  3973. def add(self, elapsed, result):
  3974. elapsed = int(elapsed)
  3975. if result == 'SUCCESS':
  3976. self.success_times.append(elapsed)
  3977. self.success_times.pop(0)
  3978. result = 0
  3979. else:
  3980. self.failure_times.append(elapsed)
  3981. self.failure_times.pop(0)
  3982. result = 1
  3983. self.results.append(result)
  3984. self.results.pop(0)
  3985. def getEstimatedTime(self):
  3986. times = [x for x in self.success_times if x]
  3987. if times:
  3988. return float(sum(times)) / len(times)
  3989. return 0.0
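# Illustrative sketch (not part of the original file): the on-disk
# record is a fixed-size struct - a version byte, ten success times
# and ten failure times as unsigned shorts, then ten result flags as
# bytes. Packing a record by hand, with hypothetical values:
def _pack_time_record(success, failure, results, version=0):
    # e.g. _pack_time_record([60] * 10, [0] * 10, [0] * 10)
    return struct.pack(JobTimeData.format, version,
                       *(success + failure + results))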
  3990. class TimeDataBase(object):
  3991. def __init__(self, root):
  3992. self.root = root
  3993. def _getTD(self, build):
  3994. if hasattr(build.build_set.item.change, 'branch'):
  3995. branch = build.build_set.item.change.branch
  3996. else:
  3997. branch = ''
  3998. dir_path = os.path.join(
  3999. self.root,
  4000. build.build_set.item.pipeline.tenant.name,
  4001. build.build_set.item.change.project.canonical_name,
  4002. branch)
  4003. if not os.path.exists(dir_path):
  4004. os.makedirs(dir_path)
  4005. path = os.path.join(dir_path, build.job.name)
  4006. td = JobTimeData(path)
  4007. td.load()
  4008. return td
  4009. def getEstimatedTime(self, build):
  4010. return self._getTD(build).getEstimatedTime()
  4011. def update(self, build, elapsed, result):
  4012. td = self._getTD(build)
  4013. td.add(elapsed, result)
  4014. td.save()
  4015. class Capabilities(object):
  4016. """The set of capabilities this Zuul installation has.
  4017. Some plugins add elements to the external API. To let consumers
  4018. know whether functionality is available or not, keep track of
  4019. distinct capability flags.
  4020. """
  4021. def __init__(self, job_history=False):
  4022. self.job_history = job_history
  4023. def __repr__(self):
  4024. return '<Capabilities 0x%x %s>' % (id(self), self._renderFlags())
  4025. def _renderFlags(self):
  4026. d = self.toDict()
  4027. return " ".join(['{k}={v}'.format(k=k, v=v) for (k, v) in d.items()])
  4028. def copy(self):
  4029. return Capabilities(**self.toDict())
  4030. def toDict(self):
  4031. d = dict()
  4032. d['job_history'] = self.job_history
  4033. return d
  4034. class WebInfo(object):
  4035. """Information about the system needed by zuul-web /info."""
  4036. def __init__(self, websocket_url=None,
  4037. capabilities=None, stats_url=None,
  4038. stats_prefix=None, stats_type=None):
  4039. self.capabilities = capabilities or Capabilities()
  4040. self.stats_prefix = stats_prefix
  4041. self.stats_type = stats_type
  4042. self.stats_url = stats_url
  4043. self.tenant = None
  4044. self.websocket_url = websocket_url
  4045. def __repr__(self):
  4046. return '<WebInfo 0x%x capabilities=%s>' % (
  4047. id(self), str(self.capabilities))
  4048. def copy(self):
  4049. return WebInfo(
  4050. capabilities=self.capabilities.copy(),
  4051. stats_prefix=self.stats_prefix,
  4052. stats_type=self.stats_type,
  4053. stats_url=self.stats_url,
  4054. websocket_url=self.websocket_url)
  4055. @staticmethod
  4056. def fromConfig(config):
  4057. return WebInfo(
  4058. stats_prefix=get_default(config, 'statsd', 'prefix'),
  4059. stats_type=get_default(config, 'web', 'stats_type', 'graphite'),
  4060. stats_url=get_default(config, 'web', 'stats_url', None),
  4061. websocket_url=get_default(config, 'web', 'websocket_url', None),
  4062. )
  4063. def toDict(self):
  4064. d = dict()
  4065. d['capabilities'] = self.capabilities.toDict()
  4066. d['websocket_url'] = self.websocket_url
  4067. stats = dict()
  4068. stats['prefix'] = self.stats_prefix
  4069. stats['type'] = self.stats_type
  4070. stats['url'] = self.stats_url
  4071. d['stats'] = stats
  4072. if self.tenant:
  4073. d['tenant'] = self.tenant
  4074. return d
  4075. class HoldRequest(object):
  4076. def __init__(self):
  4077. self.lock = None
  4078. self.stat = None
  4079. self.id = None
  4080. self.expired = None
  4081. self.tenant = None
  4082. self.project = None
  4083. self.job = None
  4084. self.ref_filter = None
  4085. self.reason = None
  4086. self.node_expiration = None
  4087. # When max_count == current_count, the hold request can no longer be used.
  4088. self.max_count = 1
  4089. self.current_count = 0
  4090. # The hold request 'nodes' attribute is a list of dictionaries
  4091. # (one list entry per hold request count) containing the build
  4092. # ID (build) and a list of nodes (nodes) held for that build.
  4093. # Example:
  4094. #
  4095. # hold_request.nodes = [
  4096. # { 'build': 'ca01...', 'nodes': ['00000001', '00000002'] },
  4097. # { 'build': 'fb72...', 'nodes': ['00000003', '00000004'] },
  4098. # ]
  4099. self.nodes = []
  4100. def __str__(self):
  4101. return "<HoldRequest %s: tenant=%s project=%s job=%s ref_filter=%s>" \
  4102. % (self.id, self.tenant, self.project, self.job, self.ref_filter)
  4103. @staticmethod
  4104. def fromDict(data):
  4105. '''
  4106. Return a new object from the given data dictionary.
  4107. '''
  4108. obj = HoldRequest()
  4109. obj.expired = data.get('expired')
  4110. obj.tenant = data.get('tenant')
  4111. obj.project = data.get('project')
  4112. obj.job = data.get('job')
  4113. obj.ref_filter = data.get('ref_filter')
  4114. obj.max_count = data.get('max_count')
  4115. obj.current_count = data.get('current_count')
  4116. obj.reason = data.get('reason')
  4117. obj.node_expiration = data.get('node_expiration')
  4118. obj.nodes = data.get('nodes', [])
  4119. return obj
  4120. def toDict(self):
  4121. '''
  4122. Return a dictionary representation of the object.
  4123. '''
  4124. d = dict()
  4125. d['id'] = self.id
  4126. d['expired'] = self.expired
  4127. d['tenant'] = self.tenant
  4128. d['project'] = self.project
  4129. d['job'] = self.job
  4130. d['ref_filter'] = self.ref_filter
  4131. d['max_count'] = self.max_count
  4132. d['current_count'] = self.current_count
  4133. d['reason'] = self.reason
  4134. d['node_expiration'] = self.node_expiration
  4135. d['nodes'] = self.nodes
  4136. return d
  4137. def updateFromDict(self, d):
  4138. '''
  4139. Update current object with data from the given dictionary.
  4140. '''
  4141. self.expired = d.get('expired')
  4142. self.tenant = d.get('tenant')
  4143. self.project = d.get('project')
  4144. self.job = d.get('job')
  4145. self.ref_filter = d.get('ref_filter')
  4146. self.max_count = d.get('max_count', 1)
  4147. self.current_count = d.get('current_count', 0)
  4148. self.reason = d.get('reason')
  4149. self.node_expiration = d.get('node_expiration')
  4150. def serialize(self):
  4151. '''
  4152. Return a representation of the object as a string.
  4153. Used for storing the object data in ZooKeeper.
  4154. '''
  4155. return json.dumps(self.toDict()).encode('utf8')
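# Illustrative sketch (not part of the original file): serialize() and
# fromDict() round-trip through JSON, which is how hold requests are
# stored in ZooKeeper. The field values here are hypothetical.
def _hold_request_roundtrip():
    req = HoldRequest()
    req.tenant = 'example-tenant'
    req.reason = 'debugging a failing job'
    data = json.loads(req.serialize().decode('utf8'))
    return HoldRequest.fromDict(data).tenant  # 'example-tenant'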
  4156. # AuthZ models
  4157. class AuthZRule(object):
  4158. """The base class for authorization rules"""
  4159. def __ne__(self, other):
  4160. return not self.__eq__(other)
  4161. class ClaimRule(AuthZRule):
  4162. """This rule checks the value of a claim.
  4163. The check tries to be smart by assessing the type of the tested value."""
  4164. def __init__(self, claim=None, value=None):
  4165. super(ClaimRule, self).__init__()
  4166. self.claim = claim or 'sub'
  4167. self.value = value
  4168. def _match_jsonpath(self, claims):
  4169. matches = [match.value
  4170. for match in jsonpath_rw.parse(self.claim).find(claims)]
  4171. if len(matches) == 1:
  4172. match = matches[0]
  4173. if isinstance(match, list):
  4174. return self.value in match
  4175. elif isinstance(match, str):
  4176. return self.value == match
  4177. else:
  4178. # unsupported type - don't raise, but this should be reported
  4179. return False
  4180. else:
  4181. # TODO we should differentiate no match and 2+ matches
  4182. return False
  4183. def _match_dict(self, claims):
  4184. def _compare(value, claim):
  4185. if isinstance(value, list):
  4186. if isinstance(claim, list):
  4187. # if the claim is empty, the value must be empty too:
  4188. if claim == []:
  4189. return value == []
  4190. else:
  4191. return (set(claim) <= set(value))
  4192. else:
  4193. return claim in value
  4194. elif isinstance(value, dict):
  4195. if not isinstance(claim, dict):
  4196. return False
  4197. elif value == {}:
  4198. return claim == {}
  4199. else:
  4200. return all(_compare(value[x], claim.get(x, {}))
  4201. for x in value.keys())
  4202. else:
  4203. return value == claim
  4204. return _compare(self.value, claims.get(self.claim, {}))
  4205. def __call__(self, claims):
  4206. if isinstance(self.value, dict):
  4207. return self._match_dict(claims)
  4208. else:
  4209. return self._match_jsonpath(claims)
  4210. def __eq__(self, other):
  4211. if not isinstance(other, ClaimRule):
  4212. return False
  4213. return (self.claim == other.claim and self.value == other.value)
  4214. def __repr__(self):
  4215. return '<ClaimRule "%s":"%s">' % (self.claim, self.value)
  4216. def __hash__(self):
  4217. return hash(repr(self))
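# Illustrative sketch (not part of the original file): a ClaimRule with
# a scalar value is evaluated via JSONPath against the decoded token
# claims. The issuer and claims shown are hypothetical.
def _claim_rule_demo():
    rule = ClaimRule(claim='iss', value='https://idp.example.com')
    return rule({'iss': 'https://idp.example.com', 'sub': 'alice'})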
  4218. class OrRule(AuthZRule):
  4219. def __init__(self, subrules):
  4220. super(OrRule, self).__init__()
  4221. self.rules = set(subrules)
  4222. def __call__(self, claims):
  4223. return any(rule(claims) for rule in self.rules)
  4224. def __eq__(self, other):
  4225. if not isinstance(other, OrRule):
  4226. return False
  4227. return self.rules == other.rules
  4228. def __repr__(self):
  4229. return '<OrRule %s>' % (' || '.join(repr(r) for r in self.rules))
  4230. def __hash__(self):
  4231. return hash(repr(self))
  4232. class AndRule(AuthZRule):
  4233. def __init__(self, subrules):
  4234. super(AndRule, self).__init__()
  4235. self.rules = set(subrules)
  4236. def __call__(self, claims):
  4237. return all(rule(claims) for rule in self.rules)
  4238. def __eq__(self, other):
  4239. if not isinstance(other, AndRule):
  4240. return False
  4241. return self.rules == other.rules
  4242. def __repr__(self):
  4243. return '<AndRule %s>' % (' && '.join(repr(r) for r in self.rules))
  4244. def __hash__(self):
  4245. return hash(repr(self))
  4246. class AuthZRuleTree(object):
  4247. def __init__(self, name):
  4248. self.name = name
  4249. # initialize actions as unauthorized
  4250. self.ruletree = None
  4251. def __call__(self, claims):
  4252. return self.ruletree(claims)
  4253. def __eq__(self, other):
  4254. if not isinstance(other, AuthZRuleTree):
  4255. return False
  4256. return (self.name == other.name and
  4257. self.ruletree == other.ruletree)
  4258. def __repr__(self):
  4259. return '<AuthZRuleTree [ %s ]>' % self.ruletree
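# Illustrative sketch (not part of the original file): rules compose
# into a boolean tree, and an AuthZRuleTree simply delegates to its
# root. The rule name and claims below are hypothetical.
def _rule_tree_demo(claims):
    tree = AuthZRuleTree('example-admin-rule')
    tree.ruletree = AndRule([
        ClaimRule(claim='iss', value='https://idp.example.com'),
        OrRule([ClaimRule(claim='sub', value='alice'),
                ClaimRule(claim='sub', value='bob')]),
    ])
    return tree(claims)  # True for alice or bob from this issuer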