Browse Source

Merge "executor: add support for generic build resource"

tags/3.4.0
Zuul 6 months ago
parent
commit
4389e9faa1

+ 46
- 0
doc/source/user/jobs.rst View File

@@ -436,6 +436,52 @@ of item.
436 436
          The patchset identifier for the change.  If a change is
437 437
          revised, this will have a different value.
438 438
 
439
+
440
+      .. var:: resources
441
+         :type: dict
442
+
443
+         A job using container build resources has access to a resources
444
+         variable that describes them. Resources is a dictionary keyed by
445
+         node name; each value consists of:
446
+
447
+        .. var:: namespace
448
+
449
+            The resource's namespace name.
450
+
451
+        .. var:: context
452
+
453
+            The kube config context name.
454
+
455
+        .. var:: pod
456
+
457
+            The name of the pod when the label defines a kubectl connection.
458
+
459
+        Project or namespace resources might be used in a template as:
460
+
461
+        .. code-block:: yaml
462
+
463
+            - hosts: localhost
464
+              tasks:
465
+                - name: Create a k8s resource
466
+                  k8s_raw:
467
+                    state: present
468
+                    context: "{{ zuul.resources['node-name'].context }}"
469
+                    namespace: "{{ zuul.resources['node-name'].namespace }}"
470
+
471
+        Kubectl resources might be used in a template as:
472
+
473
+        .. code-block:: yaml
474
+
475
+            - hosts: localhost
476
+              tasks:
477
+                - name: Copy src repos to the pod
478
+                  command: >
479
+                    oc rsync -q --progress=false
480
+                      {{ zuul.executor.src_root }}/
481
+                      {{ zuul.resources['node-name'].pod }}:src/
482
+                  no_log: true
483
+
484
+
439 485
 .. var:: zuul_success
440 486
 
441 487
    Post run playbook(s) will be passed this variable to indicate if the run

+ 14
- 0
tests/base.py View File

@@ -1860,6 +1860,20 @@ class FakeNodepool(object):
1860 1860
             data['connection_type'] = 'winrm'
1861 1861
         if 'network' in node_type:
1862 1862
             data['connection_type'] = 'network_cli'
1863
+        if 'kubernetes-namespace' in node_type or 'fedora-pod' in node_type:
1864
+            data['connection_type'] = 'namespace'
1865
+            data['connection_port'] = {
1866
+                'name': 'zuul-ci',
1867
+                'namespace': 'zuul-ci-abcdefg',
1868
+                'host': 'localhost',
1869
+                'skiptls': True,
1870
+                'token': 'FakeToken',
1871
+                'ca_crt': 'FakeCA',
1872
+                'user': 'zuul-worker',
1873
+            }
1874
+            if 'fedora-pod' in node_type:
1875
+                data['connection_type'] = 'kubectl'
1876
+                data['connection_port']['pod'] = 'fedora-abcdefg'
1863 1877
 
1864 1878
         data = json.dumps(data).encode('utf8')
1865 1879
         path = self.client.create(path, data,

+ 8
- 0
tests/fixtures/config/container-build-resources/git/common-config/playbooks/container-machine.yaml View File

@@ -0,0 +1,8 @@
1
+- hosts: localhost
2
+  tasks:
3
+    # We expect a namespace, a context and a pod name
4
+    - assert:
5
+        that:
6
+          - zuul.resources.container.namespace == 'zuul-ci-abcdefg'
7
+          - zuul.resources.container.context == 'zuul-ci-abcdefg:zuul-worker/'
8
+          - zuul.resources.container.pod == 'fedora-abcdefg'

+ 7
- 0
tests/fixtures/config/container-build-resources/git/common-config/playbooks/container-native.yaml View File

@@ -0,0 +1,7 @@
1
+- hosts: localhost
2
+  tasks:
3
+    # We expect a namespace and a context
4
+    - assert:
5
+        that:
6
+          - zuul.resources.cluster1.namespace == 'zuul-ci-abcdefg'
7
+          - zuul.resources.cluster1.context == 'zuul-ci-abcdefg:zuul-worker/'

+ 39
- 0
tests/fixtures/config/container-build-resources/git/common-config/zuul.yaml View File

@@ -0,0 +1,39 @@
1
+- pipeline:
2
+    name: check
3
+    manager: independent
4
+    trigger:
5
+      gerrit:
6
+        - event: patchset-created
7
+    success:
8
+      gerrit:
9
+        Verified: 1
10
+    failure:
11
+      gerrit:
12
+        Verified: -1
13
+
14
+- job:
15
+    name: base
16
+    parent: null
17
+
18
+- job:
19
+    name: container-machine
20
+    nodeset:
21
+      nodes:
22
+        - name: container
23
+          label: fedora-pod
24
+    run: playbooks/container-machine.yaml
25
+
26
+- job:
27
+    name: container-native
28
+    nodeset:
29
+      nodes:
30
+        - name: cluster1
31
+          label: kubernetes-namespace
32
+    run: playbooks/container-native.yaml
33
+
34
+- project:
35
+    name: org/project
36
+    check:
37
+      jobs:
38
+        - container-machine
39
+        - container-native

+ 1
- 0
tests/fixtures/config/container-build-resources/git/org_project/README View File

@@ -0,0 +1 @@
1
+test

+ 8
- 0
tests/fixtures/config/container-build-resources/main.yaml View File

@@ -0,0 +1,8 @@
1
+- tenant:
2
+    name: tenant-one
3
+    source:
4
+      gerrit:
5
+        config-projects:
6
+          - common-config
7
+        untrusted-projects:
8
+          - org/project

+ 14
- 0
tests/unit/test_v3.py View File

@@ -4452,3 +4452,17 @@ class TestJobPause(AnsibleZuulTestCase):
4452 4452
         ])
4453 4453
 
4454 4454
         self.assertIn('test : SKIPPED', A.messages[0])
4455
+
4456
+
4457
+class TestContainerJobs(AnsibleZuulTestCase):
4458
+    tenant_config_file = "config/container-build-resources/main.yaml"
4459
+
4460
+    def test_container_jobs(self):
4461
+        A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
4462
+        self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
4463
+        self.waitUntilSettled()
4464
+
4465
+        self.assertHistory([
4466
+            dict(name='container-machine', result='SUCCESS', changes='1,1'),
4467
+            dict(name='container-native', result='SUCCESS', changes='1,1'),
4468
+        ])

+ 3
- 0
zuul/ansible/callback/zuul_stream.py View File

@@ -230,6 +230,9 @@ class CallbackModule(default.CallbackModule):
230 230
                 if task.loop:
231 231
                     # Don't try to stream from loops
232 232
                     continue
233
+                if play_vars[host].get('ansible_connection') in ('kubectl', ):
234
+                    # Don't try to stream from kubectl connection
235
+                    continue
233 236
 
234 237
                 log_id = "%s-%s" % (
235 238
                     task._uuid, paths._sanitize_filename(inventory_hostname))

+ 86
- 1
zuul/executor/server.py View File

@@ -27,6 +27,7 @@ import threading
27 27
 import time
28 28
 import traceback
29 29
 import git
30
+from urllib.parse import urlsplit
30 31
 
31 32
 from zuul.lib.yamlutil import yaml
32 33
 from zuul.lib.config import get_default
@@ -52,7 +53,8 @@ BUFFER_LINES_FOR_SYNTAX = 200
52 53
 COMMANDS = ['stop', 'pause', 'unpause', 'graceful', 'verbose',
53 54
             'unverbose', 'keep', 'nokeep']
54 55
 DEFAULT_FINGER_PORT = 7900
55
-BLACKLISTED_ANSIBLE_CONNECTION_TYPES = ['network_cli']
56
+BLACKLISTED_ANSIBLE_CONNECTION_TYPES = [
57
+    'network_cli', 'kubectl', 'project', 'namespace']
56 58
 
57 59
 
58 60
 class StopException(Exception):
@@ -1544,6 +1546,60 @@ class AnsibleJob(object):
1544 1546
         self.log.debug("Adding role path %s", role_path)
1545 1547
         jobdir_playbook.roles_path.append(role_path)
1546 1548
 
1549
+    def prepareKubeConfig(self, data):
1550
+        kube_cfg_path = os.path.join(self.jobdir.work_root, ".kube", "config")
1551
+        if os.path.exists(kube_cfg_path):
1552
+            kube_cfg = yaml.safe_load(open(kube_cfg_path))
1553
+        else:
1554
+            os.makedirs(os.path.dirname(kube_cfg_path), exist_ok=True)
1555
+            kube_cfg = {
1556
+                'apiVersion': 'v1',
1557
+                'kind': 'Config',
1558
+                'preferences': {},
1559
+                'users': [],
1560
+                'clusters': [],
1561
+                'contexts': [],
1562
+                'current-context': None,
1563
+            }
1564
+        # Add cluster
1565
+        cluster_name = urlsplit(data['host']).netloc.replace('.', '-')
1566
+        cluster = {
1567
+            'server': data['host'],
1568
+        }
1569
+        if data.get('ca_crt'):
1570
+            cluster['certificate-authority-data'] = data['ca_crt']
1571
+        if data['skiptls']:
1572
+            cluster['insecure-skip-tls-verify'] = True
1573
+        kube_cfg['clusters'].append({
1574
+            'name': cluster_name,
1575
+            'cluster': cluster,
1576
+        })
1577
+
1578
+        # Add user
1579
+        user_name = "%s:%s" % (data['namespace'], data['user'])
1580
+        kube_cfg['users'].append({
1581
+            'name': user_name,
1582
+            'user': {
1583
+                'token': data['token'],
1584
+            },
1585
+        })
1586
+
1587
+        # Add context
1588
+        data['context_name'] = "%s/%s" % (user_name, cluster_name)
1589
+        kube_cfg['contexts'].append({
1590
+            'name': data['context_name'],
1591
+            'context': {
1592
+                'user': user_name,
1593
+                'cluster': cluster_name,
1594
+                'namespace': data['namespace']
1595
+            }
1596
+        })
1597
+        if not kube_cfg['current-context']:
1598
+            kube_cfg['current-context'] = data['context_name']
1599
+
1600
+        with open(kube_cfg_path, "w") as of:
1601
+            of.write(yaml.safe_dump(kube_cfg, default_flow_style=False))
1602
+
1547 1603
     def prepareAnsibleFiles(self, args):
1548 1604
         all_vars = args['vars'].copy()
1549 1605
         check_varnames(all_vars)
@@ -1558,6 +1614,35 @@ class AnsibleJob(object):
1558 1614
             result_data_file=self.jobdir.result_data_file,
1559 1615
             inventory_file=self.jobdir.inventory)
1560 1616
 
1617
+        resources_nodes = []
1618
+        all_vars['zuul']['resources'] = {}
1619
+        for node in args['nodes']:
1620
+            if node.get('connection_type') in (
1621
+                    'namespace', 'project', 'kubectl'):
1622
+                # TODO: decrypt resource data using scheduler key
1623
+                data = node['connection_port']
1624
+                # Setup kube/config file
1625
+                self.prepareKubeConfig(data)
1626
+                # Convert connection_port in kubectl connection parameters
1627
+                node['connection_port'] = None
1628
+                node['kubectl_namespace'] = data['namespace']
1629
+                node['kubectl_context'] = data['context_name']
1630
+                # Add node information to zuul_resources
1631
+                all_vars['zuul']['resources'][node['name'][0]] = {
1632
+                    'namespace': data['namespace'],
1633
+                    'context': data['context_name'],
1634
+                }
1635
+                if node['connection_type'] in ('project', 'namespace'):
1636
+                    # Projects are special nodes that are not added to the inventory
1637
+                    resources_nodes.append(node)
1638
+                else:
1639
+                    # Add the real pod name to the resources_var
1640
+                    all_vars['zuul']['resources'][
1641
+                        node['name'][0]]['pod'] = data['pod']
1642
+        # Remove resource node from nodes list
1643
+        for node in resources_nodes:
1644
+            args['nodes'].remove(node)
1645
+
1561 1646
         nodes = self.getHostList(args)
1562 1647
         setup_inventory = make_setup_inventory_dict(nodes)
1563 1648
         inventory = make_inventory_dict(nodes, args, all_vars)

Loading…
Cancel
Save