diff --git a/doc/source/user/jobs.rst b/doc/source/user/jobs.rst
index 4dab6b93b7..fb549755a7 100644
--- a/doc/source/user/jobs.rst
+++ b/doc/source/user/jobs.rst
@@ -436,6 +436,52 @@ of item.
          The patchset identifier for the change.  If a change is revised,
          this will have a different value.
 
+   .. var:: resources
+      :type: dict
+
+      A job using container build resources has access to a resources
+      variable that describes them.  Resources is a dictionary of group
+      keys; each value consists of:
+
+      .. var:: namespace
+
+         The resource's namespace name.
+
+      .. var:: context
+
+         The kube config context name.
+
+      .. var:: pod
+
+         The name of the pod when the label defines a kubectl
+         connection.
+
+      Project or namespace resources might be used in a template as:
+
+      .. code-block:: yaml
+
+        - hosts: localhost
+          tasks:
+            - name: Create a k8s resource
+              k8s_raw:
+                state: present
+                context: "{{ zuul.resources['node-name'].context }}"
+                namespace: "{{ zuul.resources['node-name'].namespace }}"
+
+      Kubectl resources might be used in a template as:
+
+      .. code-block:: yaml
+
+        - hosts: localhost
+          tasks:
+            - name: Copy src repos to the pod
+              command: >
+                oc rsync -q --progress=false
+                {{ zuul.executor.src_root }}/
+                {{ zuul.resources['node-name'].pod }}:src/
+              no_log: true
+
 .. var:: zuul_success
 
    Post run playbook(s) will be passed this variable to indicate if the run
diff --git a/tests/base.py b/tests/base.py
index 2c6e675186..06b25c6b62 100644
--- a/tests/base.py
+++ b/tests/base.py
@@ -1841,6 +1841,20 @@ class FakeNodepool(object):
             data['connection_type'] = 'winrm'
         if 'network' in node_type:
             data['connection_type'] = 'network_cli'
+        if 'kubernetes-namespace' in node_type or 'fedora-pod' in node_type:
+            data['connection_type'] = 'namespace'
+            data['connection_port'] = {
+                'name': 'zuul-ci',
+                'namespace': 'zuul-ci-abcdefg',
+                'host': 'localhost',
+                'skiptls': True,
+                'token': 'FakeToken',
+                'ca_crt': 'FakeCA',
+                'user': 'zuul-worker',
+            }
+        if 'fedora-pod' in node_type:
+            data['connection_type'] = 'kubectl'
+            data['connection_port']['pod'] = 'fedora-abcdefg'
         data = json.dumps(data).encode('utf8')
 
         path = self.client.create(path, data,
diff --git a/tests/fixtures/config/container-build-resources/git/common-config/playbooks/container-machine.yaml b/tests/fixtures/config/container-build-resources/git/common-config/playbooks/container-machine.yaml
new file mode 100644
index 0000000000..0cb21d8584
--- /dev/null
+++ b/tests/fixtures/config/container-build-resources/git/common-config/playbooks/container-machine.yaml
@@ -0,0 +1,8 @@
+- hosts: localhost
+  tasks:
+    # We expect a namespace, a context and a pod name
+    - assert:
+        that:
+          - zuul.resources.container.namespace == 'zuul-ci-abcdefg'
+          - zuul.resources.container.context == 'zuul-ci-abcdefg:zuul-worker/'
+          - zuul.resources.container.pod == 'fedora-abcdefg'
diff --git a/tests/fixtures/config/container-build-resources/git/common-config/playbooks/container-native.yaml b/tests/fixtures/config/container-build-resources/git/common-config/playbooks/container-native.yaml
new file mode 100644
index 0000000000..121f22e55e
--- /dev/null
+++ b/tests/fixtures/config/container-build-resources/git/common-config/playbooks/container-native.yaml
@@ -0,0 +1,7 @@
+- hosts: localhost
+  tasks:
+    # We expect a namespace and a context
+    - assert:
+        that:
+          - zuul.resources.cluster1.namespace == 'zuul-ci-abcdefg'
+          - zuul.resources.cluster1.context == 'zuul-ci-abcdefg:zuul-worker/'
diff --git a/tests/fixtures/config/container-build-resources/git/common-config/zuul.yaml b/tests/fixtures/config/container-build-resources/git/common-config/zuul.yaml
new file mode 100644
index 0000000000..aad596a0b1
--- /dev/null
+++ b/tests/fixtures/config/container-build-resources/git/common-config/zuul.yaml
@@ -0,0 +1,39 @@
+- pipeline:
+    name: check
+    manager: independent
+    trigger:
+      gerrit:
+        - event: patchset-created
+    success:
+      gerrit:
+        Verified: 1
+    failure:
+      gerrit:
+        Verified: -1
+
+- job:
+    name: base
+    parent: null
+
+- job:
+    name: container-machine
+    nodeset:
+      nodes:
+        - name: container
+          label: fedora-pod
+    run: playbooks/container-machine.yaml
+
+- job:
+    name: container-native
+    nodeset:
+      nodes:
+        - name: cluster1
+          label: kubernetes-namespace
+    run: playbooks/container-native.yaml
+
+- project:
+    name: org/project
+    check:
+      jobs:
+        - container-machine
+        - container-native
diff --git a/tests/fixtures/config/container-build-resources/git/org_project/README b/tests/fixtures/config/container-build-resources/git/org_project/README
new file mode 100644
index 0000000000..9daeafb986
--- /dev/null
+++ b/tests/fixtures/config/container-build-resources/git/org_project/README
@@ -0,0 +1 @@
+test
diff --git a/tests/fixtures/config/container-build-resources/main.yaml b/tests/fixtures/config/container-build-resources/main.yaml
new file mode 100644
index 0000000000..208e274b13
--- /dev/null
+++ b/tests/fixtures/config/container-build-resources/main.yaml
@@ -0,0 +1,8 @@
+- tenant:
+    name: tenant-one
+    source:
+      gerrit:
+        config-projects:
+          - common-config
+        untrusted-projects:
+          - org/project
diff --git a/tests/unit/test_v3.py b/tests/unit/test_v3.py
index e0553d3a75..11064b81fd 100644
--- a/tests/unit/test_v3.py
+++ b/tests/unit/test_v3.py
@@ -4452,3 +4452,17 @@ class TestJobPause(AnsibleZuulTestCase):
         ])
 
         self.assertIn('test : SKIPPED', A.messages[0])
+
+
+class TestContainerJobs(AnsibleZuulTestCase):
+    tenant_config_file = "config/container-build-resources/main.yaml"
+
+    def test_container_jobs(self):
+        A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
+        self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
+        self.waitUntilSettled()
+
+        self.assertHistory([
+            dict(name='container-machine', result='SUCCESS', changes='1,1'),
+            dict(name='container-native', result='SUCCESS', changes='1,1'),
+        ])
diff --git a/zuul/ansible/callback/zuul_stream.py b/zuul/ansible/callback/zuul_stream.py
index 3a6cae716d..4245e60fb2 100644
--- a/zuul/ansible/callback/zuul_stream.py
+++ b/zuul/ansible/callback/zuul_stream.py
@@ -230,6 +230,9 @@ class CallbackModule(default.CallbackModule):
                 if task.loop:
                     # Don't try to stream from loops
                     continue
+                if play_vars[host].get('ansible_connection') in ('kubectl', ):
+                    # Don't try to stream from kubectl connections
+                    continue
                 log_id = "%s-%s" % (
                     task._uuid, paths._sanitize_filename(inventory_hostname))
diff --git a/zuul/executor/server.py b/zuul/executor/server.py
index 2f42c0fc88..d7681e004b 100644
--- a/zuul/executor/server.py
+++ b/zuul/executor/server.py
@@ -27,6 +27,7 @@ import threading
 import time
 import traceback
 import git
+from urllib.parse import urlsplit
 
 from zuul.lib.yamlutil import yaml
 from zuul.lib.config import get_default
@@ -52,7 +53,8 @@ BUFFER_LINES_FOR_SYNTAX = 200
 COMMANDS = ['stop', 'pause', 'unpause', 'graceful', 'verbose',
             'unverbose', 'keep', 'nokeep']
 DEFAULT_FINGER_PORT = 7900
-BLACKLISTED_ANSIBLE_CONNECTION_TYPES = ['network_cli']
+BLACKLISTED_ANSIBLE_CONNECTION_TYPES = [
+    'network_cli', 'kubectl', 'project', 'namespace']
 
 
 class StopException(Exception):
@@ -1543,6 +1545,60 @@ class AnsibleJob(object):
self.log.debug("Adding role path %s", role_path) jobdir_playbook.roles_path.append(role_path) + def prepareKubeConfig(self, data): + kube_cfg_path = os.path.join(self.jobdir.work_root, ".kube", "config") + if os.path.exists(kube_cfg_path): + kube_cfg = yaml.safe_load(open(kube_cfg_path)) + else: + os.makedirs(os.path.dirname(kube_cfg_path), exist_ok=True) + kube_cfg = { + 'apiVersion': 'v1', + 'kind': 'Config', + 'preferences': {}, + 'users': [], + 'clusters': [], + 'contexts': [], + 'current-context': None, + } + # Add cluster + cluster_name = urlsplit(data['host']).netloc.replace('.', '-') + cluster = { + 'server': data['host'], + } + if data.get('ca_crt'): + cluster['certificate-authority-data'] = data['ca_crt'] + if data['skiptls']: + cluster['insecure-skip-tls-verify'] = True + kube_cfg['clusters'].append({ + 'name': cluster_name, + 'cluster': cluster, + }) + + # Add user + user_name = "%s:%s" % (data['namespace'], data['user']) + kube_cfg['users'].append({ + 'name': user_name, + 'user': { + 'token': data['token'], + }, + }) + + # Add context + data['context_name'] = "%s/%s" % (user_name, cluster_name) + kube_cfg['contexts'].append({ + 'name': data['context_name'], + 'context': { + 'user': user_name, + 'cluster': cluster_name, + 'namespace': data['namespace'] + } + }) + if not kube_cfg['current-context']: + kube_cfg['current-context'] = data['context_name'] + + with open(kube_cfg_path, "w") as of: + of.write(yaml.safe_dump(kube_cfg, default_flow_style=False)) + def prepareAnsibleFiles(self, args): all_vars = args['vars'].copy() check_varnames(all_vars) @@ -1557,6 +1613,35 @@ class AnsibleJob(object): result_data_file=self.jobdir.result_data_file, inventory_file=self.jobdir.inventory) + resources_nodes = [] + all_vars['zuul']['resources'] = {} + for node in args['nodes']: + if node.get('connection_type') in ( + 'namespace', 'project', 'kubectl'): + # TODO: decrypt resource data using scheduler key + data = node['connection_port'] + # Setup kube/config file + self.prepareKubeConfig(data) + # Convert connection_port in kubectl connection parameters + node['connection_port'] = None + node['kubectl_namespace'] = data['namespace'] + node['kubectl_context'] = data['context_name'] + # Add node information to zuul_resources + all_vars['zuul']['resources'][node['name'][0]] = { + 'namespace': data['namespace'], + 'context': data['context_name'], + } + if node['connection_type'] in ('project', 'namespace'): + # Project are special nodes that are not the inventory + resources_nodes.append(node) + else: + # Add the real pod name to the resources_var + all_vars['zuul']['resources'][ + node['name'][0]]['pod'] = data['pod'] + # Remove resource node from nodes list + for node in resources_nodes: + args['nodes'].remove(node) + nodes = self.getHostList(args) setup_inventory = make_setup_inventory_dict(nodes) inventory = make_inventory_dict(nodes, args, all_vars)