Browse Source

executor: add support for generic build resource

This change adds executor support for generic build resources.
Using the connection_port, a kube config file is generated to enable ansible
tasks to use the resource. Context and pod name are also exported in a
zuul.resources variable to enable job direct access to the build resource.

Currently this change supports 'kubectl' resource for containers that behave
like a machine, and 'project'/'namespace' resource for native containers.

Change-Id: Icdb9e800177dc770c58f65b02456a6b904be0666
changes/68/570668/14
Tristan Cacqueray 3 years ago
parent
commit
3795280382
10 changed files with 226 additions and 1 deletions
  1. +46
    -0
      doc/source/user/jobs.rst
  2. +14
    -0
      tests/base.py
  3. +8
    -0
      tests/fixtures/config/container-build-resources/git/common-config/playbooks/container-machine.yaml
  4. +7
    -0
      tests/fixtures/config/container-build-resources/git/common-config/playbooks/container-native.yaml
  5. +39
    -0
      tests/fixtures/config/container-build-resources/git/common-config/zuul.yaml
  6. +1
    -0
      tests/fixtures/config/container-build-resources/git/org_project/README
  7. +8
    -0
      tests/fixtures/config/container-build-resources/main.yaml
  8. +14
    -0
      tests/unit/test_v3.py
  9. +3
    -0
      zuul/ansible/callback/zuul_stream.py
  10. +86
    -1
      zuul/executor/server.py

+ 46
- 0
doc/source/user/jobs.rst View File

@ -436,6 +436,52 @@ of item.
The patchset identifier for the change. If a change is
revised, this will have a different value.
.. var:: resources
:type: dict
A job using container build resources has access to a resources variable
that describes each resource. Resources is a dictionary keyed by node name;
each value consists of:
.. var:: namespace
The resource's namespace name.
.. var:: context
The kube config context name.
.. var:: pod
The name of the pod when the label defines a kubectl connection.
Project or namespace resources might be used in a template as:
.. code-block:: yaml
- hosts: localhost
tasks:
- name: Create a k8s resource
k8s_raw:
state: present
context: "{{ zuul.resources['node-name'].context }}"
namespace: "{{ zuul.resources['node-name'].namespace }}"
Kubectl resources might be used in a template as:
.. code-block:: yaml
- hosts: localhost
tasks:
- name: Copy src repos to the pod
command: >
oc rsync -q --progress=false
{{ zuul.executor.src_root }}/
{{ zuul.resources['node-name'].pod }}:src/
no_log: true
.. var:: zuul_success
Post run playbook(s) will be passed this variable to indicate if the run


+ 14
- 0
tests/base.py View File

@ -1841,6 +1841,20 @@ class FakeNodepool(object):
data['connection_type'] = 'winrm'
if 'network' in node_type:
data['connection_type'] = 'network_cli'
if 'kubernetes-namespace' in node_type or 'fedora-pod' in node_type:
data['connection_type'] = 'namespace'
data['connection_port'] = {
'name': 'zuul-ci',
'namespace': 'zuul-ci-abcdefg',
'host': 'localhost',
'skiptls': True,
'token': 'FakeToken',
'ca_crt': 'FakeCA',
'user': 'zuul-worker',
}
if 'fedora-pod' in node_type:
data['connection_type'] = 'kubectl'
data['connection_port']['pod'] = 'fedora-abcdefg'
data = json.dumps(data).encode('utf8')
path = self.client.create(path, data,


+ 8
- 0
tests/fixtures/config/container-build-resources/git/common-config/playbooks/container-machine.yaml View File

@ -0,0 +1,8 @@
- hosts: localhost
tasks:
# We expect a namespace, a context and a pod name
- assert:
that:
- zuul.resources.container.namespace == 'zuul-ci-abcdefg'
- zuul.resources.container.context == 'zuul-ci-abcdefg:zuul-worker/'
- zuul.resources.container.pod == 'fedora-abcdefg'

+ 7
- 0
tests/fixtures/config/container-build-resources/git/common-config/playbooks/container-native.yaml View File

@ -0,0 +1,7 @@
- hosts: localhost
tasks:
# We expect a namespace and a context
- assert:
that:
- zuul.resources.cluster1.namespace == 'zuul-ci-abcdefg'
- zuul.resources.cluster1.context == 'zuul-ci-abcdefg:zuul-worker/'

+ 39
- 0
tests/fixtures/config/container-build-resources/git/common-config/zuul.yaml View File

@ -0,0 +1,39 @@
- pipeline:
name: check
manager: independent
trigger:
gerrit:
- event: patchset-created
success:
gerrit:
Verified: 1
failure:
gerrit:
Verified: -1
- job:
name: base
parent: null
- job:
name: container-machine
nodeset:
nodes:
- name: container
label: fedora-pod
run: playbooks/container-machine.yaml
- job:
name: container-native
nodeset:
nodes:
- name: cluster1
label: kubernetes-namespace
run: playbooks/container-native.yaml
- project:
name: org/project
check:
jobs:
- container-machine
- container-native

+ 1
- 0
tests/fixtures/config/container-build-resources/git/org_project/README View File

@ -0,0 +1 @@
test

+ 8
- 0
tests/fixtures/config/container-build-resources/main.yaml View File

@ -0,0 +1,8 @@
- tenant:
name: tenant-one
source:
gerrit:
config-projects:
- common-config
untrusted-projects:
- org/project

+ 14
- 0
tests/unit/test_v3.py View File

@ -4452,3 +4452,17 @@ class TestJobPause(AnsibleZuulTestCase):
])
self.assertIn('test : SKIPPED', A.messages[0])
class TestContainerJobs(AnsibleZuulTestCase):
    # End-to-end test for container build resources: the fake nodepool
    # hands out 'kubectl' (fedora-pod label) and 'namespace'
    # (kubernetes-namespace label) connection types, and the fixture
    # playbooks assert the zuul.resources variable that the executor
    # exports for those nodes.
    tenant_config_file = "config/container-build-resources/main.yaml"

    def test_container_jobs(self):
        # Propose a change and let both container jobs run in check.
        A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
        self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
        self.waitUntilSettled()
        # Both the machine-like (kubectl) and native (namespace) jobs
        # must succeed, which means their playbook assertions held.
        self.assertHistory([
            dict(name='container-machine', result='SUCCESS', changes='1,1'),
            dict(name='container-native', result='SUCCESS', changes='1,1'),
        ])

+ 3
- 0
zuul/ansible/callback/zuul_stream.py View File

@ -230,6 +230,9 @@ class CallbackModule(default.CallbackModule):
if task.loop:
# Don't try to stream from loops
continue
if play_vars[host].get('ansible_connection') in ('kubectl', ):
# Don't try to stream from kubectl connection
continue
log_id = "%s-%s" % (
task._uuid, paths._sanitize_filename(inventory_hostname))


+ 86
- 1
zuul/executor/server.py View File

@ -27,6 +27,7 @@ import threading
import time
import traceback
import git
from urllib.parse import urlsplit
from zuul.lib.yamlutil import yaml
from zuul.lib.config import get_default
@ -52,7 +53,8 @@ BUFFER_LINES_FOR_SYNTAX = 200
COMMANDS = ['stop', 'pause', 'unpause', 'graceful', 'verbose',
'unverbose', 'keep', 'nokeep']
DEFAULT_FINGER_PORT = 7900
BLACKLISTED_ANSIBLE_CONNECTION_TYPES = ['network_cli']
BLACKLISTED_ANSIBLE_CONNECTION_TYPES = [
'network_cli', 'kubectl', 'project', 'namespace']
class StopException(Exception):
@ -1543,6 +1545,60 @@ class AnsibleJob(object):
self.log.debug("Adding role path %s", role_path)
jobdir_playbook.roles_path.append(role_path)
def prepareKubeConfig(self, data):
    """Create or extend the job's kube config from a build resource.

    Builds ~work_root/.kube/config so ansible tasks (and the kubectl
    connection plugin) can reach the build resource.  If the file
    already exists (a previous resource node of this job created it),
    the new cluster/user/context entries are merged into it.

    :param dict data: the node's connection_port dictionary; must
        contain 'host', 'namespace', 'user', 'token' and 'skiptls',
        and may contain 'ca_crt'.  The computed context name is
        written back into data['context_name'] for the caller.
    """
    kube_cfg_path = os.path.join(self.jobdir.work_root, ".kube", "config")
    if os.path.exists(kube_cfg_path):
        # Merge into the config written for a previous resource node.
        # Use a context manager so the handle is closed promptly
        # (the original leaked the open file object).
        with open(kube_cfg_path) as kube_cfg_file:
            kube_cfg = yaml.safe_load(kube_cfg_file)
    else:
        os.makedirs(os.path.dirname(kube_cfg_path), exist_ok=True)
        kube_cfg = {
            'apiVersion': 'v1',
            'kind': 'Config',
            'preferences': {},
            'users': [],
            'clusters': [],
            'contexts': [],
            'current-context': None,
        }
    # Add cluster: derive a config-safe name from the API host.
    cluster_name = urlsplit(data['host']).netloc.replace('.', '-')
    cluster = {
        'server': data['host'],
    }
    if data.get('ca_crt'):
        cluster['certificate-authority-data'] = data['ca_crt']
    if data['skiptls']:
        cluster['insecure-skip-tls-verify'] = True
    kube_cfg['clusters'].append({
        'name': cluster_name,
        'cluster': cluster,
    })
    # Add user: one token-authenticated user per namespace.
    user_name = "%s:%s" % (data['namespace'], data['user'])
    kube_cfg['users'].append({
        'name': user_name,
        'user': {
            'token': data['token'],
        },
    })
    # Add context tying user, cluster and namespace together; export
    # the name so the caller can reference it in zuul.resources.
    data['context_name'] = "%s/%s" % (user_name, cluster_name)
    kube_cfg['contexts'].append({
        'name': data['context_name'],
        'context': {
            'user': user_name,
            'cluster': cluster_name,
            'namespace': data['namespace']
        }
    })
    # Keep the first context as the default; later resources only add
    # alternate contexts.
    if not kube_cfg['current-context']:
        kube_cfg['current-context'] = data['context_name']
    with open(kube_cfg_path, "w") as of:
        of.write(yaml.safe_dump(kube_cfg, default_flow_style=False))
def prepareAnsibleFiles(self, args):
all_vars = args['vars'].copy()
check_varnames(all_vars)
@ -1557,6 +1613,35 @@ class AnsibleJob(object):
result_data_file=self.jobdir.result_data_file,
inventory_file=self.jobdir.inventory)
resources_nodes = []
all_vars['zuul']['resources'] = {}
for node in args['nodes']:
if node.get('connection_type') in (
'namespace', 'project', 'kubectl'):
# TODO: decrypt resource data using scheduler key
data = node['connection_port']
# Setup kube/config file
self.prepareKubeConfig(data)
# Convert connection_port in kubectl connection parameters
node['connection_port'] = None
node['kubectl_namespace'] = data['namespace']
node['kubectl_context'] = data['context_name']
# Add node information to zuul_resources
all_vars['zuul']['resources'][node['name'][0]] = {
'namespace': data['namespace'],
'context': data['context_name'],
}
if node['connection_type'] in ('project', 'namespace'):
# Project are special nodes that are not the inventory
resources_nodes.append(node)
else:
# Add the real pod name to the resources_var
all_vars['zuul']['resources'][
node['name'][0]]['pod'] = data['pod']
# Remove resource node from nodes list
for node in resources_nodes:
args['nodes'].remove(node)
nodes = self.getHostList(args)
setup_inventory = make_setup_inventory_dict(nodes)
inventory = make_inventory_dict(nodes, args, all_vars)


Loading…
Cancel
Save