Merge "executor: add support for generic build resource"

This commit is contained in:
Zuul 2018-12-20 00:44:00 +00:00 committed by Gerrit Code Review
commit 4389e9faa1
10 changed files with 226 additions and 1 deletions

View File

@ -436,6 +436,52 @@ of item.
The patchset identifier for the change. If a change is
revised, this will have a different value.
.. var:: resources
   :type: dict

   A job using container build resources has access to a resources
   variable that describes them. Resources is a dictionary of group
   keys; each value consists of the following (illustrated below):

   .. var:: namespace

      The resource's namespace name.

   .. var:: context

      The kube config context name.

   .. var:: pod

      The name of the pod when the label defines a kubectl connection.
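   For example (with values taken from the test fixtures added in this
   change), a job with a single pod node named ``container`` would see
   a structure like:

   .. code-block:: yaml

      container:
        namespace: zuul-ci-abcdefg
        context: zuul-ci-abcdefg:zuul-worker/
        pod: fedora-abcdefg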
   Project or namespace resources might be used in a template as:

   .. code-block:: yaml

      - hosts: localhost
        tasks:
          - name: Create a k8s resource
            k8s_raw:
              state: present
              context: "{{ zuul.resources['node-name'].context }}"
              namespace: "{{ zuul.resources['node-name'].namespace }}"
   Kubectl resources might be used in a template as:

   .. code-block:: yaml

      - hosts: localhost
        tasks:
          - name: Copy src repos to the pod
            command: >
              oc rsync -q --progress=false
              {{ zuul.executor.src_root }}/
              {{ zuul.resources['node-name'].pod }}:src/
            no_log: true
.. var:: zuul_success
Post run playbook(s) will be passed this variable to indicate if the run

View File

@ -1860,6 +1860,20 @@ class FakeNodepool(object):
data['connection_type'] = 'winrm'
if 'network' in node_type:
    data['connection_type'] = 'network_cli'
if 'kubernetes-namespace' in node_type or 'fedora-pod' in node_type:
    data['connection_type'] = 'namespace'
    data['connection_port'] = {
        'name': 'zuul-ci',
        'namespace': 'zuul-ci-abcdefg',
        'host': 'localhost',
        'skiptls': True,
        'token': 'FakeToken',
        'ca_crt': 'FakeCA',
        'user': 'zuul-worker',
    }
if 'fedora-pod' in node_type:
    data['connection_type'] = 'kubectl'
    data['connection_port']['pod'] = 'fedora-abcdefg'
data = json.dumps(data).encode('utf8')
path = self.client.create(path, data,

View File

@ -0,0 +1,8 @@
- hosts: localhost
  tasks:
    # We expect a namespace, a context and a pod name
    - assert:
        that:
          - zuul.resources.container.namespace == 'zuul-ci-abcdefg'
          - zuul.resources.container.context == 'zuul-ci-abcdefg:zuul-worker/'
          - zuul.resources.container.pod == 'fedora-abcdefg'

View File

@ -0,0 +1,7 @@
- hosts: localhost
  tasks:
    # We expect a namespace and a context
    - assert:
        that:
          - zuul.resources.cluster1.namespace == 'zuul-ci-abcdefg'
          - zuul.resources.cluster1.context == 'zuul-ci-abcdefg:zuul-worker/'

View File

@ -0,0 +1,39 @@
- pipeline:
    name: check
    manager: independent
    trigger:
      gerrit:
        - event: patchset-created
    success:
      gerrit:
        Verified: 1
    failure:
      gerrit:
        Verified: -1

- job:
    name: base
    parent: null

- job:
    name: container-machine
    nodeset:
      nodes:
        - name: container
          label: fedora-pod
    run: playbooks/container-machine.yaml

- job:
    name: container-native
    nodeset:
      nodes:
        - name: cluster1
          label: kubernetes-namespace
    run: playbooks/container-native.yaml

- project:
    name: org/project
    check:
      jobs:
        - container-machine
        - container-native

View File

@ -0,0 +1 @@
test

View File

@ -0,0 +1,8 @@
- tenant:
    name: tenant-one
    source:
      gerrit:
        config-projects:
          - common-config
        untrusted-projects:
          - org/project

View File

@ -4452,3 +4452,17 @@ class TestJobPause(AnsibleZuulTestCase):
])
self.assertIn('test : SKIPPED', A.messages[0])
class TestContainerJobs(AnsibleZuulTestCase):
    tenant_config_file = "config/container-build-resources/main.yaml"

    def test_container_jobs(self):
        A = self.fake_gerrit.addFakeChange('org/project', 'master', 'A')
        self.fake_gerrit.addEvent(A.getPatchsetCreatedEvent(1))
        self.waitUntilSettled()
        self.assertHistory([
            dict(name='container-machine', result='SUCCESS', changes='1,1'),
            dict(name='container-native', result='SUCCESS', changes='1,1'),
        ])

View File

@ -230,6 +230,9 @@ class CallbackModule(default.CallbackModule):
if task.loop:
    # Don't try to stream from loops
    continue
if play_vars[host].get('ansible_connection') in ('kubectl', ):
    # Don't try to stream from kubectl connection
    continue
log_id = "%s-%s" % (
    task._uuid, paths._sanitize_filename(inventory_hostname))

View File

@ -27,6 +27,7 @@ import threading
import time
import traceback
import git
from urllib.parse import urlsplit
from zuul.lib.yamlutil import yaml
from zuul.lib.config import get_default
@ -52,7 +53,8 @@ BUFFER_LINES_FOR_SYNTAX = 200
COMMANDS = ['stop', 'pause', 'unpause', 'graceful', 'verbose',
            'unverbose', 'keep', 'nokeep']
DEFAULT_FINGER_PORT = 7900
BLACKLISTED_ANSIBLE_CONNECTION_TYPES = ['network_cli']
BLACKLISTED_ANSIBLE_CONNECTION_TYPES = [
    'network_cli', 'kubectl', 'project', 'namespace']
class StopException(Exception):
@ -1544,6 +1546,60 @@ class AnsibleJob(object):
self.log.debug("Adding role path %s", role_path)
jobdir_playbook.roles_path.append(role_path)
def prepareKubeConfig(self, data):
    kube_cfg_path = os.path.join(self.jobdir.work_root, ".kube", "config")
    if os.path.exists(kube_cfg_path):
        kube_cfg = yaml.safe_load(open(kube_cfg_path))
    else:
        os.makedirs(os.path.dirname(kube_cfg_path), exist_ok=True)
        kube_cfg = {
            'apiVersion': 'v1',
            'kind': 'Config',
            'preferences': {},
            'users': [],
            'clusters': [],
            'contexts': [],
            'current-context': None,
        }

    # Add cluster
    cluster_name = urlsplit(data['host']).netloc.replace('.', '-')
    cluster = {
        'server': data['host'],
    }
    if data.get('ca_crt'):
        cluster['certificate-authority-data'] = data['ca_crt']
    if data['skiptls']:
        cluster['insecure-skip-tls-verify'] = True
    kube_cfg['clusters'].append({
        'name': cluster_name,
        'cluster': cluster,
    })

    # Add user
    user_name = "%s:%s" % (data['namespace'], data['user'])
    kube_cfg['users'].append({
        'name': user_name,
        'user': {
            'token': data['token'],
        },
    })

    # Add context
    data['context_name'] = "%s/%s" % (user_name, cluster_name)
    kube_cfg['contexts'].append({
        'name': data['context_name'],
        'context': {
            'user': user_name,
            'cluster': cluster_name,
            'namespace': data['namespace']
        }
    })
    if not kube_cfg['current-context']:
        kube_cfg['current-context'] = data['context_name']

    with open(kube_cfg_path, "w") as of:
        of.write(yaml.safe_dump(kube_cfg, default_flow_style=False))
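For illustration only (not part of the change): with the fake connection data used in the tests above (host 'localhost', namespace 'zuul-ci-abcdefg', user 'zuul-worker', skiptls True), prepareKubeConfig would write a .kube/config roughly like the sketch below. Note the empty cluster name: 'localhost' has no URL scheme, so urlsplit yields an empty netloc, which is why the context names asserted in the test playbooks end with a trailing '/'.

    apiVersion: v1
    kind: Config
    preferences: {}
    clusters:
    - name: ''
      cluster:
        server: localhost
        certificate-authority-data: FakeCA
        insecure-skip-tls-verify: true
    users:
    - name: zuul-ci-abcdefg:zuul-worker
      user:
        token: FakeToken
    contexts:
    - name: zuul-ci-abcdefg:zuul-worker/
      context:
        cluster: ''
        user: zuul-ci-abcdefg:zuul-worker
        namespace: zuul-ci-abcdefg
    current-context: zuul-ci-abcdefg:zuul-worker/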
def prepareAnsibleFiles(self, args):
all_vars = args['vars'].copy()
check_varnames(all_vars)
@ -1558,6 +1614,35 @@ class AnsibleJob(object):
result_data_file=self.jobdir.result_data_file,
inventory_file=self.jobdir.inventory)
resources_nodes = []
all_vars['zuul']['resources'] = {}
for node in args['nodes']:
    if node.get('connection_type') in (
            'namespace', 'project', 'kubectl'):
        # TODO: decrypt resource data using scheduler key
        data = node['connection_port']
        # Set up the kube/config file
        self.prepareKubeConfig(data)
        # Convert connection_port into kubectl connection parameters
        node['connection_port'] = None
        node['kubectl_namespace'] = data['namespace']
        node['kubectl_context'] = data['context_name']
        # Add node information to zuul.resources
        all_vars['zuul']['resources'][node['name'][0]] = {
            'namespace': data['namespace'],
            'context': data['context_name'],
        }
        if node['connection_type'] in ('project', 'namespace'):
            # Projects and namespaces are special nodes that are not
            # added to the inventory
            resources_nodes.append(node)
        else:
            # Add the real pod name to the resources var
            all_vars['zuul']['resources'][
                node['name'][0]]['pod'] = data['pod']
# Remove resource nodes from the nodes list
for node in resources_nodes:
    args['nodes'].remove(node)
nodes = self.getHostList(args)
setup_inventory = make_setup_inventory_dict(nodes)
inventory = make_inventory_dict(nodes, args, all_vars)