Ansible launcher: register jobs from JJB

Use jenkins-job-builder to parse the job YAML files; then, when nodes
are added, register the corresponding jobs with Gearman.

Change-Id: I4947671a8e2c0ddbce50a47b8666b4c7ef72a974
Author:    James E. Blair
Date:      2016-04-18 15:34:36 -07:00
Committer: James E. Blair
Parent:    9b41a0b502
Commit:    119acf362f

2 changed files with 154 additions and 11 deletions
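
For orientation, a minimal standalone sketch of the JJB plumbing this change introduces; it mirrors the JJB helper class and loadJobs() in the diff below. The YAML path here is invented, and the Builder/parser calls assume the jenkins-job-builder release this code was written against.

# Illustrative sketch only; mirrors loadJobs() from this commit.
# The path '/etc/jenkins_jobs/jobs' is a made-up example, and the
# load_files()/parser.expandYaml() calls assume the jenkins-job-builder
# version targeted by this change.
import jenkins_jobs.builder


class JJB(jenkins_jobs.builder.Builder):
    def __init__(self):
        # Skip Builder.__init__: the launcher only needs the YAML parser,
        # not a connection to a Jenkins master.
        self.global_config = None
        self._plugins_list = []


builder = JJB()
builder.load_files(['/etc/jenkins_jobs/jobs'])
builder.parser.expandYaml()

jobs = {}
for job in builder.parser.jobs:    # each expanded job is a plain dict
    jobs[job['name']] = job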

zuul/cmd/launcher.py

@@ -22,6 +22,7 @@ import extras
# instead it depends on lockfile-0.9.1 which uses pidfile.
pid_file_module = extras.try_imports(['daemon.pidlockfile', 'daemon.pidfile'])
import logging
import os
import sys
import signal
@@ -47,6 +48,17 @@ class Launcher(zuul.cmd.ZuulApp):
                            help='show zuul version')
        self.args = parser.parse_args()

    def reconfigure_handler(self, signum, frame):
        signal.signal(signal.SIGHUP, signal.SIG_IGN)
        self.log.debug("Reconfiguration triggered")
        self.read_config()
        self.setup_logging('launcher', 'log_config')
        try:
            self.launcher.reconfigure(self.config)
        except Exception:
            self.log.exception("Reconfiguration failed:")
        signal.signal(signal.SIGHUP, self.reconfigure_handler)

    def exit_handler(self, signum, frame):
        signal.signal(signal.SIGUSR1, signal.SIG_IGN)
        self.launcher.stop()
@@ -58,10 +70,13 @@ class Launcher(zuul.cmd.ZuulApp):
        self.setup_logging('launcher', 'log_config')
        self.log = logging.getLogger("zuul.Launcher")
        LaunchServer = zuul.launcher.ansiblelaunchserver.LaunchServer
        self.launcher = LaunchServer(self.config)
        self.launcher.start()
        signal.signal(signal.SIGHUP, self.reconfigure_handler)
        signal.signal(signal.SIGUSR1, self.exit_handler)
        signal.signal(signal.SIGUSR2, zuul.cmd.stack_dump_handler)
        while True:
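
The handlers registered above mean a running launcher can be driven from outside with signals: SIGHUP re-reads the config and reloads the JJB job definitions, SIGUSR1 stops the launcher. A hypothetical operator-side snippet follows; the pid-file path is an assumption, not something this commit defines.

# Hypothetical example: signal a running zuul-launcher process.
# The pid-file location below is invented for illustration.
import os
import signal

with open('/var/run/zuul-launcher/zuul-launcher.pid') as f:
    pid = int(f.read().strip())

os.kill(pid, signal.SIGHUP)      # triggers reconfigure_handler()
# os.kill(pid, signal.SIGUSR1)   # triggers exit_handler()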

zuul/launcher/ansiblelaunchserver.py

@@ -14,6 +14,7 @@
import json
import logging
import multiprocessing
import os
import shutil
import socket
@@ -24,6 +25,7 @@ import traceback
import gear
import yaml

import jenkins_jobs.builder


class JobDir(object):
@@ -50,6 +52,9 @@ class LaunchServer(object):
    def __init__(self, config):
        self.config = config
        self.hostname = socket.gethostname()
        self.node_workers = {}
        self.mpmanager = multiprocessing.Manager()
        self.jobs = self.mpmanager.dict()

    def start(self):
        self._running = True
@@ -64,18 +69,41 @@ class LaunchServer(object):
        self.worker.waitForServer()
        self.log.debug("Registering")
        self.register()
        self.loadJobs()
        self.log.debug("Starting worker")
        self.thread = threading.Thread(target=self.run)
        self.thread.daemon = True
        self.thread.start()

    def loadJobs(self):
        self.log.debug("Loading jobs")
        builder = JJB()
        path = self.config.get('launcher', 'jenkins_jobs')
        builder.load_files([path])
        builder.parser.expandYaml()
        unseen = set(self.jobs.keys())
        for job in builder.parser.jobs:
            self.jobs[job['name']] = job
            unseen.discard(job['name'])
        for name in unseen:
            del self.jobs[name]

    def register(self):
        self.worker.registerFunction("node-assign:zuul")

    def reconfigure(self, config):
        self.log.debug("Reconfiguring")
        self.config = config
        self.loadJobs()
        for node in self.node_workers.values():
            node.queue.put(dict(action='reconfigure'))

    def stop(self):
        self.log.debug("Stopping")
        self._running = False
        self.worker.shutdown()
        for node in self.node_workers.values():
            node.queue.put(dict(action='stop'))
        self.log.debug("Stopped")

    def join(self):
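
register() above exposes a single control function, node-assign:zuul, and assignNode() in the next hunk decodes its arguments and forks a NodeWorker process per node. As a hedged sketch of the other side of that handshake, a client submission could look roughly like this; only the field names come from assignNode(), the server address and values are invented.

# Hypothetical submitter for "node-assign:zuul"; field names match what
# assignNode() reads, everything else is made up.
import json

import gear

client = gear.Client()
client.addServer('gearman.example.org', 4730)
client.waitForServer()

payload = dict(
    name='devstack-trusty-123',       # becomes the NodeWorker (and gear.Worker) name
    host='203.0.113.10',              # node the jobs will run on
    description='devstack-trusty node',
    labels=['devstack-trusty'],       # matched against each job's "node" field
)
client.submitJob(gear.Job('node-assign:zuul',
                          json.dumps(payload).encode('utf8')))
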
@@ -100,32 +128,126 @@ class LaunchServer(object):
                self.log.exception("Exception while getting job")

    def assignNode(self, job):
        args = json.loads(job.arguments)
        worker = NodeWorker(self.config, self.jobs,
                            args['name'], args['host'],
                            args['description'], args['labels'])
        self.node_workers[worker.name] = worker
        worker.process = multiprocessing.Process(target=worker.run)
        worker.process.start()
        data = dict(manager=self.hostname)
        job.sendWorkData(json.dumps(data))
        job.sendWorkComplete()


class NodeWorker(object):
    log = logging.getLogger("zuul.NodeWorker")

    def __init__(self, config, jobs, name, host, description, labels):
        self.config = config
        self.jobs = jobs
        self.name = name
        self.host = host
        self.description = description
        if not isinstance(labels, list):
            labels = [labels]
        self.labels = labels
        self.registered_functions = set()
        self._running = True
        self.queue = multiprocessing.Queue()

    def run(self):
        self._running_job = False
        server = self.config.get('gearman', 'server')
        if self.config.has_option('gearman', 'port'):
            port = self.config.get('gearman', 'port')
        else:
            port = 4730
        self.worker = gear.Worker(self.name)
        self.worker.addServer(server, port)
        self.log.debug("Waiting for server")
        self.worker.waitForServer()
        self.register()
        self.gearman_thread = threading.Thread(target=self.run_gearman)
        self.gearman_thread.daemon = True
        self.gearman_thread.start()
        while self._running:
            try:
                self._run_queue()
            except Exception:
                self.log.exception("Exception in queue manager:")

    def _run_queue(self):
        item = self.queue.get()
        if item['action'] == 'stop':
            self._running = False
            self.worker.shutdown()
        elif item['action'] == 'reconfigure':
            self.register()

    def run_gearman(self):
        while self._running:
            try:
                self._run_gearman()
            except Exception:
                self.log.exception("Exception in gearman manager:")

    def _run_gearman(self):
        job = self.worker.getJob()
        try:
            if job.name not in self.registered_functions:
                self.log.error("Unable to handle job %s" % job.name)
                job.sendWorkFail()
                return
            self.launch(job)
        except Exception:
            self.log.exception("Exception while running job")
            job.sendWorkException(traceback.format_exc())

    def generateFunctionNames(self, job):
        # This only supports "node: foo" and "node: foo || bar"
        ret = set()
        job_labels = job.get('node')
        matching_labels = set()
        if job_labels:
            job_labels = [x.strip() for x in job_labels.split('||')]
            matching_labels = set(self.labels) & set(job_labels)
            if not matching_labels:
                return ret
        ret.add('build:%s' % (job['name'],))
        for label in matching_labels:
            ret.add('build:%s:%s' % (job['name'], label))
        return ret

    def register(self):
        if self._running_job:
            return
        new_functions = set()
        for job in self.jobs.values():
            new_functions |= self.generateFunctionNames(job)
        for function in new_functions - self.registered_functions:
            self.worker.registerFunction(function)
        for function in self.registered_functions - new_functions:
            self.worker.unRegisterFunction(function)
        self.registered_functions = new_functions

    def launch(self, job):
        self._running_job = True
        thread = threading.Thread(target=self._launch, args=(job,))
        thread.start()

    def _launch(self, job):
        self.log.debug("Job %s: beginning" % (job.unique,))
        return  # TODO
        with JobDir() as jobdir:
            self.log.debug("Job %s: job root at %s" %
                           (job.unique, jobdir.root))
            args = json.loads(job.arguments)
            tasks = []
            for project in args['projects']:
                self.log.debug("Job %s: updating project %s" %
                               (job.unique, project['name']))
                tasks.append(self.update(project['name'], project['url']))
            for task in tasks:
                task.wait()
            self.log.debug("Job %s: git updates complete" % (job.unique,))
            merger = self._getMerger(jobdir.git_root)
            commit = merger.mergeChanges(args['items'])  # noqa
            # TODOv3: Ansible the ansible thing here.
            self.prepareAnsibleFiles(jobdir, args)
            result = self.runAnsible(jobdir)
@@ -176,3 +298,9 @@ class LaunchServer(object):
            return 'SUCCESS'
        else:
            return 'FAILURE'


class JJB(jenkins_jobs.builder.Builder):
    def __init__(self):
        self.global_config = None
        self._plugins_list = []
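
To make the label matching in NodeWorker.generateFunctionNames() concrete, here is the same naming rule restated as a standalone function with invented job and label names: a node offers build:<job> for every job it can run, plus build:<job>:<label> for each of its labels that the job's "node" expression names.

# Standalone restatement of the generateFunctionNames() rule, with
# invented job and label names to show the resulting gearman functions.
def function_names(job, worker_labels):
    # Only "node: foo" and "node: foo || bar" expressions are handled.
    ret = set()
    job_labels = job.get('node')
    matching = set()
    if job_labels:
        job_labels = [x.strip() for x in job_labels.split('||')]
        matching = set(worker_labels) & set(job_labels)
        if not matching:
            return ret          # restricted to labels this node lacks
    ret.add('build:%s' % job['name'])
    for label in matching:
        ret.add('build:%s:%s' % (job['name'], label))
    return ret


print(function_names({'name': 'gate-foo-docs'},
                     ['devstack-trusty']))
# {'build:gate-foo-docs'}  (no "node" restriction, any node offers it)

print(function_names({'name': 'gate-foo-unit',
                      'node': 'devstack-trusty || devstack-precise'},
                     ['devstack-trusty']))
# {'build:gate-foo-unit', 'build:gate-foo-unit:devstack-trusty'}

print(function_names({'name': 'gate-foo-unit', 'node': 'bare-centos'},
                     ['devstack-trusty']))
# set()  (job requires a label this node does not have)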