# qinling/qinling/engine/default_engine.py
# Copyright 2017 Catalyst IT Limited
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log as logging
import requests
import tenacity
from qinling.db import api as db_api
from qinling.engine import utils
from qinling import status
from qinling.utils import common
from qinling.utils import constants
from qinling.utils import etcd_util
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class DefaultEngine(object):
    """Default Qinling engine.

    Delegates runtime-pool and worker management to the configured
    orchestrator backend and records state in the database.
    """

    def __init__(self, orchestrator):
        """Keep a handle on the orchestrator and a reusable HTTP session.

        :param orchestrator: Backend used to manage runtime pools and
            function workers.
        """
        self.orchestrator = orchestrator
        # Shared session so repeated function invocations reuse connections.
        self.session = requests.Session()
def create_runtime(self, ctx, runtime_id):
LOG.info('Start to create.',
resource={'type': 'runtime', 'id': runtime_id})
with db_api.transaction():
runtime = db_api.get_runtime(runtime_id)
labels = {'runtime_id': runtime_id}
try:
self.orchestrator.create_pool(
runtime_id,
runtime.image,
labels=labels,
)
runtime.status = status.AVAILABLE
except Exception as e:
LOG.exception(
'Failed to create pool for runtime %s. Error: %s',
runtime_id,
str(e)
)
runtime.status = status.ERROR
def delete_runtime(self, ctx, runtime_id):
resource = {'type': 'runtime', 'id': runtime_id}
LOG.info('Start to delete.', resource=resource)
labels = {'runtime_id': runtime_id}
self.orchestrator.delete_pool(runtime_id, labels=labels)
db_api.delete_runtime(runtime_id)
LOG.info('Deleted.', resource=resource)
def update_runtime(self, ctx, runtime_id, image=None, pre_image=None):
resource = {'type': 'runtime', 'id': runtime_id}
LOG.info('Start to update, image=%s', image, resource=resource)
labels = {'runtime_id': runtime_id}
ret = self.orchestrator.update_pool(
runtime_id, labels=labels, image=image
)
if ret:
values = {'status': status.AVAILABLE}
db_api.update_runtime(runtime_id, values)
LOG.info('Updated.', resource=resource)
else:
values = {'status': status.AVAILABLE, 'image': pre_image}
db_api.update_runtime(runtime_id, values)
LOG.info('Rollbacked.', resource=resource)
    @tenacity.retry(
        wait=tenacity.wait_fixed(1),
        stop=tenacity.stop_after_attempt(30),
        retry=(tenacity.retry_if_result(lambda result: result is False))
    )
    def function_load_check(self, function_id, runtime_id):
        """Scale up a function's workers when its load warrants it.

        Retried by tenacity (up to 30 attempts, 1s apart) only while the
        result is ``False``, i.e. while the etcd worker lock is held by
        another engine.

        :returns: The service URL returned by :meth:`scaleup_function` when a
            scale-up happened, ``None`` (implicitly) when no scaling was
            needed, or ``False`` when the lock could not be acquired — the
            latter is consumed by the retry predicate, not by callers.
        """
        with etcd_util.get_worker_lock() as lock:
            if not lock.is_acquired():
                # Another engine holds the lock; returning False triggers a
                # tenacity retry rather than an error.
                return False

            workers = etcd_util.get_workers(function_id)
            running_execs = db_api.get_executions(
                function_id=function_id, status=status.RUNNING
            )
            # Average running executions per worker. Both operands fall back
            # to 1 so an empty list cannot cause ZeroDivisionError or a
            # zero concurrency value.
            concurrency = (len(running_execs) or 1) / (len(workers) or 1)
            if (len(workers) == 0 or
                    concurrency > CONF.engine.function_concurrency):
                LOG.info(
                    'Scale up function %s. Current concurrency: %s, execution '
                    'number %s, worker number %s',
                    function_id, concurrency, len(running_execs), len(workers)
                )
                # NOTE(kong): The increase step could be configurable
                return self.scaleup_function(None, function_id, runtime_id, 1)
    def create_execution(self, ctx, execution_id, function_id, runtime_id,
                         input=None):
        """Run one execution of a function and persist its result.

        Two paths:

        * Fast path — for non-image functions that already have (or just
          obtained, via auto scale-up) a worker service URL, POST directly to
          the worker's ``/execute`` endpoint and record the outcome.
        * Slow path — otherwise ask the orchestrator to prepare resources
          (a per-execution pod for image functions, the shared runtime pool
          for package functions) and run the execution through it.

        :param input: Execution input payload passed through to the worker.
            (Shadows the builtin; kept for API compatibility.)
        """
        LOG.info(
            'Creating execution. execution_id=%s, function_id=%s, '
            'runtime_id=%s, input=%s',
            execution_id, function_id, runtime_id, input
        )
        function = db_api.get_function(function_id)
        source = function.code['source']
        image = None
        identifier = None
        labels = None
        svc_url = None

        # Auto scale workers if needed
        if source != constants.IMAGE_FUNCTION:
            # May scale up and return the new service URL; otherwise fall
            # back to any URL already recorded in etcd.
            svc_url = self.function_load_check(function_id, runtime_id)
            temp_url = etcd_util.get_service_url(function_id)
            svc_url = svc_url or temp_url
            if svc_url:
                # Fast path: call the worker directly over HTTP.
                func_url = '%s/execute' % svc_url
                LOG.debug(
                    'Found service url for function: %s, execution: %s, url: %s',
                    function_id, execution_id, func_url
                )
                data = utils.get_request_data(
                    CONF, function_id, execution_id,
                    input, function.entry, function.trust_id
                )
                success, res = utils.url_request(
                    self.session, func_url, body=data
                )
                # The HTTP call and the function itself must both succeed.
                success = success and res.pop('success')
                LOG.debug(
                    'Finished execution %s, success: %s', execution_id, success
                )
                db_api.update_execution(
                    execution_id,
                    {
                        'status': status.SUCCESS if success else status.FAILED,
                        'logs': res.pop('logs', ''),
                        'result': res
                    }
                )
                return

        # Slow path: prepare resources through the orchestrator.
        if source == constants.IMAGE_FUNCTION:
            image = function.code['image']
            # Identifier doubles as the k8s resource name; keep it unique and
            # within the 63-character limit.
            identifier = ('%s-%s' %
                          (common.generate_unicode_uuid(dashed=False),
                           function_id)
                          )[:63]
            labels = {'function_id': function_id}
        else:
            identifier = runtime_id
            labels = {'runtime_id': runtime_id}

        _, svc_url = self.orchestrator.prepare_execution(
            function_id,
            image=image,
            identifier=identifier,
            labels=labels,
            input=input,
        )
        success, res = self.orchestrator.run_execution(
            execution_id,
            function_id,
            input=input,
            identifier=identifier,
            service_url=svc_url,
            entry=function.entry,
            trust_id=function.trust_id
        )

        logs = ''
        # Execution log is only available for non-image source execution.
        if svc_url:
            logs = res.pop('logs', '')
            success = success and res.pop('success')
        else:
            # If the function is created from docker image, the result is
            # direct output, here we convert to a dict to fit into the db
            # schema.
            res = {'output': res}

        LOG.debug(
            'Finished execution %s, success: %s', execution_id, success
        )
        db_api.update_execution(
            execution_id,
            {
                'status': status.SUCCESS if success else status.FAILED,
                'logs': logs,
                'result': res
            }
        )
def delete_function(self, ctx, function_id):
"""Deletes underlying resources allocated for function."""
resource = {'type': 'function', 'id': function_id}
LOG.info('Start to delete.', resource=resource)
labels = {'function_id': function_id}
self.orchestrator.delete_function(function_id, labels=labels)
LOG.info('Deleted.', resource=resource)
def scaleup_function(self, ctx, function_id, runtime_id, count=1):
worker_names, service_url = self.orchestrator.scaleup_function(
function_id,
identifier=runtime_id,
count=count
)
for name in worker_names:
etcd_util.create_worker(function_id, name)
etcd_util.create_service_url(function_id, service_url)
LOG.info('Finished scaling up function %s.', function_id)
return service_url
def scaledown_function(self, ctx, function_id, count=1):
workers = etcd_util.get_workers(function_id)
worker_deleted_num = (
count if len(workers) > count else len(workers) - 1
)
workers = workers[:worker_deleted_num]
for worker in workers:
LOG.debug('Removing worker %s', worker)
self.orchestrator.delete_worker(worker)
LOG.info('Finished scaling up function %s.', function_id)