Scale down function

This is an admin-only operation. The load monitoring of function execution
depends on the monitoring solution of the underlying orchestrator.

Implements: blueprint qingling-autoscaling
Change-Id: I2eff62a45a718d230b51f18c5a49e0abea5f3164
Lingxian Kong  2017-09-01 22:20:45 +12:00
commit ac9d477112, parent 503bee80d6
15 changed files with 121 additions and 26 deletions
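
For illustration only (not part of this change): a rough sketch of how the new
admin-only endpoints could be exercised once deployed. The URL layout, port and
token handling are assumptions inferred from the controller code below, not
something this commit defines. Note that scale_down always keeps at least one
worker alive (the controller returns early when only one worker is left, and
the engine clamps the delete count).

    # Hypothetical usage sketch; endpoint, port and token are assumptions.
    import requests

    QINLING_API = 'http://127.0.0.1:7070/v1'      # assumed API endpoint
    HEADERS = {'X-Auth-Token': 'ADMIN_TOKEN'}      # admin credentials required
    function_id = 'an-existing-function-uuid'

    # Add two workers for the function (body validated by resources.ScaleInfo).
    requests.post('%s/functions/%s/scale_up' % (QINLING_API, function_id),
                  json={'count': 2}, headers=HEADERS)

    # Remove one worker; the engine never deletes the last remaining worker.
    requests.post('%s/functions/%s/scale_down' % (QINLING_API, function_id),
                  json={'count': 1}, headers=HEADERS)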


@@ -226,16 +226,45 @@ class FunctionsController(rest.RestController):
     @wsme_pecan.wsexpose(
         None,
         types.uuid,
+        body=resources.ScaleInfo,
         status_code=202
     )
-    def scale_up(self, id):
+    def scale_up(self, id, scale):
         """Scale up the containers for function execution.
 
-        This is admin only operation. The number of added containers is defined
-        in config file.
+        This is admin only operation. The load monitoring of function execution
+        depends on the monitoring solution of underlying orchestrator.
         """
         func_db = db_api.get_function(id)
+        params = scale.to_dict()
 
-        LOG.info('Starting to scale up function %s', id)
+        LOG.info('Starting to scale up function %s, params: %s', id, params)
 
-        self.engine_client.scaleup_function(id, runtime_id=func_db.runtime_id)
+        self.engine_client.scaleup_function(
+            id,
+            runtime_id=func_db.runtime_id,
+            count=params['count']
+        )
+
+    @rest_utils.wrap_wsme_controller_exception
+    @wsme_pecan.wsexpose(
+        None,
+        types.uuid,
+        body=resources.ScaleInfo,
+        status_code=202
+    )
+    def scale_down(self, id, scale):
+        """Scale down the containers for function execution.
+
+        This is admin only operation. The load monitoring of function execution
+        depends on the monitoring solution of underlying orchestrator.
+        """
+        func_db = db_api.get_function(id)
+        params = scale.to_dict()
+
+        if len(func_db.workers) <= 1:
+            LOG.info('No need to scale down function %s', id)
+            return
+
+        LOG.info('Starting to scale down function %s, params: %s', id, params)
+        self.engine_client.scaledown_function(id, count=params['count'])


@@ -358,3 +358,7 @@ class Jobs(ResourceList):
         )
 
         return sample
+
+
+class ScaleInfo(Resource):
+    count = wtypes.IntegerType(minimum=1)


@@ -135,11 +135,6 @@ kubernetes_opts = [
         default='127.0.0.1',
         help='Qinling API service ip address.'
     ),
-    cfg.IntOpt(
-        'scale_step',
-        default=1,
-        help='Number of pods for function scale up.'
-    ),
 ]
 
 CONF = cfg.CONF


@@ -166,8 +166,12 @@ def create_function_worker(values):
     return IMPL.create_function_worker(values)
 
 
-def delete_function_workers(id):
-    return IMPL.delete_function_workers(id)
+def delete_function_worker(name):
+    return IMPL.delete_function_worker(name)
+
+
+def delete_function_workers(function_id):
+    return IMPL.delete_function_workers(function_id)
 
 
 def get_function_workers(function_id):


@@ -420,8 +420,22 @@ def get_function_workers(function_id, session=None):
 @db_base.session_aware()
-def delete_function_workers(id, session=None):
-    workers = get_function_workers(id)
+def delete_function_worker(worker_name, session=None):
+    worker = db_base.model_query(
+        models.FunctionWorkers
+    ).filter_by(worker_name=worker_name).first()
+
+    if not worker:
+        raise exc.DBEntityNotFoundError(
+            "FunctionWorker not found [worker_name=%s]" % worker_name
+        )
+
+    session.delete(worker)
+
+
+@db_base.session_aware()
+def delete_function_workers(function_id, session=None):
+    workers = get_function_workers(function_id)
 
     for worker in workers:
         session.delete(worker)


@@ -119,11 +119,14 @@ class Job(model_base.QinlingSecureModelBase):
 Function.service = relationship(
     "FunctionServiceMapping",
     uselist=False,
+    lazy='subquery',
     cascade="all, delete-orphan"
 )
 
 # Delete workers automatically when deleting function.
 Function.workers = relationship(
     "FunctionWorkers",
+    order_by="FunctionWorkers.created_at",
+    lazy='subquery',
     cascade="all, delete-orphan"
 )


@@ -188,13 +188,14 @@ class DefaultEngine(object):
         LOG.info('Deleted.', resource=resource)
 
-    def scaleup_function(self, ctx, function_id, runtime_id):
+    def scaleup_function(self, ctx, function_id, runtime_id, count=1):
         function = db_api.get_function(function_id)
 
         worker_names = self.orchestrator.scaleup_function(
             function_id,
             identifier=runtime_id,
-            entry=function.entry
+            entry=function.entry,
+            count=count
         )
 
         with db_api.transaction():
@@ -206,3 +207,20 @@ class DefaultEngine(object):
                 db_api.create_function_worker(worker)
 
         LOG.info('Finished scaling up function %s.', function_id)
+
+    def scaledown_function(self, ctx, function_id, count=1):
+        func_db = db_api.get_function(function_id)
+        worker_deleted_num = (
+            count if len(func_db.workers) > count else len(func_db.workers) - 1
+        )
+        workers = func_db.workers[:worker_deleted_num]
+
+        with db_api.transaction():
+            for worker in workers:
+                LOG.debug('Removing worker %s', worker.worker_name)
+                self.orchestrator.delete_worker(
+                    worker.worker_name,
+                )
+                db_api.delete_function_worker(worker.worker_name)
+
+        LOG.info('Finished scaling down function %s.', function_id)
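
As a side note (not part of the diff): the worker_deleted_num expression above
never removes the last worker of a function. A tiny sketch of the arithmetic,
using a hypothetical helper name and made-up worker counts:

    # Sketch of the clamp used in scaledown_function; the helper name is
    # hypothetical and exists only for illustration.
    def workers_to_delete(existing, count):
        return count if existing > count else existing - 1

    assert workers_to_delete(existing=5, count=2) == 2  # normal case
    assert workers_to_delete(existing=3, count=3) == 2  # asked for all: keep one
    assert workers_to_delete(existing=2, count=5) == 1  # asked for more than exist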


@@ -54,6 +54,10 @@ class OrchestratorBase(object):
     def scaleup_function(self, function_id, **kwargs):
         raise NotImplementedError
 
+    @abc.abstractmethod
+    def delete_worker(self, worker_name, **kwargs):
+        raise NotImplementedError
+
 
 def load_orchestrator(conf):
     global ORCHESTRATOR


@@ -407,14 +407,14 @@ class KubernetesManager(base.OrchestratorBase):
             label_selector=selector
         )
 
-        LOG.info("Pod for function %s deleted.", function_id)
+        LOG.info("Pod(s) for function %s deleted.", function_id)
 
     def scaleup_function(self, function_id, identifier=None,
-                         entry='main.main'):
+                         entry='main.main', count=1):
         pod_names = []
         labels = {'runtime_id': identifier}
         pods = self._choose_available_pod(
-            labels, count=self.conf.kubernetes.scale_step
+            labels, count=count
         )
 
         if not pods:
@@ -446,3 +446,10 @@ class KubernetesManager(base.OrchestratorBase):
         LOG.info('Pods scaled up for function %s: %s', function_id, pod_names)
 
         return pod_names
+
+    def delete_worker(self, worker_name, **kwargs):
+        self.v1.delete_namespaced_pod(
+            worker_name,
+            self.conf.kubernetes.namespace,
+            {}
+        )


@@ -188,10 +188,20 @@ class EngineClient(object):
         )
 
     @wrap_messaging_exception
-    def scaleup_function(self, id, runtime_id):
+    def scaleup_function(self, id, runtime_id, count=1):
         return self._client.prepare(topic=self.topic, server=None).cast(
             ctx.get_ctx(),
             'scaleup_function',
             function_id=id,
             runtime_id=runtime_id,
+            count=count
+        )
+
+    @wrap_messaging_exception
+    def scaledown_function(self, id, count=1):
+        return self._client.prepare(topic=self.topic, server=None).cast(
+            ctx.get_ctx(),
+            'scaledown_function',
+            function_id=id,
+            count=count
         )


@@ -50,6 +50,9 @@ def handle_function_service_expiration(ctx, engine_client, orchestrator):
         return
 
     for func_db in results:
+        if not func_db.service:
+            continue
+
         with db_api.transaction():
             LOG.info(
                 'Deleting service mapping and workers for function %s',

qinling_tempest_plugin/pre_test_hook.sh (mode changed: Normal file → Executable file)

@@ -20,7 +20,9 @@ export localconf=$BASE/new/devstack/local.conf
 export QINLING_CONF=/etc/qinling/qinling.conf
 
 # Install k8s cluster
-bash $BASE/new/qinling/tools/gate/setup_gate.sh
+pushd $BASE/new/qinling/
+bash tools/gate/setup_gate.sh
+popd
 
-echo -e '[[post-config|$QINLING_CONF]]\n[kubernetes]\n' >> $localconf
-echo -e 'qinling_service_address=${DEFAULT_HOST_IP}\n' >> $localconf
+echo -e "[[post-config|$QINLING_CONF]]\n[kubernetes]\n" >> $localconf
+echo -e "qinling_service_address=${DEFAULT_HOST_IP}\n" >> $localconf


@@ -21,6 +21,7 @@ classifier =
 [files]
 packages =
     qinling
+    qinling_tempest_plugin
 
 [entry_points]
 console_scripts =
@@ -37,7 +38,7 @@ oslo.config.opts =
     qinling.config = qinling.config:list_opts
 
 tempest.test_plugins =
-    qinling_test = qinling_tempest_tests.plugin:QinlingTempestPlugin
+    qinling_test = qinling_tempest_plugin.plugin:QinlingTempestPlugin
 
 [build_sphinx]
 all-files = 1


@@ -20,7 +20,8 @@ function base_install {
             iptables \
             ipcalc \
             nmap \
-            lshw
+            lshw \
+            screen
     elif [ "x$HOST_OS" == "xcentos" ]; then
         sudo yum install -y \
             epel-release


@@ -39,5 +39,5 @@ bash ${WORK_DIR}/tools/gate/kubeadm_aio.sh
 
 # Starts a proxy to the Kubernetes API server in a screen session
 sudo screen -S kube_proxy -X quit || true
-sudo screen -dmS kube_proxy && screen -S kube_proxy -X screen -t kube_proxy
+sudo screen -dmS kube_proxy && sudo screen -S kube_proxy -X screen -t kube_proxy
 sudo screen -S kube_proxy -p kube_proxy -X stuff 'kubectl proxy --accept-hosts=".*" --address="0.0.0.0"\n'