Support webhook

- Allow users to create a webhook based on a function (see the invocation sketch below).
- A webhook can be invoked without authentication.
- A function that is still associated with a webhook can not be deleted.
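A rough sketch of the resulting flow, for orientation. The endpoint, token and ids below are placeholders, not values from this change: creating a webhook is an authenticated API call, while POSTing to the returned webhook_url needs no token and creates an asynchronous execution.

    import requests

    QINLING = 'http://192.168.33.1:7070'  # placeholder Qinling endpoint
    TOKEN = '<keystone-token>'            # only the management API needs auth

    # Authenticated call: create a webhook for an existing function.
    resp = requests.post(
        '%s/v1/webhooks' % QINLING,
        json={'function_id': '<function-uuid>', 'description': 'demo'},
        headers={'X-Auth-Token': TOKEN},
    )
    webhook_url = resp.json()['webhook_url']

    # Unauthenticated call: the form fields become the function input and
    # an async execution is created (HTTP 202 with the execution id).
    resp = requests.post(webhook_url, data={'name': 'qinling'})
    print(resp.json()['execution_id'])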

Another big change is that we are going to use minikube instead of the
kubernetes-aio scripts from the openstack-helm project.

Implements: blueprint qinling-function-webhook
Change-Id: I85e0b0f999f0d820bfacca9ac3b9af04e80df0d7
Lingxian Kong 2018-01-09 17:30:39 +13:00
parent 40b886d5a5
commit 3e35a4b7d5
59 changed files with 753 additions and 1979 deletions

View File

@@ -24,7 +24,7 @@ function install_k8s {
     source tools/gate/setup_gate.sh
     popd
-    # Pre-pull the default docker image for python runtime and image function
+    # Pre-fetch the default docker image for python runtime and image function
     # test.
     sudo docker pull $QINLING_PYTHON_RUNTIME_IMAGE
     sudo docker pull openstackqinling/alpine-test
@@ -76,11 +76,11 @@ function configure_qinling {
     iniset $QINLING_CONF_FILE DEFAULT server all
     iniset $QINLING_CONF_FILE DEFAULT logging_context_format_string "%(asctime)s %(process)d %(color)s %(levelname)s [%(request_id)s] %(message)s %(resource)s (%(name)s)"
     iniset $QINLING_CONF_FILE storage file_system_dir $QINLING_FUNCTION_STORAGE_DIR
-    iniset $QINLING_CONF_FILE kubernetes qinling_service_address $DEFAULT_HOST_IP

     # Setup keystone_authtoken section
     configure_auth_token_middleware $QINLING_CONF_FILE qinling $QINLING_AUTH_CACHE_DIR
     iniset $QINLING_CONF_FILE keystone_authtoken www_authenticate_uri $KEYSTONE_AUTH_URI_V3
+    iniset $QINLING_CONF_FILE keystone_authtoken region_name "$REGION_NAME"

     # Setup RabbitMQ credentials
     iniset_rpc_backend qinling $QINLING_CONF_FILE

View File

@@ -65,9 +65,9 @@ class ExecutionsController(rest.RestController):
     @rest_utils.wrap_wsme_controller_exception
     @wsme_pecan.wsexpose(resources.Executions, wtypes.text, bool, wtypes.text,
-                         wtypes.text)
+                         wtypes.text, wtypes.text)
     def get_all(self, function_id=None, all_projects=False, project_id=None,
-                status=None):
+                status=None, description=None):
         """Return a list of executions.

         :param function_id: Optional. Filtering executions by function_id.
@@ -75,6 +75,7 @@ class ExecutionsController(rest.RestController):
             resources, the param is ignored for normal user.
         :param all_projects: Optional. Get resources of all projects.
         :param status: Optional. Filter by execution status.
+        :param description: Optional. Filter by description.
         """
         ctx = context.get_ctx()
         if project_id and not ctx.is_admin:
@@ -89,6 +90,7 @@ class ExecutionsController(rest.RestController):
             function_id=function_id,
             project_id=project_id,
             status=status,
+            description=description
         )

         LOG.info("Get all %ss. filters=%s", self.type, filters)

View File

@@ -131,14 +131,13 @@ class FunctionsController(rest.RestController):
     @rest_utils.wrap_pecan_controller_exception
     @pecan.expose('json')
     def post(self, **kwargs):
-        LOG.info("Creating function, params: %s", kwargs)

         # When using image to create function, runtime_id is not a required
         # param.
         if not POST_REQUIRED.issubset(set(kwargs.keys())):
             raise exc.InputException(
                 'Required param is missing. Required: %s' % POST_REQUIRED
             )
+        LOG.info("Creating function, params: %s", kwargs)

         values = {
             'name': kwargs.get('name'),
@@ -241,6 +240,10 @@ class FunctionsController(rest.RestController):
             raise exc.NotAllowedException(
                 'The function is still associated with running job(s).'
             )
+        if func_db.webhook:
+            raise exc.NotAllowedException(
+                'The function is still associated with webhook.'
+            )

         # Even admin user can not delete other project's function because
         # the trust associated can only be removed by function owner.

View File

@@ -290,7 +290,7 @@ class Execution(Resource):
         obj = cls()

         for key, val in d.items():
-            if key == 'input' and val:
+            if key == 'input' and val is not None:
                 if val.get('__function_input'):
                     setattr(obj, key, val.get('__function_input'))
                 else:
@@ -393,3 +393,22 @@ class Jobs(ResourceList):

 class ScaleInfo(Resource):
     count = wtypes.IntegerType(minimum=1)
+
+
+class Webhook(Resource):
+    id = types.uuid
+    function_id = types.uuid
+    description = wtypes.text
+    project_id = wsme.wsattr(wtypes.text, readonly=True)
+    created_at = wsme.wsattr(wtypes.text, readonly=True)
+    updated_at = wsme.wsattr(wtypes.text, readonly=True)
+    webhook_url = wsme.wsattr(wtypes.text, readonly=True)
+
+
+class Webhooks(ResourceList):
+    webhooks = [Webhook]
+
+    def __init__(self, **kwargs):
+        self._type = 'webhooks'
+
+        super(Webhooks, self).__init__(**kwargs)
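For orientation, a webhook rendered through these resource classes serializes to roughly the following JSON. All values here are invented for illustration; webhook_url is generated per request rather than stored.

    # Hypothetical body of GET /v1/webhooks/<id>; ids and timestamps made up.
    sample_webhook = {
        'id': '6b7aa993-8f0d-4bdd-b321-4a0d4d0c5d3a',
        'function_id': '5d0bdfb0-07c4-44a9-9d3e-ef4c0a42b6b5',
        'description': 'trigger for CI events',
        'project_id': 'default',
        'created_at': '2018-01-09 04:30:39',
        'updated_at': None,
        'webhook_url': 'http://192.168.33.1:7070/v1/webhooks/'
                       '6b7aa993-8f0d-4bdd-b321-4a0d4d0c5d3a/invoke',
    }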

View File

@@ -20,6 +20,7 @@ from qinling.api.controllers.v1 import function
 from qinling.api.controllers.v1 import job
 from qinling.api.controllers.v1 import resources
 from qinling.api.controllers.v1 import runtime
+from qinling.api.controllers.v1 import webhook


 class RootResource(resources.Resource):
@@ -36,6 +37,7 @@ class Controller(object):
     runtimes = runtime.RuntimesController()
     executions = execution.ExecutionsController()
     jobs = job.JobsController()
+    webhooks = webhook.WebhooksController()

     @wsme_pecan.wsexpose(RootResource)
     def index(self):

View File

@@ -0,0 +1,175 @@
# Copyright 2018 Catalyst IT Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import copy
import json

from oslo_log import log as logging
import pecan
from pecan import rest
import wsmeext.pecan as wsme_pecan

from qinling.api import access_control as acl
from qinling.api.controllers.v1 import resources
from qinling.api.controllers.v1 import types
from qinling import context
from qinling.db import api as db_api
from qinling import exceptions as exc
from qinling import rpc
from qinling.utils import constants
from qinling.utils import executions
from qinling.utils.openstack import keystone as keystone_utils
from qinling.utils import rest_utils

LOG = logging.getLogger(__name__)

POST_REQUIRED = set(['function_id'])
UPDATE_ALLOWED = set(['function_id', 'description'])


class WebhooksController(rest.RestController):
    _custom_actions = {
        'invoke': ['POST'],
    }

    def __init__(self, *args, **kwargs):
        self.type = 'webhook'
        self.engine_client = rpc.get_engine_client()
        self.qinling_endpoint = keystone_utils.get_qinling_endpoint()

        super(WebhooksController, self).__init__(*args, **kwargs)

    def _add_webhook_url(self, id, webhook):
        """Add webhook_url attribute for webhook.

        We generate the url dynamically in case the service url is changing.
        """
        res = copy.deepcopy(webhook)
        url = '/'.join(
            [self.qinling_endpoint.strip('/'), constants.CURRENT_VERSION,
             'webhooks', id, 'invoke']
        )
        res.update({'webhook_url': url})
        return res

    @rest_utils.wrap_wsme_controller_exception
    @wsme_pecan.wsexpose(resources.Webhook, types.uuid)
    def get(self, id):
        LOG.info("Get %s %s.", self.type, id)
        webhook = db_api.get_webhook(id).to_dict()
        return resources.Webhook.from_dict(self._add_webhook_url(id, webhook))

    @rest_utils.wrap_wsme_controller_exception
    @wsme_pecan.wsexpose(resources.Webhooks)
    def get_all(self):
        LOG.info("Get all %ss.", self.type)

        webhooks = []
        for i in db_api.get_webhooks():
            webhooks.append(
                resources.Webhook.from_dict(
                    self._add_webhook_url(i.id, i.to_dict())
                )
            )

        return resources.Webhooks(webhooks=webhooks)

    @rest_utils.wrap_wsme_controller_exception
    @wsme_pecan.wsexpose(
        resources.Webhook,
        body=resources.Webhook,
        status_code=201
    )
    def post(self, webhook):
        acl.enforce('webhook:create', context.get_ctx())

        params = webhook.to_dict()
        if not POST_REQUIRED.issubset(set(params.keys())):
            raise exc.InputException(
                'Required param is missing. Required: %s' % POST_REQUIRED
            )
        LOG.info("Creating %s, params: %s", self.type, params)

        # Even admin user can not expose normal user's function
        db_api.get_function(params['function_id'], insecure=False)

        webhook_d = db_api.create_webhook(params).to_dict()

        return resources.Webhook.from_dict(
            self._add_webhook_url(webhook_d['id'], webhook_d)
        )

    @rest_utils.wrap_wsme_controller_exception
    @wsme_pecan.wsexpose(None, types.uuid, status_code=204)
    def delete(self, id):
        acl.enforce('webhook:delete', context.get_ctx())
        LOG.info("Delete %s %s.", self.type, id)
        db_api.delete_webhook(id)

    @rest_utils.wrap_wsme_controller_exception
    @wsme_pecan.wsexpose(
        resources.Webhook,
        types.uuid,
        body=resources.Webhook
    )
    def put(self, id, webhook):
        """Update webhook.

        Currently, we only support update function_id.
        """
        acl.enforce('webhook:update', context.get_ctx())

        values = {}
        for key in UPDATE_ALLOWED:
            if webhook.to_dict().get(key) is not None:
                values.update({key: webhook.to_dict()[key]})
        LOG.info('Update %s %s, params: %s', self.type, id, values)

        if 'function_id' in values:
            # Even admin user can not expose normal user's function
            db_api.get_function(values['function_id'], insecure=False)

        webhook = db_api.update_webhook(id, values).to_dict()
        return resources.Webhook.from_dict(self._add_webhook_url(id, webhook))

    @rest_utils.wrap_pecan_controller_exception
    @pecan.expose('json')
    def invoke(self, id, **kwargs):
        with db_api.transaction():
            # The webhook url can be accessed without authentication, so
            # insecure is used here
            webhook_db = db_api.get_webhook(id, insecure=True)
            function_db = webhook_db.function
            trust_id = function_db.trust_id
            project_id = function_db.project_id

            LOG.info(
                'Invoking function %s by webhook %s',
                webhook_db.function_id, id
            )

            # Setup user context
            ctx = keystone_utils.create_trust_context(trust_id, project_id)
            context.set_ctx(ctx)

            params = {
                'function_id': webhook_db.function_id,
                'sync': False,
                'input': json.dumps(kwargs),
                'description': constants.EXECUTION_BY_WEBHOOK % id
            }
            execution = executions.create_execution(self.engine_client, params)
            pecan.response.status = 202

            return {'execution_id': execution.id}
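An execution created this way only records its webhook in the description (EXECUTION_BY_WEBHOOK above), so callers can find it again by filtering executions on that description, as the tempest test in this change does. A hedged sketch using plain requests; endpoint, token and id are placeholders:

    import requests

    QINLING = 'http://192.168.33.1:7070'   # placeholder endpoint
    TOKEN = '<keystone-token>'             # placeholder token
    webhook_id = '<webhook-uuid>'          # placeholder id

    # 'has:<webhook_id>' matches executions whose description contains the
    # webhook id, i.e. 'Created by Webhook <id>'.
    resp = requests.get(
        '%s/v1/executions' % QINLING,
        params={'description': 'has:%s' % webhook_id},
        headers={'X-Auth-Token': TOKEN},
    )
    print([e['id'] for e in resp.json()['executions']])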

View File

@@ -26,6 +26,13 @@ launch_opt = cfg.ListOpt(
     help='Specifies which qinling server to start by the launch script.'
 )

+default_opts = [
+    cfg.StrOpt(
+        'qinling_endpoint',
+        help='Qinling service endpoint.'
+    ),
+]
+
 API_GROUP = 'api'
 api_opts = [
     cfg.StrOpt('host', default='0.0.0.0', help='Qinling API server host.'),
@@ -139,11 +146,6 @@ kubernetes_opts = [
         help='Kubernetes server address, e.g. you can start a proxy to the '
              'Kubernetes API server by using "kubectl proxy" command.'
     ),
-    cfg.IPOpt(
-        'qinling_service_address',
-        default='127.0.0.1',
-        help='Qinling API service ip address.'
-    ),
     cfg.StrOpt(
         'log_devel',
         default='INFO',
@@ -172,7 +174,8 @@ def list_opts():
         (STORAGE_GROUP, storage_opts),
         (KUBERNETES_GROUP, kubernetes_opts),
         (ETCD_GROUP, etcd_opts),
-        (None, [launch_opt])
+        (None, [launch_opt]),
+        (None, default_opts),
     ]

     return keystone_middleware_opts + keystone_loading_opts + qinling_opts

View File

@@ -11,6 +11,7 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+import re

 from oslo_config import cfg
 from oslo_context import context as oslo_context
@@ -22,6 +23,7 @@ from qinling.utils import thread_local
 CONF = cfg.CONF

 ALLOWED_WITHOUT_AUTH = ['/', '/v1/']
+WEBHOOK_REG = '^/v1/webhooks/[a-f0-9-]+/invoke$'
 CTX_THREAD_LOCAL_NAME = "QINLING_APP_CTX_THREAD_LOCAL"
 DEFAULT_PROJECT_ID = "default"
@@ -46,10 +48,11 @@ def authenticate(req):
 class AuthHook(hooks.PecanHook):
     def before(self, state):
-        if not CONF.pecan.auth_enable:
-            return
         if state.request.path in ALLOWED_WITHOUT_AUTH:
             return
+        if re.search(WEBHOOK_REG, state.request.path):
+            return
+        if not CONF.pecan.auth_enable:
             return

         try:
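To make the bypass concrete: only the invoke URL of a specific webhook matches the pattern, so webhook management calls still go through authentication. A standalone check of the regex above (the ids are invented):

    import re

    WEBHOOK_REG = '^/v1/webhooks/[a-f0-9-]+/invoke$'

    # Unauthenticated: the invoke endpoint of a specific webhook.
    assert re.search(WEBHOOK_REG, '/v1/webhooks/3e35a4b7-40b8-86d5-a5f0/invoke')
    # Still authenticated: listing, creating, updating or deleting webhooks.
    assert not re.search(WEBHOOK_REG, '/v1/webhooks')
    assert not re.search(WEBHOOK_REG, '/v1/webhooks/3e35a4b7-40b8-86d5-a5f0')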

View File

@@ -57,6 +57,7 @@ def transaction():
 def delete_all():
     """A helper function for testing."""
     delete_jobs(insecure=True)
+    delete_webhooks(insecure=True)
     delete_executions(insecure=True)
     delete_functions(insecure=True)
     delete_runtimes(insecure=True)
@@ -69,7 +70,7 @@ def conditional_update(model, values, expected_values, **kwargs):
 def get_function(id, insecure=None):
     """Get function from db.

-    'insecure' param is needed for job handler.
+    'insecure' param is needed for job handler and webhook.
     """
     return IMPL.get_function(id, insecure=insecure)

@@ -176,3 +177,27 @@ def get_jobs():

 def delete_jobs(**kwargs):
     return IMPL.delete_jobs(**kwargs)
+
+
+def create_webhook(values):
+    return IMPL.create_webhook(values)
+
+
+def get_webhook(id, insecure=None):
+    return IMPL.get_webhook(id, insecure=insecure)
+
+
+def get_webhooks():
+    return IMPL.get_webhooks()
+
+
+def delete_webhook(id):
+    return IMPL.delete_webhook(id)
+
+
+def update_webhook(id, values):
+    return IMPL.update_webhook(id, values)
+
+
+def delete_webhooks(**kwargs):
+    return IMPL.delete_webhooks(**kwargs)

View File

@@ -155,7 +155,6 @@ def _get_collection(model, insecure=False, limit=None, marker=None,
     query = (db_base.model_query(model, columns) if insecure
              else _secure_query(model, *columns))
     query = db_filters.apply_filters(query, model, **filters)
-
     query = _paginate_query(
         model,
         limit,
@@ -441,3 +440,54 @@ def get_jobs(session=None, **kwargs):
 @db_base.session_aware()
 def delete_jobs(session=None, insecure=None, **kwargs):
     return _delete_all(models.Job, insecure=insecure, **kwargs)
+
+
+@db_base.session_aware()
+def create_webhook(values, session=None):
+    webhook = models.Webhook()
+    webhook.update(values.copy())
+
+    try:
+        webhook.save(session=session)
+    except oslo_db_exc.DBDuplicateEntry as e:
+        raise exc.DBError(
+            "Duplicate entry for webhook: %s" % e.columns
+        )
+
+    return webhook
+
+
+@db_base.insecure_aware()
+@db_base.session_aware()
+def get_webhook(id, insecure=None, session=None):
+    webhook = _get_db_object_by_id(models.Webhook, id, insecure=insecure)
+    if not webhook:
+        raise exc.DBEntityNotFoundError("Webhook not found [id=%s]" % id)
+
+    return webhook
+
+
+@db_base.session_aware()
+def get_webhooks(session=None, **kwargs):
+    return _get_collection_sorted_by_time(models.Webhook, **kwargs)
+
+
+@db_base.session_aware()
+def delete_webhook(id, session=None):
+    webhook = get_webhook(id)
+    session.delete(webhook)
+
+
+@db_base.session_aware()
+def update_webhook(id, values, session=None):
+    webhook = get_webhook(id)
+    webhook.update(values.copy())
+
+    return webhook
+
+
+@db_base.insecure_aware()
+@db_base.session_aware()
+def delete_webhooks(session=None, insecure=None, **kwargs):
+    return _delete_all(models.Webhook, insecure=insecure, **kwargs)

View File

@@ -114,3 +114,15 @@ def upgrade():
         sa.ForeignKeyConstraint(['function_id'], [u'functions.id']),
         info={"check_ifexists": True}
     )
+
+    op.create_table(
+        'webhooks',
+        sa.Column('created_at', sa.DateTime(), nullable=True),
+        sa.Column('updated_at', sa.DateTime(), nullable=True),
+        sa.Column('project_id', sa.String(length=80), nullable=False),
+        sa.Column('id', sa.String(length=36), nullable=False),
+        sa.Column('description', sa.String(length=255), nullable=True),
+        sa.Column('function_id', sa.String(length=36), nullable=False),
+        sa.PrimaryKeyConstraint('id'),
+        info={"check_ifexists": True}
+    )

View File

@@ -85,9 +85,19 @@ class Job(model_base.QinlingSecureModelBase):
         return d


+class Webhook(model_base.QinlingSecureModelBase):
+    __tablename__ = 'webhooks'
+
+    function_id = sa.Column(
+        sa.String(36),
+        sa.ForeignKey(Function.id)
+    )
+    description = sa.Column(sa.String(255))
+
+
 Runtime.functions = relationship("Function", back_populates="runtime")

-# Only get jobs
+# Only get running jobs
 Function.jobs = relationship(
     "Job",
     back_populates="function",
@@ -96,3 +106,4 @@ Function.jobs = relationship(
         "~Job.status.in_(['done', 'cancelled']))"
     )
 )
+Function.webhook = relationship("Webhook", uselist=False, backref="function")
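With uselist=False the relationship is scalar: for a function row, func_db.webhook is either None or a single Webhook object, which is exactly what the new delete guard in FunctionsController checks. A small sketch of that usage (the id is a placeholder):

    from qinling.db import api as db_api

    func_db = db_api.get_function('<function-uuid>')  # placeholder id
    if func_db.webhook is not None:
        # Deletion is refused while this reference exists.
        print('function is referenced by webhook %s' % func_db.webhook.id)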

View File

@@ -28,8 +28,9 @@ CONF = cfg.CONF


 class DefaultEngine(object):
-    def __init__(self, orchestrator):
+    def __init__(self, orchestrator, qinling_endpoint):
         self.orchestrator = orchestrator
+        self.qinling_endpoint = qinling_endpoint
         self.session = requests.Session()

     def create_runtime(self, ctx, runtime_id):
@@ -142,7 +143,8 @@ class DefaultEngine(object):
             data = utils.get_request_data(
                 CONF, function_id, execution_id,
-                input, function.entry, function.trust_id
+                input, function.entry, function.trust_id,
+                self.qinling_endpoint
             )
             success, res = utils.url_request(
                 self.session, func_url, body=data

View File

@@ -23,6 +23,7 @@ from qinling.engine import default_engine as engine
 from qinling.orchestrator import base as orchestra_base
 from qinling import rpc
 from qinling.services import periodics
+from qinling.utils.openstack import keystone as keystone_utils

 LOG = logging.getLogger(__name__)
 CONF = cfg.CONF
@@ -34,14 +35,15 @@ class EngineService(cotyledon.Service):
         self.server = None

     def run(self):
-        orchestrator = orchestra_base.load_orchestrator(CONF)
+        qinling_endpoint = keystone_utils.get_qinling_endpoint()
+        orchestrator = orchestra_base.load_orchestrator(CONF, qinling_endpoint)
         db_api.setup_db()

         topic = CONF.engine.topic
         server = CONF.engine.host
         transport = messaging.get_rpc_transport(CONF)
         target = messaging.Target(topic=topic, server=server, fanout=False)
-        endpoint = engine.DefaultEngine(orchestrator)
+        endpoint = engine.DefaultEngine(orchestrator, qinling_endpoint)
         access_policy = dispatcher.DefaultRPCAccessPolicy
         self.server = messaging.get_rpc_server(
             transport,

View File

@@ -19,6 +19,7 @@ import six
 import tenacity

 from qinling import context
+from qinling.utils import constants

 LOG = logging.getLogger(__name__)

@@ -33,10 +34,11 @@ def url_request(request_session, url, body=None):
         temp[-1] = 'ping'
         ping_url = '/'.join(temp)
         r = tenacity.Retrying(
-            wait=tenacity.wait_fixed(0.5),
-            stop=tenacity.stop_after_attempt(5),
-            retry=tenacity.retry_if_exception_type(IOError))
-        r.call(request_session.get, ping_url, timeout=(3, 3))
+            wait=tenacity.wait_fixed(1),
+            stop=tenacity.stop_after_attempt(30),
+            retry=tenacity.retry_if_exception_type(IOError)
+        )
+        r.call(request_session.get, ping_url, timeout=(3, 3), verify=False)
     except Exception as e:
         LOG.exception(
             "Failed to request url %s, error: %s", ping_url, str(e)
@@ -44,16 +46,24 @@ def url_request(request_session, url, body=None):
         return False, {'error': 'Function execution failed.'}

     for a in six.moves.xrange(10):
+        res = None
         try:
             # Default execution max duration is 3min, could be configurable
-            r = request_session.post(url, json=body, timeout=(3, 180))
-            return True, r.json()
+            res = request_session.post(
+                url, json=body, timeout=(3, 180), verify=False
+            )
+            return True, res.json()
         except requests.ConnectionError as e:
             exception = e
-            # NOTE(kong): Could be configurable
             time.sleep(1)
         except Exception as e:
-            LOG.exception("Failed to request url %s, error: %s", url, str(e))
+            LOG.exception(
+                "Failed to request url %s, error: %s", url, str(e)
+            )
+            if res:
+                LOG.error("Response status: %s, content: %s",
+                          res.status_code, res.content)
             return False, {'error': 'Function execution timeout.'}

     LOG.exception("Could not connect to function service. Reason: %s",
@@ -62,13 +72,12 @@ def url_request(request_session, url, body=None):
     return False, {'error': 'Internal service error.'}


-def get_request_data(conf, function_id, execution_id, input, entry, trust_id):
+def get_request_data(conf, function_id, execution_id, input, entry, trust_id,
+                     qinling_endpoint):
     ctx = context.get_ctx()
     download_url = (
-        'http://%s:%s/v1/functions/%s?download=true' %
-        (conf.kubernetes.qinling_service_address,
-         conf.api.port, function_id)
+        '%s/%s/functions/%s?download=true' %
+        (qinling_endpoint.strip('/'), constants.CURRENT_VERSION, function_id)
     )
     data = {
         'execution_id': execution_id,
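In other words, the package download URL is now derived from the resolved service endpoint instead of a host/port pair from config. A quick illustration with made-up values (CURRENT_VERSION is 'v1', per qinling/utils/constants.py):

    # Hypothetical endpoint and function id, for illustration only.
    qinling_endpoint = 'http://192.168.33.1:7070/'
    function_id = '5d0bdfb0-07c4-44a9-9d3e-ef4c0a42b6b5'

    download_url = (
        '%s/%s/functions/%s?download=true' %
        (qinling_endpoint.strip('/'), 'v1', function_id)
    )
    # -> http://192.168.33.1:7070/v1/functions/5d0bdfb0-07c4-44a9-9d3e-ef4c0a42b6b5?download=true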

View File

@@ -59,7 +59,7 @@ class OrchestratorBase(object):
         raise NotImplementedError


-def load_orchestrator(conf):
+def load_orchestrator(conf, qinling_endpoint):
     global ORCHESTRATOR

     if not ORCHESTRATOR:
@@ -67,7 +67,7 @@ def load_orchestrator(conf):
             mgr = driver.DriverManager('qinling.orchestrator',
                                        conf.engine.orchestrator,
                                        invoke_on_load=True,
-                                       invoke_args=[conf])
+                                       invoke_args=[conf, qinling_endpoint])

             ORCHESTRATOR = mgr.driver
         except Exception as e:

View File

@@ -35,8 +35,9 @@ TEMPLATES_DIR = (os.path.dirname(os.path.realpath(__file__)) + '/templates/')


 class KubernetesManager(base.OrchestratorBase):
-    def __init__(self, conf):
+    def __init__(self, conf, qinling_endpoint):
         self.conf = conf
+        self.qinling_endpoint = qinling_endpoint

         clients = k8s_util.get_k8s_clients(self.conf)
         self.v1 = clients['v1']
@@ -125,7 +126,6 @@ class KubernetesManager(base.OrchestratorBase):
     def delete_pool(self, name, labels=None):
         """Delete all resources belong to the deployment."""
         LOG.info("Deleting deployment %s", name)
-
         selector = common.convert_dict_to_string(labels)
@@ -134,7 +134,6 @@ class KubernetesManager(base.OrchestratorBase):
             self.conf.kubernetes.namespace,
             label_selector=selector
         )
-
         LOG.info("ReplicaSets in deployment %s deleted.", name)

         ret = self.v1.list_namespaced_service(
@@ -146,7 +145,6 @@ class KubernetesManager(base.OrchestratorBase):
                 svc_name,
                 self.conf.kubernetes.namespace,
             )
-
         LOG.info("Services in deployment %s deleted.", name)

         self.v1extention.delete_collection_namespaced_deployment(
@@ -154,14 +152,12 @@ class KubernetesManager(base.OrchestratorBase):
             label_selector=selector,
             field_selector='metadata.name=%s' % name
         )
-
         # Should delete pods after deleting deployment to avoid pods are
         # recreated by k8s.
         self.v1.delete_collection_namespaced_pod(
             self.conf.kubernetes.namespace,
             label_selector=selector
         )
-
         LOG.info("Pods in deployment %s deleted.", name)
         LOG.info("Deployment %s deleted.", name)
@@ -398,7 +394,8 @@ class KubernetesManager(base.OrchestratorBase):
         if service_url:
             func_url = '%s/execute' % service_url
             data = utils.get_request_data(
-                self.conf, function_id, execution_id, input, entry, trust_id
+                self.conf, function_id, execution_id, input, entry, trust_id,
+                self.qinling_endpoint
             )
             LOG.debug(
                 'Invoke function %s, url: %s, data: %s',

View File

@@ -20,6 +20,7 @@ spec:
         {{ key }}: {{ value }}
         {% endfor %}
     spec:
+      terminationGracePeriodSeconds: 0
      containers:
        - name: {{ container_name }}
          image: {{ image }}

View File

@@ -7,6 +7,7 @@ metadata:
     {{ key }}: {{ value }}
     {% endfor %}
 spec:
+  terminationGracePeriodSeconds: 0
  containers:
    - name: {{ pod_name }}
      image: {{ pod_image }}

View File

@@ -158,3 +158,16 @@ class TestFunctionController(base.APITest):
         )

         self.assertEqual(403, resp.status_int)
+
+    def test_delete_with_webhook(self):
+        db_func = self.create_function(
+            runtime_id=self.runtime_id, prefix=TEST_CASE_NAME
+        )
+        self.create_webhook(function_id=db_func.id, prefix=TEST_CASE_NAME)
+
+        resp = self.app.delete(
+            '/v1/functions/%s' % db_func.id,
+            expect_errors=True
+        )
+
+        self.assertEqual(403, resp.status_int)

View File

@@ -0,0 +1,69 @@
# Copyright 2018 Catalyst IT Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from qinling.tests.unit.api import base

TEST_CASE_NAME = 'TestWebhookController'


class TestWebhookController(base.APITest):
    def setUp(self):
        super(TestWebhookController, self).setUp()
        db_func = self.create_function(prefix=TEST_CASE_NAME)
        self.func_id = db_func.id

    def test_crud(self):
        # Create
        body = {
            'function_id': self.func_id,
            'description': 'webhook test'
        }
        resp = self.app.post_json('/v1/webhooks', body)
        self.assertEqual(201, resp.status_int)
        webhook_id = resp.json.get('id')
        self.assertIn(self.qinling_endpoint, resp.json.get('webhook_url'))

        # Get
        resp = self.app.get('/v1/webhooks/%s' % webhook_id)
        self.assertEqual(200, resp.status_int)
        self._assertDictContainsSubset(resp.json, body)

        # List
        resp = self.app.get('/v1/webhooks')
        self.assertEqual(200, resp.status_int)
        actual = self._assert_single_item(
            resp.json['webhooks'], id=webhook_id
        )
        self._assertDictContainsSubset(actual, body)

        # Update
        resp = self.app.put_json(
            '/v1/webhooks/%s' % webhook_id,
            {'description': 'webhook test update'}
        )
        self.assertEqual(200, resp.status_int)

        expected = {
            'function_id': self.func_id,
            'description': 'webhook test update'
        }
        resp = self.app.get('/v1/webhooks/%s' % webhook_id)
        self.assertEqual(200, resp.status_int)
        self._assertDictContainsSubset(resp.json, expected)

        # Delete
        resp = self.app.delete('/v1/webhooks/%s' % webhook_id)
        self.assertEqual(204, resp.status_int)
        resp = self.app.get('/v1/webhooks/%s' % webhook_id, expect_errors=True)
        self.assertEqual(404, resp.status_int)

View File

@@ -137,10 +137,13 @@ class DbTestCase(BaseTest):
             (config.STORAGE_GROUP, config.storage_opts),
             (config.KUBERNETES_GROUP, config.kubernetes_opts),
             (config.ETCD_GROUP, config.etcd_opts),
-            (None, [config.launch_opt])
+            (None, [config.launch_opt]),
+            (None, config.default_opts)
         ]
         for group, options in qinling_opts:
             cfg.CONF.register_opts(list(options), group)
+        cls.qinling_endpoint = 'http://127.0.0.1:7070/'
+        cfg.CONF.set_default('qinling_endpoint', cls.qinling_endpoint)

         db_api.setup_db()
@@ -200,11 +203,24 @@ class DbTestCase(BaseTest):
         job_params = {
             'name': self.rand_name('job', prefix=prefix),
             'function_id': function_id,
-            # 'auth_enable' is disabled by default, we create runtime for
-            # default tenant.
+            # 'auth_enable' is disabled by default
             'project_id': DEFAULT_PROJECT_ID,
         }
         job_params.update(kwargs)
         job = db_api.create_job(job_params)

         return job
+
+    def create_webhook(self, function_id=None, prefix=None, **kwargs):
+        if not function_id:
+            function_id = self.create_function(prefix=prefix).id
+
+        webhook_params = {
+            'function_id': function_id,
+            # 'auth_enable' is disabled by default
+            'project_id': DEFAULT_PROJECT_ID,
+        }
+        webhook_params.update(kwargs)
+        webhook = db_api.create_webhook(webhook_params)
+
+        return webhook

View File

@@ -50,7 +50,7 @@ class TestPeriodics(base.DbTestCase):
         mock_k8s = mock.Mock()
         mock_etcd_url.return_value = 'http://localhost:37718'
         self.override_config('function_service_expiration', 1, 'engine')
-        engine = default_engine.DefaultEngine(mock_k8s)
+        engine = default_engine.DefaultEngine(mock_k8s, CONF.qinling_endpoint)
         periodics.handle_function_service_expiration(self.ctx, engine)

         self.assertEqual(1, mock_k8s.delete_function.call_count)

View File

@@ -12,6 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 import functools
+import pdb
 import sys
 import warnings

@@ -104,3 +105,20 @@ def disable_ssl_warnings(func):
         return func(*args, **kwargs)

     return wrapper
+
+
+class ForkedPdb(pdb.Pdb):
+    """A Pdb subclass that may be used from a forked multiprocessing child.
+
+    Usage:
+        from qinling.utils import common
+        common.ForkedPdb().set_trace()
+    """
+
+    def interaction(self, *args, **kwargs):
+        _stdin = sys.stdin
+        try:
+            sys.stdin = file('/dev/stdin')
+            pdb.Pdb.interaction(self, *args, **kwargs)
+        finally:
+            sys.stdin = _stdin

View File

@@ -11,8 +11,10 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+CURRENT_VERSION = 'v1'

 EXECUTION_BY_JOB = 'Created by Job %s'
+EXECUTION_BY_WEBHOOK = 'Created by Webhook %s'

 PERIODIC_JOB_HANDLER = 'job_handler'
 PERIODIC_FUNC_MAPPING_HANDLER = 'function_mapping_handler'

View File

@@ -138,3 +138,25 @@ def create_trust_context(trust_id, project_id):
         auth_token=None,
         is_admin=True
     )
+
+
+def get_qinling_endpoint():
+    '''Get Qinling service endpoint.'''
+    if CONF.qinling_endpoint:
+        return CONF.qinling_endpoint
+
+    region = CONF.keystone_authtoken.region_name
+    auth = v3.Password(
+        auth_url=CONF.keystone_authtoken.www_authenticate_uri,
+        username=CONF.keystone_authtoken.username,
+        password=CONF.keystone_authtoken.password,
+        project_name=CONF.keystone_authtoken.project_name,
+        user_domain_name=CONF.keystone_authtoken.user_domain_name,
+        project_domain_name=CONF.keystone_authtoken.project_domain_name,
+    )
+    sess = session.Session(auth=auth, verify=False)
+    endpoint = sess.get_endpoint(service_type='function-engine',
+                                 interface='public',
+                                 region_name=region)
+
+    return endpoint
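The precedence is simple: an operator-set [DEFAULT] qinling_endpoint short-circuits the Keystone catalog lookup. A trivial standalone restatement of that logic (function name and values are illustrative, not part of this change):

    def resolve_endpoint(configured, catalog_lookup):
        # Configured value wins; otherwise fall back to the service catalog.
        return configured if configured else catalog_lookup()

    assert resolve_endpoint('http://10.0.0.5:7070',
                            lambda: 'unused') == 'http://10.0.0.5:7070'
    assert resolve_endpoint(None,
                            lambda: 'http://from-catalog:7070') == 'http://from-catalog:7070'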

View File

@@ -14,16 +14,24 @@
 import json

+import six
 from tempest.lib.common import rest_client

+urlparse = six.moves.urllib.parse
+

 class QinlingClientBase(rest_client.RestClient):
     def __init__(self, auth_provider, **kwargs):
         super(QinlingClientBase, self).__init__(auth_provider, **kwargs)

-    def get_list_objs(self, obj):
-        resp, body = self.get('/v1/%s' % obj)
+    def get_list_objs(self, obj, params=None):
+        url = '/v1/%s' % obj
+        query_string = ("?%s" % urlparse.urlencode(list(params.items()))
+                        if params else "")
+        url += query_string
+
+        resp, body = self.get(url)

         return resp, json.loads(body)

     def delete_obj(self, obj, id):

View File

@@ -38,8 +38,8 @@ class QinlingClient(client_base.QinlingClientBase):

         return resp, body

-    def get_resources(self, res):
-        resp, body = self.get_list_objs(res)
+    def get_resources(self, res, params=None):
+        resp, body = self.get_list_objs(res, params=params)

         return resp, body

@@ -102,3 +102,8 @@ class QinlingClient(client_base.QinlingClientBase):

     def get_function_workers(self, function_id):
         return self.get_resources('functions/%s/workers' % function_id)
+
+    def create_webhook(self, function_id):
+        req_body = {"function_id": function_id}
+        resp, body = self.post_json('webhooks', req_body)
+        return resp, body

View File

@@ -89,27 +89,31 @@ class ExecutionsTest(base.BaseQinlingTest):
         resp, body = self.client.create_execution(self.function_id,
                                                   input='{"name": "Qinling"}')
         self.assertEqual(201, resp.status)
-        execution_id = body['id']
+        execution_id_1 = body['id']
         self.addCleanup(self.client.delete_resource, 'executions',
-                        execution_id, ignore_notfound=True)
+                        execution_id_1, ignore_notfound=True)
+        self.assertEqual('success', body['status'])
+
+        # Create another execution without input
+        resp, body = self.client.create_execution(self.function_id)
+        self.assertEqual(201, resp.status)
+        execution_id_2 = body['id']
+        self.addCleanup(self.client.delete_resource, 'executions',
+                        execution_id_2, ignore_notfound=True)
         self.assertEqual('success', body['status'])

         # Get executions
         resp, body = self.client.get_resources('executions')
         self.assertEqual(200, resp.status)
-        self.assertIn(
-            execution_id,
-            [execution['id'] for execution in body['executions']]
-        )
+        expected = {execution_id_1, execution_id_2}
+        actual = set([execution['id'] for execution in body['executions']])
+        self.assertTrue(expected.issubset(actual))

-        # Delete execution
-        resp = self.client.delete_resource('executions', execution_id)
+        # Delete executions
+        resp = self.client.delete_resource('executions', execution_id_1)
+        self.assertEqual(204, resp.status)
+
+        resp = self.client.delete_resource('executions', execution_id_2)
         self.assertEqual(204, resp.status)

     @decorators.idempotent_id('2199d1e6-de7d-4345-8745-a8184d6022b1')

View File

@@ -0,0 +1,101 @@
# Copyright 2018 Catalyst IT Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import pkg_resources
import tempfile
import zipfile

import requests
from tempest.lib.common.utils import data_utils
from tempest.lib import decorators

from qinling_tempest_plugin.tests import base


class WebhooksTest(base.BaseQinlingTest):
    name_prefix = 'WebhooksTest'

    @classmethod
    def resource_setup(cls):
        super(WebhooksTest, cls).resource_setup()

        cls.runtime_id = None

        name = data_utils.rand_name('runtime', prefix=cls.name_prefix)
        _, body = cls.admin_client.create_runtime(
            'openstackqinling/python-runtime', name
        )
        cls.runtime_id = body['id']

    @classmethod
    def resource_cleanup(cls):
        if cls.runtime_id:
            cls.admin_client.delete_resource('runtimes', cls.runtime_id,
                                             ignore_notfound=True)

        super(WebhooksTest, cls).resource_cleanup()

    def setUp(self):
        super(WebhooksTest, self).setUp()
        self.await_runtime_available(self.runtime_id)
        self._create_function()

    def _create_function(self, name='python_test.py'):
        python_file_path = pkg_resources.resource_filename(
            'qinling_tempest_plugin',
            "functions/%s" % name
        )
        base_name, extention = os.path.splitext(python_file_path)
        module_name = os.path.basename(base_name)
        self.python_zip_file = os.path.join(
            tempfile.gettempdir(),
            '%s.zip' % module_name
        )

        if not os.path.isfile(self.python_zip_file):
            zf = zipfile.ZipFile(self.python_zip_file, mode='w')
            try:
                # Use default compression mode, may change in future.
                zf.write(
                    python_file_path,
                    '%s%s' % (module_name, extention),
                    compress_type=zipfile.ZIP_STORED
                )
            finally:
                zf.close()

        self.function_id = self.create_function(self.python_zip_file)

    @decorators.idempotent_id('37DCD022-32D6-48D1-B90C-31D605DBE53B')
    def test_webhook_invoke(self):
        webhook_id, url = self.create_webhook()
        resp = requests.post(url, data={'name': 'qinling'}, verify=False)
        self.assertEqual(202, resp.status_code)
        resp_exec_id = resp.json().get('execution_id')
        self.addCleanup(self.client.delete_resource, 'executions',
                        resp_exec_id, ignore_notfound=True)

        resp, body = self.client.get_resources(
            'executions',
            {'description': 'has:%s' % webhook_id}
        )
        self.assertEqual(200, resp.status)
        self.assertEqual(1, len(body['executions']))
        exec_id = body['executions'][0]['id']
        self.assertEqual(resp_exec_id, exec_id)
        self.await_execution_success(exec_id)

        resp, body = self.client.get_execution_log(exec_id)
        self.assertEqual(200, resp.status)
        self.assertIn('qinling', body)

View File

@@ -98,3 +98,12 @@ class BaseQinlingTest(test.BaseTestCase):
                         function_id, ignore_notfound=True)

         return function_id
+
+    def create_webhook(self):
+        resp, body = self.client.create_webhook(self.function_id)
+        self.assertEqual(201, resp.status)
+        webhook_id = body['id']
+        self.addCleanup(self.client.delete_resource, 'webhooks',
+                        webhook_id, ignore_notfound=True)
+
+        return webhook_id, body['webhook_url']

View File

@@ -21,7 +21,6 @@ import os
 import sys
 import time
 import traceback
-import zipfile

 from flask import Flask
 from flask import request
@@ -34,6 +33,10 @@ app = Flask(__name__)
 downloaded = False
 downloading = False

+DOWNLOAD_ERROR = "Failed to download function package from %s, error: %s"
+INVOKE_ERROR = "Function execution failed because of too much resource " \
+               "consumption"
+

 def setup_logger(loglevel):
     global app
@@ -53,6 +56,47 @@ def _print_trace():
     print(''.join(line for line in lines))


+def _get_responce(output, duration, logs, success, code):
+    return Response(
+        response=json.dumps(
+            {
+                'output': output,
+                'duration': duration,
+                'logs': logs,
+                'success': success
+            }
+        ),
+        status=code,
+        mimetype='application/json'
+    )
+
+
+def _download_package(url, zip_file, token=None):
+    app.logger.info('Downloading function, download_url:%s' % url)
+
+    headers = {}
+    if token:
+        headers = {'X-Auth-Token': token}
+
+    try:
+        r = requests.get(url, headers=headers, stream=True,
+                         verify=False, timeout=5)
+        if r.status_code != 200:
+            return _get_responce(
+                DOWNLOAD_ERROR % (url, r.content), 0, '', False, 500
+            )
+
+        with open(zip_file, 'wb') as fd:
+            for chunk in r.iter_content(chunk_size=65535):
+                fd.write(chunk)
+    except Exception as e:
+        return _get_responce(
+            DOWNLOAD_ERROR % (url, str(e)), 0, '', False, 500
+        )
+
+    app.logger.info('Downloaded function package to %s' % zip_file)
+
+
 def _invoke_function(execution_id, zip_file, module_name, method, arg, input,
                      return_dict):
     """Thie function is supposed to be running in a child process."""
@@ -116,48 +160,16 @@ def execute():
     )

     while downloading:
-        # wait
         time.sleep(3)

-    # download function package
     if not downloading and not downloaded:
         downloading = True
-        token = params.get('token')
-        headers = {}
-        if token:
-            headers = {'X-Auth-Token': token}
-        app.logger.info(
-            'Downloading function, download_url:%s, entry: %s' %
-            (download_url, entry)
-        )
-        # Get function code package from Qinling service.
-        r = requests.get(download_url, headers=headers, stream=True)
-        with open(zip_file, 'wb') as fd:
-            for chunk in r.iter_content(chunk_size=65535):
-                fd.write(chunk)
-        app.logger.info('Downloaded function package to %s' % zip_file)
+        _download_package(download_url, zip_file, params.get('token'))
         downloading = False
         downloaded = True

-    if downloaded:
-        if not zipfile.is_zipfile(zip_file):
-            return Response(
-                response=json.dumps(
-                    {
-                        'output': 'The function package is incorrect.',
-                        'duration': 0,
-                        'logs': '',
-                        'success': False
-                    }
-                ),
-                status=500,
-                mimetype='application/json'
-            )

     # Provide an openstack session to user's function
     os_session = None
     if auth_url:
@@ -189,8 +201,7 @@ def execute():
     # Process was killed unexpectedly or finished with error.
     if p.exitcode != 0:
-        output = "Function execution failed because of too much resource " \
-            "consumption."
+        output = INVOKE_ERROR
         success = False
     else:
         output = return_dict.get('result')
@@ -201,18 +212,7 @@ def execute():
         logs = f.read()
     os.remove('%s.out' % execution_id)

-    return Response(
-        response=json.dumps(
-            {
-                'output': output,
-                'duration': duration,
-                'logs': logs,
-                'success': success
-            }
-        ),
-        status=200,
-        mimetype='application/json'
-    )
+    return _get_responce(output, duration, logs, success, 200)


 @app.route('/ping')

tools/clear_resources.sh Normal file → Executable file
View File

@@ -4,6 +4,13 @@ set -e
 # export QINLING_URL=http://127.0.0.1:7070

 function delete_resources(){
+    # Delete webhooks
+    ids=$(openstack webhook list -f yaml -c Id | awk '{print $3}')
+    for id in $ids
+    do
+        openstack webhook delete $id
+    done
+
     # Delete jobs
     ids=$(openstack job list -f yaml -c Id | awk '{print $3}')
     for id in $ids

View File

@@ -1,97 +0,0 @@
#!/bin/bash
set +xe
# if we can't find kubectl, fail immediately because it is likely
# the whitespace linter fails - no point to collect logs.
if ! type "kubectl" &> /dev/null; then
exit $1
fi
echo "Capturing logs from environment."
mkdir -p ${LOGS_DIR}/k8s/etc
sudo cp -a /etc/kubernetes ${LOGS_DIR}/k8s/etc
sudo chmod 777 --recursive ${LOGS_DIR}/*
mkdir -p ${LOGS_DIR}/k8s
for OBJECT_TYPE in nodes \
namespace \
storageclass; do
kubectl get ${OBJECT_TYPE} -o yaml > ${LOGS_DIR}/k8s/${OBJECT_TYPE}.yaml
done
kubectl describe nodes > ${LOGS_DIR}/k8s/nodes.txt
for OBJECT_TYPE in svc \
pods \
jobs \
deployments \
daemonsets \
statefulsets \
configmaps \
secrets; do
kubectl get --all-namespaces ${OBJECT_TYPE} -o yaml > \
${LOGS_DIR}/k8s/${OBJECT_TYPE}.yaml
done
mkdir -p ${LOGS_DIR}/k8s/pods
kubectl get pods -a --all-namespaces -o json | jq -r \
'.items[].metadata | .namespace + " " + .name' | while read line; do
NAMESPACE=$(echo $line | awk '{print $1}')
NAME=$(echo $line | awk '{print $2}')
kubectl get --namespace $NAMESPACE pod $NAME -o json | jq -r \
'.spec.containers[].name' | while read line; do
CONTAINER=$(echo $line | awk '{print $1}')
kubectl logs $NAME --namespace $NAMESPACE -c $CONTAINER > \
${LOGS_DIR}/k8s/pods/$NAMESPACE-$NAME-$CONTAINER.txt
done
done
mkdir -p ${LOGS_DIR}/k8s/svc
kubectl get svc -o json --all-namespaces | jq -r \
'.items[].metadata | .namespace + " " + .name' | while read line; do
NAMESPACE=$(echo $line | awk '{print $1}')
NAME=$(echo $line | awk '{print $2}')
kubectl describe svc $NAME --namespace $NAMESPACE > \
${LOGS_DIR}/k8s/svc/$NAMESPACE-$NAME.txt
done
mkdir -p ${LOGS_DIR}/k8s/pvc
kubectl get pvc -o json --all-namespaces | jq -r \
'.items[].metadata | .namespace + " " + .name' | while read line; do
NAMESPACE=$(echo $line | awk '{print $1}')
NAME=$(echo $line | awk '{print $2}')
kubectl describe pvc $NAME --namespace $NAMESPACE > \
${LOGS_DIR}/k8s/pvc/$NAMESPACE-$NAME.txt
done
mkdir -p ${LOGS_DIR}/k8s/rbac
for OBJECT_TYPE in clusterroles \
roles \
clusterrolebindings \
rolebindings; do
kubectl get ${OBJECT_TYPE} -o yaml > ${LOGS_DIR}/k8s/rbac/${OBJECT_TYPE}.yaml
done
mkdir -p ${LOGS_DIR}/k8s/descriptions
for NAMESPACE in $(kubectl get namespaces -o name | awk -F '/' '{ print $NF }') ; do
for OBJECT in $(kubectl get all --show-all -n $NAMESPACE -o name) ; do
OBJECT_TYPE=$(echo $OBJECT | awk -F '/' '{ print $1 }')
OBJECT_NAME=$(echo $OBJECT | awk -F '/' '{ print $2 }')
mkdir -p ${LOGS_DIR}/k8s/descriptions/${NAMESPACE}/${OBJECT_TYPE}
kubectl describe -n $NAMESPACE $OBJECT > ${LOGS_DIR}/k8s/descriptions/${NAMESPACE}/$OBJECT_TYPE/$OBJECT_NAME.txt
done
done
NODE_NAME=$(hostname)
mkdir -p ${LOGS_DIR}/nodes/${NODE_NAME}
echo "${NODE_NAME}" > ${LOGS_DIR}/nodes/master.txt
sudo docker logs kubelet 2> ${LOGS_DIR}/nodes/${NODE_NAME}/kubelet.txt
sudo docker logs kubeadm-aio 2>&1 > ${LOGS_DIR}/nodes/${NODE_NAME}/kubeadm-aio.txt
sudo docker images --digests --no-trunc --all > ${LOGS_DIR}/nodes/${NODE_NAME}/images.txt
sudo du -h --max-depth=1 /var/lib/docker | sort -hr > ${LOGS_DIR}/nodes/${NODE_NAME}/docker-size.txt
sudo iptables-save > ${LOGS_DIR}/nodes/${NODE_NAME}/iptables.txt
sudo ip a > ${LOGS_DIR}/nodes/${NODE_NAME}/ip.txt
sudo route -n > ${LOGS_DIR}/nodes/${NODE_NAME}/routes.txt
sudo arp -a > ${LOGS_DIR}/nodes/${NODE_NAME}/arp.txt
cat /etc/resolv.conf > ${LOGS_DIR}/nodes/${NODE_NAME}/resolv.conf
sudo lshw > ${LOGS_DIR}/nodes/${NODE_NAME}/hardware.txt
exit $1

View File

@@ -42,87 +42,6 @@ function base_install {
     fi
 }

-function loopback_support_install {
-    if [ "x$HOST_OS" == "xubuntu" ]; then
-        sudo apt-get update -y
-        sudo apt-get install -y --no-install-recommends \
-            targetcli \
-            open-iscsi \
-            lshw
-        sudo systemctl restart iscsid
-    elif [ "x$HOST_OS" == "xcentos" ]; then
-        sudo yum install -y \
-            targetcli \
-            iscsi-initiator-utils \
-            lshw
-    elif [ "x$HOST_OS" == "xfedora" ]; then
-        sudo dnf install -y \
-            targetcli \
-            iscsi-initiator-utils \
-            lshw
-    fi
-}
-
-function loopback_setup {
-    sudo mkdir -p ${LOOPBACK_DIR}
-    for ((LOOPBACK_DEV=1;LOOPBACK_DEV<=${LOOPBACK_DEVS};LOOPBACK_DEV++)); do
-        if [ "x$HOST_OS" == "xubuntu" ]; then
-            sudo targetcli backstores/fileio create loopback-${LOOPBACK_DEV} ${LOOPBACK_DIR}/fileio-${LOOPBACK_DEV} ${LOOPBACK_SIZE}
-        else
-            sudo targetcli backstores/fileio create loopback-${LOOPBACK_DEV} ${LOOPBACK_DIR}/fileio-${LOOPBACK_DEV} ${LOOPBACK_SIZE} write_back=false
-        fi
-    done
-    sudo targetcli iscsi/ create iqn.2016-01.com.example:target
-    if ! [ "x$HOST_OS" == "xubuntu" ]; then
-        sudo targetcli iscsi/iqn.2016-01.com.example:target/tpg1/portals delete 0.0.0.0 3260
-        sudo targetcli iscsi/iqn.2016-01.com.example:target/tpg1/portals create 127.0.0.1 3260
-    else
-        #NOTE (Portdirect): Frustratingly it appears that Ubuntu's targetcli wont
-        # let you bind to localhost.
-        sudo targetcli iscsi/iqn.2016-01.com.example:target/tpg1/portals create 0.0.0.0 3260
-    fi
-    for ((LOOPBACK_DEV=1;LOOPBACK_DEV<=${LOOPBACK_DEVS};LOOPBACK_DEV++)); do
-        sudo targetcli iscsi/iqn.2016-01.com.example:target/tpg1/luns/ create /backstores/fileio/loopback-${LOOPBACK_DEV}
-    done
-    sudo targetcli iscsi/iqn.2016-01.com.example:target/tpg1/acls/ create $(sudo cat /etc/iscsi/initiatorname.iscsi | awk -F '=' '/^InitiatorName/ { print $NF}')
-    if [ "x$HOST_OS" == "xubuntu" ]; then
-        sudo targetcli iscsi/iqn.2016-01.com.example:target/tpg1 set attribute authentication=0
-    fi
-    sudo iscsiadm --mode discovery --type sendtargets --portal 127.0.0.1
-    sudo iscsiadm -m node -T iqn.2016-01.com.example:target -p 127.0.0.1:3260 -l
-    # Display disks
-    sudo lshw -class disk
-}
-
-function ceph_support_install {
-    if [ "x$HOST_OS" == "xubuntu" ]; then
-        sudo apt-get update -y
-        sudo apt-get install -y --no-install-recommends -qq \
-            ceph-common
-    elif [ "x$HOST_OS" == "xcentos" ]; then
-        sudo yum install -y \
-            ceph
-    elif [ "x$HOST_OS" == "xfedora" ]; then
-        sudo dnf install -y \
-            ceph
-    fi
-    sudo modprobe rbd
-}
-
-function nfs_support_install {
-    if [ "x$HOST_OS" == "xubuntu" ]; then
-        sudo apt-get update -y
-        sudo apt-get install -y --no-install-recommends -qq \
-            nfs-common
-    elif [ "x$HOST_OS" == "xcentos" ]; then
-        sudo yum install -y \
-            nfs-utils
-    elif [ "x$HOST_OS" == "xfedora" ]; then
-        sudo dnf install -y \
-            nfs-utils
-    fi
-}
-
 function gate_base_setup {
     # Install base requirements
     base_install
@@ -140,3 +59,10 @@ function gate_base_setup {
         nfs_support_install
     fi
 }
+
+function create_k8s_screen {
+    # Starts a proxy to the Kubernetes API server in a screen session
+    sudo screen -S kube_proxy -X quit || true
+    sudo screen -dmS kube_proxy && sudo screen -S kube_proxy -X screen -t kube_proxy
+    sudo screen -S kube_proxy -p kube_proxy -X stuff 'kubectl proxy --accept-hosts=".*" --address="0.0.0.0"\n'
+}

View File

@@ -1,148 +0,0 @@
#!/bin/bash
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
function kube_wait_for_pods {
# From Kolla-Kubernetes, orginal authors Kevin Fox & Serguei Bezverkhi
# Default wait timeout is 180 seconds
set +x
end=$(date +%s)
if ! [ -z $2 ]; then
end=$((end + $2))
else
end=$((end + 180))
fi
while true; do
kubectl get pods --namespace=$1 -o json | jq -r \
'.items[].status.phase' | grep Pending > /dev/null && \
PENDING=True || PENDING=False
query='.items[]|select(.status.phase=="Running")'
query="$query|.status.containerStatuses[].ready"
kubectl get pods --namespace=$1 -o json | jq -r "$query" | \
grep false > /dev/null && READY="False" || READY="True"
kubectl get jobs -o json --namespace=$1 | jq -r \
'.items[] | .spec.completions == .status.succeeded' | \
grep false > /dev/null && JOBR="False" || JOBR="True"
[ $PENDING == "False" -a $READY == "True" -a $JOBR == "True" ] && \
break || true
sleep 1
now=$(date +%s)
[ $now -gt $end ] && echo "containers failed to start." && \
kubectl get pods --namespace $1 -o wide && exit -1
done
set -x
}
function kube_wait_for_nodes {
# Default wait timeout is 180 seconds
set +x
end=$(date +%s)
if ! [ -z $2 ]; then
end=$((end + $2))
else
end=$((end + 180))
fi
while true; do
NUMBER_OF_NODES_EXPECTED=$1
NUMBER_OF_NODES=$(kubectl get nodes --no-headers -o name | wc -l)
[ $NUMBER_OF_NODES -eq $NUMBER_OF_NODES_EXPECTED ] && \
NODES_ONLINE="True" || NODES_ONLINE="False"
while read SUB_NODE; do
echo $SUB_NODE | grep -q ^Ready && NODES_READY="True" || NODES_READY="False"
done < <(kubectl get nodes --no-headers | awk '{ print $2 }')
[ $NODES_ONLINE == "True" -a $NODES_READY == "True" ] && \
break || true
sleep 5
now=$(date +%s)
[ $now -gt $end ] && echo "Nodes Failed to be ready in time." && \
kubectl get nodes -o wide && exit -1
done
set -x
}
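
Both wait helpers follow the same calling convention: a positional argument first (a namespace, or an expected node count) and an optional timeout in seconds defaulting to 180. Before their removal in this change, a typical invocation looked like the sketch below; the 240-second timeouts are illustrative:

# Block until kube-system pods/jobs settle, then until 1 node reports Ready.
kube_wait_for_pods kube-system 240
kube_wait_for_nodes 1 240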
function kubeadm_aio_reqs_install {
if [ "x$HOST_OS" == "xubuntu" ]; then
sudo apt-get install -y --no-install-recommends -qq \
docker.io \
jq
elif [ "x$HOST_OS" == "xcentos" ]; then
sudo yum install -y \
epel-release
sudo yum install -y \
docker-latest \
jq
sudo cp -f /usr/lib/systemd/system/docker-latest.service /etc/systemd/system/docker.service
sudo sed -i "s|/var/lib/docker-latest|/var/lib/docker|g" /etc/systemd/system/docker.service
sudo sed -i 's/^OPTIONS/#OPTIONS/g' /etc/sysconfig/docker-latest
sudo sed -i "s|^MountFlags=slave|MountFlags=share|g" /etc/systemd/system/docker.service
sudo sed -i "/--seccomp-profile/,+1 d" /etc/systemd/system/docker.service
echo "DOCKER_STORAGE_OPTIONS=--storage-driver=overlay" | sudo tee /etc/sysconfig/docker-latest-storage
sudo setenforce 0 || true
sudo systemctl daemon-reload
sudo systemctl restart docker
elif [ "x$HOST_OS" == "xfedora" ]; then
sudo dnf install -y \
docker-latest \
jq
sudo cp -f /usr/lib/systemd/system/docker-latest.service /etc/systemd/system/docker.service
sudo sed -i "s|/var/lib/docker-latest|/var/lib/docker|g" /etc/systemd/system/docker.service
echo "DOCKER_STORAGE_OPTIONS=--storage-driver=overlay2" | sudo tee /etc/sysconfig/docker-latest-storage
sudo setenforce 0 || true
sudo systemctl daemon-reload
sudo systemctl restart docker
fi
if CURRENT_KUBECTL_LOC=$(type -p kubectl); then
CURRENT_KUBECTL_VERSION=$(${CURRENT_KUBECTL_LOC} version --client --short | awk '{ print $NF }' | awk -F '+' '{ print $1 }')
fi
[ "x$KUBE_VERSION" == "x$CURRENT_KUBECTL_VERSION" ] || ( \
TMP_DIR=$(mktemp -d)
curl -sSL https://storage.googleapis.com/kubernetes-release/release/${KUBE_VERSION}/bin/linux/amd64/kubectl -o ${TMP_DIR}/kubectl
chmod +x ${TMP_DIR}/kubectl
sudo mv ${TMP_DIR}/kubectl /usr/local/bin/kubectl
rm -rf ${TMP_DIR} )
}
function kubeadm_aio_build {
sudo docker build --pull -t ${KUBEADM_IMAGE} tools/kubeadm-aio
}
function kubeadm_aio_launch {
${WORK_DIR}/tools/kubeadm-aio/kubeadm-aio-launcher.sh
mkdir -p ${HOME}/.kube
cat ${KUBECONFIG} > ${HOME}/.kube/config
kube_wait_for_pods kube-system 240
kube_wait_for_pods default 240
}
function kubeadm_aio_clean {
sudo docker rm -f kubeadm-aio || true
sudo docker rm -f kubelet || true
sudo docker ps -aq | xargs -r -l1 -P16 sudo docker rm -f
sudo rm -rfv \
/etc/cni/net.d \
/etc/kubernetes \
/var/lib/etcd \
/var/etcd \
/var/lib/kubelet/* \
/var/lib/nova \
${HOME}/.kubeadm-aio/admin.conf \
/var/lib/openstack-helm \
/var/lib/nfs-provisioner || true
}
function ceph_kube_controller_manager_replace {
sudo docker pull ${CEPH_KUBE_CONTROLLER_MANAGER_IMAGE}
sudo docker tag ${CEPH_KUBE_CONTROLLER_MANAGER_IMAGE} ${BASE_KUBE_CONTROLLER_MANAGER_IMAGE}
}

@ -42,48 +42,10 @@ function net_hosts_pre_kube {
sudo sed -i "/127.0.0.1/d" /etc/hosts
sudo sed -i "1 i 127.0.0.1 localhost" /etc/hosts
- host_ip=$(net_default_host_ip)
- echo "${host_ip} $(hostname)" | sudo tee -a /etc/hosts
+ # The var will be used in qinling pre_test_hook.sh
+ export DEFAULT_HOST_IP=$(net_default_host_ip)
+ echo "${DEFAULT_HOST_IP} $(hostname)" | sudo tee -a /etc/hosts
}
function net_hosts_post_kube {
sudo cp -f /etc/hosts-pre-kube /etc/hosts
}
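
The change from a function-local host_ip to an exported DEFAULT_HOST_IP is deliberate: the value must outlive this function so later gate scripts can read it, as the comment notes for qinling's pre_test_hook.sh. A hedged sketch of the consuming side (the guard below is illustrative, not copied from that hook):

# In a later script running in the same environment:
if [ -z "${DEFAULT_HOST_IP}" ]; then
    echo "DEFAULT_HOST_IP unset; run net_hosts_pre_kube first" >&2
    exit 1
fi
echo "Services will advertise ${DEFAULT_HOST_IP}"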
function find_subnet_range {
if [ "x$HOST_OS" == "xubuntu" ]; then
ipcalc $(net_default_host_addr) | awk '/^Network/ { print $2 }'
else
eval $(ipcalc --network --prefix $(net_default_host_addr))
echo "$NETWORK/$PREFIX"
fi
}
function find_multi_subnet_range {
: ${PRIMARY_NODE_IP:="$(cat /etc/nodepool/primary_node | tail -1)"}
: ${SUB_NODE_IPS:="$(cat /etc/nodepool/sub_nodes)"}
NODE_IPS="${PRIMARY_NODE_IP} ${SUB_NODE_IPS}"
NODE_IP_UNSORTED=$(mktemp --suffix=.txt)
for NODE_IP in $NODE_IPS; do
echo $NODE_IP >> ${NODE_IP_UNSORTED}
done
NODE_IP_SORTED=$(mktemp --suffix=.txt)
sort -V ${NODE_IP_UNSORTED} > ${NODE_IP_SORTED}
rm -f ${NODE_IP_UNSORTED}
FIRST_IP_SUBNET=$(ipcalc "$(head -n 1 ${NODE_IP_SORTED})/24" | awk '/^Network/ { print $2 }')
LAST_IP_SUBNET=$(ipcalc "$(tail -n 1 ${NODE_IP_SORTED})/24" | awk '/^Network/ { print $2 }')
rm -f ${NODE_IP_SORTED}
function ip_diff {
echo $(($(echo $LAST_IP_SUBNET | awk -F '.' "{ print \$$1}") - $(echo $FIRST_IP_SUBNET | awk -F '.' "{ print \$$1}")))
}
for X in {1..4}; do
if ! [ "$(ip_diff $X)" -eq "0" ]; then
SUBMASK=$(((($X - 1 )) * 8))
break
elif [ $X -eq "4" ]; then
SUBMASK=24
fi
done
echo ${FIRST_IP_SUBNET%/*}/${SUBMASK}
}
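
To make the ip_diff loop concrete: it walks the four octets of the first and last sorted node subnets and stops at the first octet that differs, widening the mask just enough to cover every node. A worked example, assuming nodes on 172.24.4.0/24 and 172.24.5.0/24:

# FIRST_IP_SUBNET=172.24.4.0/24, LAST_IP_SUBNET=172.24.5.0/24
# X=1: 172 vs 172 -> diff 0, keep going
# X=2: 24 vs 24   -> diff 0, keep going
# X=3: 4 vs 5     -> diff 1, so SUBMASK=(3-1)*8=16 and the loop breaks
# Output: 172.24.4.0/16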

@ -1,25 +0,0 @@
#!/bin/bash
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -ex
: ${WORK_DIR:="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"}
source ${WORK_DIR}/tools/gate/vars.sh
source ${WORK_DIR}/tools/gate/funcs/network.sh
source ${WORK_DIR}/tools/gate/funcs/kube.sh
kubeadm_aio_reqs_install
# Re-use the docker image pre-built by openstack-helm team.
sudo docker pull ${KUBEADM_IMAGE} || kubeadm_aio_build
kubeadm_aio_launch

@ -11,33 +11,21 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
set -ex
export WORK_DIR=$(pwd)
source ${WORK_DIR}/tools/gate/vars.sh
source ${WORK_DIR}/tools/gate/funcs/common.sh
source ${WORK_DIR}/tools/gate/funcs/network.sh
- # Setup the logging location: by default use the working dir as the root.
- rm -rf ${LOGS_DIR} || true
- mkdir -p ${LOGS_DIR}
- function dump_logs () {
- ${WORK_DIR}/tools/gate/dump_logs.sh
- }
- trap 'dump_logs "$?"' ERR
# Do the basic node setup for running the gate
gate_base_setup
- # We setup the network for pre kube here, to enable cluster restarts on
- # development machines
net_resolv_pre_kube
net_hosts_pre_kube
# Setup the K8s Cluster
- bash ${WORK_DIR}/tools/gate/kubeadm_aio.sh
+ source ${WORK_DIR}/tools/gate/setup_minikube.sh
+ create_k8s_screen
- # Starts a proxy to the Kubernetes API server in a screen session
- sudo screen -S kube_proxy -X quit || true
- sudo screen -dmS kube_proxy && sudo screen -S kube_proxy -X screen -t kube_proxy
- sudo screen -S kube_proxy -p kube_proxy -X stuff 'kubectl proxy --accept-hosts=".*" --address="0.0.0.0"\n'
+ #net_hosts_post_kube
+ #net_resolv_post_kube

tools/gate/setup_minikube.sh Executable file
@ -0,0 +1,35 @@
#!/usr/bin/env bash
set -xe
sudo apt-get install -y --no-install-recommends -qq \
docker.io \
jq
curl -Lo minikube https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64 && sudo chmod +x minikube
curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl && sudo chmod +x kubectl && sudo mv ./kubectl /usr/local/bin/kubectl
export MINIKUBE_WANTUPDATENOTIFICATION=false
export MINIKUBE_WANTREPORTERRORPROMPT=false
export MINIKUBE_HOME=$HOME
export CHANGE_MINIKUBE_NONE_USER=true
mkdir $HOME/.kube || true
touch $HOME/.kube/config
export KUBECONFIG=$HOME/.kube/config
sudo ./minikube delete || true
sudo -E ./minikube start --vm-driver=none --kubernetes-version ${KUBE_VERSION} --loglevel 0
# waits until kubectl can access the api server that Minikube has created
end=$(($(date +%s) + 600))
READY="False"
while true; do
kubectl get po &> /dev/null
if [ $? -ne 1 ]; then
READY="True"
echo "Kubernetes cluster is ready!"
fi
[ $READY == "True" ] && break || true
sleep 2
now=$(date +%s)
[ $now -gt $end ] && echo "Failed to setup kubernetes cluster in time" && exit -1
done
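
Because --vm-driver=none runs the Kubernetes components directly on the host, the cluster is usable from the same shell as soon as the loop above succeeds. A minimal post-setup check, assuming kubectl was installed to /usr/local/bin as above:

# The single node should report Ready and system pods should be scheduled.
kubectl get nodes
kubectl get pods --namespace=kube-system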

@ -15,36 +15,12 @@
# Set work dir if not already done
: ${WORK_DIR:="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"}
- # Set logs directory
- export LOGS_DIR=${LOGS_DIR:-"${WORK_DIR}/logs"}
# Get Host OS
source /etc/os-release
export HOST_OS=${HOST_OS:="${ID}"}
# Set versions of K8s to use
- export KUBE_VERSION=${KUBE_VERSION:-"v1.6.8"}
+ export KUBE_VERSION=${KUBE_VERSION:-"v1.8.0"}
- # Set K8s-AIO options
- export KUBECONFIG=${KUBECONFIG:="${HOME}/.kubeadm-aio/admin.conf"}
- export KUBEADM_IMAGE=${KUBEADM_IMAGE:="openstackhelm/kubeadm-aio:${KUBE_VERSION}"}
- # Set K8s network options
- export CNI_POD_CIDR=${CNI_POD_CIDR:="192.168.0.0/16"}
- export KUBE_CNI=${KUBE_CNI:="calico"}
- # Set PVC Backend
- export PVC_BACKEND=${PVC_BACKEND:-"ceph"}
# Set Upstream DNS
export UPSTREAM_DNS=${UPSTREAM_DNS:-"8.8.8.8"}
- # Set gate script timeouts
- export SERVICE_LAUNCH_TIMEOUT=${SERVICE_LAUNCH_TIMEOUT:="600"}
- export SERVICE_TEST_TIMEOUT=${SERVICE_TEST_TIMEOUT:="600"}
- # Setup Loopback device options
- export LOOPBACK_CREATE=${LOOPBACK_CREATE:="false"}
- export LOOPBACK_DEVS=${LOOPBACK_DEVS:="3"}
- export LOOPBACK_SIZE=${LOOPBACK_SIZE:="500M"}
- export LOOPBACK_DIR=${LOOPBACK_DIR:="/var/lib/iscsi-loopback"}
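
Since every remaining variable uses the ${VAR:-default} / : ${VAR:=default} idiom, a gate run can override any of them from the calling environment. A hedged sketch, assuming the gate is launched from the repository root:

# Pin a specific Kubernetes release and alternate DNS for a local run.
export KUBE_VERSION=v1.8.4
export UPSTREAM_DNS=8.8.4.4
bash tools/gate/setup_gate.sh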

@ -1,88 +0,0 @@
FROM ubuntu:16.04
MAINTAINER pete.birley@att.com
ENV KUBE_VERSION=v1.6.8 \
CNI_VERSION=v0.6.0-rc2 \
container="docker" \
DEBIAN_FRONTEND="noninteractive"
RUN set -x \
&& TMP_DIR=$(mktemp --directory) \
&& cd ${TMP_DIR} \
&& apt-get update \
&& apt-get install -y --no-install-recommends \
apt-transport-https \
ca-certificates \
curl \
dbus \
make \
git \
vim \
jq \
# Add Kubernetes repo
&& curl -sSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - \
&& echo "deb http://apt.kubernetes.io/ kubernetes-xenial main" > /etc/apt/sources.list.d/kubernetes.list \
&& apt-get update \
&& apt-get install -y --no-install-recommends \
docker.io \
iptables \
kubectl \
kubelet \
kubernetes-cni \
# Install Kubeadm without running postinstall script as it expects systemd to be running.
&& apt-get download kubeadm \
&& dpkg --unpack kubeadm*.deb \
&& mv /var/lib/dpkg/info/kubeadm.postinst /opt/kubeadm.postinst \
&& dpkg --configure kubeadm \
&& apt-get install -yf kubeadm \
&& mkdir -p /etc/kubernetes/manifests \
# Install kubectl:
&& curl -sSL https://dl.k8s.io/${KUBE_VERSION}/kubernetes-client-linux-amd64.tar.gz | tar -zxv --strip-components=1 \
&& mv ${TMP_DIR}/client/bin/kubectl /usr/bin/kubectl \
&& chmod +x /usr/bin/kubectl \
# Install kubelet & kubeadm binaries:
# (portdirect) We do things in this weird way to let us use the deps and systemd
# units from the packages in the .deb repo.
&& curl -sSL https://dl.k8s.io/${KUBE_VERSION}/kubernetes-server-linux-amd64.tar.gz | tar -zxv --strip-components=1 \
&& mv ${TMP_DIR}/server/bin/kubelet /usr/bin/kubelet \
&& chmod +x /usr/bin/kubelet \
&& mv ${TMP_DIR}/server/bin/kubeadm /usr/bin/kubeadm \
&& chmod +x /usr/bin/kubeadm \
# Install CNI:
&& CNI_BIN_DIR=/opt/cni/bin \
&& mkdir -p ${CNI_BIN_DIR} \
&& cd ${CNI_BIN_DIR} \
&& curl -sSL https://github.com/containernetworking/plugins/releases/download/$CNI_VERSION/cni-plugins-amd64-$CNI_VERSION.tgz | tar -zxv --strip-components=1 \
&& cd ${TMP_DIR} \
# Move kubelet binary as we will run containerised
&& mv /usr/bin/kubelet /usr/bin/kubelet-real \
# Install utils for PVC provisioners
&& apt-get install -y --no-install-recommends \
nfs-common \
ceph-common \
kmod \
# Tweak Systemd units and targets for running in a container
&& find /lib/systemd/system/sysinit.target.wants/ ! -name 'systemd-tmpfiles-setup.service' -type l -exec rm -fv {} + \
&& rm -fv \
/lib/systemd/system/multi-user.target.wants/* \
/etc/systemd/system/*.wants/* \
/lib/systemd/system/local-fs.target.wants/* \
/lib/systemd/system/sockets.target.wants/*udev* \
/lib/systemd/system/sockets.target.wants/*initctl* \
/lib/systemd/system/basic.target.wants/* \
# Clean up apt cache
&& rm -rf /var/lib/apt/lists/* \
# Clean up tmp dir
&& cd / \
&& rm -rf ${TMP_DIR}
# Load assets into place, setup startup target & units
COPY ./assets/ /
RUN set -x \
&& ln -s /usr/lib/systemd/system/container-up.target /etc/systemd/system/default.target \
&& mkdir -p /etc/systemd/system/container-up.target.wants \
&& ln -s /usr/lib/systemd/system/kubeadm-aio.service /etc/systemd/system/container-up.target.wants/kubeadm-aio.service
VOLUME /sys/fs/cgroup
CMD /kubeadm-aio

@ -1,110 +0,0 @@
Kubeadm AIO Container
=====================
This container builds a small AIO Kubeadm based Kubernetes deployment
for Development and Gating use.
Instructions
------------
OS Specific Host setup:
~~~~~~~~~~~~~~~~~~~~~~~
Ubuntu:
^^^^^^^
From a freshly provisioned Ubuntu 16.04 LTS host run:
.. code:: bash
sudo apt-get update -y
sudo apt-get install -y \
docker.io \
nfs-common \
git \
make
OS Independent Host setup:
~~~~~~~~~~~~~~~~~~~~~~~~~~
You should install the ``kubectl`` and ``helm`` binaries:
.. code:: bash
KUBE_VERSION=v1.6.8
HELM_VERSION=v2.5.1
TMP_DIR=$(mktemp -d)
curl -sSL https://storage.googleapis.com/kubernetes-release/release/${KUBE_VERSION}/bin/linux/amd64/kubectl -o ${TMP_DIR}/kubectl
chmod +x ${TMP_DIR}/kubectl
sudo mv ${TMP_DIR}/kubectl /usr/local/bin/kubectl
curl -sSL https://storage.googleapis.com/kubernetes-helm/helm-${HELM_VERSION}-linux-amd64.tar.gz | tar -zxv --strip-components=1 -C ${TMP_DIR}
sudo mv ${TMP_DIR}/helm /usr/local/bin/helm
rm -rf ${TMP_DIR}
And clone the OpenStack-Helm repo:
.. code:: bash
git clone https://git.openstack.org/openstack/openstack-helm
Build the AIO environment (optional)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
A known good image is published to dockerhub on a fairly regular basis, but if
you wish to build your own image, from the root directory of the OpenStack-Helm
repo run:
.. code:: bash
export KUBEADM_IMAGE=openstackhelm/kubeadm-aio:v1.6.8
sudo docker build --pull -t ${KUBEADM_IMAGE} tools/kubeadm-aio
Deploy the AIO environment
~~~~~~~~~~~~~~~~~~~~~~~~~~
To launch the environment run:
.. code:: bash
export KUBEADM_IMAGE=openstackhelm/kubeadm-aio:v1.6.8
export KUBE_VERSION=v1.6.8
./tools/kubeadm-aio/kubeadm-aio-launcher.sh
export KUBECONFIG=${HOME}/.kubeadm-aio/admin.conf
Once this has run without errors, you should hopefully have a Kubernetes single
node environment running, with Helm, Calico, appropriate RBAC rules and node
labels to get developing.
Prior to launching you can also optionally set the following environment
variables to control aspects of the CNI used:
.. code:: bash
export KUBE_CNI=calico # or "canal" "weave" "flannel"
export CNI_POD_CIDR=192.168.0.0/16
If you wish to use this environment as the primary Kubernetes environment on
your host you may run the following, but note that this will wipe any previous
client configuration you may have.
.. code:: bash
mkdir -p ${HOME}/.kube
cat ${HOME}/.kubeadm-aio/admin.conf > ${HOME}/.kube/config
If you wish to create dummy network devices for Neutron to manage there
is a helper script that can set them up for you:
.. code:: bash
sudo docker exec kubelet /usr/bin/openstack-helm-aio-network-prep
Logs
~~~~
You can get the logs from your ``kubeadm-aio`` container by running:
.. code:: bash
sudo docker logs -f kubeadm-aio

@ -1,2 +0,0 @@
KUBE_CNI=calico
CNI_POD_CIDR=192.168.0.0/16

@ -1,3 +0,0 @@
# If KUBE_ROLE is set 'master' kubeadm-aio will set this node up to be a master
# node, otherwise if 'worker', will join an existing cluster.
KUBE_ROLE=master

@ -1,3 +0,0 @@
# If KUBE_VERSION is set 'default' kubeadm will use the default version of K8s
# otherwise the version specified here will be used.
KUBE_VERSION=default

@ -1 +0,0 @@
KUBEADM_JOIN_ARGS="no_command_supplied"

@ -1,4 +0,0 @@
apiVersion: kubeadm.k8s.io/v1alpha1
kind: MasterConfiguration
apiServerExtraArgs:
runtime-config: "batch/v2alpha1=true"

@ -1,3 +0,0 @@
# If KUBE_BIND_DEV is set to 'autodetect' we will use kubeadm's autodetect logic
# otherwise use the device specified to find the IP address to bind to.
KUBE_BIND_DEV=autodetect

@ -1,3 +0,0 @@
# If KUBELET_CONTAINER is set 'to_this' one we will not attempt to launch a new
# container for the kubelet process, otherwise use the image tag specified
KUBELET_CONTAINER=this_one

@ -1,54 +0,0 @@
#!/bin/bash
# Copyright 2017 The Openstack-Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
set -xe
echo 'Checking cgroups'
if ls -dZ /sys/fs/cgroup | grep -q :svirt_sandbox_file_t: ; then
echo 'Invocation error: use -v /sys/fs/cgroup:/sys/fs/cgroup:ro parameter to docker run.'
exit 1
fi
echo 'Setting up K8s version to deploy'
: ${KUBE_VERSION:="default"}
sed -i "s|KUBE_VERSION=.*|KUBE_VERSION=${KUBE_VERSION}|g" /etc/kube-version
echo 'Setting up device to use for kube-api'
: ${KUBE_BIND_DEV:="autodetect"}
sed -i "s|KUBE_BIND_DEV=.*|KUBE_BIND_DEV=${KUBE_BIND_DEV}|g" /etc/kubeapi-device
echo 'Setting up container image to use for kubelet'
: ${KUBELET_CONTAINER:="this_one"}
sed -i "s|KUBELET_CONTAINER=.*|KUBELET_CONTAINER=${KUBELET_CONTAINER}|g" /etc/kubelet-container
echo 'Setting whether this node is a master, or slave, K8s node'
: ${KUBE_ROLE:="master"}
sed -i "s|KUBE_ROLE=.*|KUBE_ROLE=${KUBE_ROLE}|g" /etc/kube-role
echo 'Setting any kubeadm join commands'
: ${KUBEADM_JOIN_ARGS:="no_command_supplied"}
sed -i "s|KUBEADM_JOIN_ARGS=.*|KUBEADM_JOIN_ARGS=\"${KUBEADM_JOIN_ARGS}\"|g" /etc/kubeadm-join-command-args
echo 'Setting CNI pod CIDR'
: ${CNI_POD_CIDR:="192.168.0.0/16"}
sed -i "s|192.168.0.0/16|${CNI_POD_CIDR}|g" /opt/cni-manifests/*.yaml
sed -i "s|CNI_POD_CIDR=.*|CNI_POD_CIDR=\"${CNI_POD_CIDR}\"|g" /etc/kube-cni
echo 'Setting CNI '
: ${KUBE_CNI:="calico"}
sed -i "s|KUBE_CNI=.*|KUBE_CNI=\"${KUBE_CNI}\"|g" /etc/kube-cni
echo 'Starting Systemd'
exec /bin/systemd --system

@ -1,365 +0,0 @@
# Calico Version v2.1.4
# http://docs.projectcalico.org/v2.1/releases#v2.1.4
# This manifest includes the following component versions:
# calico/node:v1.1.3
# calico/cni:v1.7.0
# calico/kube-policy-controller:v0.5.4
# This ConfigMap is used to configure a self-hosted Calico installation.
kind: ConfigMap
apiVersion: v1
metadata:
name: calico-config
namespace: kube-system
data:
# The location of your etcd cluster. This uses the Service clusterIP
# defined below.
etcd_endpoints: "http://10.96.232.136:6666"
# Configure the Calico backend to use.
calico_backend: "bird"
# The CNI network configuration to install on each node.
cni_network_config: |-
{
"name": "k8s-pod-network",
"type": "calico",
"etcd_endpoints": "__ETCD_ENDPOINTS__",
"log_level": "info",
"ipam": {
"type": "calico-ipam"
},
"policy": {
"type": "k8s",
"k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
"k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
},
"kubernetes": {
"kubeconfig": "/etc/cni/net.d/__KUBECONFIG_FILENAME__"
}
}
---
# This manifest installs the Calico etcd on the kubeadm master. This uses a DaemonSet
# to force it to run on the master even when the master isn't schedulable, and uses
# nodeSelector to ensure it only runs on the master.
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: calico-etcd
namespace: kube-system
labels:
k8s-app: calico-etcd
spec:
template:
metadata:
labels:
k8s-app: calico-etcd
annotations:
# Mark this pod as a critical add-on; when enabled, the critical add-on scheduler
# reserves resources for critical add-on pods so that they can be rescheduled after
# a failure. This annotation works in tandem with the toleration below.
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
# Only run this pod on the master.
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
# Allow this pod to be rescheduled while the node is in "critical add-ons only" mode.
# This, along with the annotation above marks this pod as a critical add-on.
- key: CriticalAddonsOnly
operator: Exists
nodeSelector:
node-role.kubernetes.io/master: ""
hostNetwork: true
containers:
- name: calico-etcd
image: gcr.io/google_containers/etcd:2.2.1
env:
- name: CALICO_ETCD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
command: ["/bin/sh","-c"]
args: ["/usr/local/bin/etcd --name=calico --data-dir=/var/etcd/calico-data --advertise-client-urls=http://$CALICO_ETCD_IP:6666 --listen-client-urls=http://0.0.0.0:6666 --listen-peer-urls=http://0.0.0.0:6667"]
volumeMounts:
- name: var-etcd
mountPath: /var/etcd
volumes:
- name: var-etcd
hostPath:
path: /var/etcd
---
# This manifest installs the Service which gets traffic to the Calico
# etcd.
apiVersion: v1
kind: Service
metadata:
labels:
k8s-app: calico-etcd
name: calico-etcd
namespace: kube-system
spec:
# Select the calico-etcd pod running on the master.
selector:
k8s-app: calico-etcd
# This ClusterIP needs to be known in advance, since we cannot rely
# on DNS to get access to etcd.
clusterIP: 10.96.232.136
ports:
- port: 6666
---
# This manifest installs the calico/node container, as well
# as the Calico CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
name: calico-node
namespace: kube-system
labels:
k8s-app: calico-node
spec:
selector:
matchLabels:
k8s-app: calico-node
template:
metadata:
labels:
k8s-app: calico-node
annotations:
# Mark this pod as a critical add-on; when enabled, the critical add-on scheduler
# reserves resources for critical add-on pods so that they can be rescheduled after
# a failure. This annotation works in tandem with the toleration below.
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
hostNetwork: true
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
# Allow this pod to be rescheduled while the node is in "critical add-ons only" mode.
# This, along with the annotation above marks this pod as a critical add-on.
- key: CriticalAddonsOnly
operator: Exists
serviceAccountName: calico-cni-plugin
containers:
# Runs calico/node container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: calico-node
image: quay.io/calico/node:v1.1.3
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
# Enable BGP. Disable to enforce policy only.
- name: CALICO_NETWORKING_BACKEND
valueFrom:
configMapKeyRef:
name: calico-config
key: calico_backend
# Disable file logging so `kubectl logs` works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
# Set Felix endpoint to host default action to ACCEPT.
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "ACCEPT"
# Configure the IP Pool from which Pod IPs will be chosen.
- name: CALICO_IPV4POOL_CIDR
value: "192.168.0.0/16"
- name: CALICO_IPV4POOL_IPIP
value: "always"
# Disable IPv6 on Kubernetes.
- name: FELIX_IPV6SUPPORT
value: "false"
# Set Felix logging to "info"
- name: FELIX_LOGSEVERITYSCREEN
value: "info"
# Auto-detect the BGP IP address.
- name: IP
value: ""
securityContext:
privileged: true
resources:
requests:
cpu: 250m
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
# This container installs the Calico CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: quay.io/calico/cni:v1.7.0
command: ["/install-cni.sh"]
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: calico-config
key: cni_network_config
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
volumes:
# Used by calico/node.
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run-calico
hostPath:
path: /var/run/calico
# Used to install CNI.
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
---
# This manifest deploys the Calico policy controller on Kubernetes.
# See https://github.com/projectcalico/k8s-policy
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: calico-policy-controller
namespace: kube-system
labels:
k8s-app: calico-policy
spec:
# The policy controller can only have a single active instance.
replicas: 1
strategy:
type: Recreate
template:
metadata:
name: calico-policy-controller
namespace: kube-system
labels:
k8s-app: calico-policy-controller
annotations:
# Mark this pod as a critical add-on; when enabled, the critical add-on scheduler
# reserves resources for critical add-on pods so that they can be rescheduled after
# a failure. This annotation works in tandem with the toleration below.
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
# The policy controller must run in the host network namespace so that
# it isn't governed by policy that would prevent it from working.
hostNetwork: true
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
# Allow this pod to be rescheduled while the node is in "critical add-ons only" mode.
# This, along with the annotation above marks this pod as a critical add-on.
- key: CriticalAddonsOnly
operator: Exists
serviceAccountName: calico-policy-controller
containers:
- name: calico-policy-controller
image: quay.io/calico/kube-policy-controller:v0.5.4
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
valueFrom:
configMapKeyRef:
name: calico-config
key: etcd_endpoints
# The location of the Kubernetes API. Use the default Kubernetes
# service for API access.
- name: K8S_API
value: "https://kubernetes.default:443"
# Since we're running in the host namespace and might not have KubeDNS
# access, configure the container's /etc/hosts to resolve
# kubernetes.default to the correct service clusterIP.
- name: CONFIGURE_ETC_HOSTS
value: "true"
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: calico-cni-plugin
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-cni-plugin
subjects:
- kind: ServiceAccount
name: calico-cni-plugin
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: calico-cni-plugin
namespace: kube-system
rules:
- apiGroups: [""]
resources:
- pods
- nodes
verbs:
- get
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-cni-plugin
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: calico-policy-controller
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: calico-policy-controller
subjects:
- kind: ServiceAccount
name: calico-policy-controller
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: calico-policy-controller
namespace: kube-system
rules:
- apiGroups:
- ""
- extensions
resources:
- pods
- namespaces
- networkpolicies
verbs:
- watch
- list
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: calico-policy-controller
namespace: kube-system

@ -1,329 +0,0 @@
# Calico Roles
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: canal
namespace: kube-system
rules:
- apiGroups: [""]
resources:
- namespaces
verbs:
- get
- list
- watch
- apiGroups: [""]
resources:
- pods/status
verbs:
- update
- apiGroups: [""]
resources:
- pods
verbs:
- get
- list
- watch
- apiGroups: [""]
resources:
- nodes
verbs:
- get
- list
- update
- watch
- apiGroups: ["extensions"]
resources:
- thirdpartyresources
verbs:
- create
- get
- list
- watch
- apiGroups: ["extensions"]
resources:
- networkpolicies
verbs:
- get
- list
- watch
- apiGroups: ["projectcalico.org"]
resources:
- globalconfigs
verbs:
- create
- get
- list
- update
- watch
- apiGroups: ["projectcalico.org"]
resources:
- ippools
verbs:
- create
- delete
- get
- list
- update
- watch
---
# Flannel roles
# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: flannel
rules:
- apiGroups:
- ""
resources:
- pods
verbs:
- get
- apiGroups:
- ""
resources:
- nodes
verbs:
- list
- watch
- apiGroups:
- ""
resources:
- nodes/status
verbs:
- patch
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: flannel
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: flannel
subjects:
- kind: ServiceAccount
name: canal
namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: canal
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: canal
subjects:
- kind: ServiceAccount
name: canal
namespace: kube-system
---
# This ConfigMap can be used to configure a self-hosted Canal installation.
kind: ConfigMap
apiVersion: v1
metadata:
name: canal-config
namespace: kube-system
data:
# The interface used by canal for host <-> host communication.
# If left blank, then the interface is chosen using the node's
# default route.
canal_iface: ""
# Whether or not to masquerade traffic to destinations not within
# the pod network.
masquerade: "true"
# The CNI network configuration to install on each node.
cni_network_config: |-
{
"name": "k8s-pod-network",
"type": "calico",
"log_level": "info",
"datastore_type": "kubernetes",
"hostname": "__KUBERNETES_NODE_NAME__",
"ipam": {
"type": "host-local",
"subnet": "usePodCidr"
},
"policy": {
"type": "k8s",
"k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
},
"kubernetes": {
"k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
"kubeconfig": "__KUBECONFIG_FILEPATH__"
}
}
# Flannel network configuration. Mounted into the flannel container.
net-conf.json: |
{
"Network": "192.168.0.0/16",
"Backend": {
"Type": "vxlan"
}
}
---
# This manifest installs the calico/node container, as well
# as the Calico CNI plugins and network config on
# each master and worker node in a Kubernetes cluster.
kind: DaemonSet
apiVersion: extensions/v1beta1
metadata:
name: canal
namespace: kube-system
labels:
k8s-app: canal
spec:
selector:
matchLabels:
k8s-app: canal
template:
metadata:
labels:
k8s-app: canal
annotations:
scheduler.alpha.kubernetes.io/critical-pod: ''
spec:
hostNetwork: true
serviceAccountName: canal
tolerations:
# Allow the pod to run on the master. This is required for
# the master to communicate with pods.
- key: node-role.kubernetes.io/master
effect: NoSchedule
# Mark the pod as a critical add-on for rescheduling.
- key: "CriticalAddonsOnly"
operator: "Exists"
containers:
# Runs calico/node container on each Kubernetes node. This
# container programs network policy and routes on each
# host.
- name: calico-node
image: quay.io/calico/node:v1.2.1
env:
# Use Kubernetes API as the backing datastore.
- name: DATASTORE_TYPE
value: "kubernetes"
# Enable felix logging.
- name: FELIX_LOGSEVERITYSYS
value: "info"
# Period, in seconds, at which felix re-applies all iptables state
- name: FELIX_IPTABLESREFRESHINTERVAL
value: "60"
# Disable IPV6 support in Felix.
- name: FELIX_IPV6SUPPORT
value: "false"
# Don't enable BGP.
- name: CALICO_NETWORKING_BACKEND
value: "none"
# Disable file logging so `kubectl logs` works.
- name: CALICO_DISABLE_FILE_LOGGING
value: "true"
- name: WAIT_FOR_DATASTORE
value: "true"
# No IP address needed.
- name: IP
value: ""
- name: HOSTNAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
# Set Felix endpoint to host default action to ACCEPT.
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
value: "ACCEPT"
securityContext:
privileged: true
resources:
requests:
cpu: 250m
volumeMounts:
- mountPath: /lib/modules
name: lib-modules
readOnly: true
- mountPath: /var/run/calico
name: var-run-calico
readOnly: false
# This container installs the Calico CNI binaries
# and CNI network config file on each node.
- name: install-cni
image: quay.io/calico/cni:v1.8.3
command: ["/install-cni.sh"]
env:
# The CNI network config to install on each node.
- name: CNI_NETWORK_CONFIG
valueFrom:
configMapKeyRef:
name: canal-config
key: cni_network_config
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- mountPath: /host/opt/cni/bin
name: cni-bin-dir
- mountPath: /host/etc/cni/net.d
name: cni-net-dir
# This container runs flannel using the kube-subnet-mgr backend
# for allocating subnets.
- name: kube-flannel
image: quay.io/coreos/flannel:v0.8.0
command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ]
securityContext:
privileged: true
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: FLANNELD_IFACE
valueFrom:
configMapKeyRef:
name: canal-config
key: canal_iface
- name: FLANNELD_IP_MASQ
valueFrom:
configMapKeyRef:
name: canal-config
key: masquerade
volumeMounts:
- name: run
mountPath: /run
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
# Used by calico/node.
- name: lib-modules
hostPath:
path: /lib/modules
- name: var-run-calico
hostPath:
path: /var/run/calico
# Used to install CNI.
- name: cni-bin-dir
hostPath:
path: /opt/cni/bin
- name: cni-net-dir
hostPath:
path: /etc/cni/net.d
# Used by flannel.
- name: run
hostPath:
path: /run
- name: flannel-cfg
configMap:
name: canal-config
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: canal
namespace: kube-system

@ -1,94 +0,0 @@
#https://raw.githubusercontent.com/coreos/flannel/v0.8.0/Documentation/kube-flannel.yml
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: flannel
namespace: kube-system
---
kind: ConfigMap
apiVersion: v1
metadata:
name: kube-flannel-cfg
namespace: kube-system
labels:
tier: node
app: flannel
data:
cni-conf.json: |
{
"name": "cbr0",
"type": "flannel",
"delegate": {
"isDefaultGateway": true
}
}
net-conf.json: |
{
"Network": "192.168.0.0/16",
"Backend": {
"Type": "vxlan"
}
}
---
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: kube-flannel-ds
namespace: kube-system
labels:
tier: node
app: flannel
spec:
template:
metadata:
labels:
tier: node
app: flannel
spec:
hostNetwork: true
nodeSelector:
beta.kubernetes.io/arch: amd64
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
serviceAccountName: flannel
containers:
- name: kube-flannel
image: quay.io/coreos/flannel:v0.8.0-amd64
command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ]
securityContext:
privileged: true
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
volumeMounts:
- name: run
mountPath: /run
- name: flannel-cfg
mountPath: /etc/kube-flannel/
- name: install-cni
image: quay.io/coreos/flannel:v0.8.0-amd64
command: [ "/bin/sh", "-c", "set -e -x; cp -f /etc/kube-flannel/cni-conf.json /etc/cni/net.d/10-flannel.conf; while true; do sleep 3600; done" ]
volumeMounts:
- name: cni
mountPath: /etc/cni/net.d
- name: flannel-cfg
mountPath: /etc/kube-flannel/
volumes:
- name: run
hostPath:
path: /run
- name: cni
hostPath:
path: /etc/cni/net.d
- name: flannel-cfg
configMap:
name: kube-flannel-cfg

@ -1,187 +0,0 @@
# curl --location "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')&env.WEAVE_MTU=1337&env.IPALLOC_RANGE=192.168.0.0/16
apiVersion: v1
kind: List
items:
- apiVersion: v1
kind: ServiceAccount
metadata:
name: weave-net
annotations:
cloud.weave.works/launcher-info: |-
{
"server-version": "master-c3b4969",
"original-request": {
"url": "/k8s/v1.6/net.yaml?k8s-version=Q2xpZW50IFZlcnNpb246IHZlcnNpb24uSW5mb3tNYWpvcjoiMSIsIE1pbm9yOiI2IiwgR2l0VmVyc2lvbjoidjEuNi43IiwgR2l0Q29tbWl0OiIwOTUxMzZjMzA3OGNjZjg4N2I5MDM0YjdjZTU5OGEwYTFmYWZmNzY5IiwgR2l0VHJlZVN0YXRlOiJjbGVhbiIsIEJ1aWxkRGF0ZToiMjAxNy0wNy0wNVQxNjo1MTo1NloiLCBHb1ZlcnNpb246ImdvMS43LjYiLCBDb21waWxlcjoiZ2MiLCBQbGF0Zm9ybToibGludXgvYW1kNjQifQpTZXJ2ZXIgVmVyc2lvbjogdmVyc2lvbi5JbmZve01ham9yOiIxIiwgTWlub3I6IjYiLCBHaXRWZXJzaW9uOiJ2MS42LjciLCBHaXRDb21taXQ6IjA5NTEzNmMzMDc4Y2NmODg3YjkwMzRiN2NlNTk4YTBhMWZhZmY3NjkiLCBHaXRUcmVlU3RhdGU6ImNsZWFuIiwgQnVpbGREYXRlOiIyMDE3LTA3LTA1VDE2OjQwOjQyWiIsIEdvVmVyc2lvbjoiZ28xLjcuNiIsIENvbXBpbGVyOiJnYyIsIFBsYXRmb3JtOiJsaW51eC9hbWQ2NCJ9Cg==&env.WEAVE_MTU=1337&env.IPALLOC_RANGE=192.168.0.0/16",
"date": "Sun Jul 30 2017 02:48:47 GMT+0000 (UTC)"
},
"email-address": "support@weave.works"
}
labels:
name: weave-net
namespace: kube-system
- apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
name: weave-net
annotations:
cloud.weave.works/launcher-info: |-
{
"server-version": "master-c3b4969",
"original-request": {
"url": "/k8s/v1.6/net.yaml?k8s-version=Q2xpZW50IFZlcnNpb246IHZlcnNpb24uSW5mb3tNYWpvcjoiMSIsIE1pbm9yOiI2IiwgR2l0VmVyc2lvbjoidjEuNi43IiwgR2l0Q29tbWl0OiIwOTUxMzZjMzA3OGNjZjg4N2I5MDM0YjdjZTU5OGEwYTFmYWZmNzY5IiwgR2l0VHJlZVN0YXRlOiJjbGVhbiIsIEJ1aWxkRGF0ZToiMjAxNy0wNy0wNVQxNjo1MTo1NloiLCBHb1ZlcnNpb246ImdvMS43LjYiLCBDb21waWxlcjoiZ2MiLCBQbGF0Zm9ybToibGludXgvYW1kNjQifQpTZXJ2ZXIgVmVyc2lvbjogdmVyc2lvbi5JbmZve01ham9yOiIxIiwgTWlub3I6IjYiLCBHaXRWZXJzaW9uOiJ2MS42LjciLCBHaXRDb21taXQ6IjA5NTEzNmMzMDc4Y2NmODg3YjkwMzRiN2NlNTk4YTBhMWZhZmY3NjkiLCBHaXRUcmVlU3RhdGU6ImNsZWFuIiwgQnVpbGREYXRlOiIyMDE3LTA3LTA1VDE2OjQwOjQyWiIsIEdvVmVyc2lvbjoiZ28xLjcuNiIsIENvbXBpbGVyOiJnYyIsIFBsYXRmb3JtOiJsaW51eC9hbWQ2NCJ9Cg==&env.WEAVE_MTU=1337&env.IPALLOC_RANGE=192.168.0.0/16",
"date": "Sun Jul 30 2017 02:48:47 GMT+0000 (UTC)"
},
"email-address": "support@weave.works"
}
labels:
name: weave-net
rules:
- apiGroups:
- ''
resources:
- pods
- namespaces
- nodes
verbs:
- get
- list
- watch
- apiGroups:
- extensions
resources:
- networkpolicies
verbs:
- get
- list
- watch
- apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRoleBinding
metadata:
name: weave-net
annotations:
cloud.weave.works/launcher-info: |-
{
"server-version": "master-c3b4969",
"original-request": {
"url": "/k8s/v1.6/net.yaml?k8s-version=Q2xpZW50IFZlcnNpb246IHZlcnNpb24uSW5mb3tNYWpvcjoiMSIsIE1pbm9yOiI2IiwgR2l0VmVyc2lvbjoidjEuNi43IiwgR2l0Q29tbWl0OiIwOTUxMzZjMzA3OGNjZjg4N2I5MDM0YjdjZTU5OGEwYTFmYWZmNzY5IiwgR2l0VHJlZVN0YXRlOiJjbGVhbiIsIEJ1aWxkRGF0ZToiMjAxNy0wNy0wNVQxNjo1MTo1NloiLCBHb1ZlcnNpb246ImdvMS43LjYiLCBDb21waWxlcjoiZ2MiLCBQbGF0Zm9ybToibGludXgvYW1kNjQifQpTZXJ2ZXIgVmVyc2lvbjogdmVyc2lvbi5JbmZve01ham9yOiIxIiwgTWlub3I6IjYiLCBHaXRWZXJzaW9uOiJ2MS42LjciLCBHaXRDb21taXQ6IjA5NTEzNmMzMDc4Y2NmODg3YjkwMzRiN2NlNTk4YTBhMWZhZmY3NjkiLCBHaXRUcmVlU3RhdGU6ImNsZWFuIiwgQnVpbGREYXRlOiIyMDE3LTA3LTA1VDE2OjQwOjQyWiIsIEdvVmVyc2lvbjoiZ28xLjcuNiIsIENvbXBpbGVyOiJnYyIsIFBsYXRmb3JtOiJsaW51eC9hbWQ2NCJ9Cg==&env.WEAVE_MTU=1337&env.IPALLOC_RANGE=192.168.0.0/16",
"date": "Sun Jul 30 2017 02:48:47 GMT+0000 (UTC)"
},
"email-address": "support@weave.works"
}
labels:
name: weave-net
roleRef:
kind: ClusterRole
name: weave-net
apiGroup: rbac.authorization.k8s.io
subjects:
- kind: ServiceAccount
name: weave-net
namespace: kube-system
- apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: weave-net
annotations:
cloud.weave.works/launcher-info: |-
{
"server-version": "master-c3b4969",
"original-request": {
"url": "/k8s/v1.6/net.yaml?k8s-version=Q2xpZW50IFZlcnNpb246IHZlcnNpb24uSW5mb3tNYWpvcjoiMSIsIE1pbm9yOiI2IiwgR2l0VmVyc2lvbjoidjEuNi43IiwgR2l0Q29tbWl0OiIwOTUxMzZjMzA3OGNjZjg4N2I5MDM0YjdjZTU5OGEwYTFmYWZmNzY5IiwgR2l0VHJlZVN0YXRlOiJjbGVhbiIsIEJ1aWxkRGF0ZToiMjAxNy0wNy0wNVQxNjo1MTo1NloiLCBHb1ZlcnNpb246ImdvMS43LjYiLCBDb21waWxlcjoiZ2MiLCBQbGF0Zm9ybToibGludXgvYW1kNjQifQpTZXJ2ZXIgVmVyc2lvbjogdmVyc2lvbi5JbmZve01ham9yOiIxIiwgTWlub3I6IjYiLCBHaXRWZXJzaW9uOiJ2MS42LjciLCBHaXRDb21taXQ6IjA5NTEzNmMzMDc4Y2NmODg3YjkwMzRiN2NlNTk4YTBhMWZhZmY3NjkiLCBHaXRUcmVlU3RhdGU6ImNsZWFuIiwgQnVpbGREYXRlOiIyMDE3LTA3LTA1VDE2OjQwOjQyWiIsIEdvVmVyc2lvbjoiZ28xLjcuNiIsIENvbXBpbGVyOiJnYyIsIFBsYXRmb3JtOiJsaW51eC9hbWQ2NCJ9Cg==&env.WEAVE_MTU=1337&env.IPALLOC_RANGE=192.168.0.0/16",
"date": "Sun Jul 30 2017 02:48:47 GMT+0000 (UTC)"
},
"email-address": "support@weave.works"
}
labels:
name: weave-net
namespace: kube-system
spec:
template:
metadata:
labels:
name: weave-net
spec:
containers:
- name: weave
command:
- /home/weave/launch.sh
env:
- name: WEAVE_MTU
value: '1337'
- name: IPALLOC_RANGE
value: 192.168.0.0/16
- name: HOSTNAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
image: 'weaveworks/weave-kube:2.0.1'
imagePullPolicy: Always
livenessProbe:
httpGet:
host: 127.0.0.1
path: /status
port: 6784
initialDelaySeconds: 30
resources:
requests:
cpu: 10m
securityContext:
privileged: true
volumeMounts:
- name: weavedb
mountPath: /weavedb
- name: cni-bin
mountPath: /host/opt
- name: cni-bin2
mountPath: /host/home
- name: cni-conf
mountPath: /host/etc
- name: dbus
mountPath: /host/var/lib/dbus
- name: lib-modules
mountPath: /lib/modules
- name: weave-npc
env:
- name: HOSTNAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
image: 'weaveworks/weave-npc:2.0.1'
imagePullPolicy: Always
resources:
requests:
cpu: 10m
securityContext:
privileged: true
hostNetwork: true
hostPID: true
restartPolicy: Always
securityContext:
seLinuxOptions: {}
serviceAccountName: weave-net
tolerations:
- effect: NoSchedule
operator: Exists
volumes:
- name: weavedb
hostPath:
path: /var/lib/weave
- name: cni-bin
hostPath:
path: /opt
- name: cni-bin2
hostPath:
path: /home
- name: cni-conf
hostPath:
path: /etc
- name: dbus
hostPath:
path: /var/lib/dbus
- name: lib-modules
hostPath:
path: /lib/modules
updateStrategy:
type: RollingUpdate

@ -1,73 +0,0 @@
kind: Service
apiVersion: v1
metadata:
name: nfs-provisioner
labels:
app: nfs-provisioner
spec:
ports:
- name: nfs
port: 2049
- name: mountd
port: 20048
- name: rpcbind
port: 111
- name: rpcbind-udp
port: 111
protocol: UDP
selector:
app: nfs-provisioner
---
kind: Deployment
apiVersion: apps/v1beta1
metadata:
name: nfs-provisioner
spec:
replicas: 1
strategy:
type: Recreate
template:
metadata:
labels:
app: nfs-provisioner
spec:
containers:
- name: nfs-provisioner
image: quay.io/kubernetes_incubator/nfs-provisioner:v1.0.7
ports:
- name: nfs
containerPort: 2049
- name: mountd
containerPort: 20048
- name: rpcbind
containerPort: 111
- name: rpcbind-udp
containerPort: 111
protocol: UDP
securityContext:
capabilities:
add:
- DAC_READ_SEARCH
- SYS_RESOURCE
args:
- "-provisioner=example.com/nfs"
- "-grace-period=10"
env:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
- name: SERVICE_NAME
value: nfs-provisioner
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
imagePullPolicy: "IfNotPresent"
volumeMounts:
- name: export-volume
mountPath: /export
volumes:
- name: export-volume
hostPath:
path: /var/lib/nfs-provisioner

@ -1,5 +0,0 @@
kind: StorageClass
apiVersion: storage.k8s.io/v1
metadata:
name: general
provisioner: example.com/nfs

@ -1,15 +0,0 @@
apiVersion: rbac.authorization.k8s.io/v1alpha1
kind: ClusterRoleBinding
metadata:
name: cluster-admin
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: Group
name: system:masters
- kind: Group
name: system:authenticated
- kind: Group
name: system:unauthenticated

@ -1,95 +0,0 @@
#!/bin/bash
# Copyright 2017 The Openstack-Helm Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
set -xe
# Setup shared mounts for kubelet
sudo mkdir -p /var/lib/kubelet
sudo mount --bind /var/lib/kubelet /var/lib/kubelet
sudo mount --make-shared /var/lib/kubelet
# Cleanup any old deployment
sudo docker rm -f kubeadm-aio || true
sudo docker rm -f kubelet || true
sudo docker ps -aq | xargs -r -l1 -P16 sudo docker rm -f
sudo rm -rfv \
/etc/cni/net.d \
/etc/kubernetes \
/var/lib/etcd \
/var/etcd \
/var/lib/kubelet/* \
${HOME}/.kubeadm-aio/admin.conf \
/var/lib/nfs-provisioner || true
: ${KUBE_CNI:="calico"}
: ${CNI_POD_CIDR:="192.168.0.0/16"}
# Launch Container, refer to:
# https://docs.docker.com/engine/reference/run/
sudo docker run \
-dt \
--name=kubeadm-aio \
--net=host \
--security-opt=seccomp:unconfined \
--cap-add=SYS_ADMIN \
--tmpfs=/run \
--tmpfs=/run/lock \
--volume=/etc/machine-id:/etc/machine-id:ro \
--volume=${HOME}:${HOME}:rw \
--volume=${HOME}/.kubeadm-aio:/root:rw \
--volume=/etc/kubernetes:/etc/kubernetes:rw \
--volume=/sys/fs/cgroup:/sys/fs/cgroup:ro \
--volume=/var/run/docker.sock:/run/docker.sock \
--env KUBELET_CONTAINER=${KUBEADM_IMAGE} \
--env KUBE_VERSION=${KUBE_VERSION} \
--env KUBE_CNI=${KUBE_CNI} \
--env CNI_POD_CIDR=${CNI_POD_CIDR} \
${KUBEADM_IMAGE}
echo "Waiting for kubeconfig"
set +x
end=$(($(date +%s) + 600))
READY="False"
while true; do
if [ -f ${HOME}/.kubeadm-aio/admin.conf ]; then
READY="True"
fi
[ $READY == "True" ] && break || true
sleep 1
now=$(date +%s)
[ $now -gt $end ] && \
echo "KubeADM did not generate kubectl config in time" && \
sudo docker logs kubeadm-aio && exit -1
done
set -x
# Set perms of kubeconfig and set env-var
sudo chown $(id -u):$(id -g) ${HOME}/.kubeadm-aio/admin.conf
export KUBECONFIG=${HOME}/.kubeadm-aio/admin.conf
echo "Waiting for node to be ready before continuing"
set +x
end=$(($(date +%s) + 600))
READY="False"
while true; do
READY=$(kubectl get nodes --no-headers=true | awk "{ print \$2 }" | head -1)
[ $READY == "Ready" ] && break || true
sleep 1
now=$(date +%s)
[ $now -gt $end ] && \
echo "Kube node did not register as ready in time" && \
sudo docker logs kubeadm-aio && exit -1
done
set -x