Add administrative operations for some resources
Now, admin user could list/show/delete/update most of qinling resources, except function deletion. Because function deletion needs to delete the trust created for qinling service by its owner, only end user has the authority to delete their trust. Implements: blueprint qinling-admin-operations Change-Id: I9ec4df59fbf8ac50c96d9677dd74c54677b307a5
This commit is contained in:
parent
3e35a4b7d5
commit
4b06c280bb
@ -64,7 +64,7 @@ function configure_qinling {
|
||||
rm -f "$QINLING_CONF_DIR"/*
|
||||
|
||||
mkdir_chown_stack "$QINLING_FUNCTION_STORAGE_DIR"
|
||||
rm -f "$QINLING_FUNCTION_STORAGE_DIR"/*
|
||||
rm -rf "$QINLING_FUNCTION_STORAGE_DIR"/*
|
||||
|
||||
cp $QINLING_DIR/etc/policy.json.sample $QINLING_POLICY_FILE
|
||||
|
||||
|
@ -15,4 +15,8 @@
|
||||
"function:detach": "rule:context_is_admin",
|
||||
|
||||
"execution:get_all:all_projects": "rule:context_is_admin",
|
||||
|
||||
"webhook:get_all:all_projects": "rule:context_is_admin",
|
||||
|
||||
"job:get_all:all_projects": "rule:context_is_admin",
|
||||
}
|
||||
|
@ -77,14 +77,11 @@ class ExecutionsController(rest.RestController):
|
||||
:param status: Optional. Filter by execution status.
|
||||
:param description: Optional. Filter by description.
|
||||
"""
|
||||
ctx = context.get_ctx()
|
||||
if project_id and not ctx.is_admin:
|
||||
project_id = context.ctx().projectid
|
||||
if project_id and ctx.is_admin:
|
||||
all_projects = True
|
||||
|
||||
project_id, all_projects = rest_utils.get_project_params(
|
||||
project_id, all_projects
|
||||
)
|
||||
if all_projects:
|
||||
acl.enforce('execution:get_all:all_projects', ctx)
|
||||
acl.enforce('execution:get_all:all_projects', context.get_ctx())
|
||||
|
||||
filters = rest_utils.get_filters(
|
||||
function_id=function_id,
|
||||
|
@ -209,14 +209,11 @@ class FunctionsController(rest.RestController):
|
||||
resources, the param is ignored for normal user.
|
||||
:param all_projects: Optional. Get resources of all projects.
|
||||
"""
|
||||
ctx = context.get_ctx()
|
||||
if project_id and not ctx.is_admin:
|
||||
project_id = context.ctx().projectid
|
||||
if project_id and ctx.is_admin:
|
||||
all_projects = True
|
||||
|
||||
project_id, all_projects = rest_utils.get_project_params(
|
||||
project_id, all_projects
|
||||
)
|
||||
if all_projects:
|
||||
acl.enforce('function:get_all:all_projects', ctx)
|
||||
acl.enforce('function:get_all:all_projects', context.get_ctx())
|
||||
|
||||
filters = rest_utils.get_filters(
|
||||
project_id=project_id,
|
||||
|
@ -19,10 +19,13 @@ from oslo_config import cfg
|
||||
from oslo_log import log as logging
|
||||
from oslo_utils import timeutils
|
||||
from pecan import rest
|
||||
from wsme import types as wtypes
|
||||
import wsmeext.pecan as wsme_pecan
|
||||
|
||||
from qinling.api import access_control as acl
|
||||
from qinling.api.controllers.v1 import resources
|
||||
from qinling.api.controllers.v1 import types
|
||||
from qinling import context
|
||||
from qinling.db import api as db_api
|
||||
from qinling import exceptions as exc
|
||||
from qinling import status
|
||||
@ -90,12 +93,21 @@ class JobsController(rest.RestController):
|
||||
return resources.Job.from_dict(job_db.to_dict())
|
||||
|
||||
@rest_utils.wrap_wsme_controller_exception
|
||||
@wsme_pecan.wsexpose(resources.Jobs)
|
||||
def get_all(self):
|
||||
LOG.info("Get all %ss.", self.type)
|
||||
@wsme_pecan.wsexpose(resources.Jobs, bool, wtypes.text)
|
||||
def get_all(self, all_projects=False, project_id=None):
|
||||
project_id, all_projects = rest_utils.get_project_params(
|
||||
project_id, all_projects
|
||||
)
|
||||
if all_projects:
|
||||
acl.enforce('job:get_all:all_projects', context.get_ctx())
|
||||
|
||||
filters = rest_utils.get_filters(
|
||||
project_id=project_id,
|
||||
)
|
||||
LOG.info("Get all %ss. filters=%s", self.type, filters)
|
||||
db_jobs = db_api.get_jobs(insecure=all_projects, **filters)
|
||||
jobs = [resources.Job.from_dict(db_model.to_dict())
|
||||
for db_model in db_api.get_jobs()]
|
||||
for db_model in db_jobs]
|
||||
|
||||
return resources.Jobs(jobs=jobs)
|
||||
|
||||
|
@ -17,6 +17,7 @@ import json
|
||||
from oslo_log import log as logging
|
||||
import pecan
|
||||
from pecan import rest
|
||||
from wsme import types as wtypes
|
||||
import wsmeext.pecan as wsme_pecan
|
||||
|
||||
from qinling.api import access_control as acl
|
||||
@ -70,12 +71,21 @@ class WebhooksController(rest.RestController):
|
||||
return resources.Webhook.from_dict(self._add_webhook_url(id, webhook))
|
||||
|
||||
@rest_utils.wrap_wsme_controller_exception
|
||||
@wsme_pecan.wsexpose(resources.Webhooks)
|
||||
def get_all(self):
|
||||
LOG.info("Get all %ss.", self.type)
|
||||
@wsme_pecan.wsexpose(resources.Webhooks, bool, wtypes.text)
|
||||
def get_all(self, all_projects=False, project_id=None):
|
||||
project_id, all_projects = rest_utils.get_project_params(
|
||||
project_id, all_projects
|
||||
)
|
||||
if all_projects:
|
||||
acl.enforce('webhook:get_all:all_projects', context.get_ctx())
|
||||
|
||||
filters = rest_utils.get_filters(
|
||||
project_id=project_id,
|
||||
)
|
||||
|
||||
LOG.info("Get all %ss. filters=%s", self.type, filters)
|
||||
webhooks = []
|
||||
for i in db_api.get_webhooks():
|
||||
for i in db_api.get_webhooks(insecure=all_projects, **filters):
|
||||
webhooks.append(
|
||||
resources.Webhook.from_dict(
|
||||
self._add_webhook_url(i.id, i.to_dict())
|
||||
|
@ -137,7 +137,7 @@ kubernetes_opts = [
|
||||
),
|
||||
cfg.IntOpt(
|
||||
'replicas',
|
||||
default=5,
|
||||
default=3,
|
||||
help='Number of desired replicas in deployment.'
|
||||
),
|
||||
cfg.StrOpt(
|
||||
|
@ -171,8 +171,8 @@ def update_job(id, values):
|
||||
return IMPL.update_job(id, values)
|
||||
|
||||
|
||||
def get_jobs():
|
||||
return IMPL.get_jobs()
|
||||
def get_jobs(**kwargs):
|
||||
return IMPL.get_jobs(**kwargs)
|
||||
|
||||
|
||||
def delete_jobs(**kwargs):
|
||||
@ -187,8 +187,8 @@ def get_webhook(id, insecure=None):
|
||||
return IMPL.get_webhook(id, insecure=insecure)
|
||||
|
||||
|
||||
def get_webhooks():
|
||||
return IMPL.get_webhooks()
|
||||
def get_webhooks(**kwargs):
|
||||
return IMPL.get_webhooks(**kwargs)
|
||||
|
||||
|
||||
def delete_webhook(id):
|
||||
|
@ -20,7 +20,7 @@ spec:
|
||||
{{ key }}: {{ value }}
|
||||
{% endfor %}
|
||||
spec:
|
||||
terminationGracePeriodSeconds: 0
|
||||
terminationGracePeriodSeconds: 5
|
||||
containers:
|
||||
- name: {{ container_name }}
|
||||
image: {{ image }}
|
||||
@ -30,7 +30,7 @@ spec:
|
||||
resources:
|
||||
limits:
|
||||
cpu: "0.3"
|
||||
memory: 256Mi
|
||||
memory: 128Mi
|
||||
requests:
|
||||
cpu: "0.1"
|
||||
memory: 64Mi
|
||||
memory: 32Mi
|
||||
|
@ -7,7 +7,7 @@ metadata:
|
||||
{{ key }}: {{ value }}
|
||||
{% endfor %}
|
||||
spec:
|
||||
terminationGracePeriodSeconds: 0
|
||||
terminationGracePeriodSeconds: 5
|
||||
containers:
|
||||
- name: {{ pod_name }}
|
||||
image: {{ pod_image }}
|
||||
@ -22,7 +22,7 @@ spec:
|
||||
resources:
|
||||
limits:
|
||||
cpu: "0.3"
|
||||
memory: 256Mi
|
||||
memory: 128Mi
|
||||
requests:
|
||||
cpu: "0.1"
|
||||
memory: 64Mi
|
||||
memory: 32Mi
|
||||
|
@ -21,6 +21,7 @@ import six
|
||||
import webob
|
||||
from wsme import exc as wsme_exc
|
||||
|
||||
from qinling import context
|
||||
from qinling import exceptions as exc
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
@ -153,3 +154,14 @@ def _extract_filter_type_and_value(data):
|
||||
filter_type = 'eq'
|
||||
|
||||
return filter_type, value
|
||||
|
||||
|
||||
def get_project_params(project_id, all_projects):
|
||||
ctx = context.get_ctx()
|
||||
|
||||
if project_id and not ctx.is_admin:
|
||||
project_id = context.ctx().projectid
|
||||
if project_id and ctx.is_admin:
|
||||
all_projects = True
|
||||
|
||||
return project_id, all_projects
|
||||
|
@ -12,6 +12,8 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
from datetime import datetime
|
||||
from datetime import timedelta
|
||||
import json
|
||||
|
||||
import requests
|
||||
@ -107,3 +109,14 @@ class QinlingClient(client_base.QinlingClientBase):
|
||||
req_body = {"function_id": function_id}
|
||||
resp, body = self.post_json('webhooks', req_body)
|
||||
return resp, body
|
||||
|
||||
def create_job(self, function_id, first_execution_time=None):
|
||||
req_body = {"function_id": function_id}
|
||||
if not first_execution_time:
|
||||
first_execution_time = str(
|
||||
datetime.utcnow() + timedelta(hours=1)
|
||||
)
|
||||
req_body.update({'first_execution_time': first_execution_time})
|
||||
|
||||
resp, body = self.post_json('jobs', req_body)
|
||||
return resp, body
|
||||
|
@ -12,82 +12,26 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from concurrent import futures
|
||||
import os
|
||||
import pkg_resources
|
||||
import tempfile
|
||||
import zipfile
|
||||
|
||||
import futurist
|
||||
from oslo_serialization import jsonutils
|
||||
from tempest.lib.common.utils import data_utils
|
||||
from tempest.lib import decorators
|
||||
from tempest.lib import exceptions
|
||||
|
||||
from qinling_tempest_plugin.tests import base
|
||||
|
||||
|
||||
# TODO(kong): Be careful that for k8s cluster, the default pool size is 3,
|
||||
# maybe we need customize that in future if there are many test cases but with
|
||||
# insufficient pods.
|
||||
class ExecutionsTest(base.BaseQinlingTest):
|
||||
name_prefix = 'ExecutionsTest'
|
||||
|
||||
@classmethod
|
||||
def resource_setup(cls):
|
||||
super(ExecutionsTest, cls).resource_setup()
|
||||
|
||||
cls.runtime_id = None
|
||||
|
||||
# Create runtime for execution tests
|
||||
name = data_utils.rand_name('runtime', prefix=cls.name_prefix)
|
||||
_, body = cls.admin_client.create_runtime(
|
||||
'openstackqinling/python-runtime', name
|
||||
)
|
||||
cls.runtime_id = body['id']
|
||||
|
||||
@classmethod
|
||||
def resource_cleanup(cls):
|
||||
if cls.runtime_id:
|
||||
cls.admin_client.delete_resource('runtimes', cls.runtime_id,
|
||||
ignore_notfound=True)
|
||||
|
||||
super(ExecutionsTest, cls).resource_cleanup()
|
||||
|
||||
def setUp(self):
|
||||
super(ExecutionsTest, self).setUp()
|
||||
self.await_runtime_available(self.runtime_id)
|
||||
|
||||
def _create_function(self, name='python_test.py'):
|
||||
python_file_path = pkg_resources.resource_filename(
|
||||
'qinling_tempest_plugin',
|
||||
"functions/%s" % name
|
||||
)
|
||||
base_name, extention = os.path.splitext(python_file_path)
|
||||
module_name = os.path.basename(base_name)
|
||||
self.python_zip_file = os.path.join(
|
||||
tempfile.gettempdir(),
|
||||
'%s.zip' % module_name
|
||||
)
|
||||
|
||||
if not os.path.isfile(self.python_zip_file):
|
||||
zf = zipfile.ZipFile(self.python_zip_file, mode='w')
|
||||
try:
|
||||
# Use default compression mode, may change in future.
|
||||
zf.write(
|
||||
python_file_path,
|
||||
'%s%s' % (module_name, extention),
|
||||
compress_type=zipfile.ZIP_STORED
|
||||
)
|
||||
finally:
|
||||
zf.close()
|
||||
|
||||
self.function_id = self.create_function(self.python_zip_file)
|
||||
|
||||
@decorators.idempotent_id('2a93fab0-2dae-4748-b0d4-f06b735ff451')
|
||||
def test_crud_execution(self):
|
||||
self._create_function()
|
||||
|
||||
resp, body = self.client.create_execution(self.function_id,
|
||||
function_id = self.create_function()
|
||||
resp, body = self.client.create_execution(function_id,
|
||||
input='{"name": "Qinling"}')
|
||||
self.assertEqual(201, resp.status)
|
||||
execution_id_1 = body['id']
|
||||
@ -96,7 +40,7 @@ class ExecutionsTest(base.BaseQinlingTest):
|
||||
self.assertEqual('success', body['status'])
|
||||
|
||||
# Create another execution without input
|
||||
resp, body = self.client.create_execution(self.function_id)
|
||||
resp, body = self.client.create_execution(function_id)
|
||||
self.assertEqual(201, resp.status)
|
||||
execution_id_2 = body['id']
|
||||
self.addCleanup(self.client.delete_resource, 'executions',
|
||||
@ -119,10 +63,9 @@ class ExecutionsTest(base.BaseQinlingTest):
|
||||
@decorators.idempotent_id('2199d1e6-de7d-4345-8745-a8184d6022b1')
|
||||
def test_get_all_admin(self):
|
||||
"""Admin user can get executions of other projects"""
|
||||
self._create_function()
|
||||
|
||||
function_id = self.create_function()
|
||||
resp, body = self.client.create_execution(
|
||||
self.function_id, input='{"name": "Qinling"}'
|
||||
function_id, input='{"name": "Qinling"}'
|
||||
)
|
||||
self.assertEqual(201, resp.status)
|
||||
|
||||
@ -154,10 +97,8 @@ class ExecutionsTest(base.BaseQinlingTest):
|
||||
|
||||
@decorators.idempotent_id('8096cc52-64d2-4660-a657-9ac0bdd743ae')
|
||||
def test_execution_async(self):
|
||||
self._create_function()
|
||||
|
||||
resp, body = self.client.create_execution(self.function_id, sync=False)
|
||||
|
||||
function_id = self.create_function()
|
||||
resp, body = self.client.create_execution(function_id, sync=False)
|
||||
self.assertEqual(201, resp.status)
|
||||
|
||||
execution_id = body['id']
|
||||
@ -169,9 +110,9 @@ class ExecutionsTest(base.BaseQinlingTest):
|
||||
|
||||
@decorators.idempotent_id('6cb47b1d-a8c6-48f2-a92f-c4f613c33d1c')
|
||||
def test_execution_log(self):
|
||||
self._create_function()
|
||||
function_id = self.create_function()
|
||||
resp, body = self.client.create_execution(
|
||||
self.function_id, input='{"name": "OpenStack"}'
|
||||
function_id, input='{"name": "OpenStack"}'
|
||||
)
|
||||
|
||||
self.assertEqual(201, resp.status)
|
||||
@ -189,10 +130,11 @@ class ExecutionsTest(base.BaseQinlingTest):
|
||||
|
||||
@decorators.idempotent_id('f22097dc-37db-484d-83d3-3a97e72ec576')
|
||||
def test_execution_concurrency_no_scale(self):
|
||||
self._create_function(name='test_python_sleep.py')
|
||||
package = self.create_package(name='test_python_sleep.py')
|
||||
function_id = self.create_function(package_path=package)
|
||||
|
||||
def _create_execution():
|
||||
resp, body = self.client.create_execution(self.function_id)
|
||||
resp, body = self.client.create_execution(function_id)
|
||||
return resp, body
|
||||
|
||||
futs = []
|
||||
@ -209,18 +151,18 @@ class ExecutionsTest(base.BaseQinlingTest):
|
||||
body['id'], ignore_notfound=True)
|
||||
self.assertEqual('success', body['status'])
|
||||
|
||||
resp, body = self.admin_client.get_function_workers(self.function_id)
|
||||
resp, body = self.admin_client.get_function_workers(function_id)
|
||||
|
||||
self.assertEqual(200, resp.status)
|
||||
self.assertEqual(1, len(body['workers']))
|
||||
|
||||
@decorators.idempotent_id('a5ed173a-19b7-4c92-ac78-c8862ad1d1d2')
|
||||
def test_execution_concurrency_scale_up(self):
|
||||
self.await_runtime_available(self.runtime_id)
|
||||
self._create_function(name='test_python_sleep.py')
|
||||
package = self.create_package(name='test_python_sleep.py')
|
||||
function_id = self.create_function(package_path=package)
|
||||
|
||||
def _create_execution():
|
||||
resp, body = self.client.create_execution(self.function_id)
|
||||
resp, body = self.client.create_execution(function_id)
|
||||
return resp, body
|
||||
|
||||
futs = []
|
||||
@ -237,14 +179,16 @@ class ExecutionsTest(base.BaseQinlingTest):
|
||||
body['id'], ignore_notfound=True)
|
||||
self.assertEqual('success', body['status'])
|
||||
|
||||
resp, body = self.admin_client.get_function_workers(self.function_id)
|
||||
resp, body = self.admin_client.get_function_workers(function_id)
|
||||
self.assertEqual(200, resp.status)
|
||||
self.assertEqual(2, len(body['workers']))
|
||||
|
||||
@decorators.idempotent_id('ccfe67ce-e467-11e7-916c-00224d6b7bc1')
|
||||
def test_python_execution_positional_args(self):
|
||||
self._create_function(name='test_python_positional_args.py')
|
||||
resp, body = self.client.create_execution(self.function_id,
|
||||
package = self.create_package(name='test_python_positional_args.py')
|
||||
function_id = self.create_function(package_path=package)
|
||||
|
||||
resp, body = self.client.create_execution(function_id,
|
||||
input='Qinling')
|
||||
|
||||
self.assertEqual(201, resp.status)
|
||||
@ -257,9 +201,10 @@ class ExecutionsTest(base.BaseQinlingTest):
|
||||
|
||||
@decorators.idempotent_id('a948382a-84af-4f0e-ad08-4297345e302c')
|
||||
def test_python_execution_file_limit(self):
|
||||
self._create_function(name='test_python_file_limit.py')
|
||||
package = self.create_package(name='test_python_file_limit.py')
|
||||
function_id = self.create_function(package_path=package)
|
||||
|
||||
resp, body = self.client.create_execution(self.function_id)
|
||||
resp, body = self.client.create_execution(function_id)
|
||||
|
||||
self.assertEqual(201, resp.status)
|
||||
self.addCleanup(self.client.delete_resource, 'executions',
|
||||
@ -273,8 +218,10 @@ class ExecutionsTest(base.BaseQinlingTest):
|
||||
|
||||
@decorators.idempotent_id('bf6f8f35-fa88-469b-8878-7aa85a8ce5ab')
|
||||
def test_python_execution_process_number(self):
|
||||
self._create_function(name='test_python_process_limit.py')
|
||||
resp, body = self.client.create_execution(self.function_id)
|
||||
package = self.create_package(name='test_python_process_limit.py')
|
||||
function_id = self.create_function(package_path=package)
|
||||
|
||||
resp, body = self.client.create_execution(function_id)
|
||||
|
||||
self.assertEqual(201, resp.status)
|
||||
self.addCleanup(self.client.delete_resource, 'executions',
|
||||
|
@ -12,11 +12,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import os
|
||||
import pkg_resources
|
||||
import tempfile
|
||||
import zipfile
|
||||
|
||||
from tempest.lib.common.utils import data_utils
|
||||
from tempest.lib import decorators
|
||||
from tempest.lib import exceptions
|
||||
import tenacity
|
||||
@ -27,54 +23,12 @@ from qinling_tempest_plugin.tests import base
|
||||
class FunctionsTest(base.BaseQinlingTest):
|
||||
name_prefix = 'FunctionsTest'
|
||||
|
||||
@classmethod
|
||||
def resource_setup(cls):
|
||||
super(FunctionsTest, cls).resource_setup()
|
||||
|
||||
cls.runtime_id = None
|
||||
|
||||
# Create runtime for function tests
|
||||
name = data_utils.rand_name('runtime', prefix=cls.name_prefix)
|
||||
_, body = cls.admin_client.create_runtime(
|
||||
'openstackqinling/python-runtime', name
|
||||
)
|
||||
cls.runtime_id = body['id']
|
||||
|
||||
@classmethod
|
||||
def resource_cleanup(cls):
|
||||
if cls.runtime_id:
|
||||
cls.admin_client.delete_resource('runtimes', cls.runtime_id)
|
||||
|
||||
super(FunctionsTest, cls).resource_cleanup()
|
||||
|
||||
def setUp(self):
|
||||
super(FunctionsTest, self).setUp()
|
||||
|
||||
# Wait until runtime is available
|
||||
self.await_runtime_available(self.runtime_id)
|
||||
|
||||
python_file_path = pkg_resources.resource_filename(
|
||||
'qinling_tempest_plugin',
|
||||
"functions/python_test.py"
|
||||
)
|
||||
base_name, extention = os.path.splitext(python_file_path)
|
||||
module_name = os.path.basename(base_name)
|
||||
self.python_zip_file = os.path.join(
|
||||
tempfile.gettempdir(),
|
||||
'%s.zip' % module_name
|
||||
)
|
||||
|
||||
if not os.path.isfile(self.python_zip_file):
|
||||
zf = zipfile.ZipFile(self.python_zip_file, mode='w')
|
||||
try:
|
||||
# Use default compression mode, may change in future.
|
||||
zf.write(
|
||||
python_file_path,
|
||||
'%s%s' % (module_name, extention),
|
||||
compress_type=zipfile.ZIP_STORED
|
||||
)
|
||||
finally:
|
||||
zf.close()
|
||||
self.python_zip_file = self.create_package()
|
||||
|
||||
@decorators.idempotent_id('9c36ac64-9a44-4c44-9e44-241dcc6b0933')
|
||||
def test_crud_function(self):
|
||||
|
40
qinling_tempest_plugin/tests/api/test_jobs.py
Normal file
40
qinling_tempest_plugin/tests/api/test_jobs.py
Normal file
@ -0,0 +1,40 @@
|
||||
# Copyright 2018 Catalyst IT Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
from tempest.lib import decorators
|
||||
|
||||
from qinling_tempest_plugin.tests import base
|
||||
|
||||
|
||||
class JobsTest(base.BaseQinlingTest):
|
||||
name_prefix = 'JobsTest'
|
||||
|
||||
def setUp(self):
|
||||
super(JobsTest, self).setUp()
|
||||
|
||||
self.await_runtime_available(self.runtime_id)
|
||||
self.function_id = self.create_function()
|
||||
|
||||
@decorators.idempotent_id('68e4d562-f762-11e7-875d-00224d6b7bc1')
|
||||
def test_get_all_admin(self):
|
||||
"""Admin user can get jobs of other projects"""
|
||||
job_id = self.create_job(self.function_id)
|
||||
|
||||
resp, body = self.admin_client.get_resources(
|
||||
'jobs?all_projects=true'
|
||||
)
|
||||
self.assertEqual(200, resp.status)
|
||||
self.assertIn(
|
||||
job_id,
|
||||
[item['id'] for item in body['jobs']]
|
||||
)
|
@ -19,14 +19,12 @@ from qinling_tempest_plugin.tests import base
|
||||
|
||||
class RuntimesTest(base.BaseQinlingTest):
|
||||
name_prefix = 'RuntimesTest'
|
||||
create_runtime = False
|
||||
|
||||
@decorators.idempotent_id('fdc2f07f-dd1d-4981-86d3-5bc7908d9a9b')
|
||||
def test_crud_runtime(self):
|
||||
name = data_utils.rand_name('runtime', prefix=self.name_prefix)
|
||||
|
||||
resp, body = self.admin_client.create_runtime(
|
||||
'openstackqinling/python-runtime', name
|
||||
)
|
||||
resp, body = self.admin_client.create_runtime(self.image, name)
|
||||
|
||||
self.assertEqual(201, resp.status)
|
||||
self.assertEqual(name, body['name'])
|
||||
@ -72,7 +70,7 @@ class RuntimesTest(base.BaseQinlingTest):
|
||||
"""
|
||||
name = data_utils.rand_name('runtime', prefix=self.name_prefix)
|
||||
resp, body = self.admin_client.create_runtime(
|
||||
'openstackqinling/python-runtime', name, is_public=False
|
||||
self.image, name, is_public=False
|
||||
)
|
||||
|
||||
self.assertEqual(201, resp.status)
|
||||
|
@ -11,13 +11,7 @@
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import os
|
||||
import pkg_resources
|
||||
import tempfile
|
||||
import zipfile
|
||||
|
||||
import requests
|
||||
from tempest.lib.common.utils import data_utils
|
||||
from tempest.lib import decorators
|
||||
|
||||
from qinling_tempest_plugin.tests import base
|
||||
@ -26,56 +20,10 @@ from qinling_tempest_plugin.tests import base
|
||||
class WebhooksTest(base.BaseQinlingTest):
|
||||
name_prefix = 'WebhooksTest'
|
||||
|
||||
@classmethod
|
||||
def resource_setup(cls):
|
||||
super(WebhooksTest, cls).resource_setup()
|
||||
|
||||
cls.runtime_id = None
|
||||
|
||||
name = data_utils.rand_name('runtime', prefix=cls.name_prefix)
|
||||
_, body = cls.admin_client.create_runtime(
|
||||
'openstackqinling/python-runtime', name
|
||||
)
|
||||
cls.runtime_id = body['id']
|
||||
|
||||
@classmethod
|
||||
def resource_cleanup(cls):
|
||||
if cls.runtime_id:
|
||||
cls.admin_client.delete_resource('runtimes', cls.runtime_id,
|
||||
ignore_notfound=True)
|
||||
|
||||
super(WebhooksTest, cls).resource_cleanup()
|
||||
|
||||
def setUp(self):
|
||||
super(WebhooksTest, self).setUp()
|
||||
self.await_runtime_available(self.runtime_id)
|
||||
self._create_function()
|
||||
|
||||
def _create_function(self, name='python_test.py'):
|
||||
python_file_path = pkg_resources.resource_filename(
|
||||
'qinling_tempest_plugin',
|
||||
"functions/%s" % name
|
||||
)
|
||||
base_name, extention = os.path.splitext(python_file_path)
|
||||
module_name = os.path.basename(base_name)
|
||||
self.python_zip_file = os.path.join(
|
||||
tempfile.gettempdir(),
|
||||
'%s.zip' % module_name
|
||||
)
|
||||
|
||||
if not os.path.isfile(self.python_zip_file):
|
||||
zf = zipfile.ZipFile(self.python_zip_file, mode='w')
|
||||
try:
|
||||
# Use default compression mode, may change in future.
|
||||
zf.write(
|
||||
python_file_path,
|
||||
'%s%s' % (module_name, extention),
|
||||
compress_type=zipfile.ZIP_STORED
|
||||
)
|
||||
finally:
|
||||
zf.close()
|
||||
|
||||
self.function_id = self.create_function(self.python_zip_file)
|
||||
self.function_id = self.create_function()
|
||||
|
||||
@decorators.idempotent_id('37DCD022-32D6-48D1-B90C-31D605DBE53B')
|
||||
def test_webhook_invoke(self):
|
||||
@ -99,3 +47,17 @@ class WebhooksTest(base.BaseQinlingTest):
|
||||
resp, body = self.client.get_execution_log(exec_id)
|
||||
self.assertEqual(200, resp.status)
|
||||
self.assertIn('qinling', body)
|
||||
|
||||
@decorators.idempotent_id('8e6e4f76-f748-11e7-8ec3-00224d6b7bc1')
|
||||
def test_get_all_admin(self):
|
||||
"""Admin user can get webhooks of other projects"""
|
||||
webhook_id, _ = self.create_webhook()
|
||||
|
||||
resp, body = self.admin_client.get_resources(
|
||||
'webhooks?all_projects=true'
|
||||
)
|
||||
self.assertEqual(200, resp.status)
|
||||
self.assertIn(
|
||||
webhook_id,
|
||||
[item['id'] for item in body['webhooks']]
|
||||
)
|
||||
|
@ -12,6 +12,9 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import os
|
||||
import pkg_resources
|
||||
import tempfile
|
||||
import zipfile
|
||||
|
||||
from tempest import config
|
||||
from tempest.lib.common.utils import data_utils
|
||||
@ -25,6 +28,8 @@ CONF = config.CONF
|
||||
|
||||
class BaseQinlingTest(test.BaseTestCase):
|
||||
credentials = ('admin', 'primary', 'alt')
|
||||
create_runtime = True
|
||||
image = 'openstackqinling/python-runtime'
|
||||
|
||||
@classmethod
|
||||
def skip_checks(cls):
|
||||
@ -47,6 +52,26 @@ class BaseQinlingTest(test.BaseTestCase):
|
||||
cls.k8s_v1extention = clients['v1extention']
|
||||
cls.namespace = 'qinling'
|
||||
|
||||
@classmethod
|
||||
def resource_setup(cls):
|
||||
super(BaseQinlingTest, cls).resource_setup()
|
||||
|
||||
if cls.create_runtime:
|
||||
cls.runtime_id = None
|
||||
name = data_utils.rand_name('runtime', prefix=cls.name_prefix)
|
||||
_, body = cls.admin_client.create_runtime(cls.image, name)
|
||||
cls.runtime_id = body['id']
|
||||
|
||||
@classmethod
|
||||
def resource_cleanup(cls):
|
||||
if cls.create_runtime and cls.runtime_id:
|
||||
cls.admin_client.delete_resource(
|
||||
'runtimes', cls.runtime_id,
|
||||
ignore_notfound=True
|
||||
)
|
||||
|
||||
super(BaseQinlingTest, cls).resource_cleanup()
|
||||
|
||||
@tenacity.retry(
|
||||
wait=tenacity.wait_fixed(3),
|
||||
stop=tenacity.stop_after_attempt(10),
|
||||
@ -69,11 +94,37 @@ class BaseQinlingTest(test.BaseTestCase):
|
||||
self.assertEqual(200, resp.status)
|
||||
self.assertEqual('success', body['status'])
|
||||
|
||||
def create_package(self, name="python_test.py"):
|
||||
python_file_path = pkg_resources.resource_filename(
|
||||
'qinling_tempest_plugin',
|
||||
"functions/%s" % name
|
||||
)
|
||||
base_name, _ = os.path.splitext(python_file_path)
|
||||
module_name = os.path.basename(base_name)
|
||||
python_zip_file = os.path.join(
|
||||
tempfile.gettempdir(),
|
||||
'%s.zip' % module_name
|
||||
)
|
||||
|
||||
if not os.path.isfile(python_zip_file):
|
||||
zf = zipfile.PyZipFile(python_zip_file, mode='w')
|
||||
try:
|
||||
zf.writepy(python_file_path)
|
||||
finally:
|
||||
zf.close()
|
||||
|
||||
self.addCleanup(os.remove, python_zip_file)
|
||||
return python_zip_file
|
||||
|
||||
def create_function(self, package_path=None, image=False):
|
||||
function_name = data_utils.rand_name('function',
|
||||
prefix=self.name_prefix)
|
||||
function_name = data_utils.rand_name(
|
||||
'function',
|
||||
prefix=self.name_prefix
|
||||
)
|
||||
|
||||
if not image:
|
||||
if not package_path:
|
||||
package_path = self.create_package()
|
||||
base_name, _ = os.path.splitext(package_path)
|
||||
module_name = os.path.basename(base_name)
|
||||
with open(package_path, 'rb') as package_data:
|
||||
@ -84,7 +135,6 @@ class BaseQinlingTest(test.BaseTestCase):
|
||||
package_data=package_data,
|
||||
entry='%s.main' % module_name
|
||||
)
|
||||
self.addCleanup(os.remove, package_path)
|
||||
else:
|
||||
resp, body = self.client.create_function(
|
||||
{"source": "image", "image": "openstackqinling/alpine-test"},
|
||||
@ -107,3 +157,15 @@ class BaseQinlingTest(test.BaseTestCase):
|
||||
webhook_id, ignore_notfound=True)
|
||||
|
||||
return webhook_id, body['webhook_url']
|
||||
|
||||
def create_job(self, function_id=None):
|
||||
if not function_id:
|
||||
function_id = self.create_function()
|
||||
resp, body = self.client.create_job(function_id)
|
||||
self.assertEqual(201, resp.status)
|
||||
job_id = body['id']
|
||||
|
||||
self.addCleanup(self.client.delete_resource, 'jobs',
|
||||
job_id, ignore_notfound=True)
|
||||
|
||||
return job_id
|
||||
|
@ -11,10 +11,6 @@
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
import os
|
||||
import pkg_resources
|
||||
import tempfile
|
||||
import zipfile
|
||||
|
||||
from tempest.lib.common.utils import data_utils
|
||||
from tempest.lib import decorators
|
||||
@ -24,33 +20,7 @@ from qinling_tempest_plugin.tests import base
|
||||
|
||||
class BasicOpsTest(base.BaseQinlingTest):
|
||||
name_prefix = 'BasicOpsTest'
|
||||
|
||||
def setUp(self):
|
||||
super(BasicOpsTest, self).setUp()
|
||||
|
||||
python_file_path = pkg_resources.resource_filename(
|
||||
'qinling_tempest_plugin',
|
||||
"functions/python_test.py"
|
||||
)
|
||||
|
||||
base_name, extention = os.path.splitext(python_file_path)
|
||||
self.base_name = os.path.basename(base_name)
|
||||
self.python_zip_file = os.path.join(
|
||||
tempfile.gettempdir(),
|
||||
'%s.zip' % self.base_name
|
||||
)
|
||||
|
||||
if not os.path.isfile(self.python_zip_file):
|
||||
zf = zipfile.ZipFile(self.python_zip_file, mode='w')
|
||||
try:
|
||||
# Use default compression mode, may change in future.
|
||||
zf.write(
|
||||
python_file_path,
|
||||
'%s%s' % (self.base_name, extention),
|
||||
compress_type=zipfile.ZIP_STORED
|
||||
)
|
||||
finally:
|
||||
zf.close()
|
||||
create_runtime = False
|
||||
|
||||
@decorators.idempotent_id('205fd749-2468-4d9f-9c05-45558d6d8f9e')
|
||||
def test_basic_ops(self):
|
||||
@ -62,44 +32,25 @@ class BasicOpsTest(base.BaseQinlingTest):
|
||||
4. Check result and execution log.
|
||||
"""
|
||||
name = data_utils.rand_name('runtime', prefix=self.name_prefix)
|
||||
resp, body = self.admin_client.create_runtime(
|
||||
'openstackqinling/python-runtime', name
|
||||
)
|
||||
|
||||
resp, body = self.admin_client.create_runtime(self.image, name)
|
||||
self.assertEqual(201, resp.status)
|
||||
self.assertEqual(name, body['name'])
|
||||
|
||||
# Wait for runtime to be available
|
||||
runtime_id = body['id']
|
||||
self.await_runtime_available(runtime_id)
|
||||
self.runtime_id = body['id']
|
||||
self.await_runtime_available(self.runtime_id)
|
||||
self.addCleanup(self.admin_client.delete_resource, 'runtimes',
|
||||
runtime_id, ignore_notfound=True)
|
||||
self.runtime_id, ignore_notfound=True)
|
||||
|
||||
# Create function
|
||||
function_name = data_utils.rand_name('function',
|
||||
prefix=self.name_prefix)
|
||||
with open(self.python_zip_file, 'rb') as package_data:
|
||||
resp, body = self.client.create_function(
|
||||
{"source": "package"},
|
||||
runtime_id,
|
||||
name=function_name,
|
||||
package_data=package_data,
|
||||
entry='%s.main' % self.base_name
|
||||
)
|
||||
function_id = body['id']
|
||||
|
||||
self.assertEqual(201, resp.status_code)
|
||||
self.addCleanup(self.client.delete_resource, 'functions',
|
||||
function_id, ignore_notfound=True)
|
||||
function_id = self.create_function()
|
||||
|
||||
# Invoke function
|
||||
resp, body = self.client.create_execution(
|
||||
function_id, input='{"name": "Qinling"}'
|
||||
)
|
||||
|
||||
self.assertEqual(201, resp.status)
|
||||
self.assertEqual('success', body['status'])
|
||||
|
||||
# self.assertEqual(201, resp.status)
|
||||
# self.assertEqual('success', body['status'])
|
||||
execution_id = body['id']
|
||||
self.addCleanup(self.client.delete_resource, 'executions',
|
||||
execution_id, ignore_notfound=True)
|
||||
|
@ -79,10 +79,10 @@ def _download_package(url, zip_file, token=None):
|
||||
headers = {'X-Auth-Token': token}
|
||||
|
||||
try:
|
||||
r = requests.get(url, headers=headers, stream=True,
|
||||
verify=False, timeout=5)
|
||||
r = requests.get(url, headers=headers, stream=True, timeout=5,
|
||||
verify=False)
|
||||
if r.status_code != 200:
|
||||
return _get_responce(
|
||||
return False, _get_responce(
|
||||
DOWNLOAD_ERROR % (url, r.content), 0, '', False, 500
|
||||
)
|
||||
|
||||
@ -90,12 +90,14 @@ def _download_package(url, zip_file, token=None):
|
||||
for chunk in r.iter_content(chunk_size=65535):
|
||||
fd.write(chunk)
|
||||
except Exception as e:
|
||||
return _get_responce(
|
||||
return False, _get_responce(
|
||||
DOWNLOAD_ERROR % (url, str(e)), 0, '', False, 500
|
||||
)
|
||||
|
||||
app.logger.info('Downloaded function package to %s' % zip_file)
|
||||
|
||||
return True, None
|
||||
|
||||
|
||||
def _invoke_function(execution_id, zip_file, module_name, method, arg, input,
|
||||
return_dict):
|
||||
@ -165,7 +167,10 @@ def execute():
|
||||
if not downloading and not downloaded:
|
||||
downloading = True
|
||||
|
||||
_download_package(download_url, zip_file, params.get('token'))
|
||||
ret, resp = _download_package(download_url, zip_file,
|
||||
params.get('token'))
|
||||
if not ret:
|
||||
return resp
|
||||
|
||||
downloading = False
|
||||
downloaded = True
|
||||
|
97
tools/gate/dump_logs.sh
Normal file
97
tools/gate/dump_logs.sh
Normal file
@ -0,0 +1,97 @@
|
||||
#!/bin/bash
|
||||
set +xe
|
||||
|
||||
# if we can't find kubectl, fail immediately because it is likely
|
||||
# the whitespace linter fails - no point to collect logs.
|
||||
if ! type "kubectl" &> /dev/null; then
|
||||
exit $1
|
||||
fi
|
||||
|
||||
echo "Capturing logs from environment."
|
||||
mkdir -p ${LOGS_DIR}/k8s/etc
|
||||
sudo cp -a /etc/kubernetes ${LOGS_DIR}/k8s/etc
|
||||
sudo chmod 777 --recursive ${LOGS_DIR}/*
|
||||
|
||||
mkdir -p ${LOGS_DIR}/k8s
|
||||
for OBJECT_TYPE in nodes \
|
||||
namespace \
|
||||
storageclass; do
|
||||
kubectl get ${OBJECT_TYPE} -o yaml > ${LOGS_DIR}/k8s/${OBJECT_TYPE}.yaml
|
||||
done
|
||||
kubectl describe nodes > ${LOGS_DIR}/k8s/nodes.txt
|
||||
for OBJECT_TYPE in svc \
|
||||
pods \
|
||||
jobs \
|
||||
deployments \
|
||||
daemonsets \
|
||||
statefulsets \
|
||||
configmaps \
|
||||
secrets; do
|
||||
kubectl get --all-namespaces ${OBJECT_TYPE} -o yaml > \
|
||||
${LOGS_DIR}/k8s/${OBJECT_TYPE}.yaml
|
||||
done
|
||||
|
||||
mkdir -p ${LOGS_DIR}/k8s/pods
|
||||
kubectl get pods -a --all-namespaces -o json | jq -r \
|
||||
'.items[].metadata | .namespace + " " + .name' | while read line; do
|
||||
NAMESPACE=$(echo $line | awk '{print $1}')
|
||||
NAME=$(echo $line | awk '{print $2}')
|
||||
kubectl get --namespace $NAMESPACE pod $NAME -o json | jq -r \
|
||||
'.spec.containers[].name' | while read line; do
|
||||
CONTAINER=$(echo $line | awk '{print $1}')
|
||||
kubectl logs $NAME --namespace $NAMESPACE -c $CONTAINER > \
|
||||
${LOGS_DIR}/k8s/pods/$NAMESPACE-$NAME-$CONTAINER.txt
|
||||
done
|
||||
done
|
||||
|
||||
mkdir -p ${LOGS_DIR}/k8s/svc
|
||||
kubectl get svc -o json --all-namespaces | jq -r \
|
||||
'.items[].metadata | .namespace + " " + .name' | while read line; do
|
||||
NAMESPACE=$(echo $line | awk '{print $1}')
|
||||
NAME=$(echo $line | awk '{print $2}')
|
||||
kubectl describe svc $NAME --namespace $NAMESPACE > \
|
||||
${LOGS_DIR}/k8s/svc/$NAMESPACE-$NAME.txt
|
||||
done
|
||||
|
||||
mkdir -p ${LOGS_DIR}/k8s/pvc
|
||||
kubectl get pvc -o json --all-namespaces | jq -r \
|
||||
'.items[].metadata | .namespace + " " + .name' | while read line; do
|
||||
NAMESPACE=$(echo $line | awk '{print $1}')
|
||||
NAME=$(echo $line | awk '{print $2}')
|
||||
kubectl describe pvc $NAME --namespace $NAMESPACE > \
|
||||
${LOGS_DIR}/k8s/pvc/$NAMESPACE-$NAME.txt
|
||||
done
|
||||
|
||||
mkdir -p ${LOGS_DIR}/k8s/rbac
|
||||
for OBJECT_TYPE in clusterroles \
|
||||
roles \
|
||||
clusterrolebindings \
|
||||
rolebindings; do
|
||||
kubectl get ${OBJECT_TYPE} -o yaml > ${LOGS_DIR}/k8s/rbac/${OBJECT_TYPE}.yaml
|
||||
done
|
||||
|
||||
mkdir -p ${LOGS_DIR}/k8s/descriptions
|
||||
for NAMESPACE in $(kubectl get namespaces -o name | awk -F '/' '{ print $NF }') ; do
|
||||
for OBJECT in $(kubectl get all --show-all -n $NAMESPACE -o name) ; do
|
||||
OBJECT_TYPE=$(echo $OBJECT | awk -F '/' '{ print $1 }')
|
||||
OBJECT_NAME=$(echo $OBJECT | awk -F '/' '{ print $2 }')
|
||||
mkdir -p ${LOGS_DIR}/k8s/descriptions/${NAMESPACE}/${OBJECT_TYPE}
|
||||
kubectl describe -n $NAMESPACE $OBJECT > ${LOGS_DIR}/k8s/descriptions/${NAMESPACE}/$OBJECT_TYPE/$OBJECT_NAME.txt
|
||||
done
|
||||
done
|
||||
|
||||
NODE_NAME=$(hostname)
|
||||
mkdir -p ${LOGS_DIR}/nodes/${NODE_NAME}
|
||||
echo "${NODE_NAME}" > ${LOGS_DIR}/nodes/master.txt
|
||||
sudo docker logs kubelet 2> ${LOGS_DIR}/nodes/${NODE_NAME}/kubelet.txt
|
||||
sudo docker logs kubeadm-aio 2>&1 > ${LOGS_DIR}/nodes/${NODE_NAME}/kubeadm-aio.txt
|
||||
sudo docker images --digests --no-trunc --all > ${LOGS_DIR}/nodes/${NODE_NAME}/images.txt
|
||||
sudo du -h --max-depth=1 /var/lib/docker | sort -hr > ${LOGS_DIR}/nodes/${NODE_NAME}/docker-size.txt
|
||||
sudo iptables-save > ${LOGS_DIR}/nodes/${NODE_NAME}/iptables.txt
|
||||
sudo ip a > ${LOGS_DIR}/nodes/${NODE_NAME}/ip.txt
|
||||
sudo route -n > ${LOGS_DIR}/nodes/${NODE_NAME}/routes.txt
|
||||
sudo arp -a > ${LOGS_DIR}/nodes/${NODE_NAME}/arp.txt
|
||||
cat /etc/resolv.conf > ${LOGS_DIR}/nodes/${NODE_NAME}/resolv.conf
|
||||
sudo lshw > ${LOGS_DIR}/nodes/${NODE_NAME}/hardware.txt
|
||||
|
||||
exit $1
|
@ -42,27 +42,14 @@ function base_install {
|
||||
fi
|
||||
}
|
||||
|
||||
function gate_base_setup {
|
||||
# Install base requirements
|
||||
base_install
|
||||
|
||||
# Install and setup iscsi loopback devices if required.
|
||||
if [ "x$LOOPBACK_CREATE" == "xtrue" ]; then
|
||||
loopback_support_install
|
||||
loopback_setup
|
||||
fi
|
||||
|
||||
# Install support packages for pvc backends
|
||||
if [ "x$PVC_BACKEND" == "xceph" ]; then
|
||||
ceph_support_install
|
||||
elif [ "x$PVC_BACKEND" == "xnfs" ]; then
|
||||
nfs_support_install
|
||||
fi
|
||||
}
|
||||
|
||||
function create_k8s_screen {
|
||||
# Starts a proxy to the Kubernetes API server in a screen session
|
||||
sudo screen -S kube_proxy -X quit || true
|
||||
sudo screen -dmS kube_proxy && sudo screen -S kube_proxy -X screen -t kube_proxy
|
||||
sudo screen -S kube_proxy -p kube_proxy -X stuff 'kubectl proxy --accept-hosts=".*" --address="0.0.0.0"\n'
|
||||
}
|
||||
|
||||
function gate_base_setup {
|
||||
# Install base requirements
|
||||
base_install
|
||||
}
|
||||
|
150
tools/gate/funcs/kube.sh
Normal file
150
tools/gate/funcs/kube.sh
Normal file
@ -0,0 +1,150 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
function kube_wait_for_pods {
|
||||
# From Kolla-Kubernetes, orginal authors Kevin Fox & Serguei Bezverkhi
|
||||
# Default wait timeout is 180 seconds
|
||||
set +x
|
||||
end=$(date +%s)
|
||||
if ! [ -z $2 ]; then
|
||||
end=$((end + $2))
|
||||
else
|
||||
end=$((end + 180))
|
||||
fi
|
||||
while true; do
|
||||
kubectl get pods --namespace=$1 -o json | jq -r \
|
||||
'.items[].status.phase' | grep Pending > /dev/null && \
|
||||
PENDING=True || PENDING=False
|
||||
query='.items[]|select(.status.phase=="Running")'
|
||||
query="$query|.status.containerStatuses[].ready"
|
||||
kubectl get pods --namespace=$1 -o json | jq -r "$query" | \
|
||||
grep false > /dev/null && READY="False" || READY="True"
|
||||
kubectl get jobs -o json --namespace=$1 | jq -r \
|
||||
'.items[] | .spec.completions == .status.succeeded' | \
|
||||
grep false > /dev/null && JOBR="False" || JOBR="True"
|
||||
[ $PENDING == "False" -a $READY == "True" -a $JOBR == "True" ] && \
|
||||
break || true
|
||||
sleep 1
|
||||
now=$(date +%s)
|
||||
[ $now -gt $end ] && echo "containers failed to start." && \
|
||||
kubectl get pods --namespace $1 -o wide && exit -1
|
||||
done
|
||||
set -x
|
||||
}
|
||||
|
||||
function kube_wait_for_nodes {
|
||||
# Default wait timeout is 180 seconds
|
||||
set +x
|
||||
end=$(date +%s)
|
||||
if ! [ -z $2 ]; then
|
||||
end=$((end + $2))
|
||||
else
|
||||
end=$((end + 180))
|
||||
fi
|
||||
while true; do
|
||||
NUMBER_OF_NODES_EXPECTED=$1
|
||||
NUMBER_OF_NODES=$(kubectl get nodes --no-headers -o name | wc -l)
|
||||
[ $NUMBER_OF_NODES -eq $NUMBER_OF_NODES_EXPECTED ] && \
|
||||
NODES_ONLINE="True" || NODES_ONLINE="False"
|
||||
while read SUB_NODE; do
|
||||
echo $SUB_NODE | grep -q ^Ready && NODES_READY="True" || NODES_READY="False"
|
||||
done < <(kubectl get nodes --no-headers | awk '{ print $2 }')
|
||||
[ $NODES_ONLINE == "True" -a $NODES_READY == "True" ] && \
|
||||
break || true
|
||||
sleep 5
|
||||
now=$(date +%s)
|
||||
[ $now -gt $end ] && echo "Nodes Failed to be ready in time." && \
|
||||
kubectl get nodes -o wide && exit -1
|
||||
done
|
||||
set -x
|
||||
}
|
||||
|
||||
function kubeadm_aio_reqs_install {
|
||||
if [ "x$HOST_OS" == "xubuntu" ]; then
|
||||
sudo apt-get install -y --no-install-recommends -qq \
|
||||
jq
|
||||
elif [ "x$HOST_OS" == "xcentos" ]; then
|
||||
sudo yum install -y \
|
||||
epel-release
|
||||
sudo yum install -y \
|
||||
docker-latest \
|
||||
jq
|
||||
sudo cp -f /usr/lib/systemd/system/docker-latest.service /etc/systemd/system/docker.service
|
||||
sudo sed -i "s|/var/lib/docker-latest|/var/lib/docker|g" /etc/systemd/system/docker.service
|
||||
sudo sed -i 's/^OPTIONS/#OPTIONS/g' /etc/sysconfig/docker-latest
|
||||
sudo sed -i "s|^MountFlags=slave|MountFlags=share|g" /etc/systemd/system/docker.service
|
||||
sudo sed -i "/--seccomp-profile/,+1 d" /etc/systemd/system/docker.service
|
||||
echo "DOCKER_STORAGE_OPTIONS=--storage-driver=overlay" | sudo tee /etc/sysconfig/docker-latest-storage
|
||||
sudo setenforce 0 || true
|
||||
sudo systemctl daemon-reload
|
||||
sudo systemctl restart docker
|
||||
elif [ "x$HOST_OS" == "xfedora" ]; then
|
||||
sudo dnf install -y \
|
||||
docker-latest \
|
||||
jq
|
||||
sudo cp -f /usr/lib/systemd/system/docker-latest.service /etc/systemd/system/docker.service
|
||||
sudo sed -i "s|/var/lib/docker-latest|/var/lib/docker|g" /etc/systemd/system/docker.service
|
||||
echo "DOCKER_STORAGE_OPTIONS=--storage-driver=overlay2" | sudo tee /etc/sysconfig/docker-latest-storage
|
||||
sudo setenforce 0 || true
|
||||
sudo systemctl daemon-reload
|
||||
sudo systemctl restart docker
|
||||
fi
|
||||
|
||||
# Install docker and kubectl
|
||||
TMP_DIR=$(mktemp -d)
|
||||
|
||||
curl -sSL https://storage.googleapis.com/kubernetes-release/release/${KUBE_VERSION}/bin/linux/amd64/kubectl -o ${TMP_DIR}/kubectl
|
||||
chmod +x ${TMP_DIR}/kubectl
|
||||
sudo mv ${TMP_DIR}/kubectl /usr/local/bin/kubectl
|
||||
|
||||
curl -fsSL get.docker.com -o ${TMP_DIR}/get-docker.sh
|
||||
sudo sh ${TMP_DIR}/get-docker.sh
|
||||
|
||||
rm -rf ${TMP_DIR}
|
||||
}
|
||||
|
||||
function kubeadm_aio_build {
|
||||
sudo docker build --pull -t ${KUBEADM_IMAGE} tools/kubeadm-aio
|
||||
}
|
||||
|
||||
function kubeadm_aio_launch {
|
||||
${WORK_DIR}/tools/kubeadm-aio/kubeadm-aio-launcher.sh
|
||||
|
||||
rm -rf ${HOME}/.kube
|
||||
mkdir -p ${HOME}/.kube
|
||||
cat ${KUBECONFIG} > ${HOME}/.kube/config
|
||||
kube_wait_for_pods kube-system 300
|
||||
kube_wait_for_pods default 300
|
||||
}
|
||||
|
||||
function kubeadm_aio_clean {
|
||||
sudo docker rm -f kubeadm-aio || true
|
||||
sudo docker rm -f kubelet || true
|
||||
sudo docker ps -aq | xargs -r -l1 -P16 sudo docker rm -f
|
||||
sudo rm -rfv \
|
||||
/etc/cni/net.d \
|
||||
/etc/kubernetes \
|
||||
/var/lib/etcd \
|
||||
/var/etcd \
|
||||
/var/lib/kubelet/* \
|
||||
/var/lib/nova \
|
||||
${HOME}/.kubeadm-aio/admin.conf \
|
||||
/var/lib/openstack-helm \
|
||||
/var/lib/nfs-provisioner || true
|
||||
}
|
||||
|
||||
function ceph_kube_controller_manager_replace {
|
||||
sudo docker pull ${CEPH_KUBE_CONTROLLER_MANAGER_IMAGE}
|
||||
sudo docker tag ${CEPH_KUBE_CONTROLLER_MANAGER_IMAGE} ${BASE_KUBE_CONTROLLER_MANAGER_IMAGE}
|
||||
}
|
@ -49,3 +49,40 @@ function net_hosts_pre_kube {
|
||||
function net_hosts_post_kube {
|
||||
sudo cp -f /etc/hosts-pre-kube /etc/hosts
|
||||
}
|
||||
|
||||
function find_subnet_range {
|
||||
if [ "x$HOST_OS" == "xubuntu" ]; then
|
||||
ipcalc $(net_default_host_addr) | awk '/^Network/ { print $2 }'
|
||||
else
|
||||
eval $(ipcalc --network --prefix $(net_default_host_addr))
|
||||
echo "$NETWORK/$PREFIX"
|
||||
fi
|
||||
}
|
||||
|
||||
function find_multi_subnet_range {
|
||||
: ${PRIMARY_NODE_IP:="$(cat /etc/nodepool/primary_node | tail -1)"}
|
||||
: ${SUB_NODE_IPS:="$(cat /etc/nodepool/sub_nodes)"}
|
||||
NODE_IPS="${PRIMARY_NODE_IP} ${SUB_NODE_IPS}"
|
||||
NODE_IP_UNSORTED=$(mktemp --suffix=.txt)
|
||||
for NODE_IP in $NODE_IPS; do
|
||||
echo $NODE_IP >> ${NODE_IP_UNSORTED}
|
||||
done
|
||||
NODE_IP_SORTED=$(mktemp --suffix=.txt)
|
||||
sort -V ${NODE_IP_UNSORTED} > ${NODE_IP_SORTED}
|
||||
rm -f ${NODE_IP_UNSORTED}
|
||||
FIRST_IP_SUBNET=$(ipcalc "$(head -n 1 ${NODE_IP_SORTED})/24" | awk '/^Network/ { print $2 }')
|
||||
LAST_IP_SUBNET=$(ipcalc "$(tail -n 1 ${NODE_IP_SORTED})/24" | awk '/^Network/ { print $2 }')
|
||||
rm -f ${NODE_IP_SORTED}
|
||||
function ip_diff {
|
||||
echo $(($(echo $LAST_IP_SUBNET | awk -F '.' "{ print \$$1}") - $(echo $FIRST_IP_SUBNET | awk -F '.' "{ print \$$1}")))
|
||||
}
|
||||
for X in {1..4}; do
|
||||
if ! [ "$(ip_diff $X)" -eq "0" ]; then
|
||||
SUBMASK=$(((($X - 1 )) * 8))
|
||||
break
|
||||
elif [ $X -eq "4" ]; then
|
||||
SUBMASK=24
|
||||
fi
|
||||
done
|
||||
echo ${FIRST_IP_SUBNET%/*}/${SUBMASK}
|
||||
}
|
25
tools/gate/kubeadm_aio.sh
Normal file
25
tools/gate/kubeadm_aio.sh
Normal file
@ -0,0 +1,25 @@
|
||||
#!/bin/bash
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
set -ex
|
||||
: ${WORK_DIR:="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"}
|
||||
source ${WORK_DIR}/tools/gate/vars.sh
|
||||
source ${WORK_DIR}/tools/gate/funcs/network.sh
|
||||
source ${WORK_DIR}/tools/gate/funcs/kube.sh
|
||||
|
||||
kubeadm_aio_reqs_install
|
||||
|
||||
# Re-use the docker image pre-built by openstack-helm team.
|
||||
sudo docker pull ${KUBEADM_IMAGE} || kubeadm_aio_build
|
||||
|
||||
kubeadm_aio_launch
|
@ -11,21 +11,31 @@
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
set -ex
|
||||
export WORK_DIR=$(pwd)
|
||||
source ${WORK_DIR}/tools/gate/vars.sh
|
||||
source ${WORK_DIR}/tools/gate/funcs/common.sh
|
||||
source ${WORK_DIR}/tools/gate/funcs/network.sh
|
||||
|
||||
# Setup the logging location: by default use the working dir as the root.
|
||||
rm -rf ${LOGS_DIR} || true
|
||||
mkdir -p ${LOGS_DIR}
|
||||
|
||||
function dump_logs () {
|
||||
${WORK_DIR}/tools/gate/dump_logs.sh
|
||||
}
|
||||
trap 'dump_logs "$?"' ERR
|
||||
|
||||
# Do the basic node setup for running the gate
|
||||
gate_base_setup
|
||||
|
||||
# We setup the network for pre kube here, to enable cluster restarts on
|
||||
# development machines
|
||||
net_resolv_pre_kube
|
||||
net_hosts_pre_kube
|
||||
|
||||
# Setup the K8s Cluster
|
||||
source ${WORK_DIR}/tools/gate/setup_minikube.sh
|
||||
create_k8s_screen
|
||||
bash ${WORK_DIR}/tools/gate/kubeadm_aio.sh
|
||||
|
||||
#net_hosts_post_kube
|
||||
#net_resolv_post_kube
|
||||
# Starts a proxy to the Kubernetes API server in a screen session
|
||||
create_k8s_screen
|
||||
|
@ -1,35 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
set -xe
|
||||
|
||||
sudo apt-get install -y --no-install-recommends -qq \
|
||||
docker.io \
|
||||
jq
|
||||
|
||||
curl -Lo minikube https://storage.googleapis.com/minikube/releases/latest/minikube-linux-amd64 && sudo chmod +x minikube
|
||||
curl -Lo kubectl https://storage.googleapis.com/kubernetes-release/release/$(curl -s https://storage.googleapis.com/kubernetes-release/release/stable.txt)/bin/linux/amd64/kubectl && sudo chmod +x kubectl && sudo mv ./kubectl /usr/local/bin/kubectl
|
||||
|
||||
export MINIKUBE_WANTUPDATENOTIFICATION=false
|
||||
export MINIKUBE_WANTREPORTERRORPROMPT=false
|
||||
export MINIKUBE_HOME=$HOME
|
||||
export CHANGE_MINIKUBE_NONE_USER=true
|
||||
mkdir $HOME/.kube || true
|
||||
touch $HOME/.kube/config
|
||||
|
||||
export KUBECONFIG=$HOME/.kube/config
|
||||
sudo ./minikube delete || true
|
||||
sudo -E ./minikube start --vm-driver=none --kubernetes-version ${KUBE_VERSION} --loglevel 0
|
||||
|
||||
# waits until kubectl can access the api server that Minikube has created
|
||||
end=$(($(date +%s) + 600))
|
||||
READY="False"
|
||||
while true; do
|
||||
kubectl get po &> /dev/null
|
||||
if [ $? -ne 1 ]; then
|
||||
READY="True"
|
||||
echo "Kubernetes cluster is ready!"
|
||||
fi
|
||||
[ $READY == "True" ] && break || true
|
||||
sleep 2
|
||||
now=$(date +%s)
|
||||
[ $now -gt $end ] && echo "Failed to setup kubernetes cluster in time" && exit -1
|
||||
done
|
@ -15,12 +15,28 @@
|
||||
# Set work dir if not already done
|
||||
: ${WORK_DIR:="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"}
|
||||
|
||||
# Set logs directory
|
||||
export LOGS_DIR=${LOGS_DIR:-"${WORK_DIR}/logs"}
|
||||
|
||||
# Get Host OS
|
||||
source /etc/os-release
|
||||
export HOST_OS=${HOST_OS:="${ID}"}
|
||||
|
||||
# Set versions of K8s to use
|
||||
export KUBE_VERSION=${KUBE_VERSION:-"v1.8.0"}
|
||||
export KUBE_VERSION=${KUBE_VERSION:-"v1.7.3"}
|
||||
export KUBEADM_IMAGE_VERSION=${KUBEADM_IMAGE_VERSION:-"v1.7.3"}
|
||||
|
||||
# Set K8s-AIO options
|
||||
export KUBECONFIG=${KUBECONFIG:="${HOME}/.kubeadm-aio/admin.conf"}
|
||||
export KUBEADM_IMAGE=${KUBEADM_IMAGE:="openstackhelm/kubeadm-aio:${KUBEADM_IMAGE_VERSION}"}
|
||||
|
||||
# Set K8s network options
|
||||
export CNI_POD_CIDR=${CNI_POD_CIDR:="192.168.0.0/16"}
|
||||
export KUBE_CNI=${KUBE_CNI:="calico"}
|
||||
|
||||
# Set Upstream DNS
|
||||
export UPSTREAM_DNS=${UPSTREAM_DNS:-"8.8.8.8"}
|
||||
|
||||
# Set gate script timeouts
|
||||
export SERVICE_LAUNCH_TIMEOUT=${SERVICE_LAUNCH_TIMEOUT:="600"}
|
||||
export SERVICE_TEST_TIMEOUT=${SERVICE_TEST_TIMEOUT:="600"}
|
||||
|
88
tools/kubeadm-aio/Dockerfile
Normal file
88
tools/kubeadm-aio/Dockerfile
Normal file
@ -0,0 +1,88 @@
|
||||
FROM ubuntu:16.04
|
||||
MAINTAINER pete.birley@att.com
|
||||
|
||||
ENV KUBE_VERSION=v1.6.8 \
|
||||
CNI_VERSION=v0.6.0-rc2 \
|
||||
container="docker" \
|
||||
DEBIAN_FRONTEND="noninteractive"
|
||||
|
||||
RUN set -x \
|
||||
&& TMP_DIR=$(mktemp --directory) \
|
||||
&& cd ${TMP_DIR} \
|
||||
&& apt-get update \
|
||||
&& apt-get install -y --no-install-recommends \
|
||||
apt-transport-https \
|
||||
ca-certificates \
|
||||
curl \
|
||||
dbus \
|
||||
make \
|
||||
git \
|
||||
vim \
|
||||
jq \
|
||||
# Add Kubernetes repo
|
||||
&& curl -sSL https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add - \
|
||||
&& echo "deb http://apt.kubernetes.io/ kubernetes-xenial main" > /etc/apt/sources.list.d/kubernetes.list \
|
||||
&& apt-get update \
|
||||
&& apt-get install -y --no-install-recommends \
|
||||
docker.io \
|
||||
iptables \
|
||||
kubectl \
|
||||
kubelet \
|
||||
kubernetes-cni \
|
||||
# Install Kubeadm without running postinstall script as it expects systemd to be running.
|
||||
&& apt-get download kubeadm \
|
||||
&& dpkg --unpack kubeadm*.deb \
|
||||
&& mv /var/lib/dpkg/info/kubeadm.postinst /opt/kubeadm.postinst \
|
||||
&& dpkg --configure kubeadm \
|
||||
&& apt-get install -yf kubeadm \
|
||||
&& mkdir -p /etc/kubernetes/manifests \
|
||||
# Install kubectl:
|
||||
&& curl -sSL https://dl.k8s.io/${KUBE_VERSION}/kubernetes-client-linux-amd64.tar.gz | tar -zxv --strip-components=1 \
|
||||
&& mv ${TMP_DIR}/client/bin/kubectl /usr/bin/kubectl \
|
||||
&& chmod +x /usr/bin/kubectl \
|
||||
# Install kubelet & kubeadm binaries:
|
||||
# (portdirect) We do things in this weird way to let us use the deps and systemd
|
||||
# units from the packages in the .deb repo.
|
||||
&& curl -sSL https://dl.k8s.io/${KUBE_VERSION}/kubernetes-server-linux-amd64.tar.gz | tar -zxv --strip-components=1 \
|
||||
&& mv ${TMP_DIR}/server/bin/kubelet /usr/bin/kubelet \
|
||||
&& chmod +x /usr/bin/kubelet \
|
||||
&& mv ${TMP_DIR}/server/bin/kubeadm /usr/bin/kubeadm \
|
||||
&& chmod +x /usr/bin/kubeadm \
|
||||
# Install CNI:
|
||||
&& CNI_BIN_DIR=/opt/cni/bin \
|
||||
&& mkdir -p ${CNI_BIN_DIR} \
|
||||
&& cd ${CNI_BIN_DIR} \
|
||||
&& curl -sSL https://github.com/containernetworking/plugins/releases/download/$CNI_VERSION/cni-plugins-amd64-$CNI_VERSION.tgz | tar -zxv --strip-components=1 \
|
||||
&& cd ${TMP_DIR} \
|
||||
# Move kubelet binary as we will run containerised
|
||||
&& mv /usr/bin/kubelet /usr/bin/kubelet-real \
|
||||
# Install utils for PVC provisioners
|
||||
&& apt-get install -y --no-install-recommends \
|
||||
nfs-common \
|
||||
ceph-common \
|
||||
kmod \
|
||||
# Tweak Systemd units and targets for running in a container
|
||||
&& find /lib/systemd/system/sysinit.target.wants/ ! -name 'systemd-tmpfiles-setup.service' -type l -exec rm -fv {} + \
|
||||
&& rm -fv \
|
||||
/lib/systemd/system/multi-user.target.wants/* \
|
||||
/etc/systemd/system/*.wants/* \
|
||||
/lib/systemd/system/local-fs.target.wants/* \
|
||||
/lib/systemd/system/sockets.target.wants/*udev* \
|
||||
/lib/systemd/system/sockets.target.wants/*initctl* \
|
||||
/lib/systemd/system/basic.target.wants/* \
|
||||
# Clean up apt cache
|
||||
&& rm -rf /var/lib/apt/lists/* \
|
||||
# Clean up tmp dir
|
||||
&& cd / \
|
||||
&& rm -rf ${TMP_DIR}
|
||||
|
||||
# Load assets into place, setup startup target & units
|
||||
COPY ./assets/ /
|
||||
RUN set -x \
|
||||
&& ln -s /usr/lib/systemd/system/container-up.target /etc/systemd/system/default.target \
|
||||
&& mkdir -p /etc/systemd/system/container-up.target.wants \
|
||||
&& ln -s /usr/lib/systemd/system/kubeadm-aio.service /etc/systemd/system/container-up.target.wants/kubeadm-aio.service
|
||||
|
||||
VOLUME /sys/fs/cgroup
|
||||
|
||||
CMD /kubeadm-aio
|
110
tools/kubeadm-aio/README.rst
Normal file
110
tools/kubeadm-aio/README.rst
Normal file
@ -0,0 +1,110 @@
|
||||
Kubeadm AIO Container
|
||||
=====================
|
||||
|
||||
This container builds a small AIO Kubeadm based Kubernetes deployment
|
||||
for Development and Gating use.
|
||||
|
||||
Instructions
|
||||
------------
|
||||
|
||||
OS Specific Host setup:
|
||||
~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
Ubuntu:
|
||||
^^^^^^^
|
||||
|
||||
From a freshly provisioned Ubuntu 16.04 LTS host run:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
sudo apt-get update -y
|
||||
sudo apt-get install -y \
|
||||
docker.io \
|
||||
nfs-common \
|
||||
git \
|
||||
make
|
||||
|
||||
OS Independent Host setup:
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
You should install the ``kubectl`` and ``helm`` binaries:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
KUBE_VERSION=v1.6.8
|
||||
HELM_VERSION=v2.5.1
|
||||
|
||||
TMP_DIR=$(mktemp -d)
|
||||
curl -sSL https://storage.googleapis.com/kubernetes-release/release/${KUBE_VERSION}/bin/linux/amd64/kubectl -o ${TMP_DIR}/kubectl
|
||||
chmod +x ${TMP_DIR}/kubectl
|
||||
sudo mv ${TMP_DIR}/kubectl /usr/local/bin/kubectl
|
||||
curl -sSL https://storage.googleapis.com/kubernetes-helm/helm-${HELM_VERSION}-linux-amd64.tar.gz | tar -zxv --strip-components=1 -C ${TMP_DIR}
|
||||
sudo mv ${TMP_DIR}/helm /usr/local/bin/helm
|
||||
rm -rf ${TMP_DIR}
|
||||
|
||||
And clone the OpenStack-Helm repo:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
git clone https://git.openstack.org/openstack/openstack-helm
|
||||
|
||||
Build the AIO environment (optional)
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
A known good image is published to dockerhub on a fairly regular basis, but if
|
||||
you wish to build your own image, from the root directory of the OpenStack-Helm
|
||||
repo run:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
export KUBEADM_IMAGE=openstackhelm/kubeadm-aio:v1.6.8
|
||||
sudo docker build --pull -t ${KUBEADM_IMAGE} tools/kubeadm-aio
|
||||
|
||||
Deploy the AIO environment
|
||||
~~~~~~~~~~~~~~~~~~~~~~~~~~
|
||||
|
||||
To launch the environment run:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
export KUBEADM_IMAGE=openstackhelm/kubeadm-aio:v1.6.8
|
||||
export KUBE_VERSION=v1.6.8
|
||||
./tools/kubeadm-aio/kubeadm-aio-launcher.sh
|
||||
export KUBECONFIG=${HOME}/.kubeadm-aio/admin.conf
|
||||
|
||||
Once this has run without errors, you should hopefully have a Kubernetes single
|
||||
node environment running, with Helm, Calico, appropriate RBAC rules and node
|
||||
labels to get developing.
|
||||
|
||||
Prior to launching you can also optionally set the following environment
|
||||
variables to control aspects of the CNI used:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
export KUBE_CNI=calico # or "canal" "weave" "flannel"
|
||||
export CNI_POD_CIDR=192.168.0.0/16
|
||||
|
||||
If you wish to use this environment as the primary Kubernetes environment on
|
||||
your host you may run the following, but note that this will wipe any previous
|
||||
client configuration you may have.
|
||||
|
||||
.. code:: bash
|
||||
|
||||
mkdir -p ${HOME}/.kube
|
||||
cat ${HOME}/.kubeadm-aio/admin.conf > ${HOME}/.kube/config
|
||||
|
||||
If you wish to create dummy network devices for Neutron to manage there
|
||||
is a helper script that can set them up for you:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
sudo docker exec kubelet /usr/bin/openstack-helm-aio-network-prep
|
||||
|
||||
Logs
|
||||
~~~~
|
||||
|
||||
You can get the logs from your ``kubeadm-aio`` container by running:
|
||||
|
||||
.. code:: bash
|
||||
|
||||
sudo docker logs -f kubeadm-aio
|
2
tools/kubeadm-aio/assets/etc/kube-cni
Normal file
2
tools/kubeadm-aio/assets/etc/kube-cni
Normal file
@ -0,0 +1,2 @@
|
||||
KUBE_CNI=calico
|
||||
CNI_POD_CIDR=192.168.0.0/16
|
3
tools/kubeadm-aio/assets/etc/kube-role
Normal file
3
tools/kubeadm-aio/assets/etc/kube-role
Normal file
@ -0,0 +1,3 @@
|
||||
# If KUBE_ROLE is set 'master' kubeadm-aio will set this node up to be a master
|
||||
# node, otherwise if 'worker', will join an existing cluster.
|
||||
KUBE_ROLE=master
|
3
tools/kubeadm-aio/assets/etc/kube-version
Normal file
3
tools/kubeadm-aio/assets/etc/kube-version
Normal file
@ -0,0 +1,3 @@
|
||||
# If KUBE_VERSION is set 'default' kubeadm will use the default version of K8s
|
||||
# otherwise the version specified here will be used.
|
||||
KUBE_VERSION=default
|
1
tools/kubeadm-aio/assets/etc/kubeadm-join-command-args
Normal file
1
tools/kubeadm-aio/assets/etc/kubeadm-join-command-args
Normal file
@ -0,0 +1 @@
|
||||
KUBEADM_JOIN_ARGS="no_command_supplied"
|
4
tools/kubeadm-aio/assets/etc/kubeadm.conf
Normal file
4
tools/kubeadm-aio/assets/etc/kubeadm.conf
Normal file
@ -0,0 +1,4 @@
|
||||
apiVersion: kubeadm.k8s.io/v1alpha1
|
||||
kind: MasterConfiguration
|
||||
apiServerExtraArgs:
|
||||
runtime-config: "batch/v2alpha1=true"
|
3
tools/kubeadm-aio/assets/etc/kubeapi-device
Normal file
3
tools/kubeadm-aio/assets/etc/kubeapi-device
Normal file
@ -0,0 +1,3 @@
|
||||
# If KUBE_BIND_DEV is set to 'autodetect' we will use kubeadm's autodetect logic
|
||||
# otherwise use the device specified to find the IP address to bind to.
|
||||
KUBE_BIND_DEV=autodetect
|
3
tools/kubeadm-aio/assets/etc/kubelet-container
Normal file
3
tools/kubeadm-aio/assets/etc/kubelet-container
Normal file
@ -0,0 +1,3 @@
|
||||
# If KUBELET_CONTAINER is set 'to_this' one we will not attempt to launch a new
|
||||
# container for the kubelet process, otherwise use the image tag specified
|
||||
KUBELET_CONTAINER=this_one
|
54
tools/kubeadm-aio/assets/kubeadm-aio
Executable file
54
tools/kubeadm-aio/assets/kubeadm-aio
Executable file
@ -0,0 +1,54 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2017 The Openstack-Helm Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
set -xe
|
||||
|
||||
echo 'Checking cgroups'
|
||||
if ls -dZ /sys/fs/cgroup | grep -q :svirt_sandbox_file_t: ; then
|
||||
echo 'Invocation error: use -v /sys/fs/cgroup:/sys/fs/cgroup:ro parameter to docker run.'
|
||||
exit 1
|
||||
fi
|
||||
|
||||
echo 'Setting up K8s version to deploy'
|
||||
: ${KUBE_VERSION:="default"}
|
||||
sed -i "s|KUBE_VERSION=.*|KUBE_VERSION=${KUBE_VERSION}|g" /etc/kube-version
|
||||
|
||||
echo 'Setting up device to use for kube-api'
|
||||
: ${KUBE_BIND_DEV:="autodetect"}
|
||||
sed -i "s|KUBE_BIND_DEV=.*|KUBE_BIND_DEV=${KUBE_BIND_DEV}|g" /etc/kubeapi-device
|
||||
|
||||
echo 'Setting up container image to use for kubelet'
|
||||
: ${KUBELET_CONTAINER:="this_one"}
|
||||
sed -i "s|KUBELET_CONTAINER=.*|KUBELET_CONTAINER=${KUBELET_CONTAINER}|g" /etc/kubelet-container
|
||||
|
||||
echo 'Setting whether this node is a master, or slave, K8s node'
|
||||
: ${KUBE_ROLE:="master"}
|
||||
sed -i "s|KUBE_ROLE=.*|KUBE_ROLE=${KUBE_ROLE}|g" /etc/kube-role
|
||||
|
||||
echo 'Setting any kubeadm join commands'
|
||||
: ${KUBEADM_JOIN_ARGS:="no_command_supplied"}
|
||||
sed -i "s|KUBEADM_JOIN_ARGS=.*|KUBEADM_JOIN_ARGS=\"${KUBEADM_JOIN_ARGS}\"|g" /etc/kubeadm-join-command-args
|
||||
|
||||
echo 'Setting CNI pod CIDR'
|
||||
: ${CNI_POD_CIDR:="192.168.0.0/16"}
|
||||
sed -i "s|192.168.0.0/16|${CNI_POD_CIDR}|g" /opt/cni-manifests/*.yaml
|
||||
sed -i "s|CNI_POD_CIDR=.*|CNI_POD_CIDR=\"${CNI_POD_CIDR}\"|g" /etc/kube-cni
|
||||
|
||||
echo 'Setting CNI '
|
||||
: ${KUBE_CNI:="calico"}
|
||||
sed -i "s|KUBE_CNI=.*|KUBE_CNI=\"${KUBE_CNI}\"|g" /etc/kube-cni
|
||||
|
||||
echo 'Starting Systemd'
|
||||
exec /bin/systemd --system
|
365
tools/kubeadm-aio/assets/opt/cni-manifests/calico.yaml
Normal file
365
tools/kubeadm-aio/assets/opt/cni-manifests/calico.yaml
Normal file
@ -0,0 +1,365 @@
|
||||
# Calico Version v2.1.4
|
||||
# http://docs.projectcalico.org/v2.1/releases#v2.1.4
|
||||
# This manifest includes the following component versions:
|
||||
# calico/node:v1.1.3
|
||||
# calico/cni:v1.7.0
|
||||
# calico/kube-policy-controller:v0.5.4
|
||||
|
||||
# This ConfigMap is used to configure a self-hosted Calico installation.
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: calico-config
|
||||
namespace: kube-system
|
||||
data:
|
||||
# The location of your etcd cluster. This uses the Service clusterIP
|
||||
# defined below.
|
||||
etcd_endpoints: "http://10.96.232.136:6666"
|
||||
|
||||
# Configure the Calico backend to use.
|
||||
calico_backend: "bird"
|
||||
|
||||
# The CNI network configuration to install on each node.
|
||||
cni_network_config: |-
|
||||
{
|
||||
"name": "k8s-pod-network",
|
||||
"type": "calico",
|
||||
"etcd_endpoints": "__ETCD_ENDPOINTS__",
|
||||
"log_level": "info",
|
||||
"ipam": {
|
||||
"type": "calico-ipam"
|
||||
},
|
||||
"policy": {
|
||||
"type": "k8s",
|
||||
"k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
|
||||
"k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
|
||||
},
|
||||
"kubernetes": {
|
||||
"kubeconfig": "/etc/cni/net.d/__KUBECONFIG_FILENAME__"
|
||||
}
|
||||
}
|
||||
|
||||
---
|
||||
|
||||
# This manifest installs the Calico etcd on the kubeadm master. This uses a DaemonSet
|
||||
# to force it to run on the master even when the master isn't schedulable, and uses
|
||||
# nodeSelector to ensure it only runs on the master.
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
name: calico-etcd
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: calico-etcd
|
||||
spec:
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: calico-etcd
|
||||
annotations:
|
||||
# Mark this pod as a critical add-on; when enabled, the critical add-on scheduler
|
||||
# reserves resources for critical add-on pods so that they can be rescheduled after
|
||||
# a failure. This annotation works in tandem with the toleration below.
|
||||
scheduler.alpha.kubernetes.io/critical-pod: ''
|
||||
spec:
|
||||
# Only run this pod on the master.
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/master
|
||||
effect: NoSchedule
|
||||
# Allow this pod to be rescheduled while the node is in "critical add-ons only" mode.
|
||||
# This, along with the annotation above marks this pod as a critical add-on.
|
||||
- key: CriticalAddonsOnly
|
||||
operator: Exists
|
||||
nodeSelector:
|
||||
node-role.kubernetes.io/master: ""
|
||||
hostNetwork: true
|
||||
containers:
|
||||
- name: calico-etcd
|
||||
image: gcr.io/google_containers/etcd:2.2.1
|
||||
env:
|
||||
- name: CALICO_ETCD_IP
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: status.podIP
|
||||
command: ["/bin/sh","-c"]
|
||||
args: ["/usr/local/bin/etcd --name=calico --data-dir=/var/etcd/calico-data --advertise-client-urls=http://$CALICO_ETCD_IP:6666 --listen-client-urls=http://0.0.0.0:6666 --listen-peer-urls=http://0.0.0.0:6667"]
|
||||
volumeMounts:
|
||||
- name: var-etcd
|
||||
mountPath: /var/etcd
|
||||
volumes:
|
||||
- name: var-etcd
|
||||
hostPath:
|
||||
path: /var/etcd
|
||||
|
||||
---
|
||||
|
||||
# This manfiest installs the Service which gets traffic to the Calico
|
||||
# etcd.
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: calico-etcd
|
||||
name: calico-etcd
|
||||
namespace: kube-system
|
||||
spec:
|
||||
# Select the calico-etcd pod running on the master.
|
||||
selector:
|
||||
k8s-app: calico-etcd
|
||||
# This ClusterIP needs to be known in advance, since we cannot rely
|
||||
# on DNS to get access to etcd.
|
||||
clusterIP: 10.96.232.136
|
||||
ports:
|
||||
- port: 6666
|
||||
|
||||
---
|
||||
|
||||
# This manifest installs the calico/node container, as well
|
||||
# as the Calico CNI plugins and network config on
|
||||
# each master and worker node in a Kubernetes cluster.
|
||||
kind: DaemonSet
|
||||
apiVersion: extensions/v1beta1
|
||||
metadata:
|
||||
name: calico-node
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: calico-node
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: calico-node
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: calico-node
|
||||
annotations:
|
||||
# Mark this pod as a critical add-on; when enabled, the critical add-on scheduler
|
||||
# reserves resources for critical add-on pods so that they can be rescheduled after
|
||||
# a failure. This annotation works in tandem with the toleration below.
|
||||
scheduler.alpha.kubernetes.io/critical-pod: ''
|
||||
spec:
|
||||
hostNetwork: true
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/master
|
||||
effect: NoSchedule
|
||||
# Allow this pod to be rescheduled while the node is in "critical add-ons only" mode.
|
||||
# This, along with the annotation above marks this pod as a critical add-on.
|
||||
- key: CriticalAddonsOnly
|
||||
operator: Exists
|
||||
serviceAccountName: calico-cni-plugin
|
||||
containers:
|
||||
# Runs calico/node container on each Kubernetes node. This
|
||||
# container programs network policy and routes on each
|
||||
# host.
|
||||
- name: calico-node
|
||||
image: quay.io/calico/node:v1.1.3
|
||||
env:
|
||||
# The location of the Calico etcd cluster.
|
||||
- name: ETCD_ENDPOINTS
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: calico-config
|
||||
key: etcd_endpoints
|
||||
# Enable BGP. Disable to enforce policy only.
|
||||
- name: CALICO_NETWORKING_BACKEND
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: calico-config
|
||||
key: calico_backend
|
||||
# Disable file logging so `kubectl logs` works.
|
||||
- name: CALICO_DISABLE_FILE_LOGGING
|
||||
value: "true"
|
||||
# Set Felix endpoint to host default action to ACCEPT.
|
||||
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
|
||||
value: "ACCEPT"
|
||||
# Configure the IP Pool from which Pod IPs will be chosen.
|
||||
- name: CALICO_IPV4POOL_CIDR
|
||||
value: "192.168.0.0/16"
|
||||
- name: CALICO_IPV4POOL_IPIP
|
||||
value: "always"
|
||||
# Disable IPv6 on Kubernetes.
|
||||
- name: FELIX_IPV6SUPPORT
|
||||
value: "false"
|
||||
# Set Felix logging to "info"
|
||||
- name: FELIX_LOGSEVERITYSCREEN
|
||||
value: "info"
|
||||
# Auto-detect the BGP IP address.
|
||||
- name: IP
|
||||
value: ""
|
||||
securityContext:
|
||||
privileged: true
|
||||
resources:
|
||||
requests:
|
||||
cpu: 250m
|
||||
volumeMounts:
|
||||
- mountPath: /lib/modules
|
||||
name: lib-modules
|
||||
readOnly: true
|
||||
- mountPath: /var/run/calico
|
||||
name: var-run-calico
|
||||
readOnly: false
|
||||
# This container installs the Calico CNI binaries
|
||||
# and CNI network config file on each node.
|
||||
- name: install-cni
|
||||
image: quay.io/calico/cni:v1.7.0
|
||||
command: ["/install-cni.sh"]
|
||||
env:
|
||||
# The location of the Calico etcd cluster.
|
||||
- name: ETCD_ENDPOINTS
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: calico-config
|
||||
key: etcd_endpoints
|
||||
# The CNI network config to install on each node.
|
||||
- name: CNI_NETWORK_CONFIG
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: calico-config
|
||||
key: cni_network_config
|
||||
volumeMounts:
|
||||
- mountPath: /host/opt/cni/bin
|
||||
name: cni-bin-dir
|
||||
- mountPath: /host/etc/cni/net.d
|
||||
name: cni-net-dir
|
||||
volumes:
|
||||
# Used by calico/node.
|
||||
- name: lib-modules
|
||||
hostPath:
|
||||
path: /lib/modules
|
||||
- name: var-run-calico
|
||||
hostPath:
|
||||
path: /var/run/calico
|
||||
# Used to install CNI.
|
||||
- name: cni-bin-dir
|
||||
hostPath:
|
||||
path: /opt/cni/bin
|
||||
- name: cni-net-dir
|
||||
hostPath:
|
||||
path: /etc/cni/net.d
|
||||
|
||||
---
|
||||
|
||||
# This manifest deploys the Calico policy controller on Kubernetes.
|
||||
# See https://github.com/projectcalico/k8s-policy
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: calico-policy-controller
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: calico-policy
|
||||
spec:
|
||||
# The policy controller can only have a single active instance.
|
||||
replicas: 1
|
||||
strategy:
|
||||
type: Recreate
|
||||
template:
|
||||
metadata:
|
||||
name: calico-policy-controller
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: calico-policy-controller
|
||||
annotations:
|
||||
# Mark this pod as a critical add-on; when enabled, the critical add-on scheduler
|
||||
# reserves resources for critical add-on pods so that they can be rescheduled after
|
||||
# a failure. This annotation works in tandem with the toleration below.
|
||||
scheduler.alpha.kubernetes.io/critical-pod: ''
|
||||
spec:
|
||||
# The policy controller must run in the host network namespace so that
|
||||
# it isn't governed by policy that would prevent it from working.
|
||||
hostNetwork: true
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/master
|
||||
effect: NoSchedule
|
||||
# Allow this pod to be rescheduled while the node is in "critical add-ons only" mode.
|
||||
# This, along with the annotation above marks this pod as a critical add-on.
|
||||
- key: CriticalAddonsOnly
|
||||
operator: Exists
|
||||
serviceAccountName: calico-policy-controller
|
||||
containers:
|
||||
- name: calico-policy-controller
|
||||
image: quay.io/calico/kube-policy-controller:v0.5.4
|
||||
env:
|
||||
# The location of the Calico etcd cluster.
|
||||
- name: ETCD_ENDPOINTS
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: calico-config
|
||||
key: etcd_endpoints
|
||||
# The location of the Kubernetes API. Use the default Kubernetes
|
||||
# service for API access.
|
||||
- name: K8S_API
|
||||
value: "https://kubernetes.default:443"
|
||||
# Since we're running in the host namespace and might not have KubeDNS
|
||||
# access, configure the container's /etc/hosts to resolve
|
||||
# kubernetes.default to the correct service clusterIP.
|
||||
- name: CONFIGURE_ETC_HOSTS
|
||||
value: "true"
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: calico-cni-plugin
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: calico-cni-plugin
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: calico-cni-plugin
|
||||
namespace: kube-system
|
||||
---
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
metadata:
|
||||
name: calico-cni-plugin
|
||||
namespace: kube-system
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources:
|
||||
- pods
|
||||
- nodes
|
||||
verbs:
|
||||
- get
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: calico-cni-plugin
|
||||
namespace: kube-system
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: calico-policy-controller
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: calico-policy-controller
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: calico-policy-controller
|
||||
namespace: kube-system
|
||||
---
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
metadata:
|
||||
name: calico-policy-controller
|
||||
namespace: kube-system
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
- extensions
|
||||
resources:
|
||||
- pods
|
||||
- namespaces
|
||||
- networkpolicies
|
||||
verbs:
|
||||
- watch
|
||||
- list
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: calico-policy-controller
|
||||
namespace: kube-system
|
329
tools/kubeadm-aio/assets/opt/cni-manifests/canal.yaml
Normal file
329
tools/kubeadm-aio/assets/opt/cni-manifests/canal.yaml
Normal file
@ -0,0 +1,329 @@
|
||||
# Calico Roles
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
metadata:
|
||||
name: canal
|
||||
namespace: kube-system
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources:
|
||||
- namespaces
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups: [""]
|
||||
resources:
|
||||
- pods/status
|
||||
verbs:
|
||||
- update
|
||||
- apiGroups: [""]
|
||||
resources:
|
||||
- pods
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups: [""]
|
||||
resources:
|
||||
- nodes
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- update
|
||||
- watch
|
||||
- apiGroups: ["extensions"]
|
||||
resources:
|
||||
- thirdpartyresources
|
||||
verbs:
|
||||
- create
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups: ["extensions"]
|
||||
resources:
|
||||
- networkpolicies
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups: ["projectcalico.org"]
|
||||
resources:
|
||||
- globalconfigs
|
||||
verbs:
|
||||
- create
|
||||
- get
|
||||
- list
|
||||
- update
|
||||
- watch
|
||||
- apiGroups: ["projectcalico.org"]
|
||||
resources:
|
||||
- ippools
|
||||
verbs:
|
||||
- create
|
||||
- delete
|
||||
- get
|
||||
- list
|
||||
- update
|
||||
- watch
|
||||
---
|
||||
# Flannel roles
|
||||
# Pulled from https://github.com/coreos/flannel/blob/master/Documentation/kube-flannel-rbac.yml
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
metadata:
|
||||
name: flannel
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- pods
|
||||
verbs:
|
||||
- get
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- nodes
|
||||
verbs:
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- nodes/status
|
||||
verbs:
|
||||
- patch
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
metadata:
|
||||
name: flannel
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: flannel
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: canal
|
||||
namespace: kube-system
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: canal
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: canal
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: canal
|
||||
namespace: kube-system
|
||||
---
|
||||
# This ConfigMap can be used to configure a self-hosted Canal installation.
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: canal-config
|
||||
namespace: kube-system
|
||||
data:
|
||||
# The interface used by canal for host <-> host communication.
|
||||
# If left blank, then the interface is chosen using the node's
|
||||
# default route.
|
||||
canal_iface: ""
|
||||
|
||||
# Whether or not to masquerade traffic to destinations not within
|
||||
# the pod network.
|
||||
masquerade: "true"
|
||||
|
||||
# The CNI network configuration to install on each node.
|
||||
cni_network_config: |-
|
||||
{
|
||||
"name": "k8s-pod-network",
|
||||
"type": "calico",
|
||||
"log_level": "info",
|
||||
"datastore_type": "kubernetes",
|
||||
"hostname": "__KUBERNETES_NODE_NAME__",
|
||||
"ipam": {
|
||||
"type": "host-local",
|
||||
"subnet": "usePodCidr"
|
||||
},
|
||||
"policy": {
|
||||
"type": "k8s",
|
||||
"k8s_auth_token": "__SERVICEACCOUNT_TOKEN__"
|
||||
},
|
||||
"kubernetes": {
|
||||
"k8s_api_root": "https://__KUBERNETES_SERVICE_HOST__:__KUBERNETES_SERVICE_PORT__",
|
||||
"kubeconfig": "__KUBECONFIG_FILEPATH__"
|
||||
}
|
||||
}
|
||||
|
||||
# Flannel network configuration. Mounted into the flannel container.
|
||||
net-conf.json: |
|
||||
{
|
||||
"Network": "192.168.0.0/16",
|
||||
"Backend": {
|
||||
"Type": "vxlan"
|
||||
}
|
||||
}
|
||||
---
|
||||
# This manifest installs the calico/node container, as well
|
||||
# as the Calico CNI plugins and network config on
|
||||
# each master and worker node in a Kubernetes cluster.
|
||||
kind: DaemonSet
|
||||
apiVersion: extensions/v1beta1
|
||||
metadata:
|
||||
name: canal
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: canal
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: canal
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: canal
|
||||
annotations:
|
||||
scheduler.alpha.kubernetes.io/critical-pod: ''
|
||||
spec:
|
||||
hostNetwork: true
|
||||
serviceAccountName: canal
|
||||
tolerations:
|
||||
# Allow the pod to run on the master. This is required for
|
||||
# the master to communicate with pods.
|
||||
- key: node-role.kubernetes.io/master
|
||||
effect: NoSchedule
|
||||
# Mark the pod as a critical add-on for rescheduling.
|
||||
- key: "CriticalAddonsOnly"
|
||||
operator: "Exists"
|
||||
containers:
|
||||
# Runs calico/node container on each Kubernetes node. This
|
||||
# container programs network policy and routes on each
|
||||
# host.
|
||||
- name: calico-node
|
||||
image: quay.io/calico/node:v1.2.1
|
||||
env:
|
||||
# Use Kubernetes API as the backing datastore.
|
||||
- name: DATASTORE_TYPE
|
||||
value: "kubernetes"
|
||||
# Enable felix logging.
|
||||
- name: FELIX_LOGSEVERITYSYS
|
||||
value: "info"
|
||||
# Period, in seconds, at which felix re-applies all iptables state
|
||||
- name: FELIX_IPTABLESREFRESHINTERVAL
|
||||
value: "60"
|
||||
# Disable IPV6 support in Felix.
|
||||
- name: FELIX_IPV6SUPPORT
|
||||
value: "false"
|
||||
# Don't enable BGP.
|
||||
- name: CALICO_NETWORKING_BACKEND
|
||||
value: "none"
|
||||
# Disable file logging so `kubectl logs` works.
|
||||
- name: CALICO_DISABLE_FILE_LOGGING
|
||||
value: "true"
|
||||
- name: WAIT_FOR_DATASTORE
|
||||
value: "true"
|
||||
# No IP address needed.
|
||||
- name: IP
|
||||
value: ""
|
||||
- name: HOSTNAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
# Set Felix endpoint to host default action to ACCEPT.
|
||||
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
|
||||
value: "ACCEPT"
|
||||
securityContext:
|
||||
privileged: true
|
||||
resources:
|
||||
requests:
|
||||
cpu: 250m
|
||||
volumeMounts:
|
||||
- mountPath: /lib/modules
|
||||
name: lib-modules
|
||||
readOnly: true
|
||||
- mountPath: /var/run/calico
|
||||
name: var-run-calico
|
||||
readOnly: false
|
||||
# This container installs the Calico CNI binaries
|
||||
# and CNI network config file on each node.
|
||||
- name: install-cni
|
||||
image: quay.io/calico/cni:v1.8.3
|
||||
command: ["/install-cni.sh"]
|
||||
env:
|
||||
# The CNI network config to install on each node.
|
||||
- name: CNI_NETWORK_CONFIG
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: canal-config
|
||||
key: cni_network_config
|
||||
- name: KUBERNETES_NODE_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
volumeMounts:
|
||||
- mountPath: /host/opt/cni/bin
|
||||
name: cni-bin-dir
|
||||
- mountPath: /host/etc/cni/net.d
|
||||
name: cni-net-dir
|
||||
# This container runs flannel using the kube-subnet-mgr backend
|
||||
# for allocating subnets.
|
||||
- name: kube-flannel
|
||||
image: quay.io/coreos/flannel:v0.8.0
|
||||
command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ]
|
||||
securityContext:
|
||||
privileged: true
|
||||
env:
|
||||
- name: POD_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.name
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
- name: FLANNELD_IFACE
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: canal-config
|
||||
key: canal_iface
|
||||
- name: FLANNELD_IP_MASQ
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: canal-config
|
||||
key: masquerade
|
||||
volumeMounts:
|
||||
- name: run
|
||||
mountPath: /run
|
||||
- name: flannel-cfg
|
||||
mountPath: /etc/kube-flannel/
|
||||
volumes:
|
||||
# Used by calico/node.
|
||||
- name: lib-modules
|
||||
hostPath:
|
||||
path: /lib/modules
|
||||
- name: var-run-calico
|
||||
hostPath:
|
||||
path: /var/run/calico
|
||||
# Used to install CNI.
|
||||
- name: cni-bin-dir
|
||||
hostPath:
|
||||
path: /opt/cni/bin
|
||||
- name: cni-net-dir
|
||||
hostPath:
|
||||
path: /etc/cni/net.d
|
||||
# Used by flannel.
|
||||
- name: run
|
||||
hostPath:
|
||||
path: /run
|
||||
- name: flannel-cfg
|
||||
configMap:
|
||||
name: canal-config
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: canal
|
||||
namespace: kube-system
|
94
tools/kubeadm-aio/assets/opt/cni-manifests/flannel.yaml
Normal file
94
tools/kubeadm-aio/assets/opt/cni-manifests/flannel.yaml
Normal file
@ -0,0 +1,94 @@
|
||||
#https://raw.githubusercontent.com/coreos/flannel/v0.8.0/Documentation/kube-flannel.yml
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: flannel
|
||||
namespace: kube-system
|
||||
---
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: kube-flannel-cfg
|
||||
namespace: kube-system
|
||||
labels:
|
||||
tier: node
|
||||
app: flannel
|
||||
data:
|
||||
cni-conf.json: |
|
||||
{
|
||||
"name": "cbr0",
|
||||
"type": "flannel",
|
||||
"delegate": {
|
||||
"isDefaultGateway": true
|
||||
}
|
||||
}
|
||||
net-conf.json: |
|
||||
{
|
||||
"Network": "192.168.0.0/16",
|
||||
"Backend": {
|
||||
"Type": "vxlan"
|
||||
}
|
||||
}
|
||||
---
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
name: kube-flannel-ds
|
||||
namespace: kube-system
|
||||
labels:
|
||||
tier: node
|
||||
app: flannel
|
||||
spec:
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
tier: node
|
||||
app: flannel
|
||||
spec:
|
||||
hostNetwork: true
|
||||
nodeSelector:
|
||||
beta.kubernetes.io/arch: amd64
|
||||
tolerations:
|
||||
- key: node-role.kubernetes.io/master
|
||||
operator: Exists
|
||||
effect: NoSchedule
|
||||
serviceAccountName: flannel
|
||||
containers:
|
||||
- name: kube-flannel
|
||||
image: quay.io/coreos/flannel:v0.8.0-amd64
|
||||
command: [ "/opt/bin/flanneld", "--ip-masq", "--kube-subnet-mgr" ]
|
||||
securityContext:
|
||||
privileged: true
|
||||
env:
|
||||
- name: POD_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.name
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
volumeMounts:
|
||||
- name: run
|
||||
mountPath: /run
|
||||
- name: flannel-cfg
|
||||
mountPath: /etc/kube-flannel/
|
||||
- name: install-cni
|
||||
image: quay.io/coreos/flannel:v0.8.0-amd64
|
||||
command: [ "/bin/sh", "-c", "set -e -x; cp -f /etc/kube-flannel/cni-conf.json /etc/cni/net.d/10-flannel.conf; while true; do sleep 3600; done" ]
|
||||
volumeMounts:
|
||||
- name: cni
|
||||
mountPath: /etc/cni/net.d
|
||||
- name: flannel-cfg
|
||||
mountPath: /etc/kube-flannel/
|
||||
volumes:
|
||||
- name: run
|
||||
hostPath:
|
||||
path: /run
|
||||
- name: cni
|
||||
hostPath:
|
||||
path: /etc/cni/net.d
|
||||
- name: flannel-cfg
|
||||
configMap:
|
||||
name: kube-flannel-cfg
|
187
tools/kubeadm-aio/assets/opt/cni-manifests/weave.yaml
Normal file
187
tools/kubeadm-aio/assets/opt/cni-manifests/weave.yaml
Normal file
@ -0,0 +1,187 @@
|
||||
# curl --location "https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')&env.WEAVE_MTU=1337&env.IPALLOC_RANGE=192.168.0.0/16
|
||||
apiVersion: v1
|
||||
kind: List
|
||||
items:
|
||||
- apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: weave-net
|
||||
annotations:
|
||||
cloud.weave.works/launcher-info: |-
|
||||
{
|
||||
"server-version": "master-c3b4969",
|
||||
"original-request": {
|
||||
"url": "/k8s/v1.6/net.yaml?k8s-version=Q2xpZW50IFZlcnNpb246IHZlcnNpb24uSW5mb3tNYWpvcjoiMSIsIE1pbm9yOiI2IiwgR2l0VmVyc2lvbjoidjEuNi43IiwgR2l0Q29tbWl0OiIwOTUxMzZjMzA3OGNjZjg4N2I5MDM0YjdjZTU5OGEwYTFmYWZmNzY5IiwgR2l0VHJlZVN0YXRlOiJjbGVhbiIsIEJ1aWxkRGF0ZToiMjAxNy0wNy0wNVQxNjo1MTo1NloiLCBHb1ZlcnNpb246ImdvMS43LjYiLCBDb21waWxlcjoiZ2MiLCBQbGF0Zm9ybToibGludXgvYW1kNjQifQpTZXJ2ZXIgVmVyc2lvbjogdmVyc2lvbi5JbmZve01ham9yOiIxIiwgTWlub3I6IjYiLCBHaXRWZXJzaW9uOiJ2MS42LjciLCBHaXRDb21taXQ6IjA5NTEzNmMzMDc4Y2NmODg3YjkwMzRiN2NlNTk4YTBhMWZhZmY3NjkiLCBHaXRUcmVlU3RhdGU6ImNsZWFuIiwgQnVpbGREYXRlOiIyMDE3LTA3LTA1VDE2OjQwOjQyWiIsIEdvVmVyc2lvbjoiZ28xLjcuNiIsIENvbXBpbGVyOiJnYyIsIFBsYXRmb3JtOiJsaW51eC9hbWQ2NCJ9Cg==&env.WEAVE_MTU=1337&env.IPALLOC_RANGE=192.168.0.0/16",
|
||||
"date": "Sun Jul 30 2017 02:48:47 GMT+0000 (UTC)"
|
||||
},
|
||||
"email-address": "support@weave.works"
|
||||
}
|
||||
labels:
|
||||
name: weave-net
|
||||
namespace: kube-system
|
||||
- apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: weave-net
|
||||
annotations:
|
||||
cloud.weave.works/launcher-info: |-
|
||||
{
|
||||
"server-version": "master-c3b4969",
|
||||
"original-request": {
|
||||
"url": "/k8s/v1.6/net.yaml?k8s-version=Q2xpZW50IFZlcnNpb246IHZlcnNpb24uSW5mb3tNYWpvcjoiMSIsIE1pbm9yOiI2IiwgR2l0VmVyc2lvbjoidjEuNi43IiwgR2l0Q29tbWl0OiIwOTUxMzZjMzA3OGNjZjg4N2I5MDM0YjdjZTU5OGEwYTFmYWZmNzY5IiwgR2l0VHJlZVN0YXRlOiJjbGVhbiIsIEJ1aWxkRGF0ZToiMjAxNy0wNy0wNVQxNjo1MTo1NloiLCBHb1ZlcnNpb246ImdvMS43LjYiLCBDb21waWxlcjoiZ2MiLCBQbGF0Zm9ybToibGludXgvYW1kNjQifQpTZXJ2ZXIgVmVyc2lvbjogdmVyc2lvbi5JbmZve01ham9yOiIxIiwgTWlub3I6IjYiLCBHaXRWZXJzaW9uOiJ2MS42LjciLCBHaXRDb21taXQ6IjA5NTEzNmMzMDc4Y2NmODg3YjkwMzRiN2NlNTk4YTBhMWZhZmY3NjkiLCBHaXRUcmVlU3RhdGU6ImNsZWFuIiwgQnVpbGREYXRlOiIyMDE3LTA3LTA1VDE2OjQwOjQyWiIsIEdvVmVyc2lvbjoiZ28xLjcuNiIsIENvbXBpbGVyOiJnYyIsIFBsYXRmb3JtOiJsaW51eC9hbWQ2NCJ9Cg==&env.WEAVE_MTU=1337&env.IPALLOC_RANGE=192.168.0.0/16",
|
||||
"date": "Sun Jul 30 2017 02:48:47 GMT+0000 (UTC)"
|
||||
},
|
||||
"email-address": "support@weave.works"
|
||||
}
|
||||
labels:
|
||||
name: weave-net
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ''
|
||||
resources:
|
||||
- pods
|
||||
- namespaces
|
||||
- nodes
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- extensions
|
||||
resources:
|
||||
- networkpolicies
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiVersion: rbac.authorization.k8s.io/v1beta1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: weave-net
|
||||
annotations:
|
||||
cloud.weave.works/launcher-info: |-
|
||||
{
|
||||
"server-version": "master-c3b4969",
|
||||
"original-request": {
|
||||
"url": "/k8s/v1.6/net.yaml?k8s-version=Q2xpZW50IFZlcnNpb246IHZlcnNpb24uSW5mb3tNYWpvcjoiMSIsIE1pbm9yOiI2IiwgR2l0VmVyc2lvbjoidjEuNi43IiwgR2l0Q29tbWl0OiIwOTUxMzZjMzA3OGNjZjg4N2I5MDM0YjdjZTU5OGEwYTFmYWZmNzY5IiwgR2l0VHJlZVN0YXRlOiJjbGVhbiIsIEJ1aWxkRGF0ZToiMjAxNy0wNy0wNVQxNjo1MTo1NloiLCBHb1ZlcnNpb246ImdvMS43LjYiLCBDb21waWxlcjoiZ2MiLCBQbGF0Zm9ybToibGludXgvYW1kNjQifQpTZXJ2ZXIgVmVyc2lvbjogdmVyc2lvbi5JbmZve01ham9yOiIxIiwgTWlub3I6IjYiLCBHaXRWZXJzaW9uOiJ2MS42LjciLCBHaXRDb21taXQ6IjA5NTEzNmMzMDc4Y2NmODg3YjkwMzRiN2NlNTk4YTBhMWZhZmY3NjkiLCBHaXRUcmVlU3RhdGU6ImNsZWFuIiwgQnVpbGREYXRlOiIyMDE3LTA3LTA1VDE2OjQwOjQyWiIsIEdvVmVyc2lvbjoiZ28xLjcuNiIsIENvbXBpbGVyOiJnYyIsIFBsYXRmb3JtOiJsaW51eC9hbWQ2NCJ9Cg==&env.WEAVE_MTU=1337&env.IPALLOC_RANGE=192.168.0.0/16",
|
||||
"date": "Sun Jul 30 2017 02:48:47 GMT+0000 (UTC)"
|
||||
},
|
||||
"email-address": "support@weave.works"
|
||||
}
|
||||
labels:
|
||||
name: weave-net
|
||||
roleRef:
|
||||
kind: ClusterRole
|
||||
name: weave-net
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: weave-net
|
||||
namespace: kube-system
|
||||
- apiVersion: extensions/v1beta1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
name: weave-net
|
||||
annotations:
|
||||
cloud.weave.works/launcher-info: |-
|
||||
{
|
||||
"server-version": "master-c3b4969",
|
||||
"original-request": {
|
||||
"url": "/k8s/v1.6/net.yaml?k8s-version=Q2xpZW50IFZlcnNpb246IHZlcnNpb24uSW5mb3tNYWpvcjoiMSIsIE1pbm9yOiI2IiwgR2l0VmVyc2lvbjoidjEuNi43IiwgR2l0Q29tbWl0OiIwOTUxMzZjMzA3OGNjZjg4N2I5MDM0YjdjZTU5OGEwYTFmYWZmNzY5IiwgR2l0VHJlZVN0YXRlOiJjbGVhbiIsIEJ1aWxkRGF0ZToiMjAxNy0wNy0wNVQxNjo1MTo1NloiLCBHb1ZlcnNpb246ImdvMS43LjYiLCBDb21waWxlcjoiZ2MiLCBQbGF0Zm9ybToibGludXgvYW1kNjQifQpTZXJ2ZXIgVmVyc2lvbjogdmVyc2lvbi5JbmZve01ham9yOiIxIiwgTWlub3I6IjYiLCBHaXRWZXJzaW9uOiJ2MS42LjciLCBHaXRDb21taXQ6IjA5NTEzNmMzMDc4Y2NmODg3YjkwMzRiN2NlNTk4YTBhMWZhZmY3NjkiLCBHaXRUcmVlU3RhdGU6ImNsZWFuIiwgQnVpbGREYXRlOiIyMDE3LTA3LTA1VDE2OjQwOjQyWiIsIEdvVmVyc2lvbjoiZ28xLjcuNiIsIENvbXBpbGVyOiJnYyIsIFBsYXRmb3JtOiJsaW51eC9hbWQ2NCJ9Cg==&env.WEAVE_MTU=1337&env.IPALLOC_RANGE=192.168.0.0/16",
|
||||
"date": "Sun Jul 30 2017 02:48:47 GMT+0000 (UTC)"
|
||||
},
|
||||
"email-address": "support@weave.works"
|
||||
}
|
||||
labels:
|
||||
name: weave-net
|
||||
namespace: kube-system
|
||||
spec:
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
name: weave-net
|
||||
spec:
|
||||
containers:
|
||||
- name: weave
|
||||
command:
|
||||
- /home/weave/launch.sh
|
||||
env:
|
||||
- name: WEAVE_MTU
|
||||
value: '1337'
|
||||
- name: IPALLOC_RANGE
|
||||
value: 192.168.0.0/16
|
||||
- name: HOSTNAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
apiVersion: v1
|
||||
fieldPath: spec.nodeName
|
||||
image: 'weaveworks/weave-kube:2.0.1'
|
||||
imagePullPolicy: Always
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
host: 127.0.0.1
|
||||
path: /status
|
||||
port: 6784
|
||||
initialDelaySeconds: 30
|
||||
resources:
|
||||
requests:
|
||||
cpu: 10m
|
||||
securityContext:
|
||||
privileged: true
|
||||
volumeMounts:
|
||||
- name: weavedb
|
||||
mountPath: /weavedb
|
||||
- name: cni-bin
|
||||
mountPath: /host/opt
|
||||
- name: cni-bin2
|
||||
mountPath: /host/home
|
||||
- name: cni-conf
|
||||
mountPath: /host/etc
|
||||
- name: dbus
|
||||
mountPath: /host/var/lib/dbus
|
||||
- name: lib-modules
|
||||
mountPath: /lib/modules
|
||||
- name: weave-npc
|
||||
env:
|
||||
- name: HOSTNAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
apiVersion: v1
|
||||
fieldPath: spec.nodeName
|
||||
image: 'weaveworks/weave-npc:2.0.1'
|
||||
imagePullPolicy: Always
|
||||
resources:
|
||||
requests:
|
||||
cpu: 10m
|
||||
securityContext:
|
||||
privileged: true
|
||||
hostNetwork: true
|
||||
hostPID: true
|
||||
restartPolicy: Always
|
||||
securityContext:
|
||||
seLinuxOptions: {}
|
||||
serviceAccountName: weave-net
|
||||
tolerations:
|
||||
- effect: NoSchedule
|
||||
operator: Exists
|
||||
volumes:
|
||||
- name: weavedb
|
||||
hostPath:
|
||||
path: /var/lib/weave
|
||||
- name: cni-bin
|
||||
hostPath:
|
||||
path: /opt
|
||||
- name: cni-bin2
|
||||
hostPath:
|
||||
path: /home
|
||||
- name: cni-conf
|
||||
hostPath:
|
||||
path: /etc
|
||||
- name: dbus
|
||||
hostPath:
|
||||
path: /var/lib/dbus
|
||||
- name: lib-modules
|
||||
hostPath:
|
||||
path: /lib/modules
|
||||
updateStrategy:
|
||||
type: RollingUpdate
|
73
tools/kubeadm-aio/assets/opt/nfs-provisioner/deployment.yaml
Normal file
73
tools/kubeadm-aio/assets/opt/nfs-provisioner/deployment.yaml
Normal file
@ -0,0 +1,73 @@
|
||||
kind: Service
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: nfs-provisioner
|
||||
labels:
|
||||
app: nfs-provisioner
|
||||
spec:
|
||||
ports:
|
||||
- name: nfs
|
||||
port: 2049
|
||||
- name: mountd
|
||||
port: 20048
|
||||
- name: rpcbind
|
||||
port: 111
|
||||
- name: rpcbind-udp
|
||||
port: 111
|
||||
protocol: UDP
|
||||
selector:
|
||||
app: nfs-provisioner
|
||||
---
|
||||
kind: Deployment
|
||||
apiVersion: apps/v1beta1
|
||||
metadata:
|
||||
name: nfs-provisioner
|
||||
spec:
|
||||
replicas: 1
|
||||
strategy:
|
||||
type: Recreate
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: nfs-provisioner
|
||||
spec:
|
||||
containers:
|
||||
- name: nfs-provisioner
|
||||
image: quay.io/kubernetes_incubator/nfs-provisioner:v1.0.7
|
||||
ports:
|
||||
- name: nfs
|
||||
containerPort: 2049
|
||||
- name: mountd
|
||||
containerPort: 20048
|
||||
- name: rpcbind
|
||||
containerPort: 111
|
||||
- name: rpcbind-udp
|
||||
containerPort: 111
|
||||
protocol: UDP
|
||||
securityContext:
|
||||
capabilities:
|
||||
add:
|
||||
- DAC_READ_SEARCH
|
||||
- SYS_RESOURCE
|
||||
args:
|
||||
- "-provisioner=example.com/nfs"
|
||||
- "-grace-period=10"
|
||||
env:
|
||||
- name: POD_IP
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: status.podIP
|
||||
- name: SERVICE_NAME
|
||||
value: nfs-provisioner
|
||||
- name: POD_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: metadata.namespace
|
||||
imagePullPolicy: "IfNotPresent"
|
||||
volumeMounts:
|
||||
- name: export-volume
|
||||
mountPath: /export
|
||||
volumes:
|
||||
- name: export-volume
|
||||
hostPath:
|
||||
path: /var/lib/nfs-provisioner
|
@ -0,0 +1,5 @@
|
||||
kind: StorageClass
|
||||
apiVersion: storage.k8s.io/v1
|
||||
metadata:
|
||||
name: general
|
||||
provisioner: example.com/nfs
|
15
tools/kubeadm-aio/assets/opt/rbac/dev.yaml
Normal file
15
tools/kubeadm-aio/assets/opt/rbac/dev.yaml
Normal file
@ -0,0 +1,15 @@
|
||||
apiVersion: rbac.authorization.k8s.io/v1alpha1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: cluster-admin
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: cluster-admin
|
||||
subjects:
|
||||
- kind: Group
|
||||
name: system:masters
|
||||
- kind: Group
|
||||
name: system:authenticated
|
||||
- kind: Group
|
||||
name: system:unauthenticated
|
94
tools/kubeadm-aio/kubeadm-aio-launcher.sh
Executable file
94
tools/kubeadm-aio/kubeadm-aio-launcher.sh
Executable file
@ -0,0 +1,94 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Copyright 2017 The Openstack-Helm Authors.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
set -xe
|
||||
|
||||
# Setup shared mounts for kubelet
|
||||
sudo mkdir -p /var/lib/kubelet
|
||||
sudo mount --bind /var/lib/kubelet /var/lib/kubelet
|
||||
sudo mount --make-shared /var/lib/kubelet
|
||||
|
||||
# Cleanup any old deployment
|
||||
sudo docker rm -f kubeadm-aio || true
|
||||
sudo docker rm -f kubelet || true
|
||||
sudo docker ps -aq | xargs -r -l1 -P16 sudo docker rm -f
|
||||
sudo rm -rfv \
|
||||
/etc/cni/net.d \
|
||||
/etc/kubernetes \
|
||||
/var/lib/etcd \
|
||||
/var/etcd \
|
||||
/var/lib/kubelet/* \
|
||||
${HOME}/.kubeadm-aio/admin.conf || true
|
||||
|
||||
: ${KUBE_CNI:="calico"}
|
||||
: ${CNI_POD_CIDR:="192.168.0.0/16"}
|
||||
|
||||
# Launch Container, refer to:
|
||||
# https://docs.docker.com/engine/reference/run/
|
||||
sudo docker run \
|
||||
-dt \
|
||||
--name=kubeadm-aio \
|
||||
--net=host \
|
||||
--security-opt=seccomp:unconfined \
|
||||
--cap-add=SYS_ADMIN \
|
||||
--tmpfs=/run \
|
||||
--tmpfs=/run/lock \
|
||||
--volume=/etc/machine-id:/etc/machine-id:ro \
|
||||
--volume=${HOME}:${HOME}:rw \
|
||||
--volume=${HOME}/.kubeadm-aio:/root:rw \
|
||||
--volume=/etc/kubernetes:/etc/kubernetes:rw \
|
||||
--volume=/sys/fs/cgroup:/sys/fs/cgroup:ro \
|
||||
--volume=/var/run/docker.sock:/run/docker.sock \
|
||||
--env KUBELET_CONTAINER=${KUBEADM_IMAGE} \
|
||||
--env KUBE_VERSION=${KUBE_VERSION} \
|
||||
--env KUBE_CNI=${KUBE_CNI} \
|
||||
--env CNI_POD_CIDR=${CNI_POD_CIDR} \
|
||||
${KUBEADM_IMAGE}
|
||||
|
||||
echo "Waiting for kubeconfig"
|
||||
set +x
|
||||
end=$(($(date +%s) + 600))
|
||||
READY="False"
|
||||
while true; do
|
||||
if [ -f ${HOME}/.kubeadm-aio/admin.conf ]; then
|
||||
READY="True"
|
||||
fi
|
||||
[ $READY == "True" ] && break || true
|
||||
sleep 1
|
||||
now=$(date +%s)
|
||||
[ $now -gt $end ] && \
|
||||
echo "KubeADM did not generate kubectl config in time" && \
|
||||
sudo docker logs kubeadm-aio && exit -1
|
||||
done
|
||||
set -x
|
||||
|
||||
# Set perms of kubeconfig and set env-var
|
||||
sudo chown $(id -u):$(id -g) ${HOME}/.kubeadm-aio/admin.conf
|
||||
export KUBECONFIG=${HOME}/.kubeadm-aio/admin.conf
|
||||
|
||||
echo "Waiting for node to be ready before continuing"
|
||||
set +x
|
||||
end=$(($(date +%s) + 600))
|
||||
READY="False"
|
||||
while true; do
|
||||
READY=$(kubectl get nodes --no-headers=true | awk "{ print \$2 }" | head -1)
|
||||
[ $READY == "Ready" ] && break || true
|
||||
sleep 1
|
||||
now=$(date +%s)
|
||||
[ $now -gt $end ] && \
|
||||
echo "Kube node did not register as ready in time" && \
|
||||
sudo docker logs kubeadm-aio && exit -1
|
||||
done
|
||||
set -x
|
Loading…
Reference in New Issue
Block a user