Cleanup solar dependency

Purge solar/resources as it is not used anymore in the installer.
Purge scripts/yamls used with the solar deployments as well. Fix
gitreview, gitignore, version, README. Add doc8/docs toxes for
future docs.

Change-Id: Iad418369229fd522588f6195605051daffb3dce2
Signed-off-by: Bogdan Dobrelya <bdobrelia@mirantis.com>
parent b9a685d625
commit e8ac0d107e
.gitignore
@@ -3,6 +3,10 @@
 # C extensions
 *.so
 
+# Artifacts
+utils/packer/*/packer_cache/*
+utils/packer/*/output*
+
 # Packages
 *.egg*
 *.egg-info
@@ -44,6 +48,7 @@ output/*/index.html

# Sphinx
doc/build
publish-doc

# pbr generates these
AUTHORS
.gitreview
@@ -1,4 +1,4 @@
 [gerrit]
-host=review.fuel-infra.org
+host=review.openstack.org
 port=29418
-project=nextgen/mcpinstaller.git
+project=openstack/fuel-ccp-installer.git
README.rst
@@ -1,46 +1,4 @@
-This repository contains resources for configuring kubernetes with calico networking plugin using Solar.
+Express Fuel CCP setup using Kargo
+----------------------------------
-
-Express Vagrant setup:
----------------------
-
-1. `git clone -b stable https://github.com/pigmej/mcpinstall.git && cd mcpinstall`
-2. `./deploy/kube-up.sh`
-3. `vagrant ssh solar`
-4. `kubectl get pods`
-
-You can adjust any setting value from `utils/vagrant/vagrant-settings.yaml_defaults` by setting it in `utils/vagrant/vagrant-settings.yaml`. For example, on Linux you should consider setting `sync_type` to `nfs` by adding `sync_type: nfs` to your vagrant-settings.yaml.
-
-Fedora slave nodes:
--------------------
-
-If you don't want to use Ubuntu for slaves, you can use Fedora. After step 2 in the steps above, do as follows:
-
-1. Download a box file for the Vagrant provider you are using from [here](https://dl.fedoraproject.org/pub/fedora/linux/releases/23/Cloud/x86_64/Images/)
-2. Import it to Vagrant: `vagrant box add fc23 <downloaded-box-name> --provider <provider> --force`
-3. Change the slaves_image value to `fc23` in the vagrant-settings.yaml file.
-4. Proceed from step 3 of the "Express Vagrant setup" section.
-
-
-Configuration:
---------------
-
-In config.yaml you can set:
-- login data for kubernetes master
-- IP address for master
-- default login data for kubernetes slave nodes
-- node-specific config (IP address is required, but you can override default access data)
-- global_config resource inputs including:
-  - cluster dns ip and domain
-  - cluster ip range
-  - default k8s version
-  - default calico version
-
-LCM example: Kubernetes version change:
---------------------------------------
-
-1. log in to the solar master node (`vagrant ssh solar`)
-2. solar resource update kube-config k8s_version=v1.2.1
-3. solar changes stage
-4. solar changes process
-5. solar orch run-once -w 600
-6. After a while, kubernetes will restart at the desired version
+
+TBD
@@ -1,47 +0,0 @@
#! /bin/bash

# set -xe

export SOLAR_CONFIG_OVERRIDE="/.solar_config_override"

# install kubectl if not exists
if ! type "kubectl" > /dev/null; then
    wget "https://storage.googleapis.com/kubernetes-release/release/v1.2.2/bin/linux/amd64/kubectl"
    chmod +x kubectl
    sudo mv kubectl /usr/local/bin/kubectl
fi

pushd ~
if [ ! -d ".kube" ]; then
    mkdir .kube
    cp /vagrant/kube-config .kube/config
fi

# solar-resources stuff
git clone https://github.com/openstack/solar-resources
solar repo import -l solar-resources/resources -n resources
solar repo import -l solar-resources/templates -n templates

pushd /vagrant
sudo pip install -r requirements.txt
solar repo import -l resources --name k8s

# copy config if not found
if [ ! -f "config.yaml" ]; then
    cp config.yaml.sample config.yaml
fi

./mcpinstall.py deploy
./mcpinstall.py dns
solar changes stage
solar changes process

SUCCESS_MESSAGE='mcpinstall succeeded'
ERROR_MESSAGE='mcpinstall encountered an error. Please refer to above message to see detailed error message. Deployment continues.'

if solar orch run-once -w 1200 -s ; then
    echo $SUCCESS_MESSAGE
else
    echo $ERROR_MESSAGE
    solar orch report -w 120
fi
@@ -1,11 +0,0 @@
#! /bin/bash
pushd /vagrant/deploy
rm -rf solar
rm -rf solar-resources

git clone https://github.com/openstack/solar.git
pushd solar/bootstrap/playbooks
find . -type f -print0 |
    xargs -0 perl -pi -e 's;(?<!(home|/tmp))/vagrant;/vagrant/deploy/solar;g'
perl -pi -e 's;pip install -e .;'\
'pip install -e /vagrant/deploy/solar;g' solar.yaml
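For reference, the lookbehind in the removed rewrite only spares paths already under home or /tmp; a minimal illustration with assumed sample paths (not part of the change):

    # '/vagrant' is rewritten unless immediately preceded by 'home' or '/tmp'
    echo "/vagrant/config.yaml /home/vagrant/x /tmp/vagrant/y" \
        | perl -pe 's;(?<!(home|/tmp))/vagrant;/vagrant/deploy/solar;g'
    # -> /vagrant/deploy/solar/config.yaml /home/vagrant/x /tmp/vagrant/y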
@@ -1,5 +0,0 @@
#! /bin/bash

pushd utils/vagrant
vagrant up
vagrant ssh solar -c /vagrant/deploy/deploy.sh
@@ -0,0 +1,16 @@
# Copyright 2015-2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import pbr.version

version_info = pbr.version.VersionInfo('fuel-ccp-installer')
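Once installed, the pbr-reported version can be checked from the shell; a minimal sketch, assuming the package and pbr are installed:

    python -c "import pbr.version; print(pbr.version.VersionInfo('fuel-ccp-installer').version_string())"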
mcpinstall.py
@@ -1,324 +0,0 @@
#!/usr/bin/env python

import argparse
import os
import re

from netaddr import IPAddress
import yaml

from solar.core.resource import composer as cr
from solar.core.resource import resource as rs
from solar.events.api import add_event
from solar.events.controls import Dep

DEFAULT_MASTER_NODE_RESOURCE_NAME = 'kube-node-master'
MASTER_NODE_RESOURCE_NAME = None
CONFIG_NAME = 'config.yaml'
DEFAULT_CONFIG_NAME = 'config.yaml.sample'


def create_config(global_config):
    return cr.create('kube-config', 'k8s/global_config', global_config)[0]


def get_slave_nodes():
    kube_nodes = rs.load_all(startswith='kube-node-')
    p = re.compile('^kube-node-\d+$')
    return [node for node in kube_nodes if p.match(node.name)]


def setup_master(config, user_config, existing_node):
    if existing_node:
        master = existing_node
    else:
        master = cr.create(
            MASTER_NODE_RESOURCE_NAME, 'k8s/node',
            {'name': 'kube-node-master',
             'ip': user_config['ip'],
             'ssh_user': user_config['username'],
             'ssh_password': user_config['password'],
             'ssh_key': user_config['ssh_key']})['kube-node-master']

    master.connect(config, {})
    docker = cr.create('kube-docker-master',
                       'k8s/docker')['kube-docker-master']
    master.connect(docker, {})

    kubelet = cr.create('kubelet-master',
                        'k8s/kubelet_master')['kubelet-master']
    config.connect(kubelet, {'k8s_version': 'k8s_version'})

    calico = cr.create('calico-master', 'k8s/calico_master',
                       {'options': "--nat-outgoing --ipip"})['calico-master']
    master.connect(calico, {'ip': ['ip', 'etcd_host']})
    config.connect(calico, {'network': 'network',
                            'prefix': 'prefix',
                            'calico_version': 'version'})
    calico.connect(calico, {'etcd_host': 'etcd_authority',
                            'etcd_port': 'etcd_authority',
                            'etcd_authority': 'etcd_authority_internal'})
    config.connect(kubelet,
                   {'service_cluster_ip_range': "service_cluster_ip_range"})
    master.connect(kubelet, {'name': 'master_host'})
    kubelet.connect(kubelet, {'master_host': 'master_address',
                              'master_port': 'master_address'})

    add_event(Dep('hosts_file_node_{}'.format(master.name), 'run', 'success',
                  kubelet.name, 'run'))

    add_event(Dep(docker.name, 'run', 'success', kubelet.name, 'run'))
    add_event(Dep(kubelet.name, 'run', 'success', calico.name, 'run'))


def setup_nodes(config, user_config, num=1, existing_nodes=None):
    kube_nodes = []
    kubernetes_master = rs.load('kubelet-master')
    calico_master = rs.load('calico-master')
    internal_network = IPAddress(config.args['network'])

    if existing_nodes:
        kube_nodes = [
            setup_slave_node(config, kubernetes_master, calico_master,
                             internal_network, i, None, node)
            for (i, node) in enumerate(existing_nodes)
        ]
    else:
        kube_nodes = [
            setup_slave_node(config, kubernetes_master, calico_master,
                             internal_network, i, user_config[i])
            for i in xrange(num)
        ]

    kube_master = rs.load(MASTER_NODE_RESOURCE_NAME)
    all_nodes = kube_nodes[:] + [kube_master]
    hosts_files = rs.load_all(startswith='hosts_file_node_')
    for node in all_nodes:
        for host_file in hosts_files:
            node.connect(host_file, {'name': 'hosts:name', 'ip': 'hosts:ip'})


def setup_slave_node(config,
                     kubernetes_master,
                     calico_master,
                     internal_network,
                     i,
                     user_config=None,
                     existing_node=None):
    j = i + 1
    if existing_node:
        kube_node = existing_node
    else:
        kube_node = cr.create(
            'kube-node-%d' % j, 'k8s/node',
            {'name': 'kube-node-%d' % j,
             'ip': user_config['ip'],
             'ssh_user': user_config['username'],
             'ssh_password': user_config['password'],
             'ssh_key': user_config['ssh_key']})['kube-node-%d' % j]

    iface_node = cr.create('kube-node-%d-iface' % j, 'k8s/virt_iface',
                           {'name': 'cbr0',
                            'ipaddr': str(internal_network + 256 * j + 1),
                            'onboot': 'yes',
                            'bootproto': 'static',
                            'type': 'Bridge'})['kube-node-%d-iface' % j]
    kube_node.connect(iface_node, {})

    config.connect(iface_node, {'netmask': 'netmask'})

    calico_node = cr.create('calico-node-%d' % j, 'k8s/calico', {})[0]

    kube_node.connect(calico_node, {'ip': 'ip'})
    config.connect(calico_node, {'calico_version': 'version'})

    calico_master.connect(calico_node, {'etcd_authority': 'etcd_authority'})
    calico_node.connect(calico_node, {
        'etcd_authority': 'etcd_authority_internal'
    })
    calico_cni = cr.create('calico-cni-node-%d' % j, 'k8s/cni', {})[0]
    calico_node.connect(calico_cni,
                        {'etcd_authority_internal': 'etcd_authority'})

    docker = cr.create('kube-docker-%d' % j,
                       'k8s/docker')['kube-docker-%d' % j]

    kube_node.connect(docker, {})
    iface_node.connect(docker, {'name': 'iface'})

    kubelet = cr.create('kubelet-node-%d' % j, 'k8s/kubelet', {
        'kubelet_args': '--v=5',
    })['kubelet-node-%d' % j]

    kube_node.connect(kubelet, {'name': 'kubelet_hostname'})
    kubernetes_master.connect(kubelet, {'master_address': 'master_api'})
    config.connect(kubelet, {'cluster_domain': 'cluster_domain',
                             'cluster_dns': 'cluster_dns',
                             'k8s_version': 'k8s_version'})

    add_event(Dep('hosts_file_node_{}'.format(kube_node.name), 'run',
                  'success', kubernetes_master.name, 'run'))

    add_event(Dep(docker.name, 'run', 'success', calico_node.name, 'run'))
    add_event(Dep(docker.name, 'run', 'success', kubelet.name, 'run'))
    add_event(Dep(calico_node.name, 'run', 'success', kubelet.name, 'run'))
    return kube_node


def add_dashboard(args, *_):
    kube_master = rs.load(MASTER_NODE_RESOURCE_NAME)
    master = rs.load('kubelet-master')
    dashboard = cr.create('kubernetes-dashboard', 'k8s/dashboard', {})[0]
    master.connect(dashboard, {'master_port': 'api_port'})
    kube_master.connect(dashboard, {'ip': 'api_host'})


def add_dns(args, *_):
    config = rs.load('kube-config')
    kube_master = rs.load(MASTER_NODE_RESOURCE_NAME)
    master = rs.load('kubelet-master')
    kube_dns = cr.create('kube-dns', 'k8s/kubedns', {})[0]
    master.connect(kube_dns, {'master_port': 'api_port'})
    kube_master.connect(kube_dns, {'ip': 'api_host'})
    config.connect(kube_dns, {'cluster_domain': 'cluster_domain',
                              'cluster_dns': 'cluster_dns'})


def add_node(args, user_config):
    if args.nodes == 0:
        requested_num = 1
    else:
        requested_num = args.nodes
    config = rs.load('kube-config')
    kubernetes_master = rs.load('kubelet-master')
    calico_master = rs.load('calico-master')
    internal_network = IPAddress(config.args['network'])

    def get_node_id(n):
        return n.name.split('-')[-1]

    kube_nodes = get_slave_nodes()
    newest_id = int(get_node_id(max(kube_nodes, key=get_node_id)))

    user_defined_nodes = user_config['kube_slaves']['slaves']
    new_left = len(user_defined_nodes) - len(kube_nodes)
    if new_left <= 0:
        raise ValueError("You need to configure more nodes in config.yaml")
    if new_left < requested_num:
        raise ValueError("You need to configure more nodes in config.yaml")

    new_nodes = [setup_slave_node(
        config=config, user_config=user_defined_nodes[i],
        kubernetes_master=kubernetes_master, calico_master=calico_master,
        internal_network=internal_network, i=i)
        for i in xrange(newest_id, newest_id + requested_num)]

    kube_master = rs.load(MASTER_NODE_RESOURCE_NAME)
    all_nodes = new_nodes[:] + [kube_master]
    hosts_files = rs.load_all(startswith='hosts_file_node_')
    for node in all_nodes:
        for host_file in hosts_files:
            node.connect(host_file, {'name': 'hosts:name', 'ip': 'hosts:ip'})


def get_master_and_slave_nodes():
    nodes = sorted(rs.load_all(startswith='node'), key=lambda x: x.name)
    # We are using existing nodes only if there are 2 or more of them. One
    # created node will result in all resources being created from scratch.
    if len(nodes) >= 2:
        return (nodes[0], nodes[1:])
    else:
        return (None, None)


def deploy_k8s(args, user_config):
    if args.nodes == 0:
        requested_num = len(user_config['kube_slaves']['slaves'])
    else:
        requested_num = args.nodes
    master_node, slave_nodes = get_master_and_slave_nodes()

    config = create_config(user_config['global_config'])

    setup_master(config, user_config['kube_master'], master_node)
    setup_nodes(config, user_config['kube_slaves']['slaves'], requested_num,
                slave_nodes)

    if args.dashboard:
        add_dashboard(args)

    if args.dns:
        add_dns(args)


commands = {
    'deploy': deploy_k8s,
    'dashboard': add_dashboard,
    'dns': add_dns,
    'add-node': add_node
}


def get_args(user_config):
    parser = argparse.ArgumentParser()
    parser.add_argument('command', type=str, choices=commands.keys())
    parser.add_argument('--nodes',
                        type=int,
                        default=0,
                        help='Slave node count. Works with deploy and '
                        'add-node. WARNING - this parameter does not work if '
                        'you have already created Solar node resources. This '
                        'script will make use of all your previously created '
                        'Solar nodes if their count is bigger than 1.')
    parser.add_argument('--dashboard',
                        dest='dashboard',
                        action='store_true',
                        help='Add dashboard. Works with deploy only. Can be '
                        'done separately with `mcpinstall.py dashboard`')
    parser.add_argument('--dns',
                        dest='dns',
                        action='store_true',
                        help='Add dns. Works with deploy only. Can be done '
                        'separately with `mcpinstall.py dns`')
    parser.set_defaults(dashboard=False, dns=False)

    return parser.parse_args()


def get_user_config():
    global CONFIG_NAME
    global DEFAULT_CONFIG_NAME

    if os.path.exists(CONFIG_NAME):
        with open(CONFIG_NAME) as conf:
            config = yaml.load(conf)
    elif os.path.exists(DEFAULT_CONFIG_NAME):
        with open(DEFAULT_CONFIG_NAME) as conf:
            config = yaml.load(conf)
    else:
        raise Exception('{0} and {1} configuration files not found'.format(
            CONFIG_NAME, DEFAULT_CONFIG_NAME))

    for slave in config['kube_slaves']['slaves']:
        for key, value in config['kube_slaves']['default'].iteritems():
            if key not in slave:
                slave[key] = value

    return config


def setup_master_node_name():
    global MASTER_NODE_RESOURCE_NAME

    master, _ = get_master_and_slave_nodes()
    if master is not None:
        MASTER_NODE_RESOURCE_NAME = master.name
    else:
        MASTER_NODE_RESOURCE_NAME = DEFAULT_MASTER_NODE_RESOURCE_NAME


if __name__ == "__main__":
    user_config = get_user_config()
    args = get_args(user_config)
    setup_master_node_name()
    commands[args.command](args, user_config)
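For the record, the removed CLI was driven as in the deleted deploy script above; typical invocations follow from its argparse definitions (a sketch, not exhaustive):

    ./mcpinstall.py deploy --dns --dashboard   # full deploy plus DNS and dashboard
    ./mcpinstall.py add-node --nodes 2         # attach two more slaves from config.yaml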
@@ -1,6 +0,0 @@
- hosts: [{{host}}]
  become: yes
  tasks:
{% if options %}
    - shell: calico pool add {{options}}
{% endif %}
@@ -1,13 +0,0 @@
- hosts: [{{host}}]
  become: yes
  tasks:
    - get_url:
        url: https://github.com/Metaswitch/calico-docker/releases/download/{{version}}/calicoctl
        dest: /usr/bin/calicoctl-{{version}}
    - shell: chmod +x /usr/bin/calicoctl-{{version}}
    - file: dest=/usr/bin/calicoctl src=/usr/bin/calicoctl-{{version}} state=link
    - template:
        src: {{templates_dir}}/calico-node.service
        dest: /etc/systemd/system/calico-node.service
    - shell: systemctl daemon-reload
    - service: name=calico-node.service enabled=yes state=restarted
@@ -1,13 +0,0 @@
- hosts: [{{host}}]
  become: yes
  tasks:
    - get_url:
        url: https://github.com/Metaswitch/calico-docker/releases/download/{{version}}/calicoctl
        dest: /usr/bin/calicoctl-{{version}}
    - shell: chmod +x /usr/bin/calicoctl-{{version}}
    - file: dest=/usr/bin/calicoctl src=/usr/bin/calicoctl-{{version}} state=link
    - template:
        src: {{templates_dir}}/calico-node.service
        dest: /etc/systemd/system/calico-node.service
    - shell: systemctl daemon-reload
    - service: name=calico-node.service enabled=yes state=restarted
@@ -1,20 +0,0 @@
handler: ansible
version: 1.0.0
input:
  ip:
    schema: str!
    value: null
  etcd_authority:
    schema: str!
    value: null
  etcd_authority_internal:
    schema: str!
    value: null
    computable:
      lang: python
      type: full
      func: |
        return R[resource_name]['etcd_authority'].replace('http://', '')
  version:
    schema: str!
    value:
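The python computable above only strips the URL scheme from etcd_authority; an equivalent shell illustration with an assumed sample value:

    echo "http://10.0.0.2:6666" | sed 's|^http://||'   # -> 10.0.0.2:6666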
@@ -1,16 +0,0 @@
[Unit]
Description=calicoctl node
Requires=docker.service
After=docker.service

[Service]
User=root
Environment="ETCD_AUTHORITY={{etcd_authority_internal}}"
Environment="KUBERNETES_MASTER=http://kube-node-master:8080"
Environment="DEFAULT_IPV4={{ip}}"
PermissionsStartOnly=true
ExecStartPre=/usr/bin/calicoctl checksystem --fix
ExecStart=/usr/bin/calicoctl node --ip={{ip}} --detach=false

[Install]
WantedBy=multi-user.target
@@ -1,22 +0,0 @@
- hosts: [{{host}}]
  become: yes
  tasks:
    - get_url:
        url: https://github.com/Metaswitch/calico-docker/releases/download/{{version}}/calicoctl
        dest: /usr/bin/calicoctl-{{version}}
    - shell: chmod +x /usr/bin/calicoctl-{{version}}
    - file: dest=/usr/bin/calicoctl src=/usr/bin/calicoctl-{{version}} state=link
    - template:
        src: {{templates_dir}}/calico-etcd.manifest
        dest: /etc/kubernetes/manifests/calico-etcd.manifest
    - template:
        src: {{templates_dir}}/calico-node.service
        dest: /etc/systemd/system/calico-node.service
    # wait for docker images ... mostly
    # TODO(jnowak): fix it by pulling image in advance
    - wait_for: host=0.0.0.0 port={{etcd_port}} connect_timeout=5 state=started timeout=300
    - shell: systemctl daemon-reload
    - service: name=calico-node.service enabled=yes state=restarted
{% if options %}
    - shell: ETCD_AUTHORITY={{etcd_authority_internal}} calicoctl pool add {{network}}/{{prefix}} {{options}}
{% endif %}
@@ -1,22 +0,0 @@
- hosts: [{{host}}]
  become: yes
  tasks:
    - get_url:
        url: https://github.com/Metaswitch/calico-docker/releases/download/{{version}}/calicoctl
        dest: /usr/bin/calicoctl-{{version}}
    - shell: chmod +x /usr/bin/calicoctl-{{version}}
    - file: dest=/usr/bin/calicoctl src=/usr/bin/calicoctl-{{version}} state=link
    - template:
        src: {{templates_dir}}/calico-etcd.manifest
        dest: /etc/kubernetes/manifests/calico-etcd.manifest
    - template:
        src: {{templates_dir}}/calico-node.service
        dest: /etc/systemd/system/calico-node.service
    # wait for docker images ... mostly
    # TODO(jnowak): fix it by pulling image in advance
    - wait_for: host=0.0.0.0 port={{etcd_port}} connect_timeout=5 state=started timeout=300
    - shell: systemctl daemon-reload
    - service: name=calico-node.service enabled=yes state=restarted
{% if options %}
    - shell: ETCD_AUTHORITY={{etcd_authority_internal}} calicoctl pool add {{network}}/{{prefix}} {{options}}
{% endif %}
@@ -1,41 +0,0 @@
handler: ansible
version: 1.0.0
input:
  ip:
    schema: str!
    value: null
  etcd_host:
    schema: str!
    value: null
  etcd_authority:
    schema: str!
    value: null
    computable:
      lang: jinja
      type: full
      func: |
        http://{{etcd_host}}:{{etcd_port}}
  etcd_authority_internal:
    schema: str!
    value: null
    computable:
      lang: python
      type: full
      func: |
        return R[resource_name]['etcd_authority'].replace('http://', '')
  etcd_port:
    schema: int!
    value: 6666
  options:
    schema: str
    value:
  network:
    schema: str
    value:
  prefix:
    schema: int
    value:
  version:
    schema: str!
    value:

@@ -1,30 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
  name: calico-etcd
  namespace: calico-system
spec:
  hostNetwork: true
  containers:
    - name: calico-etcd-container
      image: gcr.io/google_containers/etcd:2.2.1
      command:
        - "/usr/local/bin/etcd"
        - "--name=calico-etcd"
        - "--data-dir=/var/etcd"
        - "--advertise-client-urls=http://{{ip}}:{{etcd_port}}"
        - "--listen-client-urls=http://0.0.0.0:{{etcd_port}}"
        - "--listen-peer-urls=http://0.0.0.0:6660"
      securityContext:
        privileged: true
      ports:
        - name: clientport
          containerPort: {{etcd_port}}
          hostPort: {{etcd_port}}
      volumeMounts:
        - mountPath: /var/etcd
          name: varlibetcd
  volumes:
    - name: "varlibetcd"
      hostPath:
        path: "/var/lib/etcd_calico"
@@ -1,16 +0,0 @@
[Unit]
Description=calicoctl node
Requires=docker.service
After=docker.service

[Service]
User=root
Environment="ETCD_AUTHORITY={{etcd_authority_internal}}"
Environment="KUBERNETES_MASTER=http://kube-node-master:8080"
Environment="DEFAULT_IPV4={{ip}}"
PermissionsStartOnly=true
ExecStartPre=/usr/bin/calicoctl checksystem --fix
ExecStart=/usr/bin/calicoctl node --ip={{ip}} --detach=false

[Install]
WantedBy=multi-user.target
@@ -1,17 +0,0 @@
- hosts: [{{host}}]
  become: yes
  tasks:
    - file: name=/opt/cni/bin state=directory
    - get_url:
        url: https://github.com/projectcalico/calico-cni/releases/download/{{version}}/calico
        dest: /opt/cni/bin/calico-{{version}}
    - get_url:
        url: https://github.com/projectcalico/calico-cni/releases/download/{{version}}/calico-ipam
        dest: /opt/cni/bin/calico-ipam-{{version}}
    - shell: chmod +x /opt/cni/bin/calico-{{version}} /opt/cni/bin/calico-ipam-{{version}}
    - file: dest=/opt/cni/bin/calico-ipam src=/opt/cni/bin/calico-ipam-{{version}} state=link
    - file: dest=/opt/cni/bin/calico src=/opt/cni/bin/calico-{{version}} state=link
    - file: name={{cni_netd}} state=directory
    - template:
        src: {{templates_dir}}/10-calico.conf
        dest: {{cni_netd}}/10-calico.conf
@@ -1,17 +0,0 @@
- hosts: [{{host}}]
  become: yes
  tasks:
    - file: name=/opt/cni/bin state=directory
    - get_url:
        url: https://github.com/projectcalico/calico-cni/releases/download/{{version}}/calico
        dest: /opt/cni/bin/calico-{{version}}
    - get_url:
        url: https://github.com/projectcalico/calico-cni/releases/download/{{version}}/calico-ipam
        dest: /opt/cni/bin/calico-ipam-{{version}}
    - shell: chmod +x /opt/cni/bin/calico-{{version}} /opt/cni/bin/calico-ipam-{{version}}
    - file: dest=/opt/cni/bin/calico-ipam src=/opt/cni/bin/calico-ipam-{{version}} state=link
    - file: dest=/opt/cni/bin/calico src=/opt/cni/bin/calico-{{version}} state=link
    - file: name={{cni_netd}} state=directory
    - template:
        src: {{templates_dir}}/10-calico.conf
        dest: {{cni_netd}}/10-calico.conf
@@ -1,15 +0,0 @@
handler: ansible
version: 1.0.0
input:
  etcd_authority:
    schema: str!
    value: null
  cni_netd:
    schema: str!
    value: /etc/cni/net.d
  log_level:
    schema: str!
    value: info
  version:
    schema: str!
    value: v1.2.0
@@ -1,9 +0,0 @@
{
  "name": "calico-k8s-network",
  "type": "calico",
  "etcd_authority": "{{etcd_authority}}",
  "log_level": "{{log_level}}",
  "ipam": {
    "type": "calico-ipam"
  }
}
@@ -1,5 +0,0 @@
- hosts: [{{host}}]
  become: yes
  tasks:
    - shell: kubectl delete service {{app_name}} --namespace=kube-system
    - shell: kubectl delete rc {{app_name}} --namespace=kube-system
@@ -1,11 +0,0 @@
- hosts: [{{host}}]
  become: yes
  tasks:
    - template:
        src: {{templates_dir}}/dashboard_rc.tmpl
        dest: /tmp/dashboard_rc.yaml
    - template:
        src: {{templates_dir}}/dashboard_service.tmpl
        dest: /tmp/dashboard_service.yaml
    - shell: kubectl create -f /tmp/dashboard_rc.yaml
    - shell: kubectl create -f /tmp/dashboard_service.yaml
@@ -1,13 +0,0 @@
- hosts: [{{host}}]
  become: yes
  tasks:
    - shell: kubectl delete service {{app_name}} --namespace=kube-system
    - shell: kubectl delete rc {{app_name}} --namespace=kube-system
    - template:
        src: {{templates_dir}}/dashboard_rc.tmpl
        dest: /tmp/dashboard_rc.yaml
    - template:
        src: {{templates_dir}}/dashboard_service.tmpl
        dest: /tmp/dashboard_service.yaml
    - shell: kubectl create -f /tmp/dashboard_rc.yaml
    - shell: kubectl create -f /tmp/dashboard_service.yaml
@@ -1,12 +0,0 @@
handler: ansible
version: 1.0.0
input:
  api_host:
    schema: str!
    value: null
  api_port:
    schema: int!
    value: null
  app_name:
    schema: str!
    value: kubernetes-dashboard
@@ -1,35 +0,0 @@
kind: ReplicationController
apiVersion: v1
metadata:
  labels:
    app: {{app_name}}
    version: v1.0.1
  name: {{app_name}}
  namespace: kube-system
spec:
  replicas: 1
  selector:
    app: {{app_name}}
  template:
    metadata:
      labels:
        app: {{app_name}}
    spec:
      containers:
      - name: {{app_name}}
        image: gcr.io/google_containers/kubernetes-dashboard-amd64:v1.0.1
        imagePullPolicy: Always
        ports:
        - containerPort: 9090
          protocol: TCP
        args:
          # Uncomment the following line to manually specify Kubernetes API server Host
          # If not specified, Dashboard will attempt to auto discover the API server and connect
          # to it. Uncomment only if the default does not work.
          - --apiserver-host=http://{{api_host}}:{{api_port}}
        livenessProbe:
          httpGet:
            path: /
            port: 9090
          initialDelaySeconds: 30
          timeoutSeconds: 30
@@ -1,15 +0,0 @@
kind: Service
apiVersion: v1
metadata:
  labels:
    app: {{app_name}}
    kubernetes.io/cluster-service: "true"
  name: {{app_name}}
  namespace: kube-system
spec:
  type: NodePort
  ports:
  - port: 80
    targetPort: 9090
  selector:
    app: {{app_name}}
@@ -1,20 +0,0 @@
- hosts: [{{host}}]
  become: yes
  tasks:
    - package: name=docker state=present enablerepo=updates-testing
      when: ansible_distribution == "Fedora"
    - package: name=docker.io state=present
      when: ansible_distribution == "Ubuntu"
{% if network_options %}
    - lineinfile:
        dest: /etc/sysconfig/docker-network
        regexp: "DOCKER_NETWORK_OPTIONS=.*"
        line: DOCKER_NETWORK_OPTIONS="--bridge={{iface}} --iptables=false --ip-masq=false"
      when: ansible_distribution == "Fedora"
    - lineinfile:
        dest: /etc/defaults
        regexp: "DOCKER_OPTS=.*"
        line: DOCKER_OPTS=""
      when: ansible_distribution == "Ubuntu"
{% endif %}
    - service: name=docker.service enabled=yes state=restarted
@@ -1,9 +0,0 @@
handler: ansible
version: 1.0.0
input:
  iface:
    schema: str!
    value: null
  network_options:
    schema: str!
    value: true
@@ -1,10 +0,0 @@
- hosts: [{{host}}]
  become: yes
  tasks:
    # without these lines: can't create /var/log/etcd.log: Is a directory
    - file: name=/var/log/etcd.log state=touch
    - file: name=/var/log/etcd-events.log state=touch

    - template:
        src: {{templates_dir}}/etcd.manifest
        dest: /etc/kubernetes/manifests/etcd.manifest
@@ -1,35 +0,0 @@
handler: ansible
version: 1.0.0

input:
  # listen_client_urls:
  #   schema: [str!]
  #   value:
  # advertise_client_urls:
  #   schema: [str!]
  #   value: []
  listen_client_url:
    schema: [str!]
    value: null
    computable:
      lang: jinja2
      type: full
      func: |
        http://{{listen_client_host}}:{{listen_client_port}}
  listen_client_url_events:
    schema: [str!]
    value: null
    computable:
      lang: jinja2
      type: full
      func: |
        http://{{listen_client_host}}:{{listen_client_port_events}}
  listen_client_host:
    schema: str!
    value:
  listen_client_port:
    schema: int!
    value: 4001
  listen_client_port_events:
    schema: int!
    value: 4002
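The jinja2 computables above just assemble client URLs from the host and port inputs; an equivalent shell illustration with assumed sample values:

    host=127.0.0.1; port=4001
    printf 'http://%s:%s\n' "$host" "$port"   # -> http://127.0.0.1:4001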
@@ -1,53 +0,0 @@
---
apiVersion: "v1"
kind: "Pod"
metadata:
  name: "etcd-server-events"
  namespace: "kube-system"
spec:
  hostNetwork: true
  containers:
    -
      name: "etcd-container"
      image: "gcr.io/google_containers/etcd:2.2.1"
      resources:
        requests:
          cpu: "100m"
      command:
        - "/bin/sh"
        - "-c"
        - "/usr/local/bin/etcd --listen-peer-urls http://127.0.0.1:2381 --addr 127.0.0.1:4002 --bind-addr 127.0.0.1:4002 --data-dir /var/etcd/data-events 1>>/var/log/etcd-events.log 2>&1"
      livenessProbe:
        httpGet:
          host: "127.0.0.1"
          port: 4002
          path: "/health"
        initialDelaySeconds: 15
        timeoutSeconds: 15
      ports:
        -
          name: "serverport"
          containerPort: 2381
          hostPort: 2381
        -
          name: "clientport"
          containerPort: 4002
          hostPort: {{listen_client_port_events}}
      volumeMounts:
        -
          name: "varetcd"
          mountPath: "/var/etcd"
          readOnly: false
        -
          name: "varlogetcd"
          mountPath: "/var/log/etcd-events.log"
          readOnly: false
  volumes:
    -
      name: "varetcd"
      hostPath:
        path: "/mnt/master-pd/var/etcd"
    -
      name: "varlogetcd"
      hostPath:
        path: "/var/log/etcd-events.log"
@@ -1,41 +0,0 @@
# [member]
ETCD_NAME=default
ETCD_DATA_DIR="/var/lib/etcd/default.etcd"
#ETCD_SNAPSHOT_COUNTER="10000"
#ETCD_HEARTBEAT_INTERVAL="100"
#ETCD_ELECTION_TIMEOUT="1000"
#ETCD_LISTEN_PEER_URLS="http://localhost:2380"
ETCD_LISTEN_CLIENT_URLS="{{listen_client_url}}"
#ETCD_MAX_SNAPSHOTS="5"
#ETCD_MAX_WALS="5"
#ETCD_CORS=""
#
#[cluster]
#ETCD_INITIAL_ADVERTISE_PEER_URLS="http://localhost:2380"
# if you use different ETCD_NAME (e.g. test), set ETCD_INITIAL_CLUSTER value for this name, i.e. "test=http://..."
#ETCD_INITIAL_CLUSTER="default=http://localhost:2380"
#ETCD_INITIAL_CLUSTER_STATE="new"
#ETCD_INITIAL_CLUSTER_TOKEN="etcd-cluster"
ETCD_ADVERTISE_CLIENT_URLS="{{listen_client_url}}"
#ETCD_DISCOVERY=""
#ETCD_DISCOVERY_SRV=""
#ETCD_DISCOVERY_FALLBACK="proxy"
#ETCD_DISCOVERY_PROXY=""
#
#[proxy]
#ETCD_PROXY="off"
#
#[security]
#ETCD_CERT_FILE=""
#ETCD_KEY_FILE=""
#ETCD_CLIENT_CERT_AUTH="false"
#ETCD_TRUSTED_CA_FILE=""
#ETCD_PEER_CERT_FILE=""
#ETCD_PEER_KEY_FILE=""
#ETCD_PEER_CLIENT_CERT_AUTH="false"
#ETCD_PEER_TRUSTED_CA_FILE=""
#
#[logging]
#ETCD_DEBUG="false"
# examples for -log-package-levels etcdserver=WARNING,security=DEBUG
#ETCD_LOG_PACKAGE_LEVELS=""
@@ -1,45 +0,0 @@
---
apiVersion: "v1"
kind: "Pod"
metadata:
  name: "etcd-server"
  namespace: "kube-system"
spec:
  hostNetwork: true
  containers:
    -
      name: "etcd-container"
      image: "gcr.io/google_containers/etcd:2.2.1"
      resources:
        requests:
          cpu: "200m"
      command:
        - "/bin/sh"
        - "-c"
        - "/usr/local/bin/etcd --listen-peer-urls http://127.0.0.1:2380 --addr 127.0.0.1:4001 --bind-addr 127.0.0.1:4001 --data-dir /var/etcd/data"
      livenessProbe:
        httpGet:
          host: "127.0.0.1"
          port: 4001
          path: "/health"
        initialDelaySeconds: 15
        timeoutSeconds: 15
      ports:
        -
          name: "serverport"
          containerPort: 2380
          hostPort: 2380
        -
          name: "clientport"
          containerPort: 4001
          hostPort: {{listen_client_port}}
      volumeMounts:
        -
          name: "varetcd"
          mountPath: "/var/etcd"
          readOnly: false
  volumes:
    -
      name: "varetcd"
      hostPath:
        path: "/mnt/master-pd/var/etcd"
@@ -1,27 +0,0 @@
handler: none
version: 1.0.0
input:
  cluster_dns:
    schema: str!
    value: null
  cluster_domain:
    schema: str!
    value: null
  netmask:
    schema: str!
    value: '255.255.255.0'
  prefix:
    schema: int!
    value: 16
  network:
    schema: str!
    value: '172.20.0.0'
  service_cluster_ip_range:
    schema: str!
    value: 10.254.0.0/16
  k8s_version:
    schema: str!
    value: v1.2.4
  calico_version:
    schema: str!
    value: v0.19.0
@@ -1,7 +0,0 @@
- hosts: [{{host}}]
  become: yes
  tasks:
    - template:
        src: {{templates_dir}}/skydns.tmpl
        dest: /tmp/skydns.yaml
    - shell: kubectl create -f /tmp/skydns.yaml
@@ -1,8 +0,0 @@
- hosts: [{{host}}]
  become: yes
  tasks:
    - template:
        src: {{templates_dir}}/skydns.tmpl
        dest: /tmp/skydns.yaml
    - shell: kubectl remove -f /tmp/skydns.yaml
    - shell: kubectl create -f /tmp/skydns.yaml
@@ -1,18 +0,0 @@
handler: ansible
version: 1.0.0
input:
  api_host:
    schema: str!
    value: null
  api_port:
    schema: int!
    value: null
  app_name:
    schema: str!
    value: kube-dns
  cluster_domain:
    schema: str!
    value: null
  cluster_dns:
    schema: str!
    value: null
@@ -1,153 +0,0 @@
apiVersion: v1
kind: Service
metadata:
  name: {{app_name}}
  namespace: kube-system
  labels:
    k8s-app: {{app_name}}
    kubernetes.io/cluster-service: "true"
    kubernetes.io/name: "KubeDNS"
spec:
  selector:
    k8s-app: {{app_name}}
  clusterIP: {{cluster_dns}}
  ports:
  - name: dns
    port: 53
    protocol: UDP
  - name: dns-tcp
    port: 53
    protocol: TCP

---

apiVersion: v1
kind: ReplicationController
metadata:
  name: {{app_name}}-v11
  namespace: kube-system
  labels:
    k8s-app: {{app_name}}
    version: v11
    kubernetes.io/cluster-service: "true"
spec:
  replicas: 1
  selector:
    k8s-app: {{app_name}}
    version: v11
  template:
    metadata:
      labels:
        k8s-app: {{app_name}}
        version: v11
        kubernetes.io/cluster-service: "true"
    spec:
      containers:
      - name: etcd
        image: gcr.io/google_containers/etcd-amd64:2.2.1
        resources:
          # TODO: Set memory limits when we've profiled the container for large
          # clusters, then set request = limit to keep this container in
          # guaranteed class. Currently, this container falls into the
          # "burstable" category so the kubelet doesn't backoff from restarting it.
          limits:
            cpu: 100m
            memory: 500Mi
          requests:
            cpu: 100m
            memory: 50Mi
        command:
        - /usr/local/bin/etcd
        - -data-dir
        - /var/etcd/data
        - -listen-client-urls
        - http://127.0.0.1:2379,http://127.0.0.1:4001
        - -advertise-client-urls
        - http://127.0.0.1:2379,http://127.0.0.1:4001
        - -initial-cluster-token
        - skydns-etcd
#        volumeMounts:
#        - name: etcd-storage
#          mountPath: /var/etcd/data
      - name: kube2sky
        image: gcr.io/google_containers/kube2sky:1.14
        resources:
          # TODO: Set memory limits when we've profiled the container for large
          # clusters, then set request = limit to keep this container in
          # guaranteed class. Currently, this container falls into the
          # "burstable" category so the kubelet doesn't backoff from restarting it.
          limits:
            cpu: 100m
            # Kube2sky watches all pods.
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 50Mi
        livenessProbe:
          httpGet:
            path: /healthz
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        readinessProbe:
          httpGet:
            path: /readiness
            port: 8081
            scheme: HTTP
          # we poll on pod startup for the Kubernetes master service and
          # only setup the /readiness HTTP server once that's available.
          initialDelaySeconds: 30
          timeoutSeconds: 5
        args:
        # command = "/kube2sky"
        - --domain={{cluster_domain}}
        - --kube-master-url=http://{{api_host}}:{{api_port}}
      - name: skydns
        image: gcr.io/google_containers/skydns:2015-10-13-8c72f8c
        resources:
          # TODO: Set memory limits when we've profiled the container for large
          # clusters, then set request = limit to keep this container in
          # guaranteed class. Currently, this container falls into the
          # "burstable" category so the kubelet doesn't backoff from restarting it.
          limits:
            cpu: 100m
            memory: 200Mi
          requests:
            cpu: 100m
            memory: 50Mi
        args:
        # command = "/skydns"
        - -machines=http://127.0.0.1:4001
        - -addr=0.0.0.0:53
        - -ns-rotate=false
        - -domain={{cluster_domain}}
        ports:
        - containerPort: 53
          name: dns
          protocol: UDP
        - containerPort: 53
          name: dns-tcp
          protocol: TCP
      - name: healthz
        image: gcr.io/google_containers/exechealthz:1.0
        resources:
          # keep request = limit to keep this container in guaranteed class
          limits:
            cpu: 10m
            memory: 20Mi
          requests:
            cpu: 10m
            memory: 20Mi
        args:
        - -cmd=nslookup kubernetes.default.svc.cluster.local 127.0.0.1 >/dev/null
        - -port=8080
        ports:
        - containerPort: 8080
          protocol: TCP
      volumes:
      - name: etcd-storage
        emptyDir: {}
      dnsPolicy: Default  # Don't use cluster DNS.
@@ -1,17 +0,0 @@
- hosts: [{{host}}]
  become: yes
  tasks:
    - get_url:
        url: http://storage.googleapis.com/kubernetes-release/release/{{k8s_version}}/bin/linux/amd64/kubelet
        dest: /usr/bin/kubelet{{k8s_version}}
    - shell: chmod +x /usr/bin/kubelet{{k8s_version}}
    - file: force=yes src=/usr/bin/kubelet{{k8s_version}} path=/usr/bin/kubelet state=link
    - file: name=/etc/kubernetes/manifests state=directory
    - template:
        src: {{templates_dir}}/kubelet.service
        dest: /etc/systemd/system/kubelet.service
    - template:
        src: {{templates_dir}}/kube-proxy.manifest
        dest: /etc/kubernetes/manifests/kube-proxy.manifest
    - shell: systemctl daemon-reload
    - service: name=kubelet state=restarted enabled=yes
@@ -1,17 +0,0 @@
- hosts: [{{host}}]
  become: yes
  tasks:
    - get_url:
        url: http://storage.googleapis.com/kubernetes-release/release/{{k8s_version}}/bin/linux/amd64/kubelet
        dest: /usr/bin/kubelet{{k8s_version}}
    - shell: chmod +x /usr/bin/kubelet{{k8s_version}}
    - file: force=yes src=/usr/bin/kubelet{{k8s_version}} path=/usr/bin/kubelet state=link
    - file: name=/etc/kubernetes/manifests state=directory
    - template:
        src: {{templates_dir}}/kubelet.service
        dest: /etc/systemd/system/kubelet.service
    - template:
        src: {{templates_dir}}/kube-proxy.manifest
        dest: /etc/kubernetes/manifests/kube-proxy.manifest
    - shell: systemctl daemon-reload
    - service: name=kubelet state=restarted enabled=yes
@@ -1,29 +0,0 @@
handler: ansible
version: 1.0.0

input:
  # kubelet
  kubelet_address:
    schema: str
    value: 0.0.0.0
  kubelet_args:
    schema: str
    value: ""
  kubelet_hostname:
    schema: str
    value: "127.0.0.1"
  master_api:
    schema: str!
    value: ""
  cluster_domain:
    schema: str!
    value: null
  cluster_dns:
    schema: str!
    value: null
  etcd_authority:
    schema: str
    value: ""
  k8s_version:
    schema: str!
    value: ""
@@ -1,16 +0,0 @@
apiVersion: v1
kind: Pod
metadata:
  name: kube-proxy
spec:
  hostNetwork: true
  containers:
  - name: kube-proxy
    image: gcr.io/google_containers/hyperkube:{{k8s_version}}
    command:
    - /hyperkube
    - proxy
    - --master={{master_api}}
    - --proxy-mode=iptables
    securityContext:
      privileged: true
@@ -1,26 +0,0 @@
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
After=calico-node.service
Requires=calico-node.service

[Service]
Environment="ETCD_AUTHORITY={{etcd_authority}}"
Environment="KUBE_API_ROOT="{{master_api}}/api/v1""
ExecStart=/usr/bin/kubelet \
    --address=0.0.0.0 \
    --allow-privileged=true \
    --cluster-dns={{cluster_dns}} \
    --cluster-domain={{cluster_domain}} \
    --config=/etc/kubernetes/manifests \
    --hostname-override={{kubelet_hostname}} \
    --api-servers={{master_api}} \
    --logtostderr=true \
    --network-plugin=cni \
    --network-plugin-dir=/etc/cni/net.d \
    {{kubelet_args}}
Restart=always
RestartSec=10

[Install]
WantedBy=multi-user.target
@@ -1,27 +0,0 @@
- hosts: [{{host}}]
  become: yes
  tasks:
    - get_url:
        url: http://storage.googleapis.com/kubernetes-release/release/{{k8s_version}}/bin/linux/amd64/kubectl
        dest: /usr/bin/kubectl{{k8s_version}}
    - get_url:
        url: http://storage.googleapis.com/kubernetes-release/release/{{k8s_version}}/bin/linux/amd64/kubelet
        dest: /usr/bin/kubelet{{k8s_version}}
    - file: name=/usr/bin/kubectl{{k8s_version}} mode=0777
    - file: name=/usr/bin/kubelet{{k8s_version}} mode=0777
    - file: src=/usr/bin/kubectl{{k8s_version}} path=/usr/bin/kubectl state=link
    - file: src=/usr/bin/kubelet{{k8s_version}} path=/usr/bin/kubelet state=link
    - file: name=/etc/kubernetes/manifests state=directory
    - template:
        src: {{templates_dir}}/kubelet.service
        dest: /etc/systemd/system/kubelet.service
    - template:
        src: {{templates_dir}}/kubelet_master.manifest
        dest: /etc/kubernetes/manifests/kubelet_master.manifest
    - shell: systemctl daemon-reload
    - service: name=kubelet state=restarted enabled=yes
    - wait_for: host={{master_host}} port={{master_port}} connect_timeout=5 state=started timeout=300
    - shell: kubectl --server {{master_address}} get namespaces
      register: kube_namespaces
    - shell: kubectl --server {{master_address}} create namespace kube-system
      when: "'kube-system' not in kube_namespaces.stdout"
@@ -1,27 +0,0 @@
- hosts: [{{host}}]
  become: yes
  tasks:
    - get_url:
        url: http://storage.googleapis.com/kubernetes-release/release/{{k8s_version}}/bin/linux/amd64/kubectl
        dest: /usr/bin/kubectl{{k8s_version}}
    - get_url:
        url: http://storage.googleapis.com/kubernetes-release/release/{{k8s_version}}/bin/linux/amd64/kubelet
        dest: /usr/bin/kubelet{{k8s_version}}
    - file: name=/usr/bin/kubectl{{k8s_version}} mode=0777
    - file: name=/usr/bin/kubelet{{k8s_version}} mode=0777
    - file: src=/usr/bin/kubectl{{k8s_version}} path=/usr/bin/kubectl state=link
    - file: src=/usr/bin/kubelet{{k8s_version}} path=/usr/bin/kubelet state=link
    - file: name=/etc/kubernetes/manifests state=directory
    - template:
        src: {{templates_dir}}/kubelet.service
        dest: /etc/systemd/system/kubelet.service
    - template:
        src: {{templates_dir}}/kubelet_master.manifest
        dest: /etc/kubernetes/manifests/kubelet_master.manifest
    - shell: systemctl daemon-reload
    - service: name=kubelet state=restarted enabled=yes
    - wait_for: host={{master_host}} port={{master_port}} connect_timeout=5 state=started timeout=60
    - shell: kubectl --server {{master_address}} get namespaces
      register: kube_namespaces
    - shell: kubectl --server {{master_address}} create namespace kube-system
      when: "'kube-system' not in kube_namespaces.stdout"
@@ -1,36 +0,0 @@
handler: ansible
version: 1.0.0

input:
  kubelet_args:
    schema: str
    value: ""
  cluster_dns:
    schema: str!
    value: null
  cluster_domain:
    schema: str!
    value: null
  master_port:
    schema: int
    value: 8080
  master_host:
    schema: str
    value: null
  master_address:
    schema: str
    value: null
    computable:
      lang: jinja2
      type: full
      func: |
        http://{{master_host}}:{{master_port}}
  service_cluster_ip_range:
    schema: str!
    value: null
  admission_control:
    schema: str!
    value: NamespaceLifecycle,NamespaceExists,LimitRanger,SecurityContextDeny,ResourceQuota
  k8s_version:
    schema: str!
    value: ""
@@ -1,22 +0,0 @@
[Unit]
Description=Kubernetes Kubelet
Documentation=https://github.com/kubernetes/kubernetes
Requires=docker.service
After=docker.service

[Service]
ExecStart=/usr/bin/kubelet \
    --register-node=false \
    --allow-privileged=true \
    --config=/etc/kubernetes/manifests \
    --cluster-dns={{cluster_dns}} \
    --cluster_domain={{cluster_domain}} \
    --hairpin-mode=none \
    --enable-debugging-handlers=false \
    --cpu-cfs-quota=true \
    --logtostderr=true
Restart=always
RestartSec=10

[Install]
WantedBy=multi-user.target
@@ -1,98 +0,0 @@
# TODO(jnowak): split it into parts
apiVersion: v1
kind: Pod
metadata:
  name: kube-controller
spec:
  hostNetwork: true
  volumes:
    - name: "etc-kubernetes"
      hostPath:
        path: "/etc/kubernetes"
    - name: "var-run-kubernetes"
      hostPath:
        path: "/var/run/kubernetes"
    - name: "etcd-datadir"
      hostPath:
        path: "/var/lib/etcd_kube"
    - name: "usr"
      hostPath:
        path: "/usr"
    - name: "lib64"
      hostPath:
        path: "/lib64"
  containers:
    - name: kube-etcd
      image: gcr.io/google_containers/etcd:2.2.1
      command:
        - "/usr/local/bin/etcd"
        - "--name=etcd"
        - "--data-dir=/var/etcd"
        - "--advertise-client-urls=http://127.0.0.1:2379"
        - "--listen-client-urls=http://127.0.0.1:2379"
        - "--listen-peer-urls=http://127.0.0.1:2380"
      securityContext:
        privileged: true
      volumeMounts:
        - mountPath: /var/etcd
          name: "etcd-datadir"

    - name: kube-apiserver
      image: gcr.io/google_containers/hyperkube:{{k8s_version}}
      command:
        - /hyperkube
        - apiserver
        - --allow-privileged=true
        - --insecure-bind-address=0.0.0.0
        - --insecure-port={{master_port}}
        - --etcd-servers=http://127.0.0.1:2379
        - --secure-port=0
        - --service-cluster-ip-range={{service_cluster_ip_range}}
        - --admission-control={{admission_control}}
        - --logtostderr=true
      ports:
        - containerPort: {{master_port}}
          hostPort: {{master_port}}
          name: local
      volumeMounts:
        - mountPath: /etc/kubernetes
          name: "etc-kubernetes"
        - mountPath: /var/run/kubernetes
          name: "var-run-kubernetes"

    - name: kube-controller-manager
      image: gcr.io/google_containers/hyperkube:{{k8s_version}}
      command:
        - /hyperkube
        - controller-manager
        - --master=http://127.0.0.1:{{master_port}}
      livenessProbe:
        httpGet:
          host: 127.0.0.1
          path: /healthz
          port: 10252
        initialDelaySeconds: 15
        timeoutSeconds: 1

    - name: kube-scheduler
      image: gcr.io/google_containers/hyperkube:{{k8s_version}}
      command:
        - /hyperkube
        - scheduler
        - --master=http://127.0.0.1:{{master_port}}
      livenessProbe:
        httpGet:
          host: 127.0.0.1
          path: /healthz
          port: 10251
        initialDelaySeconds: 15
        timeoutSeconds: 1

    - name: kube-proxy
      image: gcr.io/google_containers/hyperkube:{{k8s_version}}
      command:
        - /hyperkube
        - proxy
        - --master=http://127.0.0.1:{{master_port}}
      securityContext:
        privileged: true
@@ -1,37 +0,0 @@
resources:
  - id: ssh_transport_node_#{name}#
    from: resources/transport_ssh
    input:
      user: #{ssh_user}#
      key: #{ssh_key}#
      password: #{ssh_password}#
  - id: rsync_node_#{name}#
    from: resources/transport_rsync
    input:
      user: #{ssh_user}#
      key: #{ssh_key}#
      password: #{ssh_password}#
  - id: transports_node_#{name}#
    from: resources/transports
    input:
      transports:
        - key: ssh_transport_node_#{name}#::key
          user: ssh_transport_node_#{name}#::user
          port: ssh_transport_node_#{name}#::port
          name: ssh_transport_node_#{name}#::name
          password: ssh_transport_node_#{name}#::password
        - key: rsync_node_#{name}#::key
          name: rsync_node_#{name}#::name
          user: rsync_node_#{name}#::user
          port: rsync_node_#{name}#::port
          password: rsync_node_#{name}#::password
  - id: '#{name}#'
    from: k8s/os_node
    input:
      name: '#{name}#'
      ip: '#{ip}#'
      transports_id: transports_node_#{name}#::transports_id
  - id: hosts_file_node_#{name}#
    from: resources/hosts_file
    location: '#{name}#'
    tags: ['location=#{name}#']
@@ -1,20 +0,0 @@
set -xe

os=`lsb_release -si`

if [[ ! "$os" == "Ubuntu" ]]; then
    sudo dnf install -y python python-dnf ansible libselinux-python
    sudo /usr/sbin/setenforce 0 || echo 'ok'
    sudo systemctl stop firewalld.service || echo 'ok'
    sudo hostnamectl set-hostname --static "{{name}}"
else
    if [[ "$os" == "Ubuntu" ]]; then
        while fuser /var/lib/dpkg/lock >/dev/null 2>&1; do
            echo "waiting for dpkg lock"
            sleep 1
        done
        sudo apt-get install -y python ansible
        sudo hostname {{name}}
        sudo bash -c "echo {{name}} > /etc/hostname"
    fi
fi
@@ -1,15 +0,0 @@
handler: shell
version: 1.0.0
input:
  ip:
    schema: str!
    value:
  name:
    schema: str
    value: a node
  location_id:
    schema: str!
    value: $uuid
    reverse: True

tags: [resources=node]
@@ -1,19 +0,0 @@
- hosts: [{{host}}]
  become: yes
  tasks:
    - package: name=bridge-utils state=present
    - template:
        src: {{templates_dir}}/ifcfg
        dest: /etc/sysconfig/network-scripts/ifcfg-{{name}}
      when: ansible_distribution == "Fedora"
    - service: name=network.service state=restarted
      when: ansible_distribution == "Fedora"
    - template:
        src: {{templates_dir}}/ifcfg
        dest: /etc/network/interfaces.d/{{name}}.cfg
      when: ansible_distribution == "Ubuntu"
    - shell: brctl addbr {{name}}
      when: ansible_distribution == "Ubuntu"
      ignore_errors: True
    - service: name=networking.service state=restarted
      when: ansible_distribution == "Ubuntu"
@ -1,24 +0,0 @@
- hosts: [{{host}}]
  become: yes
  tasks:
    - shell: ip addr show | grep {{ipaddr}} | awk '{print $NF}'
      register: enabled_iface
      ignore_errors: True
    - shell: ip li de {{'{{'}}enabled_iface.stdout{{'}}'}}
      when: enabled_iface.stdout != "{{name}}"
    - shell: rm -f /etc/sysconfig/network-scripts/ifcfg-{{'{{'}}enabled_iface.stdout{{'}}'}}
      when: ansible_distribution == "Fedora"
    - template:
        src: {{templates_dir}}/ifcfg
        dest: /etc/sysconfig/network-scripts/ifcfg-{{name}}
      when: ansible_distribution == "Fedora"
    - service: name=network.service state=restarted
      when: ansible_distribution == "Fedora"
    - shell: rm -f /etc/network/interfaces.d/{{'{{'}}enabled_iface.stdout{{'}}'}}.cfg
      when: ansible_distribution == "Ubuntu"
    - template:
        src: {{templates_dir}}/ifcfg
        dest: /etc/network/interfaces.d/{{name}}.cfg
      when: ansible_distribution == "Ubuntu"
    - service: name=networking.service state=restarted
      when: ansible_distribution == "Ubuntu"
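These playbooks were executed by solar's ansible handler after slots like `{{host}}`, `{{name}}`, and `{{templates_dir}}` were rendered. A hand-run equivalent might look like this (the playbook file name is hypothetical; the trailing comma makes the argument an inline inventory):

```
# Hypothetical manual run of one rendered playbook against a single node:
ansible-playbook -i "10.0.0.3," ensure_bridge_rendered.yml
```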
@ -1,24 +0,0 @@
handler: ansible
version: 1.0.0
input:
  name:
    schema: str!
    value: cbr0
  netmask:
    schema: str!
    value: null
  prefix:
    schema: int!
    value: null
  type:
    schema: str!
    value: null
  ipaddr:
    schema: str!
    value: null
  onboot:
    schema: str!
    value: "yes"
  bootproto:
    schema: str!
    value: static
@ -1,19 +0,0 @@
{%if ansible_distribution == "Fedora" %}
DEVICE={{name}}
TYPE={{type}}
IPADDR={{ipaddr}}
NETMASK={{netmask}}
ONBOOT={{onboot}}
BOOTPROTO={{bootproto}}
{% endif %}
{%if ansible_distribution == "Ubuntu" %}
auto {{name}}
iface {{name}} inet static
    address {{ipaddr}}
    netmask {{netmask}}
{% if type == "Bridge" %}
    bridge_stp off
    bridge_fd 0
    bridge_maxwait 0
{% endif %}
{% endif %}
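For illustration, given the defaults from the meta.yaml above (name=cbr0, onboot=yes, bootproto=static), type=Bridge, and example address values (ipaddr/netmask have no defaults, so the values below are invented), the Ubuntu branch of this template renders to roughly this /etc/network/interfaces.d/cbr0.cfg:

```
auto cbr0
iface cbr0 inet static
    address 172.20.0.1
    netmask 255.255.255.0
    bridge_stp off
    bridge_fd 0
    bridge_maxwait 0
```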
@ -1,4 +1,5 @@
bashate>=0.2 # Apache-2.0
doc8
oslosphinx>=2.5.0,!=3.4.0 # Apache-2.0
sphinx>=1.2.1,!=1.3b1,<1.3 # BSD
22
tox.ini
@ -8,6 +8,28 @@ deps =
-r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt

[testenv:doc8]
commands = doc8 doc

[testenv:docs]
whitelist_externals = /bin/rm
commands =
    /bin/rm -rf doc/build
    python setup.py build_sphinx

[doc8]
# Settings for doc8:
# Ignore target directories
ignore-path = doc/build*
# File extensions to use
extensions = .rst,.txt
# Maximum line length should be 79, but we have some overlong lines;
# let's not drift much further.
max-line-length = 80
# Disable some doc8 checks:
# D000: Check RST validity (cannot handle the linenos directive)
ignore = D000

[testenv:bashate]
whitelist_externals = bash
commands = bash -c "find {toxinidir} -type f -name '*.sh' -not -path '*/.tox/*' -print0 | xargs -0 bashate -v"
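With these environments defined, the new checks can be run locally through tox:

```
tox -e doc8      # style-check the .rst/.txt files under doc/
tox -e docs      # remove doc/build and rebuild the Sphinx docs
tox -e bashate   # lint every *.sh file outside .tox/
```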
@ -9,7 +9,7 @@ django-admin.py syncdb
django-admin.py migrate
```

To test examples run one of available test scripts. You need to run it from solar main dir, for example:
To test examples run one of available test scripts. You need to run it from main dir, for example:

```
./utils/jenkins/run_hosts_example.sh
```
@ -98,5 +98,5 @@ groups:
address_pool: management-pool01

nodes:
  - name: solar
  - name: fuel-ccp
    role: master
@ -80,8 +80,8 @@ template:
phys_dev: !os_env VLAN_BRIDGE

nodes:
  - name: solar      # Custom name of VM for Fuel admin node
    role: solar      # Fixed role for Fuel master node properties
  - name: fuel-ccp   # Custom name of VM for Fuel admin node
    role: fuel-ccp   # Fixed role for Fuel master node properties
    params:
      vcpu: !os_env ADMIN_NODE_CPU, 2
      memory: !os_env ADMIN_NODE_MEMORY, 3072
@ -68,8 +68,8 @@ template:
dhcp: false

nodes:
  - name: solar      # Custom name of VM for Fuel admin node
    role: solar      # Fixed role for Fuel master node properties
  - name: fuel-ccp   # Custom name of VM for Fuel admin node
    role: fuel-ccp   # Fixed role for Fuel master node properties
    params:
      vcpu: !os_env ADMIN_NODE_CPU, 2
      memory: !os_env ADMIN_NODE_MEMORY, 3072
@ -48,7 +48,7 @@ def get_env():
def get_master_ip(env):
    admin = env.get_node(name='solar')
    admin = env.get_node(name='fuel-ccp')
    return admin.get_ip_address_by_network_name('public')
@ -1,163 +0,0 @@
#!/bin/bash
set -xe

# for now we assume that master ip is 10.0.0.2 and slaves ips are 10.0.0.{3,4,5,...}
ADMIN_PASSWORD=vagrant
ADMIN_USER=vagrant
INSTALL_DIR=/home/vagrant/solar-k8s

ENV_NAME=${ENV_NAME:-solar-example}
SLAVES_COUNT=${SLAVES_COUNT:-0}
if [ "$VLAN_BRIDGE" ]; then
    CONF_PATH=${CONF_PATH:-utils/jenkins/default30-bridge.yaml}
else
    CONF_PATH=${CONF_PATH:-utils/jenkins/default30.yaml}
fi

IMAGE_PATH=${IMAGE_PATH:-bootstrap/output-qemu/ubuntu1404}
TEST_SCRIPT=${TEST_SCRIPT:-/vagrant/examples/hosts_file/hosts.py}
DEPLOY_TIMEOUT=${DEPLOY_TIMEOUT:-60}

SOLAR_DB_BACKEND=${SOLAR_DB_BACKEND:-riak}

SSH_OPTIONS="-o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o PreferredAuthentications=password"

dos.py erase ${ENV_NAME} || true
mkdir -p tmp

mkdir -p logs
rm -rf logs/*

ENV_NAME=${ENV_NAME} SLAVES_COUNT=${SLAVES_COUNT} IMAGE_PATH=${IMAGE_PATH} CONF_PATH=${CONF_PATH} python utils/jenkins/env.py create_env

SLAVE_IPS=`ENV_NAME=${ENV_NAME} python utils/jenkins/env.py get_slaves_ips`
ADMIN_IP=`ENV_NAME=${ENV_NAME} python utils/jenkins/env.py get_admin_ip`

# Wait for all servers (grep only IP addresses):
for IP in `echo ${ADMIN_IP} ${SLAVE_IPS} |grep -oE '((1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])\.){3}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])'`; do
    elapsed_time=0
    master_wait_time=30
    while true; do
        report=$(sshpass -p ${ADMIN_PASSWORD} ssh ${SSH_OPTIONS} ${ADMIN_USER}@${IP} echo ok || echo not ready)

        if [ "${report}" = "ok" ]; then
            break
        fi

        if [ "${elapsed_time}" -gt "${master_wait_time}" ]; then
            exit 2
        fi

        sleep 1
        let elapsed_time+=1
    done
done

sshpass -p ${ADMIN_PASSWORD} rsync -rz . -e "ssh ${SSH_OPTIONS}" ${ADMIN_USER}@${ADMIN_IP}:/home/vagrant/solar-k8s --exclude tmp --exclude x-venv --exclude .vagrant --exclude .eggs --exclude *.box --exclude images --exclude utils/packer

set +e

sshpass -p ${ADMIN_PASSWORD} ssh ${SSH_OPTIONS} ${ADMIN_USER}@${ADMIN_IP} bash -s <<EOF
set -x
set -e

export PYTHONWARNINGS="ignore"

wget https://github.com/openstack/solar-resources/archive/master.zip
unzip master.zip

solar repo import solar-resources-master/resources
solar repo import solar-resources-master/templates
solar repo import -n k8s solar-k8s/resources

solar repo update templates ${INSTALL_DIR}/utils/jenkins/repository

solar resource create nodes templates/nodes ips="${SLAVE_IPS}" count="${SLAVES_COUNT}"

sudo pip install -r ${INSTALL_DIR}/requirements.txt

pushd ${INSTALL_DIR}
bash -c "${TEST_SCRIPT}"
popd

solar changes stage
solar changes process
solar orch run-once

elapsed_time=0
while true; do
    report=\$(solar o report)

    errors=\$(echo "\${report}" | grep -e ERROR | wc -l)
    if [ "\${errors}" != "0" ]; then
        solar orch report
        echo FAILURE
        exit 1
    fi

    running=\$(echo "\${report}" | grep -e PENDING -e INPROGRESS | wc -l)
    if [ "\${running}" == "0" ]; then
        solar orch report
        echo SUCCESS
        exit 0
    fi

    if [ "\${elapsed_time}" -gt "${DEPLOY_TIMEOUT}" ]; then
        solar orch report
        echo TIMEOUT
        exit 2
    fi

    sleep 5
    let elapsed_time+=5
done
EOF

deploy_res=$?

# setup VLAN if everything is ok and env will not be deleted
if [ "$VLAN_BRIDGE" ] && [ "${deploy_res}" -eq "0" ] && [ "${DONT_DESTROY_ON_SUCCESS}" = "1" ]; then
    rm -f VLAN_IPS
    for IP in `echo ${ADMIN_IP} ${SLAVE_IPS} |grep -oE '((1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])\.){3}(1?[0-9][0-9]?|2[0-4][0-9]|25[0-5])'`; do
        bridged_iface_mac="`ENV_NAME=${ENV_NAME} python utils/jenkins/env.py get_bridged_iface_mac $IP`"

        sshpass -p ${ADMIN_PASSWORD} ssh ${SSH_OPTIONS} ${ADMIN_USER}@${IP} bash -s <<EOF >>VLAN_IPS
bridged_iface=\$(ifconfig -a|awk -v mac="$bridged_iface_mac" '\$0 ~ mac {print \$1}' 'RS=\n\n')
sudo ip route del default
sudo dhclient "\${bridged_iface}"
echo \$(ip addr list |grep ${bridged_iface_mac} -A 1 |grep 'inet ' |cut -d' ' -f6| cut -d/ -f1)
EOF

    done
    set +x
    sed -i '/^\s*$/d' VLAN_IPS
    echo "**************************************"
    echo "**************************************"
    echo "**************************************"
    echo "* VLANs IP addresses"
    echo "* MASTER IP: `head -n1 VLAN_IPS`"
    echo "* SLAVES IPS: `tail -n +2 VLAN_IPS | tr '\n' ' '`"
    echo "* USERNAME: vagrant"
    echo "* PASSWORD: vagrant"
    echo "* K8s dashboard: http://`head -n1 VLAN_IPS`/api/v1/proxy/namespaces/kube-system/services/kubernetes-dashboard"
    echo "**************************************"
    echo "**************************************"
    echo "**************************************"
    set -x
fi

# collect logs
sshpass -p ${ADMIN_PASSWORD} scp ${SSH_OPTIONS} ${ADMIN_USER}@${ADMIN_IP}:/home/vagrant/solar.log logs/

if [ "${deploy_res}" -eq "0" ] && [ "${DONT_DESTROY_ON_SUCCESS}" != "1" ]; then
    dos.py erase ${ENV_NAME}
else
    if [ "${deploy_res}" -ne "0" ]; then
        dos.py snapshot ${ENV_NAME} ${ENV_NAME}.snapshot
        dos.py destroy ${ENV_NAME}
        echo "To revert snapshot please run: dos.py revert ${ENV_NAME} ${ENV_NAME}.snapshot"
    fi
fi

exit ${deploy_res}
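For reference, the removed script was driven entirely by environment variables. A typical CI invocation looked roughly like this (the script path and name are assumptions, since this view does not show the file name; VLAN_BRIDGE is an example value):

```
# Deploy 2 slaves, keep the env on success, and expose it on a VLAN:
ENV_NAME=solar-example SLAVES_COUNT=2 VLAN_BRIDGE=vlan450 \
DONT_DESTROY_ON_SUCCESS=1 ./utils/jenkins/run_k8s_deploy.sh
```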
@ -1,182 +0,0 @@
# -*- mode: ruby -*-
# vi: set ft=ruby :
# Copyright 2015 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

Vagrant.require_version ">= 1.7.4"

require 'yaml'

# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
VAGRANTFILE_API_VERSION = "2"

# configs, custom updates _defaults
defaults_cfg = YAML.load_file('vagrant-settings.yaml_defaults')
if File.exist?('vagrant-settings.yaml')
  custom_cfg = YAML.load_file('vagrant-settings.yaml')
  if custom_cfg
    cfg = defaults_cfg.merge(custom_cfg)
  else
    cfg = defaults_cfg
  end
else
  cfg = defaults_cfg
end

SLAVES_COUNT = cfg["slaves_count"]
SLAVES_RAM = cfg["slaves_ram"]
SLAVES_IPS = cfg["slaves_ips"]
SLAVES_IMAGE = cfg["slaves_image"]
SLAVES_IMAGE_VERSION = cfg["slaves_image_version"]
MASTER_RAM = cfg["master_ram"]
MASTER_IPS = cfg["master_ips"]
MASTER_IMAGE = cfg["master_image"]
MASTER_IMAGE_VERSION = cfg["master_image_version"]
SYNC_TYPE = cfg.fetch("sync_type", "rsync")
MASTER_CPUS = cfg["master_cpus"]
SLAVES_CPUS = cfg["slaves_cpus"]
PARAVIRT_PROVIDER = cfg.fetch('paravirtprovider', false)
PREPROVISIONED = cfg.fetch('preprovisioned', true)

def shell_script(filename, env=[], args=[])
  "/bin/bash -c \"#{env.join ' '} #{filename} #{args.join ' '} \""
end


Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|

  if not Vagrant.has_plugin?("vagrant-alpine")
    system "vagrant plugin install vagrant-alpine"
    exec "vagrant #{ARGV.join ' '}"
  end

  config.vm.define "solar", primary: true do |config|
    config.vm.box = MASTER_IMAGE
    config.vm.box_version = MASTER_IMAGE_VERSION

    config.vm.provision "file", source: "~/.vagrant.d/insecure_private_key", destination: "/vagrant/tmp/keys/ssh_private"
    config.vm.provision "shell", inline: "sudo apk add --no-cache rsync"

    config.vm.provider :virtualbox do |v|
      v.memory = MASTER_RAM
      v.cpus = MASTER_CPUS
      v.customize [
        "modifyvm", :id,
        "--memory", MASTER_RAM,
        "--cpus", MASTER_CPUS,
        "--ioapic", "on",
      ]
      if PARAVIRT_PROVIDER
        v.customize ['modifyvm', :id, "--paravirtprovider", PARAVIRT_PROVIDER] # for linux guest
      end
      v.name = "solar"
    end

    config.vm.provider :libvirt do |libvirt|
      libvirt.driver = 'kvm'
      libvirt.memory = MASTER_RAM
      libvirt.cpus = MASTER_CPUS
      libvirt.nested = true
      libvirt.cpu_mode = 'host-passthrough'
      libvirt.volume_cache = 'unsafe'
      libvirt.disk_bus = "virtio"
    end

    if Vagrant.has_plugin?("vagrant-vbguest")
      config.vbguest.auto_update = false
    end

    if SYNC_TYPE == 'nfs'
      config.vm.synced_folder "../../", "/vagrant", type: "nfs"
    end
    if SYNC_TYPE == 'rsync'
      config.vm.synced_folder "../../", "/vagrant", type: "rsync",
        rsync__args: ["--verbose", "--archive", "--delete", "-z"]
    end

    ind = 0
    MASTER_IPS.each do |ip|
      config.vm.network :private_network, ip: "#{ip}", :dev => "solbr#{ind}", :mode => 'nat'
      ind = ind + 1
    end
  end

  SLAVES_COUNT.times do |i|
    index = i + 1
    ip_index = i + 3
    config.vm.define "node-#{index}" do |config|

      # Standard box with all stuff preinstalled
      config.vm.box = SLAVES_IMAGE
      config.vm.box_version = SLAVES_IMAGE_VERSION
      config.vm.host_name = "node-#{index}"

      if Vagrant.has_plugin?("vagrant-vbguest")
        config.vbguest.no_install = true
      end

      ind = 0
      SLAVES_IPS.each do |ip|
        config.vm.network :private_network, ip: "#{ip}#{ip_index}", :dev => "solbr#{ind}", :mode => 'nat'
        ind = ind + 1
      end

      config.vm.provider :virtualbox do |v|
        boot_order(v, ['net', 'disk'])
        v.customize [
          "modifyvm", :id,
          "--memory", SLAVES_RAM,
          "--cpus", SLAVES_CPUS,
          "--ioapic", "on",
          "--macaddress1", "auto",
        ]
        if PARAVIRT_PROVIDER
          v.customize ['modifyvm', :id, "--paravirtprovider", PARAVIRT_PROVIDER] # for linux guest
        end
        v.name = "node-#{index}"
      end

      config.vm.provider :libvirt do |libvirt|
        libvirt.driver = 'kvm'
        libvirt.memory = SLAVES_RAM
        libvirt.cpus = SLAVES_CPUS
        libvirt.nested = true
        libvirt.cpu_mode = 'host-passthrough'
        libvirt.volume_cache = 'unsafe'
        libvirt.disk_bus = "virtio"
      end

      config.vm.synced_folder ".", "/vagrant", disabled: true
      # if SYNC_TYPE == 'nfs'
      #   config.vm.synced_folder ".", "/vagrant", type: "nfs"
      # end
      # if SYNC_TYPE == 'rsync'
      #   config.vm.synced_folder ".", "/vagrant", type: "rsync",
      #     rsync__args: ["--verbose", "--archive", "--delete", "-z"]
      # end
    end
  end

end


def boot_order(virt_config, order)
  # Boot order is specified with special flag:
  # --boot<1-4> none|floppy|dvd|disk|net
  4.times do |idx|
    device = order[idx] || 'none'
    virt_config.customize ['modifyvm', :id, "--boot#{idx + 1}", device]
  end
end
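Consuming this Vagrantfile followed the standard workflow; either of the two configured providers works:

```
vagrant up --provider virtualbox   # or: --provider libvirt
vagrant ssh solar                  # the master VM defined above
```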
@ -1,32 +0,0 @@
# copy it to vagrant-settings.yaml then Vagrantfile
# will use values from this file

master_image: solar-project/solar-minimal
master_image_version: ">= 0.3.0.pre3"
# 256 is enough, can be trimmed down when needed
master_ram: 350
master_cpus: 1
slaves_ram: 1024
slaves_count: 2
# you can select between
# yk0/ubuntu-xenial OR fc23
slaves_image: yk0/ubuntu-xenial
slaves_image_version: null
master_ips:
  - 10.0.0.2
  - 10.1.0.2
  - 10.2.0.2
slaves_cpus: 1
slaves_ips:
  - 10.0.0.
  - 10.1.0.
  - 10.2.0.

# if you have virtualbox 5.x then enable it,
# it will speed things up a lot
# paravirtprovider: kvm

# By default the Virtualbox shared folder is used, which is very slow.
# Uncomment the following option to change it.
# Possible options are: rsync, nfs
# sync_type: nfs
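As the header comment notes, overrides belong in vagrant-settings.yaml next to the Vagrantfile; any key from these defaults can be set there. A minimal sketch enabling the options suggested in the comments above:

```
# Write an override file; keys not listed here keep their defaults.
cat > vagrant-settings.yaml <<EOF
sync_type: nfs          # use NFS instead of the slow VirtualBox shared folder
paravirtprovider: kvm   # only with VirtualBox 5.x
slaves_count: 3
EOF
```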