Sample script for deploying k8s with kubespray

This patch provides a sample script that allows users to install
or delete Kubernetes on VMs in the VNF when executing
instantiate/terminate/scale/heal through the Kubespray ansible
server.
It also provides a script to install and configure a Load Balancer
for the Kubernetes cluster in the VNF.

When instantiating a CNF with a Service resource of type
`NodePort` on a Kubernetes VIM deployed by Kubespray, the Service's
ports must be added to the Load Balancer's configuration and the
Load Balancer must be restarted. This patch therefore also provides
a sample MgmtDriver and shell script to handle this.

At the same time, we found that if the instantiate operation fails
after `instantiate_end` has run, `terminate_end` is not executed
during rollback, so changes made in `instantiate_end` may remain in
the environment. This patch therefore adds a `terminate_end` call
to `post_rollback_vnf`.

Implements: blueprint k8s-mgmtdriver-kubespray
Change-Id: I45661b5d8006e87db5f46a595756231849723ce6
Author: Yi Feng  2021-07-20 16:14:48 +09:00
parent 89c3afc72d
commit b3bf4ec2ce
24 changed files with 2505 additions and 5 deletions

@@ -0,0 +1,344 @@
# Copyright (C) 2021 FUJITSU
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ipaddress
import os
import time
import eventlet
from oslo_log import log as logging
import paramiko
import yaml
from tacker.common import cmd_executer
from tacker.common import exceptions
from tacker.common import log
from tacker.vnflcm import utils as vnflcm_utils
from tacker.vnfm.mgmt_drivers import vnflcm_abstract_driver
COMMAND_WAIT_RETRY_TIME = 30
CONNECT_REMOTE_SERVER_RETRY_COUNT = 4
LOG = logging.getLogger(__name__)
K8S_CMD_TIMEOUT = 60
K8S_DEPLOY_TIMEOUT = 300
SERVER_WAIT_COMPLETE_TIME = 120
class CnfNodePortMgmt(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
def get_type(self):
return 'mgmt-drivers-cnf-nodeport'
def get_name(self):
return 'mgmt-drivers-cnf-nodeport'
def get_description(self):
return 'Tacker CNFMgmt NodePort Setting Driver'
@log.log
def instantiate_start(self, context, vnf_instance,
instantiate_vnf_request, grant,
grant_request, **kwargs):
pass
def _check_is_cidr(self, key, value, cidr_str):
# instantiate: check cidr
try:
ipaddress.ip_network(cidr_str)
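# Note: ip_network() accepts both a plain address and a CIDR string,
# e.g. '192.168.10.100' (treated as a /32) or '10.0.0.0/24', while
# something like '10.0.0.1/24' raises ValueError (host bits set).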
except ValueError:
LOG.error('The {value} of {key} in the '
'additionalParams is invalid.'.format(
value=value, key=key))
raise exceptions.MgmtDriverParamInvalid(param=value)
def _check_input_parameters(self, additional_param, vnf_package_path):
if not additional_param:
LOG.error("The 'lcm-kubernetes-external-lb' cannot be None "
"in additionalParams.")
raise exceptions.MgmtDriverOtherError(
error_message="The 'lcm-kubernetes-external-lb' cannot"
" be None in additionalParams.")
if not isinstance(additional_param, dict):
LOG.error("The format of 'lcm-kubernetes-external-lb' in "
"additionalParams is invalid. It must be dict.")
raise exceptions.MgmtDriverOtherError(
error_message="The format of 'lcm-kubernetes-external-lb' in "
"additionalParams is invalid. It must be dict.")
for key, value in additional_param.items():
if key == 'external_lb_param':
for attr in ['ssh_username', 'ssh_password', 'ssh_ip']:
if not value.get(attr):
LOG.error(
'The {} of {} in the '
'additionalParams cannot'
' be None.'.format(attr, key))
raise exceptions.MgmtDriverNotFound(
param=attr)
if value.get('ssh_ip'):
self._check_is_cidr(
key, 'ssh_ip', value.get('ssh_ip'))
if not additional_param.get('script_path'):
LOG.error('The script_path of {} in the '
'additionalParams cannot be None.'.format(key))
raise exceptions.MgmtDriverNotFound(
param='script_path')
abs_script_path = os.path.join(
vnf_package_path, additional_param.get('script_path'))
if not os.path.exists(abs_script_path):
LOG.error('The path of external_lb_param'
' script is invalid.')
raise exceptions.MgmtDriverOtherError(
error_message="The path of external_lb_param"
" script is invalid")
def _init_commander_and_set_script(self, user, password, host,
timeout, vnf_package_path=None,
script_path=None):
retry = CONNECT_REMOTE_SERVER_RETRY_COUNT
while retry > 0:
try:
if (vnf_package_path and script_path):
connect = paramiko.Transport(host, 22)
connect.connect(username=user, password=password)
sftp = paramiko.SFTPClient.from_transport(connect)
sftp.put(os.path.join(vnf_package_path, script_path),
"/tmp/{}".format(
script_path.replace('Scripts', '')))
connect.close()
commander = cmd_executer.RemoteCommandExecutor(
user=user, password=password, host=host,
timeout=timeout)
return commander
except (exceptions.NotAuthorized, paramiko.SSHException,
paramiko.ssh_exception.NoValidConnectionsError) as e:
LOG.debug(e)
retry -= 1
if retry == 0:
LOG.error(e)
raise paramiko.SSHException()
time.sleep(SERVER_WAIT_COMPLETE_TIME)
def _execute_command(self, commander, ssh_command, timeout, type, retry):
eventlet.monkey_patch()
while retry >= 0:
try:
with eventlet.Timeout(timeout, True):
result = commander.execute_command(
ssh_command, input_data=None)
break
except eventlet.timeout.Timeout:
LOG.debug('Timed out while executing command: '
'{}.'.format(ssh_command))
retry -= 1
if retry < 0:
LOG.error('Timed out while executing command: '
'{}.'.format(ssh_command))
raise exceptions.MgmtDriverOtherError(
error_message='Timed out while executing command: '
'{}.'.format(ssh_command))
time.sleep(COMMAND_WAIT_RETRY_TIME)
if type == 'common':
if result.get_return_code() != 0:
err = result.get_stderr()
LOG.error(err)
raise exceptions.MgmtDriverRemoteCommandError(err_info=err)
return result.get_stdout()
def _get_nodeport_from_kubernetes(self, no_port_info_list, lb_commander,
resource_info_list):
for no_port_info in no_port_info_list:
ssh_command = "kubectl describe svc '%(svc_name)s' -n" \
" '%(namespace)s' | grep NodePort: | awk" \
" '{print $3}' | awk -F '/' '{print $1}'" \
% {'svc_name': no_port_info.get('name'),
'namespace': no_port_info.get('namespace')}
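# Illustrative: for a 'NodePort:  http  30422/TCP' line in the
# 'kubectl describe svc' output, the pipeline above yields '30422'.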
results = self._execute_command(
lb_commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0)
node_ports = ','.join([result.replace(
'\n', '') for result in results])
no_port_info['node_ports'] = node_ports
resource_info_list.append(no_port_info)
def _get_script_input_parameter(self, vnf_package_path, additional_param,
operation_type):
script_path = additional_param.get(
'lcm-kubernetes-external-lb', {}).get('script_path')
ssh_ip = additional_param.get(
'lcm-kubernetes-external-lb', {}).get('external_lb_param').get(
'ssh_ip')
ssh_username = additional_param.get(
'lcm-kubernetes-external-lb', {}).get('external_lb_param').get(
'ssh_username')
ssh_password = additional_param.get(
'lcm-kubernetes-external-lb', {}).get('external_lb_param').get(
'ssh_password')
artifact_files = additional_param.get(
'lcm-kubernetes-def-files', {})
resource_info_list = []
no_port_info_list = []
for artifact_file in artifact_files:
artifact_file_path = os.path.join(
vnf_package_path, artifact_file)
with open(artifact_file_path, 'r', encoding='utf-8') as f:
yaml_content_all = yaml.safe_load_all(f.read())
for yaml_content in yaml_content_all:
if yaml_content.get('kind') == 'Service' and \
yaml_content.get('spec').get('type') == 'NodePort':
if operation_type == 'INSTANTIATE':
ports = yaml_content.get('spec').get('ports')
node_ports = [port.get(
'nodePort') for port in ports if port.get(
'nodePort')]
if len(node_ports) == len(ports):
node_ports_str = ','.join([str(
port) for port in node_ports])
resource_info = {
"namespace": yaml_content.get('metadata').get(
'namespace', 'default'),
"name": yaml_content.get('metadata').get(
'name'),
"node_ports": node_ports_str
}
resource_info_list.append(resource_info)
else:
no_port_info = {
"namespace": yaml_content.get('metadata').get(
'namespace', 'default'),
"name": yaml_content.get('metadata').get(
'name'),
}
no_port_info_list.append(no_port_info)
else:
resource_info = {
"namespace": yaml_content.get('metadata').get(
'namespace', 'default'),
"name": yaml_content.get('metadata').get('name')
}
resource_info_list.append(resource_info)
lb_commander = self._init_commander_and_set_script(
ssh_username, ssh_password, ssh_ip, K8S_CMD_TIMEOUT,
vnf_package_path=vnf_package_path,
script_path=script_path)
if operation_type == 'INSTANTIATE':
# get nodeport info from kubernetes
self._get_nodeport_from_kubernetes(
no_port_info_list, lb_commander, resource_info_list)
resource_info_str_list = []
for resource_info in resource_info_list:
resource_info_str = ','.join(
[value for key, value in resource_info.items()])
resource_info_str_list.append(resource_info_str)
all_resource_info_str = '#'.join(resource_info_str_list)
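# Illustrative result (ports made up):
# 'default,nginx-service,30422,32019#kube-system,nginx-service,31234'
# where each resource is namespace,name,node_ports joined by ',' and
# resources are joined by '#'.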
return lb_commander, all_resource_info_str
@log.log
def instantiate_end(self, context, vnf_instance,
instantiate_vnf_request, grant,
grant_request, **kwargs):
additional_param = instantiate_vnf_request.additional_params
vnf_package_path = vnflcm_utils._get_vnf_package_path(
context, vnf_instance.vnfd_id)
self._check_input_parameters(additional_param.get(
'lcm-kubernetes-external-lb', {}), vnf_package_path)
lb_commander, all_resource_info_str = \
self._get_script_input_parameter(
vnf_package_path, additional_param, 'INSTANTIATE')
ssh_command = 'bash /tmp/configure_lb.sh -i {} -a True'.format(
all_resource_info_str)
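# e.g. 'bash /tmp/configure_lb.sh -i default,nginx-service,30422,32019
# -a True' (values illustrative); '-a True' adds the entries, while
# terminate_end later calls the same script with '-a False'.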
self._execute_command(
lb_commander, ssh_command, K8S_DEPLOY_TIMEOUT, 'common', 0)
lb_commander.close_session()
@log.log
def terminate_start(self, context, vnf_instance,
terminate_vnf_request, grant,
grant_request, **kwargs):
pass
@log.log
def terminate_end(self, context, vnf_instance,
terminate_vnf_request, grant,
grant_request, **kwargs):
vnf_package_path = vnflcm_utils._get_vnf_package_path(
context, vnf_instance.vnfd_id)
add_param = {}
if hasattr(terminate_vnf_request, 'additional_params'):
if terminate_vnf_request.additional_params:
additional_params = terminate_vnf_request.additional_params
add_param['lcm-kubernetes-external-lb'] = {
'script_path': additional_params.get('script_path'),
'external_lb_param': {
'ssh_ip': additional_params.get('ssh_ip'),
'ssh_username': additional_params.get('ssh_username'),
'ssh_password': additional_params.get('ssh_password'),
}
}
add_param['lcm-kubernetes-def-files'] = \
vnf_instance.instantiated_vnf_info.additional_params.get(
'lcm-kubernetes-def-files')
else:
add_param = \
vnf_instance.instantiated_vnf_info.additional_params
else:
add_param = \
vnf_instance.instantiated_vnf_info.additional_params
lb_commander, all_resource_info_str = \
self._get_script_input_parameter(
vnf_package_path, add_param, 'TERMINATE')
ssh_command = 'bash /tmp/configure_lb.sh -i {} -a False'.format(
all_resource_info_str)
self._execute_command(
lb_commander, ssh_command, K8S_DEPLOY_TIMEOUT, 'common', 0)
lb_commander.close_session()
@log.log
def scale_start(self, context, vnf_instance,
scale_vnf_request, grant,
grant_request, **kwargs):
pass
@log.log
def scale_end(self, context, vnf_instance,
scale_vnf_request, grant,
grant_request, **kwargs):
pass
@log.log
def heal_start(self, context, vnf_instance,
heal_vnf_request, grant,
grant_request, **kwargs):
pass
@log.log
def heal_end(self, context, vnf_instance,
heal_vnf_request, grant,
grant_request, **kwargs):
pass
@log.log
def change_external_connectivity_start(
self, context, vnf_instance,
change_ext_conn_request, grant,
grant_request, **kwargs):
pass
@log.log
def change_external_connectivity_end(
self, context, vnf_instance,
change_ext_conn_request, grant,
grant_request, **kwargs):
pass

@@ -0,0 +1,124 @@
tosca_definitions_version: tosca_simple_yaml_1_2
description: Sample VNF
imports:
- etsi_nfv_sol001_common_types.yaml
- etsi_nfv_sol001_vnfd_types.yaml
- helloworld3_types.yaml
topology_template:
inputs:
descriptor_id:
type: string
descriptor_version:
type: string
provider:
type: string
product_name:
type: string
software_version:
type: string
vnfm_info:
type: list
entry_schema:
type: string
flavour_id:
type: string
flavour_description:
type: string
substitution_mappings:
node_type: company.provider.VNF
properties:
flavour_id: simple
requirements:
virtual_link_external: []
node_templates:
VNF:
type: company.provider.VNF
properties:
flavour_description: A simple flavour
interfaces:
Vnflcm:
instantiate_end:
implementation: mgmt-drivers-cnf-nodeport
terminate_end:
implementation: mgmt-drivers-cnf-nodeport
scale_start:
implementation: mgmt-drivers-cnf-nodeport
scale_end:
implementation: mgmt-drivers-cnf-nodeport
heal_start:
implementation: mgmt-drivers-cnf-nodeport
heal_end:
implementation: mgmt-drivers-cnf-nodeport
artifacts:
mgmt-drivers-cnf-nodeport:
description: Management driver for cnf nodeport setting
type: tosca.artifacts.Implementation.Python
file: Scripts/cnf_nodeport_mgmt.py
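# Each Vnflcm hook above (instantiate_end, terminate_end, ...) resolves
# to this artifact, i.e. Scripts/cnf_nodeport_mgmt.py in this package.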
VDU1:
type: tosca.nodes.nfv.Vdu.Compute
properties:
name: vdu1-simple
description: kubernetes controller resource as VDU
vdu_profile:
min_number_of_instances: 1
max_number_of_instances: 2
policies:
- scaling_aspects:
type: tosca.policies.nfv.ScalingAspects
properties:
aspects:
vdu1_aspect:
name: vdu1_aspect
description: vdu1 scaling aspect
max_scale_level: 1
step_deltas:
- delta_1
- VDU1_initial_delta:
type: tosca.policies.nfv.VduInitialDelta
properties:
initial_delta:
number_of_instances: 1
targets: [ VDU1 ]
- VDU1_scaling_aspect_deltas:
type: tosca.policies.nfv.VduScalingAspectDeltas
properties:
aspect: vdu1_aspect
deltas:
delta_1:
number_of_instances: 1
targets: [ VDU1 ]
- instantiation_levels:
type: tosca.policies.nfv.InstantiationLevels
properties:
levels:
instantiation_level_1:
description: Smallest size
scale_info:
vdu1_aspect:
scale_level: 0
instantiation_level_2:
description: Largest size
scale_info:
vdu1_aspect:
scale_level: 1
default_level: instantiation_level_1
- vdu1_instantiation_levels:
type: tosca.policies.nfv.VduInstantiationLevels
properties:
levels:
instantiation_level_1:
number_of_instances: 1
instantiation_level_2:
number_of_instances: 2
targets: [ VDU1 ]

@@ -0,0 +1,31 @@
tosca_definitions_version: tosca_simple_yaml_1_2
description: Sample VNF
imports:
- etsi_nfv_sol001_common_types.yaml
- etsi_nfv_sol001_vnfd_types.yaml
- helloworld3_types.yaml
- helloworld3_df_simple.yaml
topology_template:
inputs:
selected_flavour:
type: string
description: VNF deployment flavour selected by the consumer. It is provided in the API
node_templates:
VNF:
type: company.provider.VNF
properties:
flavour_id: { get_input: selected_flavour }
descriptor_id: b1bb0ce7-ebca-4fa7-95ed-4840d70a1177
provider: Company
product_name: Sample VNF
software_version: '1.0'
descriptor_version: '1.0'
vnfm_info:
- Tacker
requirements:
#- virtual_link_external # mapped in lower-level templates
#- virtual_link_internal # mapped in lower-level templates

@@ -0,0 +1,53 @@
tosca_definitions_version: tosca_simple_yaml_1_2
description: VNF type definition
imports:
- etsi_nfv_sol001_common_types.yaml
- etsi_nfv_sol001_vnfd_types.yaml
node_types:
company.provider.VNF:
derived_from: tosca.nodes.nfv.VNF
properties:
descriptor_id:
type: string
constraints: [ valid_values: [ b1bb0ce7-ebca-4fa7-95ed-4840d70a1177 ] ]
default: b1bb0ce7-ebca-4fa7-95ed-4840d70a1177
descriptor_version:
type: string
constraints: [ valid_values: [ '1.0' ] ]
default: '1.0'
provider:
type: string
constraints: [ valid_values: [ 'Company' ] ]
default: 'Company'
product_name:
type: string
constraints: [ valid_values: [ 'Sample VNF' ] ]
default: 'Sample VNF'
software_version:
type: string
constraints: [ valid_values: [ '1.0' ] ]
default: '1.0'
vnfm_info:
type: list
entry_schema:
type: string
constraints: [ valid_values: [ Tacker ] ]
default: [ Tacker ]
flavour_id:
type: string
constraints: [ valid_values: [ simple ] ]
default: simple
flavour_description:
type: string
default: ""
requirements:
- virtual_link_external:
capability: tosca.capabilities.nfv.VirtualLinkable
- virtual_link_internal:
capability: tosca.capabilities.nfv.VirtualLinkable
interfaces:
Vnflcm:
type: tosca.interfaces.nfv.Vnflcm

@@ -0,0 +1,28 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: vdu1-simple
namespace: default
labels:
app: nginx
spec:
replicas: 1
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx:1.7.9
imagePullPolicy: IfNotPresent
ports:
- containerPort: 80
- name: kuryr-demo
image: celebdor/kuryr-demo
imagePullPolicy: IfNotPresent
ports:
- containerPort: 8080

@@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
name: nginx-service
namespace: default
spec:
type: NodePort
selector:
app: nginx
ports:
- port: 80
nodePort: 30422
name: http
- port: 8080
nodePort: 32019
name: tcp
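# The fixed nodePort values above (30422, 32019) are read from this
# file by the MgmtDriver and passed to configure_lb.sh at
# instantiate_end.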

@@ -0,0 +1,11 @@
apiVersion: v1
kind: Service
metadata:
name: nginx-service
namespace: kube-system
spec:
type: NodePort
selector:
app: nginx
ports:
- port: 80
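# No nodePort is set here, so the MgmtDriver queries the port that
# Kubernetes assigns (via 'kubectl describe svc') after deployment.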

@@ -0,0 +1,29 @@
TOSCA-Meta-File-Version: 1.0
Created-by: dummy_user
CSAR-Version: 1.1
Entry-Definitions: Definitions/helloworld3_top.vnfd.yaml
Name: Files/kubernetes/deployment.yaml
Content-Type: application/yaml
Algorithm: SHA-256
Hash: 3e87eebb432c3ff59f54d6dddd12a7adff3b8d7b1e272d5b5de3df5ae9aec4d4
Name: Files/kubernetes/service_with_nodeport.yaml
Content-Type: application/yaml
Algorithm: SHA-256
Hash: 2d76d6ef7500f90bff63e519c9740516dbe258134f8cbeada7bc29d43bd515b6
Name: Files/kubernetes/service_without_nodeport.yaml
Content-Type: application/yaml
Algorithm: SHA-256
Hash: 9994a5a5f630c41d178ac58fff93140d3509da5f01518a7bd0e16db70a545c5e
Name: Scripts/configure_lb.sh
Content-Type: application/x-shellscript
Algorithm: SHA-256
Hash: 45a01e214c06a66dc6b7a018650e292a8cc216e7db4cb638712852a843679d0d
Name: Scripts/cnf_nodeport_mgmt.py
Content-Type: text/x-python
Algorithm: SHA-256
Hash: 927d34e813f635f069ed352914aa7260e26c6b011560962aee5eb49e9faed927

@@ -0,0 +1,128 @@
#!/bin/bash
set -o xtrace
###############################################################################
#
# This script sets nodePort entries in the external LoadBalancer.
# Its operation has been confirmed on the Ubuntu environment below.
#
# * OS type : Ubuntu(64 bit)
# * OS version : 20.04 LTS
# * OS architecture : amd64 (x86_64)
# * Disk/Ram size : 40GB/2GB
# * Pre setup user : ubuntu
#
###############################################################################
#==============================================================================
# Usage Definition
#==============================================================================
function usage {
sudo cat <<_EOT_
$(basename ${0}) is a script to configure the external loadbalancer.
Usage:
$(basename ${0}) [-d] [-o] [-i <nodePort info>]
[-a <add configuration flag>]
Description:
This script is to set nodePort info into external loadbalancer's
configuration.
Options:
-i all nodePort info (use "#" to separate)
-a add/delete configuration flag
--help, -h Print this
_EOT_
exit 1
}
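# Example invocation (values illustrative):
# bash configure_lb.sh -i "default,test,8080,8011#mynamespace,nginx,8012" -a True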
declare -g DEBUG_MODE="False"
declare -g OUTPUT_LOGFILE="False"
# nodePort info
declare -g NODEPORTSTR=${NODEPORTSTR:-}
declare -a -g NODEPORTS=${NODEPORTS:-}
declare -g ADD_CONFIGURE_FLAG="True"
if [ "$OPTIND" = 1 ]; then
while getopts doi:a:h OPT; do
case $OPT in
i)
NODEPORTSTR=$OPTARG # default,test,8080,8011#mynamespace,nginx,8012
NODEPORTS=(${NODEPORTSTR//#/ })
;;
a)
ADD_CONFIGURE_FLAG=$OPTARG
;;
h)
echo "h option. display help"
usage
;;
\?)
echo "Try to enter the h option." 1>&2
;;
esac
done
else
echo "No installed getopts-command." 1>&2
exit 1
fi
# Modify Haproxy
#----------------
function add_haproxy_conf {
for(( i=0;i<${#NODEPORTS[@]};i++)); do
split_node_port=(${NODEPORTS[i]//,/ })
cat <<EOF | sudo tee -a /etc/haproxy/haproxy.cfg >/dev/null
frontend ${split_node_port[0]}_${split_node_port[1]}
mode tcp
EOF
unset split_node_port[0]
unset split_node_port[1]
all_node_port=("${split_node_port[@]}")
for(( j=0;j<${#all_node_port[@]};j++)); do
cat <<EOF | sudo tee -a /etc/haproxy/haproxy.cfg >/dev/null
bind *:${all_node_port[j]}
EOF
done
cat <<EOF | sudo tee -a /etc/haproxy/haproxy.cfg >/dev/null
option tcplog
default_backend kubernetes-nodeport
EOF
done
}
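# For '-i default,nginx-service,30422,32019' (illustrative) the loop
# above appends a block like:
# frontend default_nginx-service
# mode tcp
# bind *:30422
# bind *:32019
# option tcplog
# default_backend kubernetes-nodeport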
function delete_haproxy_conf {
for(( i=0;i<${#NODEPORTS[@]};i++)); do
split_node_port=(${NODEPORTS[i]//,/ })
start_str=${split_node_port[0]}_${split_node_port[1]}
end_str='default_backend kubernetes-nodeport'
start_line_no=`grep -n "$start_str" /etc/haproxy/haproxy.cfg | \
cut -d ":" -f 1`
end_line_no=`grep -n "$end_str" /etc/haproxy/haproxy.cfg | head -1 |\
cut -d ":" -f 1`
sudo sed -i "${start_line_no},${end_line_no}d" /etc/haproxy/haproxy.cfg
done
}
function restart_haproxy {
sudo systemctl restart haproxy
sudo systemctl status haproxy | grep Active
result=$(ss -lnt |grep -E "8383")
if [[ -z $result ]]; then
echo 'restart haproxy failed!'
exit 255
fi
}
# Main
# ____
# set config file
if [[ $ADD_CONFIGURE_FLAG == "True" ]]; then
add_haproxy_conf
else
delete_haproxy_conf
fi
restart_haproxy

@@ -0,0 +1,278 @@
#!/bin/bash
set -o xtrace
###############################################################################
#
# This script will install the external LoadBalancer.
# Its operation has been confirmed on the Ubuntu environment below.
#
# * OS type : Ubuntu(64 bit)
# * OS version : 20.04 LTS
# * OS architecture : amd64 (x86_64)
# * Disk/Ram size : 40GB/2GB
# * Pre setup user : ubuntu
#
###############################################################################
#==============================================================================
# Usage Definition
#==============================================================================
function usage {
sudo cat <<_EOT_
$(basename ${0}) is a script to install the external loadbalancer.
Usage:
$(basename ${0}) [-d] [-o] [-m <master ip address>]
[-w <worker ip address>]
Description:
This script is to install external loadbalancer and set
loadbalancer's configuration.
Options:
-m all master nodes ip (use "," to separate)
-w all worker nodes ip (use "," to separate)
--help, -h Print this
_EOT_
exit 1
}
declare -g DEBUG_MODE="False"
declare -g OUTPUT_LOGFILE="False"
# timing bookkeeping and error helper for the devstack-derived
# functions below
declare -A -g _TIME_START
declare -A -g _TIME_TOTAL
function die {
local line=$1; shift
echo "ERROR (line ${line}): $*" 1>&2
exit 255
}
# master/worker ip
declare -g MASTER_IPADDRS=${MASTER_IPADDRS:-}
declare -a -g MASTER_IPS=${MASTER_IPS:-}
declare -g WORKER_IPADDRS=${WORKER_IPADDRS:-}
declare -a -g WORKER_IPS=${WORKER_IPS:-}
if [ "$OPTIND" = 1 ]; then
while getopts dom:w:h OPT; do
case $OPT in
m)
MASTER_IPADDRS=$OPTARG # 192.168.120.17,192.168.120.18
MASTER_IPS=(${MASTER_IPADDRS//,/ })
;;
w)
WORKER_IPADDRS=$OPTARG # 192.168.120.2,192.168.120.3
WORKER_IPS=(${WORKER_IPADDRS//,/ })
;;
h)
echo "h option. display help"
usage
;;
\?)
echo "Try to enter the h option." 1>&2
;;
esac
done
else
echo "No installed getopts-command." 1>&2
exit 1
fi
# Install Haproxy
#----------------
function install_haproxy {
REPOS_UPDATED=False apt_get_update
apt_get install haproxy
}
function modify_haproxy_conf {
cat <<EOF | sudo tee -a /etc/haproxy/haproxy.cfg >/dev/null
frontend kubernetes-apiserver
mode tcp
bind *:8383
option tcplog
default_backend kubernetes-apiserver
backend kubernetes-apiserver
mode tcp
balance roundrobin
EOF
for master_ip in ${MASTER_IPS[@]}; do
split_ips=(${master_ip//./ })
cat <<EOF | sudo tee -a /etc/haproxy/haproxy.cfg >/dev/null
server master${split_ips[3]} $master_ip:6443 check
EOF
done
cat <<EOF | sudo tee -a /etc/haproxy/haproxy.cfg >/dev/null
backend kubernetes-nodeport
mode tcp
balance roundrobin
EOF
for master_ip in ${MASTER_IPS[@]}; do
split_ips=(${master_ip//./ })
cat <<EOF | sudo tee -a /etc/haproxy/haproxy.cfg >/dev/null
server master${split_ips[3]} $master_ip check
EOF
done
for worker_ip in ${WORKER_IPS[@]}; do
split_ips=(${worker_ip//./ })
cat <<EOF | sudo tee -a /etc/haproxy/haproxy.cfg >/dev/null
server worker${split_ips[3]} $worker_ip check
EOF
done
}
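# Backend server names derive from the last IP octet, e.g. an
# illustrative '-m 192.168.120.17' yields
# 'server master17 192.168.120.17:6443 check' under kubernetes-apiserver.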
function start_haproxy {
sudo systemctl enable haproxy
sudo systemctl start haproxy
sudo systemctl status haproxy | grep Active
result=$(ss -lnt |grep -E "8383")
if [[ -z $result ]]; then
sudo systemctl restart haproxy
fi
}
# Install Kubectl
#-------------------
function install_kubectl {
REPOS_UPDATED=False apt_get_update
apt_get install -y apt-transport-https curl
result=`curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | \
sudo apt-key add -`
if [[ $result != "OK" ]]; then
exit 0
fi
echo "deb https://apt.kubernetes.io/ kubernetes-xenial main" | \
sudo tee -a /etc/apt/sources.list.d/kubernetes.list
apt_get update
apt_get install -y kubectl
mkdir -p $HOME/.kube
touch $HOME/.kube/config
sudo apt-get install mlocate
locate bash_completion
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
}
# Set common functions
#
# Refer: devstack project functions-common
#-----------------------------------------
function apt_get_update {
if [[ "$REPOS_UPDATED" == "True" ]]; then
return
fi
local sudo="sudo"
[[ "$(id -u)" = "0" ]] && sudo="env"
# time all the apt operations
time_start "apt-get-update"
local update_cmd="sudo apt-get update"
if ! timeout 300 sh -c "while ! $update_cmd; do sleep 30; done"; then
die $LINENO "Failed to update apt repos, we're dead now"
fi
REPOS_UPDATED=True
# stop the clock
time_stop "apt-get-update"
}
function time_start {
local name=$1
local start_time=${_TIME_START[$name]}
if [[ -n "$start_time" ]]; then
die $LINENO \
"Trying to start the clock on $name, but it's already been started"
fi
_TIME_START[$name]=$(date +%s%3N)
}
function time_stop {
local name
local end_time
local elapsed_time
local total
local start_time
name=$1
start_time=${_TIME_START[$name]}
if [[ -z "$start_time" ]]; then
die $LINENO \
"Trying to stop the clock on $name, but it was never started"
fi
end_time=$(date +%s%3N)
elapsed_time=$(($end_time - $start_time))
total=${_TIME_TOTAL[$name]:-0}
# reset the clock so we can start it in the future
_TIME_START[$name]=""
_TIME_TOTAL[$name]=$(($total + $elapsed_time))
}
function apt_get {
local xtrace result
xtrace=$(set +o | grep xtrace) # set +o xtrace
set +o xtrace
[[ "$OFFLINE" = "True" || -z "$@" ]] && return
local sudo="sudo"
[[ "$(id -u)" = "0" ]] && sudo="env"
# time all the apt operations
time_start "apt-get"
$xtrace
$sudo DEBIAN_FRONTEND=noninteractive \
http_proxy=${http_proxy:-} https_proxy=${https_proxy:-} \
no_proxy=${no_proxy:-} \
apt-get --option "Dpkg::Options::=--force-confold" \
--assume-yes "$@" < /dev/null
result=$?
# stop the clock
time_stop "apt-get"
return $result
}
# Pre preparations
# ________________
function check_OS {
. /etc/os-release
if [[ $PRETTY_NAME =~ "Ubuntu 20.04" ]]; then
os_architecture=`uname -a | grep 'x86_64'`
if [[ $os_architecture == "" ]]; then
echo "Your OS does not support at present."
echo "It only supports x86_64."
fi
else
echo "Your OS does not support at present."
echo "It only supports Ubuntu 20.04.1 LTS."
fi
}
function set_sudoers {
echo "ubuntu ALL=(ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/ubuntu
}
function set_apt-conf_proxy {
sudo touch /etc/apt/apt.conf.d/proxy.conf
cat <<EOF | sudo tee /etc/apt/apt.conf.d/proxy.conf >/dev/null
Acquire::http::Proxy "${http_proxy}";
Acquire::https::Proxy "${https_proxy}";
EOF
}
# Main
# ____
# pre preparations
set_apt-conf_proxy
set_sudoers
check_OS
# install haproxy and set config file
install_haproxy
modify_haproxy_conf
start_haproxy
# install kubectl
install_kubectl

@@ -0,0 +1,832 @@
# Copyright (C) 2021 FUJITSU
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ipaddress
import os
import time
import eventlet
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import uuidutils
import paramiko
import yaml
from tacker.common import cmd_executer
from tacker.common import exceptions
from tacker.common import log
from tacker.db.db_base import CommonDbMixin
from tacker.db.nfvo import nfvo_db
from tacker.extensions import nfvo
from tacker.nfvo.nfvo_plugin import NfvoPlugin
from tacker import objects
from tacker.vnfm.infra_drivers.openstack import heat_client as hc
from tacker.vnfm.mgmt_drivers import vnflcm_abstract_driver
from tacker.vnfm import vim_client
COMMAND_WAIT_RETRY_TIME = 30
CONF = cfg.CONF
CONNECT_REMOTE_SERVER_RETRY_COUNT = 4
K8S_CMD_TIMEOUT = 30
K8S_DEPLOY_TIMEOUT = 300
K8S_INSTALL_TIMEOUT = 2700
LOG = logging.getLogger(__name__)
ROLE_MASTER = 'master'
ROLE_WORKER = 'worker'
SERVER_WAIT_COMPLETE_TIME = 240
TOKEN_CREATE_WAIT_TIME = 30
class KubesprayMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
def get_type(self):
return 'mgmt-drivers-kubespray'
def get_name(self):
return 'mgmt-drivers-kubespray'
def get_description(self):
return 'Tacker Kubespray VNFMgmt Driver'
@log.log
def instantiate_start(self, context, vnf_instance,
instantiate_vnf_request, grant,
grant_request, **kwargs):
pass
def _get_vim(self, context, vim_connection_info):
vim_client_obj = vim_client.VimClient()
if vim_connection_info:
vim_id = vim_connection_info[0].vim_id
access_info = vim_connection_info[0].access_info
if access_info:
region_name = access_info.get('region')
else:
region_name = None
else:
vim_id = None
region_name = None
try:
vim_res = vim_client_obj.get_vim(
context, vim_id, region_name=region_name)
except nfvo.VimNotFoundException:
raise exceptions.VimConnectionNotFound(vim_id=vim_id)
vim_res['vim_auth'].update({'region': region_name})
vim_info = {'id': vim_res['vim_id'], 'vim_id': vim_res['vim_id'],
'vim_type': vim_res['vim_type'],
'access_info': vim_res['vim_auth']}
return vim_info
def _get_vim_connection_info(self, context, instantiate_vnf_req):
vim_info = self._get_vim(
context, instantiate_vnf_req.vim_connection_info)
vim_connection_info = objects.VimConnectionInfo.obj_from_primitive(
vim_info, context)
return vim_connection_info
def _check_is_cidr(self, key, value, cidr_str):
# instantiate: check cidr
try:
ipaddress.ip_network(cidr_str)
except ValueError:
LOG.error('The {value} of {key} in the '
'additionalParams is invalid.'.format(
value=value, key=key))
raise exceptions.MgmtDriverParamInvalid(param=value)
def _check_value_exist(self, attr_list, value, key):
for attr in attr_list:
if not value.get(attr):
LOG.error(
'The {} of {} in the '
'additionalParams cannot'
' be None.'.format(attr, key))
raise exceptions.MgmtDriverNotFound(
param=attr)
def _check_input_parameters(self, additional_param, vnf_package_path):
if not additional_param:
LOG.error('The kubernetes cluster info cannot be None '
'in additionalParams.')
raise exceptions.MgmtDriverOtherError(
error_message="The kubernetes cluster info"
" cannot be None in additionalParams.")
if not isinstance(additional_param, dict):
LOG.error('The format of kubernetes cluster info in '
'additionalParams is invalid. It must be dict.')
raise exceptions.MgmtDriverOtherError(
error_message="The format of kubernetes cluster info in "
"additionalParams is invalid. It must be dict.")
for key, value in additional_param.items():
attr_list = []
if key not in ('proxy', 'external_lb_param', 'vim_name'):
attr_list.extend(['username', 'password'])
if key in ('master_node', 'worker_node', 'external_lb_param'):
attr_list.extend(['ssh_cp_name'])
if key == 'ansible':
attr_list.extend(['ip_address', 'kubespray_root_path',
'transferring_inventory_path'])
if key == 'external_lb_param':
attr_list.extend(['ssh_username', 'ssh_password',
'script_path'])
if value.get('script_path'):
abs_script_path = os.path.join(
vnf_package_path, value.get('script_path'))
if not os.path.exists(abs_script_path):
LOG.error('The path of external_lb_param'
' script is invalid.')
raise exceptions.MgmtDriverOtherError(
error_message="The path of external_lb_param"
" script is invalid")
if key in ('master_node', 'ansible'):
for attr in ['pod_cidr', 'cluster_cidr', 'ip_address']:
if value.get(attr):
self._check_is_cidr(
key, attr, value.get(attr))
if attr_list:
self._check_value_exist(attr_list, value, key)
def _get_ssh_ip_and_nic_ip(self, heatclient, stack_id, node):
resource_info = heatclient.resources.get(
stack_id=stack_id,
resource_name=node.get('ssh_cp_name'))
if resource_info.attributes.get('floating_ip_address'):
ssh_ip = resource_info.attributes.get('floating_ip_address')
else:
ssh_ip = resource_info.attributes.get(
'fixed_ips')[0].get('ip_address')
if not ssh_ip:
LOG.error("Failed to get the node's ssh ip.")
raise exceptions.MgmtDriverOtherError(
error_message="Failed to get"
" the node's ssh ip.")
if not node.get('nic_cp_name'):
nic_ip = ssh_ip
else:
nic_ip = heatclient.resources.get(
stack_id=stack_id,
resource_name=node.get('nic_cp_name')).attributes.get(
'fixed_ips')[0].get('ip_address')
if not nic_ip:
LOG.error("Failed to get the node's nic ip.")
raise exceptions.MgmtDriverOtherError(
error_message="Failed to get"
" the node's nic ip.")
return ssh_ip, nic_ip
def _get_group_resources_list(
self, heatclient, stack_id, node, additional_params):
# get group resources list
nest_resources_list = heatclient.resources.list(stack_id=stack_id)
group_stack_name = node.get("aspect_id")
group_stack_id = ""
for nest_resources in nest_resources_list:
if nest_resources.resource_name == group_stack_name:
group_stack_id = nest_resources.physical_resource_id
if not group_stack_id:
LOG.error('No stack id matching the group {} was'
' found.'.format(group_stack_name))
raise exceptions.MgmtDriverOtherError(
error_message='No stack id matching the'
' group {} was found.'.format(group_stack_name))
group_resources_list = heatclient.resources.list(
stack_id=group_stack_id)
return group_resources_list
def _get_install_info_for_k8s_node(self, nest_stack_id, node,
additional_params, heatclient):
# instantiate: get k8s ssh ips
vm_dict_list = []
# get ssh_ip and nic_ip from heat, and set value into vm_dict
if not node.get('aspect_id'):
ssh_ip, nic_ip = self._get_ssh_ip_and_nic_ip(
heatclient, nest_stack_id, node)
vm_dict = {
"ssh_ip": ssh_ip,
"nic_ip": nic_ip
}
vm_dict_list.append(vm_dict)
else:
group_resources_list = self._get_group_resources_list(
heatclient, nest_stack_id, node, additional_params)
for group_resource in group_resources_list:
stack_id = group_resource.physical_resource_id
ssh_ip, nic_ip = self._get_ssh_ip_and_nic_ip(
heatclient, stack_id, node)
vm_dict = {
"ssh_ip": ssh_ip,
"nic_ip": nic_ip
}
vm_dict_list.append(vm_dict)
return vm_dict_list
def _set_lb_info(self, nest_stack_id, external_lb_param, master_node,
heatclient):
# get ssh_ip and cluster_ip from heat, and set value into vm_dict
ssh_ip, _ = self._get_ssh_ip_and_nic_ip(
heatclient, nest_stack_id, external_lb_param)
external_lb_param['pod_cidr'] = master_node.get('pod_cidr', '')
external_lb_param['cluster_cidr'] = master_node.get(
'cluster_cidr', '')
external_lb_param['ssh_ip'] = ssh_ip
external_lb_param['cluster_ip'] = ssh_ip
def _init_commander_and_set_script(self, user, password, host,
timeout, vnf_package_path=None,
script_path=None, token_flag=False):
retry = CONNECT_REMOTE_SERVER_RETRY_COUNT
while retry > 0:
try:
if (vnf_package_path and script_path) or token_flag:
connect = paramiko.Transport(host, 22)
connect.connect(username=user, password=password)
sftp = paramiko.SFTPClient.from_transport(connect)
if vnf_package_path and script_path:
sftp.put(os.path.join(vnf_package_path, script_path),
"/tmp/{}".format(
script_path.replace('Scripts', '')))
if token_flag:
fname = 'create_admin_token.yaml'
sftp.put(os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"../../../samples/mgmt_driver/{}".format(fname)),
"/tmp/{}".format(fname))
connect.close()
commander = cmd_executer.RemoteCommandExecutor(
user=user, password=password, host=host,
timeout=timeout)
return commander
except (exceptions.NotAuthorized, paramiko.SSHException,
paramiko.ssh_exception.NoValidConnectionsError) as e:
LOG.debug(e)
retry -= 1
if retry == 0:
LOG.error(e)
raise paramiko.SSHException()
time.sleep(SERVER_WAIT_COMPLETE_TIME)
def _send_or_receive_file(self, host, user, password,
remote_file, local_file, operation):
connect = paramiko.Transport(host, 22)
connect.connect(username=user, password=password)
sftp = paramiko.SFTPClient.from_transport(connect)
if operation == 'receive':
sftp.get(remote_file, local_file)
else:
sftp.put(local_file, remote_file)
connect.close()
def _execute_command(self, commander, ssh_command, timeout, type, retry):
eventlet.monkey_patch()
while retry >= 0:
try:
with eventlet.Timeout(timeout, True):
result = commander.execute_command(
ssh_command, input_data=None)
break
except eventlet.timeout.Timeout:
LOG.debug('Timed out while executing command: '
'{}.'.format(ssh_command))
retry -= 1
if retry < 0:
LOG.error('Timed out while executing command: '
'{}.'.format(ssh_command))
raise exceptions.MgmtDriverOtherError(
error_message='Timed out while executing command: '
'{}.'.format(ssh_command))
time.sleep(COMMAND_WAIT_RETRY_TIME)
if type == 'common':
if result.get_return_code() != 0:
err = result.get_stderr()
LOG.error(err)
raise exceptions.MgmtDriverRemoteCommandError(err_info=err)
elif type == 'ansible':
if result.get_return_code() != 0 \
and 'No such file or directory' in result.get_stderr()[0]:
return False
else:
error_message = 'The transferring_inventory_path already ' \
'exists on the kubespray server. Please' \
' check your path.'
LOG.error(error_message)
raise exceptions.MgmtDriverRemoteCommandError(
err_info=error_message)
elif type == 'install':
if result.get_return_code() != 0:
for error in result.get_stdout():
if 'Timeout (12s) waiting for ' \
'privilege escalation prompt' in error and \
retry > 0:
self._execute_command(commander, ssh_command,
timeout, 'install', 0)
break
else:
err = result.get_stderr()
LOG.error(err)
raise exceptions.MgmtDriverRemoteCommandError(
err_info=err)
return result.get_stdout()
def _create_hosts_yaml(self, master_node, master_vm_dict_list,
worker_node, worker_vm_dict_list):
hosts_yaml_content = {
'all': {
'hosts': {},
'children': {
'kube-master': {'hosts': {}},
'kube-node': {'hosts': {}},
'etcd': {'hosts': {}},
'k8s-cluster': {
'children': {'kube-master': None, 'kube-node': None}},
'calico-rr': {'hosts': {}}}}}
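# Illustrative result for one master and one worker (IPs made up):
# all:
#   hosts:
#     master17: {ansible_host: 192.168.120.17, ip: 10.10.0.17, ...}
#     worker26: {ansible_host: 192.168.120.26, ip: 10.10.0.26, ...}
#   children:
#     kube-master: {hosts: {master17: null}}
#     etcd: {hosts: {master17: null}}
#     kube-node: {hosts: {worker26: null}}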
for master_vm in master_vm_dict_list:
key = 'master' + master_vm.get('nic_ip').split('.')[-1]
hosts_yaml_content['all']['hosts'][key] = {
'ansible_host': master_vm.get('ssh_ip'),
'ip': master_vm.get('nic_ip'),
'ansible_user': master_node.get('username'),
'ansible_password': master_node.get('password'),
}
hosts_yaml_content['all']['children']['kube-master'][
'hosts'][key] = None
hosts_yaml_content['all']['children']['etcd'][
'hosts'][key] = None
for worker_vm in worker_vm_dict_list:
key = 'worker' + worker_vm.get('nic_ip').split('.')[-1]
hosts_yaml_content['all']['hosts'][key] = {
'ansible_host': worker_vm.get('ssh_ip'),
'ip': worker_vm.get('nic_ip'),
'ansible_user': worker_node.get('username'),
'ansible_password': worker_node.get('password'),
}
hosts_yaml_content['all']['children']['kube-node'][
'hosts'][key] = None
return hosts_yaml_content
def _install_k8s_cluster_and_set_config(
self, master_node, worker_node, proxy, ansible,
external_lb_param, master_vm_dict_list, worker_vm_dict_list):
"""Install Kubernetes Cluster Function
It will use Kubespray which is installed in advance to install
a Kubernetes Cluster.
At present, Kubespray's version is v2.16.0. You can get detailed
information from the following url.
https://github.com/kubernetes-sigs/kubespray/tree/v2.16.0
"""
# get mtu value
master_commander = self._init_commander_and_set_script(
master_node.get('username'), master_node.get('password'),
master_vm_dict_list[0].get('ssh_ip'), K8S_CMD_TIMEOUT)
ssh_command = "ip a | grep '%(nic_ip)s' -B 2 | " \
"grep 'mtu' | awk '{print $5}'" % \
{'nic_ip': master_vm_dict_list[0].get('nic_ip')}
mtu_value = self._execute_command(
master_commander, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)[0].replace('\n', '')
calico_veth_mtu = int(mtu_value) - 20
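# The 20-byte reduction typically leaves room for the IP-in-IP
# encapsulation header used by Calico's default overlay.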
master_commander.close_session()
# create inventory/hosts.yaml
ansible_commander = self._init_commander_and_set_script(
ansible.get('username'), ansible.get('password'),
ansible.get('ip_address'), K8S_CMD_TIMEOUT)
ssh_command = "ls -l {}".format(
ansible.get('transferring_inventory_path'))
file_exists_flag = self._execute_command(
ansible_commander, ssh_command, K8S_CMD_TIMEOUT, 'ansible', 0)
if not file_exists_flag:
ssh_command = 'cp -r {kubespray_root_path}/inventory/sample' \
' {transferring_inventory_path}'.format(
kubespray_root_path=ansible.get(
'kubespray_root_path'),
transferring_inventory_path=ansible.get(
'transferring_inventory_path'))
self._execute_command(
ansible_commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0)
hosts_yaml_content = self._create_hosts_yaml(
master_node, master_vm_dict_list,
worker_node, worker_vm_dict_list)
local_hosts_yaml_path = '/tmp/hosts.yaml'
with open(local_hosts_yaml_path, 'w', encoding='utf-8') as nf:
yaml.safe_dump(hosts_yaml_content, nf, default_flow_style=False)
remote_hosts_yaml_path = ansible.get(
'transferring_inventory_path') + '/hosts.yaml'
self._send_or_receive_file(
ansible.get('ip_address'), ansible.get('username'),
ansible.get('password'), remote_hosts_yaml_path,
local_hosts_yaml_path, 'send')
# set calico mtu value
calico_file_path = ansible.get(
'transferring_inventory_path') + \
'/group_vars/k8s_cluster/k8s-net-calico.yml'
ssh_comma