Sample script for deploying k8s with kubespray

This patch provides a sample script that allows users to install
or delete Kubernetes on VMs in the VNF when executing
instantiate/terminate/scale/heal through the Kubespray Ansible
server.
It also provides a script to install and configure a Load Balancer
for the Kubernetes cluster in the VNF.
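
For reference, the Kubespray MgmtDriver reads its settings from the
`k8s_cluster_installation_param` key in additionalParams. A minimal
sketch of that structure is shown below; every address, credential
and path in it is illustrative only, not a default:

k8s_cluster_installation_param = {
    'vim_name': 'kubernetes_vim',
    'master_node': {
        'aspect_id': 'master_instance',
        'ssh_cp_name': 'masterNode_CP1',
        'username': 'ubuntu',
        'password': 'ubuntu',
        'pod_cidr': '10.199.187.0/24',      # optional
        'cluster_cidr': '10.199.188.0/24',  # optional
    },
    'worker_node': {
        'aspect_id': 'worker_instance',
        'ssh_cp_name': 'workerNode_CP2',
        'username': 'ubuntu',
        'password': 'ubuntu',
    },
    'proxy': {                              # optional
        'http_proxy': 'http://user:password@proxyhost:8080',
        'https_proxy': 'http://user:password@proxyhost:8080',
    },
    'ansible': {
        'ip_address': '10.10.0.50',
        'username': 'ansible',
        'password': 'ansible',
        'kubespray_root_path': '/home/ansible/kubespray-2.16.0',
        'transferring_inventory_path': '/home/ansible/inventory/mycluster',
    },
    'external_lb_param': {
        'ssh_cp_name': 'externalLB_CP3',
        'ssh_username': 'ubuntu',
        'ssh_password': 'ubuntu',
        'script_path': 'Scripts/install_external_lb.sh',
    },
}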

When instantiating a CNF with a Service resource of type `NodePort`
on a Kubernetes VIM deployed by Kubespray, the Service's port must be
added to the Load Balancer's configuration and the Load Balancer must
be restarted. This patch therefore also provides a sample MgmtDriver
and shell script to handle this.
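
For reference, the sample CnfNodePortMgmt driver expects an
`lcm-kubernetes-external-lb` entry in additionalParams alongside the
usual `lcm-kubernetes-def-files` list. A hedged sketch follows; the
SSH credentials and address are illustrative, while the file paths
match the sample package in this patch:

additional_params = {
    'lcm-kubernetes-def-files': [
        'Files/kubernetes/deployment.yaml',
        'Files/kubernetes/service_with_nodeport.yaml',
        'Files/kubernetes/service_without_nodeport.yaml',
    ],
    'lcm-kubernetes-external-lb': {
        'script_path': 'Scripts/configure_lb.sh',
        'external_lb_param': {
            'ssh_username': 'ubuntu',
            'ssh_password': 'ubuntu',
            'ssh_ip': '10.10.0.60',
        },
    },
}

In instantiate_end the driver collects each Service's namespace, name
and nodePort(s) and runs, on the Load Balancer VM:
    bash /tmp/configure_lb.sh -i <namespace,name,ports#...> -a True
and the same command with `-a False` in terminate_end.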

We also found that if the instantiate operation fails after the
`instantiate_end` operation, `terminate_end` is not executed during
the rollback, which can leave the changes made in `instantiate_end`
behind in the environment. This patch therefore adds a `terminate_end`
operation to `post_rollback_vnf`.

Implements: blueprint k8s-mgmtdriver-kubespray
Change-Id: I45661b5d8006e87db5f46a595756231849723ce6
Yi Feng 2021-07-20 16:14:48 +09:00
parent 89c3afc72d
commit b3bf4ec2ce
24 changed files with 2505 additions and 5 deletions

@@ -0,0 +1,344 @@
# Copyright (C) 2021 FUJITSU
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ipaddress
import os
import time
import eventlet
from oslo_log import log as logging
import paramiko
import yaml
from tacker.common import cmd_executer
from tacker.common import exceptions
from tacker.common import log
from tacker.vnflcm import utils as vnflcm_utils
from tacker.vnfm.mgmt_drivers import vnflcm_abstract_driver
COMMAND_WAIT_RETRY_TIME = 30
CONNECT_REMOTE_SERVER_RETRY_COUNT = 4
LOG = logging.getLogger(__name__)
K8S_CMD_TIMEOUT = 60
K8S_DEPLOY_TIMEOUT = 300
SERVER_WAIT_COMPLETE_TIME = 120
class CnfNodePortMgmt(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
def get_type(self):
return 'mgmt-drivers-cnf-nodeport'
def get_name(self):
return 'mgmt-drivers-cnf-nodeport'
def get_description(self):
return 'Tacker CNFMgmt NodePort Setting Driver'
@log.log
def instantiate_start(self, context, vnf_instance,
instantiate_vnf_request, grant,
grant_request, **kwargs):
pass
def _check_is_cidr(self, key, value, cidr_str):
# instantiate: check cidr
try:
ipaddress.ip_network(cidr_str)
except ValueError:
LOG.error('The {value} of {key} in the '
'additionalParams is invalid.'.format(
value=value, key=key))
raise exceptions.MgmtDriverParamInvalid(param=value)
def _check_input_parameters(self, additional_param, vnf_package_path):
if not additional_param:
LOG.error("The 'lcm-kubernetes-external-lb' cannot be None "
"in additionalParams.")
raise exceptions.MgmtDriverOtherError(
error_message="The 'lcm-kubernetes-external-lb' cannot"
" be None in additionalParams.")
if not isinstance(additional_param, dict):
LOG.error("The format of 'lcm-kubernetes-external-lb' in "
"additionalParams is invalid. It must be dict.")
raise exceptions.MgmtDriverOtherError(
error_message="The format of 'lcm-kubernetes-external-lb' in "
"additionalParams is invalid. It must be dict.")
for key, value in additional_param.items():
if key == 'external_lb_param':
for attr in ['ssh_username', 'ssh_password', 'ssh_ip']:
if not value.get(attr):
LOG.error(
'The {} of {} in the '
'additionalParams cannot'
' be None.'.format(attr, key))
raise exceptions.MgmtDriverNotFound(
param=attr)
if value.get('ssh_ip'):
self._check_is_cidr(
key, 'ssh_ip', value.get('ssh_ip'))
if not additional_param.get('script_path'):
LOG.error('The script_path of {} in the '
'additionalParams cannot be None.'.format(key))
raise exceptions.MgmtDriverNotFound(
param='script_path')
abs_script_path = os.path.join(
vnf_package_path, additional_param.get('script_path'))
if not os.path.exists(abs_script_path):
LOG.error('The path of external_lb_param'
' script is invalid.')
raise exceptions.MgmtDriverOtherError(
error_message="The path of external_lb_param"
" script is invalid")
def _init_commander_and_set_script(self, user, password, host,
timeout, vnf_package_path=None,
script_path=None):
retry = CONNECT_REMOTE_SERVER_RETRY_COUNT
while retry > 0:
try:
if (vnf_package_path and script_path):
connect = paramiko.Transport(host, 22)
connect.connect(username=user, password=password)
sftp = paramiko.SFTPClient.from_transport(connect)
sftp.put(os.path.join(vnf_package_path, script_path),
"/tmp/{}".format(
script_path.replace('Scripts', '')))
connect.close()
commander = cmd_executer.RemoteCommandExecutor(
user=user, password=password, host=host,
timeout=timeout)
return commander
except (exceptions.NotAuthorized, paramiko.SSHException,
paramiko.ssh_exception.NoValidConnectionsError) as e:
LOG.debug(e)
retry -= 1
if retry == 0:
LOG.error(e)
raise paramiko.SSHException()
time.sleep(SERVER_WAIT_COMPLETE_TIME)
def _execute_command(self, commander, ssh_command, timeout, type, retry):
eventlet.monkey_patch()
while retry >= 0:
try:
with eventlet.Timeout(timeout, True):
result = commander.execute_command(
ssh_command, input_data=None)
break
except eventlet.timeout.Timeout:
LOG.debug('Timed out while executing command: '
'{}.'.format(ssh_command))
retry -= 1
if retry < 0:
LOG.error('Timed out while executing command: '
'{}.'.format(ssh_command))
raise exceptions.MgmtDriverOtherError(
error_message='Timed out while executing command: '
'{}.'.format(ssh_command))
time.sleep(COMMAND_WAIT_RETRY_TIME)
if type == 'common':
if result.get_return_code() != 0:
err = result.get_stderr()
LOG.error(err)
raise exceptions.MgmtDriverRemoteCommandError(err_info=err)
return result.get_stdout()
def _get_nodeport_from_kubernetes(self, no_port_info_list, lb_commander,
resource_info_list):
for no_port_info in no_port_info_list:
ssh_command = "kubectl describe svc '%(svc_name)s' -n" \
" '%(namespace)s' | grep NodePort: | awk" \
" '{print $3}' | awk -F '/' '{print $1}'" \
% {'svc_name': no_port_info.get('name'),
'namespace': no_port_info.get('namespace')}
results = self._execute_command(
lb_commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0)
node_ports = ','.join([result.replace(
'\n', '') for result in results])
no_port_info['node_ports'] = node_ports
resource_info_list.append(no_port_info)
def _get_script_input_parameter(self, vnf_package_path, additional_param,
operation_type):
script_path = additional_param.get(
'lcm-kubernetes-external-lb', {}).get('script_path')
ssh_ip = additional_param.get(
'lcm-kubernetes-external-lb', {}).get('external_lb_param').get(
'ssh_ip')
ssh_username = additional_param.get(
'lcm-kubernetes-external-lb', {}).get('external_lb_param').get(
'ssh_username')
ssh_password = additional_param.get(
'lcm-kubernetes-external-lb', {}).get('external_lb_param').get(
'ssh_password')
artifact_files = additional_param.get(
'lcm-kubernetes-def-files', {})
resource_info_list = []
no_port_info_list = []
for artifact_file in artifact_files:
artifact_file_path = os.path.join(
vnf_package_path, artifact_file)
with open(artifact_file_path, 'r', encoding='utf-8') as f:
yaml_content_all = yaml.safe_load_all(f.read())
for yaml_content in yaml_content_all:
if yaml_content.get('kind') == 'Service' and \
yaml_content.get('spec').get('type') == 'NodePort':
if operation_type == 'INSTANTIATE':
ports = yaml_content.get('spec').get('ports')
node_ports = [port.get(
'nodePort') for port in ports if port.get(
'nodePort')]
if len(node_ports) == len(ports):
node_ports_str = ','.join([str(
port) for port in node_ports])
resource_info = {
"namespace": yaml_content.get('metadata').get(
'namespace', 'default'),
"name": yaml_content.get('metadata').get(
'name'),
"node_ports": node_ports_str
}
resource_info_list.append(resource_info)
else:
no_port_info = {
"namespace": yaml_content.get('metadata').get(
'namespace', 'default'),
"name": yaml_content.get('metadata').get(
'name'),
}
no_port_info_list.append(no_port_info)
else:
resource_info = {
"namespace": yaml_content.get('metadata').get(
'namespace', 'default'),
"name": yaml_content.get('metadata').get('name')
}
resource_info_list.append(resource_info)
lb_commander = self._init_commander_and_set_script(
ssh_username, ssh_password, ssh_ip, K8S_CMD_TIMEOUT,
vnf_package_path=vnf_package_path,
script_path=script_path)
if operation_type == 'INSTANTIATE':
# get nodeport info from kubernetes
self._get_nodeport_from_kubernetes(
no_port_info_list, lb_commander, resource_info_list)
resource_info_str_list = []
for resource_info in resource_info_list:
resource_info_str = ','.join(
[value for key, value in resource_info.items()])
resource_info_str_list.append(resource_info_str)
all_resource_info_str = '#'.join(resource_info_str_list)
return lb_commander, all_resource_info_str
@log.log
def instantiate_end(self, context, vnf_instance,
instantiate_vnf_request, grant,
grant_request, **kwargs):
additional_param = instantiate_vnf_request.additional_params
vnf_package_path = vnflcm_utils._get_vnf_package_path(
context, vnf_instance.vnfd_id)
self._check_input_parameters(additional_param.get(
'lcm-kubernetes-external-lb', {}), vnf_package_path)
lb_commander, all_resource_info_str = \
self._get_script_input_parameter(
vnf_package_path, additional_param, 'INSTANTIATE')
ssh_command = 'bash /tmp/configure_lb.sh -i {} -a True'.format(
all_resource_info_str)
self._execute_command(
lb_commander, ssh_command, K8S_DEPLOY_TIMEOUT, 'common', 0)
lb_commander.close_session()
@log.log
def terminate_start(self, context, vnf_instance,
terminate_vnf_request, grant,
grant_request, **kwargs):
pass
@log.log
def terminate_end(self, context, vnf_instance,
terminate_vnf_request, grant,
grant_request, **kwargs):
vnf_package_path = vnflcm_utils._get_vnf_package_path(
context, vnf_instance.vnfd_id)
add_param = {}
if hasattr(terminate_vnf_request, 'additional_params'):
if terminate_vnf_request.additional_params:
additional_params = terminate_vnf_request.additional_params
add_param['lcm-kubernetes-external-lb'] = {
'script_path': additional_params.get('script_path'),
'external_lb_param': {
'ssh_ip': additional_params.get('ssh_ip'),
'ssh_username': additional_params.get('ssh_username'),
'ssh_password': additional_params.get('ssh_password'),
}
}
add_param['lcm-kubernetes-def-files'] = \
vnf_instance.instantiated_vnf_info.additional_params.get(
'lcm-kubernetes-def-files')
else:
add_param = \
vnf_instance.instantiated_vnf_info.additional_params
else:
add_param = \
vnf_instance.instantiated_vnf_info.additional_params
lb_commander, all_resource_info_str = \
self._get_script_input_parameter(
vnf_package_path, add_param, 'TERMINATE')
ssh_command = 'bash /tmp/configure_lb.sh -i {} -a False'.format(
all_resource_info_str)
self._execute_command(
lb_commander, ssh_command, K8S_DEPLOY_TIMEOUT, 'common', 0)
lb_commander.close_session()
@log.log
def scale_start(self, context, vnf_instance,
scale_vnf_request, grant,
grant_request, **kwargs):
pass
@log.log
def scale_end(self, context, vnf_instance,
scale_vnf_request, grant,
grant_request, **kwargs):
pass
@log.log
def heal_start(self, context, vnf_instance,
heal_vnf_request, grant,
grant_request, **kwargs):
pass
@log.log
def heal_end(self, context, vnf_instance,
heal_vnf_request, grant,
grant_request, **kwargs):
pass
@log.log
def change_external_connectivity_start(
self, context, vnf_instance,
change_ext_conn_request, grant,
grant_request, **kwargs):
pass
@log.log
def change_external_connectivity_end(
self, context, vnf_instance,
change_ext_conn_request, grant,
grant_request, **kwargs):
pass

@@ -0,0 +1,124 @@
tosca_definitions_version: tosca_simple_yaml_1_2
description: Sample VNF
imports:
- etsi_nfv_sol001_common_types.yaml
- etsi_nfv_sol001_vnfd_types.yaml
- helloworld3_types.yaml
topology_template:
inputs:
descriptor_id:
type: string
descriptor_version:
type: string
provider:
type: string
product_name:
type: string
software_version:
type: string
vnfm_info:
type: list
entry_schema:
type: string
flavour_id:
type: string
flavour_description:
type: string
substitution_mappings:
node_type: company.provider.VNF
properties:
flavour_id: simple
requirements:
virtual_link_external: []
node_templates:
VNF:
type: company.provider.VNF
properties:
flavour_description: A simple flavour
interfaces:
Vnflcm:
instantiate_end:
implementation: mgmt-drivers-cnf-nodeport
terminate_end:
implementation: mgmt-drivers-cnf-nodeport
scale_start:
implementation: mgmt-drivers-cnf-nodeport
scale_end:
implementation: mgmt-drivers-cnf-nodeport
heal_start:
implementation: mgmt-drivers-cnf-nodeport
heal_end:
implementation: mgmt-drivers-cnf-nodeport
artifacts:
mgmt-drivers-cnf-nodeport:
description: Management driver for cnf nodeport setting
type: tosca.artifacts.Implementation.Python
file: Scripts/cnf_nodeport_mgmt.py
VDU1:
type: tosca.nodes.nfv.Vdu.Compute
properties:
name: vdu1-simple
description: kubernetes controller resource as VDU
vdu_profile:
min_number_of_instances: 1
max_number_of_instances: 2
policies:
- scaling_aspects:
type: tosca.policies.nfv.ScalingAspects
properties:
aspects:
vdu1_aspect:
name: vdu1_aspect
description: vdu1 scaling aspect
max_scale_level: 1
step_deltas:
- delta_1
- VDU1_initial_delta:
type: tosca.policies.nfv.VduInitialDelta
properties:
initial_delta:
number_of_instances: 1
targets: [ VDU1 ]
- VDU1_scaling_aspect_deltas:
type: tosca.policies.nfv.VduScalingAspectDeltas
properties:
aspect: vdu1_aspect
deltas:
delta_1:
number_of_instances: 1
targets: [ VDU1 ]
- instantiation_levels:
type: tosca.policies.nfv.InstantiationLevels
properties:
levels:
instantiation_level_1:
description: Smallest size
scale_info:
vdu1_aspect:
scale_level: 0
instantiation_level_2:
description: Largest size
scale_info:
vdu1_aspect:
scale_level: 1
default_level: instantiation_level_1
- vdu1_instantiation_levels:
type: tosca.policies.nfv.VduInstantiationLevels
properties:
levels:
instantiation_level_1:
number_of_instances: 1
instantiation_level_2:
number_of_instances: 2
targets: [ VDU1 ]

@@ -0,0 +1,31 @@
tosca_definitions_version: tosca_simple_yaml_1_2
description: Sample VNF
imports:
- etsi_nfv_sol001_common_types.yaml
- etsi_nfv_sol001_vnfd_types.yaml
- helloworld3_types.yaml
- helloworld3_df_simple.yaml
topology_template:
inputs:
selected_flavour:
type: string
description: VNF deployment flavour selected by the consumer. It is provided in the API
node_templates:
VNF:
type: company.provider.VNF
properties:
flavour_id: { get_input: selected_flavour }
descriptor_id: b1bb0ce7-ebca-4fa7-95ed-4840d70a1177
provider: Company
product_name: Sample VNF
software_version: '1.0'
descriptor_version: '1.0'
vnfm_info:
- Tacker
requirements:
#- virtual_link_external # mapped in lower-level templates
#- virtual_link_internal # mapped in lower-level templates

@@ -0,0 +1,53 @@
tosca_definitions_version: tosca_simple_yaml_1_2
description: VNF type definition
imports:
- etsi_nfv_sol001_common_types.yaml
- etsi_nfv_sol001_vnfd_types.yaml
node_types:
company.provider.VNF:
derived_from: tosca.nodes.nfv.VNF
properties:
descriptor_id:
type: string
constraints: [ valid_values: [ b1bb0ce7-ebca-4fa7-95ed-4840d70a1177 ] ]
default: b1bb0ce7-ebca-4fa7-95ed-4840d70a1177
descriptor_version:
type: string
constraints: [ valid_values: [ '1.0' ] ]
default: '1.0'
provider:
type: string
constraints: [ valid_values: [ 'Company' ] ]
default: 'Company'
product_name:
type: string
constraints: [ valid_values: [ 'Sample VNF' ] ]
default: 'Sample VNF'
software_version:
type: string
constraints: [ valid_values: [ '1.0' ] ]
default: '1.0'
vnfm_info:
type: list
entry_schema:
type: string
constraints: [ valid_values: [ Tacker ] ]
default: [ Tacker ]
flavour_id:
type: string
constraints: [ valid_values: [ simple ] ]
default: simple
flavour_description:
type: string
default: ""
requirements:
- virtual_link_external:
capability: tosca.capabilities.nfv.VirtualLinkable
- virtual_link_internal:
capability: tosca.capabilities.nfv.VirtualLinkable
interfaces:
Vnflcm:
type: tosca.interfaces.nfv.Vnflcm

@@ -0,0 +1,28 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: vdu1-simple
namespace: default
labels:
app: nginx
spec:
replicas: 1
selector:
matchLabels:
app: nginx
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx:1.7.9
imagePullPolicy: IfNotPresent
ports:
- containerPort: 80
- name: kuryr-demo
image: celebdor/kuryr-demo
imagePullPolicy: IfNotPresent
ports:
- containerPort: 8080

@@ -0,0 +1,16 @@
apiVersion: v1
kind: Service
metadata:
name: nginx-service
namespace: default
spec:
type: NodePort
selector:
app: nginx
ports:
- port: 80
nodePort: 30422
name: http
- port: 8080
nodePort: 32019
name: tcp

@@ -0,0 +1,11 @@
apiVersion: v1
kind: Service
metadata:
name: nginx-service
namespace: kube-system
spec:
type: NodePort
selector:
app: nginx
ports:
- port: 80

@@ -0,0 +1,29 @@
TOSCA-Meta-File-Version: 1.0
Created-by: dummy_user
CSAR-Version: 1.1
Entry-Definitions: Definitions/helloworld3_top.vnfd.yaml
Name: Files/kubernetes/deployment.yaml
Content-Type: application/yaml
Algorithm: SHA-256
Hash: 3e87eebb432c3ff59f54d6dddd12a7adff3b8d7b1e272d5b5de3df5ae9aec4d4
Name: Files/kubernetes/service_with_nodeport.yaml
Content-Type: application/yaml
Algorithm: SHA-256
Hash: 2d76d6ef7500f90bff63e519c9740516dbe258134f8cbeada7bc29d43bd515b6
Name: Files/kubernetes/service_without_nodeport.yaml
Content-Type: application/yaml
Algorithm: SHA-256
Hash: 9994a5a5f630c41d178ac58fff93140d3509da5f01518a7bd0e16db70a545c5e
Name: Scripts/configure_lb.sh
Content-Type: application/x-shellscript
Algorithm: SHA-256
Hash: 45a01e214c06a66dc6b7a018650e292a8cc216e7db4cb638712852a843679d0d
Name: Scripts/cnf_nodeport_mgmt.py
Content-Type: text/x-python
Algorithm: SHA-256
Hash: 927d34e813f635f069ed352914aa7260e26c6b011560962aee5eb49e9faed927

@@ -0,0 +1,128 @@
#!/bin/bash
set -o xtrace
###############################################################################
#
# This script will set nodePort entries in the external LoadBalancer.
# Its operation has been confirmed on the Ubuntu environment below.
#
# * OS type : Ubuntu(64 bit)
# * OS version : 20.04 LTS
# * OS architecture : amd64 (x86_64)
# * Disk/Ram size : 40GB/2GB
# * Pre setup user : ubuntu
#
###############################################################################
#==============================================================================
# Usage Definition
#==============================================================================
function usage {
sudo cat <<_EOT_
$(basename ${0}) is a script to set nodePort info into the external loadbalancer.
Usage:
$(basename ${0}) [-d] [-o] [-i <nodePort info>]
[-a <add configuration flag>]
Description:
This script is to set nodePort info into external loadbalancer's
configuration.
Options:
-i all nodePort info(use "#" to separate)
-a add/delete configuration flag
--help, -h Print this
_EOT_
exit 1
}
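# Illustrative example invocation (the nodePort values are only samples;
# 30422/32019 come from the sample service_with_nodeport.yaml):
#   bash configure_lb.sh -i "default,nginx-service,30422,32019#kube-system,nginx-service,31000" -a True
# Passing the same info string with "-a False" removes those entries again.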
declare -g DEBUG_MODE="False"
declare -g OUTPUT_LOGFILE="False"
# nodePort info
declare -g NODEPORTSTR=${NODEPORTSTR:-}
declare -a -g NODEPORTS=${NODEPORTS:-}
declare -g ADD_CONFIGURE_FLAG="True"
if [ "$OPTIND" = 1 ]; then
while getopts doi:a:h OPT; do
case $OPT in
i)
NODEPORTSTR=$OPTARG # default,test,8080,8011#mynamespace,nginx,8012
NODEPORTS=(${NODEPORTSTR//#/ })
;;
a)
ADD_CONFIGURE_FLAG=$OPTARG
;;
h)
echo "h option. display help"
usage
;;
\?)
echo "Try to enter the h option." 1>&2
;;
esac
done
else
echo "No installed getopts-command." 1>&2
exit 1
fi
# Modify Haproxy
#----------------
function add_haproxy_conf {
for(( i=0;i<${#NODEPORTS[@]};i++)); do
split_node_port=(${NODEPORTS[i]//,/ })
cat <<EOF | sudo tee -a /etc/haproxy/haproxy.cfg >/dev/null
frontend ${split_node_port[0]}_${split_node_port[1]}
mode tcp
EOF
unset split_node_port[0]
unset split_node_port[1]
all_node_port=("${split_node_port[@]}")
for(( j=0;j<${#all_node_port[@]};j++)); do
cat <<EOF | sudo tee -a /etc/haproxy/haproxy.cfg >/dev/null
bind *:${all_node_port[j]}
EOF
done
cat <<EOF | sudo tee -a /etc/haproxy/haproxy.cfg >/dev/null
option tcplog
default_backend kubernetes-nodeport
EOF
done
}
function delete_haproxy_conf {
for(( i=0;i<${#NODEPORTS[@]};i++)); do
split_node_port=(${NODEPORTS[i]//,/ })
start_str=${split_node_port[0]}_${split_node_port[1]}
end_str='default_backend kubernetes-nodeport'
start_line_no=`grep -n "$start_str" /etc/haproxy/haproxy.cfg | \
cut -d ":" -f 1`
end_line_no=`grep -n "$end_str" /etc/haproxy/haproxy.cfg | head -1 |\
cut -d ":" -f 1`
sudo sed -i "${start_line_no},${end_line_no}d" /etc/haproxy/haproxy.cfg
done
}
function restart_haproxy {
sudo systemctl restart haproxy
sudo systemctl status haproxy | grep Active
result=$(ss -lnt |grep -E "8383")
if [[ -z $result ]]; then
echo 'restart haproxy failed!'
exit 255
fi
}
# Main
# ____
# set config file
if [[ $ADD_CONFIGURE_FLAG == "True" ]]; then
add_haproxy_conf
else
delete_haproxy_conf
fi
restart_haproxy

@@ -0,0 +1,278 @@
#!/bin/bash
set -o xtrace
###############################################################################
#
# This script will install an external LoadBalancer.
# Its operation has been confirmed on the Ubuntu environment below.
#
# * OS type : Ubuntu(64 bit)
# * OS version : 20.04 LTS
# * OS architecture : amd64 (x86_64)
# * Disk/Ram size : 40GB/2GB
# * Pre setup user : ubuntu
#
###############################################################################
#==============================================================================
# Usage Definition
#==============================================================================
function usage {
sudo cat <<_EOT_
$(basename ${0}) is a script to install the external loadbalancer.
Usage:
$(basename ${0}) [-d] [-o] [-m <master ip address>]
[-w <worker ip address>]
Description:
This script is to install external loadbalancer and set
loadbalancer's configuration.
Options:
-m all master nodes ip(use "," to separate)
-w all worker nodes ip(use "," to separate)
--help, -h Print this
_EOT_
exit 1
}
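# Illustrative example invocation (the addresses are only samples):
#   bash install_external_lb.sh -m 192.168.120.17,192.168.120.18 -w 192.168.120.2,192.168.120.3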
declare -g DEBUG_MODE="False"
declare -g OUTPUT_LOGFILE="False"
# master/worker ip
declare -g MASTER_IPADDRS=${MASTER_IPADDRS:-}
declare -a -g MASTER_IPS=${MASTER_IPS:-}
declare -g WORKER_IPADDRS=${WORKER_IPADDRS:-}
declare -a -g WORKER_IPS=${WORKER_IPS:-}
if [ "$OPTIND" = 1 ]; then
while getopts dom:w:h OPT; do
case $OPT in
m)
MASTER_IPADDRS=$OPTARG # 192.168.120.17,192.168.120.18
MASTER_IPS=(${MASTER_IPADDRS//,/ })
;;
w)
WORKER_IPADDRS=$OPTARG # 192.168.120.2,192.168.120.3
WORKER_IPS=(${WORKER_IPADDRS//,/ })
;;
h)
echo "h option. display help"
usage
;;
\?)
echo "Try to enter the h option." 1>&2
;;
esac
done
else
echo "No installed getopts-command." 1>&2
exit 1
fi
# Install Haproxy
#----------------
function install_haproxy {
REPOS_UPDATED=False apt_get_update
apt_get install haproxy
}
function modify_haproxy_conf {
cat <<EOF | sudo tee -a /etc/haproxy/haproxy.cfg >/dev/null
frontend kubernetes-apiserver
mode tcp
bind *:8383
option tcplog
default_backend kubernetes-apiserver
backend kubernetes-apiserver
mode tcp
balance roundrobin
EOF
for master_ip in ${MASTER_IPS[@]}; do
split_ips=(${master_ip//./ })
cat <<EOF | sudo tee -a /etc/haproxy/haproxy.cfg >/dev/null
server master${split_ips[3]} $master_ip:6443 check
EOF
done
cat <<EOF | sudo tee -a /etc/haproxy/haproxy.cfg >/dev/null
backend kubernetes-nodeport
mode tcp
balance roundrobin
EOF
for master_ip in ${MASTER_IPS[@]}; do
split_ips=(${master_ip//./ })
cat <<EOF | sudo tee -a /etc/haproxy/haproxy.cfg >/dev/null
server master${split_ips[3]} $master_ip check
EOF
done
for worker_ip in ${WORKER_IPS[@]}; do
split_ips=(${worker_ip//./ })
cat <<EOF | sudo tee -a /etc/haproxy/haproxy.cfg >/dev/null
server worker${split_ips[3]} $worker_ip check
EOF
done
}
function start_haproxy {
sudo systemctl enable haproxy
sudo systemctl start haproxy
sudo systemctl status haproxy | grep Active
result=$(ss -lnt |grep -E "8383")
if [[ -z $result ]]; then
sudo systemctl restart haproxy
fi
}
# Install Kubectl
#-------------------
function install_kubectl {
REPOS_UPDATED=False apt_get_update
sudo apt_get install -y apt-transport-https curl
result=`curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | \
sudo apt-key add -`
if [[ $result != "OK" ]]; then
exit 0
fi
echo "deb https://apt.kubernetes.io/ kubernetes-xenial main" | \
sudo tee -a /etc/apt/sources.list.d/kubernetes.list
apt_get update
apt_get install -y kubectl
mkdir -p $HOME/.kube
touch $HOME/.kube/config
sudo apt-get install mlocate
locate bash_completion
source /usr/share/bash-completion/bash_completion
source <(kubectl completion bash)
}
# Set common functions
#
# Refer: devstack project functions-common
#-----------------------------------------
function apt_get_update {
if [[ "$REPOS_UPDATED" == "True" ]]; then
return
fi
local sudo="sudo"
[[ "$(id -u)" = "0" ]] && sudo="env"
# time all the apt operations
time_start "apt-get-update"
local update_cmd="sudo apt-get update"
if ! timeout 300 sh -c "while ! $update_cmd; do sleep 30; done"; then
die $LINENO "Failed to update apt repos, we're dead now"
fi
REPOS_UPDATED=True
# stop the clock
time_stop "apt-get-update"
}
function time_start {
local name=$1
local start_time=${_TIME_START[$name]}
if [[ -n "$start_time" ]]; then
die $LINENO \
"Trying to start the clock on $name, but it's already been started"
fi
_TIME_START[$name]=$(date +%s%3N)
}
function time_stop {
local name
local end_time
local elapsed_time
local total
local start_time
name=$1
start_time=${_TIME_START[$name]}
if [[ -z "$start_time" ]]; then
die $LINENO \
"Trying to stop the clock on $name, but it was never started"
fi
end_time=$(date +%s%3N)
elapsed_time=$(($end_time - $start_time))
total=${_TIME_TOTAL[$name]:-0}
# reset the clock so we can start it in the future
_TIME_START[$name]=""
_TIME_TOTAL[$name]=$(($total + $elapsed_time))
}
function apt_get {
local xtrace result
xtrace=$(set +o | grep xtrace) # set +o xtrace
set +o xtrace
[[ "$OFFLINE" = "True" || -z "$@" ]] && return
local sudo="sudo"
[[ "$(id -u)" = "0" ]] && sudo="env"
# time all the apt operations
time_start "apt-get"
$xtrace
$sudo DEBIAN_FRONTEND=noninteractive \
http_proxy=${http_proxy:-} https_proxy=${https_proxy:-} \
no_proxy=${no_proxy:-} \
apt-get --option "Dpkg::Options::=--force-confold" \
--assume-yes "$@" < /dev/null
result=$?
# stop the clock
time_stop "apt-get"
return $result
}
# Pre preparations
# ________________
function check_OS {
. /etc/os-release
if [[ $PRETTY_NAME =~ "Ubuntu 20.04" ]]; then
os_architecture=`uname -a | grep 'x86_64'`
if [[ $os_architecture == "" ]]; then
echo "Your OS does not support at present."
echo "It only supports x86_64."
fi
else
echo "Your OS does not support at present."
echo "It only supports Ubuntu 20.04.1 LTS."
fi
}
function set_sudoers {
echo "ubuntu ALL=(ALL) NOPASSWD: ALL" | sudo tee /etc/sudoers.d/ubuntu
}
function set_apt-conf_proxy {
sudo touch /etc/apt/apt.conf.d/proxy.conf
cat <<EOF | sudo tee /etc/apt/apt.conf.d/proxy.conf >/dev/null
Acquire::http::Proxy "${http_proxy}";
Acquire::https::Proxy "${https_proxy}";
EOF
}
# Main
# ____
# pre preparations
set_apt-conf_proxy
set_sudoers
check_OS
# install haproxy and set config file
install_haproxy
modify_haproxy_conf
start_haproxy
# install kubectl
install_kubectl

@@ -0,0 +1,832 @@
# Copyright (C) 2021 FUJITSU
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ipaddress
import os
import time
import eventlet
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import uuidutils
import paramiko
import yaml
from tacker.common import cmd_executer
from tacker.common import exceptions
from tacker.common import log
from tacker.db.db_base import CommonDbMixin
from tacker.db.nfvo import nfvo_db
from tacker.extensions import nfvo
from tacker.nfvo.nfvo_plugin import NfvoPlugin
from tacker import objects
from tacker.vnfm.infra_drivers.openstack import heat_client as hc
from tacker.vnfm.mgmt_drivers import vnflcm_abstract_driver
from tacker.vnfm import vim_client
COMMAND_WAIT_RETRY_TIME = 30
CONF = cfg.CONF
CONNECT_REMOTE_SERVER_RETRY_COUNT = 4
K8S_CMD_TIMEOUT = 30
K8S_DEPLOY_TIMEOUT = 300
K8S_INSTALL_TIMEOUT = 2700
LOG = logging.getLogger(__name__)
ROLE_MASTER = 'master'
ROLE_WORKER = 'worker'
SERVER_WAIT_COMPLETE_TIME = 240
TOKEN_CREATE_WAIT_TIME = 30
class KubesprayMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
def get_type(self):
return 'mgmt-drivers-kubespray'
def get_name(self):
return 'mgmt-drivers-kubespray'
def get_description(self):
return 'Tacker Kubespray VNFMgmt Driver'
@log.log
def instantiate_start(self, context, vnf_instance,
instantiate_vnf_request, grant,
grant_request, **kwargs):
pass
def _get_vim(self, context, vim_connection_info):
vim_client_obj = vim_client.VimClient()
if vim_connection_info:
vim_id = vim_connection_info[0].vim_id
access_info = vim_connection_info[0].access_info
if access_info:
region_name = access_info.get('region')
else:
region_name = None
else:
vim_id = None
region_name = None
try:
vim_res = vim_client_obj.get_vim(
context, vim_id, region_name=region_name)
except nfvo.VimNotFoundException:
raise exceptions.VimConnectionNotFound(vim_id=vim_id)
vim_res['vim_auth'].update({'region': region_name})
vim_info = {'id': vim_res['vim_id'], 'vim_id': vim_res['vim_id'],
'vim_type': vim_res['vim_type'],
'access_info': vim_res['vim_auth']}
return vim_info
def _get_vim_connection_info(self, context, instantiate_vnf_req):
vim_info = self._get_vim(
context, instantiate_vnf_req.vim_connection_info)
vim_connection_info = objects.VimConnectionInfo.obj_from_primitive(
vim_info, context)
return vim_connection_info
def _check_is_cidr(self, key, value, cidr_str):
# instantiate: check cidr
try:
ipaddress.ip_network(cidr_str)
except ValueError:
LOG.error('The {value} of {key} in the '
'additionalParams is invalid.'.format(
value=value, key=key))
raise exceptions.MgmtDriverParamInvalid(param=value)
def _check_value_exist(self, attr_list, value, key):
for attr in attr_list:
if not value.get(attr):
LOG.error(
'The {} of {} in the '
'additionalParams cannot'
' be None.'.format(attr, key))
raise exceptions.MgmtDriverNotFound(
param=attr)
def _check_input_parameters(self, additional_param, vnf_package_path):
if not additional_param:
LOG.error('The kubernetes cluster info cannot be None'
' in additionalParams.')
raise exceptions.MgmtDriverOtherError(
error_message="The kubernetes cluster info"
" cannot be None in additionalParams.")
if not isinstance(additional_param, dict):
LOG.error('The format of kubernetes cluster info in '
'additionalParams is invalid. It must be dict.')
raise exceptions.MgmtDriverOtherError(
error_message="The format of kubernetes cluster info in "
"additionalParams is invalid. It must be dict.")
for key, value in additional_param.items():
attr_list = []
if key not in ('proxy', 'external_lb_param', 'vim_name'):
attr_list.extend(['username', 'password'])
if key in ('master_node', 'worker_node', 'external_lb_param'):
attr_list.extend(['ssh_cp_name'])
if key == 'ansible':
attr_list.extend(['ip_address', 'kubespray_root_path',
'transferring_inventory_path'])
if key == 'external_lb_param':
attr_list.extend(['ssh_username', 'ssh_password',
'script_path'])
if value.get('script_path'):
abs_script_path = os.path.join(
vnf_package_path, value.get('script_path'))
if not os.path.exists(abs_script_path):
LOG.error('The path of external_lb_param'
' script is invalid.')
raise exceptions.MgmtDriverOtherError(
error_message="The path of external_lb_param"
" script is invalid")
if key in ('master_node', 'ansible'):
for attr in ['pod_cidr', 'cluster_cidr', 'ip_address']:
if value.get(attr):
self._check_is_cidr(
key, attr, value.get(attr))
if attr_list:
self._check_value_exist(attr_list, value, key)
def _get_ssh_ip_and_nic_ip(self, heatclient, stack_id, node):
resource_info = heatclient.resources.get(
stack_id=stack_id,
resource_name=node.get('ssh_cp_name'))
if resource_info.attributes.get('floating_ip_address'):
ssh_ip = resource_info.attributes.get('floating_ip_address')
else:
ssh_ip = resource_info.attributes.get(
'fixed_ips')[0].get('ip_address')
if not ssh_ip:
LOG.error("Failed to get the node's ssh ip.")
raise exceptions.MgmtDriverOtherError(
error_message="Failed to get"
" the node's ssh ip.")
if not node.get('nic_cp_name'):
nic_ip = ssh_ip
else:
nic_ip = heatclient.resources.get(
stack_id=stack_id,
resource_name=node.get('nic_cp_name')).attributes.get(
'fixed_ips')[0].get('ip_address')
if not nic_ip:
LOG.error("Failed to get the node's nic ip.")
raise exceptions.MgmtDriverOtherError(
error_message="Failed to get"
" the node's nic ip.")
return ssh_ip, nic_ip
def _get_group_resources_list(
self, heatclient, stack_id, node, additional_params):
# get group resources list
nest_resources_list = heatclient.resources.list(stack_id=stack_id)
group_stack_name = node.get("aspect_id")
group_stack_id = ""
for nest_resources in nest_resources_list:
if nest_resources.resource_name == group_stack_name:
group_stack_id = nest_resources.physical_resource_id
if not group_stack_id:
LOG.error('No stack id matching the group {} was found.'.format(
group_stack_name))
raise exceptions.MgmtDriverOtherError(
error_message='No stack id matching the'
' group {} was found.'.format(group_stack_name))
group_resources_list = heatclient.resources.list(
stack_id=group_stack_id)
return group_resources_list
def _get_install_info_for_k8s_node(self, nest_stack_id, node,
additional_params, heatclient):
# instantiate: get k8s ssh ips
vm_dict_list = []
# get ssh_ip and nic_ip from heat, and set value into vm_dict
if not node.get('aspect_id'):
ssh_ip, nic_ip = self._get_ssh_ip_and_nic_ip(
heatclient, nest_stack_id, node)
vm_dict = {
"ssh_ip": ssh_ip,
"nic_ip": nic_ip
}
vm_dict_list.append(vm_dict)
else:
group_resources_list = self._get_group_resources_list(
heatclient, nest_stack_id, node, additional_params)
for group_resource in group_resources_list:
stack_id = group_resource.physical_resource_id
ssh_ip, nic_ip = self._get_ssh_ip_and_nic_ip(
heatclient, stack_id, node)
vm_dict = {
"ssh_ip": ssh_ip,
"nic_ip": nic_ip
}
vm_dict_list.append(vm_dict)
return vm_dict_list
def _set_lb_info(self, nest_stack_id, external_lb_param, master_node,
heatclient):
# get ssh_ip and cluster_ip from heat, and set value into vm_dict
ssh_ip, _ = self._get_ssh_ip_and_nic_ip(
heatclient, nest_stack_id, external_lb_param)
external_lb_param['pod_cidr'] = master_node.get('pod_cidr', '')
external_lb_param['cluster_cidr'] = master_node.get(
'cluster_cidr', '')
external_lb_param['ssh_ip'] = ssh_ip
external_lb_param['cluster_ip'] = ssh_ip
def _init_commander_and_set_script(self, user, password, host,
timeout, vnf_package_path=None,
script_path=None, token_flag=False):
retry = CONNECT_REMOTE_SERVER_RETRY_COUNT
while retry > 0:
try:
if (vnf_package_path and script_path) or token_flag:
connect = paramiko.Transport(host, 22)
connect.connect(username=user, password=password)
sftp = paramiko.SFTPClient.from_transport(connect)
if vnf_package_path and script_path:
sftp.put(os.path.join(vnf_package_path, script_path),
"/tmp/{}".format(
script_path.replace('Scripts', '')))
if token_flag:
fname = 'create_admin_token.yaml'
sftp.put(os.path.join(
os.path.dirname(os.path.abspath(__file__)),
"../../../samples/mgmt_driver/{}".format(fname)),
"/tmp/{}".format(fname))
connect.close()
commander = cmd_executer.RemoteCommandExecutor(
user=user, password=password, host=host,
timeout=timeout)
return commander
except (exceptions.NotAuthorized, paramiko.SSHException,
paramiko.ssh_exception.NoValidConnectionsError) as e:
LOG.debug(e)
retry -= 1
if retry == 0:
LOG.error(e)
raise paramiko.SSHException()
time.sleep(SERVER_WAIT_COMPLETE_TIME)
def _send_or_receive_file(self, host, user, password,
remote_file, local_file, operation):
connect = paramiko.Transport(host, 22)
connect.connect(username=user, password=password)
sftp = paramiko.SFTPClient.from_transport(connect)
if operation == 'receive':
sftp.get(remote_file, local_file)
else:
sftp.put(local_file, remote_file)
connect.close()
def _execute_command(self, commander, ssh_command, timeout, type, retry):
eventlet.monkey_patch()
while retry >= 0:
try:
with eventlet.Timeout(timeout, True):
result = commander.execute_command(
ssh_command, input_data=None)
break
except eventlet.timeout.Timeout:
LOG.debug('Timed out while executing command: '
'{}.'.format(ssh_command))
retry -= 1
if retry < 0:
LOG.error('Timed out while executing command: '
'{}.'.format(ssh_command))
raise exceptions.MgmtDriverOtherError(
error_message='Timed out while executing command: '
'{}.'.format(ssh_command))
time.sleep(COMMAND_WAIT_RETRY_TIME)
if type == 'common':
if result.get_return_code() != 0:
err = result.get_stderr()
LOG.error(err)
raise exceptions.MgmtDriverRemoteCommandError(err_info=err)
elif type == 'ansible':
if result.get_return_code() != 0 \
and 'No such file or directory' in result.get_stderr()[0]:
return False
else:
error_message = 'The transferring_inventory_path already ' \
'exists on the kubespray server. Please check' \
' your path.'
LOG.error(error_message)
raise exceptions.MgmtDriverRemoteCommandError(
err_info=error_message)
elif type == 'install':
if result.get_return_code() != 0:
for error in result.get_stdout():
if 'Timeout (12s) waiting for ' \
'privilege escalation prompt' in error and \
retry > 0:
self._execute_command(commander, ssh_command,
timeout, 'install', 0)
break
else:
err = result.get_stderr()
LOG.error(err)
raise exceptions.MgmtDriverRemoteCommandError(
err_info=err)
return result.get_stdout()
def _create_hosts_yaml(self, master_node, master_vm_dict_list,
worker_node, worker_vm_dict_list):
hosts_yaml_content = {
'all': {
'hosts': {},
'children': {
'kube-master': {'hosts': {}},
'kube-node': {'hosts': {}},
'etcd': {'hosts': {}},
'k8s-cluster': {
'children': {'kube-master': None, 'kube-node': None}},
'calico-rr': {'hosts': {}}}}}
for master_vm in master_vm_dict_list:
key = 'master' + master_vm.get('nic_ip').split('.')[-1]
hosts_yaml_content['all']['hosts'][key] = {
'ansible_host': master_vm.get('ssh_ip'),
'ip': master_vm.get('nic_ip'),
'ansible_user': master_node.get('username'),
'ansible_password': master_node.get('password'),
}
hosts_yaml_content['all']['children']['kube-master'][
'hosts'][key] = None
hosts_yaml_content['all']['children']['etcd'][
'hosts'][key] = None
for worker_vm in worker_vm_dict_list:
key = 'worker' + worker_vm.get('nic_ip').split('.')[-1]
hosts_yaml_content['all']['hosts'][key] = {
'ansible_host': worker_vm.get('ssh_ip'),
'ip': worker_vm.get('nic_ip'),
'ansible_user': worker_node.get('username'),
'ansible_password': worker_node.get('password'),
}
hosts_yaml_content['all']['children']['kube-node'][
'hosts'][key] = None
return hosts_yaml_content
def _install_k8s_cluster_and_set_config(
self, master_node, worker_node, proxy, ansible,
external_lb_param, master_vm_dict_list, worker_vm_dict_list):
"""Install Kubernetes Cluster Function
It will use Kubespray which is installed in advance to install
a Kubernetes Cluster.
At present, Kubespray's version is v2.16.0. You can get detailed
information from the following url.
https://github.com/kubernetes-sigs/kubespray/tree/v2.16.0
"""
# get mtu value
master_commander = self._init_commander_and_set_script(
master_node.get('username'), master_node.get('password'),
master_vm_dict_list[0].get('ssh_ip'), K8S_CMD_TIMEOUT)
ssh_command = "ip a | grep '%(nic_ip)s' -B 2 | " \
"grep 'mtu' | awk '{print $5}'" % \
{'nic_ip': master_vm_dict_list[0].get('nic_ip')}
mtu_value = self._execute_command(
master_commander, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)[0].replace('\n', '')
calico_veth_mtu = int(mtu_value) - 20
master_commander.close_session()
# create inventory/hosts.yaml
ansible_commander = self._init_commander_and_set_script(
ansible.get('username'), ansible.get('password'),
ansible.get('ip_address'), K8S_CMD_TIMEOUT)
ssh_command = "ls -l {}".format(
ansible.get('transferring_inventory_path'))
file_exists_flag = self._execute_command(
ansible_commander, ssh_command, K8S_CMD_TIMEOUT, 'ansible', 0)
if not file_exists_flag:
ssh_command = 'cp -r {kubespray_root_path}/inventory/sample' \
' {transferring_inventory_path}'.format(
kubespray_root_path=ansible.get(
'kubespray_root_path'),
transferring_inventory_path=ansible.get(
'transferring_inventory_path'))
self._execute_command(
ansible_commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0)
hosts_yaml_content = self._create_hosts_yaml(
master_node, master_vm_dict_list,
worker_node, worker_vm_dict_list)
local_hosts_yaml_path = '/tmp/hosts.yaml'
with open(local_hosts_yaml_path, 'w', encoding='utf-8') as nf:
yaml.safe_dump(hosts_yaml_content, nf, default_flow_style=False)
remote_hosts_yaml_path = ansible.get(
'transferring_inventory_path') + '/hosts.yaml'
self._send_or_receive_file(
ansible.get('ip_address'), ansible.get('username'),
ansible.get('password'), remote_hosts_yaml_path,
local_hosts_yaml_path, 'send')
# set calico mtu value
calico_file_path = ansible.get(
'transferring_inventory_path') + \
'/group_vars/k8s_cluster/k8s-net-calico.yml'
ssh_command = 'sed -i "s/\\# calico_mtu: 1500/calico_mtu: ' \
'{mtu_value}/g" {calico_file_path}'.format(
mtu_value=mtu_value,
calico_file_path=calico_file_path)
self._execute_command(ansible_commander, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)
ssh_command = 'sed -i "s/\\# calico_veth_mtu: 1440/calico_veth_mtu:' \
' {calico_veth_mtu}/g" {calico_file_path}'.format(
calico_veth_mtu=calico_veth_mtu,
calico_file_path=calico_file_path)
self._execute_command(ansible_commander, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)
# set pod and service cidr information
if external_lb_param.get('cluster_cidr') and \
external_lb_param.get('pod_cidr'):
k8s_cluster_file_path = ansible.get(
'transferring_inventory_path') + \
'/group_vars/k8s_cluster/k8s-cluster.yml'
cluster_cidr = external_lb_param.get(
'cluster_cidr').replace('/', '\\/')
ssh_command = 'sed -i "s/kube_service_addresses:' \
' 10.233.0.0\\/18/' \
'kube_service_addresses: {k8s_service_address}/g"' \
' {k8s_cluster_file_path}'.format(
k8s_service_address=cluster_cidr,
k8s_cluster_file_path=k8s_cluster_file_path)
self._execute_command(ansible_commander, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)
pod_cidr = external_lb_param.get('pod_cidr').replace('/', '\\/')
ssh_command = 'sed -i "s/kube_pods_subnet: 10.233.64.0\\/18/' \
'kube_pods_subnet: {pod_cidr}/g"' \
' {k8s_cluster_file_path}'.format(
pod_cidr=pod_cidr,
k8s_cluster_file_path=k8s_cluster_file_path)
self._execute_command(ansible_commander, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)
# set proxy
if proxy:
proxy_file_path = ansible.get(
'transferring_inventory_path') + \
'/group_vars/all/all.yml'
http_proxy = proxy.get('http_proxy').replace('/', '\\/')
https_proxy = proxy.get('https_proxy').replace('/', '\\/')
ssh_command = 'sed -i "s/\\# http_proxy: \\"\\"/' \
'http_proxy: {http_proxy}/g"' \
' {proxy_file_path}'.format(
http_proxy=http_proxy,
proxy_file_path=proxy_file_path)
self._execute_command(ansible_commander, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)
ssh_command = 'sed -i "s/\\# https_proxy: \\"\\"/' \
'https_proxy: {https_proxy}/g"' \
' {proxy_file_path}'.format(
https_proxy=https_proxy,
proxy_file_path=proxy_file_path)
self._execute_command(ansible_commander, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)
ansible_commander.close_session()
# install k8s cluster
install_timeout = K8S_INSTALL_TIMEOUT * (
len(master_vm_dict_list) + len(worker_vm_dict_list))
ansible_commander = self._init_commander_and_set_script(
ansible.get('username'), ansible.get('password'),
ansible.get('ip_address'), install_timeout)
cluster_yaml_path = ansible.get(
'kubespray_root_path') + '/cluster.yml'
ssh_command = 'ansible-playbook -i {}/hosts.yaml --become' \
' --become-user=root {}'.format(
ansible.get('transferring_inventory_path'),
cluster_yaml_path)
self._execute_command(ansible_commander, ssh_command,
install_timeout, 'install', 1)
ansible_commander.close_session()
# get k8s bearer token
master_commander = self._init_commander_and_set_script(
master_node.get('username'), master_node.get('password'),
master_vm_dict_list[0].get('ssh_ip'), K8S_CMD_TIMEOUT,
token_flag=True)
ssh_command = "sudo kubectl create -f /tmp/create_admin_token.yaml"
self._execute_command(
master_commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0)
time.sleep(TOKEN_CREATE_WAIT_TIME)
ssh_command = "sudo kubectl get secret -n kube-system " \
"| grep '^admin-token' " \
"| awk '{print $1}' " \
"| xargs -i sudo kubectl get secret {} -n kube-system" \
" -ojsonpath={.data.token} | base64 -d"
bearer_token = self._execute_command(
master_commander, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)[0].replace('\n', '')
master_commander.close_session()
if os.path.exists(local_hosts_yaml_path):
os.remove(local_hosts_yaml_path)
return bearer_token
def _install_and_set_lb(self, external_lb_param, vnf_package_path, proxy,
master_vm_dict_list, worker_vm_dict_list,
master_node):
lb_commander = self._init_commander_and_set_script(
external_lb_param.get('ssh_username'),
external_lb_param.get('ssh_password'),
external_lb_param.get('ssh_ip'), K8S_DEPLOY_TIMEOUT,
vnf_package_path=vnf_package_path,
script_path=external_lb_param.get('script_path'))
master_ssh_ips_str = ','.join([vm_dict.get(
'nic_ip') for vm_dict in master_vm_dict_list])
worker_ssh_ips_str = ','.join([vm_dict.get(
'nic_ip') for vm_dict in worker_vm_dict_list])
if proxy.get('http_proxy') and proxy.get('https_proxy'):
ssh_command = \
"export http_proxy={http_proxy};" \
"export https_proxy={https_proxy};" \
"bash /tmp/{script_path} " \
"-m {master_ip} -w {worker_ip} ".format(
http_proxy=proxy.get('http_proxy'),
https_proxy=proxy.get('https_proxy'),
master_ip=master_ssh_ips_str,
worker_ip=worker_ssh_ips_str,
script_path=external_lb_param.get(
'script_path').replace('Scripts/', ''))
else:
ssh_command = \
"bash /tmp/{script_path} " \
"-m {master_ip} -w {worker_ip} ".format(
master_ip=master_ssh_ips_str,
worker_ip=worker_ssh_ips_str,
script_path=external_lb_param.get(
'script_path').replace('Scripts/', ''))
self._execute_command(
lb_commander, ssh_command, K8S_DEPLOY_TIMEOUT, 'common', 0)
lb_commander.close_session()
# copy k8s admin configuration file
master_commander = self._init_commander_and_set_script(
master_node.get('username'), master_node.get('password'),
master_vm_dict_list[0].get('ssh_ip'), K8S_CMD_TIMEOUT)
ssh_command = 'sudo cp /etc/kubernetes/admin.conf /tmp/config;' \
'sudo chown $(id -u):$(id -g) /tmp/config'
self._execute_command(master_commander, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)
ssh_command = "sed -i 's/:6443/:8383/' /tmp/config"
self._execute_command(master_commander, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)
master_commander.close_session()
remote_admin_file_path = local_admin_file_path = '/tmp/config'
self._send_or_receive_file(
master_vm_dict_list[0].get('ssh_ip'),
master_node.get('username'), master_node.get('password'),
remote_admin_file_path, local_admin_file_path, 'receive')
# send config file to lb server
lb_admin_file_path = '~/.kube/config'
if os.path.exists(local_admin_file_path):
self._send_or_receive_file(
external_lb_param.get('ssh_ip'),
external_lb_param.get('ssh_username'),
external_lb_param.get('ssh_password'),
remote_admin_file_path, local_admin_file_path, 'send')
lb_commander = self._init_commander_and_set_script(
external_lb_param.get('ssh_username'),
external_lb_param.get('ssh_password'),
external_lb_param.get('ssh_ip'), K8S_CMD_TIMEOUT)
ssh_command = "mv {} {}".format(remote_admin_file_path,
lb_admin_file_path)
self._execute_command(lb_commander, ssh_command,
K8S_CMD_TIMEOUT, 'common', 0)
lb_commander.close_session()
if os.path.exists(local_admin_file_path):
os.remove(local_admin_file_path)
def _create_vim(self, context, vnf_instance, external_lb_param,
bearer_token, vim_name):
server = 'https://' + external_lb_param.get('cluster_ip') + ':8383'
vim_info = {
'vim': {
'name': vim_name,
'auth_url': server,
'vim_project': {
'name': 'default'
},
'auth_cred': {
'bearer_token': bearer_token
},
'type': 'kubernetes',
'tenant_id': context.project_id
}
}
try:
nfvo_plugin = NfvoPlugin()
created_vim_info = nfvo_plugin.create_vim(context, vim_info)
except Exception as e:
LOG.error("Failed to register kubernetes vim: {}".format(e))
raise exceptions.MgmtDriverOtherError(
error_message="Failed to register kubernetes vim: {}".format(
e))
id = uuidutils.generate_uuid()
vim_id = created_vim_info.get('id')
vim_type = 'kubernetes'
access_info = {
'auth_url': server
}
vim_connection_info = objects.VimConnectionInfo(
id=id, vim_id=vim_id, vim_type=vim_type,
access_info=access_info, interface_info=None
)
vim_connection_infos = vnf_instance.vim_connection_info
vim_connection_infos.append(vim_connection_info)
vnf_instance.vim_connection_info = vim_connection_infos
vnf_instance.save()
def _get_vnf_package_path(self, context, vnfd_id):
return os.path.join(CONF.vnf_package.vnf_package_csar_path,
self._get_vnf_package_id(context, vnfd_id))
def _get_vnf_package_id(self, context, vnfd_id):
vnf_package = objects.VnfPackageVnfd.get_by_id(context, vnfd_id)
return vnf_package.package_uuid
@log.log
def instantiate_end(self, context, vnf_instance,
instantiate_vnf_request, grant,
grant_request, **kwargs):
# get vim_connect_info
if hasattr(instantiate_vnf_request, 'vim_connection_info'):
vim_connection_info = self._get_vim_connection_info(
context, instantiate_vnf_request)
else:
# In case of healing entire Kubernetes cluster, 'heal_end' method
# will call this method using 'vnf_instance.instantiated_vnf_info'
# as the 'instantiate_vnf_request', but there is no
# 'vim_connection_info' in it, so we should get
# 'vim_connection_info' from 'vnf_instance'.
vim_connection_info = self._get_vim_connection_info(
context, vnf_instance)
additional_param = instantiate_vnf_request.additional_params.get(
'k8s_cluster_installation_param', {})
vim_name = additional_param.get('vim_name')
master_node = additional_param.get('master_node', {})
worker_node = additional_param.get('worker_node', {})
proxy = additional_param.get('proxy', {})
ansible = additional_param.get('ansible', {})
external_lb_param = additional_param.get('external_lb_param', {})
vnf_package_path = self._get_vnf_package_path(
context, vnf_instance.vnfd_id)
self._check_input_parameters(additional_param, vnf_package_path)
nest_stack_id = vnf_instance.instantiated_vnf_info.instance_id
if not vim_name:
vim_name = 'kubernetes_vim_' + vnf_instance.id
# get k8s node vm list
access_info = vim_connection_info.access_info
heatclient = hc.HeatClient(access_info)
master_vm_dict_list = \
self._get_install_info_for_k8s_node(
nest_stack_id, master_node,
instantiate_vnf_request.additional_params,
heatclient)
worker_vm_dict_list = \
self._get_install_info_for_k8s_node(
nest_stack_id, worker_node,
instantiate_vnf_request.additional_params, heatclient)
# set LB vm's info
self._set_lb_info(nest_stack_id, external_lb_param, master_node,
heatclient)
# install k8s_cluster and set config
bearer_token = self._install_k8s_cluster_and_set_config(
master_node, worker_node, proxy, ansible, external_lb_param,
master_vm_dict_list, worker_vm_dict_list)
# Install and set ExternalLB
self._install_and_set_lb(external_lb_param, vnf_package_path, proxy,
master_vm_dict_list, worker_vm_dict_list,
master_node)
# create vim
self._create_vim(context, vnf_instance, external_lb_param,
bearer_token, vim_name)
@log.log
def terminate_start(self, context, vnf_instance,
terminate_vnf_request, grant,
grant_request, **kwargs):
pass
def _get_vim_by_name(self, context, k8s_vim_name):
common_db_api = CommonDbMixin()
result = common_db_api.get_by_name(
context, nfvo_db.Vim, k8s_vim_name)
if not result:
LOG.debug("Cannot find kubernetes "
"vim with name: {}".format(k8s_vim_name))
return result
@log.log
def terminate_end(self, context, vnf_instance,
terminate_vnf_request, grant,
grant_request, **kwargs):
# delete kubernetes vim
k8s_params = vnf_instance.instantiated_vnf_info.additional_params.get(
'k8s_cluster_installation_param', {})
k8s_vim_name = k8s_params.get('vim_name')
if not k8s_vim_name:
k8s_vim_name = 'kubernetes_vim_' + vnf_instance.id
vim_info = self._get_vim_by_name(
context, k8s_vim_name)
if vim_info:
nfvo_plugin = NfvoPlugin()
nfvo_plugin.delete_vim(context, vim_info.id)
# delete cluster info on ansible server
ansible = {}
if hasattr(terminate_vnf_request, 'additional_params'):
if terminate_vnf_request.additional_params:
if terminate_vnf_request.additional_params.get(
'ansible_username'):
ansible['username'] = \
terminate_vnf_request.additional_params.get(
'ansible_username')
if terminate_vnf_request.additional_params.get(
'ansible_password'):
ansible['password'] = \
terminate_vnf_request.additional_params.get(
'ansible_password')
else:
ansible = k8s_params.get('ansible')
else:
ansible = k8s_params.get('ansible')
commander = self._init_commander_and_set_script(
ansible.get('username'), ansible.get('password'),
ansible.get('ip_address'), K8S_CMD_TIMEOUT)
ssh_command = 'rm -rf {}'.format(
k8s_params.get('ansible').get('transferring_inventory_path'))
self._execute_command(
commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0)
ssh_command = 'rm -rf ~/.ssh/known_hosts'
self._execute_command(
commander, ssh_command, K8S_CMD_TIMEOUT, 'common', 0)
commander.close_session()
@log.log
def scale_start(self, context, vnf_instance,
scale_vnf_request, grant,
grant_request, **kwargs):
pass
@log.log
def scale_end(self, context, vnf_instance,
scale_vnf_request, grant,
grant_request, **kwargs):
pass
@log.log
def heal_start(self, context, vnf_instance,
heal_vnf_request, grant,
grant_request, **kwargs):
pass
@log.log
def heal_end(self, context, vnf_instance,
heal_vnf_request, grant,
grant_request, **kwargs):
pass
@log.log
def change_external_connectivity_start(
self, context, vnf_instance,
change_ext_conn_request, grant,
grant_request, **kwargs):
pass
@log.log
def change_external_connectivity_end(
self, context, vnf_instance,
change_ext_conn_request, grant,
grant_request, **kwargs):
pass

@@ -0,0 +1,87 @@
heat_template_version: 2013-05-23
description: 'Simple Base HOT for Sample VNF'
parameters:
nfv:
type: json
resources:
master_instance:
type: OS::Heat::AutoScalingGroup
properties:
min_size: 1
max_size: 3
desired_capacity: 1
resource:
type: base_hot_nested_master.yaml
properties:
flavor: { get_param: [ nfv, VDU, masterNode, flavor ] }
image: { get_param: [ nfv, VDU, masterNode, image ] }
net1: { get_param: [ nfv, CP, masterNode_CP1, network ] }
master_instance_scale_out:
type: OS::Heat::ScalingPolicy
properties:
scaling_adjustment: 1
auto_scaling_group_id:
get_resource: master_instance
adjustment_type: change_in_capacity
master_instance_scale_in:
type: OS::Heat::ScalingPolicy
properties:
scaling_adjustment: -1
auto_scaling_group_id:
get_resource: master_instance
adjustment_type: change_in_capacity
worker_instance:
type: OS::Heat::AutoScalingGroup
properties:
min_size: 2
max_size: 4
desired_capacity: 2
resource:
type: base_hot_nested_worker.yaml
properties:
flavor: { get_param: [ nfv, VDU, workerNode, flavor ] }
image: { get_param: [ nfv, VDU, workerNode, image ] }
net1: { get_param: [ nfv, CP, workerNode_CP2, network ] }
worker_instance_scale_out:
type: OS::Heat::ScalingPolicy
properties:
scaling_adjustment: 1
auto_scaling_group_id:
get_resource: worker_instance
adjustment_type: change_in_capacity
worker_instance_scale_in:
type: OS::Heat::ScalingPolicy
properties:
scaling_adjustment: -1
auto_scaling_group_id:
get_resource: worker_instance
adjustment_type: change_in_capacity
externalLB:
type: OS::Nova::Server
properties:
flavor: { get_param: [ nfv, VDU, externalLB, flavor ] }
name: externalLB
image: { get_param: [ nfv, VDU, externalLB, image ] }
networks:
- port:
get_resource: externalLB_CP3
externalLB_CP3:
type: OS::Neutron::Port
properties:
network: { get_param: [ nfv, CP, externalLB_CP3, network ] }
externalLB_FloatingIP:
type: OS::Neutron::FloatingIP
properties:
floating_network: public
port_id:
get_resource: externalLB_CP3
outputs: {}

@@ -0,0 +1,33 @@
heat_template_version: 2013-05-23
description: 'masterNode HOT for Sample VNF'

parameters:
  flavor:
    type: string
  image:
    type: string
  net1:
    type: string

resources:
  masterNode:
    type: OS::Nova::Server
    properties:
      flavor: { get_param: flavor }
      name: masterNode
      image: { get_param: image }
      networks:
        - port:
            get_resource: masterNode_CP1

  masterNode_CP1:
    type: OS::Neutron::Port
    properties:
      network: { get_param: net1 }

  masterNode_FloatingIP:
    properties:
      floating_network: public
      port_id:
        get_resource: masterNode_CP1
    type: OS::Neutron::FloatingIP

View File

@ -0,0 +1,33 @@
heat_template_version: 2013-05-23
description: 'workerNode HOT for Sample VNF'

parameters:
  flavor:
    type: string
  image:
    type: string
  net1:
    type: string

resources:
  workerNode:
    type: OS::Nova::Server
    properties:
      flavor: { get_param: flavor }
      name: workerNode
      image: { get_param: image }
      networks:
        - port:
            get_resource: workerNode_CP2

  workerNode_CP2:
    type: OS::Neutron::Port
    properties:
      network: { get_param: net1 }

  workerNode_FloatingIP:
    properties:
      floating_network: public
      port_id:
        get_resource: workerNode_CP2
    type: OS::Neutron::FloatingIP

View File

@ -0,0 +1,288 @@
tosca_definitions_version: tosca_simple_yaml_1_2

description: Simple deployment flavour for Sample VNF

imports:
  - etsi_nfv_sol001_common_types.yaml
  - etsi_nfv_sol001_vnfd_types.yaml
  - sample_kubernetes_types.yaml

topology_template:
  inputs:
    id:
      type: string
    vendor:
      type: string
    version:
      type: version
    descriptor_id:
      type: string
    descriptor_version:
      type: string
    provider:
      type: string
    product_name:
      type: string
    software_version:
      type: string
    vnfm_info:
      type: list
      entry_schema:
        type: string
    flavour_id:
      type: string
    flavour_description:
      type: string

  substitution_mappings:
    node_type: company.provider.VNF
    properties:
      flavour_id: simple
    requirements:
      virtual_link_external1_1: [ masterNode_CP1, virtual_link ]
      virtual_link_external1_2: [ workerNode_CP2, virtual_link ]
      virtual_link_external1_3: [ externalLB_CP3, virtual_link ]

  node_templates:
    VNF:
      type: company.provider.VNF
      properties:
        flavour_description: A simple flavour
      interfaces:
        Vnflcm:
          instantiate_end:
            implementation: mgmt-drivers-kubespray
          terminate_end:
            implementation: mgmt-drivers-kubespray
          scale_start:
            implementation: mgmt-drivers-kubespray
          scale_end:
            implementation: mgmt-drivers-kubespray
          heal_start:
            implementation: mgmt-drivers-kubespray
          heal_end:
            implementation: mgmt-drivers-kubespray
      artifacts:
        mgmt-drivers-kubespray:
          description: Management driver for kubernetes cluster
          type: tosca.artifacts.Implementation.Python
          file: Scripts/kubespray_mgmt.py
    externalLB:
      type: tosca.nodes.nfv.Vdu.Compute
      properties:
        name: externalLB
        description: externalLB
        vdu_profile:
          min_number_of_instances: 1
          max_number_of_instances: 1
        sw_image_data:
          name: ubuntu-20.04-server-cloudimg-amd64
          version: '20.04'
          checksum:
            algorithm: sha-512
            hash: fb1a1e50f9af2df6ab18a69b6bc5df07ebe8ef962b37e556ce95350ffc8f4a1118617d486e2018d1b3586aceaeda799e6cc073f330a7ad8f0ec0416cbd825452
          container_format: bare
          disk_format: qcow2
          min_disk: 0 GB
          size: 2 GB
      capabilities:
        virtual_compute:
          properties:
            requested_additional_capabilities:
              properties:
                requested_additional_capability_name: ds2G
                support_mandatory: true
                target_performance_parameters:
                  entry_schema: test
            virtual_memory:
              virtual_mem_size: 2 GB
            virtual_cpu:
              num_virtual_cpu: 2
            virtual_local_storage:
              - size_of_storage: 10 GB

    masterNode:
      type: tosca.nodes.nfv.Vdu.Compute
      properties:
        name: masterNode
        description: masterNode compute node
        vdu_profile:
          min_number_of_instances: 1
          max_number_of_instances: 3
        sw_image_data:
          name: ubuntu-20.04-server-cloudimg-amd64
          version: '20.04'
          checksum:
            algorithm: sha-512
            hash: fb1a1e50f9af2df6ab18a69b6bc5df07ebe8ef962b37e556ce95350ffc8f4a1118617d486e2018d1b3586aceaeda799e6cc073f330a7ad8f0ec0416cbd825452
          container_format: bare
          disk_format: qcow2
          min_disk: 0 GB
          size: 2 GB
      capabilities:
        virtual_compute:
          properties:
            requested_additional_capabilities:
              properties:
                requested_additional_capability_name: ds2G
                support_mandatory: true
                target_performance_parameters:
                  entry_schema: test
            virtual_memory:
              virtual_mem_size: 2 GB
            virtual_cpu:
              num_virtual_cpu: 2
            virtual_local_storage:
              - size_of_storage: 10 GB

    workerNode:
      type: tosca.nodes.nfv.Vdu.Compute
      properties:
        name: workerNode
        description: workerNode compute node
        vdu_profile:
          min_number_of_instances: 2
          max_number_of_instances: 4
        sw_image_data:
          name: ubuntu-20.04-server-cloudimg-amd64
          version: '20.04'
          checksum:
            algorithm: sha-512
            hash: fb1a1e50f9af2df6ab18a69b6bc5df07ebe8ef962b37e556ce95350ffc8f4a1118617d486e2018d1b3586aceaeda799e6cc073f330a7ad8f0ec0416cbd825452
          container_format: bare
          disk_format: qcow2
          min_disk: 0 GB
          size: 2 GB
      capabilities:
        virtual_compute:
          properties:
            requested_additional_capabilities:
              properties:
                requested_additional_capability_name: ds2G
                support_mandatory: true
                target_performance_parameters:
                  entry_schema: test
            virtual_memory:
              virtual_mem_size: 2 GB
            virtual_cpu:
              num_virtual_cpu: 2
            virtual_local_storage:
              - size_of_storage: 10 GB

    externalLB_CP3:
      type: tosca.nodes.nfv.VduCp
      properties:
        layer_protocols: [ ipv4 ]
        order: 0
      requirements:
        - virtual_binding: externalLB

    masterNode_CP1:
      type: tosca.nodes.nfv.VduCp
      properties:
        layer_protocols: [ ipv4 ]
        order: 0
      requirements:
        - virtual_binding: masterNode

    workerNode_CP2:
      type: tosca.nodes.nfv.VduCp
      properties:
        layer_protocols: [ ipv4 ]
        order: 0
      requirements:
        - virtual_binding: workerNode
  policies:
    - scaling_aspects:
        type: tosca.policies.nfv.ScalingAspects
        properties:
          aspects:
            master_instance:
              name: master_instance
              description: master_instance scaling aspect
              max_scale_level: 2
              step_deltas:
                - delta_1
            worker_instance:
              name: worker_instance
              description: worker_instance scaling aspect
              max_scale_level: 2
              step_deltas:
                - delta_1

    - masterNode_initial_delta:
        type: tosca.policies.nfv.VduInitialDelta
        properties:
          initial_delta:
            number_of_instances: 1
        targets: [ masterNode ]

    - workerNode_initial_delta:
        type: tosca.policies.nfv.VduInitialDelta
        properties:
          initial_delta:
            number_of_instances: 2
        targets: [ workerNode ]

    - masterNode_scaling_aspect_deltas:
        type: tosca.policies.nfv.VduScalingAspectDeltas
        properties:
          aspect: master_instance
          deltas:
            delta_1:
              number_of_instances: 1
        targets: [ masterNode ]

    - workerNode_scaling_aspect_deltas:
        type: tosca.policies.nfv.VduScalingAspectDeltas
        properties:
          aspect: worker_instance
          deltas:
            delta_1:
              number_of_instances: 1
        targets: [ workerNode ]

    - instantiation_levels:
        type: tosca.policies.nfv.InstantiationLevels
        properties:
          levels:
            instantiation_level_1:
              description: Smallest size
              scale_info:
                master_instance:
                  scale_level: 0
                worker_instance:
                  scale_level: 0
            instantiation_level_2:
              description: Largest size
              scale_info:
                master_instance:
                  scale_level: 2
                worker_instance:
                  scale_level: 2
          default_level: instantiation_level_1

    - masterNode_instantiation_levels:
        type: tosca.policies.nfv.VduInstantiationLevels
        properties:
          levels:
            instantiation_level_1:
              number_of_instances: 1
            instantiation_level_2:
              number_of_instances: 3
        targets: [ masterNode ]

    - workerNode_instantiation_levels:
        type: tosca.policies.nfv.VduInstantiationLevels
        properties:
          levels:
            instantiation_level_1:
              number_of_instances: 2
            instantiation_level_2:
              number_of_instances: 4
        targets: [ workerNode ]
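With the policies above, the master_instance and worker_instance aspects can each be scaled up to max_scale_level 2, one instance per step (delta_1). Assuming the usual ETSI SOL003 ScaleVnfRequest fields, a scale-out request against the worker aspect might be sketched as:

    scale_req = {
        'type': 'SCALE_OUT',            # or 'SCALE_IN'
        'aspectId': 'worker_instance',  # aspect declared in scaling_aspects above
        'numberOfSteps': 1,             # each step adds delta_1 = 1 workerNode
    }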

View File

@ -0,0 +1,31 @@
tosca_definitions_version: tosca_simple_yaml_1_2

description: Sample VNF.

imports:
  - etsi_nfv_sol001_common_types.yaml
  - etsi_nfv_sol001_vnfd_types.yaml
  - sample_kubernetes_types.yaml
  - sample_kubernetes_df_simple.yaml

topology_template:
  inputs:
    selected_flavour:
      type: string
      description: VNF deployment flavour selected by the consumer. It is provided in the API

  node_templates:
    VNF:
      type: company.provider.VNF
      properties:
        flavour_id: { get_input: selected_flavour }
        descriptor_id: b1db0ce7-ebca-1fb7-95ed-4840d70a1163
        provider: Company
        product_name: Sample VNF
        software_version: '1.0'
        descriptor_version: '1.0'
        vnfm_info:
          - Tacker
      requirements:
        #- virtual_link_external # mapped in lower-level templates
        #- virtual_link_internal # mapped in lower-level templates
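The selected_flavour input is supplied by the consumer in the API at instantiation time. Assuming the standard SOL003 InstantiateVnfRequest fields, the relevant part of a request against this VNFD might be sketched as:

    instantiate_req = {
        'flavourId': 'simple',
        'instantiationLevelId': 'instantiation_level_1',  # defined in the simple flavour
    }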

View File

@ -0,0 +1,63 @@
tosca_definitions_version: tosca_simple_yaml_1_2

description: VNF type definition

imports:
  - etsi_nfv_sol001_common_types.yaml
  - etsi_nfv_sol001_vnfd_types.yaml

node_types:
  company.provider.VNF:
    derived_from: tosca.nodes.nfv.VNF
    properties:
      id:
        type: string
        description: ID of this VNF
        default: vnf_id
      vendor:
        type: string
        description: name of the vendor who generate this VNF
        default: vendor
      version:
        type: version
        description: version of the software for this VNF
        default: 1.0
      descriptor_id:
        type: string
        constraints: [ valid_values: [ b1db0ce7-ebca-1fb7-95ed-4840d70a1163 ] ]
        default: b1db0ce7-ebca-1fb7-95ed-4840d70a1163
      descriptor_version:
        type: string
        constraints: [ valid_values: [ '1.0' ] ]
        default: '1.0'
      provider:
        type: string
        constraints: [ valid_values: [ 'Company' ] ]
        default: 'Company'
      product_name:
        type: string
        constraints: [ valid_values: [ 'Sample VNF' ] ]
        default: 'Sample VNF'
      software_version:
        type: string
        constraints: [ valid_values: [ '1.0' ] ]
        default: '1.0'
      vnfm_info:
        type: list
        entry_schema:
          type: string
          constraints: [ valid_values: [ Tacker ] ]
        default: [ Tacker ]
      flavour_id:
        type: string
        constraints: [ valid_values: [ simple ] ]
        default: simple
      flavour_description:
        type: string
        default: "This is the default flavour description"
    requirements:
      - virtual_link_internal:
          capability: tosca.capabilities.nfv.VirtualLinkable
    interfaces:
      Vnflcm:
        type: tosca.interfaces.nfv.Vnflcm

View File

@ -0,0 +1,14 @@
TOSCA-Meta-File-Version: 1.0
Created-by: dummy_user
CSAR-Version: 1.1
Entry-Definitions: Definitions/sample_kubernetes_top.vnfd.yaml

Name: Scripts/install_external_lb.sh
Content-Type: application/x-shellscript
Algorithm: SHA-256
Hash: 0b2445403a4b2ce2f905c2b7f77dcdb444a1e445379a11c6aca8e87d4b1f8198

Name: Scripts/kubespray_mgmt.py
Content-Type: text/x-python
Algorithm: SHA-256
Hash: 2d6232040fd049619e1a7c3e268b87ccec9aa4d56d955f1bfa4420a4f6531e31
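The Hash values above are SHA-256 digests of the listed artifact files. A small sketch for computing (or re-verifying) such a digest:

    import hashlib

    def sha256_of(path):
        # Stream the file so large artifacts need not fit in memory.
        digest = hashlib.sha256()
        with open(path, 'rb') as f:
            for chunk in iter(lambda: f.read(65536), b''):
                digest.update(chunk)
        return digest.hexdigest()

    # e.g. sha256_of('Scripts/kubespray_mgmt.py') should match the Hash entry above.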

View File

@ -0,0 +1,35 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from tacker.vnfm.lcm_user_data.abstract_user_data import AbstractUserData
import tacker.vnfm.lcm_user_data.utils as UserDataUtil


class SampleUserData(AbstractUserData):
    @staticmethod
    def instantiate(base_hot_dict=None,
                    vnfd_dict=None,
                    inst_req_info=None,
                    grant_info=None):
        # Build the HOT input parameters from the VNFD and the
        # instantiate request.
        api_param = UserDataUtil.get_diff_base_hot_param_from_api(
            base_hot_dict, inst_req_info)
        initial_param_dict = \
            UserDataUtil.create_initial_param_server_port_dict(
                base_hot_dict)
        vdu_flavor_dict = \
            UserDataUtil.create_vdu_flavor_capability_name_dict(vnfd_dict)
        vdu_image_dict = UserDataUtil.create_sw_image_dict(vnfd_dict)
        cpd_vl_dict = UserDataUtil.create_network_dict(
            inst_req_info, initial_param_dict)
        final_param_dict = UserDataUtil.create_final_param_dict(
            initial_param_dict, vdu_flavor_dict, vdu_image_dict, cpd_vl_dict)
        return {**final_param_dict, **api_param}
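Note the merge order in the return statement: with {**final_param_dict, **api_param}, values derived from the API request (api_param) override the values computed from the VNFD and base HOT whenever both provide the same key, because later entries win in a Python dict merge:

    defaults = {'flavor': 'from-vnfd', 'image': 'img'}
    overrides = {'flavor': 'from-api'}
    merged = {**defaults, **overrides}   # -> {'flavor': 'from-api', 'image': 'img'}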

View File

@ -215,3 +215,6 @@ class CommonDbMixin(object):
        except orm_exc.NoResultFound:
            LOG.info("No result found for %(name)s in %(model)s table",
                     {'name': name, 'model': model})

    def get_by_name(self, context, model, name):
        return self._get_by_name(context, model, name)
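The new get_by_name() is just a public wrapper over the existing private lookup, so callers outside this mixin can resolve a record by name without reaching into a private method. A purely hypothetical usage sketch (the Vim model here is illustrative, not part of this change):

    obj = plugin.get_by_name(context, Vim, 'kubernetes-vim')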

View File

@ -2016,6 +2016,9 @@ class TestVnflcmDriver(db_base.SqlTestCase):
            operation_params)
        self.assertEqual(1, mock_lcm_save.call_count)

    @mock.patch('tacker.vnflcm.utils._get_vnfd_dict')
    @mock.patch('tacker.vnflcm.vnflcm_driver.VnfLcmDriver.'
                '_load_vnf_interface')
    @mock.patch.object(TackerManager, 'get_service_plugins',
                       return_value={'VNFM': FakeVNFMPlugin()})
    @mock.patch.object(VnfLcmDriver,
@ -2033,7 +2036,9 @@ class TestVnflcmDriver(db_base.SqlTestCase):
                           mock_notification,
                           mock_lcm_save,
                           mock_init_hash,
                           mock_get_service_plugins):
                           mock_get_service_plugins,
                           mock_vnf_interfaces,
                           mock_vnfd_dict):
        mock_init_hash.return_value = {
            "vnflcm_noop": "ffea638bfdbde3fb01f191bbe75b031859"
                           "b18d663b127100eb72b19eecd7ed51"
@ -2057,6 +2062,9 @@ class TestVnflcmDriver(db_base.SqlTestCase):
            operation_params)
        self.assertEqual(1, mock_lcm_save.call_count)

    @mock.patch('tacker.vnflcm.utils._get_vnfd_dict')
    @mock.patch('tacker.vnflcm.vnflcm_driver.VnfLcmDriver.'
                '_load_vnf_interface')
    @mock.patch.object(TackerManager, 'get_service_plugins',
                       return_value={'VNFM': FakeVNFMPlugin()})
    @mock.patch.object(VnfLcmDriver,
@ -2074,7 +2082,9 @@ class TestVnflcmDriver(db_base.SqlTestCase):
                           mock_notification,
                           mock_lcm_save,
                           mock_init_hash,
                           mock_get_service_plugins):
                           mock_get_service_plugins,
                           mock_vnf_interfaces,
                           mock_vnfd_dict):
        mock_init_hash.return_value = {
            "vnflcm_noop": "ffea638bfdbde3fb01f191bbe75b031859"
                           "b18d663b127100eb72b19eecd7ed51"
@ -2098,6 +2108,9 @@ class TestVnflcmDriver(db_base.SqlTestCase):
            operation_params)
        self.assertEqual(1, mock_lcm_save.call_count)

    @mock.patch('tacker.vnflcm.utils._get_vnfd_dict')
    @mock.patch('tacker.vnflcm.vnflcm_driver.VnfLcmDriver.'
                '_load_vnf_interface')
    @mock.patch.object(TackerManager, 'get_service_plugins',
                       return_value={'VNFM': FakeVNFMPlugin()})
    @mock.patch.object(VnfLcmDriver,
@ -2115,7 +2128,9 @@ class TestVnflcmDriver(db_base.SqlTestCase):
                           mock_notification,
                           mock_lcm_save,
                           mock_init_hash,
                           mock_get_service_plugins):
                           mock_get_service_plugins,
                           mock_vnf_interfaces,
                           mock_vnfd_dict):
        mock_init_hash.return_value = {
            "vnflcm_noop": "ffea638bfdbde3fb01f191bbe75b031859"
                           "b18d663b127100eb72b19eecd7ed51"
@ -2139,6 +2154,9 @@ class TestVnflcmDriver(db_base.SqlTestCase):
            operation_params)
        self.assertEqual(1, mock_lcm_save.call_count)

    @mock.patch('tacker.vnflcm.utils._get_vnfd_dict')
    @mock.patch('tacker.vnflcm.vnflcm_driver.VnfLcmDriver.'
                '_load_vnf_interface')
    @mock.patch.object(TackerManager, 'get_service_plugins',
                       return_value={'VNFM': FakeVNFMPlugin()})
    @mock.patch.object(VnfLcmDriver,
@ -2156,7 +2174,9 @@ class TestVnflcmDriver(db_base.SqlTestCase):
                           mock_notification,
                           mock_lcm_save,
                           mock_init_hash,
                           mock_get_service_plugins):
                           mock_get_service_plugins,
                           mock_vnf_interfaces,
                           mock_vnfd_dict):
        mock_init_hash.return_value = {
            "vnflcm_noop": "ffea638bfdbde3fb01f191bbe75b031859"
                           "b18d663b127100eb72b19eecd7ed51"

View File

@ -79,6 +79,10 @@ def _get_vnfd_dict(context, vnfd_id, flavour_id):
    return vnfd_dict


def get_vnfd_dict(context, vnfd_id, flavour_id):
    return _get_vnfd_dict(context, vnfd_id, flavour_id)


def _get_vnflcm_interface(context, interface, vnf_instance, flavour_id):
    '''Gets the interface found in vnfd

View File

@ -1531,7 +1531,6 @@ class VnfLcmDriver(abstract_driver.VnfInstanceAbstractDriver):
        vnfd_dict = vnflcm_utils._get_vnfd_dict(
            context, vnf_instance.vnfd_id,
            vnf_instance.instantiated_vnf_info.flavour_id)
        vnf_info['action'] = 'in'
        if len(scale_id_list) != 0:
            kwargs = {'scale_name_list': scale_name_list}
@ -1660,6 +1659,22 @@ class VnfLcmDriver(abstract_driver.VnfInstanceAbstractDriver):
                vim_connection_info)
        else:
            vnfd_dict = vnflcm_utils.get_vnfd_dict(
                context, vnf_instance.vnfd_id,
                vnf_instance.instantiated_vnf_info.flavour_id)
            # TODO(LiangLu): grant_request and grant are planned to be
            # passed as parameters here; however, since they are not
            # passed from conductor to vnflcm_driver, we put a null
            # value into grant and grant_request temporarily.
            # This part will be updated in the next release.
            self._mgmt_manager.invoke(
                self._load_vnf_interface(
                    context, 'terminate_end',
                    vnf_instance, vnfd_dict),
                'terminate_end', context=context,
                vnf_instance=vnf_instance,
                terminate_vnf_request=None,
                grant=None, grant_request=None)

        resource_changes = self._term_resource_update(
            context, vnf_info, vnf_instance)