Merge "Support Helm chart as interface for Kubernetes VIM"

commit e62a62ccc9

@@ -0,0 +1,10 @@
---
features:
  - |
    Add a new interface for the Kubernetes VIM to handle Helm charts. It
    enables users to include Helm chart files as MCIOPs in their VNF Packages
    and to instantiate and terminate CNFs with them.
    Also update the MgmtDriver sample to install and configure Helm so that
    the Helm CLI can be used in the deployed Kubernetes cluster VNF, and to
    restore the registered Helm repositories and charts after the master node
    is healed.
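For context, the functional test added later in this change drives the new interface through `additionalParams` at instantiation time. A minimal sketch of such a request body, using the same keys and values the test uses (values are illustrative, not normative):

```python
# Sketch of the additionalParams that enable the Helm interface at
# instantiation time; keys mirror the functional test in this change.
inst_additional_param = {
    "namespace": "default",
    "use_helm": "true",
    "using_helm_install_param": [
        {   # chart packaged inside the VNF Package (MCIOP)
            "exthelmchart": "false",
            "helmchartfile_path": "Files/kubernetes/localhelm-0.1.0.tgz",
            "helmreleasename": "vdu1",
            "helmparameter": ["service.port=8081"],
        },
        {   # chart pulled from an external Helm repository
            "exthelmchart": "true",
            "helmreleasename": "vdu2",
            "helmrepositoryname": "bitnami",
            "helmchartname": "apache",
            "exthelmrepo_url": "https://charts.bitnami.com/bitnami",
        },
    ],
}
```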
@@ -0,0 +1,49 @@
#!/bin/bash
set -o xtrace

###############################################################################
#
# This script installs and configures Helm for Tacker.
#
###############################################################################

declare -g HELM_VERSION="3.5.4"
declare -g HELM_CHART_DIR="/var/tacker/helm"

# Install Helm
#-------------
function install_helm {
    wget -P /tmp https://get.helm.sh/helm-v$HELM_VERSION-linux-amd64.tar.gz
    tar zxf /tmp/helm-v$HELM_VERSION-linux-amd64.tar.gz -C /tmp
    sudo mv /tmp/linux-amd64/helm /usr/local/bin/helm
}

# Install sshpass
#----------------
function install_sshpass {
    sudo apt-get install -y sshpass
}

# Create helm chart directory
#----------------------------
function create_helm_chart_dir {
    sudo mkdir -p $HELM_CHART_DIR
}

# Set proxy to environment
#-------------------------
function set_env_proxy {
    cat <<EOF | sudo tee -a /etc/environment >/dev/null
http_proxy=${http_proxy//%40/@}
https_proxy=${https_proxy//%40/@}
no_proxy=$no_proxy
EOF
}

# Main
#-----
install_helm
install_sshpass
create_helm_chart_dir
set_env_proxy
exit 0
@@ -37,6 +37,10 @@ from tacker.vnfm.mgmt_drivers import vnflcm_abstract_driver
 LOG = logging.getLogger(__name__)
 K8S_CMD_TIMEOUT = 30
 K8S_INSTALL_TIMEOUT = 2700
+HELM_CMD_TIMEOUT = 30
+HELM_INSTALL_TIMEOUT = 300
+HELM_CHART_DIR = "/var/tacker/helm"
+HELM_CHART_CMP_PATH = "/tmp/tacker-helm.tgz"


 class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):

@@ -97,15 +101,22 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
             stdout = result.get_stdout()
             LOG.debug(stdout)
             LOG.debug(err)
-        elif type == 'certificate_key' or type == 'install':
+        elif type in ('certificate_key', 'install', 'scp'):
             if result.get_return_code() != 0:
                 err = result.get_stderr()
                 LOG.error(err)
                 raise exceptions.MgmtDriverRemoteCommandError(err_info=err)
+        elif type == 'helm_repo_list':
+            if result.get_return_code() != 0:
+                err = result.get_stderr()[0].replace('\n', '')
+                if err == 'Error: no repositories to show':
+                    return []
+                raise exceptions.MgmtDriverRemoteCommandError(err_info=err)
         return result.get_stdout()

     def _create_vim(self, context, vnf_instance, server, bearer_token,
-                    ssl_ca_cert, vim_name, project_name, master_vm_dict_list):
+                    ssl_ca_cert, vim_name, project_name, master_vm_dict_list,
+                    masternode_ip_list):
         # ha: create vim
         vim_info = {
             'vim': {
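The new `helm_repo_list` branch lets callers run `helm repo list -o json` and treat an empty repository list as `[]` instead of an error. As a rough illustration (not the driver's actual code), the JSON that branch hands back parses into the name/url pairs that `_restore_helm_repo` later consumes:

```python
import json

# Example output of `helm repo list -o json` (illustrative values only).
raw = '[{"name": "bitnami", "url": "https://charts.bitnami.com/bitnami"}]'

repos = json.loads(raw)  # -> list of {"name": ..., "url": ...} dicts
for repo in repos:
    # _restore_helm_repo rebuilds each repo with `helm repo add <name> <url>`
    print("helm repo add {name} {url}".format(
        name=repo.get('name'), url=repo.get('url')))
```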
@@ -133,6 +144,16 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
             register_ip, server)
         vim_info['vim']['auth_url'] = server
         del vim_info['vim']['auth_cred']['ssl_ca_cert']
+        extra = {}
+        if masternode_ip_list:
+            username = master_vm_dict_list[0].get('ssh').get('username')
+            password = master_vm_dict_list[0].get('ssh').get('password')
+            helm_info = {
+                'masternode_ip': masternode_ip_list,
+                'masternode_username': username,
+                'masternode_password': password}
+            extra['helm_info'] = str(helm_info)
+            vim_info['vim']['extra'] = extra
         try:
             nfvo_plugin = NfvoPlugin()
             created_vim_info = nfvo_plugin.create_vim(context, vim_info)

@@ -149,7 +170,7 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
         }
         vim_connection_info = objects.VimConnectionInfo(
             id=id, vim_id=vim_id, vim_type=vim_type,
-            access_info=access_info, interface_info=None
+            access_info=access_info, interface_info=None, extra=extra
         )
         vim_connection_infos = vnf_instance.vim_connection_info
         vim_connection_infos.append(vim_connection_info)
@@ -304,7 +325,8 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
         return hosts_str

     def _init_commander_and_send_install_scripts(self, user, password, host,
-            vnf_package_path=None, script_path=None):
+            vnf_package_path=None, script_path=None,
+            helm_inst_script_path=None):
         retry = 4
         while retry > 0:
             try:
@@ -320,6 +342,10 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
                     "../../../samples/mgmt_driver/"
                     "create_admin_token.yaml"),
                     "/tmp/create_admin_token.yaml")
+                if helm_inst_script_path:
+                    sftp.put(os.path.join(
+                        vnf_package_path, helm_inst_script_path),
+                        "/tmp/install_helm.sh")
                 connect.close()
                 commander = cmd_executer.RemoteCommandExecutor(
                     user=user, password=password, host=host,
@@ -377,9 +403,23 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
         self._execute_command(
             commander, ssh_command, K8S_INSTALL_TIMEOUT, 'install', 0)

+    def _install_helm(self, commander, proxy):
+        ssh_command = ""
+        if proxy.get("http_proxy") and proxy.get("https_proxy"):
+            ssh_command += ("export http_proxy={http_proxy}; "
+                            "export https_proxy={https_proxy}; "
+                            "export no_proxy={no_proxy}; ").format(
+                http_proxy=proxy.get('http_proxy'),
+                https_proxy=proxy.get('https_proxy'),
+                no_proxy=proxy.get('no_proxy'))
+        ssh_command += "bash /tmp/install_helm.sh;"
+        self._execute_command(
+            commander, ssh_command, HELM_INSTALL_TIMEOUT, 'install', 0)
+
     def _install_k8s_cluster(self, context, vnf_instance,
                              proxy, script_path,
-                             master_vm_dict_list, worker_vm_dict_list):
+                             master_vm_dict_list, worker_vm_dict_list,
+                             helm_inst_script_path):
         # instantiate: pre /etc/hosts
         hosts_str = self._get_hosts(
             master_vm_dict_list, worker_vm_dict_list)
@@ -399,6 +439,16 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
             raise exceptions.MgmtDriverOtherError(
                 error_message="The path of install script is invalid")

+        # check helm install and get helm install script_path
+        masternode_ip_list = []
+        if helm_inst_script_path:
+            abs_helm_inst_script_path = os.path.join(
+                vnf_package_path, helm_inst_script_path)
+            if not os.path.exists(abs_helm_inst_script_path):
+                LOG.error('The path of helm install script is invalid.')
+                raise exceptions.MgmtDriverParamInvalid(
+                    param='helm_installation_script_path')
+
         # set no proxy
         project_name = ''
         if proxy.get("http_proxy") and proxy.get("https_proxy"):
@@ -446,7 +496,7 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
             k8s_cluster = vm_dict.get('k8s_cluster', {})
             commander = self._init_commander_and_send_install_scripts(
                 user, password, host,
-                vnf_package_path, script_path)
+                vnf_package_path, script_path, helm_inst_script_path)

             # set /etc/hosts for each node
             ssh_command = "> /tmp/tmp_hosts"
@@ -562,6 +612,9 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
             bearer_token = self._execute_command(
                 commander, ssh_command,
                 K8S_CMD_TIMEOUT, 'common', 0)[0].replace('\n', '')
+            if helm_inst_script_path:
+                self._install_helm(commander, proxy)
+                masternode_ip_list.append(host)
             commander.close_session()

         # install worker node
@@ -597,7 +650,8 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
                 cluster_ip, kubeadm_token, ssl_ca_cert_hash)
             commander.close_session()

-        return server, bearer_token, ssl_ca_cert, project_name
+        return (server, bearer_token, ssl_ca_cert, project_name,
+                masternode_ip_list)

     def _check_values(self, additional_param):
         for key, value in additional_param.items():
@@ -654,6 +708,8 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
         master_node = additional_param.get('master_node', {})
         worker_node = additional_param.get('worker_node', {})
         proxy = additional_param.get('proxy', {})
+        helm_inst_script_path = additional_param.get(
+            'helm_installation_script_path', None)
         # check script_path
         if not script_path:
             LOG.error('The script_path in the '
@@ -695,15 +751,16 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
         worker_vm_dict_list = self._get_install_info_for_k8s_node(
             nest_stack_id, worker_node,
             instantiate_vnf_request.additional_params, 'worker', access_info)
-        server, bearer_token, ssl_ca_cert, project_name = \
+        server, bearer_token, ssl_ca_cert, project_name, masternode_ip_list = \
             self._install_k8s_cluster(context, vnf_instance,
                                       proxy, script_path, master_vm_dict_list,
-                                      worker_vm_dict_list)
+                                      worker_vm_dict_list,
+                                      helm_inst_script_path)

         # register vim with kubernetes cluster info
         self._create_vim(context, vnf_instance, server,
                          bearer_token, ssl_ca_cert, vim_name, project_name,
-                         master_vm_dict_list)
+                         master_vm_dict_list, masternode_ip_list)

     def terminate_start(self, context, vnf_instance,
                         terminate_vnf_request, grant,
@@ -1580,6 +1637,50 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):

         return target_physical_resource_ids

+    def _prepare_for_restoring_helm(self, commander, master_ip):
+        helm_info = {}
+        # get helm repo list
+        ssh_command = "helm repo list -o json"
+        result = self._execute_command(
+            commander, ssh_command, K8S_CMD_TIMEOUT, 'helm_repo_list', 0)
+        if result:
+            helmrepo_list = json.loads(result)
+            helm_info['ext_helmrepo_list'] = helmrepo_list
+        # compress local helm chart
+        ssh_command = ("sudo tar -zcf {cmp_path} -P {helm_chart_dir}"
+                       .format(cmp_path=HELM_CHART_CMP_PATH,
+                               helm_chart_dir=HELM_CHART_DIR))
+        self._execute_command(
+            commander, ssh_command, HELM_INSTALL_TIMEOUT, 'common', 0)
+        helm_info['local_repo_src_ip'] = master_ip
+
+        return helm_info
+
+    def _restore_helm_repo(self, commander, master_username, master_password,
+                           local_repo_src_ip, ext_repo_list):
+        # restore local helm chart
+        ssh_command = (
+            "sudo sshpass -p {master_password} "
+            "scp -o StrictHostKeyChecking=no "
+            "{master_username}@{local_repo_src_ip}:{helm_chart_cmp_path} "
+            "{helm_chart_cmp_path};").format(
+                master_password=master_password,
+                master_username=master_username,
+                local_repo_src_ip=local_repo_src_ip,
+                helm_chart_cmp_path=HELM_CHART_CMP_PATH
+        )
+        ssh_command += "sudo tar -Pzxf {helm_chart_cmp_path};".format(
+            helm_chart_cmp_path=HELM_CHART_CMP_PATH)
+        self._execute_command(
+            commander, ssh_command, HELM_CMD_TIMEOUT, 'scp', 0)
+        # restore external helm repository
+        if ext_repo_list:
+            for ext_repo in ext_repo_list:
+                ssh_command += "helm repo add {name} {url};".format(
+                    name=ext_repo.get('name'), url=ext_repo.get('url'))
+            self._execute_command(
+                commander, ssh_command, HELM_CMD_TIMEOUT, 'common', 0)
+
     def heal_start(self, context, vnf_instance,
                    heal_vnf_request, grant,
                    grant_request, **kwargs):
@@ -1629,7 +1730,7 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
             fixed_master_infos, proxy,
             master_username, master_password, vnf_package_path,
             script_path, cluster_ip, pod_cidr, cluster_cidr,
-            kubeadm_token, ssl_ca_cert_hash, ha_flag):
+            kubeadm_token, ssl_ca_cert_hash, ha_flag, helm_info):
         not_fixed_master_nic_ips = [
             master_ips.get('master_nic_ip')
             for master_ips in not_fixed_master_infos.values()]
@@ -1656,7 +1757,8 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
             commander = self._init_commander_and_send_install_scripts(
                 master_username, master_password,
                 fixed_master_info.get('master_ssh_ip'),
-                vnf_package_path, script_path)
+                vnf_package_path, script_path,
+                helm_info.get('script_path', None))
             self._set_node_ip_in_hosts(
                 commander, 'heal_end', hosts_str=hosts_str)
             if proxy.get('http_proxy') and proxy.get('https_proxy'):
@@ -1699,6 +1801,12 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
                 certificate_key=certificate_key)
             self._execute_command(
                 commander, ssh_command, K8S_INSTALL_TIMEOUT, 'install', 0)
+            if helm_info:
+                self._install_helm(commander, proxy)
+                self._restore_helm_repo(
+                    commander, master_username, master_password,
+                    helm_info.get('local_repo_src_ip'),
+                    helm_info.get('ext_helmrepo_list', ''))
             commander.close_session()
         for not_fixed_master_name, not_fixed_master in \
                 not_fixed_master_infos.items():
@@ -1814,6 +1922,15 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
         ssl_ca_cert_hash = self._execute_command(
             commander, ssh_command,
             K8S_CMD_TIMEOUT, 'common', 3)[0].replace('\n', '')

+        # prepare for restoring helm repository
+        helm_inst_script_path = k8s_cluster_installation_param.get(
+            'helm_installation_script_path', None)
+        helm_info = {}
+        if helm_inst_script_path:
+            helm_info = self._prepare_for_restoring_helm(commander, master_ip)
+            helm_info['script_path'] = helm_inst_script_path
+
         commander.close_session()
         if len(fixed_master_infos) + len(not_fixed_master_ssh_ips) == 1:
             ha_flag = False
@@ -1829,7 +1946,7 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
             fixed_master_infos, proxy,
             master_username, master_password, vnf_package_path,
             script_path, cluster_ip, pod_cidr, cluster_cidr,
-            kubeadm_token, ssl_ca_cert_hash, ha_flag)
+            kubeadm_token, ssl_ca_cert_hash, ha_flag, helm_info)
         if flag_worker:
             self._fix_worker_node(
                 fixed_worker_infos,
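`_create_vim` above stores `helm_info` in the VIM's `extra` field as a stringified dict rather than nested JSON. The reading side is not part of this change; a minimal sketch of how a consumer could recover the dict, assuming it parses the stored string with `ast.literal_eval`:

```python
import ast

# `extra` as stored by _create_vim: the 'helm_info' value is a stringified
# Python dict (values here are illustrative).
extra = {"helm_info": "{'masternode_ip': ['192.168.0.1'], "
                      "'masternode_username': 'dummy_user', "
                      "'masternode_password': 'dummy_pass'}"}

# Assumed reading side: literal_eval turns the string back into a dict.
helm_info = ast.literal_eval(extra['helm_info'])
print(helm_info['masternode_ip'])  # ['192.168.0.1']
```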
@@ -11,7 +11,12 @@ Content-Type: application/sh
 Algorithm: SHA-256
 Hash: bc859fb8ffb9f92a19139553bdd077428a2c9572196e5844f1c912a7a822c249

+Name: Scripts/install_helm.sh
+Content-Type: application/sh
+Algorithm: SHA-256
+Hash: 4af332b05e3e85662d403208e1e6d82e5276cbcd3b82a3562d2e3eb80d1ef714
+
 Name: Scripts/kubernetes_mgmt.py
 Content-Type: text/x-python
 Algorithm: SHA-256
-Hash: b8c558cad30f219634a668f84d6e04998949e941f3909b5c60374b84dff58545
+Hash: bf651994ca7422aadeb0a12fed179f44ab709029c2eee9b2b9c7e8cbf339a66d
@@ -38,6 +38,7 @@ class Vim(model_base.BASE,
         ), nullable=False)
     vim_auth = orm.relationship('VimAuth')
     status = sa.Column(sa.String(255), nullable=False)
+    extra = sa.Column(types.Json, nullable=True)

     __table_args__ = (
         schema.UniqueConstraint(
@@ -31,7 +31,7 @@ from tacker.plugins.common import constants

 VIM_ATTRIBUTES = ('id', 'type', 'tenant_id', 'name', 'description',
                   'placement_attr', 'shared', 'is_default',
-                  'created_at', 'updated_at', 'status')
+                  'created_at', 'updated_at', 'status', 'extra')

 VIM_AUTH_ATTRIBUTES = ('auth_url', 'vim_project', 'password', 'auth_cred')

@@ -87,6 +87,7 @@ class NfvoPluginDb(nfvo.NFVOPluginBase, db_base.CommonDbMixin):
             placement_attr=vim.get('placement_attr'),
             is_default=vim.get('is_default'),
             status=vim.get('status'),
+            extra=vim.get('extra'),
             deleted_at=datetime.min)
         context.session.add(vim_db)
         vim_auth_db = nfvo_db.VimAuth(

@@ -158,6 +159,8 @@ class NfvoPluginDb(nfvo.NFVOPluginBase, db_base.CommonDbMixin):
         if 'placement_attr' in vim:
             vim_db.update(
                 {'placement_attr': vim.get('placement_attr')})
+        if 'extra' in vim:
+            vim_db.update({'extra': vim.get('extra')})
         vim_auth_db = (self._model_query(
             context, nfvo_db.VimAuth).filter(
             nfvo_db.VimAuth.vim_id == vim_id).with_for_update().one())
@@ -129,6 +129,18 @@ class CNFHealWaitFailed(exceptions.TackerException):
     message = _('%(reason)s')


+class InvalidVimConnectionInfo(exceptions.TackerException):
+    message = _('Invalid vim_connection_info: %(reason)s')
+
+
+class HelmClientRemoteCommandError(exceptions.TackerException):
+    message = _('Failed to execute remote command.')
+
+
+class HelmClientOtherError(exceptions.TackerException):
+    message = _('An error occurred in HelmClient: %(error_message)s.')
+
+
 class ServiceTypeNotFound(exceptions.NotFound):
     message = _('service type %(service_type_id)s could not be found')
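The new exception classes follow the existing TackerException pattern, where `message` is a template interpolated from keyword arguments at raise time. A small, self-contained, hypothetical sketch of that pattern (the real raising call sites live in the Helm client code, outside this hunk):

```python
# Hypothetical sketch of the oslo-style exception pattern these classes
# follow; the base class here is a stand-in, not Tacker's actual one.
class TackerException(Exception):
    message = 'unknown'

    def __init__(self, **kwargs):
        # Fill the message template from keyword arguments.
        super().__init__(self.message % kwargs)


class HelmClientOtherError(TackerException):
    message = 'An error occurred in HelmClient: %(error_message)s.'


try:
    raise HelmClientOtherError(error_message='chart not found')
except HelmClientOtherError as e:
    print(e)  # An error occurred in HelmClient: chart not found.
```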
@@ -30,6 +30,8 @@ class VimConnectionInfo(base.TackerObject, base.TackerPersistentObject):
                                                         default={}),
         'access_info': fields.DictOfNullableStringsField(nullable=True,
                                                          default={}),
+        'extra': fields.DictOfNullableStringsField(nullable=True,
+                                                   default={}),
     }

     @classmethod

@@ -39,11 +41,13 @@ class VimConnectionInfo(base.TackerObject, base.TackerPersistentObject):
         vim_type = data_dict.get('vim_type')
         access_info = data_dict.get('access_info', {})
         interface_info = data_dict.get('interface_info', {})
+        extra = data_dict.get('extra', {})
         obj = cls(id=id,
                   vim_id=vim_id,
                   vim_type=vim_type,
                   interface_info=interface_info,
-                  access_info=access_info)
+                  access_info=access_info,
+                  extra=extra)
         return obj

     @classmethod

@@ -62,4 +66,5 @@ class VimConnectionInfo(base.TackerObject, base.TackerPersistentObject):
             'vim_id': self.vim_id,
             'vim_type': self.vim_type,
             'interface_info': self.interface_info,
-            'access_info': self.access_info}
+            'access_info': self.access_info,
+            'extra': self.extra}
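With `extra` added to the object's fields, the value survives the constructor and the `to_dict` serialization path shown above. A minimal sketch mirroring how the unit tests further down in this change construct the object (values are illustrative):

```python
# Mirrors the unit-test usage later in this change; values illustrative.
vim_connection_info = objects.VimConnectionInfo(
    id='f8c35bd0-4d67-4436-9f11-14b8a84c92aa',
    vim_id='fake_vim_id',
    vim_type='kubernetes',
    access_info={'auth_url': 'http://fake_url:6443'},
    interface_info=None,
    extra={'helm_info': "{'masternode_ip': ['192.168.0.1']}"})

# 'extra' now appears in the serialized form alongside access_info.
assert vim_connection_info.to_dict()['extra'] == vim_connection_info.extra
```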
@@ -0,0 +1,151 @@
tosca_definitions_version: tosca_simple_yaml_1_2

description: Sample CNF with helmchart

imports:
  - etsi_nfv_sol001_common_types.yaml
  - etsi_nfv_sol001_vnfd_types.yaml
  - sample_vnfd_types.yaml

topology_template:
  inputs:
    descriptor_id:
      type: string
    descriptor_version:
      type: string
    provider:
      type: string
    product_name:
      type: string
    software_version:
      type: string
    vnfm_info:
      type: list
      entry_schema:
        type: string
    flavour_id:
      type: string
    flavour_description:
      type: string

  substitution_mappings:
    node_type: company.provider.VNF
    properties:
      flavour_id: helmchart
    requirements:
      virtual_link_external: []

  node_templates:
    VNF:
      type: company.provider.VNF
      properties:
        flavour_description: A flavour for single resources

    VDU1:
      type: tosca.nodes.nfv.Vdu.Compute
      properties:
        name: vdu1-localhelm
        description: kubernetes resource as VDU1
        vdu_profile:
          min_number_of_instances: 1
          max_number_of_instances: 3

    VDU2:
      type: tosca.nodes.nfv.Vdu.Compute
      properties:
        name: vdu2-apache
        description: kubernetes resource as VDU2
        vdu_profile:
          min_number_of_instances: 1
          max_number_of_instances: 3

  policies:
    - scaling_aspects:
        type: tosca.policies.nfv.ScalingAspects
        properties:
          aspects:
            vdu1_aspect:
              name: vdu1_aspect
              description: vdu1 scaling aspect
              max_scale_level: 2
              step_deltas:
                - delta_1
            vdu2_aspect:
              name: vdu2_aspect
              description: vdu2 scaling aspect
              max_scale_level: 2
              step_deltas:
                - delta_1

    - vdu1_initial_delta:
        type: tosca.policies.nfv.VduInitialDelta
        properties:
          initial_delta:
            number_of_instances: 1
        targets: [ VDU1 ]

    - vdu1_scaling_aspect_deltas:
        type: tosca.policies.nfv.VduScalingAspectDeltas
        properties:
          aspect: vdu1_aspect
          deltas:
            delta_1:
              number_of_instances: 1
        targets: [ VDU1 ]

    - vdu2_initial_delta:
        type: tosca.policies.nfv.VduInitialDelta
        properties:
          initial_delta:
            number_of_instances: 1
        targets: [ VDU2 ]

    - vdu2_scaling_aspect_deltas:
        type: tosca.policies.nfv.VduScalingAspectDeltas
        properties:
          aspect: vdu2_aspect
          deltas:
            delta_1:
              number_of_instances: 1
        targets: [ VDU2 ]

    - instantiation_levels:
        type: tosca.policies.nfv.InstantiationLevels
        properties:
          levels:
            instantiation_level_1:
              description: Smallest size
              scale_info:
                vdu1_aspect:
                  scale_level: 0
                vdu2_aspect:
                  scale_level: 0
            instantiation_level_2:
              description: Largest size
              scale_info:
                vdu1_aspect:
                  scale_level: 2
                vdu2_aspect:
                  scale_level: 2
          default_level: instantiation_level_1

    - vdu1_instantiation_levels:
        type: tosca.policies.nfv.VduInstantiationLevels
        properties:
          levels:
            instantiation_level_1:
              number_of_instances: 1
            instantiation_level_2:
              number_of_instances: 3
        targets: [ VDU1 ]

    - vdu2_instantiation_levels:
        type: tosca.policies.nfv.VduInstantiationLevels
        properties:
          levels:
            instantiation_level_1:
              number_of_instances: 1
            instantiation_level_2:
              number_of_instances: 3
        targets: [ VDU2 ]
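The `vdu1_aspect`/`vdu2_aspect` names and `max_scale_level: 2` declared above are what a scale request refers to at runtime. A sketch of the request body the functional test below posts to the scale endpoint (one step consumes one `delta_1`, i.e. one instance):

```python
# Body posted to /vnflcm/v1/vnf_instances/{id}/scale; aspectId must match
# a scaling aspect declared in the VNFD above.
scale_request = {
    "type": "SCALE_OUT",       # or "SCALE_IN"
    "aspectId": "vdu1_aspect",
    "numberOfSteps": 1,        # bounded by max_scale_level: 2
}
```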
@@ -0,0 +1,31 @@
tosca_definitions_version: tosca_simple_yaml_1_2

description: Sample CNF with Helm

imports:
  - etsi_nfv_sol001_common_types.yaml
  - etsi_nfv_sol001_vnfd_types.yaml
  - sample_vnfd_types.yaml
  - sample_vnfd_df_helmchart.yaml

topology_template:
  inputs:
    selected_flavour:
      type: string
      description: VNF deployment flavour selected by the consumer. It is provided in the API

  node_templates:
    VNF:
      type: company.provider.VNF
      properties:
        flavour_id: { get_input: selected_flavour }
        descriptor_id: b1bb0ce7-ebca-4fa7-95ed-4840d70a1177
        provider: Company
        product_name: Sample CNF
        software_version: '1.0'
        descriptor_version: '1.0'
        vnfm_info:
          - Tacker
      requirements:
        #- virtual_link_external # mapped in lower-level templates
        #- virtual_link_internal # mapped in lower-level templates
@@ -0,0 +1,53 @@
tosca_definitions_version: tosca_simple_yaml_1_2

description: VNF type definition

imports:
  - etsi_nfv_sol001_common_types.yaml
  - etsi_nfv_sol001_vnfd_types.yaml

node_types:
  company.provider.VNF:
    derived_from: tosca.nodes.nfv.VNF
    properties:
      descriptor_id:
        type: string
        constraints: [ valid_values: [ b1bb0ce7-ebca-4fa7-95ed-4840d70a1177 ] ]
        default: b1bb0ce7-ebca-4fa7-95ed-4840d70a1177
      descriptor_version:
        type: string
        constraints: [ valid_values: [ '1.0' ] ]
        default: '1.0'
      provider:
        type: string
        constraints: [ valid_values: [ 'Company' ] ]
        default: 'Company'
      product_name:
        type: string
        constraints: [ valid_values: [ 'Sample CNF' ] ]
        default: 'Sample CNF'
      software_version:
        type: string
        constraints: [ valid_values: [ '1.0' ] ]
        default: '1.0'
      vnfm_info:
        type: list
        entry_schema:
          type: string
          constraints: [ valid_values: [ Tacker ] ]
        default: [ Tacker ]
      flavour_id:
        type: string
        constraints: [ valid_values: [ helmchart ] ]
        default: helmchart
      flavour_description:
        type: string
        default: ""
    requirements:
      - virtual_link_external:
          capability: tosca.capabilities.nfv.VirtualLinkable
      - virtual_link_internal:
          capability: tosca.capabilities.nfv.VirtualLinkable
    interfaces:
      Vnflcm:
        type: tosca.interfaces.nfv.Vnflcm

Binary file not shown.
@@ -0,0 +1,9 @@
TOSCA-Meta-File-Version: 1.0
Created-by: dummy_user
CSAR-Version: 1.1
Entry-Definitions: Definitions/sample_vnfd_top.yaml

Name: Files/kubernetes/localhelm-0.1.0.tgz
Content-Type: application/tar+gzip
Algorithm: SHA-256
Hash: 837fcfb73e5fc58572851a80a0143373d9d28ec37bd3bdf52c4d7d34b97592d5
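Each manifest entry pairs an artifact path with its SHA-256 digest. A minimal sketch of recomputing such a digest for verification against the `Hash` entry above:

```python
import hashlib

# Recompute the SHA-256 digest of a CSAR artifact to compare against the
# manifest's Hash field.
def artifact_sha256(path):
    digest = hashlib.sha256()
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(8192), b''):
            digest.update(chunk)
    return digest.hexdigest()

print(artifact_sha256('Files/kubernetes/localhelm-0.1.0.tgz'))
```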
@@ -0,0 +1,447 @@
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os
import time

from oslo_serialization import jsonutils
from oslo_utils import uuidutils
from sqlalchemy import desc
from sqlalchemy.orm import joinedload

from tacker.common import exceptions
from tacker import context
from tacker.db import api as db_api
from tacker.db.db_sqlalchemy import api
from tacker.db.db_sqlalchemy import models
from tacker.objects import fields
from tacker.objects import vnf_lcm_op_occs
from tacker.tests.functional import base
from tacker.tests import utils

VNF_PACKAGE_UPLOAD_TIMEOUT = 300
VNF_INSTANTIATE_TIMEOUT = 600
VNF_TERMINATE_TIMEOUT = 600
VNF_HEAL_TIMEOUT = 600
VNF_SCALE_TIMEOUT = 600
RETRY_WAIT_TIME = 5


def _create_and_upload_vnf_package(tacker_client, csar_package_name,
                                   user_defined_data):
    # create vnf package
    body = jsonutils.dumps({"userDefinedData": user_defined_data})
    resp, vnf_package = tacker_client.do_request(
        '/vnfpkgm/v1/vnf_packages', "POST", body=body)

    # upload vnf package
    csar_package_path = "../../../etc/samples/etsi/nfv/{}".format(
        csar_package_name)
    file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                             csar_package_path))

    # Generating unique vnfd id. This is required when multiple workers
    # are running concurrently. The call below creates a new temporary
    # CSAR with unique vnfd id.
    file_path, uniqueid = utils.create_csar_with_unique_vnfd_id(file_path)

    with open(file_path, 'rb') as file_object:
        resp, resp_body = tacker_client.do_request(
            '/vnfpkgm/v1/vnf_packages/{}/package_content'.format(
                vnf_package['id']),
            "PUT", body=file_object, content_type='application/zip')

    # wait for onboard
    start_time = int(time.time())
    show_url = os.path.join('/vnfpkgm/v1/vnf_packages', vnf_package['id'])
    vnfd_id = None
    while True:
        resp, body = tacker_client.do_request(show_url, "GET")
        if body['onboardingState'] == "ONBOARDED":
            vnfd_id = body['vnfdId']
            break

        if ((int(time.time()) - start_time) > VNF_PACKAGE_UPLOAD_TIMEOUT):
            raise Exception("Failed to onboard vnf package, process could not"
                            " be completed within {} seconds".format(
                                VNF_PACKAGE_UPLOAD_TIMEOUT))

        time.sleep(RETRY_WAIT_TIME)

    # remove temporarily created CSAR file
    os.remove(file_path)
    return vnf_package['id'], vnfd_id


class VnfLcmKubernetesHelmTest(base.BaseTackerTest):

    @classmethod
    def setUpClass(cls):
        cls.tacker_client = base.BaseTackerTest.tacker_http_client()
        cls.vnf_package_resource, cls.vnfd_id_resource = \
            _create_and_upload_vnf_package(
                cls.tacker_client, "test_cnf_helmchart",
                {"key": "sample_helmchart_functional"})
        cls.vnf_instance_ids = []
        super(VnfLcmKubernetesHelmTest, cls).setUpClass()

    @classmethod
    def tearDownClass(cls):
        # Update vnf package operational state to DISABLED
        update_req_body = jsonutils.dumps({
            "operationalState": "DISABLED"})
        base_path = "/vnfpkgm/v1/vnf_packages"
        for package_id in [cls.vnf_package_resource]:
            resp, resp_body = cls.tacker_client.do_request(
                '{base_path}/{id}'.format(id=package_id,
                                          base_path=base_path),
                "PATCH", content_type='application/json',
                body=update_req_body)

            # Delete vnf package
            url = '/vnfpkgm/v1/vnf_packages/{}'.format(package_id)
            cls.tacker_client.do_request(url, "DELETE")

        super(VnfLcmKubernetesHelmTest, cls).tearDownClass()

    def setUp(self):
        super(VnfLcmKubernetesHelmTest, self).setUp()
        self.base_vnf_instances_url = "/vnflcm/v1/vnf_instances"
        self.base_vnf_lcm_op_occs_url = "/vnflcm/v1/vnf_lcm_op_occs"
        self.context = context.get_admin_context()
        vim_list = self.client.list_vims()
        if not vim_list:
            self.skipTest("Vims are not configured")

        vim_id = 'vim-kubernetes'
        vim = self.get_vim(vim_list, vim_id)
        if not vim:
            self.skipTest("Kubernetes VIM '{}' is missing".format(vim_id))
        self.vim_id = vim['id']

    def _instantiate_vnf_instance_request(
            self, flavour_id, vim_id=None, additional_param=None):
        request_body = {"flavourId": flavour_id}

        if vim_id:
            request_body["vimConnectionInfo"] = [
                {"id": uuidutils.generate_uuid(),
                 "vimId": vim_id,
                 "vimType": "kubernetes"}]

        if additional_param:
            request_body["additionalParams"] = additional_param

        return request_body

    def _create_vnf_instance(self, vnfd_id, vnf_instance_name=None,
                             vnf_instance_description=None):
        request_body = {'vnfdId': vnfd_id}
        if vnf_instance_name:
            request_body['vnfInstanceName'] = vnf_instance_name

        if vnf_instance_description:
            request_body['vnfInstanceDescription'] = vnf_instance_description

        resp, response_body = self.http_client.do_request(
            self.base_vnf_instances_url, "POST",
            body=jsonutils.dumps(request_body))
        return resp, response_body

    def _delete_wait_vnf_instance(self, id):
        url = os.path.join("/vnflcm/v1/vnf_instances", id)
        start_time = int(time.time())
        while True:
            resp, body = self.tacker_client.do_request(url, "DELETE")
            if 204 == resp.status_code:
                break

            if ((int(time.time()) - start_time) > VNF_TERMINATE_TIMEOUT):
                raise Exception("Failed to delete vnf instance, process could"
                                " not be completed within {} seconds".format(
                                    VNF_TERMINATE_TIMEOUT))

            time.sleep(RETRY_WAIT_TIME)

    def _show_vnf_instance(self, id):
        show_url = os.path.join("/vnflcm/v1/vnf_instances", id)
        resp, vnf_instance = self.tacker_client.do_request(show_url, "GET")

        return vnf_instance

    def _vnf_instance_wait(
            self, id,
            instantiation_state=fields.VnfInstanceState.INSTANTIATED,
            timeout=VNF_INSTANTIATE_TIMEOUT):
        show_url = os.path.join("/vnflcm/v1/vnf_instances", id)
        start_time = int(time.time())
        while True:
            resp, body = self.tacker_client.do_request(show_url, "GET")
            if body['instantiationState'] == instantiation_state:
                break

            if ((int(time.time()) - start_time) > timeout):
                raise Exception("Failed to wait vnf instance, process could"
                                " not be completed within {} seconds".format(
                                    timeout))

            time.sleep(RETRY_WAIT_TIME)

    def _instantiate_vnf_instance(self, id, request_body):
        url = os.path.join(self.base_vnf_instances_url, id, "instantiate")
        resp, body = self.http_client.do_request(
            url, "POST", body=jsonutils.dumps(request_body))
        self.assertEqual(202, resp.status_code)
        self._vnf_instance_wait(id)

    def _create_and_instantiate_vnf_instance(self, flavour_id,
                                             additional_params):
        # create vnf instance
        vnf_instance_name = "test_vnf_instance_for_cnf_heal-{}".format(
            uuidutils.generate_uuid())
        vnf_instance_description = "vnf instance for cnf heal testing"
        resp, vnf_instance = self._create_vnf_instance(
            self.vnfd_id_resource, vnf_instance_name=vnf_instance_name,
            vnf_instance_description=vnf_instance_description)

        # instantiate vnf instance
        additional_param = additional_params
        request_body = self._instantiate_vnf_instance_request(
            flavour_id, vim_id=self.vim_id, additional_param=additional_param)

        self._instantiate_vnf_instance(vnf_instance['id'], request_body)
        vnf_instance = self._show_vnf_instance(vnf_instance['id'])
        self.vnf_instance_ids.append(vnf_instance['id'])

        return vnf_instance

    def _terminate_vnf_instance(self, id):
        # Terminate vnf forcefully
        request_body = {
            "terminationType": fields.VnfInstanceTerminationType.FORCEFUL,
        }
        url = os.path.join(self.base_vnf_instances_url, id, "terminate")
        resp, body = self.http_client.do_request(
            url, "POST", body=jsonutils.dumps(request_body))
        self.assertEqual(202, resp.status_code)
        self._vnf_instance_wait(
            id,
            instantiation_state=fields.VnfInstanceState.NOT_INSTANTIATED,
            timeout=VNF_TERMINATE_TIMEOUT)

    def _delete_vnf_instance(self, id):
        self._delete_wait_vnf_instance(id)

        # verify vnf instance is deleted
        url = os.path.join(self.base_vnf_instances_url, id)
        resp, body = self.http_client.do_request(url, "GET")
        self.assertEqual(404, resp.status_code)

    def _scale_vnf_instance(self, id, type, aspect_id,
                            number_of_steps=1):
        url = os.path.join(self.base_vnf_instances_url, id, "scale")
        # generate body
        request_body = {
            "type": type,
            "aspectId": aspect_id,
            "numberOfSteps": number_of_steps}
        resp, body = self.http_client.do_request(
            url, "POST", body=jsonutils.dumps(request_body))
        self.assertEqual(202, resp.status_code)

    def _heal_vnf_instance(self, id, vnfc_instance_id):
        url = os.path.join(self.base_vnf_instances_url, id, "heal")
        # generate body
        request_body = {
            "vnfcInstanceId": vnfc_instance_id}
        resp, body = self.http_client.do_request(
            url, "POST", body=jsonutils.dumps(request_body))
        self.assertEqual(202, resp.status_code)

    @db_api.context_manager.reader
    def _vnf_notify_get_by_id(self, context, vnf_instance_id,
                              columns_to_join=None):
        query = api.model_query(
            context, models.VnfLcmOpOccs,
            read_deleted="no", project_only=True).filter_by(
            vnf_instance_id=vnf_instance_id).order_by(
            desc("created_at"))

        if columns_to_join:
            for column in columns_to_join:
                query = query.options(joinedload(column))

        db_vnflcm_op_occ = query.first()

        if not db_vnflcm_op_occ:
            raise exceptions.VnfInstanceNotFound(id=vnf_instance_id)

        vnflcm_op_occ = vnf_lcm_op_occs.VnfLcmOpOcc.obj_from_db_obj(
            context, db_vnflcm_op_occ)
        return vnflcm_op_occ

    def _wait_vnflcm_op_occs(
            self, context, vnf_instance_id,
            operation_state='COMPLETED'):
        start_time = int(time.time())
        while True:
            vnflcm_op_occ = self._vnf_notify_get_by_id(
                context, vnf_instance_id)

            if vnflcm_op_occ.operation_state == operation_state:
                break

            if ((int(time.time()) - start_time) > VNF_HEAL_TIMEOUT):
                raise Exception("Failed to wait heal instance")

            time.sleep(RETRY_WAIT_TIME)

    def _get_vnfc_resource_info(self, vnf_instance):
        inst_vnf_info = vnf_instance['instantiatedVnfInfo']
        vnfc_resource_info = inst_vnf_info['vnfcResourceInfo']
        return vnfc_resource_info

    def _test_scale_cnf(self, vnf_instance):
        """Test scale in/out CNF"""
        def _test_scale(id, type, aspect_id, previous_level,
                        delta_num=1, number_of_steps=1):
            # scale operation
            self._scale_vnf_instance(id, type, aspect_id, number_of_steps)
            # wait vnflcm_op_occs.operation_state become COMPLETE
            self._wait_vnflcm_op_occs(self.context, id)
            # check scaleStatus after scale operation
            vnf_instance = self._show_vnf_instance(id)
            scale_status_after = \
                vnf_instance['instantiatedVnfInfo']['scaleStatus']
            if type == 'SCALE_OUT':
                expected_level = previous_level + number_of_steps
            else:
                expected_level = previous_level - number_of_steps
            for status in scale_status_after:
                if status.get('aspectId') == aspect_id:
                    self.assertEqual(status.get('scaleLevel'), expected_level)
                    previous_level = status.get('scaleLevel')

            return previous_level

        aspect_id = "vdu1_aspect"
        scale_status_initial = \
            vnf_instance['instantiatedVnfInfo']['scaleStatus']
        self.assertTrue(len(scale_status_initial) > 0)
        for status in scale_status_initial:
            self.assertIsNotNone(status.get('aspectId'))
            self.assertIsNotNone(status.get('scaleLevel'))
            if status.get('aspectId') == aspect_id:
                previous_level = status.get('scaleLevel')

        # test scale out
        previous_level = _test_scale(
            vnf_instance['id'], 'SCALE_OUT', aspect_id, previous_level)

        # test scale in
        previous_level = _test_scale(
            vnf_instance['id'], 'SCALE_IN', aspect_id, previous_level)

    def _test_heal_cnf_with_sol002(self, vnf_instance):
        """Test heal as per SOL002 for CNF"""
        vnf_instance = self._show_vnf_instance(vnf_instance['id'])
        before_vnfc_rscs = self._get_vnfc_resource_info(vnf_instance)

        # get vnfc_instance_id of heal target
        before_pod_name = dict()
        vnfc_instance_id = list()
        for vnfc_rsc in before_vnfc_rscs:
            if vnfc_rsc['vduId'] == "vdu1":
                before_pod_name['vdu1'] = \
                    vnfc_rsc['computeResource']['resourceId']
            elif vnfc_rsc['vduId'] == "vdu2":
                before_pod_name['vdu2'] = \
                    vnfc_rsc['computeResource']['resourceId']
            vnfc_instance_id.append(vnfc_rsc['id'])

        # test heal SOL-002 (partial heal)
        self._heal_vnf_instance(vnf_instance['id'], vnfc_instance_id)
        # wait vnflcm_op_occs.operation_state become COMPLETE
        self._wait_vnflcm_op_occs(self.context, vnf_instance['id'])
        # check vnfcResourceInfo after heal operation
        vnf_instance = self._show_vnf_instance(vnf_instance['id'])
        after_vnfc_rscs = self._get_vnfc_resource_info(vnf_instance)
        self.assertEqual(len(before_vnfc_rscs), len(after_vnfc_rscs))
        for vnfc_rsc in after_vnfc_rscs:
            compute_resource = vnfc_rsc['computeResource']
            after_pod_name = compute_resource['resourceId']
            if vnfc_rsc['vduId'] == "vdu1":
                # check stored pod name is changed (vdu1)
                self.assertNotEqual(after_pod_name, before_pod_name['vdu1'])
            elif vnfc_rsc['vduId'] == "vdu2":
                # check stored pod name is changed (vdu2)
                self.assertNotEqual(after_pod_name, before_pod_name['vdu2'])

    def _test_heal_cnf_with_sol003(self, vnf_instance):
        """Test heal as per SOL003 for CNF"""
        vnf_instance = self._show_vnf_instance(vnf_instance['id'])
        before_vnfc_rscs = self._get_vnfc_resource_info(vnf_instance)

        # test heal SOL-003 (entire heal)
        vnfc_instance_id = []
        self._heal_vnf_instance(vnf_instance['id'], vnfc_instance_id)
        # wait vnflcm_op_occs.operation_state become COMPLETE
        self._wait_vnflcm_op_occs(self.context, vnf_instance['id'])
        # check vnfcResourceInfo after heal operation
        vnf_instance = self._show_vnf_instance(vnf_instance['id'])
        after_vnfc_rscs = self._get_vnfc_resource_info(vnf_instance)
        self.assertEqual(len(before_vnfc_rscs), len(after_vnfc_rscs))
        # check id and pod name (as computeResource.resourceId) is changed
        for before_vnfc_rsc in before_vnfc_rscs:
            for after_vnfc_rsc in after_vnfc_rscs:
                self.assertNotEqual(
                    before_vnfc_rsc['id'], after_vnfc_rsc['id'])
                self.assertNotEqual(
                    before_vnfc_rsc['computeResource']['resourceId'],
                    after_vnfc_rsc['computeResource']['resourceId'])

    def test_vnflcm_with_helmchart(self):
        # use def-files of singleton Pod and Deployment (replicas=2)
        helmchartfile_path = "Files/kubernetes/localhelm-0.1.0.tgz"
        inst_additional_param = {
            "namespace": "default",
            "use_helm": "true",
            "using_helm_install_param": [
                {
                    "exthelmchart": "false",
                    "helmchartfile_path": helmchartfile_path,
                    "helmreleasename": "vdu1",
                    "helmparameter": [
                        "service.port=8081"
                    ]
                },
                {
                    "exthelmchart": "true",
                    "helmreleasename": "vdu2",
                    "helmrepositoryname": "bitnami",
                    "helmchartname": "apache",
                    "exthelmrepo_url": "https://charts.bitnami.com/bitnami"
                }
            ]
        }
        vnf_instance = self._create_and_instantiate_vnf_instance(
            "helmchart", inst_additional_param)
        self._test_scale_cnf(vnf_instance)
        self._test_heal_cnf_with_sol002(vnf_instance)
        self._test_heal_cnf_with_sol003(vnf_instance)

        self._terminate_vnf_instance(vnf_instance['id'])
        self._delete_vnf_instance(vnf_instance['id'])
@@ -736,7 +736,7 @@ def get_dummy_vim_connection_info():
         'user_domain_name': 'Default', 'username': 'admin'},
     'created_at': '', 'deleted': False, 'deleted_at': '',
     'id': 'fake_id', 'updated_at': '',
-    'vim_id': 'fake_vim_id', 'vim_type': 'openstack'}
+    'vim_id': 'fake_vim_id', 'vim_type': 'openstack', 'extra': {}}


 def get_dummy_instantiate_vnf_request(**updates):

@@ -1445,7 +1445,8 @@ VNFLCMOPOCC_RESPONSE = {
         "vimId": 'f8c35bd0-4d67-4436-9f11-14b8a84c92aa',
         "vimType": 'openstack',
         'interfaceInfo': {},
-        "accessInfo": {"key1": 'value1', "key2": 'value2'}}],
+        "accessInfo": {"key1": 'value1', "key2": 'value2'},
+        "extra": {}}],
     'vimConnectionInfoDeleteIds': ['f8c35bd0-4d67-4436-9f11-14b8a84c92bb'],
     'vnfPkgId': 'f26f181d-7891-4720-b022-b074ec1733ef',
     'vnfInstanceName': 'fake_name',
@@ -198,7 +198,8 @@ class TestController(base.TestCase):
             'vim_type': 'test',
             'vim_auth': {'username': 'test', 'password': 'test'},
             'placement_attr': {'region': 'TestRegionOne'},
-            'tenant': 'test'
+            'tenant': 'test',
+            'extra': {}
         }
         self.context = context.get_admin_context()

@@ -1237,14 +1238,16 @@ class TestController(base.TestCase):
                 "region": "RegionOne",
                 "password": "devstack",
                 "tenant": "85d12da99f8246dfae350dbc7334a473",
-            }
+            },
+            "extra": {}
         }

         vim_connection_info = objects.VimConnectionInfo(
             id=vim_info['id'], vim_id=vim_info['vim_id'],
             vim_type=vim_info['vim_type'],
             access_info=vim_info['access_info'],
-            interface_info=vim_info['interface_info'])
+            interface_info=vim_info['interface_info'],
+            extra=vim_info['extra'])

         mock_vnf_by_id.return_value = fakes.return_vnf_instance(
             fields.VnfInstanceState.INSTANTIATED,
@@ -1117,3 +1117,143 @@ def fake_vim_connection_info():
     return vim_connection.VimConnectionInfo(
         vim_type="kubernetes",
         access_info=access_info)
+
+
+def fake_vim_connection_info_with_extra(del_field=None, multi_ip=False):
+    access_info = {
+        'auth_url': 'http://fake_url:6443',
+        'ssl_ca_cert': None}
+    masternode_ip = ["192.168.0.1"]
+    if multi_ip:
+        masternode_ip.append("192.168.0.2")
+
+    helm_info = {
+        'masternode_ip': masternode_ip,
+        'masternode_username': 'dummy_user',
+        'masternode_password': 'dummy_pass'
+    }
+    if del_field and helm_info.get(del_field):
+        del helm_info[del_field]
+    extra = {
+        'helm_info': str(helm_info)
+    }
+    return vim_connection.VimConnectionInfo(
+        vim_type="kubernetes",
+        access_info=access_info,
+        extra=extra)
+
+
+def fake_inst_vnf_req_for_helmchart(external=True, local=True, namespace=None):
+    additional_params = {"use_helm": "true"}
+    using_helm_install_param = list()
+    if external:
+        using_helm_install_param.append(
+            {
+                "exthelmchart": "true",
+                "helmreleasename": "myrelease-ext",
+                "helmrepositoryname": "sample-charts",
+                "helmchartname": "mychart-ext",
+                "exthelmrepo_url": "http://helmrepo.example.com/sample-charts"
+            }
+        )
+    if local:
+        using_helm_install_param.append(
+            {
+                "exthelmchart": "false",
+                "helmchartfile_path": "Files/kubernetes/localhelm-0.1.0.tgz",
+                "helmreleasename": "myrelease-local",
+                "helmparameter": [
+                    "key1=value1",
+                    "key2=value2"
+                ]
+            }
+        )
+    additional_params['using_helm_install_param'] = using_helm_install_param
+    if namespace:
+        additional_params['namespace'] = namespace
+
+    return objects.InstantiateVnfRequest(additional_params=additional_params)
+
+
+def execute_cmd_helm_client(*args, **kwargs):
+    ssh_command = args[0]
+    if 'helm get manifest' in ssh_command:
+        result = [
+            '---\n',
+            '# Source: localhelm/templates/deployment.yaml\n',
+            'apiVersion: apps/v1\n',
+            'kind: Deployment\n',
+            'metadata:\n',
+            '  name: vdu1\n',
+            'spec:\n',
+            '  replicas: 1\n',
+            '  selector:\n',
+            '    matchLabels:\n',
+            '      app: webserver\n',
+            '  template:\n',
+            '    metadata:\n'
+            '      labels:\n'
+            '        app: webserver\n'
+            '    spec:\n',
+            '      containers:\n',
+            '      - name: nginx\n'
+        ]
+    else:
+        result = ""
+    return result
+
+
+def fake_k8s_objs_deployment_for_helm():
+    obj = [
+        {
+            'status': 'Creating',
+            'object': fake_v1_deployment_for_helm()
+        }
+    ]
+
+    return obj
+
+
+def fake_v1_deployment_for_helm():
+    return client.V1Deployment(
+        api_version='apps/v1',
+        kind='Deployment',
+        metadata=client.V1ObjectMeta(
+            name='vdu1',
+        ),
+        status=client.V1DeploymentStatus(
+            replicas=1,
+            ready_replicas=1
+        ),
+        spec=client.V1DeploymentSpec(
+            replicas=1,
+            selector=client.V1LabelSelector(
+                match_labels={'app': 'webserver'}
+            ),
+            template=client.V1PodTemplateSpec(
+                metadata=client.V1ObjectMeta(
+                    labels={'app': 'webserver'}
+                ),
+                spec=client.V1PodSpec(
+                    containers=[
+                        client.V1Container(
+                            name='nginx'
+                        )
+                    ]
+                )
+            )
+        )
+    )
+
+
+def fake_k8s_vim_obj():
+    vim_obj = {'vim_id': '76107920-e588-4865-8eca-f33a0f827071',
+               'vim_name': 'fake_k8s_vim',
+               'vim_auth': {
+                   'auth_url': 'http://localhost:6443',
+                   'password': 'test_pw',
+                   'username': 'test_user',
+                   'project_name': 'test_project'},
+               'vim_type': 'kubernetes',
+               'extra': {}}
+    return vim_obj
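For orientation, the two entry shapes built by fake_inst_vnf_req_for_helmchart() map onto different Helm operations on the master node. A rough sketch of the resulting CLI strings, mirroring HelmClient.install further below (the chart path for the local case is an assumed unpacked location, shown only for illustration):

release = "myrelease-local"
chart = "/var/tacker/helm/localhelm"  # assumed path of the unpacked chart
cmd = "helm install {} {}".format(release, chart)
for param in ["key1=value1", "key2=value2"]:
    cmd += " --set {}".format(param)
# cmd is now:
#   helm install myrelease-local /var/tacker/helm/localhelm \
#       --set key1=value1 --set key2=value2
# an external entry instead resolves through a repository, roughly:
#   helm repo add sample-charts http://helmrepo.example.com/sample-charts
#   helm install myrelease-ext sample-charts/mychart-ext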
@@ -0,0 +1,509 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import copy
import eventlet
import os
import paramiko

from ddt import ddt
from kubernetes import client
from oslo_serialization import jsonutils
from tacker import context
from tacker.db.db_sqlalchemy import models
from tacker.extensions import common_services as cs
from tacker.extensions import vnfm
from tacker import objects
from tacker.tests.unit import base
from tacker.tests.unit.db import utils
from tacker.tests.unit.vnflcm import fakes as vnflcm_fakes
from tacker.tests.unit.vnfm.infra_drivers.kubernetes import fakes
from tacker.tests.unit.vnfm.infra_drivers.openstack.fixture_data import \
    fixture_data_utils as fd_utils
from tacker.vnfm.infra_drivers.kubernetes.helm import helm_client
from tacker.vnfm.infra_drivers.kubernetes import kubernetes_driver
from tacker.vnfm import vim_client
from unittest import mock


class FakeRemoteCommandExecutor(mock.Mock):
    def close_session(self):
        return


class FakeCommander(mock.Mock):
    def config(self, is_success, errmsg=None):
        self.is_success = is_success
        self.errmsg = errmsg

    def execute_command(self, *args, **kwargs):
        is_success = self.is_success
        fake_result = FakeCmdResult()
        return_code = 0 if is_success else 1
        stderr, stdout = ('', '') if is_success else ('err', '')
        if self.errmsg:
            stderr = [self.errmsg]
        fake_result.set_std(stderr, stdout, return_code)
        return fake_result


class FakeCmdResult(mock.Mock):
    def set_std(self, stderr, stdout, return_code):
        self.stderr = stderr
        self.stdout = stdout
        self.return_code = return_code

    def get_stderr(self):
        return self.stderr

    def get_stdout(self):
        return self.stdout

    def get_return_code(self):
        return self.return_code


class FakeTransport(mock.Mock):
    pass


@ddt
class TestKubernetesHelm(base.TestCase):
    def setUp(self):
        super(TestKubernetesHelm, self).setUp()
        self.kubernetes = kubernetes_driver.Kubernetes()
        self.kubernetes.STACK_RETRIES = 1
        self.kubernetes.STACK_RETRY_WAIT = 5
        self.k8s_client_dict = fakes.fake_k8s_client_dict()
        self.context = context.get_admin_context()
        self.vnf_instance = fd_utils.get_vnf_instance_object()
        self.package_path = os.path.join(
            os.path.dirname(os.path.abspath(__file__)),
            "../../../../etc/samples/etsi/nfv/test_cnf_helmchart")
        self._mock_remote_command_executor()
        self._mock_transport()
        self.helm_client = helm_client.HelmClient('127.0.0.1', 'user', 'pass')
        self.helm_client.commander = FakeCommander()

    def _mock_remote_command_executor(self):
        self.commander = mock.Mock(wraps=FakeRemoteCommandExecutor())
        fake_commander = mock.Mock()
        fake_commander.return_value = self.commander
        self._mock(
            'tacker.common.cmd_executer.RemoteCommandExecutor',
            fake_commander)

    def _mock_transport(self):
        self.transport = mock.Mock(wraps=FakeTransport())
        fake_transport = mock.Mock()
        fake_transport.return_value = self.transport
        self._mock('paramiko.Transport', fake_transport)

    def _mock(self, target, new=mock.DEFAULT):
        patcher = mock.patch(target, new)
        return patcher.start()

    @mock.patch.object(eventlet, 'monkey_patch')
    def test_execute_command_success(self, mock_monkey_patch):
        self.helm_client.commander.config(True)
        ssh_command = 'helm install'
        timeout = 120
        retry = 1
        self.helm_client._execute_command(
            ssh_command, timeout, retry)

    @mock.patch.object(eventlet, 'monkey_patch')
    def test_execute_command_failed(self, mock_monkey_patch):
        self.helm_client.commander.config(False)
        ssh_command = 'helm install'
        timeout = 120
        retry = 1
        self.assertRaises(vnfm.HelmClientRemoteCommandError,
                          self.helm_client._execute_command,
                          ssh_command, timeout, retry)

    @mock.patch.object(eventlet, 'monkey_patch')
    @mock.patch.object(FakeCommander, 'execute_command')
    def test_execute_command_timeout(self, mock_execute_command,
                                     mock_monkey_patch):
        mock_execute_command.side_effect = eventlet.timeout.Timeout
        ssh_command = 'helm install'
        timeout = 120
        retry = 1
        self.assertRaises(vnfm.HelmClientOtherError,
                          self.helm_client._execute_command,
                          ssh_command, timeout, retry)

    def test_pre_instantiation_vnf_helm(self):
        vnf_instance = fd_utils.get_vnf_instance_object()
        vim_connection_info = fakes.fake_vim_connection_info_with_extra()
        vnf_software_images = None
        vnf_package_path = self.package_path
        instantiate_vnf_req = fakes.fake_inst_vnf_req_for_helmchart()
        vnf_resources = self.kubernetes.pre_instantiation_vnf(
            self.context, vnf_instance, vim_connection_info,
            vnf_software_images,
            instantiate_vnf_req, vnf_package_path)
        self.assertEqual(vnf_resources, {})

    def test_pre_helm_install_with_bool_param(self):
        vnf_instance = fd_utils.get_vnf_instance_object()
        vim_connection_info = fakes.fake_vim_connection_info_with_extra()
        vnf_software_images = None
        vnf_package_path = self.package_path
        instantiate_vnf_req = fakes.fake_inst_vnf_req_for_helmchart()
        instantiate_vnf_req.additional_params['use_helm'] = True
        using_helm_inst_params = instantiate_vnf_req.additional_params[
            'using_helm_install_param']
        using_helm_inst_params[0]['exthelmchart'] = True
        using_helm_inst_params[1]['exthelmchart'] = False
        vnf_resources = self.kubernetes.pre_instantiation_vnf(
            self.context, vnf_instance, vim_connection_info,
            vnf_software_images,
            instantiate_vnf_req, vnf_package_path)
        self.assertEqual(vnf_resources, {})

    def test_pre_helm_install_invalid_vimconnectioninfo_no_helm_info(self):
        vim_connection_info = fakes.fake_vim_connection_info_with_extra()
        del vim_connection_info.extra['helm_info']
        vnf_package_path = self.package_path
        instantiate_vnf_req = fakes.fake_inst_vnf_req_for_helmchart()
        exc = self.assertRaises(vnfm.InvalidVimConnectionInfo,
                                self.kubernetes._pre_helm_install,
                                vim_connection_info, instantiate_vnf_req,
                                vnf_package_path)
        msg = ("Invalid vim_connection_info: "
               "helm_info is missing in vim_connection_info.extra.")
        self.assertEqual(msg, exc.format_message())

    def test_pre_helm_install_invalid_vimconnectioninfo_no_masternode_ip(
            self):
        vim_connection_info = fakes.fake_vim_connection_info_with_extra(
            del_field='masternode_ip')
        vnf_package_path = self.package_path
        instantiate_vnf_req = fakes.fake_inst_vnf_req_for_helmchart()
        exc = self.assertRaises(vnfm.InvalidVimConnectionInfo,
                                self.kubernetes._pre_helm_install,
                                vim_connection_info, instantiate_vnf_req,
                                vnf_package_path)
        msg = ("Invalid vim_connection_info: "
               "content of helm_info is invalid.")
        self.assertEqual(msg, exc.format_message())

    def test_pre_helm_install_invalid_helm_param(self):
        vim_connection_info = fakes.fake_vim_connection_info_with_extra()
        vnf_package_path = self.package_path
        instantiate_vnf_req = fakes.fake_inst_vnf_req_for_helmchart(
            external=True)
        using_helm_inst_params = instantiate_vnf_req.additional_params[
            'using_helm_install_param']
        del using_helm_inst_params[0]['exthelmchart']
        exc = self.assertRaises(cs.InputValuesMissing,
                                self.kubernetes._pre_helm_install,
                                vim_connection_info, instantiate_vnf_req,
                                vnf_package_path)
        msg = ("Parameter input values missing for the key '{param}'".format(
            param='exthelmchart'))
        self.assertEqual(msg, exc.format_message())

    def test_pre_helm_install_empty_helm_param(self):
        vim_connection_info = fakes.fake_vim_connection_info_with_extra()
        vnf_package_path = self.package_path
        instantiate_vnf_req = fakes.fake_inst_vnf_req_for_helmchart(
            external=False, local=False)
        exc = self.assertRaises(cs.InputValuesMissing,
                                self.kubernetes._pre_helm_install,
                                vim_connection_info, instantiate_vnf_req,
                                vnf_package_path)
        msg = ("Parameter input values missing for the key '{param}'".format(
            param='using_helm_install_param'))
        self.assertEqual(msg, exc.format_message())

    def test_pre_helm_install_invalid_chartfile_path(self):
        vim_connection_info = fakes.fake_vim_connection_info_with_extra()
        vnf_package_path = self.package_path
        instantiate_vnf_req = fakes.fake_inst_vnf_req_for_helmchart(
            external=False)
        using_helm_inst_params = instantiate_vnf_req.additional_params[
            'using_helm_install_param']
        using_helm_inst_params[0]['helmchartfile_path'] = 'invalid_path'
        exc = self.assertRaises(vnfm.CnfDefinitionNotFound,
                                self.kubernetes._pre_helm_install,
                                vim_connection_info, instantiate_vnf_req,
                                vnf_package_path)
        msg = ("CNF definition file with path {path} is not found "
               "in vnf_artifacts.").format(
            path=using_helm_inst_params[0]['helmchartfile_path'])
        self.assertEqual(msg, exc.format_message())

    @mock.patch.object(objects.VnfResource, 'create')
    @mock.patch.object(paramiko.Transport, 'close')
    @mock.patch.object(paramiko.SFTPClient, 'put')
    @mock.patch.object(paramiko.SFTPClient, 'from_transport')
    @mock.patch.object(paramiko.Transport, 'connect')
    @mock.patch.object(helm_client.HelmClient, '_execute_command')
    @mock.patch.object(client.AppsV1Api, 'read_namespaced_deployment')
    def test_instantiate_vnf_using_helmchart(
            self, mock_read_namespaced_deployment, mock_command,
            mock_connect, mock_from_transport, mock_put, mock_close,
            mock_vnf_resource_create):
        vnf_instance = fd_utils.get_vnf_instance_object()
        vim_connection_info = fakes.fake_vim_connection_info_with_extra()
        deployment_obj = fakes.fake_v1_deployment_for_helm()
        mock_read_namespaced_deployment.return_value = deployment_obj
        vnfd_dict = fakes.fake_vnf_dict()
        instantiate_vnf_req = fakes.fake_inst_vnf_req_for_helmchart(
            external=False)
        grant_response = None
        base_hot_dict = None
        vnf_package_path = self.package_path
        mock_command.side_effect = fakes.execute_cmd_helm_client
        result = self.kubernetes.instantiate_vnf(
            self.context, vnf_instance, vnfd_dict, vim_connection_info,
            instantiate_vnf_req, grant_response, vnf_package_path,
            base_hot_dict)
        self.assertEqual(
            result,
            "{'namespace': '', 'name': 'vdu1', " +
            "'apiVersion': 'apps/v1', 'kind': 'Deployment', " +
            "'status': 'Create_complete'}")
        self.assertEqual(mock_read_namespaced_deployment.call_count, 1)

    @mock.patch.object(objects.VnfResource, 'create')
    @mock.patch.object(paramiko.Transport, 'close')
    @mock.patch.object(paramiko.SFTPClient, 'put')
    @mock.patch.object(paramiko.SFTPClient, 'from_transport')
    @mock.patch.object(paramiko.Transport, 'connect')
    @mock.patch.object(helm_client.HelmClient, '_execute_command')
    @mock.patch.object(client.AppsV1Api, 'read_namespaced_deployment')
    def test_instantiate_vnf_using_helmchart_with_namespace(
            self, mock_read_namespaced_deployment, mock_command,
            mock_connect, mock_from_transport, mock_put, mock_close,
            mock_vnf_resource_create):
        vnf_instance = fd_utils.get_vnf_instance_object()
        vim_connection_info = fakes.fake_vim_connection_info_with_extra()
        deployment_obj = fakes.fake_v1_deployment_for_helm()
        mock_read_namespaced_deployment.return_value = deployment_obj
        vnfd_dict = fakes.fake_vnf_dict()
        instantiate_vnf_req = fakes.fake_inst_vnf_req_for_helmchart(
            local=False, namespace='dummy_namespace')
        grant_response = None
        base_hot_dict = None
        vnf_package_path = self.package_path
        mock_command.side_effect = fakes.execute_cmd_helm_client
        result = self.kubernetes.instantiate_vnf(
            self.context, vnf_instance, vnfd_dict, vim_connection_info,
            instantiate_vnf_req, grant_response, vnf_package_path,
            base_hot_dict)
        self.assertEqual(
            result,
            "{'namespace': 'dummy_namespace', 'name': 'vdu1', " +
            "'apiVersion': 'apps/v1', 'kind': 'Deployment', " +
            "'status': 'Create_complete'}")
        self.assertEqual(mock_read_namespaced_deployment.call_count, 1)

    @mock.patch.object(objects.VnfResource, 'create')
    @mock.patch.object(paramiko.Transport, 'close')
    @mock.patch.object(paramiko.SFTPClient, 'put')
    @mock.patch.object(paramiko.SFTPClient, 'from_transport')
    @mock.patch.object(paramiko.Transport, 'connect')
    @mock.patch.object(helm_client.HelmClient, '_execute_command')
    @mock.patch.object(client.AppsV1Api, 'read_namespaced_deployment')
    def test_instantiate_vnf_using_helmchart_multiple_ips(
            self, mock_read_namespaced_deployment, mock_command,
            mock_connect, mock_from_transport, mock_put, mock_close,
            mock_vnf_resource_create):
        vnf_instance = fd_utils.get_vnf_instance_object()
        vim_connection_info = fakes.fake_vim_connection_info_with_extra(
            multi_ip=True)
        deployment_obj = fakes.fake_v1_deployment_for_helm()
        mock_read_namespaced_deployment.return_value = deployment_obj
        vnfd_dict = fakes.fake_vnf_dict()
        instantiate_vnf_req = fakes.fake_inst_vnf_req_for_helmchart(
            external=False)
        grant_response = None
        base_hot_dict = None
        vnf_package_path = self.package_path
        mock_command.side_effect = fakes.execute_cmd_helm_client
        result = self.kubernetes.instantiate_vnf(
            self.context, vnf_instance, vnfd_dict, vim_connection_info,
            instantiate_vnf_req, grant_response, vnf_package_path,
            base_hot_dict)
        self.assertEqual(
            result,
            "{'namespace': '', 'name': 'vdu1', " +
            "'apiVersion': 'apps/v1', 'kind': 'Deployment', " +
            "'status': 'Create_complete'}")
        self.assertEqual(mock_read_namespaced_deployment.call_count, 1)

    @mock.patch.object(paramiko.Transport, 'close')
    @mock.patch.object(paramiko.SFTPClient, 'put')
    @mock.patch.object(paramiko.SFTPClient, 'from_transport')
    @mock.patch.object(paramiko.Transport, 'connect')
    @mock.patch.object(helm_client.HelmClient, '_execute_command')
    def test_instantiate_vnf_using_helmchart_put_helmchart_fail(
            self, mock_command,
            mock_connect, mock_from_transport, mock_put, mock_close):
        vnf_instance = fd_utils.get_vnf_instance_object()
        vim_connection_info = fakes.fake_vim_connection_info_with_extra()
        vnfd_dict = fakes.fake_vnf_dict()
        instantiate_vnf_req = fakes.fake_inst_vnf_req_for_helmchart(
            external=False)
        grant_response = None
        base_hot_dict = None
        vnf_package_path = self.package_path
        mock_command.side_effect = fakes.execute_cmd_helm_client
        mock_from_transport.side_effect = paramiko.SSHException()
        self.assertRaises(paramiko.SSHException,
                          self.kubernetes.instantiate_vnf,
                          self.context, vnf_instance, vnfd_dict,
                          vim_connection_info, instantiate_vnf_req,
                          grant_response, vnf_package_path, base_hot_dict)

    @mock.patch.object(helm_client.HelmClient, '_execute_command')
    @mock.patch.object(client.CoreV1Api, 'list_namespaced_pod')
    @mock.patch.object(objects.VnfPackageVnfd, 'get_by_id')
    @mock.patch('tacker.vnflcm.utils._get_vnfd_dict')
    def test_post_vnf_instantiation_using_helmchart(
            self, mock_vnfd_dict, mock_vnf_package_vnfd_get_by_id,
            mock_list_namespaced_pod, mock_command):
        vim_connection_info = fakes.fake_vim_connection_info_with_extra()
        mock_vnfd_dict.return_value = vnflcm_fakes.vnfd_dict_cnf()
        mock_vnf_package_vnfd_get_by_id.return_value = \
            vnflcm_fakes.return_vnf_package_vnfd()
        mock_list_namespaced_pod.return_value = \
            client.V1PodList(items=[
                fakes.get_fake_pod_info(kind='Deployment', name='vdu1')])
        instantiate_vnf_req = fakes.fake_inst_vnf_req_for_helmchart(
            external=False)
        mock_command.side_effect = fakes.execute_cmd_helm_client
        self.kubernetes.post_vnf_instantiation(
            context=self.context,
            vnf_instance=self.vnf_instance,
            vim_connection_info=vim_connection_info,
            instantiate_vnf_req=instantiate_vnf_req)
        self.assertEqual(mock_list_namespaced_pod.call_count, 1)
        # validate stored VnfcResourceInfo
        vnfc_resource_info_after = \
            self.vnf_instance.instantiated_vnf_info.vnfc_resource_info
        self.assertEqual(len(vnfc_resource_info_after), 1)
        expected_pod = fakes.get_fake_pod_info('Deployment', 'vdu1')
        self.assertEqual(
            vnfc_resource_info_after[0].compute_resource.resource_id,
            expected_pod.metadata.name)
        self.assertEqual(vnfc_resource_info_after[0].compute_resource.
                         vim_level_resource_type, 'Deployment')
        self.assertEqual(vnfc_resource_info_after[0].vdu_id, 'VDU1')
        metadata_after = vnfc_resource_info_after[0].metadata
        self.assertEqual(jsonutils.loads(
            metadata_after.get('Deployment')).get('name'), 'vdu1')

    @mock.patch.object(helm_client.HelmClient, '_execute_command')
    @mock.patch.object(vim_client.VimClient, 'get_vim')
    def test_delete_using_helmchart(
            self, mock_get_vim, mock_command):
        vnf_id = 'fake_vnf_id'
        mock_get_vim.return_value = fakes.fake_k8s_vim_obj()
        vim_connection_info = fakes.fake_vim_connection_info_with_extra()
        instantiate_vnf_req = fakes.fake_inst_vnf_req_for_helmchart(
            external=False)
        vnf_instance = copy.deepcopy(self.vnf_instance)
        vnf_instance.vim_connection_info = [vim_connection_info]
        vnf_instance.instantiated_vnf_info.additional_params = \
            instantiate_vnf_req.additional_params
        terminate_vnf_req = objects.TerminateVnfRequest()
        mock_command.side_effect = fakes.execute_cmd_helm_client
        self.kubernetes.delete(plugin=None, context=self.context,
                               vnf_id=vnf_id,
                               auth_attr=utils.get_vim_auth_obj(),
                               vnf_instance=vnf_instance,
                               terminate_vnf_req=terminate_vnf_req)

    @mock.patch.object(helm_client.HelmClient, '_execute_command')
    @mock.patch.object(vim_client.VimClient, 'get_vim')
    def test_delete_using_helmchart_with_namespace(
            self, mock_get_vim, mock_command):
        vnf_id = 'fake_vnf_id'
        mock_get_vim.return_value = fakes.fake_k8s_vim_obj()
        vim_connection_info = fakes.fake_vim_connection_info_with_extra()
        instantiate_vnf_req = fakes.fake_inst_vnf_req_for_helmchart(
            local=False, namespace='dummy_namespace')
        vnf_instance = copy.deepcopy(self.vnf_instance)
        vnf_instance.vim_connection_info = [vim_connection_info]
        vnf_instance.instantiated_vnf_info.additional_params = \
            instantiate_vnf_req.additional_params
        terminate_vnf_req = objects.TerminateVnfRequest()
        mock_command.side_effect = fakes.execute_cmd_helm_client
        self.kubernetes.delete(plugin=None, context=self.context,
                               vnf_id=vnf_id,
                               auth_attr=utils.get_vim_auth_obj(),
                               vnf_instance=vnf_instance,
                               terminate_vnf_req=terminate_vnf_req)

    @mock.patch.object(helm_client.HelmClient, '_execute_command')
    @mock.patch.object(vim_client.VimClient, 'get_vim')
    @mock.patch.object(client.AppsV1Api, 'read_namespaced_deployment')
    @mock.patch.object(objects.VnfResourceList, 'get_by_vnf_instance_id')
    def test_delete_wait_using_helmchart(
            self, mock_vnf_resource_list, mock_read_namespaced_deployment,
            mock_get_vim, mock_command):
        vnf_id = 'fake_vnf_id'
        mock_get_vim.return_value = fakes.fake_k8s_vim_obj()
        vim_connection_info = fakes.fake_vim_connection_info_with_extra()
        instantiate_vnf_req = fakes.fake_inst_vnf_req_for_helmchart(
            external=False)
        vnf_instance = copy.deepcopy(self.vnf_instance)
        vnf_instance.vim_connection_info = [vim_connection_info]
        vnf_instance.instantiated_vnf_info.additional_params = \
            instantiate_vnf_req.additional_params
        vnf_resource = models.VnfResource()
        vnf_resource.vnf_instance_id = vnf_instance.id
        vnf_resource.resource_name = 'default,vdu1'
        vnf_resource.resource_type = 'apps/v1,Deployment'
        mock_vnf_resource_list.return_value = [vnf_resource]
        mock_command.side_effect = fakes.execute_cmd_helm_client
        self.kubernetes.delete_wait(plugin=None, context=self.context,
                                    vnf_id=vnf_id,
                                    auth_attr=utils.get_vim_auth_obj(),
                                    region_name=None,
                                    vnf_instance=vnf_instance)
        self.assertEqual(mock_read_namespaced_deployment.call_count, 1)

    @mock.patch.object(helm_client.HelmClient, '_execute_command')
    @mock.patch.object(vim_client.VimClient, 'get_vim')
    @mock.patch.object(client.AppsV1Api, 'read_namespaced_deployment')
    @mock.patch.object(objects.VnfResourceList, 'get_by_vnf_instance_id')
    def test_delete_wait_using_helmchart_unknown_apiversion(
            self, mock_vnf_resource_list, mock_read_namespaced_deployment,
            mock_get_vim, mock_command):
        vnf_id = 'fake_vnf_id'
        mock_get_vim.return_value = fakes.fake_k8s_vim_obj()
        vim_connection_info = fakes.fake_vim_connection_info_with_extra()
        instantiate_vnf_req = fakes.fake_inst_vnf_req_for_helmchart(
            local=False)
        vnf_instance = copy.deepcopy(self.vnf_instance)
        vnf_instance.vim_connection_info = [vim_connection_info]
        vnf_instance.instantiated_vnf_info.additional_params = \
            instantiate_vnf_req.additional_params
        vnf_resource = models.VnfResource()
        vnf_resource.vnf_instance_id = vnf_instance.id
        vnf_resource.resource_name = 'default,vdu1'
        vnf_resource.resource_type = 'apps/v1unknown,Deployment'
        mock_vnf_resource_list.return_value = [vnf_resource]
        mock_command.side_effect = fakes.execute_cmd_helm_client
        self.kubernetes.delete_wait(plugin=None, context=self.context,
                                    vnf_id=vnf_id,
                                    auth_attr=utils.get_vim_auth_obj(),
                                    region_name=None,
                                    vnf_instance=vnf_instance)
        self.assertEqual(mock_read_namespaced_deployment.call_count, 0)
@@ -74,7 +74,7 @@ class TestVIMClient(base.TestCase):
         vim_expect = {'vim_auth': {'password': '****'}, 'vim_id': 'aaaa',
                       'vim_name': 'VIM0', 'vim_type': 'test_vim',
                       'placement_attr': {'regions': ['TestRegionOne']},
-                      'tenant': 'test'}
+                      'tenant': 'test', 'extra': {}}
         self.assertEqual(vim_expect, vim_result)

     def test_get_vim_with_default_name(self):

@@ -91,7 +91,7 @@ class TestVIMClient(base.TestCase):
         vim_expect = {'vim_auth': {'password': '****'}, 'vim_id': 'aaaa',
                       'vim_name': 'aaaa', 'vim_type': 'test_vim',
                       'placement_attr': {'regions': ['TestRegionOne']},
-                      'tenant': 'test'}
+                      'tenant': 'test', 'extra': {}}
         self.assertEqual(vim_expect, vim_result)

     def test_find_vim_key_with_key_not_found_exception(self):
@@ -45,9 +45,11 @@ def _get_vim(context, vim_connection_info):
             region_name = access_info.get('region')
         else:
             region_name = None
+        extra = vim_connection_info[0].extra
     else:
         vim_id = None
         region_name = None
+        extra = {}

     try:
         vim_res = vim_client_obj.get_vim(

@@ -56,9 +58,13 @@ def _get_vim(context, vim_connection_info):
         raise exceptions.VimConnectionNotFound(vim_id=vim_id)

     vim_res['vim_auth'].update({'region': region_name})
+    if extra:
+        for key, value in extra.items():
+            vim_res['extra'][key] = value
     vim_info = {'id': vim_res['vim_id'], 'vim_id': vim_res['vim_id'],
                 'vim_type': vim_res['vim_type'],
-                'access_info': vim_res['vim_auth']}
+                'access_info': vim_res['vim_auth'],
+                'extra': vim_res.get('extra', {})}

     return vim_info
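A small sketch of the merge semantics introduced above: keys supplied in the request's vimConnectionInfo extra overwrite the values stored with the registered VIM (all names and values below are made up for illustration):

vim_res = {'vim_id': 'aaaa', 'vim_type': 'kubernetes',
           'vim_auth': {}, 'extra': {'helm_info': 'stored-value'}}
extra = {'helm_info': "{'masternode_ip': ['192.168.0.1']}"}
for key, value in extra.items():
    vim_res['extra'][key] = value
# the request-supplied value wins
assert vim_res['extra']['helm_info'] == extra['helm_info']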
@@ -0,0 +1,152 @@
# All Rights Reserved.
#
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os
import time

import eventlet
from oslo_log import log as logging
import paramiko

from tacker.common import cmd_executer
from tacker.extensions import vnfm

LOG = logging.getLogger(__name__)
HELM_CMD_TIMEOUT = 30
HELM_INSTALL_TIMEOUT = 120
TRANSPORT_RETRIES = 2
TRANSPORT_WAIT = 15


class HelmClient(object):
    """Helm client for hosting containerized VNFs"""

    def __init__(self, ip, username, password):
        self.host_ip = ip
        self.username = username
        self.password = password
        self.commander = cmd_executer.RemoteCommandExecutor(
            user=username,
            password=password,
            host=ip,
            timeout=HELM_CMD_TIMEOUT)

    def _execute_command(self, ssh_command, timeout=HELM_CMD_TIMEOUT, retry=0):
        eventlet.monkey_patch()
        while retry >= 0:
            try:
                with eventlet.Timeout(timeout, True):
                    result = self.commander.execute_command(
                        ssh_command, input_data=None)
                    break
            except eventlet.timeout.Timeout:
                error_message = ('Timed out while executing command: {}.'
                                 .format(ssh_command))
                LOG.debug(error_message)
                retry -= 1
                if retry < 0:
                    self.close_session()
                    LOG.error(error_message)
                    raise vnfm.HelmClientOtherError(
                        error_message=error_message)
                time.sleep(30)
        if result.get_return_code():
            self.close_session()
            err = result.get_stderr()
            LOG.error(err)
            raise vnfm.HelmClientRemoteCommandError(message=err)
        return result.get_stdout()

    def add_repository(self, repo_name, repo_url):
        # execute helm repo add command
        ssh_command = "helm repo add {} {}".format(repo_name, repo_url)
        self._execute_command(ssh_command)

    def remove_repository(self, repo_name):
        # execute helm repo remove command
        ssh_command = "helm repo remove {}".format(repo_name)
        self._execute_command(ssh_command)

    def _transport_helmchart(self, source_path, target_path):
        # transfer helm chart file
        retry = TRANSPORT_RETRIES
        while retry > 0:
            try:
                connect = paramiko.Transport(self.host_ip, 22)
                connect.connect(username=self.username,
                                password=self.password)
                sftp = paramiko.SFTPClient.from_transport(connect)
                # put helm chart file
                sftp.put(source_path, target_path)
                connect.close()
                return
            except paramiko.SSHException as e:
                LOG.debug(e)
                retry -= 1
                if retry == 0:
                    self.close_session()
                    LOG.error(e)
                    raise paramiko.SSHException()
                time.sleep(TRANSPORT_WAIT)

    def put_helmchart(self, source_path, target_dir):
        # create helm chart directory and change permission
        ssh_command = ("if [ ! -d {target_dir} ]; then "
                       "`sudo mkdir -p {target_dir}; "
                       "sudo chown -R {username} {target_dir};`; fi").format(
                           target_dir=target_dir, username=self.username)
        self._execute_command(ssh_command)
        # get helm chart name and target path
        chartfile_name = source_path[source_path.rfind(os.sep) + 1:]
        target_path = os.path.join(target_dir, chartfile_name)
        # transport helm chart file
        self._transport_helmchart(source_path, target_path)
        # decompress helm chart file
        ssh_command = "tar -zxf {} -C {}".format(target_path, target_dir)
        self._execute_command(ssh_command)

    def delete_helmchart(self, target_path):
        # delete helm chart folder
        ssh_command = "sudo rm -rf {}".format(target_path)
        self._execute_command(ssh_command)

    def install(self, release_name, chart_name, namespace, parameters):
        # execute helm install command
        ssh_command = "helm install {} {}".format(release_name, chart_name)
        if namespace:
            ssh_command += " --namespace {}".format(namespace)
        if parameters:
            for param in parameters:
                ssh_command += " --set {}".format(param)
        self._execute_command(ssh_command, timeout=HELM_INSTALL_TIMEOUT)

    def uninstall(self, release_name, namespace):
        # execute helm uninstall command
        ssh_command = "helm uninstall {}".format(release_name)
        if namespace:
            ssh_command += " --namespace {}".format(namespace)
        self._execute_command(ssh_command, timeout=HELM_INSTALL_TIMEOUT)

    def get_manifest(self, release_name, namespace):
        # execute helm get manifest command
        ssh_command = "helm get manifest {}".format(release_name)
        if namespace:
            ssh_command += " --namespace {}".format(namespace)
        result = self._execute_command(ssh_command)
        # convert manifest to text format
        mf_content = ''.join(result)
        return mf_content

    def close_session(self):
        self.commander.close_session()
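A minimal usage sketch of the client above. The host, credentials, and chart locations are placeholders, and error handling is omitted:

from tacker.vnfm.infra_drivers.kubernetes.helm import helm_client

helmclient = helm_client.HelmClient('192.168.0.1', 'user', 'password')
try:
    # stage a chart from the VNF package on the master node and unpack it
    helmclient.put_helmchart('/tmp/localhelm-0.1.0.tgz', '/var/tacker/helm')
    # install the release, then read back its rendered manifest
    helmclient.install('myrelease-local', '/var/tacker/helm/localhelm',
                       'default', ['key1=value1'])
    manifest = helmclient.get_manifest('myrelease-local', 'default')
finally:
    helmclient.close_session()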
@@ -325,6 +325,35 @@ class Transformer(object):
         self._init_k8s_obj(k8s_obj, file_content_dict, must_param)
         return k8s_obj

+    def _get_k8s_obj_from_file_content_dict(self, file_content_dict,
+                                            namespace=None):
+        k8s_obj = {}
+        kind = file_content_dict.get('kind', '')
+        try:
+            k8s_obj['object'] = self._create_k8s_object(
+                kind, file_content_dict)
+        except Exception as e:
+            if isinstance(e, client.rest.ApiException):
+                msg = '{kind} create failure. Reason={reason}'.format(
+                    kind=file_content_dict.get('kind', ''), reason=e.body)
+            else:
+                msg = '{kind} create failure. Reason={reason}'.format(
+                    kind=file_content_dict.get('kind', ''), reason=e)
+            LOG.error(msg)
+            raise exceptions.InitApiFalse(error=msg)
+        if not file_content_dict.get('metadata', '') and not namespace:
+            k8s_obj['namespace'] = ''
+        elif file_content_dict.get('metadata', '').\
+                get('namespace', ''):
+            k8s_obj['namespace'] = \
+                file_content_dict.get('metadata', '').get(
+                    'namespace', '')
+        elif namespace:
+            k8s_obj['namespace'] = namespace
+        else:
+            k8s_obj['namespace'] = ''
+        return k8s_obj
+
     def get_k8s_objs_from_yaml(self, artifact_files, vnf_package_path):
         k8s_objs = []
         for artifact_file in artifact_files:

@@ -339,33 +368,33 @@ class Transformer(object):
                 file_content = f.read()
             file_content_dicts = list(yaml.safe_load_all(file_content))
             for file_content_dict in file_content_dicts:
-                k8s_obj = {}
-                kind = file_content_dict.get('kind', '')
-                try:
-                    k8s_obj['object'] = self._create_k8s_object(
-                        kind, file_content_dict)
-                except Exception as e:
-                    if isinstance(e, client.rest.ApiException):
-                        msg = \
-                            _('{kind} create failure. Reason={reason}'.format(
-                                kind=file_content_dict.get('kind', ''),
-                                reason=e.body))
-                    else:
-                        msg = \
-                            _('{kind} create failure. Reason={reason}'.format(
-                                kind=file_content_dict.get('kind', ''),
-                                reason=e))
-                    LOG.error(msg)
-                    raise exceptions.InitApiFalse(error=msg)
-                if not file_content_dict.get('metadata', ''):
-                    k8s_obj['namespace'] = ''
-                elif file_content_dict.get('metadata', '').\
-                        get('namespace', ''):
-                    k8s_obj['namespace'] = \
-                        file_content_dict.get('metadata', '').get(
-                            'namespace', '')
-                else:
-                    k8s_obj['namespace'] = ''
+                k8s_obj = self._get_k8s_obj_from_file_content_dict(
+                    file_content_dict)
+                k8s_objs.append(k8s_obj)
+        return k8s_objs
+
+    def get_k8s_objs_from_manifest(self, mf_content, namespace=None):
+        mkobj_kind_list = [
+            "Pod",
+            "Service",
+            "PersistentVolumeClaim",
+            "Namespace",
+            "Node",
+            "PersistentVolume",
+            "DaemonSet",
+            "Deployment",
+            "ReplicaSet",
+            "StatefulSet",
+            "Job"
+        ]
+        k8s_objs = []
+        mf_content_dicts = list(yaml.safe_load_all(mf_content))
+        for mf_content_dict in mf_content_dicts:
+            kind = mf_content_dict.get('kind', '')
+            if kind in mkobj_kind_list:
+                k8s_obj = self._get_k8s_obj_from_file_content_dict(
+                    file_content_dict=mf_content_dict,
+                    namespace=namespace)
                 k8s_objs.append(k8s_obj)
         return k8s_objs
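The new get_k8s_objs_from_manifest() is fed the text that HelmClient.get_manifest() returns, and only documents whose kind appears in mkobj_kind_list are materialized. A sketch of the intended call, where the Transformer construction is elided and the manifest is a toy example:

manifest = (
    "---\n"
    "apiVersion: apps/v1\n"
    "kind: Deployment\n"
    "metadata:\n"
    "  name: vdu1\n"
)
k8s_objs = transformer.get_k8s_objs_from_manifest(
    manifest, namespace='default')
# each entry carries the instantiated client object and its namespace,
# e.g. [{'object': <V1Deployment vdu1>, 'namespace': 'default'}]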
@@ -31,6 +31,7 @@ from tacker.common.container import kubernetes_utils
 from tacker.common import exceptions
 from tacker.common import log
 from tacker.common import utils
+from tacker.extensions import common_services as cs
 from tacker.extensions import vnfm
 from tacker import objects
 from tacker.objects.fields import ErrorPoint as EP

@@ -39,6 +40,7 @@ from tacker.objects import vnf_package_vnfd as vnfd_obj
 from tacker.objects import vnf_resources as vnf_resource_obj
 from tacker.vnflcm import utils as vnflcm_utils
 from tacker.vnfm.infra_drivers import abstract_driver
+from tacker.vnfm.infra_drivers.kubernetes.helm import helm_client
 from tacker.vnfm.infra_drivers.kubernetes.k8s import translate_outputs
 from tacker.vnfm.infra_drivers.kubernetes import translate_template
 from tacker.vnfm.infra_drivers import scale_driver

@@ -71,6 +73,8 @@ def config_opts():
 SCALING_POLICY = 'tosca.policies.tacker.Scaling'
 COMMA_CHARACTER = ','

+HELM_CHART_DIR_BASE = "/var/tacker/helm"
+

 def get_scaling_policy_name(action, policy_name):
     return '%s_scale_%s' % (policy_name, action)

@@ -804,6 +808,37 @@ class Kubernetes(abstract_driver.VnfAbstractDriver,
             LOG.debug(e)
             pass

+    def _get_helm_info(self, vim_connection_info):
+        # replace single quotes with double quotes so the value parses as JSON
+        helm_info = vim_connection_info.extra.get('helm_info')
+        helm_info_dq = helm_info.replace("'", '"')
+        helm_info_dict = jsonutils.loads(helm_info_dq)
+        return helm_info_dict
+
+    def _helm_uninstall(self, context, vnf_instance):
+        inst_vnf_info = vnf_instance.instantiated_vnf_info
+        additional_params = inst_vnf_info.additional_params
+        namespace = additional_params.get('namespace', '')
+        helm_inst_param_list = additional_params.get(
+            'using_helm_install_param')
+        vim_info = vnflcm_utils._get_vim(
+            context, vnf_instance.vim_connection_info)
+        vim_connection_info = objects.VimConnectionInfo.obj_from_primitive(
+            vim_info, context)
+        helm_info = self._get_helm_info(vim_connection_info)
+        ip_list = helm_info.get('masternode_ip')
+        username = helm_info.get('masternode_username')
+        password = helm_info.get('masternode_password')
+        k8s_objs = []
+        # initialize HelmClient
+        helmclient = helm_client.HelmClient(ip_list[0], username, password)
+        for helm_inst_params in helm_inst_param_list:
+            release_name = helm_inst_params.get('helmreleasename')
+            # execute `helm uninstall` command
+            helmclient.uninstall(release_name, namespace)
+        helmclient.close_session()
+        return k8s_objs
+
     @log.log
     def delete(self, plugin, context, vnf_id, auth_attr, region_name=None,
                vnf_instance=None, terminate_vnf_req=None):
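The helm_info value reaches the driver as a Python-dict-style string (see the fake earlier in this change), so _get_helm_info() rewrites the quotes before JSON parsing. A tiny illustration of that round trip, with example values:

from oslo_serialization import jsonutils

helm_info = "{'masternode_ip': ['192.168.0.1'], 'masternode_username': 'user'}"
helm_info_dict = jsonutils.loads(helm_info.replace("'", '"'))
assert helm_info_dict['masternode_ip'] == ['192.168.0.1']
# caveat: the simple quote replacement assumes no apostrophes inside values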
@@ -814,6 +849,11 @@ class Kubernetes(abstract_driver.VnfAbstractDriver,
             # execute legacy delete method
             self._delete_legacy(vnf_id, auth_cred)
         else:
+            # check use_helm flag
+            inst_vnf_info = vnf_instance.instantiated_vnf_info
+            if self._is_use_helm_flag(inst_vnf_info.additional_params):
+                self._helm_uninstall(context, vnf_instance)
+                return
             # initialize Kubernetes APIs
             k8s_client_dict = self.kubernetes.\
                 get_k8s_client_dict(auth=auth_cred)

@@ -962,6 +1002,35 @@ class Kubernetes(abstract_driver.VnfAbstractDriver,

         return response

+    def _post_helm_uninstall(self, context, vnf_instance):
+        inst_vnf_info = vnf_instance.instantiated_vnf_info
+        additional_params = inst_vnf_info.additional_params
+        helm_inst_param_list = additional_params.get(
+            'using_helm_install_param')
+        vim_info = vnflcm_utils._get_vim(
+            context, vnf_instance.vim_connection_info)
+        vim_connection_info = objects.VimConnectionInfo.obj_from_primitive(
+            vim_info, context)
+        helm_info = self._get_helm_info(vim_connection_info)
+        ip_list = helm_info.get('masternode_ip')
+        username = helm_info.get('masternode_username')
+        password = helm_info.get('masternode_password')
+        del_dir = os.path.join(HELM_CHART_DIR_BASE, vnf_instance.id)
+        for ip in ip_list:
+            local_helm_del_flag = False
+            # initialize HelmClient
+            helmclient = helm_client.HelmClient(ip, username, password)
+            for inst_params in helm_inst_param_list:
+                if self._is_exthelmchart(inst_params):
+                    repo_name = inst_params.get('helmrepositoryname')
+                    # execute `helm repo remove` command
+                    helmclient.remove_repository(repo_name)
+                else:
+                    local_helm_del_flag = True
+            if local_helm_del_flag:
+                helmclient.delete_helmchart(del_dir)
+            helmclient.close_session()
+
     @log.log
     def delete_wait(self, plugin, context, vnf_id, auth_attr,
                     region_name=None, vnf_instance=None):

@@ -1001,6 +1070,8 @@ class Kubernetes(abstract_driver.VnfAbstractDriver,
                     kind = vnf_resource.resource_type.\
                         split(COMMA_CHARACTER)[1]

+                    if not k8s_client_dict.get(api_version):
+                        continue
                     try:
                         self._select_k8s_obj_read_api(
                             k8s_client_dict=k8s_client_dict,

@@ -1019,6 +1090,11 @@ class Kubernetes(abstract_driver.VnfAbstractDriver,
                     time.sleep(self.STACK_RETRY_WAIT)
                 else:
                     keep_going = False
+
+            # check use_helm flag
+            inst_vnf_info = vnf_instance.instantiated_vnf_info
+            if self._is_use_helm_flag(inst_vnf_info.additional_params):
+                self._post_helm_uninstall(context, vnf_instance)
         except Exception as e:
             LOG.error('Deleting wait VNF got an error due to %s', e)
             raise

@@ -1138,6 +1214,7 @@ class Kubernetes(abstract_driver.VnfAbstractDriver,
             vdu_defs = policy['vdu_defs']
             is_found = False
             error_reason = None
+            target_kinds = ["Deployment", "ReplicaSet", "StatefulSet"]
             for vnf_resource in vnf_resources:
                 # The resource that matches the following is the resource
                 # to be scaled:

@@ -1154,8 +1231,9 @@ class Kubernetes(abstract_driver.VnfAbstractDriver,
                     split(COMMA_CHARACTER)[0]
                 kind = vnf_resource.resource_type.\
                     split(COMMA_CHARACTER)[1]
-                is_found = True
-                break
+                if kind in target_kinds:
+                    is_found = True
+                    break
             if is_found:
                 break
         else:

@@ -1165,13 +1243,6 @@ class Kubernetes(abstract_driver.VnfAbstractDriver,
                 aspect_id=aspect_id)
             raise vnfm.CNFScaleFailed(reason=error_reason)

-        target_kinds = ["Deployment", "ReplicaSet", "StatefulSet"]
-        if kind not in target_kinds:
-            error_reason = _(
-                "Target kind {kind} is out of scale target").\
-                format(kind=kind)
-            raise vnfm.CNFScaleFailed(reason=error_reason)
-
         scale_info = self._call_read_scale_api(
             app_v1_api_client=app_v1_api_client,
             namespace=namespace,

@@ -1304,6 +1375,7 @@ class Kubernetes(abstract_driver.VnfAbstractDriver,
             vdu_defs = policy['vdu_defs']
             is_found = False
             error_reason = None
+            target_kinds = ["Deployment", "ReplicaSet", "StatefulSet"]
             for vnf_resource in vnf_resources:
                 name = vnf_resource.resource_name.\
                     split(COMMA_CHARACTER)[1]

@@ -1314,8 +1386,9 @@ class Kubernetes(abstract_driver.VnfAbstractDriver,
                     split(COMMA_CHARACTER)[0]
                 kind = vnf_resource.resource_type.\
                     split(COMMA_CHARACTER)[1]
-                is_found = True
-                break
+                if kind in target_kinds:
+                    is_found = True
+                    break
             if is_found:
                 break
         else:

@@ -1407,6 +1480,70 @@ class Kubernetes(abstract_driver.VnfAbstractDriver,
     def heal_vdu(self, plugin, context, vnf_dict, heal_request_data):
         pass

+    def _is_use_helm_flag(self, additional_params):
+        if not additional_params:
+            return False
+        use_helm = additional_params.get('use_helm')
+        if isinstance(use_helm, str):
+            return use_helm.lower() == 'true'
+        return bool(use_helm)
+
+    def _is_exthelmchart(self, helm_install_params):
+        exthelmchart = helm_install_params.get('exthelmchart')
+        if isinstance(exthelmchart, str):
+            return exthelmchart.lower() == 'true'
+        return bool(exthelmchart)
+
def _pre_helm_install(self, vim_connection_info,
|
||||||
|
instantiate_vnf_req, vnf_package_path):
|
||||||
|
def _check_param_exists(params_dict, check_param):
|
||||||
|
if check_param not in params_dict.keys():
|
||||||
|
LOG.error("{check_param} is not found".format(
|
||||||
|
check_param=check_param))
|
||||||
|
raise cs.InputValuesMissing(key=check_param)
|
||||||
|
|
||||||
|
# check helm info in vim_connection_info
|
||||||
|
if 'helm_info' not in vim_connection_info.extra.keys():
|
||||||
|
reason = "helm_info is missing in vim_connection_info.extra."
|
||||||
|
LOG.error(reason)
|
||||||
|
raise vnfm.InvalidVimConnectionInfo(reason=reason)
|
||||||
|
helm_info = self._get_helm_info(vim_connection_info)
|
||||||
|
ip_list = helm_info.get('masternode_ip', [])
|
||||||
|
username = helm_info.get('masternode_username', '')
|
||||||
|
password = helm_info.get('masternode_username', '')
|
||||||
|
if not (ip_list and username and password):
|
||||||
|
reason = "content of helm_info is invalid."
|
||||||
|
LOG.error(reason)
|
||||||
|
raise vnfm.InvalidVimConnectionInfo(reason=reason)
|
||||||
|
|
||||||
|
# check helm install params
|
||||||
|
additional_params = instantiate_vnf_req.additional_params
|
||||||
|
_check_param_exists(additional_params, 'using_helm_install_param')
|
||||||
|
helm_install_param_list = additional_params.get(
|
||||||
|
'using_helm_install_param', [])
|
||||||
|
if not helm_install_param_list:
|
||||||
|
LOG.error("using_helm_install_param is empty.")
|
||||||
|
raise cs.InputValuesMissing(key='using_helm_install_param')
|
||||||
|
for helm_install_params in helm_install_param_list:
|
||||||
|
# common parameter check
|
||||||
|
_check_param_exists(helm_install_params, 'exthelmchart')
|
||||||
|
_check_param_exists(helm_install_params, 'helmreleasename')
|
||||||
|
if self._is_exthelmchart(helm_install_params):
|
||||||
|
# parameter check (case: external helm chart)
|
||||||
|
_check_param_exists(helm_install_params, 'helmchartname')
|
||||||
|
_check_param_exists(helm_install_params, 'exthelmrepo_url')
|
||||||
|
_check_param_exists(helm_install_params, 'helmrepositoryname')
|
||||||
|
else:
|
||||||
|
# parameter check (case: local helm chart)
|
||||||
|
_check_param_exists(helm_install_params, 'helmchartfile_path')
|
||||||
|
chartfile_path = helm_install_params.get('helmchartfile_path')
|
||||||
|
abs_helm_chart_path = os.path.join(
|
||||||
|
vnf_package_path, chartfile_path)
|
||||||
|
if not os.path.exists(abs_helm_chart_path):
|
||||||
|
LOG.error('Helm chart file {path} is not found.'.format(
|
||||||
|
path=chartfile_path))
|
||||||
|
raise vnfm.CnfDefinitionNotFound(path=chartfile_path)
|
||||||
|
|
||||||
def _get_target_k8s_files(self, instantiate_vnf_req):
|
def _get_target_k8s_files(self, instantiate_vnf_req):
|
||||||
if instantiate_vnf_req.additional_params and\
|
if instantiate_vnf_req.additional_params and\
|
||||||
CNF_TARGET_FILES_KEY in\
|
CNF_TARGET_FILES_KEY in\
|
||||||
|
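For reference, an additionalParams fragment that passes these checks could look like the following sketch. The key names are exactly those validated by _pre_helm_install; every value (release names, repository URL, file path) is a made-up placeholder:

# Illustrative additionalParams for an instantiate request.
additional_params = {
    'namespace': 'default',
    'use_helm': 'true',
    'using_helm_install_param': [
        {
            # case: external helm chart, fetched after `helm repo add`
            'exthelmchart': 'true',
            'helmreleasename': 'myrelease-ext',
            'helmrepositoryname': 'mychartrepo',
            'helmchartname': 'mychart',
            'exthelmrepo_url': 'http://helmrepo.example.com/charts',
        },
        {
            # case: local helm chart packaged in the VNF Package
            'exthelmchart': 'false',
            'helmreleasename': 'myrelease-local',
            'helmchartfile_path': 'Files/kubernetes/localhelm-0.1.0.tgz',
            'helmparameter': ['key1=value1', 'key2=value2'],
        },
    ],
}

Both 'true'/'false' strings and booleans are accepted here, since _is_use_helm_flag and _is_exthelmchart normalize string values case-insensitively.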
@@ -1417,9 +1554,39 @@ class Kubernetes(abstract_driver.VnfAbstractDriver,
             target_k8s_files = list()
         return target_k8s_files
 
+    def _create_vnf_resource(self, context, vnf_instance, file_content_dict,
+                             namespace=None):
+        vnf_resource = vnf_resource_obj.VnfResource(
+            context=context)
+        vnf_resource.vnf_instance_id = vnf_instance.id
+        metadata = file_content_dict.get('metadata', {})
+        if metadata and metadata.get('namespace', ''):
+            namespace = metadata.get('namespace', '')
+        elif not namespace:
+            namespace = ''
+        vnf_resource.resource_name = ','.join([
+            namespace, metadata.get('name', '')])
+        vnf_resource.resource_type = ','.join([
+            file_content_dict.get('apiVersion', ''),
+            file_content_dict.get('kind', '')])
+        vnf_resource.resource_identifier = ''
+        vnf_resource.resource_status = ''
+        return vnf_resource
+
     def pre_instantiation_vnf(self, context, vnf_instance,
                               vim_connection_info, vnf_software_images,
                               instantiate_vnf_req, vnf_package_path):
+        # check use_helm flag
+        if self._is_use_helm_flag(instantiate_vnf_req.additional_params):
+            # parameter check
+            self._pre_helm_install(
+                vim_connection_info, instantiate_vnf_req, vnf_package_path)
+            # NOTE: In case of using helm, vnf_resources is created
+            # after the `helm install` command is executed.
+            return {}
+
         vnf_resources = dict()
         target_k8s_files = self._get_target_k8s_files(instantiate_vnf_req)
         if not target_k8s_files:
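The comma-joined strings built here are what the scale and heal paths later split apart again (resource_name.split(',')[0] for the namespace, resource_type.split(',')[1] for the kind). A small worked example with an invented manifest fragment:

# Hypothetical manifest document; names are illustrative.
file_content_dict = {
    'apiVersion': 'apps/v1',
    'kind': 'Deployment',
    'metadata': {'name': 'vdu1', 'namespace': 'kube-system'},
}
# metadata.namespace takes precedence over the namespace argument, so:
#   resource_name == 'kube-system,vdu1'
#   resource_type == 'apps/v1,Deployment'
# Without metadata.namespace the namespace argument is used, and with
# neither the part before the comma is left empty.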
@@ -1470,19 +1637,8 @@ class Kubernetes(abstract_driver.VnfAbstractDriver,
             file_content_dict_list = yaml.safe_load_all(file_content)
             vnf_resources_temp = []
             for file_content_dict in file_content_dict_list:
-                vnf_resource = vnf_resource_obj.VnfResource(
-                    context=context)
-                vnf_resource.vnf_instance_id = vnf_instance.id
-                vnf_resource.resource_name = ','.join([
-                    file_content_dict.get('metadata', {}).get(
-                        'namespace', ''),
-                    file_content_dict.get('metadata', {}).get(
-                        'name', '')])
-                vnf_resource.resource_type = ','.join([
-                    file_content_dict.get('apiVersion', ''),
-                    file_content_dict.get('kind', '')])
-                vnf_resource.resource_identifier = ''
-                vnf_resource.resource_status = ''
+                vnf_resource = self._create_vnf_resource(
+                    context, vnf_instance, file_content_dict)
                 vnf_resources_temp.append(vnf_resource)
             vnf_resources[target_k8s_index] = vnf_resources_temp
         return vnf_resources
@@ -1491,13 +1647,76 @@ class Kubernetes(abstract_driver.VnfAbstractDriver,
                               vim_connection_info, vnf_resource):
         pass
 
+    def _helm_install(self, context, vnf_instance, vim_connection_info,
+                      instantiate_vnf_req, vnf_package_path, transformer):
+        additional_params = instantiate_vnf_req.additional_params
+        namespace = additional_params.get('namespace', '')
+        helm_inst_param_list = additional_params.get(
+            'using_helm_install_param')
+        helm_info = self._get_helm_info(vim_connection_info)
+        ip_list = helm_info.get('masternode_ip')
+        username = helm_info.get('masternode_username')
+        password = helm_info.get('masternode_password')
+        vnf_resources = []
+        k8s_objs = []
+        for ip_idx, ip in enumerate(ip_list):
+            # initialize HelmClient
+            helmclient = helm_client.HelmClient(ip, username, password)
+            for inst_params in helm_inst_param_list:
+                release_name = inst_params.get('helmreleasename')
+                parameters = inst_params.get('helmparameter')
+                if self._is_exthelmchart(inst_params):
+                    # prepare using external helm chart
+                    chart_name = inst_params.get('helmchartname')
+                    repo_url = inst_params.get('exthelmrepo_url')
+                    repo_name = inst_params.get('helmrepositoryname')
+                    # execute `helm repo add` command
+                    helmclient.add_repository(repo_name, repo_url)
+                    install_chart_name = '/'.join([repo_name, chart_name])
+                else:
+                    # prepare using local helm chart
+                    chartfile_path = inst_params.get('helmchartfile_path')
+                    src_path = os.path.join(vnf_package_path, chartfile_path)
+                    dst_dir = os.path.join(
+                        HELM_CHART_DIR_BASE, vnf_instance.id)
+                    # put helm chart file to Kubernetes controller node
+                    helmclient.put_helmchart(src_path, dst_dir)
+                    chart_file_name = src_path[src_path.rfind(os.sep) + 1:]
+                    chart_name = "-".join(chart_file_name.split("-")[:-1])
+                    install_chart_name = os.path.join(dst_dir, chart_name)
+                if ip_idx == 0:
+                    # execute `helm install` command
+                    helmclient.install(release_name, install_chart_name,
+                                       namespace, parameters)
+                    # get manifest by using `helm get manifest` command
+                    mf_content = helmclient.get_manifest(
+                        release_name, namespace)
+                    k8s_objs_tmp = transformer.get_k8s_objs_from_manifest(
+                        mf_content, namespace)
+                    for k8s_obj in k8s_objs_tmp:
+                        # set status in k8s_obj to 'Creating'
+                        k8s_obj['status'] = 'Creating'
+                    k8s_objs.extend(k8s_objs_tmp)
+                    mf_content_dicts = list(yaml.safe_load_all(mf_content))
+                    for mf_content_dict in mf_content_dicts:
+                        vnf_resource = self._create_vnf_resource(
+                            context, vnf_instance, mf_content_dict, namespace)
+                        vnf_resources.append(vnf_resource)
+            helmclient.close_session()
+        # save the vnf resources in the db
+        for vnf_resource in vnf_resources:
+            vnf_resource.create()
+        return k8s_objs
+
     def instantiate_vnf(self, context, vnf_instance, vnfd_dict,
                         vim_connection_info, instantiate_vnf_req,
                         grant_response, vnf_package_path,
                         plugin=None):
         target_k8s_files = self._get_target_k8s_files(instantiate_vnf_req)
         auth_attr = vim_connection_info.access_info
-        if not target_k8s_files:
+        use_helm_flag = self._is_use_helm_flag(
+            instantiate_vnf_req.additional_params)
+        if not target_k8s_files and not use_helm_flag:
             # The case is based on TOSCA for CNF operation.
             # It is out of the scope of this patch.
             instance_id = self.create(
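Two details in _helm_install are easy to miss: `helm install` itself runs only against the first master node (ip_idx == 0), while the repository registration and chart upload are repeated on every node, presumably so any master can serve later lifecycle operations; and for a local chart the chart name is derived from the packaged file name by dropping the trailing version segment. A worked example of that derivation, with an illustrative path:

import os

# Illustrative values; only the string handling mirrors _helm_install.
src_path = '/var/lib/tacker/csar_files/Files/kubernetes/localhelm-0.1.0.tgz'
dst_dir = '/var/tacker/helm/<vnf-instance-id>'
chart_file_name = src_path[src_path.rfind(os.sep) + 1:]  # 'localhelm-0.1.0.tgz'
chart_name = '-'.join(chart_file_name.split('-')[:-1])   # 'localhelm'
install_chart_name = os.path.join(dst_dir, chart_name)
# -> '/var/tacker/helm/<vnf-instance-id>/localhelm'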
@@ -1509,9 +1728,14 @@ class Kubernetes(abstract_driver.VnfAbstractDriver,
         transformer = translate_outputs.Transformer(
             None, None, None, k8s_client_dict)
         deployment_dict_list = list()
-        k8s_objs = transformer.\
-            get_k8s_objs_from_yaml(target_k8s_files, vnf_package_path)
-        k8s_objs = transformer.deploy_k8s(k8s_objs)
+        if use_helm_flag:
+            k8s_objs = self._helm_install(
+                context, vnf_instance, vim_connection_info,
+                instantiate_vnf_req, vnf_package_path, transformer)
+        else:
+            k8s_objs = transformer.\
+                get_k8s_objs_from_yaml(target_k8s_files, vnf_package_path)
+            k8s_objs = transformer.deploy_k8s(k8s_objs)
         vnfd_dict['current_error_point'] = EP.POST_VIM_CONTROL
         k8s_objs = self.create_wait_k8s(
             k8s_objs, k8s_client_dict, vnf_instance)
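The helm_client.HelmClient used above is defined elsewhere in this change; judging from the call sites, it wraps the standard Helm 3 CLI over an SSH session to the master node. A minimal sketch of the install command such a wrapper might assemble (the flags follow stock `helm install` usage and are an assumption, not a quote of the Tacker module):

def build_helm_install_cmd(release_name, chart_name, namespace, parameters):
    # Sketch only: mirrors the helmclient.install(...) call sites above.
    cmd = ['helm', 'install', release_name, chart_name]
    if namespace:
        cmd.extend(['--namespace', namespace])
    for param in parameters or []:
        cmd.extend(['--set', param])  # e.g. 'key1=value1'
    return cmd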
@@ -1536,6 +1760,29 @@ class Kubernetes(abstract_driver.VnfAbstractDriver,
         vnfd_dict['instance_id'] = resource_info_str
         return resource_info_str
 
+    def _post_helm_install(self, context, vim_connection_info,
+                           instantiate_vnf_req, transformer):
+        additional_params = instantiate_vnf_req.additional_params
+        namespace = additional_params.get('namespace', '')
+        helm_inst_param_list = additional_params.get(
+            'using_helm_install_param')
+        helm_info = self._get_helm_info(vim_connection_info)
+        ip_list = helm_info.get('masternode_ip')
+        username = helm_info.get('masternode_username')
+        password = helm_info.get('masternode_password')
+        k8s_objs = []
+        # initialize HelmClient
+        helmclient = helm_client.HelmClient(ip_list[0], username, password)
+        for helm_inst_params in helm_inst_param_list:
+            release_name = helm_inst_params.get('helmreleasename')
+            # get manifest by using `helm get manifest` command
+            mf_content = helmclient.get_manifest(release_name, namespace)
+            k8s_objs_tmp = transformer.get_k8s_objs_from_manifest(
+                mf_content, namespace)
+            k8s_objs.extend(k8s_objs_tmp)
+        helmclient.close_session()
+        return k8s_objs
+
     def post_vnf_instantiation(self, context, vnf_instance,
                                vim_connection_info, instantiate_vnf_req):
         """Initially store VnfcResourceInfo after instantiation
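`helm get manifest` returns the rendered templates as a single multi-document YAML stream, which is why this driver parses it with yaml.safe_load_all rather than yaml.safe_load. A minimal, self-contained illustration with made-up content:

import yaml

# Made-up manifest; real content comes from `helm get manifest`.
mf_content = '''
apiVersion: v1
kind: Service
metadata:
  name: vdu1-svc
---
apiVersion: apps/v1
kind: Deployment
metadata:
  name: vdu1
'''
docs = list(yaml.safe_load_all(mf_content))
assert [d['kind'] for d in docs] == ['Service', 'Deployment']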
@@ -1554,9 +1801,13 @@ class Kubernetes(abstract_driver.VnfAbstractDriver,
         # initialize Transformer
         transformer = translate_outputs.Transformer(
             None, None, None, None)
-        # get Kubernetes object
-        k8s_objs = transformer.get_k8s_objs_from_yaml(
-            target_k8s_files, vnf_package_path)
+        if self._is_use_helm_flag(instantiate_vnf_req.additional_params):
+            k8s_objs = self._post_helm_install(context,
+                vim_connection_info, instantiate_vnf_req, transformer)
+        else:
+            # get Kubernetes object
+            k8s_objs = transformer.get_k8s_objs_from_yaml(
+                target_k8s_files, vnf_package_path)
         # get TOSCA node templates
         vnfd_dict = vnflcm_utils._get_vnfd_dict(
             context, vnf_instance.vnfd_id,
@@ -2094,6 +2345,7 @@ class Kubernetes(abstract_driver.VnfAbstractDriver,
             aspect_id=scale_vnf_request.aspect_id,
             tosca=tosca)
         is_found = False
+        target_kinds = ["Deployment", "ReplicaSet", "StatefulSet"]
         for vnf_resource in vnf_resources:
             # For CNF operations, Kubernetes resource information is
             # stored in vnfc_resource as follows:
@@ -2103,11 +2355,12 @@ class Kubernetes(abstract_driver.VnfAbstractDriver,
             for vdu_id, vdu_def in vdu_defs.items():
                 vdu_properties = vdu_def.get('properties')
                 if rsc_name == vdu_properties.get('name'):
-                    is_found = True
                     namespace = vnf_resource.resource_name.split(',')[0]
                     rsc_kind = vnf_resource.resource_type.split(',')[1]
                     target_vdu_id = vdu_id
-                    break
+                    if rsc_kind in target_kinds:
+                        is_found = True
+                        break
             if is_found:
                 break
         # extract stored Pod names by vdu_id
@@ -65,7 +65,8 @@ class VimClient(object):
                    'vim_name': vim_info.get('name', vim_info['id']),
                    'vim_type': vim_info['type'],
                    'tenant': vim_info['tenant_id'],
-                   'placement_attr': vim_info.get('placement_attr', {})}
+                   'placement_attr': vim_info.get('placement_attr', {}),
+                   'extra': vim_info.get('extra', {})}
        return vim_res

    @staticmethod
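This one-line passthrough is what delivers the Helm connection details to the infra driver: _pre_helm_install reads vim_connection_info.extra['helm_info']. An illustrative value, with placeholder addresses and credentials (the key names are the ones the driver reads):

extra = {
    'helm_info': {
        'masternode_ip': ['192.168.0.10', '192.168.0.11'],
        'masternode_username': 'helm_user',
        'masternode_password': 'helm_pass',
    },
}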