Support Helm chart as interface for Kubernetes VIM
Implements a new interface for the Kubernetes VIM to handle Helm charts. It enables users to include Helm chart files as MCIOPs in their VNF Packages, and to instantiate and terminate CNFs with them. It also updates the MgmtDriver sample to install and configure the Helm package so that Helm CLI commands can be used in the deployed Kubernetes cluster VNF, and to restore the registered helm repositories and charts after the master node is healed. Implements: blueprint helmchart-k8s-vim Change-Id: I8511b103841d5aba7edcf9ec5bb974bfa3a74bb2
This commit is contained in:
parent
361465b877
commit
08ae05a27a
@ -0,0 +1,10 @@
|
||||
---
|
||||
features:
|
||||
- |
|
||||
Add new interface for Kubernetes VIM to handle Helm chart. It enables Users
|
||||
to include Helm chart files as MCIOP in their VNF Packages, to instantiate
|
||||
and to terminate CNF with them.
|
||||
Also update the sample MgmtDriver to install and configure the Helm package for
|
||||
using Helm cli command in the deployed Kubernetes cluster VNF, and to
|
||||
restore the registered helm repositories and charts after the master node is
|
||||
healed.
|
49
samples/mgmt_driver/install_helm.sh
Normal file
49
samples/mgmt_driver/install_helm.sh
Normal file
@ -0,0 +1,49 @@
|
||||
#!/bin/bash
# Install and configure Helm for Tacker on a Kubernetes cluster VM.
#
# Installs the Helm CLI, installs sshpass (used later when chart
# archives are copied between master nodes), creates the local helm
# chart directory, and propagates proxy settings to /etc/environment.
set -o xtrace
# Fail fast: without errexit a failed download would still reach
# "exit 0" and the calling MgmtDriver could not detect the failure.
set -o errexit -o pipefail

declare -g HELM_VERSION="3.5.4"
declare -g HELM_CHART_DIR="/var/tacker/helm"

# Install Helm
#-------------
function install_helm {
    wget -P /tmp "https://get.helm.sh/helm-v${HELM_VERSION}-linux-amd64.tar.gz"
    tar zxf "/tmp/helm-v${HELM_VERSION}-linux-amd64.tar.gz" -C /tmp
    sudo mv /tmp/linux-amd64/helm /usr/local/bin/helm
    # Clean up the downloaded archive and the extracted directory.
    rm -rf "/tmp/helm-v${HELM_VERSION}-linux-amd64.tar.gz" /tmp/linux-amd64
}

# Install sshpass
#----------------
function install_sshpass {
    sudo apt-get install -y sshpass
}

# Create helm chart directory
#----------------------------
function create_helm_chart_dir {
    sudo mkdir -p "$HELM_CHART_DIR"
}

# Set proxy to environment
#-------------------------
function set_env_proxy {
    # %40 is a URL-encoded '@' in proxy credentials; restore it here.
    cat <<EOF | sudo tee -a /etc/environment >/dev/null
http_proxy=${http_proxy//%40/@}
https_proxy=${https_proxy//%40/@}
no_proxy=$no_proxy
EOF
}

# Main
# ____
install_helm
install_sshpass
create_helm_chart_dir
set_env_proxy
exit 0
|
@ -37,6 +37,10 @@ from tacker.vnfm.mgmt_drivers import vnflcm_abstract_driver
|
||||
LOG = logging.getLogger(__name__)
|
||||
K8S_CMD_TIMEOUT = 30
|
||||
K8S_INSTALL_TIMEOUT = 2700
|
||||
HELM_CMD_TIMEOUT = 30
|
||||
HELM_INSTALL_TIMEOUT = 300
|
||||
HELM_CHART_DIR = "/var/tacker/helm"
|
||||
HELM_CHART_CMP_PATH = "/tmp/tacker-helm.tgz"
|
||||
|
||||
|
||||
class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
|
||||
@ -97,15 +101,22 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
|
||||
stdout = result.get_stdout()
|
||||
LOG.debug(stdout)
|
||||
LOG.debug(err)
|
||||
elif type == 'certificate_key' or type == 'install':
|
||||
elif type in ('certificate_key', 'install', 'scp'):
|
||||
if result.get_return_code() != 0:
|
||||
err = result.get_stderr()
|
||||
LOG.error(err)
|
||||
raise exceptions.MgmtDriverRemoteCommandError(err_info=err)
|
||||
elif type == 'helm_repo_list':
|
||||
if result.get_return_code() != 0:
|
||||
err = result.get_stderr()[0].replace('\n', '')
|
||||
if err == 'Error: no repositories to show':
|
||||
return []
|
||||
raise exceptions.MgmtDriverRemoteCommandError(err_info=err)
|
||||
return result.get_stdout()
|
||||
|
||||
def _create_vim(self, context, vnf_instance, server, bearer_token,
|
||||
ssl_ca_cert, vim_name, project_name, master_vm_dict_list):
|
||||
ssl_ca_cert, vim_name, project_name, master_vm_dict_list,
|
||||
masternode_ip_list):
|
||||
# ha: create vim
|
||||
vim_info = {
|
||||
'vim': {
|
||||
@ -133,6 +144,16 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
|
||||
register_ip, server)
|
||||
vim_info['vim']['auth_url'] = server
|
||||
del vim_info['vim']['auth_cred']['ssl_ca_cert']
|
||||
extra = {}
|
||||
if masternode_ip_list:
|
||||
username = master_vm_dict_list[0].get('ssh').get('username')
|
||||
password = master_vm_dict_list[0].get('ssh').get('password')
|
||||
helm_info = {
|
||||
'masternode_ip': masternode_ip_list,
|
||||
'masternode_username': username,
|
||||
'masternode_password': password}
|
||||
extra['helm_info'] = str(helm_info)
|
||||
vim_info['vim']['extra'] = extra
|
||||
try:
|
||||
nfvo_plugin = NfvoPlugin()
|
||||
created_vim_info = nfvo_plugin.create_vim(context, vim_info)
|
||||
@ -149,7 +170,7 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
|
||||
}
|
||||
vim_connection_info = objects.VimConnectionInfo(
|
||||
id=id, vim_id=vim_id, vim_type=vim_type,
|
||||
access_info=access_info, interface_info=None
|
||||
access_info=access_info, interface_info=None, extra=extra
|
||||
)
|
||||
vim_connection_infos = vnf_instance.vim_connection_info
|
||||
vim_connection_infos.append(vim_connection_info)
|
||||
@ -304,7 +325,8 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
|
||||
return hosts_str
|
||||
|
||||
def _init_commander_and_send_install_scripts(self, user, password, host,
|
||||
vnf_package_path=None, script_path=None):
|
||||
vnf_package_path=None, script_path=None,
|
||||
helm_inst_script_path=None):
|
||||
retry = 4
|
||||
while retry > 0:
|
||||
try:
|
||||
@ -320,6 +342,10 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
|
||||
"../../../samples/mgmt_driver/"
|
||||
"create_admin_token.yaml"),
|
||||
"/tmp/create_admin_token.yaml")
|
||||
if helm_inst_script_path:
|
||||
sftp.put(os.path.join(
|
||||
vnf_package_path, helm_inst_script_path),
|
||||
"/tmp/install_helm.sh")
|
||||
connect.close()
|
||||
commander = cmd_executer.RemoteCommandExecutor(
|
||||
user=user, password=password, host=host,
|
||||
@ -377,9 +403,23 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
|
||||
self._execute_command(
|
||||
commander, ssh_command, K8S_INSTALL_TIMEOUT, 'install', 0)
|
||||
|
||||
def _install_helm(self, commander, proxy):
    """Run the Helm installation script on a remote node.

    Exports the proxy environment variables first (only when both
    http and https proxies are configured) so that the script can
    download the Helm release archive through the proxy, then runs
    the previously transferred /tmp/install_helm.sh.
    """
    http_proxy = proxy.get("http_proxy")
    https_proxy = proxy.get("https_proxy")
    env_part = ""
    if http_proxy and https_proxy:
        env_part = (
            "export http_proxy={http_proxy}; "
            "export https_proxy={https_proxy}; "
            "export no_proxy={no_proxy}; ").format(
                http_proxy=http_proxy,
                https_proxy=https_proxy,
                no_proxy=proxy.get('no_proxy'))
    ssh_command = env_part + "bash /tmp/install_helm.sh;"
    self._execute_command(
        commander, ssh_command, HELM_INSTALL_TIMEOUT, 'install', 0)
|
||||
|
||||
def _install_k8s_cluster(self, context, vnf_instance,
|
||||
proxy, script_path,
|
||||
master_vm_dict_list, worker_vm_dict_list):
|
||||
master_vm_dict_list, worker_vm_dict_list,
|
||||
helm_inst_script_path):
|
||||
# instantiate: pre /etc/hosts
|
||||
hosts_str = self._get_hosts(
|
||||
master_vm_dict_list, worker_vm_dict_list)
|
||||
@ -399,6 +439,16 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
|
||||
raise exceptions.MgmtDriverOtherError(
|
||||
error_message="The path of install script is invalid")
|
||||
|
||||
# check helm install and get helm install script_path
|
||||
masternode_ip_list = []
|
||||
if helm_inst_script_path:
|
||||
abs_helm_inst_script_path = os.path.join(
|
||||
vnf_package_path, helm_inst_script_path)
|
||||
if not os.path.exists(abs_helm_inst_script_path):
|
||||
LOG.error('The path of helm install script is invalid.')
|
||||
raise exceptions.MgmtDriverParamInvalid(
|
||||
param='helm_installation_script_path')
|
||||
|
||||
# set no proxy
|
||||
project_name = ''
|
||||
if proxy.get("http_proxy") and proxy.get("https_proxy"):
|
||||
@ -446,7 +496,7 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
|
||||
k8s_cluster = vm_dict.get('k8s_cluster', {})
|
||||
commander = self._init_commander_and_send_install_scripts(
|
||||
user, password, host,
|
||||
vnf_package_path, script_path)
|
||||
vnf_package_path, script_path, helm_inst_script_path)
|
||||
|
||||
# set /etc/hosts for each node
|
||||
ssh_command = "> /tmp/tmp_hosts"
|
||||
@ -562,6 +612,9 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
|
||||
bearer_token = self._execute_command(
|
||||
commander, ssh_command,
|
||||
K8S_CMD_TIMEOUT, 'common', 0)[0].replace('\n', '')
|
||||
if helm_inst_script_path:
|
||||
self._install_helm(commander, proxy)
|
||||
masternode_ip_list.append(host)
|
||||
commander.close_session()
|
||||
|
||||
# install worker node
|
||||
@ -597,7 +650,8 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
|
||||
cluster_ip, kubeadm_token, ssl_ca_cert_hash)
|
||||
commander.close_session()
|
||||
|
||||
return server, bearer_token, ssl_ca_cert, project_name
|
||||
return (server, bearer_token, ssl_ca_cert, project_name,
|
||||
masternode_ip_list)
|
||||
|
||||
def _check_values(self, additional_param):
|
||||
for key, value in additional_param.items():
|
||||
@ -654,6 +708,8 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
|
||||
master_node = additional_param.get('master_node', {})
|
||||
worker_node = additional_param.get('worker_node', {})
|
||||
proxy = additional_param.get('proxy', {})
|
||||
helm_inst_script_path = additional_param.get(
|
||||
'helm_installation_script_path', None)
|
||||
# check script_path
|
||||
if not script_path:
|
||||
LOG.error('The script_path in the '
|
||||
@ -695,15 +751,16 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
|
||||
worker_vm_dict_list = self._get_install_info_for_k8s_node(
|
||||
nest_stack_id, worker_node,
|
||||
instantiate_vnf_request.additional_params, 'worker', access_info)
|
||||
server, bearer_token, ssl_ca_cert, project_name = \
|
||||
server, bearer_token, ssl_ca_cert, project_name, masternode_ip_list = \
|
||||
self._install_k8s_cluster(context, vnf_instance,
|
||||
proxy, script_path, master_vm_dict_list,
|
||||
worker_vm_dict_list)
|
||||
worker_vm_dict_list,
|
||||
helm_inst_script_path)
|
||||
|
||||
# register vim with kubernetes cluster info
|
||||
self._create_vim(context, vnf_instance, server,
|
||||
bearer_token, ssl_ca_cert, vim_name, project_name,
|
||||
master_vm_dict_list)
|
||||
master_vm_dict_list, masternode_ip_list)
|
||||
|
||||
def terminate_start(self, context, vnf_instance,
|
||||
terminate_vnf_request, grant,
|
||||
@ -1580,6 +1637,50 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
|
||||
|
||||
return target_physical_resource_ids
|
||||
|
||||
def _prepare_for_restoring_helm(self, commander, master_ip):
    """Collect the information needed to restore Helm after a heal.

    Gathers the externally registered helm repositories and archives
    the local helm chart directory on the given master node so both
    can be restored on the healed node later.
    """
    helm_info = {}
    # get helm repo list
    repo_output = self._execute_command(
        commander, "helm repo list -o json",
        K8S_CMD_TIMEOUT, 'helm_repo_list', 0)
    if repo_output:
        # NOTE(review): repo_output comes from the remote command's
        # stdout; assumed to be JSON-parsable here — confirm against
        # _execute_command's 'helm_repo_list' handling.
        helm_info['ext_helmrepo_list'] = json.loads(repo_output)
    # compress local helm chart; -P keeps absolute paths so the
    # archive can be extracted in place on the healed node.
    archive_command = (
        "sudo tar -zcf {cmp_path} -P {helm_chart_dir}".format(
            cmp_path=HELM_CHART_CMP_PATH,
            helm_chart_dir=HELM_CHART_DIR))
    self._execute_command(
        commander, archive_command, HELM_INSTALL_TIMEOUT, 'common', 0)
    helm_info['local_repo_src_ip'] = master_ip

    return helm_info
|
||||
|
||||
def _restore_helm_repo(self, commander, master_username, master_password,
                       local_repo_src_ip, ext_repo_list):
    """Restore helm repositories and local charts on a healed node.

    Copies the chart archive produced by _prepare_for_restoring_helm
    from a healthy master node, extracts it in place, then
    re-registers the external helm repositories.
    """
    # restore local helm chart
    ssh_command = (
        "sudo sshpass -p {master_password} "
        "scp -o StrictHostKeyChecking=no "
        "{master_username}@{local_repo_src_ip}:{helm_chart_cmp_path} "
        "{helm_chart_cmp_path};").format(
            master_password=master_password,
            master_username=master_username,
            local_repo_src_ip=local_repo_src_ip,
            helm_chart_cmp_path=HELM_CHART_CMP_PATH
    )
    # -P restores the absolute paths stored in the archive.
    ssh_command += "sudo tar -Pzxf {helm_chart_cmp_path};".format(
        helm_chart_cmp_path=HELM_CHART_CMP_PATH)
    self._execute_command(
        commander, ssh_command, HELM_CMD_TIMEOUT, 'scp', 0)
    # restore external helm repository
    if ext_repo_list:
        # Build the repo-add commands in a fresh string: appending to
        # ssh_command would re-run the scp/tar commands a second time.
        repo_command = ""
        for ext_repo in ext_repo_list:
            repo_command += "helm repo add {name} {url};".format(
                name=ext_repo.get('name'), url=ext_repo.get('url'))
        self._execute_command(
            commander, repo_command, HELM_CMD_TIMEOUT, 'common', 0)
|
||||
|
||||
def heal_start(self, context, vnf_instance,
|
||||
heal_vnf_request, grant,
|
||||
grant_request, **kwargs):
|
||||
@ -1629,7 +1730,7 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
|
||||
fixed_master_infos, proxy,
|
||||
master_username, master_password, vnf_package_path,
|
||||
script_path, cluster_ip, pod_cidr, cluster_cidr,
|
||||
kubeadm_token, ssl_ca_cert_hash, ha_flag):
|
||||
kubeadm_token, ssl_ca_cert_hash, ha_flag, helm_info):
|
||||
not_fixed_master_nic_ips = [
|
||||
master_ips.get('master_nic_ip')
|
||||
for master_ips in not_fixed_master_infos.values()]
|
||||
@ -1656,7 +1757,8 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
|
||||
commander = self._init_commander_and_send_install_scripts(
|
||||
master_username, master_password,
|
||||
fixed_master_info.get('master_ssh_ip'),
|
||||
vnf_package_path, script_path)
|
||||
vnf_package_path, script_path,
|
||||
helm_info.get('script_path', None))
|
||||
self._set_node_ip_in_hosts(
|
||||
commander, 'heal_end', hosts_str=hosts_str)
|
||||
if proxy.get('http_proxy') and proxy.get('https_proxy'):
|
||||
@ -1699,6 +1801,12 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
|
||||
certificate_key=certificate_key)
|
||||
self._execute_command(
|
||||
commander, ssh_command, K8S_INSTALL_TIMEOUT, 'install', 0)
|
||||
if helm_info:
|
||||
self._install_helm(commander, proxy)
|
||||
self._restore_helm_repo(
|
||||
commander, master_username, master_password,
|
||||
helm_info.get('local_repo_src_ip'),
|
||||
helm_info.get('ext_helmrepo_list', ''))
|
||||
commander.close_session()
|
||||
for not_fixed_master_name, not_fixed_master in \
|
||||
not_fixed_master_infos.items():
|
||||
@ -1814,6 +1922,15 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
|
||||
ssl_ca_cert_hash = self._execute_command(
|
||||
commander, ssh_command,
|
||||
K8S_CMD_TIMEOUT, 'common', 3)[0].replace('\n', '')
|
||||
|
||||
# prepare for restoring helm repository
|
||||
helm_inst_script_path = k8s_cluster_installation_param.get(
|
||||
'helm_installation_script_path', None)
|
||||
helm_info = {}
|
||||
if helm_inst_script_path:
|
||||
helm_info = self._prepare_for_restoring_helm(commander, master_ip)
|
||||
helm_info['script_path'] = helm_inst_script_path
|
||||
|
||||
commander.close_session()
|
||||
if len(fixed_master_infos) + len(not_fixed_master_ssh_ips) == 1:
|
||||
ha_flag = False
|
||||
@ -1829,7 +1946,7 @@ class KubernetesMgmtDriver(vnflcm_abstract_driver.VnflcmMgmtAbstractDriver):
|
||||
fixed_master_infos, proxy,
|
||||
master_username, master_password, vnf_package_path,
|
||||
script_path, cluster_ip, pod_cidr, cluster_cidr,
|
||||
kubeadm_token, ssl_ca_cert_hash, ha_flag)
|
||||
kubeadm_token, ssl_ca_cert_hash, ha_flag, helm_info)
|
||||
if flag_worker:
|
||||
self._fix_worker_node(
|
||||
fixed_worker_infos,
|
||||
|
@ -11,7 +11,12 @@ Content-Type: application/sh
|
||||
Algorithm: SHA-256
|
||||
Hash: bc859fb8ffb9f92a19139553bdd077428a2c9572196e5844f1c912a7a822c249
|
||||
|
||||
Name: Scripts/install_helm.sh
|
||||
Content-Type: application/sh
|
||||
Algorithm: SHA-256
|
||||
Hash: 4af332b05e3e85662d403208e1e6d82e5276cbcd3b82a3562d2e3eb80d1ef714
|
||||
|
||||
Name: Scripts/kubernetes_mgmt.py
|
||||
Content-Type: text/x-python
|
||||
Algorithm: SHA-256
|
||||
Hash: b8c558cad30f219634a668f84d6e04998949e941f3909b5c60374b84dff58545
|
||||
Hash: bf651994ca7422aadeb0a12fed179f44ab709029c2eee9b2b9c7e8cbf339a66d
|
||||
|
@ -38,6 +38,7 @@ class Vim(model_base.BASE,
|
||||
), nullable=False)
|
||||
vim_auth = orm.relationship('VimAuth')
|
||||
status = sa.Column(sa.String(255), nullable=False)
|
||||
extra = sa.Column(types.Json, nullable=True)
|
||||
|
||||
__table_args__ = (
|
||||
schema.UniqueConstraint(
|
||||
|
@ -31,7 +31,7 @@ from tacker.plugins.common import constants
|
||||
|
||||
VIM_ATTRIBUTES = ('id', 'type', 'tenant_id', 'name', 'description',
|
||||
'placement_attr', 'shared', 'is_default',
|
||||
'created_at', 'updated_at', 'status')
|
||||
'created_at', 'updated_at', 'status', 'extra')
|
||||
|
||||
VIM_AUTH_ATTRIBUTES = ('auth_url', 'vim_project', 'password', 'auth_cred')
|
||||
|
||||
@ -87,6 +87,7 @@ class NfvoPluginDb(nfvo.NFVOPluginBase, db_base.CommonDbMixin):
|
||||
placement_attr=vim.get('placement_attr'),
|
||||
is_default=vim.get('is_default'),
|
||||
status=vim.get('status'),
|
||||
extra=vim.get('extra'),
|
||||
deleted_at=datetime.min)
|
||||
context.session.add(vim_db)
|
||||
vim_auth_db = nfvo_db.VimAuth(
|
||||
@ -158,6 +159,8 @@ class NfvoPluginDb(nfvo.NFVOPluginBase, db_base.CommonDbMixin):
|
||||
if 'placement_attr' in vim:
|
||||
vim_db.update(
|
||||
{'placement_attr': vim.get('placement_attr')})
|
||||
if 'extra' in vim:
|
||||
vim_db.update({'extra': vim.get('extra')})
|
||||
vim_auth_db = (self._model_query(
|
||||
context, nfvo_db.VimAuth).filter(
|
||||
nfvo_db.VimAuth.vim_id == vim_id).with_for_update().one())
|
||||
|
@ -129,6 +129,18 @@ class CNFHealWaitFailed(exceptions.TackerException):
|
||||
message = _('%(reason)s')
|
||||
|
||||
|
||||
class InvalidVimConnectionInfo(exceptions.TackerException):
|
||||
message = _('Invalid vim_connection_info: %(reason)s')
|
||||
|
||||
|
||||
class HelmClientRemoteCommandError(exceptions.TackerException):
|
||||
message = _('Failed to execute remote command.')
|
||||
|
||||
|
||||
class HelmClientOtherError(exceptions.TackerException):
|
||||
message = _('An error occurred in HelmClient: %(error_message)s.')
|
||||
|
||||
|
||||
class ServiceTypeNotFound(exceptions.NotFound):
|
||||
message = _('service type %(service_type_id)s could not be found')
|
||||
|
||||
|
@ -30,6 +30,8 @@ class VimConnectionInfo(base.TackerObject, base.TackerPersistentObject):
|
||||
default={}),
|
||||
'access_info': fields.DictOfNullableStringsField(nullable=True,
|
||||
default={}),
|
||||
'extra': fields.DictOfNullableStringsField(nullable=True,
|
||||
default={}),
|
||||
}
|
||||
|
||||
@classmethod
|
||||
@ -39,11 +41,13 @@ class VimConnectionInfo(base.TackerObject, base.TackerPersistentObject):
|
||||
vim_type = data_dict.get('vim_type')
|
||||
access_info = data_dict.get('access_info', {})
|
||||
interface_info = data_dict.get('interface_info', {})
|
||||
extra = data_dict.get('extra', {})
|
||||
obj = cls(id=id,
|
||||
vim_id=vim_id,
|
||||
vim_type=vim_type,
|
||||
interface_info=interface_info,
|
||||
access_info=access_info)
|
||||
access_info=access_info,
|
||||
extra=extra)
|
||||
return obj
|
||||
|
||||
@classmethod
|
||||
@ -62,4 +66,5 @@ class VimConnectionInfo(base.TackerObject, base.TackerPersistentObject):
|
||||
'vim_id': self.vim_id,
|
||||
'vim_type': self.vim_type,
|
||||
'interface_info': self.interface_info,
|
||||
'access_info': self.access_info}
|
||||
'access_info': self.access_info,
|
||||
'extra': self.extra}
|
||||
|
@ -0,0 +1,151 @@
|
||||
tosca_definitions_version: tosca_simple_yaml_1_2
|
||||
|
||||
description: Sample CNF with helmchart
|
||||
|
||||
imports:
|
||||
- etsi_nfv_sol001_common_types.yaml
|
||||
- etsi_nfv_sol001_vnfd_types.yaml
|
||||
- sample_vnfd_types.yaml
|
||||
|
||||
topology_template:
|
||||
inputs:
|
||||
descriptor_id:
|
||||
type: string
|
||||
descriptor_version:
|
||||
type: string
|
||||
provider:
|
||||
type: string
|
||||
product_name:
|
||||
type: string
|
||||
software_version:
|
||||
type: string
|
||||
vnfm_info:
|
||||
type: list
|
||||
entry_schema:
|
||||
type: string
|
||||
flavour_id:
|
||||
type: string
|
||||
flavour_description:
|
||||
type: string
|
||||
|
||||
substitution_mappings:
|
||||
node_type: company.provider.VNF
|
||||
properties:
|
||||
flavour_id: helmchart
|
||||
requirements:
|
||||
virtual_link_external: []
|
||||
|
||||
node_templates:
|
||||
VNF:
|
||||
type: company.provider.VNF
|
||||
properties:
|
||||
flavour_description: A flavour for single resources
|
||||
|
||||
VDU1:
|
||||
type: tosca.nodes.nfv.Vdu.Compute
|
||||
properties:
|
||||
name: vdu1-localhelm
|
||||
description: kubernetes resource as VDU1
|
||||
vdu_profile:
|
||||
min_number_of_instances: 1
|
||||
max_number_of_instances: 3
|
||||
|
||||
VDU2:
|
||||
type: tosca.nodes.nfv.Vdu.Compute
|
||||
properties:
|
||||
name: vdu2-apache
|
||||
description: kubernetes resource as VDU2
|
||||
vdu_profile:
|
||||
min_number_of_instances: 1
|
||||
max_number_of_instances: 3
|
||||
|
||||
policies:
|
||||
- scaling_aspects:
|
||||
type: tosca.policies.nfv.ScalingAspects
|
||||
properties:
|
||||
aspects:
|
||||
vdu1_aspect:
|
||||
name: vdu1_aspect
|
||||
description: vdu1 scaling aspect
|
||||
max_scale_level: 2
|
||||
step_deltas:
|
||||
- delta_1
|
||||
vdu2_aspect:
|
||||
name: vdu2_aspect
|
||||
description: vdu2 scaling aspect
|
||||
max_scale_level: 2
|
||||
step_deltas:
|
||||
- delta_1
|
||||
|
||||
- vdu1_initial_delta:
|
||||
type: tosca.policies.nfv.VduInitialDelta
|
||||
properties:
|
||||
initial_delta:
|
||||
number_of_instances: 1
|
||||
targets: [ VDU1 ]
|
||||
|
||||
- vdu1_scaling_aspect_deltas:
|
||||
type: tosca.policies.nfv.VduScalingAspectDeltas
|
||||
properties:
|
||||
aspect: vdu1_aspect
|
||||
deltas:
|
||||
delta_1:
|
||||
number_of_instances: 1
|
||||
targets: [ VDU1 ]
|
||||
|
||||
- vdu2_initial_delta:
|
||||
type: tosca.policies.nfv.VduInitialDelta
|
||||
properties:
|
||||
initial_delta:
|
||||
number_of_instances: 1
|
||||
targets: [ VDU2 ]
|
||||
|
||||
- vdu2_scaling_aspect_deltas:
|
||||
type: tosca.policies.nfv.VduScalingAspectDeltas
|
||||
properties:
|
||||
aspect: vdu2_aspect
|
||||
deltas:
|
||||
delta_1:
|
||||
number_of_instances: 1
|
||||
targets: [ VDU2 ]
|
||||
|
||||
- instantiation_levels:
|
||||
type: tosca.policies.nfv.InstantiationLevels
|
||||
properties:
|
||||
levels:
|
||||
instantiation_level_1:
|
||||
description: Smallest size
|
||||
scale_info:
|
||||
vdu1_aspect:
|
||||
scale_level: 0
|
||||
vdu2_aspect:
|
||||
scale_level: 0
|
||||
instantiation_level_2:
|
||||
description: Largest size
|
||||
scale_info:
|
||||
vdu1_aspect:
|
||||
scale_level: 2
|
||||
vdu2_aspect:
|
||||
scale_level: 2
|
||||
default_level: instantiation_level_1
|
||||
|
||||
- vdu1_instantiation_levels:
|
||||
type: tosca.policies.nfv.VduInstantiationLevels
|
||||
properties:
|
||||
levels:
|
||||
instantiation_level_1:
|
||||
number_of_instances: 1
|
||||
instantiation_level_2:
|
||||
number_of_instances: 3
|
||||
targets: [ VDU1 ]
|
||||
|
||||
- vdu2_instantiation_levels:
|
||||
type: tosca.policies.nfv.VduInstantiationLevels
|
||||
properties:
|
||||
levels:
|
||||
instantiation_level_1:
|
||||
number_of_instances: 1
|
||||
instantiation_level_2:
|
||||
number_of_instances: 3
|
||||
targets: [ VDU2 ]
|
||||
|
@ -0,0 +1,31 @@
|
||||
tosca_definitions_version: tosca_simple_yaml_1_2
|
||||
|
||||
description: Sample CNF with Helm
|
||||
|
||||
imports:
|
||||
- etsi_nfv_sol001_common_types.yaml
|
||||
- etsi_nfv_sol001_vnfd_types.yaml
|
||||
- sample_vnfd_types.yaml
|
||||
- sample_vnfd_df_helmchart.yaml
|
||||
|
||||
topology_template:
|
||||
inputs:
|
||||
selected_flavour:
|
||||
type: string
|
||||
description: VNF deployment flavour selected by the consumer. It is provided in the API
|
||||
|
||||
node_templates:
|
||||
VNF:
|
||||
type: company.provider.VNF
|
||||
properties:
|
||||
flavour_id: { get_input: selected_flavour }
|
||||
descriptor_id: b1bb0ce7-ebca-4fa7-95ed-4840d70a1177
|
||||
provider: Company
|
||||
product_name: Sample CNF
|
||||
software_version: '1.0'
|
||||
descriptor_version: '1.0'
|
||||
vnfm_info:
|
||||
- Tacker
|
||||
requirements:
|
||||
#- virtual_link_external # mapped in lower-level templates
|
||||
#- virtual_link_internal # mapped in lower-level templates
|
@ -0,0 +1,53 @@
|
||||
tosca_definitions_version: tosca_simple_yaml_1_2
|
||||
|
||||
description: VNF type definition
|
||||
|
||||
imports:
|
||||
- etsi_nfv_sol001_common_types.yaml
|
||||
- etsi_nfv_sol001_vnfd_types.yaml
|
||||
|
||||
node_types:
|
||||
company.provider.VNF:
|
||||
derived_from: tosca.nodes.nfv.VNF
|
||||
properties:
|
||||
descriptor_id:
|
||||
type: string
|
||||
constraints: [ valid_values: [ b1bb0ce7-ebca-4fa7-95ed-4840d70a1177 ] ]
|
||||
default: b1bb0ce7-ebca-4fa7-95ed-4840d70a1177
|
||||
descriptor_version:
|
||||
type: string
|
||||
constraints: [ valid_values: [ '1.0' ] ]
|
||||
default: '1.0'
|
||||
provider:
|
||||
type: string
|
||||
constraints: [ valid_values: [ 'Company' ] ]
|
||||
default: 'Company'
|
||||
product_name:
|
||||
type: string
|
||||
constraints: [ valid_values: [ 'Sample CNF' ] ]
|
||||
default: 'Sample CNF'
|
||||
software_version:
|
||||
type: string
|
||||
constraints: [ valid_values: [ '1.0' ] ]
|
||||
default: '1.0'
|
||||
vnfm_info:
|
||||
type: list
|
||||
entry_schema:
|
||||
type: string
|
||||
constraints: [ valid_values: [ Tacker ] ]
|
||||
default: [ Tacker ]
|
||||
flavour_id:
|
||||
type: string
|
||||
constraints: [ valid_values: [ helmchart ] ]
|
||||
default: helmchart
|
||||
flavour_description:
|
||||
type: string
|
||||
default: ""
|
||||
requirements:
|
||||
- virtual_link_external:
|
||||
capability: tosca.capabilities.nfv.VirtualLinkable
|
||||
- virtual_link_internal:
|
||||
capability: tosca.capabilities.nfv.VirtualLinkable
|
||||
interfaces:
|
||||
Vnflcm:
|
||||
type: tosca.interfaces.nfv.Vnflcm
|
Binary file not shown.
@ -0,0 +1,9 @@
|
||||
TOSCA-Meta-File-Version: 1.0
|
||||
Created-by: dummy_user
|
||||
CSAR-Version: 1.1
|
||||
Entry-Definitions: Definitions/sample_vnfd_top.yaml
|
||||
|
||||
Name: Files/kubernetes/localhelm-0.1.0.tgz
|
||||
Content-Type: application/tar+gzip
|
||||
Algorithm: SHA-256
|
||||
Hash: 837fcfb73e5fc58572851a80a0143373d9d28ec37bd3bdf52c4d7d34b97592d5
|
@ -0,0 +1,447 @@
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import os
|
||||
import time
|
||||
|
||||
from oslo_serialization import jsonutils
|
||||
from oslo_utils import uuidutils
|
||||
from sqlalchemy import desc
|
||||
from sqlalchemy.orm import joinedload
|
||||
|
||||
from tacker.common import exceptions
|
||||
from tacker import context
|
||||
from tacker.db import api as db_api
|
||||
from tacker.db.db_sqlalchemy import api
|
||||
from tacker.db.db_sqlalchemy import models
|
||||
from tacker.objects import fields
|
||||
from tacker.objects import vnf_lcm_op_occs
|
||||
from tacker.tests.functional import base
|
||||
from tacker.tests import utils
|
||||
|
||||
VNF_PACKAGE_UPLOAD_TIMEOUT = 300
|
||||
VNF_INSTANTIATE_TIMEOUT = 600
|
||||
VNF_TERMINATE_TIMEOUT = 600
|
||||
VNF_HEAL_TIMEOUT = 600
|
||||
VNF_SCALE_TIMEOUT = 600
|
||||
RETRY_WAIT_TIME = 5
|
||||
|
||||
|
||||
def _create_and_upload_vnf_package(tacker_client, csar_package_name,
                                   user_defined_data):
    """Create a VNF package, upload its CSAR, and wait until onboarded.

    Returns a tuple of (vnf package id, vnfd id).
    """
    # create vnf package
    resp, vnf_package = tacker_client.do_request(
        '/vnfpkgm/v1/vnf_packages', "POST",
        body=jsonutils.dumps({"userDefinedData": user_defined_data}))

    # upload vnf package
    csar_package_path = "../../../etc/samples/etsi/nfv/{}".format(
        csar_package_name)
    file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                             csar_package_path))

    # Generating unique vnfd id. This is required when multiple workers
    # are running concurrently. The call below creates a new temporary
    # CSAR with unique vnfd id.
    file_path, uniqueid = utils.create_csar_with_unique_vnfd_id(file_path)

    with open(file_path, 'rb') as file_object:
        resp, resp_body = tacker_client.do_request(
            '/vnfpkgm/v1/vnf_packages/{}/package_content'.format(
                vnf_package['id']),
            "PUT", body=file_object, content_type='application/zip')

    # wait for onboard
    deadline = int(time.time()) + VNF_PACKAGE_UPLOAD_TIMEOUT
    show_url = os.path.join('/vnfpkgm/v1/vnf_packages', vnf_package['id'])
    vnfd_id = None
    while True:
        resp, body = tacker_client.do_request(show_url, "GET")
        if body['onboardingState'] == "ONBOARDED":
            vnfd_id = body['vnfdId']
            break

        if int(time.time()) > deadline:
            raise Exception("Failed to onboard vnf package, process could not"
                            " be completed within {} seconds".format(
                                VNF_PACKAGE_UPLOAD_TIMEOUT))

        time.sleep(RETRY_WAIT_TIME)

    # remove temporarily created CSAR file
    os.remove(file_path)
    return vnf_package['id'], vnfd_id
|
||||
|
||||
|
||||
class VnfLcmKubernetesHelmTest(base.BaseTackerTest):
|
||||
|
||||
@classmethod
def setUpClass(cls):
    """Onboard the helm chart sample package once for the test class."""
    cls.tacker_client = base.BaseTackerTest.tacker_http_client()
    cls.vnf_instance_ids = []
    package_id, vnfd_id = _create_and_upload_vnf_package(
        cls.tacker_client, "test_cnf_helmchart",
        {"key": "sample_helmchart_functional"})
    cls.vnf_package_resource = package_id
    cls.vnfd_id_resource = vnfd_id
    super(VnfLcmKubernetesHelmTest, cls).setUpClass()
|
||||
|
||||
@classmethod
def tearDownClass(cls):
    """Disable and delete the VNF package onboarded in setUpClass."""
    base_path = "/vnfpkgm/v1/vnf_packages"
    disable_body = jsonutils.dumps({"operationalState": "DISABLED"})
    for package_id in [cls.vnf_package_resource]:
        # Update vnf package operational state to DISABLED
        resp, resp_body = cls.tacker_client.do_request(
            '{base_path}/{id}'.format(id=package_id,
                                      base_path=base_path),
            "PATCH", content_type='application/json',
            body=disable_body)

        # Delete vnf package
        url = '/vnfpkgm/v1/vnf_packages/{}'.format(package_id)
        cls.tacker_client.do_request(url, "DELETE")

    super(VnfLcmKubernetesHelmTest, cls).tearDownClass()
|
||||
|
||||
def setUp(self):
    """Resolve the Kubernetes VIM; skip when it is not registered."""
    super(VnfLcmKubernetesHelmTest, self).setUp()
    self.base_vnf_instances_url = "/vnflcm/v1/vnf_instances"
    self.base_vnf_lcm_op_occs_url = "/vnflcm/v1/vnf_lcm_op_occs"
    self.context = context.get_admin_context()

    registered_vims = self.client.list_vims()
    if not registered_vims:
        self.skipTest("Vims are not configured")

    vim_id = 'vim-kubernetes'
    target_vim = self.get_vim(registered_vims, vim_id)
    if not target_vim:
        self.skipTest("Kubernetes VIM '{}' is missing".format(vim_id))
    self.vim_id = target_vim['id']
|
||||
|
||||
def _instantiate_vnf_instance_request(
|
||||
self, flavour_id, vim_id=None, additional_param=None):
|
||||
request_body = {"flavourId": flavour_id}
|
||||
|
||||
if vim_id:
|
||||
request_body["vimConnectionInfo"] = [
|
||||
{"id": uuidutils.generate_uuid(),
|
||||
"vimId": vim_id,
|
||||
"vimType": "kubernetes"}]
|
||||
|
||||
if additional_param:
|
||||
request_body["additionalParams"] = additional_param
|
||||
|
||||
return request_body
|
||||
|
||||
def _create_vnf_instance(self, vnfd_id, vnf_instance_name=None,
|
||||
vnf_instance_description=None):
|
||||
request_body = {'vnfdId': vnfd_id}
|
||||
if vnf_instance_name:
|
||||
request_body['vnfInstanceName'] = vnf_instance_name
|
||||
|
||||
if vnf_instance_description:
|
||||
request_body['vnfInstanceDescription'] = vnf_instance_description
|
||||
|
||||
resp, response_body = self.http_client.do_request(
|
||||
self.base_vnf_instances_url, "POST",
|
||||
body=jsonutils.dumps(request_body))
|
||||
return resp, response_body
|
||||
|
||||
def _delete_wait_vnf_instance(self, id):
|
||||
url = os.path.join("/vnflcm/v1/vnf_instances", id)
|
||||
start_time = int(time.time())
|
||||
while True:
|
||||
resp, body = self.tacker_client.do_request(url, "DELETE")
|
||||
if 204 == resp.status_code:
|
||||
break
|
||||
|
||||
if ((int(time.time()) - start_time) > VNF_TERMINATE_TIMEOUT):
|
||||
raise Exception("Failed to delete vnf instance, process could"
|
||||
" not be completed within {} seconds".format(
|
||||
VNF_TERMINATE_TIMEOUT))
|
||||
|
||||
time.sleep(RETRY_WAIT_TIME)
|
||||
|
||||
def _show_vnf_instance(self, id):
|
||||
show_url = os.path.join("/vnflcm/v1/vnf_instances", id)
|
||||
resp, vnf_instance = self.tacker_client.do_request(show_url, "GET")
|
||||
|
||||
return vnf_instance
|
||||
|
||||
def _vnf_instance_wait(
|
||||
self, id,
|
||||
instantiation_state=fields.VnfInstanceState.INSTANTIATED,
|
||||
timeout=VNF_INSTANTIATE_TIMEOUT):
|
||||
show_url = os.path.join("/vnflcm/v1/vnf_instances", id)
|
||||
start_time = int(time.time())
|
||||
while True:
|
||||
resp, body = self.tacker_client.do_request(show_url, "GET")
|
||||
if body['instantiationState'] == instantiation_state:
|
||||
break
|
||||
|
||||
if ((int(time.time()) - start_time) > timeout):
|
||||
raise Exception("Failed to wait vnf instance, process could"
|
||||
" not be completed within {} seconds".format(timeout))
|
||||
|
||||
time.sleep(RETRY_WAIT_TIME)
|
||||
|
||||
def _instantiate_vnf_instance(self, id, request_body):
|
||||
url = os.path.join(self.base_vnf_instances_url, id, "instantiate")
|
||||
resp, body = self.http_client.do_request(
|
||||
url, "POST", body=jsonutils.dumps(request_body))
|
||||
self.assertEqual(202, resp.status_code)
|
||||
self._vnf_instance_wait(id)
|
||||
|
||||
def _create_and_instantiate_vnf_instance(self, flavour_id,
|
||||
additional_params):
|
||||
# create vnf instance
|
||||
vnf_instance_name = "test_vnf_instance_for_cnf_heal-{}".format(
|
||||
uuidutils.generate_uuid())
|
||||
vnf_instance_description = "vnf instance for cnf heal testing"
|
||||
resp, vnf_instance = self._create_vnf_instance(
|
||||
self.vnfd_id_resource, vnf_instance_name=vnf_instance_name,
|
||||
vnf_instance_description=vnf_instance_description)
|
||||
|
||||
# instantiate vnf instance
|
||||
additional_param = additional_params
|
||||
request_body = self._instantiate_vnf_instance_request(
|
||||
flavour_id, vim_id=self.vim_id, additional_param=additional_param)
|
||||
|
||||
self._instantiate_vnf_instance(vnf_instance['id'], request_body)
|
||||
vnf_instance = self._show_vnf_instance(vnf_instance['id'])
|
||||
self.vnf_instance_ids.append(vnf_instance['id'])
|
||||
|
||||
return vnf_instance
|
||||
|
||||
def _terminate_vnf_instance(self, id):
|
||||
# Terminate vnf forcefully
|
||||
request_body = {
|
||||
"terminationType": fields.VnfInstanceTerminationType.FORCEFUL,
|
||||
}
|
||||
url = os.path.join(self.base_vnf_instances_url, id, "terminate")
|
||||
resp, body = self.http_client.do_request(
|
||||
url, "POST", body=jsonutils.dumps(request_body))
|
||||
self.assertEqual(202, resp.status_code)
|
||||
self._vnf_instance_wait(
|
||||
id,
|
||||
instantiation_state=fields.VnfInstanceState.NOT_INSTANTIATED,
|
||||
timeout=VNF_TERMINATE_TIMEOUT)
|
||||
|
||||
def _delete_vnf_instance(self, id):
|
||||
self._delete_wait_vnf_instance(id)
|
||||
|
||||
# verify vnf instance is deleted
|
||||
url = os.path.join(self.base_vnf_instances_url, id)
|
||||
resp, body = self.http_client.do_request(url, "GET")
|
||||
self.assertEqual(404, resp.status_code)
|
||||
|
||||
def _scale_vnf_instance(self, id, type, aspect_id,
|
||||
number_of_steps=1):
|
||||
url = os.path.join(self.base_vnf_instances_url, id, "scale")
|
||||
# generate body
|
||||
request_body = {
|
||||
"type": type,
|
||||
"aspectId": aspect_id,
|
||||
"numberOfSteps": number_of_steps}
|
||||
resp, body = self.http_client.do_request(
|
||||
url, "POST", body=jsonutils.dumps(request_body))
|
||||
self.assertEqual(202, resp.status_code)
|
||||
|
||||
def _heal_vnf_instance(self, id, vnfc_instance_id):
|
||||
url = os.path.join(self.base_vnf_instances_url, id, "heal")
|
||||
# generate body
|
||||
request_body = {
|
||||
"vnfcInstanceId": vnfc_instance_id}
|
||||
resp, body = self.http_client.do_request(
|
||||
url, "POST", body=jsonutils.dumps(request_body))
|
||||
self.assertEqual(202, resp.status_code)
|
||||
|
||||
@db_api.context_manager.reader
|
||||
def _vnf_notify_get_by_id(self, context, vnf_instance_id,
|
||||
columns_to_join=None):
|
||||
query = api.model_query(
|
||||
context, models.VnfLcmOpOccs,
|
||||
read_deleted="no", project_only=True).filter_by(
|
||||
vnf_instance_id=vnf_instance_id).order_by(
|
||||
desc("created_at"))
|
||||
|
||||
if columns_to_join:
|
||||
for column in columns_to_join:
|
||||
query = query.options(joinedload(column))
|
||||
|
||||
db_vnflcm_op_occ = query.first()
|
||||
|
||||
if not db_vnflcm_op_occ:
|
||||
raise exceptions.VnfInstanceNotFound(id=vnf_instance_id)
|
||||
|
||||
vnflcm_op_occ = vnf_lcm_op_occs.VnfLcmOpOcc.obj_from_db_obj(
|
||||
context, db_vnflcm_op_occ)
|
||||
return vnflcm_op_occ
|
||||
|
||||
def _wait_vnflcm_op_occs(
|
||||
self, context, vnf_instance_id,
|
||||
operation_state='COMPLETED'):
|
||||
start_time = int(time.time())
|
||||
while True:
|
||||
vnflcm_op_occ = self._vnf_notify_get_by_id(
|
||||
context, vnf_instance_id)
|
||||
|
||||
if vnflcm_op_occ.operation_state == operation_state:
|
||||
break
|
||||
|
||||
if ((int(time.time()) - start_time) > VNF_HEAL_TIMEOUT):
|
||||
raise Exception("Failed to wait heal instance")
|
||||
|
||||
time.sleep(RETRY_WAIT_TIME)
|
||||
|
||||
def _get_vnfc_resource_info(self, vnf_instance):
|
||||
inst_vnf_info = vnf_instance['instantiatedVnfInfo']
|
||||
vnfc_resource_info = inst_vnf_info['vnfcResourceInfo']
|
||||
return vnfc_resource_info
|
||||
|
||||
def _test_scale_cnf(self, vnf_instance):
|
||||
"""Test scale in/out CNF"""
|
||||
def _test_scale(id, type, aspect_id, previous_level,
|
||||
delta_num=1, number_of_steps=1):
|
||||
# scale operation
|
||||
self._scale_vnf_instance(id, type, aspect_id, number_of_steps)
|
||||
# wait vnflcm_op_occs.operation_state become COMPLETE
|
||||
self._wait_vnflcm_op_occs(self.context, id)
|
||||
# check scaleStatus after scale operation
|
||||
vnf_instance = self._show_vnf_instance(id)
|
||||
scale_status_after = \
|
||||
vnf_instance['instantiatedVnfInfo']['scaleStatus']
|
||||
if type == 'SCALE_OUT':
|
||||
expected_level = previous_level + number_of_steps
|
||||
else:
|
||||
expected_level = previous_level - number_of_steps
|
||||
for status in scale_status_after:
|
||||
if status.get('aspectId') == aspect_id:
|
||||
self.assertEqual(status.get('scaleLevel'), expected_level)
|
||||
previous_level = status.get('scaleLevel')
|
||||
|
||||
return previous_level
|
||||
|
||||
aspect_id = "vdu1_aspect"
|
||||
scale_status_initial = \
|
||||
vnf_instance['instantiatedVnfInfo']['scaleStatus']
|
||||
self.assertTrue(len(scale_status_initial) > 0)
|
||||
for status in scale_status_initial:
|
||||
self.assertIsNotNone(status.get('aspectId'))
|
||||
self.assertIsNotNone(status.get('scaleLevel'))
|
||||
if status.get('aspectId') == aspect_id:
|
||||
previous_level = status.get('scaleLevel')
|
||||
|
||||
# test scale out
|
||||
previous_level = _test_scale(
|
||||
vnf_instance['id'], 'SCALE_OUT', aspect_id, previous_level)
|
||||
|
||||
# test scale in
|
||||
previous_level = _test_scale(
|
||||
vnf_instance['id'], 'SCALE_IN', aspect_id, previous_level)
|
||||
|
||||
def _test_heal_cnf_with_sol002(self, vnf_instance):
|
||||
"""Test heal as per SOL002 for CNF"""
|
||||
vnf_instance = self._show_vnf_instance(vnf_instance['id'])
|
||||
before_vnfc_rscs = self._get_vnfc_resource_info(vnf_instance)
|
||||
|
||||
# get vnfc_instance_id of heal target
|
||||
before_pod_name = dict()
|
||||
vnfc_instance_id = list()
|
||||
for vnfc_rsc in before_vnfc_rscs:
|
||||
if vnfc_rsc['vduId'] == "vdu1":
|
||||
before_pod_name['vdu1'] = \
|
||||
vnfc_rsc['computeResource']['resourceId']
|
||||
elif vnfc_rsc['vduId'] == "vdu2":
|
||||
before_pod_name['vdu2'] = \
|
||||
vnfc_rsc['computeResource']['resourceId']
|
||||
vnfc_instance_id.append(vnfc_rsc['id'])
|
||||
|
||||
# test heal SOL-002 (partial heal)
|
||||
self._heal_vnf_instance(vnf_instance['id'], vnfc_instance_id)
|
||||
# wait vnflcm_op_occs.operation_state become COMPLETE
|
||||
self._wait_vnflcm_op_occs(self.context, vnf_instance['id'])
|
||||
# check vnfcResourceInfo after heal operation
|
||||
vnf_instance = self._show_vnf_instance(vnf_instance['id'])
|
||||
after_vnfc_rscs = self._get_vnfc_resource_info(vnf_instance)
|
||||
self.assertEqual(len(before_vnfc_rscs), len(after_vnfc_rscs))
|
||||
for vnfc_rsc in after_vnfc_rscs:
|
||||
after_pod_name = vnfc_rsc['computeResource']['resourceId']
|
||||
if vnfc_rsc['vduId'] == "vdu1":
|
||||
# check stored pod name is changed (vdu1)
|
||||
compute_resource = vnfc_rsc['computeResource']
|
||||
before_pod_name = compute_resource['resourceId']
|
||||
self.assertNotEqual(after_pod_name, before_pod_name['vdu1'])
|
||||
elif vnfc_rsc['vduId'] == "vdu2":
|
||||
# check stored pod name is changed (vdu2)
|
||||
compute_resource = vnfc_rsc['computeResource']
|
||||
before_pod_name = compute_resource['resourceId']
|
||||
self.assertNotEqual(after_pod_name, before_pod_name['vdu2'])
|
||||
|
||||
def _test_heal_cnf_with_sol003(self, vnf_instance):
|
||||
"""Test heal as per SOL003 for CNF"""
|
||||
vnf_instance = self._show_vnf_instance(vnf_instance['id'])
|
||||
before_vnfc_rscs = self._get_vnfc_resource_info(vnf_instance)
|
||||
|
||||
# test heal SOL-003 (entire heal)
|
||||
vnfc_instance_id = []
|
||||
self._heal_vnf_instance(vnf_instance['id'], vnfc_instance_id)
|
||||
# wait vnflcm_op_occs.operation_state become COMPLETE
|
||||
self._wait_vnflcm_op_occs(self.context, vnf_instance['id'])
|
||||
# check vnfcResourceInfo after heal operation
|
||||
vnf_instance = self._show_vnf_instance(vnf_instance['id'])
|
||||
after_vnfc_rscs = self._get_vnfc_resource_info(vnf_instance)
|
||||
self.assertEqual(len(before_vnfc_rscs), len(after_vnfc_rscs))
|
||||
# check id and pod name (as computeResource.resourceId) is changed
|
||||
for before_vnfc_rsc in before_vnfc_rscs:
|
||||
for after_vnfc_rsc in after_vnfc_rscs:
|
||||
self.assertNotEqual(
|
||||
before_vnfc_rsc['id'], after_vnfc_rsc['id'])
|
||||
self.assertNotEqual(
|
||||
before_vnfc_rsc['computeResource']['resourceId'],
|
||||
after_vnfc_rsc['computeResource']['resourceId'])
|
||||
|
||||
def test_vnflcm_with_helmchart(self):
|
||||
# use def-files of singleton Pod and Deployment (replicas=2)
|
||||
helmchartfile_path = "Files/kubernetes/localhelm-0.1.0.tgz"
|
||||
inst_additional_param = {
|
||||
"namespace": "default",
|
||||
"use_helm": "true",
|
||||
"using_helm_install_param": [
|
||||
{
|
||||
"exthelmchart": "false",
|
||||
"helmchartfile_path": helmchartfile_path,
|
||||
"helmreleasename": "vdu1",
|
||||
"helmparameter": [
|
||||
"service.port=8081"
|
||||
]
|
||||
},
|
||||
{
|
||||
"exthelmchart": "true",
|
||||
"helmreleasename": "vdu2",
|
||||
"helmrepositoryname": "bitnami",
|
||||
"helmchartname": "apache",
|
||||
"exthelmrepo_url": "https://charts.bitnami.com/bitnami"
|
||||
}
|
||||
]
|
||||
}
|
||||
vnf_instance = self._create_and_instantiate_vnf_instance(
|
||||
"helmchart", inst_additional_param)
|
||||
self._test_scale_cnf(vnf_instance)
|
||||
self._test_heal_cnf_with_sol002(vnf_instance)
|
||||
self._test_heal_cnf_with_sol003(vnf_instance)
|
||||
|
||||
self._terminate_vnf_instance(vnf_instance['id'])
|
||||
self._delete_vnf_instance(vnf_instance['id'])
|
@ -719,7 +719,7 @@ def get_dummy_vim_connection_info():
|
||||
'user_domain_name': 'Default', 'username': 'admin'},
|
||||
'created_at': '', 'deleted': False, 'deleted_at': '',
|
||||
'id': 'fake_id', 'updated_at': '',
|
||||
'vim_id': 'fake_vim_id', 'vim_type': 'openstack'}
|
||||
'vim_id': 'fake_vim_id', 'vim_type': 'openstack', 'extra': {}}
|
||||
|
||||
|
||||
def get_dummy_instantiate_vnf_request(**updates):
|
||||
@ -1428,7 +1428,8 @@ VNFLCMOPOCC_RESPONSE = {
|
||||
"vimId": 'f8c35bd0-4d67-4436-9f11-14b8a84c92aa',
|
||||
"vimType": 'openstack',
|
||||
'interfaceInfo': {},
|
||||
"accessInfo": {"key1": 'value1', "key2": 'value2'}}],
|
||||
"accessInfo": {"key1": 'value1', "key2": 'value2'},
|
||||
"extra": {}}],
|
||||
'vimConnectionInfoDeleteIds': ['f8c35bd0-4d67-4436-9f11-14b8a84c92bb'],
|
||||
'vnfPkgId': 'f26f181d-7891-4720-b022-b074ec1733ef',
|
||||
'vnfInstanceName': 'fake_name',
|
||||
|
@ -198,7 +198,8 @@ class TestController(base.TestCase):
|
||||
'vim_type': 'test',
|
||||
'vim_auth': {'username': 'test', 'password': 'test'},
|
||||
'placement_attr': {'region': 'TestRegionOne'},
|
||||
'tenant': 'test'
|
||||
'tenant': 'test',
|
||||
'extra': {}
|
||||
}
|
||||
self.context = context.get_admin_context()
|
||||
|
||||
@ -1237,14 +1238,16 @@ class TestController(base.TestCase):
|
||||
"region": "RegionOne",
|
||||
"password": "devstack",
|
||||
"tenant": "85d12da99f8246dfae350dbc7334a473",
|
||||
}
|
||||
},
|
||||
"extra": {}
|
||||
}
|
||||
|
||||
vim_connection_info = objects.VimConnectionInfo(
|
||||
id=vim_info['id'], vim_id=vim_info['vim_id'],
|
||||
vim_type=vim_info['vim_type'],
|
||||
access_info=vim_info['access_info'],
|
||||
interface_info=vim_info['interface_info'])
|
||||
interface_info=vim_info['interface_info'],
|
||||
extra=vim_info['extra'])
|
||||
|
||||
mock_vnf_by_id.return_value = fakes.return_vnf_instance(
|
||||
fields.VnfInstanceState.INSTANTIATED,
|
||||
|
@ -1117,3 +1117,143 @@ def fake_vim_connection_info():
|
||||
return vim_connection.VimConnectionInfo(
|
||||
vim_type="kubernetes",
|
||||
access_info=access_info)
|
||||
|
||||
|
||||
def fake_vim_connection_info_with_extra(del_field=None, multi_ip=False):
    """Build a kubernetes VimConnectionInfo carrying Helm access info.

    :param del_field: optional key to drop from the helm_info dict, to
        simulate an incomplete Helm configuration.
    :param multi_ip: when True, a second master-node IP is included.
    """
    ip_list = ["192.168.0.1"]
    if multi_ip:
        ip_list.append("192.168.0.2")

    helm_info = {
        'masternode_ip': ip_list,
        'masternode_username': 'dummy_user',
        'masternode_password': 'dummy_pass'
    }
    # All helm_info values are truthy, so pop-when-present matches the
    # original "get then del" behavior exactly.
    if del_field:
        helm_info.pop(del_field, None)

    return vim_connection.VimConnectionInfo(
        vim_type="kubernetes",
        access_info={'auth_url': 'http://fake_url:6443',
                     'ssl_ca_cert': None},
        extra={'helm_info': str(helm_info)})
|
||||
|
||||
|
||||
def fake_inst_vnf_req_for_helmchart(external=True, local=True, namespace=None):
    """Build an InstantiateVnfRequest whose additionalParams use Helm.

    :param external: include a chart installed from an external repository.
    :param local: include a chart file packaged inside the VNF package.
    :param namespace: optional target Kubernetes namespace.
    """
    ext_chart_param = {
        "exthelmchart": "true",
        "helmreleasename": "myrelease-ext",
        "helmrepositoryname": "sample-charts",
        "helmchartname": "mychart-ext",
        "exthelmrepo_url": "http://helmrepo.example.com/sample-charts"
    }
    local_chart_param = {
        "exthelmchart": "false",
        "helmchartfile_path": "Files/kubernetes/localhelm-0.1.0.tgz",
        "helmreleasename": "myrelease-local",
        "helmparameter": [
            "key1=value1",
            "key2=value2"
        ]
    }

    install_params = []
    if external:
        install_params.append(ext_chart_param)
    if local:
        install_params.append(local_chart_param)

    additional_params = {"use_helm": "true"}
    additional_params['using_helm_install_param'] = install_params
    if namespace:
        additional_params['namespace'] = namespace

    return objects.InstantiateVnfRequest(additional_params=additional_params)
|
||||
|
||||
|
||||
def execute_cmd_helm_client(*args, **kwargs):
    """Fake remote Helm CLI executor used to stub SSH command execution.

    When the issued command contains ``helm get manifest``, returns the
    rendered manifest of the fake local chart as a list of output lines;
    any other command returns an empty string.

    :param args: ``args[0]`` is the ssh command line being "executed".
    :returns: list of manifest lines, or ``""`` for non-manifest commands.
    """
    ssh_command = args[0]
    if 'helm get manifest' not in ssh_command:
        return ""
    # One list element per output line.  The previous fixture was missing
    # commas between several literals (fusing four lines into one element)
    # and its YAML nesting had collapsed to a single space per level, which
    # is not a valid Deployment manifest structure; both are repaired here
    # with conventional 2-space-per-level indentation.
    return [
        '---\n',
        '# Source: localhelm/templates/deployment.yaml\n',
        'apiVersion: apps/v1\n',
        'kind: Deployment\n',
        'metadata:\n',
        '  name: vdu1\n',
        'spec:\n',
        '  replicas: 1\n',
        '  selector:\n',
        '    matchLabels:\n',
        '      app: webserver\n',
        '  template:\n',
        '    metadata:\n',
        '      labels:\n',
        '        app: webserver\n',
        '    spec:\n',
        '      containers:\n',
        '      - name: nginx\n'
    ]
|
||||
|
||||
|
||||
def fake_k8s_objs_deployment_for_helm():
    """Return a single 'Creating' k8s object entry wrapping a fake
    Deployment, as produced for Helm-based instantiation."""
    return [{'status': 'Creating',
             'object': fake_v1_deployment_for_helm()}]
|
||||
|
||||
|
||||
def fake_v1_deployment_for_helm():
    """Return a fake V1Deployment named 'vdu1' with 1/1 replicas ready."""
    # Pod template: single nginx container labelled app=webserver.
    pod_template = client.V1PodTemplateSpec(
        metadata=client.V1ObjectMeta(labels={'app': 'webserver'}),
        spec=client.V1PodSpec(
            containers=[client.V1Container(name='nginx')]))
    deployment_spec = client.V1DeploymentSpec(
        replicas=1,
        selector=client.V1LabelSelector(
            match_labels={'app': 'webserver'}),
        template=pod_template)
    return client.V1Deployment(
        api_version='apps/v1',
        kind='Deployment',
        metadata=client.V1ObjectMeta(name='vdu1'),
        status=client.V1DeploymentStatus(replicas=1, ready_replicas=1),
        spec=deployment_spec)
|
||||
|
||||
|
||||
def fake_k8s_vim_obj(< |