Helm chart support for CNF v2 API

This patch enables the CNF v2 API to operate using Helm charts.

A new vimType, 'ETSINFV.HELM.V_3', is introduced.

Since the Helm VIM reuses existing functions of the Kubernetes VIM,
the Kubernetes VIM code is refactored so it can be shared between the
Kubernetes VIM and the Helm VIM.

Implements: blueprint helmchart-k8s-vim
Change-Id: I0329a0d43294181b7ffb1494bb5dd2d0528eb5dc
Ken Fujimoto 2022-09-01 13:17:25 +00:00
parent 4467f162c0
commit bb7b3cfce9
51 changed files with 3199 additions and 233 deletions
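
For reference, a VNF using this feature carries a vimConnectionInfo entry of
the new type. A minimal hedged sketch, not part of the patch itself; all
values are placeholders, and the required attributes follow the validation
added in vnflcm_driver_v2.py below:

    # Hedged sketch: shape of a vimConnectionInfo entry for the new
    # Helm VIM type. 'endpoint', 'ssl_ca_cert' and 'bearer_token' are
    # the attributes the v2 driver checks; values here are dummies.
    vim_connection_info = {
        "vim1": {
            "vimType": "ETSINFV.HELM.V_3",
            "interfaceInfo": {
                "endpoint": "https://k8s-apiserver.example:6443",
                "ssl_ca_cert": "-----BEGIN CERTIFICATE----- ...",
            },
            "accessInfo": {"bearer_token": "placeholder-token"},
        }
    }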

View File

@@ -0,0 +1,9 @@
---
features:
  - |
    Support LCM operations using Helm charts for the v2 LCM API.
    This feature enables v2 LCM operations with Helm charts, an
    instantiationLevel parameter that determines the initial number
    of Pods, and the vimConnectionInfo.extra field.

View File

@@ -104,3 +104,24 @@
     - inventory_hostname == 'controller'
     - helm_version is defined
+
+- block:
+  - name: Download Helm
+    get_url:
+      url: "https://get.helm.sh/helm-v{{ helm_version }}-linux-amd64.tar.gz"
+      dest: "/tmp/helm-v{{ helm_version }}-linux-amd64.tar.gz"
+      force: yes
+  - name: Unarchive Helm
+    unarchive:
+      src: "/tmp/helm-v{{ helm_version }}-linux-amd64.tar.gz"
+      dest: "/tmp"
+      remote_src: yes
+    become: yes
+  - name: Move Helm binary
+    shell: mv /tmp/linux-amd64/helm /usr/local/bin/helm
+    become: yes
+  when:
+    - inventory_hostname == 'controller-tacker'
+    - helm_version is defined

View File

@@ -13,6 +13,8 @@
 # License for the specific language governing permissions and limitations
 # under the License.

+from tacker._i18n import _
+

 class SolException(Exception):
     """Exception for SOL ProblemDetails
@@ -361,3 +363,12 @@ class K8sInvalidManifestFound(SolHttpError400):
 class OIDCAuthFailed(SolHttpError400):
     message = _("OIDC authentication and authorization failed."
                 " Detail: %(detail)s")
+
+
+class HelmOperationFailed(SolHttpError422):
+    title = 'Helm operation failed'
+    # detail is set where the exception is raised
+
+
+class HelmParameterNotFound(SolHttpError400):
+    message = _("Helm parameter for scale vdu %(vdu_name)s is not found.")

View File

@@ -100,10 +100,14 @@ def vim_to_conn_info(vim):
         if 'ssl_ca_cert' in vim_auth.keys():
             interface_info['ssl_ca_cert'] = vim_auth['ssl_ca_cert']

+        vim_type = ('ETSINFV.HELM.V_3'
+                    if vim.get('extra', {}).get('use_helm', False)
+                    else 'kubernetes')
         return objects.VimConnectionInfo(
             vimId=vim['vim_id'],
-            vimType='kubernetes',
+            vimType=vim_type,
             interfaceInfo=interface_info,
             accessInfo=access_info
         )

     raise sol_ex.SolException(sol_detail='not support vim type')
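
In other words, a registered Kubernetes VIM opts into Helm via its extra
field. A minimal standalone sketch of the selection above (the dict shape
mirrors the VIM record passed to vim_to_conn_info):

    # Standalone sketch of the vimType selection added above; the
    # 'extra' field is optional and defaults to no Helm support.
    def select_vim_type(vim: dict) -> str:
        return ('ETSINFV.HELM.V_3'
                if vim.get('extra', {}).get('use_helm', False)
                else 'kubernetes')

    assert select_vim_type({'extra': {'use_helm': True}}) == 'ETSINFV.HELM.V_3'
    assert select_vim_type({}) == 'kubernetes'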

View File

@@ -25,6 +25,7 @@ from tacker.sol_refactored.common import exceptions as sol_ex
 from tacker.sol_refactored.common import lcm_op_occ_utils as lcmocc_utils
 from tacker.sol_refactored.common import vim_utils
 from tacker.sol_refactored.common import vnf_instance_utils as inst_utils
+from tacker.sol_refactored.infra_drivers.kubernetes import helm
 from tacker.sol_refactored.infra_drivers.kubernetes import kubernetes
 from tacker.sol_refactored.infra_drivers.openstack import openstack
 from tacker.sol_refactored.nfvo import nfvo_client
@@ -398,11 +399,28 @@ class VnfLcmDriverV2(object):
         # to here, although it is better to check in controller.
         if lcmocc.operationState == v2fields.LcmOperationStateType.STARTING:
             vim_info = inst_utils.select_vim_info(vim_infos)
-            if (vim_info.vimType == "kubernetes" and
-                    not req.get('additionalParams', {}).get(
-                        'lcm-kubernetes-def-files')):
-                raise sol_ex.SolValidationError(
-                    detail="'lcm-kubernetes-def-files' must be specified")
+            if vim_info.vimType == "kubernetes":
+                if 'endpoint' not in vim_info.interfaceInfo:
+                    detail = "Required attribute missing in vimConnectionInfo"
+                    raise sol_ex.SolValidationError(detail=detail)
+                if (not req.get('additionalParams', {}).get(
+                        'lcm-kubernetes-def-files')):
+                    raise sol_ex.SolValidationError(
+                        detail="'lcm-kubernetes-def-files' must be specified")
+            elif vim_info.vimType == "ETSINFV.HELM.V_3":
+                if ('endpoint' not in vim_info.interfaceInfo or
+                        'ssl_ca_cert' not in vim_info.interfaceInfo or
+                        'bearer_token' not in vim_info.accessInfo):
+                    detail = "Required attribute missing in vimConnectionInfo"
+                    raise sol_ex.SolValidationError(detail=detail)
+                if (not req.get('additionalParams', {}).get(
+                        'helm_chart_path')):
+                    raise sol_ex.SolValidationError(
+                        detail="'helm_chart_path' must be specified")
+                if (not req.get('additionalParams', {}).get(
+                        'helm_value_names')):
+                    raise sol_ex.SolValidationError(
+                        detail="'helm_value_names' must be specified")

     def instantiate_process(self, context, lcmocc, inst, grant_req,
                             grant, vnfd):
@@ -417,6 +435,9 @@ class VnfLcmDriverV2(object):
         elif vim_info.vimType == 'kubernetes':
             driver = kubernetes.Kubernetes()
             driver.instantiate(req, inst, grant_req, grant, vnfd)
+        elif vim_info.vimType == 'ETSINFV.HELM.V_3':
+            driver = helm.Helm()
+            driver.instantiate(req, inst, grant_req, grant, vnfd)
         else:
             # should not occur
             raise sol_ex.SolException(sol_detail='not support vim type')
@@ -433,6 +454,9 @@ class VnfLcmDriverV2(object):
         elif vim_info.vimType == 'kubernetes':
             driver = kubernetes.Kubernetes()
             driver.instantiate_rollback(req, inst, grant_req, grant, vnfd)
+        elif vim_info.vimType == 'ETSINFV.HELM.V_3':
+            driver = helm.Helm()
+            driver.instantiate_rollback(req, inst, grant_req, grant, vnfd)
         else:
             # should not occur
             raise sol_ex.SolException(sol_detail='not support vim type')
@@ -575,6 +599,9 @@ class VnfLcmDriverV2(object):
         elif vim_info.vimType == 'kubernetes':
             driver = kubernetes.Kubernetes()
             driver.terminate(req, inst, grant_req, grant, vnfd)
+        elif vim_info.vimType == 'ETSINFV.HELM.V_3':
+            driver = helm.Helm()
+            driver.terminate(req, inst, grant_req, grant, vnfd)
         else:
             # should not occur
             raise sol_ex.SolException(sol_detail='not support vim type')
@@ -688,6 +715,9 @@ class VnfLcmDriverV2(object):
         elif vim_info.vimType == 'kubernetes':
             driver = kubernetes.Kubernetes()
             driver.scale(req, inst, grant_req, grant, vnfd)
+        elif vim_info.vimType == 'ETSINFV.HELM.V_3':
+            driver = helm.Helm()
+            driver.scale(req, inst, grant_req, grant, vnfd)
         else:
             # should not occur
             raise sol_ex.SolException(sol_detail='not support vim type')
@@ -705,6 +735,9 @@ class VnfLcmDriverV2(object):
         elif vim_info.vimType == 'kubernetes':
             driver = kubernetes.Kubernetes()
             driver.scale_rollback(req, inst, grant_req, grant, vnfd)
+        elif vim_info.vimType == 'ETSINFV.HELM.V_3':
+            driver = helm.Helm()
+            driver.scale_rollback(req, inst, grant_req, grant, vnfd)
         else:
             # should not occur
             raise sol_ex.SolException(sol_detail='not support vim type')
@@ -887,6 +920,9 @@ class VnfLcmDriverV2(object):
         elif vim_info.vimType == 'kubernetes':
             driver = kubernetes.Kubernetes()
             driver.heal(req, inst, grant_req, grant, vnfd)
+        elif vim_info.vimType == 'ETSINFV.HELM.V_3':
+            driver = helm.Helm()
+            driver.heal(req, inst, grant_req, grant, vnfd)
         else:
             # should not occur
             raise sol_ex.SolException(sol_detail='not support vim type')
@@ -1055,6 +1091,9 @@ class VnfLcmDriverV2(object):
         elif vim_info.vimType == 'kubernetes':
             driver = kubernetes.Kubernetes()
             driver.change_vnfpkg(req, inst, grant_req, grant, vnfd)
+        elif vim_info.vimType == 'ETSINFV.HELM.V_3':
+            driver = helm.Helm()
+            driver.change_vnfpkg(req, inst, grant_req, grant, vnfd)
         else:
             # should not occur
             raise sol_ex.SolException(sol_detail='not support vim type')
@@ -1073,6 +1112,9 @@ class VnfLcmDriverV2(object):
             driver = kubernetes.Kubernetes()
             driver.change_vnfpkg_rollback(
                 req, inst, grant_req, grant, vnfd)
+        elif vim_info.vimType == 'ETSINFV.HELM.V_3':
+            driver = helm.Helm()
+            driver.change_vnfpkg_rollback(req, inst, grant_req, grant, vnfd)
         else:
             # should not occur
             raise sol_ex.SolException(sol_detail='not support vim type')
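
As an aside, the elif chains added here repeat the same three lines per
operation; a table-driven lookup is one possible consolidation. A hedged
sketch only, not part of this patch (the OpenStack vimType string is an
assumption here):

    # Hedged sketch: a lookup table could collapse the repeated elif
    # chains above into a single dispatch point.
    _DRIVERS = {
        'ETSINFV.OPENSTACK_KEYSTONE.V_3': openstack.Openstack,
        'kubernetes': kubernetes.Kubernetes,
        'ETSINFV.HELM.V_3': helm.Helm,
    }

    def _get_driver(vim_type):
        if vim_type not in _DRIVERS:
            # should not occur
            raise sol_ex.SolException(sol_detail='not support vim type')
        return _DRIVERS[vim_type]()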

View File

@@ -0,0 +1,340 @@
# Copyright (C) 2022 Nippon Telegraph and Telephone Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os

from oslo_log import log as logging

from tacker.sol_refactored.common import exceptions as sol_ex
from tacker.sol_refactored.common import vnf_instance_utils as inst_utils
from tacker.sol_refactored.infra_drivers.kubernetes import kubernetes_common
from tacker.sol_refactored.infra_drivers.kubernetes import kubernetes_resource
from tacker.sol_refactored.infra_drivers.kubernetes import kubernetes_utils


LOG = logging.getLogger(__name__)


class Helm(kubernetes_common.KubernetesCommon):

    def __init__(self):
        pass

    def instantiate(self, req, inst, grant_req, grant, vnfd):
        vim_info = inst_utils.select_vim_info(inst.vimConnectionInfo)
        with kubernetes_utils.AuthContextManager(vim_info) as acm:
            k8s_api_client = acm.init_k8s_api_client()
            helm_client = acm.init_helm_client()
            self._instantiate(req, inst, grant_req, grant, vnfd,
                              k8s_api_client, helm_client)

    def _instantiate(self, req, inst, grant_req, grant, vnfd,
                     k8s_api_client, helm_client):
        namespace = req.additionalParams.get('namespace', 'default')
        helm_chart_path = req.additionalParams['helm_chart_path']
        chart_name = os.path.join(vnfd.csar_dir, helm_chart_path)
        release_name = self._get_release_name(inst)
        helm_value_names = req.additionalParams['helm_value_names']

        vdus_num = self._get_vdus_num_from_grant_req_res_defs(
            grant_req.addResources)

        # Create parameters
        parameters = req.additionalParams.get('helm_parameters', {})
        for vdu_name, vdu_num in vdus_num.items():
            replicaParam = helm_value_names.get(vdu_name, {}).get('replica')
            if replicaParam:
                parameters[replicaParam] = vdu_num

        if helm_client.is_release_exist(release_name, namespace):
            # helm upgrade. This is the retry case.
            revision = helm_client.upgrade(release_name, chart_name,
                                           namespace, parameters)
        else:
            # helm install
            revision = helm_client.install(release_name, chart_name,
                                           namespace, parameters)

        # get the manifest from the helm chart
        k8s_resources = helm_client.get_manifest(release_name, namespace)
        k8s_reses = self._create_reses_from_manifest(k8s_api_client, namespace,
                                                     k8s_resources)
        vdu_reses = self._select_vdu_reses(vnfd, req.flavourId, k8s_reses)

        # wait for the k8s resource creation to complete
        self._wait_k8s_reses_ready(k8s_reses)

        # make instantiated info
        self._init_instantiated_vnf_info(inst, req.flavourId, vdu_reses,
            namespace, helm_chart_path, helm_value_names, release_name,
            revision)
        self._update_vnfc_info(inst, k8s_api_client)

    def instantiate_rollback(self, req, inst, grant_req, grant, vnfd):
        vim_info = inst_utils.select_vim_info(inst.vimConnectionInfo)
        with kubernetes_utils.AuthContextManager(vim_info) as acm:
            k8s_api_client = acm.init_k8s_api_client()
            helm_client = acm.init_helm_client()
            namespace = req.additionalParams.get('namespace', 'default')
            release_name = self._get_release_name(inst)
            self._delete_resource(release_name, namespace,
                                  k8s_api_client, helm_client)

    def terminate(self, req, inst, grant_req, grant, vnfd):
        vim_info = inst_utils.select_vim_info(inst.vimConnectionInfo)
        with kubernetes_utils.AuthContextManager(vim_info) as acm:
            k8s_api_client = acm.init_k8s_api_client()
            helm_client = acm.init_helm_client()
            namespace = inst.instantiatedVnfInfo.metadata['namespace']
            release_name = inst.instantiatedVnfInfo.metadata['release_name']
            self._delete_resource(release_name, namespace,
                                  k8s_api_client, helm_client)

    def _delete_resource(self, release_name, namespace, k8s_api_client,
                         helm_client):
        if not helm_client.is_release_exist(release_name, namespace):
            LOG.info(f'HELM release {release_name} does not exist.')
            return

        # get the k8s manifest from the helm chart
        k8s_resources = helm_client.get_manifest(release_name, namespace)
        k8s_reses = self._create_reses_from_manifest(k8s_api_client,
                                                     namespace, k8s_resources)

        # uninstall release
        helm_client.uninstall(release_name, namespace)

        # wait for the k8s resource deletion to complete
        self._wait_k8s_reses_deleted(k8s_reses)

    def scale(self, req, inst, grant_req, grant, vnfd):
        vim_info = inst_utils.select_vim_info(inst.vimConnectionInfo)
        with kubernetes_utils.AuthContextManager(vim_info) as acm:
            k8s_api_client = acm.init_k8s_api_client()
            helm_client = acm.init_helm_client()
            self._scale(req, inst, grant_req, grant, vnfd,
                        k8s_api_client, helm_client)

    def _scale(self, req, inst, grant_req, grant, vnfd,
               k8s_api_client, helm_client):
        if req.type == 'SCALE_OUT':
            vdus_num = self._get_vdus_num_from_grant_req_res_defs(
                grant_req.addResources)
            for vdu_name, vdu_num in vdus_num.items():
                vdus_num[vdu_name] = (
                    self._get_current_vdu_num(inst, vdu_name) + vdu_num)
        elif req.type == 'SCALE_IN':
            vdus_num = self._get_vdus_num_from_grant_req_res_defs(
                grant_req.removeResources)
            for vdu_name, vdu_num in vdus_num.items():
                vdus_num[vdu_name] = (
                    self._get_current_vdu_num(inst, vdu_name) - vdu_num)

        metadata = inst.instantiatedVnfInfo.metadata
        namespace = metadata['namespace']
        release_name = metadata['release_name']
        chart_name = os.path.join(vnfd.csar_dir, metadata['helm_chart_path'])
        helm_value_names = metadata['helm_value_names']

        # Create scale parameters
        parameters = {}
        for vdu_name, vdu_num in vdus_num.items():
            replicaParam = helm_value_names.get(vdu_name, {}).get('replica')
            if not replicaParam:
                raise sol_ex.HelmParameterNotFound(vdu_name=vdu_name)
            parameters[replicaParam] = vdu_num

        # update
        revision = helm_client.upgrade(release_name, chart_name,
                                       namespace, parameters)

        vdu_reses = []
        for vdu_name, vdu_num in vdus_num.items():
            vdu_res = self._get_vdu_res(inst, k8s_api_client, vdu_name)
            vdu_res.body['spec']['replicas'] = vdu_num
            vdu_reses.append(vdu_res)

        # wait for the k8s resource update to complete
        self._wait_k8s_reses_updated(vdu_reses, k8s_api_client,
                                     namespace, old_pods_names=set())

        # make instantiated info
        self._update_vnfc_info(inst, k8s_api_client)
        metadata['revision'] = revision

    def scale_rollback(self, req, inst, grant_req, grant, vnfd):
        vim_info = inst_utils.select_vim_info(inst.vimConnectionInfo)
        with kubernetes_utils.AuthContextManager(vim_info) as acm:
            k8s_api_client = acm.init_k8s_api_client()
            helm_client = acm.init_helm_client()
            self._scale_rollback(req, inst, grant_req, grant, vnfd,
                                 k8s_api_client, helm_client)

    def _scale_rollback(self, req, inst, grant_req, grant, vnfd,
                        k8s_api_client, helm_client):
        vdus_num = self._get_vdus_num_from_grant_req_res_defs(
            grant_req.addResources)
        metadata = inst.instantiatedVnfInfo.metadata
        namespace = metadata['namespace']
        release_name = metadata['release_name']
        revision = metadata['revision']

        # rollback
        helm_client.rollback(release_name, revision, namespace)

        vdu_reses = [self._get_vdu_res(inst, k8s_api_client, vdu_name)
                     for vdu_name in vdus_num]

        # wait for the k8s resource update to complete
        self._wait_k8s_reses_updated(vdu_reses, k8s_api_client,
                                     namespace, old_pods_names=set())

    def change_vnfpkg(self, req, inst, grant_req, grant, vnfd):
        vim_info = inst_utils.select_vim_info(inst.vimConnectionInfo)
        with kubernetes_utils.AuthContextManager(vim_info) as acm:
            k8s_api_client = acm.init_k8s_api_client()
            helm_client = acm.init_helm_client()
            if req.additionalParams['upgrade_type'] == 'RollingUpdate':
                self._change_vnfpkg_rolling_update(req, inst, grant_req,
                    grant, vnfd, k8s_api_client, helm_client)
            else:
                # should not reach here
                pass

    def _change_vnfpkg_rolling_update(self, req, inst, grant_req, grant, vnfd,
                                      k8s_api_client, helm_client):
        metadata = inst.instantiatedVnfInfo.metadata
        namespace = metadata['namespace']
        release_name = metadata['release_name']
        helm_chart_path = req.additionalParams.get('helm_chart_path',
            metadata['helm_chart_path'])
        chart_name = os.path.join(vnfd.csar_dir, helm_chart_path)
        vdus_num = self._get_vdus_num_from_grant_req_res_defs(
            grant_req.addResources)

        # update
        revision = helm_client.upgrade(release_name, chart_name,
                                       namespace, {})

        # get the manifest from the helm chart
        k8s_resources = helm_client.get_manifest(release_name, namespace)
        k8s_reses = self._create_reses_from_manifest(
            k8s_api_client, namespace, k8s_resources)
        vdu_reses = self._select_vdu_reses(
            vnfd, inst.instantiatedVnfInfo.flavourId, k8s_reses)

        # wait for the k8s resource update to complete
        target_reses = {vdu: res for vdu, res in vdu_reses.items()
                        if vdu in vdus_num.keys()}
        old_pods_names = {vnfc.computeResource.resourceId
                          for vnfc in inst.instantiatedVnfInfo.vnfcResourceInfo
                          if vnfc.vduId in vdus_num.keys()}
        self._wait_k8s_reses_updated(
            list(target_reses.values()), k8s_api_client, namespace,
            old_pods_names)

        # make instantiated info
        self._update_vnfc_info(inst, k8s_api_client)
        metadata['vdu_reses'].update(
            {vdu: res.body for vdu, res in target_reses.items()})
        metadata['helm_chart_path'] = helm_chart_path
        metadata['revision'] = revision
        inst.vnfdId = req.vnfdId

    def change_vnfpkg_rollback(self, req, inst, grant_req, grant, vnfd):
        vim_info = inst_utils.select_vim_info(inst.vimConnectionInfo)
        with kubernetes_utils.AuthContextManager(vim_info) as acm:
            k8s_api_client = acm.init_k8s_api_client()
            helm_client = acm.init_helm_client()
            if req.additionalParams['upgrade_type'] == 'RollingUpdate':
                self._change_vnfpkg_rolling_update_rollback(
                    req, inst, grant_req, grant, vnfd, k8s_api_client,
                    helm_client)
            else:
                # should not reach here
                pass

    def _change_vnfpkg_rolling_update_rollback(self, req, inst, grant_req,
            grant, vnfd, k8s_api_client, helm_client):
        metadata = inst.instantiatedVnfInfo.metadata
        namespace = metadata['namespace']
        release_name = metadata['release_name']
        revision = metadata['revision']

        original_pods = {vnfc.computeResource.resourceId for vnfc in
                         inst.instantiatedVnfInfo.vnfcResourceInfo}
        all_pods = kubernetes_utils.list_namespaced_pods(
            k8s_api_client, namespace)
        current_pods = {pod.metadata.name for pod in all_pods}
        old_pods_names = current_pods - original_pods

        # rollback
        helm_client.rollback(release_name, revision, namespace)

        target_vdus = {res_def.resourceTemplateId
                       for res_def in grant_req.addResources
                       if res_def.type == 'COMPUTE'}
        target_reses = [self._get_vdu_res(inst, k8s_api_client, vdu_name)
                        for vdu_name in target_vdus]

        # wait for the k8s resource update to complete
        self._wait_k8s_reses_updated(
            target_reses, k8s_api_client, namespace, old_pods_names)

        # make instantiated info
        self._update_vnfc_info(inst, k8s_api_client)

    def _create_reses_from_manifest(self, k8s_api_client, namespace,
                                    k8s_resources):
        for k8s_res in k8s_resources:
            if k8s_res['kind'] in kubernetes_utils.SUPPORTED_NAMESPACE_KINDS:
                k8s_res.setdefault('metadata', {})
                k8s_res['metadata'].setdefault('namespace', namespace)

        k8s_reses = []
        for k8s_res in k8s_resources:
            try:
                cls = getattr(kubernetes_resource, k8s_res['kind'])
                k8s_reses.append(cls(k8s_api_client, k8s_res))
            except AttributeError:
                LOG.info("Unsupported kind %s; ignored.", k8s_res['kind'])

        return k8s_reses

    def _get_release_name(self, inst):
        return f'vnf-{inst.id}'

    def _init_instantiated_vnf_info(self, inst, flavour_id, vdu_reses,
            namespace, helm_chart_path, helm_value_names, release_name,
            revision):
        super()._init_instantiated_vnf_info(inst, flavour_id, vdu_reses,
                                            namespace)
        inst.instantiatedVnfInfo.metadata.update(
            {
                'helm_chart_path': helm_chart_path,
                'helm_value_names': helm_value_names,
                'release_name': release_name,
                'revision': revision
            }
        )

    def _is_match_pod_naming_rule(self, rsc_kind, rsc_name, pod_name):
        return rsc_name in pod_name
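
A hedged worked example of the parameter construction used by _instantiate
and _scale above; the names mirror the functional-test request generator
further below:

    # Worked example of the helm_value_names -> --set mapping.
    helm_value_names = {'VDU1': {'replica': 'replicaCountVdu1'},
                        'VDU2': {'replica': 'replicaCountVdu2'}}
    vdus_num = {'VDU1': 1, 'VDU2': 2}   # from grant_req.addResources

    parameters = {}
    for vdu_name, vdu_num in vdus_num.items():
        replica_param = helm_value_names.get(vdu_name, {}).get('replica')
        if replica_param:
            parameters[replica_param] = vdu_num

    # -> {'replicaCountVdu1': 1, 'replicaCountVdu2': 2}, passed to
    #    helm install/upgrade as --set replicaCountVdu1=1,replicaCountVdu2=2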

View File

@@ -0,0 +1,100 @@
# Copyright (C) 2022 Nippon Telegraph and Telephone Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import subprocess

import yaml

from oslo_log import log as logging

from tacker.sol_refactored.common import exceptions as sol_ex


LOG = logging.getLogger(__name__)

HELM_INSTALL_TIMEOUT = "120s"


class HelmClient():

    def __init__(self, helm_auth_params):
        self.helm_auth_params = helm_auth_params

    def _execute_command(self, helm_command, raise_ex=True):
        helm_command.extend(self.helm_auth_params)
        result = subprocess.run(helm_command,
                                stdout=subprocess.PIPE,
                                stderr=subprocess.PIPE,
                                text=True)
        if raise_ex and result.returncode != 0:
            raise sol_ex.HelmOperationFailed(sol_detail=str(result))
        return result

    def _get_revision(self, result):
        for line in result.stdout.split('\n'):
            if 'REVISION' in line:
                revision = line.split()[-1]
                return revision

    def is_release_exist(self, release_name, namespace):
        # execute helm status command
        helm_command = ["helm", "status", release_name, "--namespace",
                        namespace]
        result = self._execute_command(helm_command, False)
        return result.returncode == 0

    def install(self, release_name, chart_name, namespace, parameters):
        # execute helm install command
        helm_command = ["helm", "install", release_name, chart_name,
                        "--namespace", namespace, "--create-namespace"]
        if parameters:
            set_params = ','.join([f"{key}={value}"
                                   for key, value in parameters.items()])
            helm_command.extend(["--set", set_params])
        helm_command.extend(["--timeout", HELM_INSTALL_TIMEOUT])
        result = self._execute_command(helm_command)
        return self._get_revision(result)

    def upgrade(self, release_name, chart_name, namespace, parameters):
        # execute helm upgrade command
        helm_command = ["helm", "upgrade", release_name, chart_name,
                        "--namespace", namespace, "--reuse-values"]
        if parameters:
            set_params = ','.join([f"{key}={value}"
                                   for key, value in parameters.items()])
            helm_command.extend(["--set", set_params])
        helm_command.extend(["--timeout", HELM_INSTALL_TIMEOUT])
        result = self._execute_command(helm_command)
        return self._get_revision(result)

    def uninstall(self, release_name, namespace):
        # execute helm uninstall command
        helm_command = ["helm", "uninstall", release_name, "--namespace",
                        namespace, "--timeout", HELM_INSTALL_TIMEOUT]
        self._execute_command(helm_command)

    def get_manifest(self, release_name, namespace):
        # execute helm get manifest command
        helm_command = ["helm", "get", "manifest", release_name,
                        "--namespace", namespace]
        result = self._execute_command(helm_command)
        return list(yaml.safe_load_all(result.stdout))

    def rollback(self, release_name, revision_no, namespace):
        # execute helm rollback command
        helm_command = ["helm", "rollback", release_name, revision_no,
                        "--namespace", namespace]
        self._execute_command(helm_command)
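
For illustration, a hedged usage sketch of HelmClient; the auth parameters
come from AuthContextManager._get_helm_auth_params (added to
kubernetes_utils.py below) and all values here are placeholders:

    # Hedged usage sketch; values are dummies.
    client = HelmClient(["--kube-apiserver", "https://127.0.0.1:6443",
                         "--kube-ca-file", "/tmp/ca.pem",
                         "--kube-token", "dummy-token"])
    revision = client.install("vnf-dummy-id", "/path/to/test-chart-0.1.0.tgz",
                              "default", {"replicaCountVdu1": 1})
    # spawns, in effect:
    #   helm install vnf-dummy-id /path/to/test-chart-0.1.0.tgz
    #        --namespace default --create-namespace
    #        --set replicaCountVdu1=1 --timeout 120s
    #        --kube-apiserver ... --kube-ca-file ... --kube-token ...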

View File

@@ -13,16 +13,16 @@
 # License for the specific language governing permissions and limitations
 # under the License.

+import re
+
 from kubernetes import client
 from oslo_log import log as logging
-from oslo_service import loopingcall

 from tacker.sol_refactored.common import config
 from tacker.sol_refactored.common import exceptions as sol_ex
 from tacker.sol_refactored.common import vnf_instance_utils as inst_utils
-from tacker.sol_refactored.infra_drivers.kubernetes import kubernetes_resource
+from tacker.sol_refactored.infra_drivers.kubernetes import kubernetes_common
 from tacker.sol_refactored.infra_drivers.kubernetes import kubernetes_utils
-from tacker.sol_refactored import objects

 LOG = logging.getLogger(__name__)
@@ -30,11 +30,10 @@ LOG = logging.getLogger(__name__)
 CONF = config.CONF

 CHECK_INTERVAL = 10

-TARGET_KIND = {"Pod", "Deployment", "DaemonSet", "StatefulSet", "ReplicaSet"}
 SCALABLE_KIND = {"Deployment", "ReplicaSet", "StatefulSet"}


-class Kubernetes(object):
+class Kubernetes(kubernetes_common.KubernetesCommon):

     def __init__(self):
         pass
@@ -75,8 +74,8 @@ class Kubernetes(object):
         self._wait_k8s_reses_ready(k8s_reses)

         # make instantiated info
-        self._init_instantiated_vnf_info(
-            inst, req.flavourId, target_k8s_files, vdu_reses, namespace)
+        self._init_instantiated_vnf_info(inst, req.flavourId, vdu_reses,
+                                         namespace, target_k8s_files)
         self._update_vnfc_info(inst, k8s_api_client)

     def _setup_k8s_reses(self, vnfd, target_k8s_files, k8s_api_client,
@@ -207,8 +206,8 @@ class Kubernetes(object):
             vnfd, inst.instantiatedVnfInfo.flavourId, k8s_reses)

         self._init_instantiated_vnf_info(
-            inst, inst.instantiatedVnfInfo.flavourId, target_k8s_files,
-            vdu_reses, namespace)
+            inst, inst.instantiatedVnfInfo.flavourId, vdu_reses,
+            namespace, target_k8s_files)

         self._change_vnfpkg_rolling_update(
             inst, grant_req, grant, vnfd, k8s_api_client, namespace,
@@ -245,53 +244,6 @@ class Kubernetes(object):
             # not reach here
             pass

-    def heal(self, req, inst, grant_req, grant, vnfd):
-        vim_info = inst_utils.select_vim_info(inst.vimConnectionInfo)
-        with kubernetes_utils.AuthContextManager(vim_info) as acm:
-            k8s_api_client = acm.init_k8s_api_client()
-            self._heal(req, inst, grant_req, grant, vnfd, k8s_api_client)
-
-    def _heal(self, req, inst, grant_req, grant, vnfd, k8s_api_client):
-        namespace = inst.instantiatedVnfInfo.metadata['namespace']
-
-        # get heal Pod name
-        vnfc_res_ids = [res_def.resource.resourceId
-                        for res_def in grant_req.removeResources
-                        if res_def.type == 'COMPUTE']
-        target_vnfcs = [vnfc
-                        for vnfc in inst.instantiatedVnfInfo.vnfcResourceInfo
-                        if vnfc.computeResource.resourceId in vnfc_res_ids]
-
-        # check running Pod
-        all_pods = kubernetes_utils.list_namespaced_pods(
-            k8s_api_client, namespace)
-        current_pods_name = [pod.metadata.name for pod in all_pods]
-
-        old_pods_names = set()
-        vdu_reses = {}
-        for vnfc in target_vnfcs:
-            if vnfc.id not in current_pods_name:
-                # may happen when retry or auto healing
-                msg = f'heal target pod {vnfc.id} is not in the running pod.'
-                LOG.error(msg)
-                continue
-            if vnfc.vduId in vdu_reses:
-                res = vdu_reses[vnfc.vduId]
-            else:
-                res = self._get_vdu_res(inst, k8s_api_client, vnfc.vduId)
-                vdu_reses[vnfc.vduId] = res
-            res.delete_pod(vnfc.id)
-            old_pods_names.add(vnfc.id)
-
-        # wait k8s resource create complete
-        if old_pods_names:
-            self._wait_k8s_reses_updated(list(vdu_reses.values()),
-                                         k8s_api_client, namespace,
-                                         old_pods_names)
-
-        # make instantiated info
-        self._update_vnfc_info(inst, k8s_api_client)
-
     def _scale_k8s_resource(self, inst, vdus_num, k8s_api_client):
         namespace = inst.instantiatedVnfInfo.metadata['namespace']
@@ -344,131 +296,36 @@ class Kubernetes(object):
             k8s_api_client = acm.init_k8s_api_client()
             self._scale_k8s_resource(inst, vdus_num, k8s_api_client)

-    def _get_vdus_num_from_grant_req_res_defs(self, res_defs):
-        vdus_num = {}
-        for res_def in res_defs:
-            if res_def.type == 'COMPUTE':
-                vdus_num.setdefault(res_def.resourceTemplateId, 0)
-                vdus_num[res_def.resourceTemplateId] += 1
-        return vdus_num
-
-    def _get_current_vdu_num(self, inst, vdu):
-        num = 0
-        for vnfc in inst.instantiatedVnfInfo.vnfcResourceInfo:
-            if vnfc.vduId == vdu:
-                num += 1
-        return num
-
-    def _select_vdu_reses(self, vnfd, flavour_id, k8s_reses):
-        vdu_nodes = vnfd.get_vdu_nodes(flavour_id)
-        vdu_ids = {value.get('properties').get('name'): key
-                   for key, value in vdu_nodes.items()}
-        return {vdu_ids[res.name]: res
-                for res in k8s_reses
-                if res.kind in TARGET_KIND and res.name in vdu_ids}
-
-    def _init_instantiated_vnf_info(self, inst, flavour_id, def_files,
-                                    vdu_reses, namespace):
-        metadata = {
-            'namespace': namespace,
-            'lcm-kubernetes-def-files': def_files,
-            'vdu_reses': {vdu_name: vdu_res.body
-                          for vdu_name, vdu_res in vdu_reses.items()}
-        }
-        inst.instantiatedVnfInfo = objects.VnfInstanceV2_InstantiatedVnfInfo(
-            flavourId=flavour_id,
-            vnfState='STARTED',
-            metadata=metadata
-        )
-
-    def _get_vdu_res(self, inst, k8s_api_client, vdu):
-        # must be found
-        res = inst.instantiatedVnfInfo.metadata['vdu_reses'][vdu]
-        cls = getattr(kubernetes_resource, res['kind'])
-        return cls(k8s_api_client, res)
-
-    def _update_vnfc_info(self, inst, k8s_api_client):
-        all_pods = kubernetes_utils.list_namespaced_pods(
-            k8s_api_client, inst.instantiatedVnfInfo.metadata['namespace'])
-        vnfc_resources = []
-        for pod in all_pods:
-            pod_name = pod.metadata.name
-            for vdu_name, vdu_res in (
-                    inst.instantiatedVnfInfo.metadata['vdu_reses'].items()):
-                if kubernetes_utils.is_match_pod_naming_rule(
-                        vdu_res['kind'], vdu_res['metadata']['name'],
-                        pod_name):
-                    vnfc_resources.append(objects.VnfcResourceInfoV2(
-                        id=pod_name,
-                        vduId=vdu_name,
-                        computeResource=objects.ResourceHandle(
-                            resourceId=pod_name,
-                            vimLevelResourceType=vdu_res['kind']
-                        ),
-                        # lcmocc_utils.update_lcmocc assumes its existence
-                        metadata={}
-                    ))
-        inst.instantiatedVnfInfo.vnfcResourceInfo = vnfc_resources
-
-        # make vnfcInfo
-        # NOTE: vnfcInfo only exists in SOL002
-        inst.instantiatedVnfInfo.vnfcInfo = [
-            objects.VnfcInfoV2(
-                id=f'{vnfc_res_info.vduId}-{vnfc_res_info.id}',
-                vduId=vnfc_res_info.vduId,
-                vnfcResourceInfoId=vnfc_res_info.id,
-                vnfcState='STARTED'
-            )
-            for vnfc_res_info in vnfc_resources
-        ]
-
-    def _check_status(self, check_func, *args):
-        timer = loopingcall.FixedIntervalWithTimeoutLoopingCall(
-            check_func, *args)
-        try:
-            timer.start(
-                interval=CHECK_INTERVAL,
-                timeout=CONF.v2_vnfm.kubernetes_vim_rsc_wait_timeout).wait()
-        except loopingcall.LoopingCallTimeOut:
-            raise sol_ex.K8sOperaitionTimeout()
-
-    def _wait_k8s_reses_ready(self, k8s_reses):
-        def _check_ready(check_reses):
-            ok_reses = {res for res in check_reses if res.is_ready()}
-            check_reses -= ok_reses
-            if not check_reses:
-                raise loopingcall.LoopingCallDone()
-
-        check_reses = set(k8s_reses)
-        self._check_status(_check_ready, check_reses)
-
-    def _wait_k8s_reses_deleted(self, k8s_reses):
-        def _check_deleted(check_reses):
-            ok_reses = {res for res in check_reses if not res.is_exists()}
-            check_reses -= ok_reses
-            if not check_reses:
-                raise loopingcall.LoopingCallDone()
-
-        check_reses = set(k8s_reses)
-        self._check_status(_check_deleted, check_reses)
-
-    def _wait_k8s_reses_updated(self, k8s_reses, k8s_api_client, namespace,
-                                old_pods_names):
-        def _check_update(check_reses, k8s_api_client, namespace,
-                          old_pods_names):
-            ok_reses = set()
-            all_pods = kubernetes_utils.list_namespaced_pods(
-                k8s_api_client, namespace)
-            for res in check_reses:
-                pods_info = [pod for pod in all_pods
-                             if kubernetes_utils.is_match_pod_naming_rule(
-                                 res.kind, res.name, pod.metadata.name)]
-                if res.is_update(pods_info, old_pods_names):
-                    ok_reses.add(res)
-            check_reses -= ok_reses
-            if not check_reses:
-                raise loopingcall.LoopingCallDone()
-
-        check_reses = set(k8s_reses)
-        self._check_status(_check_update, check_reses, k8s_api_client,
-                           namespace, old_pods_names)
+    def _init_instantiated_vnf_info(self, inst, flavour_id, vdu_reses,
+                                    namespace, target_k8s_files):
+        super()._init_instantiated_vnf_info(inst, flavour_id,
+                                            vdu_reses, namespace)
+        inst.instantiatedVnfInfo.metadata.update(
+            {'lcm-kubernetes-def-files': target_k8s_files}
+        )
+
+    def _is_match_pod_naming_rule(self, rsc_kind, rsc_name, pod_name):
+        match_result = None
+        if rsc_kind == 'Pod':
+            # Expected example: name
+            if rsc_name == pod_name:
+                return True
+        elif rsc_kind == 'Deployment':
+            # Expected example: name-012789abef-019az
+            # NOTE(horie): The naming rule of a Pod in a Deployment is
+            # "(deployment name)-(pod template hash)-(5 characters)".
+            # The "pod template hash" string is generated from a 32 bit
+            # hash. It may be 1 to 10 characters long, but the lower limit
+            # is not clear from the Kubernetes source code.
+            match_result = re.match(
                rsc_name + '-([0-9a-f]{1,10})-([0-9a-z]{5})+$', pod_name)
+        elif rsc_kind in ('ReplicaSet', 'DaemonSet'):
+            # Expected example: name-019az
+            match_result = re.match(rsc_name + '-([0-9a-z]{5})+$', pod_name)
+        elif rsc_kind == 'StatefulSet':
+            # Expected example: name-0
+            match_result = re.match(rsc_name + '-[0-9]+$', pod_name)
+        if match_result:
+            return True
+
+        return False

View File

@@ -0,0 +1,216 @@
# Copyright (C) 2022 Nippon Telegraph and Telephone Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from oslo_log import log as logging
from oslo_service import loopingcall

from tacker.sol_refactored.common import config
from tacker.sol_refactored.common import exceptions as sol_ex
from tacker.sol_refactored.common import vnf_instance_utils as inst_utils
from tacker.sol_refactored.infra_drivers.kubernetes import kubernetes_resource
from tacker.sol_refactored.infra_drivers.kubernetes import kubernetes_utils
from tacker.sol_refactored import objects


LOG = logging.getLogger(__name__)

CONF = config.CONF

CHECK_INTERVAL = 10

TARGET_KIND = {"Pod", "Deployment", "DaemonSet", "StatefulSet", "ReplicaSet"}


class KubernetesCommon(object):

    def __init__(self):
        pass

    def heal(self, req, inst, grant_req, grant, vnfd):
        vim_info = inst_utils.select_vim_info(inst.vimConnectionInfo)
        with kubernetes_utils.AuthContextManager(vim_info) as acm:
            k8s_api_client = acm.init_k8s_api_client()
            self._heal(req, inst, grant_req, grant, vnfd, k8s_api_client)

    def _heal(self, req, inst, grant_req, grant, vnfd, k8s_api_client):
        namespace = inst.instantiatedVnfInfo.metadata['namespace']

        # get the names of the Pods to heal
        vnfc_res_ids = [res_def.resource.resourceId
                        for res_def in grant_req.removeResources
                        if res_def.type == 'COMPUTE']
        target_vnfcs = [vnfc
                        for vnfc in inst.instantiatedVnfInfo.vnfcResourceInfo
                        if vnfc.computeResource.resourceId in vnfc_res_ids]

        # check running Pods
        all_pods = kubernetes_utils.list_namespaced_pods(
            k8s_api_client, namespace)
        current_pods_name = [pod.metadata.name for pod in all_pods]

        old_pods_names = set()
        vdu_reses = {}
        for vnfc in target_vnfcs:
            if vnfc.id not in current_pods_name:
                # may happen on retry or with auto healing
                msg = f'heal target pod {vnfc.id} is not among running pods.'
                LOG.error(msg)
                continue
            if vnfc.vduId in vdu_reses:
                res = vdu_reses[vnfc.vduId]
            else:
                res = self._get_vdu_res(inst, k8s_api_client, vnfc.vduId)
                vdu_reses[vnfc.vduId] = res
            res.delete_pod(vnfc.id)
            old_pods_names.add(vnfc.id)

        # wait for the replacement Pods to be created
        if old_pods_names:
            self._wait_k8s_reses_updated(list(vdu_reses.values()),
                                         k8s_api_client, namespace,
                                         old_pods_names)

        # make instantiated info
        self._update_vnfc_info(inst, k8s_api_client)

    def _get_vdus_num_from_grant_req_res_defs(self, res_defs):
        vdus_num = {}
        for res_def in res_defs:
            if res_def.type == 'COMPUTE':
                vdus_num.setdefault(res_def.resourceTemplateId, 0)
                vdus_num[res_def.resourceTemplateId] += 1
        return vdus_num

    def _get_current_vdu_num(self, inst, vdu):
        num = 0
        for vnfc in inst.instantiatedVnfInfo.vnfcResourceInfo:
            if vnfc.vduId == vdu:
                num += 1
        return num

    def _select_vdu_reses(self, vnfd, flavour_id, k8s_reses):
        vdu_nodes = vnfd.get_vdu_nodes(flavour_id)
        vdu_ids = {value.get('properties').get('name'): key
                   for key, value in vdu_nodes.items()}
        # res.name is properties.name itself, or
        # {properties.name}-{some string}; the latter is the helm case.
        return {vdu_ids[res.name.split("-")[0]]: res
                for res in k8s_reses
                if (res.kind in TARGET_KIND
                    and res.name.split("-")[0] in vdu_ids)}

    def _init_instantiated_vnf_info(self, inst, flavour_id,
                                    vdu_reses, namespace):
        metadata = {
            'namespace': namespace,
            'vdu_reses': {vdu_name: vdu_res.body
                          for vdu_name, vdu_res in vdu_reses.items()}
        }
        inst.instantiatedVnfInfo = objects.VnfInstanceV2_InstantiatedVnfInfo(
            flavourId=flavour_id,
            vnfState='STARTED',
            metadata=metadata
        )

    def _get_vdu_res(self, inst, k8s_api_client, vdu):
        # must be found
        res = inst.instantiatedVnfInfo.metadata['vdu_reses'][vdu]
        cls = getattr(kubernetes_resource, res['kind'])
        return cls(k8s_api_client, res)

    def _update_vnfc_info(self, inst, k8s_api_client):
        all_pods = kubernetes_utils.list_namespaced_pods(
            k8s_api_client, inst.instantiatedVnfInfo.metadata['namespace'])
        vnfc_resources = []
        for pod in all_pods:
            pod_name = pod.metadata.name
            for vdu_name, vdu_res in (
                    inst.instantiatedVnfInfo.metadata['vdu_reses'].items()):
                if self._is_match_pod_naming_rule(
                        vdu_res['kind'], vdu_res['metadata']['name'],
                        pod_name):
                    vnfc_resources.append(objects.VnfcResourceInfoV2(
                        id=pod_name,
                        vduId=vdu_name,
                        computeResource=objects.ResourceHandle(
                            resourceId=pod_name,
                            vimLevelResourceType=vdu_res['kind']
                        ),
                        # lcmocc_utils.update_lcmocc assumes its existence
                        metadata={}
                    ))
        inst.instantiatedVnfInfo.vnfcResourceInfo = vnfc_resources

        # make vnfcInfo
        # NOTE: vnfcInfo only exists in SOL002
        inst.instantiatedVnfInfo.vnfcInfo = [
            objects.VnfcInfoV2(
                id=f'{vnfc_res_info.vduId}-{vnfc_res_info.id}',
                vduId=vnfc_res_info.vduId,
                vnfcResourceInfoId=vnfc_res_info.id,
                vnfcState='STARTED'
            )
            for vnfc_res_info in vnfc_resources
        ]

    def _check_status(self, check_func, *args):
        timer = loopingcall.FixedIntervalWithTimeoutLoopingCall(
            check_func, *args)
        try:
            timer.start(
                interval=CHECK_INTERVAL,
                timeout=CONF.v2_vnfm.kubernetes_vim_rsc_wait_timeout).wait()
        except loopingcall.LoopingCallTimeOut:
            raise sol_ex.K8sOperaitionTimeout()

    def _wait_k8s_reses_ready(self, k8s_reses):
        def _check_ready(check_reses):
            ok_reses = {res for res in check_reses if res.is_ready()}
            check_reses -= ok_reses
            if not check_reses:
                raise loopingcall.LoopingCallDone()

        check_reses = set(k8s_reses)
        self._check_status(_check_ready, check_reses)

    def _wait_k8s_reses_deleted(self, k8s_reses):
        def _check_deleted(check_reses):
            ok_reses = {res for res in check_reses if not res.is_exists()}
            check_reses -= ok_reses
            if not check_reses:
                raise loopingcall.LoopingCallDone()

        check_reses = set(k8s_reses)
        self._check_status(_check_deleted, check_reses)

    def _wait_k8s_reses_updated(self, k8s_reses, k8s_api_client, namespace,
                                old_pods_names):
        def _check_updated(check_reses, k8s_api_client, namespace,
                           old_pods_names):
            ok_reses = set()
            all_pods = kubernetes_utils.list_namespaced_pods(
                k8s_api_client, namespace)
            for res in check_reses:
                pods_info = [pod for pod in all_pods
                             if self._is_match_pod_naming_rule(
                                 res.kind, res.name, pod.metadata.name)]
                if res.is_update(pods_info, old_pods_names):
                    ok_reses.add(res)
            check_reses -= ok_reses
            if not check_reses:
                raise loopingcall.LoopingCallDone()

        check_reses = set(k8s_reses)
        self._check_status(_check_updated, check_reses, k8s_api_client,
                           namespace, old_pods_names)
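
A hedged worked example of the naming conventions handled by
_select_vdu_reses and the per-driver _is_match_pod_naming_rule; the release
and hash strings below are illustrative:

    # Chart resources are named vdu1-<release fullname> (helm case),
    # while the VDU property name is plain 'vdu1' (k8s case).
    res_names = ['vdu1', 'vdu1-vnf-dummy-test-chart']
    vdu_ids = {'vdu1': 'VDU1'}   # properties.name -> VDU node name
    for name in res_names:
        assert vdu_ids[name.split('-')[0]] == 'VDU1'

    # The Helm driver's pod matching is substring-based (helm.py above):
    rsc_name = 'vdu1-vnf-dummy-test-chart'
    pod_name = 'vdu1-vnf-dummy-test-chart-5bf9cd6bd8-xk2tp'
    assert rsc_name in pod_name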

View File

@@ -25,62 +25,36 @@ import yaml

 from tacker.sol_refactored.common import exceptions as sol_ex
 from tacker.sol_refactored.common import oidc_utils
+from tacker.sol_refactored.infra_drivers.kubernetes import helm_utils
 from tacker.sol_refactored.infra_drivers.kubernetes import kubernetes_resource

 LOG = logging.getLogger(__name__)

-SUPPORTED_NAMESPACE_KINDS = [
-    "Pod",
-    "Binding",
-    "ConfigMap",
-    "LimitRange",
-    "PersistentVolumeClaim",
-    "PodTemplate",
-    "ResourceQuota",
-    "Secret",
-    "ServiceAccount",
-    "Service",
-    "ControllerRevision",
-    "DaemonSet",
-    "Deployment",
-    "ReplicaSet",
-    "StatefulSet",
-    "LocalSubjectAccessReview",
-    "HorizontalPodAutoscaler",
-    "Job",
-    "Lease",
-    "NetworkPolicy",
-    "RoleBinding",
-    "Role"
-]
-
-
-def is_match_pod_naming_rule(rsc_kind, rsc_name, pod_name):
-    match_result = None
-    if rsc_kind == 'Pod':
-        # Expected example: name
-        if rsc_name == pod_name:
-            return True
-    elif rsc_kind == 'Deployment':
-        # Expected example: name-012789abef-019az
-        # NOTE(horie): The naming rule of Pod in deployment is
-        # "(deployment name)-(pod template hash)-(5 charactors)".
-        # The "pod template hash" string is generated from 32 bit hash.
-        # This may be from 1 to 10 caracters but not sure the lower limit
-        # from the source code of Kubernetes.
-        match_result = re.match(
-            rsc_name + '-([0-9a-f]{1,10})-([0-9a-z]{5})+$', pod_name)
-    elif rsc_kind in ('ReplicaSet', 'DaemonSet'):
-        # Expected example: name-019az
-        match_result = re.match(rsc_name + '-([0-9a-z]{5})+$', pod_name)
-    elif rsc_kind == 'StatefulSet':
-        # Expected example: name-0
-        match_result = re.match(rsc_name + '-[0-9]+$', pod_name)
-    if match_result:
-        return True
-    return False
+SUPPORTED_NAMESPACE_KINDS = {
+    "Binding",
+    "ConfigMap",
+    "ControllerRevision",
+    "DaemonSet",
+    "Deployment",
+    "HorizontalPodAutoscaler",
+    "Job",
+    "Lease",
+    "LimitRange",
+    "LocalSubjectAccessReview",
+    "NetworkPolicy",
+    "PersistentVolumeClaim",
+    "Pod",
+    "PodTemplate",
+    "ReplicaSet",
+    "ResourceQuota",
+    "Role",
+    "RoleBinding",
+    "Secret",
+    "Service",
+    "ServiceAccount",
+    "StatefulSet",
+}


 def get_k8s_reses_from_json_files(target_k8s_files, vnfd, k8s_api_client,
@@ -122,8 +96,11 @@ def get_k8s_reses_from_json_files(target_k8s_files, vnfd, k8s_api_client,

     k8s_reses = []
     for k8s_res in k8s_resources:
-        cls = getattr(kubernetes_resource, k8s_res['kind'])
-        k8s_reses.append(cls(k8s_api_client, k8s_res))
+        try:
+            cls = getattr(kubernetes_resource, k8s_res['kind'])
+            k8s_reses.append(cls(k8s_api_client, k8s_res))
+        except AttributeError:
+            LOG.info("Unsupported kind %s; ignored.", k8s_res['kind'])

     return k8s_reses, namespace
@@ -146,6 +123,8 @@ class AuthContextManager:
             os.remove(self.ca_cert_file)

     def _create_ca_cert_file(self, ca_cert_str):
+        if self.ca_cert_file:
+            return
         file_descriptor, self.ca_cert_file = tempfile.mkstemp()
         ca_cert = re.sub(r'\s', '\n', ca_cert_str)
         ca_cert = re.sub(r'BEGIN\nCERT', r'BEGIN CERT', ca_cert)
@@ -194,3 +173,17 @@ class AuthContextManager:
             k8s_config.verify_ssl = False
         return client.api_client.ApiClient(configuration=k8s_config)
+
+    def _get_helm_auth_params(self):
+        kube_apiserver = self.vim_info.interfaceInfo['endpoint']
+        kube_token = self.vim_info.accessInfo['bearer_token']
+        self._create_ca_cert_file(
+            self.vim_info.interfaceInfo['ssl_ca_cert'])
+        helm_auth_params = ["--kube-apiserver", kube_apiserver,
+                            "--kube-ca-file", self.ca_cert_file,
+                            "--kube-token", kube_token]
+
+        return helm_auth_params
+
+    def init_helm_client(self):
+        return helm_utils.HelmClient(self._get_helm_auth_params())

View File

@@ -63,7 +63,6 @@ def max_sample_instantiate(auth_url, bearer_token, ssl_ca_cert=None):
         "interfaceInfo": {"endpoint": auth_url},
         "accessInfo": {
             "bearer_token": bearer_token,
-            "region": "RegionOne",
         },
         "extra": {"dummy-key": "dummy-val"}
     }
@@ -73,7 +72,6 @@ def max_sample_instantiate(auth_url, bearer_token, ssl_ca_cert=None):
         "interfaceInfo": {"endpoint": auth_url},
         "accessInfo": {
             "username": "dummy_user",
-            "region": "RegionOne",
             "password": "dummy_password",
         },
         "extra": {"dummy-key": "dummy-val"}
@@ -217,7 +215,6 @@ def error_handling_instantiate(auth_url, bearer_token):
         "interfaceInfo": {"endpoint": auth_url},
         "accessInfo": {
             "bearer_token": bearer_token,
-            "region": "RegionOne",
         },
         "extra": {"dummy-key": "dummy-val"}
     }
@@ -257,7 +254,6 @@ def change_vnfpkg_instantiate(auth_url, bearer_token):
         "interfaceInfo": {"endpoint": auth_url},
         "accessInfo": {
             "bearer_token": bearer_token,
-            "region": "RegionOne",
         },
         "extra": {"dummy-key": "dummy-val"}
     }
@@ -311,3 +307,103 @@ def change_vnfpkg_terminate():
     return {
         "terminationType": "FORCEFUL"
     }
+
+
+def test_helm_instantiate_create(vnfd_id):
+    return {
+        "vnfdId": vnfd_id,
+        "vnfInstanceName": "test_helm_instantiate",
+        "vnfInstanceDescription": "test_helm_instantiate",
+        "metadata": {"dummy-key": "dummy-val"}
+    }
+
+
+def helm_instantiate(auth_url, bearer_token, ssl_ca_cert):
+    vim_id_1 = uuidutils.generate_uuid()
+    vim_1 = {
+        "vimId": vim_id_1,
+        "vimType": "ETSINFV.HELM.V_3",
+        "interfaceInfo": {
+            "endpoint": auth_url,
+            "ssl_ca_cert": ssl_ca_cert
+        },
+        "accessInfo": {
+            "bearer_token": bearer_token
+        }
+    }
+    return {
+        "flavourId": "simple",
+        "vimConnectionInfo": {
+            "vim1": vim_1,
+        },
+        "additionalParams": {
+            "helm_chart_path": "Files/kubernetes/test-chart-0.1.0.tgz",
+            "helm_parameters": {
+                "service.port": 8081
+            },
+            "helm_value_names": {
+                "VDU1": {
+                    "replica": "replicaCountVdu1"
+                },
+                "VDU2": {
+                    "replica": "replicaCountVdu2"
+                }
+            },
+            "namespace": "default"
+        }
+    }
+
+
+def helm_terminate():
+    return {
+        "terminationType": "FORCEFUL"
+    }
+
+
+def helm_scale_out():
+    return {
+        "type": "SCALE_OUT",
+        "aspectId": "vdu2_aspect",
+        "numberOfSteps": 2
+    }
+
+
+def helm_scale_in():
+    return {
+        "type": "SCALE_IN",
+        "aspectId": "vdu2_aspect",
+        "numberOfSteps": 1
+    }
+
+
+def helm_heal(vnfc_ids):
+    return {
+        "vnfcInstanceId": vnfc_ids
+    }
+
+
+def helm_change_vnfpkg(vnfd_id):
+    return {
+        "vnfdId": vnfd_id,
+        "additionalParams": {
+            "upgrade_type": "RollingUpdate",
+            "helm_chart_path": "Files/kubernetes/test-chart-0.1.1.tgz",
+            "vdu_params": [{
+                "vdu_id": "VDU2"
+            }]
+        }
+    }
+
+
+def helm_error_handling_change_vnfpkg(vnfd_id):
+    return {
+        "vnfdId": vnfd_id,
+        "additionalParams": {
+            "upgrade_type": "RollingUpdate",
+            "helm_chart_path": "Files/kubernetes/test-chart-error-"
+                               "handling.tgz",
+            "vdu_params": [{
+                "vdu_id": "VDU2"
+            }]
+        }
+    }
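
A hedged worked example of how helm_scale_out plays out: each step of
vdu2_aspect adds delta_1 = 1 VDU2 instance (per the VNFD below), and the
Helm driver pushes the resulting absolute count:

    # SCALE_OUT with numberOfSteps=2, one instance per step.
    current_vdu2_pods = 1                 # from vnfcResourceInfo
    steps, delta = 2, 1
    desired = current_vdu2_pods + steps * delta   # -> 3
    # The Helm driver then runs, in effect:
    #   helm upgrade ... --reuse-values --set replicaCountVdu2=3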

View File

@@ -0,0 +1,178 @@
tosca_definitions_version: tosca_simple_yaml_1_2

description: Simple deployment flavour for Sample VNF

imports:
  - etsi_nfv_sol001_common_types.yaml
  - etsi_nfv_sol001_vnfd_types.yaml
  - sample_cnf_types.yaml

topology_template:
  inputs:
    descriptor_id:
      type: string
    descriptor_version:
      type: string
    provider:
      type: string
    product_name:
      type: string
    software_version:
      type: string
    vnfm_info:
      type: list
      entry_schema:
        type: string
    flavour_id:
      type: string
    flavour_description:
      type: string

  substitution_mappings:
    node_type: company.provider.VNF
    properties:
      flavour_id: simple
    requirements:
      virtual_link_external: []

  node_templates:
    VNF:
      type: company.provider.VNF
      properties:
        flavour_description: A simple flavour
      interfaces:
        Vnflcm:
          instantiate_start:
            implementation: sample-script
          instantiate_end:
            implementation: sample-script
          terminate_start:
            implementation: sample-script
          terminate_end:
            implementation: sample-script
          scale_start:
            implementation: sample-script
          scale_end:
            implementation: sample-script
          heal_start:
            implementation: sample-script
          heal_end:
            implementation: sample-script
          modify_information_start:
            implementation: sample-script
          modify_information_end:
            implementation: sample-script
      artifacts:
        sample-script:
          description: Sample script
          type: tosca.artifacts.Implementation.Python
          file: ../Scripts/sample_script.py

    VDU1:
      type: tosca.nodes.nfv.Vdu.Compute
      properties:
        name: vdu1
        description: VDU1 compute node
        vdu_profile:
          min_number_of_instances: 1
          max_number_of_instances: 3

    VDU2:
      type: tosca.nodes.nfv.Vdu.Compute
      properties:
        name: vdu2
        description: VDU2 compute node
        vdu_profile:
          min_number_of_instances: 1
          max_number_of_instances: 3

  policies:
    - scaling_aspects:
        type: tosca.policies.nfv.ScalingAspects
        properties:
          aspects:
            vdu1_aspect:
              name: vdu1_aspect
              description: vdu1 scaling aspect
              max_scale_level: 2
              step_deltas:
                - delta_1
            vdu2_aspect:
              name: vdu2_aspect
              description: vdu2 scaling aspect
              max_scale_level: 2
              step_deltas:
                - delta_1

    - VDU1_initial_delta:
        type: tosca.policies.nfv.VduInitialDelta
        properties:
          initial_delta:
            number_of_instances: 1
        targets: [ VDU1 ]

    - VDU1_scaling_aspect_deltas:
        type: tosca.policies.nfv.VduScalingAspectDeltas
        properties:
          aspect: vdu1_aspect
          deltas:
            delta_1:
              number_of_instances: 1
        targets: [ VDU1 ]

    - VDU2_initial_delta:
        type: tosca.policies.nfv.VduInitialDelta
        properties:
          initial_delta:
            number_of_instances: 1
        targets: [ VDU2 ]

    - VDU2_scaling_aspect_deltas:
        type: tosca.policies.nfv.VduScalingAspectDeltas
        properties:
          aspect: vdu2_aspect
          deltas:
            delta_1:
              number_of_instances: 1
        targets: [ VDU2 ]

    - instantiation_levels:
        type: tosca.policies.nfv.InstantiationLevels
        properties:
          levels:
            instantiation_level_1:
              description: Smallest size
              scale_info:
                vdu1_aspect:
                  scale_level: 0
                vdu2_aspect:
                  scale_level: 0
            instantiation_level_2:
              description: Largest size
              scale_info:
                vdu1_aspect:
                  scale_level: 2
                vdu2_aspect:
                  scale_level: 2
          default_level: instantiation_level_1

    - VDU1_instantiation_levels:
        type: tosca.policies.nfv.VduInstantiationLevels
        properties:
          levels:
            instantiation_level_1:
              number_of_instances: 1
            instantiation_level_2:
              number_of_instances: 3
        targets: [ VDU1 ]

    - VDU2_instantiation_levels:
        type: tosca.policies.nfv.VduInstantiationLevels
        properties:
          levels:
            instantiation_level_1:
              number_of_instances: 1
            instantiation_level_2:
              number_of_instances: 3
        targets: [ VDU2 ]

View File

@@ -0,0 +1,31 @@
tosca_definitions_version: tosca_simple_yaml_1_2

description: Sample VNF

imports:
  - etsi_nfv_sol001_common_types.yaml
  - etsi_nfv_sol001_vnfd_types.yaml
  - sample_cnf_types.yaml
  - sample_cnf_df_simple.yaml

topology_template:
  inputs:
    selected_flavour:
      type: string
      description: VNF deployment flavour selected by the consumer. It is provided in the API

  node_templates:
    VNF:
      type: company.provider.VNF
      properties:
        flavour_id: { get_input: selected_flavour }
        descriptor_id: b1bb0ce7-ebca-4fa7-95ed-4840d7000000
        provider: Company
        product_name: Sample VNF
        software_version: '1.0'
        descriptor_version: '1.0'
        vnfm_info:
          - Tacker
      requirements:
        #- virtual_link_external # mapped in lower-level templates
        #- virtual_link_internal # mapped in lower-level templates

View File

@@ -0,0 +1,53 @@
tosca_definitions_version: tosca_simple_yaml_1_2

description: VNF type definition

imports:
  - etsi_nfv_sol001_common_types.yaml
  - etsi_nfv_sol001_vnfd_types.yaml

node_types:
  company.provider.VNF:
    derived_from: tosca.nodes.nfv.VNF
    properties:
      descriptor_id:
        type: string
        constraints: [ valid_values: [ b1bb0ce7-ebca-4fa7-95ed-4840d7000000 ] ]
        default: b1bb0ce7-ebca-4fa7-95ed-4840d7000000
      descriptor_version:
        type: string
        constraints: [ valid_values: [ '1.0' ] ]
        default: '1.0'
      provider:
        type: string
        constraints: [ valid_values: [ 'Company' ] ]
        default: 'Company'
      product_name:
        type: string
        constraints: [ valid_values: [ 'Sample VNF' ] ]
        default: 'Sample VNF'
      software_version:
        type: string
        constraints: [ valid_values: [ '1.0' ] ]
        default: '1.0'
      vnfm_info:
        type: list
        entry_schema:
          type: string
          constraints: [ valid_values: [ Tacker ] ]
        default: [ Tacker ]
      flavour_id:
        type: string
        constraints: [ valid_values: [ simple, complex ] ]
        default: simple
      flavour_description:
        type: string
        default: ""
    requirements:
      - virtual_link_external:
          capability: tosca.capabilities.nfv.VirtualLinkable
      - virtual_link_internal:
          capability: tosca.capabilities.nfv.VirtualLinkable
    interfaces:
      Vnflcm:
        type: tosca.interfaces.nfv.Vnflcm

View File

@@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View File

@@ -0,0 +1,6 @@
apiVersion: v2
appVersion: 1.16.0
description: A Helm chart for Kubernetes
name: test-chart
type: application
version: 0.1.1

View File

@@ -0,0 +1,22 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
{{- range .paths }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
{{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "test-chart.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "test-chart.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "test-chart.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "test-chart.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
{{- end }}

View File

@@ -0,0 +1,62 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "test-chart.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "test-chart.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "test-chart.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "test-chart.labels" -}}
helm.sh/chart: {{ include "test-chart.chart" . }}
{{ include "test-chart.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "test-chart.selectorLabels" -}}
app.kubernetes.io/name: {{ include "test-chart.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "test-chart.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "test-chart.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}


@ -0,0 +1,53 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: vdu1-{{ include "test-chart.fullname" . }}
labels:
{{- include "test-chart.labels" . | nindent 4 }}
spec:
{{- if not .Values.autoscaling.enabled }}
replicas: {{ .Values.replicaCountVdu1 }}
{{- end }}
selector:
matchLabels:
{{- include "test-chart.selectorLabels" . | nindent 6 }}
template:
metadata:
{{- with .Values.podAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "test-chart.selectorLabels" . | nindent 8 }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "test-chart.serviceAccountName" . }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
- name: {{ .Chart.Name }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- name: http
containerPort: 80
protocol: TCP
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}


@ -0,0 +1,53 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: vdu2-{{ include "test-chart.fullname" . }}
labels:
{{- include "test-chart.labels" . | nindent 4 }}
spec:
{{- if not .Values.autoscaling.enabled }}
replicas: {{ .Values.replicaCountVdu2 }}
{{- end }}
selector:
matchLabels:
{{- include "test-chart.selectorLabels" . | nindent 6 }}
template:
metadata:
{{- with .Values.podAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "test-chart.selectorLabels" . | nindent 8 }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "test-chart.serviceAccountName" . }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
- name: {{ .Chart.Name }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
image: nginx:alpine
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- name: http
containerPort: 80
protocol: TCP
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}


@ -0,0 +1,28 @@
{{- if .Values.autoscaling.enabled }}
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "test-chart.fullname" . }}
labels:
{{- include "test-chart.labels" . | nindent 4 }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ include "test-chart.fullname" . }}
minReplicas: {{ .Values.autoscaling.minReplicas }}
maxReplicas: {{ .Values.autoscaling.maxReplicas }}
metrics:
{{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
- type: Resource
resource:
name: cpu
targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
{{- end }}
{{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
- type: Resource
resource:
name: memory
targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
{{- end }}
{{- end }}


@ -0,0 +1,61 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "test-chart.fullname" . -}}
{{- $svcPort := .Values.service.port -}}
{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }}
{{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }}
{{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}}
{{- end }}
{{- end }}
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1
{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
name: {{ $fullName }}
labels:
{{- include "test-chart.labels" . | nindent 4 }}
{{- with .Values.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }}
ingressClassName: {{ .Values.ingress.className }}
{{- end }}
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ .host | quote }}
http:
paths:
{{- range .paths }}
- path: {{ .path }}
{{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }}
pathType: {{ .pathType }}
{{- end }}
backend:
{{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
service:
name: {{ $fullName }}
port:
number: {{ $svcPort }}
{{- else }}
serviceName: {{ $fullName }}
servicePort: {{ $svcPort }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}


@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "test-chart.fullname" . }}
labels:
{{- include "test-chart.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
selector:
{{- include "test-chart.selectorLabels" . | nindent 4 }}


@ -0,0 +1,12 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "test-chart.serviceAccountName" . }}
labels:
{{- include "test-chart.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}


@ -0,0 +1,15 @@
apiVersion: v1
kind: Pod
metadata:
name: "{{ include "test-chart.fullname" . }}-test-connection"
labels:
{{- include "test-chart.labels" . | nindent 4 }}
annotations:
"helm.sh/hook": test
spec:
containers:
- name: wget
image: busybox
command: ['wget']
args: ['{{ include "test-chart.fullname" . }}:{{ .Values.service.port }}']
restartPolicy: Never


@ -0,0 +1,83 @@
# Default values for test-chart.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCountVdu1: 1
replicaCountVdu2: 1
image:
repository: nginx
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: ""
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
serviceAccount:
# Specifies whether a service account should be created
create: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
podAnnotations: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 80
ingress:
enabled: false
className: ""
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: chart-example.local
paths:
- path: /
pathType: ImplementationSpecific
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 100
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
nodeSelector: {}
tolerations: []
affinity: {}
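
The replicaCountVdu1 and replicaCountVdu2 values above are the per-VDU knobs that a scale operation ultimately overrides. As a hedged sketch of the manual Helm equivalent of one SCALE_OUT step on VDU1 (the release name, namespace and chart path below are illustrative assumptions, not values taken from this commit):

import subprocess

# Bump the VDU1 replica count on an existing release while keeping all
# other deployed values intact (--reuse-values). Names are placeholders.
subprocess.run(
    ['helm', 'upgrade', 'test-release', './test-chart',
     '--namespace', 'default',
     '--reuse-values',
     '--set', 'replicaCountVdu1=2'],
    check=True)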


@ -0,0 +1,68 @@
# Copyright (C) 2022 Nippon Telegraph and Telephone Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import os
import pickle
import sys
class FailScript(object):
"""Define error method for each operation
For example:
def instantiate_start(self):
if os.path.exists('/tmp/instantiate_start'):
raise Exception('test instantiate_start error')
"""
def __init__(self, req, inst, grant_req, grant, csar_dir):
self.req = req
self.inst = inst
self.grant_req = grant_req
self.grant = grant
self.csar_dir = csar_dir
def _fail(self, method):
if os.path.exists(f'/tmp/{method}'):
raise Exception(f'test {method} error')
def __getattr__(self, name):
return functools.partial(self._fail, name)
def main():
script_dict = pickle.load(sys.stdin.buffer)
operation = script_dict['operation']
req = script_dict['request']
inst = script_dict['vnf_instance']
grant_req = script_dict['grant_request']
grant = script_dict['grant_response']
csar_dir = script_dict['tmp_csar_dir']
script = FailScript(req, inst, grant_req, grant, csar_dir)
getattr(script, operation)()
if __name__ == "__main__":
try:
main()
os._exit(0)
except Exception as ex:
sys.stderr.write(str(ex))
sys.stderr.flush()
os._exit(1)
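
For context on the stdin protocol used by main() above, here is a hedged sketch of a caller driving this script with the same keys it unpickles; the runner below is an illustration only, not Tacker's actual invocation code:

import pickle
import subprocess

# Feed the operation context over stdin exactly as main() expects.
script_dict = {
    'operation': 'instantiate_start',
    'request': {},           # stand-in for the LCM request
    'vnf_instance': {},      # stand-in for the VNF instance
    'grant_request': {},     # stand-in for the grant request
    'grant_response': {},    # stand-in for the grant
    'tmp_csar_dir': '/tmp/csar',
}
proc = subprocess.run(
    ['python3', 'sample_script.py'],
    input=pickle.dumps(script_dict))
# A non-zero exit status plus a message on stderr signals a simulated
# failure (triggered when /tmp/<operation> exists).
print(proc.returncode)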


@ -0,0 +1,9 @@
TOSCA-Meta-File-Version: 1.0
Created-by: dummy_user
CSAR-Version: 1.1
Entry-Definitions: Definitions/sample_cnf_top.vnfd.yaml
Name: Files/kubernetes/test-chart-0.1.1.tgz
Content-Type: test-data
Algorithm: SHA-256
Hash: 6d5177b34c732835cc15f9f39c3f93867f10af4d69c2224dd9122645b1ee17f7
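
The Algorithm/Hash pair above pins the packaged chart artifact listed in the Name field. A throwaway verification sketch, run from the CSAR root (the path comes from the Name entry above):

import hashlib

# Recompute the SHA-256 digest of the chart archive and compare it
# against the Hash entry in TOSCA.meta.
with open('Files/kubernetes/test-chart-0.1.1.tgz', 'rb') as f:
    digest = hashlib.sha256(f.read()).hexdigest()
print(digest)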


@ -0,0 +1,47 @@
# Copyright (C) 2022 Nippon Telegraph and Telephone Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import os
import shutil
import tempfile
from oslo_utils import uuidutils
from tacker.tests.functional.sol_kubernetes_v2 import paramgen
from tacker.tests.functional.sol_v2_common import utils
zip_file_name = os.path.basename(os.path.abspath(".")) + '.zip'
tmp_dir = tempfile.mkdtemp()
vnfd_id = uuidutils.generate_uuid()
# tacker/tests/functional/sol_kubernetes_v2/samples/{package_name}
utils.make_zip(".", tmp_dir, vnfd_id)
shutil.move(os.path.join(tmp_dir, zip_file_name), ".")
shutil.rmtree(tmp_dir)
helm_change_vnfpkg = (
paramgen.helm_change_vnfpkg(vnfd_id))
helm_error_handling_change_vnfpkg = (
paramgen.helm_error_handling_change_vnfpkg(vnfd_id))
with open("helm_change_vnfpkg", "w", encoding='utf-8') as f:
f.write(json.dumps(helm_change_vnfpkg, indent=2))
with open("helm_error_handling_change_vnfpkg", "w", encoding='utf-8') as f:
f.write(json.dumps(helm_error_handling_change_vnfpkg, indent=2))


@ -0,0 +1,177 @@
tosca_definitions_version: tosca_simple_yaml_1_2
description: Simple deployment flavour for Sample VNF
imports:
- etsi_nfv_sol001_common_types.yaml
- etsi_nfv_sol001_vnfd_types.yaml
- sample_cnf_types.yaml
topology_template:
inputs:
descriptor_id:
type: string
descriptor_version:
type: string
provider:
type: string
product_name:
type: string
software_version:
type: string
vnfm_info:
type: list
entry_schema:
type: string
flavour_id:
type: string
flavour_description:
type: string
substitution_mappings:
node_type: company.provider.VNF
properties:
flavour_id: simple
requirements:
virtual_link_external: []
node_templates:
VNF:
type: company.provider.VNF
properties:
flavour_description: A simple flavour
interfaces:
Vnflcm:
instantiate_start:
implementation: sample-script
instantiate_end:
implementation: sample-script
terminate_start:
implementation: sample-script
terminate_end:
implementation: sample-script
scale_start:
implementation: sample-script
scale_end:
implementation: sample-script
heal_start:
implementation: sample-script
heal_end:
implementation: sample-script
modify_information_start:
implementation: sample-script
modify_information_end:
implementation: sample-script
artifacts:
sample-script:
description: Sample script
type: tosca.artifacts.Implementation.Python
file: ../Scripts/sample_script.py
VDU1:
type: tosca.nodes.nfv.Vdu.Compute
properties:
name: vdu1
description: VDU1 compute node
vdu_profile:
min_number_of_instances: 1
max_number_of_instances: 3
VDU2:
type: tosca.nodes.nfv.Vdu.Compute
properties:
name: vdu2
description: VDU2 compute node
vdu_profile:
min_number_of_instances: 1
max_number_of_instances: 3
policies:
- scaling_aspects:
type: tosca.policies.nfv.ScalingAspects
properties:
aspects:
vdu1_aspect:
name: vdu1_aspect
description: vdu1 scaling aspect
max_scale_level: 2
step_deltas:
- delta_1
vdu2_aspect:
name: vdu2_aspect
description: vdu2 scaling aspect
max_scale_level: 2
step_deltas:
- delta_1
- VDU1_initial_delta:
type: tosca.policies.nfv.VduInitialDelta
properties:
initial_delta:
number_of_instances: 1
targets: [ VDU1 ]
- VDU1_scaling_aspect_deltas:
type: tosca.policies.nfv.VduScalingAspectDeltas
properties:
aspect: vdu1_aspect
deltas:
delta_1:
number_of_instances: 1
targets: [ VDU1 ]
- VDU2_initial_delta:
type: tosca.policies.nfv.VduInitialDelta
properties:
initial_delta:
number_of_instances: 1
targets: [ VDU2 ]
- VDU2_scaling_aspect_deltas:
type: tosca.policies.nfv.VduScalingAspectDeltas
properties:
aspect: vdu2_aspect
deltas:
delta_1:
number_of_instances: 1
targets: [ VDU2 ]
- instantiation_levels:
type: tosca.policies.nfv.InstantiationLevels
properties:
levels:
instantiation_level_1:
description: Smallest size
scale_info:
vdu1_aspect:
scale_level: 0
vdu2_aspect:
scale_level: 0
instantiation_level_2:
description: Largest size
scale_info:
vdu1_aspect:
scale_level: 2
vdu2_aspect:
scale_level: 2
default_level: instantiation_level_1
- VDU1_instantiation_levels:
type: tosca.policies.nfv.VduInstantiationLevels
properties:
levels:
instantiation_level_1:
number_of_instances: 1
instantiation_level_2:
number_of_instances: 3
targets: [ VDU1 ]
- VDU2_instantiation_levels:
type: tosca.policies.nfv.VduInstantiationLevels
properties:
levels:
instantiation_level_1:
number_of_instances: 1
instantiation_level_2:
number_of_instances: 3
targets: [ VDU2 ]
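
As a quick sanity check on the scaling arithmetic encoded above (initial_delta of 1, delta_1 of 1 instance per step, max_scale_level of 2), replica counts per scale level work out as below; this is a throwaway sketch, not Tacker code:

# initial_delta + step_delta * scale_level, per the VduInitialDelta and
# VduScalingAspectDeltas policies above.
def replicas(scale_level, initial_delta=1, step_delta=1):
    return initial_delta + step_delta * scale_level

assert replicas(0) == 1  # instantiation_level_1 (smallest size)
assert replicas(2) == 3  # instantiation_level_2 (largest size)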


@ -0,0 +1,31 @@
tosca_definitions_version: tosca_simple_yaml_1_2
description: Sample VNF
imports:
- etsi_nfv_sol001_common_types.yaml
- etsi_nfv_sol001_vnfd_types.yaml
- sample_cnf_types.yaml
- sample_cnf_df_simple.yaml
topology_template:
inputs:
selected_flavour:
type: string
description: VNF deployment flavour selected by the consumer. It is provided in the API
node_templates:
VNF:
type: company.provider.VNF
properties:
flavour_id: { get_input: selected_flavour }
descriptor_id: b1bb0ce7-ebca-4fa7-95ed-4840d7000000
provider: Company
product_name: Sample VNF
software_version: '1.0'
descriptor_version: '1.0'
vnfm_info:
- Tacker
requirements:
#- virtual_link_external # mapped in lower-level templates
#- virtual_link_internal # mapped in lower-level templates


@ -0,0 +1,53 @@
tosca_definitions_version: tosca_simple_yaml_1_2
description: VNF type definition
imports:
- etsi_nfv_sol001_common_types.yaml
- etsi_nfv_sol001_vnfd_types.yaml
node_types:
company.provider.VNF:
derived_from: tosca.nodes.nfv.VNF
properties:
descriptor_id:
type: string
constraints: [ valid_values: [ b1bb0ce7-ebca-4fa7-95ed-4840d7000000 ] ]
default: b1bb0ce7-ebca-4fa7-95ed-4840d7000000
descriptor_version:
type: string
constraints: [ valid_values: [ '1.0' ] ]
default: '1.0'
provider:
type: string
constraints: [ valid_values: [ 'Company' ] ]
default: 'Company'
product_name:
type: string
constraints: [ valid_values: [ 'Sample VNF' ] ]
default: 'Sample VNF'
software_version:
type: string
constraints: [ valid_values: [ '1.0' ] ]
default: '1.0'
vnfm_info:
type: list
entry_schema:
type: string
constraints: [ valid_values: [ Tacker ] ]
default: [ Tacker ]
flavour_id:
type: string
constraints: [ valid_values: [ simple,complex ] ]
default: simple
flavour_description:
type: string
default: ""
requirements:
- virtual_link_external:
capability: tosca.capabilities.nfv.VirtualLinkable
- virtual_link_internal:
capability: tosca.capabilities.nfv.VirtualLinkable
interfaces:
Vnflcm:
type: tosca.interfaces.nfv.Vnflcm


@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/


@ -0,0 +1,6 @@
apiVersion: v2
appVersion: 1.16.0
description: A Helm chart for Kubernetes
name: test-chart
type: application
version: 0.1.0


@ -0,0 +1,22 @@
1. Get the application URL by running these commands:
{{- if .Values.ingress.enabled }}
{{- range $host := .Values.ingress.hosts }}
{{- range .paths }}
http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
{{- end }}
{{- end }}
{{- else if contains "NodePort" .Values.service.type }}
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "test-chart.fullname" . }})
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
echo http://$NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
You can watch the status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "test-chart.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "test-chart.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
echo http://$SERVICE_IP:{{ .Values.service.port }}
{{- else if contains "ClusterIP" .Values.service.type }}
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "test-chart.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
echo "Visit http://127.0.0.1:8080 to use your application"
kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
{{- end }}


@ -0,0 +1,62 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "test-chart.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "test-chart.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "test-chart.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "test-chart.labels" -}}
helm.sh/chart: {{ include "test-chart.chart" . }}
{{ include "test-chart.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "test-chart.selectorLabels" -}}
app.kubernetes.io/name: {{ include "test-chart.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "test-chart.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "test-chart.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}


@ -0,0 +1,53 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: vdu1-{{ include "test-chart.fullname" . }}
labels:
{{- include "test-chart.labels" . | nindent 4 }}
spec:
{{- if not .Values.autoscaling.enabled }}
replicas: {{ .Values.replicaCountVdu1 }}
{{- end }}
selector:
matchLabels:
{{- include "test-chart.selectorLabels" . | nindent 6 }}
template:
metadata:
{{- with .Values.podAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "test-chart.selectorLabels" . | nindent 8 }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "test-chart.serviceAccountName" . }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
- name: {{ .Chart.Name }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- name: http
containerPort: 80
protocol: TCP
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}


@ -0,0 +1,53 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: vdu2-{{ include "test-chart.fullname" . }}
labels:
{{- include "test-chart.labels" . | nindent 4 }}
spec:
{{- if not .Values.autoscaling.enabled }}
replicas: {{ .Values.replicaCountVdu2 }}
{{- end }}
selector:
matchLabels:
{{- include "test-chart.selectorLabels" . | nindent 6 }}
template:
metadata:
{{- with .Values.podAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "test-chart.selectorLabels" . | nindent 8 }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "test-chart.serviceAccountName" . }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
- name: {{ .Chart.Name }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
image: nginx
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- name: http
containerPort: 80
protocol: TCP
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}


@ -0,0 +1,28 @@
{{- if .Values.autoscaling.enabled }}
apiVersion: autoscaling/v2beta1
kind: HorizontalPodAutoscaler
metadata:
name: {{ include "test-chart.fullname" . }}
labels:
{{- include "test-chart.labels" . | nindent 4 }}
spec:
scaleTargetRef:
apiVersion: apps/v1
kind: Deployment
name: {{ include "test-chart.fullname" . }}
minReplicas: {{ .Values.autoscaling.minReplicas }}
maxReplicas: {{ .Values.autoscaling.maxReplicas }}
metrics:
{{- if .Values.autoscaling.targetCPUUtilizationPercentage }}
- type: Resource
resource:
name: cpu
targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }}
{{- end }}
{{- if .Values.autoscaling.targetMemoryUtilizationPercentage }}
- type: Resource
resource:
name: memory
targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }}
{{- end }}
{{- end }}


@ -0,0 +1,61 @@
{{- if .Values.ingress.enabled -}}
{{- $fullName := include "test-chart.fullname" . -}}
{{- $svcPort := .Values.service.port -}}
{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }}
{{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }}
{{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}}
{{- end }}
{{- end }}
{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1
{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}}
apiVersion: networking.k8s.io/v1beta1
{{- else -}}
apiVersion: extensions/v1beta1
{{- end }}
kind: Ingress
metadata:
name: {{ $fullName }}
labels:
{{- include "test-chart.labels" . | nindent 4 }}
{{- with .Values.ingress.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }}
ingressClassName: {{ .Values.ingress.className }}
{{- end }}
{{- if .Values.ingress.tls }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}
- host: {{ .host | quote }}
http:
paths:
{{- range .paths }}
- path: {{ .path }}
{{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }}
pathType: {{ .pathType }}
{{- end }}
backend:
{{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }}
service:
name: {{ $fullName }}
port:
number: {{ $svcPort }}
{{- else }}
serviceName: {{ $fullName }}
servicePort: {{ $svcPort }}
{{- end }}
{{- end }}
{{- end }}
{{- end }}


@ -0,0 +1,15 @@
apiVersion: v1
kind: Service
metadata:
name: {{ include "test-chart.fullname" . }}
labels:
{{- include "test-chart.labels" . | nindent 4 }}
spec:
type: {{ .Values.service.type }}
ports:
- port: {{ .Values.service.port }}
targetPort: http
protocol: TCP
name: http
selector:
{{- include "test-chart.selectorLabels" . | nindent 4 }}


@ -0,0 +1,12 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "test-chart.serviceAccountName" . }}
labels:
{{- include "test-chart.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}


@ -0,0 +1,15 @@
apiVersion: v1
kind: Pod
metadata:
name: "{{ include "test-chart.fullname" . }}-test-connection"
labels:
{{- include "test-chart.labels" . | nindent 4 }}
annotations:
"helm.sh/hook": test
spec:
containers:
- name: wget
image: busybox
command: ['wget']
args: ['{{ include "test-chart.fullname" . }}:{{ .Values.service.port }}']
restartPolicy: Never


@ -0,0 +1,83 @@
# Default values for test-chart.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCountVdu1: 1
replicaCountVdu2: 1
image:
repository: nginx
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: ""
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
serviceAccount:
# Specifies whether a service account should be created
create: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
podAnnotations: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
service:
type: ClusterIP
port: 80
ingress:
enabled: false
className: ""
annotations: {}
# kubernetes.io/ingress.class: nginx
# kubernetes.io/tls-acme: "true"
hosts:
- host: chart-example.local
paths:
- path: /
pathType: ImplementationSpecific
tls: []
# - secretName: chart-example-tls
# hosts:
# - chart-example.local
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
autoscaling:
enabled: false
minReplicas: 1
maxReplicas: 100
targetCPUUtilizationPercentage: 80
# targetMemoryUtilizationPercentage: 80
nodeSelector: {}
tolerations: []
affinity: {}


@ -0,0 +1,68 @@
# Copyright (C) 2022 Nippon Telegraph and Telephone Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import os
import pickle
import sys
class FailScript(object):
"""Define error method for each operation
For example:
def instantiate_start(self):
if os.path.exists('/tmp/instantiate_start'):
raise Exception('test instantiate_start error')
"""
def __init__(self, req, inst, grant_req, grant, csar_dir):
self.req = req
self.inst = inst
self.grant_req = grant_req
self.grant = grant
self.csar_dir = csar_dir
def _fail(self, method):
if os.path.exists(f'/tmp/{method}'):
raise Exception(f'test {method} error')
def __getattr__(self, name):
return functools.partial(self._fail, name)
def main():
script_dict = pickle.load(sys.stdin.buffer)
operation = script_dict['operation']
req = script_dict['request']
inst = script_dict['vnf_instance']
grant_req = script_dict['grant_request']
grant = script_dict['grant_response']
csar_dir = script_dict['tmp_csar_dir']
script = FailScript(req, inst, grant_req, grant, csar_dir)
getattr(script, operation)()
if __name__ == "__main__":
try:
main()
os._exit(0)
except Exception as ex:
sys.stderr.write(str(ex))
sys.stderr.flush()
os._exit(1)


@ -0,0 +1,9 @@
TOSCA-Meta-File-Version: 1.0
Created-by: dummy_user
CSAR-Version: 1.1
Entry-Definitions: Definitions/sample_cnf_top.vnfd.yaml
Name: Files/kubernetes/test-chart-0.1.0.tgz
Content-Type: test-data
Algorithm: SHA-256
Hash: 511df66c2d34bc2d3b1ea80118c4ad3c61ad7816a45bfadbb223d172b8503d30


@ -0,0 +1,68 @@
# Copyright (C) 2022 Nippon Telegraph and Telephone Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import os
import shutil
import tempfile
from oslo_utils import uuidutils
from tacker.tests.functional.sol_kubernetes_v2 import paramgen
from tacker.tests.functional.sol_v2_common import utils
zip_file_name = os.path.basename(os.path.abspath(".")) + '.zip'
tmp_dir = tempfile.mkdtemp()
vnfd_id = uuidutils.generate_uuid()
# tacker/tests/functional/sol_kubernetes_v2/samples/{package_name}
utils.make_zip(".", tmp_dir, vnfd_id)
shutil.move(os.path.join(tmp_dir, zip_file_name), ".")
shutil.rmtree(tmp_dir)
create_req = paramgen.test_helm_instantiate_create(vnfd_id)
# if you instantiate against a real k8s cluster,
# please change auth_url and bearer_token to your own k8s cluster's info
auth_url = "https://127.0.0.1:6443"
bearer_token = "your_k8s_cluster_bearer_token"
ssl_ca_cert = "k8s_ssl_ca_cert"
helm_instantiate_req = paramgen.helm_instantiate(
auth_url, bearer_token, ssl_ca_cert)
helm_terminate_req = paramgen.helm_terminate()
helm_scale_out = paramgen.helm_scale_out()
helm_scale_in = paramgen.helm_scale_in()
helm_heal = paramgen.helm_heal(["replace real vnfc ids"])
with open("create_req", "w", encoding='utf-8') as f:
f.write(json.dumps(create_req, indent=2))
with open("helm_instantiate_req", "w", encoding='utf-8') as f:
f.write(json.dumps(helm_instantiate_req, indent=2))
with open("helm_terminate_req", "w", encoding='utf-8') as f:
f.write(json.dumps(helm_terminate_req, indent=2))
with open("helm_scale_out", "w", encoding='utf-8') as f:
f.write(json.dumps(helm_scale_out, indent=2))
with open("helm_scale_in", "w", encoding='utf-8') as f:
f.write(json.dumps(helm_scale_in, indent=2))
with open("helm_heal", "w", encoding='utf-8') as f:
f.write(json.dumps(helm_heal, indent=2))


@ -0,0 +1,545 @@
# Copyright (C) 2022 Nippon Telegraph and Telephone Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import time
from tacker.tests.functional.sol_kubernetes_v2 import base_v2
from tacker.tests.functional.sol_kubernetes_v2 import paramgen
class VnfLcmHelmTest(base_v2.BaseVnfLcmKubernetesV2Test):
@classmethod
def setUpClass(cls):
super(VnfLcmHelmTest, cls).setUpClass()
cur_dir = os.path.dirname(__file__)
test_helm_instantiate_path = os.path.join(
cur_dir, "samples/test_helm_instantiate")
cls.vnf_pkg_1, cls.vnfd_id_1 = cls.create_vnf_package(
test_helm_instantiate_path)
test_helm_change_vnf_pkg_path = os.path.join(
cur_dir, "samples/test_helm_change_vnf_pkg")
cls.vnf_pkg_2, cls.vnfd_id_2 = cls.create_vnf_package(
test_helm_change_vnf_pkg_path)
@classmethod
def tearDownClass(cls):
super(VnfLcmHelmTest, cls).tearDownClass()
cls.delete_vnf_package(cls.vnf_pkg_1)
cls.delete_vnf_package(cls.vnf_pkg_2)
def setUp(self):
super(VnfLcmHelmTest, self).setUp()
def test_basic_lcms(self):
"""Test basic LCM operations
* About LCM operations:
This test includes the following operations.
- 1. Create a new VNF instance resource
- 2. Instantiate a VNF instance
- 3. Show VNF instance
- 4. Scale out a VNF instance
- 5. Show VNF instance
- 6. Scale in a VNF instance
- 7. Show VNF instance
- 8. Heal a VNF instance
- 9. Show VNF instance
- 10. Change Current VNF Package
- 11. Show VNF instance
- 12. Terminate a VNF instance
- 13. Delete a VNF instance
"""
# 1. Create a new VNF instance resource
# NOTE: extensions and vnfConfigurableProperties are omitted
# because they are commented out in etsi_nfv_sol001.
expected_inst_attrs = [
'id',
'vnfInstanceName',
'vnfInstanceDescription',
'vnfdId',
'vnfProvider',
'vnfProductName',
'vnfSoftwareVersion',
'vnfdVersion',
# 'vnfConfigurableProperties', # omitted
# 'vimConnectionInfo', # omitted
'instantiationState',
# 'instantiatedVnfInfo', # omitted
'metadata',
# 'extensions', # omitted
'_links'
]
create_req = paramgen.test_helm_instantiate_create(
self.vnfd_id_1)
resp, body = self.create_vnf_instance(create_req)
self.assertEqual(201, resp.status_code)
self.check_resp_headers_in_create(resp)
self.check_resp_body(body, expected_inst_attrs)
inst_id = body['id']
# check usageState of VNF Package
usage_state = self.get_vnf_package(self.vnf_pkg_1)['usageState']
self.assertEqual('IN_USE', usage_state)
# 2. Instantiate a VNF instance
instantiate_req = paramgen.helm_instantiate(
self.auth_url, self.bearer_token, self.ssl_ca_cert)
resp, body = self.instantiate_vnf_instance(inst_id, instantiate_req)
self.assertEqual(202, resp.status_code)
self.check_resp_headers_in_operation_task(resp)
lcmocc_id = os.path.basename(resp.headers['Location'])
self.wait_lcmocc_complete(lcmocc_id)
# 3. Show VNF instance
additional_inst_attrs = [
'vimConnectionInfo',
'instantiatedVnfInfo'
]
expected_inst_attrs.extend(additional_inst_attrs)
resp, body = self.show_vnf_instance(inst_id)
self.assertEqual(200, resp.status_code)
self.check_resp_headers_in_get(resp)
self.check_resp_body(body, expected_inst_attrs)
# check vnfc_resource_info
vnfc_resource_infos = body['instantiatedVnfInfo']['vnfcResourceInfo']
vdu_nums = {'VDU1': 0, 'VDU2': 0}
for vnfc_info in vnfc_resource_infos:
if vnfc_info['vduId'] == 'VDU1':
self.assertEqual('Deployment', vnfc_info[
'computeResource']['vimLevelResourceType'])
vdu_nums['VDU1'] += 1
elif vnfc_info['vduId'] == 'VDU2':
self.assertEqual('Deployment', vnfc_info[
'computeResource']['vimLevelResourceType'])
vdu_nums['VDU2'] += 1
expected = {'VDU1': 1, 'VDU2': 1}
self.assertEqual(expected, vdu_nums)
# 4. Scale out a VNF instance
scale_out_req = paramgen.helm_scale_out()
resp, body = self.scale_vnf_instance(inst_id, scale_out_req)
self.assertEqual(202, resp.status_code)
self.check_resp_headers_in_operation_task(resp)
lcmocc_id = os.path.basename(resp.headers['Location'])
self.wait_lcmocc_complete(lcmocc_id)
# 5. Show VNF instance
resp, body = self.show_vnf_instance(inst_id)
self.assertEqual(200, resp.status_code)
self.check_resp_headers_in_get(resp)
# check vnfc_resource_info
vnfc_resource_infos = body['instantiatedVnfInfo']['vnfcResourceInfo']
vdu2_infos = [vnfc_info for vnfc_info in vnfc_resource_infos
if vnfc_info['vduId'] == 'VDU2']
self.assertEqual(3, len(vdu2_infos))
# 6. Scale in a VNF instance
scale_in_req = paramgen.helm_scale_in()
resp, body = self.scale_vnf_instance(inst_id, scale_in_req)
self.assertEqual(202, resp.status_code)
self.check_resp_headers_in_operation_task(resp)
lcmocc_id = os.path.basename(resp.headers['Location'])
self.wait_lcmocc_complete(lcmocc_id)
# 7. Show VNF instance
resp, body = self.show_vnf_instance(inst_id)
self.assertEqual(200, resp.status_code)
self.check_resp_headers_in_get(resp)
# check vnfc_resource_info
vnfc_resource_infos = body['instantiatedVnfInfo']['vnfcResourceInfo']
vdu2_infos = [vnfc_info for vnfc_info in vnfc_resource_infos
if vnfc_info['vduId'] == 'VDU2']
self.assertEqual(2, len(vdu2_infos))
# 8. Heal a VNF instance
vnfc_infos = body['instantiatedVnfInfo']['vnfcInfo']
vdu2_ids = [vnfc_info['id'] for vnfc_info in vnfc_infos
if vnfc_info['vduId'] == 'VDU2']
target = [vdu2_ids[0]]
heal_req = paramgen.helm_heal(target)
resp, body = self.heal_vnf_instance(inst_id, heal_req)
self.assertEqual(202, resp.status_code)
self.check_resp_headers_in_operation_task(resp)
lcmocc_id = os.path.basename(resp.headers['Location'])
self.wait_lcmocc_complete(lcmocc_id)
# 9. Show VNF instance
resp, body = self.show_vnf_instance(inst_id)
self.assertEqual(200, resp.status_code)
self.check_resp_headers_in_get(resp)
# check vnfc_info
vnfc_infos = body['instantiatedVnfInfo']['vnfcInfo']
result_vdu2_ids = [vnfc_info['id'] for vnfc_info in vnfc_infos
if vnfc_info['vduId'] == 'VDU2']
self.assertEqual(2, len(result_vdu2_ids))
self.assertNotIn(vdu2_ids[0], result_vdu2_ids)
self.assertIn(vdu2_ids[1], result_vdu2_ids)
# 10. Change Current VNF Package
vnfc_resource_infos = body['instantiatedVnfInfo']['vnfcResourceInfo']
before_vdu2_ids = [vnfc_info['id'] for vnfc_info in vnfc_resource_infos
if vnfc_info['vduId'] == 'VDU2']
change_vnfpkg_req = paramgen.helm_change_vnfpkg(self.vnfd_id_2)
resp, body = self.change_vnfpkg(inst_id, change_vnfpkg_req)
self.assertEqual(202, resp.status_code)
self.check_resp_headers_in_operation_task(resp)
lcmocc_id = os.path.basename(resp.headers['Location'])
self.wait_lcmocc_complete(lcmocc_id)
time.sleep(3)
# check usageState of VNF Package
usage_state = self.get_vnf_package(self.vnf_pkg_1).get('usageState')
self.assertEqual('NOT_IN_USE', usage_state)
# check usageState of VNF Package
usage_state = self.get_vnf_package(self.vnf_pkg_2).get('usageState')
self.assertEqual('IN_USE', usage_state)
# 11. Show VNF instance
additional_inst_attrs = [
'vimConnectionInfo',
'instantiatedVnfInfo'
]
expected_inst_attrs.extend(additional_inst_attrs)
resp, body = self.show_vnf_instance(inst_id)
self.assertEqual(200, resp.status_code)
self.check_resp_headers_in_get(resp)
self.check_resp_body(body, expected_inst_attrs)
vnfc_resource_infos = body['instantiatedVnfInfo']['vnfcResourceInfo']
after_vdu2_ids = [vnfc_info['id'] for vnfc_info in vnfc_resource_infos
if vnfc_info['vduId'] == 'VDU2']
self.assertEqual(2, len(after_vdu2_ids))
self.assertNotEqual(before_vdu2_ids, after_vdu2_ids)
# 12. Terminate a VNF instance
terminate_req = paramgen.helm_terminate()
resp, body = self.terminate_vnf_instance(inst_id, terminate_req)
self.assertEqual(202, resp.status_code)
self.check_resp_headers_in_operation_task(resp)
lcmocc_id = os.path.basename(resp.headers['Location'])
self.wait_lcmocc_complete(lcmocc_id)
# wait a bit because there is a slight time lag between lcmocc DB
# update and terminate completion.
time.sleep(3)
# 13. Delete a VNF instance
resp, body = self.delete_vnf_instance(inst_id)
self.assertEqual(204, resp.status_code)
self.check_resp_headers_in_delete(resp)
# check deletion of VNF instance
resp, body = self.show_vnf_instance(inst_id)
self.assertEqual(404, resp.status_code)
# check usageState of VNF Package
usage_state = self.get_vnf_package(self.vnf_pkg_1).get('usageState')
self.assertEqual('NOT_IN_USE', usage_state)
def _put_fail_file(self, operation):
with open(f'/tmp/{operation}', 'w'):
pass
def _rm_fail_file(self, operation):
os.remove(f'/tmp/{operation}')
def test_instantiate_rollback(self):
"""Test LCM operations with all attributes set
* About LCM operations:
This test includes the following operations.
- 1. Create a new VNF instance resource
- 2. Instantiate a VNF instance => FAILED_TEMP
- 3. Show VNF instance
- 4. Rollback instantiate
- 5. Show VNF instance
- 6. Delete a VNF instance
"""
# 1. Create a new VNF instance resource
create_req = paramgen.test_helm_instantiate_create(
self.vnfd_id_1)
resp, body = self.create_vnf_instance(create_req)
self.assertEqual(201, resp.status_code)
self.check_resp_headers_in_create(resp)
inst_id = body['id']
# 2. Instantiate a VNF instance => FAILED_TEMP
self._put_fail_file('instantiate_end')
instantiate_req = paramgen.helm_instantiate(
self.auth_url, self.bearer_token, self.ssl_ca_cert)
resp, body = self.instantiate_vnf_instance(inst_id, instantiate_req)
self.assertEqual(202, resp.status_code)
self.check_resp_headers_in_operation_task(resp)
lcmocc_id = os.path.basename(resp.headers['Location'])
self.wait_lcmocc_failed_temp(lcmocc_id)
self._rm_fail_file('instantiate_end')
# 3. Show VNF instance
resp, body = self.show_vnf_instance(inst_id)
self.assertEqual(200, resp.status_code)
self.check_resp_headers_in_get(resp)
self.assertEqual('NOT_INSTANTIATED', body['instantiationState'])
# 4. Rollback instantiate
resp, body = self.rollback_lcmocc(lcmocc_id)
self.assertEqual(202, resp.status_code)
self.wait_lcmocc_rolled_back(lcmocc_id)
# 5. Show VNF instance
resp, body = self.show_vnf_instance(inst_id)
self.assertEqual(200, resp.status_code)
self.check_resp_headers_in_get(resp)
self.assertEqual('NOT_INSTANTIATED', body['instantiationState'])
# 6. Delete a VNF instance
resp, body = self.delete_vnf_instance(inst_id)
self.assertEqual(204, resp.status_code)
self.check_resp_headers_in_delete(resp)
def test_scale_out_rollback(self):
"""Test LCM operations with all attributes set
* About LCM operations:
This test includes the following operations.
- 1. Create a new VNF instance resource
- 2. Instantiate a VNF instance
- 3. Show VNF instance
- 4. Scale out a VNF instance => FAILED_TEMP
- 5. Rollback scale out
- 6. Show VNF instance
- 7. Terminate a VNF instance
- 8. Delete a VNF instance
"""
# 1. Create a new VNF instance resource
create_req = paramgen.test_helm_instantiate_create(
self.vnfd_id_1)
resp, body = self.create_vnf_instance(create_req)
self.assertEqual(201, resp.status_code)
self.check_resp_headers_in_create(resp)
inst_id = body['id']
# 2. Instantiate a VNF instance
instantiate_req = paramgen.helm_instantiate(
self.auth_url, self.bearer_token, self.ssl_ca_cert)
resp, body = self.instantiate_vnf_instance(inst_id, instantiate_req)
self.assertEqual(202, resp.status_code)
self.check_resp_headers_in_operation_task(resp)
lcmocc_id = os.path.basename(resp.headers['Location'])
self.wait_lcmocc_complete(lcmocc_id)
# 3. Show VNF instance
resp, body = self.show_vnf_instance(inst_id)
self.assertEqual(200, resp.status_code)
self.check_resp_headers_in_get(resp)
vnfc_resource_infos = body['instantiatedVnfInfo']['vnfcResourceInfo']
vdu2_ids_0 = {vnfc_info['id'] for vnfc_info in vnfc_resource_infos
if vnfc_info['vduId'] == 'VDU2'}
self.assertEqual(1, len(vdu2_ids_0))
# 4. Scale out a VNF instance => FAILED_TEMP
self._put_fail_file('scale_end')
scale_out_req = paramgen.helm_scale_out()
resp, body = self.scale_vnf_instance(inst_id, scale_out_req)
self.assertEqual(202, resp.status_code)
self.check_resp_headers_in_operation_task(resp)
lcmocc_id = os.path.basename(resp.headers['Location'])
self.wait_lcmocc_failed_temp(lcmocc_id)
self._rm_fail_file('scale_end')
# 5. Rollback scale out
resp, body = self.rollback_lcmocc(lcmocc_id)
self.assertEqual(202, resp.status_code)
self.wait_lcmocc_rolled_back(lcmocc_id)
# 6. Show VNF instance
resp, body = self.show_vnf_instance(inst_id)
self.assertEqual(200, resp.status_code)
self.check_resp_headers_in_get(resp)
vnfc_resource_infos = body['instantiatedVnfInfo']['vnfcResourceInfo']
vdu2_ids_1 = {vnfc_info['id'] for vnfc_info in vnfc_resource_infos
if vnfc_info['vduId'] == 'VDU2'}
self.assertEqual(vdu2_ids_0, vdu2_ids_1)
# 7. Terminate a VNF instance
terminate_req = paramgen.helm_terminate()
resp, body = self.terminate_vnf_instance(inst_id, terminate_req)
self.assertEqual(202, resp.status_code)
self.check_resp_headers_in_operation_task(resp)
lcmocc_id = os.path.basename(resp.headers['Location'])
self.wait_lcmocc_complete(lcmocc_id)
# wait a bit because there is a slight time lag between lcmocc DB
# update and terminate completion.
time.sleep(3)
# 8. Delete a VNF instance
resp, body = self.delete_vnf_instance(inst_id)
self.assertEqual(204, resp.status_code)
self.check_resp_headers_in_delete(resp)
def test_change_vnfpkg_rollback(self):
"""Test LCM operations error handing
* About LCM operations:
This test includes the following operations.
- 1. Create a new VNF instance resource
- 2. Instantiate a VNF instance
- 3. Show VNF instance
- 4. Change Current VNF Package => FAILED_TEMP
- 5. Rollback Change Current VNF Package
- 6. Show VNF instance
- 7. Terminate a VNF instance
- 8. Delete a VNF instance
"""
# 1. Create a new VNF instance resource
# NOTE: extensions and vnfConfigurableProperties are omitted
# because they are commented out in etsi_nfv_sol001.
expected_inst_attrs = [
'id',
'vnfInstanceName',
'vnfInstanceDescription',
'vnfdId',
'vnfProvider',
'vnfProductName',
'vnfSoftwareVersion',
'vnfdVersion',
# 'vnfConfigurableProperties', # omitted
# 'vimConnectionInfo', # omitted
'instantiationState',
# 'instantiatedVnfInfo', # omitted
'metadata',
# 'extensions', # omitted
'_links'
]
create_req = paramgen.test_helm_instantiate_create(
self.vnfd_id_1)
resp, body = self.create_vnf_instance(create_req)
self.assertEqual(201, resp.status_code)
self.check_resp_headers_in_create(resp)
self.check_resp_body(body, expected_inst_attrs)
inst_id = body['id']
# check usageState of VNF Package
usage_state = self.get_vnf_package(self.vnf_pkg_1)['usageState']
self.assertEqual('IN_USE', usage_state)
# 2. Instantiate a VNF instance
instantiate_req = paramgen.helm_instantiate(
self.auth_url, self.bearer_token, self.ssl_ca_cert)
resp, body = self.instantiate_vnf_instance(inst_id, instantiate_req)
self.assertEqual(202, resp.status_code)
self.check_resp_headers_in_operation_task(resp)
lcmocc_id = os.path.basename(resp.headers['Location'])
self.wait_lcmocc_complete(lcmocc_id)
# 3. Show VNF instance
additional_inst_attrs = [
'vimConnectionInfo',
'instantiatedVnfInfo'
]
expected_inst_attrs.extend(additional_inst_attrs)
resp, body = self.show_vnf_instance(inst_id)
self.assertEqual(200, resp.status_code)
self.check_resp_headers_in_get(resp)
self.check_resp_body(body, expected_inst_attrs)
vnfc_resource_infos = body['instantiatedVnfInfo']['vnfcResourceInfo']
before_vdu2_ids = [vnfc_info['id'] for vnfc_info in vnfc_resource_infos
if vnfc_info['vduId'] == 'VDU2']
# 4. Change Current VNF Package => FAILED_TEMP
change_vnfpkg_req = paramgen.helm_error_handling_change_vnfpkg(
self.vnfd_id_2)
resp, body = self.change_vnfpkg(inst_id, change_vnfpkg_req)
self.assertEqual(202, resp.status_code)
self.check_resp_headers_in_operation_task(resp)
lcmocc_id = os.path.basename(resp.headers['Location'])
self.wait_lcmocc_failed_temp(lcmocc_id)
# 5. Rollback Change Current VNF Package
resp, body = self.rollback_lcmocc(lcmocc_id)
self.assertEqual(202, resp.status_code)
self.check_resp_headers_in_delete(resp)
self.wait_lcmocc_rolled_back(lcmocc_id)
# check usageState of VNF Package
usage_state = self.get_vnf_package(self.vnf_pkg_2).get('usageState')
self.assertEqual('NOT_IN_USE', usage_state)
# 6. Show VNF instance
additional_inst_attrs = [
'vimConnectionInfo',
'instantiatedVnfInfo'
]
expected_inst_attrs.extend(additional_inst_attrs)
resp, body = self.show_vnf_instance(inst_id)
self.assertEqual(200, resp.status_code)
self.check_resp_headers_in_get(resp)
self.check_resp_body(body, expected_inst_attrs)
vnfc_resource_infos = body['instantiatedVnfInfo']['vnfcResourceInfo']
after_vdu2_ids = [vnfc_info['id'] for vnfc_info in vnfc_resource_infos
if vnfc_info['vduId'] == 'VDU2']
self.assertEqual(before_vdu2_ids, after_vdu2_ids)
# 7. Terminate a VNF instance
terminate_req = paramgen.helm_terminate()
resp, body = self.terminate_vnf_instance(inst_id, terminate_req)
self.assertEqual(202, resp.status_code)
self.check_resp_headers_in_operation_task(resp)
lcmocc_id = os.path.basename(resp.headers['Location'])
self.wait_lcmocc_complete(lcmocc_id)
# wait a bit because there is a slight time lag between lcmocc DB
# update and terminate completion.
time.sleep(3)
# 8. Delete a VNF instance
resp, body = self.delete_vnf_instance(inst_id)
self.assertEqual(204, resp.status_code)
self.check_resp_headers_in_delete(resp)
# check deletion of VNF instance
resp, body = self.show_vnf_instance(inst_id)
self.assertEqual(404, resp.status_code)


@ -0,0 +1,74 @@
# Copyright (C) 2022 Nippon Telegraph and Telephone Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
from unittest import mock
from tacker.sol_refactored.common import exceptions as sol_ex
from tacker.sol_refactored.common import vnfd_utils
from tacker.sol_refactored.infra_drivers.kubernetes import helm
from tacker.sol_refactored import objects
from tacker.tests.unit import base
CNF_SAMPLE_VNFD_ID = "b1bb0ce7-ebca-4fa7-95ed-4840d70a1177"
class TestHelm(base.TestCase):
def setUp(self):
super(TestHelm, self).setUp()
objects.register_all()
self.driver = helm.Helm()
cur_dir = os.path.dirname(__file__)
# NOTE: borrow a k8s sample for the moment since it is enough
# for current tests.
sample_dir = os.path.join(cur_dir, "../..", "samples")
self.vnfd_1 = vnfd_utils.Vnfd(CNF_SAMPLE_VNFD_ID)
self.vnfd_1.init_from_csar_dir(os.path.join(sample_dir, "sample2"))
def test_scale_invalid_parameter(self):
req = objects.ScaleVnfRequest(
type='SCALE_OUT',
aspectId='vdu1_aspect',
numberOfSteps=1
)
inst_vnf_info = objects.VnfInstanceV2_InstantiatedVnfInfo(
vnfcResourceInfo=[objects.VnfcResourceInfoV2(vduId='VDU1')],
metadata={
'namespace': 'default',
'release_name': 'test-release',
'helm_chart_path': 'Files/kubernetes/test-chart.tgz',
'helm_value_names': {'VDU2': {'replica': 'values.replica'}}
}
)
inst = objects.VnfInstanceV2(
instantiatedVnfInfo=inst_vnf_info
)
grant_req = objects.GrantRequestV1(
addResources=[
objects.ResourceDefinitionV1(
type='COMPUTE',
resourceTemplateId='VDU1'
)
]
)
expected_ex = sol_ex.HelmParameterNotFound(vdu_name='VDU1')
ex = self.assertRaises(sol_ex.HelmParameterNotFound,
self.driver._scale, req, inst, grant_req, mock.Mock(),
self.vnfd_1, mock.Mock(), mock.Mock())
self.assertEqual(expected_ex.detail, ex.detail)
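
For contrast with the failing case above, where helm_value_names has no 'VDU1' entry, a metadata dict that would let _scale resolve the Helm parameter might look like the sketch below; the parameter paths are assumptions based on the sample chart's values.yaml, not something this test asserts:

# Hypothetical well-formed instantiatedVnfInfo metadata: every scalable
# VDU maps to a replica parameter the driver can override on upgrade.
metadata = {
    'namespace': 'default',
    'release_name': 'test-release',
    'helm_chart_path': 'Files/kubernetes/test-chart-0.1.1.tgz',
    'helm_value_names': {
        'VDU1': {'replica': 'replicaCountVdu1'},
        'VDU2': {'replica': 'replicaCountVdu2'},
    },
}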