Support Basic Lifecycle Operations of CNF v2 API
This patch implements the following APIs defined in ETSI NFV-SOL003 v3.3.1 5.5.2.11a with the 'Kubernetes' vim:

1. Create VNF
2. Instantiate VNF
3. Change Current VNF Package
4. Terminate VNF
5. Delete VNF

Retry and rollback of the Change Current VNF Package API with the 'Kubernetes' vim are also supported.

Points to note:

1. Currently only 'RollingUpdate' is supported for CNF deployment update.
2. Instantiate VNF cannot be rolled back or retried with the 'Kubernetes' vim; this will be supported in the future.

Implements: blueprint upgrade-vnf-package
Change-Id: Iae477cb4347ae01718d8dbf6ab8b2289e1758039
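For illustration only, a Change Current VNF Package request for a CNF might carry additionalParams such as the ones below. The parameter names are taken from this patch; the vnfdId, file paths, and class names are hypothetical examples.

    # Hypothetical request body (values are examples, not part of this patch)
    change_vnfpkg_req = {
        "vnfdId": "new-vnfd-id",
        "additionalParams": {
            "upgrade_type": "RollingUpdate",   # only 'RollingUpdate' is supported
            "lcm-kubernetes-def-files": [
                "Files/new_kubernetes/new_deployment.yaml"
            ],
            "vdu_params": [
                {"vdu_id": "VDU1"}             # 'vdu_id' is required when vdu_params is set
            ],
            "lcm-operation-coordinate-new-vnf": "Scripts/coordinate_new_vnf.py",
            "lcm-operation-coordinate-new-vnf-class": "CoordinateNewVnf",
            "lcm-operation-coordinate-old-vnf": "Scripts/coordinate_old_vnf.py",
            "lcm-operation-coordinate-old-vnf-class": "CoordinateOldVnf"
        }
    }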
parent 78dbba65c7
commit df0f8fc6bd

.zuul.yaml (+15 lines)
@@ -515,6 +515,20 @@
       - openstack/heat-translator
       - openstack/tosca-parser

+- job:
+    name: tacker-functional-devstack-multinode-sol-kubernetes-v2
+    parent: tacker-functional-devstack-multinode-sol-kubernetes
+    description: |
+      Multinodes job for SOL Kubernetes V2 devstack-based functional tests
+    host-vars:
+      controller-tacker:
+        devstack_local_conf:
+          post-config:
+            $TACKER_CONF:
+              v2_vnfm:
+                kubernetes_vim_rsc_wait_timeout: 800
+    tox_envlist: dsvm-functional-sol-kubernetes-v2
+
 - project:
     templates:
       - check-requirements
@@ -531,4 +545,5 @@
       - tacker-functional-devstack-multinode-sol-kubernetes
       - tacker-functional-devstack-multinode-libs-master
       - tacker-functional-devstack-multinode-sol-v2
+      - tacker-functional-devstack-multinode-sol-kubernetes-v2
       - tacker-functional-devstack-multinode-sol-multi-tenant
releasenotes/notes/add-v2-cnf-api-a938c17b5c57fb67.yaml (new file, +12 lines)

@@ -0,0 +1,12 @@
+---
+features:
+  - |
+    Support container-based VNF operations with the following new version of
+    RESTful APIs based on ETSI NFV specifications.
+    Version "2.0.0" of the Instantiate VNF, Terminate VNF, and Change Current
+    VNF Package APIs supports the Kubernetes vim. Other APIs besides Scale
+    VNF and Heal VNF, such as Create VNF, Delete VNF, List VNF, and Show VNF,
+    already support the Kubernetes vim because they use common processing
+    that does not depend on the vim type (OpenStack or Kubernetes).
+    Version "2.0.0" of Scale VNF and Heal VNF with the Kubernetes vim will
+    be supported in the future.
@@ -117,10 +117,10 @@ valid_description_regex = valid_description_regex_base % (
 keyvalue_pairs = {
     'type': 'object',
     'patternProperties': {
-        '^[a-zA-Z0-9-_:. /]{1,255}$': {
+        '^[a-zA-Z0-9-_:. /]{1,1024}$': {
             'anyOf': [
                 {'type': 'array'},
-                {'type': 'string', 'maxLength': 255},
+                {'type': 'string', 'maxLength': 1024},
                 {'type': 'object'},
                 {'type': 'null'},
                 {'type': 'boolean'}
@@ -31,6 +31,9 @@ VNFM_OPTS = [
     cfg.IntOpt('openstack_vim_stack_create_timeout',
                default=20,
                help='Timeout (in minutes) of heat stack creation.'),
+    cfg.IntOpt('kubernetes_vim_rsc_wait_timeout',
+               default=500,
+               help='Timeout (in seconds) of Kubernetes resource creation.'),
     # NOTE: This is for test use since it is convenient to be able to delete
     # under development.
     cfg.BoolOpt('test_enable_lcm_op_occ_delete',
@ -274,3 +274,57 @@ class CoordinateVNFExecutionFailed(SolHttpError422):
|
||||
|
||||
class VmRunningFailed(SolHttpError422):
|
||||
message = _("VM is running incorrectly. Reason: '%(error_info)s'")
|
||||
|
||||
|
||||
class CnfDefinitionNotFound(SolHttpError400):
|
||||
message = _("'%(diff_files)s' do not exist in vnf artifact files")
|
||||
|
||||
|
||||
class NamespaceNotUniform(SolHttpError400):
|
||||
message = _("There are multiple namespaces in the manifest file. Only one"
|
||||
"namespace can be used in one VNF.")
|
||||
|
||||
|
||||
class ExecuteK8SResourceCreateApiFailed(SolHttpError400):
|
||||
message = _("An error occurred when creating k8s resource.")
|
||||
|
||||
|
||||
class CreateK8SResourceFailed(SolHttpError400):
|
||||
message = _("An error occurred when creating k8s resource.")
|
||||
|
||||
|
||||
class ReadEndpointsFalse(SolHttpError400):
|
||||
message = _("read endpoints failed. kind:'%(kind)s'.")
|
||||
|
||||
|
||||
class DeleteK8SResourceFailed(SolHttpError400):
|
||||
message = _("An error occurred when deleting k8s resource.")
|
||||
|
||||
|
||||
class UnmatchedFileException(SolHttpError400):
|
||||
message = _("The updated file '%(new_file_path)s' does not match the"
|
||||
" original file. Some resources may be missing.")
|
||||
|
||||
|
||||
class UnSupportedKindException(SolHttpError400):
|
||||
message = _("The update file '%(new_file_path)s' does not contain"
|
||||
" 'Deployment' resource and other types of updates are not"
|
||||
" currently supported.")
|
||||
|
||||
|
||||
class NotFoundUpdateFileException(SolHttpError400):
|
||||
message = _("No original file matching the update file"
|
||||
" '%(new_file_path)s' was found.")
|
||||
|
||||
|
||||
class MissingParameterException(SolHttpError400):
|
||||
message = _("If you set vdu_params parameter in request body,"
|
||||
" the 'vdu_id' is necessary.")
|
||||
|
||||
|
||||
class UpdateK8SResourceFailed(SolHttpError400):
|
||||
message = _("An error occurred when updating k8s resource.")
|
||||
|
||||
|
||||
class NotSupportOperationType(SolHttpError404):
|
||||
message = _("This operation is not currently supported.")
|
||||
|
@ -43,14 +43,15 @@ def get_vim(context, vim_id):
|
||||
|
||||
|
||||
def vim_to_conn_info(vim):
|
||||
region = None
|
||||
if vim.get('placement_attr', {}).get('regions'):
|
||||
region = vim['placement_attr']['regions'][0]
|
||||
|
||||
vim_auth = vim['vim_auth']
|
||||
|
||||
if vim['vim_type'] == "openstack":
|
||||
# see. https://nfvwiki.etsi.org/index.php
|
||||
# ?title=ETSINFV.OPENSTACK_KEYSTONE.V_3
|
||||
region = None
|
||||
if vim.get('placement_attr', {}).get('regions'):
|
||||
region = vim['placement_attr']['regions'][0]
|
||||
|
||||
vim_auth = vim['vim_auth']
|
||||
access_info = {
|
||||
'username': vim_auth['username'],
|
||||
'password': vim_auth['password'],
|
||||
@ -74,6 +75,27 @@ def vim_to_conn_info(vim):
|
||||
interfaceInfo=interface_info,
|
||||
accessInfo=access_info
|
||||
)
|
||||
else: # k8s
|
||||
# TODO(oda-g): not supported at the moment
|
||||
pass
|
||||
if vim['vim_type'] == "kubernetes": # k8s
|
||||
if vim_auth['username'] and vim_auth['password']:
|
||||
access_info = {
|
||||
'username': vim_auth['username'],
|
||||
'password': vim_auth['password']
|
||||
}
|
||||
elif vim_auth['bearer_token']:
|
||||
access_info = {
|
||||
'bearer_token': vim_auth['bearer_token']
|
||||
}
|
||||
|
||||
interface_info = {
|
||||
'endpoint': vim_auth['auth_url']
|
||||
}
|
||||
if 'ssl_ca_cert' in vim_auth.keys():
|
||||
interface_info['ssl_ca_cert'] = vim_auth['ssl_ca_cert']
|
||||
|
||||
return objects.VimConnectionInfo(
|
||||
vimId=vim['vim_id'],
|
||||
vimType='kubernetes',
|
||||
interfaceInfo=interface_info,
|
||||
accessInfo=access_info
|
||||
)
|
||||
raise sol_ex.SolException(sol_detail='not support vim type')
|
||||
|
@ -16,6 +16,7 @@
|
||||
|
||||
import io
|
||||
import os
|
||||
import re
|
||||
import shutil
|
||||
import tempfile
|
||||
import zipfile
|
||||
@ -421,3 +422,35 @@ class Vnfd(object):
|
||||
|
||||
# should not occur
|
||||
return 0
|
||||
|
||||
def get_vnf_artifact_files(self):
|
||||
|
||||
def _get_file_contents(path):
|
||||
with open(path, 'rb') as file_object:
|
||||
content = re.split(b'\n\n+', file_object.read())
|
||||
return content
|
||||
|
||||
mani_artifact_files = []
|
||||
meta_artifacts_files = []
|
||||
|
||||
if self.tosca_meta.get('ETSI-Entry-Manifest'):
|
||||
manifest_path = os.path.join(
|
||||
self.csar_dir, self.tosca_meta.get('ETSI-Entry-Manifest'))
|
||||
|
||||
mani_artifact_files = [
|
||||
yaml.safe_load(content).get('Source')
|
||||
for content in _get_file_contents(manifest_path) if
|
||||
yaml.safe_load(content) and
|
||||
yaml.safe_load(content).get('Source')]
|
||||
else:
|
||||
tosca_path = os.path.join(
|
||||
self.csar_dir, 'TOSCA-Metadata', 'TOSCA.meta')
|
||||
|
||||
meta_artifacts_files = [
|
||||
yaml.safe_load(content).get('Name')
|
||||
for content in _get_file_contents(tosca_path) if
|
||||
yaml.safe_load(content) and yaml.safe_load(
|
||||
content).get('Name')]
|
||||
|
||||
mani_artifact_files.extend(meta_artifacts_files)
|
||||
return mani_artifact_files
|
||||
|
@@ -188,8 +188,12 @@ class ConductorV2(object):
                                         self.endpoint)

         try:
-            vnfd = self.nfvo_client.get_vnfd(context, inst.vnfdId,
-                                             all_contents=True)
+            if lcmocc.operation == fields.LcmOperationType.CHANGE_VNFPKG:
+                vnfd = self.nfvo_client.get_vnfd(
+                    context, lcmocc.operationParams.vnfdId, all_contents=True)
+            else:
+                vnfd = self.nfvo_client.get_vnfd(context, inst.vnfdId,
+                                                 all_contents=True)
             grant_req, grant = lcmocc_utils.get_grant_req_and_grant(context,
                                                                     lcmocc)
             self.vnflcm_driver.post_grant(context, lcmocc, inst, grant_req,
@ -16,15 +16,19 @@
|
||||
import os
|
||||
import pickle
|
||||
import subprocess
|
||||
from urllib.parse import urlparse
|
||||
import urllib.request as urllib2
|
||||
|
||||
from oslo_log import log as logging
|
||||
from oslo_utils import uuidutils
|
||||
import yaml
|
||||
|
||||
from tacker.sol_refactored.common import config
|
||||
from tacker.sol_refactored.common import exceptions as sol_ex
|
||||
from tacker.sol_refactored.common import lcm_op_occ_utils as lcmocc_utils
|
||||
from tacker.sol_refactored.common import vim_utils
|
||||
from tacker.sol_refactored.common import vnf_instance_utils as inst_utils
|
||||
from tacker.sol_refactored.infra_drivers.kubernetes import kubernetes
|
||||
from tacker.sol_refactored.infra_drivers.openstack import openstack
|
||||
from tacker.sol_refactored.nfvo import nfvo_client
|
||||
from tacker.sol_refactored import objects
|
||||
@ -405,8 +409,11 @@ class VnfLcmDriverV2(object):
|
||||
if vim_info.vimType == 'ETSINFV.OPENSTACK_KEYSTONE.V_3':
|
||||
driver = openstack.Openstack()
|
||||
driver.instantiate(req, inst, grant_req, grant, vnfd)
|
||||
elif vim_info.vimType == 'kubernetes': # k8s
|
||||
driver = kubernetes.Kubernetes()
|
||||
driver.instantiate(req, inst, grant_req, grant, vnfd)
|
||||
else:
|
||||
# only support openstack at the moment
|
||||
# should not occur
|
||||
raise sol_ex.SolException(sol_detail='not support vim type')
|
||||
|
||||
inst.instantiationState = 'INSTANTIATED'
|
||||
@ -557,8 +564,11 @@ class VnfLcmDriverV2(object):
|
||||
if vim_info.vimType == 'ETSINFV.OPENSTACK_KEYSTONE.V_3':
|
||||
driver = openstack.Openstack()
|
||||
driver.terminate(req, inst, grant_req, grant, vnfd)
|
||||
elif vim_info.vimType == 'kubernetes': # k8s
|
||||
driver = kubernetes.Kubernetes()
|
||||
driver.terminate(req, inst, grant_req, grant, vnfd)
|
||||
else:
|
||||
# only support openstack at the moment
|
||||
# should not occur
|
||||
raise sol_ex.SolException(sol_detail='not support vim type')
|
||||
|
||||
inst.instantiationState = 'NOT_INSTANTIATED'
|
||||
@ -959,10 +969,15 @@ class VnfLcmDriverV2(object):
|
||||
def change_vnfpkg_grant(self, grant_req, req, inst, vnfd):
|
||||
inst_info = inst.instantiatedVnfInfo
|
||||
grant_req.flavourId = inst_info.flavourId
|
||||
target_vdu_ids = [
|
||||
vdu_param.get('vdu_id')
|
||||
for vdu_param in req.additionalParams.get('vdu_params', [])
|
||||
]
|
||||
if req.additionalParams.get('vdu_params'):
|
||||
target_vdu_ids = [
|
||||
vdu_param.get(
|
||||
'vdu_id') for vdu_param in req.additionalParams.get(
|
||||
'vdu_params')]
|
||||
else:
|
||||
if inst_info.obj_attr_is_set('vnfcResourceInfo'):
|
||||
target_vdu_ids = [inst_vnc.vduId for inst_vnc in
|
||||
inst_info.vnfcResourceInfo]
|
||||
|
||||
if req.additionalParams.get('upgrade_type') == 'RollingUpdate':
|
||||
update_reses = []
|
||||
@ -1012,6 +1027,70 @@ class VnfLcmDriverV2(object):
|
||||
# not reach here at the moment
|
||||
pass
|
||||
|
||||
def _pre_check_for_change_vnfpkg(self, context, req, inst, vnfd):
|
||||
def _get_file_content(file_path):
|
||||
if ((urlparse(file_path).scheme == 'file') or
|
||||
(bool(urlparse(file_path).scheme) and
|
||||
bool(urlparse(file_path).netloc))):
|
||||
with urllib2.urlopen(file_path) as file_object:
|
||||
file_content = file_object.read()
|
||||
else:
|
||||
with open(file_path, 'rb') as file_object:
|
||||
file_content = file_object.read()
|
||||
return file_content
|
||||
|
||||
vnf_artifact_files = vnfd.get_vnf_artifact_files()
|
||||
if req.additionalParams.get('lcm-kubernetes-def-files') is None:
|
||||
target_k8s_files = inst.metadata.get('lcm-kubernetes-def-files')
|
||||
else:
|
||||
target_k8s_files = []
|
||||
new_file_paths = req.additionalParams.get(
|
||||
'lcm-kubernetes-def-files')
|
||||
old_vnfd = self.nfvo_client.get_vnfd(
|
||||
context=context, vnfd_id=inst.vnfdId, all_contents=False)
|
||||
old_file_paths = inst.metadata.get('lcm-kubernetes-def-files')
|
||||
|
||||
for new_file_path in new_file_paths:
|
||||
new_file_infos = [
|
||||
{"kind": content.get('kind'),
|
||||
"name": content.get('metadata', {}).get('name', '')}
|
||||
for content in list(yaml.safe_load_all(
|
||||
_get_file_content(os.path.join(
|
||||
vnfd.csar_dir, new_file_path))))]
|
||||
for old_file_path in old_file_paths:
|
||||
find_flag = False
|
||||
old_file_infos = [
|
||||
{"kind": content.get('kind'),
|
||||
"name": content.get('metadata', {}).get('name', '')}
|
||||
for content in list(yaml.safe_load_all(
|
||||
_get_file_content(os.path.join(
|
||||
old_vnfd.csar_dir, old_file_path))))]
|
||||
resources = [info for info in old_file_infos
|
||||
if info in new_file_infos]
|
||||
if len(resources) != 0:
|
||||
if len(resources) != len(old_file_infos):
|
||||
raise sol_ex.UnmatchedFileException(
|
||||
new_file_path=new_file_path)
|
||||
if 'Deployment' not in [res.get(
|
||||
'kind') for res in resources]:
|
||||
raise sol_ex.UnSupportedKindException(
|
||||
new_file_path=new_file_path)
|
||||
old_file_paths.remove(old_file_path)
|
||||
target_k8s_files.append(new_file_path)
|
||||
find_flag = True
|
||||
break
|
||||
continue
|
||||
if not find_flag:
|
||||
raise sol_ex.NotFoundUpdateFileException(
|
||||
new_file_path=new_file_path)
|
||||
|
||||
target_k8s_files.extend(old_file_paths)
|
||||
if set(target_k8s_files).difference(set(vnf_artifact_files)):
|
||||
diff_files = ','.join(list(set(
|
||||
target_k8s_files).difference(set(vnf_artifact_files))))
|
||||
raise sol_ex.CnfDefinitionNotFound(diff_files=diff_files)
|
||||
return target_k8s_files
|
||||
|
||||
def change_vnfpkg_process(
|
||||
self, context, lcmocc, inst, grant_req, grant, vnfd):
|
||||
inst_saved = inst.obj_clone()
|
||||
@ -1024,8 +1103,20 @@ class VnfLcmDriverV2(object):
|
||||
except Exception as ex:
|
||||
lcmocc_utils.update_lcmocc(lcmocc, inst_saved, inst)
|
||||
raise Exception from ex
|
||||
elif vim_info.vimType == 'kubernetes': # k8s
|
||||
target_k8s_files = self._pre_check_for_change_vnfpkg(
|
||||
context, req, inst, vnfd)
|
||||
update_req = req.obj_clone()
|
||||
update_req.additionalParams[
|
||||
'lcm-kubernetes-def-files'] = target_k8s_files
|
||||
driver = kubernetes.Kubernetes()
|
||||
try:
|
||||
driver.change_vnfpkg(update_req, inst, grant_req, grant, vnfd)
|
||||
except Exception as ex:
|
||||
lcmocc_utils.update_lcmocc(lcmocc, inst_saved, inst)
|
||||
raise Exception from ex
|
||||
else:
|
||||
# only support openstack at the moment
|
||||
# should not occur
|
||||
raise sol_ex.SolException(sol_detail='not support vim type')
|
||||
|
||||
def change_ext_conn_rollback(self, context, lcmocc, inst, grant_req,
|
||||
@ -1047,6 +1138,10 @@ class VnfLcmDriverV2(object):
|
||||
if vim_info.vimType == 'ETSINFV.OPENSTACK_KEYSTONE.V_3':
|
||||
driver.change_vnfpkg_rollback(
|
||||
req, inst, grant_req, grant, vnfd, lcmocc)
|
||||
elif vim_info.vimType == 'kubernetes': # k8s
|
||||
driver = kubernetes.Kubernetes()
|
||||
driver.change_vnfpkg_rollback(
|
||||
req, inst, grant_req, grant, vnfd, lcmocc)
|
||||
else:
|
||||
# only support openstack at the moment
|
||||
# should not occur
|
||||
raise sol_ex.SolException(sol_detail='not support vim type')
|
||||
|
@ -556,6 +556,23 @@ class VnfLcmControllerV2(sol_wsgi.SolAPIController):
|
||||
if lcmocc.operationState != v2fields.LcmOperationStateType.FAILED_TEMP:
|
||||
raise sol_ex.LcmOpOccNotFailedTemp(lcmocc_id=lcmocc.id)
|
||||
|
||||
# TODO(YiFeng) support retry operation for CNF instantiate
|
||||
# At present, the retry operation will fail for instantiate with
|
||||
# kubernetes vim.
|
||||
if lcmocc.operation == v2fields.LcmOperationType.INSTANTIATE:
|
||||
if lcmocc.operationParams.obj_attr_is_set('vimConnectionInfo'):
|
||||
vim_infos = lcmocc.operationParams.vimConnectionInfo
|
||||
else:
|
||||
vim_info = vim_utils.get_default_vim(context)
|
||||
vim_infos = {"default": vim_info}
|
||||
else:
|
||||
inst = inst_utils.get_inst(context, lcmocc.vnfInstanceId)
|
||||
vim_infos = inst.vimConnectionInfo
|
||||
vim_info = inst_utils.select_vim_info(vim_infos)
|
||||
if lcmocc.operation != 'CHANGE_VNFPKG' and (
|
||||
vim_info.vimType == 'kubernetes'):
|
||||
raise sol_ex.NotSupportOperationType
|
||||
|
||||
self.conductor_rpc.retry_lcm_op(context, lcmocc.id)
|
||||
|
||||
return sol_wsgi.SolResponse(202, None)
|
||||
@ -571,6 +588,23 @@ class VnfLcmControllerV2(sol_wsgi.SolAPIController):
|
||||
if lcmocc.operationState != v2fields.LcmOperationStateType.FAILED_TEMP:
|
||||
raise sol_ex.LcmOpOccNotFailedTemp(lcmocc_id=lcmocc.id)
|
||||
|
||||
# TODO(YiFeng) support rollback operation for CNF instantiate
|
||||
# At present, the rollback operation will fail for instantiate with
|
||||
# kubernetes vim.
|
||||
if lcmocc.operation == v2fields.LcmOperationType.INSTANTIATE:
|
||||
if lcmocc.operationParams.obj_attr_is_set('vimConnectionInfo'):
|
||||
vim_infos = lcmocc.operationParams.vimConnectionInfo
|
||||
else:
|
||||
vim_info = vim_utils.get_default_vim(context)
|
||||
vim_infos = {"default": vim_info}
|
||||
else:
|
||||
inst = inst_utils.get_inst(context, lcmocc.vnfInstanceId)
|
||||
vim_infos = inst.vimConnectionInfo
|
||||
vim_info = inst_utils.select_vim_info(vim_infos)
|
||||
if lcmocc.operation != 'CHANGE_VNFPKG' and (
|
||||
vim_info.vimType == 'kubernetes'):
|
||||
raise sol_ex.NotSupportOperationType
|
||||
|
||||
self.conductor_rpc.rollback_lcm_op(context, lcmocc.id)
|
||||
|
||||
return sol_wsgi.SolResponse(202, None)
|
||||
|
tacker/sol_refactored/infra_drivers/kubernetes/kubernetes.py (new file, 353 lines)
@ -0,0 +1,353 @@
|
||||
# Copyright (C) 2022 Fujitsu
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
import os
|
||||
import pickle
|
||||
import subprocess
|
||||
|
||||
from oslo_log import log as logging
|
||||
from oslo_utils import uuidutils
|
||||
|
||||
from tacker.sol_refactored.common import config
|
||||
from tacker.sol_refactored.common import exceptions as sol_ex
|
||||
from tacker.sol_refactored.common import vnf_instance_utils as inst_utils
|
||||
from tacker.sol_refactored.infra_drivers.kubernetes import kubernetes_utils
|
||||
from tacker.sol_refactored import objects
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
CONF = config.CONF
|
||||
|
||||
|
||||
class Kubernetes(object):
|
||||
|
||||
def __init__(self):
|
||||
pass
|
||||
|
||||
def instantiate(self, req, inst, grant_req, grant, vnfd):
|
||||
# pre instantiate cnf
|
||||
target_k8s_files = req.additionalParams.get(
|
||||
'lcm-kubernetes-def-files')
|
||||
vnf_artifact_files = vnfd.get_vnf_artifact_files()
|
||||
|
||||
if vnf_artifact_files is None or set(target_k8s_files).difference(
|
||||
set(vnf_artifact_files)):
|
||||
if vnf_artifact_files:
|
||||
diff_files = ','.join(list(set(
|
||||
target_k8s_files).difference(set(vnf_artifact_files))))
|
||||
else:
|
||||
diff_files = ','.join(target_k8s_files)
|
||||
raise sol_ex.CnfDefinitionNotFound(diff_files=diff_files)
|
||||
|
||||
# get k8s content from yaml file
|
||||
k8s_resources, namespace = kubernetes_utils.get_k8s_json_file(
|
||||
req, inst, target_k8s_files, vnfd, 'INSTANTIATE')
|
||||
|
||||
# sort k8s resource
|
||||
sorted_k8s_reses = kubernetes_utils.sort_k8s_resource(
|
||||
k8s_resources, 'INSTANTIATE')
|
||||
|
||||
# deploy k8s resources with sorted resources
|
||||
vim_info = inst_utils.select_vim_info(inst.vimConnectionInfo)
|
||||
k8s_client = kubernetes_utils.KubernetesClient(vim_info)
|
||||
created_k8s_reses = k8s_client.create_k8s_resource(
|
||||
sorted_k8s_reses, namespace)
|
||||
|
||||
# wait k8s resource create complete
|
||||
k8s_client.wait_k8s_res_create(created_k8s_reses)
|
||||
|
||||
# make instantiated info
|
||||
all_pods = k8s_client.list_namespaced_pods(namespace)
|
||||
self._make_cnf_instantiated_info(
|
||||
req, inst, vnfd, namespace, created_k8s_reses, all_pods)
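# Illustration (not part of this patch): the Instantiate request handled above
# is assumed to carry additionalParams along the lines of the following; the
# file path and namespace values are hypothetical examples.
#
#   additionalParams = {
#       "lcm-kubernetes-def-files": ["Files/kubernetes/deployment.yaml"],
#       "namespace": "default"
#   }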
|
||||
|
||||
def terminate(self, req, inst, grant_req, grant, vnfd):
|
||||
target_k8s_files = inst.metadata.get('lcm-kubernetes-def-files')
|
||||
|
||||
# get k8s content from yaml file
|
||||
k8s_resources, namespace = kubernetes_utils.get_k8s_json_file(
|
||||
req, inst, target_k8s_files, vnfd, 'TERMINATE')
|
||||
|
||||
# sort k8s resource
|
||||
sorted_k8s_reses = kubernetes_utils.sort_k8s_resource(
|
||||
k8s_resources, 'TERMINATE')
|
||||
|
||||
# delete k8s resources with sorted resources
|
||||
vim_info = inst_utils.select_vim_info(inst.vimConnectionInfo)
|
||||
k8s_client = kubernetes_utils.KubernetesClient(vim_info)
|
||||
k8s_client.delete_k8s_resource(req, sorted_k8s_reses, namespace)
|
||||
|
||||
# wait k8s resource delete complete
|
||||
k8s_client.wait_k8s_res_delete(sorted_k8s_reses, namespace)
|
||||
|
||||
def change_vnfpkg(self, req, inst, grant_req, grant, vnfd):
|
||||
if req.additionalParams.get('upgrade_type') == 'RollingUpdate':
|
||||
# get deployment name from vnfd
|
||||
deployment_names, namespace = (
|
||||
self._get_update_deployment_names_and_namespace(
|
||||
vnfd, req, inst))
|
||||
|
||||
# check deployment exists in kubernetes
|
||||
vim_info = inst_utils.select_vim_info(inst.vimConnectionInfo)
|
||||
k8s_client = kubernetes_utils.KubernetesClient(vim_info)
|
||||
k8s_client.check_deployment_exist(deployment_names, namespace)
|
||||
|
||||
# get new deployment body
|
||||
new_deploy_reses = kubernetes_utils.get_new_deployment_body(
|
||||
req, inst, vnfd, deployment_names, operation='CHANGE_VNFPKG')
|
||||
|
||||
# apply new deployment
|
||||
k8s_client.update_k8s_resource(new_deploy_reses, namespace)
|
||||
|
||||
# wait k8s resource update complete
|
||||
old_pods_names = [vnfc.computeResource.resourceId for vnfc in
|
||||
inst.instantiatedVnfInfo.vnfcResourceInfo]
|
||||
try:
|
||||
k8s_client.wait_k8s_res_update(
|
||||
new_deploy_reses, namespace, old_pods_names)
|
||||
except sol_ex.UpdateK8SResourceFailed as ex:
|
||||
self._update_cnf_instantiated_info(
|
||||
inst, deployment_names, k8s_client.list_namespaced_pods(
|
||||
namespace=namespace))
|
||||
raise ex
|
||||
|
||||
# execute coordinate vnf script
|
||||
try:
|
||||
self._execute_coordinate_vnf_script(
|
||||
req, inst, grant_req, grant, vnfd, 'CHANGE_VNFPKG',
|
||||
namespace, new_deploy_reses)
|
||||
except sol_ex.CoordinateVNFExecutionFailed as ex:
|
||||
self._update_cnf_instantiated_info(
|
||||
inst, deployment_names, k8s_client.list_namespaced_pods(
|
||||
namespace=namespace))
|
||||
raise ex
|
||||
|
||||
# update cnf instantiated info
|
||||
all_pods = k8s_client.list_namespaced_pods(namespace)
|
||||
self._update_cnf_instantiated_info(
|
||||
inst, deployment_names, all_pods)
|
||||
|
||||
else:
|
||||
# TODO(YiFeng): Blue-Green type will be supported in next version.
|
||||
raise sol_ex.SolException(sol_detail='not support update type')
|
||||
|
||||
inst.vnfdId = req.vnfdId
|
||||
if set(req.additionalParams.get(
|
||||
'lcm-kubernetes-def-files')).difference(set(
|
||||
inst.metadata.get('lcm-kubernetes-def-files'))):
|
||||
inst.metadata['lcm-kubernetes-def-files'] = (
|
||||
req.additionalParams.get('lcm-kubernetes-def-files'))
|
||||
|
||||
def change_vnfpkg_rollback(
|
||||
self, req, inst, grant_req, grant, vnfd, lcmocc):
|
||||
if not lcmocc.obj_attr_is_set('resourceChanges'):
|
||||
return
|
||||
if req.additionalParams.get('upgrade_type') == 'RollingUpdate':
|
||||
deployment_names = list({
|
||||
affected_vnfc.metadata['Deployment']['name'] for affected_vnfc
|
||||
in lcmocc.resourceChanges.affectedVnfcs if
|
||||
affected_vnfc.changeType == 'ADDED'})
|
||||
namespace = inst.metadata.get('namespace')
|
||||
|
||||
old_deploy_reses = kubernetes_utils.get_new_deployment_body(
|
||||
req, inst, vnfd, deployment_names,
|
||||
operation='CHANGE_VNFPKG_ROLLBACK')
|
||||
|
||||
# apply old deployment
|
||||
vim_info = inst_utils.select_vim_info(inst.vimConnectionInfo)
|
||||
k8s_client = kubernetes_utils.KubernetesClient(vim_info)
|
||||
k8s_client.update_k8s_resource(old_deploy_reses, namespace)
|
||||
|
||||
# wait k8s resource update complete
|
||||
old_pods_names = [vnfc.computeResource.resourceId for vnfc in
|
||||
inst.instantiatedVnfInfo.vnfcResourceInfo]
|
||||
try:
|
||||
k8s_client.wait_k8s_res_update(
|
||||
old_deploy_reses, namespace, old_pods_names)
|
||||
except sol_ex.UpdateK8SResourceFailed as ex:
|
||||
raise ex
|
||||
|
||||
# execute coordinate vnf script
|
||||
try:
|
||||
self._execute_coordinate_vnf_script(
|
||||
req, inst, grant_req, grant, vnfd,
|
||||
'CHANGE_VNFPKG_ROLLBACK',
|
||||
namespace, old_deploy_reses)
|
||||
except sol_ex.CoordinateVNFExecutionFailed as ex:
|
||||
raise ex
|
||||
|
||||
# update cnf instantiated info
|
||||
all_pods = k8s_client.list_namespaced_pods(namespace)
|
||||
self._update_cnf_instantiated_info(
|
||||
inst, deployment_names, all_pods)
|
||||
|
||||
else:
|
||||
# TODO(YiFeng): Blue-Green type will be supported in next version.
|
||||
raise sol_ex.SolException(sol_detail='not support update type')
|
||||
|
||||
def _get_update_deployment_names_and_namespace(self, vnfd, req, inst):
|
||||
vdu_nodes = vnfd.get_vdu_nodes(
|
||||
flavour_id=inst.instantiatedVnfInfo.flavourId)
|
||||
|
||||
if req.additionalParams.get('vdu_params'):
|
||||
target_vdus = [vdu_param.get('vdu_id') for vdu_param
|
||||
in req.additionalParams.get('vdu_params')]
|
||||
if None in target_vdus:
|
||||
raise sol_ex.MissingParameterException
|
||||
else:
|
||||
target_vdus = [inst_vnc.vduId for inst_vnc in
|
||||
inst.instantiatedVnfInfo.vnfcResourceInfo]
|
||||
|
||||
deployment_names = [value.get('properties', {}).get('name')
|
||||
for name, value in vdu_nodes.items()
|
||||
if name in target_vdus]
|
||||
namespace = inst.metadata.get('namespace')
|
||||
|
||||
return deployment_names, namespace
|
||||
|
||||
def _make_cnf_instantiated_info(
|
||||
self, req, inst, vnfd, namespace, created_k8s_reses, all_pods):
|
||||
flavour_id = req.flavourId
|
||||
target_kinds = {"Pod", "Deployment", "DaemonSet",
|
||||
"StatefulSet", "ReplicaSet"}
|
||||
|
||||
vdu_nodes = vnfd.get_vdu_nodes(flavour_id)
|
||||
vdu_ids = {value.get('properties').get('name'): key
|
||||
for key, value in vdu_nodes.items()}
|
||||
|
||||
vnfc_resources = []
|
||||
for k8s_res in created_k8s_reses:
|
||||
if k8s_res.get('kind', '') not in target_kinds:
|
||||
continue
|
||||
for pod in all_pods:
|
||||
pod_name = pod.metadata.name
|
||||
match_result = kubernetes_utils.is_match_pod_naming_rule(
|
||||
k8s_res.get('kind', ''), k8s_res.get('name', ''),
|
||||
pod_name)
|
||||
if match_result:
|
||||
metadata = {}
|
||||
metadata[k8s_res.get('kind')] = k8s_res.get('metadata')
|
||||
if k8s_res.get('kind') != 'Pod':
|
||||
metadata['Pod'] = pod.metadata.to_dict()
|
||||
vnfc_resource = objects.VnfcResourceInfoV2(
|
||||
id=uuidutils.generate_uuid(),
|
||||
vduId=vdu_ids.get(k8s_res.get('name', '')),
|
||||
computeResource=objects.ResourceHandle(
|
||||
resourceId=pod_name,
|
||||
vimLevelResourceType=k8s_res.get('kind')
|
||||
),
|
||||
metadata=metadata
|
||||
)
|
||||
vnfc_resources.append(vnfc_resource)
|
||||
|
||||
inst_vnf_info = objects.VnfInstanceV2_InstantiatedVnfInfo(
|
||||
flavourId=flavour_id,
|
||||
vnfState='STARTED',
|
||||
)
|
||||
|
||||
if vnfc_resources:
|
||||
inst_vnf_info.vnfcResourceInfo = vnfc_resources
|
||||
# make vnfcInfo
|
||||
# NOTE: vnfcInfo only exists in SOL002
|
||||
inst_vnf_info.vnfcInfo = [
|
||||
objects.VnfcInfoV2(
|
||||
id=f'{vnfc_res_info.vduId}-{vnfc_res_info.id}',
|
||||
vduId=vnfc_res_info.vduId,
|
||||
vnfcResourceInfoId=vnfc_res_info.id,
|
||||
vnfcState='STARTED'
|
||||
)
|
||||
for vnfc_res_info in vnfc_resources
|
||||
]
|
||||
|
||||
inst.instantiatedVnfInfo = inst_vnf_info
|
||||
inst.metadata = {"namespace": namespace if namespace else None}
|
||||
inst.metadata['lcm-kubernetes-def-files'] = req.additionalParams.get(
|
||||
'lcm-kubernetes-def-files')
|
||||
|
||||
def _execute_coordinate_vnf_script(
|
||||
self, req, inst, grant_req, grant, vnfd,
|
||||
operation, namespace, new_deploy_reses):
|
||||
coordinate_vnf = None
|
||||
coordinate_vnf_class = None
|
||||
if req.obj_attr_is_set('additionalParams'):
|
||||
if operation == 'CHANGE_VNFPKG':
|
||||
coordinate_vnf = req.additionalParams.get(
|
||||
'lcm-operation-coordinate-new-vnf')
|
||||
coordinate_vnf_class = req.additionalParams.get(
|
||||
'lcm-operation-coordinate-new-vnf-class')
|
||||
else:
|
||||
coordinate_vnf = req.additionalParams.get(
|
||||
'lcm-operation-coordinate-old-vnf')
|
||||
coordinate_vnf_class = req.additionalParams.get(
|
||||
'lcm-operation-coordinate-old-vnf-class')
|
||||
|
||||
if coordinate_vnf and coordinate_vnf_class:
|
||||
tmp_csar_dir = vnfd.make_tmp_csar_dir()
|
||||
script_dict = {
|
||||
"request": req.to_dict(),
|
||||
"vnf_instance": inst.to_dict(),
|
||||
"grant_request": grant_req.to_dict(),
|
||||
"grant_response": grant.to_dict(),
|
||||
"tmp_csar_dir": tmp_csar_dir,
|
||||
"k8s_info": {
|
||||
"namespace": namespace,
|
||||
"new_deploy_reses": new_deploy_reses
|
||||
}
|
||||
}
|
||||
script_path = os.path.join(tmp_csar_dir, coordinate_vnf)
|
||||
out = subprocess.run(["python3", script_path],
|
||||
input=pickle.dumps(script_dict),
|
||||
stdout=subprocess.PIPE, stderr=subprocess.PIPE)
|
||||
if out.returncode != 0:
|
||||
LOG.error(out)
|
||||
raise sol_ex.CoordinateVNFExecutionFailed
|
||||
|
||||
def _update_cnf_instantiated_info(self, inst, deployment_names, all_pods):
|
||||
error_resource = None
|
||||
for vnfc in inst.instantiatedVnfInfo.vnfcResourceInfo:
|
||||
if (vnfc.computeResource.vimLevelResourceType == 'Deployment'
|
||||
) and (vnfc.metadata.get('Deployment').get(
|
||||
'name') in deployment_names):
|
||||
pods_info = [pod for pod in all_pods if
|
||||
kubernetes_utils.is_match_pod_naming_rule(
|
||||
'Deployment',
|
||||
vnfc.metadata.get('Deployment').get('name'),
|
||||
pod.metadata.name)]
|
||||
if 'Pending' in [pod.status.phase for pod in pods_info] or (
|
||||
'Unknown' in [pod.status.phase for pod in pods_info]):
|
||||
pod_name = [pod.metadata.name for pod in pods_info
|
||||
if pod.status.phase in [
|
||||
'Pending', 'Unknown']][0]
|
||||
error_resource = objects.VnfcResourceInfoV2(
|
||||
id=uuidutils.generate_uuid(),
|
||||
vduId=vnfc.vduId,
|
||||
computeResource=objects.ResourceHandle(
|
||||
resourceId=pod_name,
|
||||
vimLevelResourceType='Deployment'
|
||||
),
|
||||
metadata={'Deployment': vnfc.metadata.get(
|
||||
'Deployment')}
|
||||
)
|
||||
continue
|
||||
pod_info = pods_info.pop(-1)
|
||||
vnfc.id = uuidutils.generate_uuid()
|
||||
vnfc.computeResource.resourceId = pod_info.metadata.name
|
||||
vnfc.metadata['Pod'] = pod_info.metadata.to_dict()
|
||||
all_pods.remove(pod_info)
|
||||
|
||||
if error_resource:
|
||||
inst.instantiatedVnfInfo.vnfcResourceInfo.append(error_resource)
|
tacker/sol_refactored/infra_drivers/kubernetes/kubernetes_utils.py (new file, 726 lines)

@@ -0,0 +1,726 @@
|
||||
# Copyright (C) 2022 Fujitsu
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import copy
|
||||
import ipaddress
|
||||
import os
|
||||
import re
|
||||
import time
|
||||
from urllib.parse import urlparse
|
||||
import urllib.request as urllib2
|
||||
|
||||
from kubernetes import client
|
||||
from oslo_log import log as logging
|
||||
from oslo_service import loopingcall
|
||||
import yaml
|
||||
|
||||
from tacker.sol_refactored.common import config
|
||||
from tacker.sol_refactored.common import exceptions as sol_ex
|
||||
from tacker.sol_refactored.objects.v2 import fields as v2fields
|
||||
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
CONF = config.CONF
|
||||
CHECK_INTERVAL = 10
|
||||
SUPPORTED_NAMESPACE_KINDS = [
|
||||
"Pod",
|
||||
"Binding",
|
||||
"ConfigMap",
|
||||
"LimitRange",
|
||||
"PersistentVolumeClaim",
|
||||
"PodTemplate",
|
||||
"ResourceQuota",
|
||||
"Secret",
|
||||
"ServiceAccount",
|
||||
"Service",
|
||||
"ControllerRevision",
|
||||
"DaemonSet",
|
||||
"Deployment",
|
||||
"ReplicaSet",
|
||||
"StatefulSet",
|
||||
"LocalSubjectAccessReview",
|
||||
"HorizontalPodAutoscaler",
|
||||
"Job",
|
||||
"Lease",
|
||||
"NetworkPolicy",
|
||||
"RoleBinding",
|
||||
"Role"
|
||||
]
|
||||
RESOURCE_CREATION_ORDER = [
|
||||
"StorageClass",
|
||||
"PersistentVolume",
|
||||
"PriorityClass",
|
||||
"Namespace",
|
||||
"LimitRange",
|
||||
"ResourceQuota",
|
||||
"HorizontalPodAutoscaler",
|
||||
"NetworkPolicy",
|
||||
"Service",
|
||||
"Endpoints",
|
||||
"PersistentVolumeClaim",
|
||||
"ConfigMap",
|
||||
"Secret",
|
||||
"Pod",
|
||||
"Binding",
|
||||
"StatefulSet",
|
||||
"Job",
|
||||
"Deployment",
|
||||
"DaemonSet",
|
||||
]
|
||||
STATUS_CHECK_RES = [
|
||||
"Pod",
|
||||
"Service",
|
||||
"PersistentVolumeClaim",
|
||||
"Namespace",
|
||||
"Node",
|
||||
"PersistentVolume",
|
||||
"APIService",
|
||||
"DaemonSet",
|
||||
"Deployment",
|
||||
"ReplicaSet",
|
||||
"StatefulSet",
|
||||
"Job",
|
||||
"VolumeAttachment"
|
||||
]
|
||||
|
||||
|
||||
class KubernetesClient(object):
|
||||
|
||||
def __init__(self, vim_info):
|
||||
self.k8s_api_client = init_k8s_api_client(vim_info)
|
||||
self.k8s_clients = get_k8s_clients(self.k8s_api_client)
|
||||
|
||||
def create_k8s_resource(self, sorted_k8s_reses, namespace):
|
||||
created_k8s_reses = []
|
||||
|
||||
for k8s_res in sorted_k8s_reses:
|
||||
kind = k8s_res.get('kind', '')
|
||||
api_version = k8s_res.get('apiVersion', '')
|
||||
name = k8s_res.get('metadata', {}).get('name', '')
|
||||
metadata = k8s_res.get('metadata', {})
|
||||
body = k8s_res
|
||||
k8s_client = self.k8s_clients[api_version]
|
||||
try:
|
||||
if kind in SUPPORTED_NAMESPACE_KINDS:
|
||||
k8s_method = getattr(
|
||||
k8s_client, f"create_namespaced_{convert(kind)}")
|
||||
k8s_method(namespace=namespace, body=body)
|
||||
create_k8s_res = {
|
||||
"api_version": api_version,
|
||||
"namespace": namespace,
|
||||
"kind": kind,
|
||||
"name": name,
|
||||
"metadata": metadata,
|
||||
"status": "CREATE_IN_PROCESS"
|
||||
}
|
||||
else:
|
||||
k8s_method = getattr(
|
||||
k8s_client, f"create_{convert(kind)}")
|
||||
k8s_method(body=body)
|
||||
create_k8s_res = {
|
||||
"api_version": api_version,
|
||||
"kind": kind,
|
||||
"name": name,
|
||||
"metadata": metadata,
|
||||
"status": "CREATE_IN_PROCESS"
|
||||
}
|
||||
created_k8s_reses.append(create_k8s_res)
|
||||
except Exception as ex:
|
||||
LOG.error(ex)
|
||||
raise sol_ex.ExecuteK8SResourceCreateApiFailed
|
||||
return created_k8s_reses
|
||||
|
||||
def delete_k8s_resource(self, req, sorted_k8s_reses, namespace):
|
||||
if req.terminationType:
|
||||
if req.terminationType == 'GRACEFUL' and req.obj_attr_is_set(
|
||||
'gracefulTerminationTimeout'):
|
||||
body = client.V1DeleteOptions(
|
||||
propagation_policy='Foreground',
|
||||
grace_period_seconds=req.gracefulTerminationTimeout)
|
||||
else:
|
||||
body = client.V1DeleteOptions(
|
||||
propagation_policy='Foreground',
|
||||
grace_period_seconds=0)
|
||||
|
||||
for k8s_res in sorted_k8s_reses:
|
||||
kind = k8s_res.get('kind', '')
|
||||
api_version = k8s_res.get('apiVersion', '')
|
||||
name = k8s_res.get('metadata', {}).get('name', '')
|
||||
k8s_client = self.k8s_clients[api_version]
|
||||
|
||||
if kind == 'StatefulSet':
|
||||
pvcs_for_delete = self._get_pvcs_for_delete(
|
||||
sfs_name=name, namespace=namespace)
|
||||
|
||||
if kind == 'ControllerRevision':
|
||||
body = client.V1DeleteOptions(
|
||||
propagation_policy='Background')
|
||||
try:
|
||||
if kind in SUPPORTED_NAMESPACE_KINDS:
|
||||
k8s_method = getattr(
|
||||
k8s_client, f"delete_namespaced_{convert(kind)}")
|
||||
k8s_method(name=name, namespace=namespace, body=body)
|
||||
else:
|
||||
k8s_method = getattr(
|
||||
k8s_client, f"delete_{convert(kind)}")
|
||||
k8s_method(name=name, body=body)
|
||||
k8s_res.update(status='DELETE_IN_PROGRESS')
|
||||
except Exception as ex:
|
||||
k8s_res.update(status='DELETE_IN_PROGRESS')
|
||||
LOG.debug(ex)
|
||||
|
||||
if kind == 'StatefulSet' and len(pvcs_for_delete) > 0:
|
||||
for delete_pvc_name in pvcs_for_delete:
|
||||
try:
|
||||
self.k8s_clients[
|
||||
'v1'].delete_namespaced_persistent_volume_claim(
|
||||
name=delete_pvc_name, namespace=namespace,
|
||||
body=body)
|
||||
except Exception as ex:
|
||||
LOG.debug(ex)
|
||||
|
||||
def update_k8s_resource(self, new_reses, namespace):
|
||||
for k8s_res in new_reses:
|
||||
kind = k8s_res.get('kind', '')
|
||||
api_version = k8s_res.get('apiVersion', '')
|
||||
name = k8s_res.get('metadata', {}).get('name', '')
|
||||
k8s_client = self.k8s_clients[api_version]
|
||||
k8s_method = getattr(
|
||||
k8s_client, f"patch_namespaced_{convert(kind)}")
|
||||
try:
|
||||
k8s_method(name=name, namespace=namespace, body=k8s_res)
|
||||
k8s_res.update(status='UPDATE_IN_PROCESS')
|
||||
except Exception as e:
|
||||
LOG.error(f'update resource failed. kind: {kind},'
|
||||
f' name: {name}')
|
||||
raise sol_ex.UpdateK8SResourceFailed from e
|
||||
|
||||
def list_namespaced_pods(self, namespace):
|
||||
if namespace is None:
|
||||
return None
|
||||
return self.k8s_clients['v1'].list_namespaced_pod(
|
||||
namespace=namespace).items
|
||||
|
||||
def check_deployment_exist(self, deployment_names, namespace):
|
||||
for name in deployment_names:
|
||||
try:
|
||||
self.k8s_clients['apps/v1'].read_namespaced_deployment(
|
||||
name=name, namespace=namespace)
|
||||
except Exception as ex:
|
||||
LOG.error(f'Deployment {name} to be updated does'
          f' not exist in the Kubernetes cluster.')
|
||||
raise ex
|
||||
|
||||
def _get_pvcs_for_delete(self, sfs_name, namespace):
|
||||
pvcs_for_delete = []
|
||||
try:
|
||||
resp_read_sfs = self.k8s_clients[
|
||||
'apps/v1'].read_namespaced_stateful_set(sfs_name, namespace)
|
||||
sfs_spec = resp_read_sfs.spec
|
||||
volume_claim_templates = sfs_spec.volume_claim_templates
|
||||
|
||||
try:
|
||||
resps_pvc = self.k8s_clients[
|
||||
'v1'].list_namespaced_persistent_volume_claim(namespace)
|
||||
pvcs = resps_pvc.items
|
||||
for volume_claim_template in volume_claim_templates:
|
||||
pvc_template_metadata = volume_claim_template.metadata
|
||||
match_pattern = '-'.join(
|
||||
[pvc_template_metadata.name, sfs_name, ""])
|
||||
|
||||
for pvc in pvcs:
|
||||
pvc_metadata = pvc.metadata
|
||||
pvc_name = pvc_metadata.name
|
||||
match_result = re.match(
|
||||
match_pattern + '[0-9]+$', pvc_name)
|
||||
if match_result is not None:
|
||||
pvcs_for_delete.append(pvc_name)
|
||||
except Exception:
|
||||
pass
|
||||
except Exception:
|
||||
pass
|
||||
return pvcs_for_delete
|
||||
|
||||
def _wait_completion(self, k8s_reses, operation,
|
||||
namespace=None, old_pods_names=None):
|
||||
def _check_create_status():
|
||||
for k8s_res in k8s_reses:
|
||||
if k8s_res['status'] != 'CREATE_COMPLETE':
|
||||
if k8s_res.get('kind') in STATUS_CHECK_RES:
|
||||
res_check_method = getattr(
|
||||
self, f"_check_status_"
|
||||
f"{convert(k8s_res.get('kind'))}")
|
||||
res_check_method(k8s_res)
|
||||
else:
|
||||
k8s_res.update(status='CREATE_COMPLETE')
|
||||
statuses = {res['status'] for res in k8s_reses}
|
||||
if len(statuses) == 1 and statuses.pop() == 'CREATE_COMPLETE':
|
||||
raise loopingcall.LoopingCallDone()
|
||||
if len(statuses) > 1 and (int(time.time()) - start_time > timeout):
|
||||
raise sol_ex.CreateK8SResourceFailed
|
||||
|
||||
def _check_delete_status():
|
||||
for k8s_res in k8s_reses:
|
||||
kind = k8s_res.get('kind', '')
|
||||
api_version = k8s_res.get('apiVersion', '')
|
||||
name = k8s_res.get('metadata', {}).get('name', '')
|
||||
k8s_client = self.k8s_clients[api_version]
|
||||
if k8s_res['status'] != 'DELETE_COMPLETE':
|
||||
try:
|
||||
if kind in SUPPORTED_NAMESPACE_KINDS:
|
||||
k8s_method = getattr(
|
||||
k8s_client, f'read_namespaced_{convert(kind)}')
|
||||
k8s_method(name=name, namespace=namespace)
|
||||
else:
|
||||
k8s_method = getattr(
|
||||
k8s_client, f'read_{convert(kind)}')
|
||||
k8s_method(name=name)
|
||||
except Exception:
|
||||
k8s_res.update(status='DELETE_COMPLETE')
|
||||
statuses = {res['status'] for res in k8s_reses}
|
||||
if len(statuses) == 1 and statuses.pop() == 'DELETE_COMPLETE':
|
||||
raise loopingcall.LoopingCallDone()
|
||||
if len(statuses) > 1 and (int(time.time()) - start_time > timeout):
|
||||
raise sol_ex.DeleteK8SResourceFailed
|
||||
|
||||
def _check_update_status():
|
||||
all_namespaced_pods = self.list_namespaced_pods(namespace)
|
||||
for k8s_res in k8s_reses:
|
||||
if k8s_res['status'] not in ['UPDATE_COMPLETE',
|
||||
'UPDATE_FAILED']:
|
||||
kind = k8s_res.get('kind', '')
|
||||
api_version = k8s_res.get('apiVersion', '')
|
||||
name = k8s_res.get('metadata', {}).get('name', '')
|
||||
k8s_client = self.k8s_clients[api_version]
|
||||
k8s_method = getattr(
|
||||
k8s_client, f'read_namespaced_{convert(kind)}')
|
||||
k8s_info = k8s_method(name=name, namespace=namespace)
|
||||
replicas = k8s_info.spec.replicas
|
||||
|
||||
pods_info = [pod for pod in all_namespaced_pods if
|
||||
is_match_pod_naming_rule(
|
||||
kind, name, pod.metadata.name)]
|
||||
pending_flag = False
|
||||
unknown_flag = False
|
||||
for pod_info in pods_info:
|
||||
if pod_info.status.phase == 'Pending':
|
||||
pending_flag = True
|
||||
elif pod_info.status.phase == 'Unknown':
|
||||
unknown_flag = True
|
||||
|
||||
if not pending_flag and not unknown_flag and len(
|
||||
pods_info) == replicas and (
|
||||
pods_info[0].metadata.name not in old_pods_names):
|
||||
k8s_res.update(status='UPDATE_COMPLETE')
|
||||
|
||||
if unknown_flag:
|
||||
k8s_res.update(status='UPDATE_FAILED')
|
||||
|
||||
statuses = {res['status'] for res in k8s_reses}
|
||||
if len(statuses) == 1 and list(statuses)[0] == 'UPDATE_COMPLETE':
|
||||
raise loopingcall.LoopingCallDone()
|
||||
if (list(statuses)[0] == 'UPDATE_IN_PROCESS' and (int(
|
||||
time.time()) - start_time > timeout)) or (
|
||||
'UPDATE_FAILED' in statuses):
|
||||
raise sol_ex.UpdateK8SResourceFailed
|
||||
|
||||
start_time = int(time.time())
|
||||
timeout = CONF.v2_vnfm.kubernetes_vim_rsc_wait_timeout
|
||||
|
||||
if operation == v2fields.LcmOperationType.INSTANTIATE:
|
||||
timer = loopingcall.FixedIntervalLoopingCall(_check_create_status)
|
||||
elif operation == v2fields.LcmOperationType.TERMINATE:
|
||||
timer = loopingcall.FixedIntervalLoopingCall(_check_delete_status)
|
||||
else:
|
||||
timer = loopingcall.FixedIntervalLoopingCall(_check_update_status)
|
||||
timer.start(interval=CHECK_INTERVAL).wait()
|
||||
|
||||
def _check_status_pod(self, k8s_res):
|
||||
pod = self.k8s_clients[k8s_res.get(
|
||||
'api_version')].read_namespaced_pod(
|
||||
namespace=k8s_res.get('namespace'),
|
||||
name=k8s_res.get('name'))
|
||||
|
||||
if pod.status.phase and pod.status.phase == 'Running':
|
||||
k8s_res.update(status='CREATE_COMPLETE')
|
||||
|
||||
def _check_status_stateful_set(self, k8s_res):
|
||||
namespace = k8s_res.get('namespace')
|
||||
name = k8s_res.get('name')
|
||||
|
||||
stateful_set = self.k8s_clients[k8s_res.get(
|
||||
'api_version')].read_namespaced_stateful_set(
|
||||
namespace=namespace, name=name)
|
||||
pvc_statuses = []
|
||||
replicas = stateful_set.status.replicas
|
||||
if replicas and replicas == stateful_set.status.ready_replicas:
|
||||
for i in range(0, stateful_set.spec.replicas):
|
||||
volume_claim_templates = (
|
||||
stateful_set.spec.volume_claim_templates)
|
||||
for volume_claim_template in volume_claim_templates:
|
||||
pvc_name = "-".join(
|
||||
[volume_claim_template.metadata.name,
|
||||
k8s_res.get('name'), str(i)])
|
||||
persistent_volume_claim = (
|
||||
self.k8s_clients[
|
||||
'v1'].read_namespaced_persistent_volume_claim(
|
||||
namespace=namespace, name=pvc_name))
|
||||
pvc_statuses.append(persistent_volume_claim.status.phase)
|
||||
if len(set(pvc_statuses)) == 1 and pvc_statuses[0] == 'Bound':
|
||||
k8s_res.update(status='CREATE_COMPLETE')
|
||||
|
||||
def _check_status_service(self, k8s_res):
|
||||
namespace = k8s_res.get('namespace')
|
||||
name = k8s_res.get('name')
|
||||
|
||||
service = self.k8s_clients[k8s_res.get(
|
||||
'api_version')].read_namespaced_service(
|
||||
namespace=namespace, name=name)
|
||||
status_flag = False
|
||||
if service.spec.cluster_ip in ['', None] or check_is_ip(
|
||||
service.spec.cluster_ip):
|
||||
try:
|
||||
endpoint = self.k8s_clients['v1'].read_namespaced_endpoints(
|
||||
namespace=namespace, name=name)
|
||||
if endpoint:
|
||||
status_flag = True
|
||||
except Exception as e:
|
||||
raise sol_ex.ReadEndpointsFalse(
|
||||
kind=k8s_res.get('kind')) from e
|
||||
|
||||
if status_flag:
|
||||
k8s_res.update(status='CREATE_COMPLETE')
|
||||
|
||||
def _check_status_persistent_volume_claim(self, k8s_res):
|
||||
namespace = k8s_res.get('namespace')
|
||||
name = k8s_res.get('name')
|
||||
|
||||
claim = self.k8s_clients[k8s_res.get(
|
||||
'api_version')].read_namespaced_persistent_volume_claim(
|
||||
namespace=namespace, name=name)
|
||||
|
||||
if claim.status.phase and claim.status.phase == 'Bound':
|
||||
k8s_res.update(status='CREATE_COMPLETE')
|
||||
|
||||
def _check_status_namespace(self, k8s_res):
|
||||
name = k8s_res.get('name')
|
||||
|
||||
name_space = self.k8s_clients[k8s_res.get(
|
||||
'api_version')].read_namespace(name=name)
|
||||
if name_space.status.phase and name_space.status.phase == 'Active':
|
||||
k8s_res.update(status='CREATE_COMPLETE')
|
||||
|
||||
def _check_status_node(self, k8s_res):
|
||||
name = k8s_res.get('name')
|
||||
|
||||
node = self.k8s_clients[k8s_res.get(
|
||||
'api_version')].read_node(name=name)
|
||||
status_flag = False
|
||||
for condition in node.status.conditions:
|
||||
if condition.type == 'Ready':
|
||||
if condition.status == 'True':
|
||||
status_flag = True
|
||||
break
|
||||
else:
|
||||
continue
|
||||
if status_flag:
|
||||
k8s_res.update(status='CREATE_COMPLETE')
|
||||
|
||||
def _check_status_persistent_volume(self, k8s_res):
|
||||
name = k8s_res.get('name')
|
||||
|
||||
volume = self.k8s_clients[k8s_res.get(
|
||||
'api_version')].read_persistent_volume(name=name)
|
||||
if volume.status.phase and volume.status.phase in [
|
||||
'Available', 'Bound']:
|
||||
k8s_res.update(status='CREATE_COMPLETE')
|
||||
|
||||
def _check_status_api_service(self, k8s_res):
|
||||
name = k8s_res.get('name')
|
||||
|
||||
api_service = self.k8s_clients[k8s_res.get(
|
||||
'api_version')].read_api_service(name=name)
|
||||
status_flag = False
|
||||
for condition in api_service.status.conditions:
|
||||
if condition.type == 'Available':
|
||||
if condition.status == 'True':
|
||||
status_flag = True
|
||||
break
|
||||
else:
|
||||
continue
|
||||
if status_flag:
|
||||
k8s_res.update(status='CREATE_COMPLETE')
|
||||
|
||||
def _check_status_daemon_set(self, k8s_res):
|
||||
namespace = k8s_res.get('namespace')
|
||||
name = k8s_res.get('name')
|
||||
|
||||
daemon_set = self.k8s_clients[k8s_res.get(
|
||||
'api_version')].read_namespaced_daemon_set(
|
||||
namespace=namespace, name=name)
|
||||
if daemon_set.status.desired_number_scheduled and (
|
||||
daemon_set.status.desired_number_scheduled ==
|
||||
daemon_set.status.number_ready):
|
||||
k8s_res.update(status='CREATE_COMPLETE')
|
||||
|
||||
def _check_status_deployment(self, k8s_res):
|
||||
namespace = k8s_res.get('namespace')
|
||||
name = k8s_res.get('name')
|
||||
|
||||
deployment = self.k8s_clients[k8s_res.get(
|
||||
'api_version')].read_namespaced_deployment(
|
||||
namespace=namespace, name=name)
|
||||
if deployment.status.replicas and (
|
||||
deployment.status.replicas ==
|
||||
deployment.status.ready_replicas):
|
||||
k8s_res.update(status='CREATE_COMPLETE')
|
||||
|
||||
def _check_status_replica_set(self, k8s_res):
|
||||
namespace = k8s_res.get('namespace')
|
||||
name = k8s_res.get('name')
|
||||
|
||||
replica_set = self.k8s_clients[k8s_res.get(
|
||||
'api_version')].read_namespaced_replica_set(
|
||||
namespace=namespace, name=name)
|
||||
if replica_set.status.replicas and (
|
||||
replica_set.status.replicas ==
|
||||
replica_set.status.ready_replicas):
|
||||
k8s_res.update(status='CREATE_COMPLETE')
|
||||
|
||||
def _check_status_job(self, k8s_res):
|
||||
namespace = k8s_res.get('namespace')
|
||||
name = k8s_res.get('name')
|
||||
|
||||
job = self.k8s_clients[k8s_res.get(
|
||||
'api_version')].read_namespaced_job(
|
||||
namespace=namespace, name=name)
|
||||
if job.spec.completions and (
|
||||
job.spec.completions == job.status.succeeded):
|
||||
k8s_res.update(status='CREATE_COMPLETE')
|
||||
|
||||
def _check_status_volume_attachment(self, k8s_res):
|
||||
name = k8s_res.get('name')
|
||||
|
||||
volume = self.k8s_clients[k8s_res.get(
|
||||
'api_version')].read_volume_attachment(name=name)
|
||||
if volume.status.attached:
|
||||
k8s_res.update(status='CREATE_COMPLETE')
|
||||
|
||||
def wait_k8s_res_create(self, created_k8s_reses):
|
||||
self._wait_completion(created_k8s_reses, operation='INSTANTIATE')
|
||||
|
||||
def wait_k8s_res_delete(self, sorted_k8s_reses, namespace):
|
||||
self._wait_completion(
|
||||
sorted_k8s_reses, operation='TERMINATE', namespace=namespace)
|
||||
|
||||
def wait_k8s_res_update(self, new_k8s_reses, namespace,
|
||||
old_pods_names=None):
|
||||
self._wait_completion(
|
||||
new_k8s_reses, operation='UPDATE', namespace=namespace,
|
||||
old_pods_names=old_pods_names)
|
||||
|
||||
|
||||
def is_match_pod_naming_rule(rsc_kind, rsc_name, pod_name):
|
||||
match_result = None
|
||||
if rsc_kind == 'Pod':
|
||||
# Expected example: name
|
||||
if rsc_name == pod_name:
|
||||
match_result = True
|
||||
elif rsc_kind == 'Deployment':
|
||||
# Expected example: name-012789abef-019az
|
||||
# NOTE(horie): The naming rule of a Pod in a Deployment is
# "(deployment name)-(pod template hash)-(5 characters)".
# The "pod template hash" string is generated from a 32-bit hash.
# It may be 1 to 10 characters long, but the lower limit is not
# clear from the Kubernetes source code.
|
||||
match_result = re.match(
|
||||
rsc_name + '-([0-9a-f]{1,10})-([0-9a-z]{5})+$',
|
||||
pod_name)
|
||||
elif rsc_kind in ('ReplicaSet', 'DaemonSet'):
|
||||
# Expected example: name-019az
|
||||
match_result = re.match(
|
||||
rsc_name + '-([0-9a-z]{5})+$',
|
||||
pod_name)
|
||||
elif rsc_kind == 'StatefulSet':
|
||||
# Expected example: name-0
|
||||
match_result = re.match(
|
||||
rsc_name + '-[0-9]+$',
|
||||
pod_name)
|
||||
if match_result:
|
||||
return True
|
||||
|
||||
return False
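# Illustration (not part of this patch): expected behaviour of the naming-rule
# check above, using hypothetical resource and Pod names.
#
#   is_match_pod_naming_rule('Deployment', 'vdu1', 'vdu1-5b8764f7cd-xk9tz')  # True
#   is_match_pod_naming_rule('StatefulSet', 'vdu2', 'vdu2-0')                # True
#   is_match_pod_naming_rule('Pod', 'vdu3', 'vdu3-0')                        # False (exact match required)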
|
||||
|
||||
|
||||
def check_is_ip(ip_addr):
|
||||
try:
|
||||
ipaddress.ip_address(ip_addr)
|
||||
return True
|
||||
except ValueError:
|
||||
return False
|
||||
|
||||
|
||||
def convert(tmp_name):
|
||||
name_with_underscores = re.sub(
|
||||
'(.)([A-Z][a-z]+)', r'\1_\2', tmp_name)
|
||||
return re.sub('([a-z0-9])([A-Z])', r'\1_\2',
|
||||
name_with_underscores).lower()
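# Illustration (not part of this patch): convert() turns a CamelCase Kubernetes
# kind into the snake_case suffix used to build python-kubernetes client method
# names such as create_namespaced_deployment or read_namespaced_stateful_set.
#
#   convert('Deployment')             # -> 'deployment'
#   convert('StatefulSet')            # -> 'stateful_set'
#   convert('PersistentVolumeClaim')  # -> 'persistent_volume_claim'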
|
||||
|
||||
|
||||
def init_k8s_api_client(vim_info):
|
||||
k8s_config = client.Configuration()
|
||||
k8s_config.host = vim_info.interfaceInfo['endpoint']
|
||||
|
||||
if ('username' in vim_info.accessInfo and 'password'
|
||||
in vim_info.accessInfo and vim_info.accessInfo.get(
|
||||
'password') is not None):
|
||||
k8s_config.username = vim_info.accessInfo['username']
|
||||
k8s_config.password = vim_info.accessInfo['password']
|
||||
basic_token = k8s_config.get_basic_auth_token()
|
||||
k8s_config.api_key['authorization'] = basic_token
|
||||
|
||||
if 'bearer_token' in vim_info.accessInfo:
|
||||
k8s_config.api_key_prefix['authorization'] = 'Bearer'
|
||||
k8s_config.api_key['authorization'] = vim_info.accessInfo[
|
||||
'bearer_token']
|
||||
|
||||
if 'ssl_ca_cert' in vim_info.accessInfo:
|
||||
k8s_config.ssl_ca_cert = vim_info.accessInfo['ssl_ca_cert']
|
||||
k8s_config.verify_ssl = True
|
||||
else:
|
||||
k8s_config.verify_ssl = False
|
||||
|
||||
return client.api_client.ApiClient(configuration=k8s_config)
|
||||
|
||||
|
||||
def get_k8s_clients(k8s_api_client):
|
||||
k8s_clients = {
|
||||
"v1": client.CoreV1Api(api_client=k8s_api_client),
|
||||
"apiregistration.k8s.io/v1":
|
||||
client.ApiregistrationV1Api(api_client=k8s_api_client),
|
||||
"apps/v1": client.AppsV1Api(api_client=k8s_api_client),
|
||||
"authentication.k8s.io/v1":
|
||||
client.AuthenticationV1Api(api_client=k8s_api_client),
|
||||
"authorization.k8s.io/v1":
|
||||
client.AuthorizationV1Api(api_client=k8s_api_client),
|
||||
"autoscaling/v1": client.AutoscalingV1Api(
|
||||
api_client=k8s_api_client),
|
||||
"batch/v1": client.BatchV1Api(api_client=k8s_api_client),
|
||||
"coordination.k8s.io/v1":
|
||||
client.CoordinationV1Api(api_client=k8s_api_client),
|
||||
"networking.k8s.io/v1":
|
||||
client.NetworkingV1Api(api_client=k8s_api_client),
|
||||
"rbac.authorization.k8s.io/v1":
|
||||
client.RbacAuthorizationV1Api(api_client=k8s_api_client),
|
||||
"scheduling.k8s.io/v1":
|
||||
client.SchedulingV1Api(api_client=k8s_api_client),
|
||||
"storage.k8s.io/v1":
|
||||
client.StorageV1Api(api_client=k8s_api_client)
|
||||
}
|
||||
|
||||
return k8s_clients
|
||||
|
||||
|
||||
def get_k8s_json_file(req, inst, target_k8s_files, vnfd, operation):

    def _update_k8s_resources(namespace):
        for k8s_res in k8s_resources:
            if (k8s_res.get('kind', '') in SUPPORTED_NAMESPACE_KINDS and
                    k8s_res.get('metadata') is None):
                k8s_res.update(metadata={})
            if k8s_res.get('kind', '') in SUPPORTED_NAMESPACE_KINDS:
                k8s_res['metadata'].update(namespace=namespace)

    k8s_resources = []

    for target_k8s_file in target_k8s_files:
        if ((urlparse(target_k8s_file).scheme == 'file') or
                (bool(urlparse(target_k8s_file).scheme) and
                 bool(urlparse(target_k8s_file).netloc))):
            with urllib2.urlopen(target_k8s_file) as file_object:
                file_content = file_object.read()
        else:
            file_path = os.path.join(vnfd.csar_dir, target_k8s_file)
            with open(file_path, 'rb') as file_object:
                file_content = file_object.read()

        k8s_resources.extend(list(yaml.safe_load_all(file_content)))

    # check namespace
    if operation == v2fields.LcmOperationType.INSTANTIATE:
        if req.additionalParams.get('namespace') is None:
            _update_k8s_resources('default')
            namespaces = {
                k8s_res['metadata']['namespace'] for k8s_res in
                k8s_resources if k8s_res.get('kind') in
                SUPPORTED_NAMESPACE_KINDS}
            if len(namespaces) > 1:
                raise sol_ex.NamespaceNotUniform()
            return k8s_resources, namespaces.pop() if namespaces else None

        _update_k8s_resources(req.additionalParams.get('namespace'))
        return k8s_resources, req.additionalParams.get('namespace')

    return k8s_resources, inst.metadata.get('namespace')

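# NOTE (illustrative): for INSTANTIATE the namespace comes from
# additionalParams['namespace'] when supplied and falls back to 'default',
# while other operations reuse the namespace stored in inst.metadata.
# A sketch under those assumptions (req/inst/vnfd are the objects already
# passed around in this module):
#
#   reses, ns = get_k8s_json_file(
#       req, inst, ['Files/kubernetes/deployment.yaml'], vnfd,
#       v2fields.LcmOperationType.INSTANTIATE)
#   # ns == req.additionalParams.get('namespace') or 'default'
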
def sort_k8s_resource(k8s_resources, operation):
    pos = 0
    sorted_k8s_reses = []

    if operation == v2fields.LcmOperationType.INSTANTIATE:
        sort_order = RESOURCE_CREATION_ORDER
    else:
        sort_order = list(reversed(RESOURCE_CREATION_ORDER))

    copy_k8s_resources = copy.deepcopy(k8s_resources)

    for kind in sort_order:
        for res_index, res in enumerate(copy_k8s_resources):
            if res.get('kind', '') == kind:
                index = k8s_resources.index(res)
                sorted_k8s_reses.append(k8s_resources.pop(index))
        # Other kinds (such as PodTemplate, Node, and so on) that are
        # not present in `RESOURCE_CREATION_ORDER` are inserted at the
        # position of the Service kind and created/deleted in the same
        # order as the Service kind.
        if kind == 'Service':
            pos = len(sorted_k8s_reses)

    for k8s_res in k8s_resources:
        sorted_k8s_reses.insert(pos, k8s_res)

    return sorted_k8s_reses

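# NOTE (illustrative): RESOURCE_CREATION_ORDER is defined earlier in this
# module; assuming it lists 'Namespace' first and 'Service' before
# 'Deployment', the behaviour looks like:
#
#   reses = [{'kind': 'Deployment'}, {'kind': 'Namespace'}, {'kind': 'Node'}]
#   ordered = sort_k8s_resource(reses, v2fields.LcmOperationType.INSTANTIATE)
#   # -> Namespace, Node (unknown kinds take the Service slot), Deployment
#   # TERMINATE and other operations use the reversed order.
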
def get_new_deployment_body(
        req, inst, vnfd, deployment_names, operation):
    if operation == v2fields.LcmOperationType.CHANGE_VNFPKG:
        target_k8s_files = req.additionalParams.get(
            'lcm-kubernetes-def-files')
    else:
        target_k8s_files = inst.metadata.get('lcm-kubernetes-def-files')

    new_k8s_resources, namespace = get_k8s_json_file(
        req, inst, target_k8s_files, vnfd, operation)

    new_deploy_reses = []
    for k8s_res in new_k8s_resources:
        if k8s_res.get('kind', '') == 'Deployment' and k8s_res.get(
                'metadata', {}).get('name', '') in deployment_names:
            k8s_res['metadata']['namespace'] = namespace
            new_deploy_reses.append(k8s_res)

    return new_deploy_reses
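# NOTE (illustrative): during Change Current VNF Package the caller passes
# the names of the Deployments being replaced (e.g. 'vdu2' in the sample
# packages) so that only those manifests are returned, with the resolved
# namespace filled in:
#
#   new_deploy_reses = get_new_deployment_body(
#       req, inst, vnfd, deployment_names={'vdu2'},
#       operation=v2fields.LcmOperationType.CHANGE_VNFPKG)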
307
tacker/tests/functional/sol_kubernetes_v2/base_v2.py
Normal file
307
tacker/tests/functional/sol_kubernetes_v2/base_v2.py
Normal file
@ -0,0 +1,307 @@
|
||||
# Copyright (C) 2022 Fujitsu
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
import os
|
||||
import shutil
|
||||
import tempfile
|
||||
import time
|
||||
|
||||
from oslo_config import cfg
|
||||
from oslo_utils import uuidutils
|
||||
from tempest.lib import base
|
||||
import yaml
|
||||
|
||||
from tacker.sol_refactored.common import http_client
|
||||
from tacker.sol_refactored import objects
|
||||
from tacker.tests.functional.sol_v2 import utils
|
||||
from tacker.tests import utils as base_utils
|
||||
from tacker import version
|
||||
|
||||
VNF_PACKAGE_UPLOAD_TIMEOUT = 300
|
||||
VNF_INSTANTIATE_TIMEOUT = 600
|
||||
VNF_TERMINATE_TIMEOUT = 600
|
||||
RETRY_WAIT_TIME = 5
|
||||
|
||||
|
||||
class BaseVnfLcmKubernetesV2Test(base.BaseTestCase):
    """Base test case class for SOL v2 kubernetes functional tests."""

    @classmethod
    def setUpClass(cls):
        super(BaseVnfLcmKubernetesV2Test, cls).setUpClass()

        cfg.CONF(args=['--config-file', '/etc/tacker/tacker.conf'],
                 project='tacker',
                 version='%%prog %s' % version.version_info.release_string())
        objects.register_all()

k8s_vim_info = cls.get_k8s_vim_info()
|
||||
cls.auth_url = k8s_vim_info.interfaceInfo['endpoint']
|
||||
cls.bearer_token = k8s_vim_info.accessInfo['bearer_token']
|
||||
|
||||
vim_info = cls.get_vim_info()
|
||||
auth = http_client.KeystonePasswordAuthHandle(
|
||||
auth_url=vim_info.interfaceInfo['endpoint'],
|
||||
username=vim_info.accessInfo['username'],
|
||||
password=vim_info.accessInfo['password'],
|
||||
project_name=vim_info.accessInfo['project'],
|
||||
user_domain_name=vim_info.accessInfo['userDomain'],
|
||||
project_domain_name=vim_info.accessInfo['projectDomain']
|
||||
)
|
||||
cls.tacker_client = http_client.HttpClient(auth)
|
||||
|
||||
@classmethod
|
||||
def get_vim_info(cls):
|
||||
vim_params = yaml.safe_load(base_utils.read_file('local-vim.yaml'))
|
||||
vim_params['auth_url'] += '/v3'
|
||||
|
||||
vim_info = objects.VimConnectionInfo(
|
||||
interfaceInfo={'endpoint': vim_params['auth_url']},
|
||||
accessInfo={
|
||||
'region': 'RegionOne',
|
||||
'project': vim_params['project_name'],
|
||||
'username': vim_params['username'],
|
||||
'password': vim_params['password'],
|
||||
'userDomain': vim_params['user_domain_name'],
|
||||
'projectDomain': vim_params['project_domain_name']
|
||||
}
|
||||
)
|
||||
|
||||
return vim_info
|
||||
|
||||
@classmethod
|
||||
def get_k8s_vim_info(cls):
|
||||
vim_params = yaml.safe_load(base_utils.read_file('local-k8s-vim.yaml'))
|
||||
|
||||
vim_info = objects.VimConnectionInfo(
|
||||
interfaceInfo={'endpoint': vim_params['auth_url']},
|
||||
accessInfo={
|
||||
'region': 'RegionOne',
|
||||
'bearer_token': vim_params['bearer_token']
|
||||
}
|
||||
)
|
||||
return vim_info
|
||||
|
||||
    @classmethod
    def get_k8s_vim_id(cls):
        vim_list = cls.list_vims(cls)
        if len(vim_list.values()) == 0:
            assert False, "vim_list is empty: default VIM is missing"

        for vims in vim_list.values():
            for vim in vims:
                if vim['name'] == 'vim-kubernetes':
                    return vim['id']
        return None

@classmethod
|
||||
def create_vnf_package(cls, sample_path, user_data={}, image_path=None):
|
||||
vnfd_id = uuidutils.generate_uuid()
|
||||
tmp_dir = tempfile.mkdtemp()
|
||||
|
||||
utils.make_zip(sample_path, tmp_dir, vnfd_id, image_path)
|
||||
|
||||
zip_file_name = os.path.basename(os.path.abspath(sample_path)) + ".zip"
|
||||
zip_file_path = os.path.join(tmp_dir, zip_file_name)
|
||||
|
||||
path = "/vnfpkgm/v1/vnf_packages"
|
||||
req_body = {'userDefinedData': user_data}
|
||||
resp, body = cls.tacker_client.do_request(
|
||||
path, "POST", expected_status=[201], body=req_body)
|
||||
|
||||
pkg_id = body['id']
|
||||
|
||||
with open(zip_file_path, 'rb') as fp:
|
||||
path = f"/vnfpkgm/v1/vnf_packages/{pkg_id}/package_content"
|
||||
resp, body = cls.tacker_client.do_request(
|
||||
path, "PUT", body=fp, content_type='application/zip',
|
||||
expected_status=[202])
|
||||
|
||||
# wait for onboard
|
||||
timeout = VNF_PACKAGE_UPLOAD_TIMEOUT
|
||||
start_time = int(time.time())
|
||||
path = f"/vnfpkgm/v1/vnf_packages/{pkg_id}"
|
||||
while True:
|
||||
resp, body = cls.tacker_client.do_request(
|
||||
path, "GET", expected_status=[200])
|
||||
if body['onboardingState'] == "ONBOARDED":
|
||||
break
|
||||
|
||||
if ((int(time.time()) - start_time) > timeout):
|
||||
raise Exception("Failed to onboard vnf package")
|
||||
|
||||
time.sleep(5)
|
||||
|
||||
shutil.rmtree(tmp_dir)
|
||||
|
||||
return pkg_id, vnfd_id
|
||||
|
||||
@classmethod
|
||||
def delete_vnf_package(cls, pkg_id):
|
||||
path = f"/vnfpkgm/v1/vnf_packages/{pkg_id}"
|
||||
req_body = {"operationalState": "DISABLED"}
|
||||
resp, _ = cls.tacker_client.do_request(
|
||||
path, "PATCH", body=req_body)
|
||||
if resp.status_code != 200:
|
||||
print("failed to set operationalState to DISABLED")
|
||||
return
|
||||
|
||||
cls.tacker_client.do_request(path, "DELETE")
|
||||
|
||||
def list_vims(self):
|
||||
path = "/v1.0/vims.json"
|
||||
resp, body = self.tacker_client.do_request(path, "GET")
|
||||
return body
|
||||
|
||||
def get_vnf_package(self, pkg_id):
|
||||
path = f"/vnfpkgm/v1/vnf_packages/{pkg_id}"
|
||||
resp, body = self.tacker_client.do_request(path, "GET")
|
||||
return body
|
||||
|
||||
def create_vnf_instance(self, req_body):
|
||||
path = "/vnflcm/v2/vnf_instances"
|
||||
return self.tacker_client.do_request(
|
||||
path, "POST", body=req_body, version="2.0.0")
|
||||
|
||||
def delete_vnf_instance(self, inst_id):
|
||||
path = f"/vnflcm/v2/vnf_instances/{inst_id}"
|
||||
return self.tacker_client.do_request(
|
||||
path, "DELETE", version="2.0.0")
|
||||
|
||||
def show_vnf_instance(self, inst_id):
|
||||
path = f"/vnflcm/v2/vnf_instances/{inst_id}"
|
||||
return self.tacker_client.do_request(
|
||||
path, "GET", version="2.0.0")
|
||||
|
||||
def instantiate_vnf_instance(self, inst_id, req_body):
|
||||
path = f"/vnflcm/v2/vnf_instances/{inst_id}/instantiate"
|
||||
return self.tacker_client.do_request(
|
||||
path, "POST", body=req_body, version="2.0.0")
|
||||
|
||||
def change_vnfpkg(self, inst_id, req_body):
|
||||
path = f"/vnflcm/v2/vnf_instances/{inst_id}/change_vnfpkg"
|
||||
return self.tacker_client.do_request(
|
||||
path, "POST", body=req_body, version="2.0.0")
|
||||
|
||||
def terminate_vnf_instance(self, inst_id, req_body):
|
||||
path = f"/vnflcm/v2/vnf_instances/{inst_id}/terminate"
|
||||
return self.tacker_client.do_request(
|
||||
path, "POST", body=req_body, version="2.0.0")
|
||||
|
||||
def rollback_lcmocc(self, lcmocc_id):
|
||||
path = f"/vnflcm/v2/vnf_lcm_op_occs/{lcmocc_id}/rollback"
|
||||
return self.tacker_client.do_request(
|
||||
path, "POST", version="2.0.0")
|
||||
|
||||
def retry_lcmocc(self, lcmocc_id):
|
||||
path = f"/vnflcm/v2/vnf_lcm_op_occs/{lcmocc_id}/retry"
|
||||
return self.tacker_client.do_request(
|
||||
path, "POST", version="2.0.0")
|
||||
|
||||
def fail_lcmocc(self, lcmocc_id):
|
||||
path = f"/vnflcm/v2/vnf_lcm_op_occs/{lcmocc_id}/fail"
|
||||
return self.tacker_client.do_request(
|
||||
path, "POST", version="2.0.0")
|
||||
|
||||
def show_lcmocc(self, lcmocc_id):
|
||||
path = f"/vnflcm/v2/vnf_lcm_op_occs/{lcmocc_id}"
|
||||
return self.tacker_client.do_request(
|
||||
path, "GET", version="2.0.0")
|
||||
|
||||
def _check_resp_headers(self, resp, supported_headers):
|
||||
unsupported_headers = ['Link', 'Retry-After',
|
||||
'Content-Range', 'WWW-Authenticate']
|
||||
for s in supported_headers:
|
||||
if s not in resp.headers:
|
||||
raise Exception("Supported header doesn't exist: %s" % s)
|
||||
for u in unsupported_headers:
|
||||
if u in resp.headers:
|
||||
raise Exception("Unsupported header exist: %s" % u)
|
||||
|
||||
def check_resp_headers_in_create(self, resp):
|
||||
# includes location header and response body
|
||||
supported_headers = ['Version', 'Location', 'Content-Type',
|
||||
'Accept-Ranges']
|
||||
self._check_resp_headers(resp, supported_headers)
|
||||
|
||||
def check_resp_body(self, body, expected_attrs):
|
||||
for attr in expected_attrs:
|
||||
if attr not in body:
|
||||
raise Exception("Expected attribute doesn't exist: %s" % attr)
|
||||
|
||||
def check_resp_headers_in_operation_task(self, resp):
|
||||
# includes location header and no response body
|
||||
supported_headers = ['Version', 'Location']
|
||||
self._check_resp_headers(resp, supported_headers)
|
||||
|
||||
def check_resp_headers_in_get(self, resp):
|
||||
# includes response body and no location header
|
||||
supported_headers = ['Version', 'Content-Type',
|
||||
'Accept-Ranges']
|
||||
self._check_resp_headers(resp, supported_headers)
|
||||
|
||||
def check_resp_headers_in_delete(self, resp):
|
||||
# no location header and response body
|
||||
supported_headers = ['Version']
|
||||
self._check_resp_headers(resp, supported_headers)
|
||||
|
||||
def wait_lcmocc_complete(self, lcmocc_id):
|
||||
        # NOTE: It is not necessary to set a timeout here because the
        # operation itself sets a timeout and the state will become
        # 'FAILED_TEMP' when it expires.
|
||||
path = f"/vnflcm/v2/vnf_lcm_op_occs/{lcmocc_id}"
|
||||
while True:
|
||||
time.sleep(5)
|
||||
_, body = self.tacker_client.do_request(
|
||||
path, "GET", expected_status=[200], version="2.0.0")
|
||||
state = body['operationState']
|
||||
if state == 'COMPLETED':
|
||||
return
|
||||
elif state in ['STARTING', 'PROCESSING']:
|
||||
continue
|
||||
else: # FAILED_TEMP or ROLLED_BACK
|
||||
raise Exception("Operation failed. state: %s" % state)
|
||||
|
||||
def wait_lcmocc_failed_temp(self, lcmocc_id):
|
||||
        # NOTE: It is not necessary to set a timeout here because the
        # operation itself sets a timeout and the state will become
        # 'FAILED_TEMP' when it expires.
|
||||
path = f"/vnflcm/v2/vnf_lcm_op_occs/{lcmocc_id}"
|
||||
while True:
|
||||
time.sleep(5)
|
||||
_, body = self.tacker_client.do_request(
|
||||
path, "GET", expected_status=[200], version="2.0.0")
|
||||
state = body['operationState']
|
||||
if state == 'FAILED_TEMP':
|
||||
return
|
||||
elif state in ['STARTING', 'PROCESSING']:
|
||||
continue
|
||||
elif state == 'COMPLETED':
|
||||
raise Exception("Operation unexpected COMPLETED.")
|
||||
else: # ROLLED_BACK
|
||||
raise Exception("Operation failed. state: %s" % state)
|
||||
|
||||
def wait_lcmocc_rolled_back(self, lcmocc_id):
|
||||
        # NOTE: It is not necessary to set a timeout here because the
        # operation itself sets a timeout and the state will become
        # 'FAILED_TEMP' when it expires.
|
||||
path = f"/vnflcm/v2/vnf_lcm_op_occs/{lcmocc_id}"
|
||||
while True:
|
||||
time.sleep(5)
|
||||
_, body = self.tacker_client.do_request(
|
||||
path, "GET", expected_status=[200], version="2.0.0")
|
||||
state = body['operationState']
|
||||
if state == 'ROLLED_BACK':
|
||||
return
|
||||
if state in ['ROLLING_BACK']:
|
||||
continue
|
||||
|
||||
raise Exception(f"Operation failed. state: {state}")
|
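# NOTE (illustrative): a typical functional test built on the helpers above
# would chain them roughly as follows (the sample path and variable names
# are assumptions, not taken from this patch):
#
#   pkg_id, vnfd_id = cls.create_vnf_package(
#       './samples/test_instantiate_cnf_resources')
#   _, body = self.create_vnf_instance(
#       paramgen.test_instantiate_cnf_resources_create(vnfd_id))
#   inst_id = body['id']
#   resp, _ = self.instantiate_vnf_instance(
#       inst_id,
#       paramgen.max_sample_instantiate(self.auth_url, self.bearer_token))
#   lcmocc_id = os.path.basename(resp.headers['Location'])
#   self.wait_lcmocc_complete(lcmocc_id)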
286
tacker/tests/functional/sol_kubernetes_v2/paramgen.py
Normal file
286
tacker/tests/functional/sol_kubernetes_v2/paramgen.py
Normal file
@ -0,0 +1,286 @@
|
||||
# Copyright (C) 2022 Fujitsu
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from oslo_utils import uuidutils
|
||||
|
||||
|
||||
def test_instantiate_cnf_resources_create(vnfd_id):
|
||||
# All attributes are set.
|
||||
# NOTE: All of the following cardinality attributes are set.
|
||||
# In addition, 0..N or 1..N attributes are set to 2 or more.
|
||||
# - 0..1 (1)
|
||||
# - 0..N (2 or more)
|
||||
# - 1
|
||||
# - 1..N (2 or more)
|
||||
return {
|
||||
"vnfdId": vnfd_id,
|
||||
"vnfInstanceName": "test_instantiate_cnf_resources",
|
||||
"vnfInstanceDescription": "test_instantiate_cnf_resources",
|
||||
"metadata": {"dummy-key": "dummy-val"}
|
||||
}
|
||||
|
||||
|
||||
def test_instantiate_cnf_resources_terminate():
|
||||
# All attributes are set.
|
||||
# NOTE: All of the following cardinality attributes are set.
|
||||
# In addition, 0..N or 1..N attributes are set to 2 or more.
|
||||
# - 0..1 (1)
|
||||
# - 0..N (2 or more)
|
||||
# - 1
|
||||
# - 1..N (2 or more)
|
||||
return {
|
||||
"terminationType": "GRACEFUL",
|
||||
"gracefulTerminationTimeout": 5,
|
||||
"additionalParams": {"dummy-key": "dummy-val"}
|
||||
}
|
||||
|
||||
|
||||
def max_sample_instantiate(auth_url, bearer_token):
|
||||
# All attributes are set.
|
||||
# NOTE: All of the following cardinality attributes are set.
|
||||
# In addition, 0..N or 1..N attributes are set to 2 or more.
|
||||
# - 0..1 (1)
|
||||
# - 0..N (2 or more)
|
||||
# - 1
|
||||
# - 1..N (2 or more)
|
||||
vim_id_1 = uuidutils.generate_uuid()
|
||||
vim_id_2 = uuidutils.generate_uuid()
|
||||
vim_1 = {
|
||||
"vimId": vim_id_1,
|
||||
"vimType": "kubernetes",
|
||||
"interfaceInfo": {"endpoint": auth_url},
|
||||
"accessInfo": {
|
||||
"bearer_token": bearer_token,
|
||||
"region": "RegionOne",
|
||||
},
|
||||
"extra": {"dummy-key": "dummy-val"}
|
||||
}
|
||||
vim_2 = {
|
||||
"vimId": vim_id_2,
|
||||
"vimType": "kubernetes",
|
||||
"interfaceInfo": {"endpoint": auth_url},
|
||||
"accessInfo": {
|
||||
"username": "dummy_user",
|
||||
"region": "RegionOne",
|
||||
"password": "dummy_password",
|
||||
},
|
||||
"extra": {"dummy-key": "dummy-val"}
|
||||
}
|
||||
return {
|
||||
"flavourId": "simple",
|
||||
"vimConnectionInfo": {
|
||||
"vim1": vim_1,
|
||||
"vim2": vim_2
|
||||
},
|
||||
"additionalParams": {
|
||||
"lcm-kubernetes-def-files": [
|
||||
# "Files/kubernetes/bindings.yaml",
|
||||
"Files/kubernetes/clusterrole_clusterrolebinding_SA.yaml",
|
||||
"Files/kubernetes/config-map.yaml",
|
||||
"Files/kubernetes/controller-revision.yaml",
|
||||
"Files/kubernetes/daemon-set.yaml",
|
||||
"Files/kubernetes/deployment.yaml",
|
||||
"Files/kubernetes/horizontal-pod-autoscaler.yaml",
|
||||
"Files/kubernetes/job.yaml",
|
||||
"Files/kubernetes/limit-range.yaml",
|
||||
"Files/kubernetes/local-subject-access-review.yaml",
|
||||
"Files/kubernetes/multiple_yaml_lease.yaml",
|
||||
"Files/kubernetes/multiple_yaml_network-policy.yaml",
|
||||
"Files/kubernetes/multiple_yaml_priority-class.yaml",
|
||||
"Files/kubernetes/namespace.yaml",
|
||||
"Files/kubernetes/persistent-volume-0.yaml",
|
||||
"Files/kubernetes/persistent-volume-1.yaml",
|
||||
"Files/kubernetes/pod.yaml",
|
||||
"Files/kubernetes/pod-template.yaml",
|
||||
"Files/kubernetes/replicaset_service_secret.yaml",
|
||||
"Files/kubernetes/resource-quota.yaml",
|
||||
"Files/kubernetes/role_rolebinding_SA.yaml",
|
||||
"Files/kubernetes/self-subject-access-review_and"
|
||||
"_self-subject-rule-review.yaml",
|
||||
"Files/kubernetes/statefulset.yaml",
|
||||
"Files/kubernetes/storage-class.yaml",
|
||||
"Files/kubernetes/storage-class_pv_pvc.yaml",
|
||||
"Files/kubernetes/subject-access-review.yaml",
|
||||
"Files/kubernetes/token-review.yaml"
|
||||
],
|
||||
"namespace": "default"
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
def max_sample_terminate():
|
||||
# All attributes are set.
|
||||
# NOTE: All of the following cardinality attributes are set.
|
||||
# In addition, 0..N or 1..N attributes are set to 2 or more.
|
||||
# - 0..1 (1)
|
||||
# - 0..N (2 or more)
|
||||
# - 1
|
||||
# - 1..N (2 or more)
|
||||
return {
|
||||
"terminationType": "GRACEFUL",
|
||||
"gracefulTerminationTimeout": 5,
|
||||
"additionalParams": {"dummy-key": "dummy-val"}
|
||||
}
|
||||
|
||||
|
||||
def min_sample_instantiate(vim_id_1):
|
||||
vim_1 = {
|
||||
"vimId": vim_id_1,
|
||||
"vimType": "kubernetes",
|
||||
}
|
||||
return {
|
||||
"flavourId": "simple",
|
||||
"vimConnectionInfo": {
|
||||
"vim1": vim_1,
|
||||
},
|
||||
"additionalParams": {
|
||||
"lcm-kubernetes-def-files": [
|
||||
"Files/kubernetes/pod.yaml"
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
def min_sample_terminate():
|
||||
# Omit except for required attributes
|
||||
# NOTE: Only the following cardinality attributes are set.
|
||||
# - 1
|
||||
# - 1..N (1)
|
||||
return {
|
||||
"terminationType": "FORCEFUL"
|
||||
}
|
||||
|
||||
|
||||
def change_vnfpkg_instantiate(auth_url, bearer_token):
|
||||
# All attributes are set.
|
||||
# NOTE: All of the following cardinality attributes are set.
|
||||
# In addition, 0..N or 1..N attributes are set to 2 or more.
|
||||
# - 0..1 (1)
|
||||
# - 0..N (2 or more)
|
||||
# - 1
|
||||
# - 1..N (2 or more)
|
||||
vim_id_1 = uuidutils.generate_uuid()
|
||||
vim_1 = {
|
||||
"vimId": vim_id_1,
|
||||
"vimType": "kubernetes",
|
||||
"interfaceInfo": {"endpoint": auth_url},
|
||||
"accessInfo": {
|
||||
"bearer_token": bearer_token,
|
||||
"region": "RegionOne",
|
||||
},
|
||||
"extra": {"dummy-key": "dummy-val"}
|
||||
}
|
||||
|
||||
return {
|
||||
"flavourId": "simple",
|
||||
"vimConnectionInfo": {
|
||||
"vim1": vim_1
|
||||
},
|
||||
"additionalParams": {
|
||||
"lcm-kubernetes-def-files": [
|
||||
"Files/kubernetes/deployment.yaml",
|
||||
"Files/kubernetes/namespace.yaml"
|
||||
],
|
||||
"namespace": "curry"
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
def change_vnfpkg_all_params(vnfd_id):
|
||||
return {
|
||||
"vnfdId": vnfd_id,
|
||||
"additionalParams": {
|
||||
"upgrade_type": "RollingUpdate",
|
||||
"lcm-operation-coordinate-old-vnf":
|
||||
"Scripts/coordinate_old_vnf.py",
|
||||
"lcm-operation-coordinate-old-vnf-class": "CoordinateOldVnf",
|
||||
"lcm-operation-coordinate-new-vnf":
|
||||
"Scripts/coordinate_new_vnf.py",
|
||||
"lcm-operation-coordinate-new-vnf-class": "CoordinateNewVnf",
|
||||
"lcm-kubernetes-def-files": [
|
||||
"Files/new_kubernetes/new_deployment.yaml"],
|
||||
"vdu_params": [{
|
||||
"vdu_id": "VDU2"
|
||||
}]
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
def change_vnfpkg_instantiate_min(vim_id_1):
|
||||
vim_1 = {
|
||||
"vimId": vim_id_1,
|
||||
"vimType": "kubernetes",
|
||||
}
|
||||
return {
|
||||
"flavourId": "simple",
|
||||
"vimConnectionInfo": {
|
||||
"vim1": vim_1,
|
||||
},
|
||||
"additionalParams": {
|
||||
"lcm-kubernetes-def-files": [
|
||||
"Files/kubernetes/deployment.yaml"
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
def change_vnfpkg_min(vnfd_id):
|
||||
return {
|
||||
"vnfdId": vnfd_id,
|
||||
"additionalParams": {
|
||||
"upgrade_type": "RollingUpdate",
|
||||
"lcm-operation-coordinate-old-vnf":
|
||||
"Scripts/coordinate_old_vnf.py",
|
||||
"lcm-operation-coordinate-old-vnf-class": "CoordinateOldVnf",
|
||||
"lcm-operation-coordinate-new-vnf":
|
||||
"Scripts/coordinate_new_vnf.py",
|
||||
"lcm-operation-coordinate-new-vnf-class": "CoordinateNewVnf"
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
def change_vnfpkg_instantiate_error_handing(vim_id_1):
|
||||
vim_1 = {
|
||||
"vimId": vim_id_1,
|
||||
"vimType": "kubernetes",
|
||||
}
|
||||
return {
|
||||
"flavourId": "simple",
|
||||
"vimConnectionInfo": {
|
||||
"vim1": vim_1,
|
||||
},
|
||||
"additionalParams": {
|
||||
"lcm-kubernetes-def-files": [
|
||||
"Files/kubernetes/deployment_fail_test.yaml"
|
||||
]
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
def change_vnfpkg_error(vnfd_id):
|
||||
return {
|
||||
"vnfdId": vnfd_id,
|
||||
"additionalParams": {
|
||||
"upgrade_type": "RollingUpdate",
|
||||
"lcm-operation-coordinate-old-vnf":
|
||||
"Scripts/coordinate_old_vnf.py",
|
||||
"lcm-operation-coordinate-old-vnf-class": "CoordinateOldVnf",
|
||||
"lcm-operation-coordinate-new-vnf":
|
||||
"Scripts/coordinate_new_vnf.py",
|
||||
"lcm-operation-coordinate-new-vnf-class": "CoordinateNewVnf",
|
||||
"lcm-kubernetes-def-files": [
|
||||
"Files/new_kubernetes/error_deployment.yaml"]
|
||||
}
|
||||
}
|
@ -0,0 +1,202 @@
|
||||
tosca_definitions_version: tosca_simple_yaml_1_2
|
||||
description: ETSI NFV SOL 001 common types definitions version 2.6.1
|
||||
metadata:
|
||||
template_name: etsi_nfv_sol001_common_types
|
||||
template_author: ETSI_NFV
|
||||
template_version: 2.6.1
|
||||
|
||||
data_types:
|
||||
tosca.datatypes.nfv.L2AddressData:
|
||||
derived_from: tosca.datatypes.Root
|
||||
description: Describes the information on the MAC addresses to be assigned to a connection point.
|
||||
properties:
|
||||
mac_address_assignment:
|
||||
type: boolean
|
||||
description: Specifies if the address assignment is the responsibility of management and orchestration function or not. If it is set to True, it is the management and orchestration function responsibility
|
||||
required: true
|
||||
|
||||
tosca.datatypes.nfv.L3AddressData:
|
||||
derived_from: tosca.datatypes.Root
|
||||
description: Provides information about Layer 3 level addressing scheme and parameters applicable to a CP
|
||||
properties:
|
||||
ip_address_assignment:
|
||||
type: boolean
|
||||
description: Specifies if the address assignment is the responsibility of management and orchestration function or not. If it is set to True, it is the management and orchestration function responsibility
|
||||
required: true
|
||||
floating_ip_activated:
|
||||
type: boolean
|
||||
description: Specifies if the floating IP scheme is activated on the Connection Point or not
|
||||
required: true
|
||||
ip_address_type:
|
||||
type: string
|
||||
description: Defines address type. The address type should be aligned with the address type supported by the layer_protocols properties of the parent VnfExtCp
|
||||
required: false
|
||||
constraints:
|
||||
- valid_values: [ ipv4, ipv6 ]
|
||||
number_of_ip_address:
|
||||
type: integer
|
||||
description: Minimum number of IP addresses to be assigned
|
||||
required: false
|
||||
constraints:
|
||||
- greater_than: 0
|
||||
|
||||
tosca.datatypes.nfv.AddressData:
|
||||
derived_from: tosca.datatypes.Root
|
||||
description: Describes information about the addressing scheme and parameters applicable to a CP
|
||||
properties:
|
||||
address_type:
|
||||
type: string
|
||||
description: Describes the type of the address to be assigned to a connection point. The content type shall be aligned with the address type supported by the layerProtocol property of the connection point
|
||||
required: true
|
||||
constraints:
|
||||
- valid_values: [ mac_address, ip_address ]
|
||||
l2_address_data:
|
||||
type: tosca.datatypes.nfv.L2AddressData
|
||||
description: Provides the information on the MAC addresses to be assigned to a connection point.
|
||||
required: false
|
||||
l3_address_data:
|
||||
type: tosca.datatypes.nfv.L3AddressData
|
||||
description: Provides the information on the IP addresses to be assigned to a connection point
|
||||
required: false
|
||||
|
||||
tosca.datatypes.nfv.ConnectivityType:
|
||||
derived_from: tosca.datatypes.Root
|
||||
description: describes additional connectivity information of a virtualLink
|
||||
properties:
|
||||
layer_protocols:
|
||||
type: list
|
||||
description: Identifies the protocol a virtualLink gives access to (ethernet, mpls, odu2, ipv4, ipv6, pseudo-wire).The top layer protocol of the virtualLink protocol stack shall always be provided. The lower layer protocols may be included when there are specific requirements on these layers.
|
||||
required: true
|
||||
entry_schema:
|
||||
type: string
|
||||
constraints:
|
||||
- valid_values: [ ethernet, mpls, odu2, ipv4, ipv6, pseudo-wire ]
|
||||
flow_pattern:
|
||||
type: string
|
||||
description: Identifies the flow pattern of the connectivity
|
||||
required: false
|
||||
constraints:
|
||||
- valid_values: [ line, tree, mesh ]
|
||||
|
||||
tosca.datatypes.nfv.LinkBitrateRequirements:
|
||||
derived_from: tosca.datatypes.Root
|
||||
description: describes the requirements in terms of bitrate for a virtual link
|
||||
properties:
|
||||
root:
|
||||
type: integer # in bits per second
|
||||
description: Specifies the throughput requirement in bits per second of the link (e.g. bitrate of E-Line, root bitrate of E-Tree, aggregate capacity of E-LAN).
|
||||
required: true
|
||||
constraints:
|
||||
- greater_or_equal: 0
|
||||
leaf:
|
||||
type: integer # in bits per second
|
||||
description: Specifies the throughput requirement in bits per second of leaf connections to the link when applicable to the connectivity type (e.g. for E-Tree and E LAN branches).
|
||||
required: false
|
||||
constraints:
|
||||
- greater_or_equal: 0
|
||||
|
||||
tosca.datatypes.nfv.CpProtocolData:
|
||||
derived_from: tosca.datatypes.Root
|
||||
description: Describes and associates the protocol layer that a CP uses together with other protocol and connection point information
|
||||
properties:
|
||||
associated_layer_protocol:
|
||||
type: string
|
||||
required: true
|
||||
description: One of the values of the property layer_protocols of the CP
|
||||
constraints:
|
||||
- valid_values: [ ethernet, mpls, odu2, ipv4, ipv6, pseudo-wire ]
|
||||
address_data:
|
||||
type: list
|
||||
description: Provides information on the addresses to be assigned to the CP
|
||||
entry_schema:
|
||||
type: tosca.datatypes.nfv.AddressData
|
||||
required: false
|
||||
|
||||
tosca.datatypes.nfv.VnfProfile:
|
||||
derived_from: tosca.datatypes.Root
|
||||
description: describes a profile for instantiating VNFs of a particular NS DF according to a specific VNFD and VNF DF.
|
||||
properties:
|
||||
instantiation_level:
|
||||
type: string
|
||||
description: Identifier of the instantiation level of the VNF DF to be used for instantiation. If not present, the default instantiation level as declared in the VNFD shall be used.
|
||||
required: false
|
||||
min_number_of_instances:
|
||||
type: integer
|
||||
description: Minimum number of instances of the VNF based on this VNFD that is permitted to exist for this VnfProfile.
|
||||
required: true
|
||||
constraints:
|
||||
- greater_or_equal: 0
|
||||
max_number_of_instances:
|
||||
type: integer
|
||||
description: Maximum number of instances of the VNF based on this VNFD that is permitted to exist for this VnfProfile.
|
||||
required: true
|
||||
constraints:
|
||||
- greater_or_equal: 0
|
||||
|
||||
tosca.datatypes.nfv.Qos:
|
||||
derived_from: tosca.datatypes.Root
|
||||
description: describes QoS data for a given VL used in a VNF deployment flavour
|
||||
properties:
|
||||
latency:
|
||||
type: scalar-unit.time #Number
|
||||
description: Specifies the maximum latency
|
||||
required: true
|
||||
constraints:
|
||||
- greater_than: 0 s
|
||||
packet_delay_variation:
|
||||
type: scalar-unit.time #Number
|
||||
description: Specifies the maximum jitter
|
||||
required: true
|
||||
constraints:
|
||||
- greater_or_equal: 0 s
|
||||
packet_loss_ratio:
|
||||
type: float
|
||||
description: Specifies the maximum packet loss ratio
|
||||
required: false
|
||||
constraints:
|
||||
- in_range: [ 0.0, 1.0 ]
|
||||
|
||||
capability_types:
|
||||
tosca.capabilities.nfv.VirtualLinkable:
|
||||
derived_from: tosca.capabilities.Node
|
||||
description: A node type that includes the VirtualLinkable capability indicates that it can be pointed by tosca.relationships.nfv.VirtualLinksTo relationship type
|
||||
|
||||
relationship_types:
|
||||
tosca.relationships.nfv.VirtualLinksTo:
|
||||
derived_from: tosca.relationships.DependsOn
|
||||
description: Represents an association relationship between the VduCp and VnfVirtualLink node types
|
||||
valid_target_types: [ tosca.capabilities.nfv.VirtualLinkable ]
|
||||
|
||||
node_types:
|
||||
tosca.nodes.nfv.Cp:
|
||||
derived_from: tosca.nodes.Root
|
||||
description: Provides information regarding the purpose of the connection point
|
||||
properties:
|
||||
layer_protocols:
|
||||
type: list
|
||||
description: Identifies which protocol the connection point uses for connectivity purposes
|
||||
required: true
|
||||
entry_schema:
|
||||
type: string
|
||||
constraints:
|
||||
- valid_values: [ ethernet, mpls, odu2, ipv4, ipv6, pseudo-wire ]
|
||||
role: #Name in ETSI NFV IFA011 v0.7.3: cpRole
|
||||
type: string
|
||||
description: Identifies the role of the port in the context of the traffic flow patterns in the VNF or parent NS
|
||||
required: false
|
||||
constraints:
|
||||
- valid_values: [ root, leaf ]
|
||||
description:
|
||||
type: string
|
||||
description: Provides human-readable information on the purpose of the connection point
|
||||
required: false
|
||||
protocol:
|
||||
type: list
|
||||
description: Provides information on the addresses to be assigned to the connection point(s) instantiated from this Connection Point Descriptor
|
||||
required: false
|
||||
entry_schema:
|
||||
type: tosca.datatypes.nfv.CpProtocolData
|
||||
trunk_mode:
|
||||
type: boolean
|
||||
description: Provides information about whether the CP instantiated from this Cp is in Trunk mode (802.1Q or other), When operating in "trunk mode", the Cp is capable of carrying traffic for several VLANs. Absence of this property implies that trunkMode is not configured for the Cp i.e. It is equivalent to boolean value "false".
|
||||
required: false
|
File diff suppressed because it is too large
@ -0,0 +1,169 @@
|
||||
tosca_definitions_version: tosca_simple_yaml_1_2
|
||||
|
||||
description: Simple deployment flavour for Sample VNF
|
||||
|
||||
imports:
|
||||
- etsi_nfv_sol001_common_types.yaml
|
||||
- etsi_nfv_sol001_vnfd_types.yaml
|
||||
- sample_cnf_types.yaml
|
||||
|
||||
topology_template:
|
||||
inputs:
|
||||
descriptor_id:
|
||||
type: string
|
||||
descriptor_version:
|
||||
type: string
|
||||
provider:
|
||||
type: string
|
||||
product_name:
|
||||
type: string
|
||||
software_version:
|
||||
type: string
|
||||
vnfm_info:
|
||||
type: list
|
||||
entry_schema:
|
||||
type: string
|
||||
flavour_id:
|
||||
type: string
|
||||
flavour_description:
|
||||
type: string
|
||||
|
||||
substitution_mappings:
|
||||
node_type: company.provider.VNF
|
||||
properties:
|
||||
flavour_id: simple
|
||||
requirements:
|
||||
virtual_link_external: []
|
||||
|
||||
node_templates:
|
||||
VNF:
|
||||
type: company.provider.VNF
|
||||
properties:
|
||||
flavour_description: A simple flavour
|
||||
|
||||
VDU1:
|
||||
type: tosca.nodes.nfv.Vdu.Compute
|
||||
properties:
|
||||
name: vdu1
|
||||
description: VDU1 compute node
|
||||
vdu_profile:
|
||||
min_number_of_instances: 1
|
||||
max_number_of_instances: 1
|
||||
|
||||
VDU2:
|
||||
type: tosca.nodes.nfv.Vdu.Compute
|
||||
properties:
|
||||
name: vdu2
|
||||
description: VDU2 compute node
|
||||
vdu_profile:
|
||||
min_number_of_instances: 1
|
||||
max_number_of_instances: 3
|
||||
|
||||
VDU3:
|
||||
type: tosca.nodes.nfv.Vdu.Compute
|
||||
properties:
|
||||
name: vdu3
|
||||
description: VDU3 compute node
|
||||
vdu_profile:
|
||||
min_number_of_instances: 1
|
||||
max_number_of_instances: 3
|
||||
|
||||
policies:
|
||||
- scaling_aspects:
|
||||
type: tosca.policies.nfv.ScalingAspects
|
||||
properties:
|
||||
aspects:
|
||||
vdu2_aspect:
|
||||
name: vdu2_aspect
|
||||
description: vdu2 scaling aspect
|
||||
max_scale_level: 2
|
||||
step_deltas:
|
||||
- delta_1
|
||||
vdu3_aspect:
|
||||
name: vdu3_aspect
|
||||
description: vdu3 scaling aspect
|
||||
max_scale_level: 2
|
||||
step_deltas:
|
||||
- delta_1
|
||||
|
||||
- VDU2_initial_delta:
|
||||
type: tosca.policies.nfv.VduInitialDelta
|
||||
properties:
|
||||
initial_delta:
|
||||
number_of_instances: 1
|
||||
targets: [ VDU2 ]
|
||||
|
||||
- VDU2_scaling_aspect_deltas:
|
||||
type: tosca.policies.nfv.VduScalingAspectDeltas
|
||||
properties:
|
||||
aspect: vdu2_aspect
|
||||
deltas:
|
||||
delta_1:
|
||||
number_of_instances: 1
|
||||
targets: [ VDU2 ]
|
||||
|
||||
- VDU3_initial_delta:
|
||||
type: tosca.policies.nfv.VduInitialDelta
|
||||
properties:
|
||||
initial_delta:
|
||||
number_of_instances: 1
|
||||
targets: [ VDU3 ]
|
||||
|
||||
- VDU3_scaling_aspect_deltas:
|
||||
type: tosca.policies.nfv.VduScalingAspectDeltas
|
||||
properties:
|
||||
aspect: vdu3_aspect
|
||||
deltas:
|
||||
delta_1:
|
||||
number_of_instances: 1
|
||||
targets: [ VDU3 ]
|
||||
|
||||
- instantiation_levels:
|
||||
type: tosca.policies.nfv.InstantiationLevels
|
||||
properties:
|
||||
levels:
|
||||
instantiation_level_1:
|
||||
description: Smallest size
|
||||
scale_info:
|
||||
vdu2_aspect:
|
||||
scale_level: 0
|
||||
vdu3_aspect:
|
||||
scale_level: 0
|
||||
instantiation_level_2:
|
||||
description: Largest size
|
||||
scale_info:
|
||||
vdu2_aspect:
|
||||
scale_level: 2
|
||||
vdu3_aspect:
|
||||
scale_level: 2
|
||||
default_level: instantiation_level_1
|
||||
|
||||
- VDU1_instantiation_levels:
|
||||
type: tosca.policies.nfv.VduInstantiationLevels
|
||||
properties:
|
||||
levels:
|
||||
instantiation_level_1:
|
||||
number_of_instances: 1
|
||||
instantiation_level_2:
|
||||
number_of_instances: 1
|
||||
targets: [ VDU1 ]
|
||||
|
||||
- VDU2_instantiation_levels:
|
||||
type: tosca.policies.nfv.VduInstantiationLevels
|
||||
properties:
|
||||
levels:
|
||||
instantiation_level_1:
|
||||
number_of_instances: 1
|
||||
instantiation_level_2:
|
||||
number_of_instances: 3
|
||||
targets: [ VDU2 ]
|
||||
|
||||
- VDU3_instantiation_levels:
|
||||
type: tosca.policies.nfv.VduInstantiationLevels
|
||||
properties:
|
||||
levels:
|
||||
instantiation_level_1:
|
||||
number_of_instances: 1
|
||||
instantiation_level_2:
|
||||
number_of_instances: 3
|
||||
targets: [ VDU3 ]
|
@ -0,0 +1,31 @@
|
||||
tosca_definitions_version: tosca_simple_yaml_1_2
|
||||
|
||||
description: Sample VNF
|
||||
|
||||
imports:
|
||||
- etsi_nfv_sol001_common_types.yaml
|
||||
- etsi_nfv_sol001_vnfd_types.yaml
|
||||
- sample_cnf_types.yaml
|
||||
- sample_cnf_df_simple.yaml
|
||||
|
||||
topology_template:
|
||||
inputs:
|
||||
selected_flavour:
|
||||
type: string
|
||||
description: VNF deployment flavour selected by the consumer. It is provided in the API
|
||||
|
||||
node_templates:
|
||||
VNF:
|
||||
type: company.provider.VNF
|
||||
properties:
|
||||
flavour_id: { get_input: selected_flavour }
|
||||
descriptor_id: b1bb0ce7-ebca-4fa7-95ed-4840d7000000
|
||||
provider: Company
|
||||
product_name: Sample VNF
|
||||
software_version: '1.0'
|
||||
descriptor_version: '1.0'
|
||||
vnfm_info:
|
||||
- Tacker
|
||||
requirements:
|
||||
#- virtual_link_external # mapped in lower-level templates
|
||||
#- virtual_link_internal # mapped in lower-level templates
|
@ -0,0 +1,53 @@
|
||||
tosca_definitions_version: tosca_simple_yaml_1_2
|
||||
|
||||
description: VNF type definition
|
||||
|
||||
imports:
|
||||
- etsi_nfv_sol001_common_types.yaml
|
||||
- etsi_nfv_sol001_vnfd_types.yaml
|
||||
|
||||
node_types:
|
||||
company.provider.VNF:
|
||||
derived_from: tosca.nodes.nfv.VNF
|
||||
properties:
|
||||
descriptor_id:
|
||||
type: string
|
||||
constraints: [ valid_values: [ b1bb0ce7-ebca-4fa7-95ed-4840d7000000 ] ]
|
||||
default: b1bb0ce7-ebca-4fa7-95ed-4840d7000000
|
||||
descriptor_version:
|
||||
type: string
|
||||
constraints: [ valid_values: [ '1.0' ] ]
|
||||
default: '1.0'
|
||||
provider:
|
||||
type: string
|
||||
constraints: [ valid_values: [ 'Company' ] ]
|
||||
default: 'Company'
|
||||
product_name:
|
||||
type: string
|
||||
constraints: [ valid_values: [ 'Sample VNF' ] ]
|
||||
default: 'Sample VNF'
|
||||
software_version:
|
||||
type: string
|
||||
constraints: [ valid_values: [ '1.0' ] ]
|
||||
default: '1.0'
|
||||
vnfm_info:
|
||||
type: list
|
||||
entry_schema:
|
||||
type: string
|
||||
constraints: [ valid_values: [ Tacker ] ]
|
||||
default: [ Tacker ]
|
||||
flavour_id:
|
||||
type: string
|
||||
constraints: [ valid_values: [ simple,complex ] ]
|
||||
default: simple
|
||||
flavour_description:
|
||||
type: string
|
||||
default: ""
|
||||
requirements:
|
||||
- virtual_link_external:
|
||||
capability: tosca.capabilities.nfv.VirtualLinkable
|
||||
- virtual_link_internal:
|
||||
capability: tosca.capabilities.nfv.VirtualLinkable
|
||||
interfaces:
|
||||
Vnflcm:
|
||||
type: tosca.interfaces.nfv.Vnflcm
|
@ -0,0 +1,24 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: vdu2
|
||||
namespace: default
|
||||
spec:
|
||||
replicas: 2
|
||||
selector:
|
||||
matchLabels:
|
||||
app: webserver
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: webserver
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx
|
||||
image: nginx:alpine
|
||||
imagePullPolicy: IfNotPresent
|
||||
ports:
|
||||
- containerPort: 80
|
||||
protocol: TCP
|
||||
strategy:
|
||||
type: RollingUpdate
|
@ -0,0 +1,4 @@
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: curry
|
@ -0,0 +1,28 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: vdu3
|
||||
namespace: default
|
||||
spec:
|
||||
replicas: 2
|
||||
selector:
|
||||
matchLabels:
|
||||
app: webserver
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: webserver
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx
|
||||
image: error-image
|
||||
imagePullPolicy: IfNotPresent
|
||||
ports:
|
||||
- containerPort: 80
|
||||
protocol: TCP
|
||||
volumes:
|
||||
- name: config
|
||||
configMap:
|
||||
name: nginx-app-original
|
||||
strategy:
|
||||
type: RollingUpdate
|
@ -0,0 +1,24 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: vdu2
|
||||
namespace: default
|
||||
spec:
|
||||
replicas: 2
|
||||
selector:
|
||||
matchLabels:
|
||||
app: webserver
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: webserver
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx
|
||||
image: nginx:alpine
|
||||
imagePullPolicy: IfNotPresent
|
||||
ports:
|
||||
- containerPort: 80
|
||||
protocol: TCP
|
||||
strategy:
|
||||
type: RollingUpdate
|
@ -0,0 +1,66 @@
|
||||
# Copyright (C) 2022 Fujitsu
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
import os
|
||||
import pickle
|
||||
import sys
|
||||
|
||||
from oslo_log import log as logging
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
CMD_TIMEOUT = 30
|
||||
SERVER_WAIT_COMPLETE_TIME = 60
|
||||
SSH_CONNECT_RETRY_COUNT = 4
|
||||
|
||||
|
||||
class SampleNewCoordinateVNFScript(object):
|
||||
|
||||
def __init__(self, req, inst, grant_req, grant, csar_dir, k8s_info):
|
||||
self.req = req
|
||||
self.inst = inst
|
||||
self.grant_req = grant_req
|
||||
self.grant = grant
|
||||
self.csar_dir = csar_dir
|
||||
self.k8s_info = k8s_info
|
||||
|
||||
def coordinate_vnf(self):
|
||||
pass
|
||||
|
||||
|
||||
def main():
|
||||
operation = "coordinate_vnf"
|
||||
script_dict = pickle.load(sys.stdin.buffer)
|
||||
req = script_dict['request']
|
||||
inst = script_dict['vnf_instance']
|
||||
grant_req = script_dict['grant_request']
|
||||
grant = script_dict['grant_response']
|
||||
csar_dir = script_dict['tmp_csar_dir']
|
||||
k8s_info = script_dict['k8s_info']
|
||||
script = SampleNewCoordinateVNFScript(
|
||||
req, inst, grant_req, grant,
|
||||
csar_dir, k8s_info)
|
||||
    try:
        getattr(script, operation)()
    except Exception:
        raise
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
try:
|
||||
main()
|
||||
os._exit(0)
|
||||
except Exception as ex:
|
||||
sys.stderr.write(str(ex))
|
||||
sys.stderr.flush()
|
||||
os._exit(1)
|
@ -0,0 +1,63 @@
|
||||
# Copyright (C) 2022 Fujitsu
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
import os
|
||||
import pickle
|
||||
import sys
|
||||
|
||||
from oslo_log import log as logging
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class SampleOldCoordinateVNFScript(object):
|
||||
|
||||
def __init__(self, req, inst, grant_req, grant, csar_dir, k8s_info):
|
||||
self.req = req
|
||||
self.inst = inst
|
||||
self.grant_req = grant_req
|
||||
self.grant = grant
|
||||
self.csar_dir = csar_dir
|
||||
self.k8s_info = k8s_info
|
||||
|
||||
def coordinate_vnf(self):
|
||||
pass
|
||||
|
||||
|
||||
def main():
|
||||
operation = "coordinate_vnf"
|
||||
script_dict = pickle.load(sys.stdin.buffer)
|
||||
req = script_dict['request']
|
||||
inst = script_dict['vnf_instance']
|
||||
grant_req = script_dict['grant_request']
|
||||
grant = script_dict['grant_response']
|
||||
csar_dir = script_dict['tmp_csar_dir']
|
||||
k8s_info = script_dict['k8s_info']
|
||||
script = SampleOldCoordinateVNFScript(
|
||||
req, inst, grant_req, grant,
|
||||
csar_dir, k8s_info)
|
||||
    try:
        getattr(script, operation)()
    except Exception:
        raise
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
try:
|
||||
main()
|
||||
os._exit(0)
|
||||
except Exception as ex:
|
||||
sys.stderr.write(str(ex))
|
||||
sys.stderr.flush()
|
||||
os._exit(1)
|
@ -0,0 +1,25 @@
|
||||
TOSCA-Meta-File-Version: 1.0
|
||||
Created-by: dummy_user
|
||||
CSAR-Version: 1.1
|
||||
Entry-Definitions: Definitions/sample_cnf_top.vnfd.yaml
|
||||
|
||||
Name: Files/kubernetes/deployment.yaml
|
||||
Content-Type: test-data
|
||||
Algorithm: SHA-256
|
||||
Hash: 30071afb22afcb0e54e03df3d22f0852994b4120ca85ac72e9c207c97a4755a8
|
||||
|
||||
Name: Files/new_kubernetes/error_deployment.yaml
|
||||
Content-Type: test-data
|
||||
Algorithm: SHA-256
|
||||
Hash: 1386a46e16e1c07aef97d9c1bb6ca7a6f2af99b314ecac42094634a59577a060
|
||||
|
||||
Name: Files/new_kubernetes/new_deployment.yaml
|
||||
Content-Type: test-data
|
||||
Algorithm: SHA-256
|
||||
Hash: 30071afb22afcb0e54e03df3d22f0852994b4120ca85ac72e9c207c97a4755a8
|
||||
|
||||
Name: Files/kubernetes/namespace.yaml
|
||||
Content-Type: test-data
|
||||
Algorithm: SHA-256
|
||||
Hash: c2af464e4b1646da9d2e6ccfdc44cf744753459a001c3469135d04dbb56bb293
|
||||
|
@ -0,0 +1,47 @@
|
||||
# Copyright (C) 2022 Fujitsu
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import json
|
||||
import os
|
||||
import shutil
|
||||
import tempfile
|
||||
|
||||
from oslo_utils import uuidutils
|
||||
|
||||
from tacker.tests.functional.sol_kubernetes_v2 import paramgen
|
||||
from tacker.tests.functional.sol_v2 import utils
|
||||
|
||||
|
||||
zip_file_name = os.path.basename(os.path.abspath(".")) + '.zip'
|
||||
tmp_dir = tempfile.mkdtemp()
|
||||
vnfd_id = uuidutils.generate_uuid()
|
||||
|
||||
# tacker/tests/functional/sol_kubernetes_v2/samples/{package_name}
|
||||
utils.make_zip(".", tmp_dir, vnfd_id)
|
||||
|
||||
shutil.move(os.path.join(tmp_dir, zip_file_name), ".")
|
||||
shutil.rmtree(tmp_dir)
|
||||
|
||||
# if you change_vnfpkg with all parameters
|
||||
change_vnfpkg_all_params = paramgen.change_vnfpkg_all_params(vnfd_id)
|
||||
|
||||
# if you change_vnfpkg with no operational parameters
|
||||
change_vnfpkg_min = paramgen.change_vnfpkg_min(vnfd_id)
|
||||
|
||||
with open("change_vnfpkg_all_params", "w", encoding='utf-8') as f:
|
||||
f.write(json.dumps(change_vnfpkg_all_params, indent=2))
|
||||
|
||||
with open("change_vnfpkg_min", "w", encoding='utf-8') as f:
|
||||
f.write(json.dumps(change_vnfpkg_min, indent=2))
|
@ -0,0 +1,304 @@
|
||||
tosca_definitions_version: tosca_simple_yaml_1_2
|
||||
|
||||
description: Simple deployment flavour for Sample VNF
|
||||
|
||||
imports:
|
||||
- etsi_nfv_sol001_common_types.yaml
|
||||
- etsi_nfv_sol001_vnfd_types.yaml
|
||||
- sample_cnf_types.yaml
|
||||
|
||||
topology_template:
|
||||
inputs:
|
||||
descriptor_id:
|
||||
type: string
|
||||
descriptor_version:
|
||||
type: string
|
||||
provider:
|
||||
type: string
|
||||
product_name:
|
||||
type: string
|
||||
software_version:
|
||||
type: string
|
||||
vnfm_info:
|
||||
type: list
|
||||
entry_schema:
|
||||
type: string
|
||||
flavour_id:
|
||||
type: string
|
||||
flavour_description:
|
||||
type: string
|
||||
|
||||
substitution_mappings:
|
||||
node_type: company.provider.VNF
|
||||
properties:
|
||||
flavour_id: simple
|
||||
requirements:
|
||||
virtual_link_external: []
|
||||
|
||||
node_templates:
|
||||
VNF:
|
||||
type: company.provider.VNF
|
||||
properties:
|
||||
flavour_description: A simple flavour
|
||||
|
||||
VDU1:
|
||||
type: tosca.nodes.nfv.Vdu.Compute
|
||||
properties:
|
||||
name: vdu1
|
||||
description: VDU1 compute node
|
||||
vdu_profile:
|
||||
min_number_of_instances: 1
|
||||
max_number_of_instances: 1
|
||||
|
||||
VDU2:
|
||||
type: tosca.nodes.nfv.Vdu.Compute
|
||||
properties:
|
||||
name: vdu2
|
||||
description: VDU2 compute node
|
||||
vdu_profile:
|
||||
min_number_of_instances: 2
|
||||
max_number_of_instances: 3
|
||||
|
||||
VDU3:
|
||||
type: tosca.nodes.nfv.Vdu.Compute
|
||||
properties:
|
||||
name: vdu3
|
||||
description: VDU3 compute node
|
||||
vdu_profile:
|
||||
min_number_of_instances: 1
|
||||
max_number_of_instances: 3
|
||||
|
||||
VDU4:
|
||||
type: tosca.nodes.nfv.Vdu.Compute
|
||||
properties:
|
||||
name: vdu4
|
||||
description: VDU4 compute node
|
||||
vdu_profile:
|
||||
min_number_of_instances: 1
|
||||
max_number_of_instances: 3
|
||||
|
||||
VDU5:
|
||||
type: tosca.nodes.nfv.Vdu.Compute
|
||||
properties:
|
||||
name: vdu5
|
||||
description: VDU5 compute node
|
||||
vdu_profile:
|
||||
min_number_of_instances: 1
|
||||
max_number_of_instances: 3
|
||||
|
||||
VDU6:
|
||||
type: tosca.nodes.nfv.Vdu.Compute
|
||||
properties:
|
||||
name: vdu6
|
||||
description: VDU6 compute node
|
||||
vdu_profile:
|
||||
min_number_of_instances: 1
|
||||
max_number_of_instances: 3
|
||||
|
||||
policies:
|
||||
- scaling_aspects:
|
||||
type: tosca.policies.nfv.ScalingAspects
|
||||
properties:
|
||||
aspects:
|
||||
vdu2_aspect:
|
||||
name: vdu2_aspect
|
||||
description: vdu2 scaling aspect
|
||||
max_scale_level: 2
|
||||
step_deltas:
|
||||
- delta_1
|
||||
vdu3_aspect:
|
||||
name: vdu3_aspect
|
||||
description: vdu3 scaling aspect
|
||||
max_scale_level: 2
|
||||
step_deltas:
|
||||
- delta_1
|
||||
vdu4_aspect:
|
||||
name: vdu4_aspect
|
||||
description: vdu4 scaling aspect
|
||||
max_scale_level: 2
|
||||
step_deltas:
|
||||
- delta_1
|
||||
vdu5_aspect:
|
||||
name: vdu5_aspect
|
||||
description: vdu5 scaling aspect
|
||||
max_scale_level: 2
|
||||
step_deltas:
|
||||
- delta_1
|
||||
vdu6_aspect:
|
||||
name: vdu6_aspect
|
||||
description: vdu6 scaling aspect
|
||||
max_scale_level: 2
|
||||
step_deltas:
|
||||
- delta_1
|
||||
|
||||
- VDU2_initial_delta:
|
||||
type: tosca.policies.nfv.VduInitialDelta
|
||||
properties:
|
||||
initial_delta:
|
||||
number_of_instances: 2
|
||||
targets: [ VDU2 ]
|
||||
|
||||
- VDU2_scaling_aspect_deltas:
|
||||
type: tosca.policies.nfv.VduScalingAspectDeltas
|
||||
properties:
|
||||
aspect: vdu2_aspect
|
||||
deltas:
|
||||
delta_1:
|
||||
number_of_instances: 1
|
||||
targets: [ VDU2 ]
|
||||
|
||||
- VDU3_initial_delta:
|
||||
type: tosca.policies.nfv.VduInitialDelta
|
||||
properties:
|
||||
initial_delta:
|
||||
number_of_instances: 1
|
||||
targets: [ VDU3 ]
|
||||
|
||||
- VDU3_scaling_aspect_deltas:
|
||||
type: tosca.policies.nfv.VduScalingAspectDeltas
|
||||
properties:
|
||||
aspect: vdu3_aspect
|
||||
deltas:
|
||||
delta_1:
|
||||
number_of_instances: 1
|
||||
targets: [ VDU3 ]
|
||||
|
||||
- VDU4_initial_delta:
|
||||
type: tosca.policies.nfv.VduInitialDelta
|
||||
properties:
|
||||
initial_delta:
|
||||
number_of_instances: 1
|
||||
targets: [ VDU4 ]
|
||||
|
||||
- VDU4_scaling_aspect_deltas:
|
||||
type: tosca.policies.nfv.VduScalingAspectDeltas
|
||||
properties:
|
||||
aspect: vdu4_aspect
|
||||
deltas:
|
||||
delta_1:
|
||||
number_of_instances: 1
|
||||
targets: [ VDU4 ]
|
||||
|
||||
- VDU5_initial_delta:
|
||||
type: tosca.policies.nfv.VduInitialDelta
|
||||
properties:
|
||||
initial_delta:
|
||||
number_of_instances: 1
|
||||
targets: [ VDU5 ]
|
||||
|
||||
- VDU5_scaling_aspect_deltas:
|
||||
type: tosca.policies.nfv.VduScalingAspectDeltas
|
||||
properties:
|
||||
aspect: vdu5_aspect
|
||||
deltas:
|
||||
delta_1:
|
||||
number_of_instances: 1
|
||||
targets: [ VDU5 ]
|
||||
|
||||
- VDU6_initial_delta:
|
||||
type: tosca.policies.nfv.VduInitialDelta
|
||||
properties:
|
||||
initial_delta:
|
||||
number_of_instances: 1
|
||||
targets: [ VDU6 ]
|
||||
|
||||
- VDU6_scaling_aspect_deltas:
|
||||
type: tosca.policies.nfv.VduScalingAspectDeltas
|
||||
properties:
|
||||
aspect: vdu6_aspect
|
||||
deltas:
|
||||
delta_1:
|
||||
number_of_instances: 1
|
||||
targets: [ VDU6 ]
|
||||
|
||||
- instantiation_levels:
|
||||
type: tosca.policies.nfv.InstantiationLevels
|
||||
properties:
|
||||
levels:
|
||||
instantiation_level_1:
|
||||
description: Smallest size
|
||||
scale_info:
|
||||
vdu2_aspect:
|
||||
scale_level: 0
|
||||
vdu3_aspect:
|
||||
scale_level: 0
|
||||
vdu4_aspect:
|
||||
scale_level: 2
|
||||
vdu5_aspect:
|
||||
scale_level: 2
|
||||
vdu6_aspect:
|
||||
scale_level: 2
|
||||
instantiation_level_2:
|
||||
description: Largest size
|
||||
scale_info:
|
||||
vdu2_aspect:
|
||||
scale_level: 2
|
||||
vdu3_aspect:
|
||||
scale_level: 2
|
||||
vdu4_aspect:
|
||||
scale_level: 2
|
||||
vdu5_aspect:
|
||||
scale_level: 2
|
||||
vdu6_aspect:
|
||||
scale_level: 2
|
||||
default_level: instantiation_level_1
|
||||
|
||||
- VDU1_instantiation_levels:
|
||||
type: tosca.policies.nfv.VduInstantiationLevels
|
||||
properties:
|
||||
levels:
|
||||
instantiation_level_1:
|
||||
number_of_instances: 1
|
||||
instantiation_level_2:
|
||||
number_of_instances: 1
|
||||
targets: [ VDU1 ]
|
||||
|
||||
- VDU2_instantiation_levels:
|
||||
type: tosca.policies.nfv.VduInstantiationLevels
|
||||
properties:
|
||||
levels:
|
||||
instantiation_level_1:
|
||||
number_of_instances: 2
|
||||
instantiation_level_2:
|
||||
number_of_instances: 3
|
||||
targets: [ VDU2 ]
|
||||
|
||||
- VDU3_instantiation_levels:
|
||||
type: tosca.policies.nfv.VduInstantiationLevels
|
||||
properties:
|
||||
levels:
|
||||
instantiation_level_1:
|
||||
number_of_instances: 1
|
||||
instantiation_level_2:
|
||||
number_of_instances: 3
|
||||
targets: [ VDU3 ]
|
||||
|
||||
- VDU4_instantiation_levels:
|
||||
type: tosca.policies.nfv.VduInstantiationLevels
|
||||
properties:
|
||||
levels:
|
||||
instantiation_level_1:
|
||||
number_of_instances: 1
|
||||
instantiation_level_2:
|
||||
number_of_instances: 3
|
||||
targets: [ VDU4 ]
|
||||
|
||||
- VDU5_instantiation_levels:
|
||||
type: tosca.policies.nfv.VduInstantiationLevels
|
||||
properties:
|
||||
levels:
|
||||
instantiation_level_1:
|
||||
number_of_instances: 1
|
||||
instantiation_level_2:
|
||||
number_of_instances: 3
|
||||
targets: [ VDU5 ]
|
||||
|
||||
- VDU6_instantiation_levels:
|
||||
type: tosca.policies.nfv.VduInstantiationLevels
|
||||
properties:
|
||||
levels:
|
||||
instantiation_level_1:
|
||||
number_of_instances: 1
|
||||
instantiation_level_2:
|
||||
number_of_instances: 3
|
||||
targets: [ VDU6 ]
|
@ -0,0 +1,31 @@
|
||||
tosca_definitions_version: tosca_simple_yaml_1_2
|
||||
|
||||
description: Sample VNF
|
||||
|
||||
imports:
|
||||
- etsi_nfv_sol001_common_types.yaml
|
||||
- etsi_nfv_sol001_vnfd_types.yaml
|
||||
- sample_cnf_types.yaml
|
||||
- sample_cnf_df_simple.yaml
|
||||
|
||||
topology_template:
|
||||
inputs:
|
||||
selected_flavour:
|
||||
type: string
|
||||
description: VNF deployment flavour selected by the consumer. It is provided in the API
|
||||
|
||||
node_templates:
|
||||
VNF:
|
||||
type: company.provider.VNF
|
||||
properties:
|
||||
flavour_id: { get_input: selected_flavour }
|
||||
descriptor_id: b1bb0ce7-ebca-4fa7-95ed-4840d7000000
|
||||
provider: Company
|
||||
product_name: Sample VNF
|
||||
software_version: '1.0'
|
||||
descriptor_version: '1.0'
|
||||
vnfm_info:
|
||||
- Tacker
|
||||
requirements:
|
||||
#- virtual_link_external # mapped in lower-level templates
|
||||
#- virtual_link_internal # mapped in lower-level templates
|
@ -0,0 +1,53 @@
|
||||
tosca_definitions_version: tosca_simple_yaml_1_2
|
||||
|
||||
description: VNF type definition
|
||||
|
||||
imports:
|
||||
- etsi_nfv_sol001_common_types.yaml
|
||||
- etsi_nfv_sol001_vnfd_types.yaml
|
||||
|
||||
node_types:
|
||||
company.provider.VNF:
|
||||
derived_from: tosca.nodes.nfv.VNF
|
||||
properties:
|
||||
descriptor_id:
|
||||
type: string
|
||||
constraints: [ valid_values: [ b1bb0ce7-ebca-4fa7-95ed-4840d7000000 ] ]
|
||||
default: b1bb0ce7-ebca-4fa7-95ed-4840d7000000
|
||||
descriptor_version:
|
||||
type: string
|
||||
constraints: [ valid_values: [ '1.0' ] ]
|
||||
default: '1.0'
|
||||
provider:
|
||||
type: string
|
||||
constraints: [ valid_values: [ 'Company' ] ]
|
||||
default: 'Company'
|
||||
product_name:
|
||||
type: string
|
||||
constraints: [ valid_values: [ 'Sample VNF' ] ]
|
||||
default: 'Sample VNF'
|
||||
software_version:
|
||||
type: string
|
||||
constraints: [ valid_values: [ '1.0' ] ]
|
||||
default: '1.0'
|
||||
vnfm_info:
|
||||
type: list
|
||||
entry_schema:
|
||||
type: string
|
||||
constraints: [ valid_values: [ Tacker ] ]
|
||||
default: [ Tacker ]
|
||||
flavour_id:
|
||||
type: string
|
||||
constraints: [ valid_values: [ simple,complex ] ]
|
||||
default: simple
|
||||
flavour_description:
|
||||
type: string
|
||||
default: ""
|
||||
requirements:
|
||||
- virtual_link_external:
|
||||
capability: tosca.capabilities.nfv.VirtualLinkable
|
||||
- virtual_link_internal:
|
||||
capability: tosca.capabilities.nfv.VirtualLinkable
|
||||
interfaces:
|
||||
Vnflcm:
|
||||
type: tosca.interfaces.nfv.Vnflcm
|
@ -0,0 +1,10 @@
|
||||
apiVersion: v1
|
||||
kind: Binding
|
||||
metadata:
|
||||
name: vdu1
|
||||
namespace: default
|
||||
target:
|
||||
apiVersion: v1
|
||||
kind: Node
|
||||
namespace: default
|
||||
name: k8-worker2
|
@ -0,0 +1,29 @@
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: curry-cluster-role
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["pods"]
|
||||
verbs: ["get", "watch", "list"]
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: curry-cluster-sa
|
||||
namespace: default
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: curry-cluster-rolebinding
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: curry-cluster-role
|
||||
subjects:
|
||||
- apiGroup: ""
|
||||
kind: ServiceAccount
|
||||
name: curry-cluster-sa
|
||||
namespace: default
|
@ -0,0 +1,8 @@
|
||||
apiVersion: v1
|
||||
data:
|
||||
param0: key1
|
||||
param1: key2
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: curry-test001
|
||||
namespace: default
|
@ -0,0 +1,8 @@
|
||||
apiVersion: apps/v1
|
||||
kind: ControllerRevision
|
||||
data:
|
||||
raw: test
|
||||
metadata:
|
||||
name: curry-test001
|
||||
namespace: default
|
||||
revision: 1
|
@ -0,0 +1,24 @@
|
||||
apiVersion: apps/v1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
name: vdu6
|
||||
namespace: default
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: nginx
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: nginx
|
||||
spec:
|
||||
containers:
|
||||
- image: nginx
|
||||
name: nginx
|
||||
resources:
|
||||
limits:
|
||||
memory: "200Mi"
|
||||
requests:
|
||||
memory: "100Mi"
|
||||
ports:
|
||||
- containerPort: 8082
|
@ -0,0 +1,29 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: vdu2
|
||||
namespace: default
|
||||
spec:
|
||||
replicas: 2
|
||||
selector:
|
||||
matchLabels:
|
||||
app: webserver
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: webserver
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx
|
||||
image: nginx
|
||||
resources:
|
||||
limits:
|
||||
memory: "200Mi"
|
||||
requests:
|
||||
memory: "100Mi"
|
||||
imagePullPolicy: IfNotPresent
|
||||
ports:
|
||||
- containerPort: 80
|
||||
protocol: TCP
|
||||
strategy:
|
||||
type: RollingUpdate
|
@ -0,0 +1,29 @@
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: vdu3
|
||||
namespace: default
|
||||
spec:
|
||||
replicas: 2
|
||||
selector:
|
||||
matchLabels:
|
||||
app: webserver
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: webserver
|
||||
spec:
|
||||
containers:
|
||||
- name: nginx
|
||||
image: nginx
|
||||
resources:
|
||||
limits:
|
||||
memory: "200Mi"
|
||||
requests:
|
||||
memory: "100Mi"
|
||||
imagePullPolicy: IfNotPresent
|
||||
ports:
|
||||
- containerPort: 80
|
||||
protocol: TCP
|
||||
strategy:
|
||||
type: RollingUpdate
|
@ -0,0 +1,13 @@
|
||||
apiVersion: autoscaling/v1
|
||||
kind: HorizontalPodAutoscaler
|
||||
metadata:
|
||||
name: curry-hpa-vdu001
|
||||
namespace: default
|
||||
spec:
|
||||
maxReplicas: 3
|
||||
minReplicas: 1
|
||||
scaleTargetRef:
|
||||
apiVersion: extensions/v1beta1
|
||||
kind: Deployment
|
||||
name: curry-svc-vdu001
|
||||
targetCPUUtilizationPercentage: 40
|
@ -0,0 +1,30 @@
|
||||
apiVersion: batch/v1
|
||||
kind: Job
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
labels:
|
||||
run: curryjob
|
||||
name: curryjob
|
||||
namespace: default
|
||||
spec:
|
||||
completions: 5
|
||||
parallelism: 2
|
||||
template:
|
||||
metadata:
|
||||
creationTimestamp: null
|
||||
labels:
|
||||
run: curryjob
|
||||
spec:
|
||||
containers:
|
||||
- command: ["sh", "-c"]
|
||||
args:
|
||||
- echo CURRY
|
||||
image: celebdor/kuryr-demo
|
||||
resources:
|
||||
limits:
|
||||
memory: "20Mi"
|
||||
requests:
|
||||
memory: "10Mi"
|
||||
name: curryjob
|
||||
restartPolicy: OnFailure
|
||||
status: {}
|
@ -0,0 +1,10 @@
|
||||
apiVersion: "v1"
|
||||
kind: "LimitRange"
|
||||
metadata:
|
||||
name: "limits"
|
||||
namespace: default
|
||||
spec:
|
||||
limits:
|
||||
- type: "Container"
|
||||
defaultRequest:
|
||||
cpu: "100m"
|
@ -0,0 +1,11 @@
|
||||
apiVersion: authorization.k8s.io/v1
|
||||
kind: LocalSubjectAccessReview
|
||||
metadata:
|
||||
namespace: default
|
||||
spec:
|
||||
user: curry-sa
|
||||
resourceAttributes:
|
||||
group: apps
|
||||
resource: deployments
|
||||
verb: create
|
||||
namespace: default
|
@ -0,0 +1,8 @@
|
||||
apiVersion: coordination.k8s.io/v1
|
||||
kind: Lease
|
||||
metadata:
|
||||
name: curry-lease
|
||||
namespace: default
|
||||
spec:
|
||||
holderIdentity: master
|
||||
leaseDurationSeconds: 40
|
@ -0,0 +1,17 @@
|
||||
apiVersion: networking.k8s.io/v1
|
||||
kind: NetworkPolicy
|
||||
metadata:
|
||||
name: all-deny
|
||||
namespace: default
|
||||
spec:
|
||||
podSelector: {}
|
||||
policyTypes:
|
||||
- Egress
|
||||
egress:
|
||||
- ports:
|
||||
- port: 53
|
||||
protocol: UDP
|
||||
- port: 53
|
||||
protocol: TCP
|
||||
to:
|
||||
- namespaceSelector: {}
|
@ -0,0 +1,7 @@
|
||||
apiVersion: scheduling.k8s.io/v1
|
||||
kind: PriorityClass
|
||||
metadata:
|
||||
name: high-priority
|
||||
value: 1000000
|
||||
globalDefault: false
|
||||
description: "Priority Class Test"
|
@ -0,0 +1,4 @@
|
||||
apiVersion: v1
|
||||
kind: Namespace
|
||||
metadata:
|
||||
name: curry
|
@ -0,0 +1,14 @@
|
||||
apiVersion: v1
|
||||
kind: PersistentVolume
|
||||
metadata:
|
||||
name: curry-sc-pv
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
capacity:
|
||||
storage: 1Gi
|
||||
hostPath:
|
||||
path: /data/curry-sc-test
|
||||
type: DirectoryOrCreate
|
||||
persistentVolumeReclaimPolicy: Delete
|
||||
storageClassName: curry-sc-local
|
@ -0,0 +1,14 @@
|
||||
apiVersion: v1
|
||||
kind: PersistentVolume
|
||||
metadata:
|
||||
name: curry-sc-pv-0
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
capacity:
|
||||
storage: 1Gi
|
||||
hostPath:
|
||||
path: /data/curry-sc-test-1
|
||||
type: DirectoryOrCreate
|
||||
persistentVolumeReclaimPolicy: Delete
|
||||
storageClassName: curry-sc-local
|
@ -0,0 +1,43 @@
|
||||
apiVersion: v1
|
||||
kind: PodTemplate
|
||||
metadata:
|
||||
name: curry-test001
|
||||
namespace: default
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: webserver
|
||||
scaling_name: SP1
|
||||
spec:
|
||||
containers:
|
||||
- env:
|
||||
- name: param0
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: param0
|
||||
name: curry-test001
|
||||
- name: param1
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: param1
|
||||
name: curry-test001
|
||||
image: celebdor/kuryr-demo
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: web-server
|
||||
ports:
|
||||
- containerPort: 8080
|
||||
resources:
|
||||
limits:
|
||||
cpu: 500m
|
||||
memory: 512M
|
||||
requests:
|
||||
cpu: 500m
|
||||
memory: 512M
|
||||
volumeMounts:
|
||||
- name: curry-claim-volume
|
||||
mountPath: /data
|
||||
volumes:
|
||||
- name: curry-claim-volume
|
||||
persistentVolumeClaim:
|
||||
claimName: curry-pv-claim
|
||||
terminationGracePeriodSeconds: 0
|
@ -0,0 +1,17 @@
|
||||
apiVersion: v1
|
||||
kind: Pod
|
||||
metadata:
|
||||
namespace: default
|
||||
name: vdu1
|
||||
spec:
|
||||
containers:
|
||||
- image: celebdor/kuryr-demo
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: web-server
|
||||
resources:
|
||||
limits:
|
||||
memory: "200Mi"
|
||||
requests:
|
||||
memory: "100Mi"
|
||||
ports:
|
||||
- containerPort: 8080
|
@ -0,0 +1,56 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: Service
|
||||
metadata:
|
||||
labels:
|
||||
app: webserver
|
||||
vdu_name: curry-svc-vdu001
|
||||
name: curry-svc-vdu001-multiple
|
||||
namespace: default
|
||||
spec:
|
||||
ports:
|
||||
- name: "80"
|
||||
port: 80
|
||||
targetPort: 8080
|
||||
selector:
|
||||
app: webserver
|
||||
type: ClusterIP
|
||||
---
|
||||
apiVersion: v1
|
||||
data:
|
||||
param0: a2V5MQ==
|
||||
param1: a2V5Mg==
|
||||
kind: Secret
|
||||
metadata:
|
||||
name: curry-sc-multiple
|
||||
namespace: default
|
||||
---
|
||||
apiVersion: apps/v1
|
||||
kind: ReplicaSet
|
||||
metadata:
|
||||
name: vdu3
|
||||
namespace: default
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
app: webserver
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: webserver
|
||||
spec:
|
||||
containers:
|
||||
- image: nginx
|
||||
name: nginx
|
||||
resources:
|
||||
limits:
|
||||
memory: "200Mi"
|
||||
requests:
|
||||
memory: "100Mi"
|
||||
env:
|
||||
- name: param0
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
key: param0
|
||||
name: curry-sc-multiple
|
@ -0,0 +1,11 @@
|
||||
apiVersion: v1
|
||||
kind: ResourceQuota
|
||||
metadata:
|
||||
name: curry-rq
|
||||
namespace: default
|
||||
spec:
|
||||
hard:
|
||||
cpu: "1000m"
|
||||
memory: 2Gi
|
||||
scopes:
|
||||
- NotBestEffort
|
@ -0,0 +1,31 @@
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: Role
|
||||
metadata:
|
||||
name: curry-role
|
||||
namespace: default
|
||||
rules:
|
||||
- apiGroups: [""]
|
||||
resources: ["pods"]
|
||||
verbs: ["get", "watch", "list"]
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: curry-sa
|
||||
namespace: default
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: RoleBinding
|
||||
metadata:
|
||||
name: curry-rolebinding
|
||||
namespace: default
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: Role
|
||||
name: curry-role
|
||||
subjects:
|
||||
- apiGroup: ""
|
||||
kind: ServiceAccount
|
||||
name: curry-sa
|
||||
namespace: default
|
@ -0,0 +1,14 @@
|
||||
---
|
||||
apiVersion: authorization.k8s.io/v1
|
||||
kind: SelfSubjectAccessReview
|
||||
spec:
|
||||
resourceAttributes:
|
||||
group: apps
|
||||
resource: deployments
|
||||
verb: create
|
||||
namespace: default
|
||||
---
|
||||
apiVersion: authorization.k8s.io/v1
|
||||
kind: SelfSubjectRulesReview
|
||||
spec:
|
||||
namespace: default
|
@ -0,0 +1,41 @@
|
||||
apiVersion: apps/v1
|
||||
kind: StatefulSet
|
||||
metadata:
|
||||
name: vdu5
|
||||
namespace: default
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
app: nginx
|
||||
serviceName: "nginx"
|
||||
replicas: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: nginx
|
||||
spec:
|
||||
terminationGracePeriodSeconds: 10
|
||||
containers:
|
||||
- name: nginx
|
||||
resources:
|
||||
limits:
|
||||
memory: "200Mi"
|
||||
requests:
|
||||
memory: "100Mi"
|
||||
image: k8s.gcr.io/nginx-slim:0.8
|
||||
ports:
|
||||
- containerPort: 8081
|
||||
name: web
|
||||
volumeMounts:
|
||||
- name: www
|
||||
mountPath: /usr/share/nginx/html
|
||||
volumeClaimTemplates:
|
||||
- metadata:
|
||||
name: www
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
storageClassName: "curry-sc-local"
|
||||
resources:
|
||||
requests:
|
||||
storage: 1Gi
|
@ -0,0 +1,6 @@
|
||||
apiVersion: storage.k8s.io/v1
|
||||
kind: StorageClass
|
||||
metadata:
|
||||
name: curry-sc-local
|
||||
provisioner: kubernetes.io/no-provisioner
|
||||
volumeBindingMode: Immediate
|
@ -0,0 +1,37 @@
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolumeClaim
|
||||
metadata:
|
||||
name: curry-sc-pvc
|
||||
namespace: default
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
volumeMode: Filesystem
|
||||
resources:
|
||||
requests:
|
||||
storage: 2Gi
|
||||
storageClassName: my-storage-class
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: PersistentVolume
|
||||
metadata:
|
||||
name: curry-sc-pv-1
|
||||
spec:
|
||||
accessModes:
|
||||
- ReadWriteOnce
|
||||
capacity:
|
||||
storage: 2Gi
|
||||
hostPath:
|
||||
path: /data/curry-sc-test
|
||||
type: DirectoryOrCreate
|
||||
persistentVolumeReclaimPolicy: Delete
|
||||
storageClassName: my-storage-class
|
||||
---
|
||||
apiVersion: storage.k8s.io/v1
|
||||
kind: StorageClass
|
||||
metadata:
|
||||
name: my-storage-class
|
||||
provisioner: kubernetes.io/no-provisioner
|
||||
volumeBindingMode: Immediate
|
||||
|
@ -0,0 +1,9 @@
|
||||
apiVersion: authorization.k8s.io/v1
|
||||
kind: SubjectAccessReview
|
||||
spec:
|
||||
user: curry-sa
|
||||
resourceAttributes:
|
||||
group: apps
|
||||
resource: deployments
|
||||
verb: create
|
||||
namespace: default
|
@ -0,0 +1,9 @@
|
||||
apiVersion: authentication.k8s.io/v1
|
||||
kind: TokenReview
|
||||
metadata:
|
||||
name: curry-tokenreview-test
|
||||
spec:
|
||||
# SA_TOKEN=$(kubectl describe secret $(kubectl get secrets |
|
||||
# grep curry-sa | cut -f1 -d ' ') | grep -E '^token' |
|
||||
# cut -f2 -d':' | tr -d '\t'); echo $SA_TOKEN
|
||||
token: "<SA_TOKEN>"
|
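The `token: "<SA_TOKEN>"` placeholder in the TokenReview manifest above is filled in by hand using the kubectl pipeline documented in its comment. As a rough illustration only, a hypothetical helper (not part of this patch) could run the same pipeline and inject the result before the manifest is applied:

# Hypothetical helper, not part of this patch: fills the TokenReview
# manifest with the curry-sa token obtained via the kubectl pipeline
# documented in the comment above.
import subprocess
import yaml  # PyYAML

def fill_token_review(manifest_path="Files/kubernetes/token-review.yaml"):
    cmd = ("kubectl describe secret $(kubectl get secrets | grep curry-sa "
           "| cut -f1 -d ' ') | grep -E '^token' | cut -f2 -d':' | tr -d '\\t'")
    sa_token = subprocess.check_output(cmd, shell=True, text=True).strip()
    with open(manifest_path) as f:
        manifest = yaml.safe_load(f)
    # replaces the "<SA_TOKEN>" placeholder in spec.token
    manifest["spec"]["token"] = sa_token
    return manifest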
@ -0,0 +1,63 @@
|
||||
# Copyright (C) 2022 Fujitsu
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
import os
|
||||
import pickle
|
||||
import sys
|
||||
|
||||
from oslo_log import log as logging
|
||||
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class SampleOldCoordinateVNFScript(object):
|
||||
|
||||
def __init__(self, req, inst, grant_req, grant, csar_dir, k8s_info):
|
||||
self.req = req
|
||||
self.inst = inst
|
||||
self.grant_req = grant_req
|
||||
self.grant = grant
|
||||
self.csar_dir = csar_dir
|
||||
self.k8s_info = k8s_info
|
||||
|
||||
def coordinate_vnf(self):
|
||||
pass
|
||||
|
||||
|
||||
def main():
|
||||
operation = "coordinate_vnf"
|
||||
script_dict = pickle.load(sys.stdin.buffer)
|
||||
req = script_dict['request']
|
||||
inst = script_dict['vnf_instance']
|
||||
grant_req = script_dict['grant_request']
|
||||
grant = script_dict['grant_response']
|
||||
csar_dir = script_dict['tmp_csar_dir']
|
||||
k8s_info = script_dict['k8s_info']
|
||||
script = SampleOldCoordinateVNFScript(
|
||||
req, inst, grant_req, grant,
|
||||
csar_dir, k8s_info)
|
||||
try:
|
||||
getattr(script, operation)()
|
||||
except Exception:
|
||||
raise
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
try:
|
||||
main()
|
||||
os._exit(0)
|
||||
except Exception as ex:
|
||||
sys.stderr.write(str(ex))
|
||||
sys.stderr.flush()
|
||||
os._exit(1)
|
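The sample coordinate script above receives its inputs as a pickled dict on stdin, with the keys unpacked in main(). A minimal sketch of driving such a script for local testing, assuming only that calling convention (this is not the actual Tacker v2 VNFM driver, and the script path is hypothetical):

# Rough sketch for exercising the sample coordinate script locally.
import pickle
import subprocess

script_dict = {
    'request': {}, 'vnf_instance': {}, 'grant_request': {},
    'grant_response': {}, 'tmp_csar_dir': '/tmp/csar', 'k8s_info': {},
}
proc = subprocess.run(
    ['python3', 'Scripts/coordinate_old_vnf.py'],  # hypothetical path
    input=pickle.dumps(script_dict), capture_output=True)
print(proc.returncode, proc.stderr.decode())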
@ -0,0 +1,144 @@
|
||||
TOSCA-Meta-File-Version: 1.0
|
||||
Created-by: dummy_user
|
||||
CSAR-Version: 1.1
|
||||
Entry-Definitions: Definitions/sample_cnf_top.vnfd.yaml
|
||||
|
||||
Name: Files/kubernetes/bindings.yaml
|
||||
Content-Type: test-data
|
||||
Algorithm: SHA-256
|
||||
Hash: b40d57afaa93fb67a4b167482cbf8d423c67fbfa0fb141eb62c7c102d4cda347
|
||||
|
||||
Name: Files/kubernetes/clusterrole_clusterrolebinding_SA.yaml
|
||||
Content-Type: test-data
|
||||
Algorithm: SHA-256
|
||||
Hash: f808fee02df7230a0e3026f97d745569aba6653a78b043c89bf82d0ba95833bd
|
||||
|
||||
Name: Files/kubernetes/config-map.yaml
|
||||
Content-Type: test-data
|
||||
Algorithm: SHA-256
|
||||
Hash: c6d71870559226244c47618ff4bfd59e9835c471dea2da84a136434f8f77ada0
|
||||
|
||||
Name: Files/kubernetes/controller-revision.yaml
|
||||
Content-Type: test-data
|
||||
Algorithm: SHA-256
|
||||
Hash: 4042352e0de6aa0ad28d44354bd8e0d62fc8e753c8f52b7edf69d2a7a25d8f8d
|
||||
|
||||
Name: Files/kubernetes/daemon-set.yaml
|
||||
Content-Type: test-data
|
||||
Algorithm: SHA-256
|
||||
Hash: 61b88b590692f735d3a30cb1aa6a93699e9720bf816ac08077b3a859a1634e8c
|
||||
|
||||
Name: Files/kubernetes/deployment.yaml
|
||||
Content-Type: test-data
|
||||
Algorithm: SHA-256
|
||||
Hash: ecfce3fe9d6734678825287e6aef29b423cb24c154b7eb95292be4145ad9e261
|
||||
|
||||
Name: Files/kubernetes/deployment_fail_test.yaml
|
||||
Content-Type: test-data
|
||||
Algorithm: SHA-256
|
||||
Hash: 2a83656567410141fb50086cdd3e73bd9fb9721bee444872c22338258dd076e0
|
||||
|
||||
Name: Files/kubernetes/horizontal-pod-autoscaler.yaml
|
||||
Content-Type: test-data
|
||||
Algorithm: SHA-256
|
||||
Hash: aa95058d04ef61159712e7c567220b3f5c275251d78b0000bc04575f23c55894
|
||||
|
||||
Name: Files/kubernetes/job.yaml
|
||||
Content-Type: test-data
|
||||
Algorithm: SHA-256
|
||||
Hash: 68fbadafed2a325231cb7d8cf70823611de422669c6e42029ba7d51cace0b978
|
||||
|
||||
Name: Files/kubernetes/limit-range.yaml
|
||||
Content-Type: test-data
|
||||
Algorithm: SHA-256
|
||||
Hash: 0cd1b42e0993471fed8b0876dcef8122b292aedf430a5ced6a028660a6aede9e
|
||||
|
||||
Name: Files/kubernetes/local-subject-access-review.yaml
|
||||
Content-Type: test-data
|
||||
Algorithm: SHA-256
|
||||
Hash: 01c4348cd59dd69667b92c76910043e067a69950078bea9479fc0a7bb09ff0e7
|
||||
|
||||
Name: Files/kubernetes/multiple_yaml_lease.yaml
|
||||
Content-Type: test-data
|
||||
Algorithm: SHA-256
|
||||
Hash: 03999b641569b3480c8d667b632c85c01ee707a93125343eee71b096181fa8c3
|
||||
|
||||
Name: Files/kubernetes/multiple_yaml_network-policy.yaml
|
||||
Content-Type: test-data
|
||||
Algorithm: SHA-256
|
||||
Hash: 98f8f8a055afe8e8ddfb26b02d938a457226e0a1afa03ef69623a734aec49295
|
||||
|
||||
Name: Files/kubernetes/multiple_yaml_priority-class.yaml
|
||||
Content-Type: test-data
|
||||
Algorithm: SHA-256
|
||||
Hash: 2b5aa46d52f29f0c5d82375a727ef15795d33f5c55c09fc7c3a8774ee713db1f
|
||||
|
||||
Name: Files/kubernetes/namespace.yaml
|
||||
Content-Type: test-data
|
||||
Algorithm: SHA-256
|
||||
Hash: c2af464e4b1646da9d2e6ccfdc44cf744753459a001c3469135d04dbb56bb293
|
||||
|
||||
Name: Files/kubernetes/persistent-volume-0.yaml
|
||||
Content-Type: test-data
|
||||
Algorithm: SHA-256
|
||||
Hash: a1e8fe505cb32672eb6d96c9b2e3178a6e0828aa41082c096f9fe29dc64f39f4
|
||||
|
||||
Name: Files/kubernetes/persistent-volume-1.yaml
|
||||
Content-Type: test-data
|
||||
Algorithm: SHA-256
|
||||
Hash: 934bb29d10c75053c244c9acb1cb259c4a5616dbe931a02da8072322aa76cabc
|
||||
|
||||
Name: Files/kubernetes/pod-template.yaml
|
||||
Content-Type: test-data
|
||||
Algorithm: SHA-256
|
||||
Hash: 5d4d3d399e04cdba1f9c691ac7e690e295ff02b7c935abae873b68a83a858c50
|
||||
|
||||
Name: Files/kubernetes/pod.yaml
|
||||
Content-Type: test-data
|
||||
Algorithm: SHA-256
|
||||
Hash: 766f27b71a25922a95f847a1b490bf80c3a1a00a2574dee058555602e4da0555
|
||||
|
||||
Name: Files/kubernetes/replicaset_service_secret.yaml
|
||||
Content-Type: test-data
|
||||
Algorithm: SHA-256
|
||||
Hash: 848537a1bb84053cf179a169d0b05036a79637bc234128908dce6c0d5d2abbdb
|
||||
|
||||
Name: Files/kubernetes/resource-quota.yaml
|
||||
Content-Type: test-data
|
||||
Algorithm: SHA-256
|
||||
Hash: 0cf5e5b69f0752a8c9b5ebb09aee2dccf49d53b580c0c1cb260a95d7f92c7861
|
||||
|
||||
Name: Files/kubernetes/role_rolebinding_SA.yaml
|
||||
Content-Type: test-data
|
||||
Algorithm: SHA-256
|
||||
Hash: 5d67ef70773d1673c3a115ab0f2fe2efebc841acaeafad056444e23e23664bbc
|
||||
|
||||
Name: Files/kubernetes/self-subject-access-review_and_self-subject-rule-review.yaml
|
||||
Content-Type: test-data
|
||||
Algorithm: SHA-256
|
||||
Hash: 83bd9c40db8c798d0cab0e793a4b40a4ac7eca4fec4fba89ab4257d0f397db40
|
||||
|
||||
Name: Files/kubernetes/statefulset.yaml
|
||||
Content-Type: test-data
|
||||
Algorithm: SHA-256
|
||||
Hash: 17770bc179cc8200bbdf827c46d1ae0156368c188045c2a032279e0382852214
|
||||
|
||||
Name: Files/kubernetes/storage-class.yaml
|
||||
Content-Type: test-data
|
||||
Algorithm: SHA-256
|
||||
Hash: ccde582b3c81019991a2753a73061f5954cf1fd5f5dfa2e4a0e2b4458b424cf5
|
||||
|
||||
Name: Files/kubernetes/storage-class_pv_pvc.yaml
|
||||
Content-Type: test-data
|
||||
Algorithm: SHA-256
|
||||
Hash: edc5e4d0b6c8e0c7e0e9ce199aa2b36b95d36442ff3daf309fb46f784ad14722
|
||||
|
||||
Name: Files/kubernetes/subject-access-review.yaml
|
||||
Content-Type: test-data
|
||||
Algorithm: SHA-256
|
||||
Hash: ef937e9c90c1cb6093092ba2043c11e353d572736b04f798a49b785049fec552
|
||||
|
||||
Name: Files/kubernetes/token-review.yaml
|
||||
Content-Type: test-data
|
||||
Algorithm: SHA-256
|
||||
Hash: 468d9d53a3125c5850c6473d324c94f00b91a1e3536d1a62c7c7eb80fd7aa6d2
|
@ -0,0 +1,74 @@
|
||||
# Copyright (C) 2022 Fujitsu
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import json
|
||||
import os
|
||||
import shutil
|
||||
import tempfile
|
||||
|
||||
from oslo_utils import uuidutils
|
||||
|
||||
from tacker.tests.functional.sol_kubernetes_v2 import paramgen
|
||||
from tacker.tests.functional.sol_v2 import utils
|
||||
|
||||
|
||||
zip_file_name = os.path.basename(os.path.abspath(".")) + '.zip'
|
||||
tmp_dir = tempfile.mkdtemp()
|
||||
vnfd_id = uuidutils.generate_uuid()
|
||||
|
||||
# tacker/tests/functional/sol_kubernetes_v2/samples/{package_name}
|
||||
utils.make_zip(".", tmp_dir, vnfd_id)
|
||||
|
||||
shutil.move(os.path.join(tmp_dir, zip_file_name), ".")
|
||||
shutil.rmtree(tmp_dir)
|
||||
|
||||
create_req = paramgen.test_instantiate_cnf_resources_create(vnfd_id)
|
||||
|
||||
# if you instantiate with all k8s resources
|
||||
# please change auth_url and bearer_token to your own k8s cluster's info
|
||||
auth_url = "https://127.0.0.1:6443"
|
||||
bearer_token = "your_k8s_cluster_bearer_token"
|
||||
max_sample_instantiate = paramgen.max_sample_instantiate(
|
||||
auth_url, bearer_token)
|
||||
|
||||
max_sample_terminate = paramgen.max_sample_terminate()
|
||||
|
||||
# if you instantiate with only one resource
|
||||
# please change vim_id to your own k8s vim id
|
||||
vim_id = "your k8s vim's id"
|
||||
min_sample_instantiate = paramgen.min_sample_instantiate(vim_id)
|
||||
min_sample_terminate = paramgen.min_sample_terminate()
|
||||
|
||||
# if you want to use `change_vnfpkg` operation
|
||||
change_vnfpkg_instantiate = paramgen.change_vnfpkg_instantiate(
|
||||
auth_url, bearer_token)
|
||||
|
||||
with open("create_req", "w", encoding='utf-8') as f:
|
||||
f.write(json.dumps(create_req, indent=2))
|
||||
|
||||
with open("max_sample_instantiate", "w", encoding='utf-8') as f:
|
||||
f.write(json.dumps(max_sample_instantiate, indent=2))
|
||||
|
||||
with open("max_sample_terminate", "w", encoding='utf-8') as f:
|
||||
f.write(json.dumps(max_sample_terminate, indent=2))
|
||||
|
||||
with open("min_sample_instantiate", "w", encoding='utf-8') as f:
|
||||
f.write(json.dumps(min_sample_instantiate, indent=2))
|
||||
|
||||
with open("min_sample_terminate", "w", encoding='utf-8') as f:
|
||||
f.write(json.dumps(min_sample_terminate, indent=2))
|
||||
|
||||
with open("change_vnfpkg_instantiate", "w", encoding='utf-8') as f:
|
||||
f.write(json.dumps(change_vnfpkg_instantiate, indent=2))
|
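The script above zips the sample package and writes the request bodies as JSON files containing placeholder credentials. As a hedged follow-up sketch (not part of this patch), the placeholders written above can be swapped for real values before the body is used:

# Hypothetical follow-up step: reload one of the generated request bodies
# and replace the placeholder credentials written by the script above.
import json

with open("max_sample_instantiate", encoding='utf-8') as f:
    body = f.read()
body = body.replace("your_k8s_cluster_bearer_token", "<real bearer token>")
body = body.replace("https://127.0.0.1:6443", "<real auth_url>")
instantiate_req = json.loads(body)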
330
tacker/tests/functional/sol_kubernetes_v2/test_change_vnfpkg.py
Normal file
@ -0,0 +1,330 @@
|
||||
# Copyright (C) 2022 FUJITSU
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import ddt
|
||||
import os
|
||||
import time
|
||||
|
||||
from tacker.tests.functional.sol_kubernetes_v2 import base_v2
|
||||
from tacker.tests.functional.sol_kubernetes_v2 import paramgen
|
||||
|
||||
|
||||
@ddt.ddt
|
||||
class VnfLcmKubernetesChangeVnfpkgTest(base_v2.BaseVnfLcmKubernetesV2Test):
|
||||
|
||||
@classmethod
|
||||
def setUpClass(cls):
|
||||
super(VnfLcmKubernetesChangeVnfpkgTest, cls).setUpClass()
|
||||
|
||||
cur_dir = os.path.dirname(__file__)
|
||||
|
||||
test_instantiate_cnf_resources_path = os.path.join(
|
||||
cur_dir, "samples/test_instantiate_cnf_resources")
|
||||
cls.vnf_pkg_1, cls.vnfd_id_1 = cls.create_vnf_package(
|
||||
test_instantiate_cnf_resources_path)
|
||||
|
||||
test_change_vnf_pkg_with_deployment_path = os.path.join(
|
||||
cur_dir, "samples/test_change_vnf_pkg_with_deployment")
|
||||
cls.vnf_pkg_2, cls.vnfd_id_2 = cls.create_vnf_package(
|
||||
test_change_vnf_pkg_with_deployment_path)
|
||||
|
||||
@classmethod
|
||||
def tearDownClass(cls):
|
||||
super(VnfLcmKubernetesChangeVnfpkgTest, cls).tearDownClass()
|
||||
|
||||
cls.delete_vnf_package(cls.vnf_pkg_1)
|
||||
cls.delete_vnf_package(cls.vnf_pkg_2)
|
||||
|
||||
def setUp(self):
|
||||
super(VnfLcmKubernetesChangeVnfpkgTest, self).setUp()
|
||||
|
||||
def test_change_vnfpkg_for_deployment_res_with_all_params(self):
|
||||
"""Test ChangeCurrentVNFPackage with all attributes set
|
||||
|
||||
* About attributes:
|
||||
All of the following cardinality attributes are set.
|
||||
In addition, 0..N or 1..N attributes are set to 2 or more.
|
||||
- 0..1 (1)
|
||||
- 0..N (2 or more)
|
||||
- 1
|
||||
- 1..N (2 or more)
|
||||
|
||||
* About LCM operations:
|
||||
This test includes the following operations.
|
||||
- 1. Create a new VNF instance resource
|
||||
- 2. Instantiate a VNF instance
|
||||
- 3. Show VNF instance
|
||||
- 4. Change Current VNF Package
|
||||
- 5. Show VNF instance
|
||||
- 6. Terminate a VNF instance
|
||||
- 7. Delete a VNF instance
|
||||
"""
|
||||
|
||||
# 1. Create a new VNF instance resource
|
||||
# NOTE: extensions and vnfConfigurableProperties are omitted
|
||||
# because they are commented out in etsi_nfv_sol001.
|
||||
expected_inst_attrs = [
|
||||
'id',
|
||||
'vnfInstanceName',
|
||||
'vnfInstanceDescription',
|
||||
'vnfdId',
|
||||
'vnfProvider',
|
||||
'vnfProductName',
|
||||
'vnfSoftwareVersion',
|
||||
'vnfdVersion',
|
||||
# 'vnfConfigurableProperties', # omitted
|
||||
# 'vimConnectionInfo', # omitted
|
||||
'instantiationState',
|
||||
# 'instantiatedVnfInfo', # omitted
|
||||
'metadata',
|
||||
# 'extensions', # omitted
|
||||
'_links'
|
||||
]
|
||||
create_req = paramgen.test_instantiate_cnf_resources_create(
|
||||
self.vnfd_id_1)
|
||||
resp, body = self.create_vnf_instance(create_req)
|
||||
self.assertEqual(201, resp.status_code)
|
||||
self.check_resp_headers_in_create(resp)
|
||||
self.check_resp_body(body, expected_inst_attrs)
|
||||
inst_id = body['id']
|
||||
|
||||
# check usageState of VNF Package
|
||||
usage_state = self.get_vnf_package(self.vnf_pkg_1)['usageState']
|
||||
self.assertEqual('IN_USE', usage_state)
|
||||
|
||||
# 2. Instantiate a VNF instance
|
||||
instantiate_req = paramgen.change_vnfpkg_instantiate(
|
||||
self.auth_url, self.bearer_token)
|
||||
resp, body = self.instantiate_vnf_instance(inst_id, instantiate_req)
|
||||
self.assertEqual(202, resp.status_code)
|
||||
self.check_resp_headers_in_operation_task(resp)
|
||||
|
||||
lcmocc_id = os.path.basename(resp.headers['Location'])
|
||||
self.wait_lcmocc_complete(lcmocc_id)
|
||||
|
||||
# check vnfc_resource_info
|
||||
# TODO()
|
||||
|
||||
# 3. Show VNF instance
|
||||
additional_inst_attrs = [
|
||||
'vimConnectionInfo',
|
||||
'instantiatedVnfInfo'
|
||||
]
|
||||
expected_inst_attrs.extend(additional_inst_attrs)
|
||||
resp, body = self.show_vnf_instance(inst_id)
|
||||
self.assertEqual(200, resp.status_code)
|
||||
self.check_resp_headers_in_get(resp)
|
||||
self.check_resp_body(body, expected_inst_attrs)
|
||||
|
||||
vnfc_resource_infos = body['instantiatedVnfInfo'].get(
|
||||
'vnfcResourceInfo')
|
||||
before_resource_ids = [vnfc_info['computeResource']['resourceId']
|
||||
for vnfc_info in vnfc_resource_infos]
|
||||
|
||||
# 4. Change Current VNF Package
|
||||
change_vnfpkg_req = paramgen.change_vnfpkg_all_params(self.vnfd_id_2)
|
||||
resp, body = self.change_vnfpkg(inst_id, change_vnfpkg_req)
|
||||
self.assertEqual(202, resp.status_code)
|
||||
self.check_resp_headers_in_operation_task(resp)
|
||||
|
||||
lcmocc_id = os.path.basename(resp.headers['Location'])
|
||||
self.wait_lcmocc_complete(lcmocc_id)
|
||||
|
||||
# check usageState of VNF Package
|
||||
usage_state = self.get_vnf_package(self.vnf_pkg_1).get('usageState')
|
||||
self.assertEqual('NOT_IN_USE', usage_state)
|
||||
# check usageState of VNF Package
|
||||
usage_state = self.get_vnf_package(self.vnf_pkg_2).get('usageState')
|
||||
self.assertEqual('IN_USE', usage_state)
|
||||
|
||||
# 5. Show VNF instance
|
||||
additional_inst_attrs = [
|
||||
'vimConnectionInfo',
|
||||
'instantiatedVnfInfo'
|
||||
]
|
||||
expected_inst_attrs.extend(additional_inst_attrs)
|
||||
resp, body = self.show_vnf_instance(inst_id)
|
||||
self.assertEqual(200, resp.status_code)
|
||||
self.check_resp_headers_in_get(resp)
|
||||
self.check_resp_body(body, expected_inst_attrs)
|
||||
|
||||
vnfc_resource_infos = body['instantiatedVnfInfo'].get(
|
||||
'vnfcResourceInfo')
|
||||
after_resource_ids = [vnfc_info['computeResource']['resourceId']
|
||||
for vnfc_info in vnfc_resource_infos]
|
||||
self.assertNotEqual(before_resource_ids, after_resource_ids)
|
||||
|
||||
# 6. Terminate a VNF instance
|
||||
terminate_req = paramgen.max_sample_terminate()
|
||||
resp, body = self.terminate_vnf_instance(inst_id, terminate_req)
|
||||
self.assertEqual(202, resp.status_code)
|
||||
self.check_resp_headers_in_operation_task(resp)
|
||||
|
||||
lcmocc_id = os.path.basename(resp.headers['Location'])
|
||||
self.wait_lcmocc_complete(lcmocc_id)
|
||||
|
||||
# wait a bit because there is a slight time lag between lcmocc DB
|
||||
# update and terminate completion.
|
||||
time.sleep(10)
|
||||
|
||||
# 7. Delete a VNF instance
|
||||
resp, body = self.delete_vnf_instance(inst_id)
|
||||
self.assertEqual(204, resp.status_code)
|
||||
self.check_resp_headers_in_delete(resp)
|
||||
|
||||
# check deletion of VNF instance
|
||||
resp, body = self.show_vnf_instance(inst_id)
|
||||
self.assertEqual(404, resp.status_code)
|
||||
|
||||
# check usageState of VNF Package
|
||||
usage_state = self.get_vnf_package(self.vnf_pkg_2).get('usageState')
|
||||
self.assertEqual('NOT_IN_USE', usage_state)
|
||||
|
||||
def test_change_vnfpkg_for_deployment_res_with_no_op_params(self):
|
||||
"""Test ChangeCurrentVNFPackage with no optional attributes
|
||||
|
||||
* About attributes:
|
||||
Omit except for required attributes.
|
||||
Only the following cardinality attributes are set.
|
||||
- 1
|
||||
- 1..N (1)
|
||||
|
||||
* About LCM operations:
|
||||
This test includes the following operations.
|
||||
- 1. Create a new VNF instance resource
|
||||
- 2. Instantiate a VNF instance
|
||||
- 3. Show VNF instance
|
||||
- 4. Change Current VNF Package
|
||||
- 5. Show VNF instance
|
||||
- 6. Terminate a VNF instance
|
||||
- 7. Delete a VNF instance
|
||||
"""
|
||||
|
||||
# 1. Create a new VNF instance resource
|
||||
# NOTE: extensions and vnfConfigurableProperties are omitted
|
||||
# because they are commented out in etsi_nfv_sol001.
|
||||
expected_inst_attrs = [
|
||||
'id',
|
||||
'vnfInstanceName',
|
||||
'vnfInstanceDescription',
|
||||
'vnfdId',
|
||||
'vnfProvider',
|
||||
'vnfProductName',
|
||||
'vnfSoftwareVersion',
|
||||
'vnfdVersion',
|
||||
# 'vnfConfigurableProperties', # omitted
|
||||
# 'vimConnectionInfo', # omitted
|
||||
'instantiationState',
|
||||
# 'instantiatedVnfInfo', # omitted
|
||||
'metadata',
|
||||
# 'extensions', # omitted
|
||||
'_links'
|
||||
]
|
||||
create_req = paramgen.test_instantiate_cnf_resources_create(
|
||||
self.vnfd_id_1)
|
||||
resp, body = self.create_vnf_instance(create_req)
|
||||
self.assertEqual(201, resp.status_code)
|
||||
self.check_resp_headers_in_create(resp)
|
||||
self.check_resp_body(body, expected_inst_attrs)
|
||||
inst_id = body['id']
|
||||
|
||||
# check usageState of VNF Package
|
||||
usage_state = self.get_vnf_package(self.vnf_pkg_1)['usageState']
|
||||
self.assertEqual('IN_USE', usage_state)
|
||||
|
||||
# 2. Instantiate a VNF instance
|
||||
vim_id = self.get_k8s_vim_id()
|
||||
instantiate_req = paramgen.change_vnfpkg_instantiate_min(vim_id)
|
||||
resp, body = self.instantiate_vnf_instance(inst_id, instantiate_req)
|
||||
self.assertEqual(202, resp.status_code)
|
||||
self.check_resp_headers_in_operation_task(resp)
|
||||
|
||||
lcmocc_id = os.path.basename(resp.headers['Location'])
|
||||
self.wait_lcmocc_complete(lcmocc_id)
|
||||
|
||||
# 3. Show VNF instance
|
||||
additional_inst_attrs = [
|
||||
'vimConnectionInfo',
|
||||
'instantiatedVnfInfo'
|
||||
]
|
||||
expected_inst_attrs.extend(additional_inst_attrs)
|
||||
resp, body = self.show_vnf_instance(inst_id)
|
||||
self.assertEqual(200, resp.status_code)
|
||||
self.check_resp_headers_in_get(resp)
|
||||
self.check_resp_body(body, expected_inst_attrs)
|
||||
|
||||
vnfc_resource_infos = body['instantiatedVnfInfo'].get(
|
||||
'vnfcResourceInfo')
|
||||
before_resource_ids = [vnfc_info['computeResource']['resourceId']
|
||||
for vnfc_info in vnfc_resource_infos]
|
||||
|
||||
# 4. Change Current VNF Package
|
||||
change_vnfpkg_req = paramgen.change_vnfpkg_min(self.vnfd_id_2)
|
||||
resp, body = self.change_vnfpkg(inst_id, change_vnfpkg_req)
|
||||
self.assertEqual(202, resp.status_code)
|
||||
self.check_resp_headers_in_operation_task(resp)
|
||||
|
||||
lcmocc_id = os.path.basename(resp.headers['Location'])
|
||||
self.wait_lcmocc_complete(lcmocc_id)
|
||||
|
||||
# check usageState of VNF Package
|
||||
usage_state = self.get_vnf_package(self.vnf_pkg_1).get('usageState')
|
||||
self.assertEqual('NOT_IN_USE', usage_state)
|
||||
# check usageState of VNF Package
|
||||
usage_state = self.get_vnf_package(self.vnf_pkg_2).get('usageState')
|
||||
self.assertEqual('IN_USE', usage_state)
|
||||
|
||||
# 5. Show VNF instance
|
||||
additional_inst_attrs = [
|
||||
'vimConnectionInfo',
|
||||
'instantiatedVnfInfo'
|
||||
]
|
||||
expected_inst_attrs.extend(additional_inst_attrs)
|
||||
resp, body = self.show_vnf_instance(inst_id)
|
||||
self.assertEqual(200, resp.status_code)
|
||||
self.check_resp_headers_in_get(resp)
|
||||
self.check_resp_body(body, expected_inst_attrs)
|
||||
|
||||
vnfc_resource_infos = body['instantiatedVnfInfo'].get(
|
||||
'vnfcResourceInfo')
|
||||
after_resource_ids = [vnfc_info['computeResource']['resourceId']
|
||||
for vnfc_info in vnfc_resource_infos]
|
||||
self.assertNotEqual(before_resource_ids, after_resource_ids)
|
||||
|
||||
# 6. Terminate a VNF instance
|
||||
terminate_req = paramgen.max_sample_terminate()
|
||||
resp, body = self.terminate_vnf_instance(inst_id, terminate_req)
|
||||
self.assertEqual(202, resp.status_code)
|
||||
self.check_resp_headers_in_operation_task(resp)
|
||||
|
||||
lcmocc_id = os.path.basename(resp.headers['Location'])
|
||||
self.wait_lcmocc_complete(lcmocc_id)
|
||||
|
||||
# wait a bit because there is a slight time lag between lcmocc DB
|
||||
# update and terminate completion.
|
||||
time.sleep(10)
|
||||
|
||||
# 7. Delete a VNF instance
|
||||
resp, body = self.delete_vnf_instance(inst_id)
|
||||
self.assertEqual(204, resp.status_code)
|
||||
self.check_resp_headers_in_delete(resp)
|
||||
|
||||
# check deletion of VNF instance
|
||||
resp, body = self.show_vnf_instance(inst_id)
|
||||
self.assertEqual(404, resp.status_code)
|
||||
|
||||
# check usageState of VNF Package
|
||||
usage_state = self.get_vnf_package(self.vnf_pkg_2).get('usageState')
|
||||
self.assertEqual('NOT_IN_USE', usage_state)
|
266
tacker/tests/functional/sol_kubernetes_v2/test_vnflcm_basic.py
Normal file
@ -0,0 +1,266 @@
|
||||
# Copyright (C) 2022 FUJITSU
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import ddt
|
||||
import os
|
||||
import time
|
||||
|
||||
from tacker.tests.functional.sol_kubernetes_v2 import base_v2
|
||||
from tacker.tests.functional.sol_kubernetes_v2 import paramgen
|
||||
|
||||
|
||||
@ddt.ddt
|
||||
class VnfLcmKubernetesTest(base_v2.BaseVnfLcmKubernetesV2Test):
|
||||
|
||||
@classmethod
|
||||
def setUpClass(cls):
|
||||
super(VnfLcmKubernetesTest, cls).setUpClass()
|
||||
|
||||
cur_dir = os.path.dirname(__file__)
|
||||
|
||||
test_instantiate_cnf_resources_path = os.path.join(
|
||||
cur_dir, "samples/test_instantiate_cnf_resources")
|
||||
cls.vnf_pkg_1, cls.vnfd_id_1 = cls.create_vnf_package(
|
||||
test_instantiate_cnf_resources_path)
|
||||
|
||||
@classmethod
|
||||
def tearDownClass(cls):
|
||||
super(VnfLcmKubernetesTest, cls).tearDownClass()
|
||||
|
||||
cls.delete_vnf_package(cls.vnf_pkg_1)
|
||||
|
||||
def setUp(self):
|
||||
super(VnfLcmKubernetesTest, self).setUp()
|
||||
|
||||
def test_basic_lcms_max(self):
|
||||
"""Test LCM operations with all attributes set
|
||||
|
||||
* About attributes:
|
||||
All of the following cardinality attributes are set.
|
||||
In addition, 0..N or 1..N attributes are set to 2 or more.
|
||||
- 0..1 (1)
|
||||
- 0..N (2 or more)
|
||||
- 1
|
||||
- 1..N (2 or more)
|
||||
|
||||
* About LCM operations:
|
||||
This test includes the following operations.
|
||||
- 1. Create a new VNF instance resource
|
||||
- 2. Instantiate a VNF instance
|
||||
- 3. Show VNF instance
|
||||
- 4. Terminate a VNF instance
|
||||
- 5. Delete a VNF instance
|
||||
"""
|
||||
|
||||
# 1. Create a new VNF instance resource
|
||||
# NOTE: extensions and vnfConfigurableProperties are omitted
|
||||
# because they are commented out in etsi_nfv_sol001.
|
||||
expected_inst_attrs = [
|
||||
'id',
|
||||
'vnfInstanceName',
|
||||
'vnfInstanceDescription',
|
||||
'vnfdId',
|
||||
'vnfProvider',
|
||||
'vnfProductName',
|
||||
'vnfSoftwareVersion',
|
||||
'vnfdVersion',
|
||||
# 'vnfConfigurableProperties', # omitted
|
||||
# 'vimConnectionInfo', # omitted
|
||||
'instantiationState',
|
||||
# 'instantiatedVnfInfo', # omitted
|
||||
'metadata',
|
||||
# 'extensions', # omitted
|
||||
'_links'
|
||||
]
|
||||
create_req = paramgen.test_instantiate_cnf_resources_create(
|
||||
self.vnfd_id_1)
|
||||
resp, body = self.create_vnf_instance(create_req)
|
||||
self.assertEqual(201, resp.status_code)
|
||||
self.check_resp_headers_in_create(resp)
|
||||
self.check_resp_body(body, expected_inst_attrs)
|
||||
inst_id = body['id']
|
||||
|
||||
# check usageState of VNF Package
|
||||
usage_state = self.get_vnf_package(self.vnf_pkg_1)['usageState']
|
||||
self.assertEqual('IN_USE', usage_state)
|
||||
|
||||
# 2. Instantiate a VNF instance
|
||||
instantiate_req = paramgen.max_sample_instantiate(
|
||||
self.auth_url, self.bearer_token)
|
||||
resp, body = self.instantiate_vnf_instance(inst_id, instantiate_req)
|
||||
self.assertEqual(202, resp.status_code)
|
||||
self.check_resp_headers_in_operation_task(resp)
|
||||
|
||||
lcmocc_id = os.path.basename(resp.headers['Location'])
|
||||
self.wait_lcmocc_complete(lcmocc_id)
|
||||
|
||||
# 3. Show VNF instance
|
||||
additional_inst_attrs = [
|
||||
'vimConnectionInfo',
|
||||
'instantiatedVnfInfo'
|
||||
]
|
||||
expected_inst_attrs.extend(additional_inst_attrs)
|
||||
resp, body = self.show_vnf_instance(inst_id)
|
||||
self.assertEqual(200, resp.status_code)
|
||||
self.check_resp_headers_in_get(resp)
|
||||
self.check_resp_body(body, expected_inst_attrs)
|
||||
|
||||
# check vnfc_resource_info
|
||||
vnfc_resource_infos = body['instantiatedVnfInfo'].get(
|
||||
'vnfcResourceInfo')
|
||||
for vnfc_info in vnfc_resource_infos:
|
||||
if vnfc_info['vduId'] == 'VDU1':
|
||||
self.assertEqual('Pod', vnfc_info[
|
||||
'computeResource']['vimLevelResourceType'])
|
||||
elif vnfc_info['vduId'] == 'VDU2':
|
||||
self.assertEqual('Deployment', vnfc_info[
|
||||
'computeResource']['vimLevelResourceType'])
|
||||
elif vnfc_info['vduId'] == 'VDU3':
|
||||
self.assertEqual('ReplicaSet', vnfc_info[
|
||||
'computeResource']['vimLevelResourceType'])
|
||||
elif vnfc_info['vduId'] == 'VDU5':
|
||||
self.assertEqual('StatefulSet', vnfc_info[
|
||||
'computeResource']['vimLevelResourceType'])
|
||||
elif vnfc_info['vduId'] == 'VDU6':
|
||||
self.assertEqual('DaemonSet', vnfc_info[
|
||||
'computeResource']['vimLevelResourceType'])
|
||||
|
||||
# 4. Terminate a VNF instance
|
||||
terminate_req = paramgen.max_sample_terminate()
|
||||
resp, body = self.terminate_vnf_instance(inst_id, terminate_req)
|
||||
self.assertEqual(202, resp.status_code)
|
||||
self.check_resp_headers_in_operation_task(resp)
|
||||
|
||||
lcmocc_id = os.path.basename(resp.headers['Location'])
|
||||
self.wait_lcmocc_complete(lcmocc_id)
|
||||
|
||||
# wait a bit because there is a slight time lag between lcmocc DB
|
||||
# update and terminate completion.
|
||||
time.sleep(10)
|
||||
|
||||
# 5. Delete a VNF instance
|
||||
resp, body = self.delete_vnf_instance(inst_id)
|
||||
self.assertEqual(204, resp.status_code)
|
||||
self.check_resp_headers_in_delete(resp)
|
||||
|
||||
# check deletion of VNF instance
|
||||
resp, body = self.show_vnf_instance(inst_id)
|
||||
self.assertEqual(404, resp.status_code)
|
||||
|
||||
# check usageState of VNF Package
|
||||
usage_state = self.get_vnf_package(self.vnf_pkg_1).get('usageState')
|
||||
self.assertEqual('NOT_IN_USE', usage_state)
|
||||
|
||||
def test_basic_lcms_min(self):
|
||||
"""Test LCM operations with all attributes set
|
||||
|
||||
* About attributes:
|
||||
Omit except for required attributes.
|
||||
Only the following cardinality attributes are set.
|
||||
- 1
|
||||
- 1..N (1)
|
||||
|
||||
* About LCM operations:
|
||||
This test includes the following operations.
|
||||
- 1. Create a new VNF instance resource
|
||||
- 2. Instantiate a VNF instance
|
||||
- 3. Show VNF instance
|
||||
- 4. Terminate a VNF instance
|
||||
- 5. Delete a VNF instance
|
||||
"""
|
||||
|
||||
# 1. Create a new VNF instance resource
|
||||
# NOTE: extensions and vnfConfigurableProperties are omitted
|
||||
# because they are commented out in etsi_nfv_sol001.
|
||||
expected_inst_attrs = [
|
||||
'id',
|
||||
'vnfInstanceName',
|
||||
'vnfInstanceDescription',
|
||||
'vnfdId',
|
||||
'vnfProvider',
|
||||
'vnfProductName',
|
||||
'vnfSoftwareVersion',
|
||||
'vnfdVersion',
|
||||
# 'vnfConfigurableProperties', # omitted
|
||||
# 'vimConnectionInfo', # omitted
|
||||
'instantiationState',
|
||||
# 'instantiatedVnfInfo', # omitted
|
||||
'metadata',
|
||||
# 'extensions', # omitted
|
||||
'_links'
|
||||
]
|
||||
create_req = paramgen.test_instantiate_cnf_resources_create(
|
||||
self.vnfd_id_1)
|
||||
resp, body = self.create_vnf_instance(create_req)
|
||||
self.assertEqual(201, resp.status_code)
|
||||
self.check_resp_headers_in_create(resp)
|
||||
self.check_resp_body(body, expected_inst_attrs)
|
||||
inst_id = body['id']
|
||||
|
||||
# check usageState of VNF Package
|
||||
usage_state = self.get_vnf_package(self.vnf_pkg_1)['usageState']
|
||||
self.assertEqual('IN_USE', usage_state)
|
||||
|
||||
# 2. Instantiate a VNF instance
|
||||
vim_id = self.get_k8s_vim_id()
|
||||
instantiate_req = paramgen.min_sample_instantiate(vim_id)
|
||||
resp, body = self.instantiate_vnf_instance(inst_id, instantiate_req)
|
||||
self.assertEqual(202, resp.status_code)
|
||||
self.check_resp_headers_in_operation_task(resp)
|
||||
|
||||
lcmocc_id = os.path.basename(resp.headers['Location'])
|
||||
self.wait_lcmocc_complete(lcmocc_id)
|
||||
|
||||
# 3. Show VNF instance
|
||||
additional_inst_attrs = [
|
||||
'vimConnectionInfo',
|
||||
'instantiatedVnfInfo'
|
||||
]
|
||||
expected_inst_attrs.extend(additional_inst_attrs)
|
||||
resp, body = self.show_vnf_instance(inst_id)
|
||||
self.assertEqual(200, resp.status_code)
|
||||
self.check_resp_headers_in_get(resp)
|
||||
self.check_resp_body(body, expected_inst_attrs)
|
||||
|
||||
# check vnfc_resource_info
|
||||
vnfc_resource_infos = body['instantiatedVnfInfo'].get(
|
||||
'vnfcResourceInfo')
|
||||
self.assertEqual(1, len(vnfc_resource_infos))
|
||||
|
||||
# 4. Terminate a VNF instance
|
||||
terminate_req = paramgen.max_sample_terminate()
|
||||
resp, body = self.terminate_vnf_instance(inst_id, terminate_req)
|
||||
self.assertEqual(202, resp.status_code)
|
||||
self.check_resp_headers_in_operation_task(resp)
|
||||
|
||||
lcmocc_id = os.path.basename(resp.headers['Location'])
|
||||
self.wait_lcmocc_complete(lcmocc_id)
|
||||
|
||||
# wait a bit because there is a slight time lag between lcmocc DB
|
||||
# update and terminate completion.
|
||||
time.sleep(10)
|
||||
|
||||
# 5. Delete a VNF instance
|
||||
resp, body = self.delete_vnf_instance(inst_id)
|
||||
self.assertEqual(204, resp.status_code)
|
||||
self.check_resp_headers_in_delete(resp)
|
||||
|
||||
# check deletion of VNF instance
|
||||
resp, body = self.show_vnf_instance(inst_id)
|
||||
self.assertEqual(404, resp.status_code)
|
||||
|
||||
# check usageState of VNF Package
|
||||
usage_state = self.get_vnf_package(self.vnf_pkg_1).get('usageState')
|
||||
self.assertEqual('NOT_IN_USE', usage_state)
|
@ -0,0 +1,471 @@
|
||||
# Copyright (C) 2022 FUJITSU
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import ddt
|
||||
import os
|
||||
import time
|
||||
|
||||
from tacker.tests.functional.sol_kubernetes_v2 import base_v2
|
||||
from tacker.tests.functional.sol_kubernetes_v2 import paramgen
|
||||
|
||||
|
||||
@ddt.ddt
|
||||
class VnfLcmKubernetesErrorHandingTest(base_v2.BaseVnfLcmKubernetesV2Test):
|
||||
|
||||
@classmethod
|
||||
def setUpClass(cls):
|
||||
super(VnfLcmKubernetesErrorHandingTest, cls).setUpClass()
|
||||
|
||||
cur_dir = os.path.dirname(__file__)
|
||||
|
||||
test_instantiate_cnf_resources_path = os.path.join(
|
||||
cur_dir, "samples/test_instantiate_cnf_resources")
|
||||
cls.vnf_pkg_1, cls.vnfd_id_1 = cls.create_vnf_package(
|
||||
test_instantiate_cnf_resources_path)
|
||||
|
||||
test_change_vnf_pkg_with_deployment_path = os.path.join(
|
||||
cur_dir, "samples/test_change_vnf_pkg_with_deployment")
|
||||
cls.vnf_pkg_2, cls.vnfd_id_2 = cls.create_vnf_package(
|
||||
test_change_vnf_pkg_with_deployment_path)
|
||||
|
||||
@classmethod
|
||||
def tearDownClass(cls):
|
||||
super(VnfLcmKubernetesErrorHandingTest, cls).tearDownClass()
|
||||
|
||||
cls.delete_vnf_package(cls.vnf_pkg_1)
|
||||
cls.delete_vnf_package(cls.vnf_pkg_2)
|
||||
|
||||
def setUp(self):
|
||||
super(VnfLcmKubernetesErrorHandingTest, self).setUp()
|
||||
|
||||
def test_change_vnfpkg_failed_in_update_wait_and_rollback(self):
|
||||
"""Test LCM operations error handing
|
||||
|
||||
* About LCM operations:
|
||||
This test includes the following operations.
|
||||
- 1. Create a new VNF instance resource
|
||||
- 2. Instantiate a VNF instance
|
||||
- 3. Show VNF instance
|
||||
- 4. Change Current VNF Package
|
||||
- 5. Rollback Change Current VNF Package
|
||||
- 6. Show VNF instance
|
||||
- 7. Terminate a VNF instance
|
||||
- 8. Delete a VNF instance
|
||||
"""
|
||||
|
||||
# 1. Create a new VNF instance resource
|
||||
# NOTE: extensions and vnfConfigurableProperties are omitted
|
||||
# because they are commented out in etsi_nfv_sol001.
|
||||
expected_inst_attrs = [
|
||||
'id',
|
||||
'vnfInstanceName',
|
||||
'vnfInstanceDescription',
|
||||
'vnfdId',
|
||||
'vnfProvider',
|
||||
'vnfProductName',
|
||||
'vnfSoftwareVersion',
|
||||
'vnfdVersion',
|
||||
# 'vnfConfigurableProperties', # omitted
|
||||
# 'vimConnectionInfo', # omitted
|
||||
'instantiationState',
|
||||
# 'instantiatedVnfInfo', # omitted
|
||||
'metadata',
|
||||
# 'extensions', # omitted
|
||||
'_links'
|
||||
]
|
||||
create_req = paramgen.test_instantiate_cnf_resources_create(
|
||||
self.vnfd_id_1)
|
||||
resp, body = self.create_vnf_instance(create_req)
|
||||
self.assertEqual(201, resp.status_code)
|
||||
self.check_resp_headers_in_create(resp)
|
||||
self.check_resp_body(body, expected_inst_attrs)
|
||||
inst_id = body['id']
|
||||
|
||||
# check usageState of VNF Package
|
||||
usage_state = self.get_vnf_package(self.vnf_pkg_1)['usageState']
|
||||
self.assertEqual('IN_USE', usage_state)
|
||||
|
||||
# 2. Instantiate a VNF instance
|
||||
vim_id = self.get_k8s_vim_id()
|
||||
instantiate_req = paramgen.change_vnfpkg_instantiate_error_handing(
|
||||
vim_id)
|
||||
resp, body = self.instantiate_vnf_instance(inst_id, instantiate_req)
|
||||
self.assertEqual(202, resp.status_code)
|
||||
self.check_resp_headers_in_operation_task(resp)
|
||||
|
||||
lcmocc_id = os.path.basename(resp.headers['Location'])
|
||||
self.wait_lcmocc_complete(lcmocc_id)
|
||||
|
||||
# 3. Show VNF instance
|
||||
additional_inst_attrs = [
|
||||
'vimConnectionInfo',
|
||||
'instantiatedVnfInfo'
|
||||
]
|
||||
expected_inst_attrs.extend(additional_inst_attrs)
|
||||
resp, body = self.show_vnf_instance(inst_id)
|
||||
self.assertEqual(200, resp.status_code)
|
||||
self.check_resp_headers_in_get(resp)
|
||||
self.check_resp_body(body, expected_inst_attrs)
|
||||
|
||||
vnfc_resource_infos = body['instantiatedVnfInfo'].get(
|
||||
'vnfcResourceInfo')
|
||||
before_resource_ids = [vnfc_info['computeResource']['resourceId']
|
||||
for vnfc_info in vnfc_resource_infos]
|
||||
|
||||
# 4. Change Current VNF Package (will fail)
|
||||
change_vnfpkg_req = paramgen.change_vnfpkg_error(self.vnfd_id_2)
|
||||
resp, body = self.change_vnfpkg(inst_id, change_vnfpkg_req)
|
||||
self.assertEqual(202, resp.status_code)
|
||||
self.check_resp_headers_in_operation_task(resp)
|
||||
|
||||
lcmocc_id = os.path.basename(resp.headers['Location'])
|
||||
self.wait_lcmocc_failed_temp(lcmocc_id)
|
||||
|
||||
# 5. Rollback Change Current VNF Package operation
|
||||
resp, body = self.rollback_lcmocc(lcmocc_id)
|
||||
self.assertEqual(202, resp.status_code)
|
||||
self.check_resp_headers_in_delete(resp)
|
||||
self.wait_lcmocc_rolled_back(lcmocc_id)
|
||||
|
||||
# 6. Show VNF instance
|
||||
additional_inst_attrs = [
|
||||
'vimConnectionInfo',
|
||||
'instantiatedVnfInfo'
|
||||
]
|
||||
expected_inst_attrs.extend(additional_inst_attrs)
|
||||
resp, body = self.show_vnf_instance(inst_id)
|
||||
self.assertEqual(200, resp.status_code)
|
||||
self.check_resp_headers_in_get(resp)
|
||||
self.check_resp_body(body, expected_inst_attrs)
|
||||
|
||||
vnfc_resource_infos = body['instantiatedVnfInfo'].get(
|
||||
'vnfcResourceInfo')
|
||||
after_resource_ids = [vnfc_info['computeResource']['resourceId']
|
||||
for vnfc_info in vnfc_resource_infos]
|
||||
self.assertNotEqual(before_resource_ids, after_resource_ids)
|
||||
|
||||
# 7. Terminate a VNF instance
|
||||
terminate_req = paramgen.max_sample_terminate()
|
||||
resp, body = self.terminate_vnf_instance(inst_id, terminate_req)
|
||||
self.assertEqual(202, resp.status_code)
|
||||
self.check_resp_headers_in_operation_task(resp)
|
||||
|
||||
lcmocc_id = os.path.basename(resp.headers['Location'])
|
||||
self.wait_lcmocc_complete(lcmocc_id)
|
||||
|
||||
# wait a bit because there is a slight time lag between lcmocc DB
|
||||
# update and terminate completion.
|
||||
time.sleep(10)
|
||||
|
||||
# 8. Delete a VNF instance
|
||||
resp, body = self.delete_vnf_instance(inst_id)
|
||||
self.assertEqual(204, resp.status_code)
|
||||
self.check_resp_headers_in_delete(resp)
|
||||
|
||||
# check deletion of VNF instance
|
||||
resp, body = self.show_vnf_instance(inst_id)
|
||||
self.assertEqual(404, resp.status_code)
|
||||
|
||||
# check usageState of VNF Package
|
||||
usage_state = self.get_vnf_package(self.vnf_pkg_2).get('usageState')
|
||||
self.assertEqual('NOT_IN_USE', usage_state)
|
||||
|
||||
def test_change_vnfpkg_failed_and_retry(self):
|
||||
"""Test LCM operations error handing
|
||||
|
||||
* About LCM operations:
|
||||
This test includes the following operations.
|
||||
- 1. Create a new VNF instance resource
|
||||
- 2. Instantiate a VNF instance
|
||||
- 3. Show VNF instance
|
||||
- 4. Change Current VNF Package (will fail)
|
||||
- 5. Retry Change Current VNF Package
|
||||
- 6. Rollback Change Current VNF Package
|
||||
- 7. Show VNF instance
|
||||
- 8. Terminate a VNF instance
|
||||
- 9. Delete a VNF instance
|
||||
"""
|
||||
|
||||
# 1. Create a new VNF instance resource
|
||||
# NOTE: extensions and vnfConfigurableProperties are omitted
|
||||
# because they are commented out in etsi_nfv_sol001.
|
||||
expected_inst_attrs = [
|
||||
'id',
|
||||
'vnfInstanceName',
|
||||
'vnfInstanceDescription',
|
||||
'vnfdId',
|
||||
'vnfProvider',
|
||||
'vnfProductName',
|
||||
'vnfSoftwareVersion',
|
||||
'vnfdVersion',
|
||||
# 'vnfConfigurableProperties', # omitted
|
||||
# 'vimConnectionInfo', # omitted
|
||||
'instantiationState',
|
||||
# 'instantiatedVnfInfo', # omitted
|
||||
'metadata',
|
||||
# 'extensions', # omitted
|
||||
'_links'
|
||||
]
|
||||
create_req = paramgen.test_instantiate_cnf_resources_create(
|
||||
self.vnfd_id_1)
|
||||
resp, body = self.create_vnf_instance(create_req)
|
||||
self.assertEqual(201, resp.status_code)
|
||||
self.check_resp_headers_in_create(resp)
|
||||
self.check_resp_body(body, expected_inst_attrs)
|
||||
inst_id = body['id']
|
||||
|
||||
# check usageState of VNF Package
|
||||
usage_state = self.get_vnf_package(self.vnf_pkg_1)['usageState']
|
||||
self.assertEqual('IN_USE', usage_state)
|
||||
|
||||
# 2. Instantiate a VNF instance
|
||||
vim_id = self.get_k8s_vim_id()
|
||||
instantiate_req = paramgen.change_vnfpkg_instantiate_error_handing(
|
||||
vim_id)
|
||||
resp, body = self.instantiate_vnf_instance(inst_id, instantiate_req)
|
||||
self.assertEqual(202, resp.status_code)
|
||||
self.check_resp_headers_in_operation_task(resp)
|
||||
|
||||
lcmocc_id = os.path.basename(resp.headers['Location'])
|
||||
self.wait_lcmocc_complete(lcmocc_id)
|
||||
|
||||
# 3. Show VNF instance
|
||||
additional_inst_attrs = [
|
||||
'vimConnectionInfo',
|
||||
'instantiatedVnfInfo'
|
||||
]
|
||||
expected_inst_attrs.extend(additional_inst_attrs)
|
||||
resp, body = self.show_vnf_instance(inst_id)
|
||||
self.assertEqual(200, resp.status_code)
|
||||
self.check_resp_headers_in_get(resp)
|
||||
self.check_resp_body(body, expected_inst_attrs)
|
||||
|
||||
vnfc_resource_infos = body['instantiatedVnfInfo'].get(
|
||||
'vnfcResourceInfo')
|
||||
before_resource_ids = [vnfc_info['computeResource']['resourceId']
|
||||
for vnfc_info in vnfc_resource_infos]
|
||||
|
||||
# 4. Change Current VNF Package (will fail)
|
||||
change_vnfpkg_req = paramgen.change_vnfpkg_error(self.vnfd_id_2)
|
||||
resp, body = self.change_vnfpkg(inst_id, change_vnfpkg_req)
|
||||
self.assertEqual(202, resp.status_code)
|
||||
self.check_resp_headers_in_operation_task(resp)
|
||||
|
||||
lcmocc_id = os.path.basename(resp.headers['Location'])
|
||||
self.wait_lcmocc_failed_temp(lcmocc_id)
|
||||
|
||||
# 5. Retry Change Current VNF Package operation
|
||||
resp, body = self.retry_lcmocc(lcmocc_id)
|
||||
self.assertEqual(202, resp.status_code)
|
||||
self.check_resp_headers_in_delete(resp)
|
||||
self.wait_lcmocc_failed_temp(lcmocc_id)
|
||||
|
||||
# 6. Rollback Change Current VNF Package operation
|
||||
resp, body = self.rollback_lcmocc(lcmocc_id)
|
||||
self.assertEqual(202, resp.status_code)
|
||||
self.check_resp_headers_in_delete(resp)
|
||||
self.wait_lcmocc_rolled_back(lcmocc_id)
|
||||
|
||||
# 7. Show VNF instance
|
||||
additional_inst_attrs = [
|
||||
'vimConnectionInfo',
|
||||
'instantiatedVnfInfo'
|
||||
]
|
||||
expected_inst_attrs.extend(additional_inst_attrs)
|
||||
resp, body = self.show_vnf_instance(inst_id)
|
||||
self.assertEqual(200, resp.status_code)
|
||||
self.check_resp_headers_in_get(resp)
|
||||
self.check_resp_body(body, expected_inst_attrs)
|
||||
|
||||
vnfc_resource_infos = body['instantiatedVnfInfo'].get(
|
||||
'vnfcResourceInfo')
|
||||
after_resource_ids = [vnfc_info['computeResource']['resourceId']
|
||||
for vnfc_info in vnfc_resource_infos]
|
||||
self.assertNotEqual(before_resource_ids, after_resource_ids)
|
||||
|
||||
# 8. Terminate a VNF instance
|
||||
terminate_req = paramgen.max_sample_terminate()
|
||||
resp, body = self.terminate_vnf_instance(inst_id, terminate_req)
|
||||
self.assertEqual(202, resp.status_code)
|
||||
self.check_resp_headers_in_operation_task(resp)
|
||||
|
||||
lcmocc_id = os.path.basename(resp.headers['Location'])
|
||||
self.wait_lcmocc_complete(lcmocc_id)
|
||||
|
||||
        # wait a bit because there is a slight time lag between lcmocc DB
|
||||
# update and terminate completion.
|
||||
time.sleep(10)
|
||||
|
||||
# 9. Delete a VNF instance
|
||||
resp, body = self.delete_vnf_instance(inst_id)
|
||||
self.assertEqual(204, resp.status_code)
|
||||
self.check_resp_headers_in_delete(resp)
|
||||
|
||||
# check deletion of VNF instance
|
||||
resp, body = self.show_vnf_instance(inst_id)
|
||||
self.assertEqual(404, resp.status_code)
|
||||
|
||||
# check usageState of VNF Package
|
||||
usage_state = self.get_vnf_package(self.vnf_pkg_2).get('usageState')
|
||||
self.assertEqual('NOT_IN_USE', usage_state)
|
||||
|
||||
def test_change_vnfpkg_failed_and_fail(self):
|
||||
"""Test LCM operations error handing
|
||||
|
||||
* About LCM operations:
|
||||
This test includes the following operations.
|
||||
- 1. Create a new VNF instance resource
|
||||
- 2. Instantiate a VNF instance
|
||||
- 3. Show VNF instance
|
||||
- 4. Change Current VNF Package
|
||||
- 5. Fail Change Current VNF Package
|
||||
- 6. Show VNF instance
|
||||
- 7. Terminate VNF instance
|
||||
- 8. Delete a VNF instance
|
||||
"""
|
||||
|
||||
# 1. Create a new VNF instance resource
|
||||
# NOTE: extensions and vnfConfigurableProperties are omitted
|
||||
# because they are commented out in etsi_nfv_sol001.
|
||||
expected_inst_attrs = [
|
||||
'id',
|
||||
'vnfInstanceName',
|
||||
'vnfInstanceDescription',
|
||||
'vnfdId',
|
||||
'vnfProvider',
|
||||
'vnfProductName',
|
||||
'vnfSoftwareVersion',
|
||||
'vnfdVersion',
|
||||
# 'vnfConfigurableProperties', # omitted
|
||||
# 'vimConnectionInfo', # omitted
|
||||
'instantiationState',
|
||||
# 'instantiatedVnfInfo', # omitted
|
||||
'metadata',
|
||||
# 'extensions', # omitted
|
||||
'_links'
|
||||
]
|
||||
create_req = paramgen.test_instantiate_cnf_resources_create(
|
||||
self.vnfd_id_1)
|
||||
resp, body = self.create_vnf_instance(create_req)
|
||||
self.assertEqual(201, resp.status_code)
|
||||
self.check_resp_headers_in_create(resp)
|
||||
self.check_resp_body(body, expected_inst_attrs)
|
||||
inst_id = body['id']
|
||||
|
||||
# check usageState of VNF Package
|
||||
usage_state = self.get_vnf_package(self.vnf_pkg_1)['usageState']
|
||||
self.assertEqual('IN_USE', usage_state)
|
||||
|
||||
# 2. Instantiate a VNF instance
|
||||
vim_id = self.get_k8s_vim_id()
|
||||
instantiate_req = paramgen.change_vnfpkg_instantiate_error_handing(
|
||||
vim_id)
|
||||
resp, body = self.instantiate_vnf_instance(inst_id, instantiate_req)
|
||||
self.assertEqual(202, resp.status_code)
|
||||
self.check_resp_headers_in_operation_task(resp)
|
||||
|
||||
lcmocc_id = os.path.basename(resp.headers['Location'])
|
||||
self.wait_lcmocc_complete(lcmocc_id)
|
||||
|
||||
# 3. Show VNF instance
|
||||
additional_inst_attrs = [
|
||||
'vimConnectionInfo',
|
||||
'instantiatedVnfInfo'
|
||||
]
|
||||
expected_inst_attrs.extend(additional_inst_attrs)
|
||||
resp, body = self.show_vnf_instance(inst_id)
|
||||
self.assertEqual(200, resp.status_code)
|
||||
self.check_resp_headers_in_get(resp)
|
||||
self.check_resp_body(body, expected_inst_attrs)
|
||||
|
||||
vnfc_resource_infos = body['instantiatedVnfInfo'].get(
|
||||
'vnfcResourceInfo')
|
||||
before_resource_ids = [vnfc_info['computeResource']['resourceId']
|
||||
for vnfc_info in vnfc_resource_infos]
|
||||
|
||||
# 4. Change Current VNF Package (will fail)
|
||||
change_vnfpkg_req = paramgen.change_vnfpkg_error(self.vnfd_id_2)
|
||||
resp, body = self.change_vnfpkg(inst_id, change_vnfpkg_req)
|
||||
self.assertEqual(202, resp.status_code)
|
||||
self.check_resp_headers_in_operation_task(resp)
|
||||
|
||||
lcmocc_id = os.path.basename(resp.headers['Location'])
|
||||
self.wait_lcmocc_failed_temp(lcmocc_id)
|
||||
|
||||
# 5. Fail Change Current VNF Package operation
|
||||
expected_inst_attrs_fail = [
|
||||
'id',
|
||||
'operationState',
|
||||
'stateEnteredTime',
|
||||
'startTime',
|
||||
'vnfInstanceId',
|
||||
'grantId',
|
||||
'operation',
|
||||
'isAutomaticInvocation',
|
||||
'operationParams',
|
||||
'isCancelPending',
|
||||
'error',
|
||||
'_links'
|
||||
]
|
||||
resp, body = self.fail_lcmocc(lcmocc_id)
|
||||
self.assertEqual(200, resp.status_code)
|
||||
self.check_resp_headers_in_get(resp)
|
||||
self.check_resp_body(body, expected_inst_attrs_fail)
|
||||
resp, body = self.show_lcmocc(lcmocc_id)
|
||||
self.assertEqual(200, resp.status_code)
|
||||
self.assertEqual('FAILED', body['operationState'])
|
||||
|
||||
# 6. Show VNF instance
|
||||
additional_inst_attrs = [
|
||||
'vimConnectionInfo',
|
||||
'instantiatedVnfInfo'
|
||||
]
|
||||
expected_inst_attrs.extend(additional_inst_attrs)
|
||||
resp, body = self.show_vnf_instance(inst_id)
|
||||
self.assertEqual(200, resp.status_code)
|
||||
self.check_resp_headers_in_get(resp)
|
||||
self.check_resp_body(body, expected_inst_attrs)
|
||||
|
||||
vnfc_resource_infos = body['instantiatedVnfInfo'].get(
|
||||
'vnfcResourceInfo')
|
||||
after_resource_ids = [vnfc_info['computeResource']['resourceId']
|
||||
for vnfc_info in vnfc_resource_infos]
|
||||
self.assertEqual(before_resource_ids, after_resource_ids)
|
||||
|
||||
# 7. Terminate a VNF instance
|
||||
terminate_req = paramgen.max_sample_terminate()
|
||||
resp, body = self.terminate_vnf_instance(inst_id, terminate_req)
|
||||
self.assertEqual(202, resp.status_code)
|
||||
self.check_resp_headers_in_operation_task(resp)
|
||||
|
||||
lcmocc_id = os.path.basename(resp.headers['Location'])
|
||||
self.wait_lcmocc_complete(lcmocc_id)
|
||||
|
||||
        # wait a bit because there is a slight time lag between lcmocc DB
|
||||
# update and terminate completion.
|
||||
time.sleep(10)
|
||||
|
||||
# 8. Delete a VNF instance
|
||||
resp, body = self.delete_vnf_instance(inst_id)
|
||||
self.assertEqual(204, resp.status_code)
|
||||
self.check_resp_headers_in_delete(resp)
|
||||
|
||||
# check deletion of VNF instance
|
||||
resp, body = self.show_vnf_instance(inst_id)
|
||||
self.assertEqual(404, resp.status_code)
|
||||
|
||||
# check usageState of VNF Package
|
||||
usage_state = self.get_vnf_package(self.vnf_pkg_2).get('usageState')
|
||||
self.assertEqual('NOT_IN_USE', usage_state)
|
@ -22,12 +22,14 @@ from oslo_utils import uuidutils
|
||||
from tacker import context
|
||||
from tacker.sol_refactored.common import vnfd_utils
|
||||
from tacker.sol_refactored.conductor import vnflcm_driver_v2
|
||||
from tacker.sol_refactored.infra_drivers.kubernetes import kubernetes
|
||||
from tacker.sol_refactored.nfvo import nfvo_client
|
||||
from tacker.sol_refactored import objects
|
||||
from tacker.sol_refactored.objects.v2 import fields
|
||||
from tacker.tests import base
|
||||
|
||||
|
||||
CNF_SAMPLE_VNFD_ID = "b1bb0ce7-ebca-4fa7-95ed-4840d70a1177"
|
||||
SAMPLE_VNFD_ID = "b1bb0ce7-ebca-4fa7-95ed-4840d7000000"
|
||||
SAMPLE_FLAVOUR_ID = "simple"
|
||||
|
||||
@ -125,6 +127,28 @@ _inst_req_example = {
|
||||
}
|
||||
}
|
||||
}
|
||||
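# InstantiateVnfRequest example for CNF instantiate/terminate grant tests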
_inst_cnf_req_example = {
|
||||
"flavourId": "simple",
|
||||
"additionalParams": {
|
||||
"lcm-kubernetes-def-files": [
|
||||
"Files/kubernetes/deployment.yaml",
|
||||
"Files/kubernetes/namespace.yaml",
|
||||
"Files/kubernetes/pod.yaml",
|
||||
],
|
||||
"namespace": "curry"
|
||||
},
|
||||
"vimConnectionInfo": {
|
||||
"vim1": {
|
||||
"vimType": "kubernetes",
|
||||
"vimId": uuidutils.generate_uuid(),
|
||||
"interfaceInfo": {"endpoint": "https://127.0.0.1:6443"},
|
||||
"accessInfo": {
|
||||
"bearer_token": "secret_token",
|
||||
"region": "RegionOne"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# ChangeExtVnfConnectivityRequest example for change_ext_conn grant test
|
||||
_ext_vl_3 = {
|
||||
@ -577,7 +601,63 @@ _inst_info_example = {
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
# instantiatedVnfInfo example for CNF terminate
|
||||
_inst_info_cnf_example = {
|
||||
"flavourId": "simple",
|
||||
"vnfState": "STARTED",
|
||||
"vnfcResourceInfo": [
|
||||
{
|
||||
"id": "c8cb522d-ddf8-4136-9c85-92bab8f2993d",
|
||||
"vduId": "VDU1",
|
||||
"computeResource": {
|
||||
"resourceId": "vdu1-5588797866-fs6vb",
|
||||
"vimLevelResourceType": "OS::Nova::Server"
|
||||
},
|
||||
"metadata": {
|
||||
"Pod": {
|
||||
"name": "vdu1-5588797866-fs6vb",
|
||||
"namespace": "curry"
|
||||
},
|
||||
"Deployment": {
|
||||
"name": "vdu1",
|
||||
"namespace": "curry"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "124e74c2-cc0d-f187-add2-2000326c195b",
|
||||
"vduId": "VDU1",
|
||||
"computeResource": {
|
||||
"resourceId": "vdu1-5588797866-v8sl2",
|
||||
"vimLevelResourceType": "Deployment",
|
||||
},
|
||||
"metadata": {
|
||||
"Pod": {
|
||||
"name": "vdu1-5588797866-v8sl2",
|
||||
"namespace": "curry"
|
||||
},
|
||||
"Deployment": {
|
||||
"name": "vdu1",
|
||||
"namespace": "curry"
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"id": "55008a17-956b-66a4-77e3-340723695bac",
|
||||
"vduId": "VDU2",
|
||||
"computeResource": {
|
||||
"resourceId": "vdu2",
|
||||
"vimLevelResourceType": "Pod",
|
||||
},
|
||||
"metadata": {
|
||||
"Pod": {
|
||||
"name": "vdu2",
|
||||
"namespace": "curry"
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
# modify_info_process example
|
||||
_modify_inst_example = {
|
||||
"vnfInstanceName": "instance_name",
|
||||
@ -649,6 +729,32 @@ _change_vnfpkg_example = {
|
||||
}]
|
||||
}
|
||||
}
|
||||
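# ChangeCurrentVnfPkgRequest example for CNF change_vnfpkg test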
_change_cnf_vnfpkg_example = {
|
||||
"vnfdId": 'ff60b74a-df4d-5c78-f5bf-19e129da8fff',
|
||||
"additionalParams": {
|
||||
"upgrade_type": "RollingUpdate",
|
||||
"lcm-operation-coordinate-old-vnf": "Scripts/coordinate_old_vnf.py",
|
||||
"lcm-operation-coordinate-old-vnf-class": "CoordinateOldVnf",
|
||||
"lcm-operation-coordinate-new-vnf": "Scripts/coordinate_new_vnf.py",
|
||||
"lcm-operation-coordinate-new-vnf-class": "CoordinateNewVnf",
|
||||
"lcm-kubernetes-def-files": [
|
||||
"Files/new_kubernetes/new_deployment.yaml"
|
||||
],
|
||||
"vdu_params": [{
|
||||
"vduId": "VDU1"
|
||||
}]
|
||||
}
|
||||
}
|
||||
_update_resources = {
|
||||
"affectedVnfcs": [{
|
||||
"metadata": {
|
||||
"Deployment": {
|
||||
"name": "vdu1"
|
||||
}
|
||||
},
|
||||
"changeType": "ADDED"
|
||||
}]
|
||||
}
|
||||
|
||||
|
||||
class TestVnfLcmDriverV2(base.BaseTestCase):
|
||||
@ -665,6 +771,13 @@ class TestVnfLcmDriverV2(base.BaseTestCase):
|
||||
self.vnfd_1 = vnfd_utils.Vnfd(SAMPLE_VNFD_ID)
|
||||
self.vnfd_1.init_from_csar_dir(os.path.join(sample_dir, "sample1"))
|
||||
|
||||
self.vnfd_2 = vnfd_utils.Vnfd(CNF_SAMPLE_VNFD_ID)
|
||||
self.vnfd_2.init_from_csar_dir(os.path.join(sample_dir, "sample2"))
|
||||
|
||||
self.vnfd_3 = vnfd_utils.Vnfd(CNF_SAMPLE_VNFD_ID)
|
||||
self.vnfd_3.init_from_csar_dir(os.path.join(sample_dir,
|
||||
"change_vnfpkg_sample"))
|
||||
|
||||
def _grant_req_links(self, lcmocc_id, inst_id):
|
||||
return {
|
||||
'vnfLcmOpOcc': {
|
||||
@ -1901,3 +2014,175 @@ class TestVnfLcmDriverV2(base.BaseTestCase):
|
||||
self.assertEqual(
|
||||
check_reses[j]['vimLevelResourceType'],
|
||||
remove_reses[j]['resource']['vimLevelResourceType'])
|
||||
|
||||
@mock.patch.object(nfvo_client.NfvoClient, 'grant')
|
||||
def test_cnf_instantiate_grant(self, mocked_grant):
|
||||
# prepare
|
||||
req = objects.InstantiateVnfRequest.from_dict(_inst_cnf_req_example)
|
||||
inst = objects.VnfInstanceV2(
|
||||
# required fields
|
||||
id=uuidutils.generate_uuid(),
|
||||
vnfdId=CNF_SAMPLE_VNFD_ID,
|
||||
vnfProvider='provider',
|
||||
vnfProductName='product name',
|
||||
vnfSoftwareVersion='software version',
|
||||
vnfdVersion='vnfd version',
|
||||
instantiationState='NOT_INSTANTIATED'
|
||||
)
|
||||
lcmocc = objects.VnfLcmOpOccV2(
|
||||
# required fields
|
||||
id=uuidutils.generate_uuid(),
|
||||
operationState=fields.LcmOperationStateType.STARTING,
|
||||
stateEnteredTime=datetime.utcnow(),
|
||||
startTime=datetime.utcnow(),
|
||||
vnfInstanceId=inst.id,
|
||||
operation=fields.LcmOperationType.INSTANTIATE,
|
||||
isAutomaticInvocation=False,
|
||||
isCancelPending=False,
|
||||
operationParams=req)
|
||||
|
||||
mocked_grant.return_value = objects.GrantV1()
|
||||
|
||||
# run instantiate_grant
|
||||
grant_req, _ = self.driver.grant(
|
||||
self.context, lcmocc, inst, self.vnfd_2)
|
||||
|
||||
# check grant_req is constructed according to intention
|
||||
grant_req = grant_req.to_dict()
|
||||
expected_fixed_items = {
|
||||
'vnfInstanceId': inst.id,
|
||||
'vnfLcmOpOccId': lcmocc.id,
|
||||
'vnfdId': CNF_SAMPLE_VNFD_ID,
|
||||
'flavourId': SAMPLE_FLAVOUR_ID,
|
||||
'operation': 'INSTANTIATE',
|
||||
'isAutomaticInvocation': False,
|
||||
'_links': self._grant_req_links(lcmocc.id, inst.id)
|
||||
}
|
||||
for key, value in expected_fixed_items.items():
|
||||
self.assertEqual(value, grant_req[key])
|
||||
|
||||
add_reses = grant_req['addResources']
|
||||
check_reses = {
|
||||
'COMPUTE': {'VDU1': [], 'VDU2': []}
|
||||
}
|
||||
expected_num = {
|
||||
'COMPUTE': {'VDU1': 2, 'VDU2': 1}
|
||||
}
|
||||
for res in add_reses:
|
||||
check_reses[res['type']][res['resourceTemplateId']].append(
|
||||
res['id'])
|
||||
|
||||
for key, value in check_reses.items():
|
||||
for name, ids in value.items():
|
||||
self.assertEqual(expected_num[key][name], len(ids))
|
||||
|
||||
@mock.patch.object(nfvo_client.NfvoClient, 'grant')
|
||||
def test_cnf_terminate_grant(self, mocked_grant):
|
||||
# prepare
|
||||
inst = objects.VnfInstanceV2(
|
||||
# required fields
|
||||
id=uuidutils.generate_uuid(),
|
||||
vnfdId=CNF_SAMPLE_VNFD_ID,
|
||||
vnfProvider='provider',
|
||||
vnfProductName='product name',
|
||||
vnfSoftwareVersion='software version',
|
||||
vnfdVersion='vnfd version',
|
||||
instantiationState='INSTANTIATED'
|
||||
)
|
||||
inst_info = objects.VnfInstanceV2_InstantiatedVnfInfo.from_dict(
|
||||
_inst_info_cnf_example)
|
||||
inst.instantiatedVnfInfo = inst_info
|
||||
req = objects.TerminateVnfRequest.from_dict(
|
||||
{"terminationType": "FORCEFUL"})
|
||||
lcmocc = objects.VnfLcmOpOccV2(
|
||||
# required fields
|
||||
id=uuidutils.generate_uuid(),
|
||||
operationState=fields.LcmOperationStateType.STARTING,
|
||||
stateEnteredTime=datetime.utcnow(),
|
||||
startTime=datetime.utcnow(),
|
||||
vnfInstanceId=inst.id,
|
||||
operation=fields.LcmOperationType.TERMINATE,
|
||||
isAutomaticInvocation=False,
|
||||
isCancelPending=False,
|
||||
operationParams=req)
|
||||
|
||||
mocked_grant.return_value = objects.GrantV1()
|
||||
|
||||
# run terminate_grant
|
||||
grant_req, _ = self.driver.grant(
|
||||
self.context, lcmocc, inst, self.vnfd_2)
|
||||
|
||||
# check grant_req is constructed according to intention
|
||||
grant_req = grant_req.to_dict()
|
||||
expected_fixed_items = {
|
||||
'vnfInstanceId': inst.id,
|
||||
'vnfLcmOpOccId': lcmocc.id,
|
||||
'vnfdId': CNF_SAMPLE_VNFD_ID,
|
||||
'operation': 'TERMINATE',
|
||||
'isAutomaticInvocation': False,
|
||||
'_links': self._grant_req_links(lcmocc.id, inst.id)
|
||||
}
|
||||
for key, value in expected_fixed_items.items():
|
||||
self.assertEqual(value, grant_req[key])
|
||||
|
||||
rm_reses = grant_req['removeResources']
|
||||
check_reses = {
|
||||
'COMPUTE': {'VDU1': [], 'VDU2': []}
|
||||
}
|
||||
expected_res_ids = {
|
||||
'COMPUTE': {
|
||||
'VDU1': ['vdu1-5588797866-fs6vb', 'vdu1-5588797866-v8sl2'],
|
||||
'VDU2': ['vdu2']
|
||||
}
|
||||
}
|
||||
for res in rm_reses:
|
||||
check_reses[res['type']][res['resourceTemplateId']].append(
|
||||
res['resource']['resourceId'])
|
||||
|
||||
for key, value in check_reses.items():
|
||||
for name, ids in value.items():
|
||||
self.assertEqual(expected_res_ids[key][name], ids)
|
||||
|
||||
@mock.patch.object(kubernetes.Kubernetes, 'change_vnfpkg')
|
||||
@mock.patch.object(nfvo_client.NfvoClient, 'get_vnfd')
|
||||
def test_cnf_change_vnfpkg(self, mock_vnfd, mock_change_vnfpkg):
|
||||
# prepare
|
||||
req_inst = objects.InstantiateVnfRequest.from_dict(
|
||||
_inst_cnf_req_example)
|
||||
inst = objects.VnfInstanceV2(
|
||||
# required fields
|
||||
id=uuidutils.generate_uuid(),
|
||||
vnfdId=CNF_SAMPLE_VNFD_ID,
|
||||
vnfProvider='provider',
|
||||
vnfProductName='product name',
|
||||
vnfSoftwareVersion='software version',
|
||||
vnfdVersion='vnfd version',
|
||||
instantiationState='INSTANTIATED',
|
||||
vimConnectionInfo=req_inst.vimConnectionInfo,
|
||||
metadata={'lcm-kubernetes-def-files': [
|
||||
'Files/kubernetes/deployment.yaml']}
|
||||
)
|
||||
inst_info = objects.VnfInstanceV2_InstantiatedVnfInfo.from_dict(
|
||||
_inst_info_cnf_example)
|
||||
inst.instantiatedVnfInfo = inst_info
|
||||
|
||||
req = objects.ChangeCurrentVnfPkgRequest.from_dict(
|
||||
_change_cnf_vnfpkg_example)
|
||||
grant_req = objects.GrantRequestV1(
|
||||
operation=fields.LcmOperationType.CHANGE_VNFPKG
|
||||
)
|
||||
grant = objects.GrantV1()
|
||||
lcmocc = objects.VnfLcmOpOccV2(
|
||||
# required fields
|
||||
id=uuidutils.generate_uuid(),
|
||||
operationState=fields.LcmOperationStateType.STARTING,
|
||||
stateEnteredTime=datetime.utcnow(),
|
||||
startTime=datetime.utcnow(),
|
||||
vnfInstanceId=inst.id,
|
||||
operation=fields.LcmOperationType.CHANGE_VNFPKG,
|
||||
isAutomaticInvocation=False,
|
||||
isCancelPending=False,
|
||||
operationParams=req)
|
||||
mock_vnfd.return_value = self.vnfd_2
|
||||
self.driver.change_vnfpkg_process(
|
||||
self.context, lcmocc, inst, grant_req, grant, self.vnfd_3)
|
||||
|
@ -0,0 +1,546 @@
|
||||
# Copyright (C) 2022 FUJITSU
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
from kubernetes import client
|
||||
|
||||
|
||||
def fake_namespace():
|
||||
return client.V1Namespace(
|
||||
api_version='v1',
|
||||
kind='Namespace',
|
||||
metadata=client.V1ObjectMeta(
|
||||
name='curry'
|
||||
),
|
||||
status=client.V1NamespaceStatus(
|
||||
phase='Active'
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def fake_deployment(ready_replicas=0):
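    # Deployment with spec.replicas=2; ready_replicas is parameterized so
    # tests can simulate both in-progress and completed rollouts.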
|
||||
return client.V1Deployment(
|
||||
api_version='apps/v1',
|
||||
kind='Deployment',
|
||||
metadata=client.V1ObjectMeta(
|
||||
name='vdu1',
|
||||
namespace='curry'
|
||||
),
|
||||
status=client.V1DeploymentStatus(
|
||||
replicas=2,
|
||||
ready_replicas=ready_replicas
|
||||
),
|
||||
spec=client.V1DeploymentSpec(
|
||||
replicas=2,
|
||||
selector=client.V1LabelSelector(
|
||||
match_labels={'app': 'webserver'}
|
||||
),
|
||||
template=client.V1PodTemplateSpec(
|
||||
metadata=client.V1ObjectMeta(
|
||||
labels={'app': 'webserver'}
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def fake_pods(name1='vdu1-5588797866-fs6vb',
|
||||
name2='vdu1-5588797866-v8sl2',
|
||||
failed_pod=False):
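    # Two Running pods for VDU1; when failed_pod is True, an extra pod in
    # Unknown phase is appended to simulate a partially failed rollout.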
|
||||
common_pods = client.V1PodList(
|
||||
items=[client.V1Pod(
|
||||
metadata=client.V1ObjectMeta(
|
||||
name=name1
|
||||
),
|
||||
status=client.V1PodStatus(
|
||||
phase="Running"
|
||||
)
|
||||
), client.V1Pod(
|
||||
metadata=client.V1ObjectMeta(
|
||||
name=name2
|
||||
),
|
||||
status=client.V1PodStatus(
|
||||
phase="Running"
|
||||
)
|
||||
)]
|
||||
)
|
||||
if failed_pod:
|
||||
common_pods.items.append(client.V1Pod(
|
||||
metadata=client.V1ObjectMeta(
|
||||
name='vdu1-5588797866-v9644'
|
||||
),
|
||||
status=client.V1PodStatus(
|
||||
phase="Unknown"
|
||||
)
|
||||
))
|
||||
return common_pods
|
||||
|
||||
|
||||
def fake_pod_vdu2(name='vdu2-v8sl2'):
|
||||
return client.V1PodList(
|
||||
items=[client.V1Pod(
|
||||
metadata=client.V1ObjectMeta(
|
||||
name=name
|
||||
),
|
||||
status=client.V1PodStatus(
|
||||
phase="Running"
|
||||
)
|
||||
)]
|
||||
)
|
||||
|
||||
|
||||
def fake_none():
|
||||
return client.exceptions.ApiException
|
||||
|
||||
|
||||
def fake_sa():
|
||||
return client.V1ServiceAccount(
|
||||
api_version='v1',
|
||||
kind='ServiceAccount',
|
||||
metadata=client.V1ObjectMeta(
|
||||
name='curry-cluster-sa',
|
||||
namespace='default'
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def fake_cluster_role():
|
||||
return client.V1ClusterRole(
|
||||
api_version='rbac.authorization.k8s.io/v1',
|
||||
kind='ClusterRole',
|
||||
metadata=client.V1ObjectMeta(
|
||||
name='curry-cluster-role'
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def fake_cluster_role_binding():
|
||||
return client.V1ClusterRoleBinding(
|
||||
api_version='rbac.authorization.k8s.io/v1',
|
||||
kind='ClusterRoleBinding',
|
||||
metadata=client.V1ObjectMeta(
|
||||
name='curry-cluster-rolebinding'
|
||||
),
|
||||
role_ref='test'
|
||||
)
|
||||
|
||||
|
||||
def fake_role():
|
||||
return client.V1Role(
|
||||
api_version='rbac.authorization.k8s.io/v1',
|
||||
kind='Role',
|
||||
metadata=client.V1ObjectMeta(
|
||||
name='curry-role'
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def fake_role_binding():
|
||||
return client.V1RoleBinding(
|
||||
api_version='rbac.authorization.k8s.io/v1',
|
||||
kind='RoleBinding',
|
||||
metadata=client.V1ObjectMeta(
|
||||
            name='curry-rolebinding'
|
||||
),
|
||||
role_ref='test'
|
||||
)
|
||||
|
||||
|
||||
def fake_config_map():
|
||||
return client.V1ConfigMap(
|
||||
api_version='v1',
|
||||
kind='ConfigMap',
|
||||
metadata=client.V1ObjectMeta(
|
||||
name='curry-test001',
|
||||
namespace='default'
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def fake_cr():
|
||||
return client.V1ControllerRevision(
|
||||
api_version='apps/v1',
|
||||
kind='ControllerRevision',
|
||||
metadata=client.V1ObjectMeta(
|
||||
name='curry-test001',
|
||||
namespace='default'
|
||||
),
|
||||
revision='test'
|
||||
)
|
||||
|
||||
|
||||
def fake_daemon_set(number_ready=0):
|
||||
return client.V1DaemonSet(
|
||||
api_version='apps/v1',
|
||||
kind='DaemonSet',
|
||||
metadata=client.V1ObjectMeta(
|
||||
name='curry-test001',
|
||||
namespace='default'
|
||||
),
|
||||
status=client.V1DaemonSetStatus(
|
||||
number_ready=number_ready,
|
||||
desired_number_scheduled=1,
|
||||
current_number_scheduled=1,
|
||||
number_misscheduled=0,
|
||||
),
|
||||
spec=client.V1DaemonSetSpec(
|
||||
selector=client.V1LabelSelector(
|
||||
match_labels={'app': 'webserver'}
|
||||
),
|
||||
template=client.V1PodTemplateSpec(
|
||||
metadata=client.V1ObjectMeta(
|
||||
labels={'app': 'webserver'}
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def fake_hpa():
|
||||
return client.V1HorizontalPodAutoscaler(
|
||||
api_version='autoscaling/v1',
|
||||
kind='HorizontalPodAutoscaler',
|
||||
metadata=client.V1ObjectMeta(
|
||||
name='curry-hpa-vdu001',
|
||||
namespace='default'
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def fake_job(succeeded=1):
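    # Job with spec.completions=5; the succeeded count is parameterized so
    # tests can cover both finished and unfinished jobs.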
|
||||
return client.V1Job(
|
||||
api_version='batch/v1',
|
||||
kind='Job',
|
||||
metadata=client.V1ObjectMeta(
|
||||
name='curry-test001',
|
||||
namespace='default'
|
||||
),
|
||||
spec=client.V1JobSpec(
|
||||
completions=5,
|
||||
template=client.V1PodTemplateSpec(
|
||||
metadata=client.V1ObjectMeta(
|
||||
name='curry-test001',
|
||||
namespace='default'
|
||||
),
|
||||
spec=client.V1PodSpec(
|
||||
hostname='job',
|
||||
containers=['image']
|
||||
)
|
||||
)
|
||||
),
|
||||
status=client.V1JobStatus(
|
||||
succeeded=succeeded,
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def fake_lr():
|
||||
return client.V1LimitRange(
|
||||
api_version='v1',
|
||||
kind='LimitRange',
|
||||
metadata=client.V1ObjectMeta(
|
||||
name='limits',
|
||||
namespace='default'
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def fake_lease():
|
||||
return client.V1Lease(
|
||||
api_version='coordination.k8s.io/v1',
|
||||
kind='Lease',
|
||||
metadata=client.V1ObjectMeta(
|
||||
name='curry-lease',
|
||||
namespace='default'
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def fake_np():
|
||||
return client.V1NetworkPolicy(
|
||||
api_version='networking.k8s.io/v1',
|
||||
kind='NetworkPolicy',
|
||||
metadata=client.V1ObjectMeta(
|
||||
name='all-deny',
|
||||
namespace='default'
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def fake_pc():
|
||||
return client.V1PriorityClass(
|
||||
api_version='scheduling.k8s.io/v1',
|
||||
kind='PriorityClass',
|
||||
metadata=client.V1ObjectMeta(
|
||||
name='high-priority'
|
||||
),
|
||||
value=1000000
|
||||
)
|
||||
|
||||
|
||||
def fake_persistent_volume(
|
||||
name='curry-sc-pv', phase='UnAvailable'):
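    # PersistentVolume whose status phase defaults to a non-Available value
    # so tests can exercise the wait-until-ready path.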
|
||||
return client.V1PersistentVolume(
|
||||
api_version='v1',
|
||||
kind='PersistentVolume',
|
||||
metadata=client.V1ObjectMeta(
|
||||
name=name
|
||||
),
|
||||
status=client.V1PersistentVolumeStatus(
|
||||
phase=phase
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def fake_pod(phase='Pending'):
|
||||
return client.V1Pod(
|
||||
api_version='v1',
|
||||
kind='Pod',
|
||||
metadata=client.V1ObjectMeta(
|
||||
name='vdu2',
|
||||
namespace='default'
|
||||
),
|
||||
status=client.V1PodStatus(
|
||||
phase=phase,
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def fake_pt():
|
||||
return client.V1PodTemplate(
|
||||
api_version='v1',
|
||||
kind='PodTemplate',
|
||||
metadata=client.V1ObjectMeta(
|
||||
name='curry-test001',
|
||||
namespace='default'
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def fake_rs(ready_replicas=0):
|
||||
return client.V1ReplicaSet(
|
||||
api_version='apps/v1',
|
||||
kind='ReplicaSet',
|
||||
metadata=client.V1ObjectMeta(
|
||||
name='curry-test001',
|
||||
namespace='default'
|
||||
),
|
||||
status=client.V1ReplicaSetStatus(
|
||||
replicas=2,
|
||||
ready_replicas=ready_replicas
|
||||
),
|
||||
spec=client.V1ReplicaSetSpec(
|
||||
replicas=2,
|
||||
selector=client.V1LabelSelector(
|
||||
match_labels={'app': 'webserver'}
|
||||
),
|
||||
template=client.V1PodTemplateSpec(
|
||||
metadata=client.V1ObjectMeta(
|
||||
labels={'app': 'webserver'}
|
||||
)
|
||||
)
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def fake_service():
|
||||
return client.V1Service(
|
||||
api_version='v1',
|
||||
kind='Service',
|
||||
metadata=client.V1ObjectMeta(
|
||||
labels={
|
||||
'app': 'webserver',
|
||||
'vdu_name': 'curry-svc-vdu001'
|
||||
},
|
||||
name='curry-svc-vdu001',
|
||||
namespace='default'
|
||||
),
|
||||
spec=client.V1ServiceSpec(
|
||||
cluster_ip=''
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def fake_sec():
|
||||
return client.V1Secret(
|
||||
api_version='v1',
|
||||
kind='Secret',
|
||||
metadata=client.V1ObjectMeta(
|
||||
name='curry-sc-multiple',
|
||||
namespace='default'
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def fake_rq():
|
||||
return client.V1ResourceQuota(
|
||||
api_version='v1',
|
||||
kind='ResourceQuota',
|
||||
metadata=client.V1ObjectMeta(
|
||||
name='curry-rq',
|
||||
namespace='default'
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def fake_stateful_set(ready_replicas=0):
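    # StatefulSet with a volume claim template; ready_replicas is
    # parameterized to simulate rollout progress.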
|
||||
return client.V1StatefulSet(
|
||||
api_version='apps/v1',
|
||||
kind='StatefulSet',
|
||||
metadata=client.V1ObjectMeta(
|
||||
name='vdu1',
|
||||
namespace='default'
|
||||
),
|
||||
spec=client.V1StatefulSetSpec(
|
||||
replicas=1,
|
||||
volume_claim_templates=[
|
||||
client.V1PersistentVolumeClaim(
|
||||
metadata=client.V1ObjectMeta(
|
||||
name='www'
|
||||
)
|
||||
)
|
||||
],
|
||||
selector=client.V1LabelSelector(
|
||||
match_labels={'app': 'nginx'}
|
||||
),
|
||||
template=client.V1PodTemplateSpec(
|
||||
metadata=client.V1ObjectMeta(
|
||||
name='curry-test001',
|
||||
namespace='curryns'
|
||||
)
|
||||
),
|
||||
service_name='nginx'
|
||||
),
|
||||
status=client.V1StatefulSetStatus(
|
||||
replicas=2,
|
||||
ready_replicas=ready_replicas
|
||||
),
|
||||
)
|
||||
|
||||
|
||||
def fake_pvc(name):
|
||||
return client.V1PersistentVolumeClaim(
|
||||
api_version='v1',
|
||||
kind='PersistentVolumeClaim',
|
||||
metadata=client.V1ObjectMeta(
|
||||
name=name
|
||||
),
|
||||
status=client.V1PersistentVolumeClaimStatus(
|
||||
phase='Bound'
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def fake_pvcs():
|
||||
return client.V1PersistentVolumeClaimList(
|
||||
items=[client.V1PersistentVolumeClaim(
|
||||
api_version='v1',
|
||||
kind='PersistentVolumeClaim',
|
||||
metadata=client.V1ObjectMeta(
|
||||
name='www-vdu1-0'
|
||||
),
|
||||
status=client.V1PersistentVolumeClaimStatus(
|
||||
phase='Bound'
|
||||
)
|
||||
), client.V1PersistentVolumeClaim(
|
||||
api_version='v1',
|
||||
kind='PersistentVolumeClaim',
|
||||
metadata=client.V1ObjectMeta(
|
||||
name='www-vdu1-1'
|
||||
),
|
||||
status=client.V1PersistentVolumeClaimStatus(
|
||||
phase='Bound'
|
||||
)
|
||||
), client.V1PersistentVolumeClaim(
|
||||
api_version='v1',
|
||||
kind='PersistentVolumeClaim',
|
||||
metadata=client.V1ObjectMeta(
|
||||
name='test'
|
||||
),
|
||||
status=client.V1PersistentVolumeClaimStatus(
|
||||
phase='Bound'
|
||||
)
|
||||
),
|
||||
]
|
||||
)
|
||||
|
||||
|
||||
def fake_sc(name='curry-sc-local'):
|
||||
return client.V1StorageClass(
|
||||
api_version='v1',
|
||||
kind='StorageClass',
|
||||
metadata=client.V1ObjectMeta(
|
||||
name=name
|
||||
),
|
||||
provisioner='kubernetes.io/no-provisioner'
|
||||
)
|
||||
|
||||
|
||||
def fake_api_service(type='Available'):
|
||||
return client.V1APIService(
|
||||
api_version='apiregistration.k8s.io/v1',
|
||||
kind='APIService',
|
||||
metadata=client.V1ObjectMeta(
|
||||
name='curry-test001',
|
||||
namespace='curryns'
|
||||
),
|
||||
status=client.V1APIServiceStatus(
|
||||
conditions=[
|
||||
client.V1APIServiceCondition(
|
||||
type=type,
|
||||
status='True'
|
||||
)
|
||||
]
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def fake_volume_attachment(attached='True'):
|
||||
return client.V1VolumeAttachment(
|
||||
api_version='storage.k8s.io/v1',
|
||||
kind='VolumeAttachment',
|
||||
metadata=client.V1ObjectMeta(
|
||||
name='curry-test001',
|
||||
namespace='curryns'
|
||||
),
|
||||
spec=client.V1VolumeAttachmentSpec(
|
||||
attacher='nginx',
|
||||
node_name='nginx',
|
||||
source=client.V1VolumeAttachmentSource(
|
||||
persistent_volume_name='curry-sc-pvc'
|
||||
)
|
||||
),
|
||||
status=client.V1VolumeAttachmentStatus(
|
||||
attached=attached,
|
||||
)
|
||||
)
|
||||
|
||||
|
||||
def fake_node(type='Ready', status='True'):
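    # Node with a single condition; type and status are parameterized so
    # tests can cover both Ready and NotReady nodes.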
|
||||
return client.V1Node(
|
||||
api_version='v1',
|
||||
kind='Node',
|
||||
metadata=client.V1ObjectMeta(
|
||||
name='curry-node-test',
|
||||
labels={'name': 'curry-node-test'}
|
||||
),
|
||||
status=client.V1NodeStatus(
|
||||
conditions=[
|
||||
client.V1NodeCondition(
|
||||
status=status,
|
||||
type=type
|
||||
)
|
||||
]
|
||||
)
|
||||
)
|
@ -0,0 +1,131 @@
|
||||
tosca_definitions_version: tosca_simple_yaml_1_2
|
||||
|
||||
description: Simple deployment flavour for Sample VNF
|
||||
|
||||
imports:
|
||||
- etsi_nfv_sol001_common_types.yaml
|
||||
- etsi_nfv_sol001_vnfd_types.yaml
|
||||
- sample_cnf_types.yaml
|
||||
|
||||
topology_template:
|
||||
inputs:
|
||||
descriptor_id:
|
||||
type: string
|
||||
descriptor_version:
|
||||
type: string
|
||||
provider:
|
||||
type: string
|
||||
product_name:
|
||||
type: string
|
||||
software_version:
|
||||
type: string
|
||||
vnfm_info:
|
||||
type: list
|
||||
entry_schema:
|
||||
type: string
|
||||
flavour_id:
|
||||
type: string
|
||||
flavour_description:
|
||||
type: string
|
||||
|
||||
substitution_mappings:
|
||||
node_type: company.provider.VNF
|
||||
properties:
|
||||
flavour_id: simple
|
||||
requirements:
|
||||
virtual_link_external: []
|
||||
|
||||
node_templates:
|
||||
VNF:
|
||||
type: company.provider.VNF
|
||||
properties:
|
||||
flavour_description: A simple flavour
|
||||
|
||||
VDU1:
|
||||
type: tosca.nodes.nfv.Vdu.Compute
|
||||
properties:
|
||||
name: vdu1
|
||||
description: VDU1 compute node
|
||||
vdu_profile:
|
||||
min_number_of_instances: 1
|
||||
max_number_of_instances: 3
|
||||
|
||||
VDU2:
|
||||
type: tosca.nodes.nfv.Vdu.Compute
|
||||
properties:
|
||||
name: vdu2
|
||||
        description: VDU2 compute node
|
||||
vdu_profile:
|
||||
min_number_of_instances: 1
|
||||
max_number_of_instances: 1
|
||||
|
||||
policies:
|
||||
- scaling_aspects:
|
||||
type: tosca.policies.nfv.ScalingAspects
|
||||
properties:
|
||||
aspects:
|
||||
vdu1_aspect:
|
||||
name: vdu1_aspect
|
||||
description: vdu1 scaling aspect
|
||||
max_scale_level: 2
|
||||
step_deltas:
|
||||
- delta_1
|
||||
|
||||
- vdu1_initial_delta:
|
||||
type: tosca.policies.nfv.VduInitialDelta
|
||||
properties:
|
||||
initial_delta:
|
||||
number_of_instances: 1
|
||||
targets: [ VDU1 ]
|
||||
|
||||
- vdu1_scaling_aspect_deltas:
|
||||
type: tosca.policies.nfv.VduScalingAspectDeltas
|
||||
properties:
|
||||
aspect: vdu1_aspect
|
||||
deltas:
|
||||
delta_1:
|
||||
number_of_instances: 1
|
||||
targets: [ VDU1 ]
|
||||
|
||||
- vdu2_initial_delta:
|
||||
type: tosca.policies.nfv.VduInitialDelta
|
||||
properties:
|
||||
initial_delta:
|
||||
number_of_instances: 1
|
||||
targets: [ VDU2 ]
|
||||
|
||||
- instantiation_levels:
|
||||
type: tosca.policies.nfv.InstantiationLevels
|
||||
properties:
|
||||
levels:
|
||||
instantiation_level_1:
|
||||
description: Smallest size
|
||||
scale_info:
|
||||
vdu1_aspect:
|
||||
scale_level: 0
|
||||
instantiation_level_2:
|
||||
description: Largest size
|
||||
scale_info:
|
||||
vdu1_aspect:
|
||||
scale_level: 2
|
||||
default_level: instantiation_level_1
|
||||
|
||||
- VDU1_instantiation_levels:
|
||||
type: tosca.policies.nfv.VduInstantiationLevels
|
||||
properties:
|
||||
levels:
|
||||
instantiation_level_1:
|
||||
number_of_instances: 2
|
||||
instantiation_level_2:
|
||||
number_of_instances: 3
|
||||
targets: [ VDU1 ]
|
||||
|
||||
    - VDU2_instantiation_levels:
|
||||
type: tosca.policies.nfv.VduInstantiationLevels
|
||||
properties:
|
||||
levels:
|
||||
instantiation_level_1:
|
||||
number_of_instances: 1
|
||||
instantiation_level_2:
|
||||
number_of_instances: 1
|
||||
targets: [ VDU2 ]
|
@ -0,0 +1,24 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: vdu1
  namespace: default
spec:
  replicas: 2
  selector:
    matchLabels:
      app: webserver
  template:
    metadata:
      labels:
        app: webserver
    spec:
      containers:
      - name: nginx
        image: nginx
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 80
          protocol: TCP
  strategy:
    type: RollingUpdate
@ -0,0 +1,24 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: vdu1
  namespace: default
spec:
  replicas: 2
  selector:
    matchLabels:
      app: webserver
  template:
    metadata:
      labels:
        app: webserver
    spec:
      containers:
      - name: nginx
        image: nginx
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 80
          protocol: TCP
  strategy:
    type: RollingUpdate
@ -0,0 +1,67 @@
# Copyright (C) 2022 Fujitsu
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import pickle
import sys

from oslo_log import log as logging


LOG = logging.getLogger(__name__)
CMD_TIMEOUT = 30
SERVER_WAIT_COMPLETE_TIME = 60
SSH_CONNECT_RETRY_COUNT = 4


class SampleNewCoordinateVNFScript(object):

    def __init__(self, req, inst, grant_req, grant, csar_dir, k8s_info):
        self.req = req
        self.inst = inst
        self.grant_req = grant_req
        self.grant = grant
        self.csar_dir = csar_dir
        self.k8s_info = k8s_info

    def coordinate_vnf(self):
        pass


def main():
    operation = "coordinate_vnf"
    script_dict = pickle.load(sys.stdin.buffer)
    req = script_dict['request']
    inst = script_dict['vnf_instance']
    grant_req = script_dict['grant_request']
    grant = script_dict['grant_response']
    csar_dir = script_dict['tmp_csar_dir']
    k8s_info = script_dict['k8s_info']
    script = SampleNewCoordinateVNFScript(
        req, inst, grant_req, grant,
        csar_dir, k8s_info)
    try:
        getattr(script, operation)()
    except Exception:
        raise


if __name__ == "__main__":
    try:
        main()
        os._exit(0)
    except Exception as ex:
        sys.stderr.write(str(ex))
        sys.stderr.flush()
        os._exit(1)
@ -0,0 +1,14 @@
TOSCA-Meta-File-Version: 1.0
Created-by: dummy_user
CSAR-Version: 1.1
Entry-Definitions: Definitions/sample_cnf_top.vnfd.yaml

Name: Files/kubernetes/deployment.yaml
Content-Type: test-data
Algorithm: SHA-256
Hash: c9a9b54cb0786ba786f1b7f7f2d0b2e18ae6b1b5b55e95f3855e2f61f8cfdf61

Name: Files/new_kubernetes/new_deployment.yaml
Content-Type: test-data
Algorithm: SHA-256
Hash: c9a9b54cb0786ba786f1b7f7f2d0b2e18ae6b1b5b55e95f3855e2f61f8cfdf61
@ -0,0 +1,131 @@
|
||||
tosca_definitions_version: tosca_simple_yaml_1_2
|
||||
|
||||
description: Simple deployment flavour for Sample VNF
|
||||
|
||||
imports:
|
||||
- etsi_nfv_sol001_common_types.yaml
|
||||
- etsi_nfv_sol001_vnfd_types.yaml
|
||||
- sample_cnf_types.yaml
|
||||
|
||||
topology_template:
|
||||
inputs:
|
||||
descriptor_id:
|
||||
type: string
|
||||
descriptor_version:
|
||||
type: string
|
||||
provider:
|
||||
type: string
|
||||
product_name:
|
||||
type: string
|
||||
software_version:
|
||||
type: string
|
||||
vnfm_info:
|
||||
type: list
|
||||
entry_schema:
|
||||
type: string
|
||||
flavour_id:
|
||||
type: string
|
||||
flavour_description:
|
||||
type: string
|
||||
|
||||
substitution_mappings:
|
||||
node_type: company.provider.VNF
|
||||
properties:
|
||||
flavour_id: simple
|
||||
requirements:
|
||||
virtual_link_external: []
|
||||
|
||||
node_templates:
|
||||
VNF:
|
||||
type: company.provider.VNF
|
||||
properties:
|
||||
flavour_description: A simple flavour
|
||||
|
||||
VDU1:
|
||||
type: tosca.nodes.nfv.Vdu.Compute
|
||||
properties:
|
||||
name: vdu1
|
||||
description: VDU1 compute node
|
||||
vdu_profile:
|
||||
min_number_of_instances: 1
|
||||
max_number_of_instances: 3
|
||||
|
||||
VDU2:
|
||||
type: tosca.nodes.nfv.Vdu.Compute
|
||||
properties:
|
||||
name: vdu2
|
||||
        description: VDU2 compute node
|
||||
vdu_profile:
|
||||
min_number_of_instances: 1
|
||||
max_number_of_instances: 1
|
||||
|
||||
policies:
|
||||
- scaling_aspects:
|
||||
type: tosca.policies.nfv.ScalingAspects
|
||||
properties:
|
||||
aspects:
|
||||
vdu1_aspect:
|
||||
name: vdu1_aspect
|
||||
description: vdu1 scaling aspect
|
||||
max_scale_level: 2
|
||||
step_deltas:
|
||||
- delta_1
|
||||
|
||||
- vdu1_initial_delta:
|
||||
type: tosca.policies.nfv.VduInitialDelta
|
||||
properties:
|
||||
initial_delta:
|
||||
number_of_instances: 1
|
||||
targets: [ VDU1 ]
|
||||
|
||||
- vdu1_scaling_aspect_deltas:
|
||||
type: tosca.policies.nfv.VduScalingAspectDeltas
|
||||
properties:
|
||||
aspect: vdu1_aspect
|
||||
deltas:
|
||||
delta_1:
|
||||
number_of_instances: 1
|
||||
targets: [ VDU1 ]
|
||||
|
||||
- vdu2_initial_delta:
|
||||
type: tosca.policies.nfv.VduInitialDelta
|
||||
properties:
|
||||
initial_delta:
|
||||
number_of_instances: 1
|
||||
targets: [ VDU2 ]
|
||||
|
||||
- instantiation_levels:
|
||||
type: tosca.policies.nfv.InstantiationLevels
|
||||
properties:
|
||||
levels:
|
||||
instantiation_level_1:
|
||||
description: Smallest size
|
||||
scale_info:
|
||||
vdu1_aspect:
|
||||
scale_level: 0
|
||||
instantiation_level_2:
|
||||
description: Largest size
|
||||
scale_info:
|
||||
vdu1_aspect:
|
||||
scale_level: 2
|
||||
default_level: instantiation_level_1
|
||||
|
||||
- VDU1_instantiation_levels:
|
||||
type: tosca.policies.nfv.VduInstantiationLevels
|
||||
properties:
|
||||
levels:
|
||||
instantiation_level_1:
|
||||
number_of_instances: 2
|
||||
instantiation_level_2:
|
||||
number_of_instances: 3
|
||||
targets: [ VDU1 ]
|
||||
|
||||
    - VDU2_instantiation_levels:
|
||||
type: tosca.policies.nfv.VduInstantiationLevels
|
||||
properties:
|
||||
levels:
|
||||
instantiation_level_1:
|
||||
number_of_instances: 1
|
||||
instantiation_level_2:
|
||||
number_of_instances: 1
|
||||
targets: [ VDU2 ]
|
@ -0,0 +1,11 @@
apiVersion: apiregistration.k8s.io/v1
kind: APIService
metadata:
  name: v1beta1.currytest.k8s.io
spec:
  group: currytest.k8s.io
  groupPriorityMinimum: 17000
  version: v1beta1
  versionPriority: 5
  service:
    name: test
@ -0,0 +1,10 @@
apiVersion: v1
kind: Binding
metadata:
  name: curry-endpoint-test001
  namespace: default
target:
  apiVersion: v1
  kind: Node
  namespace: default
  name: k8-worker2
@ -0,0 +1,29 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: curry-cluster-role
rules:
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["get", "watch", "list"]
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: curry-cluster-sa
  namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: curry-cluster-rolebinding
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: curry-cluster-role
subjects:
- apiGroup: ""
  kind: ServiceAccount
  name: curry-cluster-sa
  namespace: default
@ -0,0 +1,8 @@
apiVersion: v1
data:
  param0: key1
  param1: key2
kind: ConfigMap
metadata:
  name: curry-test001
  namespace: default
@ -0,0 +1,8 @@
apiVersion: apps/v1
kind: ControllerRevision
data:
  raw: test
metadata:
  name: curry-test001
  namespace: default
revision: 1
@ -0,0 +1,19 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: vdu2
  namespace: default
spec:
  selector:
    matchLabels:
      app: nginx
  template:
    metadata:
      labels:
        app: nginx
    spec:
      containers:
      - image: nginx
        name: nginx
        ports:
        - containerPort: 80
@ -0,0 +1,24 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: vdu1
  namespace: default
spec:
  replicas: 2
  selector:
    matchLabels:
      app: webserver
  template:
    metadata:
      labels:
        app: webserver
    spec:
      containers:
      - name: nginx
        image: nginx:alpine
        imagePullPolicy: IfNotPresent
        ports:
        - containerPort: 80
          protocol: TCP
  strategy:
    type: RollingUpdate
@ -0,0 +1,13 @@
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
  name: curry-hpa-vdu001
  namespace: default
spec:
  maxReplicas: 3
  minReplicas: 1
  scaleTargetRef:
    apiVersion: extensions/v1beta1
    kind: Deployment
    name: curry-svc-vdu001
  targetCPUUtilizationPercentage: 40
@ -0,0 +1,25 @@
apiVersion: batch/v1
kind: Job
metadata:
  creationTimestamp: null
  labels:
    run: curryjob
  name: curryjob
  namespace: default
spec:
  completions: 5
  parallelism: 2
  template:
    metadata:
      creationTimestamp: null
      labels:
        run: curryjob
    spec:
      containers:
      - command: ["sh", "-c"]
        args:
        - echo CURRY
        image: celebdor/kuryr-demo
        name: curryjob
      restartPolicy: OnFailure
status: {}
@ -0,0 +1,10 @@
apiVersion: "v1"
kind: "LimitRange"
metadata:
  name: "limits"
  namespace: default
spec:
  limits:
  - type: "Container"
    defaultRequest:
      cpu: "100m"
@ -0,0 +1,11 @@
apiVersion: authorization.k8s.io/v1
kind: LocalSubjectAccessReview
metadata:
  namespace: default
spec:
  user: curry-sa
  resourceAttributes:
    group: apps
    resource: deployments
    verb: create
    namespace: default
@ -0,0 +1,8 @@
apiVersion: coordination.k8s.io/v1
kind: Lease
metadata:
  name: curry-lease
  namespace: default
spec:
  holderIdentity: master
  leaseDurationSeconds: 40
@ -0,0 +1,17 @@
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: all-deny
  namespace: default
spec:
  podSelector: {}
  policyTypes:
  - Egress
  egress:
  - ports:
    - port: 53
      protocol: UDP
    - port: 53
      protocol: TCP
    to:
    - namespaceSelector: {}
@ -0,0 +1,7 @@
apiVersion: scheduling.k8s.io/v1
kind: PriorityClass
metadata:
  name: high-priority
value: 1000000
globalDefault: false
description: "Priority Class Test"
@ -0,0 +1,4 @@
apiVersion: v1
kind: Namespace
metadata:
  name: curry
@ -0,0 +1,41 @@
apiVersion: v1
kind: Node
metadata:
  name: curry-node-test
  labels:
    name: curry-node-test
spec:
  configSource:
    configMap:
      name: CONFIG_MAP_NAME
      namespace: kube-system
      kubeletConfigKey: kubelet
  taints:
  - effect: 'test'
    key: 'test'
status:
  addresses:
  - address: '1.1.1.1'
    type: 'test'
  conditions:
  - status: True
    type: Node
  daemonEndpoints:
    kubeletEndpoint:
      port: 8080
  images:
  - names: 'test'
  nodeInfo:
    architecture: 'test'
    bootId: 'test'
    containerRuntimeVersion: 'test'
    kernelVersion: 'test'
    kubeProxyVersion: 'test'
    kubeletVersion: 'test'
    machineId: 'test'
    operatingSystem: 'test'
    osImage: 'test'
    systemUuid: 'test'
  volumesAttached:
  - device_path: 'test'
    name: 'test'
@ -0,0 +1,14 @@
apiVersion: v1
kind: PersistentVolume
metadata:
  name: curry-sc-pv
spec:
  accessModes:
  - ReadWriteOnce
  capacity:
    storage: 1Gi
  hostPath:
    path: /data/curry-sc-test
    type: DirectoryOrCreate
  persistentVolumeReclaimPolicy: Delete
  storageClassName: curry-sc-local
@ -0,0 +1,14 @@
apiVersion: v1
kind: PersistentVolume
metadata:
  name: curry-sc-pv-0
spec:
  accessModes:
  - ReadWriteOnce
  capacity:
    storage: 1Gi
  hostPath:
    path: /data/curry-sc-test-1
    type: DirectoryOrCreate
  persistentVolumeReclaimPolicy: Delete
  storageClassName: curry-sc-local
@ -0,0 +1,43 @@
apiVersion: v1
kind: PodTemplate
metadata:
  name: curry-test001
  namespace: default
template:
  metadata:
    labels:
      app: webserver
      scaling_name: SP1
  spec:
    containers:
    - env:
      - name: param0
        valueFrom:
          configMapKeyRef:
            key: param0
            name: curry-test001
      - name: param1
        valueFrom:
          configMapKeyRef:
            key: param1
            name: curry-test001
      image: celebdor/kuryr-demo
      imagePullPolicy: IfNotPresent
      name: web-server
      ports:
      - containerPort: 8080
      resources:
        limits:
          cpu: 500m
          memory: 512M
        requests:
          cpu: 500m
          memory: 512M
      volumeMounts:
      - name: curry-claim-volume
        mountPath: /data
    volumes:
    - name: curry-claim-volume
      persistentVolumeClaim:
        claimName: curry-pv-claim
    terminationGracePeriodSeconds: 0
@ -0,0 +1,12 @@
apiVersion: v1
kind: Pod
metadata:
  namespace: default
  name: vdu2
spec:
  containers:
  - image: celebdor/kuryr-demo
    imagePullPolicy: IfNotPresent
    name: web-server
    ports:
    - containerPort: 8080
@ -0,0 +1,51 @@
---
apiVersion: v1
kind: Service
metadata:
  labels:
    app: webserver
    vdu_name: curry-svc-vdu001
  name: curry-svc-vdu001-multiple
  namespace: default
spec:
  ports:
  - name: "80"
    port: 80
    targetPort: 8080
  selector:
    app: webserver
  type: ClusterIP
---
apiVersion: v1
data:
  param0: a2V5MQ==
  param1: a2V5Mg==
kind: Secret
metadata:
  name: curry-sc-multiple
  namespace: default
---
apiVersion: apps/v1
kind: ReplicaSet
metadata:
  name: vdu1
  namespace: default
spec:
  replicas: 2
  selector:
    matchLabels:
      app: webserver
  template:
    metadata:
      labels:
        app: webserver
    spec:
      containers:
      - image: nginx
        name: nginx
        env:
        - name: param0
          valueFrom:
            secretKeyRef:
              key: param0
              name: curry-sc-multiple
@ -0,0 +1,11 @@
apiVersion: v1
kind: ResourceQuota
metadata:
  name: curry-rq
  namespace: default
spec:
  hard:
    cpu: "1000m"
    memory: 2Gi
  scopes:
  - NotBestEffort
@ -0,0 +1,31 @@
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  name: curry-role
  namespace: default
rules:
- apiGroups: [""]
  resources: ["pods"]
  verbs: ["get", "watch", "list"]
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: curry-sa
  namespace: default
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  name: curry-rolebinding
  namespace: default
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: curry-role
subjects:
- apiGroup: ""
  kind: ServiceAccount
  name: curry-sa
  namespace: default
@ -0,0 +1,14 @@
---
apiVersion: authorization.k8s.io/v1
kind: SelfSubjectAccessReview
spec:
  resourceAttributes:
    group: apps
    resource: deployments
    verb: create
    namespace: default
---
apiVersion: authorization.k8s.io/v1
kind: SelfSubjectRulesReview
spec:
  namespace: default
@ -0,0 +1,36 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
  name: vdu1
  namespace: default
spec:
  selector:
    matchLabels:
      app: nginx
  serviceName: "nginx"
  replicas: 2
  template:
    metadata:
      labels:
        app: nginx
    spec:
      terminationGracePeriodSeconds: 10
      containers:
      - name: nginx
        image: k8s.gcr.io/nginx-slim:0.8
        ports:
        - containerPort: 80
          name: web
        volumeMounts:
        - name: www
          mountPath: /usr/share/nginx/html
  volumeClaimTemplates:
  - metadata:
      name: www
    spec:
      accessModes:
      - ReadWriteOnce
      storageClassName: "curry-sc-local"
      resources:
        requests:
          storage: 1Gi