Merge "CNF v2 API enhance and refactor"
commit 693dce5f5d
@ -0,0 +1,11 @@
---
features:
  - |
    Support CNF Heal/Scale/Rollback operations in the v2 LCM API.
    This feature provides the following CNF operations in the
    v2 LCM API based on ETSI NFV specifications, and uses the
    instantiationLevel parameter to determine the initial number
    of Pods.

    * Scale VNF task
    * Heal VNF task
    * Rollback operation task (instantiate and scale-out)
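
    For example, a scale-out of one step can be requested through the
    v2 API as follows (a minimal sketch: the endpoint follows ETSI NFV
    SOL003, and the aspect id and step count are illustrative values)::

      POST /vnflcm/v2/vnf_instances/{vnfInstanceId}/scale
      {
          "type": "SCALE_OUT",
          "aspectId": "vdu1_aspect",
          "numberOfSteps": 1
      }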
@ -338,3 +338,21 @@ class MalformedRequestBody(SolHttpError400):


class InvalidPagingMarker(SolHttpError400):
    message = _("Paging marker value %(marker)s is invalid.")


class K8sOperationFailed(SolHttpError422):
    # title and detail are set in the code from kubernetes operation
    pass


class K8sOperaitionTimeout(SolHttpError422):
    message = _("Kubernetes operation did not complete within"
                " the timeout period.")


class K8sResourceNotFound(SolHttpError404):
    message = _("Kubernetes resource %(rsc_name)s is not found.")


class K8sInvalidManifestFound(SolHttpError400):
    message = _("Invalid manifest found.")
@ -16,12 +16,9 @@

import os
import pickle
import subprocess
from urllib.parse import urlparse
import urllib.request as urllib2

from oslo_log import log as logging
from oslo_utils import uuidutils
import yaml

from tacker.sol_refactored.common import config
from tacker.sol_refactored.common import exceptions as sol_ex
@ -396,6 +393,17 @@ class VnfLcmDriverV2(object):

        inst.vimConnectionInfo = vim_infos

        # vimType dependent parameter check
        # NOTE: it would be better to do this check in the controller, but
        # the controller cannot, because vim_info is not identified until
        # this point.
        if lcmocc.operationState == v2fields.LcmOperationStateType.STARTING:
            vim_info = inst_utils.select_vim_info(vim_infos)
            if (vim_info.vimType == "kubernetes" and
                    not req.get('additionalParams', {}).get(
                        'lcm-kubernetes-def-files')):
                raise sol_ex.SolValidationError(
                    detail="'lcm-kubernetes-def-files' must be specified")

    def instantiate_process(self, context, lcmocc, inst, grant_req,
                            grant, vnfd):
        req = lcmocc.operationParams
@ -406,7 +414,7 @@ class VnfLcmDriverV2(object):
        if vim_info.vimType == 'ETSINFV.OPENSTACK_KEYSTONE.V_3':
            driver = openstack.Openstack()
            driver.instantiate(req, inst, grant_req, grant, vnfd)
        elif vim_info.vimType == 'kubernetes':  # k8s
        elif vim_info.vimType == 'kubernetes':
            driver = kubernetes.Kubernetes()
            driver.instantiate(req, inst, grant_req, grant, vnfd)
        else:
@ -422,8 +430,11 @@ class VnfLcmDriverV2(object):
        if vim_info.vimType == 'ETSINFV.OPENSTACK_KEYSTONE.V_3':
            driver = openstack.Openstack()
            driver.instantiate_rollback(req, inst, grant_req, grant, vnfd)
        elif vim_info.vimType == 'kubernetes':
            driver = kubernetes.Kubernetes()
            driver.instantiate_rollback(req, inst, grant_req, grant, vnfd)
        else:
            # only support openstack at the moment
            # should not occur
            raise sol_ex.SolException(sol_detail='not support vim type')

    def _make_res_def_for_remove_vnfcs(self, inst_info, inst_vnfcs,
@ -561,7 +572,7 @@ class VnfLcmDriverV2(object):
        if vim_info.vimType == 'ETSINFV.OPENSTACK_KEYSTONE.V_3':
            driver = openstack.Openstack()
            driver.terminate(req, inst, grant_req, grant, vnfd)
        elif vim_info.vimType == 'kubernetes':  # k8s
        elif vim_info.vimType == 'kubernetes':
            driver = kubernetes.Kubernetes()
            driver.terminate(req, inst, grant_req, grant, vnfd)
        else:
@ -674,8 +685,11 @@ class VnfLcmDriverV2(object):
        if vim_info.vimType == 'ETSINFV.OPENSTACK_KEYSTONE.V_3':
            driver = openstack.Openstack()
            driver.scale(req, inst, grant_req, grant, vnfd)
        elif vim_info.vimType == 'kubernetes':
            driver = kubernetes.Kubernetes()
            driver.scale(req, inst, grant_req, grant, vnfd)
        else:
            # only support openstack at the moment
            # should not occur
            raise sol_ex.SolException(sol_detail='not support vim type')

    def scale_rollback(self, context, lcmocc, inst, grant_req,
@ -688,8 +702,11 @@ class VnfLcmDriverV2(object):
        if vim_info.vimType == 'ETSINFV.OPENSTACK_KEYSTONE.V_3':
            driver = openstack.Openstack()
            driver.scale_rollback(req, inst, grant_req, grant, vnfd)
        elif vim_info.vimType == 'kubernetes':
            driver = kubernetes.Kubernetes()
            driver.scale_rollback(req, inst, grant_req, grant, vnfd)
        else:
            # only support openstack at the moment
            # should not occur
            raise sol_ex.SolException(sol_detail='not support vim type')

    def _modify_from_vnfd_prop(self, inst, vnfd_prop, attr):
@ -867,8 +884,11 @@ class VnfLcmDriverV2(object):
        if vim_info.vimType == 'ETSINFV.OPENSTACK_KEYSTONE.V_3':
            driver = openstack.Openstack()
            driver.heal(req, inst, grant_req, grant, vnfd)
        elif vim_info.vimType == 'kubernetes':
            driver = kubernetes.Kubernetes()
            driver.heal(req, inst, grant_req, grant, vnfd)
        else:
            # only support openstack at the moment
            # should not occur
            raise sol_ex.SolException(sol_detail='not support vim type')

    def change_ext_conn_grant(self, grant_req, req, inst, vnfd):
@ -992,14 +1012,11 @@ class VnfLcmDriverV2(object):
        if not inst_info.obj_attr_is_set('vnfcResourceInfo'):
            return

        if req.additionalParams.get('vdu_params'):
            vdu_ids = [vdu_param['vdu_id']
                       for vdu_param in req.additionalParams['vdu_params']]
            inst_vnfcs = [inst_vnfc
                          for inst_vnfc in inst_info.vnfcResourceInfo
                          if inst_vnfc.vduId in vdu_ids]
        else:
            inst_vnfcs = inst_info.vnfcResourceInfo

        add_reses = []
        rm_reses = []
@ -1025,70 +1042,6 @@ class VnfLcmDriverV2(object):
        if rm_reses:
            grant_req.removeResources = rm_reses

    def _pre_check_for_change_vnfpkg(self, context, req, inst, vnfd):
        def _get_file_content(file_path):
            if ((urlparse(file_path).scheme == 'file') or
                    (bool(urlparse(file_path).scheme) and
                     bool(urlparse(file_path).netloc))):
                with urllib2.urlopen(file_path) as file_object:
                    file_content = file_object.read()
            else:
                with open(file_path, 'rb') as file_object:
                    file_content = file_object.read()
            return file_content

        vnf_artifact_files = vnfd.get_vnf_artifact_files()
        if req.additionalParams.get('lcm-kubernetes-def-files') is None:
            target_k8s_files = inst.metadata.get('lcm-kubernetes-def-files')
        else:
            target_k8s_files = []
            new_file_paths = req.additionalParams.get(
                'lcm-kubernetes-def-files')
            old_vnfd = self.nfvo_client.get_vnfd(
                context=context, vnfd_id=inst.vnfdId, all_contents=False)
            old_file_paths = inst.metadata.get('lcm-kubernetes-def-files')

            for new_file_path in new_file_paths:
                new_file_infos = [
                    {"kind": content.get('kind'),
                     "name": content.get('metadata', {}).get('name', '')}
                    for content in list(yaml.safe_load_all(
                        _get_file_content(os.path.join(
                            vnfd.csar_dir, new_file_path))))]
                for old_file_path in old_file_paths:
                    find_flag = False
                    old_file_infos = [
                        {"kind": content.get('kind'),
                         "name": content.get('metadata', {}).get('name', '')}
                        for content in list(yaml.safe_load_all(
                            _get_file_content(os.path.join(
                                old_vnfd.csar_dir, old_file_path))))]
                    resources = [info for info in old_file_infos
                                 if info in new_file_infos]
                    if len(resources) != 0:
                        if len(resources) != len(old_file_infos):
                            raise sol_ex.UnmatchedFileException(
                                new_file_path=new_file_path)
                        if 'Deployment' not in [res.get(
                                'kind') for res in resources]:
                            raise sol_ex.UnSupportedKindException(
                                new_file_path=new_file_path)
                        old_file_paths.remove(old_file_path)
                        target_k8s_files.append(new_file_path)
                        find_flag = True
                        break
                    continue
                if not find_flag:
                    raise sol_ex.NotFoundUpdateFileException(
                        new_file_path=new_file_path)

            target_k8s_files.extend(old_file_paths)
            if set(target_k8s_files).difference(set(vnf_artifact_files)):
                diff_files = ','.join(list(set(
                    target_k8s_files).difference(set(vnf_artifact_files))))
                raise sol_ex.CnfDefinitionNotFound(diff_files=diff_files)
            return target_k8s_files

    def change_vnfpkg_process(
            self, context, lcmocc, inst, grant_req, grant, vnfd):
        req = lcmocc.operationParams
@ -1099,14 +1052,9 @@ class VnfLcmDriverV2(object):
        if vim_info.vimType == 'ETSINFV.OPENSTACK_KEYSTONE.V_3':
            driver = openstack.Openstack()
            driver.change_vnfpkg(req, inst, grant_req, grant, vnfd)
        elif vim_info.vimType == 'kubernetes':  # k8s
            target_k8s_files = self._pre_check_for_change_vnfpkg(
                context, req, inst, vnfd)
            update_req = req.obj_clone()
            update_req.additionalParams[
                'lcm-kubernetes-def-files'] = target_k8s_files
        elif vim_info.vimType == 'kubernetes':
            driver = kubernetes.Kubernetes()
            driver.change_vnfpkg(update_req, inst, grant_req, grant, vnfd)
            driver.change_vnfpkg(req, inst, grant_req, grant, vnfd)
        else:
            # should not occur
            raise sol_ex.SolException(sol_detail='not support vim type')
@ -1121,10 +1069,10 @@ class VnfLcmDriverV2(object):
            driver = openstack.Openstack()
            driver.change_vnfpkg_rollback(
                req, inst, grant_req, grant, vnfd, lcmocc)
        elif vim_info.vimType == 'kubernetes':  # k8s
        elif vim_info.vimType == 'kubernetes':
            driver = kubernetes.Kubernetes()
            driver.change_vnfpkg_rollback(
                req, inst, grant_req, grant, vnfd, lcmocc)
                req, inst, grant_req, grant, vnfd)
        else:
            # should not occur
            raise sol_ex.SolException(sol_detail='not support vim type')
@ -447,14 +447,16 @@ class VnfLcmControllerV2(sol_wsgi.SolAPIController):
            raise sol_ex.NotSupportUpgradeType(upgrade_type=upgrade_type)
        vim_info = inst_utils.select_vim_info(inst.vimConnectionInfo)
        vdu_params = additional_params.get('vdu_params')
        if (vim_info.vimType == 'ETSINFV.OPENSTACK_KEYSTONE.V_3' and
                vdu_params is None):
            if vdu_params is None:
                raise sol_ex.SolValidationError(
                    detail="'vdu_params' must exist in additionalParams")
        if vdu_params:
            self._check_vdu_params(inst, vdu_params, vim_info.vimType,
                additional_params.get('lcm-operation-coordinate-new-vnf'),
                additional_params.get('lcm-operation-coordinate-old-vnf'))
        if (vim_info.vimType == "kubernetes" and
                not additional_params.get('lcm-kubernetes-def-files')):
            raise sol_ex.SolValidationError(
                detail="'lcm-kubernetes-def-files' must be specified")
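        # For example, a RollingUpdate change_vnfpkg request that passes
        # the checks above carries additionalParams like the following
        # (values are illustrative, not part of this commit):
        #
        #     "additionalParams": {
        #         "upgrade_type": "RollingUpdate",
        #         "lcm-kubernetes-def-files":
        #             ["Files/kubernetes/deployment.yaml"],
        #         "vdu_params": [{"vdu_id": "VDU1"}],
        #         "lcm-operation-coordinate-new-vnf":
        #             "Scripts/coordinate_new_vnf.py",
        #         "lcm-operation-coordinate-old-vnf":
        #             "Scripts/coordinate_old_vnf.py"
        #     }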

        lcmocc = self._new_lcmocc(id, v2fields.LcmOperationType.CHANGE_VNFPKG,
                                  body)
@ -609,9 +611,6 @@ class VnfLcmControllerV2(sol_wsgi.SolAPIController):
        inst = inst_utils.get_inst(context, lcmocc.vnfInstanceId)
        vim_infos = inst.vimConnectionInfo
        vim_info = inst_utils.select_vim_info(vim_infos)
        if lcmocc.operation != 'CHANGE_VNFPKG' and (
                vim_info.vimType == 'kubernetes'):
            raise sol_ex.NotSupportOperationType

        self.conductor_rpc.retry_lcm_op(context, lcmocc.id)

@ -641,9 +640,6 @@ class VnfLcmControllerV2(sol_wsgi.SolAPIController):
        inst = inst_utils.get_inst(context, lcmocc.vnfInstanceId)
        vim_infos = inst.vimConnectionInfo
        vim_info = inst_utils.select_vim_info(vim_infos)
        if lcmocc.operation != 'CHANGE_VNFPKG' and (
                vim_info.vimType == 'kubernetes'):
            raise sol_ex.NotSupportOperationType

        self.conductor_rpc.rollback_lcm_op(context, lcmocc.id)

@ -13,17 +13,14 @@
# License for the specific language governing permissions and limitations
# under the License.


import os
import pickle
import subprocess

from kubernetes import client
from oslo_log import log as logging
from oslo_utils import uuidutils
from oslo_service import loopingcall

from tacker.sol_refactored.common import config
from tacker.sol_refactored.common import exceptions as sol_ex
from tacker.sol_refactored.common import vnf_instance_utils as inst_utils
from tacker.sol_refactored.infra_drivers.kubernetes import kubernetes_resource
from tacker.sol_refactored.infra_drivers.kubernetes import kubernetes_utils
from tacker.sol_refactored import objects

@ -31,6 +28,10 @@ from tacker.sol_refactored import objects

LOG = logging.getLogger(__name__)

CONF = config.CONF
CHECK_INTERVAL = 10

TARGET_KIND = {"Pod", "Deployment", "DaemonSet", "StatefulSet", "ReplicaSet"}
SCALABLE_KIND = {"Deployment", "ReplicaSet", "StatefulSet"}
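# NOTE: TARGET_KIND lists the workload kinds that are mapped to VDUs when
# building vnfcResourceInfo; SCALABLE_KIND lists the kinds whose
# spec.replicas can be adjusted by scale operations.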


class Kubernetes(object):
@ -39,272 +40,380 @@ class Kubernetes(object):
        pass

    def instantiate(self, req, inst, grant_req, grant, vnfd):
        # pre instantiate cnf
        target_k8s_files = req.additionalParams.get(
            'lcm-kubernetes-def-files')
        vnf_artifact_files = vnfd.get_vnf_artifact_files()
        vim_info = inst_utils.select_vim_info(inst.vimConnectionInfo)
        with kubernetes_utils.AuthContextManager(vim_info) as acm:
            k8s_api_client = acm.init_k8s_api_client()
            self._instantiate(req, inst, grant_req, grant, vnfd,
                              k8s_api_client)

        if vnf_artifact_files is None or set(target_k8s_files).difference(
                set(vnf_artifact_files)):
            if vnf_artifact_files:
                diff_files = ','.join(list(set(
                    target_k8s_files).difference(set(vnf_artifact_files))))
            else:
                diff_files = ','.join(target_k8s_files)

    def _instantiate(self, req, inst, grant_req, grant, vnfd, k8s_api_client):
        target_k8s_files = req.additionalParams['lcm-kubernetes-def-files']

        k8s_reses, namespace = self._setup_k8s_reses(
            vnfd, target_k8s_files, k8s_api_client,
            req.additionalParams.get('namespace'))

        vdus_num = self._get_vdus_num_from_grant_req_res_defs(
            grant_req.addResources)
        vdu_reses = self._select_vdu_reses(vnfd, req.flavourId, k8s_reses)

        for vdu_name, vdu_res in vdu_reses.items():
            if vdu_name not in vdus_num:
                LOG.debug(f'resource name {vdu_res.name} in the kubernetes'
                          f' manifest does not match the VNFD.')
                continue

            if vdu_res.kind in SCALABLE_KIND:
                vdu_res.body['spec']['replicas'] = vdus_num[vdu_name]

        # deploy k8s resources
        for k8s_res in k8s_reses:
            if not k8s_res.is_exists():
                k8s_res.create()

        # wait k8s resource create complete
        self._wait_k8s_reses_ready(k8s_reses)

        # make instantiated info
        self._init_instantiated_vnf_info(
            inst, req.flavourId, target_k8s_files, vdu_reses, namespace)
        self._update_vnfc_info(inst, k8s_api_client)

    def _setup_k8s_reses(self, vnfd, target_k8s_files, k8s_api_client,
                         namespace):
        # NOTE: this check should be done in STARTING phase.
        vnf_artifact_files = vnfd.get_vnf_artifact_files()
        diff_files = set(target_k8s_files) - set(vnf_artifact_files)
        if diff_files:
            diff_files = ','.join(list(diff_files))
            raise sol_ex.CnfDefinitionNotFound(diff_files=diff_files)

        # get k8s content from yaml file
        k8s_resources, namespace = kubernetes_utils.get_k8s_json_file(
            req, inst, target_k8s_files, vnfd, 'INSTANTIATE')
        return kubernetes_utils.get_k8s_reses_from_json_files(
            target_k8s_files, vnfd, k8s_api_client, namespace)

        # sort k8s resource
        sorted_k8s_reses = kubernetes_utils.sort_k8s_resource(
            k8s_resources, 'INSTANTIATE')

        # deploy k8s resources with sorted resources
    def instantiate_rollback(self, req, inst, grant_req, grant, vnfd):
        vim_info = inst_utils.select_vim_info(inst.vimConnectionInfo)
        # This is Context Manager for creation and deletion
        # of CA certificate temp file
        with kubernetes_utils.CaCertFileContextManager(
                vim_info.interfaceInfo.get('ssl_ca_cert')) as ca_cert_cm:
        with kubernetes_utils.AuthContextManager(vim_info) as acm:
            k8s_api_client = acm.init_k8s_api_client()
            self._instantiate_rollback(req, inst, grant_req, grant, vnfd,
                                       k8s_api_client)

            # add an item ca_cert_file:file_path into vim_info.interfaceInfo,
            # and will be deleted in KubernetesClient
            vim_info.interfaceInfo['ca_cert_file'] = ca_cert_cm.file_path
    def _instantiate_rollback(self, req, inst, grant_req, grant, vnfd,
                              k8s_api_client):
        target_k8s_files = req.additionalParams['lcm-kubernetes-def-files']

            k8s_client = kubernetes_utils.KubernetesClient(vim_info)
            created_k8s_reses = k8s_client.create_k8s_resource(
                sorted_k8s_reses, namespace)
        try:
            k8s_reses, _ = self._setup_k8s_reses(
                vnfd, target_k8s_files, k8s_api_client,
                req.additionalParams.get('namespace'))
        except sol_ex.SolException:
            # this means a basic check failed, and it always fails;
            # nothing to do since instantiate failed on it too.
            return
        k8s_reses.reverse()

            # wait k8s resource create complete
            k8s_client.wait_k8s_res_create(created_k8s_reses)

            # make instantiated info
            all_pods = k8s_client.list_namespaced_pods(namespace)
            self._make_cnf_instantiated_info(
                req, inst, vnfd, namespace, created_k8s_reses, all_pods)
    def terminate(self, req, inst, grant_req, grant, vnfd):
        target_k8s_files = inst.metadata.get('lcm-kubernetes-def-files')

        # get k8s content from yaml file
        k8s_resources, namespace = kubernetes_utils.get_k8s_json_file(
            req, inst, target_k8s_files, vnfd, 'TERMINATE')

        # sort k8s resource
        sorted_k8s_reses = kubernetes_utils.sort_k8s_resource(
            k8s_resources, 'TERMINATE')

        # delete k8s resources with sorted resources
        vim_info = inst_utils.select_vim_info(inst.vimConnectionInfo)
        # This is Context Manager for creation and deletion
        # of CA certificate temp file
        with kubernetes_utils.CaCertFileContextManager(
                vim_info.interfaceInfo.get('ssl_ca_cert')) as ca_cert_cm:

            # add an item ca_cert_file:file_path into vim_info.interfaceInfo,
            # and will be deleted in KubernetesClient
            vim_info.interfaceInfo['ca_cert_file'] = ca_cert_cm.file_path

            k8s_client = kubernetes_utils.KubernetesClient(vim_info)
            k8s_client.delete_k8s_resource(req, sorted_k8s_reses, namespace)
        # delete k8s resources
        body = client.V1DeleteOptions(propagation_policy='Foreground')
        self._delete_k8s_resource(k8s_reses, body)

            # wait k8s resource delete complete
            k8s_client.wait_k8s_res_delete(sorted_k8s_reses, namespace)
        self._wait_k8s_reses_deleted(k8s_reses)
    def change_vnfpkg(self, req, inst, grant_req, grant, vnfd):
        if req.additionalParams.get('upgrade_type') == 'RollingUpdate':
            # get deployment name from vnfd
            deployment_names, namespace = (
                self._get_update_deployment_names_and_namespace(
                    vnfd, req, inst))
    def _delete_k8s_resource(self, k8s_reses, body):
        for k8s_res in k8s_reses:
            if k8s_res.is_exists():
                k8s_res.delete(body)

            # check deployment exists in kubernetes
    def terminate(self, req, inst, grant_req, grant, vnfd):
        vim_info = inst_utils.select_vim_info(inst.vimConnectionInfo)
        # This is Context Manager for creation and deletion
        # of CA certificate temp file
        with kubernetes_utils.CaCertFileContextManager(
                vim_info.interfaceInfo.get('ssl_ca_cert')) as ca_cert_cm:
        with kubernetes_utils.AuthContextManager(vim_info) as acm:
            k8s_api_client = acm.init_k8s_api_client()
            self._terminate(req, inst, grant_req, grant, vnfd,
                            k8s_api_client)

            # add an item ca_cert_file:file_path
            # into vim_info.interfaceInfo,
            # and will be deleted in KubernetesClient
            vim_info.interfaceInfo['ca_cert_file'] = ca_cert_cm.file_path
    def _terminate(self, req, inst, grant_req, grant, vnfd, k8s_api_client):
        target_k8s_files = inst.instantiatedVnfInfo.metadata[
            'lcm-kubernetes-def-files']

            k8s_client = kubernetes_utils.KubernetesClient(vim_info)
            k8s_client.check_deployment_exist(deployment_names, namespace)
        # get k8s content from yaml file
        namespace = inst.instantiatedVnfInfo.metadata['namespace']
        k8s_reses, _ = kubernetes_utils.get_k8s_reses_from_json_files(
            target_k8s_files, vnfd, k8s_api_client, namespace)
        k8s_reses.reverse()

            # get new deployment body
            new_deploy_reses = kubernetes_utils.get_new_deployment_body(
                req, inst, vnfd, deployment_names,
                operation='CHANGE_VNFPKG')
        # delete k8s resources
        timeout = 0
        if req.terminationType == 'GRACEFUL':
            timeout = CONF.v2_vnfm.default_graceful_termination_timeout
            if req.obj_attr_is_set('gracefulTerminationTimeout'):
                timeout = req.gracefulTerminationTimeout

        body = client.V1DeleteOptions(propagation_policy='Foreground',
                                      grace_period_seconds=timeout)
        self._delete_k8s_resource(k8s_reses, body)

        # wait k8s resource delete complete
        self._wait_k8s_reses_deleted(k8s_reses)
    def _change_vnfpkg_rolling_update(
            self, inst, grant_req, grant, vnfd, k8s_api_client,
            namespace, old_pods_names):

        vdus_num = self._get_vdus_num_from_grant_req_res_defs(
            grant_req.addResources)
        vdu_reses = []
        for vdu_name, vdu_num in vdus_num.items():
            vdu_res = self._get_vdu_res(inst, k8s_api_client, vdu_name)
            vdu_res.body['spec']['replicas'] = vdu_num
            vdu_reses.append(vdu_res)

            # apply new deployment
            k8s_client.update_k8s_resource(new_deploy_reses, namespace)
        for vdu_res in vdu_reses:
            vdu_res.patch()

            # wait k8s resource update complete
            old_pods_names = [vnfc.computeResource.resourceId for vnfc in
                              inst.instantiatedVnfInfo.vnfcResourceInfo]
            try:
                k8s_client.wait_k8s_res_update(
                    new_deploy_reses, namespace, old_pods_names)
            except sol_ex.UpdateK8SResourceFailed as ex:
                self._update_cnf_instantiated_info(
                    inst, deployment_names,
                    k8s_client.list_namespaced_pods(
                        namespace=namespace))
                raise ex

            # execute coordinate vnf script
            try:
                self._execute_coordinate_vnf_script(
                    req, inst, grant_req, grant, vnfd, 'CHANGE_VNFPKG',
                    namespace, new_deploy_reses)
            except sol_ex.CoordinateVNFExecutionFailed as ex:
                self._update_cnf_instantiated_info(
                    inst, deployment_names,
                    k8s_client.list_namespaced_pods(
                        namespace=namespace))
                raise ex
        self._wait_k8s_reses_updated(
            vdu_reses, k8s_api_client, namespace, old_pods_names)

            # update cnf instantiated info
            all_pods = k8s_client.list_namespaced_pods(namespace)
            self._update_cnf_instantiated_info(
                inst, deployment_names, all_pods)
        self._update_vnfc_info(inst, k8s_api_client)
    def change_vnfpkg(self, req, inst, grant_req, grant, vnfd):
        vim_info = inst_utils.select_vim_info(inst.vimConnectionInfo)
        with kubernetes_utils.AuthContextManager(vim_info) as acm:
            k8s_api_client = acm.init_k8s_api_client()
            self._change_vnfpkg(req, inst, grant_req, grant, vnfd,
                                k8s_api_client)

    def _change_vnfpkg(self, req, inst, grant_req, grant, vnfd,
                       k8s_api_client):
        if req.additionalParams['upgrade_type'] == 'RollingUpdate':
            target_k8s_files = req.additionalParams[
                'lcm-kubernetes-def-files']
            namespace = inst.instantiatedVnfInfo.metadata['namespace']

            target_vdus = {res_def.resourceTemplateId
                           for res_def in grant_req.addResources
                           if res_def.type == 'COMPUTE'}
            old_pods_names = {vnfc.computeResource.resourceId
                for vnfc in inst.instantiatedVnfInfo.vnfcResourceInfo
                if vnfc.vduId in target_vdus}

            k8s_reses, _ = self._setup_k8s_reses(
                vnfd, target_k8s_files, k8s_api_client, namespace)

            vdu_reses = self._select_vdu_reses(
                vnfd, inst.instantiatedVnfInfo.flavourId, k8s_reses)

            self._init_instantiated_vnf_info(
                inst, inst.instantiatedVnfInfo.flavourId, target_k8s_files,
                vdu_reses, namespace)

            self._change_vnfpkg_rolling_update(
                inst, grant_req, grant, vnfd, k8s_api_client, namespace,
                old_pods_names)
        else:
            # TODO(YiFeng): Blue-Green type will be supported in next version.
            raise sol_ex.SolException(sol_detail='not support update type')
            # not reach here
            pass

        inst.vnfdId = req.vnfdId
        if set(req.additionalParams.get(
                'lcm-kubernetes-def-files')).difference(set(
                    inst.metadata.get('lcm-kubernetes-def-files'))):
            inst.metadata['lcm-kubernetes-def-files'] = (
                req.additionalParams.get('lcm-kubernetes-def-files'))
    def change_vnfpkg_rollback(
            self, req, inst, grant_req, grant, vnfd, lcmocc):
        if not lcmocc.obj_attr_is_set('resourceChanges'):
            return
        if req.additionalParams.get('upgrade_type') == 'RollingUpdate':
            deployment_names = list({
                affected_vnfc.metadata['Deployment']['name'] for affected_vnfc
                in lcmocc.resourceChanges.affectedVnfcs if
                affected_vnfc.changeType == 'ADDED'})
            namespace = inst.metadata.get('namespace')

            old_deploy_reses = kubernetes_utils.get_new_deployment_body(
                req, inst, vnfd, deployment_names,
                operation='CHANGE_VNFPKG_ROLLBACK')

            # apply old deployment
    def change_vnfpkg_rollback(self, req, inst, grant_req, grant, vnfd):
        vim_info = inst_utils.select_vim_info(inst.vimConnectionInfo)
        # This is Context Manager for creation and deletion
        # of CA certificate temp file
        with kubernetes_utils.CaCertFileContextManager(
                vim_info.interfaceInfo.get('ssl_ca_cert')) as ca_cert_cm:
        with kubernetes_utils.AuthContextManager(vim_info) as acm:
            k8s_api_client = acm.init_k8s_api_client()
            self._change_vnfpkg_rollback(req, inst, grant_req, grant, vnfd,
                                         k8s_api_client)

            # add an item ca_cert_file:file_path
            # into vim_info.interfaceInfo,
            # and will be deleted in KubernetesClient
            vim_info.interfaceInfo['ca_cert_file'] = ca_cert_cm.file_path
    def _change_vnfpkg_rollback(self, req, inst, grant_req, grant, vnfd,
                                k8s_api_client):
        if req.additionalParams['upgrade_type'] == 'RollingUpdate':
            namespace = inst.instantiatedVnfInfo.metadata['namespace']

            k8s_client = kubernetes_utils.KubernetesClient(vim_info)
            k8s_client.update_k8s_resource(old_deploy_reses, namespace)

            # wait k8s resource update complete
            old_pods_names = [vnfc.computeResource.resourceId for vnfc in
                              inst.instantiatedVnfInfo.vnfcResourceInfo]
            try:
                k8s_client.wait_k8s_res_update(
                    old_deploy_reses, namespace, old_pods_names)
            except sol_ex.UpdateK8SResourceFailed as ex:
                raise ex

            # execute coordinate vnf script
            try:
                self._execute_coordinate_vnf_script(
                    req, inst, grant_req, grant, vnfd,
                    'CHANGE_VNFPKG_ROLLBACK',
                    namespace, old_deploy_reses)
            except sol_ex.CoordinateVNFExecutionFailed as ex:
                raise ex

            # update cnf instantiated info
            all_pods = k8s_client.list_namespaced_pods(namespace)
            self._update_cnf_instantiated_info(
                inst, deployment_names, all_pods)
            original_pods = {vnfc.computeResource.resourceId for vnfc in
                             inst.instantiatedVnfInfo.vnfcResourceInfo}
            all_pods = kubernetes_utils.list_namespaced_pods(
                k8s_api_client, namespace)
            current_pods = {pod.metadata.name for pod in all_pods}
            old_pods_names = current_pods - original_pods

            self._change_vnfpkg_rolling_update(
                inst, grant_req, grant, vnfd, k8s_api_client, namespace,
                old_pods_names)
        else:
            # TODO(YiFeng): Blue-Green type will be supported in next version.
            raise sol_ex.SolException(sol_detail='not support update type')
            # not reach here
            pass
    def _get_update_deployment_names_and_namespace(self, vnfd, req, inst):
        vdu_nodes = vnfd.get_vdu_nodes(
            flavour_id=inst.instantiatedVnfInfo.flavourId)
    def heal(self, req, inst, grant_req, grant, vnfd):
        vim_info = inst_utils.select_vim_info(inst.vimConnectionInfo)
        with kubernetes_utils.AuthContextManager(vim_info) as acm:
            k8s_api_client = acm.init_k8s_api_client()
            self._heal(req, inst, grant_req, grant, vnfd, k8s_api_client)

        if req.additionalParams.get('vdu_params'):
            target_vdus = [vdu_param.get('vdu_id') for vdu_param
                           in req.additionalParams.get('vdu_params')]
            if None in target_vdus:
                raise sol_ex.MissingParameterException
    def _heal(self, req, inst, grant_req, grant, vnfd, k8s_api_client):
        namespace = inst.instantiatedVnfInfo.metadata['namespace']

        # get heal Pod name
        vnfc_res_ids = [res_def.resource.resourceId
                        for res_def in grant_req.removeResources
                        if res_def.type == 'COMPUTE']

        target_vnfcs = [vnfc
                        for vnfc in inst.instantiatedVnfInfo.vnfcResourceInfo
                        if vnfc.computeResource.resourceId in vnfc_res_ids]

        # check running Pod
        all_pods = kubernetes_utils.list_namespaced_pods(
            k8s_api_client, namespace)
        current_pods_name = [pod.metadata.name for pod in all_pods]

        old_pods_names = set()
        vdu_reses = {}
        for vnfc in target_vnfcs:
            if vnfc.id not in current_pods_name:
                # may happen when retry or auto healing
                msg = f'heal target pod {vnfc.id} is not in the running pod.'
                LOG.error(msg)
                continue
            if vnfc.vduId in vdu_reses:
                res = vdu_reses[vnfc.vduId]
            else:
                target_vdus = [inst_vnc.vduId for inst_vnc in
                               inst.instantiatedVnfInfo.vnfcResourceInfo]
                res = self._get_vdu_res(inst, k8s_api_client, vnfc.vduId)
                vdu_reses[vnfc.vduId] = res
            res.delete_pod(vnfc.id)
            old_pods_names.add(vnfc.id)

            deployment_names = [value.get('properties', {}).get('name')
                                for name, value in vdu_nodes.items()
                                if name in target_vdus]
            namespace = inst.metadata.get('namespace')
        # wait k8s resource update complete
        if old_pods_names:
            self._wait_k8s_reses_updated(list(vdu_reses.values()),
                k8s_api_client, namespace, old_pods_names)

            return deployment_names, namespace
        # make instantiated info
        self._update_vnfc_info(inst, k8s_api_client)
    def _make_cnf_instantiated_info(
            self, req, inst, vnfd, namespace, created_k8s_reses, all_pods):
        flavour_id = req.flavourId
        target_kinds = {"Pod", "Deployment", "DaemonSet",
                        "StatefulSet", "ReplicaSet"}
    def _scale_k8s_resource(self, inst, vdus_num, k8s_api_client):
        namespace = inst.instantiatedVnfInfo.metadata['namespace']

        vdu_reses = []
        for vdu_name, vdu_num in vdus_num.items():
            vdu_res = self._get_vdu_res(inst, k8s_api_client, vdu_name)
            if vdu_res.kind not in SCALABLE_KIND:
                LOG.error(f'scale vdu {vdu_name}'
                          f' is not scalable resource')
                continue
            vdu_res.scale(vdu_num)
            vdu_reses.append(vdu_res)

        # wait k8s resource update complete
        self._wait_k8s_reses_updated(vdu_reses, k8s_api_client,
                                     namespace, old_pods_names=set())

        # make instantiated info
        self._update_vnfc_info(inst, k8s_api_client)
    def scale(self, req, inst, grant_req, grant, vnfd):

        if req.type == 'SCALE_OUT':
            vdus_num = self._get_vdus_num_from_grant_req_res_defs(
                grant_req.addResources)
            for vdu_name, vdu_num in vdus_num.items():
                vdus_num[vdu_name] = (self._get_current_vdu_num(inst, vdu_name)
                                      + vdu_num)
        elif req.type == 'SCALE_IN':
            vdus_num = self._get_vdus_num_from_grant_req_res_defs(
                grant_req.removeResources)
            for vdu_name, vdu_num in vdus_num.items():
                vdus_num[vdu_name] = (self._get_current_vdu_num(inst, vdu_name)
                                      - vdu_num)

        vim_info = inst_utils.select_vim_info(inst.vimConnectionInfo)
        with kubernetes_utils.AuthContextManager(vim_info) as acm:
            k8s_api_client = acm.init_k8s_api_client()
            self._scale_k8s_resource(inst, vdus_num, k8s_api_client)

    def scale_rollback(self, req, inst, grant_req, grant, vnfd):

        vdus_num = self._get_vdus_num_from_grant_req_res_defs(
            grant_req.addResources)
        for vdu_name, _ in vdus_num.items():
            vdus_num[vdu_name] = self._get_current_vdu_num(inst, vdu_name)

        vim_info = inst_utils.select_vim_info(inst.vimConnectionInfo)
        with kubernetes_utils.AuthContextManager(vim_info) as acm:
            k8s_api_client = acm.init_k8s_api_client()
            self._scale_k8s_resource(inst, vdus_num, k8s_api_client)

    def _get_vdus_num_from_grant_req_res_defs(self, res_defs):
        vdus_num = {}
        for res_def in res_defs:
            if res_def.type == 'COMPUTE':
                vdus_num.setdefault(res_def.resourceTemplateId, 0)
                vdus_num[res_def.resourceTemplateId] += 1
        return vdus_num
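    # For example, three COMPUTE resource definitions for 'VDU1' in a grant
    # request yield {'VDU1': 3}; scale() above adds this to (or subtracts
    # it from) the current Pod count to obtain the new replica total.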

    def _get_current_vdu_num(self, inst, vdu):
        num = 0
        for vnfc in inst.instantiatedVnfInfo.vnfcResourceInfo:
            if vnfc.vduId == vdu:
                num += 1
        return num

    def _select_vdu_reses(self, vnfd, flavour_id, k8s_reses):
        vdu_nodes = vnfd.get_vdu_nodes(flavour_id)
        vdu_ids = {value.get('properties').get('name'): key
                   for key, value in vdu_nodes.items()}
        return {vdu_ids[res.name]: res
                for res in k8s_reses
                if res.kind in TARGET_KIND and res.name in vdu_ids}
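    # NOTE: a manifest is associated with a VDU by matching the resource's
    # metadata.name against the VDU node's 'name' property in the VNFD,
    # and only resources of a kind in TARGET_KIND take part in the mapping.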

        vnfc_resources = []
        for k8s_res in created_k8s_reses:
            if k8s_res.get('kind', '') not in target_kinds:
                continue
            for pod in all_pods:
                pod_name = pod.metadata.name
                match_result = kubernetes_utils.is_match_pod_naming_rule(
                    k8s_res.get('kind', ''), k8s_res.get('name', ''),
                    pod_name)
                if match_result:
                    metadata = {}
                    metadata[k8s_res.get('kind')] = k8s_res.get('metadata')
                    if k8s_res.get('kind') != 'Pod':
                        metadata['Pod'] = pod.metadata.to_dict()
                    vnfc_resource = objects.VnfcResourceInfoV2(
                        id=uuidutils.generate_uuid(),
                        vduId=vdu_ids.get(k8s_res.get('name', '')),
                        computeResource=objects.ResourceHandle(
                            resourceId=pod_name,
                            vimLevelResourceType=k8s_res.get('kind')
                        ),
                        metadata=metadata
                    )
                    vnfc_resources.append(vnfc_resource)

        inst_vnf_info = objects.VnfInstanceV2_InstantiatedVnfInfo(
    def _init_instantiated_vnf_info(self, inst, flavour_id, def_files,
                                    vdu_reses, namespace):
        metadata = {
            'namespace': namespace,
            'lcm-kubernetes-def-files': def_files,
            'vdu_reses': {vdu_name: vdu_res.body
                          for vdu_name, vdu_res in vdu_reses.items()}
        }
        inst.instantiatedVnfInfo = objects.VnfInstanceV2_InstantiatedVnfInfo(
            flavourId=flavour_id,
            vnfState='STARTED',
            metadata=metadata
        )

        if vnfc_resources:
            inst_vnf_info.vnfcResourceInfo = vnfc_resources
    def _get_vdu_res(self, inst, k8s_api_client, vdu):
        # must be found
        res = inst.instantiatedVnfInfo.metadata['vdu_reses'][vdu]
        cls = getattr(kubernetes_resource, res['kind'])
        return cls(k8s_api_client, res)
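    # NOTE: the manifest bodies saved in instantiatedVnfInfo.metadata
    # ['vdu_reses'] by _init_instantiated_vnf_info() are rehydrated here
    # into the matching kubernetes_resource class (e.g. Deployment) so
    # that later operations can patch, scale or delete them.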

    def _update_vnfc_info(self, inst, k8s_api_client):
        all_pods = kubernetes_utils.list_namespaced_pods(
            k8s_api_client, inst.instantiatedVnfInfo.metadata['namespace'])
        vnfc_resources = []
        for pod in all_pods:
            pod_name = pod.metadata.name
            for vdu_name, vdu_res in (
                    inst.instantiatedVnfInfo.metadata['vdu_reses'].items()):
                if kubernetes_utils.is_match_pod_naming_rule(
                        vdu_res['kind'], vdu_res['metadata']['name'],
                        pod_name):
                    vnfc_resources.append(objects.VnfcResourceInfoV2(
                        id=pod_name,
                        vduId=vdu_name,
                        computeResource=objects.ResourceHandle(
                            resourceId=pod_name,
                            vimLevelResourceType=vdu_res['kind']
                        ),
                        # lcmocc_utils.update_lcmocc assumes its existence
                        metadata={}
                    ))

        inst.instantiatedVnfInfo.vnfcResourceInfo = vnfc_resources

        # make vnfcInfo
        # NOTE: vnfcInfo only exists in SOL002
        inst_vnf_info.vnfcInfo = [
        inst.instantiatedVnfInfo.vnfcInfo = [
            objects.VnfcInfoV2(
                id=f'{vnfc_res_info.vduId}-{vnfc_res_info.id}',
                vduId=vnfc_res_info.vduId,
@ -314,76 +423,52 @@ class Kubernetes(object):
            for vnfc_res_info in vnfc_resources
        ]

        inst.instantiatedVnfInfo = inst_vnf_info
        inst.metadata = {"namespace": namespace if namespace else None}
        inst.metadata['lcm-kubernetes-def-files'] = req.additionalParams.get(
            'lcm-kubernetes-def-files')
    def _check_status(self, check_func, *args):
        timer = loopingcall.FixedIntervalWithTimeoutLoopingCall(
            check_func, *args)
        try:
            timer.start(interval=CHECK_INTERVAL,
                timeout=CONF.v2_vnfm.kubernetes_vim_rsc_wait_timeout).wait()
        except loopingcall.LoopingCallTimeOut:
            raise sol_ex.K8sOperaitionTimeout()
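    # NOTE: _check_status() invokes check_func every CHECK_INTERVAL seconds;
    # check_func signals completion by raising LoopingCallDone, and a
    # LoopingCallTimeOut after CONF.v2_vnfm.kubernetes_vim_rsc_wait_timeout
    # is translated into K8sOperaitionTimeout.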
    def _execute_coordinate_vnf_script(
            self, req, inst, grant_req, grant, vnfd,
            operation, namespace, new_deploy_reses):
        coordinate_vnf = None
        if req.obj_attr_is_set('additionalParams'):
            if operation == 'CHANGE_VNFPKG':
                coordinate_vnf = req.additionalParams.get(
                    'lcm-operation-coordinate-new-vnf')
            else:
                coordinate_vnf = req.additionalParams.get(
                    'lcm-operation-coordinate-old-vnf')
    def _wait_k8s_reses_ready(self, k8s_reses):
        def _check_ready(check_reses):
            ok_reses = {res for res in check_reses if res.is_ready()}
            check_reses -= ok_reses
            if not check_reses:
                raise loopingcall.LoopingCallDone()

        if coordinate_vnf:
            tmp_csar_dir = vnfd.make_tmp_csar_dir()
            script_dict = {
                "request": req.to_dict(),
                "vnf_instance": inst.to_dict(),
                "grant_request": grant_req.to_dict(),
                "grant_response": grant.to_dict(),
                "tmp_csar_dir": tmp_csar_dir,
                "k8s_info": {
                    "namespace": namespace,
                    "new_deploy_reses": new_deploy_reses
                }
            }
            script_path = os.path.join(tmp_csar_dir, coordinate_vnf)
            out = subprocess.run(["python3", script_path],
                input=pickle.dumps(script_dict),
                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
            if out.returncode != 0:
                LOG.error(out)
                raise sol_ex.CoordinateVNFExecutionFailed
        check_reses = set(k8s_reses)
        self._check_status(_check_ready, check_reses)
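    # For reference, a coordination script receives the pickled script_dict
    # above on stdin and reports failure through its exit status. A minimal
    # sketch of such a script (the key handling is illustrative, not part
    # of this commit):
    #
    #     import pickle
    #     import sys
    #
    #     script_dict = pickle.load(sys.stdin.buffer)
    #     namespace = script_dict['k8s_info']['namespace']
    #     # application-specific coordination goes here
    #     sys.exit(0)  # non-zero exit -> CoordinateVNFExecutionFailed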
    def _update_cnf_instantiated_info(self, inst, deployment_names, all_pods):
        error_resource = None
        for vnfc in inst.instantiatedVnfInfo.vnfcResourceInfo:
            if (vnfc.computeResource.vimLevelResourceType == 'Deployment'
                    ) and (vnfc.metadata.get('Deployment').get(
                        'name') in deployment_names):
                pods_info = [pod for pod in all_pods if
                             kubernetes_utils.is_match_pod_naming_rule(
                                 'Deployment',
                                 vnfc.metadata.get('Deployment').get('name'),
                                 pod.metadata.name)]
                if 'Pending' in [pod.status.phase for pod in pods_info] or (
                        'Unknown' in [pod.status.phase for pod in pods_info]):
                    pod_name = [pod.metadata.name for pod in pods_info
                                if pod.status.phase in [
                                    'Pending', 'Unknown']][0]
                    error_resource = objects.VnfcResourceInfoV2(
                        id=uuidutils.generate_uuid(),
                        vduId=vnfc.vduId,
                        computeResource=objects.ResourceHandle(
                            resourceId=pod_name,
                            vimLevelResourceType='Deployment'
                        ),
                        metadata={'Deployment': vnfc.metadata.get(
                            'Deployment')}
                    )
                    continue
                pod_info = pods_info.pop(-1)
                vnfc.id = uuidutils.generate_uuid()
                vnfc.computeResource.resourceId = pod_info.metadata.name
                vnfc.metadata['Pod'] = pod_info.metadata.to_dict()
                all_pods.remove(pod_info)
    def _wait_k8s_reses_deleted(self, k8s_reses):
        def _check_deleted(check_reses):
            ok_reses = {res for res in check_reses if not res.is_exists()}
            check_reses -= ok_reses
            if not check_reses:
                raise loopingcall.LoopingCallDone()

        if error_resource:
            inst.instantiatedVnfInfo.vnfcResourceInfo.append(error_resource)
        check_reses = set(k8s_reses)
        self._check_status(_check_deleted, check_reses)

    def _wait_k8s_reses_updated(self, k8s_reses, k8s_api_client, namespace,
                                old_pods_names):
        def _check_update(check_reses, k8s_api_client, namespace,
                          old_pods_names):
            ok_reses = set()
            all_pods = kubernetes_utils.list_namespaced_pods(
                k8s_api_client, namespace)
            for res in check_reses:
                pods_info = [pod for pod in all_pods
                             if kubernetes_utils.is_match_pod_naming_rule(
                                 res.kind, res.name, pod.metadata.name)]
                if res.is_update(pods_info, old_pods_names):
                    ok_reses.add(res)
            check_reses -= ok_reses
            if not check_reses:
                raise loopingcall.LoopingCallDone()

        check_reses = set(k8s_reses)
        self._check_status(_check_update, check_reses, k8s_api_client,
                           namespace, old_pods_names)
@ -0,0 +1,586 @@
# Copyright (C) 2022 Nippon Telegraph and Telephone Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import inspect
import ipaddress
import re
import time

from kubernetes import client
from oslo_log import log as logging

from tacker.sol_refactored.common import config
from tacker.sol_refactored.common import exceptions as sol_ex


LOG = logging.getLogger(__name__)

CONF = config.CONF
CHECK_INTERVAL = 10


def convert(name):
    return re.sub('([A-Z])', lambda x: '_' + x.group(1).lower(), name)
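# For example, convert('DaemonSet') returns '_daemon_set'; the resource
# classes below use it to build python-client method names such as
# 'create_namespaced' + convert('DaemonSet') == 'create_namespaced_daemon_set'.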


class CommonResource:
    # Default API Class
    api_class = client.CoreV1Api

    def __init__(self, k8s_api_client, k8s_res):
        self.k8s_api_client = k8s_api_client
        self.k8s_client = self.api_class(api_client=self.k8s_api_client)
        self.kind = k8s_res['kind']
        self.namespace = k8s_res.get('metadata', {}).get('namespace')
        self.name = k8s_res.get('metadata', {}).get('name')
        self.metadata = k8s_res.get('metadata', {})
        self.body = k8s_res

    def create(self):
        pass

    def read(self):
        pass

    def delete(self, body):
        pass

    def is_exists(self):
        try:
            return self.read() is not None
        except sol_ex.K8sResourceNotFound:
            return False

    def is_ready(self):
        return True

class NamespacedResource(CommonResource):

    def create(self):
        method = getattr(self.k8s_client,
            'create_namespaced' + convert(self.__class__.__name__))
        try:
            method(namespace=self.namespace, body=self.body)
        except Exception as ex:
            operation = inspect.currentframe().f_code.co_name
            sol_title = "%s failed" % operation
            raise sol_ex.K8sOperationFailed(sol_title=sol_title,
                                            sol_detail=str(ex))

    def read(self):
        method = getattr(self.k8s_client,
            'read_namespaced' + convert(self.__class__.__name__))
        try:
            return method(namespace=self.namespace, name=self.name)
        except Exception as ex:
            if isinstance(ex, client.ApiException) and ex.status == 404:
                raise sol_ex.K8sResourceNotFound(rsc_name=self.name)
            else:
                operation = inspect.currentframe().f_code.co_name
                sol_title = "%s failed" % operation
                raise sol_ex.K8sOperationFailed(sol_title=sol_title,
                                                sol_detail=str(ex))

    def delete(self, body):
        method = getattr(self.k8s_client,
            'delete_namespaced' + convert(self.__class__.__name__))
        try:
            method(namespace=self.namespace, name=self.name, body=body)
        except Exception as ex:
            operation = inspect.currentframe().f_code.co_name
            sol_title = "%s failed" % operation
            raise sol_ex.K8sOperationFailed(sol_title=sol_title,
                                            sol_detail=str(ex))

    def patch(self):
        method = getattr(self.k8s_client,
            'patch_namespaced' + convert(self.__class__.__name__))
        try:
            method(namespace=self.namespace, name=self.name, body=self.body)
        except Exception as ex:
            operation = inspect.currentframe().f_code.co_name
            sol_title = "%s failed" % operation
            raise sol_ex.K8sOperationFailed(sol_title=sol_title,
                                            sol_detail=str(ex))

    def scale(self, scale_replicas):
        body = {'spec': {'replicas': scale_replicas}}
        method = getattr(self.k8s_client,
            'patch_namespaced' + convert(self.__class__.__name__) + '_scale')
        try:
            method(namespace=self.namespace, name=self.name, body=body)
            self.body['spec']['replicas'] = scale_replicas
        except Exception as ex:
            operation = inspect.currentframe().f_code.co_name
            sol_title = "%s failed" % operation
            raise sol_ex.K8sOperationFailed(sol_title=sol_title,
                                            sol_detail=str(ex))

    def delete_pod(self, pod_name):
        body = client.V1DeleteOptions(propagation_policy='Foreground')
        v1 = client.CoreV1Api(api_client=self.k8s_api_client)
        try:
            v1.delete_namespaced_pod(namespace=self.namespace,
                                     name=pod_name, body=body)
        except Exception as ex:
            operation = inspect.currentframe().f_code.co_name
            sol_title = "%s failed" % operation
            raise sol_ex.K8sOperationFailed(sol_title=sol_title,
                                            sol_detail=str(ex))

class ClusterResource(CommonResource):

    def create(self):
        method = getattr(self.k8s_client,
            'create' + convert(self.__class__.__name__))
        try:
            method(body=self.body)
        except Exception as ex:
            operation = inspect.currentframe().f_code.co_name
            sol_title = "%s failed" % operation
            raise sol_ex.K8sOperationFailed(sol_title=sol_title,
                                            sol_detail=str(ex))

    def read(self):
        method = getattr(self.k8s_client,
            'read' + convert(self.__class__.__name__))
        try:
            return method(name=self.name)
        except Exception as ex:
            if isinstance(ex, client.ApiException) and ex.status == 404:
                raise sol_ex.K8sResourceNotFound(rsc_name=self.name)
            else:
                operation = inspect.currentframe().f_code.co_name
                sol_title = "%s failed" % operation
                raise sol_ex.K8sOperationFailed(sol_title=sol_title,
                                                sol_detail=str(ex))

    def delete(self, body):
        method = getattr(self.k8s_client,
            'delete' + convert(self.__class__.__name__))
        try:
            method(name=self.name, body=body)
        except Exception as ex:
            operation = inspect.currentframe().f_code.co_name
            sol_title = "%s failed" % operation
            raise sol_ex.K8sOperationFailed(sol_title=sol_title,
                                            sol_detail=str(ex))

    def patch(self):
        method = getattr(self.k8s_client,
            'patch' + convert(self.__class__.__name__))
        try:
            method(namespace=self.namespace, name=self.name, body=self.body)
        except Exception as ex:
            operation = inspect.currentframe().f_code.co_name
            sol_title = "%s failed" % operation
            raise sol_ex.K8sOperationFailed(sol_title=sol_title,
                                            sol_detail=str(ex))

class AuthenticationResource(CommonResource):
    def create(self):
        method = getattr(self.k8s_client,
            'create' + convert(self.__class__.__name__))
        try:
            method(body=self.body)
        except Exception as ex:
            operation = inspect.currentframe().f_code.co_name
            sol_title = "%s failed" % operation
            raise sol_ex.K8sOperationFailed(sol_title=sol_title,
                                            sol_detail=str(ex))


class ComponentStatus(CommonResource):
    pass

class ConfigMap(NamespacedResource):
    pass


class Container(CommonResource):
    pass


class LimitRange(NamespacedResource):
    pass


class Namespace(ClusterResource):

    def is_ready(self):
        namespace_info = self.read()
        return (namespace_info.status.phase and
                namespace_info.status.phase == 'Active')


class Node(ClusterResource):

    def is_ready(self):
        node_info = self.read()
        for condition in node_info.status.conditions:
            if condition.type == 'Ready' and condition.status == 'True':
                return True
        return False


class PersistentVolume(ClusterResource):

    def is_ready(self):
        volume_info = self.read()
        return (volume_info.status.phase and
                volume_info.status.phase in ['Available', 'Bound'])


class PersistentVolumeClaim(NamespacedResource):

    def is_ready(self):
        claim_info = self.read()
        return claim_info.status.phase and claim_info.status.phase == 'Bound'


class Pod(NamespacedResource):

    def delete_pod(self, pod_name):
        # Get Pod information before deletion
        pod_info = self.read()
        body = client.V1DeleteOptions(propagation_policy='Foreground')
        self.delete(body=body)

        timeout = CONF.v2_vnfm.kubernetes_vim_rsc_wait_timeout
        max_check_count = (timeout / CHECK_INTERVAL)
        check_count = 0
        while (check_count < max_check_count):
            if not self.is_exists():
                break
            check_count += 1
            time.sleep(CHECK_INTERVAL)
        else:
            raise sol_ex.K8sOperaitionTimeout()

        create_info = client.V1Pod(metadata=self.metadata, spec=pod_info.spec)
        self.k8s_client.create_namespaced_pod(
            namespace=self.namespace, body=create_info)
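        # NOTE: unlike Pods owned by a controller, a bare Pod is not
        # recreated automatically, so heal for a Pod VDU deletes it and
        # re-creates it above from the spec that was read before deletion.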

    def is_ready(self):
        pod_info = self.read()
        return pod_info.status.phase and pod_info.status.phase == 'Running'

    def is_update(self, pods_info, old_pods_names):
        return self.is_ready()


class PodTemplate(NamespacedResource):
    pass


class ResourceQuota(NamespacedResource):
    pass


class Secret(NamespacedResource):
    pass


class Service(NamespacedResource):

    def is_ready(self):

        def _check_is_ip(ip_addr):
            try:
                ipaddress.ip_address(ip_addr)
                return True
            except ValueError:
                return False

        service_info = self.read()
        if service_info.spec.cluster_ip in ['', None] or _check_is_ip(
                service_info.spec.cluster_ip):
            try:
                endpoint_info = self.k8s_client.read_namespaced_endpoints(
                    namespace=self.namespace, name=self.name)
                if endpoint_info:
                    return True
            except Exception as ex:
                sol_title = "Read Endpoint failed"
                raise sol_ex.K8sOperationFailed(sol_title=sol_title,
                                                sol_detail=str(ex))


class ServiceAccount(NamespacedResource):
    pass


class Volume(CommonResource):
    pass


class ControllerRevision(NamespacedResource):
    api_class = client.AppsV1Api

    def delete(self, body):
        body = client.V1DeleteOptions(
            propagation_policy='Background')
        try:
            self.k8s_client.delete_namespaced_controller_revision(
                namespace=self.namespace, name=self.name, body=body)
        except Exception as ex:
            operation = inspect.currentframe().f_code.co_name
            sol_title = "%s failed" % operation
            raise sol_ex.K8sOperationFailed(sol_title=sol_title,
                                            sol_detail=str(ex))


class DaemonSet(NamespacedResource):
    api_class = client.AppsV1Api

    def is_ready(self):
        daemonset_info = self.read()
        return (daemonset_info.status.desired_number_scheduled and
                (daemonset_info.status.desired_number_scheduled ==
                 daemonset_info.status.number_ready))

    def is_update(self, pods_info, old_pods_names):
        daemonset_info = self.read()
        replicas = daemonset_info.status.desired_number_scheduled

        for pod_info in pods_info:
            if (pod_info.status.phase != 'Running' or
                    pod_info.metadata.name in old_pods_names):
                return False

        return len(pods_info) == replicas


class Deployment(NamespacedResource):
    api_class = client.AppsV1Api

    def is_ready(self):
        deployment_info = self.read()
        return (deployment_info.status.replicas and
                (deployment_info.status.replicas ==
                 deployment_info.status.ready_replicas))

    def is_update(self, pods_info, old_pods_names):
        deployment_info = self.read()
        replicas = deployment_info.spec.replicas

        for pod_info in pods_info:
            if (pod_info.status.phase != 'Running' or
                    pod_info.metadata.name in old_pods_names):
                return False

        return len(pods_info) == replicas


class ReplicaSet(NamespacedResource):
    api_class = client.AppsV1Api

    def is_ready(self):
        replicaset_info = self.read()
        return (replicaset_info.status.replicas and
                (replicaset_info.status.replicas ==
                 replicaset_info.status.ready_replicas))

    def is_update(self, pods_info, old_pods_names):
        replicaset_info = self.read()
        replicas = replicaset_info.spec.replicas

        for pod_info in pods_info:
            if (pod_info.status.phase != 'Running' or
                    pod_info.metadata.name in old_pods_names):
                return False

        return len(pods_info) == replicas


class StatefulSet(NamespacedResource):
    api_class = client.AppsV1Api

    def delete(self, body):
        pvcs_for_delete = []
        try:
            resp_read_sfs = self.read()
            sfs_spec = resp_read_sfs.spec
            volume_claim_templates = sfs_spec.volume_claim_templates

            v1 = client.CoreV1Api(api_client=self.k8s_api_client)
            resps_pvc = v1.list_namespaced_persistent_volume_claim(
                namespace=self.namespace)
            pvcs = resps_pvc.items
            for volume_claim_template in volume_claim_templates:
                pvc_template_metadata = volume_claim_template.metadata
                match_pattern = '-'.join(
                    [pvc_template_metadata.name, self.name, ""])

                for pvc in pvcs:
                    pvc_metadata = pvc.metadata
                    pvc_name = pvc_metadata.name
                    match_result = re.match(
                        match_pattern + '[0-9]+$', pvc_name)
                    if match_result is not None:
                        pvcs_for_delete.append(pvc_name)
        except Exception:
            pass

        try:
            self.k8s_client.delete_namespaced_stateful_set(
                namespace=self.namespace, name=self.name, body=body)

            for delete_pvc_name in pvcs_for_delete:
                try:
                    v1 = client.CoreV1Api(api_client=self.k8s_api_client)
                    v1.delete_namespaced_persistent_volume_claim(
                        name=delete_pvc_name, namespace=self.namespace,
                        body=body)
                except Exception as ex:
                    operation = inspect.currentframe().f_code.co_name
                    sol_title = "%s failed" % operation
                    raise sol_ex.K8sOperationFailed(sol_title=sol_title,
                                                    sol_detail=str(ex))
        except Exception as ex:
            operation = inspect.currentframe().f_code.co_name
            sol_title = "%s failed" % operation
            raise sol_ex.K8sOperationFailed(sol_title=sol_title,
                                            sol_detail=str(ex))

    def is_ready(self):
        statefulset_info = self.read()
        replicas = statefulset_info.status.replicas
        if replicas == statefulset_info.status.ready_replicas:
            for i in range(0, statefulset_info.spec.replicas):
                volume_claim_templates = (
                    statefulset_info.spec.volume_claim_templates)
                for volume_claim_template in volume_claim_templates:
                    pvc_name = "-".join(
                        [volume_claim_template.metadata.name,
                         self.name, str(i)])
                    v1 = client.CoreV1Api(api_client=self.k8s_api_client)
                    persistent_volume_claim = (
                        v1.read_namespaced_persistent_volume_claim(
                            namespace=self.namespace, name=pvc_name))
                    if persistent_volume_claim.status.phase != 'Bound':
                        return False
            return True
        else:
            return False

    def is_update(self, pods_info, old_pods_names):
        statefulset_info = self.read()
        replicas = statefulset_info.spec.replicas

        for pod_info in pods_info:
            if pod_info.status.phase != 'Running':
                return False

        return len(pods_info) == replicas


class HorizontalPodAutoscaler(NamespacedResource):
    api_class = client.AutoscalingV1Api


class Job(NamespacedResource):
    api_class = client.BatchV1Api

    def is_ready(self):
        job_info = self.read()
        return (job_info.spec.completions and
                job_info.spec.completions == job_info.status.succeeded)


class APIService(ClusterResource):
    api_class = client.ApiregistrationV1Api

    def is_ready(self):
        api_service_info = self.read()
        for condition in api_service_info.status.conditions:
            if condition.type == 'Available':
                if condition.status != 'True':
                    return False
        return True


class TokenReview(AuthenticationResource):
    api_class = client.AuthenticationV1Api


class LocalSubjectAccessReview(AuthenticationResource):
    api_class = client.AuthorizationV1Api

    def create(self):
        try:
            self.k8s_client.create_namespaced_local_subject_access_review(
                namespace=self.namespace, body=self.body)
|
||||
except Exception as ex:
|
||||
operation = inspect.currentframe().f_code.co_name
|
||||
sol_title = "%s failed" % operation
|
||||
raise sol_ex.K8sOperationFailed(sol_title=sol_title,
|
||||
sol_detail=str(ex))
|
||||
|
||||
|
||||
class SelfSubjectAccessReview(AuthenticationResource):
|
||||
api_class = client.AuthorizationV1Api
|
||||
|
||||
|
||||
class SelfSubjectRulesReview(AuthenticationResource):
|
||||
api_class = client.AuthorizationV1Api
|
||||
|
||||
|
||||
class SubjectAccessReview(AuthenticationResource):
|
||||
api_class = client.AuthorizationV1Api
|
||||
|
||||
|
||||
class Lease(NamespacedResource):
|
||||
api_class = client.CoordinationV1Api
|
||||
|
||||
|
||||
class NetworkPolicy(NamespacedResource):
|
||||
api_class = client.NetworkingV1Api
|
||||
|
||||
|
||||
class ClusterRole(ClusterResource):
|
||||
api_class = client.RbacAuthorizationV1Api
|
||||
|
||||
|
||||
class ClusterRoleBinding(ClusterResource):
|
||||
api_class = client.RbacAuthorizationV1Api
|
||||
|
||||
|
||||
class Role(NamespacedResource):
|
||||
api_class = client.RbacAuthorizationV1Api
|
||||
|
||||
|
||||
class RoleBinding(NamespacedResource):
|
||||
api_class = client.RbacAuthorizationV1Api
|
||||
|
||||
|
||||
class PriorityClass(ClusterResource):
|
||||
api_class = client.SchedulingV1Api
|
||||
|
||||
|
||||
class StorageClass(ClusterResource):
|
||||
api_class = client.StorageV1Api
|
||||
|
||||
|
||||
class VolumeAttachment(ClusterResource):
|
||||
api_class = client.StorageV1Api
|
||||
|
||||
def is_ready(self):
|
||||
volume_info = self.read()
|
||||
return volume_info.status.attached
@@ -13,28 +13,22 @@
# License for the specific language governing permissions and limitations
# under the License.

import copy
import ipaddress
import os
import re
import tempfile
import time
from urllib.parse import urlparse
import urllib.request as urllib2

from kubernetes import client
from oslo_log import log as logging
from oslo_service import loopingcall
import yaml

from tacker.sol_refactored.common import config
from tacker.sol_refactored.common import exceptions as sol_ex
from tacker.sol_refactored.objects.v2 import fields as v2fields
from tacker.sol_refactored.infra_drivers.kubernetes import kubernetes_resource


LOG = logging.getLogger(__name__)
CONF = config.CONF
CHECK_INTERVAL = 10

SUPPORTED_NAMESPACE_KINDS = [
    "Pod",
    "Binding",
@@ -59,478 +53,6 @@ SUPPORTED_NAMESPACE_KINDS = [
    "RoleBinding",
    "Role"
]
RESOURCE_CREATION_ORDER = [
    "StorageClass",
    "PersistentVolume",
    "PriorityClass",
    "Namespace",
    "LimitRange",
    "ResourceQuota",
    "HorizontalPodAutoscaler",
    "NetworkPolicy",
    "Service",
    "Endpoints",
    "PersistentVolumeClaim",
    "ConfigMap",
    "Secret",
    "Pod",
    "Binding",
    "StatefulSet",
    "Job",
    "Deployment",
    "DaemonSet",
]
STATUS_CHECK_RES = [
    "Pod",
    "Service",
    "PersistentVolumeClaim",
    "Namespace",
    "Node",
    "PersistentVolume",
    "APIService",
    "DaemonSet",
    "Deployment",
    "ReplicaSet",
    "StatefulSet",
    "Job",
    "VolumeAttachment"
]


class KubernetesClient(object):

    def __init__(self, vim_info):
        self.k8s_api_client = init_k8s_api_client(vim_info)
        self.k8s_clients = get_k8s_clients(self.k8s_api_client)

    def create_k8s_resource(self, sorted_k8s_reses, namespace):
        created_k8s_reses = []

        for k8s_res in sorted_k8s_reses:
            kind = k8s_res.get('kind', '')
            api_version = k8s_res.get('apiVersion', '')
            name = k8s_res.get('metadata', {}).get('name', '')
            metadata = k8s_res.get('metadata', {})
            body = k8s_res
            k8s_client = self.k8s_clients[api_version]
            try:
                if kind in SUPPORTED_NAMESPACE_KINDS:
                    k8s_method = getattr(
                        k8s_client, f"create_namespaced_{convert(kind)}")
                    k8s_method(namespace=namespace, body=body)
                    create_k8s_res = {
                        "api_version": api_version,
                        "namespace": namespace,
                        "kind": kind,
                        "name": name,
                        "metadata": metadata,
                        "status": "CREATE_IN_PROCESS"
                    }
                else:
                    k8s_method = getattr(
                        k8s_client, f"create_{convert(kind)}")
                    k8s_method(body=body)
                    create_k8s_res = {
                        "api_version": api_version,
                        "kind": kind,
                        "name": name,
                        "metadata": metadata,
                        "status": "CREATE_IN_PROCESS"
                    }
                created_k8s_reses.append(create_k8s_res)
            except Exception as ex:
                LOG.error(ex)
                raise sol_ex.ExecuteK8SResourceCreateApiFailed
        return created_k8s_reses

    def delete_k8s_resource(self, req, sorted_k8s_reses, namespace):
        if req.terminationType:
            if req.terminationType == 'GRACEFUL' and req.obj_attr_is_set(
                    'gracefulTerminationTimeout'):
                body = client.V1DeleteOptions(
                    propagation_policy='Foreground',
                    grace_period_seconds=req.gracefulTerminationTimeout)
            else:
                body = client.V1DeleteOptions(
                    propagation_policy='Foreground',
                    grace_period_seconds=0)

        for k8s_res in sorted_k8s_reses:
            kind = k8s_res.get('kind', '')
            api_version = k8s_res.get('apiVersion', '')
            name = k8s_res.get('metadata', {}).get('name', '')
            k8s_client = self.k8s_clients[api_version]

            if kind == 'StatefulSet':
                pvcs_for_delete = self._get_pvcs_for_delete(
                    sfs_name=name, namespace=namespace)

            if kind == 'ControllerRevision':
                body = client.V1DeleteOptions(
                    propagation_policy='Background')
            try:
                if kind in SUPPORTED_NAMESPACE_KINDS:
                    k8s_method = getattr(
                        k8s_client, f"delete_namespaced_{convert(kind)}")
                    k8s_method(name=name, namespace=namespace, body=body)
                else:
                    k8s_method = getattr(
                        k8s_client, f"delete_{convert(kind)}")
                    k8s_method(name=name, body=body)
                k8s_res.update(status='DELETE_IN_PROGRESS')
            except Exception as ex:
                k8s_res.update(status='DELETE_IN_PROGRESS')
                LOG.debug(ex)

            if kind == 'StatefulSet' and len(pvcs_for_delete) > 0:
                for delete_pvc_name in pvcs_for_delete:
                    try:
                        self.k8s_clients[
                            'v1'].delete_namespaced_persistent_volume_claim(
                            name=delete_pvc_name, namespace=namespace,
                            body=body)
                    except Exception as ex:
                        LOG.debug(ex)

    def update_k8s_resource(self, new_reses, namespace):
        for k8s_res in new_reses:
            kind = k8s_res.get('kind', '')
            api_version = k8s_res.get('apiVersion', '')
            name = k8s_res.get('metadata', {}).get('name', '')
            k8s_client = self.k8s_clients[api_version]
            k8s_method = getattr(
                k8s_client, f"patch_namespaced_{convert(kind)}")
            try:
                k8s_method(name=name, namespace=namespace, body=k8s_res)
                k8s_res.update(status='UPDATE_IN_PROCESS')
            except Exception as e:
                LOG.error(f'update resource failed. kind: {kind},'
                          f' name: {name}')
                raise sol_ex.UpdateK8SResourceFailed from e

    def list_namespaced_pods(self, namespace):
        if namespace is None:
            return None
        return self.k8s_clients['v1'].list_namespaced_pod(
            namespace=namespace).items

    def check_deployment_exist(self, deployment_names, namespace):
        for name in deployment_names:
            try:
                self.k8s_clients['apps/v1'].read_namespaced_deployment(
                    name=name, namespace=namespace)
            except Exception as ex:
                LOG.error(f'deployment {name} to be updated does'
                          f' not exist in kubernetes cluster')
                raise ex

    def _get_pvcs_for_delete(self, sfs_name, namespace):
        pvcs_for_delete = []
        try:
            resp_read_sfs = self.k8s_clients[
                'apps/v1'].read_namespaced_stateful_set(sfs_name, namespace)
            sfs_spec = resp_read_sfs.spec
            volume_claim_templates = sfs_spec.volume_claim_templates

            try:
                resps_pvc = self.k8s_clients[
                    'v1'].list_namespaced_persistent_volume_claim(namespace)
                pvcs = resps_pvc.items
                for volume_claim_template in volume_claim_templates:
                    pvc_template_metadata = volume_claim_template.metadata
                    match_pattern = '-'.join(
                        [pvc_template_metadata.name, sfs_name, ""])

                    for pvc in pvcs:
                        pvc_metadata = pvc.metadata
                        pvc_name = pvc_metadata.name
                        match_result = re.match(
                            match_pattern + '[0-9]+$', pvc_name)
                        if match_result is not None:
                            pvcs_for_delete.append(pvc_name)
            except Exception:
                pass
        except Exception:
            pass
        return pvcs_for_delete

    def _wait_completion(self, k8s_reses, operation,
                         namespace=None, old_pods_names=None):
        def _check_create_status():
            for k8s_res in k8s_reses:
                if k8s_res['status'] != 'CREATE_COMPLETE':
                    if k8s_res.get('kind') in STATUS_CHECK_RES:
                        res_check_method = getattr(
                            self, f"_check_status_"
                                  f"{convert(k8s_res.get('kind'))}")
                        res_check_method(k8s_res)
                    else:
                        k8s_res.update(status='CREATE_COMPLETE')
            statuses = {res['status'] for res in k8s_reses}
            if len(statuses) == 1 and statuses.pop() == 'CREATE_COMPLETE':
                raise loopingcall.LoopingCallDone()
            if len(statuses) > 1 and (int(time.time()) - start_time > timeout):
                raise sol_ex.CreateK8SResourceFailed

        def _check_delete_status():
            for k8s_res in k8s_reses:
                kind = k8s_res.get('kind', '')
                api_version = k8s_res.get('apiVersion', '')
                name = k8s_res.get('metadata', {}).get('name', '')
                k8s_client = self.k8s_clients[api_version]
                if k8s_res['status'] != 'DELETE_COMPLETE':
                    try:
                        if kind in SUPPORTED_NAMESPACE_KINDS:
                            k8s_method = getattr(
                                k8s_client, f'read_namespaced_{convert(kind)}')
                            k8s_method(name=name, namespace=namespace)
                        else:
                            k8s_method = getattr(
                                k8s_client, f'read_{convert(kind)}')
                            k8s_method(name=name)
                    except Exception:
                        k8s_res.update(status='DELETE_COMPLETE')
            statuses = {res['status'] for res in k8s_reses}
            if len(statuses) == 1 and statuses.pop() == 'DELETE_COMPLETE':
                raise loopingcall.LoopingCallDone()
            if len(statuses) > 1 and (int(time.time()) - start_time > timeout):
                raise sol_ex.DeleteK8SResourceFailed

        def _check_update_status():
            all_namespaced_pods = self.list_namespaced_pods(namespace)
            for k8s_res in k8s_reses:
                if k8s_res['status'] not in ['UPDATE_COMPLETE',
                                             'UPDATE_FAILED']:
                    kind = k8s_res.get('kind', '')
                    api_version = k8s_res.get('apiVersion', '')
                    name = k8s_res.get('metadata', {}).get('name', '')
                    k8s_client = self.k8s_clients[api_version]
                    k8s_method = getattr(
                        k8s_client, f'read_namespaced_{convert(kind)}')
                    k8s_info = k8s_method(name=name, namespace=namespace)
                    replicas = k8s_info.spec.replicas

                    pods_info = [pod for pod in all_namespaced_pods if
                                 is_match_pod_naming_rule(
                                     kind, name, pod.metadata.name)]
                    pending_flag = False
                    unknown_flag = False
                    for pod_info in pods_info:
                        if pod_info.status.phase == 'Pending':
                            pending_flag = True
                        elif pod_info.status.phase == 'Unknown':
                            unknown_flag = True

                    if not pending_flag and not unknown_flag and len(
                            pods_info) == replicas and (
                            pods_info[0].metadata.name not in old_pods_names):
                        k8s_res.update(status='UPDATE_COMPLETE')

                    if unknown_flag:
                        k8s_res.update(status='UPDATE_FAILED')

            statuses = {res['status'] for res in k8s_reses}
            if len(statuses) == 1 and list(statuses)[0] == 'UPDATE_COMPLETE':
                raise loopingcall.LoopingCallDone()
            if (list(statuses)[0] == 'UPDATE_IN_PROCESS' and (int(
                    time.time()) - start_time > timeout)) or (
                    'UPDATE_FAILED' in statuses):
                raise sol_ex.UpdateK8SResourceFailed

        start_time = int(time.time())
        timeout = CONF.v2_vnfm.kubernetes_vim_rsc_wait_timeout

        if operation == v2fields.LcmOperationType.INSTANTIATE:
            timer = loopingcall.FixedIntervalLoopingCall(_check_create_status)
        elif operation == v2fields.LcmOperationType.TERMINATE:
            timer = loopingcall.FixedIntervalLoopingCall(_check_delete_status)
        else:
            timer = loopingcall.FixedIntervalLoopingCall(_check_update_status)
        timer.start(interval=CHECK_INTERVAL).wait()

    def _check_status_pod(self, k8s_res):
        pod = self.k8s_clients[k8s_res.get(
            'api_version')].read_namespaced_pod(
            namespace=k8s_res.get('namespace'),
            name=k8s_res.get('name'))

        if pod.status.phase and pod.status.phase == 'Running':
            k8s_res.update(status='CREATE_COMPLETE')

    def _check_status_stateful_set(self, k8s_res):
        namespace = k8s_res.get('namespace')
        name = k8s_res.get('name')

        stateful_set = self.k8s_clients[k8s_res.get(
            'api_version')].read_namespaced_stateful_set(
            namespace=namespace, name=name)
        pvc_statuses = []
        replicas = stateful_set.status.replicas
        if replicas and replicas == stateful_set.status.ready_replicas:
            for i in range(0, stateful_set.spec.replicas):
                volume_claim_templates = (
                    stateful_set.spec.volume_claim_templates)
                for volume_claim_template in volume_claim_templates:
                    pvc_name = "-".join(
                        [volume_claim_template.metadata.name,
                         k8s_res.get('name'), str(i)])
                    persistent_volume_claim = (
                        self.k8s_clients[
                            'v1'].read_namespaced_persistent_volume_claim(
                            namespace=namespace, name=pvc_name))
                    pvc_statuses.append(persistent_volume_claim.status.phase)
            if len(set(pvc_statuses)) == 1 and pvc_statuses[0] == 'Bound':
                k8s_res.update(status='CREATE_COMPLETE')

    def _check_status_service(self, k8s_res):
        namespace = k8s_res.get('namespace')
        name = k8s_res.get('name')

        service = self.k8s_clients[k8s_res.get(
            'api_version')].read_namespaced_service(
            namespace=namespace, name=name)
        status_flag = False
        if service.spec.cluster_ip in ['', None] or check_is_ip(
                service.spec.cluster_ip):
            try:
                endpoint = self.k8s_clients['v1'].read_namespaced_endpoints(
                    namespace=namespace, name=name)
                if endpoint:
                    status_flag = True
            except Exception as e:
                raise sol_ex.ReadEndpointsFalse(
                    kind=k8s_res.get('kind')) from e

        if status_flag:
            k8s_res.update(status='CREATE_COMPLETE')

    def _check_status_persistent_volume_claim(self, k8s_res):
        namespace = k8s_res.get('namespace')
        name = k8s_res.get('name')

        claim = self.k8s_clients[k8s_res.get(
            'api_version')].read_namespaced_persistent_volume_claim(
            namespace=namespace, name=name)

        if claim.status.phase and claim.status.phase == 'Bound':
            k8s_res.update(status='CREATE_COMPLETE')

    def _check_status_namespace(self, k8s_res):
        name = k8s_res.get('name')

        name_space = self.k8s_clients[k8s_res.get(
            'api_version')].read_namespace(name=name)
        if name_space.status.phase and name_space.status.phase == 'Active':
            k8s_res.update(status='CREATE_COMPLETE')

    def _check_status_node(self, k8s_res):
        name = k8s_res.get('name')

        node = self.k8s_clients[k8s_res.get(
            'api_version')].read_node(name=name)
        status_flag = False
        for condition in node.status.conditions:
            if condition.type == 'Ready':
                if condition.status == 'True':
                    status_flag = True
                    break
            else:
                continue
        if status_flag:
            k8s_res.update(status='CREATE_COMPLETE')

    def _check_status_persistent_volume(self, k8s_res):
        name = k8s_res.get('name')

        volume = self.k8s_clients[k8s_res.get(
            'api_version')].read_persistent_volume(name=name)
        if volume.status.phase and volume.status.phase in [
                'Available', 'Bound']:
            k8s_res.update(status='CREATE_COMPLETE')

    def _check_status_api_service(self, k8s_res):
        name = k8s_res.get('name')

        api_service = self.k8s_clients[k8s_res.get(
            'api_version')].read_api_service(name=name)
        status_flag = False
        for condition in api_service.status.conditions:
            if condition.type == 'Available':
                if condition.status == 'True':
                    status_flag = True
                    break
            else:
                continue
        if status_flag:
            k8s_res.update(status='CREATE_COMPLETE')

    def _check_status_daemon_set(self, k8s_res):
        namespace = k8s_res.get('namespace')
        name = k8s_res.get('name')

        daemon_set = self.k8s_clients[k8s_res.get(
            'api_version')].read_namespaced_daemon_set(
            namespace=namespace, name=name)
        if daemon_set.status.desired_number_scheduled and (
                daemon_set.status.desired_number_scheduled ==
                daemon_set.status.number_ready):
            k8s_res.update(status='CREATE_COMPLETE')

    def _check_status_deployment(self, k8s_res):
        namespace = k8s_res.get('namespace')
        name = k8s_res.get('name')

        deployment = self.k8s_clients[k8s_res.get(
            'api_version')].read_namespaced_deployment(
            namespace=namespace, name=name)
        if deployment.status.replicas and (
                deployment.status.replicas ==
                deployment.status.ready_replicas):
            k8s_res.update(status='CREATE_COMPLETE')

    def _check_status_replica_set(self, k8s_res):
        namespace = k8s_res.get('namespace')
        name = k8s_res.get('name')

        replica_set = self.k8s_clients[k8s_res.get(
            'api_version')].read_namespaced_replica_set(
            namespace=namespace, name=name)
        if replica_set.status.replicas and (
                replica_set.status.replicas ==
                replica_set.status.ready_replicas):
            k8s_res.update(status='CREATE_COMPLETE')

    def _check_status_job(self, k8s_res):
        namespace = k8s_res.get('namespace')
        name = k8s_res.get('name')

        job = self.k8s_clients[k8s_res.get(
            'api_version')].read_namespaced_job(
            namespace=namespace, name=name)
        if job.spec.completions and (
                job.spec.completions == job.status.succeeded):
            k8s_res.update(status='CREATE_COMPLETE')

    def _check_status_volume_attachment(self, k8s_res):
        name = k8s_res.get('name')

        volume = self.k8s_clients[k8s_res.get(
            'api_version')].read_volume_attachment(name=name)
        if volume.status.attached:
            k8s_res.update(status='CREATE_COMPLETE')

    def wait_k8s_res_create(self, created_k8s_reses):
        self._wait_completion(created_k8s_reses, operation='INSTANTIATE')

    def wait_k8s_res_delete(self, sorted_k8s_reses, namespace):
        self._wait_completion(
            sorted_k8s_reses, operation='TERMINATE', namespace=namespace)

    def wait_k8s_res_update(self, new_k8s_reses, namespace,
                            old_pods_names=None):
        self._wait_completion(
            new_k8s_reses, operation='UPDATE', namespace=namespace,
            old_pods_names=old_pods_names)
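
# Typical call sequence (sketch using only names defined in this module):
#   k8s_client = KubernetesClient(vim_info)
#   reses = k8s_client.create_k8s_resource(sorted_k8s_reses, namespace)
#   k8s_client.wait_k8s_res_create(reses)
# wait_k8s_res_create() polls _check_create_status() every CHECK_INTERVAL
# seconds until every resource reaches CREATE_COMPLETE or the
# kubernetes_vim_rsc_wait_timeout config value elapses.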


def is_match_pod_naming_rule(rsc_kind, rsc_name, pod_name):
@@ -538,7 +60,7 @@ def is_match_pod_naming_rule(rsc_kind, rsc_name, pod_name):
    if rsc_kind == 'Pod':
        # Expected example: name
        if rsc_name == pod_name:
            match_result = True
            return True
    elif rsc_kind == 'Deployment':
        # Expected example: name-012789abef-019az
        # NOTE(horie): The naming rule of Pod in deployment is
@@ -547,106 +69,21 @@ def is_match_pod_naming_rule(rsc_kind, rsc_name, pod_name):
        # This may be from 1 to 10 characters but not sure the lower limit
        # from the source code of Kubernetes.
        match_result = re.match(
            rsc_name + '-([0-9a-f]{1,10})-([0-9a-z]{5})+$',
            pod_name)
            rsc_name + '-([0-9a-f]{1,10})-([0-9a-z]{5})+$', pod_name)
    elif rsc_kind in ('ReplicaSet', 'DaemonSet'):
        # Expected example: name-019az
        match_result = re.match(
            rsc_name + '-([0-9a-z]{5})+$',
            pod_name)
        match_result = re.match(rsc_name + '-([0-9a-z]{5})+$', pod_name)
    elif rsc_kind == 'StatefulSet':
        # Expected example: name-0
        match_result = re.match(
            rsc_name + '-[0-9]+$',
            pod_name)
        match_result = re.match(rsc_name + '-[0-9]+$', pod_name)
    if match_result:
        return True

    return False
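
# Illustrative matches (assuming match_result is initialized to None
# earlier in the function, outside the shown hunk):
#   is_match_pod_naming_rule('Deployment', 'vdu2',
#                            'vdu2-5dd86ccc9d-xk2fp')  # -> True
#   is_match_pod_naming_rule('StatefulSet', 'vdu1', 'vdu1-0')  # -> True
#   is_match_pod_naming_rule('Deployment', 'vdu2', 'vdu2-0')  # -> False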


def check_is_ip(ip_addr):
    try:
        ipaddress.ip_address(ip_addr)
        return True
    except ValueError:
        return False
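
# Illustrative behavior: both IPv4 and IPv6 literals are accepted,
# anything else returns False.
#   check_is_ip('10.0.0.1')  # -> True
#   check_is_ip('fd00::1')   # -> True
#   check_is_ip('headless')  # -> False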


def convert(tmp_name):
    name_with_underscores = re.sub(
        '(.)([A-Z][a-z]+)', r'\1_\2', tmp_name)
    return re.sub('([a-z0-9])([A-Z])', r'\1_\2',
                  name_with_underscores).lower()
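
# convert() turns a Kubernetes kind into the snake_case fragment used to
# build python client method names such as
# f"create_namespaced_{convert(kind)}". For example:
#   convert('Pod')          # -> 'pod'
#   convert('StatefulSet')  # -> 'stateful_set'
#   convert('APIService')   # -> 'api_service'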


def init_k8s_api_client(vim_info):
    k8s_config = client.Configuration()
    k8s_config.host = vim_info.interfaceInfo['endpoint']

    ca_cert_file = (vim_info.interfaceInfo.pop('ca_cert_file')
                    if 'ca_cert_file' in vim_info.interfaceInfo else None)

    if ('username' in vim_info.accessInfo and 'password'
            in vim_info.accessInfo and vim_info.accessInfo.get(
            'password') is not None):
        k8s_config.username = vim_info.accessInfo['username']
        k8s_config.password = vim_info.accessInfo['password']
        basic_token = k8s_config.get_basic_auth_token()
        k8s_config.api_key['authorization'] = basic_token

    if 'bearer_token' in vim_info.accessInfo:
        k8s_config.api_key_prefix['authorization'] = 'Bearer'
        k8s_config.api_key['authorization'] = vim_info.accessInfo[
            'bearer_token']

    if 'ssl_ca_cert' in vim_info.interfaceInfo and ca_cert_file:
        k8s_config.ssl_ca_cert = ca_cert_file
        k8s_config.verify_ssl = True
    else:
        k8s_config.verify_ssl = False

    return client.api_client.ApiClient(configuration=k8s_config)


def get_k8s_clients(k8s_api_client):
    k8s_clients = {
        "v1": client.CoreV1Api(api_client=k8s_api_client),
        "apiregistration.k8s.io/v1":
            client.ApiregistrationV1Api(api_client=k8s_api_client),
        "apps/v1": client.AppsV1Api(api_client=k8s_api_client),
        "authentication.k8s.io/v1":
            client.AuthenticationV1Api(api_client=k8s_api_client),
        "authorization.k8s.io/v1":
            client.AuthorizationV1Api(api_client=k8s_api_client),
        "autoscaling/v1": client.AutoscalingV1Api(
            api_client=k8s_api_client),
        "batch/v1": client.BatchV1Api(api_client=k8s_api_client),
        "coordination.k8s.io/v1":
            client.CoordinationV1Api(api_client=k8s_api_client),
        "networking.k8s.io/v1":
            client.NetworkingV1Api(api_client=k8s_api_client),
        "rbac.authorization.k8s.io/v1":
            client.RbacAuthorizationV1Api(api_client=k8s_api_client),
        "scheduling.k8s.io/v1":
            client.SchedulingV1Api(api_client=k8s_api_client),
        "storage.k8s.io/v1":
            client.StorageV1Api(api_client=k8s_api_client)
    }

    return k8s_clients
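
# Lookup sketch: a manifest's apiVersion selects the matching client, so
# reading a Deployment (the name and namespace here are placeholders)
# becomes:
#   clients = get_k8s_clients(k8s_api_client)
#   clients['apps/v1'].read_namespaced_deployment(
#       name='vdu2', namespace='default')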


def get_k8s_json_file(req, inst, target_k8s_files, vnfd, operation):

    def _update_k8s_resources(namespace):
        for k8s_res in k8s_resources:
            if (k8s_res.get('kind', '') in SUPPORTED_NAMESPACE_KINDS and
                    k8s_res.get('metadata') is None):
                k8s_res.update(metadata={})
            if k8s_res.get('kind', '') in SUPPORTED_NAMESPACE_KINDS:
                k8s_res['metadata'].update(namespace=namespace)

def get_k8s_reses_from_json_files(target_k8s_files, vnfd, k8s_api_client,
                                  namespace):
    k8s_resources = []

    for target_k8s_file in target_k8s_files:
@@ -662,93 +99,81 @@ def get_k8s_json_file(req, inst, target_k8s_files, vnfd, operation):

        k8s_resources.extend(list(yaml.safe_load_all(file_content)))

    for k8s_res in k8s_resources:
        if not k8s_res.get('kind'):
            raise sol_ex.K8sInvalidManifestFound()
        if k8s_res['kind'] in SUPPORTED_NAMESPACE_KINDS:
            k8s_res.setdefault('metadata', {})
            if namespace is None:
                k8s_res['metadata'].setdefault('namespace', 'default')
            else:
                k8s_res['metadata']['namespace'] = namespace

    # check namespace
    if operation == v2fields.LcmOperationType.INSTANTIATE:
        if req.additionalParams.get('namespace') is None:
            _update_k8s_resources('default')
            namespaces = {
                k8s_res['metadata']['namespace'] for k8s_res in
                k8s_resources if k8s_res.get('kind') in
                SUPPORTED_NAMESPACE_KINDS}
    if namespace is None:
        namespaces = {k8s_res['metadata']['namespace']
                      for k8s_res in k8s_resources
                      if k8s_res['kind'] in SUPPORTED_NAMESPACE_KINDS}
        if len(namespaces) > 1:
            raise sol_ex.NamespaceNotUniform()
        return k8s_resources, namespaces.pop() if namespaces else None

        _update_k8s_resources(req.additionalParams.get('namespace'))
        return k8s_resources, req.additionalParams.get('namespace')

    return k8s_resources, inst.metadata.get('namespace')


def sort_k8s_resource(k8s_resources, operation):
    pos = 0
    sorted_k8s_reses = []

    if operation == v2fields.LcmOperationType.INSTANTIATE:
        sort_order = RESOURCE_CREATION_ORDER
    else:
        sort_order = list(reversed(RESOURCE_CREATION_ORDER))

    copy_k8s_resources = copy.deepcopy(k8s_resources)

    for kind in sort_order:
        for res_index, res in enumerate(copy_k8s_resources):
            if res.get('kind', '') == kind:
                index = k8s_resources.index(res)
                sorted_k8s_reses.append(k8s_resources.pop(index))
        # Other kind (such as PodTemplate, Node, and so on) that are
        # not present in `RESOURCE_CREATION_ORDER` are inserted in
        # place of the Service kind and created/deleted in the same
        # order as the Service kind.
        if kind == 'Service':
            pos = len(sorted_k8s_reses)
    namespace = namespaces.pop() if namespaces else 'default'

    k8s_reses = []
    for k8s_res in k8s_resources:
        sorted_k8s_reses.insert(pos, k8s_res)
        cls = getattr(kubernetes_resource, k8s_res['kind'])
        k8s_reses.append(cls(k8s_api_client, k8s_res))

    return sorted_k8s_reses
    return k8s_reses, namespace


def get_new_deployment_body(
        req, inst, vnfd, deployment_names, operation):
    if operation == v2fields.LcmOperationType.CHANGE_VNFPKG:
        target_k8s_files = req.additionalParams.get(
            'lcm-kubernetes-def-files')
    else:
        target_k8s_files = inst.metadata.get('lcm-kubernetes-def-files')

    new_k8s_resources, namespace = get_k8s_json_file(
        req, inst, target_k8s_files, vnfd, operation)

    new_deploy_reses = []
    for k8s_res in new_k8s_resources:
        if k8s_res.get('kind', '') == 'Deployment' and k8s_res.get(
                'metadata', {}).get('name', '') in deployment_names:
            k8s_res['metadata']['namespace'] = namespace
            new_deploy_reses.append(k8s_res)

    return new_deploy_reses
def list_namespaced_pods(k8s_api_client, namespace):
    k8s_client = client.CoreV1Api(api_client=k8s_api_client)
    return k8s_client.list_namespaced_pod(namespace=namespace).items


class CaCertFileContextManager:
    def __init__(self, ca_cert_str):
        self._file_descriptor = None
        self.file_path = None
        self.ca_cert_str = ca_cert_str
class AuthContextManager:
    def __init__(self, vim_info):
        self.vim_info = vim_info
        self.ca_cert_file = None

    def __enter__(self):
        if not self.ca_cert_str:
            return self
        self._file_descriptor, self.file_path = tempfile.mkstemp()
        ca_cert = re.sub(r'\s', '\n', self.ca_cert_str)
        ca_cert = re.sub(r'BEGIN\nCERT', r'BEGIN CERT', ca_cert)
        ca_cert = re.sub(r'END\nCERT', r'END CERT', ca_cert)
        # write ca cert file
        os.write(self._file_descriptor, ca_cert.encode())
        return self

    def __exit__(self, exc_type, exc_value, exc_traceback):
        if not self.ca_cert_str:
            return
        os.close(self._file_descriptor)
        os.remove(self.file_path)
        if self.ca_cert_file:
            os.remove(self.ca_cert_file)

    def _create_ca_cert_file(self, ca_cert_str):
        file_descriptor, self.ca_cert_file = tempfile.mkstemp()
        ca_cert = re.sub(r'\s', '\n', ca_cert_str)
        ca_cert = re.sub(r'BEGIN\nCERT', r'BEGIN CERT', ca_cert)
        ca_cert = re.sub(r'END\nCERT', r'END CERT', ca_cert)
        # write ca cert file
        os.write(file_descriptor, ca_cert.encode())
        os.close(file_descriptor)

    def init_k8s_api_client(self):
        k8s_config = client.Configuration()
        k8s_config.host = self.vim_info.interfaceInfo['endpoint']

        if ('username' in self.vim_info.accessInfo and
                self.vim_info.accessInfo.get('password') is not None):
            k8s_config.username = self.vim_info.accessInfo['username']
            k8s_config.password = self.vim_info.accessInfo['password']
            basic_token = k8s_config.get_basic_auth_token()
            k8s_config.api_key['authorization'] = basic_token

        if 'bearer_token' in self.vim_info.accessInfo:
            k8s_config.api_key_prefix['authorization'] = 'Bearer'
            k8s_config.api_key['authorization'] = self.vim_info.accessInfo[
                'bearer_token']

        if 'ssl_ca_cert' in self.vim_info.interfaceInfo:
            self._create_ca_cert_file(
                self.vim_info.interfaceInfo['ssl_ca_cert'])
            k8s_config.ssl_ca_cert = self.ca_cert_file
            k8s_config.verify_ssl = True
        else:
            k8s_config.verify_ssl = False

        return client.api_client.ApiClient(configuration=k8s_config)
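
# Usage sketch (an assumption, not shown in this change): the manager is
# meant to scope the temporary CA certificate file to the API calls, e.g.
#   with AuthContextManager(vim_info) as acm:
#       k8s_api_client = acm.init_k8s_api_client()
#       ...
# so the file written by _create_ca_cert_file() is removed on exit.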
@@ -84,6 +84,9 @@ class VnfInstanceV2_InstantiatedVnfInfo(base.TackerObject,
            'VirtualStorageResourceInfoV2', nullable=True),
        # NOTE: vnfcInfo exists in SOL002 only.
        'vnfcInfo': fields.ListOfObjectsField('VnfcInfoV2', nullable=True),
        # NOTE: metadata is not defined in SOL003. It is an original
        # definition of Tacker.
        'metadata': fields.KeyValuePairsField(nullable=True),
    }


@@ -193,6 +193,16 @@ class BaseVnfLcmKubernetesV2Test(base.BaseTestCase):
        return self.tacker_client.do_request(
            path, "POST", body=req_body, version="2.0.0")

    def scale_vnf_instance(self, inst_id, req_body):
        path = f"/vnflcm/v2/vnf_instances/{inst_id}/scale"
        return self.tacker_client.do_request(
            path, "POST", body=req_body, version="2.0.0")

    def heal_vnf_instance(self, inst_id, req_body):
        path = f"/vnflcm/v2/vnf_instances/{inst_id}/heal"
        return self.tacker_client.do_request(
            path, "POST", body=req_body, version="2.0.0")

    def change_vnfpkg(self, inst_id, req_body):
        path = f"/vnflcm/v2/vnf_instances/{inst_id}/change_vnfpkg"
        return self.tacker_client.do_request(
@@ -138,6 +138,28 @@ def max_sample_terminate():
    }


def max_sample_scale_out():
    return {
        "type": "SCALE_OUT",
        "aspectId": "vdu3_aspect",
        "numberOfSteps": 2
    }


def max_sample_scale_in():
    return {
        "type": "SCALE_IN",
        "aspectId": "vdu3_aspect",
        "numberOfSteps": 1
    }


def max_sample_heal(vnfc_ids):
    return {
        "vnfcInstanceId": vnfc_ids
    }
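
# Usage sketch (assumption): these bodies feed the v2 LCM test helpers
# defined above, e.g.
#   resp, body = self.scale_vnf_instance(inst_id, max_sample_scale_out())
#   resp, body = self.heal_vnf_instance(inst_id, max_sample_heal(ids))
# where inst_id and ids come from a previously instantiated VNF.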


def min_sample_instantiate(vim_id_1):
    vim_1 = {
        "vimId": vim_id_1,
@@ -166,14 +188,7 @@ def min_sample_terminate():
    }


def change_vnfpkg_instantiate(auth_url, bearer_token):
    # All attributes are set.
    # NOTE: All of the following cardinality attributes are set.
    # In addition, 0..N or 1..N attributes are set to 2 or more.
    # - 0..1 (1)
    # - 0..N (2 or more)
    # - 1
    # - 1..N (2 or more)
def error_handling_instantiate(auth_url, bearer_token):
    vim_id_1 = uuidutils.generate_uuid()
    vim_1 = {
        "vimId": vim_id_1,
@@ -191,44 +206,6 @@ def change_vnfpkg_instantiate(auth_url, bearer_token):
        "vimConnectionInfo": {
            "vim1": vim_1
        },
        "additionalParams": {
            "lcm-kubernetes-def-files": [
                "Files/kubernetes/deployment.yaml",
                "Files/kubernetes/namespace.yaml"
            ],
            "namespace": "curry"
        }
    }


def change_vnfpkg_all_params(vnfd_id):
    return {
        "vnfdId": vnfd_id,
        "additionalParams": {
            "upgrade_type": "RollingUpdate",
            "lcm-operation-coordinate-old-vnf":
                "Scripts/coordinate_old_vnf.py",
            "lcm-operation-coordinate-new-vnf":
                "Scripts/coordinate_new_vnf.py",
            "lcm-kubernetes-def-files": [
                "Files/new_kubernetes/new_deployment.yaml"],
            "vdu_params": [{
                "vdu_id": "VDU2"
            }]
        }
    }


def change_vnfpkg_instantiate_min(vim_id_1):
    vim_1 = {
        "vimId": vim_id_1,
        "vimType": "kubernetes",
    }
    return {
        "flavourId": "simple",
        "vimConnectionInfo": {
            "vim1": vim_1,
        },
        "additionalParams": {
            "lcm-kubernetes-def-files": [
                "Files/kubernetes/deployment.yaml"
@@ -237,33 +214,59 @@ def change_vnfpkg_instantiate_min(vim_id_1):
    }


def change_vnfpkg_min(vnfd_id):
def error_handling_scale_out():
    return {
        "type": "SCALE_OUT",
        "aspectId": "vdu2_aspect",
        "numberOfSteps": 1
    }


def error_handling_terminate():
    return {
        "terminationType": "FORCEFUL"
    }


def change_vnfpkg_instantiate(auth_url, bearer_token):
    vim_id_1 = uuidutils.generate_uuid()
    vim_1 = {
        "vimId": vim_id_1,
        "vimType": "kubernetes",
        "interfaceInfo": {"endpoint": auth_url},
        "accessInfo": {
            "bearer_token": bearer_token,
            "region": "RegionOne",
        },
        "extra": {"dummy-key": "dummy-val"}
    }

    return {
        "flavourId": "simple",
        "vimConnectionInfo": {
            "vim1": vim_1
        },
        "additionalParams": {
            "lcm-kubernetes-def-files": [
                "Files/kubernetes/namespace.yaml",
                "Files/kubernetes/deployment.yaml"
            ],
            "namespace": "curry"
        }
    }


def change_vnfpkg(vnfd_id):
    return {
        "vnfdId": vnfd_id,
        "additionalParams": {
            "upgrade_type": "RollingUpdate",
            "lcm-operation-coordinate-old-vnf":
                "Scripts/coordinate_old_vnf.py",
            "lcm-operation-coordinate-new-vnf":
                "Scripts/coordinate_new_vnf.py",
        }
    }


def change_vnfpkg_instantiate_error_handing(vim_id_1):
    vim_1 = {
        "vimId": vim_id_1,
        "vimType": "kubernetes",
    }
    return {
        "flavourId": "simple",
        "vimConnectionInfo": {
            "vim1": vim_1,
        },
        "additionalParams": {
            "lcm-kubernetes-def-files": [
                "Files/kubernetes/deployment_fail_test.yaml"
            ]
                "Files/kubernetes/namespace.yaml",
                "Files/new_kubernetes/new_deployment.yaml"],
            "vdu_params": [{
                "vdu_id": "VDU2"
            }]
        }
    }

@@ -273,11 +276,17 @@ def change_vnfpkg_error(vnfd_id):
        "vnfdId": vnfd_id,
        "additionalParams": {
            "upgrade_type": "RollingUpdate",
            "lcm-operation-coordinate-old-vnf":
                "Scripts/coordinate_old_vnf.py",
            "lcm-operation-coordinate-new-vnf":
                "Scripts/coordinate_new_vnf.py",
            "lcm-kubernetes-def-files": [
                "Files/new_kubernetes/error_deployment.yaml"]
                "Files/kubernetes/namespace.yaml",
                "Files/new_kubernetes/not_exist_deployment.yaml"],
            "vdu_params": [{
                "vdu_id": "VDU2"
            }]
        }
    }


def change_vnfpkg_terminate():
    return {
        "terminationType": "FORCEFUL"
    }
@@ -40,6 +40,33 @@ topology_template:
      type: company.provider.VNF
      properties:
        flavour_description: A simple flavour
      interfaces:
        Vnflcm:
          instantiate_start:
            implementation: sample-script
          instantiate_end:
            implementation: sample-script
          terminate_start:
            implementation: sample-script
          terminate_end:
            implementation: sample-script
          scale_start:
            implementation: sample-script
          scale_end:
            implementation: sample-script
          heal_start:
            implementation: sample-script
          heal_end:
            implementation: sample-script
          modify_information_start:
            implementation: sample-script
          modify_information_end:
            implementation: sample-script
      artifacts:
        sample-script:
          description: Sample script
          type: tosca.artifacts.Implementation.Python
          file: ../Scripts/sample_script.py

    VDU1:
      type: tosca.nodes.nfv.Vdu.Compute
@@ -63,11 +90,29 @@ topology_template:
      type: tosca.nodes.nfv.Vdu.Compute
      properties:
        name: vdu3
        description: VDU2 compute node
        description: VDU3 compute node
        vdu_profile:
          min_number_of_instances: 1
          max_number_of_instances: 3

    VDU5:
      type: tosca.nodes.nfv.Vdu.Compute
      properties:
        name: vdu5
        description: VDU5 compute node
        vdu_profile:
          min_number_of_instances: 1
          max_number_of_instances: 3

    VDU6:
      type: tosca.nodes.nfv.Vdu.Compute
      properties:
        name: vdu6
        description: VDU6 compute node
        vdu_profile:
          min_number_of_instances: 1
          max_number_of_instances: 1

  policies:
    - scaling_aspects:
        type: tosca.policies.nfv.ScalingAspects
@@ -85,12 +130,18 @@ topology_template:
            max_scale_level: 2
            step_deltas:
              - delta_1
          vdu5_aspect:
            name: vdu5_aspect
            description: vdu5 scaling aspect
            max_scale_level: 2
            step_deltas:
              - delta_1

    - VDU2_initial_delta:
        type: tosca.policies.nfv.VduInitialDelta
        properties:
          initial_delta:
            number_of_instances: 1
            number_of_instances: 2
        targets: [ VDU2 ]

    - VDU2_scaling_aspect_deltas:
@@ -118,6 +169,22 @@ topology_template:
            number_of_instances: 1
        targets: [ VDU3 ]

    - VDU5_initial_delta:
        type: tosca.policies.nfv.VduInitialDelta
        properties:
          initial_delta:
            number_of_instances: 1
        targets: [ VDU5 ]

    - VDU5_scaling_aspect_deltas:
        type: tosca.policies.nfv.VduScalingAspectDeltas
        properties:
          aspect: vdu5_aspect
          deltas:
            delta_1:
              number_of_instances: 1
        targets: [ VDU5 ]

    - instantiation_levels:
        type: tosca.policies.nfv.InstantiationLevels
        properties:
@@ -126,9 +193,11 @@ topology_template:
              description: Smallest size
              scale_info:
                vdu2_aspect:
                  scale_level: 0
                  scale_level: 1
                vdu3_aspect:
                  scale_level: 0
                vdu5_aspect:
                  scale_level: 0
            instantiation_level_2:
              description: Largest size
              scale_info:
@@ -136,6 +205,8 @@ topology_template:
                  scale_level: 2
                vdu3_aspect:
                  scale_level: 2
                vdu5_aspect:
                  scale_level: 2
          default_level: instantiation_level_1

    - VDU1_instantiation_levels:
@@ -153,7 +224,7 @@ topology_template:
        properties:
          levels:
            instantiation_level_1:
              number_of_instances: 1
              number_of_instances: 2
            instantiation_level_2:
              number_of_instances: 3
        targets: [ VDU2 ]
@@ -167,3 +238,13 @@ topology_template:
            instantiation_level_2:
              number_of_instances: 3
        targets: [ VDU3 ]

    - VDU5_instantiation_levels:
        type: tosca.policies.nfv.VduInstantiationLevels
        properties:
          levels:
            instantiation_level_1:
              number_of_instances: 1
            instantiation_level_2:
              number_of_instances: 3
        targets: [ VDU5 ]
@@ -1,7 +1,7 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: vdu3
  name: vdu2
  namespace: default
spec:
  replicas: 2
@@ -20,9 +20,5 @@ spec:
        ports:
        - containerPort: 80
          protocol: TCP
      volumes:
      - name: config
        configMap:
          name: nginx-app-original
  strategy:
    type: RollingUpdate
@@ -1,4 +1,4 @@
# Copyright (C) 2022 Fujitsu
# Copyright (C) 2022 Nippon Telegraph and Telephone Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@@ -12,48 +12,50 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import functools
import os
import pickle
import sys

from oslo_log import log as logging

LOG = logging.getLogger(__name__)
CMD_TIMEOUT = 30
SERVER_WAIT_COMPLETE_TIME = 60
SSH_CONNECT_RETRY_COUNT = 4
class FailScript(object):
    """Define error method for each operation

    For example:

    class SampleNewCoordinateVNFScript(object):
        def instantiate_start(self):
            if os.path.exists('/tmp/instantiate_start'):
                raise Exception('test instantiate_start error')
    """

    def __init__(self, req, inst, grant_req, grant, csar_dir, k8s_info):
    def __init__(self, req, inst, grant_req, grant, csar_dir):
        self.req = req
        self.inst = inst
        self.grant_req = grant_req
        self.grant = grant
        self.csar_dir = csar_dir
        self.k8s_info = k8s_info

    def coordinate_vnf(self):
        pass
    def _fail(self, method):
        if os.path.exists(f'/tmp/{method}'):
            raise Exception(f'test {method} error')

    def __getattr__(self, name):
        return functools.partial(self._fail, name)


def main():
    operation = "coordinate_vnf"
    script_dict = pickle.load(sys.stdin.buffer)

    operation = script_dict['operation']
    req = script_dict['request']
    inst = script_dict['vnf_instance']
    grant_req = script_dict['grant_request']
    grant = script_dict['grant_response']
    csar_dir = script_dict['tmp_csar_dir']
    k8s_info = script_dict['k8s_info']
    script = SampleNewCoordinateVNFScript(
        req, inst, grant_req, grant,
        csar_dir, k8s_info)
    try:

    script = FailScript(req, inst, grant_req, grant, csar_dir)
    getattr(script, operation)()
    except Exception:
        raise Exception


if __name__ == "__main__":
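    # Invocation sketch (assumption, mirroring the keys read in main()):
    # the LCM driver pipes a pickled dict to this script on stdin, e.g.
    #   script_dict = {'operation': 'instantiate_start',
    #                  'request': req, 'vnf_instance': inst,
    #                  'grant_request': grant_req,
    #                  'grant_response': grant,
    #                  'tmp_csar_dir': csar_dir}
    # and creating /tmp/instantiate_start beforehand makes that step fail.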
@@ -11,7 +11,7 @@ Hash: 30071afb22afcb0e54e03df3d22f0852994b4120ca85ac72e9c207c97a4755a8
Name: Files/new_kubernetes/error_deployment.yaml
Content-Type: test-data
Algorithm: SHA-256
Hash: 1386a46e16e1c07aef97d9c1bb6ca7a6f2af99b314ecac42094634a59577a060
Hash: 1fad945911465b5c12fe20bc8350f17b417e5212544d442f2d56df15b8802d8d

Name: Files/new_kubernetes/new_deployment.yaml
Content-Type: test-data
@@ -35,13 +35,18 @@ shutil.move(os.path.join(tmp_dir, zip_file_name), ".")
shutil.rmtree(tmp_dir)

# if you change_vnfpkg with all parameters
change_vnfpkg_all_params = paramgen.change_vnfpkg_all_params(vnfd_id)
change_vnfpkg = paramgen.change_vnfpkg(vnfd_id)

# if you change_vnfpkg with no operational parameters
change_vnfpkg_min = paramgen.change_vnfpkg_min(vnfd_id)
change_vnfpkg_error = paramgen.change_vnfpkg_error(vnfd_id)

with open("change_vnfpkg_all_params", "w", encoding='utf-8') as f:
    f.write(json.dumps(change_vnfpkg_all_params, indent=2))
change_vnfpkg_terminate = paramgen.change_vnfpkg_terminate()

with open("change_vnfpkg_min", "w", encoding='utf-8') as f:
    f.write(json.dumps(change_vnfpkg_min, indent=2))
with open("change_vnfpkg", "w", encoding='utf-8') as f:
    f.write(json.dumps(change_vnfpkg, indent=2))

with open("change_vnfpkg_error", "w", encoding='utf-8') as f:
    f.write(json.dumps(change_vnfpkg_error, indent=2))

with open("change_vnfpkg_terminate", "w", encoding='utf-8') as f:
    f.write(json.dumps(change_vnfpkg_terminate, indent=2))
@@ -40,6 +40,33 @@ topology_template:
      type: company.provider.VNF
      properties:
        flavour_description: A simple flavour
      interfaces:
        Vnflcm:
          instantiate_start:
            implementation: sample-script
          instantiate_end:
            implementation: sample-script
          terminate_start:
            implementation: sample-script
          terminate_end:
            implementation: sample-script
          scale_start:
            implementation: sample-script
          scale_end:
            implementation: sample-script
          heal_start:
            implementation: sample-script
          heal_end:
            implementation: sample-script
          modify_information_start:
            implementation: sample-script
          modify_information_end:
            implementation: sample-script
      artifacts:
        sample-script:
          description: Sample script
          type: tosca.artifacts.Implementation.Python
          file: ../Scripts/sample_script.py

    VDU1:
      type: tosca.nodes.nfv.Vdu.Compute
@@ -56,7 +83,7 @@ topology_template:
        name: vdu2
        description: VDU2 compute node
        vdu_profile:
          min_number_of_instances: 2
          min_number_of_instances: 1
          max_number_of_instances: 3

    VDU3:
@@ -68,15 +95,6 @@ topology_template:
          min_number_of_instances: 1
          max_number_of_instances: 3

    VDU4:
      type: tosca.nodes.nfv.Vdu.Compute
      properties:
        name: vdu4
        description: VDU4 compute node
        vdu_profile:
          min_number_of_instances: 1
          max_number_of_instances: 3

    VDU5:
      type: tosca.nodes.nfv.Vdu.Compute
      properties:
@@ -93,7 +111,7 @@ topology_template:
        description: VDU6 compute node
        vdu_profile:
          min_number_of_instances: 1
          max_number_of_instances: 3
          max_number_of_instances: 1

  policies:
    - scaling_aspects:
@@ -112,24 +130,12 @@ topology_template:
            max_scale_level: 2
            step_deltas:
              - delta_1
          vdu4_aspect:
            name: vdu4_aspect
            description: vdu4 scaling aspect
            max_scale_level: 2
            step_deltas:
              - delta_1
          vdu5_aspect:
            name: vdu5_aspect
            description: vdu5 scaling aspect
            max_scale_level: 2
            step_deltas:
              - delta_1
          vdu6_aspect:
            name: vdu6_aspect
            description: vdu6 scaling aspect
            max_scale_level: 2
            step_deltas:
              - delta_1

    - VDU2_initial_delta:
        type: tosca.policies.nfv.VduInitialDelta
@@ -163,22 +169,6 @@ topology_template:
            number_of_instances: 1
        targets: [ VDU3 ]

    - VDU4_initial_delta:
        type: tosca.policies.nfv.VduInitialDelta
        properties:
          initial_delta:
            number_of_instances: 1
        targets: [ VDU4 ]

    - VDU4_scaling_aspect_deltas:
        type: tosca.policies.nfv.VduScalingAspectDeltas
        properties:
          aspect: vdu4_aspect
          deltas:
            delta_1:
              number_of_instances: 1
        targets: [ VDU4 ]

    - VDU5_initial_delta:
        type: tosca.policies.nfv.VduInitialDelta
        properties:
@@ -195,22 +185,6 @@ topology_template:
            number_of_instances: 1
        targets: [ VDU5 ]

    - VDU6_initial_delta:
        type: tosca.policies.nfv.VduInitialDelta
        properties:
          initial_delta:
            number_of_instances: 1
        targets: [ VDU6 ]

    - VDU6_scaling_aspect_deltas:
        type: tosca.policies.nfv.VduScalingAspectDeltas
        properties:
          aspect: vdu6_aspect
          deltas:
            delta_1:
              number_of_instances: 1
        targets: [ VDU6 ]

    - instantiation_levels:
        type: tosca.policies.nfv.InstantiationLevels
        properties:
@@ -219,15 +193,11 @@ topology_template:
              description: Smallest size
              scale_info:
                vdu2_aspect:
                  scale_level: 0
                  scale_level: 1
                vdu3_aspect:
                  scale_level: 0
                vdu4_aspect:
                  scale_level: 2
                vdu5_aspect:
                  scale_level: 2
                vdu6_aspect:
                  scale_level: 2
                  scale_level: 0
            instantiation_level_2:
              description: Largest size
              scale_info:
@@ -235,12 +205,8 @@ topology_template:
                  scale_level: 2
                vdu3_aspect:
                  scale_level: 2
                vdu4_aspect:
                  scale_level: 2
                vdu5_aspect:
                  scale_level: 2
                vdu6_aspect:
                  scale_level: 2
          default_level: instantiation_level_1

    - VDU1_instantiation_levels:
@@ -273,16 +239,6 @@ topology_template:
              number_of_instances: 3
        targets: [ VDU3 ]

    - VDU4_instantiation_levels:
        type: tosca.policies.nfv.VduInstantiationLevels
        properties:
          levels:
            instantiation_level_1:
              number_of_instances: 1
            instantiation_level_2:
              number_of_instances: 3
        targets: [ VDU4 ]

    - VDU5_instantiation_levels:
        type: tosca.policies.nfv.VduInstantiationLevels
        properties:
@@ -292,13 +248,3 @@ topology_template:
            instantiation_level_2:
              number_of_instances: 3
        targets: [ VDU5 ]

    - VDU6_instantiation_levels:
        type: tosca.policies.nfv.VduInstantiationLevels
        properties:
          levels:
            instantiation_level_1:
              number_of_instances: 1
            instantiation_level_2:
              number_of_instances: 3
        targets: [ VDU6 ]
@@ -1,63 +0,0 @@
# Copyright (C) 2022 Fujitsu
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import pickle
import sys

from oslo_log import log as logging

LOG = logging.getLogger(__name__)


class SampleOldCoordinateVNFScript(object):

    def __init__(self, req, inst, grant_req, grant, csar_dir, k8s_info):
        self.req = req
        self.inst = inst
        self.grant_req = grant_req
        self.grant = grant
        self.csar_dir = csar_dir
        self.k8s_info = k8s_info

    def coordinate_vnf(self):
        pass


def main():
    operation = "coordinate_vnf"
    script_dict = pickle.load(sys.stdin.buffer)
    req = script_dict['request']
    inst = script_dict['vnf_instance']
    grant_req = script_dict['grant_request']
    grant = script_dict['grant_response']
    csar_dir = script_dict['tmp_csar_dir']
    k8s_info = script_dict['k8s_info']
    script = SampleOldCoordinateVNFScript(
        req, inst, grant_req, grant,
        csar_dir, k8s_info)
    try:
        getattr(script, operation)()
    except Exception:
        raise Exception


if __name__ == "__main__":
    try:
        main()
        os._exit(0)
    except Exception as ex:
        sys.stderr.write(str(ex))
        sys.stderr.flush()
        os._exit(1)
@ -1,4 +1,4 @@
# Copyright (C) 2022 Fujitsu
# Copyright (C) 2022 Nippon Telegraph and Telephone Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
@ -12,45 +12,50 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import functools
import os
import pickle
import sys

from oslo_log import log as logging

LOG = logging.getLogger(__name__)
class FailScript(object):
    """Define error method for each operation

    For example:

    class SampleOldCoordinateVNFScript(object):
        def instantiate_start(self):
            if os.path.exists('/tmp/instantiate_start'):
                raise Exception('test instantiate_start error')
    """

    def __init__(self, req, inst, grant_req, grant, csar_dir, k8s_info):
    def __init__(self, req, inst, grant_req, grant, csar_dir):
        self.req = req
        self.inst = inst
        self.grant_req = grant_req
        self.grant = grant
        self.csar_dir = csar_dir
        self.k8s_info = k8s_info

    def coordinate_vnf(self):
        pass
    def _fail(self, method):
        if os.path.exists(f'/tmp/{method}'):
            raise Exception(f'test {method} error')

    def __getattr__(self, name):
        return functools.partial(self._fail, name)


def main():
    operation = "coordinate_vnf"
    script_dict = pickle.load(sys.stdin.buffer)

    operation = script_dict['operation']
    req = script_dict['request']
    inst = script_dict['vnf_instance']
    grant_req = script_dict['grant_request']
    grant = script_dict['grant_response']
    csar_dir = script_dict['tmp_csar_dir']
    k8s_info = script_dict['k8s_info']
    script = SampleOldCoordinateVNFScript(
        req, inst, grant_req, grant,
        csar_dir, k8s_info)
    try:

    script = FailScript(req, inst, grant_req, grant, csar_dir)
    getattr(script, operation)()
    except Exception:
        raise Exception


if __name__ == "__main__":
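The rewritten sample script above turns any operation name into a potential failure hook: __getattr__ maps an unknown attribute such as instantiate_end or scale_end to _fail bound to that name via functools.partial, and _fail raises only when a matching /tmp flag file exists. A condensed, standalone sketch of the pattern (the class name FailHook is illustrative):

import functools
import os


class FailHook(object):

    def _fail(self, method):
        # Raise only when the test has planted the matching flag file.
        if os.path.exists(f'/tmp/{method}'):
            raise Exception(f'test {method} error')

    def __getattr__(self, name):
        # Any attribute lookup resolves to a zero-argument callable
        # bound to that operation name.
        return functools.partial(self._fail, name)


hook = FailHook()
hook.scale_end()  # no-op unless /tmp/scale_end exists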
@ -44,6 +44,9 @@ max_sample_instantiate = paramgen.max_sample_instantiate(
    auth_url, bearer_token)

max_sample_terminate = paramgen.max_sample_terminate()
max_sample_scale_out = paramgen.max_sample_scale_out()
max_sample_scale_in = paramgen.max_sample_scale_in()
max_sample_heal = paramgen.max_sample_heal(["replace real vnfc ids"])

# if you instantiate with only one resource
# please change vim_id to your k8s's vim id
@ -55,6 +58,11 @@ min_sample_terminate = paramgen.min_sample_terminate()
change_vnfpkg_instantiate = paramgen.change_vnfpkg_instantiate(
    auth_url, bearer_token)

error_handling_instantiate = paramgen.error_handling_instantiate(
    auth_url, bearer_token)
error_handling_scale_out = paramgen.error_handling_scale_out()
error_handling_terminate = paramgen.error_handling_terminate()

with open("create_req", "w", encoding='utf-8') as f:
    f.write(json.dumps(create_req, indent=2))

@ -64,6 +72,15 @@ with open("max_sample_instantiate", "w", encoding='utf-8') as f:
with open("max_sample_terminate", "w", encoding='utf-8') as f:
    f.write(json.dumps(max_sample_terminate, indent=2))

with open("max_sample_scale_out", "w", encoding='utf-8') as f:
    f.write(json.dumps(max_sample_scale_out, indent=2))

with open("max_sample_scale_in", "w", encoding='utf-8') as f:
    f.write(json.dumps(max_sample_scale_in, indent=2))

with open("max_sample_heal", "w", encoding='utf-8') as f:
    f.write(json.dumps(max_sample_heal, indent=2))

with open("min_sample_instantiate", "w", encoding='utf-8') as f:
    f.write(json.dumps(min_sample_instantiate, indent=2))

@ -72,3 +89,12 @@ with open("min_sample_terminate", "w", encoding='utf-8') as f:

with open("change_vnfpkg_instantiate", "w", encoding='utf-8') as f:
    f.write(json.dumps(change_vnfpkg_instantiate, indent=2))

with open("error_handling_instantiate", "w", encoding='utf-8') as f:
    f.write(json.dumps(error_handling_instantiate, indent=2))

with open("error_handling_scale_out", "w", encoding='utf-8') as f:
    f.write(json.dumps(error_handling_scale_out, indent=2))

with open("error_handling_terminate", "w", encoding='utf-8') as f:
    f.write(json.dumps(error_handling_terminate, indent=2))
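The script above now also emits ready-made scale, heal and error-handling request bodies. One way to use a generated file by hand (the endpoint in the comment is illustrative):

import json

with open('max_sample_scale_out', encoding='utf-8') as f:
    scale_out_body = json.load(f)
# POST scale_out_body to /vnflcm/v2/vnf_instances/{inst_id}/scale
# with any HTTP client to drive the operation manually.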
@ -13,7 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.

import ddt
import os
import time

@ -21,7 +20,6 @@ from tacker.tests.functional.sol_kubernetes_v2 import base_v2
from tacker.tests.functional.sol_kubernetes_v2 import paramgen


@ddt.ddt
class VnfLcmKubernetesChangeVnfpkgTest(base_v2.BaseVnfLcmKubernetesV2Test):

    @classmethod
@ -50,16 +48,8 @@ class VnfLcmKubernetesChangeVnfpkgTest(base_v2.BaseVnfLcmKubernetesV2Test):
    def setUp(self):
        super(VnfLcmKubernetesChangeVnfpkgTest, self).setUp()

    def test_change_vnfpkg_for_deployment_res_with_all_params(self):
        """Test ChangeCurrentVNFPackage with all attributes set

        * About attributes:
          All of the following cardinality attributes are set.
          In addition, 0..N or 1..N attributes are set to 2 or more.
          - 0..1 (1)
          - 0..N (2 or more)
          - 1
          - 1..N (2 or more)
    def test_change_vnfpkg_for_deployment_res(self):
        """Test ChangeCurrentVNFPackage

        * About LCM operations:
          This test includes the following operations.
@ -114,9 +104,6 @@ class VnfLcmKubernetesChangeVnfpkgTest(base_v2.BaseVnfLcmKubernetesV2Test):
        lcmocc_id = os.path.basename(resp.headers['Location'])
        self.wait_lcmocc_complete(lcmocc_id)

        # check vnfc_resource_info
        # TODO()

        # 3. Show VNF instance
        additional_inst_attrs = [
            'vimConnectionInfo',
@ -128,19 +115,20 @@ class VnfLcmKubernetesChangeVnfpkgTest(base_v2.BaseVnfLcmKubernetesV2Test):
        self.check_resp_headers_in_get(resp)
        self.check_resp_body(body, expected_inst_attrs)

        vnfc_resource_infos = body['instantiatedVnfInfo'].get(
            'vnfcResourceInfo')
        before_resource_ids = [vnfc_info['computeResource']['resourceId']
                               for vnfc_info in vnfc_resource_infos]
        vnfc_resource_infos = body['instantiatedVnfInfo']['vnfcResourceInfo']
        before_resource_ids = {vnfc_info['computeResource']['resourceId']
                               for vnfc_info in vnfc_resource_infos}
        self.assertEqual(2, len(before_resource_ids))

        # 4. Change Current VNF Package
        change_vnfpkg_req = paramgen.change_vnfpkg_all_params(self.vnfd_id_2)
        change_vnfpkg_req = paramgen.change_vnfpkg(self.vnfd_id_2)
        resp, body = self.change_vnfpkg(inst_id, change_vnfpkg_req)
        self.assertEqual(202, resp.status_code)
        self.check_resp_headers_in_operation_task(resp)

        lcmocc_id = os.path.basename(resp.headers['Location'])
        self.wait_lcmocc_complete(lcmocc_id)
        time.sleep(3)

        # check usageState of VNF Package
        usage_state = self.get_vnf_package(self.vnf_pkg_1).get('usageState')
@ -160,14 +148,14 @@ class VnfLcmKubernetesChangeVnfpkgTest(base_v2.BaseVnfLcmKubernetesV2Test):
        self.check_resp_headers_in_get(resp)
        self.check_resp_body(body, expected_inst_attrs)

        vnfc_resource_infos = body['instantiatedVnfInfo'].get(
            'vnfcResourceInfo')
        after_resource_ids = [vnfc_info['computeResource']['resourceId']
                              for vnfc_info in vnfc_resource_infos]
        vnfc_resource_infos = body['instantiatedVnfInfo']['vnfcResourceInfo']
        after_resource_ids = {vnfc_info['computeResource']['resourceId']
                              for vnfc_info in vnfc_resource_infos}
        self.assertEqual(2, len(after_resource_ids))
        self.assertNotEqual(before_resource_ids, after_resource_ids)

        # 6. Terminate a VNF instance
        terminate_req = paramgen.max_sample_terminate()
        terminate_req = paramgen.change_vnfpkg_terminate()
        resp, body = self.terminate_vnf_instance(inst_id, terminate_req)
        self.assertEqual(202, resp.status_code)
        self.check_resp_headers_in_operation_task(resp)
@ -177,7 +165,7 @@ class VnfLcmKubernetesChangeVnfpkgTest(base_v2.BaseVnfLcmKubernetesV2Test):

        # wait a bit because there is a bit time lag between lcmocc DB
        # update and terminate completion.
        time.sleep(10)
        time.sleep(3)

        # 7. Delete a VNF instance
        resp, body = self.delete_vnf_instance(inst_id)
@ -192,14 +180,8 @@ class VnfLcmKubernetesChangeVnfpkgTest(base_v2.BaseVnfLcmKubernetesV2Test):
        usage_state = self.get_vnf_package(self.vnf_pkg_2).get('usageState')
        self.assertEqual('NOT_IN_USE', usage_state)

    def test_change_vnfpkg_for_deployment_res_with_no_op_params(self):
        """Test ChangeCurrentVNFPackage with no optional attributes

        * About attributes:
          Omit except for required attributes.
          Only the following cardinality attributes are set.
          - 1
          - 1..N (1)
    def test_change_vnfpkg_failed_and_rollback(self):
        """Test LCM operations error handling

        * About LCM operations:
          This test includes the following operations.
@ -207,9 +189,10 @@ class VnfLcmKubernetesChangeVnfpkgTest(base_v2.BaseVnfLcmKubernetesV2Test):
          - 2. Instantiate a VNF instance
          - 3. Show VNF instance
          - 4. Change Current VNF Package
          - 5. Show VNF instance
          - 6. Terminate a VNF instance
          - 7. Delete a VNF instance
          - 5. Rollback Change Current VNF Package
          - 6. Show VNF instance
          - 7. Terminate a VNF instance
          - 8. Delete a VNF instance
        """

        # 1. Create a new VNF instance resource
@ -245,8 +228,8 @@ class VnfLcmKubernetesChangeVnfpkgTest(base_v2.BaseVnfLcmKubernetesV2Test):
        self.assertEqual('IN_USE', usage_state)

        # 2. Instantiate a VNF instance
        vim_id = self.get_k8s_vim_id()
        instantiate_req = paramgen.change_vnfpkg_instantiate_min(vim_id)
        instantiate_req = paramgen.change_vnfpkg_instantiate(
            self.auth_url, self.bearer_token)
        resp, body = self.instantiate_vnf_instance(inst_id, instantiate_req)
        self.assertEqual(202, resp.status_code)
        self.check_resp_headers_in_operation_task(resp)
@ -265,28 +248,26 @@ class VnfLcmKubernetesChangeVnfpkgTest(base_v2.BaseVnfLcmKubernetesV2Test):
        self.check_resp_headers_in_get(resp)
        self.check_resp_body(body, expected_inst_attrs)

        vnfc_resource_infos = body['instantiatedVnfInfo'].get(
            'vnfcResourceInfo')
        vnfc_resource_infos = body['instantiatedVnfInfo']['vnfcResourceInfo']
        before_resource_ids = [vnfc_info['computeResource']['resourceId']
                               for vnfc_info in vnfc_resource_infos]

        # 4. Change Current VNF Package
        change_vnfpkg_req = paramgen.change_vnfpkg_min(self.vnfd_id_2)
        # 4. Change Current VNF Package (will fail)
        change_vnfpkg_req = paramgen.change_vnfpkg_error(self.vnfd_id_2)
        resp, body = self.change_vnfpkg(inst_id, change_vnfpkg_req)
        self.assertEqual(202, resp.status_code)
        self.check_resp_headers_in_operation_task(resp)

        lcmocc_id = os.path.basename(resp.headers['Location'])
        self.wait_lcmocc_complete(lcmocc_id)
        self.wait_lcmocc_failed_temp(lcmocc_id)

        # check usageState of VNF Package
        usage_state = self.get_vnf_package(self.vnf_pkg_1).get('usageState')
        self.assertEqual('NOT_IN_USE', usage_state)
        # check usageState of VNF Package
        usage_state = self.get_vnf_package(self.vnf_pkg_2).get('usageState')
        self.assertEqual('IN_USE', usage_state)
        # 5. Rollback Change Current VNF Package operation
        resp, body = self.rollback_lcmocc(lcmocc_id)
        self.assertEqual(202, resp.status_code)
        self.check_resp_headers_in_delete(resp)
        self.wait_lcmocc_rolled_back(lcmocc_id)

        # 5. Show VNF instance
        # 6. Show VNF instance
        additional_inst_attrs = [
            'vimConnectionInfo',
            'instantiatedVnfInfo'
@ -297,14 +278,13 @@ class VnfLcmKubernetesChangeVnfpkgTest(base_v2.BaseVnfLcmKubernetesV2Test):
        self.check_resp_headers_in_get(resp)
        self.check_resp_body(body, expected_inst_attrs)

        vnfc_resource_infos = body['instantiatedVnfInfo'].get(
            'vnfcResourceInfo')
        vnfc_resource_infos = body['instantiatedVnfInfo']['vnfcResourceInfo']
        after_resource_ids = [vnfc_info['computeResource']['resourceId']
                              for vnfc_info in vnfc_resource_infos]
        self.assertNotEqual(before_resource_ids, after_resource_ids)
        self.assertEqual(before_resource_ids, after_resource_ids)

        # 6. Terminate a VNF instance
        terminate_req = paramgen.max_sample_terminate()
        # 7. Terminate a VNF instance
        terminate_req = paramgen.change_vnfpkg_terminate()
        resp, body = self.terminate_vnf_instance(inst_id, terminate_req)
        self.assertEqual(202, resp.status_code)
        self.check_resp_headers_in_operation_task(resp)
@ -314,9 +294,9 @@ class VnfLcmKubernetesChangeVnfpkgTest(base_v2.BaseVnfLcmKubernetesV2Test):

        # wait a bit because there is a bit time lag between lcmocc DB
        # update and terminate completion.
        time.sleep(10)
        time.sleep(3)

        # 7. Delete a VNF instance
        # 8. Delete a VNF instance
        resp, body = self.delete_vnf_instance(inst_id)
        self.assertEqual(204, resp.status_code)
        self.check_resp_headers_in_delete(resp)
@ -60,8 +60,14 @@ class VnfLcmKubernetesTest(base_v2.BaseVnfLcmKubernetesV2Test):
          - 1. Create a new VNF instance resource
          - 2. Instantiate a VNF instance
          - 3. Show VNF instance
          - 4. Terminate a VNF instance
          - 5. Delete a VNF instance
          - 4. Scale out a VNF instance
          - 5. Show VNF instance
          - 6. Scale in a VNF instance
          - 7. Show VNF instance
          - 8. Heal a VNF instance
          - 9. Show VNF instance
          - 10. Terminate a VNF instance
          - 11. Delete a VNF instance
        """

        # 1. Create a new VNF instance resource
@ -118,26 +124,99 @@ class VnfLcmKubernetesTest(base_v2.BaseVnfLcmKubernetesV2Test):
        self.check_resp_body(body, expected_inst_attrs)

        # check vnfc_resource_info
        vnfc_resource_infos = body['instantiatedVnfInfo'].get(
            'vnfcResourceInfo')
        vnfc_resource_infos = body['instantiatedVnfInfo']['vnfcResourceInfo']
        vdu_nums = {'VDU1': 0, 'VDU2': 0, 'VDU3': 0, 'VDU5': 0, 'VDU6': 0}
        for vnfc_info in vnfc_resource_infos:
            if vnfc_info['vduId'] == 'VDU1':
                self.assertEqual('Pod', vnfc_info[
                    'computeResource']['vimLevelResourceType'])
                vdu_nums['VDU1'] += 1
            elif vnfc_info['vduId'] == 'VDU2':
                self.assertEqual('Deployment', vnfc_info[
                    'computeResource']['vimLevelResourceType'])
                vdu_nums['VDU2'] += 1
            elif vnfc_info['vduId'] == 'VDU3':
                self.assertEqual('ReplicaSet', vnfc_info[
                    'computeResource']['vimLevelResourceType'])
                vdu_nums['VDU3'] += 1
            elif vnfc_info['vduId'] == 'VDU5':
                self.assertEqual('StatefulSet', vnfc_info[
                    'computeResource']['vimLevelResourceType'])
                vdu_nums['VDU5'] += 1
            elif vnfc_info['vduId'] == 'VDU6':
                self.assertEqual('DaemonSet', vnfc_info[
                    'computeResource']['vimLevelResourceType'])
                vdu_nums['VDU6'] += 1
        expected = {'VDU1': 1, 'VDU2': 2, 'VDU3': 1, 'VDU5': 1, 'VDU6': 1}
        self.assertEqual(expected, vdu_nums)
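        # --- Editor's sketch, not part of this commit: when only the
        # counts matter, the per-kind loop above can be condensed with
        # collections.Counter (the vimLevelResourceType assertions are
        # not covered by this form). ---
        from collections import Counter
        kinds = {'VDU1', 'VDU2', 'VDU3', 'VDU5', 'VDU6'}
        vdu_counts = Counter(v['vduId'] for v in vnfc_resource_infos
                             if v['vduId'] in kinds)
        assert dict(vdu_counts) == expected
        # --- end sketch ---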
        # 4. Terminate a VNF instance
        # 4. Scale out a VNF instance
        scale_out_req = paramgen.max_sample_scale_out()
        resp, body = self.scale_vnf_instance(inst_id, scale_out_req)
        self.assertEqual(202, resp.status_code)
        self.check_resp_headers_in_operation_task(resp)

        lcmocc_id = os.path.basename(resp.headers['Location'])
        self.wait_lcmocc_complete(lcmocc_id)

        # 5. Show VNF instance
        resp, body = self.show_vnf_instance(inst_id)
        self.assertEqual(200, resp.status_code)
        self.check_resp_headers_in_get(resp)

        # check vnfc_resource_info
        vnfc_resource_infos = body['instantiatedVnfInfo']['vnfcResourceInfo']
        vdu3_infos = [vnfc_info for vnfc_info in vnfc_resource_infos
                      if vnfc_info['vduId'] == 'VDU3']
        self.assertEqual(3, len(vdu3_infos))

        # 6. Scale in a VNF instance
        scale_in_req = paramgen.max_sample_scale_in()
        resp, body = self.scale_vnf_instance(inst_id, scale_in_req)
        self.assertEqual(202, resp.status_code)
        self.check_resp_headers_in_operation_task(resp)

        lcmocc_id = os.path.basename(resp.headers['Location'])
        self.wait_lcmocc_complete(lcmocc_id)

        # 7. Show VNF instance
        resp, body = self.show_vnf_instance(inst_id)
        self.assertEqual(200, resp.status_code)
        self.check_resp_headers_in_get(resp)

        # check vnfc_resource_info
        vnfc_resource_infos = body['instantiatedVnfInfo']['vnfcResourceInfo']
        vdu3_infos = [vnfc_info for vnfc_info in vnfc_resource_infos
                      if vnfc_info['vduId'] == 'VDU3']
        self.assertEqual(2, len(vdu3_infos))

        # 8. Heal a VNF instance
        vnfc_infos = body['instantiatedVnfInfo']['vnfcInfo']
        vdu2_ids = [vnfc_info['id'] for vnfc_info in vnfc_infos
                    if vnfc_info['vduId'] == 'VDU2']
        target = [vdu2_ids[0]]
        heal_req = paramgen.max_sample_heal(target)
        resp, body = self.heal_vnf_instance(inst_id, heal_req)
        self.assertEqual(202, resp.status_code)
        self.check_resp_headers_in_operation_task(resp)

        lcmocc_id = os.path.basename(resp.headers['Location'])
        self.wait_lcmocc_complete(lcmocc_id)

        # 9. Show VNF instance
        resp, body = self.show_vnf_instance(inst_id)
        self.assertEqual(200, resp.status_code)
        self.check_resp_headers_in_get(resp)

        # check vnfc_resource_info
        vnfc_infos = body['instantiatedVnfInfo']['vnfcInfo']
        result_vdu2_ids = [vnfc_info['id'] for vnfc_info in vnfc_infos
                           if vnfc_info['vduId'] == 'VDU2']
        self.assertEqual(2, len(result_vdu2_ids))
        self.assertNotIn(vdu2_ids[0], result_vdu2_ids)
        self.assertIn(vdu2_ids[1], result_vdu2_ids)

        # 10. Terminate a VNF instance
        terminate_req = paramgen.max_sample_terminate()
        resp, body = self.terminate_vnf_instance(inst_id, terminate_req)
        self.assertEqual(202, resp.status_code)
@ -148,9 +227,9 @@ class VnfLcmKubernetesTest(base_v2.BaseVnfLcmKubernetesV2Test):

        # wait a bit because there is a bit time lag between lcmocc DB
        # update and terminate completion.
        time.sleep(10)
        time.sleep(3)

        # 5. Delete a VNF instance
        # 11. Delete a VNF instance
        resp, body = self.delete_vnf_instance(inst_id)
        self.assertEqual(204, resp.status_code)
        self.check_resp_headers_in_delete(resp)
@ -250,7 +329,7 @@ class VnfLcmKubernetesTest(base_v2.BaseVnfLcmKubernetesV2Test):

        # wait a bit because there is a bit time lag between lcmocc DB
        # update and terminate completion.
        time.sleep(10)
        time.sleep(3)

        # 5. Delete a VNF instance
        resp, body = self.delete_vnf_instance(inst_id)
@ -264,3 +343,152 @@ class VnfLcmKubernetesTest(base_v2.BaseVnfLcmKubernetesV2Test):
        # check usageState of VNF Package
        usage_state = self.get_vnf_package(self.vnf_pkg_1).get('usageState')
        self.assertEqual('NOT_IN_USE', usage_state)

    def _put_fail_file(self, operation):
        with open(f'/tmp/{operation}', 'w'):
            pass

    def _rm_fail_file(self, operation):
        os.remove(f'/tmp/{operation}')
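    # --- Editor's sketch, not part of this commit: the two helpers above
    # can be combined into a context manager so the flag file is removed
    # even if an assertion fails while the operation is in flight
    # (requires "import contextlib" at module top). ---
    @contextlib.contextmanager
    def _fail_file(self, operation):
        self._put_fail_file(operation)
        try:
            yield
        finally:
            self._rm_fail_file(operation)
    # --- end sketch ---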
    def test_instantiate_rollback(self):
        """Test LCM operations error handling

        * About LCM operations:
          This test includes the following operations.
          - 1. Create a new VNF instance resource
          - 2. Instantiate a VNF instance => FAILED_TEMP
          - 3. Show VNF instance
          - 4. Rollback instantiate
          - 5. Show VNF instance
          - 6. Delete a VNF instance
        """

        # 1. Create a new VNF instance resource
        create_req = paramgen.test_instantiate_cnf_resources_create(
            self.vnfd_id_1)
        resp, body = self.create_vnf_instance(create_req)
        self.assertEqual(201, resp.status_code)
        self.check_resp_headers_in_create(resp)
        inst_id = body['id']

        # 2. Instantiate a VNF instance
        self._put_fail_file('instantiate_end')
        instantiate_req = paramgen.error_handling_instantiate(
            self.auth_url, self.bearer_token)
        resp, body = self.instantiate_vnf_instance(inst_id, instantiate_req)
        self.assertEqual(202, resp.status_code)
        self.check_resp_headers_in_operation_task(resp)

        lcmocc_id = os.path.basename(resp.headers['Location'])
        self.wait_lcmocc_failed_temp(lcmocc_id)
        self._rm_fail_file('instantiate_end')

        # 3. Show VNF instance
        resp, body = self.show_vnf_instance(inst_id)
        self.assertEqual(200, resp.status_code)
        self.check_resp_headers_in_get(resp)
        self.assertEqual('NOT_INSTANTIATED', body['instantiationState'])

        # 4. Rollback instantiate
        resp, body = self.rollback_lcmocc(lcmocc_id)
        self.assertEqual(202, resp.status_code)
        self.wait_lcmocc_rolled_back(lcmocc_id)

        # 5. Show VNF instance
        resp, body = self.show_vnf_instance(inst_id)
        self.assertEqual(200, resp.status_code)
        self.check_resp_headers_in_get(resp)
        self.assertEqual('NOT_INSTANTIATED', body['instantiationState'])

        # 6. Delete a VNF instance
        resp, body = self.delete_vnf_instance(inst_id)
        self.assertEqual(204, resp.status_code)
        self.check_resp_headers_in_delete(resp)
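    # --- Editor's sketch, not part of this commit: the wait_lcmocc_*
    # helpers used throughout are assumed to poll the LCM occurrence until
    # it reaches the expected operationState, roughly as below (timeout
    # and interval values are illustrative). ---
    def _wait_lcmocc_state(self, lcmocc_id, expected, timeout=300,
                           interval=5):
        start = time.time()
        while True:
            resp, body = self.show_lcmocc(lcmocc_id)
            if body['operationState'] == expected:
                return
            if time.time() - start > timeout:
                raise AssertionError(
                    f'lcmocc {lcmocc_id} did not reach {expected}')
            time.sleep(interval)
    # --- end sketch ---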
    def test_scale_out_rollback(self):
        """Test LCM operations error handling

        * About LCM operations:
          This test includes the following operations.
          - 1. Create a new VNF instance resource
          - 2. Instantiate a VNF instance
          - 3. Show VNF instance
          - 4. Scale out ==> FAILED_TEMP
          - 5. Rollback
          - 6. Show VNF instance
          - 7. Terminate a VNF instance
          - 8. Delete a VNF instance
        """

        # 1. Create a new VNF instance resource
        create_req = paramgen.test_instantiate_cnf_resources_create(
            self.vnfd_id_1)
        resp, body = self.create_vnf_instance(create_req)
        self.assertEqual(201, resp.status_code)
        self.check_resp_headers_in_create(resp)
        inst_id = body['id']

        # 2. Instantiate a VNF instance
        instantiate_req = paramgen.error_handling_instantiate(
            self.auth_url, self.bearer_token)
        resp, body = self.instantiate_vnf_instance(inst_id, instantiate_req)
        self.assertEqual(202, resp.status_code)
        self.check_resp_headers_in_operation_task(resp)

        lcmocc_id = os.path.basename(resp.headers['Location'])
        self.wait_lcmocc_complete(lcmocc_id)

        # 3. Show VNF instance
        resp, body = self.show_vnf_instance(inst_id)
        self.assertEqual(200, resp.status_code)
        self.check_resp_headers_in_get(resp)

        vnfc_resource_infos = body['instantiatedVnfInfo']['vnfcResourceInfo']
        vdu2_ids_0 = {vnfc_info['id'] for vnfc_info in vnfc_resource_infos
                      if vnfc_info['vduId'] == 'VDU2'}
        self.assertEqual(2, len(vdu2_ids_0))

        # 4. Scale out a VNF instance
        self._put_fail_file('scale_end')
        scale_out_req = paramgen.error_handling_scale_out()
        resp, body = self.scale_vnf_instance(inst_id, scale_out_req)
        self.assertEqual(202, resp.status_code)
        self.check_resp_headers_in_operation_task(resp)

        lcmocc_id = os.path.basename(resp.headers['Location'])
        self.wait_lcmocc_failed_temp(lcmocc_id)
        self._rm_fail_file('scale_end')

        # 5. Rollback scale out
        resp, body = self.rollback_lcmocc(lcmocc_id)
        self.assertEqual(202, resp.status_code)
        self.wait_lcmocc_rolled_back(lcmocc_id)

        # 6. Show VNF instance
        resp, body = self.show_vnf_instance(inst_id)
        self.assertEqual(200, resp.status_code)
        self.check_resp_headers_in_get(resp)

        vnfc_resource_infos = body['instantiatedVnfInfo']['vnfcResourceInfo']
        vdu2_ids_1 = {vnfc_info['id'] for vnfc_info in vnfc_resource_infos
                      if vnfc_info['vduId'] == 'VDU2'}
        self.assertEqual(vdu2_ids_0, vdu2_ids_1)

        # 7. Terminate a VNF instance
        terminate_req = paramgen.error_handling_terminate()
        resp, body = self.terminate_vnf_instance(inst_id, terminate_req)
        self.assertEqual(202, resp.status_code)
        self.check_resp_headers_in_operation_task(resp)

        lcmocc_id = os.path.basename(resp.headers['Location'])
        self.wait_lcmocc_complete(lcmocc_id)

        # wait a bit because there is a bit time lag between lcmocc DB
        # update and terminate completion.
        time.sleep(3)

        # 8. Delete a VNF instance
        resp, body = self.delete_vnf_instance(inst_id)
        self.assertEqual(204, resp.status_code)
        self.check_resp_headers_in_delete(resp)
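Note how the rollback assertions above collect VNFC ids into sets: the before/after comparison is order-insensitive, so rollback is judged on membership rather than listing order. A trivial standalone illustration:

before = {'pod-a', 'pod-b'}
after = {'pod-b', 'pod-a'}
assert before == after                            # sets: same members
assert ['pod-a', 'pod-b'] != ['pod-b', 'pod-a']   # lists: order matters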
@ -1,474 +0,0 @@
# Copyright (C) 2022 FUJITSU
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import ddt
import os
import time
import unittest

from tacker.tests.functional.sol_kubernetes_v2 import base_v2
from tacker.tests.functional.sol_kubernetes_v2 import paramgen


@ddt.ddt
class VnfLcmKubernetesErrorHandingTest(base_v2.BaseVnfLcmKubernetesV2Test):

    @classmethod
    def setUpClass(cls):
        super(VnfLcmKubernetesErrorHandingTest, cls).setUpClass()

        cur_dir = os.path.dirname(__file__)

        test_instantiate_cnf_resources_path = os.path.join(
            cur_dir, "samples/test_instantiate_cnf_resources")
        cls.vnf_pkg_1, cls.vnfd_id_1 = cls.create_vnf_package(
            test_instantiate_cnf_resources_path)

        test_change_vnf_pkg_with_deployment_path = os.path.join(
            cur_dir, "samples/test_change_vnf_pkg_with_deployment")
        cls.vnf_pkg_2, cls.vnfd_id_2 = cls.create_vnf_package(
            test_change_vnf_pkg_with_deployment_path)

    @classmethod
    def tearDownClass(cls):
        super(VnfLcmKubernetesErrorHandingTest, cls).tearDownClass()

        cls.delete_vnf_package(cls.vnf_pkg_1)
        cls.delete_vnf_package(cls.vnf_pkg_2)

    def setUp(self):
        super(VnfLcmKubernetesErrorHandingTest, self).setUp()

    @unittest.skip("Until refactor CNF v2 API")
    def test_change_vnfpkg_failed_in_update_wait_and_rollback(self):
        """Test LCM operations error handling

        * About LCM operations:
          This test includes the following operations.
          - 1. Create a new VNF instance resource
          - 2. Instantiate a VNF instance
          - 3. Show VNF instance
          - 4. Change Current VNF Package
          - 5. Rollback Change Current VNF Package
          - 6. Show VNF instance
          - 7. Terminate a VNF instance
          - 8. Delete a VNF instance
        """

        # 1. Create a new VNF instance resource
        # NOTE: extensions and vnfConfigurableProperties are omitted
        # because they are commented out in etsi_nfv_sol001.
        expected_inst_attrs = [
            'id',
            'vnfInstanceName',
            'vnfInstanceDescription',
            'vnfdId',
            'vnfProvider',
            'vnfProductName',
            'vnfSoftwareVersion',
            'vnfdVersion',
            # 'vnfConfigurableProperties', # omitted
            # 'vimConnectionInfo', # omitted
            'instantiationState',
            # 'instantiatedVnfInfo', # omitted
            'metadata',
            # 'extensions', # omitted
            '_links'
        ]
        create_req = paramgen.test_instantiate_cnf_resources_create(
            self.vnfd_id_1)
        resp, body = self.create_vnf_instance(create_req)
        self.assertEqual(201, resp.status_code)
        self.check_resp_headers_in_create(resp)
        self.check_resp_body(body, expected_inst_attrs)
        inst_id = body['id']

        # check usageState of VNF Package
        usage_state = self.get_vnf_package(self.vnf_pkg_1)['usageState']
        self.assertEqual('IN_USE', usage_state)

        # 2. Instantiate a VNF instance
        vim_id = self.get_k8s_vim_id()
        instantiate_req = paramgen.change_vnfpkg_instantiate_error_handing(
            vim_id)
        resp, body = self.instantiate_vnf_instance(inst_id, instantiate_req)
        self.assertEqual(202, resp.status_code)
        self.check_resp_headers_in_operation_task(resp)

        lcmocc_id = os.path.basename(resp.headers['Location'])
        self.wait_lcmocc_complete(lcmocc_id)

        # 3. Show VNF instance
        additional_inst_attrs = [
            'vimConnectionInfo',
            'instantiatedVnfInfo'
        ]
        expected_inst_attrs.extend(additional_inst_attrs)
        resp, body = self.show_vnf_instance(inst_id)
        self.assertEqual(200, resp.status_code)
        self.check_resp_headers_in_get(resp)
        self.check_resp_body(body, expected_inst_attrs)

        vnfc_resource_infos = body['instantiatedVnfInfo'].get(
            'vnfcResourceInfo')
        before_resource_ids = [vnfc_info['computeResource']['resourceId']
                               for vnfc_info in vnfc_resource_infos]

        # 4. Change Current VNF Package (will fail)
        change_vnfpkg_req = paramgen.change_vnfpkg_error(self.vnfd_id_2)
        resp, body = self.change_vnfpkg(inst_id, change_vnfpkg_req)
        self.assertEqual(202, resp.status_code)
        self.check_resp_headers_in_operation_task(resp)

        lcmocc_id = os.path.basename(resp.headers['Location'])
        self.wait_lcmocc_failed_temp(lcmocc_id)

        # 5. Rollback Change Current VNF Package operation
        resp, body = self.rollback_lcmocc(lcmocc_id)
        self.assertEqual(202, resp.status_code)
        self.check_resp_headers_in_delete(resp)
        self.wait_lcmocc_rolled_back(lcmocc_id)

        # 6. Show VNF instance
        additional_inst_attrs = [
            'vimConnectionInfo',
            'instantiatedVnfInfo'
        ]
        expected_inst_attrs.extend(additional_inst_attrs)
        resp, body = self.show_vnf_instance(inst_id)
        self.assertEqual(200, resp.status_code)
        self.check_resp_headers_in_get(resp)
        self.check_resp_body(body, expected_inst_attrs)

        vnfc_resource_infos = body['instantiatedVnfInfo'].get(
            'vnfcResourceInfo')
        after_resource_ids = [vnfc_info['computeResource']['resourceId']
                              for vnfc_info in vnfc_resource_infos]
        self.assertNotEqual(before_resource_ids, after_resource_ids)

        # 7. Terminate a VNF instance
        terminate_req = paramgen.max_sample_terminate()
        resp, body = self.terminate_vnf_instance(inst_id, terminate_req)
        self.assertEqual(202, resp.status_code)
        self.check_resp_headers_in_operation_task(resp)

        lcmocc_id = os.path.basename(resp.headers['Location'])
        self.wait_lcmocc_complete(lcmocc_id)

        # wait a bit because there is a bit time lag between lcmocc DB
        # update and terminate completion.
        time.sleep(10)

        # 8. Delete a VNF instance
        resp, body = self.delete_vnf_instance(inst_id)
        self.assertEqual(204, resp.status_code)
        self.check_resp_headers_in_delete(resp)

        # check deletion of VNF instance
        resp, body = self.show_vnf_instance(inst_id)
        self.assertEqual(404, resp.status_code)

        # check usageState of VNF Package
        usage_state = self.get_vnf_package(self.vnf_pkg_2).get('usageState')
        self.assertEqual('NOT_IN_USE', usage_state)

    @unittest.skip("Until refactor CNF v2 API")
    def test_change_vnfpkg_failed_and_retry(self):
        """Test LCM operations error handling

        * About LCM operations:
          This test includes the following operations.
          - 1. Create a new VNF instance resource
          - 2. Instantiate a VNF instance
          - 3. Show VNF instance
          - 4. Change Current VNF Package (will fail)
          - 5. Retry Change Current VNF Package
          - 6. Rollback Change Current VNF Package
          - 7. Show VNF instance
          - 8. Terminate a VNF instance
          - 9. Delete a VNF instance
        """

        # 1. Create a new VNF instance resource
        # NOTE: extensions and vnfConfigurableProperties are omitted
        # because they are commented out in etsi_nfv_sol001.
        expected_inst_attrs = [
            'id',
            'vnfInstanceName',
            'vnfInstanceDescription',
            'vnfdId',
            'vnfProvider',
            'vnfProductName',
            'vnfSoftwareVersion',
            'vnfdVersion',
            # 'vnfConfigurableProperties', # omitted
            # 'vimConnectionInfo', # omitted
            'instantiationState',
            # 'instantiatedVnfInfo', # omitted
            'metadata',
            # 'extensions', # omitted
            '_links'
        ]
        create_req = paramgen.test_instantiate_cnf_resources_create(
            self.vnfd_id_1)
        resp, body = self.create_vnf_instance(create_req)
        self.assertEqual(201, resp.status_code)
        self.check_resp_headers_in_create(resp)
        self.check_resp_body(body, expected_inst_attrs)
        inst_id = body['id']

        # check usageState of VNF Package
        usage_state = self.get_vnf_package(self.vnf_pkg_1)['usageState']
        self.assertEqual('IN_USE', usage_state)

        # 2. Instantiate a VNF instance
        vim_id = self.get_k8s_vim_id()
        instantiate_req = paramgen.change_vnfpkg_instantiate_error_handing(
            vim_id)
        resp, body = self.instantiate_vnf_instance(inst_id, instantiate_req)
        self.assertEqual(202, resp.status_code)
        self.check_resp_headers_in_operation_task(resp)

        lcmocc_id = os.path.basename(resp.headers['Location'])
        self.wait_lcmocc_complete(lcmocc_id)

        # 3. Show VNF instance
        additional_inst_attrs = [
            'vimConnectionInfo',
            'instantiatedVnfInfo'
        ]
        expected_inst_attrs.extend(additional_inst_attrs)
        resp, body = self.show_vnf_instance(inst_id)
        self.assertEqual(200, resp.status_code)
        self.check_resp_headers_in_get(resp)
        self.check_resp_body(body, expected_inst_attrs)

        vnfc_resource_infos = body['instantiatedVnfInfo'].get(
            'vnfcResourceInfo')
        before_resource_ids = [vnfc_info['computeResource']['resourceId']
                               for vnfc_info in vnfc_resource_infos]

        # 4. Change Current VNF Package (will fail)
        change_vnfpkg_req = paramgen.change_vnfpkg_error(self.vnfd_id_2)
        resp, body = self.change_vnfpkg(inst_id, change_vnfpkg_req)
        self.assertEqual(202, resp.status_code)
        self.check_resp_headers_in_operation_task(resp)

        lcmocc_id = os.path.basename(resp.headers['Location'])
        self.wait_lcmocc_failed_temp(lcmocc_id)

        # 5. Retry Change Current VNF Package operation
        resp, body = self.retry_lcmocc(lcmocc_id)
        self.assertEqual(202, resp.status_code)
        self.check_resp_headers_in_delete(resp)
        self.wait_lcmocc_failed_temp(lcmocc_id)

        # 6. Rollback Change Current VNF Package operation
        resp, body = self.rollback_lcmocc(lcmocc_id)
        self.assertEqual(202, resp.status_code)
        self.check_resp_headers_in_delete(resp)
        self.wait_lcmocc_rolled_back(lcmocc_id)

        # 7. Show VNF instance
        additional_inst_attrs = [
            'vimConnectionInfo',
            'instantiatedVnfInfo'
        ]
        expected_inst_attrs.extend(additional_inst_attrs)
        resp, body = self.show_vnf_instance(inst_id)
        self.assertEqual(200, resp.status_code)
        self.check_resp_headers_in_get(resp)
        self.check_resp_body(body, expected_inst_attrs)

        vnfc_resource_infos = body['instantiatedVnfInfo'].get(
            'vnfcResourceInfo')
        after_resource_ids = [vnfc_info['computeResource']['resourceId']
                              for vnfc_info in vnfc_resource_infos]
        self.assertNotEqual(before_resource_ids, after_resource_ids)

        # 8. Terminate a VNF instance
        terminate_req = paramgen.max_sample_terminate()
        resp, body = self.terminate_vnf_instance(inst_id, terminate_req)
        self.assertEqual(202, resp.status_code)
        self.check_resp_headers_in_operation_task(resp)

        lcmocc_id = os.path.basename(resp.headers['Location'])
        self.wait_lcmocc_complete(lcmocc_id)

        # wait a bit because there is a bit time lag between lcmocc DB
        # update and terminate completion.
        time.sleep(10)

        # 9. Delete a VNF instance
        resp, body = self.delete_vnf_instance(inst_id)
        self.assertEqual(204, resp.status_code)
        self.check_resp_headers_in_delete(resp)

        # check deletion of VNF instance
        resp, body = self.show_vnf_instance(inst_id)
        self.assertEqual(404, resp.status_code)

        # check usageState of VNF Package
        usage_state = self.get_vnf_package(self.vnf_pkg_2).get('usageState')
        self.assertEqual('NOT_IN_USE', usage_state)

    def test_change_vnfpkg_failed_and_fail(self):
        """Test LCM operations error handling

        * About LCM operations:
          This test includes the following operations.
          - 1. Create a new VNF instance resource
          - 2. Instantiate a VNF instance
          - 3. Show VNF instance
          - 4. Change Current VNF Package
          - 5. Fail Change Current VNF Package
          - 6. Show VNF instance
          - 7. Terminate VNF instance
          - 8. Delete a VNF instance
        """

        # 1. Create a new VNF instance resource
        # NOTE: extensions and vnfConfigurableProperties are omitted
        # because they are commented out in etsi_nfv_sol001.
        expected_inst_attrs = [
            'id',
            'vnfInstanceName',
            'vnfInstanceDescription',
            'vnfdId',
            'vnfProvider',
            'vnfProductName',
            'vnfSoftwareVersion',
            'vnfdVersion',
            # 'vnfConfigurableProperties', # omitted
            # 'vimConnectionInfo', # omitted
            'instantiationState',
            # 'instantiatedVnfInfo', # omitted
            'metadata',
            # 'extensions', # omitted
            '_links'
        ]
        create_req = paramgen.test_instantiate_cnf_resources_create(
            self.vnfd_id_1)
        resp, body = self.create_vnf_instance(create_req)
        self.assertEqual(201, resp.status_code)
        self.check_resp_headers_in_create(resp)
        self.check_resp_body(body, expected_inst_attrs)
        inst_id = body['id']

        # check usageState of VNF Package
        usage_state = self.get_vnf_package(self.vnf_pkg_1)['usageState']
        self.assertEqual('IN_USE', usage_state)

        # 2. Instantiate a VNF instance
        vim_id = self.get_k8s_vim_id()
        instantiate_req = paramgen.change_vnfpkg_instantiate_error_handing(
            vim_id)
        resp, body = self.instantiate_vnf_instance(inst_id, instantiate_req)
        self.assertEqual(202, resp.status_code)
        self.check_resp_headers_in_operation_task(resp)

        lcmocc_id = os.path.basename(resp.headers['Location'])
        self.wait_lcmocc_complete(lcmocc_id)

        # 3. Show VNF instance
        additional_inst_attrs = [
            'vimConnectionInfo',
            'instantiatedVnfInfo'
        ]
        expected_inst_attrs.extend(additional_inst_attrs)
        resp, body = self.show_vnf_instance(inst_id)
        self.assertEqual(200, resp.status_code)
        self.check_resp_headers_in_get(resp)
        self.check_resp_body(body, expected_inst_attrs)

        vnfc_resource_infos = body['instantiatedVnfInfo'].get(
            'vnfcResourceInfo')
        before_resource_ids = [vnfc_info['computeResource']['resourceId']
                               for vnfc_info in vnfc_resource_infos]

        # 4. Change Current VNF Package (will fail)
        change_vnfpkg_req = paramgen.change_vnfpkg_error(self.vnfd_id_2)
        resp, body = self.change_vnfpkg(inst_id, change_vnfpkg_req)
        self.assertEqual(202, resp.status_code)
        self.check_resp_headers_in_operation_task(resp)

        lcmocc_id = os.path.basename(resp.headers['Location'])
        self.wait_lcmocc_failed_temp(lcmocc_id)

        # 5. Fail Change Current VNF Package operation
        expected_inst_attrs_fail = [
            'id',
            'operationState',
            'stateEnteredTime',
            'startTime',
            'vnfInstanceId',
            'grantId',
            'operation',
            'isAutomaticInvocation',
            'operationParams',
            'isCancelPending',
            'error',
            '_links'
        ]
        resp, body = self.fail_lcmocc(lcmocc_id)
        self.assertEqual(200, resp.status_code)
        self.check_resp_headers_in_get(resp)
        self.check_resp_body(body, expected_inst_attrs_fail)
        resp, body = self.show_lcmocc(lcmocc_id)
        self.assertEqual(200, resp.status_code)
        self.assertEqual('FAILED', body['operationState'])

        # 6. Show VNF instance
        additional_inst_attrs = [
            'vimConnectionInfo',
            'instantiatedVnfInfo'
        ]
        expected_inst_attrs.extend(additional_inst_attrs)
        resp, body = self.show_vnf_instance(inst_id)
        self.assertEqual(200, resp.status_code)
        self.check_resp_headers_in_get(resp)
        self.check_resp_body(body, expected_inst_attrs)

        vnfc_resource_infos = body['instantiatedVnfInfo'].get(
            'vnfcResourceInfo')
        after_resource_ids = [vnfc_info['computeResource']['resourceId']
                              for vnfc_info in vnfc_resource_infos]
        self.assertEqual(before_resource_ids, after_resource_ids)

        # 7. Terminate a VNF instance
        terminate_req = paramgen.max_sample_terminate()
        resp, body = self.terminate_vnf_instance(inst_id, terminate_req)
        self.assertEqual(202, resp.status_code)
        self.check_resp_headers_in_operation_task(resp)

        lcmocc_id = os.path.basename(resp.headers['Location'])
        self.wait_lcmocc_complete(lcmocc_id)

        # wait a bit because there is a bit time lag between lcmocc DB
        # update and terminate completion.
        time.sleep(10)

        # 8. Delete a VNF instance
        resp, body = self.delete_vnf_instance(inst_id)
        self.assertEqual(204, resp.status_code)
        self.check_resp_headers_in_delete(resp)

        # check deletion of VNF instance
        resp, body = self.show_vnf_instance(inst_id)
        self.assertEqual(404, resp.status_code)

        # check usageState of VNF Package
        usage_state = self.get_vnf_package(self.vnf_pkg_2).get('usageState')
        self.assertEqual('NOT_IN_USE', usage_state)
File diff suppressed because it is too large