Merge "Support CNF heal operations based on ETSI NFV"

This commit is contained in:
Zuul 2021-03-25 03:41:35 +00:00 committed by Gerrit Code Review
commit cc4b7b6bef
28 changed files with 2873 additions and 39 deletions

View File

@ -0,0 +1,10 @@
---
features:
- |
Add Container based VNF heal operation support with ETSI NFV-SOL002 and
SOL003 v2.6.1 VNF Lifecycle Management. For "Heal VNFC with SOL002", users
can heal Pod (mapped as VNFC) that is singleton or created using controller
resources such as Kubernetes Deployment, DaemonSet, StatefulSet, and
ReplicaSet. For "Heal VNF instance with SOL003", users can heal the entire
VNF instance by terminating and re-instantiating the VNF. The VNFC resource
information is stored and updated to support the heal operation of the Pod.

View File

@ -737,7 +737,8 @@ class Conductor(manager.Manager):
self.vnf_manager.invoke(vim_connection_info.vim_type,
'post_vnf_instantiation', context=context,
vnf_instance=vnf_instance,
vim_connection_info=vim_connection_info)
vim_connection_info=vim_connection_info,
instantiate_vnf_req=instantiate_vnf_req)
except Exception as ex:
try:

View File

@ -117,6 +117,14 @@ class CNFScaleWaitFailed(exceptions.TackerException):
message = _('CNF Scale Wait Failed with reason: %(reason)s')
# Raised when a CNF (container-based VNF) heal operation fails; the full
# failure text is supplied by the caller through the ``reason`` kwarg.
class CNFHealFailed(exceptions.TackerException):
    message = _('%(reason)s')
# Raised when waiting for a CNF heal operation to complete fails (e.g. the
# healed Kubernetes resources never become ready); the failure text is
# supplied by the caller through the ``reason`` kwarg.
class CNFHealWaitFailed(exceptions.TackerException):
    message = _('%(reason)s')
# Lookup failure for a service type id; NotFound subclass so callers can
# map it to a 404-style response.
class ServiceTypeNotFound(exceptions.NotFound):
    message = _('service type %(service_type_id)s could not be found')

View File

@ -0,0 +1,114 @@
tosca_definitions_version: tosca_simple_yaml_1_2
description: Sample VNF
imports:
- etsi_nfv_sol001_common_types.yaml
- etsi_nfv_sol001_vnfd_types.yaml
- helloworld3_types.yaml
topology_template:
inputs:
descriptor_id:
type: string
descriptor_version:
type: string
provider:
type: string
product_name:
type: string
software_version:
type: string
vnfm_info:
type: list
entry_schema:
type: string
flavour_id:
type: string
flavour_description:
type: string
substitution_mappings:
node_type: company.provider.VNF
properties:
flavour_id: complex
requirements:
virtual_link_external: []
node_templates:
VNF:
type: company.provider.VNF
properties:
flavour_description: A flavour for multiple resources
VDU1:
type: tosca.nodes.nfv.Vdu.Compute
properties:
name: vdu1-heal-complex
description: kubernetes resource as VDU1
vdu_profile:
min_number_of_instances: 2
max_number_of_instances: 3
VDU2:
type: tosca.nodes.nfv.Vdu.Compute
properties:
name: vdu2-heal
description: kubernetes resource as VDU2
vdu_profile:
min_number_of_instances: 1
max_number_of_instances: 1
policies:
- scaling_aspects:
type: tosca.policies.nfv.ScalingAspects
properties:
aspects:
vdu1_aspect:
name: vdu1_aspect
description: vdu1 scaling aspect
max_scale_level: 1
step_deltas:
- delta_1
- vdu1_initial_delta:
type: tosca.policies.nfv.VduInitialDelta
properties:
initial_delta:
number_of_instances: 1
targets: [ VDU1 ]
- vdu1_scaling_aspect_deltas:
type: tosca.policies.nfv.VduScalingAspectDeltas
properties:
aspect: vdu1_aspect
deltas:
delta_1:
number_of_instances: 1
targets: [ VDU1 ]
- instantiation_levels:
type: tosca.policies.nfv.InstantiationLevels
properties:
levels:
instantiation_level_1:
description: Smallest size
scale_info:
vdu1_aspect:
scale_level: 0
instantiation_level_2:
description: Largest size
scale_info:
vdu1_aspect:
scale_level: 1
default_level: instantiation_level_1
- vdu1_instantiation_levels:
type: tosca.policies.nfv.VduInstantiationLevels
properties:
levels:
instantiation_level_1:
number_of_instances: 2
instantiation_level_2:
number_of_instances: 3
targets: [ VDU1 ]

View File

@ -0,0 +1,105 @@
tosca_definitions_version: tosca_simple_yaml_1_2
description: Sample VNF
imports:
- etsi_nfv_sol001_common_types.yaml
- etsi_nfv_sol001_vnfd_types.yaml
- helloworld3_types.yaml
topology_template:
inputs:
descriptor_id:
type: string
descriptor_version:
type: string
provider:
type: string
product_name:
type: string
software_version:
type: string
vnfm_info:
type: list
entry_schema:
type: string
flavour_id:
type: string
flavour_description:
type: string
substitution_mappings:
node_type: company.provider.VNF
properties:
flavour_id: simple
requirements:
virtual_link_external: []
node_templates:
VNF:
type: company.provider.VNF
properties:
flavour_description: A simple flavour
VDU1:
type: tosca.nodes.nfv.Vdu.Compute
properties:
name: vdu1-heal-simple
description: kubernetes controller resource as VDU
vdu_profile:
min_number_of_instances: 2
max_number_of_instances: 3
policies:
- scaling_aspects:
type: tosca.policies.nfv.ScalingAspects
properties:
aspects:
vdu1_aspect:
name: vdu1_aspect
description: vdu1 scaling aspect
max_scale_level: 1
step_deltas:
- delta_1
- VDU1_initial_delta:
type: tosca.policies.nfv.VduInitialDelta
properties:
initial_delta:
number_of_instances: 1
targets: [ VDU1 ]
- VDU1_scaling_aspect_deltas:
type: tosca.policies.nfv.VduScalingAspectDeltas
properties:
aspect: vdu1_aspect
deltas:
delta_1:
number_of_instances: 1
targets: [ VDU1 ]
- instantiation_levels:
type: tosca.policies.nfv.InstantiationLevels
properties:
levels:
instantiation_level_1:
description: Smallest size
scale_info:
vdu1_aspect:
scale_level: 0
instantiation_level_2:
description: Largest size
scale_info:
vdu1_aspect:
scale_level: 1
default_level: instantiation_level_1
- vdu1_instantiation_levels:
type: tosca.policies.nfv.VduInstantiationLevels
properties:
levels:
instantiation_level_1:
number_of_instances: 2
instantiation_level_2:
number_of_instances: 3
targets: [ VDU1 ]

View File

@ -0,0 +1,32 @@
tosca_definitions_version: tosca_simple_yaml_1_2
description: Sample VNF
imports:
- etsi_nfv_sol001_common_types.yaml
- etsi_nfv_sol001_vnfd_types.yaml
- helloworld3_types.yaml
- helloworld3_df_simple.yaml
- helloworld3_df_complex.yaml
topology_template:
inputs:
selected_flavour:
type: string
description: VNF deployment flavour selected by the consumer. It is provided in the API
node_templates:
VNF:
type: company.provider.VNF
properties:
flavour_id: { get_input: selected_flavour }
descriptor_id: b1bb0ce7-ebca-4fa7-95ed-4840d70a1177
provider: Company
product_name: Sample VNF
software_version: '1.0'
descriptor_version: '1.0'
vnfm_info:
- Tacker
requirements:
#- virtual_link_external # mapped in lower-level templates
#- virtual_link_internal # mapped in lower-level templates

View File

@ -0,0 +1,53 @@
tosca_definitions_version: tosca_simple_yaml_1_2
description: VNF type definition
imports:
- etsi_nfv_sol001_common_types.yaml
- etsi_nfv_sol001_vnfd_types.yaml
node_types:
company.provider.VNF:
derived_from: tosca.nodes.nfv.VNF
properties:
descriptor_id:
type: string
constraints: [ valid_values: [ b1bb0ce7-ebca-4fa7-95ed-4840d70a1177 ] ]
default: b1bb0ce7-ebca-4fa7-95ed-4840d70a1177
descriptor_version:
type: string
constraints: [ valid_values: [ '1.0' ] ]
default: '1.0'
provider:
type: string
constraints: [ valid_values: [ 'Company' ] ]
default: 'Company'
product_name:
type: string
constraints: [ valid_values: [ 'Sample VNF' ] ]
default: 'Sample VNF'
software_version:
type: string
constraints: [ valid_values: [ '1.0' ] ]
default: '1.0'
vnfm_info:
type: list
entry_schema:
type: string
constraints: [ valid_values: [ Tacker ] ]
default: [ Tacker ]
flavour_id:
type: string
constraints: [ valid_values: [ simple,complex ] ]
default: simple
flavour_description:
type: string
default: ""
requirements:
- virtual_link_external:
capability: tosca.capabilities.nfv.VirtualLinkable
- virtual_link_internal:
capability: tosca.capabilities.nfv.VirtualLinkable
interfaces:
Vnflcm:
type: tosca.interfaces.nfv.Vnflcm

View File

@ -0,0 +1,22 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: vdu1-heal-complex
namespace: default
spec:
replicas: 2
selector:
matchLabels:
app: webserver
template:
metadata:
labels:
app: webserver
spec:
containers:
- name: nginx
image: nginx
imagePullPolicy: IfNotPresent
ports:
- containerPort: 80
protocol: TCP

View File

@ -0,0 +1,22 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: vdu1-heal-simple
namespace: default
spec:
replicas: 2
selector:
matchLabels:
app: webserver
template:
metadata:
labels:
app: webserver
spec:
containers:
- name: nginx
image: nginx
imagePullPolicy: IfNotPresent
ports:
- containerPort: 80
protocol: TCP

View File

@ -0,0 +1,12 @@
apiVersion: v1
kind: Pod
metadata:
name: vdu2-heal
namespace: default
spec:
containers:
- image: nginx
imagePullPolicy: IfNotPresent
name: webserver2
ports:
- containerPort: 8080

View File

@ -0,0 +1,19 @@
TOSCA-Meta-File-Version: 1.0
Created-by: dummy_user
CSAR-Version: 1.1
Entry-Definitions: Definitions/helloworld3_top.vnfd.yaml
Name: Files/kubernetes/pod_heal.yaml
Content-Type: application/yaml
Algorithm: SHA-256
Hash: 08fabdd52e8a386669f177c0a7a8a351b036bcde3bf399ca1816455d81dd191c
Name: Files/kubernetes/deployment_heal_simple.yaml
Content-Type: application/yaml
Algorithm: SHA-256
Hash: 39c9b301d04714c6b124b333057a22d316835c3cb340c4e2ebfadc296c3fbfbc
Name: Files/kubernetes/deployment_heal_complex.yaml
Content-Type: application/yaml
Algorithm: SHA-256
Hash: 06c018b9f4b231a604a6cd223a2552fecc4c6dc8bedf9325e84f7fe2b6fe8492

View File

@ -0,0 +1,124 @@
tosca_definitions_version: tosca_simple_yaml_1_2
description: Simple deployment flavour for Sample VNF
imports:
- etsi_nfv_sol001_common_types.yaml
- etsi_nfv_sol001_vnfd_types.yaml
- helloworld3_types.yaml
topology_template:
inputs:
descriptor_id:
type: string
descriptor_version:
type: string
provider:
type: string
product_name:
type: string
software_version:
type: string
vnfm_info:
type: list
entry_schema:
type: string
flavour_id:
type: string
flavour_description:
type: string
substitution_mappings:
node_type: company.provider.VNF
properties:
flavour_id: simple
requirements:
virtual_link_external: []
node_templates:
VNF:
type: company.provider.VNF
properties:
flavour_description: A simple flavour
VDU1:
type: tosca.nodes.nfv.Vdu.Compute
properties:
name: vdu1
description: VDU1 compute node
vdu_profile:
min_number_of_instances: 1
max_number_of_instances: 1
VDU2:
type: tosca.nodes.nfv.Vdu.Compute
properties:
name: vdu2
description: VDU2 compute node
vdu_profile:
min_number_of_instances: 1
max_number_of_instances: 3
policies:
- scaling_aspects:
type: tosca.policies.nfv.ScalingAspects
properties:
aspects:
vdu2_aspect:
name: vdu2_aspect
description: vdu2 scaling aspect
max_scale_level: 2
step_deltas:
- delta_1
- VDU2_initial_delta:
type: tosca.policies.nfv.VduInitialDelta
properties:
initial_delta:
number_of_instances: 1
targets: [ VDU2 ]
- VDU2_scaling_aspect_deltas:
type: tosca.policies.nfv.VduScalingAspectDeltas
properties:
aspect: vdu2_aspect
deltas:
delta_1:
number_of_instances: 1
targets: [ VDU2 ]
- instantiation_levels:
type: tosca.policies.nfv.InstantiationLevels
properties:
levels:
instantiation_level_1:
description: Smallest size
scale_info:
vdu2_aspect:
scale_level: 0
instantiation_level_2:
description: Largest size
scale_info:
vdu2_aspect:
scale_level: 2
default_level: instantiation_level_1
- VDU1_instantiation_levels:
type: tosca.policies.nfv.VduInstantiationLevels
properties:
levels:
instantiation_level_1:
number_of_instances: 1
instantiation_level_2:
number_of_instances: 1
targets: [ VDU1 ]
- VDU2_instantiation_levels:
type: tosca.policies.nfv.VduInstantiationLevels
properties:
levels:
instantiation_level_1:
number_of_instances: 1
instantiation_level_2:
number_of_instances: 3
targets: [ VDU2 ]

View File

@ -1,7 +1,7 @@
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: nginx
name: vdu1
namespace: default
spec:
selector:

View File

@ -1,7 +1,7 @@
apiVersion: apps/v1
kind: Deployment
metadata:
name: curry-probe-test001
name: vdu2
namespace: default
spec:
replicas: 1

View File

@ -2,7 +2,7 @@ apiVersion: v1
kind: Pod
metadata:
namespace: default
name: curry-endpoint-test001
name: vdu1
spec:
containers:
- image: celebdor/kuryr-demo

View File

@ -28,7 +28,7 @@ metadata:
apiVersion: apps/v1
kind: ReplicaSet
metadata:
name: curry-replicaset-multiple
name: vdu2
namespace: default
spec:
replicas: 2

View File

@ -1,7 +1,7 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: curry-ns-statefulset
name: vdu2
namespace: default
spec:
selector:

View File

@ -26,12 +26,12 @@ Hash: 4042352e0de6aa0ad28d44354bd8e0d62fc8e753c8f52b7edf69d2a7a25d8f8d
Name: Files/kubernetes/daemon-set.yaml
Content-Type: test-data
Algorithm: SHA-256
Hash: c0750df79c9ba2824b032b6a485764486b014021aa6dade5ef61f1c10569412f
Hash: f8ed04536a8795af4828b2f731225abc34986f9ea30237d9652669ca57d9d217
Name: Files/kubernetes/deployment.yaml
Content-Type: test-data
Algorithm: SHA-256
Hash: 6a40dfb06764394fb604ae807d1198bc2e2ee8aece3b9483dfde48e53f316a58
Hash: 80f160c9bdd9daa6d0111c8d40b5575946b8c0f23696aa8d91d20f313adae087
Name: Files/kubernetes/horizontal-pod-autoscaler.yaml
Content-Type: test-data
@ -91,12 +91,12 @@ Hash: 5d4d3d399e04cdba1f9c691ac7e690e295ff02b7c935abae873b68a83a858c50
Name: Files/kubernetes/pod.yaml
Content-Type: test-data
Algorithm: SHA-256
Hash: a708dcf5ba4d3a7c675f18b71484a32b7e4446e80e57dcc3035b8a921c3f659d
Hash: 6c97b1a8fc8d21a6a9e7ab1c383b49d3ec31f79a83de218f5537d18531ddfbd8
Name: Files/kubernetes/replicaset_service_secret.yaml
Content-Type: test-data
Algorithm: SHA-256
Hash: 8ed52e5e167890efd7fba29c748f717dff01d68b60ff9a06af178cbafdfdc765
Hash: 7d83ba61def65be3203b164b496057e4d062249804df82eba1831111cc4614a0
Name: Files/kubernetes/resource-quota.yaml
Content-Type: test-data
@ -116,7 +116,7 @@ Hash: 83bd9c40db8c798d0cab0e793a4b40a4ac7eca4fec4fba89ab4257d0f397db40
Name: Files/kubernetes/statefulset.yaml
Content-Type: test-data
Algorithm: SHA-256
Hash: d0beddd39f6808cb62094146778961b068871393df3474e0787145639a94f649
Hash: 6829939e8b30a36c69d0e84c65b36701712c89bfbe827536cba8c0cdb15a816b
Name: Files/kubernetes/storage-class.yaml
Content-Type: test-data

View File

@ -0,0 +1,403 @@
# Copyright (C) 2020 FUJITSU
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import os
import time
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
from sqlalchemy import desc
from sqlalchemy.orm import joinedload
from tacker.common import exceptions
from tacker import context
from tacker.db import api as db_api
from tacker.db.db_sqlalchemy import api
from tacker.db.db_sqlalchemy import models
from tacker.objects import fields
from tacker.objects import vnf_lcm_op_occs
from tacker.tests.functional import base
from tacker.tests import utils
VNF_PACKAGE_UPLOAD_TIMEOUT = 300
VNF_INSTANTIATE_TIMEOUT = 600
VNF_TERMINATE_TIMEOUT = 600
VNF_HEAL_TIMEOUT = 600
RETRY_WAIT_TIME = 5
def _create_and_upload_vnf_package(tacker_client, csar_package_name,
                                   user_defined_data):
    """Create a VNF package and upload its CSAR content.

    :param tacker_client: HTTP client used for the Tacker vnfpkgm API.
    :param csar_package_name: directory name of the sample CSAR under
        ``etc/samples/etsi/nfv``.
    :param user_defined_data: dict stored as the package's userDefinedData.
    :returns: tuple of (vnf package id, vnfd id) once the package is
        ONBOARDED.
    :raises Exception: if onboarding does not finish within
        VNF_PACKAGE_UPLOAD_TIMEOUT seconds.
    """
    # create vnf package
    body = jsonutils.dumps({"userDefinedData": user_defined_data})
    resp, vnf_package = tacker_client.do_request(
        '/vnfpkgm/v1/vnf_packages', "POST", body=body)

    # upload vnf package
    csar_package_path = "../../../etc/samples/etsi/nfv/%s" % csar_package_name
    file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                             csar_package_path))

    # Generating unique vnfd id. This is required when multiple workers
    # are running concurrently. The call below creates a new temporary
    # CSAR with unique vnfd id.
    file_path, uniqueid = utils.create_csar_with_unique_vnfd_id(file_path)

    with open(file_path, 'rb') as file_object:
        resp, resp_body = tacker_client.do_request(
            '/vnfpkgm/v1/vnf_packages/{id}/package_content'.format(
                id=vnf_package['id']),
            "PUT", body=file_object, content_type='application/zip')

    # wait for onboard
    start_time = int(time.time())
    show_url = os.path.join('/vnfpkgm/v1/vnf_packages', vnf_package['id'])
    vnfd_id = None
    while True:
        resp, body = tacker_client.do_request(show_url, "GET")
        if body['onboardingState'] == "ONBOARDED":
            vnfd_id = body['vnfdId']
            break

        if ((int(time.time()) - start_time) > VNF_PACKAGE_UPLOAD_TIMEOUT):
            # BUG FIX: the timeout value used to be passed as a second
            # positional argument to Exception(), so it was never
            # interpolated into the "%d" placeholder. Format explicitly.
            raise Exception("Failed to onboard vnf package, process could"
                            " not be completed within %d seconds" %
                            VNF_PACKAGE_UPLOAD_TIMEOUT)

        time.sleep(RETRY_WAIT_TIME)

    # remove temporarily created CSAR file
    os.remove(file_path)
    return vnf_package['id'], vnfd_id
def _delete_wait_vnf_instance(tacker_client, id):
url = os.path.join("/vnflcm/v1/vnf_instances", id)
start_time = int(time.time())
while True:
resp, body = tacker_client.do_request(url, "DELETE")
if 204 == resp.status_code:
break
if ((int(time.time()) - start_time) > VNF_TERMINATE_TIMEOUT):
raise Exception("Failed to delete vnf instance, process could not"
" be completed within %d seconds", VNF_TERMINATE_TIMEOUT)
time.sleep(RETRY_WAIT_TIME)
def _show_vnf_instance(tacker_client, id):
show_url = os.path.join("/vnflcm/v1/vnf_instances", id)
resp, vnf_instance = tacker_client.do_request(show_url, "GET")
return vnf_instance
def _vnf_instance_wait(
        tacker_client, id,
        instantiation_state=fields.VnfInstanceState.INSTANTIATED,
        timeout=VNF_INSTANTIATE_TIMEOUT):
    """Poll the VNF instance until it reaches *instantiation_state*.

    :param tacker_client: HTTP client used for the Tacker vnflcm API.
    :param id: VNF instance identifier.
    :param instantiation_state: target instantiationState value.
    :param timeout: maximum seconds to wait before giving up.
    :raises Exception: if the state is not reached within *timeout* seconds.
    """
    show_url = os.path.join("/vnflcm/v1/vnf_instances", id)
    start_time = int(time.time())
    while True:
        resp, body = tacker_client.do_request(show_url, "GET")
        if body['instantiationState'] == instantiation_state:
            break

        if ((int(time.time()) - start_time) > timeout):
            # BUG FIX: the timeout used to be passed as a second positional
            # argument to Exception(), so "%d" was never interpolated.
            raise Exception("Failed to wait vnf instance, process could"
                            " not be completed within %d seconds" % timeout)

        time.sleep(RETRY_WAIT_TIME)
class VnfLcmKubernetesHealTest(base.BaseTackerTest):
    """Functional tests for CNF heal per ETSI NFV SOL002/SOL003.

    Instantiates container-based VNFs on a Kubernetes VIM, invokes the
    Heal API (with and without vnfcInstanceId), and verifies the stored
    vnfcResourceInfo before/after the operation.
    """

    @classmethod
    def setUpClass(cls):
        # Upload the heal-test CSAR once per class; the package id and
        # vnfd id are shared by every test method.
        cls.tacker_client = base.BaseTackerTest.tacker_http_client()
        cls.vnf_package_resource, cls.vnfd_id_resource = \
            _create_and_upload_vnf_package(
                cls.tacker_client, "test_cnf_heal",
                {"key": "sample_heal_functional"})
        cls.vnf_instance_ids = []
        super(VnfLcmKubernetesHealTest, cls).setUpClass()

    @classmethod
    def tearDownClass(cls):
        # Update vnf package operational state to DISABLED
        update_req_body = jsonutils.dumps({
            "operationalState": "DISABLED"})
        base_path = "/vnfpkgm/v1/vnf_packages"
        for package_id in [cls.vnf_package_resource]:
            resp, resp_body = cls.tacker_client.do_request(
                '{base_path}/{id}'.format(id=package_id,
                                          base_path=base_path),
                "PATCH", content_type='application/json',
                body=update_req_body)
            # Delete vnf package
            url = '/vnfpkgm/v1/vnf_packages/%s' % package_id
            cls.tacker_client.do_request(url, "DELETE")
        super(VnfLcmKubernetesHealTest, cls).tearDownClass()

    def setUp(self):
        super(VnfLcmKubernetesHealTest, self).setUp()
        self.base_vnf_instances_url = "/vnflcm/v1/vnf_instances"
        self.base_vnf_lcm_op_occs_url = "/vnflcm/v1/vnf_lcm_op_occs"
        self.context = context.get_admin_context()
        # Skip the whole test when no Kubernetes VIM named 'vim-kubernetes'
        # is registered in this deployment.
        vim_list = self.client.list_vims()
        if not vim_list:
            self.skipTest("Vims are not configured")

        vim_id = 'vim-kubernetes'
        vim = self.get_vim(vim_list, vim_id)
        if not vim:
            self.skipTest("Kubernetes VIM '%s' is missing" % vim_id)
        self.vim_id = vim['id']

    def _instantiate_vnf_instance_request(
            self, flavour_id, vim_id=None, additional_param=None):
        # Build an InstantiateVnfRequest body; vimConnectionInfo and
        # additionalParams are only present when supplied.
        request_body = {"flavourId": flavour_id}

        if vim_id:
            request_body["vimConnectionInfo"] = [
                {"id": uuidutils.generate_uuid(),
                 "vimId": vim_id,
                 "vimType": "kubernetes"}]

        if additional_param:
            request_body["additionalParams"] = additional_param

        return request_body

    def _create_vnf_instance(self, vnfd_id, vnf_instance_name=None,
                             vnf_instance_description=None):
        # POST a CreateVnfRequest; returns (response, response body).
        request_body = {'vnfdId': vnfd_id}
        if vnf_instance_name:
            request_body['vnfInstanceName'] = vnf_instance_name

        if vnf_instance_description:
            request_body['vnfInstanceDescription'] = vnf_instance_description

        resp, response_body = self.http_client.do_request(
            self.base_vnf_instances_url, "POST",
            body=jsonutils.dumps(request_body))
        return resp, response_body

    def _instantiate_vnf_instance(self, id, request_body):
        # POST .../instantiate and block until the instance reaches
        # INSTANTIATED state.
        url = os.path.join(self.base_vnf_instances_url, id, "instantiate")
        resp, body = self.http_client.do_request(
            url, "POST", body=jsonutils.dumps(request_body))
        self.assertEqual(202, resp.status_code)
        _vnf_instance_wait(self.tacker_client, id)

    def _create_and_instantiate_vnf_instance(self, flavour_id,
                                             additional_params):
        """Create + instantiate a VNF and return its latest resource view."""
        # create vnf instance
        vnf_instance_name = "test_vnf_instance_for_cnf_heal-%s" % \
            uuidutils.generate_uuid()
        vnf_instance_description = "vnf instance for cnf heal testing"
        resp, vnf_instance = self._create_vnf_instance(
            self.vnfd_id_resource, vnf_instance_name=vnf_instance_name,
            vnf_instance_description=vnf_instance_description)

        # instantiate vnf instance
        additional_param = additional_params
        request_body = self._instantiate_vnf_instance_request(
            flavour_id, vim_id=self.vim_id, additional_param=additional_param)

        self._instantiate_vnf_instance(vnf_instance['id'], request_body)
        # Re-fetch so the returned dict contains instantiatedVnfInfo.
        vnf_instance = _show_vnf_instance(
            self.tacker_client, vnf_instance['id'])
        self.vnf_instance_ids.append(vnf_instance['id'])

        return vnf_instance

    def _terminate_vnf_instance(self, id):
        # Terminate vnf forcefully
        request_body = {
            "terminationType": fields.VnfInstanceTerminationType.FORCEFUL,
        }
        url = os.path.join(self.base_vnf_instances_url, id, "terminate")
        resp, body = self.http_client.do_request(
            url, "POST", body=jsonutils.dumps(request_body))
        self.assertEqual(202, resp.status_code)

        _vnf_instance_wait(
            self.tacker_client, id,
            instantiation_state=fields.VnfInstanceState.NOT_INSTANTIATED,
            timeout=VNF_TERMINATE_TIMEOUT)

    def _delete_vnf_instance(self, id):
        _delete_wait_vnf_instance(self.tacker_client, id)

        # verify vnf instance is deleted
        url = os.path.join(self.base_vnf_instances_url, id)
        resp, body = self.http_client.do_request(url, "GET")
        self.assertEqual(404, resp.status_code)

    def _heal_vnf_instance(self, id, vnfc_instance_id):
        # Per SOL002 vnfcInstanceId selects specific VNFCs; an empty list
        # requests a SOL003-style heal of the entire VNF.
        url = os.path.join(self.base_vnf_instances_url, id, "heal")
        # generate body
        request_body = {
            "vnfcInstanceId": vnfc_instance_id}
        resp, body = self.http_client.do_request(
            url, "POST", body=jsonutils.dumps(request_body))
        self.assertEqual(202, resp.status_code)

    @db_api.context_manager.reader
    def _vnf_notify_get_by_id(self, context, vnf_instance_id,
                              columns_to_join=None):
        # Fetch the most recent VnfLcmOpOcc row for the instance (ordered
        # by created_at desc); raises VnfInstanceNotFound when none exists.
        query = api.model_query(
            context, models.VnfLcmOpOccs,
            read_deleted="no", project_only=True).filter_by(
            vnf_instance_id=vnf_instance_id).order_by(
            desc("created_at"))

        if columns_to_join:
            for column in columns_to_join:
                query = query.options(joinedload(column))

        db_vnflcm_op_occ = query.first()

        if not db_vnflcm_op_occ:
            raise exceptions.VnfInstanceNotFound(id=vnf_instance_id)

        vnflcm_op_occ = vnf_lcm_op_occs.VnfLcmOpOcc.obj_from_db_obj(
            context, db_vnflcm_op_occ)
        return vnflcm_op_occ

    def _wait_vnflcm_op_occs(
            self, context, vnf_instance_id,
            operation_state='COMPLETED'):
        # Poll the latest LCM operation occurrence until it reaches
        # *operation_state* or VNF_HEAL_TIMEOUT elapses.
        start_time = int(time.time())
        while True:
            vnflcm_op_occ = self._vnf_notify_get_by_id(
                context, vnf_instance_id)

            if vnflcm_op_occ.operation_state == operation_state:
                break

            if ((int(time.time()) - start_time) > VNF_HEAL_TIMEOUT):
                raise Exception("Failed to wait heal instance")

            time.sleep(RETRY_WAIT_TIME)

    def _get_vnfc_resource_info(self, vnf_instance):
        # Convenience accessor for instantiatedVnfInfo.vnfcResourceInfo.
        inst_vnf_info = vnf_instance['instantiatedVnfInfo']
        vnfc_resource_info = inst_vnf_info['vnfcResourceInfo']
        return vnfc_resource_info

    def test_heal_cnf_with_sol002(self):
        """Test heal as per SOL002 for CNF

        This test will instantiate cnf. Heal API will be invoked as per SOL002
        i.e. with vnfcInstanceId, so that the specified vnfc instance is healed
        which includes Kubernetes resources (Pod and Deployment).
        """
        # use def-files of singleton Pod and Deployment (replicas=2)
        inst_additional_param = {
            "lcm-kubernetes-def-files": [
                "Files/kubernetes/deployment_heal_complex.yaml",
                "Files/kubernetes/pod_heal.yaml"]}
        vnf_instance = self._create_and_instantiate_vnf_instance(
            "complex", inst_additional_param)
        before_vnfc_rscs = self._get_vnfc_resource_info(vnf_instance)

        # get vnfc_instance_id of heal target
        deployment_target_vnfc = None
        for vnfc_rsc in before_vnfc_rscs:
            compute_resource = vnfc_rsc['computeResource']
            rsc_kind = compute_resource['vimLevelResourceType']
            if rsc_kind == 'Pod':
                # target 1: Singleton Pod
                pod_target_vnfc = vnfc_rsc
            elif not deployment_target_vnfc:
                # target 2: Deployment's Pod
                deployment_target_vnfc = vnfc_rsc
            else:
                # not target: Deployment's remaining one
                deployment_not_target_vnfc = vnfc_rsc

        # test heal SOL-002 (partial heal)
        vnfc_instance_id = \
            [pod_target_vnfc['id'], deployment_target_vnfc['id']]
        self._heal_vnf_instance(vnf_instance['id'], vnfc_instance_id)
        # wait vnflcm_op_occs.operation_state becomes COMPLETED
        self._wait_vnflcm_op_occs(self.context, vnf_instance['id'])
        # check vnfcResourceInfo after heal operation
        vnf_instance = _show_vnf_instance(
            self.tacker_client, vnf_instance['id'])
        after_vnfc_rscs = self._get_vnfc_resource_info(vnf_instance)
        self.assertEqual(len(before_vnfc_rscs), len(after_vnfc_rscs))
        for vnfc_rsc in after_vnfc_rscs:
            after_pod_name = vnfc_rsc['computeResource']['resourceId']
            if vnfc_rsc['id'] == pod_target_vnfc['id']:
                # check stored pod name is not changed (Pod)
                after_resource = pod_target_vnfc
                compute_resource = after_resource['computeResource']
                before_pod_name = compute_resource['resourceId']
                self.assertEqual(after_pod_name, before_pod_name)
            elif vnfc_rsc['id'] == deployment_target_vnfc['id']:
                # check stored pod name is changed (Deployment)
                after_resource = deployment_target_vnfc
                compute_resource = after_resource['computeResource']
                before_pod_name = compute_resource['resourceId']
                self.assertNotEqual(after_pod_name, before_pod_name)
            else:
                # check stored pod name is not changed (not target)
                after_resource = deployment_not_target_vnfc
                compute_resource = after_resource['computeResource']
                before_pod_name = compute_resource['resourceId']
                self.assertEqual(after_pod_name, before_pod_name)

        self._terminate_vnf_instance(vnf_instance['id'])
        self._delete_vnf_instance(vnf_instance['id'])

    def test_heal_cnf_with_sol003(self):
        """Test heal as per SOL003 for CNF

        This test will instantiate cnf. Heal API will be invoked as per SOL003
        i.e. without passing vnfcInstanceId, so that the entire vnf is healed
        which includes Kubernetes resource (Deployment).
        """
        # use def-files of Deployment (replicas=2)
        inst_additional_param = {
            "lcm-kubernetes-def-files": [
                "Files/kubernetes/deployment_heal_simple.yaml"]}
        vnf_instance = self._create_and_instantiate_vnf_instance(
            "simple", inst_additional_param)
        before_vnfc_rscs = self._get_vnfc_resource_info(vnf_instance)

        # test heal SOL-003 (entire heal)
        vnfc_instance_id = []
        self._heal_vnf_instance(vnf_instance['id'], vnfc_instance_id)
        # wait vnflcm_op_occs.operation_state becomes COMPLETED
        self._wait_vnflcm_op_occs(self.context, vnf_instance['id'])
        # check vnfcResourceInfo after heal operation
        vnf_instance = _show_vnf_instance(
            self.tacker_client, vnf_instance['id'])
        after_vnfc_rscs = self._get_vnfc_resource_info(vnf_instance)
        self.assertEqual(len(before_vnfc_rscs), len(after_vnfc_rscs))
        # check id and pod name (as computeResource.resourceId) is changed
        for before_vnfc_rsc in before_vnfc_rscs:
            for after_vnfc_rsc in after_vnfc_rscs:
                self.assertNotEqual(
                    before_vnfc_rsc['id'], after_vnfc_rsc['id'])
                self.assertNotEqual(
                    before_vnfc_rsc['computeResource']['resourceId'],
                    after_vnfc_rsc['computeResource']['resourceId'])

        # terminate vnf instance
        self._terminate_vnf_instance(vnf_instance['id'])
        self._delete_vnf_instance(vnf_instance['id'])

View File

@ -13,9 +13,14 @@
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from kubernetes import client
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
from tacker.db.db_sqlalchemy import models
from tacker import objects
from tacker.objects import vim_connection
from tacker.tests import uuidsentinel
CREATE_K8S_FALSE_VALUE = None
@ -485,6 +490,17 @@ def fake_v1_deployment():
status=client.V1DeploymentStatus(
replicas=1,
ready_replicas=1
),
spec=client.V1DeploymentSpec(
replicas=2,
selector=client.V1LabelSelector(
match_labels={'app': 'webserver'}
),
template=client.V1PodTemplateSpec(
metadata=client.V1ObjectMeta(
labels={'app': 'webserver'}
)
)
)
)
@ -515,6 +531,17 @@ def fake_v1_replica_set():
status=client.V1ReplicaSetStatus(
replicas=1,
ready_replicas=1
),
spec=client.V1ReplicaSetSpec(
replicas=2,
selector=client.V1LabelSelector(
match_labels={'app': 'webserver'}
),
template=client.V1PodTemplateSpec(
metadata=client.V1ObjectMeta(
labels={'app': 'webserver'}
)
)
)
)
@ -923,6 +950,16 @@ def fake_daemon_set():
desired_number_scheduled=13,
current_number_scheduled=4,
number_misscheduled=2,
),
spec=client.V1DaemonSetSpec(
selector=client.V1LabelSelector(
match_labels={'app': 'webserver'}
),
template=client.V1PodTemplateSpec(
metadata=client.V1ObjectMeta(
labels={'app': 'webserver'}
)
)
)
)
@ -1015,13 +1052,65 @@ def get_vnf_resource_list(kind, name='fake_name'):
return [vnf_resource]
def get_fake_pod_info(kind, name='fake_name', pod_status='Running'):
if kind == 'Deployment':
pod_name = _('{name}-1234567890-abcde').format(name=name)
elif kind == 'ReplicaSet':
pod_name = _('{name}-12345').format(name=name)
elif kind == 'StatefulSet':
pod_name = _('{name}-1').format(name=name)
def get_fake_pod_info(kind, name='fake_name', pod_status='Running',
pod_name=None):
if not pod_name:
if kind == 'Deployment':
pod_name = _('{name}-1234567890-abcde').format(name=name)
elif kind == 'ReplicaSet' or kind == 'DaemonSet':
pod_name = _('{name}-12345').format(name=name)
elif kind == 'StatefulSet':
pod_name = _('{name}-1').format(name=name)
elif kind == 'Pod':
pod_name = name
return client.V1Pod(
metadata=client.V1ObjectMeta(name=pod_name),
metadata=client.V1ObjectMeta(name=pod_name,
creation_timestamp=datetime.datetime.now().isoformat('T')),
status=client.V1PodStatus(phase=pod_status))
def fake_vnfc_resource_info(vdu_id='VDU1', rsc_kind='Deployment',
                            rsc_name='fake_name', pod_name=None,
                            namespace=None):
    """Build a VnfcResourceInfo test double for CNF heal unit tests.

    The compute resource points at *pod_name* (derived from
    get_fake_pod_info() when not given) with *rsc_kind* as the
    vimLevelResourceType, and metadata carries JSON-serialized
    name/namespace entries for the Pod and, for controller kinds, the
    controller resource itself.
    """
    def _get_metadata_str(name, namespace="fake_namespace"):
        # NOTE(review): "brank" looks like a typo for "blank"; tests pass
        # this sentinel to request an empty namespace string. Confirm all
        # callers before renaming — the literal is part of the contract.
        if namespace == "brank":
            namespace = ""
        metadata = {
            'name': name,
            'namespace': namespace}
        return jsonutils.dumps(metadata)

    vnfc_obj = objects.VnfcResourceInfo()
    vnfc_obj.id = uuidutils.generate_uuid()
    vnfc_obj.vdu_id = vdu_id
    # Derive a plausible pod name from the controller kind when the caller
    # did not pin one explicitly.
    if not pod_name:
        v1_pod = get_fake_pod_info(rsc_kind, rsc_name)
        pod_name = v1_pod.metadata.name
    compute_resource = objects.ResourceHandle(
        resource_id=pod_name,
        vim_level_resource_type=rsc_kind)
    vnfc_obj.compute_resource = compute_resource
    metadata = {}
    if namespace:
        metadata['Pod'] = _get_metadata_str(
            name=pod_name, namespace=namespace)
        if rsc_kind != 'Pod':
            # Controller kinds (Deployment/ReplicaSet/...) also record the
            # controller resource's own metadata.
            metadata[rsc_kind] = _get_metadata_str(
                name=rsc_name, namespace=namespace)
    else:
        # No namespace given: fall back to _get_metadata_str's default.
        metadata['Pod'] = _get_metadata_str(name=pod_name)
        if rsc_kind != 'Pod':
            metadata[rsc_kind] = _get_metadata_str(name=rsc_name)
    vnfc_obj.metadata = metadata
    return vnfc_obj
def fake_vim_connection_info():
    """Return a VimConnectionInfo stub describing a Kubernetes VIM."""
    return vim_connection.VimConnectionInfo(
        vim_type="kubernetes",
        access_info={
            'auth_url': 'http://fake_url:6443',
            'ssl_ca_cert': None})

View File

@ -1382,10 +1382,11 @@ class TestOpenStack(base.FixturedTestCase):
vnf_link_ports[0].resource_handle.vim_level_resource_type,
'physical_resource_id': uuidsentinel.cp1_resource_id}]
inst_req_info = fd_utils.get_instantiate_vnf_request()
self._responses_in_stack_list(inst_vnf_info.instance_id,
resources=resources)
self.openstack.post_vnf_instantiation(
self.context, vnf_instance, vim_connection_info)
self.context, vnf_instance, vim_connection_info, inst_req_info)
self.assertEqual(vnf_instance.instantiated_vnf_info.
vnfc_resource_info[0].metadata['stack_id'],
inst_vnf_info.instance_id)
@ -1453,8 +1454,9 @@ class TestOpenStack(base.FixturedTestCase):
'physical_resource_id': uuidsentinel.v_l_resource_info_id}]
self._responses_in_stack_list(inst_vnf_info.instance_id,
resources=resources)
inst_req_info = fd_utils.get_instantiate_vnf_request()
self.openstack.post_vnf_instantiation(
self.context, vnf_instance, vim_connection_info)
self.context, vnf_instance, vim_connection_info, inst_req_info)
self.assertEqual(vnf_instance.instantiated_vnf_info.
vnfc_resource_info[0].metadata['stack_id'],
inst_vnf_info.instance_id)
@ -1623,7 +1625,7 @@ class TestOpenStack(base.FixturedTestCase):
"UPDATE_COMPLETE"])
stack = self.openstack.heal_vnf_wait(
self.context, vnf_instance, vim_connection_info)
self.context, vnf_instance, vim_connection_info, None)
self.assertEqual('UPDATE_COMPLETE', stack.stack_status)
def test_heal_vnf_wait_fail(self):
@ -1640,7 +1642,7 @@ class TestOpenStack(base.FixturedTestCase):
self.openstack.STACK_RETRIES = 1
result = self.assertRaises(vnfm.VNFHealWaitFailed,
self.openstack.heal_vnf_wait, self.context, vnf_instance,
vim_connection_info)
vim_connection_info, None)
expected_msg = ("VNF Heal action is not completed within 10 seconds "
"on stack %s") % inst_vnf_info.instance_id

View File

@ -635,7 +635,8 @@ class VnfLcmDriver(abstract_driver.VnfInstanceAbstractDriver):
self._vnf_manager.invoke(
vim_connection_info.vim_type, 'heal_vnf_wait',
context=context, vnf_instance=vnf_instance,
vim_connection_info=vim_connection_info)
vim_connection_info=vim_connection_info,
heal_vnf_request=heal_vnf_request)
except Exception as exp:
LOG.error("Failed to update vnf %(id)s resources for instance "
"%(instance)s. Error: %(error)s",
@ -699,7 +700,8 @@ class VnfLcmDriver(abstract_driver.VnfInstanceAbstractDriver):
self._vnf_manager.invoke(
vim_connection_info.vim_type, 'post_vnf_instantiation',
context=context, vnf_instance=vnf_instance,
vim_connection_info=vim_connection_info)
vim_connection_info=vim_connection_info,
instantiate_vnf_req=instantiate_vnf_request)
except Exception as exc:
with excutils.save_and_reraise_exception() as exc_ctxt:

View File

@ -97,7 +97,7 @@ class VnfAbstractDriver(extensions.PluginInterface, metaclass=abc.ABCMeta):
@abc.abstractmethod
def post_vnf_instantiation(self, context, vnf_instance,
vim_connection_info):
vim_connection_info, instantiate_vnf_req):
pass
@abc.abstractmethod
@ -114,7 +114,8 @@ class VnfAbstractDriver(extensions.PluginInterface, metaclass=abc.ABCMeta):
pass
@abc.abstractmethod
def heal_vnf_wait(self, context, vnf_instance, vim_connection_info):
def heal_vnf_wait(self, context, vnf_instance, vim_connection_info,
heal_vnf_request):
"""Check vnf is healed successfully"""
pass

View File

@ -486,6 +486,12 @@ class Transformer(object):
return sorted_k8s_objs
def get_object_meta(self, content):
must_param = {}
v1_object_meta = client.V1ObjectMeta()
self._init_k8s_obj(v1_object_meta, content, must_param)
return v1_object_meta
# config_labels configures label
def config_labels(self, deployment_name=None, scaling_name=None):
label = dict()

View File

@ -23,6 +23,7 @@ from kubernetes import client
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import uuidutils
from toscaparser import tosca_template
from tacker._i18n import _
@ -46,6 +47,7 @@ from urllib.parse import urlparse
CNF_TARGET_FILES_KEY = 'lcm-kubernetes-def-files'
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
VNFC_POD_NOT_FOUND = "POD_NOT_FOUND"
OPTS = [
cfg.IntOpt('stack_retries',
@ -1247,7 +1249,11 @@ class Kubernetes(abstract_driver.VnfAbstractDriver,
def _is_match_pod_naming_rule(self, rsc_kind, rsc_name, pod_name):
match_result = None
if rsc_kind == 'Deployment':
if rsc_kind == 'Pod':
# Expected example: name
if rsc_name == pod_name:
match_result = True
elif rsc_kind == 'Deployment':
# Expected example: name-012789abef-019az
# NOTE(horie): The naming rule of Pod in deployment is
# "(deployment name)-(pod template hash)-(5 charactors)".
@ -1257,7 +1263,7 @@ class Kubernetes(abstract_driver.VnfAbstractDriver,
match_result = re.match(
rsc_name + '-([0-9a-f]{1,10})-([0-9a-z]{5})+$',
pod_name)
elif rsc_kind == 'ReplicaSet':
elif rsc_kind == 'ReplicaSet' or rsc_kind == 'DaemonSet':
# Expected example: name-019az
match_result = re.match(
rsc_name + '-([0-9a-z]{5})+$',
@ -1528,19 +1534,501 @@ class Kubernetes(abstract_driver.VnfAbstractDriver,
return resource_info_str
def post_vnf_instantiation(self, context, vnf_instance,
                           vim_connection_info, instantiate_vnf_req):
    """Initially store VnfcResourceInfo after instantiation.

    After instantiation, this function gets pods information from the
    Kubernetes VIM and stores information such as pod name, resource
    kind, metadata and vdu id on ``vnf_instance.instantiated_vnf_info``.

    :param context: request context
    :param vnf_instance: VnfInstance object being instantiated
    :param vim_connection_info: VimConnectionInfo for the kubernetes VIM
    :param instantiate_vnf_req: InstantiateVnfRequest referencing the
        Kubernetes object files inside the VNF package
    :raises: re-raises any error from the VIM/TOSCA parsing unchanged
    """
    auth_attr = vim_connection_info.access_info
    auth_cred, file_descriptor = self._get_auth_creds(auth_attr)
    try:
        # get Kubernetes object files declared in the request
        target_k8s_files = self._get_target_k8s_files(instantiate_vnf_req)
        vnf_package_path = vnflcm_utils._get_vnf_package_path(
            context, vnf_instance.vnfd_id)
        # initialize Transformer (no API clients are needed here,
        # it is only used to parse the YAML manifests)
        transformer = translate_outputs.Transformer(
            None, None, None, None)
        # get Kubernetes objects from the package manifests
        k8s_objs = transformer.get_k8s_objs_from_yaml(
            target_k8s_files, vnf_package_path)
        # get TOSCA node templates for the instantiated flavour
        vnfd_dict = vnflcm_utils._get_vnfd_dict(
            context, vnf_instance.vnfd_id,
            vnf_instance.instantiated_vnf_info.flavour_id)
        tosca = tosca_template.ToscaTemplate(
            parsed_params={}, a_file=False, yaml_dict_tpl=vnfd_dict)
        tosca_node_tpls = tosca.topology_template.nodetemplates
        # build vdu_ids dict {vdu_name(as pod_name): vdu_id}
        vdu_ids = {}
        for node_tpl in tosca_node_tpls:
            for node_name, node_value in node_tpl.templates.items():
                if node_value.get('type') == "tosca.nodes.nfv.Vdu.Compute":
                    vdu_id = node_name
                    vdu_name = node_value.get('properties').get('name')
                    vdu_ids[vdu_name] = vdu_id
        # initialize Kubernetes APIs
        core_v1_api_client = self.kubernetes.get_core_v1_api_client(
            auth=auth_cred)
        # only these kinds are mapped to VNFC (Pod) resources
        target_kinds = ["Pod", "Deployment", "DaemonSet", "StatefulSet",
                        "ReplicaSet"]
        pod_list_dict = {}
        vnfc_resource_list = []
        for k8s_obj in k8s_objs:
            rsc_kind = k8s_obj.get('object').kind
            if rsc_kind not in target_kinds:
                # Skip if rsc_kind is not target kind
                continue
            rsc_name = k8s_obj.get('object').metadata.name
            namespace = k8s_obj.get('object').metadata.namespace
            if not namespace:
                namespace = "default"
            # get V1PodList by namespace (cached per namespace to avoid
            # repeated list calls against the VIM)
            if namespace in pod_list_dict.keys():
                pod_list = pod_list_dict.get(namespace)
            else:
                pod_list = core_v1_api_client.list_namespaced_pod(
                    namespace=namespace)
                pod_list_dict[namespace] = pod_list
            # create one VnfcResourceInfo per Pod that matches the
            # controller's pod-naming rule
            for pod in pod_list.items:
                pod_name = pod.metadata.name
                match_result = self._is_match_pod_naming_rule(
                    rsc_kind, rsc_name, pod_name)
                if match_result:
                    # store both the controller's and the Pod template's
                    # metadata so heal can later locate/recreate the Pod
                    metadata = {}
                    metadata[rsc_kind] = jsonutils.dumps(
                        k8s_obj.get('object').metadata.to_dict())
                    if rsc_kind != 'Pod':
                        metadata['Pod'] = jsonutils.dumps(
                            k8s_obj.get('object').spec.template.metadata.
                            to_dict())
                    # generate VnfcResourceInfo
                    vnfc_resource = objects.VnfcResourceInfo()
                    vnfc_resource.id = uuidutils.generate_uuid()
                    vnfc_resource.vdu_id = vdu_ids.get(rsc_name)
                    resource = objects.ResourceHandle()
                    resource.resource_id = pod_name
                    resource.vim_level_resource_type = rsc_kind
                    vnfc_resource.compute_resource = resource
                    vnfc_resource.metadata = metadata
                    vnfc_resource_list.append(vnfc_resource)
        if vnfc_resource_list:
            inst_vnf_info = vnf_instance.instantiated_vnf_info
            inst_vnf_info.vnfc_resource_info = vnfc_resource_list
    except Exception as e:
        LOG.error('Update vnfc resource info got an error due to %s', e)
        raise
    finally:
        self.clean_authenticate_vim(auth_cred, file_descriptor)
def _get_vnfc_rscs_with_vnfc_id(self, inst_vnf_info, heal_vnf_request):
if not heal_vnf_request.vnfc_instance_id:
# include all vnfc resources
return [resource for resource in inst_vnf_info.vnfc_resource_info]
vnfc_resources = []
for vnfc_resource in inst_vnf_info.vnfc_resource_info:
if vnfc_resource.id in heal_vnf_request.vnfc_instance_id:
vnfc_resources.append(vnfc_resource)
return vnfc_resources
def _get_added_pod_names(self, core_v1_api_client, inst_vnf_info, vdu_id,
                         vnfc_resource, pod_list_dict):
    """Compare actual Pods on the VIM with the stored resource info.

    :param core_v1_api_client: kubernetes CoreV1Api client
    :param inst_vnf_info: instantiated VNF info with vnfc_resource_info
    :param vdu_id: target vdu id
        NOTE(review): unused; matching is done via ``vnfc_resource.vdu_id``
    :param vnfc_resource: VnfcResourceInfo whose metadata identifies the
        controller resource (kind/name/namespace) to inspect
    :param pod_list_dict: per-namespace cache of V1PodList results,
        updated in place
    :returns: tuple ``(actual_pod_names, added_pod_names)`` where
        actual_pod_names are matching Pods on the VIM (newest first) and
        added_pod_names are those not yet recorded in vnfcResourceInfo
    """
    kind = vnfc_resource.compute_resource.vim_level_resource_type
    rsc_metadata = jsonutils.loads(vnfc_resource.metadata.get(kind))
    namespace = rsc_metadata.get('namespace') or "default"
    rsc_name = rsc_metadata.get('name')
    # Fetch the namespace's pod list once and cache it.
    if namespace not in pod_list_dict:
        pod_list_dict[namespace] = core_v1_api_client.list_namespaced_pod(
            namespace=namespace)
    pod_list = pod_list_dict[namespace]
    # Newest pods first, so pop() later hands out the oldest additions.
    newest_first = sorted(
        pod_list.items,
        key=lambda pod: pod.metadata.creation_timestamp,
        reverse=True)
    # Pods that actually run on the VIM and match the naming rule.
    actual_pod_names = [
        pod.metadata.name for pod in newest_first
        if self._is_match_pod_naming_rule(kind, rsc_name,
                                          pod.metadata.name)]
    # Pod names already recorded in vnfcResourceInfo for the same VDU.
    stored_pod_names = [
        rsc.compute_resource.resource_id
        for rsc in inst_vnf_info.vnfc_resource_info
        if rsc.vdu_id == vnfc_resource.vdu_id]
    # Pods that appeared on the VIM but are not stored yet.
    added_pod_names = [name for name in actual_pod_names
                       if name not in stored_pod_names]
    return actual_pod_names, added_pod_names
def heal_vnf(self, context, vnf_instance, vim_connection_info,
             heal_vnf_request):
    """Heal function

    This function heals vnfc instances (mapped as Pod),
    and updates vnfcResourceInfo entries which are not the target of
    healing before the healing operation, because Kubernetes may have
    re-created their Pods (and thus changed their names) in the
    meantime.

    :param context: request context
    :param vnf_instance: VnfInstance object with instantiated_vnf_info
    :param vim_connection_info: VimConnectionInfo for the kubernetes VIM
    :param heal_vnf_request: HealVnfRequest listing target vnfc ids
    :raises: CNFHealFailed on VIM errors or unsupported resource kinds
    """
    # initialize Kubernetes APIs
    auth_attr = vim_connection_info.access_info
    auth_cred, file_descriptor = self._get_auth_creds(auth_attr)
    inst_vnf_info = vnf_instance.instantiated_vnf_info
    try:
        core_v1_api_client = self.kubernetes.get_core_v1_api_client(
            auth=auth_cred)
        # get vnfc_resource_info list for healing
        vnfc_resources = self._get_vnfc_rscs_with_vnfc_id(
            inst_vnf_info=inst_vnf_info,
            heal_vnf_request=heal_vnf_request
        )
        # Updates resource_id in vnfc_resource_info which are not the
        # target of healing before heal operation because they may have
        # been re-created by kubelet of Kubernetes automatically and their
        # resource_id (as Pod name) have been already changed
        updated_vdu_ids = []
        pod_list_dict = {}
        for vnfc_resource in vnfc_resources:
            vdu_id = vnfc_resource.vdu_id
            if vdu_id in updated_vdu_ids:
                # For updated vdu_id, go to the next Loop
                continue
            actual_pod_names, added_pod_names = self._get_added_pod_names(
                core_v1_api_client, inst_vnf_info, vdu_id, vnfc_resource,
                pod_list_dict)
            if added_pod_names:
                heal_target_ids = heal_vnf_request.vnfc_instance_id
                for vnfc_rsc in inst_vnf_info.vnfc_resource_info:
                    stored_pod_name = vnfc_rsc.compute_resource.resource_id
                    # Updated vnfcResourceInfo of the same vdu_id other
                    # than heal target
                    if (vnfc_rsc.id not in heal_target_ids) and\
                            (vdu_id == vnfc_rsc.vdu_id) and\
                            (stored_pod_name not in actual_pod_names):
                        pod_name = added_pod_names.pop()
                        vnfc_rsc.compute_resource.resource_id = pod_name
                        LOG.warning("Update resource_id before healing,"
                                    " vnfc_resource_info.id:%(vnfc_id)s,"
                                    " pod_name:%(pod_name)s",
                                    {'vnfc_id': vnfc_rsc.id,
                                     'pod_name': pod_name})
                        if not added_pod_names:
                            break
            updated_vdu_ids.append(vdu_id)
        for vnfc_resource in vnfc_resources:
            # 'Foreground' waits for dependents to be removed first
            body = client.V1DeleteOptions(propagation_policy='Foreground')
            compute_resource = vnfc_resource.compute_resource
            rsc_kind = compute_resource.vim_level_resource_type
            pod_name = compute_resource.resource_id
            rsc_metadata = jsonutils.loads(
                vnfc_resource.metadata.get(rsc_kind))
            namespace = rsc_metadata.get('namespace')
            if not namespace:
                namespace = "default"
            if rsc_kind == 'Pod':
                # A standalone Pod has no controller: it must be deleted
                # and explicitly re-created from its retained spec.
                rsc_name = rsc_metadata.get('name')
                # Get pod information for re-creation before deletion
                pod_info = core_v1_api_client.read_namespaced_pod(
                    namespace=namespace,
                    name=rsc_name
                )
                # Delete Pod
                core_v1_api_client.delete_namespaced_pod(
                    namespace=namespace,
                    name=pod_name,
                    body=body
                )
                # Check and wait that the Pod is deleted
                stack_retries = self.STACK_RETRIES
                for cnt in range(self.STACK_RETRIES):
                    try:
                        core_v1_api_client.read_namespaced_pod(
                            namespace=namespace,
                            name=pod_name
                        )
                    except Exception as e:
                        # NOTE(review): assumes e is an ApiException
                        # carrying a ``status`` attribute — 404 means the
                        # Pod is gone, i.e. deletion completed.
                        if e.status == 404:
                            break
                        else:
                            error_reason = _("Failed the request to read a"
                                " Pod information. namespace: {namespace},"
                                " pod_name: {name}, kind: {kind}, Reason: "
                                "{exception}").format(
                                namespace=namespace, name=pod_name,
                                kind=rsc_kind, exception=e)
                            raise vnfm.CNFHealFailed(reason=error_reason)
                    stack_retries = stack_retries - 1
                    time.sleep(self.STACK_RETRY_WAIT)
                # Number of retries exceeded retry count
                if stack_retries == 0:
                    error_reason = _("Resource healing is not completed"
                        " within {wait} seconds").format(wait=(
                        self.STACK_RETRIES * self.STACK_RETRY_WAIT))
                    LOG.error("CNF Healing failed: %(reason)s",
                              {'reason': error_reason})
                    raise vnfm.CNFHealFailed(reason=error_reason)
                # Recreate pod using retained pod_info
                transformer = translate_outputs.Transformer(
                    None, None, None, None)
                metadata = transformer.get_object_meta(rsc_metadata)
                body = client.V1Pod(metadata=metadata, spec=pod_info.spec)
                core_v1_api_client.create_namespaced_pod(
                    namespace=namespace,
                    body=body
                )
            elif (rsc_kind in ['Deployment', 'DaemonSet', 'StatefulSet',
                               'ReplicaSet']):
                try:
                    # Delete Pod (Pod is automatically re-created)
                    core_v1_api_client.delete_namespaced_pod(
                        namespace=namespace,
                        name=pod_name,
                        body=body
                    )
                except Exception as e:
                    if e.status == 404:
                        # If when the pod to be deleted does not exist,
                        # change resource_id to "POD_NOT_FOUND"
                        compute_resource = vnfc_resource.compute_resource
                        compute_resource.resource_id = VNFC_POD_NOT_FOUND
                        LOG.warning("Target pod to delete is not found,"
                                    " vnfc_resource_info.id:%(vnfc_id)s,"
                                    " pod_name:%(pod_name)s",
                                    {'vnfc_id': vnfc_resource.id,
                                     'pod_name': pod_name})
                    else:
                        error_reason = _("Failed the request to delete a "
                            "Pod. namespace: {namespace}, pod_name: {name}"
                            ", kind: {kind}, Reason: {exception}").format(
                            namespace=namespace, name=pod_name,
                            kind=rsc_kind, exception=e)
                        raise vnfm.CNFHealFailed(reason=error_reason)
            else:
                error_reason = _(
                    "{vnfc_instance_id} is a kind of Kubernetes"
                    " resource that is not covered").format(
                    vnfc_instance_id=vnfc_resource.id)
                LOG.error("CNF Heal failed: %(reason)s",
                          {'reason': error_reason})
                raise vnfm.CNFHealFailed(reason=error_reason)
    except Exception as e:
        LOG.error('Healing CNF got an error due to %s', e)
        raise
    finally:
        self.clean_authenticate_vim(auth_cred, file_descriptor)
def heal_vnf_wait(self, context, vnf_instance,
                  vim_connection_info, heal_vnf_request):
    """heal wait function

    Wait until all status from Pod objects is RUNNING.

    :param context: request context (unused here)
    :param vnf_instance: VnfInstance with instantiated_vnf_info
    :param vim_connection_info: VimConnectionInfo for the kubernetes VIM
    :param heal_vnf_request: HealVnfRequest listing target vnfc ids
    :raises: CNFHealWaitFailed when a Pod reports 'Unknown' status or
        the retry budget is exhausted before everything is 'Running'
    """
    # initialize Kubernetes APIs
    auth_attr = vim_connection_info.access_info
    auth_cred, file_descriptor = self._get_auth_creds(auth_attr)
    try:
        core_v1_api_client = self.kubernetes.get_core_v1_api_client(
            auth=auth_cred)
        app_v1_api_client = self.kubernetes.get_app_v1_api_client(
            auth=auth_cred)
        vnfc_resources = self._get_vnfc_rscs_with_vnfc_id(
            inst_vnf_info=vnf_instance.instantiated_vnf_info,
            heal_vnf_request=heal_vnf_request)
        # Exclude entries where pods were not found when heal
        vnfc_resources = [rsc for rsc in vnfc_resources
                          if rsc.compute_resource.
                          resource_id != VNFC_POD_NOT_FOUND]
        if not vnfc_resources:
            # If heal is not running, wait is no need
            return
        # Get kubernetes resource information from target vnfcResourceInfo
        k8s_resources = list()
        for vnfc_resource in vnfc_resources:
            info = {}
            compute_resource = vnfc_resource.compute_resource
            info['kind'] = compute_resource.vim_level_resource_type
            rsc_metadata = jsonutils.loads(
                vnfc_resource.metadata.get(info['kind']))
            info['name'] = rsc_metadata.get('name')
            info['namespace'] = rsc_metadata.get('namespace')
            k8s_resources.append(info)
        # exclude duplicate entries (dicts are not hashable, so
        # round-trip through their JSON representation)
        k8s_resources = list(map(jsonutils.loads,
                             set(map(jsonutils.dumps, k8s_resources))))
        # get replicas of scalable resources for checking number of pod
        scalable_kinds = ["Deployment", "ReplicaSet", "StatefulSet"]
        for k8s_resource in k8s_resources:
            if k8s_resource.get('kind') in scalable_kinds:
                scale_info = self._call_read_scale_api(
                    app_v1_api_client=app_v1_api_client,
                    namespace=k8s_resource.get('namespace'),
                    name=k8s_resource.get('name'),
                    kind=k8s_resource.get('kind'))
                k8s_resource['replicas'] = scale_info.spec.replicas
        # Poll the VIM until all Pods report 'Running' or the retry
        # budget (STACK_RETRIES * STACK_RETRY_WAIT seconds) runs out.
        stack_retries = self.STACK_RETRIES
        status = 'Pending'
        while status == 'Pending' and stack_retries > 0:
            pods_information = []
            # per-namespace cache for list_namespaced_pod results
            pod_list_dict = {}
            is_unmatch_pods_num = False
            # Get related pod information and check status
            for k8s_resource in k8s_resources:
                namespace = k8s_resource.get('namespace')
                if namespace in pod_list_dict.keys():
                    pod_list = pod_list_dict.get(namespace)
                else:
                    pod_list = core_v1_api_client.list_namespaced_pod(
                        namespace=k8s_resource.get('namespace'))
                    pod_list_dict[namespace] = pod_list
                tmp_pods_info = list()
                for pod in pod_list.items:
                    match_result = self._is_match_pod_naming_rule(
                        k8s_resource.get('kind'),
                        k8s_resource.get('name'),
                        pod.metadata.name)
                    if match_result:
                        tmp_pods_info.append(pod)
                # NOTE(ueha): The status of pod being deleted is retrieved
                # as "Running", which cause incorrect information to be
                # stored in vnfcResouceInfo. Therefore, for the scalable
                # kinds, by comparing the actual number of pods with the
                # replicas, it can wait until the pod deletion is complete
                # and store correct information to vnfcResourceInfo.
                if k8s_resource.get('kind') in scalable_kinds and \
                        k8s_resource.get('replicas') != len(tmp_pods_info):
                    LOG.warning("Unmatch number of pod. (kind: %(kind)s,"
                                " name: %(name)s, replicas: %(replicas)s,"
                                " actual_pod_num: %(actual_pod_num)s)", {
                                    'kind': k8s_resource.get('kind'),
                                    'name': k8s_resource.get('name'),
                                    'replicas': str(
                                        k8s_resource.get('replicas')),
                                    'actual_pod_num': str(
                                        len(tmp_pods_info))})
                    is_unmatch_pods_num = True
                pods_information.extend(tmp_pods_info)
            status = self._get_pod_status(pods_information)
            if status == 'Unknown':
                error_reason = _("Pod status is found Unknown")
                LOG.warning("CNF Healing failed: %(reason)s",
                            {'reason': error_reason})
                raise vnfm.CNFHealWaitFailed(reason=error_reason)
            elif status == 'Pending' or is_unmatch_pods_num:
                time.sleep(self.STACK_RETRY_WAIT)
                stack_retries = stack_retries - 1
                # force another iteration even if pod count (not status)
                # was the reason for waiting
                status = 'Pending'
        if stack_retries == 0 and status != 'Running':
            error_reason = _("Resource healing is not completed within"
                             " {wait} seconds").format(
                                 wait=(self.STACK_RETRIES *
                                       self.STACK_RETRY_WAIT))
            LOG.error("CNF Healing failed: %(reason)s",
                      {'reason': error_reason})
            raise vnfm.CNFHealWaitFailed(reason=error_reason)
    except Exception as e:
        LOG.error('Healing wait CNF got an error due to %s', e)
        raise
    finally:
        self.clean_authenticate_vim(auth_cred, file_descriptor)
def post_heal_vnf(self, context, vnf_instance, vim_connection_info,
                  heal_vnf_request):
    """Update VnfcResourceInfo after healing.

    For controller-managed kinds whose Pod names change on re-creation
    (Deployment, DaemonSet, ReplicaSet), replace stale resource_ids
    with the names of the newly created Pods. Pod and StatefulSet keep
    their Pod names, so their entries need no update.

    :param context: request context (unused here)
    :param vnf_instance: VnfInstance with instantiated_vnf_info
    :param vim_connection_info: VimConnectionInfo for the kubernetes VIM
    :param heal_vnf_request: HealVnfRequest listing target vnfc ids
    :raises: re-raises any error from the VIM unchanged
    """
    # initialize Kubernetes APIs
    auth_attr = vim_connection_info.access_info
    auth_cred, file_descriptor = self._get_auth_creds(auth_attr)
    inst_vnf_info = vnf_instance.instantiated_vnf_info
    try:
        core_v1_api_client = self.kubernetes.get_core_v1_api_client(
            auth=auth_cred)
        vnfc_resources = self._get_vnfc_rscs_with_vnfc_id(
            inst_vnf_info=inst_vnf_info,
            heal_vnf_request=heal_vnf_request
        )
        # initialize
        updated_vdu_ids = []
        pod_list_dict = {}
        for vnfc_resource in vnfc_resources:
            vdu_id = vnfc_resource.vdu_id
            if vdu_id in updated_vdu_ids:
                # For updated vdu_id, go to the next Loop
                continue
            compute_resource = vnfc_resource.compute_resource
            rsc_kind = compute_resource.vim_level_resource_type
            pod_name = compute_resource.resource_id
            if rsc_kind == 'Pod' or rsc_kind == 'StatefulSet':
                # No update required as the pod name does not change
                continue
            # Update vnfcResourceInfo when other rsc_kind
            # (Deployment, DaemonSet, ReplicaSet)
            actual_pod_names, added_pod_names = self._get_added_pod_names(
                core_v1_api_client, inst_vnf_info, vdu_id, vnfc_resource,
                pod_list_dict)
            updated_vnfc_ids = []
            # Update entries that pod was not found when heal_vnf method
            if added_pod_names:
                for vnfc_rsc in vnfc_resources:
                    rsc_id = vnfc_rsc.compute_resource.resource_id
                    if vdu_id == vnfc_rsc.vdu_id and \
                            rsc_id == VNFC_POD_NOT_FOUND:
                        pod_name = added_pod_names.pop()
                        vnfc_rsc.compute_resource.resource_id = pod_name
                        LOG.warning("Update resource_id of the"
                                    " entry where the pod was not found,"
                                    " vnfc_resource_info.id:%(vnfc_id)s,"
                                    " new podname:%(pod_name)s",
                                    {'vnfc_id': vnfc_rsc.id,
                                     'pod_name': pod_name})
                        updated_vnfc_ids.append(vnfc_rsc.id)
                        if not added_pod_names:
                            break
            # Update entries that was healed successful
            if added_pod_names:
                for vnfc_rsc_id in heal_vnf_request.vnfc_instance_id:
                    if vnfc_rsc_id in updated_vnfc_ids:
                        # If the entry has already been updated,
                        # go to the next loop
                        continue
                    for vnfc_rsc in vnfc_resources:
                        if vdu_id == vnfc_rsc.vdu_id and \
                                vnfc_rsc_id == vnfc_rsc.id:
                            pod_name = added_pod_names.pop()
                            compute_resource = vnfc_rsc.compute_resource
                            compute_resource.resource_id = pod_name
                    if not added_pod_names:
                        break
            updated_vdu_ids.append(vdu_id)
    except Exception as e:
        LOG.error('Post healing CNF got an error due to %s', e)
        raise
    finally:
        self.clean_authenticate_vim(auth_cred, file_descriptor)
def get_scale_ids(self,
plugin,
@ -1568,7 +2056,90 @@ class Kubernetes(abstract_driver.VnfAbstractDriver,
def scale_resource_update(self, context, vnf_instance,
                          scale_vnf_request,
                          vim_connection_info):
    """Update VnfcResourceInfo after scaling.

    Resolves the scaled VDU from the TOSCA template and the stored
    VnfResource records, then reconciles ``vnfc_resource_info``
    against the Pods actually running on the VIM: entries for removed
    Pods are dropped and new entries are added for created Pods.

    :param context: request context
    :param vnf_instance: VnfInstance with instantiated_vnf_info
    :param scale_vnf_request: ScaleVnfRequest carrying the aspect_id
    :param vim_connection_info: VimConnectionInfo for the kubernetes VIM
    :raises: re-raises any error from the VIM/TOSCA parsing unchanged
    """
    auth_attr = vim_connection_info.access_info
    auth_cred, file_descriptor = self._get_auth_creds(auth_attr)
    inst_vnf_info = vnf_instance.instantiated_vnf_info
    try:
        # initialize Kubernetes APIs
        core_v1_api_client = self.kubernetes.get_core_v1_api_client(
            auth=auth_cred)
        vnf_resources = objects.VnfResourceList.get_by_vnf_instance_id(
            context, vnf_instance.id)
        # get scale target informations
        vnfd_dict = vnflcm_utils._get_vnfd_dict(context,
                                                vnf_instance.vnfd_id,
                                                inst_vnf_info.flavour_id)
        tosca = tosca_template.ToscaTemplate(parsed_params={},
                                             a_file=False,
                                             yaml_dict_tpl=vnfd_dict)
        extract_policy_infos = vnflcm_utils.get_extract_policy_infos(tosca)
        vdu_defs = vnflcm_utils.get_target_vdu_def_dict(
            extract_policy_infos=extract_policy_infos,
            aspect_id=scale_vnf_request.aspect_id,
            tosca=tosca)
        is_found = False
        for vnf_resource in vnf_resources:
            # For CNF operations, Kubernetes resource information is
            # stored in vnfc_resource as follows:
            #   - resource_name : "namespace,name"
            #   - resource_type : "api_version,kind"
            rsc_name = vnf_resource.resource_name.split(',')[1]
            for vdu_id, vdu_def in vdu_defs.items():
                vdu_properties = vdu_def.get('properties')
                if rsc_name == vdu_properties.get('name'):
                    is_found = True
                    namespace = vnf_resource.resource_name.split(',')[0]
                    rsc_kind = vnf_resource.resource_type.split(',')[1]
                    target_vdu_id = vdu_id
                    break
            if is_found:
                break
        # NOTE(review): if no VnfResource matches a VDU definition,
        # namespace/rsc_kind/target_vdu_id are unbound below and a
        # NameError is raised — presumably scaling always has a matching
        # resource; TODO confirm.
        # extract stored Pod names by vdu_id
        stored_pod_list = []
        metadata = None
        for vnfc_resource in inst_vnf_info.vnfc_resource_info:
            if vnfc_resource.vdu_id == target_vdu_id:
                stored_pod_list.append(
                    vnfc_resource.compute_resource.resource_id)
                if not metadata:
                    # get metadata for new VnfcResourceInfo entry
                    metadata = vnfc_resource.metadata
        # get actual Pod name list
        pod_list = core_v1_api_client.list_namespaced_pod(
            namespace=namespace)
        actual_pod_list = []
        for pod in pod_list.items:
            match_result = self._is_match_pod_naming_rule(
                rsc_kind, rsc_name, pod.metadata.name)
            if match_result:
                actual_pod_list.append(pod.metadata.name)
        # Remove the reduced pods from VnfcResourceInfo
        del_index = []
        for index, vnfc in enumerate(inst_vnf_info.vnfc_resource_info):
            if vnfc.compute_resource.resource_id not in actual_pod_list \
                    and vnfc.vdu_id == target_vdu_id:
                del_index.append(index)
        # pop from the end so earlier indices stay valid
        for ind in reversed(del_index):
            inst_vnf_info.vnfc_resource_info.pop(ind)
        # Add the increased pods to VnfcResourceInfo
        for actual_pod_name in actual_pod_list:
            if actual_pod_name not in stored_pod_list:
                add_vnfc_resource = objects.VnfcResourceInfo()
                add_vnfc_resource.id = uuidutils.generate_uuid()
                add_vnfc_resource.vdu_id = target_vdu_id
                resource = objects.ResourceHandle()
                resource.resource_id = actual_pod_name
                resource.vim_level_resource_type = rsc_kind
                add_vnfc_resource.compute_resource = resource
                add_vnfc_resource.metadata = metadata
                inst_vnf_info.vnfc_resource_info.append(
                    add_vnfc_resource)
    except Exception as e:
        LOG.error('Update vnfc resource info got an error due to %s', e)
        raise
    finally:
        self.clean_authenticate_vim(auth_cred, file_descriptor)
def scale_in_reverse(self,
context,

View File

@ -91,7 +91,7 @@ class VnfNoop(abstract_driver.VnfAbstractDriver):
pass
def post_vnf_instantiation(self, context, vnf_instance,
vim_connection_info):
vim_connection_info, instantiate_vnf_req):
pass
def heal_vnf(self, context, vnf_instance, vim_connection_info,

View File

@ -907,7 +907,7 @@ class OpenStack(abstract_driver.VnfAbstractDriver,
@log.log
def post_vnf_instantiation(self, context, vnf_instance,
vim_connection_info):
vim_connection_info, instantiate_vnf_req):
inst_vnf_info = vnf_instance.instantiated_vnf_info
access_info = vim_connection_info.access_info
@ -1221,7 +1221,8 @@ class OpenStack(abstract_driver.VnfAbstractDriver,
heatclient.update(stack_id=inst_vnf_info.instance_id, existing=True)
@log.log
def heal_vnf_wait(self, context, vnf_instance, vim_connection_info):
def heal_vnf_wait(self, context, vnf_instance, vim_connection_info,
heal_vnf_request):
"""Check vnf is healed successfully"""
access_info = vim_connection_info.access_info