Implements a new interface for the Kubernetes VIM to handle Helm charts. It enables users to include Helm chart files as MCIOPs in their VNF Packages and to instantiate and terminate CNFs with them. It also updates the MgmtDriver sample to install and configure Helm so that the Helm CLI can be used in the deployed Kubernetes cluster VNF, and to restore the registered Helm repositories and charts after the master node is healed.

Implements: blueprint helmchart-k8s-vim
Change-Id: I8511b103841d5aba7edcf9ec5bb974bfa3a74bb2
25 changed files with 2073 additions and 86 deletions
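For context, the new interface is driven through the `additionalParams` of the instantiate request. The sketch below mirrors the parameters used by the functional test added in this change (only the surrounding dict/print scaffolding is added here); it shows the two supported modes, a chart file packaged in the CSAR as an MCIOP and a chart pulled from an external Helm repository.

    # Illustrative request body for
    # POST /vnflcm/v1/vnf_instances/{id}/instantiate, mirroring the
    # additionalParams used by the functional test in this change.
    from oslo_serialization import jsonutils

    instantiate_body = {
        "flavourId": "helmchart",
        "additionalParams": {
            "namespace": "default",
            "use_helm": "true",
            "using_helm_install_param": [
                {   # local Helm chart packaged in the CSAR as an MCIOP
                    "exthelmchart": "false",
                    "helmchartfile_path":
                        "Files/kubernetes/localhelm-0.1.0.tgz",
                    "helmreleasename": "vdu1",
                    "helmparameter": ["service.port=8081"],
                },
                {   # chart pulled from an external Helm repository
                    "exthelmchart": "true",
                    "helmreleasename": "vdu2",
                    "helmrepositoryname": "bitnami",
                    "helmchartname": "apache",
                    "exthelmrepo_url": "https://charts.bitnami.com/bitnami",
                },
            ],
        },
    }
    print(jsonutils.dumps(instantiate_body, indent=2))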
@@ -0,0 +1,10 @@
---
features:
  - |
    Add a new interface for the Kubernetes VIM to handle Helm charts. It
    enables users to include Helm chart files as MCIOPs in their VNF
    Packages and to instantiate and terminate CNFs with them.
    Also update the MgmtDriver sample to install and configure Helm so that
    the Helm CLI can be used in the deployed Kubernetes cluster VNF, and to
    restore the registered Helm repositories and charts after the master
    node is healed.
@@ -0,0 +1,49 @@
#!/bin/bash
set -o xtrace

###############################################################################
#
# This script will install and set up Helm for Tacker.
#
###############################################################################

declare -g HELM_VERSION="3.5.4"
declare -g HELM_CHART_DIR="/var/tacker/helm"

# Install Helm
#-------------
function install_helm {
    wget -P /tmp https://get.helm.sh/helm-v$HELM_VERSION-linux-amd64.tar.gz
    tar zxf /tmp/helm-v$HELM_VERSION-linux-amd64.tar.gz -C /tmp
    sudo mv /tmp/linux-amd64/helm /usr/local/bin/helm
}

# Install sshpass
#----------------
function install_sshpass {
    sudo apt-get install -y sshpass
}

# Create helm chart directory
#----------------------------
function create_helm_chart_dir {
    sudo mkdir -p $HELM_CHART_DIR
}

# Set proxy to environment
#-------------------------
function set_env_proxy {
    cat <<EOF | sudo tee -a /etc/environment >/dev/null
http_proxy=${http_proxy//%40/@}
https_proxy=${https_proxy//%40/@}
no_proxy=$no_proxy
EOF
}

# Main
# ----
install_helm
install_sshpass
create_helm_chart_dir
set_env_proxy
exit 0
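The script above only installs the Helm CLI, sshpass, and the chart directory on the cluster VNF. As a rough sketch only (not the actual MgmtDriver code), the commands it enables could be driven over SSH roughly as follows; the host, credentials, and helper name are hypothetical, while the `helm repo add` / `helm install` commands correspond to the external-chart parameters used by the sample package.

    # Rough sketch (assumption, not the shipped MgmtDriver) of driving the
    # Helm CLI installed by the script above over SSH.
    import paramiko


    def helm_install_external_chart(host, username, password,
                                    release, repo_name, repo_url, chart):
        client = paramiko.SSHClient()
        client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        client.connect(host, username=username, password=password)
        try:
            for cmd in (
                    "helm repo add {} {}".format(repo_name, repo_url),
                    "helm repo update",
                    "helm install {} {}/{}".format(release, repo_name, chart)):
                _, stdout, stderr = client.exec_command(cmd)
                if stdout.channel.recv_exit_status() != 0:
                    raise RuntimeError(stderr.read().decode())
        finally:
            client.close()


    # e.g. the external-chart case from the sample VNF package:
    # helm_install_external_chart("192.168.0.10", "ubuntu", "password",
    #                             "vdu2", "bitnami",
    #                             "https://charts.bitnami.com/bitnami",
    #                             "apache")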
@@ -0,0 +1,151 @@
tosca_definitions_version: tosca_simple_yaml_1_2

description: Sample CNF with helmchart

imports:
  - etsi_nfv_sol001_common_types.yaml
  - etsi_nfv_sol001_vnfd_types.yaml
  - sample_vnfd_types.yaml

topology_template:
  inputs:
    descriptor_id:
      type: string
    descriptor_version:
      type: string
    provider:
      type: string
    product_name:
      type: string
    software_version:
      type: string
    vnfm_info:
      type: list
      entry_schema:
        type: string
    flavour_id:
      type: string
    flavour_description:
      type: string

  substitution_mappings:
    node_type: company.provider.VNF
    properties:
      flavour_id: helmchart
    requirements:
      virtual_link_external: []

  node_templates:
    VNF:
      type: company.provider.VNF
      properties:
        flavour_description: A flavour for single resources

    VDU1:
      type: tosca.nodes.nfv.Vdu.Compute
      properties:
        name: vdu1-localhelm
        description: kubernetes resource as VDU1
        vdu_profile:
          min_number_of_instances: 1
          max_number_of_instances: 3

    VDU2:
      type: tosca.nodes.nfv.Vdu.Compute
      properties:
        name: vdu2-apache
        description: kubernetes resource as VDU2
        vdu_profile:
          min_number_of_instances: 1
          max_number_of_instances: 3

  policies:
    - scaling_aspects:
        type: tosca.policies.nfv.ScalingAspects
        properties:
          aspects:
            vdu1_aspect:
              name: vdu1_aspect
              description: vdu1 scaling aspect
              max_scale_level: 2
              step_deltas:
                - delta_1
            vdu2_aspect:
              name: vdu2_aspect
              description: vdu2 scaling aspect
              max_scale_level: 2
              step_deltas:
                - delta_1

    - vdu1_initial_delta:
        type: tosca.policies.nfv.VduInitialDelta
        properties:
          initial_delta:
            number_of_instances: 1
        targets: [ VDU1 ]

    - vdu1_scaling_aspect_deltas:
        type: tosca.policies.nfv.VduScalingAspectDeltas
        properties:
          aspect: vdu1_aspect
          deltas:
            delta_1:
              number_of_instances: 1
        targets: [ VDU1 ]

    - vdu2_initial_delta:
        type: tosca.policies.nfv.VduInitialDelta
        properties:
          initial_delta:
            number_of_instances: 1
        targets: [ VDU2 ]

    - vdu2_scaling_aspect_deltas:
        type: tosca.policies.nfv.VduScalingAspectDeltas
        properties:
          aspect: vdu2_aspect
          deltas:
            delta_1:
              number_of_instances: 1
        targets: [ VDU2 ]

    - instantiation_levels:
        type: tosca.policies.nfv.InstantiationLevels
        properties:
          levels:
            instantiation_level_1:
              description: Smallest size
              scale_info:
                vdu1_aspect:
                  scale_level: 0
                vdu2_aspect:
                  scale_level: 0
            instantiation_level_2:
              description: Largest size
              scale_info:
                vdu1_aspect:
                  scale_level: 2
                vdu2_aspect:
                  scale_level: 2
          default_level: instantiation_level_1

    - vdu1_instantiation_levels:
        type: tosca.policies.nfv.VduInstantiationLevels
        properties:
          levels:
            instantiation_level_1:
              number_of_instances: 1
            instantiation_level_2:
              number_of_instances: 3
        targets: [ VDU1 ]

    - vdu2_instantiation_levels:
        type: tosca.policies.nfv.VduInstantiationLevels
        properties:
          levels:
            instantiation_level_1:
              number_of_instances: 1
            instantiation_level_2:
              number_of_instances: 3
        targets: [ VDU2 ]

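As a quick worked check (illustrative only) of the scaling policies in this flavour: each aspect has `max_scale_level: 2`, `initial_delta` of 1, and `delta_1` of 1, so the instance count per VDU is `initial_delta + scale_level * delta`, staying within the `vdu_profile` bounds of 1 to 3.

    # Illustrative only: map a scale level to a VDU instance count using the
    # values defined by the scaling policies above.
    def instances_at_level(scale_level, initial_delta=1, delta=1,
                           max_scale_level=2, max_instances=3):
        assert 0 <= scale_level <= max_scale_level
        count = initial_delta + scale_level * delta
        return min(count, max_instances)


    # instantiation_level_1 (scale_level 0) -> 1 instance per VDU
    # instantiation_level_2 (scale_level 2) -> 3 instances per VDU
    assert instances_at_level(0) == 1
    assert instances_at_level(2) == 3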
@@ -0,0 +1,31 @@
tosca_definitions_version: tosca_simple_yaml_1_2

description: Sample CNF with Helm

imports:
  - etsi_nfv_sol001_common_types.yaml
  - etsi_nfv_sol001_vnfd_types.yaml
  - sample_vnfd_types.yaml
  - sample_vnfd_df_helmchart.yaml

topology_template:
  inputs:
    selected_flavour:
      type: string
      description: VNF deployment flavour selected by the consumer. It is provided in the API

  node_templates:
    VNF:
      type: company.provider.VNF
      properties:
        flavour_id: { get_input: selected_flavour }
        descriptor_id: b1bb0ce7-ebca-4fa7-95ed-4840d70a1177
        provider: Company
        product_name: Sample CNF
        software_version: '1.0'
        descriptor_version: '1.0'
        vnfm_info:
          - Tacker
      requirements:
        #- virtual_link_external # mapped in lower-level templates
        #- virtual_link_internal # mapped in lower-level templates
@@ -0,0 +1,53 @@
tosca_definitions_version: tosca_simple_yaml_1_2

description: VNF type definition

imports:
  - etsi_nfv_sol001_common_types.yaml
  - etsi_nfv_sol001_vnfd_types.yaml

node_types:
  company.provider.VNF:
    derived_from: tosca.nodes.nfv.VNF
    properties:
      descriptor_id:
        type: string
        constraints: [ valid_values: [ b1bb0ce7-ebca-4fa7-95ed-4840d70a1177 ] ]
        default: b1bb0ce7-ebca-4fa7-95ed-4840d70a1177
      descriptor_version:
        type: string
        constraints: [ valid_values: [ '1.0' ] ]
        default: '1.0'
      provider:
        type: string
        constraints: [ valid_values: [ 'Company' ] ]
        default: 'Company'
      product_name:
        type: string
        constraints: [ valid_values: [ 'Sample CNF' ] ]
        default: 'Sample CNF'
      software_version:
        type: string
        constraints: [ valid_values: [ '1.0' ] ]
        default: '1.0'
      vnfm_info:
        type: list
        entry_schema:
          type: string
          constraints: [ valid_values: [ Tacker ] ]
        default: [ Tacker ]
      flavour_id:
        type: string
        constraints: [ valid_values: [ helmchart ] ]
        default: helmchart
      flavour_description:
        type: string
        default: ""
    requirements:
      - virtual_link_external:
          capability: tosca.capabilities.nfv.VirtualLinkable
      - virtual_link_internal:
          capability: tosca.capabilities.nfv.VirtualLinkable
    interfaces:
      Vnflcm:
        type: tosca.interfaces.nfv.Vnflcm
Binary file not shown.
@@ -0,0 +1,9 @@
TOSCA-Meta-File-Version: 1.0
Created-by: dummy_user
CSAR-Version: 1.1
Entry-Definitions: Definitions/sample_vnfd_top.yaml

Name: Files/kubernetes/localhelm-0.1.0.tgz
Content-Type: application/tar+gzip
Algorithm: SHA-256
Hash: 837fcfb73e5fc58572851a80a0143373d9d28ec37bd3bdf52c4d7d34b97592d5
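The artifact entry above ties the packaged chart archive to its SHA-256 digest; a minimal sketch of that check (the path is relative to the CSAR root, and only the helper name is invented here) looks like this.

    # Minimal sketch: the SHA-256 digest of the chart archive in the CSAR must
    # match the Hash value declared in TOSCA.meta above.
    import hashlib


    def sha256_of(path, chunk_size=65536):
        digest = hashlib.sha256()
        with open(path, 'rb') as f:
            for chunk in iter(lambda: f.read(chunk_size), b''):
                digest.update(chunk)
        return digest.hexdigest()


    expected = ("837fcfb73e5fc58572851a80a0143373"
                "d9d28ec37bd3bdf52c4d7d34b97592d5")
    assert sha256_of("Files/kubernetes/localhelm-0.1.0.tgz") == expected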
@@ -0,0 +1,447 @@
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import os
import time

from oslo_serialization import jsonutils
from oslo_utils import uuidutils
from sqlalchemy import desc
from sqlalchemy.orm import joinedload

from tacker.common import exceptions
from tacker import context
from tacker.db import api as db_api
from tacker.db.db_sqlalchemy import api
from tacker.db.db_sqlalchemy import models
from tacker.objects import fields
from tacker.objects import vnf_lcm_op_occs
from tacker.tests.functional import base
from tacker.tests import utils

VNF_PACKAGE_UPLOAD_TIMEOUT = 300
VNF_INSTANTIATE_TIMEOUT = 600
VNF_TERMINATE_TIMEOUT = 600
VNF_HEAL_TIMEOUT = 600
VNF_SCALE_TIMEOUT = 600
RETRY_WAIT_TIME = 5


def _create_and_upload_vnf_package(tacker_client, csar_package_name,
                                   user_defined_data):
    # create vnf package
    body = jsonutils.dumps({"userDefinedData": user_defined_data})
    resp, vnf_package = tacker_client.do_request(
        '/vnfpkgm/v1/vnf_packages', "POST", body=body)

    # upload vnf package
    csar_package_path = "../../../etc/samples/etsi/nfv/{}".format(
        csar_package_name)
    file_path = os.path.abspath(os.path.join(os.path.dirname(__file__),
                                             csar_package_path))

    # Generating unique vnfd id. This is required when multiple workers
    # are running concurrently. The call below creates a new temporary
    # CSAR with unique vnfd id.
    file_path, uniqueid = utils.create_csar_with_unique_vnfd_id(file_path)

    with open(file_path, 'rb') as file_object:
        resp, resp_body = tacker_client.do_request(
            '/vnfpkgm/v1/vnf_packages/{}/package_content'.format(
                vnf_package['id']),
            "PUT", body=file_object, content_type='application/zip')

    # wait for onboard
    start_time = int(time.time())
    show_url = os.path.join('/vnfpkgm/v1/vnf_packages', vnf_package['id'])
    vnfd_id = None
    while True:
        resp, body = tacker_client.do_request(show_url, "GET")
        if body['onboardingState'] == "ONBOARDED":
            vnfd_id = body['vnfdId']
            break

        if ((int(time.time()) - start_time) > VNF_PACKAGE_UPLOAD_TIMEOUT):
            raise Exception("Failed to onboard vnf package, process could not"
                            " be completed within {} seconds".format(
                                VNF_PACKAGE_UPLOAD_TIMEOUT))

        time.sleep(RETRY_WAIT_TIME)

    # remove temporarily created CSAR file
    os.remove(file_path)
    return vnf_package['id'], vnfd_id


class VnfLcmKubernetesHelmTest(base.BaseTackerTest):

    @classmethod
    def setUpClass(cls):
        cls.tacker_client = base.BaseTackerTest.tacker_http_client()
        cls.vnf_package_resource, cls.vnfd_id_resource = \
            _create_and_upload_vnf_package(
                cls.tacker_client, "test_cnf_helmchart",
                {"key": "sample_helmchart_functional"})
        cls.vnf_instance_ids = []
        super(VnfLcmKubernetesHelmTest, cls).setUpClass()

    @classmethod
    def tearDownClass(cls):
        # Update vnf package operational state to DISABLED
        update_req_body = jsonutils.dumps({
            "operationalState": "DISABLED"})
        base_path = "/vnfpkgm/v1/vnf_packages"
        for package_id in [cls.vnf_package_resource]:
            resp, resp_body = cls.tacker_client.do_request(
                '{base_path}/{id}'.format(id=package_id,
                                          base_path=base_path),
                "PATCH", content_type='application/json',
                body=update_req_body)

            # Delete vnf package
            url = '/vnfpkgm/v1/vnf_packages/{}'.format(package_id)
            cls.tacker_client.do_request(url, "DELETE")

        super(VnfLcmKubernetesHelmTest, cls).tearDownClass()

    def setUp(self):
        super(VnfLcmKubernetesHelmTest, self).setUp()
        self.base_vnf_instances_url = "/vnflcm/v1/vnf_instances"
        self.base_vnf_lcm_op_occs_url = "/vnflcm/v1/vnf_lcm_op_occs"
        self.context = context.get_admin_context()
        vim_list = self.client.list_vims()
        if not vim_list:
            self.skipTest("Vims are not configured")

        vim_id = 'vim-kubernetes'
        vim = self.get_vim(vim_list, vim_id)
        if not vim:
            self.skipTest("Kubernetes VIM '{}' is missing".format(vim_id))
        self.vim_id = vim['id']

    def _instantiate_vnf_instance_request(
            self, flavour_id, vim_id=None, additional_param=None):
        request_body = {"flavourId": flavour_id}

        if vim_id:
            request_body["vimConnectionInfo"] = [
                {"id": uuidutils.generate_uuid(),
                 "vimId": vim_id,
                 "vimType": "kubernetes"}]

        if additional_param:
            request_body["additionalParams"] = additional_param

        return request_body

    def _create_vnf_instance(self, vnfd_id, vnf_instance_name=None,
                             vnf_instance_description=None):
        request_body = {'vnfdId': vnfd_id}
        if vnf_instance_name:
            request_body['vnfInstanceName'] = vnf_instance_name

        if vnf_instance_description:
            request_body['vnfInstanceDescription'] = vnf_instance_description

        resp, response_body = self.http_client.do_request(
            self.base_vnf_instances_url, "POST",
            body=jsonutils.dumps(request_body))
        return resp, response_body

    def _delete_wait_vnf_instance(self, id):
        url = os.path.join("/vnflcm/v1/vnf_instances", id)
        start_time = int(time.time())
        while True:
            resp, body = self.tacker_client.do_request(url, "DELETE")
            if 204 == resp.status_code:
                break

            if ((int(time.time()) - start_time) > VNF_TERMINATE_TIMEOUT):
                raise Exception("Failed to delete vnf instance, process could"
                                " not be completed within {} seconds".format(
                                    VNF_TERMINATE_TIMEOUT))

            time.sleep(RETRY_WAIT_TIME)

    def _show_vnf_instance(self, id):
        show_url = os.path.join("/vnflcm/v1/vnf_instances", id)
        resp, vnf_instance = self.tacker_client.do_request(show_url, "GET")

        return vnf_instance

    def _vnf_instance_wait(
            self, id,
            instantiation_state=fields.VnfInstanceState.INSTANTIATED,
            timeout=VNF_INSTANTIATE_TIMEOUT):
        show_url = os.path.join("/vnflcm/v1/vnf_instances", id)
        start_time = int(time.time())
        while True:
            resp, body = self.tacker_client.do_request(show_url, "GET")
            if body['instantiationState'] == instantiation_state:
                break

            if ((int(time.time()) - start_time) > timeout):
                raise Exception("Failed to wait vnf instance, process could"
                                " not be completed within {} seconds".format(
                                    timeout))

            time.sleep(RETRY_WAIT_TIME)

    def _instantiate_vnf_instance(self, id, request_body):
        url = os.path.join(self.base_vnf_instances_url, id, "instantiate")
        resp, body = self.http_client.do_request(
            url, "POST", body=jsonutils.dumps(request_body))
        self.assertEqual(202, resp.status_code)
        self._vnf_instance_wait(id)

    def _create_and_instantiate_vnf_instance(self, flavour_id,
                                             additional_params):
        # create vnf instance
        vnf_instance_name = "test_vnf_instance_for_cnf_heal-{}".format(
            uuidutils.generate_uuid())
        vnf_instance_description = "vnf instance for cnf heal testing"
        resp, vnf_instance = self._create_vnf_instance(
            self.vnfd_id_resource, vnf_instance_name=vnf_instance_name,
            vnf_instance_description=vnf_instance_description)

        # instantiate vnf instance
        additional_param = additional_params
        request_body = self._instantiate_vnf_instance_request(
            flavour_id, vim_id=self.vim_id, additional_param=additional_param)

        self._instantiate_vnf_instance(vnf_instance['id'], request_body)
        vnf_instance = self._show_vnf_instance(vnf_instance['id'])
        self.vnf_instance_ids.append(vnf_instance['id'])

        return vnf_instance

    def _terminate_vnf_instance(self, id):
        # Terminate vnf forcefully
        request_body = {
            "terminationType": fields.VnfInstanceTerminationType.FORCEFUL,
        }
        url = os.path.join(self.base_vnf_instances_url, id, "terminate")
        resp, body = self.http_client.do_request(
            url, "POST", body=jsonutils.dumps(request_body))
        self.assertEqual(202, resp.status_code)
        self._vnf_instance_wait(
            id,
            instantiation_state=fields.VnfInstanceState.NOT_INSTANTIATED,
            timeout=VNF_TERMINATE_TIMEOUT)

    def _delete_vnf_instance(self, id):
        self._delete_wait_vnf_instance(id)

        # verify vnf instance is deleted
        url = os.path.join(self.base_vnf_instances_url, id)
        resp, body = self.http_client.do_request(url, "GET")
        self.assertEqual(404, resp.status_code)

    def _scale_vnf_instance(self, id, type, aspect_id,
                            number_of_steps=1):
        url = os.path.join(self.base_vnf_instances_url, id, "scale")
        # generate body
        request_body = {
            "type": type,
            "aspectId": aspect_id,
            "numberOfSteps": number_of_steps}
        resp, body = self.http_client.do_request(
            url, "POST", body=jsonutils.dumps(request_body))
        self.assertEqual(202, resp.status_code)

    def _heal_vnf_instance(self, id, vnfc_instance_id):
        url = os.path.join(self.base_vnf_instances_url, id, "heal")
        # generate body
        request_body = {
            "vnfcInstanceId": vnfc_instance_id}
        resp, body = self.http_client.do_request(
            url, "POST", body=jsonutils.dumps(request_body))
        self.assertEqual(202, resp.status_code)

    @db_api.context_manager.reader
    def _vnf_notify_get_by_id(self, context, vnf_instance_id,
                              columns_to_join=None):
        query = api.model_query(
            context, models.VnfLcmOpOccs,
            read_deleted="no", project_only=True).filter_by(
            vnf_instance_id=vnf_instance_id).order_by(
            desc("created_at"))

        if columns_to_join:
            for column in columns_to_join:
                query = query.options(joinedload(column))

        db_vnflcm_op_occ = query.first()

        if not db_vnflcm_op_occ:
            raise exceptions.VnfInstanceNotFound(id=vnf_instance_id)

        vnflcm_op_occ = vnf_lcm_op_occs.VnfLcmOpOcc.obj_from_db_obj(
            context, db_vnflcm_op_occ)
        return vnflcm_op_occ

    def _wait_vnflcm_op_occs(
            self, context, vnf_instance_id,
            operation_state='COMPLETED'):
        start_time = int(time.time())
        while True:
            vnflcm_op_occ = self._vnf_notify_get_by_id(
                context, vnf_instance_id)

            if vnflcm_op_occ.operation_state == operation_state:
                break

            if ((int(time.time()) - start_time) > VNF_HEAL_TIMEOUT):
                raise Exception("Failed to wait heal instance")

            time.sleep(RETRY_WAIT_TIME)

    def _get_vnfc_resource_info(self, vnf_instance):
        inst_vnf_info = vnf_instance['instantiatedVnfInfo']
        vnfc_resource_info = inst_vnf_info['vnfcResourceInfo']
        return vnfc_resource_info

    def _test_scale_cnf(self, vnf_instance):
        """Test scale in/out CNF"""
        def _test_scale(id, type, aspect_id, previous_level,
                        delta_num=1, number_of_steps=1):
            # scale operation
            self._scale_vnf_instance(id, type, aspect_id, number_of_steps)
            # wait vnflcm_op_occs.operation_state become COMPLETE
            self._wait_vnflcm_op_occs(self.context, id)
            # check scaleStatus after scale operation
            vnf_instance = self._show_vnf_instance(id)
            scale_status_after = \
                vnf_instance['instantiatedVnfInfo']['scaleStatus']
            if type == 'SCALE_OUT':
                expected_level = previous_level + number_of_steps
            else:
                expected_level = previous_level - number_of_steps
            for status in scale_status_after:
                if status.get('aspectId') == aspect_id:
                    self.assertEqual(status.get('scaleLevel'), expected_level)
                    previous_level = status.get('scaleLevel')

            return previous_level

        aspect_id = "vdu1_aspect"
        scale_status_initial = \
            vnf_instance['instantiatedVnfInfo']['scaleStatus']
        self.assertTrue(len(scale_status_initial) > 0)
        for status in scale_status_initial:
            self.assertIsNotNone(status.get('aspectId'))
            self.assertIsNotNone(status.get('scaleLevel'))
            if status.get('aspectId') == aspect_id:
                previous_level = status.get('scaleLevel')

        # test scale out
        previous_level = _test_scale(
            vnf_instance['id'], 'SCALE_OUT', aspect_id, previous_level)

        # test scale in
        previous_level = _test_scale(
            vnf_instance['id'], 'SCALE_IN', aspect_id, previous_level)

    def _test_heal_cnf_with_sol002(self, vnf_instance):
        """Test heal as per SOL002 for CNF"""
        vnf_instance = self._show_vnf_instance(vnf_instance['id'])
        before_vnfc_rscs = self._get_vnfc_resource_info(vnf_instance)

        # get vnfc_instance_id of heal target
        before_pod_name = dict()
        vnfc_instance_id = list()
        for vnfc_rsc in before_vnfc_rscs:
            if vnfc_rsc['vduId'] == "vdu1":
                before_pod_name['vdu1'] = \
                    vnfc_rsc['computeResource']['resourceId']
            elif vnfc_rsc['vduId'] == "vdu2":
                before_pod_name['vdu2'] = \
                    vnfc_rsc['computeResource']['resourceId']
            vnfc_instance_id.append(vnfc_rsc['id'])

        # test heal SOL-002 (partial heal)
        self._heal_vnf_instance(vnf_instance['id'], vnfc_instance_id)
        # wait vnflcm_op_occs.operation_state become COMPLETE
        self._wait_vnflcm_op_occs(self.context, vnf_instance['id'])
        # check vnfcResourceInfo after heal operation
        vnf_instance = self._show_vnf_instance(vnf_instance['id'])
        after_vnfc_rscs = self._get_vnfc_resource_info(vnf_instance)
        self.assertEqual(len(before_vnfc_rscs), len(after_vnfc_rscs))
        for vnfc_rsc in after_vnfc_rscs:
            after_pod_name = vnfc_rsc['computeResource']['resourceId']
            if vnfc_rsc['vduId'] == "vdu1":
                # check stored pod name is changed (vdu1)
                self.assertNotEqual(after_pod_name, before_pod_name['vdu1'])
            elif vnfc_rsc['vduId'] == "vdu2":
                # check stored pod name is changed (vdu2)
                self.assertNotEqual(after_pod_name, before_pod_name['vdu2'])

    def _test_heal_cnf_with_sol003(self, vnf_instance):
        """Test heal as per SOL003 for CNF"""
        vnf_instance = self._show_vnf_instance(vnf_instance['id'])
        before_vnfc_rscs = self._get_vnfc_resource_info(vnf_instance)

        # test heal SOL-003 (entire heal)
        vnfc_instance_id = []
        self._heal_vnf_instance(vnf_instance['id'], vnfc_instance_id)
        # wait vnflcm_op_occs.operation_state become COMPLETE
        self._wait_vnflcm_op_occs(self.context, vnf_instance['id'])
        # check vnfcResourceInfo after heal operation
        vnf_instance = self._show_vnf_instance(vnf_instance['id'])
        after_vnfc_rscs = self._get_vnfc_resource_info(vnf_instance)
        self.assertEqual(len(before_vnfc_rscs), len(after_vnfc_rscs))
        # check id and pod name (as computeResource.resourceId) is changed
        for before_vnfc_rsc in before_vnfc_rscs:
            for after_vnfc_rsc in after_vnfc_rscs:
                self.assertNotEqual(
                    before_vnfc_rsc['id'], after_vnfc_rsc['id'])
                self.assertNotEqual(
                    before_vnfc_rsc['computeResource']['resourceId'],
                    after_vnfc_rsc['computeResource']['resourceId'])

    def test_vnflcm_with_helmchart(self):
        # use def-files of singleton Pod and Deployment (replicas=2)
        helmchartfile_path = "Files/kubernetes/localhelm-0.1.0.tgz"
        inst_additional_param = {
            "namespace": "default",
            "use_helm": "true",
            "using_helm_install_param": [
                {
                    "exthelmchart": "false",
                    "helmchartfile_path": helmchartfile_path,
                    "helmreleasename": "vdu1",
                    "helmparameter": [
                        "service.port=8081"
                    ]
                },
                {
                    "exthelmchart": "true",
                    "helmreleasename": "vdu2",
                    "helmrepositoryname": "bitnami",
                    "helmchartname": "apache",
                    "exthelmrepo_url": "https://charts.bitnami.com/bitnami"
                }
            ]
        }
        vnf_instance = self._create_and_instantiate_vnf_instance(
            "helmchart", inst_additional_param)
        self._test_scale_cnf(vnf_instance)
        self._test_heal_cnf_with_sol002(vnf_instance)
        self._test_heal_cnf_with_sol003(vnf_instance)

        self._terminate_vnf_instance(vnf_instance['id'])
        self._delete_vnf_instance(vnf_instance['id'])