From 667377b849d40846dd750700d7aa40e2097393cc Mon Sep 17 00:00:00 2001 From: Ayumu Ueha Date: Mon, 29 Mar 2021 10:15:48 +0000 Subject: [PATCH] Fix some bugs related to legacy cnf operations This patch fixes some bugs related to legacy cnf operations below: * Delete the "unsupported key check" of policies in TOSCA template, to resolve `openstack vnf create` command failure due to property validation of policies. * Change the branch condition of legacy/sol, to resolve the `openstack vnf scale` command failure due to KeyError. * Add interval to each retry loop of delete_wait_legacy, to resolve the `openstack vnf delete` command failure. And this patch revises the documentation for legacy cnf operations to work properly. Closes-Bug: #1920085 Change-Id: If14b95b2f9dfafe994a5ebf4f447a2bf7d27981c --- .../user/containerized_vnf_usage_guide.rst | 8 +++++- ...sca-vnfd-containerized-two-containers.yaml | 4 ++- .../vnfd/tosca-vnfd-containerized.yaml | 2 ++ .../vnfm/infra_drivers/kubernetes/fakes.py | 27 ++++++++++--------- .../kubernetes/test_kubernetes_driver.py | 15 ++++++----- .../kubernetes/k8s/translate_inputs.py | 7 +++-- .../kubernetes/kubernetes_driver.py | 13 ++++----- 7 files changed, 47 insertions(+), 29 deletions(-) diff --git a/doc/source/user/containerized_vnf_usage_guide.rst b/doc/source/user/containerized_vnf_usage_guide.rst index 35790d171..954197dc8 100644 --- a/doc/source/user/containerized_vnf_usage_guide.rst +++ b/doc/source/user/containerized_vnf_usage_guide.rst @@ -83,6 +83,8 @@ Kubernetes environment with one container per VDU. min_instances: 1 max_instances: 3 target_cpu_utilization_percentage: 40 + default_instances: 1 # required parameter but ignored for cnf + increment: 1 # required parameter but ignored for cnf In "vnfcs", there are 2 components: front_end and rss_reader. We model them as Containers [#second]_ inside a Pod [#third]_. To provide @@ -158,6 +160,8 @@ will be added in the future. 
min_instances: 1 max_instances: 3 target_cpu_utilization_percentage: 40 + default_instances: 1 # required parameter but ignored for cnf + increment: 1 # required parameter but ignored for cnf 2. Two containers per VDU example ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -204,6 +208,8 @@ Similar to the above example, in this scenario, we define 2 containers in VDU1. min_instances: 1 max_instances: 3 target_cpu_utilization_percentage: 40 + default_instances: 1 # required parameter but ignored for cnf + increment: 1 # required parameter but ignored for cnf Viewing a containerized VNF ~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -284,7 +290,7 @@ User also can scale VNF manually, by running the following commands: .. code-block:: console - $ openstack vnf scale --vnf-name VNF1 --scaling-policy-name SP1 --scaling-type out + $ openstack vnf scale --scaling-policy-name SP1 --scaling-type out VNF1 $ kubectl get deployment NAME DESIRED CURRENT UP-TO-DATE AVAILABLE AGE diff --git a/samples/tosca-templates/vnfd/tosca-vnfd-containerized-two-containers.yaml b/samples/tosca-templates/vnfd/tosca-vnfd-containerized-two-containers.yaml index 61a7a6aac..dc3c663b8 100644 --- a/samples/tosca-templates/vnfd/tosca-vnfd-containerized-two-containers.yaml +++ b/samples/tosca-templates/vnfd/tosca-vnfd-containerized-two-containers.yaml @@ -34,4 +34,6 @@ topology_template: properties: min_instances: 1 max_instances: 3 - target_cpu_utilization_percentage: 40 \ No newline at end of file + target_cpu_utilization_percentage: 40 + default_instances: 1 # required parameter but ignored for cnf + increment: 1 # required parameter but ignored for cnf diff --git a/samples/tosca-templates/vnfd/tosca-vnfd-containerized.yaml b/samples/tosca-templates/vnfd/tosca-vnfd-containerized.yaml index cc137768d..a98d45635 100644 --- a/samples/tosca-templates/vnfd/tosca-vnfd-containerized.yaml +++ b/samples/tosca-templates/vnfd/tosca-vnfd-containerized.yaml @@ -34,3 +34,5 @@ topology_template: min_instances: 1 max_instances: 10 
target_cpu_utilization_percentage: 50 + default_instances: 1 # required parameter but ignored for cnf + increment: 1 # required parameter but ignored for cnf diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/fakes.py b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/fakes.py index 85ca86aa1..9900322a9 100644 --- a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/fakes.py +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/fakes.py @@ -1023,21 +1023,24 @@ def fake_pod_list(): ) -def get_scale_policy(type, aspect_id='vdu1', delta_num=1): +def get_scale_policy(type, aspect_id='vdu1', delta_num=1, is_legacy=False): policy = dict() - policy['vnf_instance_id'] = uuidsentinel.vnf_instance_id policy['action'] = type policy['name'] = aspect_id - policy['delta_num'] = delta_num - policy['vdu_defs'] = { - 'VDU1': { - 'type': 'tosca.nodes.nfv.Vdu.Compute', - 'properties': { - 'name': 'fake_name', - 'description': 'test description', - 'vdu_profile': { - 'min_number_of_instances': 1, - 'max_number_of_instances': 3}}}} + if is_legacy: + policy['instance_id'] = "fake_namespace,fake_name" + else: + policy['vnf_instance_id'] = uuidsentinel.vnf_instance_id + policy['delta_num'] = delta_num + policy['vdu_defs'] = { + 'VDU1': { + 'type': 'tosca.nodes.nfv.Vdu.Compute', + 'properties': { + 'name': 'fake_name', + 'description': 'test description', + 'vdu_profile': { + 'min_number_of_instances': 1, + 'max_number_of_instances': 3}}}} return policy diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/test_kubernetes_driver.py b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/test_kubernetes_driver.py index 2a512425a..d055dcc10 100644 --- a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/test_kubernetes_driver.py +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/test_kubernetes_driver.py @@ -2352,7 +2352,8 @@ class TestKubernetes(base.TestCase): mock_read_namespaced_deployment, mock_patch_namespaced_deployment_scale, mock_read_namespaced_horizontal_pod_autoscaler): - 
policy = fakes.get_scale_policy(type=scale_type, aspect_id='SP1') + policy = fakes.get_scale_policy( + type=scale_type, aspect_id='SP1', is_legacy=True) policy['instance_id'] = "fake_namespace,fake_name" mock_vnf_resource_list.return_value = [] mock_read_namespaced_deployment.return_value = \ @@ -2569,8 +2570,8 @@ class TestKubernetes(base.TestCase): @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") def test_scale_wait_legacy(self, mock_vnf_resource_list, mock_list_namespaced_pod): - policy = fakes.get_scale_policy(type='out', aspect_id='SP1') - policy['instance_id'] = "fake_namespace,fake_name" + policy = fakes.get_scale_policy( + type='out', aspect_id='SP1', is_legacy=True) mock_vnf_resource_list.return_value = [] mock_list_namespaced_pod.return_value = \ client.V1PodList(items=[ @@ -2587,8 +2588,8 @@ class TestKubernetes(base.TestCase): @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") def test_scale_wait_legacy_retry_over(self, mock_vnf_resource_list, mock_list_namespaced_pod): - policy = fakes.get_scale_policy(type='out', aspect_id='SP1') - policy['instance_id'] = "fake_namespace,fake_name" + policy = fakes.get_scale_policy( + type='out', aspect_id='SP1', is_legacy=True) mock_vnf_resource_list.return_value = [] mock_list_namespaced_pod.return_value = \ client.V1PodList(items=[ @@ -2603,8 +2604,8 @@ class TestKubernetes(base.TestCase): @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") def test_scale_wait_legacy_status_unknown(self, mock_vnf_resource_list, mock_list_namespaced_pod): - policy = fakes.get_scale_policy(type='out', aspect_id='SP1') - policy['instance_id'] = "fake_namespace,fake_name" + policy = fakes.get_scale_policy( + type='out', aspect_id='SP1', is_legacy=True) mock_vnf_resource_list.return_value = [] mock_list_namespaced_pod.return_value = \ client.V1PodList(items=[ diff --git a/tacker/vnfm/infra_drivers/kubernetes/k8s/translate_inputs.py 
b/tacker/vnfm/infra_drivers/kubernetes/k8s/translate_inputs.py index fb1d8e909..9ab7791bb 100644 --- a/tacker/vnfm/infra_drivers/kubernetes/k8s/translate_inputs.py +++ b/tacker/vnfm/infra_drivers/kubernetes/k8s/translate_inputs.py @@ -189,8 +189,11 @@ class Parser(object): and vdu_name in policy.targets: count = count + 1 policy_props = policy.properties - self.check_unsupported_key(policy_props, - ALLOWED_SCALING_OBJECT_PROPS) + # NOTE(ueha): check_unsupported_key() is commented out to + # resolve vnf create error due to required + # parameters of policies. + # self.check_unsupported_key(policy_props, + # ALLOWED_SCALING_OBJECT_PROPS) scaling_obj.scaling_name = policy.name scaling_obj.target_cpu_utilization_percentage = \ policy_props.get( diff --git a/tacker/vnfm/infra_drivers/kubernetes/kubernetes_driver.py b/tacker/vnfm/infra_drivers/kubernetes/kubernetes_driver.py index 48bd1f490..f08855718 100644 --- a/tacker/vnfm/infra_drivers/kubernetes/kubernetes_driver.py +++ b/tacker/vnfm/infra_drivers/kubernetes/kubernetes_driver.py @@ -930,6 +930,7 @@ class Kubernetes(abstract_driver.VnfAbstractDriver, # If one of objects is still alive, keeps on waiting if count > 0: keep_going = True + time.sleep(self.STACK_RETRY_WAIT) else: keep_going = False except Exception as e: @@ -1123,13 +1124,13 @@ class Kubernetes(abstract_driver.VnfAbstractDriver, """ # initialize Kubernetes APIs auth_cred, file_descriptor = self._get_auth_creds(auth_attr) - vnf_resources = objects.VnfResourceList.get_by_vnf_instance_id( - context, policy['vnf_instance_id']) try: - if not vnf_resources: + if not policy.get('vnf_instance_id'): # execute legacy scale method self._scale_legacy(policy, auth_cred) else: + vnf_resources = objects.VnfResourceList.get_by_vnf_instance_id( + context, policy['vnf_instance_id']) app_v1_api_client = self.kubernetes.get_app_v1_api_client( auth=auth_cred) aspect_id = policy['name'] @@ -1287,13 +1288,13 @@ class Kubernetes(abstract_driver.VnfAbstractDriver, """ # 
initialize Kubernetes APIs auth_cred, file_descriptor = self._get_auth_creds(auth_attr) - vnf_resources = objects.VnfResourceList.get_by_vnf_instance_id( - context, policy['vnf_instance_id']) try: - if not vnf_resources: + if not policy.get('vnf_instance_id'): # execute legacy scale_wait method self._scale_wait_legacy(policy, auth_cred) else: + vnf_resources = objects.VnfResourceList.get_by_vnf_instance_id( + context, policy['vnf_instance_id']) core_v1_api_client = self.kubernetes.get_core_v1_api_client( auth=auth_cred) app_v1_api_client = self.kubernetes.get_app_v1_api_client(