diff --git a/releasenotes/notes/support-cnf-heal-with-etsi-sol-34b404bd2709ad51.yaml b/releasenotes/notes/support-cnf-heal-with-etsi-sol-34b404bd2709ad51.yaml new file mode 100644 index 000000000..771ac7e2b --- /dev/null +++ b/releasenotes/notes/support-cnf-heal-with-etsi-sol-34b404bd2709ad51.yaml @@ -0,0 +1,10 @@ +--- +features: + - | + Add Container based VNF heal operation support with ETSI NFV-SOL002 and + SOL003 v2.6.1 VNF Lifecycle Management. For "Heal VNFC with SOL002", users + can heal Pod (mapped as VNFC) that is singleton or created using controller + resources such as Kubernetes Deployment, DaemonSet, StatefulSet, and + ReplicaSet. For "Heal VNF instance with SOL003", users can heal entire VNF + instance by termination and instantiation of the VNF. And the VNFC resource + information is stored and updated for the heal operation of the Pod. diff --git a/tacker/conductor/conductor_server.py b/tacker/conductor/conductor_server.py index a838a07e6..6e904a2e9 100644 --- a/tacker/conductor/conductor_server.py +++ b/tacker/conductor/conductor_server.py @@ -737,7 +737,8 @@ class Conductor(manager.Manager): self.vnf_manager.invoke(vim_connection_info.vim_type, 'post_vnf_instantiation', context=context, vnf_instance=vnf_instance, - vim_connection_info=vim_connection_info) + vim_connection_info=vim_connection_info, + instantiate_vnf_req=instantiate_vnf_req) except Exception as ex: try: diff --git a/tacker/extensions/vnfm.py b/tacker/extensions/vnfm.py index d3d64bf1b..69a3a8fd6 100644 --- a/tacker/extensions/vnfm.py +++ b/tacker/extensions/vnfm.py @@ -117,6 +117,14 @@ class CNFScaleWaitFailed(exceptions.TackerException): message = _('CNF Scale Wait Failed with reason: %(reason)s') +class CNFHealFailed(exceptions.TackerException): + message = _('%(reason)s') + + +class CNFHealWaitFailed(exceptions.TackerException): + message = _('%(reason)s') + + class ServiceTypeNotFound(exceptions.NotFound): message = _('service type %(service_type_id)s could not be found') diff 
--git a/tacker/tests/etc/samples/etsi/nfv/test_cnf_heal/Definitions/helloworld3_df_complex.yaml b/tacker/tests/etc/samples/etsi/nfv/test_cnf_heal/Definitions/helloworld3_df_complex.yaml new file mode 100644 index 000000000..66baf613b --- /dev/null +++ b/tacker/tests/etc/samples/etsi/nfv/test_cnf_heal/Definitions/helloworld3_df_complex.yaml @@ -0,0 +1,114 @@ +tosca_definitions_version: tosca_simple_yaml_1_2 + +description: Sample VNF + +imports: + - etsi_nfv_sol001_common_types.yaml + - etsi_nfv_sol001_vnfd_types.yaml + - helloworld3_types.yaml + +topology_template: + inputs: + descriptor_id: + type: string + descriptor_version: + type: string + provider: + type: string + product_name: + type: string + software_version: + type: string + vnfm_info: + type: list + entry_schema: + type: string + flavour_id: + type: string + flavour_description: + type: string + + substitution_mappings: + node_type: company.provider.VNF + properties: + flavour_id: complex + requirements: + virtual_link_external: [] + + node_templates: + VNF: + type: company.provider.VNF + properties: + flavour_description: A flavour for multiple resources + + VDU1: + type: tosca.nodes.nfv.Vdu.Compute + properties: + name: vdu1-heal-complex + description: kubernetes resource as VDU1 + vdu_profile: + min_number_of_instances: 2 + max_number_of_instances: 3 + + VDU2: + type: tosca.nodes.nfv.Vdu.Compute + properties: + name: vdu2-heal + description: kubernetes resource as VDU2 + vdu_profile: + min_number_of_instances: 1 + max_number_of_instances: 1 + + policies: + - scaling_aspects: + type: tosca.policies.nfv.ScalingAspects + properties: + aspects: + vdu1_aspect: + name: vdu1_aspect + description: vdu1 scaling aspect + max_scale_level: 1 + step_deltas: + - delta_1 + + - vdu1_initial_delta: + type: tosca.policies.nfv.VduInitialDelta + properties: + initial_delta: + number_of_instances: 1 + targets: [ VDU1 ] + + - vdu1_scaling_aspect_deltas: + type: tosca.policies.nfv.VduScalingAspectDeltas + properties: + 
aspect: vdu1_aspect + deltas: + delta_1: + number_of_instances: 1 + targets: [ VDU1 ] + + - instantiation_levels: + type: tosca.policies.nfv.InstantiationLevels + properties: + levels: + instantiation_level_1: + description: Smallest size + scale_info: + vdu1_aspect: + scale_level: 0 + instantiation_level_2: + description: Largest size + scale_info: + vdu1_aspect: + scale_level: 1 + default_level: instantiation_level_1 + + - vdu1_instantiation_levels: + type: tosca.policies.nfv.VduInstantiationLevels + properties: + levels: + instantiation_level_1: + number_of_instances: 2 + instantiation_level_2: + number_of_instances: 3 + targets: [ VDU1 ] diff --git a/tacker/tests/etc/samples/etsi/nfv/test_cnf_heal/Definitions/helloworld3_df_simple.yaml b/tacker/tests/etc/samples/etsi/nfv/test_cnf_heal/Definitions/helloworld3_df_simple.yaml new file mode 100644 index 000000000..5055acd69 --- /dev/null +++ b/tacker/tests/etc/samples/etsi/nfv/test_cnf_heal/Definitions/helloworld3_df_simple.yaml @@ -0,0 +1,105 @@ +tosca_definitions_version: tosca_simple_yaml_1_2 + +description: Sample VNF + +imports: + - etsi_nfv_sol001_common_types.yaml + - etsi_nfv_sol001_vnfd_types.yaml + - helloworld3_types.yaml + +topology_template: + inputs: + descriptor_id: + type: string + descriptor_version: + type: string + provider: + type: string + product_name: + type: string + software_version: + type: string + vnfm_info: + type: list + entry_schema: + type: string + flavour_id: + type: string + flavour_description: + type: string + + substitution_mappings: + node_type: company.provider.VNF + properties: + flavour_id: simple + requirements: + virtual_link_external: [] + + node_templates: + VNF: + type: company.provider.VNF + properties: + flavour_description: A simple flavour + + VDU1: + type: tosca.nodes.nfv.Vdu.Compute + properties: + name: vdu1-heal-simple + description: kubernetes controller resource as VDU + vdu_profile: + min_number_of_instances: 2 + max_number_of_instances: 3 + + policies: + - 
scaling_aspects: + type: tosca.policies.nfv.ScalingAspects + properties: + aspects: + vdu1_aspect: + name: vdu1_aspect + description: vdu1 scaling aspect + max_scale_level: 1 + step_deltas: + - delta_1 + + - VDU1_initial_delta: + type: tosca.policies.nfv.VduInitialDelta + properties: + initial_delta: + number_of_instances: 1 + targets: [ VDU1 ] + + - VDU1_scaling_aspect_deltas: + type: tosca.policies.nfv.VduScalingAspectDeltas + properties: + aspect: vdu1_aspect + deltas: + delta_1: + number_of_instances: 1 + targets: [ VDU1 ] + + - instantiation_levels: + type: tosca.policies.nfv.InstantiationLevels + properties: + levels: + instantiation_level_1: + description: Smallest size + scale_info: + vdu1_aspect: + scale_level: 0 + instantiation_level_2: + description: Largest size + scale_info: + vdu1_aspect: + scale_level: 1 + default_level: instantiation_level_1 + + - vdu1_instantiation_levels: + type: tosca.policies.nfv.VduInstantiationLevels + properties: + levels: + instantiation_level_1: + number_of_instances: 2 + instantiation_level_2: + number_of_instances: 3 + targets: [ VDU1 ] diff --git a/tacker/tests/etc/samples/etsi/nfv/test_cnf_heal/Definitions/helloworld3_top.vnfd.yaml b/tacker/tests/etc/samples/etsi/nfv/test_cnf_heal/Definitions/helloworld3_top.vnfd.yaml new file mode 100644 index 000000000..f6430a939 --- /dev/null +++ b/tacker/tests/etc/samples/etsi/nfv/test_cnf_heal/Definitions/helloworld3_top.vnfd.yaml @@ -0,0 +1,32 @@ +tosca_definitions_version: tosca_simple_yaml_1_2 + +description: Sample VNF + +imports: + - etsi_nfv_sol001_common_types.yaml + - etsi_nfv_sol001_vnfd_types.yaml + - helloworld3_types.yaml + - helloworld3_df_simple.yaml + - helloworld3_df_complex.yaml + +topology_template: + inputs: + selected_flavour: + type: string + description: VNF deployment flavour selected by the consumer. 
It is provided in the API + + node_templates: + VNF: + type: company.provider.VNF + properties: + flavour_id: { get_input: selected_flavour } + descriptor_id: b1bb0ce7-ebca-4fa7-95ed-4840d70a1177 + provider: Company + product_name: Sample VNF + software_version: '1.0' + descriptor_version: '1.0' + vnfm_info: + - Tacker + requirements: + #- virtual_link_external # mapped in lower-level templates + #- virtual_link_internal # mapped in lower-level templates diff --git a/tacker/tests/etc/samples/etsi/nfv/test_cnf_heal/Definitions/helloworld3_types.yaml b/tacker/tests/etc/samples/etsi/nfv/test_cnf_heal/Definitions/helloworld3_types.yaml new file mode 100644 index 000000000..74a2d3818 --- /dev/null +++ b/tacker/tests/etc/samples/etsi/nfv/test_cnf_heal/Definitions/helloworld3_types.yaml @@ -0,0 +1,53 @@ +tosca_definitions_version: tosca_simple_yaml_1_2 + +description: VNF type definition + +imports: + - etsi_nfv_sol001_common_types.yaml + - etsi_nfv_sol001_vnfd_types.yaml + +node_types: + company.provider.VNF: + derived_from: tosca.nodes.nfv.VNF + properties: + descriptor_id: + type: string + constraints: [ valid_values: [ b1bb0ce7-ebca-4fa7-95ed-4840d70a1177 ] ] + default: b1bb0ce7-ebca-4fa7-95ed-4840d70a1177 + descriptor_version: + type: string + constraints: [ valid_values: [ '1.0' ] ] + default: '1.0' + provider: + type: string + constraints: [ valid_values: [ 'Company' ] ] + default: 'Company' + product_name: + type: string + constraints: [ valid_values: [ 'Sample VNF' ] ] + default: 'Sample VNF' + software_version: + type: string + constraints: [ valid_values: [ '1.0' ] ] + default: '1.0' + vnfm_info: + type: list + entry_schema: + type: string + constraints: [ valid_values: [ Tacker ] ] + default: [ Tacker ] + flavour_id: + type: string + constraints: [ valid_values: [ simple,complex ] ] + default: simple + flavour_description: + type: string + default: "" + requirements: + - virtual_link_external: + capability: tosca.capabilities.nfv.VirtualLinkable + - 
virtual_link_internal: + capability: tosca.capabilities.nfv.VirtualLinkable + interfaces: + Vnflcm: + type: tosca.interfaces.nfv.Vnflcm diff --git a/tacker/tests/etc/samples/etsi/nfv/test_cnf_heal/Files/kubernetes/deployment_heal_complex.yaml b/tacker/tests/etc/samples/etsi/nfv/test_cnf_heal/Files/kubernetes/deployment_heal_complex.yaml new file mode 100644 index 000000000..86b3a5252 --- /dev/null +++ b/tacker/tests/etc/samples/etsi/nfv/test_cnf_heal/Files/kubernetes/deployment_heal_complex.yaml @@ -0,0 +1,22 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: vdu1-heal-complex + namespace: default +spec: + replicas: 2 + selector: + matchLabels: + app: webserver + template: + metadata: + labels: + app: webserver + spec: + containers: + - name: nginx + image: nginx + imagePullPolicy: IfNotPresent + ports: + - containerPort: 80 + protocol: TCP diff --git a/tacker/tests/etc/samples/etsi/nfv/test_cnf_heal/Files/kubernetes/deployment_heal_simple.yaml b/tacker/tests/etc/samples/etsi/nfv/test_cnf_heal/Files/kubernetes/deployment_heal_simple.yaml new file mode 100644 index 000000000..eab20ea2a --- /dev/null +++ b/tacker/tests/etc/samples/etsi/nfv/test_cnf_heal/Files/kubernetes/deployment_heal_simple.yaml @@ -0,0 +1,22 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: vdu1-heal-simple + namespace: default +spec: + replicas: 2 + selector: + matchLabels: + app: webserver + template: + metadata: + labels: + app: webserver + spec: + containers: + - name: nginx + image: nginx + imagePullPolicy: IfNotPresent + ports: + - containerPort: 80 + protocol: TCP diff --git a/tacker/tests/etc/samples/etsi/nfv/test_cnf_heal/Files/kubernetes/pod_heal.yaml b/tacker/tests/etc/samples/etsi/nfv/test_cnf_heal/Files/kubernetes/pod_heal.yaml new file mode 100644 index 000000000..8c0b20993 --- /dev/null +++ b/tacker/tests/etc/samples/etsi/nfv/test_cnf_heal/Files/kubernetes/pod_heal.yaml @@ -0,0 +1,12 @@ +apiVersion: v1 +kind: Pod +metadata: + name: vdu2-heal + namespace: default 
+spec: + containers: + - image: nginx + imagePullPolicy: IfNotPresent + name: webserver2 + ports: + - containerPort: 8080 diff --git a/tacker/tests/etc/samples/etsi/nfv/test_cnf_heal/TOSCA-Metadata/TOSCA.meta b/tacker/tests/etc/samples/etsi/nfv/test_cnf_heal/TOSCA-Metadata/TOSCA.meta new file mode 100644 index 000000000..716018287 --- /dev/null +++ b/tacker/tests/etc/samples/etsi/nfv/test_cnf_heal/TOSCA-Metadata/TOSCA.meta @@ -0,0 +1,19 @@ +TOSCA-Meta-File-Version: 1.0 +Created-by: dummy_user +CSAR-Version: 1.1 +Entry-Definitions: Definitions/helloworld3_top.vnfd.yaml + +Name: Files/kubernetes/pod_heal.yaml +Content-Type: application/yaml +Algorithm: SHA-256 +Hash: 08fabdd52e8a386669f177c0a7a8a351b036bcde3bf399ca1816455d81dd191c + +Name: Files/kubernetes/deployment_heal_simple.yaml +Content-Type: application/yaml +Algorithm: SHA-256 +Hash: 39c9b301d04714c6b124b333057a22d316835c3cb340c4e2ebfadc296c3fbfbc + +Name: Files/kubernetes/deployment_heal_complex.yaml +Content-Type: application/yaml +Algorithm: SHA-256 +Hash: 06c018b9f4b231a604a6cd223a2552fecc4c6dc8bedf9325e84f7fe2b6fe8492 diff --git a/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Definitions/helloworld3_df_simple.yaml b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Definitions/helloworld3_df_simple.yaml new file mode 100644 index 000000000..f36be6b29 --- /dev/null +++ b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Definitions/helloworld3_df_simple.yaml @@ -0,0 +1,124 @@ +tosca_definitions_version: tosca_simple_yaml_1_2 + +description: Simple deployment flavour for Sample VNF + +imports: + - etsi_nfv_sol001_common_types.yaml + - etsi_nfv_sol001_vnfd_types.yaml + - helloworld3_types.yaml + +topology_template: + inputs: + descriptor_id: + type: string + descriptor_version: + type: string + provider: + type: string + product_name: + type: 
string + software_version: + type: string + vnfm_info: + type: list + entry_schema: + type: string + flavour_id: + type: string + flavour_description: + type: string + + substitution_mappings: + node_type: company.provider.VNF + properties: + flavour_id: simple + requirements: + virtual_link_external: [] + + node_templates: + VNF: + type: company.provider.VNF + properties: + flavour_description: A simple flavour + + VDU1: + type: tosca.nodes.nfv.Vdu.Compute + properties: + name: vdu1 + description: VDU1 compute node + vdu_profile: + min_number_of_instances: 1 + max_number_of_instances: 1 + + VDU2: + type: tosca.nodes.nfv.Vdu.Compute + properties: + name: vdu2 + description: VDU2 compute node + vdu_profile: + min_number_of_instances: 1 + max_number_of_instances: 3 + + policies: + - scaling_aspects: + type: tosca.policies.nfv.ScalingAspects + properties: + aspects: + vdu2_aspect: + name: vdu2_aspect + description: vdu2 scaling aspect + max_scale_level: 2 + step_deltas: + - delta_1 + + - VDU2_initial_delta: + type: tosca.policies.nfv.VduInitialDelta + properties: + initial_delta: + number_of_instances: 1 + targets: [ VDU2 ] + + - VDU2_scaling_aspect_deltas: + type: tosca.policies.nfv.VduScalingAspectDeltas + properties: + aspect: vdu2_aspect + deltas: + delta_1: + number_of_instances: 1 + targets: [ VDU2 ] + + - instantiation_levels: + type: tosca.policies.nfv.InstantiationLevels + properties: + levels: + instantiation_level_1: + description: Smallest size + scale_info: + vdu2_aspect: + scale_level: 0 + instantiation_level_2: + description: Largest size + scale_info: + vdu2_aspect: + scale_level: 2 + default_level: instantiation_level_1 + + - VDU1_instantiation_levels: + type: tosca.policies.nfv.VduInstantiationLevels + properties: + levels: + instantiation_level_1: + number_of_instances: 1 + instantiation_level_2: + number_of_instances: 1 + targets: [ VDU1 ] + + - VDU2_instantiation_levels: + type: tosca.policies.nfv.VduInstantiationLevels + properties: + levels: + 
instantiation_level_1: + number_of_instances: 1 + instantiation_level_2: + number_of_instances: 3 + targets: [ VDU2 ] diff --git a/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/daemon-set.yaml b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/daemon-set.yaml index 28e7c893b..fb1996955 100644 --- a/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/daemon-set.yaml +++ b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/daemon-set.yaml @@ -1,7 +1,7 @@ apiVersion: apps/v1 kind: DaemonSet metadata: - name: nginx + name: vdu1 namespace: default spec: selector: diff --git a/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/deployment.yaml b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/deployment.yaml index e58aaa16a..484d42ac7 100644 --- a/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/deployment.yaml +++ b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/deployment.yaml @@ -1,7 +1,7 @@ apiVersion: apps/v1 kind: Deployment metadata: - name: curry-probe-test001 + name: vdu2 namespace: default spec: replicas: 1 diff --git a/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/pod.yaml b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/pod.yaml index 82382deb1..56ce7280d 100644 --- 
a/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/pod.yaml +++ b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/pod.yaml @@ -2,7 +2,7 @@ apiVersion: v1 kind: Pod metadata: namespace: default - name: curry-endpoint-test001 + name: vdu1 spec: containers: - image: celebdor/kuryr-demo diff --git a/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/replicaset_service_secret.yaml b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/replicaset_service_secret.yaml index b6a731d6e..ab3ea9d90 100644 --- a/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/replicaset_service_secret.yaml +++ b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/replicaset_service_secret.yaml @@ -28,7 +28,7 @@ metadata: apiVersion: apps/v1 kind: ReplicaSet metadata: - name: curry-replicaset-multiple + name: vdu2 namespace: default spec: replicas: 2 diff --git a/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/statefulset.yaml b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/statefulset.yaml index ffa6e4751..8292b6e25 100644 --- a/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/statefulset.yaml +++ b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/Files/kubernetes/statefulset.yaml @@ -1,7 +1,7 @@ apiVersion: apps/v1 kind: StatefulSet metadata: - name: curry-ns-statefulset + name: vdu2 namespace: default spec: 
selector: diff --git a/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/TOSCA-Metadata/TOSCA.meta b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/TOSCA-Metadata/TOSCA.meta index eb432cb59..bdeb12777 100644 --- a/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/TOSCA-Metadata/TOSCA.meta +++ b/tacker/tests/etc/samples/etsi/nfv/test_create_vnf_instance_and_instantiate_and_terminate_cnf_resources/TOSCA-Metadata/TOSCA.meta @@ -26,12 +26,12 @@ Hash: 4042352e0de6aa0ad28d44354bd8e0d62fc8e753c8f52b7edf69d2a7a25d8f8d Name: Files/kubernetes/daemon-set.yaml Content-Type: test-data Algorithm: SHA-256 -Hash: c0750df79c9ba2824b032b6a485764486b014021aa6dade5ef61f1c10569412f +Hash: f8ed04536a8795af4828b2f731225abc34986f9ea30237d9652669ca57d9d217 Name: Files/kubernetes/deployment.yaml Content-Type: test-data Algorithm: SHA-256 -Hash: 6a40dfb06764394fb604ae807d1198bc2e2ee8aece3b9483dfde48e53f316a58 +Hash: 80f160c9bdd9daa6d0111c8d40b5575946b8c0f23696aa8d91d20f313adae087 Name: Files/kubernetes/horizontal-pod-autoscaler.yaml Content-Type: test-data @@ -91,12 +91,12 @@ Hash: 5d4d3d399e04cdba1f9c691ac7e690e295ff02b7c935abae873b68a83a858c50 Name: Files/kubernetes/pod.yaml Content-Type: test-data Algorithm: SHA-256 -Hash: a708dcf5ba4d3a7c675f18b71484a32b7e4446e80e57dcc3035b8a921c3f659d +Hash: 6c97b1a8fc8d21a6a9e7ab1c383b49d3ec31f79a83de218f5537d18531ddfbd8 Name: Files/kubernetes/replicaset_service_secret.yaml Content-Type: test-data Algorithm: SHA-256 -Hash: 8ed52e5e167890efd7fba29c748f717dff01d68b60ff9a06af178cbafdfdc765 +Hash: 7d83ba61def65be3203b164b496057e4d062249804df82eba1831111cc4614a0 Name: Files/kubernetes/resource-quota.yaml Content-Type: test-data @@ -116,7 +116,7 @@ Hash: 83bd9c40db8c798d0cab0e793a4b40a4ac7eca4fec4fba89ab4257d0f397db40 Name: Files/kubernetes/statefulset.yaml Content-Type: test-data Algorithm: SHA-256 
-Hash: d0beddd39f6808cb62094146778961b068871393df3474e0787145639a94f649 +Hash: 6829939e8b30a36c69d0e84c65b36701712c89bfbe827536cba8c0cdb15a816b Name: Files/kubernetes/storage-class.yaml Content-Type: test-data diff --git a/tacker/tests/functional/sol_kubernetes/vnflcm/test_kubernetes_heal.py b/tacker/tests/functional/sol_kubernetes/vnflcm/test_kubernetes_heal.py new file mode 100644 index 000000000..3bf194f36 --- /dev/null +++ b/tacker/tests/functional/sol_kubernetes/vnflcm/test_kubernetes_heal.py @@ -0,0 +1,403 @@ +# Copyright (C) 2020 FUJITSU +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import os +import time + +from oslo_serialization import jsonutils +from oslo_utils import uuidutils +from sqlalchemy import desc +from sqlalchemy.orm import joinedload + +from tacker.common import exceptions +from tacker import context +from tacker.db import api as db_api +from tacker.db.db_sqlalchemy import api +from tacker.db.db_sqlalchemy import models +from tacker.objects import fields +from tacker.objects import vnf_lcm_op_occs +from tacker.tests.functional import base +from tacker.tests import utils + +VNF_PACKAGE_UPLOAD_TIMEOUT = 300 +VNF_INSTANTIATE_TIMEOUT = 600 +VNF_TERMINATE_TIMEOUT = 600 +VNF_HEAL_TIMEOUT = 600 +RETRY_WAIT_TIME = 5 + + +def _create_and_upload_vnf_package(tacker_client, csar_package_name, + user_defined_data): + # create vnf package + body = jsonutils.dumps({"userDefinedData": user_defined_data}) + resp, vnf_package = tacker_client.do_request( + '/vnfpkgm/v1/vnf_packages', "POST", body=body) + + # upload vnf package + csar_package_path = "../../../etc/samples/etsi/nfv/%s" % csar_package_name + file_path = os.path.abspath(os.path.join(os.path.dirname(__file__), + csar_package_path)) + + # Generating unique vnfd id. This is required when multiple workers + # are running concurrently. The call below creates a new temporary + # CSAR with unique vnfd id. 
+ file_path, uniqueid = utils.create_csar_with_unique_vnfd_id(file_path) + + with open(file_path, 'rb') as file_object: + resp, resp_body = tacker_client.do_request( + '/vnfpkgm/v1/vnf_packages/{id}/package_content'.format( + id=vnf_package['id']), + "PUT", body=file_object, content_type='application/zip') + + # wait for onboard + start_time = int(time.time()) + show_url = os.path.join('/vnfpkgm/v1/vnf_packages', vnf_package['id']) + vnfd_id = None + while True: + resp, body = tacker_client.do_request(show_url, "GET") + if body['onboardingState'] == "ONBOARDED": + vnfd_id = body['vnfdId'] + break + + if ((int(time.time()) - start_time) > VNF_PACKAGE_UPLOAD_TIMEOUT): + raise Exception("Failed to onboard vnf package, process could not" + " be completed within %d seconds", VNF_PACKAGE_UPLOAD_TIMEOUT) + + time.sleep(RETRY_WAIT_TIME) + + # remove temporarily created CSAR file + os.remove(file_path) + return vnf_package['id'], vnfd_id + + +def _delete_wait_vnf_instance(tacker_client, id): + url = os.path.join("/vnflcm/v1/vnf_instances", id) + start_time = int(time.time()) + while True: + resp, body = tacker_client.do_request(url, "DELETE") + if 204 == resp.status_code: + break + + if ((int(time.time()) - start_time) > VNF_TERMINATE_TIMEOUT): + raise Exception("Failed to delete vnf instance, process could not" + " be completed within %d seconds", VNF_TERMINATE_TIMEOUT) + + time.sleep(RETRY_WAIT_TIME) + + +def _show_vnf_instance(tacker_client, id): + show_url = os.path.join("/vnflcm/v1/vnf_instances", id) + resp, vnf_instance = tacker_client.do_request(show_url, "GET") + + return vnf_instance + + +def _vnf_instance_wait( + tacker_client, id, + instantiation_state=fields.VnfInstanceState.INSTANTIATED, + timeout=VNF_INSTANTIATE_TIMEOUT): + show_url = os.path.join("/vnflcm/v1/vnf_instances", id) + start_time = int(time.time()) + while True: + resp, body = tacker_client.do_request(show_url, "GET") + if body['instantiationState'] == instantiation_state: + break + + if 
((int(time.time()) - start_time) > timeout): + raise Exception("Failed to wait vnf instance, process could not" + " be completed within %d seconds", timeout) + + time.sleep(RETRY_WAIT_TIME) + + +class VnfLcmKubernetesHealTest(base.BaseTackerTest): + + @classmethod + def setUpClass(cls): + cls.tacker_client = base.BaseTackerTest.tacker_http_client() + cls.vnf_package_resource, cls.vnfd_id_resource = \ + _create_and_upload_vnf_package( + cls.tacker_client, "test_cnf_heal", + {"key": "sample_heal_functional"}) + cls.vnf_instance_ids = [] + super(VnfLcmKubernetesHealTest, cls).setUpClass() + + @classmethod + def tearDownClass(cls): + # Update vnf package operational state to DISABLED + update_req_body = jsonutils.dumps({ + "operationalState": "DISABLED"}) + base_path = "/vnfpkgm/v1/vnf_packages" + for package_id in [cls.vnf_package_resource]: + resp, resp_body = cls.tacker_client.do_request( + '{base_path}/{id}'.format(id=package_id, + base_path=base_path), + "PATCH", content_type='application/json', + body=update_req_body) + + # Delete vnf package + url = '/vnfpkgm/v1/vnf_packages/%s' % package_id + cls.tacker_client.do_request(url, "DELETE") + + super(VnfLcmKubernetesHealTest, cls).tearDownClass() + + def setUp(self): + super(VnfLcmKubernetesHealTest, self).setUp() + self.base_vnf_instances_url = "/vnflcm/v1/vnf_instances" + self.base_vnf_lcm_op_occs_url = "/vnflcm/v1/vnf_lcm_op_occs" + self.context = context.get_admin_context() + vim_list = self.client.list_vims() + if not vim_list: + self.skipTest("Vims are not configured") + + vim_id = 'vim-kubernetes' + vim = self.get_vim(vim_list, vim_id) + if not vim: + self.skipTest("Kubernetes VIM '%s' is missing" % vim_id) + self.vim_id = vim['id'] + + def _instantiate_vnf_instance_request( + self, flavour_id, vim_id=None, additional_param=None): + request_body = {"flavourId": flavour_id} + + if vim_id: + request_body["vimConnectionInfo"] = [ + {"id": uuidutils.generate_uuid(), + "vimId": vim_id, + "vimType": "kubernetes"}] 
+ + if additional_param: + request_body["additionalParams"] = additional_param + + return request_body + + def _create_vnf_instance(self, vnfd_id, vnf_instance_name=None, + vnf_instance_description=None): + request_body = {'vnfdId': vnfd_id} + if vnf_instance_name: + request_body['vnfInstanceName'] = vnf_instance_name + + if vnf_instance_description: + request_body['vnfInstanceDescription'] = vnf_instance_description + + resp, response_body = self.http_client.do_request( + self.base_vnf_instances_url, "POST", + body=jsonutils.dumps(request_body)) + return resp, response_body + + def _instantiate_vnf_instance(self, id, request_body): + url = os.path.join(self.base_vnf_instances_url, id, "instantiate") + resp, body = self.http_client.do_request( + url, "POST", body=jsonutils.dumps(request_body)) + self.assertEqual(202, resp.status_code) + _vnf_instance_wait(self.tacker_client, id) + + def _create_and_instantiate_vnf_instance(self, flavour_id, + additional_params): + # create vnf instance + vnf_instance_name = "test_vnf_instance_for_cnf_heal-%s" % \ + uuidutils.generate_uuid() + vnf_instance_description = "vnf instance for cnf heal testing" + resp, vnf_instance = self._create_vnf_instance( + self.vnfd_id_resource, vnf_instance_name=vnf_instance_name, + vnf_instance_description=vnf_instance_description) + + # instantiate vnf instance + additional_param = additional_params + request_body = self._instantiate_vnf_instance_request( + flavour_id, vim_id=self.vim_id, additional_param=additional_param) + + self._instantiate_vnf_instance(vnf_instance['id'], request_body) + vnf_instance = _show_vnf_instance( + self.tacker_client, vnf_instance['id']) + self.vnf_instance_ids.append(vnf_instance['id']) + + return vnf_instance + + def _terminate_vnf_instance(self, id): + # Terminate vnf forcefully + request_body = { + "terminationType": fields.VnfInstanceTerminationType.FORCEFUL, + } + url = os.path.join(self.base_vnf_instances_url, id, "terminate") + resp, body = 
self.http_client.do_request( + url, "POST", body=jsonutils.dumps(request_body)) + self.assertEqual(202, resp.status_code) + _vnf_instance_wait( + self.tacker_client, id, + instantiation_state=fields.VnfInstanceState.NOT_INSTANTIATED, + timeout=VNF_TERMINATE_TIMEOUT) + + def _delete_vnf_instance(self, id): + _delete_wait_vnf_instance(self.tacker_client, id) + + # verify vnf instance is deleted + url = os.path.join(self.base_vnf_instances_url, id) + resp, body = self.http_client.do_request(url, "GET") + self.assertEqual(404, resp.status_code) + + def _heal_vnf_instance(self, id, vnfc_instance_id): + url = os.path.join(self.base_vnf_instances_url, id, "heal") + # generate body + request_body = { + "vnfcInstanceId": vnfc_instance_id} + resp, body = self.http_client.do_request( + url, "POST", body=jsonutils.dumps(request_body)) + self.assertEqual(202, resp.status_code) + + @db_api.context_manager.reader + def _vnf_notify_get_by_id(self, context, vnf_instance_id, + columns_to_join=None): + query = api.model_query( + context, models.VnfLcmOpOccs, + read_deleted="no", project_only=True).filter_by( + vnf_instance_id=vnf_instance_id).order_by( + desc("created_at")) + + if columns_to_join: + for column in columns_to_join: + query = query.options(joinedload(column)) + + db_vnflcm_op_occ = query.first() + + if not db_vnflcm_op_occ: + raise exceptions.VnfInstanceNotFound(id=vnf_instance_id) + + vnflcm_op_occ = vnf_lcm_op_occs.VnfLcmOpOcc.obj_from_db_obj( + context, db_vnflcm_op_occ) + return vnflcm_op_occ + + def _wait_vnflcm_op_occs( + self, context, vnf_instance_id, + operation_state='COMPLETED'): + start_time = int(time.time()) + while True: + vnflcm_op_occ = self._vnf_notify_get_by_id( + context, vnf_instance_id) + + if vnflcm_op_occ.operation_state == operation_state: + break + + if ((int(time.time()) - start_time) > VNF_HEAL_TIMEOUT): + raise Exception("Failed to wait heal instance") + + time.sleep(RETRY_WAIT_TIME) + + def _get_vnfc_resource_info(self, vnf_instance): + 
inst_vnf_info = vnf_instance['instantiatedVnfInfo'] + vnfc_resource_info = inst_vnf_info['vnfcResourceInfo'] + return vnfc_resource_info + + def test_heal_cnf_with_sol002(self): + """Test heal as per SOL002 for CNF + + This test will instantiate cnf. Heal API will be invoked as per SOL002 + i.e. with vnfcInstanceId, so that the specified vnfc instance is healed + which includes Kubernetes resources (Pod and Deployment). + """ + # use def-files of singleton Pod and Deployment (replicas=2) + inst_additional_param = { + "lcm-kubernetes-def-files": [ + "Files/kubernetes/deployment_heal_complex.yaml", + "Files/kubernetes/pod_heal.yaml"]} + vnf_instance = self._create_and_instantiate_vnf_instance( + "complex", inst_additional_param) + before_vnfc_rscs = self._get_vnfc_resource_info(vnf_instance) + + # get vnfc_instance_id of heal target + deployment_target_vnfc = None + for vnfc_rsc in before_vnfc_rscs: + compute_resource = vnfc_rsc['computeResource'] + rsc_kind = compute_resource['vimLevelResourceType'] + if rsc_kind == 'Pod': + # target 1: Singleton Pod + pod_target_vnfc = vnfc_rsc + elif not deployment_target_vnfc: + # target 2: Deployment's Pod + deployment_target_vnfc = vnfc_rsc + else: + # not target: Deployment's remaining one + deployment_not_target_vnfc = vnfc_rsc + + # test heal SOL-002 (partial heal) + vnfc_instance_id = \ + [pod_target_vnfc['id'], deployment_target_vnfc['id']] + self._heal_vnf_instance(vnf_instance['id'], vnfc_instance_id) + # wait vnflcm_op_occs.operation_state becomes COMPLETED + self._wait_vnflcm_op_occs(self.context, vnf_instance['id']) + # check vnfcResourceInfo after heal operation + vnf_instance = _show_vnf_instance( + self.tacker_client, vnf_instance['id']) + after_vnfc_rscs = self._get_vnfc_resource_info(vnf_instance) + self.assertEqual(len(before_vnfc_rscs), len(after_vnfc_rscs)) + for vnfc_rsc in after_vnfc_rscs: + after_pod_name = vnfc_rsc['computeResource']['resourceId'] + if vnfc_rsc['id'] == pod_target_vnfc['id']: + # check
stored pod name is not changed (Pod) + after_resource = pod_target_vnfc + compute_resource = after_resource['computeResource'] + before_pod_name = compute_resource['resourceId'] + self.assertEqual(after_pod_name, before_pod_name) + elif vnfc_rsc['id'] == deployment_target_vnfc['id']: + # check stored pod name is changed (Deployment) + after_resource = deployment_target_vnfc + compute_resource = after_resource['computeResource'] + before_pod_name = compute_resource['resourceId'] + self.assertNotEqual(after_pod_name, before_pod_name) + else: + # check stored pod name is not changed (not target) + after_resource = deployment_not_target_vnfc + compute_resource = after_resource['computeResource'] + before_pod_name = compute_resource['resourceId'] + self.assertEqual(after_pod_name, before_pod_name) + self._terminate_vnf_instance(vnf_instance['id']) + self._delete_vnf_instance(vnf_instance['id']) + + def test_heal_cnf_with_sol003(self): + """Test heal as per SOL003 for CNF + + This test will instantiate cnf. Heal API will be invoked as per SOL003 + i.e. without passing vnfcInstanceId, so that the entire vnf is healed + which includes Kubernetes resource (Deployment). 
+ """ + # use def-files of Deployment (replicas=2) + inst_additional_param = { + "lcm-kubernetes-def-files": [ + "Files/kubernetes/deployment_heal_simple.yaml"]} + vnf_instance = self._create_and_instantiate_vnf_instance( + "simple", inst_additional_param) + before_vnfc_rscs = self._get_vnfc_resource_info(vnf_instance) + + # test heal SOL-003 (entire heal) + vnfc_instance_id = [] + self._heal_vnf_instance(vnf_instance['id'], vnfc_instance_id) + # wait vnflcm_op_occs.operation_state become COMPLETE + self._wait_vnflcm_op_occs(self.context, vnf_instance['id']) + # check vnfcResourceInfo after heal operation + vnf_instance = _show_vnf_instance( + self.tacker_client, vnf_instance['id']) + after_vnfc_rscs = self._get_vnfc_resource_info(vnf_instance) + self.assertEqual(len(before_vnfc_rscs), len(after_vnfc_rscs)) + # check id and pod name (as computeResource.resourceId) is changed + for before_vnfc_rsc in before_vnfc_rscs: + for after_vnfc_rsc in after_vnfc_rscs: + self.assertNotEqual( + before_vnfc_rsc['id'], after_vnfc_rsc['id']) + self.assertNotEqual( + before_vnfc_rsc['computeResource']['resourceId'], + after_vnfc_rsc['computeResource']['resourceId']) + # terminate vnf instance + self._terminate_vnf_instance(vnf_instance['id']) + self._delete_vnf_instance(vnf_instance['id']) diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/fakes.py b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/fakes.py index bb77995bc..85ca86aa1 100644 --- a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/fakes.py +++ b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/fakes.py @@ -13,9 +13,14 @@ # License for the specific language governing permissions and limitations # under the License. 
+import datetime from kubernetes import client +from oslo_serialization import jsonutils +from oslo_utils import uuidutils from tacker.db.db_sqlalchemy import models +from tacker import objects +from tacker.objects import vim_connection from tacker.tests import uuidsentinel CREATE_K8S_FALSE_VALUE = None @@ -485,6 +490,17 @@ def fake_v1_deployment(): status=client.V1DeploymentStatus( replicas=1, ready_replicas=1 + ), + spec=client.V1DeploymentSpec( + replicas=2, + selector=client.V1LabelSelector( + match_labels={'app': 'webserver'} + ), + template=client.V1PodTemplateSpec( + metadata=client.V1ObjectMeta( + labels={'app': 'webserver'} + ) + ) ) ) @@ -515,6 +531,17 @@ def fake_v1_replica_set(): status=client.V1ReplicaSetStatus( replicas=1, ready_replicas=1 + ), + spec=client.V1ReplicaSetSpec( + replicas=2, + selector=client.V1LabelSelector( + match_labels={'app': 'webserver'} + ), + template=client.V1PodTemplateSpec( + metadata=client.V1ObjectMeta( + labels={'app': 'webserver'} + ) + ) ) ) @@ -923,6 +950,16 @@ def fake_daemon_set(): desired_number_scheduled=13, current_number_scheduled=4, number_misscheduled=2, + ), + spec=client.V1DaemonSetSpec( + selector=client.V1LabelSelector( + match_labels={'app': 'webserver'} + ), + template=client.V1PodTemplateSpec( + metadata=client.V1ObjectMeta( + labels={'app': 'webserver'} + ) + ) ) ) @@ -1015,13 +1052,65 @@ def get_vnf_resource_list(kind, name='fake_name'): return [vnf_resource] -def get_fake_pod_info(kind, name='fake_name', pod_status='Running'): - if kind == 'Deployment': - pod_name = _('{name}-1234567890-abcde').format(name=name) - elif kind == 'ReplicaSet': - pod_name = _('{name}-12345').format(name=name) - elif kind == 'StatefulSet': - pod_name = _('{name}-1').format(name=name) +def get_fake_pod_info(kind, name='fake_name', pod_status='Running', + pod_name=None): + if not pod_name: + if kind == 'Deployment': + pod_name = _('{name}-1234567890-abcde').format(name=name) + elif kind == 'ReplicaSet' or kind == 
'DaemonSet': + pod_name = _('{name}-12345').format(name=name) + elif kind == 'StatefulSet': + pod_name = _('{name}-1').format(name=name) + elif kind == 'Pod': + pod_name = name return client.V1Pod( - metadata=client.V1ObjectMeta(name=pod_name), + metadata=client.V1ObjectMeta(name=pod_name, + creation_timestamp=datetime.datetime.now().isoformat('T')), status=client.V1PodStatus(phase=pod_status)) + + +def fake_vnfc_resource_info(vdu_id='VDU1', rsc_kind='Deployment', + rsc_name='fake_name', pod_name=None, + namespace=None): + def _get_metadata_str(name, namespace="fake_namespace"): + if namespace == "brank": + namespace = "" + metadata = { + 'name': name, + 'namespace': namespace} + return jsonutils.dumps(metadata) + + vnfc_obj = objects.VnfcResourceInfo() + vnfc_obj.id = uuidutils.generate_uuid() + vnfc_obj.vdu_id = vdu_id + if not pod_name: + v1_pod = get_fake_pod_info(rsc_kind, rsc_name) + pod_name = v1_pod.metadata.name + compute_resource = objects.ResourceHandle( + resource_id=pod_name, + vim_level_resource_type=rsc_kind) + vnfc_obj.compute_resource = compute_resource + metadata = {} + if namespace: + metadata['Pod'] = _get_metadata_str( + name=pod_name, namespace=namespace) + if rsc_kind != 'Pod': + metadata[rsc_kind] = _get_metadata_str( + name=rsc_name, namespace=namespace) + else: + metadata['Pod'] = _get_metadata_str(name=pod_name) + if rsc_kind != 'Pod': + metadata[rsc_kind] = _get_metadata_str(name=rsc_name) + vnfc_obj.metadata = metadata + + return vnfc_obj + + +def fake_vim_connection_info(): + access_info = { + 'auth_url': 'http://fake_url:6443', + 'ssl_ca_cert': None} + + return vim_connection.VimConnectionInfo( + vim_type="kubernetes", + access_info=access_info) diff --git a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/test_kubernetes_driver.py b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/test_kubernetes_driver.py index 937827ce0..2a512425a 100644 --- a/tacker/tests/unit/vnfm/infra_drivers/kubernetes/test_kubernetes_driver.py +++ 
b/tacker/tests/unit/vnfm/infra_drivers/kubernetes/test_kubernetes_driver.py @@ -13,10 +13,12 @@ # License for the specific language governing permissions and limitations # under the License. +import copy import ddt import os from kubernetes import client +from oslo_serialization import jsonutils from tacker.common.container import kubernetes_utils from tacker.common import exceptions from tacker import context @@ -1862,6 +1864,350 @@ class TestKubernetes(base.TestCase): "'curry-test001', 'apiVersion': 'apps/v1', " + "'kind': 'Deployment', 'status': 'Creating'}") + @mock.patch.object(client.CoreV1Api, 'list_namespaced_pod') + @mock.patch.object(translate_outputs.Transformer, 'get_k8s_objs_from_yaml') + @mock.patch.object(objects.VnfPackageVnfd, "get_by_id") + @mock.patch('tacker.vnflcm.utils._get_vnfd_dict') + def test_post_vnf_instantiation_with_pod(self, mock_vnfd_dict, + mock_vnf_package_vnfd_get_by_id, + mock_get_k8s_objs_from_yaml, + mock_list_namespaced_pod): + mock_vnfd_dict.return_value = vnflcm_fakes.vnfd_dict_cnf() + mock_vnf_package_vnfd_get_by_id.return_value = \ + vnflcm_fakes.return_vnf_package_vnfd() + k8s_objs_pod = fakes.fake_k8s_objs_pod() + k8s_objs_pod[0].get('object').metadata.name = "vdu1" + mock_get_k8s_objs_from_yaml.return_value = k8s_objs_pod + mock_list_namespaced_pod.return_value =\ + client.V1PodList(items=[ + fakes.get_fake_pod_info(kind='Pod', name='vdu1')]) + instantiate_vnf_req = objects.InstantiateVnfRequest( + additional_params={'lcm-kubernetes-def-files': ["dummy.yaml"]}) + self.kubernetes.post_vnf_instantiation( + context=self.context, + vnf_instance=self.vnf_instance, + vim_connection_info=fakes.fake_vim_connection_info(), + instantiate_vnf_req=instantiate_vnf_req) + self.assertEqual(mock_list_namespaced_pod.call_count, 1) + # validate stored VnfcResourceInfo + vnfc_resource_info_after = \ + self.vnf_instance.instantiated_vnf_info.vnfc_resource_info + self.assertEqual(len(vnfc_resource_info_after), 1) + self.assertEqual( + 
vnfc_resource_info_after[0].compute_resource.resource_id, 'vdu1') + self.assertEqual(vnfc_resource_info_after[0].compute_resource. + vim_level_resource_type, 'Pod') + self.assertEqual(vnfc_resource_info_after[0].vdu_id, 'VDU1') + metadata_after = vnfc_resource_info_after[0].metadata + self.assertEqual( + jsonutils.loads(metadata_after.get('Pod')).get('name'), 'vdu1') + + @mock.patch.object(client.CoreV1Api, 'list_namespaced_pod') + @mock.patch.object(translate_outputs.Transformer, 'get_k8s_objs_from_yaml') + @mock.patch.object(objects.VnfPackageVnfd, "get_by_id") + @mock.patch('tacker.vnflcm.utils._get_vnfd_dict') + def test_post_vnf_instantiation_with_deployment(self, mock_vnfd_dict, + mock_vnf_package_vnfd_get_by_id, + mock_get_k8s_objs_from_yaml, + mock_list_namespaced_pod): + mock_vnfd_dict.return_value = vnflcm_fakes.vnfd_dict_cnf() + mock_vnf_package_vnfd_get_by_id.return_value = \ + vnflcm_fakes.return_vnf_package_vnfd() + k8s_objs_deployment = fakes.fake_k8s_objs_deployment() + k8s_objs_deployment[0].get('object').metadata.name = "vdu1" + mock_get_k8s_objs_from_yaml.return_value = k8s_objs_deployment + mock_list_namespaced_pod.return_value =\ + client.V1PodList(items=[ + fakes.get_fake_pod_info(kind='Deployment', name='vdu1')]) + instantiate_vnf_req = objects.InstantiateVnfRequest( + additional_params={'lcm-kubernetes-def-files': ["dummy.yaml"]}) + self.kubernetes.post_vnf_instantiation( + context=self.context, + vnf_instance=self.vnf_instance, + vim_connection_info=fakes.fake_vim_connection_info(), + instantiate_vnf_req=instantiate_vnf_req) + self.assertEqual(mock_list_namespaced_pod.call_count, 1) + # validate stored VnfcResourceInfo + vnfc_resource_info_after = \ + self.vnf_instance.instantiated_vnf_info.vnfc_resource_info + self.assertEqual(len(vnfc_resource_info_after), 1) + expected_pod = fakes.get_fake_pod_info('Deployment', 'vdu1') + self.assertEqual( + vnfc_resource_info_after[0].compute_resource.resource_id, + expected_pod.metadata.name) + 
self.assertEqual(vnfc_resource_info_after[0].compute_resource. + vim_level_resource_type, 'Deployment') + self.assertEqual(vnfc_resource_info_after[0].vdu_id, 'VDU1') + metadata_after = vnfc_resource_info_after[0].metadata + self.assertEqual(jsonutils.loads( + metadata_after.get('Deployment')).get('name'), 'vdu1') + + @mock.patch.object(client.CoreV1Api, 'list_namespaced_pod') + @mock.patch.object(translate_outputs.Transformer, 'get_k8s_objs_from_yaml') + @mock.patch.object(objects.VnfPackageVnfd, "get_by_id") + @mock.patch('tacker.vnflcm.utils._get_vnfd_dict') + def test_post_vnf_instantiation_with_replicaset(self, mock_vnfd_dict, + mock_vnf_package_vnfd_get_by_id, + mock_get_k8s_objs_from_yaml, + mock_list_namespaced_pod): + mock_vnfd_dict.return_value = vnflcm_fakes.vnfd_dict_cnf() + mock_vnf_package_vnfd_get_by_id.return_value = \ + vnflcm_fakes.return_vnf_package_vnfd() + k8s_objs_replicaset = fakes.fake_k8s_objs_replica_set() + k8s_objs_replicaset[0].get('object').metadata.name = "vdu1" + mock_get_k8s_objs_from_yaml.return_value = k8s_objs_replicaset + mock_list_namespaced_pod.return_value =\ + client.V1PodList(items=[ + fakes.get_fake_pod_info(kind='ReplicaSet', name='vdu1')]) + instantiate_vnf_req = objects.InstantiateVnfRequest( + additional_params={'lcm-kubernetes-def-files': ["dummy.yaml"]}) + self.kubernetes.post_vnf_instantiation( + context=self.context, + vnf_instance=self.vnf_instance, + vim_connection_info=fakes.fake_vim_connection_info(), + instantiate_vnf_req=instantiate_vnf_req) + self.assertEqual(mock_list_namespaced_pod.call_count, 1) + # validate stored VnfcResourceInfo + vnfc_resource_info_after = \ + self.vnf_instance.instantiated_vnf_info.vnfc_resource_info + self.assertEqual(len(vnfc_resource_info_after), 1) + expected_pod = fakes.get_fake_pod_info('ReplicaSet', 'vdu1') + self.assertEqual( + vnfc_resource_info_after[0].compute_resource.resource_id, + expected_pod.metadata.name) + 
self.assertEqual(vnfc_resource_info_after[0].compute_resource. + vim_level_resource_type, 'ReplicaSet') + self.assertEqual(vnfc_resource_info_after[0].vdu_id, 'VDU1') + metadata_after = vnfc_resource_info_after[0].metadata + self.assertEqual(jsonutils.loads( + metadata_after.get('ReplicaSet')).get('name'), 'vdu1') + + @mock.patch.object(client.CoreV1Api, 'list_namespaced_pod') + @mock.patch.object(translate_outputs.Transformer, 'get_k8s_objs_from_yaml') + @mock.patch.object(objects.VnfPackageVnfd, "get_by_id") + @mock.patch('tacker.vnflcm.utils._get_vnfd_dict') + def test_post_vnf_instantiation_with_daemonset(self, mock_vnfd_dict, + mock_vnf_package_vnfd_get_by_id, + mock_get_k8s_objs_from_yaml, + mock_list_namespaced_pod): + mock_vnfd_dict.return_value = vnflcm_fakes.vnfd_dict_cnf() + mock_vnf_package_vnfd_get_by_id.return_value = \ + vnflcm_fakes.return_vnf_package_vnfd() + k8s_objs_daemonset = fakes.fake_k8s_objs_daemon_set() + k8s_objs_daemonset[0].get('object').metadata.name = "vdu1" + mock_get_k8s_objs_from_yaml.return_value = k8s_objs_daemonset + mock_list_namespaced_pod.return_value =\ + client.V1PodList(items=[ + fakes.get_fake_pod_info(kind='DaemonSet', name='vdu1')]) + instantiate_vnf_req = objects.InstantiateVnfRequest( + additional_params={'lcm-kubernetes-def-files': ["dummy.yaml"]}) + self.kubernetes.post_vnf_instantiation( + context=self.context, + vnf_instance=self.vnf_instance, + vim_connection_info=fakes.fake_vim_connection_info(), + instantiate_vnf_req=instantiate_vnf_req) + self.assertEqual(mock_list_namespaced_pod.call_count, 1) + # validate stored VnfcResourceInfo + vnfc_resource_info_after = \ + self.vnf_instance.instantiated_vnf_info.vnfc_resource_info + self.assertEqual(len(vnfc_resource_info_after), 1) + expected_pod = fakes.get_fake_pod_info('DaemonSet', 'vdu1') + self.assertEqual( + vnfc_resource_info_after[0].compute_resource.resource_id, + expected_pod.metadata.name) + self.assertEqual(vnfc_resource_info_after[0].compute_resource. 
+ vim_level_resource_type, 'DaemonSet') + self.assertEqual(vnfc_resource_info_after[0].vdu_id, 'VDU1') + metadata_after = vnfc_resource_info_after[0].metadata + self.assertEqual(jsonutils.loads( + metadata_after.get('DaemonSet')).get('name'), 'vdu1') + + @mock.patch.object(client.CoreV1Api, 'list_namespaced_pod') + @mock.patch.object(translate_outputs.Transformer, 'get_k8s_objs_from_yaml') + @mock.patch.object(objects.VnfPackageVnfd, "get_by_id") + @mock.patch('tacker.vnflcm.utils._get_vnfd_dict') + def test_post_vnf_instantiation_with_statefulset(self, mock_vnfd_dict, + mock_vnf_package_vnfd_get_by_id, + mock_get_k8s_objs_from_yaml, + mock_list_namespaced_pod): + mock_vnfd_dict.return_value = vnflcm_fakes.vnfd_dict_cnf() + mock_vnf_package_vnfd_get_by_id.return_value = \ + vnflcm_fakes.return_vnf_package_vnfd() + k8s_objs_statefulset = fakes.fake_k8s_objs_stateful_set() + k8s_objs_statefulset[0].get('object').metadata.name = "vdu1" + mock_get_k8s_objs_from_yaml.return_value = k8s_objs_statefulset + mock_list_namespaced_pod.return_value =\ + client.V1PodList(items=[ + fakes.get_fake_pod_info(kind='StatefulSet', name='vdu1')]) + instantiate_vnf_req = objects.InstantiateVnfRequest( + additional_params={'lcm-kubernetes-def-files': ["dummy.yaml"]}) + self.kubernetes.post_vnf_instantiation( + context=self.context, + vnf_instance=self.vnf_instance, + vim_connection_info=fakes.fake_vim_connection_info(), + instantiate_vnf_req=instantiate_vnf_req) + self.assertEqual(mock_list_namespaced_pod.call_count, 1) + # validate stored VnfcResourceInfo + vnfc_resource_info_after = \ + self.vnf_instance.instantiated_vnf_info.vnfc_resource_info + self.assertEqual(len(vnfc_resource_info_after), 1) + expected_pod = fakes.get_fake_pod_info('StatefulSet', 'vdu1') + self.assertEqual( + vnfc_resource_info_after[0].compute_resource.resource_id, + expected_pod.metadata.name) + self.assertEqual(vnfc_resource_info_after[0].compute_resource. 
+ vim_level_resource_type, 'StatefulSet') + self.assertEqual(vnfc_resource_info_after[0].vdu_id, 'VDU1') + metadata_after = vnfc_resource_info_after[0].metadata + self.assertEqual(jsonutils.loads( + metadata_after.get('StatefulSet')).get('name'), 'vdu1') + + @mock.patch.object(client.CoreV1Api, 'list_namespaced_pod') + @mock.patch.object(translate_outputs.Transformer, 'get_k8s_objs_from_yaml') + @mock.patch.object(objects.VnfPackageVnfd, "get_by_id") + @mock.patch('tacker.vnflcm.utils._get_vnfd_dict') + def test_post_vnf_instantiation_with_multiple_pod(self, mock_vnfd_dict, + mock_vnf_package_vnfd_get_by_id, + mock_get_k8s_objs_from_yaml, + mock_list_namespaced_pod): + vnfd_dict = vnflcm_fakes.vnfd_dict_cnf() + node_tpls = vnfd_dict.get('topology_template').get('node_templates') + node_tpls['VDU2'] = copy.deepcopy(node_tpls['VDU1']) + node_tpls['VDU2']['properties']['name'] = "vdu2" + mock_vnfd_dict.return_value = vnfd_dict + mock_vnf_package_vnfd_get_by_id.return_value = \ + vnflcm_fakes.return_vnf_package_vnfd() + # use multiple pod with default namespace + k8s_objs_pod = fakes.fake_k8s_objs_pod() + k8s_objs_pod[0].get('object').metadata.name = "vdu1" + k8s_objs_pod[0].get('object').metadata.namespace = None + k8s_objs_pod[0]['namespace'] = None + k8s_objs_pod.append(copy.deepcopy(k8s_objs_pod[0])) + k8s_objs_pod[1].get('object').metadata.name = "vdu2" + k8s_objs_pod[1].get('object').metadata.namespace = None + k8s_objs_pod[1]['namespace'] = None + mock_get_k8s_objs_from_yaml.return_value = k8s_objs_pod + mock_list_namespaced_pod.return_value = \ + client.V1PodList(items=[ + fakes.get_fake_pod_info(kind='Pod', name='vdu1'), + fakes.get_fake_pod_info(kind='Pod', name='vdu2')]) + instantiate_vnf_req = objects.InstantiateVnfRequest( + additional_params={'lcm-kubernetes-def-files': ["dummy.yaml"]}) + self.kubernetes.post_vnf_instantiation( + context=self.context, + vnf_instance=self.vnf_instance, + vim_connection_info=fakes.fake_vim_connection_info(), + 
instantiate_vnf_req=instantiate_vnf_req) + self.assertEqual(mock_list_namespaced_pod.call_count, 1) + # validate stored VnfcResourceInfo + vnfc_resource_info_after = \ + self.vnf_instance.instantiated_vnf_info.vnfc_resource_info + self.assertEqual(len(vnfc_resource_info_after), 2) + self.assertEqual( + vnfc_resource_info_after[0].compute_resource.resource_id, 'vdu1') + self.assertEqual(vnfc_resource_info_after[0].compute_resource. + vim_level_resource_type, 'Pod') + self.assertEqual(vnfc_resource_info_after[0].vdu_id, 'VDU1') + metadata_after = vnfc_resource_info_after[0].metadata + self.assertEqual( + jsonutils.loads(metadata_after.get('Pod')).get('name'), 'vdu1') + self.assertEqual( + vnfc_resource_info_after[1].compute_resource.resource_id, 'vdu2') + self.assertEqual(vnfc_resource_info_after[1].compute_resource. + vim_level_resource_type, 'Pod') + self.assertEqual(vnfc_resource_info_after[1].vdu_id, 'VDU2') + metadata_after = vnfc_resource_info_after[1].metadata + self.assertEqual( + jsonutils.loads(metadata_after.get('Pod')).get('name'), 'vdu2') + + @mock.patch.object(client.CoreV1Api, 'list_namespaced_pod') + @mock.patch.object(translate_outputs.Transformer, 'get_k8s_objs_from_yaml') + @mock.patch.object(objects.VnfPackageVnfd, "get_by_id") + @mock.patch('tacker.vnflcm.utils._get_vnfd_dict') + def test_post_vnf_instantiation_without_pod_creation(self, mock_vnfd_dict, + mock_vnf_package_vnfd_get_by_id, + mock_get_k8s_objs_from_yaml, + mock_list_namespaced_pod): + vnfd_dict = vnflcm_fakes.vnfd_dict_cnf() + node_tpls = vnfd_dict.get('topology_template').get('node_templates') + # delete definition of VDU and policies from vnfd + del node_tpls['VDU1'] + del vnfd_dict.get('topology_template')['policies'] + mock_vnfd_dict.return_value = vnfd_dict + mock_vnf_package_vnfd_get_by_id.return_value = \ + vnflcm_fakes.return_vnf_package_vnfd() + # use service object + mock_get_k8s_objs_from_yaml.return_value = \ + fakes.fake_k8s_objs_api_service() + instantiate_vnf_req = 
objects.InstantiateVnfRequest( + additional_params={'lcm-kubernetes-def-files': ["dummy.yaml"]}) + self.kubernetes.post_vnf_instantiation( + context=self.context, + vnf_instance=self.vnf_instance, + vim_connection_info=fakes.fake_vim_connection_info(), + instantiate_vnf_req=instantiate_vnf_req) + self.assertEqual(mock_list_namespaced_pod.call_count, 0) + # validate stored VnfcResourceInfo + vnfc_resource_info_after = \ + self.vnf_instance.instantiated_vnf_info.vnfc_resource_info + self.assertEqual(len(vnfc_resource_info_after), 0) + + @mock.patch.object(client.CoreV1Api, 'list_namespaced_pod') + @mock.patch.object(translate_outputs.Transformer, 'get_k8s_objs_from_yaml') + @mock.patch.object(objects.VnfPackageVnfd, "get_by_id") + @mock.patch('tacker.vnflcm.utils._get_vnfd_dict') + def test_post_vnf_instantiation_without_naming_rule_match(self, + mock_vnfd_dict, + mock_vnf_package_vnfd_get_by_id, + mock_get_k8s_objs_from_yaml, + mock_list_namespaced_pod): + mock_vnfd_dict.return_value = vnflcm_fakes.vnfd_dict_cnf() + mock_vnf_package_vnfd_get_by_id.return_value = \ + vnflcm_fakes.return_vnf_package_vnfd() + k8s_objs_pod = fakes.fake_k8s_objs_pod() + k8s_objs_pod[0].get('object').metadata.name = "vdu1" + mock_get_k8s_objs_from_yaml.return_value = k8s_objs_pod + mock_list_namespaced_pod.return_value =\ + client.V1PodList(items=[ + fakes.get_fake_pod_info(kind='Pod', name='vdu2')]) + instantiate_vnf_req = objects.InstantiateVnfRequest( + additional_params={'lcm-kubernetes-def-files': ["dummy.yaml"]}) + self.kubernetes.post_vnf_instantiation( + context=self.context, + vnf_instance=self.vnf_instance, + vim_connection_info=fakes.fake_vim_connection_info(), + instantiate_vnf_req=instantiate_vnf_req) + self.assertEqual(mock_list_namespaced_pod.call_count, 1) + # validate stored VnfcResourceInfo + vnfc_resource_info_after = \ + self.vnf_instance.instantiated_vnf_info.vnfc_resource_info + self.assertEqual(len(vnfc_resource_info_after), 0) + + 
@mock.patch.object(client.CoreV1Api, 'list_namespaced_pod') + @mock.patch.object(translate_outputs.Transformer, 'get_k8s_objs_from_yaml') + @mock.patch.object(objects.VnfPackageVnfd, "get_by_id") + @mock.patch('tacker.vnflcm.utils._get_vnfd_dict') + def test_post_vnf_instantiation_api_fail(self, + mock_vnfd_dict, + mock_vnf_package_vnfd_get_by_id, + mock_get_k8s_objs_from_yaml, + mock_list_namespaced_pod): + mock_vnfd_dict.return_value = vnflcm_fakes.vnfd_dict_cnf() + mock_vnf_package_vnfd_get_by_id.return_value = \ + vnflcm_fakes.return_vnf_package_vnfd() + k8s_objs_pod = fakes.fake_k8s_objs_pod() + k8s_objs_pod[0].get('object').metadata.name = "vdu1" + mock_get_k8s_objs_from_yaml.return_value = k8s_objs_pod + mock_list_namespaced_pod.side_effect = \ + client.rest.ApiException(status=500) + instantiate_vnf_req = objects.InstantiateVnfRequest( + additional_params={'lcm-kubernetes-def-files': ["dummy.yaml"]}) + self.assertRaises(client.rest.ApiException, + self.kubernetes.post_vnf_instantiation, + self.context, self.vnf_instance, + fakes.fake_vim_connection_info(), + instantiate_vnf_req) + @mock.patch.object(client.AppsV1Api, 'patch_namespaced_deployment_scale') @mock.patch.object(client.AppsV1Api, 'read_namespaced_deployment_scale') @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") @@ -2348,3 +2694,894 @@ class TestKubernetes(base.TestCase): vnf_info=vnf_info, region_name=None) mock_list_namespaced_pod.assert_called_once() + + @mock.patch.object(client.CoreV1Api, 'list_namespaced_pod') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + @mock.patch.object(objects.VnfPackageVnfd, "get_by_id") + @mock.patch('tacker.vnflcm.utils._get_vnfd_dict') + def test_scale_resource_update_scale_out(self, mock_vnfd_dict, + mock_vnf_package_vnfd_get_by_id, + mock_vnf_resource_list, + mock_list_namespaced_pod): + mock_vnfd_dict.return_value = vnflcm_fakes.vnfd_dict_cnf() + mock_vnf_package_vnfd_get_by_id.return_value = \ + 
vnflcm_fakes.return_vnf_package_vnfd() + vnf_resource_list = [] + vnf_resource_list.append(models.VnfResource()) + vnf_resource_list[0].vnf_instance_id = self.vnf_instance.id + vnf_resource_list[0].resource_name = "default,vdu0" + vnf_resource_list[0].resource_type = "apps/v1,Deployment" + vnf_resource_list.append(copy.deepcopy(vnf_resource_list[0])) + vnf_resource_list[1].resource_name = "default,vdu1" + mock_vnf_resource_list.return_value = vnf_resource_list + vnfc_resource_info = [] + vnfc_resource_info.append( + fakes.fake_vnfc_resource_info(rsc_name="vdu1")) + vnfc_resource_info.append( + fakes.fake_vnfc_resource_info(vdu_id="VDU2", rsc_name="vdu2")) + self.vnf_instance.instantiated_vnf_info.vnfc_resource_info = \ + vnfc_resource_info + fake_pod_list = [] + fake_pod_list.append( + fakes.get_fake_pod_info(kind='Deployment', name='vdu1')) + fake_pod_list.append( + fakes.get_fake_pod_info(kind='Deployment', name='vdu1', + pod_name="vdu1-1234567890-dummy")) + fake_pod_list.append( + fakes.get_fake_pod_info(kind='Deployment', name='vdu2', + pod_name="vdu2-abcdef0123-fakes")) + vnfc_resource_info = [] + vnfc_resource_info.append( + fakes.fake_vnfc_resource_info(rsc_name="vdu1")) + vnfc_resource_info.append( + fakes.fake_vnfc_resource_info(vdu_id="VDU2", rsc_name="vdu2")) + mock_list_namespaced_pod.return_value =\ + client.V1PodList(items=fake_pod_list) + scale_vnf_req = objects.ScaleVnfRequest(type='SCALE_OUT', + aspect_id='vdu1_aspect', + number_of_steps=1) + self.kubernetes.scale_resource_update( + context=self.context, + vnf_instance=self.vnf_instance, + scale_vnf_request=scale_vnf_req, + vim_connection_info=fakes.fake_vim_connection_info()) + self.assertEqual(mock_list_namespaced_pod.call_count, 1) + # validate added VnfcResourceInfo + vnfc_resource_info_after = \ + self.vnf_instance.instantiated_vnf_info.vnfc_resource_info + self.assertEqual(len(vnfc_resource_info_after), 3) + expected_pod = fake_pod_list[1] + self.assertEqual( + 
vnfc_resource_info_after[2].compute_resource.resource_id, + expected_pod.metadata.name) + self.assertEqual(vnfc_resource_info_after[2].compute_resource. + vim_level_resource_type, 'Deployment') + self.assertEqual(vnfc_resource_info_after[2].vdu_id, 'VDU1') + metadata_after = vnfc_resource_info_after[2].metadata + self.assertEqual(jsonutils.loads( + metadata_after.get('Deployment')).get('name'), 'vdu1') + + @mock.patch.object(client.CoreV1Api, 'list_namespaced_pod') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + @mock.patch.object(objects.VnfPackageVnfd, "get_by_id") + @mock.patch('tacker.vnflcm.utils._get_vnfd_dict') + def test_scale_resource_update_scale_in(self, mock_vnfd_dict, + mock_vnf_package_vnfd_get_by_id, + mock_vnf_resource_list, + mock_list_namespaced_pod): + mock_vnfd_dict.return_value = vnflcm_fakes.vnfd_dict_cnf() + mock_vnf_package_vnfd_get_by_id.return_value = \ + vnflcm_fakes.return_vnf_package_vnfd() + vnf_resource = models.VnfResource() + vnf_resource.vnf_instance_id = self.vnf_instance.id + vnf_resource.resource_name = "default,vdu1" + vnf_resource.resource_type = "apps/v1,Deployment" + mock_vnf_resource_list.return_value = [vnf_resource] + vnfc_resource_info = [] + vnfc_resource_info.append( + fakes.fake_vnfc_resource_info(rsc_name="vdu1")) + vnfc_resource_info.append( + fakes.fake_vnfc_resource_info(rsc_name="vdu1", + pod_name="vdu1-1234567890-dummy")) + self.vnf_instance.instantiated_vnf_info.vnfc_resource_info = \ + vnfc_resource_info + fake_pod_list = [] + fake_pod_list.append( + fakes.get_fake_pod_info(kind='Deployment', name='vdu1')) + mock_list_namespaced_pod.return_value =\ + client.V1PodList(items=fake_pod_list) + scale_vnf_req = objects.ScaleVnfRequest(type='SCALE_IN', + aspect_id='vdu1_aspect', + number_of_steps=1) + self.kubernetes.scale_resource_update( + context=self.context, + vnf_instance=self.vnf_instance, + scale_vnf_request=scale_vnf_req, + vim_connection_info=fakes.fake_vim_connection_info()) + 
self.assertEqual(mock_list_namespaced_pod.call_count, 1) + # validate VnfcResourceInfo + vnfc_resource_info_after = \ + self.vnf_instance.instantiated_vnf_info.vnfc_resource_info + self.assertEqual(len(vnfc_resource_info_after), 1) + expected_pod = fake_pod_list[0] + self.assertEqual( + vnfc_resource_info_after[0].compute_resource.resource_id, + expected_pod.metadata.name) + self.assertEqual(vnfc_resource_info_after[0].compute_resource. + vim_level_resource_type, 'Deployment') + self.assertEqual(vnfc_resource_info_after[0].vdu_id, 'VDU1') + metadata_after = vnfc_resource_info_after[0].metadata + self.assertEqual(jsonutils.loads( + metadata_after.get('Deployment')).get('name'), 'vdu1') + + @mock.patch.object(client.CoreV1Api, 'list_namespaced_pod') + @mock.patch.object(objects.VnfResourceList, "get_by_vnf_instance_id") + @mock.patch.object(objects.VnfPackageVnfd, "get_by_id") + @mock.patch('tacker.vnflcm.utils._get_vnfd_dict') + def test_scale_resource_update_api_fail(self, mock_vnfd_dict, + mock_vnf_package_vnfd_get_by_id, + mock_vnf_resource_list, + mock_list_namespaced_pod): + mock_vnfd_dict.return_value = vnflcm_fakes.vnfd_dict_cnf() + mock_vnf_package_vnfd_get_by_id.return_value = \ + vnflcm_fakes.return_vnf_package_vnfd() + vnf_resource = models.VnfResource() + vnf_resource.vnf_instance_id = self.vnf_instance.id + vnf_resource.resource_name = "default,vdu1" + vnf_resource.resource_type = "apps/v1,Deployment" + mock_vnf_resource_list.return_value = [vnf_resource] + vnfc_resource_info = [] + vnfc_resource_info.append( + fakes.fake_vnfc_resource_info(rsc_name="vdu1")) + self.vnf_instance.instantiated_vnf_info.vnfc_resource_info =\ + vnfc_resource_info + mock_list_namespaced_pod.side_effect = \ + client.rest.ApiException(status=500) + scale_vnf_req = objects.ScaleVnfRequest(type='SCALE_OUT', + aspect_id='vdu1_aspect', + number_of_steps=1) + self.assertRaises(client.rest.ApiException, + self.kubernetes.scale_resource_update, + self.context, self.vnf_instance, + 
scale_vnf_req, + fakes.fake_vim_connection_info()) + + @mock.patch.object(client.CoreV1Api, 'list_namespaced_pod') + def test_heal_vnf_api_fail( + self, mock_list_namespaced_pod): + mock_list_namespaced_pod.side_effect =\ + client.rest.ApiException(status=500) + vnf_instance_obj = vnflcm_fakes.return_vnf_instance( + fields.VnfInstanceState.INSTANTIATED) + vnfc_resource_info_obj = fakes.fake_vnfc_resource_info(rsc_kind='Pod') + vnf_instance_obj.instantiated_vnf_info.vnfc_resource_info = \ + [vnfc_resource_info_obj] + vim_connection_object = fakes.fake_vim_connection_info() + heal_request_data_obj = objects.heal_vnf_request.HealVnfRequest( + vnfc_instance_id=[vnfc_resource_info_obj.id]) + self.assertRaises(client.rest.ApiException, + self.kubernetes.heal_vnf, + context=self.context, + vnf_instance=vnf_instance_obj, + vim_connection_info=vim_connection_object, + heal_vnf_request=heal_request_data_obj) + + @mock.patch.object(client.CoreV1Api, 'delete_namespaced_pod') + @mock.patch.object(client.CoreV1Api, 'read_namespaced_pod') + @mock.patch.object(client.CoreV1Api, 'list_namespaced_pod') + def test_heal_vnf_pod_stack_retries_false( + self, mock_list_namespaced_pod, + mock_read_namespaced_pod, + mock_delete_namespaced_pod): + mock_list_namespaced_pod.return_value =\ + client.V1PodList(items=[ + fakes.get_fake_pod_info(kind='Pod')]) + mock_read_namespaced_pod.return_value = fakes.fake_pod() + mock_delete_namespaced_pod.return_value = client.V1Status() + vnf_instance_obj = vnflcm_fakes.return_vnf_instance( + fields.VnfInstanceState.INSTANTIATED) + vnfc_resource_info_obj = fakes.fake_vnfc_resource_info(rsc_kind='Pod') + vnf_instance_obj.instantiated_vnf_info.vnfc_resource_info = \ + [vnfc_resource_info_obj] + vim_connection_object = fakes.fake_vim_connection_info() + heal_request_data_obj = objects.heal_vnf_request.HealVnfRequest( + vnfc_instance_id=[vnfc_resource_info_obj.id]) + self.assertRaises(vnfm.CNFHealFailed, + self.kubernetes.heal_vnf, + context=self.context, + 
vnf_instance=vnf_instance_obj, + vim_connection_info=vim_connection_object, + heal_vnf_request=heal_request_data_obj) + + @mock.patch.object(client.CoreV1Api, 'create_namespaced_pod') + @mock.patch.object(client.CoreV1Api, 'delete_namespaced_pod') + @mock.patch.object(client.CoreV1Api, 'read_namespaced_pod') + @mock.patch.object(client.CoreV1Api, 'list_namespaced_pod') + def test_heal_vnf_pod( + self, mock_list_namespaced_pod, + mock_read_namespaced_pod, + mock_delete_namespaced_pod, + mock_create_namespaced_pod): + mock_list_namespaced_pod.return_value =\ + client.V1PodList(items=[ + fakes.get_fake_pod_info(kind='Pod')]) + mock_read_namespaced_pod.side_effect = [ + fakes.fake_pod(), + client.rest.ApiException(status=404)] + mock_delete_namespaced_pod.return_value = client.V1Status() + mock_create_namespaced_pod.return_value = client.V1Status() + vnf_instance_obj = vnflcm_fakes.return_vnf_instance( + fields.VnfInstanceState.INSTANTIATED) + vnfc_resource_info_obj = fakes.fake_vnfc_resource_info(rsc_kind='Pod') + vnf_instance_obj.instantiated_vnf_info.vnfc_resource_info = \ + [vnfc_resource_info_obj] + vim_connection_object = fakes.fake_vim_connection_info() + heal_request_data_obj = objects.heal_vnf_request.HealVnfRequest( + vnfc_instance_id=[vnfc_resource_info_obj.id]) + self.kubernetes.heal_vnf(context=self.context, + vnf_instance=vnf_instance_obj, + vim_connection_info=vim_connection_object, + heal_vnf_request=heal_request_data_obj) + mock_delete_namespaced_pod.assert_called_once() + + @mock.patch.object(client.CoreV1Api, 'create_namespaced_pod') + @mock.patch.object(client.CoreV1Api, 'delete_namespaced_pod') + @mock.patch.object(client.CoreV1Api, 'read_namespaced_pod') + @mock.patch.object(client.CoreV1Api, 'list_namespaced_pod') + def test_heal_vnf_pod_api_fail_code_500( + self, mock_list_namespaced_pod, + mock_read_namespaced_pod, + mock_delete_namespaced_pod, + mock_create_namespaced_pod): + mock_list_namespaced_pod.return_value =\ + client.V1PodList(items=[ 
+ fakes.get_fake_pod_info(kind='Pod')]) + mock_read_namespaced_pod.side_effect = [ + fakes.fake_pod(), + client.rest.ApiException(status=500)] + mock_delete_namespaced_pod.return_value = client.V1Status() + mock_create_namespaced_pod.return_value = client.V1Status() + vnf_instance_obj = vnflcm_fakes.return_vnf_instance( + fields.VnfInstanceState.INSTANTIATED) + vnfc_resource_info_obj = fakes.fake_vnfc_resource_info(rsc_kind='Pod') + vnf_instance_obj.instantiated_vnf_info.vnfc_resource_info = \ + [vnfc_resource_info_obj] + vim_connection_object = fakes.fake_vim_connection_info() + heal_request_data_obj = objects.heal_vnf_request.HealVnfRequest( + vnfc_instance_id=[vnfc_resource_info_obj.id]) + self.assertRaises(vnfm.CNFHealFailed, + self.kubernetes.heal_vnf, + context=self.context, + vnf_instance=vnf_instance_obj, + vim_connection_info=vim_connection_object, + heal_vnf_request=heal_request_data_obj) + mock_delete_namespaced_pod.assert_called_once() + + @mock.patch.object(client.CoreV1Api, 'delete_namespaced_pod') + @mock.patch.object(client.CoreV1Api, 'list_namespaced_pod') + def test_heal_vnf_deployment( + self, mock_list_namespaced_pod, + mock_delete_namespaced_pod): + mock_list_namespaced_pod.return_value =\ + client.V1PodList(items=[ + fakes.get_fake_pod_info(kind='Deployment')]) + mock_delete_namespaced_pod.return_value = client.V1Status() + vnf_instance_obj = vnflcm_fakes.return_vnf_instance( + fields.VnfInstanceState.INSTANTIATED) + vnfc_resource_info_obj = fakes.fake_vnfc_resource_info( + rsc_kind='Deployment') + vnf_instance_obj.instantiated_vnf_info.vnfc_resource_info = \ + [vnfc_resource_info_obj] + vim_connection_object = fakes.fake_vim_connection_info() + heal_request_data_obj = objects.heal_vnf_request.HealVnfRequest( + vnfc_instance_id=[vnfc_resource_info_obj.id]) + self.kubernetes.heal_vnf(context=self.context, + vnf_instance=vnf_instance_obj, + vim_connection_info=vim_connection_object, + heal_vnf_request=heal_request_data_obj) + 
mock_delete_namespaced_pod.assert_called_once() + self.assertEqual(len(vnf_instance_obj.instantiated_vnf_info. + vnfc_resource_info), 1) + + @mock.patch.object(client.CoreV1Api, 'delete_namespaced_pod') + @mock.patch.object(client.CoreV1Api, 'list_namespaced_pod') + def test_heal_vnf_deployment_target_pod_not_found( + self, mock_list_namespaced_pod, + mock_delete_namespaced_pod): + mock_list_namespaced_pod.return_value =\ + client.V1PodList(items=[ + fakes.get_fake_pod_info(kind='Deployment')]) + mock_delete_namespaced_pod.side_effect =\ + client.rest.ApiException(status=404) + vnf_instance_obj = vnflcm_fakes.return_vnf_instance( + fields.VnfInstanceState.INSTANTIATED) + vnfc_resource_info_obj = fakes.fake_vnfc_resource_info( + rsc_kind='Deployment') + vnf_instance_obj.instantiated_vnf_info.vnfc_resource_info = \ + [vnfc_resource_info_obj] + vim_connection_object = fakes.fake_vim_connection_info() + heal_request_data_obj = objects.heal_vnf_request.HealVnfRequest( + vnfc_instance_id=[vnfc_resource_info_obj.id]) + self.kubernetes.heal_vnf(context=self.context, + vnf_instance=vnf_instance_obj, + vim_connection_info=vim_connection_object, + heal_vnf_request=heal_request_data_obj) + mock_delete_namespaced_pod.assert_called_once() + self.assertEqual("POD_NOT_FOUND", + vnf_instance_obj.instantiated_vnf_info. + vnfc_resource_info[0].compute_resource. 
+ resource_id) + + @mock.patch.object(client.CoreV1Api, 'delete_namespaced_pod') + @mock.patch.object(client.CoreV1Api, 'list_namespaced_pod') + def test_heal_vnf_deployment_api_failed_code_500( + self, mock_list_namespaced_pod, + mock_delete_namespaced_pod): + mock_list_namespaced_pod.return_value =\ + client.V1PodList(items=[ + fakes.get_fake_pod_info(kind='Deployment')]) + mock_delete_namespaced_pod.side_effect =\ + client.rest.ApiException(status=500) + vnf_instance_obj = vnflcm_fakes.return_vnf_instance( + fields.VnfInstanceState.INSTANTIATED) + vnfc_resource_info_obj = fakes.fake_vnfc_resource_info( + rsc_kind='Deployment') + vnf_instance_obj.instantiated_vnf_info.vnfc_resource_info = \ + [vnfc_resource_info_obj] + vim_connection_object = fakes.fake_vim_connection_info() + heal_request_data_obj = objects.heal_vnf_request.HealVnfRequest( + vnfc_instance_id=[vnfc_resource_info_obj.id]) + self.assertRaises(vnfm.CNFHealFailed, + self.kubernetes.heal_vnf, + context=self.context, + vnf_instance=vnf_instance_obj, + vim_connection_info=vim_connection_object, + heal_vnf_request=heal_request_data_obj) + + @mock.patch.object(client.CoreV1Api, 'list_namespaced_pod') + def test_heal_vnf_with_not_supported_kind( + self, mock_list_namespaced_pod): + mock_list_namespaced_pod.return_value =\ + client.V1PodList(items=[ + fakes.get_fake_pod_info(kind='ReplicaSet')]) + vnf_instance_obj = vnflcm_fakes.return_vnf_instance( + fields.VnfInstanceState.INSTANTIATED) + vnfc_resource_info_obj = fakes.fake_vnfc_resource_info( + rsc_kind='ReplicaSet') + # change Kubernetes resource kind to Job (for illegal route) + vnfc_resource_info_obj.compute_resource.vim_level_resource_type = "Job" + vnfc_resource_info_obj.metadata["Job"] =\ + vnfc_resource_info_obj.metadata.pop("ReplicaSet") + vnf_instance_obj.instantiated_vnf_info.vnfc_resource_info = \ + [vnfc_resource_info_obj] + vim_connection_object = fakes.fake_vim_connection_info() + heal_request_data_obj = 
objects.heal_vnf_request.HealVnfRequest( + vnfc_instance_id=[vnfc_resource_info_obj.id]) + self.assertRaises(vnfm.CNFHealFailed, + self.kubernetes.heal_vnf, + context=self.context, + vnf_instance=vnf_instance_obj, + vim_connection_info=vim_connection_object, + heal_vnf_request=heal_request_data_obj) + + @mock.patch.object(client.CoreV1Api, 'delete_namespaced_pod') + @mock.patch.object(client.CoreV1Api, 'list_namespaced_pod') + def test_heal_vnf_deployment_update_vnfc_before_heal( + self, mock_list_namespaced_pod, + mock_delete_namespaced_pod): + mock_list_namespaced_pod.return_value =\ + client.V1PodList(items=[ + fakes.get_fake_pod_info(kind='Deployment', name='fake_name', + pod_name="fake_name-1234567890-strp1"), + fakes.get_fake_pod_info(kind='Deployment', name='fake_name', + pod_name="fake_name-1234567890-added"), + fakes.get_fake_pod_info(kind='Deployment', name='fake_name', + pod_name="fake_name-1234567890-strp3")] + ) + mock_delete_namespaced_pod.return_value = client.V1Status() + vnf_instance_obj = vnflcm_fakes.return_vnf_instance( + fields.VnfInstanceState.INSTANTIATED) + vnf_instance_obj = vnflcm_fakes.return_vnf_instance( + fields.VnfInstanceState.INSTANTIATED) + vnfc_resource_info = [] + vnfc_resource_info.append(fakes.fake_vnfc_resource_info( + vdu_id='VDU1', rsc_kind='Deployment', rsc_name='fake_name', + pod_name="fake_name-1234567890-strp1", namespace="brank")) + vnfc_resource_info.append(fakes.fake_vnfc_resource_info( + vdu_id='VDU1', rsc_kind='Deployment', rsc_name='fake_name', + pod_name="fake_name-1234567890-strp2", namespace="brank")) + vnfc_resource_info.append(fakes.fake_vnfc_resource_info( + vdu_id='VDU1', rsc_kind='Deployment', rsc_name='fake_name', + pod_name="fake_name-1234567890-strp3", namespace="brank")) + vnf_instance_obj.instantiated_vnf_info.vnfc_resource_info = \ + vnfc_resource_info + vim_connection_object = fakes.fake_vim_connection_info() + heal_request_data_obj = objects.heal_vnf_request.HealVnfRequest( + vnfc_instance_id=[ + 
vnfc_resource_info[0].id, vnfc_resource_info[2].id]) + before_vnfc = \ + vnf_instance_obj.instantiated_vnf_info.vnfc_resource_info + self.kubernetes.heal_vnf(context=self.context, + vnf_instance=vnf_instance_obj, + vim_connection_info=vim_connection_object, + heal_vnf_request=heal_request_data_obj) + after_vnfc = \ + vnf_instance_obj.instantiated_vnf_info.vnfc_resource_info + self.assertEqual(before_vnfc[0], after_vnfc[0]) + self.assertEqual( + "fake_name-1234567890-added", + after_vnfc[1].compute_resource.resource_id) + self.assertEqual(before_vnfc[2], after_vnfc[2]) + + @mock.patch.object(client.CoreV1Api, 'list_namespaced_pod') + def test_heal_vnf_wait_api_fail( + self, mock_list_namespaced_pod): + mock_list_namespaced_pod.side_effect =\ + client.rest.ApiException(status=500) + vnf_instance_obj = vnflcm_fakes.return_vnf_instance( + fields.VnfInstanceState.INSTANTIATED) + vnfc_resource_info_obj = fakes.fake_vnfc_resource_info(rsc_kind='Pod') + vnf_instance_obj.instantiated_vnf_info.vnfc_resource_info = \ + [vnfc_resource_info_obj] + vim_connection_object = fakes.fake_vim_connection_info() + heal_request_data_obj = objects.heal_vnf_request.HealVnfRequest( + vnfc_instance_id=[vnfc_resource_info_obj.id]) + self.assertRaises(client.rest.ApiException, + self.kubernetes.heal_vnf_wait, + context=self.context, + vnf_instance=vnf_instance_obj, + vim_connection_info=vim_connection_object, + heal_vnf_request=heal_request_data_obj) + + @mock.patch.object(client.CoreV1Api, 'list_namespaced_pod') + def test_heal_vnf_wait_pod(self, mock_list_namespaced_pod): + mock_list_namespaced_pod.return_value =\ + client.V1PodList(items=[ + fakes.get_fake_pod_info(kind='Pod')]) + vnf_instance_obj = vnflcm_fakes.return_vnf_instance( + fields.VnfInstanceState.INSTANTIATED) + vnfc_resource_info_obj = fakes.fake_vnfc_resource_info(rsc_kind='Pod') + vnf_instance_obj.instantiated_vnf_info.vnfc_resource_info = \ + [vnfc_resource_info_obj] + vim_connection_object = fakes.fake_vim_connection_info() 
+ heal_request_data_obj = objects.heal_vnf_request.HealVnfRequest( + vnfc_instance_id=[vnfc_resource_info_obj.id]) + self.kubernetes.heal_vnf_wait(context=self.context, + vnf_instance=vnf_instance_obj, + vim_connection_info=vim_connection_object, + heal_vnf_request=heal_request_data_obj) + mock_list_namespaced_pod.assert_called_once() + + @mock.patch.object(client.AppsV1Api, 'read_namespaced_deployment_scale') + @mock.patch.object(client.CoreV1Api, 'list_namespaced_pod') + def test_heal_vnf_wait_deployment(self, mock_list_namespaced_pod, + mock_read_namespaced_deployment_scale): + mock_list_namespaced_pod.return_value =\ + client.V1PodList(items=[ + fakes.get_fake_pod_info(kind='Deployment')]) + mock_read_namespaced_deployment_scale.return_value = \ + client.V1Scale(spec=client.V1ScaleSpec(replicas=1), + status=client.V1ScaleStatus(replicas=1)) + vnf_instance_obj = vnflcm_fakes.return_vnf_instance( + fields.VnfInstanceState.INSTANTIATED) + vnfc_resource_info_obj = fakes.fake_vnfc_resource_info( + rsc_kind='Deployment') + vnf_instance_obj.instantiated_vnf_info.vnfc_resource_info = \ + [vnfc_resource_info_obj] + vim_connection_object = fakes.fake_vim_connection_info() + heal_request_data_obj = objects.heal_vnf_request.HealVnfRequest( + vnfc_instance_id=[vnfc_resource_info_obj.id]) + self.kubernetes.heal_vnf_wait(context=self.context, + vnf_instance=vnf_instance_obj, + vim_connection_info=vim_connection_object, + heal_vnf_request=heal_request_data_obj) + mock_list_namespaced_pod.assert_called_once() + + @mock.patch.object(client.CoreV1Api, 'list_namespaced_pod') + def test_heal_vnf_wait_daemon_set(self, mock_list_namespaced_pod): + mock_list_namespaced_pod.return_value =\ + client.V1PodList(items=[ + fakes.get_fake_pod_info(kind='DaemonSet')]) + vnf_instance_obj = vnflcm_fakes.return_vnf_instance( + fields.VnfInstanceState.INSTANTIATED) + vnfc_resource_info_obj = fakes.fake_vnfc_resource_info( + rsc_kind='DaemonSet') + 
vnf_instance_obj.instantiated_vnf_info.vnfc_resource_info = \ + [vnfc_resource_info_obj] + vim_connection_object = fakes.fake_vim_connection_info() + heal_request_data_obj = objects.heal_vnf_request.HealVnfRequest( + vnfc_instance_id=[vnfc_resource_info_obj.id]) + self.kubernetes.heal_vnf_wait(context=self.context, + vnf_instance=vnf_instance_obj, + vim_connection_info=vim_connection_object, + heal_vnf_request=heal_request_data_obj) + mock_list_namespaced_pod.assert_called_once() + + @mock.patch.object(client.AppsV1Api, 'read_namespaced_stateful_set_scale') + @mock.patch.object(client.CoreV1Api, 'list_namespaced_pod') + def test_heal_vnf_wait_stateful_set(self, mock_list_namespaced_pod, + mock_read_namespaced_stateful_set_scale): + mock_list_namespaced_pod.return_value =\ + client.V1PodList(items=[ + fakes.get_fake_pod_info(kind='StatefulSet')]) + mock_read_namespaced_stateful_set_scale.return_value = \ + client.V1Scale(spec=client.V1ScaleSpec(replicas=1), + status=client.V1ScaleStatus(replicas=1)) + vnf_instance_obj = vnflcm_fakes.return_vnf_instance( + fields.VnfInstanceState.INSTANTIATED) + vnfc_resource_info_obj = fakes.fake_vnfc_resource_info( + rsc_kind='StatefulSet') + vnf_instance_obj.instantiated_vnf_info.vnfc_resource_info = \ + [vnfc_resource_info_obj] + vim_connection_object = fakes.fake_vim_connection_info() + heal_request_data_obj = objects.heal_vnf_request.HealVnfRequest( + vnfc_instance_id=[vnfc_resource_info_obj.id]) + self.kubernetes.heal_vnf_wait(context=self.context, + vnf_instance=vnf_instance_obj, + vim_connection_info=vim_connection_object, + heal_vnf_request=heal_request_data_obj) + mock_list_namespaced_pod.assert_called_once() + + @mock.patch.object(client.AppsV1Api, 'read_namespaced_replica_set_scale') + @mock.patch.object(client.CoreV1Api, 'list_namespaced_pod') + def test_heal_vnf_wait_replica_set(self, mock_list_namespaced_pod, + mock_read_namespaced_replica_set_scale): + mock_list_namespaced_pod.return_value =\ + 
client.V1PodList(items=[ + fakes.get_fake_pod_info(kind='ReplicaSet')]) + mock_read_namespaced_replica_set_scale.return_value = \ + client.V1Scale(spec=client.V1ScaleSpec(replicas=1), + status=client.V1ScaleStatus(replicas=1)) + vnf_instance_obj = vnflcm_fakes.return_vnf_instance( + fields.VnfInstanceState.INSTANTIATED) + vnfc_resource_info_obj = fakes.fake_vnfc_resource_info( + rsc_kind='ReplicaSet') + vnf_instance_obj.instantiated_vnf_info.vnfc_resource_info = \ + [vnfc_resource_info_obj] + vim_connection_object = fakes.fake_vim_connection_info() + heal_request_data_obj = objects.heal_vnf_request.HealVnfRequest( + vnfc_instance_id=[vnfc_resource_info_obj.id]) + self.kubernetes.heal_vnf_wait(context=self.context, + vnf_instance=vnf_instance_obj, + vim_connection_info=vim_connection_object, + heal_vnf_request=heal_request_data_obj) + mock_list_namespaced_pod.assert_called_once() + + @mock.patch.object(client.AppsV1Api, 'read_namespaced_deployment_scale') + @mock.patch.object(client.CoreV1Api, 'list_namespaced_pod') + def test_heal_vnf_wait_deployment_same_namespace( + self, mock_list_namespaced_pod, + mock_read_namespaced_deployment_scale): + mock_list_namespaced_pod.return_value =\ + client.V1PodList(items=[ + fakes.get_fake_pod_info(kind='Deployment')]) + mock_read_namespaced_deployment_scale.return_value = \ + client.V1Scale(spec=client.V1ScaleSpec(replicas=1), + status=client.V1ScaleStatus(replicas=1)) + vnf_instance_obj = vnflcm_fakes.return_vnf_instance( + fields.VnfInstanceState.INSTANTIATED) + vnfc_resource_info_list = [ + fakes.fake_vnfc_resource_info(rsc_kind='Deployment'), + fakes.fake_vnfc_resource_info( + vdu_id='VDU2', rsc_kind='Deployment', rsc_name='fake_name')] + vnf_instance_obj.instantiated_vnf_info.vnfc_resource_info = \ + vnfc_resource_info_list + vim_connection_object = fakes.fake_vim_connection_info() + heal_request_data_obj = objects.heal_vnf_request.HealVnfRequest( + vnfc_instance_id=[ + vnfc_resource_info_list[0].id, + 
vnfc_resource_info_list[1].id]) + self.kubernetes.heal_vnf_wait(context=self.context, + vnf_instance=vnf_instance_obj, + vim_connection_info=vim_connection_object, + heal_vnf_request=heal_request_data_obj) + mock_list_namespaced_pod.assert_called_once() + + @mock.patch.object(client.CoreV1Api, 'list_namespaced_pod') + def test_heal_vnf_wait_deployment_no_need_waiting(self, + mock_list_namespaced_pod): + vnf_instance_obj = vnflcm_fakes.return_vnf_instance( + fields.VnfInstanceState.INSTANTIATED) + vnfc_resource_info_obj = fakes.fake_vnfc_resource_info( + rsc_kind='Deployment', pod_name="POD_NOT_FOUND") + vnf_instance_obj.instantiated_vnf_info.vnfc_resource_info = \ + [vnfc_resource_info_obj] + vim_connection_object = fakes.fake_vim_connection_info() + heal_request_data_obj = objects.heal_vnf_request.HealVnfRequest( + vnfc_instance_id=[vnfc_resource_info_obj.id]) + self.kubernetes.heal_vnf_wait(context=self.context, + vnf_instance=vnf_instance_obj, + vim_connection_info=vim_connection_object, + heal_vnf_request=heal_request_data_obj) + self.assertEqual(mock_list_namespaced_pod.call_count, 0) + + @mock.patch.object(client.AppsV1Api, 'read_namespaced_deployment_scale') + @mock.patch.object(client.CoreV1Api, 'list_namespaced_pod') + def test_heal_vnf_wait_retry_over(self, mock_list_namespaced_pod, + mock_read_namespaced_deployment_scale): + mock_list_namespaced_pod.return_value =\ + client.V1PodList(items=[fakes.get_fake_pod_info( + kind='Deployment', pod_status='Pending')]) + mock_read_namespaced_deployment_scale.return_value = \ + client.V1Scale(spec=client.V1ScaleSpec(replicas=1), + status=client.V1ScaleStatus(replicas=1)) + vnf_instance_obj = vnflcm_fakes.return_vnf_instance( + fields.VnfInstanceState.INSTANTIATED) + vnfc_resource_info_obj = \ + fakes.fake_vnfc_resource_info(rsc_kind='Deployment') + vnf_instance_obj.instantiated_vnf_info.vnfc_resource_info = \ + [vnfc_resource_info_obj] + vim_connection_object = fakes.fake_vim_connection_info() + 
heal_request_data_obj = objects.heal_vnf_request.HealVnfRequest( + vnfc_instance_id=[vnfc_resource_info_obj.id]) + self.assertRaises(vnfm.CNFHealWaitFailed, + self.kubernetes.heal_vnf_wait, + context=self.context, + vnf_instance=vnf_instance_obj, + vim_connection_info=vim_connection_object, + heal_vnf_request=heal_request_data_obj) + + @mock.patch.object(client.AppsV1Api, 'read_namespaced_deployment_scale') + @mock.patch.object(client.CoreV1Api, 'list_namespaced_pod') + def test_heal_vnf_wait_unknown_pod_status(self, mock_list_namespaced_pod, + mock_read_namespaced_deployment_scale): + mock_list_namespaced_pod.return_value =\ + client.V1PodList(items=[fakes.get_fake_pod_info( + kind='Deployment', pod_status='Unknown')]) + mock_read_namespaced_deployment_scale.return_value = \ + client.V1Scale(spec=client.V1ScaleSpec(replicas=1), + status=client.V1ScaleStatus(replicas=1)) + vnf_instance_obj = vnflcm_fakes.return_vnf_instance( + fields.VnfInstanceState.INSTANTIATED) + vnfc_resource_info_obj = \ + fakes.fake_vnfc_resource_info(rsc_kind='Deployment') + vnf_instance_obj.instantiated_vnf_info.vnfc_resource_info = \ + [vnfc_resource_info_obj] + vim_connection_object = fakes.fake_vim_connection_info() + heal_request_data_obj = objects.heal_vnf_request.HealVnfRequest( + vnfc_instance_id=[vnfc_resource_info_obj.id]) + self.assertRaises(vnfm.CNFHealWaitFailed, + self.kubernetes.heal_vnf_wait, + context=self.context, + vnf_instance=vnf_instance_obj, + vim_connection_info=vim_connection_object, + heal_vnf_request=heal_request_data_obj) + + @mock.patch.object(client.AppsV1Api, 'read_namespaced_deployment_scale') + @mock.patch.object(client.CoreV1Api, 'list_namespaced_pod') + def test_heal_vnf_wait_retry_over_unmatch_pod_num(self, + mock_list_namespaced_pod, mock_read_namespaced_deployment_scale): + mock_list_namespaced_pod.return_value =\ + client.V1PodList(items=[ + fakes.get_fake_pod_info(kind='Deployment'), + fakes.get_fake_pod_info(kind='Deployment')]) + 
mock_read_namespaced_deployment_scale.return_value = \ + client.V1Scale(spec=client.V1ScaleSpec(replicas=1), + status=client.V1ScaleStatus(replicas=1)) + vnf_instance_obj = vnflcm_fakes.return_vnf_instance( + fields.VnfInstanceState.INSTANTIATED) + vnfc_resource_info_obj = \ + fakes.fake_vnfc_resource_info(rsc_kind='Deployment') + vnf_instance_obj.instantiated_vnf_info.vnfc_resource_info = \ + [vnfc_resource_info_obj] + vim_connection_object = fakes.fake_vim_connection_info() + heal_request_data_obj = objects.heal_vnf_request.HealVnfRequest( + vnfc_instance_id=[vnfc_resource_info_obj.id]) + self.assertRaises(vnfm.CNFHealWaitFailed, + self.kubernetes.heal_vnf_wait, + context=self.context, + vnf_instance=vnf_instance_obj, + vim_connection_info=vim_connection_object, + heal_vnf_request=heal_request_data_obj) + + @mock.patch.object(client.CoreV1Api, 'list_namespaced_pod') + def test_post_heal_vnf_api_fail( + self, mock_list_namespaced_pod): + mock_list_namespaced_pod.side_effect =\ + client.rest.ApiException(status=500) + vnf_instance_obj = vnflcm_fakes.return_vnf_instance( + fields.VnfInstanceState.INSTANTIATED) + vnfc_resource_info_obj = fakes.fake_vnfc_resource_info( + rsc_kind='Deployment') + vnf_instance_obj.instantiated_vnf_info.vnfc_resource_info = \ + [vnfc_resource_info_obj] + vim_connection_object = fakes.fake_vim_connection_info() + heal_request_data_obj = objects.heal_vnf_request.HealVnfRequest( + vnfc_instance_id=[vnfc_resource_info_obj.id]) + self.assertRaises(client.rest.ApiException, + self.kubernetes.post_heal_vnf, + context=self.context, + vnf_instance=vnf_instance_obj, + vim_connection_info=vim_connection_object, + heal_vnf_request=heal_request_data_obj) + + @mock.patch.object(client.CoreV1Api, 'list_namespaced_pod') + def test_post_heal_vnf_deployment_exist_added_pod_names( + self, mock_list_namespaced_pod): + mock_list_namespaced_pod.return_value =\ + client.V1PodList(items=[ + fakes.get_fake_pod_info(kind='Deployment', + name='fake_name', 
pod_name="fake_name-1234567890-actp1"), + fakes.get_fake_pod_info(kind='Deployment', + name='fake_name', pod_name="fake_name-1234567890-actp2"), + fakes.get_fake_pod_info(kind='Deployment', + name='fake_name', pod_name="fake_name-1234567890-actp3")]) + vnf_instance_obj = vnflcm_fakes.return_vnf_instance( + fields.VnfInstanceState.INSTANTIATED) + vnfc_resource_info_list = [ + fakes.fake_vnfc_resource_info(rsc_kind='Deployment', + rsc_name='fake_name', pod_name="fake_name-1234567890-strp1"), + fakes.fake_vnfc_resource_info(rsc_kind='Deployment', + rsc_name='fake_name', pod_name="fake_name-1234567890-strp2"), + fakes.fake_vnfc_resource_info(rsc_kind='Deployment', + rsc_name='fake_name', pod_name="POD_NOT_FOUND")] + vnf_instance_obj.instantiated_vnf_info.vnfc_resource_info =\ + vnfc_resource_info_list + vim_connection_object = fakes.fake_vim_connection_info() + heal_request_data_obj = objects.heal_vnf_request.HealVnfRequest( + vnfc_instance_id=[vnfc_resource_info_list[0].id, + vnfc_resource_info_list[1].id, + vnfc_resource_info_list[2].id]) + self.kubernetes.post_heal_vnf(context=self.context, + vnf_instance=vnf_instance_obj, + vim_connection_info=vim_connection_object, + heal_vnf_request=heal_request_data_obj) + mock_list_namespaced_pod.assert_called_once() + # oldest creation Pod is set to POD_NOT_FOUND entry + self.assertEqual( + vnfc_resource_info_list[2].compute_resource.resource_id, + mock_list_namespaced_pod.return_value.items[0].metadata.name) + # newest creation Pod is set to healed entry + self.assertEqual( + vnfc_resource_info_list[1].compute_resource.resource_id, + mock_list_namespaced_pod.return_value.items[2].metadata.name) + self.assertEqual( + vnfc_resource_info_list[0].compute_resource.resource_id, + mock_list_namespaced_pod.return_value.items[1].metadata.name) + + @mock.patch.object(client.CoreV1Api, 'list_namespaced_pod') + def test_post_heal_vnf_deployment_with_pod_not_found_entry( + self, mock_list_namespaced_pod): + 
mock_list_namespaced_pod.return_value =\ + client.V1PodList(items=[ + fakes.get_fake_pod_info(kind='Deployment', + name='fake_name', pod_name="fake_name-1234567890-abcdf")]) + vnf_instance_obj = vnflcm_fakes.return_vnf_instance( + fields.VnfInstanceState.INSTANTIATED) + vnfc_resource_info_list = [ + fakes.fake_vnfc_resource_info(rsc_kind='Deployment', + rsc_name='fake_name', pod_name="POD_NOT_FOUND")] + vnf_instance_obj.instantiated_vnf_info.vnfc_resource_info =\ + vnfc_resource_info_list + vim_connection_object = fakes.fake_vim_connection_info() + heal_request_data_obj = objects.heal_vnf_request.HealVnfRequest( + vnfc_instance_id=[vnfc_resource_info_list[0].id]) + self.kubernetes.post_heal_vnf(context=self.context, + vnf_instance=vnf_instance_obj, + vim_connection_info=vim_connection_object, + heal_vnf_request=heal_request_data_obj) + mock_list_namespaced_pod.assert_called_once() + self.assertEqual( + vnfc_resource_info_list[0].compute_resource.resource_id, + mock_list_namespaced_pod.return_value.items[0].metadata.name) + + @mock.patch.object(client.CoreV1Api, 'list_namespaced_pod') + def test_post_heal_vnf_daemon_set_exist_added_pod_names( + self, mock_list_namespaced_pod): + mock_list_namespaced_pod.return_value =\ + client.V1PodList(items=[ + fakes.get_fake_pod_info(kind='DaemonSet', + name='fake_name', pod_name="fake_name-12346")]) + vnf_instance_obj = vnflcm_fakes.return_vnf_instance( + fields.VnfInstanceState.INSTANTIATED) + vnfc_resource_info_obj = fakes.fake_vnfc_resource_info( + rsc_kind='DaemonSet', + rsc_name='fake_name') + vnf_instance_obj.instantiated_vnf_info.vnfc_resource_info = \ + [vnfc_resource_info_obj] + vim_connection_object = fakes.fake_vim_connection_info() + heal_request_data_obj = objects.heal_vnf_request.HealVnfRequest( + vnfc_instance_id=[vnfc_resource_info_obj.id]) + self.kubernetes.post_heal_vnf(context=self.context, + vnf_instance=vnf_instance_obj, + vim_connection_info=vim_connection_object, + heal_vnf_request=heal_request_data_obj) 
+ mock_list_namespaced_pod.assert_called_once() + self.assertEqual( + len(vnf_instance_obj.instantiated_vnf_info.vnfc_resource_info), 1) + after_vnfc = vnf_instance_obj.instantiated_vnf_info.vnfc_resource_info + self.assertEqual( + mock_list_namespaced_pod.return_value.items[0].metadata.name, + after_vnfc[0].compute_resource.resource_id) + + @mock.patch.object(client.CoreV1Api, 'list_namespaced_pod') + def test_post_heal_vnf_deployment_no_exist_added_pod_names( + self, mock_list_namespaced_pod): + mock_list_namespaced_pod.return_value =\ + client.V1PodList(items=[ + fakes.get_fake_pod_info(kind='Deployment', + name='fake_name')]) + vnf_instance_obj = vnflcm_fakes.return_vnf_instance( + fields.VnfInstanceState.INSTANTIATED) + vnfc_resource_info_obj = fakes.fake_vnfc_resource_info( + rsc_kind='Deployment', + rsc_name='fake_name') + vnf_instance_obj_before =\ + jsonutils.loads(vnfc_resource_info_obj.metadata. + get("Pod")).get("name") + vnf_instance_obj.instantiated_vnf_info.vnfc_resource_info = \ + [vnfc_resource_info_obj] + vim_connection_object = fakes.fake_vim_connection_info() + heal_request_data_obj = objects.heal_vnf_request.HealVnfRequest( + vnfc_instance_id=[vnfc_resource_info_obj.id]) + self.kubernetes.post_heal_vnf(context=self.context, + vnf_instance=vnf_instance_obj, + vim_connection_info=vim_connection_object, + heal_vnf_request=heal_request_data_obj) + mock_list_namespaced_pod.assert_called_once() + self.assertEqual(len(vnf_instance_obj.instantiated_vnf_info. + vnfc_resource_info), 1) + self.assertEqual(vnf_instance_obj_before, + vnf_instance_obj.instantiated_vnf_info. + vnfc_resource_info[0].compute_resource. 
+ resource_id) + + @mock.patch.object(client.CoreV1Api, 'list_namespaced_pod') + def test_post_heal_vnf_daemon_set(self, mock_list_namespaced_pod): + mock_list_namespaced_pod.return_value =\ + client.V1PodList(items=[ + fakes.get_fake_pod_info(kind='DaemonSet')]) + vnf_instance_obj = vnflcm_fakes.return_vnf_instance( + fields.VnfInstanceState.INSTANTIATED) + vnfc_resource_info_obj = fakes.fake_vnfc_resource_info( + rsc_kind='DaemonSet') + vnf_instance_obj.instantiated_vnf_info.vnfc_resource_info = \ + [vnfc_resource_info_obj] + vim_connection_object = fakes.fake_vim_connection_info() + heal_request_data_obj = objects.heal_vnf_request.HealVnfRequest( + vnfc_instance_id=[vnfc_resource_info_obj.id]) + self.kubernetes.post_heal_vnf(context=self.context, + vnf_instance=vnf_instance_obj, + vim_connection_info=vim_connection_object, + heal_vnf_request=heal_request_data_obj) + mock_list_namespaced_pod.assert_called_once() + + @mock.patch.object(client.CoreV1Api, 'list_namespaced_pod') + def test_post_heal_vnf_pod(self, mock_list_namespaced_pod): + mock_list_namespaced_pod.return_value =\ + client.V1PodList(items=[ + fakes.get_fake_pod_info(kind='Pod')]) + vnf_instance_obj = vnflcm_fakes.return_vnf_instance( + fields.VnfInstanceState.INSTANTIATED) + vnfc_resource_info_obj = fakes.fake_vnfc_resource_info( + rsc_kind='Pod') + vnf_instance_obj.instantiated_vnf_info.vnfc_resource_info =\ + [vnfc_resource_info_obj] + vim_connection_object = fakes.fake_vim_connection_info() + heal_request_data_obj = objects.heal_vnf_request.HealVnfRequest( + vnfc_instance_id=[vnfc_resource_info_obj.id]) + self.kubernetes.post_heal_vnf(context=self.context, + vnf_instance=vnf_instance_obj, + vim_connection_info=vim_connection_object, + heal_vnf_request=heal_request_data_obj) + self.assertEqual(mock_list_namespaced_pod.call_count, 0) + + @mock.patch.object(client.CoreV1Api, 'list_namespaced_pod') + def test_post_heal_vnf_stateful_set(self, mock_list_namespaced_pod): + 
mock_list_namespaced_pod.return_value =\ + client.V1PodList(items=[ + fakes.get_fake_pod_info(kind='StatefulSet')]) + vnf_instance_obj = vnflcm_fakes.return_vnf_instance( + fields.VnfInstanceState.INSTANTIATED) + vnfc_resource_info_obj = fakes.fake_vnfc_resource_info( + rsc_kind='StatefulSet') + vnf_instance_obj.instantiated_vnf_info.vnfc_resource_info =\ + [vnfc_resource_info_obj] + vim_connection_object = fakes.fake_vim_connection_info() + heal_request_data_obj = objects.heal_vnf_request.HealVnfRequest( + vnfc_instance_id=[vnfc_resource_info_obj.id]) + self.kubernetes.post_heal_vnf(context=self.context, + vnf_instance=vnf_instance_obj, + vim_connection_info=vim_connection_object, + heal_vnf_request=heal_request_data_obj) + self.assertEqual(mock_list_namespaced_pod.call_count, 0) diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/test_openstack_driver.py b/tacker/tests/unit/vnfm/infra_drivers/openstack/test_openstack_driver.py index b99ce18ff..d1838cff9 100644 --- a/tacker/tests/unit/vnfm/infra_drivers/openstack/test_openstack_driver.py +++ b/tacker/tests/unit/vnfm/infra_drivers/openstack/test_openstack_driver.py @@ -1382,10 +1382,11 @@ class TestOpenStack(base.FixturedTestCase): vnf_link_ports[0].resource_handle.vim_level_resource_type, 'physical_resource_id': uuidsentinel.cp1_resource_id}] + inst_req_info = fd_utils.get_instantiate_vnf_request() self._responses_in_stack_list(inst_vnf_info.instance_id, resources=resources) self.openstack.post_vnf_instantiation( - self.context, vnf_instance, vim_connection_info) + self.context, vnf_instance, vim_connection_info, inst_req_info) self.assertEqual(vnf_instance.instantiated_vnf_info. 
vnfc_resource_info[0].metadata['stack_id'], inst_vnf_info.instance_id) @@ -1453,8 +1454,9 @@ class TestOpenStack(base.FixturedTestCase): 'physical_resource_id': uuidsentinel.v_l_resource_info_id}] self._responses_in_stack_list(inst_vnf_info.instance_id, resources=resources) + inst_req_info = fd_utils.get_instantiate_vnf_request() self.openstack.post_vnf_instantiation( - self.context, vnf_instance, vim_connection_info) + self.context, vnf_instance, vim_connection_info, inst_req_info) self.assertEqual(vnf_instance.instantiated_vnf_info. vnfc_resource_info[0].metadata['stack_id'], inst_vnf_info.instance_id) @@ -1623,7 +1625,7 @@ class TestOpenStack(base.FixturedTestCase): "UPDATE_COMPLETE"]) stack = self.openstack.heal_vnf_wait( - self.context, vnf_instance, vim_connection_info) + self.context, vnf_instance, vim_connection_info, None) self.assertEqual('UPDATE_COMPLETE', stack.stack_status) def test_heal_vnf_wait_fail(self): @@ -1640,7 +1642,7 @@ class TestOpenStack(base.FixturedTestCase): self.openstack.STACK_RETRIES = 1 result = self.assertRaises(vnfm.VNFHealWaitFailed, self.openstack.heal_vnf_wait, self.context, vnf_instance, - vim_connection_info) + vim_connection_info, None) expected_msg = ("VNF Heal action is not completed within 10 seconds " "on stack %s") % inst_vnf_info.instance_id diff --git a/tacker/vnflcm/vnflcm_driver.py b/tacker/vnflcm/vnflcm_driver.py index be9267c1f..3e267d242 100644 --- a/tacker/vnflcm/vnflcm_driver.py +++ b/tacker/vnflcm/vnflcm_driver.py @@ -611,7 +611,8 @@ class VnfLcmDriver(abstract_driver.VnfInstanceAbstractDriver): self._vnf_manager.invoke( vim_connection_info.vim_type, 'heal_vnf_wait', context=context, vnf_instance=vnf_instance, - vim_connection_info=vim_connection_info) + vim_connection_info=vim_connection_info, + heal_vnf_request=heal_vnf_request) except Exception as exp: LOG.error("Failed to update vnf %(id)s resources for instance " "%(instance)s. 
Error: %(error)s", @@ -675,7 +676,8 @@ class VnfLcmDriver(abstract_driver.VnfInstanceAbstractDriver): self._vnf_manager.invoke( vim_connection_info.vim_type, 'post_vnf_instantiation', context=context, vnf_instance=vnf_instance, - vim_connection_info=vim_connection_info) + vim_connection_info=vim_connection_info, + instantiate_vnf_req=instantiate_vnf_request) except Exception as exc: with excutils.save_and_reraise_exception() as exc_ctxt: diff --git a/tacker/vnfm/infra_drivers/abstract_driver.py b/tacker/vnfm/infra_drivers/abstract_driver.py index 33ec86369..34cbd098d 100644 --- a/tacker/vnfm/infra_drivers/abstract_driver.py +++ b/tacker/vnfm/infra_drivers/abstract_driver.py @@ -97,7 +97,7 @@ class VnfAbstractDriver(extensions.PluginInterface, metaclass=abc.ABCMeta): @abc.abstractmethod def post_vnf_instantiation(self, context, vnf_instance, - vim_connection_info): + vim_connection_info, instantiate_vnf_req): pass @abc.abstractmethod @@ -114,7 +114,8 @@ class VnfAbstractDriver(extensions.PluginInterface, metaclass=abc.ABCMeta): pass @abc.abstractmethod - def heal_vnf_wait(self, context, vnf_instance, vim_connection_info): + def heal_vnf_wait(self, context, vnf_instance, vim_connection_info, + heal_vnf_request): """Check vnf is healed successfully""" pass diff --git a/tacker/vnfm/infra_drivers/kubernetes/k8s/translate_outputs.py b/tacker/vnfm/infra_drivers/kubernetes/k8s/translate_outputs.py index af6e80b02..8b4a699df 100644 --- a/tacker/vnfm/infra_drivers/kubernetes/k8s/translate_outputs.py +++ b/tacker/vnfm/infra_drivers/kubernetes/k8s/translate_outputs.py @@ -486,6 +486,12 @@ class Transformer(object): return sorted_k8s_objs + def get_object_meta(self, content): + must_param = {} + v1_object_meta = client.V1ObjectMeta() + self._init_k8s_obj(v1_object_meta, content, must_param) + return v1_object_meta + # config_labels configures label def config_labels(self, deployment_name=None, scaling_name=None): label = dict() diff --git 
# NOTE(review): reconstructed from a whitespace-mangled patch. These are
# methods of the Kubernetes infra-driver class
# (tacker/vnfm/infra_drivers/kubernetes/kubernetes_driver.py); they must be
# re-indented one level when spliced back into the class body. Module-level
# names referenced here (client, jsonutils, uuidutils, objects, vnfm,
# vnflcm_utils, translate_outputs, tosca_template, LOG, _,
# VNFC_POD_NOT_FOUND) come from that module's imports.

def post_vnf_instantiation(self, context, vnf_instance,
                           vim_connection_info, instantiate_vnf_req):
    """Initially store VnfcResourceInfo after instantiation.

    Queries the Kubernetes VIM for the Pods created by the instantiation
    and records, per Pod, its name (as compute resource id), the kind of
    the owning Kubernetes resource, that resource's metadata, and the VDU
    id it maps to.

    :param context: tacker request context
    :param vnf_instance: VnfInstance object (instantiated)
    :param vim_connection_info: VimConnectionInfo for the Kubernetes VIM
    :param instantiate_vnf_req: InstantiateVnfRequest holding the
        Kubernetes definition file list
    """
    auth_attr = vim_connection_info.access_info
    auth_cred, file_descriptor = self._get_auth_creds(auth_attr)
    try:
        # Kubernetes object files declared in the instantiate request
        target_k8s_files = self._get_target_k8s_files(instantiate_vnf_req)
        vnf_package_path = vnflcm_utils._get_vnf_package_path(
            context, vnf_instance.vnfd_id)
        transformer = translate_outputs.Transformer(None, None, None, None)
        k8s_objs = transformer.get_k8s_objs_from_yaml(
            target_k8s_files, vnf_package_path)
        # Build {vdu_name (== k8s resource name): vdu_id} from the VNFD
        vnfd_dict = vnflcm_utils._get_vnfd_dict(
            context, vnf_instance.vnfd_id,
            vnf_instance.instantiated_vnf_info.flavour_id)
        tosca = tosca_template.ToscaTemplate(
            parsed_params={}, a_file=False, yaml_dict_tpl=vnfd_dict)
        vdu_ids = {}
        for node_tpl in tosca.topology_template.nodetemplates:
            for node_name, node_value in node_tpl.templates.items():
                if node_value.get('type') == "tosca.nodes.nfv.Vdu.Compute":
                    vdu_name = node_value.get('properties').get('name')
                    vdu_ids[vdu_name] = node_name
        core_v1_api_client = self.kubernetes.get_core_v1_api_client(
            auth=auth_cred)
        target_kinds = ["Pod", "Deployment", "DaemonSet", "StatefulSet",
                        "ReplicaSet"]
        pod_list_dict = {}
        vnfc_resource_list = []
        for k8s_obj in k8s_objs:
            rsc_kind = k8s_obj.get('object').kind
            if rsc_kind not in target_kinds:
                # only Pod-producing kinds map to VNFCs
                continue
            rsc_name = k8s_obj.get('object').metadata.name
            namespace = k8s_obj.get('object').metadata.namespace
            if not namespace:
                namespace = "default"
            # one list_namespaced_pod call per namespace (cached)
            if namespace in pod_list_dict:
                pod_list = pod_list_dict[namespace]
            else:
                pod_list = core_v1_api_client.list_namespaced_pod(
                    namespace=namespace)
                pod_list_dict[namespace] = pod_list
            for pod in pod_list.items:
                pod_name = pod.metadata.name
                if not self._is_match_pod_naming_rule(
                        rsc_kind, rsc_name, pod_name):
                    continue
                # store metadata of the owning resource, and (for
                # controller kinds) of its Pod template as well
                metadata = {rsc_kind: jsonutils.dumps(
                    k8s_obj.get('object').metadata.to_dict())}
                if rsc_kind != 'Pod':
                    metadata['Pod'] = jsonutils.dumps(
                        k8s_obj.get('object').spec.template.metadata
                        .to_dict())
                vnfc_resource = objects.VnfcResourceInfo()
                vnfc_resource.id = uuidutils.generate_uuid()
                vnfc_resource.vdu_id = vdu_ids.get(rsc_name)
                resource = objects.ResourceHandle()
                resource.resource_id = pod_name
                resource.vim_level_resource_type = rsc_kind
                vnfc_resource.compute_resource = resource
                vnfc_resource.metadata = metadata
                vnfc_resource_list.append(vnfc_resource)
        if vnfc_resource_list:
            vnf_instance.instantiated_vnf_info.vnfc_resource_info = \
                vnfc_resource_list
    except Exception as e:
        LOG.error('Update vnfc resource info got an error due to %s', e)
        raise
    finally:
        self.clean_authenticate_vim(auth_cred, file_descriptor)

def _get_vnfc_rscs_with_vnfc_id(self, inst_vnf_info, heal_vnf_request):
    """Return the VnfcResourceInfo entries targeted by the heal request.

    An empty ``vnfc_instance_id`` means the whole VNF, i.e. all entries.
    """
    if not heal_vnf_request.vnfc_instance_id:
        return list(inst_vnf_info.vnfc_resource_info)
    target_ids = heal_vnf_request.vnfc_instance_id
    return [rsc for rsc in inst_vnf_info.vnfc_resource_info
            if rsc.id in target_ids]

def _get_added_pod_names(self, core_v1_api_client, inst_vnf_info, vdu_id,
                         vnfc_resource, pod_list_dict):
    """Diff actual Pods on the VIM against stored VnfcResourceInfo.

    Returns ``(actual_pod_names, added_pod_names)`` where
    ``added_pod_names`` are Pods present on the VIM but not recorded in
    any VnfcResourceInfo of the same VDU. ``pod_list_dict`` caches the
    per-namespace Pod listing across calls.
    """
    compute_resource = vnfc_resource.compute_resource
    rsc_kind = compute_resource.vim_level_resource_type
    rsc_metadata = jsonutils.loads(vnfc_resource.metadata.get(rsc_kind))
    namespace = rsc_metadata.get('namespace') or "default"
    rsc_name = rsc_metadata.get('name')
    if namespace in pod_list_dict:
        pod_list = pod_list_dict[namespace]
    else:
        pod_list = core_v1_api_client.list_namespaced_pod(
            namespace=namespace)
        pod_list_dict[namespace] = pod_list
    # newest creation_timestamp first, so .pop() yields newest Pods last
    sorted_pod_list = sorted(
        pod_list.items,
        key=lambda p: p.metadata.creation_timestamp, reverse=True)
    actual_pod_names = [
        pod.metadata.name for pod in sorted_pod_list
        if self._is_match_pod_naming_rule(
            rsc_kind, rsc_name, pod.metadata.name)]
    stored_pod_names = [
        rsc.compute_resource.resource_id
        for rsc in inst_vnf_info.vnfc_resource_info
        if rsc.vdu_id == vnfc_resource.vdu_id]
    added_pod_names = [name for name in actual_pod_names
                       if name not in stored_pod_names]
    return actual_pod_names, added_pod_names

def heal_vnf(self, context, vnf_instance, vim_connection_info,
             heal_vnf_request):
    """Heal vnfc instances (mapped as Pods).

    Deletes the Pods backing the targeted VnfcResourceInfo entries.
    Pods owned by a controller (Deployment/DaemonSet/StatefulSet/
    ReplicaSet) are re-created by Kubernetes automatically; standalone
    Pods are re-created here from information read before deletion.
    Before healing, resource_ids of non-target entries of the same VDU
    are refreshed, because kubelet may already have re-created those
    Pods under new names.

    :raises vnfm.CNFHealFailed: on VIM API failure, timeout, or an
        unsupported resource kind
    """
    auth_attr = vim_connection_info.access_info
    auth_cred, file_descriptor = self._get_auth_creds(auth_attr)
    inst_vnf_info = vnf_instance.instantiated_vnf_info
    try:
        core_v1_api_client = self.kubernetes.get_core_v1_api_client(
            auth=auth_cred)
        vnfc_resources = self._get_vnfc_rscs_with_vnfc_id(
            inst_vnf_info=inst_vnf_info,
            heal_vnf_request=heal_vnf_request)
        # Refresh resource_id of entries that are NOT heal targets:
        # their Pods may have been renamed by automatic re-creation.
        updated_vdu_ids = []
        pod_list_dict = {}
        for vnfc_resource in vnfc_resources:
            vdu_id = vnfc_resource.vdu_id
            if vdu_id in updated_vdu_ids:
                # this VDU was already reconciled
                continue
            actual_pod_names, added_pod_names = self._get_added_pod_names(
                core_v1_api_client, inst_vnf_info, vdu_id, vnfc_resource,
                pod_list_dict)
            if added_pod_names:
                heal_target_ids = heal_vnf_request.vnfc_instance_id
                for vnfc_rsc in inst_vnf_info.vnfc_resource_info:
                    stored_pod_name = \
                        vnfc_rsc.compute_resource.resource_id
                    if (vnfc_rsc.id not in heal_target_ids and
                            vdu_id == vnfc_rsc.vdu_id and
                            stored_pod_name not in actual_pod_names):
                        pod_name = added_pod_names.pop()
                        vnfc_rsc.compute_resource.resource_id = pod_name
                        LOG.warning("Update resource_id before healing,"
                                    " vnfc_resource_info.id:%(vnfc_id)s,"
                                    " pod_name:%(pod_name)s",
                                    {'vnfc_id': vnfc_rsc.id,
                                     'pod_name': pod_name})
                        if not added_pod_names:
                            break
            updated_vdu_ids.append(vdu_id)

        for vnfc_resource in vnfc_resources:
            body = client.V1DeleteOptions(propagation_policy='Foreground')
            compute_resource = vnfc_resource.compute_resource
            rsc_kind = compute_resource.vim_level_resource_type
            pod_name = compute_resource.resource_id
            rsc_metadata = jsonutils.loads(
                vnfc_resource.metadata.get(rsc_kind))
            namespace = rsc_metadata.get('namespace') or "default"

            if rsc_kind == 'Pod':
                rsc_name = rsc_metadata.get('name')
                # Keep the Pod spec for re-creation before deleting it
                pod_info = core_v1_api_client.read_namespaced_pod(
                    namespace=namespace, name=rsc_name)
                core_v1_api_client.delete_namespaced_pod(
                    namespace=namespace, name=pod_name, body=body)
                # Wait until the Pod is actually gone (read returns 404)
                stack_retries = self.STACK_RETRIES
                while stack_retries > 0:
                    try:
                        core_v1_api_client.read_namespaced_pod(
                            namespace=namespace, name=pod_name)
                    except Exception as e:
                        # FIX: only ApiException carries `status`; use
                        # getattr so other errors can't raise
                        # AttributeError here
                        if getattr(e, 'status', None) == 404:
                            break
                        error_reason = _(
                            "Failed the request to read a"
                            " Pod information. namespace: {namespace},"
                            " pod_name: {name}, kind: {kind}, Reason: "
                            "{exception}").format(
                                namespace=namespace, name=pod_name,
                                kind=rsc_kind, exception=e)
                        raise vnfm.CNFHealFailed(reason=error_reason)
                    stack_retries -= 1
                    time.sleep(self.STACK_RETRY_WAIT)
                if stack_retries == 0:
                    # FIX: original adjacent string literals produced
                    # "completedwithin" (missing space)
                    error_reason = _(
                        "Resource healing is not completed"
                        " within {wait} seconds").format(wait=(
                            self.STACK_RETRIES * self.STACK_RETRY_WAIT))
                    LOG.error("CNF Healing failed: %(reason)s",
                              {'reason': error_reason})
                    raise vnfm.CNFHealFailed(reason=error_reason)
                # Re-create the Pod from the retained information
                transformer = translate_outputs.Transformer(
                    None, None, None, None)
                metadata = transformer.get_object_meta(rsc_metadata)
                body = client.V1Pod(metadata=metadata, spec=pod_info.spec)
                core_v1_api_client.create_namespaced_pod(
                    namespace=namespace, body=body)
            elif rsc_kind in ('Deployment', 'DaemonSet', 'StatefulSet',
                              'ReplicaSet'):
                try:
                    # the controller re-creates the Pod automatically
                    core_v1_api_client.delete_namespaced_pod(
                        namespace=namespace, name=pod_name, body=body)
                except Exception as e:
                    # FIX: guard `.status` access (see NOTE above)
                    if getattr(e, 'status', None) == 404:
                        # Pod already gone: flag the entry so
                        # heal_vnf_wait/post_heal_vnf can reconcile it
                        vnfc_resource.compute_resource.resource_id = \
                            VNFC_POD_NOT_FOUND
                        LOG.warning("Target pod to delete is not found,"
                                    " vnfc_resource_info.id:%(vnfc_id)s,"
                                    " pod_name:%(pod_name)s",
                                    {'vnfc_id': vnfc_resource.id,
                                     'pod_name': pod_name})
                    else:
                        error_reason = _(
                            "Failed the request to delete a "
                            "Pod. namespace: {namespace}, pod_name: {name}"
                            ", kind: {kind}, Reason: {exception}").format(
                                namespace=namespace, name=pod_name,
                                kind=rsc_kind, exception=e)
                        raise vnfm.CNFHealFailed(reason=error_reason)
            else:
                # FIX: typo "Kubertnetes" -> "Kubernetes"
                error_reason = _(
                    "{vnfc_instance_id} is a kind of Kubernetes"
                    " resource that is not covered").format(
                        vnfc_instance_id=vnfc_resource.id)
                LOG.error("CNF Heal failed: %(reason)s",
                          {'reason': error_reason})
                raise vnfm.CNFHealFailed(reason=error_reason)
    except Exception as e:
        LOG.error('Healing CNF got an error due to %s', e)
        raise
    finally:
        self.clean_authenticate_vim(auth_cred, file_descriptor)

def heal_vnf_wait(self, context, vnf_instance,
                  vim_connection_info, heal_vnf_request):
    """Wait until every Pod touched by the heal reports Running.

    :raises vnfm.CNFHealWaitFailed: when a Pod status is Unknown or the
        Pods do not settle within STACK_RETRIES * STACK_RETRY_WAIT
    """
    auth_attr = vim_connection_info.access_info
    auth_cred, file_descriptor = self._get_auth_creds(auth_attr)
    try:
        core_v1_api_client = self.kubernetes.get_core_v1_api_client(
            auth=auth_cred)
        app_v1_api_client = self.kubernetes.get_app_v1_api_client(
            auth=auth_cred)
        vnfc_resources = self._get_vnfc_rscs_with_vnfc_id(
            inst_vnf_info=vnf_instance.instantiated_vnf_info,
            heal_vnf_request=heal_vnf_request)
        # entries whose Pod was missing at heal time cannot be waited on
        vnfc_resources = [
            rsc for rsc in vnfc_resources
            if rsc.compute_resource.resource_id != VNFC_POD_NOT_FOUND]
        if not vnfc_resources:
            # nothing was healed, so there is nothing to wait for
            return
        # Collect (kind, name, namespace) of the owning k8s resources
        k8s_resources = []
        for vnfc_resource in vnfc_resources:
            info = {}
            compute_resource = vnfc_resource.compute_resource
            info['kind'] = compute_resource.vim_level_resource_type
            rsc_metadata = jsonutils.loads(
                vnfc_resource.metadata.get(info['kind']))
            info['name'] = rsc_metadata.get('name')
            info['namespace'] = rsc_metadata.get('namespace')
            k8s_resources.append(info)
        # drop duplicates (serialize -> set -> parse back)
        k8s_resources = list(map(jsonutils.loads,
                                 set(map(jsonutils.dumps, k8s_resources))))
        # fetch desired replicas for kinds that expose the scale API
        scalable_kinds = ["Deployment", "ReplicaSet", "StatefulSet"]
        for k8s_resource in k8s_resources:
            if k8s_resource.get('kind') in scalable_kinds:
                scale_info = self._call_read_scale_api(
                    app_v1_api_client=app_v1_api_client,
                    namespace=k8s_resource.get('namespace'),
                    name=k8s_resource.get('name'),
                    kind=k8s_resource.get('kind'))
                k8s_resource['replicas'] = scale_info.spec.replicas
        stack_retries = self.STACK_RETRIES
        status = 'Pending'
        while status == 'Pending' and stack_retries > 0:
            pods_information = []
            pod_list_dict = {}
            is_unmatch_pods_num = False
            for k8s_resource in k8s_resources:
                namespace = k8s_resource.get('namespace')
                if namespace in pod_list_dict:
                    pod_list = pod_list_dict[namespace]
                else:
                    pod_list = core_v1_api_client.list_namespaced_pod(
                        namespace=namespace)
                    pod_list_dict[namespace] = pod_list
                tmp_pods_info = [
                    pod for pod in pod_list.items
                    if self._is_match_pod_naming_rule(
                        k8s_resource.get('kind'),
                        k8s_resource.get('name'),
                        pod.metadata.name)]
                # NOTE(ueha): a Pod being deleted still reports status
                # "Running", which would store stale information in
                # vnfcResourceInfo. For scalable kinds, compare the
                # actual Pod count with the desired replicas and keep
                # waiting until deletion completes.
                if (k8s_resource.get('kind') in scalable_kinds and
                        k8s_resource.get('replicas') !=
                        len(tmp_pods_info)):
                    LOG.warning("Unmatch number of pod. (kind: %(kind)s,"
                                " name: %(name)s, replicas: %(replicas)s,"
                                " actual_pod_num: %(actual_pod_num)s)", {
                                    'kind': k8s_resource.get('kind'),
                                    'name': k8s_resource.get('name'),
                                    'replicas': str(
                                        k8s_resource.get('replicas')),
                                    'actual_pod_num': str(
                                        len(tmp_pods_info))})
                    is_unmatch_pods_num = True
                pods_information.extend(tmp_pods_info)
            status = self._get_pod_status(pods_information)
            if status == 'Unknown':
                error_reason = _("Pod status is found Unknown")
                LOG.warning("CNF Healing failed: %(reason)s",
                            {'reason': error_reason})
                raise vnfm.CNFHealWaitFailed(reason=error_reason)
            elif status == 'Pending' or is_unmatch_pods_num:
                time.sleep(self.STACK_RETRY_WAIT)
                stack_retries -= 1
                # force another iteration even if status was Running
                # but the pod count did not match yet
                status = 'Pending'
        if stack_retries == 0 and status != 'Running':
            error_reason = _("Resource healing is not completed within"
                             " {wait} seconds").format(
                wait=(self.STACK_RETRIES * self.STACK_RETRY_WAIT))
            LOG.error("CNF Healing failed: %(reason)s",
                      {'reason': error_reason})
            raise vnfm.CNFHealWaitFailed(reason=error_reason)
    except Exception as e:
        LOG.error('Healing wait CNF got an error due to %s', e)
        raise
    finally:
        self.clean_authenticate_vim(auth_cred, file_descriptor)

def post_heal_vnf(self, context, vnf_instance, vim_connection_info,
                  heal_vnf_request):
    """Update VnfcResourceInfo after healing.

    For controller kinds whose Pod names change on re-creation
    (Deployment, DaemonSet, ReplicaSet), re-map the heal-target entries
    (and any entries flagged POD_NOT_FOUND during heal_vnf) to the newly
    created Pod names.
    """
    auth_attr = vim_connection_info.access_info
    auth_cred, file_descriptor = self._get_auth_creds(auth_attr)
    inst_vnf_info = vnf_instance.instantiated_vnf_info
    try:
        core_v1_api_client = self.kubernetes.get_core_v1_api_client(
            auth=auth_cred)
        vnfc_resources = self._get_vnfc_rscs_with_vnfc_id(
            inst_vnf_info=inst_vnf_info,
            heal_vnf_request=heal_vnf_request)
        updated_vdu_ids = []
        pod_list_dict = {}
        for vnfc_resource in vnfc_resources:
            vdu_id = vnfc_resource.vdu_id
            if vdu_id in updated_vdu_ids:
                # this VDU was already reconciled
                continue
            compute_resource = vnfc_resource.compute_resource
            rsc_kind = compute_resource.vim_level_resource_type
            if rsc_kind in ('Pod', 'StatefulSet'):
                # Pod names are stable for these kinds; nothing to update
                continue
            actual_pod_names, added_pod_names = self._get_added_pod_names(
                core_v1_api_client, inst_vnf_info, vdu_id, vnfc_resource,
                pod_list_dict)
            updated_vnfc_ids = []
            # first, entries flagged POD_NOT_FOUND during heal_vnf
            if added_pod_names:
                for vnfc_rsc in vnfc_resources:
                    rsc_id = vnfc_rsc.compute_resource.resource_id
                    if (vdu_id == vnfc_rsc.vdu_id and
                            rsc_id == VNFC_POD_NOT_FOUND):
                        pod_name = added_pod_names.pop()
                        vnfc_rsc.compute_resource.resource_id = pod_name
                        LOG.warning("Update resource_id of the"
                                    " entry where the pod was not found,"
                                    " vnfc_resource_info.id:%(vnfc_id)s,"
                                    " new podname:%(pod_name)s",
                                    {'vnfc_id': vnfc_rsc.id,
                                     'pod_name': pod_name})
                        updated_vnfc_ids.append(vnfc_rsc.id)
                        if not added_pod_names:
                            break
            # then, entries that were healed successfully
            if added_pod_names:
                for vnfc_rsc_id in heal_vnf_request.vnfc_instance_id:
                    if vnfc_rsc_id in updated_vnfc_ids:
                        # already handled by the POD_NOT_FOUND pass
                        continue
                    for vnfc_rsc in vnfc_resources:
                        if (vdu_id == vnfc_rsc.vdu_id and
                                vnfc_rsc_id == vnfc_rsc.id):
                            pod_name = added_pod_names.pop()
                            vnfc_rsc.compute_resource.resource_id = \
                                pod_name
                            break
                    if not added_pod_names:
                        break
            updated_vdu_ids.append(vdu_id)
    except Exception as e:
        LOG.error('Post healing CNF got an error due to %s', e)
        raise
    finally:
        self.clean_authenticate_vim(auth_cred, file_descriptor)

def scale_resource_update(self, context, vnf_instance,
                          scale_vnf_request, vim_connection_info):
    """Update VnfcResourceInfo after scaling.

    Reconciles the stored VnfcResourceInfo of the scaled VDU against the
    Pods actually present on the VIM: entries for removed Pods are
    dropped (scale-in) and new entries are added for new Pods
    (scale-out).
    """
    auth_attr = vim_connection_info.access_info
    auth_cred, file_descriptor = self._get_auth_creds(auth_attr)
    inst_vnf_info = vnf_instance.instantiated_vnf_info
    try:
        core_v1_api_client = self.kubernetes.get_core_v1_api_client(
            auth=auth_cred)
        vnf_resources = objects.VnfResourceList.get_by_vnf_instance_id(
            context, vnf_instance.id)
        # resolve the VDU targeted by the scale aspect from the VNFD
        vnfd_dict = vnflcm_utils._get_vnfd_dict(
            context, vnf_instance.vnfd_id, inst_vnf_info.flavour_id)
        tosca = tosca_template.ToscaTemplate(
            parsed_params={}, a_file=False, yaml_dict_tpl=vnfd_dict)
        extract_policy_infos = vnflcm_utils.get_extract_policy_infos(tosca)
        vdu_defs = vnflcm_utils.get_target_vdu_def_dict(
            extract_policy_infos=extract_policy_infos,
            aspect_id=scale_vnf_request.aspect_id,
            tosca=tosca)
        # For CNF operations, Kubernetes resource information is stored
        # in VnfResource as:
        #   - resource_name : "namespace,name"
        #   - resource_type : "api_version,kind"
        is_found = False
        namespace = rsc_kind = rsc_name = target_vdu_id = None
        for vnf_resource in vnf_resources:
            rsc_name = vnf_resource.resource_name.split(',')[1]
            for vdu_id, vdu_def in vdu_defs.items():
                if rsc_name == vdu_def.get('properties').get('name'):
                    is_found = True
                    namespace = vnf_resource.resource_name.split(',')[0]
                    rsc_kind = vnf_resource.resource_type.split(',')[1]
                    target_vdu_id = vdu_id
                    break
            if is_found:
                break
        if not is_found:
            # FIX: original fell through with undefined locals
            # (NameError); nothing to reconcile for this aspect
            LOG.warning("No VnfResource matching the scaled VDU was "
                        "found; skip vnfc resource info update. "
                        "aspect_id: %s", scale_vnf_request.aspect_id)
            return
        # extract stored Pod names (and reusable metadata) for the VDU
        stored_pod_list = []
        metadata = None
        for vnfc_resource in inst_vnf_info.vnfc_resource_info:
            if vnfc_resource.vdu_id == target_vdu_id:
                stored_pod_list.append(
                    vnfc_resource.compute_resource.resource_id)
                if not metadata:
                    metadata = vnfc_resource.metadata
        # actual Pod names currently on the VIM
        pod_list = core_v1_api_client.list_namespaced_pod(
            namespace=namespace)
        actual_pod_list = [
            pod.metadata.name for pod in pod_list.items
            if self._is_match_pod_naming_rule(
                rsc_kind, rsc_name, pod.metadata.name)]
        # remove entries whose Pod no longer exists (scale-in)
        inst_vnf_info.vnfc_resource_info = [
            vnfc for vnfc in inst_vnf_info.vnfc_resource_info
            if not (vnfc.vdu_id == target_vdu_id and
                    vnfc.compute_resource.resource_id
                    not in actual_pod_list)]
        # add entries for newly created Pods (scale-out)
        for actual_pod_name in actual_pod_list:
            if actual_pod_name not in stored_pod_list:
                add_vnfc_resource = objects.VnfcResourceInfo()
                add_vnfc_resource.id = uuidutils.generate_uuid()
                add_vnfc_resource.vdu_id = target_vdu_id
                resource = objects.ResourceHandle()
                resource.resource_id = actual_pod_name
                resource.vim_level_resource_type = rsc_kind
                add_vnfc_resource.compute_resource = resource
                add_vnfc_resource.metadata = metadata
                inst_vnf_info.vnfc_resource_info.append(
                    add_vnfc_resource)
    except Exception as e:
        LOG.error('Update vnfc resource info got an error due to %s', e)
        raise
    finally:
        self.clean_authenticate_vim(auth_cred, file_descriptor)
b/tacker/vnfm/infra_drivers/openstack/openstack.py @@ -907,7 +907,7 @@ class OpenStack(abstract_driver.VnfAbstractDriver, @log.log def post_vnf_instantiation(self, context, vnf_instance, - vim_connection_info): + vim_connection_info, instantiate_vnf_req): inst_vnf_info = vnf_instance.instantiated_vnf_info access_info = vim_connection_info.access_info @@ -1221,7 +1221,8 @@ class OpenStack(abstract_driver.VnfAbstractDriver, heatclient.update(stack_id=inst_vnf_info.instance_id, existing=True) @log.log - def heal_vnf_wait(self, context, vnf_instance, vim_connection_info): + def heal_vnf_wait(self, context, vnf_instance, vim_connection_info, + heal_vnf_request): """Check vnf is healed successfully""" access_info = vim_connection_info.access_info