diff --git a/tacker/tests/etc/samples/vnf_vnfd_dict_scale.yaml b/tacker/tests/etc/samples/vnf_vnfd_dict_scale.yaml new file mode 100644 index 000000000..f108b7f46 --- /dev/null +++ b/tacker/tests/etc/samples/vnf_vnfd_dict_scale.yaml @@ -0,0 +1,403 @@ +tosca_definitions_version: tosca_simple_yaml_1_2 + +description: Simple deployment flavour for Sample VNF + +imports: + - etsi_nfv_sol001_common_types.yaml + - etsi_nfv_sol001_vnfd_types.yaml + - helloworld3_types.yaml + +topology_template: + inputs: + descriptor_id: + type: string + descriptor_version: + type: string + provider: + type: string + product_name: + type: string + software_version: + type: string + vnfm_info: + type: list + entry_schema: + type: string + flavour_id: + type: string + flavour_description: + type: string + + substitution_mappings: + node_type: company.provider.VNF + properties: + flavour_id: simple + requirements: + virtual_link_external1_1: [ VDU1_CP1, virtual_link ] + virtual_link_external1_2: [ VDU2_CP1, virtual_link ] + virtual_link_external2_1: [ VDU1_CP2, virtual_link ] + virtual_link_external2_2: [ VDU2_CP2, virtual_link ] + + node_templates: + VNF: + type: company.provider.VNF + properties: + flavour_description: A simple flavour + interfaces: + Vnflcm: + instantiate: [] + instantiate_start: [] + instantiate_end: [] + terminate: [] + terminate_start: [] + terminate_end: [] + modify_information: [] + modify_information_start: [] + modify_information_end: [] + + VDU1: + type: tosca.nodes.nfv.Vdu.Compute + properties: + name: VDU1 + description: VDU1 compute node + vdu_profile: + min_number_of_instances: 1 + max_number_of_instances: 3 + capabilities: + virtual_compute: + properties: + requested_additional_capabilities: + properties: + requested_additional_capability_name: m1.tiny + support_mandatory: true + target_performance_parameters: + entry_schema: test + virtual_memory: + virtual_mem_size: 512 MB + virtual_cpu: + num_virtual_cpu: 1 + virtual_local_storage: + - size_of_storage: 3 
GB + requirements: + - virtual_storage: VirtualStorage + + VDU2: + type: tosca.nodes.nfv.Vdu.Compute + properties: + name: VDU2 + description: VDU2 compute node + vdu_profile: + min_number_of_instances: 2 + max_number_of_instances: 2 + sw_image_data: + name: cirros-0.5.2-x86_64-disk + version: '0.5.2' + checksum: + algorithm: sha-256 + hash: 932fcae93574e242dc3d772d5235061747dfe537668443a1f0567d893614b464 + container_format: bare + disk_format: qcow2 + min_disk: 0 GB + min_ram: 256 MB + size: 12 GB + capabilities: + virtual_compute: + properties: + requested_additional_capabilities: + properties: + requested_additional_capability_name: m1.tiny + support_mandatory: true + target_performance_parameters: + entry_schema: test + virtual_memory: + virtual_mem_size: 512 MB + virtual_cpu: + num_virtual_cpu: 1 + virtual_local_storage: + - size_of_storage: 3 GB + + VirtualStorage: + type: tosca.nodes.nfv.Vdu.VirtualBlockStorage + properties: + virtual_block_storage_data: + size_of_storage: 1 GB + rdma_enabled: true + sw_image_data: + name: cirros-0.5.2-x86_64-disk + version: '0.5.2' + checksum: + algorithm: sha-256 + hash: 932fcae93574e242dc3d772d5235061747dfe537668443a1f0567d893614b464 + container_format: bare + disk_format: qcow2 + min_disk: 0 GB + min_ram: 256 MB + size: 12 GB + + VDU1_CP1: + type: tosca.nodes.nfv.VduCp + properties: + layer_protocols: [ ipv4 ] + order: 0 + requirements: + - virtual_binding: VDU1 + + VDU1_CP2: + type: tosca.nodes.nfv.VduCp + properties: + layer_protocols: [ ipv4 ] + order: 1 + requirements: + - virtual_binding: VDU1 + + VDU1_CP3: + type: tosca.nodes.nfv.VduCp + properties: + layer_protocols: [ ipv4 ] + order: 2 + requirements: + - virtual_binding: VDU1 + - virtual_link: internalVL1 + + VDU1_CP4: + type: tosca.nodes.nfv.VduCp + properties: + layer_protocols: [ ipv4 ] + order: 3 + requirements: + - virtual_binding: VDU1 + - virtual_link: internalVL2 + + VDU1_CP5: + type: tosca.nodes.nfv.VduCp + properties: + layer_protocols: [ ipv4 ] + 
order: 4 + requirements: + - virtual_binding: VDU1 + - virtual_link: internalVL3 + + VDU2_CP1: + type: tosca.nodes.nfv.VduCp + properties: + layer_protocols: [ ipv4 ] + order: 0 + requirements: + - virtual_binding: VDU2 + + VDU2_CP2: + type: tosca.nodes.nfv.VduCp + properties: + layer_protocols: [ ipv4 ] + order: 1 + requirements: + - virtual_binding: VDU2 + + VDU2_CP3: + type: tosca.nodes.nfv.VduCp + properties: + layer_protocols: [ ipv4 ] + order: 2 + requirements: + - virtual_binding: VDU2 + - virtual_link: internalVL1 + + VDU2_CP4: + type: tosca.nodes.nfv.VduCp + properties: + layer_protocols: [ ipv4 ] + order: 3 + requirements: + - virtual_binding: VDU2 + - virtual_link: internalVL2 + + VDU2_CP5: + type: tosca.nodes.nfv.VduCp + properties: + layer_protocols: [ ipv4 ] + order: 4 + requirements: + - virtual_binding: VDU2 + - virtual_link: internalVL3 + + internalVL1: + type: tosca.nodes.nfv.VnfVirtualLink + properties: + connectivity_type: + layer_protocols: [ ipv4 ] + description: External Managed Virtual link in the VNF + vl_profile: + max_bitrate_requirements: + root: 1048576 + leaf: 1048576 + min_bitrate_requirements: + root: 1048576 + leaf: 1048576 + virtual_link_protocol_data: + - associated_layer_protocol: ipv4 + l3_protocol_data: + ip_version: ipv4 + cidr: 33.33.0.0/24 + + internalVL2: + type: tosca.nodes.nfv.VnfVirtualLink + properties: + connectivity_type: + layer_protocols: [ ipv4 ] + description: External Managed Virtual link in the VNF + vl_profile: + max_bitrate_requirements: + root: 1048576 + leaf: 1048576 + min_bitrate_requirements: + root: 1048576 + leaf: 1048576 + virtual_link_protocol_data: + - associated_layer_protocol: ipv4 + l3_protocol_data: + ip_version: ipv4 + cidr: 33.34.0.0/24 + + internalVL3: + type: tosca.nodes.nfv.VnfVirtualLink + properties: + connectivity_type: + layer_protocols: [ ipv4 ] + description: Internal Virtual link in the VNF + vl_profile: + max_bitrate_requirements: + root: 1048576 + leaf: 1048576 + 
min_bitrate_requirements: + root: 1048576 + leaf: 1048576 + virtual_link_protocol_data: + - associated_layer_protocol: ipv4 + l3_protocol_data: + ip_version: ipv4 + cidr: 33.35.0.0/24 + + policies: + - scaling_aspects: + type: tosca.policies.nfv.ScalingAspects + properties: + aspects: + VDU1_scale: + name: VDU1_scale + description: VDU1 scaling aspect + max_scale_level: 2 + step_deltas: + - delta_1 + + - VDU1_initial_delta: + type: tosca.policies.nfv.VduInitialDelta + properties: + initial_delta: + number_of_instances: 1 + targets: [ VDU1 ] + + - VDU2_initial_delta: + type: tosca.policies.nfv.VduInitialDelta + properties: + initial_delta: + number_of_instances: 2 + targets: [ VDU2 ] + + - VDU1_scaling_aspect_deltas: + type: tosca.policies.nfv.VduScalingAspectDeltas + properties: + aspect: VDU1_scale + deltas: + delta_1: + number_of_instances: 1 + targets: [ VDU1 ] + + - instantiation_levels: + type: tosca.policies.nfv.InstantiationLevels + properties: + levels: + instantiation_level_1: + description: Smallest size + scale_info: + VDU1_scale: + scale_level: 0 + instantiation_level_2: + description: Largest size + scale_info: + VDU1_scale: + scale_level: 2 + default_level: instantiation_level_1 + + - VDU1_instantiation_levels: + type: tosca.policies.nfv.VduInstantiationLevels + properties: + levels: + instantiation_level_1: + number_of_instances: 1 + instantiation_level_2: + number_of_instances: 3 + targets: [ VDU1 ] + + - VDU2_instantiation_levels: + type: tosca.policies.nfv.VduInstantiationLevels + properties: + levels: + instantiation_level_1: + number_of_instances: 2 + instantiation_level_2: + number_of_instances: 2 + targets: [ VDU2 ] + + - internalVL1_instantiation_levels: + type: tosca.policies.nfv.VirtualLinkInstantiationLevels + properties: + levels: + instantiation_level_1: + bitrate_requirements: + root: 1048576 + leaf: 1048576 + instantiation_level_2: + bitrate_requirements: + root: 1048576 + leaf: 1048576 + targets: [ internalVL1 ] + + - 
internalVL2_instantiation_levels: + type: tosca.policies.nfv.VirtualLinkInstantiationLevels + properties: + levels: + instantiation_level_1: + bitrate_requirements: + root: 1048576 + leaf: 1048576 + instantiation_level_2: + bitrate_requirements: + root: 1048576 + leaf: 1048576 + targets: [ internalVL2 ] + + - internalVL3_instantiation_levels: + type: tosca.policies.nfv.VirtualLinkInstantiationLevels + properties: + levels: + instantiation_level_1: + bitrate_requirements: + root: 1048576 + leaf: 1048576 + instantiation_level_2: + bitrate_requirements: + root: 1048576 + leaf: 1048576 + targets: [ internalVL3 ] + + - policy_antiaffinity_vdu1: + type: tosca.policies.nfv.AntiAffinityRule + targets: [ VDU1 ] + properties: + scope: zone + + - policy_antiaffinity_vdu2: + type: tosca.policies.nfv.AntiAffinityRule + targets: [ VDU2 ] + properties: + scope: zone diff --git a/tacker/tests/unit/vnflcm/test_vnflcm_driver.py b/tacker/tests/unit/vnflcm/test_vnflcm_driver.py index d019465f1..974ab2ac2 100644 --- a/tacker/tests/unit/vnflcm/test_vnflcm_driver.py +++ b/tacker/tests/unit/vnflcm/test_vnflcm_driver.py @@ -1768,9 +1768,14 @@ class TestVnflcmDriver(db_base.SqlTestCase): vim_type="openstack") scale_name_list = ["fake"] grp_id = "fake_id" + vnf_instance = fakes.return_vnf_instance( + fields.VnfInstanceState.INSTANTIATED, + task_state=fields.VnfInstanceTaskState.SCALING) + driver = vnflcm_driver.VnfLcmDriver() - driver.scale(self.context, vnf_info, scale_vnf_request, - vim_connection_info, scale_name_list, grp_id) + driver.scale(self.context, vnf_info, vnf_instance, + scale_vnf_request, vim_connection_info, + scale_name_list, grp_id) @mock.patch.object(TackerManager, 'get_service_plugins', return_value={'VNFM': FakeVNFMPlugin()}) @@ -1796,12 +1801,17 @@ class TestVnflcmDriver(db_base.SqlTestCase): vim_type="openstack") scale_name_list = ["fake"] grp_id = "fake_id" + vnf_instance = fakes.return_vnf_instance( + fields.VnfInstanceState.INSTANTIATED, + 
task_state=fields.VnfInstanceTaskState.SCALING) + with open(vnf_info["attributes"]["heat_template"], "r") as f: mock_safe_load.return_value = yaml.safe_load(f) print(mock_safe_load.return_value) driver = vnflcm_driver.VnfLcmDriver() - driver.scale(self.context, vnf_info, scale_vnf_request, - vim_connection_info, scale_name_list, grp_id) + driver.scale(self.context, vnf_info, vnf_instance, + scale_vnf_request, vim_connection_info, + scale_name_list, grp_id) @mock.patch.object(TackerManager, 'get_service_plugins', return_value={'VNFM': FakeVNFMPlugin()}) @@ -1827,12 +1837,16 @@ class TestVnflcmDriver(db_base.SqlTestCase): vim_type="openstack") scale_name_list = ["fake"] grp_id = "fake_id" + vnf_instance = fakes.return_vnf_instance( + fields.VnfInstanceState.INSTANTIATED, + task_state=fields.VnfInstanceTaskState.SCALING) with open(vnf_info["attributes"]["heat_template"], "r") as f: mock_safe_load.return_value = yaml.safe_load(f) print(mock_safe_load.return_value) driver = vnflcm_driver.VnfLcmDriver() - driver.scale(self.context, vnf_info, scale_vnf_request, - vim_connection_info, scale_name_list, grp_id) + driver.scale(self.context, vnf_info, vnf_instance, + scale_vnf_request, + vim_connection_info, scale_name_list, grp_id) @mock.patch.object(TackerManager, 'get_service_plugins', return_value={'VNFM': FakeVNFMPlugin()}) @@ -1858,12 +1872,17 @@ class TestVnflcmDriver(db_base.SqlTestCase): vim_type="openstack") scale_name_list = ["fake"] grp_id = "fake_id" + vnf_instance = fakes.return_vnf_instance( + fields.VnfInstanceState.INSTANTIATED, + task_state=fields.VnfInstanceTaskState.SCALING) + with open(vnf_info["attributes"]["heat_template"], "r") as f: mock_safe_load.return_value = yaml.safe_load(f) print(mock_safe_load.return_value) driver = vnflcm_driver.VnfLcmDriver() - driver.scale(self.context, vnf_info, scale_vnf_request, - vim_connection_info, scale_name_list, grp_id) + driver.scale(self.context, vnf_info, vnf_instance, + scale_vnf_request, + vim_connection_info, 
scale_name_list, grp_id) @mock.patch.object(TackerManager, 'get_service_plugins', return_value={'VNFM': FakeVNFMPlugin()}) @@ -1968,6 +1987,9 @@ class TestVnflcmDriver(db_base.SqlTestCase): 'username': 'test_user', 'project_name': 'test_project'}} self.vim_client.get_vim.return_value = vim_obj + vnf_instance = fakes.return_vnf_instance( + fields.VnfInstanceState.INSTANTIATED, + task_state=fields.VnfInstanceTaskState.SCALING) driver.scale_vnf(self.context, vnf_info, vnf_instance, scale_vnf_request) @@ -1993,13 +2015,16 @@ class TestVnflcmDriver(db_base.SqlTestCase): scale_name_list = ["fake"] grp_id = "fake_id" driver = vnflcm_driver.VnfLcmDriver() - + vnf_instance = fakes.return_vnf_instance( + fields.VnfInstanceState.INSTANTIATED, + task_state=fields.VnfInstanceTaskState.SCALING) msg = 'Unknown scale type' self.assertRaisesRegex(exceptions.VnfScaleFailed, msg, driver.scale, self.context, vnf_info, + vnf_instance, scale_vnf_request, vim_connection_info, scale_name_list, @@ -2029,11 +2054,15 @@ class TestVnflcmDriver(db_base.SqlTestCase): driver = vnflcm_driver.VnfLcmDriver() msg = 'Unknown vim type' + vnf_instance = fakes.return_vnf_instance( + fields.VnfInstanceState.INSTANTIATED, + task_state=fields.VnfInstanceTaskState.SCALING) self.assertRaisesRegex(exceptions.VnfScaleFailed, msg, driver.scale, self.context, vnf_info, + vnf_instance, scale_vnf_request, vim_connection_info, scale_name_list, diff --git a/tacker/tests/unit/vnfm/infra_drivers/openstack/test_openstack_driver.py b/tacker/tests/unit/vnfm/infra_drivers/openstack/test_openstack_driver.py index 91e506163..f9c01f0f3 100644 --- a/tacker/tests/unit/vnfm/infra_drivers/openstack/test_openstack_driver.py +++ b/tacker/tests/unit/vnfm/infra_drivers/openstack/test_openstack_driver.py @@ -13,14 +13,12 @@ # License for the specific language governing permissions and limitations # under the License. 
+import ddt import importlib import json import os -import tempfile -from unittest import mock - -import ddt import requests +import tempfile import yaml from heatclient.v1 import resources @@ -43,7 +41,7 @@ from tacker.tests.unit.vnfm.infra_drivers.openstack.fixture_data import \ from tacker.tests import uuidsentinel from tacker.vnfm.infra_drivers.openstack import heat_client as hc from tacker.vnfm.infra_drivers.openstack import openstack - +from unittest import mock vnf_dict = { 'instance_id': 'd1121d3c-368b-4ac2-b39d-835aa3e4ccd8' @@ -2169,7 +2167,7 @@ class TestOpenStack(base.FixturedTestCase): inst_vnf_info = fd_utils.get_vnf_instantiated_info( virtual_storage_resource_info=[v_s_resource_info], vnfc_resource_info=[vnfc_resource_info]) - + inst_vnf_info.additional_params = None vnf_instance = fd_utils.get_vnf_instance_object( instantiated_vnf_info=inst_vnf_info) @@ -2205,6 +2203,7 @@ class TestOpenStack(base.FixturedTestCase): vnf_lcm_op_occs = fd_utils.get_lcm_op_occs_object( error_point=fields.ErrorPoint.PRE_VIM_CONTROL) mock_get_vnflcm_op_occs.return_value = vnf_lcm_op_occs + self.openstack.heal_vnf( self.context, vnf_instance, vim_connection_info, heal_vnf_request) @@ -2240,7 +2239,7 @@ class TestOpenStack(base.FixturedTestCase): instantiated_vnf_info=inst_vnf_info) vim_connection_info = fd_utils.get_vim_connection_info_object() - + inst_vnf_info.additional_params = None heal_vnf_request = objects.HealVnfRequest( vnfc_instance_id=[vnfc_resource_info.id], cause="healing request") diff --git a/tacker/tests/unit/vnfm/lcm_user_data/utils/test_utils.py b/tacker/tests/unit/vnfm/lcm_user_data/utils/test_utils.py index 69e723d42..4742bed24 100644 --- a/tacker/tests/unit/vnfm/lcm_user_data/utils/test_utils.py +++ b/tacker/tests/unit/vnfm/lcm_user_data/utils/test_utils.py @@ -15,8 +15,10 @@ import os import testtools import yaml +from tacker import objects from tacker.objects import instantiate_vnf_req from tacker.tests import constants +from tacker.tests import 
uuidsentinel from tacker.vnfm.lcm_user_data import utils default_initial_param_dict = { @@ -198,3 +200,24 @@ class TestUtils(testtools.TestCase): inst_req_info.ext_virtual_links = None cpd_vl_dict = utils.create_cpd_vl_dict(base_hot_dict, inst_req_info) self.assertEqual({}, cpd_vl_dict) + + def test_create_desired_capacity_dict(self): + base_hot_dict = {} + vnfd_dict = {} + base_hot_dict['heat_template'] = self._read_file( + "hot_lcm_user_data_with_scale.yaml") + expected_desired_capaity = {'VDU1_scale': 1} + vnfd_dict = self._read_file('vnf_vnfd_dict_scale.yaml') + s_status = {"aspect_id": "VDU1_scale", "scale_level": 0} + scale_status = objects.ScaleInfo(**s_status) + instantiated_vnf_info = { + 'flavour_id': uuidsentinel.flavour_id, + 'vnf_state': 'STARTED', + 'instance_id': '', + "scale_status": [scale_status] + } + inst_req_info = objects.InstantiatedVnfInfo(**instantiated_vnf_info) + + actual_desired_capacity = utils.get_desired_capacity_dict( + base_hot_dict, vnfd_dict, inst_req_info) + self.assertEqual(actual_desired_capacity, expected_desired_capaity) diff --git a/tacker/vnflcm/vnflcm_driver.py b/tacker/vnflcm/vnflcm_driver.py index f1bc382aa..105f0bf4a 100644 --- a/tacker/vnflcm/vnflcm_driver.py +++ b/tacker/vnflcm/vnflcm_driver.py @@ -1180,8 +1180,9 @@ class VnfLcmDriver(abstract_driver.VnfInstanceAbstractDriver): # action_driver LOG.debug("vnf_info['vnfd']['attributes'] %s", vnf_info['vnfd']['attributes']) - self.scale(context, vnf_info, scale_vnf_request, - vim_connection_info, scale_name_list, grp_id) + self.scale(context, vnf_info, vnf_instance, + scale_vnf_request, vim_connection_info, + scale_name_list, grp_id) @log.log @revert_to_error_scale @@ -1241,6 +1242,7 @@ class VnfLcmDriver(abstract_driver.VnfInstanceAbstractDriver): self, context, vnf_info, + vnf_instance, scale_vnf_request, vim_connection_info, scale_name_list, @@ -1307,6 +1309,7 @@ class VnfLcmDriver(abstract_driver.VnfInstanceAbstractDriver): context=context, 
auth_attr=vim_connection_info.access_info, + vnf_info=vnf_info, + vnf_instance=vnf_instance, + scale_vnf_request=scale_vnf_request, + region_name=vim_connection_info.access_info.get('region_name'), + scale_name_list=scale_name_list, @@ -1572,6 +1575,7 @@ class VnfLcmDriver(abstract_driver.VnfInstanceAbstractDriver): context=context, auth_attr=vim_connection_info.access_info, vnf_info=vnf_info, + vnf_instance=vnf_instance, scale_vnf_request=scale_vnf_request, region_name=vim_connection_info.access_info.get( 'region_name'), diff --git a/tacker/vnfm/infra_drivers/openstack/openstack.py b/tacker/vnfm/infra_drivers/openstack/openstack.py index 33148fd2d..9fe22c967 100644 --- a/tacker/vnfm/infra_drivers/openstack/openstack.py +++ b/tacker/vnfm/infra_drivers/openstack/openstack.py @@ -322,7 +322,7 @@ class OpenStack(abstract_driver.VnfAbstractDriver, # Find existing stack name_filter = None if 'stack_name' in vnf['attributes'].keys(): - name_filter = vnf['attributes']['stack_name'] + name_filter = vnf['attributes']['stack_name'] else: name_filter = (vnf['name'].replace(' ', '_') + '_' + vnf['id']) @@ -402,7 +402,6 @@ class OpenStack(abstract_driver.VnfAbstractDriver, vdu_name = rsc.vdu_id if not vdu_name: continue - if scale_group_dict: base_hot_dict, nested_hot_dict, vdu_none_flg = \ self._update_hot_available_scale( @@ -1458,6 +1457,68 @@ class OpenStack(abstract_driver.VnfAbstractDriver, for name, value in nested_hot_dict.items(): files_dict[name] = self._format_base_hot(value) stack_update_param['files'] = files_dict + additional_param = inst_vnf_info.additional_params + if additional_param is not None: + param = yaml.safe_load(vnf_dict['attributes']['stack_param']) + vnf_instance = vnf_instance + lcm_user_data_path = None + lcm_user_data_class = None + lcm_user_data_path = additional_param.get( + 'lcm-operation-user-data') + lcm_user_data_class = additional_param.get( + 'lcm-operation-user-data-class') + LOG.debug('UserData path: %s', lcm_user_data_path) + 
LOG.debug('UserData class: %s', lcm_user_data_class) + if lcm_user_data_path is not None and \ + lcm_user_data_class is not None: + vnf_pack_path = vnflcm_utils._get_vnf_package_path( + context, vnf_dict['vnfd_id']) + LOG.debug('VNF package path: %s', vnf_pack_path) + lcm_user_data_module = os.path.splitext( + os.path.basename(lcm_user_data_path))[0] + LOG.debug('UserData module name: %s', lcm_user_data_module) + try: + spec = importlib.util.spec_from_file_location( + lcm_user_data_module, + vnf_pack_path + '/' + lcm_user_data_path) + lcm_user_module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(lcm_user_module) + LOG.debug('lcm_user_module: %s', lcm_user_module) + except Exception: + error_reason = _( + "failed to get UserData path based on " + "lcm-operation-user-data from additionalParams.") + raise vnfm.LCMUserDataFailed(reason=error_reason) + try: + lcm_user_class = getattr(lcm_user_module, + lcm_user_data_class) + user_method_heal = getattr(lcm_user_class, + "heal", None) + except Exception: + error_reason = _( + "failed to get UserData class based on " + "lcm-operation-user-data-class from additionalParams.") + raise vnfm.LCMUserDataFailed(reason=error_reason) + LOG.debug('Check target method: {}'.format(user_method_heal)) + if callable(user_method_heal): + LOG.debug('The above is callable') + if base_hot_dict is None: + error_reason = _("failed to get Base HOT.") + raise vnfm.LCMUserDataFailed(reason=error_reason) + if base_hot_dict is None: + nested_hot_dict = {} + param_base_hot_dict = copy.deepcopy(nested_hot_dict) + param_base_hot_dict['heat_template'] = base_hot_dict + vnfd_dict = yaml.safe_load( + vnf_dict['vnfd']['attributes']['vnfd_' + + inst_vnf_info.flavour_id]) + vnfc_resource_info = \ + self._get_vnfc_resources_from_heal_request( + inst_vnf_info, heal_vnf_request) + updated_stack_param = user_method_heal( + param_base_hot_dict, vnfd_dict, heal_vnf_request, + vnf_instance, inst_vnf_info, param, vnfc_resource_info) + 
stack_param = {**updated_stack_param} if stack_param: stack_update_param['parameters'] = stack_param @@ -1848,7 +1909,7 @@ class OpenStack(abstract_driver.VnfAbstractDriver, @log.log def scale_in_reverse(self, context, plugin, auth_attr, vnf_info, - scale_vnf_request, region_name, + vnf_instance, scale_vnf_request, region_name, scale_name_list, grp_id): heatclient = hc.HeatClient(auth_attr, region_name) if grp_id: @@ -1858,13 +1919,83 @@ class OpenStack(abstract_driver.VnfAbstractDriver, resource_name=name, mark_unhealthy=True, resource_status_reason='Scale') - paramDict = {} - scale_json = vnf_info['attributes']['scale_group'] - scaleGroupDict = jsonutils.loads(scale_json) - for name, value in scaleGroupDict['scaleGroupDict'].items(): - paramDict[name + '_desired_capacity'] = value['default'] - paramDict[scale_vnf_request.aspect_id + '_desired_capacity'] = \ - vnf_info['res_num'] + inst_vnf_info = vnf_instance.instantiated_vnf_info + vnf = vnf_info + stack_param = {} + updated_stack_param = {} + if 'stack_param' in vnf['attributes'].keys(): + param = yaml.safe_load(vnf['attributes']['stack_param']) + additional_param = inst_vnf_info.additional_params + if additional_param is not None: + lcm_user_data_path = None + lcm_user_data_class = None + lcm_user_data_path = additional_param.get( + 'lcm-operation-user-data') + lcm_user_data_class = additional_param.get( + 'lcm-operation-user-data-class') + LOG.debug('UserData path: %s', lcm_user_data_path) + LOG.debug('UserData class: %s', lcm_user_data_class) + if lcm_user_data_path is not None and \ + lcm_user_data_class is not None: + vnf_pack_path = vnflcm_utils._get_vnf_package_path( + context, vnf['vnfd_id']) + LOG.debug('VNF package path: %s', vnf_pack_path) + lcm_user_data_module = os.path.splitext( + os.path.basename(lcm_user_data_path))[0] + LOG.debug('UserData module name: %s', lcm_user_data_module) + try: + spec = importlib.util.spec_from_file_location( + lcm_user_data_module, + vnf_pack_path + '/' + 
lcm_user_data_path) + lcm_user_module = importlib.util.module_from_spec(spec) + spec.loader.exec_module(lcm_user_module) + LOG.debug('lcm_user_module: %s', lcm_user_module) + except Exception: + error_reason = _( + "failed to get UserData path based on " + "lcm-operation-user-data from additionalParams.") + raise vnfm.LCMUserDataFailed(reason=error_reason) + try: + lcm_user_class = getattr(lcm_user_module, + lcm_user_data_class) + user_method_scale = getattr(lcm_user_class, + "scale", None) + except Exception: + error_reason = _( + "failed to get UserData class based on " + "lcm-operation-user-data-class " + "from additionalParams.") + raise vnfm.LCMUserDataFailed(reason=error_reason) + + if callable(user_method_scale): + base_hot_dict, nested_hot_dict = \ + vnflcm_utils._get_base_nest_hot_dict( + context, inst_vnf_info.flavour_id, + vnf_instance.vnfd_id) + if base_hot_dict is None: + error_reason = _("failed to get Base HOT.") + raise vnfm.LCMUserDataFailed(reason=error_reason) + if base_hot_dict is None: + nested_hot_dict = {} + param_base_hot_dict = copy.deepcopy( + nested_hot_dict) + param_base_hot_dict['heat_template'] = \ + base_hot_dict + vnfd_dict = yaml.safe_load( + vnf['vnfd']['attributes']['vnfd_' + + inst_vnf_info.flavour_id]) + updated_stack_param = user_method_scale( + param_base_hot_dict, vnfd_dict, scale_vnf_request, + vnf_instance, inst_vnf_info, param, vnf['res_num']) + stack_param = {**stack_param, **updated_stack_param} + else: + stack_param = {**stack_param, **param} + else: + stack_param = {**stack_param, **param} + else: + stack_param = {**stack_param, **param} + + paramDict = stack_param stack_update_param = { 'parameters': paramDict, 'existing': True} diff --git a/tacker/vnfm/lcm_user_data/utils.py b/tacker/vnfm/lcm_user_data/utils.py index 4d2b263d6..b75d7a35c 100644 --- a/tacker/vnfm/lcm_user_data/utils.py +++ b/tacker/vnfm/lcm_user_data/utils.py @@ -14,8 +14,9 @@ import copy from oslo_log import log as logging +from oslo_serialization 
import jsonutils from tacker.common.utils import MemoryUnit - +from tacker.tosca import utils as tosca_utils """Define util functions that can be used in UserData. @@ -412,3 +413,85 @@ def _create_fixed_ips_list(ext_cp): fixed_ips_lst.append(fixed_ips) return fixed_ips_lst + + +def _create_scale_group_dict(base_hot_dict, vnfd_dict, inst_req_info): + + base_hot = base_hot_dict['heat_template'] + LOG.debug("base_hot: %s", base_hot) + + scaling_group_dict = {} + for name, rsc in base_hot.get('resources').items(): + if rsc['type'] == 'OS::Heat::AutoScalingGroup': + key_name = name.replace('_group', '') + scaling_group_dict[key_name] = name + LOG.debug("scaling_group_dict: %s", scaling_group_dict) + + if scaling_group_dict: + vnf = {'attributes': {'scaling_group_names': + jsonutils.dump_as_bytes(scaling_group_dict)}} + scale_group_dict = tosca_utils.get_scale_group( + vnf, vnfd_dict, inst_req_info) + LOG.debug("scale_group_dict: %s", scale_group_dict) + return scale_group_dict + else: + LOG.debug("no scale_group_dict") + return {} + + +def create_desired_capacity_dict(base_hot_dict, vnfd_dict, inst_req_info): + """Create a dict containing information about desired capacity. 
+ + :param base_hot_dict: dict(Base HOT dict format) + :param vnfd_dict: dict(VNFD dict format) + :param inst_req_info: dict(Instantiation request information format) + :return: dict(Scaling aspect name, Desired capacity value) + """ + scale_group_dict = _create_scale_group_dict( + base_hot_dict, vnfd_dict, inst_req_info) + + param_dict = {} + if scale_group_dict.get('scaleGroupDict'): + for name, value in scale_group_dict['scaleGroupDict'].items(): + param_dict[name] = value['default'] + + LOG.info("desired_capacity dict: %s", param_dict) + return param_dict + + +def _calc_desired_capacity(inst_vnf_info, name, value): + + for scale_status in inst_vnf_info.scale_status: + if scale_status.aspect_id == name: + LOG.debug("scale_level of %s: %d", + name, scale_status.scale_level) + increase = value['num'] * scale_status.scale_level + desired_capacity = value['initialNum'] + increase + LOG.debug("desired_capacity: %d", desired_capacity) + return desired_capacity + + LOG.debug("scale_level of %s: None", name) + return None + + +def get_desired_capacity_dict(base_hot_dict, vnfd_dict, inst_vnf_info): + """Get a dict containing information about desired capacity. + + :param base_hot_dict: dict(Base HOT dict format) + :param vnfd_dict: dict(VNFD dict format) + :param inst_vnf_info: dict(Instantiated VNF Info dict format) + :return: dict(Scaling aspect name, Desired capacity value) + """ + scale_group_dict = _create_scale_group_dict( + base_hot_dict, vnfd_dict, {}) + + param_dict = {} + if scale_group_dict.get('scaleGroupDict'): + for name, value in scale_group_dict['scaleGroupDict'].items(): + desired_capacity = _calc_desired_capacity( + inst_vnf_info, name, value) + if desired_capacity is not None: + param_dict[name] = desired_capacity + + LOG.info("desired_capacity dict: %s", param_dict) + return param_dict