From 55040a0e1ca063d469223cee2c095715e541561b Mon Sep 17 00:00:00 2001 From: Itsuro Oda Date: Thu, 28 Oct 2021 04:30:38 +0000 Subject: [PATCH] support scale VNF task of v2 API This patch implements scale VNF task defined in ETSI NFV-SOL003 v3.3.1 5.4.5. Retry and rollback operation of scale API are also supported. Note that rollback operation is supported for scale out only. Rollback operation of scale in is not supported. This patch includes some refactorings to commonize scale code with the code of existing APIs (i.e. instantiate and terminate) to avoid code duplication. For example making grant requests, making instantiatedVnfInfo and so on. Functional tests will be provided with another patch. Implements: blueprint support-nfv-solv3-scale-vnf Change-Id: I03dbfb577ca9158a75d1c249808dadda13d317b3 --- .../add-v2-scale-api-b60e8fe329f6038b.yaml | 5 + .../sol_refactored/api/policies/vnflcm_v2.py | 9 + .../sol_refactored/api/schemas/vnflcm_v2.py | 16 + tacker/sol_refactored/common/exceptions.py | 17 + .../sol_refactored/common/lcm_op_occ_utils.py | 279 +++- .../common/vnf_instance_utils.py | 5 +- tacker/sol_refactored/common/vnfd_utils.py | 56 + .../conductor/vnflcm_driver_v2.py | 414 +++-- tacker/sol_refactored/controller/vnflcm_v2.py | 95 +- .../infra_drivers/openstack/heat_utils.py | 32 + .../infra_drivers/openstack/openstack.py | 794 ++++++---- .../openstack/userdata_default.py | 64 +- .../infra_drivers/openstack/userdata_main.py | 11 +- .../infra_drivers/openstack/userdata_utils.py | 69 +- tacker/sol_refactored/test-tools/cli.py | 15 +- .../contents/BaseHOT/simple/sample1.yaml | 18 +- .../sample1/contents/UserData/userdata.py | 8 +- .../contents/BaseHOT/simple/sample2.yaml | 19 +- .../common/test_lcm_op_occ_utils.py | 1205 +++++++++++++++ .../sol_refactored/common/test_vnfd_utils.py | 26 +- .../conductor/test_vnflcm_driver_v2.py | 229 ++- .../controller/test_vnflcm_v2.py | 92 +- .../infra_drivers/openstack/test_openstack.py | 1328 +++++++++++++++++ 
.../openstack/test_userdata_utils.py | 46 +- .../sample1/BaseHOT/simple/ut_sample1.yaml | 14 - .../Definitions/ut_sample1_df_simple.yaml | 15 + .../sample1/UserData/userdata_default.py | 37 +- 27 files changed, 4332 insertions(+), 586 deletions(-) create mode 100755 releasenotes/notes/add-v2-scale-api-b60e8fe329f6038b.yaml create mode 100644 tacker/tests/unit/sol_refactored/common/test_lcm_op_occ_utils.py create mode 100644 tacker/tests/unit/sol_refactored/infra_drivers/openstack/test_openstack.py diff --git a/releasenotes/notes/add-v2-scale-api-b60e8fe329f6038b.yaml b/releasenotes/notes/add-v2-scale-api-b60e8fe329f6038b.yaml new file mode 100755 index 000000000..6da0e7e96 --- /dev/null +++ b/releasenotes/notes/add-v2-scale-api-b60e8fe329f6038b.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Add the Version "2.0.0" of Scale VNF API + based on ETSI NFV specifications. diff --git a/tacker/sol_refactored/api/policies/vnflcm_v2.py b/tacker/sol_refactored/api/policies/vnflcm_v2.py index 51c01251f..6451ee22b 100644 --- a/tacker/sol_refactored/api/policies/vnflcm_v2.py +++ b/tacker/sol_refactored/api/policies/vnflcm_v2.py @@ -93,6 +93,15 @@ rules = [ 'path': VNF_INSTANCES_ID_PATH + '/terminate'} ] ), + policy.DocumentedRuleDefault( + name=POLICY_NAME.format('scale'), + check_str=RULE_ANY, + description="Scale vnf instance.", + operations=[ + {'method': 'POST', + 'path': VNF_INSTANCES_ID_PATH + '/scale'} + ] + ), # TODO(oda-g): add more lcm operations etc when implemented. 
diff --git a/tacker/sol_refactored/api/schemas/vnflcm_v2.py b/tacker/sol_refactored/api/schemas/vnflcm_v2.py index 63f9e62d8..3790c3a87 100644 --- a/tacker/sol_refactored/api/schemas/vnflcm_v2.py +++ b/tacker/sol_refactored/api/schemas/vnflcm_v2.py @@ -77,6 +77,22 @@ TerminateVnfRequest_V200 = { 'additionalProperties': True, } +# SOL003 5.5.2.5 +ScaleVnfRequest_V200 = { + 'type': 'object', + 'properties': { + 'type': { + 'type': 'string', + 'enum': ['SCALE_OUT', 'SCALE_IN'] + }, + 'aspectId': common_types.IdentifierInVnfd, + 'numberOfSteps': {'type': 'integer', 'minimum': 1}, + 'additionalParams': parameter_types.keyvalue_pairs, + }, + 'required': ['type', 'aspectId'], + 'additionalProperties': True, +} + # SOL013 8.3.4 _SubscriptionAuthentication = { 'type': 'object', diff --git a/tacker/sol_refactored/common/exceptions.py b/tacker/sol_refactored/common/exceptions.py index ef299b8f9..2c35fcadb 100644 --- a/tacker/sol_refactored/common/exceptions.py +++ b/tacker/sol_refactored/common/exceptions.py @@ -223,3 +223,20 @@ class GrantRequestOrGrantNotFound(SolHttpError404): class RollbackNotSupported(SolHttpError422): message = _("Rollback of %(op)s is not supported.") + + +class UnexpectedParentResourceDefinition(SolHttpError422): + message = _("Parent resource is necessary for VDU definition.") + + +class InvalidScaleAspectId(SolHttpError400): + message = _("Invalid aspectId '%(aspect_id)s'.") + + +class InvalidScaleNumberOfSteps(SolHttpError400): + message = _("Invalid numberOfSteps '%(num_steps)d'.") + + +class DeltaMissingInVnfd(SolHttpError400): + message = _("Delta '%(delta)s' is not defined in " + "VduScalingAspectDeltas.") diff --git a/tacker/sol_refactored/common/lcm_op_occ_utils.py b/tacker/sol_refactored/common/lcm_op_occ_utils.py index 686936fea..c5838f2ab 100644 --- a/tacker/sol_refactored/common/lcm_op_occ_utils.py +++ b/tacker/sol_refactored/common/lcm_op_occ_utils.py @@ -117,78 +117,239 @@ def make_lcmocc_notif_data(subsc, lcmocc, endpoint): return 
notif_data -def _make_instantiate_lcmocc(lcmocc, inst, change_type): +def _make_affected_vnfc(vnfc, change_type): + affected_vnfc = objects.AffectedVnfcV2( + id=vnfc.id, + vduId=vnfc.vduId, + changeType=change_type, + computeResource=vnfc.computeResource + ) + if vnfc.obj_attr_is_set('vnfcCpInfo'): + cp_ids = [cp.id for cp in vnfc.vnfcCpInfo] + affected_vnfc.affectedVnfcCpIds = cp_ids + if vnfc.obj_attr_is_set('storageResourceIds'): + str_ids = vnfc.storageResourceIds + if change_type == 'ADDED': + affected_vnfc.addedStorageResourceIds = str_ids + else: # 'REMOVED' + affected_vnfc.removedStorageResourceIds = str_ids + + return affected_vnfc + + +def _make_affected_vl(vl, change_type): + affected_vl = objects.AffectedVirtualLinkV2( + id=vl.id, + vnfVirtualLinkDescId=vl.vnfVirtualLinkDescId, + changeType=change_type, + networkResource=vl.networkResource + ) + if vl.obj_attr_is_set('vnfLinkPorts'): + affected_vl.vnfLinkPortIds = [port.id for port in vl.vnfLinkPorts] + + return affected_vl + + +def _make_affected_vls_link_port_change(vls_saved, vls, common_vls): + affected_vls = [] + + for vl_id in common_vls: + old_ports = set() + new_ports = set() + for vl in vls_saved: + if vl.id == vl_id: + old_vl = vl + if vl.obj_attr_is_set('vnfLinkPorts'): + old_ports = {port.id for port in vl.vnfLinkPorts} + for vl in vls: + if vl.id == vl_id: + new_vl = vl + if vl.obj_attr_is_set('vnfLinkPorts'): + new_ports = {port.id for port in vl.vnfLinkPorts} + add_ports = new_ports - old_ports + rm_ports = old_ports - new_ports + # assume there are not add_ports and rm_ports at the same time. 
+ if add_ports: + affected_vl = objects.AffectedVirtualLinkV2( + id=new_vl.id, + vnfVirtualLinkDescId=new_vl.vnfVirtualLinkDescId, + changeType='LINK_PORT_ADDED', + networkResource=new_vl.networkResource, + vnfLinkPortIds=list(add_ports) + ) + affected_vls.append(affected_vl) + elif rm_ports: + affected_vl = objects.AffectedVirtualLinkV2( + id=old_vl.id, + vnfVirtualLinkDescId=old_vl.vnfVirtualLinkDescId, + changeType='LINK_PORT_REMOVED', + networkResource=old_vl.networkResource, + vnfLinkPortIds=list(rm_ports) + ) + affected_vls.append(affected_vl) + + return affected_vls + + +def _make_affected_strg(strg, change_type): + return objects.AffectedVirtualStorageV2( + id=strg.id, + virtualStorageDescId=strg.virtualStorageDescId, + changeType=change_type, + storageResource=strg.storageResource + ) + + +def _make_affected_ext_link_ports(inst_info_saved, inst_info): + affected_ext_link_ports = [] + + ext_vl_ports_saved = set() + ext_vl_ports = set() + if inst_info_saved.obj_attr_is_set('extVirtualLinkInfo'): + for ext_vl in inst_info_saved.extVirtualLinkInfo: + if ext_vl.obj_attr_is_set('extLinkPorts'): + ext_vl_ports_saved |= {port.id + for port in ext_vl.extLinkPorts} + if inst_info.obj_attr_is_set('extVirtualLinkInfo'): + for ext_vl in inst_info.extVirtualLinkInfo: + if ext_vl.obj_attr_is_set('extLinkPorts'): + ext_vl_ports |= {port.id + for port in ext_vl.extLinkPorts} + add_ext_vl_ports = ext_vl_ports - ext_vl_ports_saved + rm_ext_vl_ports = ext_vl_ports_saved - ext_vl_ports + + if add_ext_vl_ports: + for ext_vl in inst_info.extVirtualLinkInfo: + if not ext_vl.obj_attr_is_set('extLinkPorts'): + continue + affected_ext_link_ports += [ + objects.AffectedExtLinkPortV2( + id=port.id, + changeType='ADDED', + extCpInstanceId=port.cpInstanceId, + resourceHandle=port.resourceHandle + ) + for port in ext_vl.extLinkPorts + if port.id in add_ext_vl_ports + ] + if rm_ext_vl_ports: + for ext_vl in inst_info_saved.extVirtualLinkInfo: + if not 
ext_vl.obj_attr_is_set('extLinkPorts'): + continue + affected_ext_link_ports += [ + objects.AffectedExtLinkPortV2( + id=port.id, + changeType='REMOVED', + extCpInstanceId=port.cpInstanceId, + resourceHandle=port.resourceHandle + ) + for port in ext_vl.extLinkPorts + if port.id in rm_ext_vl_ports + ] + + return affected_ext_link_ports + + +def update_lcmocc(lcmocc, inst_saved, inst): # make ResourceChanges of lcmocc from instantiatedVnfInfo. # NOTE: grant related info such as resourceDefinitionId, zoneId # and so on are not included in lcmocc since such info are not # included in instantiatedVnfInfo. + if inst_saved.obj_attr_is_set('instantiatedVnfInfo'): + inst_info_saved = inst_saved.instantiatedVnfInfo + else: + # dummy + inst_info_saved = objects.VnfInstanceV2_InstantiatedVnfInfo() + inst_info = inst.instantiatedVnfInfo - lcmocc_vncs = [] - if inst_info.obj_attr_is_set('vnfcResourceInfo'): - for inst_vnc in inst_info.vnfcResourceInfo: - lcmocc_vnc = objects.AffectedVnfcV2( - id=inst_vnc.id, - vduId=inst_vnc.vduId, - changeType=change_type, - computeResource=inst_vnc.computeResource - ) - if inst_vnc.obj_attr_is_set('vnfcCpInfo'): - cp_ids = [cp.id for cp in inst_vnc.vnfcCpInfo] - lcmocc_vnc.affectedVnfcCpIds = cp_ids - if inst_vnc.obj_attr_is_set('storageResourceIds'): - str_ids = inst_vnc.storageResourceIds - if change_type == 'ADDED': - lcmocc_vnc.addedStorageResourceIds = str_ids - else: # 'REMOVED' - lcmocc_vnc.removedStorageResourceIds = str_ids - lcmocc_vncs.append(lcmocc_vnc) + # NOTE: objects may be re-created. so compare 'id' instead of object + # itself. 
+ def _calc_diff(attr): + # NOTE: instantiatedVnfInfo object is dict compat + objs_saved = set() + if inst_info_saved.obj_attr_is_set(attr): + objs_saved = {obj.id for obj in inst_info_saved[attr]} + objs = set() + if inst_info.obj_attr_is_set(attr): + objs = {obj.id for obj in inst_info[attr]} - lcmocc_vls = [] - if inst_info.obj_attr_is_set('vnfVirtualLinkResourceInfo'): - for inst_vl in inst_info.vnfVirtualLinkResourceInfo: - lcmocc_vl = objects.AffectedVirtualLinkV2( - id=inst_vl.id, - vnfVirtualLinkDescId=inst_vl.vnfVirtualLinkDescId, - changeType=change_type, - networkResource=inst_vl.networkResource - ) - if inst_vl.obj_attr_is_set('vnfLinkPorts'): - port_ids = [port.id for port in inst_vl.vnfLinkPorts] - lcmocc_vl.vnfLinkPortIds = port_ids - lcmocc_vls.append(lcmocc_vl) + # return removed_objs, added_objs, common_objs + return objs_saved - objs, objs - objs_saved, objs_saved & objs - lcmocc_strs = [] - if inst_info.obj_attr_is_set('virtualStorageResourceInfo'): - for inst_str in inst_info.virtualStorageResourceInfo: - lcmocc_str = objects.AffectedVirtualStorageV2( - id=inst_str.id, - virtualStorageDescId=inst_str.virtualStorageDescId, - changeType=change_type, - storageResource=inst_str.storageResource - ) - lcmocc_strs.append(lcmocc_str) + removed_vnfcs, added_vnfcs, _ = _calc_diff('vnfcResourceInfo') + affected_vnfcs = [] + if removed_vnfcs: + affected_vnfcs += [_make_affected_vnfc(vnfc, 'REMOVED') + for vnfc in inst_info_saved.vnfcResourceInfo + if vnfc.id in removed_vnfcs] + if added_vnfcs: + affected_vnfcs += [_make_affected_vnfc(vnfc, 'ADDED') + for vnfc in inst_info.vnfcResourceInfo + if vnfc.id in added_vnfcs] - if lcmocc_vncs or lcmocc_vls or lcmocc_strs: + removed_vls, added_vls, common_vls = _calc_diff( + 'vnfVirtualLinkResourceInfo') + affected_vls = [] + if removed_vls: + affected_vls += [_make_affected_vl(vl, 'REMOVED') + for vl in inst_info_saved.vnfVirtualLinkResourceInfo + if vl.id in removed_vls] + if added_vls: + affected_vls += 
[_make_affected_vl(vl, 'ADDED') + for vl in inst_info.vnfVirtualLinkResourceInfo + if vl.id in added_vls] + if common_vls: + affected_vls += _make_affected_vls_link_port_change( + inst_info_saved.vnfVirtualLinkResourceInfo, + inst_info.vnfVirtualLinkResourceInfo, common_vls) + + removed_mgd_vls, added_mgd_vls, common_mgd_vls = _calc_diff( + 'extManagedVirtualLinkInfo') + if removed_mgd_vls: + affected_vls += [_make_affected_vl(vl, 'LINK_PORT_REMOVED') + for vl in inst_info_saved.extManagedVirtualLinkInfo + if vl.id in removed_mgd_vls] + if added_mgd_vls: + affected_vls += [_make_affected_vl(vl, 'LINK_PORT_ADDED') + for vl in inst_info.extManagedVirtualLinkInfo + if vl.id in added_mgd_vls] + if common_mgd_vls: + affected_vls += _make_affected_vls_link_port_change( + inst_info_saved.extManagedVirtualLinkInfo, + inst_info.extManagedVirtualLinkInfo, common_mgd_vls) + + removed_strgs, added_strgs, _ = _calc_diff('virtualStorageResourceInfo') + affected_strgs = [] + if removed_strgs: + affected_strgs += [ + _make_affected_strg(strg, 'REMOVED') + for strg in inst_info_saved.virtualStorageResourceInfo + if strg.id in removed_strgs + ] + if added_strgs: + affected_strgs += [_make_affected_strg(strg, 'ADDED') + for strg in inst_info.virtualStorageResourceInfo + if strg.id in added_strgs] + + affected_ext_link_ports = _make_affected_ext_link_ports( + inst_info_saved, inst_info) + + if (affected_vnfcs or affected_vls or affected_strgs or + affected_ext_link_ports): change_info = objects.VnfLcmOpOccV2_ResourceChanges() - if lcmocc_vncs: - change_info.affectedVnfcs = lcmocc_vncs - if lcmocc_vls: - change_info.affectedVirtualLinks = lcmocc_vls - if lcmocc_strs: - change_info.affectedVirtualStorages = lcmocc_strs + if affected_vnfcs: + change_info.affectedVnfcs = affected_vnfcs + if affected_vls: + change_info.affectedVirtualLinks = affected_vls + if affected_strgs: + change_info.affectedVirtualStorages = affected_strgs + if affected_ext_link_ports: + 
change_info.affectedExtLinkPorts = affected_ext_link_ports lcmocc.resourceChanges = change_info -def make_instantiate_lcmocc(lcmocc, inst): - _make_instantiate_lcmocc(lcmocc, inst, 'ADDED') - - -def make_terminate_lcmocc(lcmocc, inst): - _make_instantiate_lcmocc(lcmocc, inst, 'REMOVED') - - def get_grant_req_and_grant(context, lcmocc): grant_reqs = objects.GrantRequestV1.get_by_filter(context, vnfLcmOpOccId=lcmocc.id) @@ -203,7 +364,7 @@ def get_grant_req_and_grant(context, lcmocc): def check_lcmocc_in_progress(context, inst_id): # if the controller or conductor executes an operation for the vnf # instance (i.e. operationState is ...ING), other operation for - # the same vnf instance is exculed by the coordinator. + # the same vnf instance is excluded by the coordinator. # check here is existence of lcmocc for the vnf instance with # FAILED_TEMP operationState. lcmoccs = objects.VnfLcmOpOccV2.get_by_filter( diff --git a/tacker/sol_refactored/common/vnf_instance_utils.py b/tacker/sol_refactored/common/vnf_instance_utils.py index bcfb7330e..20d4381fc 100644 --- a/tacker/sol_refactored/common/vnf_instance_utils.py +++ b/tacker/sol_refactored/common/vnf_instance_utils.py @@ -46,8 +46,11 @@ def make_inst_links(inst, endpoint): links.instantiate = objects.Link(href=self_href + "/instantiate") else: # 'INSTANTIATED' links.terminate = objects.Link(href=self_href + "/terminate") + links.scale = objects.Link(href=self_href + "/scale") # TODO(oda-g): add when the operation supported - # links.scale = objects.Link(href = self_href + "/scale") + # links.heal = objects.Link(href=self_href + "/heal") + # links.changeExtConn = objects.Link( + # href=self_href + "/change_ext_conn") # etc.
return links diff --git a/tacker/sol_refactored/common/vnfd_utils.py b/tacker/sol_refactored/common/vnfd_utils.py index d59a3ac7a..ce11233f9 100644 --- a/tacker/sol_refactored/common/vnfd_utils.py +++ b/tacker/sol_refactored/common/vnfd_utils.py @@ -358,3 +358,59 @@ class Vnfd(object): raise sol_ex.SolHttpError422(sol_detail=msg) return script + + def get_scale_vdu_and_num(self, flavour_id, aspect_id): + aspects = self.get_policy_values_by_type(flavour_id, + 'tosca.policies.nfv.ScalingAspects') + delta = None + for aspect in aspects: + value = aspect['properties']['aspects'].get(aspect_id) + if value is not None: + # expect there is one delta. + # NOTE: Tacker does not support non-uniform deltas defined in + # ETSI NFV SOL001 8. Therefore, uniform delta corresponding + # to number_of_instances can be set and number_of_instances is + # the same regardless of scale_level. + delta = value['step_deltas'][0] + break + + if delta is None: + return {} + + aspect_deltas = self.get_policy_values_by_type(flavour_id, + 'tosca.policies.nfv.VduScalingAspectDeltas') + vdu_num_inst = {} + for aspect_delta in aspect_deltas: + if aspect_delta.get('properties', {}).get('aspect') == aspect_id: + num_inst = (aspect_delta['properties']['deltas'] + .get(delta, {}).get('number_of_instances')) + # NOTE: it is not checked whether 'delta' defined in + # ScalingAspects exists in VduScalingAspectDeltas at + # the loading of vnf package. this is a mistake of the + # VNFD definition.
+ if num_inst is None: + raise sol_ex.DeltaMissingInVnfd(delta=delta) + for vdu_name in aspect_delta['targets']: + vdu_num_inst[vdu_name] = num_inst + + return vdu_num_inst + + def get_scale_info_from_inst_level(self, flavour_id, inst_level): + policies = self.get_policy_values_by_type(flavour_id, + 'tosca.policies.nfv.InstantiationLevels') + for policy in policies: + return (policy['properties']['levels'] + .get(inst_level, {}) + .get('scale_info', {})) + return {} + + def get_max_scale_level(self, flavour_id, aspect_id): + aspects = self.get_policy_values_by_type(flavour_id, + 'tosca.policies.nfv.ScalingAspects') + for aspect in aspects: + value = aspect['properties']['aspects'].get(aspect_id) + if value is not None: + return value['max_scale_level'] + + # should not occur + return 0 diff --git a/tacker/sol_refactored/conductor/vnflcm_driver_v2.py b/tacker/sol_refactored/conductor/vnflcm_driver_v2.py index 2c66b1d5f..56bb35980 100644 --- a/tacker/sol_refactored/conductor/vnflcm_driver_v2.py +++ b/tacker/sol_refactored/conductor/vnflcm_driver_v2.py @@ -43,8 +43,32 @@ class VnfLcmDriverV2(object): self.nfvo_client = nfvo_client.NfvoClient() def grant(self, context, lcmocc, inst, vnfd): + # grant exchange + # NOTE: the api_version of NFVO supposes 1.4.0 at the moment. + + # make common part of grant_req among operations + grant_req = objects.GrantRequestV1( + vnfInstanceId=inst.id, + vnfLcmOpOccId=lcmocc.id, + vnfdId=inst.vnfdId, + operation=lcmocc.operation, + isAutomaticInvocation=lcmocc.isAutomaticInvocation + ) + grant_req._links = objects.GrantRequestV1_Links( + vnfLcmOpOcc=objects.Link( + href=lcmocc_utils.lcmocc_href(lcmocc.id, self.endpoint)), + vnfInstance=objects.Link( + href=inst_utils.inst_href(inst.id, self.endpoint))) + + # make operation specific part of grant_req and check request + # parameters if necessary. 
method = getattr(self, "%s_%s" % (lcmocc.operation.lower(), 'grant')) - return method(context, lcmocc, inst, vnfd) + method(grant_req, lcmocc.operationParams, inst, vnfd) + + # NOTE: if not granted, 403 error raised. + grant = self.nfvo_client.grant(context, grant_req) + + return grant_req, grant def post_grant(self, context, lcmocc, inst, grant_req, grant, vnfd): method = getattr(self, @@ -84,7 +108,64 @@ class VnfLcmDriverV2(object): LOG.debug("execute %s of %s success.", operation, script) + def _make_inst_info_common(self, lcmocc, inst_saved, inst, vnfd): + # make vim independent part of instantiatedVnfInfo. + # scaleStatus and maxScaleLevels at the moment. + inst_info = inst.instantiatedVnfInfo + req = lcmocc.operationParams + + if lcmocc.operation == v2fields.LcmOperationType.INSTANTIATE: + # create scaleStatus and maxScaleLevels + flavour_id = req.flavourId + if req.obj_attr_is_set('instantiationLevelId'): + inst_level = req.instantiationLevelId + else: + inst_level = vnfd.get_default_instantiation_level(flavour_id) + + # make scaleStatus from tosca.policies.nfv.InstantiationLevels + # definition. 
+ scale_info = vnfd.get_scale_info_from_inst_level(flavour_id, + inst_level) + scale_status = [ + objects.ScaleInfoV2( + aspectId=aspect_id, + scaleLevel=value['scale_level'] + ) + for aspect_id, value in scale_info.items() + ] + max_scale_levels = [ + objects.ScaleInfoV2( + aspectId=obj.aspectId, + scaleLevel=vnfd.get_max_scale_level(flavour_id, + obj.aspectId) + ) + for obj in scale_status + ] + + if scale_status: + inst_info.scaleStatus = scale_status + inst_info.maxScaleLevels = max_scale_levels + elif lcmocc.operation != v2fields.LcmOperationType.TERMINATE: + inst_info_saved = inst_saved.instantiatedVnfInfo + if inst_info_saved.obj_attr_is_set('scaleStatus'): + inst_info.scaleStatus = inst_info_saved.scaleStatus + inst_info.maxScaleLevels = inst_info_saved.maxScaleLevels + + if lcmocc.operation == v2fields.LcmOperationType.SCALE: + # adjust scaleStatus + num_steps = req.numberOfSteps + if req.type == 'SCALE_IN': + num_steps *= -1 + + for aspect_info in inst_info.scaleStatus: + if aspect_info.aspectId == req.aspectId: + aspect_info.scaleLevel += num_steps + break + def process(self, context, lcmocc, inst, grant_req, grant, vnfd): + # save inst to use updating lcmocc after process done + inst_saved = inst.obj_clone() + # perform preamble LCM script req = lcmocc.operationParams operation = "%s_%s" % (lcmocc.operation.lower(), 'start') @@ -104,6 +185,9 @@ class VnfLcmDriverV2(object): self._exec_mgmt_driver_script(operation, flavour_id, req, inst, grant_req, grant, vnfd) + self._make_inst_info_common(lcmocc, inst_saved, inst, vnfd) + lcmocc_utils.update_lcmocc(lcmocc, inst_saved, inst) + def rollback(self, context, lcmocc, inst, grant_req, grant, vnfd): method = getattr(self, "%s_%s" % (lcmocc.operation.lower(), 'rollback'), @@ -114,38 +198,60 @@ class VnfLcmDriverV2(object): raise sol_ex.RollbackNotSupported(op=lcmocc.operation) def _get_link_ports(self, inst_req): - names = [] + names = set() if inst_req.obj_attr_is_set('extVirtualLinks'): for ext_vl in 
inst_req.extVirtualLinks: for ext_cp in ext_vl.extCps: for cp_config in ext_cp.cpConfig.values(): if cp_config.obj_attr_is_set('linkPortId'): - names.append(ext_cp.cpdId) + names.add(ext_cp.cpdId) if inst_req.obj_attr_is_set('extManagedVirtualLinks'): for ext_mgd_vl in inst_req.extManagedVirtualLinks: if ext_mgd_vl.obj_attr_is_set('vnfLinkPort'): - names.append(ext_mgd_vl.vnfVirtualLinkDescId) + names.add(ext_mgd_vl.vnfVirtualLinkDescId) return names - def instantiate_grant(self, context, lcmocc, inst, vnfd): - req = lcmocc.operationParams + def _make_res_def_for_new_vdu(self, vdu_name, num_inst, cp_names, + storage_names): + # common part of instantiate and scale out + add_reses = [] + for _ in range(num_inst): + vdu_res_id = uuidutils.generate_uuid() + add_reses.append( + objects.ResourceDefinitionV1( + id=vdu_res_id, + type='COMPUTE', + resourceTemplateId=vdu_name + ) + ) + for cp_name in cp_names: + add_reses.append( + objects.ResourceDefinitionV1( + id="{}-{}".format(cp_name, vdu_res_id), + type='LINKPORT', + resourceTemplateId=cp_name + ) + ) + for storage_name in storage_names: + add_reses.append( + objects.ResourceDefinitionV1( + id="{}-{}".format(storage_name, vdu_res_id), + type='STORAGE', + resourceTemplateId=storage_name + ) + ) + + return add_reses + + def instantiate_grant(self, grant_req, req, inst, vnfd): flavour_id = req.flavourId if vnfd.get_vnfd_flavour(flavour_id) is None: raise sol_ex.FlavourIdNotFound(flavour_id=flavour_id) - # grant exchange - # NOTE: the api_version of NFVO supposes 1.4.0 at the moment. 
- grant_req = objects.GrantRequestV1( - vnfInstanceId=inst.id, - vnfLcmOpOccId=lcmocc.id, - vnfdId=inst.vnfdId, - flavourId=flavour_id, - operation=lcmocc.operation, - isAutomaticInvocation=lcmocc.isAutomaticInvocation - ) + grant_req.flavourId = flavour_id if req.obj_attr_is_set('instantiationLevelId'): inst_level = req.instantiationLevelId @@ -161,30 +267,8 @@ class VnfLcmDriverV2(object): vdu_cp_names = vnfd.get_vdu_cps(flavour_id, name) vdu_storage_names = vnfd.get_vdu_storages(node) - for _ in range(num): - res_def = objects.ResourceDefinitionV1( - id=uuidutils.generate_uuid(), - type='COMPUTE', - resourceTemplateId=name) - add_reses.append(res_def) - - for cp_name in vdu_cp_names: - if cp_name in link_port_names: - continue - for _ in range(num): - res_def = objects.ResourceDefinitionV1( - id=uuidutils.generate_uuid(), - type='LINKPORT', - resourceTemplateId=cp_name) - add_reses.append(res_def) - - for storage_name in vdu_storage_names: - for _ in range(num): - res_def = objects.ResourceDefinitionV1( - id=uuidutils.generate_uuid(), - type='STORAGE', - resourceTemplateId=storage_name) - add_reses.append(res_def) + add_reses += self._make_res_def_for_new_vdu(name, num, + set(vdu_cp_names) - link_port_names, vdu_storage_names) ext_mgd_vls = [] if req.obj_attr_is_set('extManagedVirtualLinks'): @@ -232,17 +316,6 @@ class VnfLcmDriverV2(object): if req.obj_attr_is_set('additionalParams'): grant_req.additionalParams = req.additionalParams - grant_req._links = objects.GrantRequestV1_Links( - vnfLcmOpOcc=objects.Link( - href=lcmocc_utils.lcmocc_href(lcmocc.id, self.endpoint)), - vnfInstance=objects.Link( - href=inst_utils.inst_href(inst.id, self.endpoint))) - - # NOTE: if not granted, 403 error raised. 
- grant = self.nfvo_client.grant(context, grant_req) - - return grant_req, grant - def instantiate_post_grant(self, context, lcmocc, inst, grant_req, grant, vnfd): # set inst vimConnectionInfo @@ -288,7 +361,6 @@ class VnfLcmDriverV2(object): raise sol_ex.SolException(sol_detail='not support vim type') inst.instantiationState = 'INSTANTIATED' - lcmocc_utils.make_instantiate_lcmocc(lcmocc, inst) def instantiate_rollback(self, context, lcmocc, inst, grant_req, grant, vnfd): @@ -301,71 +373,63 @@ class VnfLcmDriverV2(object): # only support openstack at the moment raise sol_ex.SolException(sol_detail='not support vim type') - def terminate_grant(self, context, lcmocc, inst, vnfd): - # grant exchange - # NOTE: the api_version of NFVO supposes 1.4.0 at the moment. - grant_req = objects.GrantRequestV1( - vnfInstanceId=inst.id, - vnfLcmOpOccId=lcmocc.id, - vnfdId=inst.vnfdId, - operation=lcmocc.operation, - isAutomaticInvocation=lcmocc.isAutomaticInvocation - ) - - inst_info = inst.instantiatedVnfInfo + def _make_res_def_for_remove_vnfcs(self, inst_info, inst_vnfcs): + # common part of terminate and scale in rm_reses = [] vnfc_cps = {} - if inst_info.obj_attr_is_set('vnfcResourceInfo'): - for inst_vnc in inst_info.vnfcResourceInfo: - res_def = objects.ResourceDefinitionV1( - id=uuidutils.generate_uuid(), + for inst_vnfc in inst_vnfcs: + vdu_res_id = uuidutils.generate_uuid() + rm_reses.append( + objects.ResourceDefinitionV1( + id=vdu_res_id, type='COMPUTE', - resourceTemplateId=inst_vnc.vduId, - resource=inst_vnc.computeResource) - rm_reses.append(res_def) + resourceTemplateId=inst_vnfc.vduId, + resource=inst_vnfc.computeResource + ) + ) - if inst_vnc.obj_attr_is_set('vnfcCpInfo'): - for cp_info in inst_vnc.vnfcCpInfo: - if not (cp_info.obj_attr_is_set('vnfExtCpId') or - cp_info.obj_attr_is_set('vnfLinkPortId')): - # it means extLinkPorts of extVirtualLinks was - # specified. so it is not the resource to be - # deleted. 
- continue - res_def = objects.ResourceDefinitionV1( - id=uuidutils.generate_uuid(), - resourceTemplateId=cp_info.cpdId, - type='LINKPORT') - rm_reses.append(res_def) - if cp_info.obj_attr_is_set('vnfExtCpId'): - vnfc_cps[cp_info.vnfExtCpId] = res_def - else: # vnfLinkPortId - vnfc_cps[cp_info.vnfLinkPortId] = res_def + if inst_vnfc.obj_attr_is_set('vnfcCpInfo'): + for cp_info in inst_vnfc.vnfcCpInfo: + if not (cp_info.obj_attr_is_set('vnfExtCpId') or + cp_info.obj_attr_is_set('vnfLinkPortId')): + # it means extLinkPorts of extVirtualLinks was + # specified. so it is not the resource to be + # deleted. + continue + res_def = objects.ResourceDefinitionV1( + id="{}-{}".format(cp_info.cpdId, vdu_res_id), + resourceTemplateId=cp_info.cpdId, + type='LINKPORT') + rm_reses.append(res_def) + if cp_info.obj_attr_is_set('vnfExtCpId'): + vnfc_cps[cp_info.vnfExtCpId] = res_def + else: # vnfLinkPortId + vnfc_cps[cp_info.vnfLinkPortId] = res_def + if inst_vnfc.obj_attr_is_set('storageResourceIds'): + for storage_id in inst_vnfc.storageResourceIds: + for inst_str in inst_info.virtualStorageResourceInfo: + if inst_str.id == storage_id: + str_name = inst_str.virtualStorageDescId + rm_reses.append( + objects.ResourceDefinitionV1( + id="{}-{}".format(str_name, vdu_res_id), + type='STORAGE', + resourceTemplateId=str_name, + resource=inst_str.storageResource + ) + ) + break + + # fill resourceHandle of ports if inst_info.obj_attr_is_set('vnfVirtualLinkResourceInfo'): for inst_vl in inst_info.vnfVirtualLinkResourceInfo: - res_def = objects.ResourceDefinitionV1( - id=uuidutils.generate_uuid(), - type='VL', - resourceTemplateId=inst_vl.vnfVirtualLinkDescId, - resource=inst_vl.networkResource) - rm_reses.append(res_def) - if inst_vl.obj_attr_is_set('vnfLinkPorts'): for port in inst_vl.vnfLinkPorts: if port.id in vnfc_cps: res_def = vnfc_cps[port.id] res_def.resource = port.resourceHandle - if inst_info.obj_attr_is_set('virtualStorageResourceInfo'): - for inst_str in 
inst_info.virtualStorageResourceInfo: - res_def = objects.ResourceDefinitionV1( - id=uuidutils.generate_uuid(), - type='STORAGE', - resourceTemplateId=inst_str.virtualStorageDescId, - resource=inst_str.storageResource) - rm_reses.append(res_def) - if inst_info.obj_attr_is_set('extVirtualLinkInfo'): for ext_vl in inst_info.extVirtualLinkInfo: if ext_vl.obj_attr_is_set('extLinkPorts'): @@ -384,20 +448,29 @@ class VnfLcmDriverV2(object): res_def = vnfc_cps[port.id] res_def.resource = port.resourceHandle + return rm_reses + + def terminate_grant(self, grant_req, req, inst, vnfd): + inst_info = inst.instantiatedVnfInfo + rm_reses = [] + if inst_info.obj_attr_is_set('vnfcResourceInfo'): + rm_reses += self._make_res_def_for_remove_vnfcs( + inst_info, inst_info.vnfcResourceInfo) + + if inst_info.obj_attr_is_set('vnfVirtualLinkResourceInfo'): + for inst_vl in inst_info.vnfVirtualLinkResourceInfo: + rm_reses.append( + objects.ResourceDefinitionV1( + id=uuidutils.generate_uuid(), + type='VL', + resourceTemplateId=inst_vl.vnfVirtualLinkDescId, + resource=inst_vl.networkResource + ) + ) + if rm_reses: grant_req.removeResources = rm_reses - grant_req._links = objects.GrantRequestV1_Links( - vnfLcmOpOcc=objects.Link( - href=lcmocc_utils.lcmocc_href(lcmocc.id, self.endpoint)), - vnfInstance=objects.Link( - href=inst_utils.inst_href(inst.id, self.endpoint))) - - # NOTE: if not granted, 403 error raised. 
- grant_res = self.nfvo_client.grant(context, grant_req) - - return grant_req, grant_res - def terminate_process(self, context, lcmocc, inst, grant_req, grant, vnfd): req = lcmocc.operationParams @@ -410,7 +483,6 @@ class VnfLcmDriverV2(object): raise sol_ex.SolException(sol_detail='not support vim type') inst.instantiationState = 'NOT_INSTANTIATED' - lcmocc_utils.make_terminate_lcmocc(lcmocc, inst) # reset instantiatedVnfInfo # NOTE: reset after update lcmocc @@ -424,3 +496,113 @@ class VnfLcmDriverV2(object): # reset vimConnectionInfo inst.vimConnectionInfo = {} + + def scale_grant(self, grant_req, req, inst, vnfd): + flavour_id = inst.instantiatedVnfInfo.flavourId + scale_type = req.type + aspect_id = req.aspectId + num_steps = req.numberOfSteps + + vdu_num_inst = vnfd.get_scale_vdu_and_num(flavour_id, aspect_id) + if not vdu_num_inst: + # should not occur. just check for consistency. + raise sol_ex.InvalidScaleAspectId(aspect_id=aspect_id) + + if scale_type == 'SCALE_OUT': + self._make_scale_out_grant_request(grant_req, inst, num_steps, + vdu_num_inst) + else: + self._make_scale_in_grant_request(grant_req, inst, num_steps, + vdu_num_inst) + + if req.obj_attr_is_set('additionalParams'): + grant_req.additionalParams = req.additionalParams + + def _make_scale_out_grant_request(self, grant_req, inst, num_steps, + vdu_num_inst): + inst_info = inst.instantiatedVnfInfo + add_reses = [] + + # get one of vnfc for the vdu from inst.instantiatedVnfInfo + vdu_sample = {} + for vdu_name in vdu_num_inst.keys(): + for inst_vnfc in inst_info.vnfcResourceInfo: + if inst_vnfc.vduId == vdu_name: + vdu_sample[vdu_name] = inst_vnfc + break + + for vdu_name, inst_vnfc in vdu_sample.items(): + num_inst = vdu_num_inst[vdu_name] * num_steps + + vdu_cp_names = [] + if inst_vnfc.obj_attr_is_set('vnfcCpInfo'): + # NOTE: it is expected that there are only dynamic ports + # for vdus which enable scaling. 
+ vdu_cp_names = [cp_info.cpdId + for cp_info in inst_vnfc.vnfcCpInfo] + + vdu_storage_names = [] + if inst_vnfc.obj_attr_is_set('storageResourceIds'): + for storage_id in inst_vnfc.storageResourceIds: + for storage_res in inst_info.virtualStorageResourceInfo: + if storage_res.id == storage_id: + vdu_storage_names.append( + storage_res.virtualStorageDescId) + break + + add_reses += self._make_res_def_for_new_vdu(vdu_name, + num_inst, vdu_cp_names, vdu_storage_names) + + if add_reses: + grant_req.addResources = add_reses + + def _make_scale_in_grant_request(self, grant_req, inst, num_steps, + vdu_num_inst): + inst_info = inst.instantiatedVnfInfo + rm_vnfcs = [] + + # select remove VDUs + # NOTE: scale-in specification of tacker SOL003 v2 API is that + # newer VDU is selected for reduction. + # It is expected that vnfcResourceInfo is sorted by creation_time + # of VDU, newer is earlier. + for vdu_name, num_inst in vdu_num_inst.items(): + num_inst = num_inst * num_steps + + count = 0 + for inst_vnfc in inst_info.vnfcResourceInfo: + if inst_vnfc.vduId == vdu_name: + rm_vnfcs.append(inst_vnfc) + count += 1 + if count == num_inst: + break + + rm_reses = self._make_res_def_for_remove_vnfcs(inst_info, rm_vnfcs) + + if rm_reses: + grant_req.removeResources = rm_reses + + def scale_process(self, context, lcmocc, inst, grant_req, + grant, vnfd): + req = lcmocc.operationParams + vim_info = inst_utils.select_vim_info(inst.vimConnectionInfo) + if vim_info.vimType == 'ETSINFV.OPENSTACK_KEYSTONE.V_3': + driver = openstack.Openstack() + driver.scale(req, inst, grant_req, grant, vnfd) + else: + # only support openstack at the moment + raise sol_ex.SolException(sol_detail='not support vim type') + + def scale_rollback(self, context, lcmocc, inst, grant_req, + grant, vnfd): + req = lcmocc.operationParams + if req.type == 'SCALE_IN': + raise sol_ex.RollbackNotSupported(op='SCALE IN') + + vim_info = inst_utils.select_vim_info(inst.vimConnectionInfo) + if vim_info.vimType == 
'ETSINFV.OPENSTACK_KEYSTONE.V_3': + driver = openstack.Openstack() + driver.scale_rollback(req, inst, grant_req, grant, vnfd) + else: + # only support openstack at the moment + raise sol_ex.SolException(sol_detail='not support vim type') diff --git a/tacker/sol_refactored/controller/vnflcm_v2.py b/tacker/sol_refactored/controller/vnflcm_v2.py index 5863ee69d..2e3967ed8 100644 --- a/tacker/sol_refactored/controller/vnflcm_v2.py +++ b/tacker/sol_refactored/controller/vnflcm_v2.py @@ -149,6 +149,21 @@ class VnfLcmControllerV2(sol_wsgi.SolAPIController): self.endpoint) return sol_wsgi.SolResponse(204, None) + def _new_lcmocc(self, inst_id, operation, req_body): + now = datetime.utcnow() + lcmocc = objects.VnfLcmOpOccV2( + id=uuidutils.generate_uuid(), + operationState=v2fields.LcmOperationStateType.STARTING, + stateEnteredTime=now, + startTime=now, + vnfInstanceId=inst_id, + operation=operation, + isAutomaticInvocation=False, + isCancelPending=False, + operationParams=req_body) + + return lcmocc + @validator.schema(schema.InstantiateVnfRequest_V200, '2.0.0') @coordinate.lock_vnf_instance('{id}') def instantiate(self, request, id, body): @@ -160,17 +175,8 @@ class VnfLcmControllerV2(sol_wsgi.SolAPIController): lcmocc_utils.check_lcmocc_in_progress(context, id) - now = datetime.utcnow() - lcmocc = objects.VnfLcmOpOccV2( - id=uuidutils.generate_uuid(), - operationState=v2fields.LcmOperationStateType.STARTING, - stateEnteredTime=now, - startTime=now, - vnfInstanceId=id, - operation=v2fields.LcmOperationType.INSTANTIATE, - isAutomaticInvocation=False, - isCancelPending=False, - operationParams=body) + lcmocc = self._new_lcmocc(id, v2fields.LcmOperationType.INSTANTIATE, + body) req_param = lcmocc.operationParams # if there is partial vimConnectionInfo check and fulfill here. 
@@ -203,18 +209,63 @@ class VnfLcmControllerV2(sol_wsgi.SolAPIController): lcmocc_utils.check_lcmocc_in_progress(context, id) - now = datetime.utcnow() - lcmocc = objects.VnfLcmOpOccV2( - id=uuidutils.generate_uuid(), - operationState=v2fields.LcmOperationStateType.STARTING, - stateEnteredTime=now, - startTime=now, - vnfInstanceId=id, - operation=v2fields.LcmOperationType.TERMINATE, - isAutomaticInvocation=False, - isCancelPending=False, - operationParams=body) + lcmocc = self._new_lcmocc(id, v2fields.LcmOperationType.TERMINATE, + body) + lcmocc.create(context) + self.conductor_rpc.start_lcm_op(context, lcmocc.id) + + location = lcmocc_utils.lcmocc_href(lcmocc.id, self.endpoint) + + return sol_wsgi.SolResponse(202, None, location=location) + + def _get_current_scale_level(self, inst, aspect_id): + if (inst.obj_attr_is_set('instantiatedVnfInfo') and + inst.instantiatedVnfInfo.obj_attr_is_set('scaleStatus')): + for scale_info in inst.instantiatedVnfInfo.scaleStatus: + if scale_info.aspectId == aspect_id: + return scale_info.scaleLevel + + def _get_max_scale_level(self, inst, aspect_id): + if (inst.obj_attr_is_set('instantiatedVnfInfo') and + inst.instantiatedVnfInfo.obj_attr_is_set('maxScaleLevels')): + for scale_info in inst.instantiatedVnfInfo.maxScaleLevels: + if scale_info.aspectId == aspect_id: + return scale_info.scaleLevel + + @validator.schema(schema.ScaleVnfRequest_V200, '2.0.0') + @coordinate.lock_vnf_instance('{id}') + def scale(self, request, id, body): + context = request.context + inst = inst_utils.get_inst(context, id) + + if inst.instantiationState != 'INSTANTIATED': + raise sol_ex.VnfInstanceIsNotInstantiated(inst_id=id) + + lcmocc_utils.check_lcmocc_in_progress(context, id) + + # check parameters + aspect_id = body['aspectId'] + if 'numberOfSteps' not in body: + # set default value (1) defined by SOL specification for + # the convenience of the following methods. 
+ body['numberOfSteps'] = 1 + + scale_level = self._get_current_scale_level(inst, aspect_id) + max_scale_level = self._get_max_scale_level(inst, aspect_id) + if scale_level is None or max_scale_level is None: + raise sol_ex.InvalidScaleAspectId(aspect_id=aspect_id) + + num_steps = body['numberOfSteps'] + if body['type'] == 'SCALE_IN': + num_steps *= -1 + scale_level += num_steps + if scale_level < 0 or scale_level > max_scale_level: + raise sol_ex.InvalidScaleNumberOfSteps( + num_steps=body['numberOfSteps']) + + lcmocc = self._new_lcmocc(id, v2fields.LcmOperationType.SCALE, + body) lcmocc.create(context) self.conductor_rpc.start_lcm_op(context, lcmocc.id) diff --git a/tacker/sol_refactored/infra_drivers/openstack/heat_utils.py b/tacker/sol_refactored/infra_drivers/openstack/heat_utils.py index 72158c153..ee242c5bb 100644 --- a/tacker/sol_refactored/infra_drivers/openstack/heat_utils.py +++ b/tacker/sol_refactored/infra_drivers/openstack/heat_utils.py @@ -123,6 +123,22 @@ class HeatClient(object): "DELETE_COMPLETE", "DELETE_IN_PROGRESS", "DELETE_FAILED", none_is_done=True) + def get_parameters(self, stack_name): + path = "stacks/{}".format(stack_name) + resp, body = self.client.do_request(path, "GET", + expected_status=[200]) + + return body["stack"]["parameters"] + + def mark_unhealthy(self, stack_id, resource_name): + path = "stacks/{}/resources/{}".format(stack_id, resource_name) + fields = { + "mark_unhealthy": True, + "resource_status_reason": "marked by tacker" + } + resp, body = self.client.do_request(path, "PATCH", + expected_status=[200], body=fields) + def get_reses_by_types(heat_reses, types): return [res for res in heat_reses if res['resource_type'] in types] @@ -146,3 +162,19 @@ def get_port_reses(heat_reses): def get_stack_name(inst): return "vnf-" + inst.id + + +def get_resource_stack_id(heat_res): + # return the form "stack_name/stack_id" + for link in heat_res.get('links', []): + if link['rel'] == 'stack': + items = link['href'].split('/') + return 
"{}/{}".format(items[-2], items[-1]) + + +def get_parent_resource(heat_res, heat_reses): + parent = heat_res.get('parent_resource') + if parent: + for res in heat_reses: + if res['resource_name'] == parent: + return res diff --git a/tacker/sol_refactored/infra_drivers/openstack/openstack.py b/tacker/sol_refactored/infra_drivers/openstack/openstack.py index 9024d6935..23f143b2b 100644 --- a/tacker/sol_refactored/infra_drivers/openstack/openstack.py +++ b/tacker/sol_refactored/infra_drivers/openstack/openstack.py @@ -14,7 +14,9 @@ # under the License. +from dateutil import parser import eventlet +import json import os import pickle import subprocess @@ -28,12 +30,40 @@ from tacker.sol_refactored.common import vnf_instance_utils as inst_utils from tacker.sol_refactored.infra_drivers.openstack import heat_utils from tacker.sol_refactored.infra_drivers.openstack import userdata_default from tacker.sol_refactored import objects +from tacker.sol_refactored.objects.v2 import fields as v2fields LOG = logging.getLogger(__name__) CONF = config.CONF +LINK_PORT_PREFIX = 'req-' +CP_INFO_PREFIX = 'cp-' + + +# Id of the resources in instantiatedVnfInfo related methods. +# NOTE: instantiatedVnfInfo is re-created in each operation. +# Id of the resources in instantiatedVnfInfo is based on +# heat resource-id so that id is not changed at re-creation. +# Some ids are same as heat resource-id and some ids are +# combination of prefix and other ids. +def _make_link_port_id(link_port_id): + # prepend 'req-' to distinguish from ports which are + # created by heat. 
+ return '{}{}'.format(LINK_PORT_PREFIX, link_port_id) + + +def _is_link_port(link_port_id): + return link_port_id.startswith(LINK_PORT_PREFIX) + + +def _make_cp_info_id(link_port_id): + return '{}{}'.format(CP_INFO_PREFIX, link_port_id) + + +def _make_combination_id(a, b): + return '{}-{}'.format(a, b) + class Openstack(object): @@ -42,7 +72,7 @@ class Openstack(object): def instantiate(self, req, inst, grant_req, grant, vnfd): # make HOT - fields = self.make_hot(req, inst, grant_req, grant, vnfd) + fields = self._make_hot(req, inst, grant_req, grant, vnfd) LOG.debug("stack fields: %s", fields) @@ -61,10 +91,115 @@ class Openstack(object): heat_reses = heat_client.get_resources(stack_name) # make instantiated_vnf_info - self.make_instantiated_vnf_info(req, inst, grant, vnfd, heat_reses) + self._make_instantiated_vnf_info(req, inst, grant_req, grant, vnfd, + heat_reses) + + def instantiate_rollback(self, req, inst, grant_req, grant, vnfd): + vim_info = inst_utils.select_vim_info(inst.vimConnectionInfo) + heat_client = heat_utils.HeatClient(vim_info) + stack_name = heat_utils.get_stack_name(inst) + status, _ = heat_client.get_status(stack_name) + if status is not None: + heat_client.delete_stack(stack_name) + + def terminate(self, req, inst, grant_req, grant, vnfd): + if req.terminationType == 'GRACEFUL': + timeout = CONF.v2_vnfm.default_graceful_termination_timeout + if req.obj_attr_is_set('gracefulTerminationTimeout'): + timeout = req.gracefulTerminationTimeout + eventlet.sleep(timeout) + + # delete stack + vim_info = inst_utils.select_vim_info(inst.vimConnectionInfo) + heat_client = heat_utils.HeatClient(vim_info) + stack_name = heat_utils.get_stack_name(inst) + heat_client.delete_stack(stack_name) + + def _update_nfv_dict(self, heat_client, stack_name, fields): + parameters = heat_client.get_parameters(stack_name) + LOG.debug("ORIG parameters: %s", parameters) + # NOTE: parameters['nfv'] is string + orig_nfv_dict = json.loads(parameters.get('nfv', '{}')) + if 
'nfv' in fields['parameters']: + fields['parameters']['nfv'] = inst_utils.json_merge_patch( + orig_nfv_dict, fields['parameters']['nfv']) + LOG.debug("NEW parameters: %s", fields['parameters']) + return fields + + def scale(self, req, inst, grant_req, grant, vnfd): + vim_info = inst_utils.select_vim_info(inst.vimConnectionInfo) + heat_client = heat_utils.HeatClient(vim_info) + + # make HOT + fields = self._make_hot(req, inst, grant_req, grant, vnfd) + + LOG.debug("stack fields: %s", fields) + + stack_name = fields.pop('stack_name') + + # mark unhealthy to servers to be removed if scale in + if req.type == 'SCALE_IN': + vnfc_res_ids = [res_def.resource.resourceId + for res_def in grant_req.removeResources + if res_def.type == 'COMPUTE'] + for vnfc in inst.instantiatedVnfInfo.vnfcResourceInfo: + if vnfc.computeResource.resourceId in vnfc_res_ids: + if 'parent_stack_id' not in vnfc.metadata: + # It means definition of VDU in the BaseHOT + # is inappropriate. + raise sol_ex.UnexpectedParentResourceDefinition() + heat_client.mark_unhealthy( + vnfc.metadata['parent_stack_id'], + vnfc.metadata['parent_resource_name']) + + # update stack + fields = self._update_nfv_dict(heat_client, stack_name, fields) + heat_client.update_stack(stack_name, fields) + + # get stack resource + heat_reses = heat_client.get_resources(stack_name) + + # make instantiated_vnf_info + self._make_instantiated_vnf_info(req, inst, grant_req, grant, vnfd, + heat_reses) + + def scale_rollback(self, req, inst, grant_req, grant, vnfd): + # NOTE: rollback is supported for scale out only + vim_info = inst_utils.select_vim_info(inst.vimConnectionInfo) + heat_client = heat_utils.HeatClient(vim_info) + stack_name = heat_utils.get_stack_name(inst) + heat_reses = heat_client.get_resources(stack_name) + + # mark unhealthy to added servers while scale out + vnfc_ids = [vnfc.id + for vnfc in inst.instantiatedVnfInfo.vnfcResourceInfo] + for res in heat_utils.get_server_reses(heat_reses): + if 
res['physical_resource_id'] not in vnfc_ids: + metadata = self._make_vnfc_metadata(res, heat_reses) + if 'parent_stack_id' not in metadata: + # It means definition of VDU in the BaseHOT + # is inappropriate. + raise sol_ex.UnexpectedParentResourceDefinition() + heat_client.mark_unhealthy( + metadata['parent_stack_id'], + metadata['parent_resource_name']) + + # update (put back) 'desired_capacity' parameter + fields = self._update_nfv_dict(heat_client, stack_name, + userdata_default.DefaultUserData.scale_rollback( + req, inst, grant_req, grant, vnfd.csar_dir)) + + heat_client.update_stack(stack_name, fields) + + # NOTE: instantiatedVnfInfo is not necessary to update since it + # should be same as before scale API started. + + def _make_hot(self, req, inst, grant_req, grant, vnfd): + if grant_req.operation == v2fields.LcmOperationType.INSTANTIATE: + flavour_id = req.flavourId + else: + flavour_id = inst.instantiatedVnfInfo.flavourId - def make_hot(self, req, inst, grant_req, grant, vnfd): - flavour_id = req.flavourId hot_dict = vnfd.get_base_hot(flavour_id) if not hot_dict: raise sol_ex.BaseHOTNotDefined() @@ -77,15 +212,17 @@ class Openstack(object): 'lcm-operation-user-data-class') if userdata is None and userdata_class is None: - LOG.debug("Processing default userdata instantiate") + LOG.debug("Processing default userdata %s", grant_req.operation) # NOTE: objects used here are dict compat. - fields = userdata_default.DefaultUserData.instantiate( - req, inst, grant_req, grant, vnfd.csar_dir) + method = getattr(userdata_default.DefaultUserData, + grant_req.operation.lower()) + fields = method(req, inst, grant_req, grant, vnfd.csar_dir) elif userdata is None or userdata_class is None: # Both must be specified. 
raise sol_ex.UserdataMissing() else: - LOG.debug("Processing %s %s instantiate", userdata, userdata_class) + LOG.debug("Processing %s %s %s", userdata, userdata_class, + grant_req.operation) tmp_csar_dir = vnfd.make_tmp_csar_dir() script_dict = { @@ -98,7 +235,7 @@ class Openstack(object): script_path = os.path.join( os.path.dirname(__file__), "userdata_main.py") - out = subprocess.run(["python3", script_path, "INSTANTIATE"], + out = subprocess.run(["python3", script_path], input=pickle.dumps(script_dict), capture_output=True) @@ -118,6 +255,20 @@ class Openstack(object): return fields + def _get_checked_reses(self, nodes, reses): + names = list(nodes.keys()) + + def _check_res_in_vnfd(res): + if res['resource_name'] in names: + return True + else: + # should not occur. just check for consistency. + LOG.debug("%s not in VNFD definition.", res['resource_name']) + return False + + return {res['physical_resource_id']: res + for res in reses if _check_res_in_vnfd(res)} + def _address_range_data_to_info(self, range_data): obj = objects.ipOverEthernetAddressInfoV2_IpAddresses_AddressRange() obj.minAddress = range_data.minAddress @@ -157,122 +308,10 @@ class Openstack(object): return proto_info - def make_instantiated_vnf_info(self, req, inst, grant, vnfd, heat_reses): - flavour_id = req.flavourId - vim_info = inst_utils.select_vim_info(inst.vimConnectionInfo) - inst_vnf_info = objects.VnfInstanceV2_InstantiatedVnfInfo( - flavourId=flavour_id, - vnfState='STARTED', - ) - - # make virtualStorageResourceInfo - storages = vnfd.get_storage_nodes(flavour_id) - reses = heat_utils.get_storage_reses(heat_reses) - storage_infos = [] - storage_info_to_heat_res = {} - - for res in reses: - storage_name = res['resource_name'] - if storage_name not in list(storages.keys()): - # should not occur. just check for consistency. 
- LOG.debug("%s not in VNFD storage definition.", storage_name) - continue - storage_info = objects.VirtualStorageResourceInfoV2( - id=uuidutils.generate_uuid(), - virtualStorageDescId=storage_name, - storageResource=objects.ResourceHandle( - resourceId=res['physical_resource_id'], - vimLevelResourceType=res['resource_type'], - vimConnectionId=vim_info.vimId, - ) - ) - storage_infos.append(storage_info) - storage_info_to_heat_res[storage_info.id] = res - - if storage_infos: - inst_vnf_info.virtualStorageResourceInfo = storage_infos - - # make vnfcResourceInfo - vdus = vnfd.get_vdu_nodes(flavour_id) - reses = heat_utils.get_server_reses(heat_reses) - vnfc_res_infos = [] - vnfc_res_info_to_heat_res = {} - - for res in reses: - vdu_name = res['resource_name'] - if vdu_name not in list(vdus.keys()): - # should not occur. just check for consistency. - LOG.debug("%s not in VNFD VDU definition.", vdu_name) - continue - vnfc_res_info = objects.VnfcResourceInfoV2( - id=uuidutils.generate_uuid(), - vduId=vdu_name, - computeResource=objects.ResourceHandle( - resourceId=res['physical_resource_id'], - vimLevelResourceType=res['resource_type'], - vimConnectionId=vim_info.vimId, - ), - ) - vdu_cps = vnfd.get_vdu_cps(flavour_id, vdu_name) - cp_infos = [] - for cp in vdu_cps: - cp_info = objects.VnfcResourceInfoV2_VnfcCpInfo( - id=uuidutils.generate_uuid(), - cpdId=cp, - # vnfExtCpId or vnfLinkPortId may set later - ) - cp_infos.append(cp_info) - if cp_infos: - vnfc_res_info.vnfcCpInfo = cp_infos - - # find storages used by this - storage_ids = [] - for storage_id, storage_res in storage_info_to_heat_res.items(): - if (vdu_name in storage_res.get('required_by', []) and - res.get('parent_resource') == - storage_res.get('parent_resource')): - storage_ids.append(storage_id) - if storage_ids: - vnfc_res_info.storageResourceIds = storage_ids - - vnfc_res_infos.append(vnfc_res_info) - vnfc_res_info_to_heat_res[vnfc_res_info.id] = res - - if vnfc_res_infos: - 
inst_vnf_info.vnfcResourceInfo = vnfc_res_infos - - # make vnfVirtualLinkResourceInfo - vls = vnfd.get_virtual_link_nodes(flavour_id) - reses = heat_utils.get_network_reses(heat_reses) - vnf_vl_infos = [] - vnf_vl_info_to_heat_res = {} - - for res in reses: - vl_name = res['resource_name'] - if vl_name not in list(vls.keys()): - # should not occur. just check for consistency. - LOG.debug("%s not in VNFD VL definition.", vl_name) - continue - vnf_vl_info = objects.VnfVirtualLinkResourceInfoV2( - id=uuidutils.generate_uuid(), - vnfVirtualLinkDescId=vl_name, - networkResource=objects.ResourceHandle( - resourceId=res['physical_resource_id'], - vimLevelResourceType=res['resource_type'], - vimConnectionId=vim_info.vimId, - ), - # vnfLinkPorts set later - ) - vnf_vl_infos.append(vnf_vl_info) - vnf_vl_info_to_heat_res[vnf_vl_info.id] = res - - if vnf_vl_infos: - inst_vnf_info.vnfVirtualLinkResourceInfo = vnf_vl_infos - + def _make_ext_vl_info_from_req(self, req, grant, ext_cp_infos): # make extVirtualLinkInfo ext_vls = [] req_ext_vls = [] - ext_cp_infos = [] if grant.obj_attr_is_set('extVirtualLinks'): req_ext_vls = grant.extVirtualLinks elif req.obj_attr_is_set('extVirtualLinks'): @@ -282,7 +321,6 @@ class Openstack(object): ext_vl = objects.ExtVirtualLinkInfoV2( id=req_ext_vl.id, resourceHandle=objects.ResourceHandle( - id=uuidutils.generate_uuid(), resourceId=req_ext_vl.resourceId ), currentVnfExtCpData=req_ext_vl.extCps @@ -301,11 +339,11 @@ class Openstack(object): link_ports = [] for req_link_port in req_ext_vl.extLinkPorts: link_port = objects.ExtLinkPortInfoV2( - id=req_link_port.id, + id=_make_link_port_id(req_link_port.id), resourceHandle=req_link_port.resourceHandle, ) ext_cp_info = objects.VnfExtCpInfoV2( - id=uuidutils.generate_uuid(), + id=_make_cp_info_id(link_port.id), extLinkPortId=link_port.id # associatedVnfcCpId may set later ) @@ -315,7 +353,7 @@ class Openstack(object): found = False for key, cp_conf in ext_cp.cpConfig.items(): if 
(cp_conf.obj_attr_is_set('linkPortId') and - cp_conf.linkPortId == link_port.id): + cp_conf.linkPortId == req_link_port.id): ext_cp_info.cpdId = ext_cp.cpdId ext_cp_info.cpConfigId = key # NOTE: cpProtocolInfo can't be filled @@ -329,10 +367,38 @@ class Openstack(object): ext_vl.extLinkPorts = link_ports - if ext_vls: - inst_vnf_info.extVirtualLinkInfo = ext_vls - # ext_cp_infos set later + return ext_vls + def _make_ext_vl_info_from_inst(self, old_inst_vnf_info, ext_cp_infos): + # make extVirtualLinkInfo from old inst.extVirtualLinkInfo + ext_vls = [] + old_cp_infos = [] + + if old_inst_vnf_info.obj_attr_is_set('extVirtualLinkInfo'): + ext_vls = old_inst_vnf_info.extVirtualLinkInfo + if old_inst_vnf_info.obj_attr_is_set('extCpInfo'): + old_cp_infos = old_inst_vnf_info.extCpInfo + + for ext_vl in ext_vls: + if not ext_vl.obj_attr_is_set('extLinkPorts'): + continue + new_link_ports = [] + for link_port in ext_vl.extLinkPorts: + if not _is_link_port(link_port.id): + # port created by heat. re-create later + continue + + new_link_ports.append(link_port) + for ext_cp in old_cp_infos: + if ext_cp.id == link_port.cpInstanceId: + ext_cp_infos.append(ext_cp) + break + + ext_vl.extLinkPorts = new_link_ports + + return ext_vls + + def _make_ext_mgd_vl_info_from_req(self, vnfd, flavour_id, req, grant): # make extManagedVirtualLinkInfo ext_mgd_vls = [] req_mgd_vls = [] @@ -341,14 +407,20 @@ class Openstack(object): elif req.obj_attr_is_set('extManagedVirtualLinks'): req_mgd_vls = req.extManagedVirtualLinks + vls = vnfd.get_virtual_link_nodes(flavour_id) for req_mgd_vl in req_mgd_vls: + vl_name = req_mgd_vl.vnfVirtualLinkDescId + if vl_name not in list(vls.keys()): + # should not occur. just check for consistency. 
+ LOG.debug("%s not in VNFD VL definition.", vl_name) + continue ext_mgd_vl = objects.ExtManagedVirtualLinkInfoV2( id=req_mgd_vl.id, - vnfVirtualLinkDescId=req_mgd_vl.vnfVirtualLinkDescId, + vnfVirtualLinkDescId=vl_name, networkResource=objects.ResourceHandle( id=uuidutils.generate_uuid(), resourceId=req_mgd_vl.resourceId - ), + ) ) if req_mgd_vl.obj_attr_is_set('vimConnectionId'): ext_mgd_vl.networkResource.vimConnectionId = ( @@ -359,142 +431,266 @@ class Openstack(object): ext_mgd_vls.append(ext_mgd_vl) - if not req_mgd_vl.obj_attr_is_set('vnfLinkPort'): - continue - link_ports = [] - for req_link_port in req_mgd_vl.vnfLinkPort: - link_port = objects.VnfLinkPortInfoV2( - id=req_link_port.vnfLinkPortId, - resourceHandle=req_link_port.resourceHandle, - cpInstanceType='EXT_CP', # may be changed later - # cpInstanceId may set later - ) - link_ports.append(link_port) - ext_mgd_vl.vnfLinkPort = link_ports - - if ext_mgd_vls: - inst_vnf_info.extManagedVirtualLinkInfo = ext_mgd_vls - - # make CP related infos - vdu_cps = vnfd.get_vducp_nodes(flavour_id) - reses = heat_utils.get_port_reses(heat_reses) - - for res in reses: - cp_name = res['resource_name'] - if cp_name not in list(vdu_cps.keys()): - # should not occur. just check for consistency. 
- LOG.debug("%s not in VNFD CP definition.", cp_name) - continue - vl_name = vnfd.get_vl_name_from_cp(flavour_id, vdu_cps[cp_name]) - is_external = False - if vl_name is None: # extVirtualLink - is_external = True - - # NOTE: object is diffrent from other vl types - vnf_link_port = objects.ExtLinkPortInfoV2( - id=uuidutils.generate_uuid(), - resourceHandle=objects.ResourceHandle( - resourceId=res['physical_resource_id'], - vimLevelResourceType=res['resource_type'], - vimConnectionId=vim_info.vimId, + if req_mgd_vl.obj_attr_is_set('vnfLinkPort'): + ext_mgd_vl.vnfLinkPort = [ + objects.VnfLinkPortInfoV2( + id=_make_link_port_id(req_link_port.vnfLinkPortId), + resourceHandle=req_link_port.resourceHandle, + cpInstanceType='EXT_CP', # may be changed later + # cpInstanceId may set later ) - ) - ext_cp_info = objects.VnfExtCpInfoV2( - id=uuidutils.generate_uuid(), - extLinkPortId=vnf_link_port.id, - cpdId=cp_name - # associatedVnfcCpId may set later - ) - vnf_link_port.cpInstanceId = ext_cp_info.id + for req_link_port in req_mgd_vl.vnfLinkPort + ] - found = False - for ext_vl in ext_vls: - for ext_cp in ext_vl.currentVnfExtCpData: - if ext_cp.cpdId == cp_name: - found = True - break - if found: + return ext_mgd_vls + + def _make_ext_mgd_vl_info_from_inst(self, old_inst_vnf_info): + # make extManagedVirtualLinkInfo + ext_mgd_vls = [] + + if old_inst_vnf_info.obj_attr_is_set('extManagedVirtualLinkInfo'): + ext_mgd_vls = old_inst_vnf_info.extManagedVirtualLinkInfo + + for ext_mgd_vl in ext_mgd_vls: + if ext_mgd_vl.obj_attr_is_set('vnfLinkPorts'): + ext_mgd_vl.vnfLinkPorts = [link_port + for link_port in ext_mgd_vl.vnfLinkPorts + if _is_link_port(link_port.id)] + + return ext_mgd_vls + + def _find_ext_vl_by_cp_name(self, cp_name, ext_vl_infos): + for ext_vl_info in ext_vl_infos: + for ext_cp_data in ext_vl_info.currentVnfExtCpData: + if ext_cp_data.cpdId == cp_name: + return ext_vl_info, ext_cp_data + + return None, None + + def _link_ext_port_info(self, ext_port_infos, 
ext_vl_infos, ext_cp_infos, + port_reses): + for ext_port_info in ext_port_infos: + res = port_reses[ext_port_info.id] + cp_name = res['resource_name'] + ext_cp_info = objects.VnfExtCpInfoV2( + id=_make_cp_info_id(ext_port_info.id), + extLinkPortId=ext_port_info.id, + cpdId=cp_name + # associatedVnfcCpId may set later + ) + ext_port_info.cpInstanceId = ext_cp_info.id + + ext_vl_info, ext_cp_data = self._find_ext_vl_by_cp_name( + cp_name, ext_vl_infos) + + if ext_vl_info: + if ext_vl_info.obj_attr_is_set('extLinkPorts'): + ext_vl_info.extLinkPorts.append(ext_port_info) + else: + ext_vl_info.extLinkPorts = [ext_port_info] + + for key, cp_conf in ext_cp_data.cpConfig.items(): + # NOTE: it is assumed that there is one item + # (with cpProtocolData) of cpConfig at the moment. + if cp_conf.obj_attr_is_set('cpProtocolData'): + proto_infos = [] + for proto_data in cp_conf.cpProtocolData: + proto_info = self._proto_data_to_info( + proto_data) + proto_infos.append(proto_info) + ext_cp_info.cpProtocolInfo = proto_infos + ext_cp_info.cpConfigId = key break - if found: - if ext_vl.obj_attr_is_set('extLinkPorts'): - ext_vl.extLinkPorts.append(vnf_link_port) - else: - ext_vl.extLinkPorts = [vnf_link_port] + ext_cp_infos.append(ext_cp_info) - for key, cp_conf in ext_cp.cpConfig.items(): - # NOTE: it is assumed that there is one item - # (with cpProtocolData) of cpConfig at the moment. 
- if cp_conf.obj_attr_is_set('cpProtocolData'): - proto_infos = [] - for proto_data in cp_conf.cpProtocolData: - proto_info = self._proto_data_to_info( - proto_data) - proto_infos.append(proto_info) - ext_cp_info.cpProtocolInfo = proto_infos - ext_cp_info.cpConfigId = key - break - - ext_cp_infos.append(ext_cp_info) - else: - # Internal VL or extManagedVirtualLink - vnf_link_port = objects.VnfLinkPortInfoV2( - id=uuidutils.generate_uuid(), - resourceHandle=objects.ResourceHandle( - resourceId=res['physical_resource_id'], - vimLevelResourceType=res['resource_type'], - vimConnectionId=vim_info.vimId, - cpInstanceType='EXT_CP' # may be changed later - ) - ) - - is_internal = False - for vnf_vl_info in vnf_vl_infos: - if vnf_vl_info.vnfVirtualLinkDescId == vl_name: - # Internal VL - is_internal = True - if vnf_vl_info.obj_attr_is_set('vnfLinkPorts'): - vnf_vl_info.vnfLinkPorts.append(vnf_link_port) - else: - vnf_vl_info.vnfLinkPorts = [vnf_link_port] - - if not is_internal: - # extManagedVirtualLink - for ext_mgd_vl in ext_mgd_vls: - # should be found - if ext_mgd_vl.vnfVirtualLinkDescId == vl_name: - if ext_mgd_vl.obj_attr_is_set('vnfLinkPorts'): - ext_mgd_vl.vnfLinkPorts.append(vnf_link_port) - else: - ext_mgd_vl.vnfLinkPorts = [vnf_link_port] - - # link to vnfcResourceInfo.vnfcCpInfo - for vnfc_res_info in vnfc_res_infos: - if not vnfc_res_info.obj_attr_is_set('vnfcCpInfo'): - continue - vnfc_res = vnfc_res_info_to_heat_res[vnfc_res_info.id] - vdu_name = vnfc_res_info.vduId - if not (vdu_name in res.get('required_by', []) and - res.get('parent_resource') == - vnfc_res.get('parent_resource')): - continue + def _find_vnfc_cp_info(self, port_res, vnfc_res_infos, server_reses): + for vnfc_res_info in vnfc_res_infos: + if not vnfc_res_info.obj_attr_is_set('vnfcCpInfo'): + continue + vnfc_res = server_reses[vnfc_res_info.id] + vdu_name = vnfc_res_info.vduId + cp_name = port_res['resource_name'] + if (vdu_name in port_res.get('required_by', []) and + 
port_res.get('parent_resource') == + vnfc_res.get('parent_resource')): for vnfc_cp in vnfc_res_info.vnfcCpInfo: - if vnfc_cp.cpdId != cp_name: - continue - if is_external: - vnfc_cp.vnfExtCpId = vnf_link_port.cpInstanceId - for ext_cp_info in ext_cp_infos: - if ext_cp_info.extLinkPortId == vnf_link_port.id: - ext_cp_info.associatedVnfcCpId = vnfc_cp.id - break - else: - vnf_link_port.cpInstanceType = 'VNFC_CP' - vnf_link_port.cpInstanceId = vnfc_cp.id - vnfc_cp.vnfLinkPortId = vnf_link_port.id - break + if vnfc_cp.cpdId == cp_name: + return vnfc_cp - if ext_cp_infos: - inst_vnf_info.extCpInfo = ext_cp_infos + def _link_vnfc_cp_info(self, vnfc_res_infos, ext_port_infos, + vnf_port_infos, ext_cp_infos, server_reses, port_reses): + + for ext_port_info in ext_port_infos: + port_res = port_reses[ext_port_info.id] + vnfc_cp = self._find_vnfc_cp_info(port_res, vnfc_res_infos, + server_reses) + if vnfc_cp: + # should be found + vnfc_cp.vnfExtCpId = ext_port_info.cpInstanceId + for ext_cp_info in ext_cp_infos: + if ext_cp_info.extLinkPortId == ext_port_info.id: + ext_cp_info.associatedVnfcCpId = vnfc_cp.id + break + + for vnf_port_info in vnf_port_infos: + port_res = port_reses[vnf_port_info.id] + vnfc_cp = self._find_vnfc_cp_info(port_res, vnfc_res_infos, + server_reses) + if vnfc_cp: + # should be found + vnf_port_info.cpInstanceType = 'VNFC_CP' + vnf_port_info.cpInstanceId = vnfc_cp.id + vnfc_cp.vnfLinkPortId = vnf_port_info.id + + def _make_vnfc_metadata(self, server_res, heat_reses): + metadata = { + 'creation_time': server_res['creation_time'], + } + parent_res = heat_utils.get_parent_resource(server_res, heat_reses) + if parent_res: + metadata['parent_stack_id'] = ( + heat_utils.get_resource_stack_id(parent_res)) + metadata['parent_resource_name'] = parent_res['resource_name'] + + return metadata + + def _make_instantiated_vnf_info(self, req, inst, grant_req, grant, vnfd, + heat_reses): + init = False + if grant_req.operation == v2fields.LcmOperationType.INSTANTIATE: 
+ init = True + flavour_id = req.flavourId + else: + flavour_id = inst.instantiatedVnfInfo.flavourId + vim_info = inst_utils.select_vim_info(inst.vimConnectionInfo) + vducp_nodes = vnfd.get_vducp_nodes(flavour_id) + + storage_reses = self._get_checked_reses( + vnfd.get_storage_nodes(flavour_id), + heat_utils.get_storage_reses(heat_reses)) + server_reses = self._get_checked_reses(vnfd.get_vdu_nodes(flavour_id), + heat_utils.get_server_reses(heat_reses)) + network_reses = self._get_checked_reses( + vnfd.get_virtual_link_nodes(flavour_id), + heat_utils.get_network_reses(heat_reses)) + port_reses = self._get_checked_reses(vducp_nodes, + heat_utils.get_port_reses(heat_reses)) + + def _res_to_handle(res): + return objects.ResourceHandle( + resourceId=res['physical_resource_id'], + vimLevelResourceType=res['resource_type'], + vimConnectionId=vim_info.vimId) + + storage_infos = [ + objects.VirtualStorageResourceInfoV2( + id=res_id, + virtualStorageDescId=res['resource_name'], + storageResource=_res_to_handle(res) + ) + for res_id, res in storage_reses.items() + ] + + vnfc_res_infos = [ + objects.VnfcResourceInfoV2( + id=res_id, + vduId=res['resource_name'], + computeResource=_res_to_handle(res), + metadata=self._make_vnfc_metadata(res, heat_reses) + ) + for res_id, res in server_reses.items() + ] + + for vnfc_res_info in vnfc_res_infos: + vdu_name = vnfc_res_info.vduId + server_res = server_reses[vnfc_res_info.id] + storage_ids = [storage_id + for storage_id, storage_res in storage_reses.items() + if (vdu_name in storage_res.get('required_by', []) and + server_res.get('parent_resource') == + storage_res.get('parent_resource')) + ] + if storage_ids: + vnfc_res_info.storageResourceIds = storage_ids + + vdu_cps = vnfd.get_vdu_cps(flavour_id, vdu_name) + cp_infos = [ + objects.VnfcResourceInfoV2_VnfcCpInfo( + id=_make_combination_id(cp, vnfc_res_info.id), + cpdId=cp, + # vnfExtCpId or vnfLinkPortId may set later + ) + for cp in vdu_cps + ] + if cp_infos: + 
vnfc_res_info.vnfcCpInfo = cp_infos + + vnf_vl_res_infos = [ + objects.VnfVirtualLinkResourceInfoV2( + id=res_id, + vnfVirtualLinkDescId=res['resource_name'], + networkResource=_res_to_handle(res) + ) + for res_id, res in network_reses.items() + ] + + ext_cp_infos = [] + if init: + ext_vl_infos = self._make_ext_vl_info_from_req( + req, grant, ext_cp_infos) + ext_mgd_vl_infos = self._make_ext_mgd_vl_info_from_req(vnfd, + flavour_id, req, grant) + else: + old_inst_vnf_info = inst.instantiatedVnfInfo + ext_vl_infos = self._make_ext_vl_info_from_inst( + old_inst_vnf_info, ext_cp_infos) + ext_mgd_vl_infos = self._make_ext_mgd_vl_info_from_inst( + old_inst_vnf_info) + + def _find_vl_name(port_res): + cp_name = port_res['resource_name'] + return vnfd.get_vl_name_from_cp(flavour_id, vducp_nodes[cp_name]) + + ext_port_infos = [ + objects.ExtLinkPortInfoV2( + id=res_id, + resourceHandle=_res_to_handle(res) + ) + for res_id, res in port_reses.items() + if _find_vl_name(res) is None + ] + + self._link_ext_port_info(ext_port_infos, ext_vl_infos, ext_cp_infos, + port_reses) + + vnf_port_infos = [ + objects.VnfLinkPortInfoV2( + id=res_id, + resourceHandle=_res_to_handle(res), + cpInstanceType='EXT_CP' # may be changed later + ) + for res_id, res in port_reses.items() + if _find_vl_name(res) is not None + ] + + vl_name_to_info = {info.vnfVirtualLinkDescId: info + for info in vnf_vl_res_infos + ext_mgd_vl_infos} + + for vnf_port_info in vnf_port_infos: + port_res = port_reses[vnf_port_info.id] + vl_info = vl_name_to_info.get(_find_vl_name(port_res)) + if vl_info is None: + # should not occur. just check for consistency. + continue + + if vl_info.obj_attr_is_set('vnfLinkPorts'): + vl_info.vnfLinkPorts.append(vnf_port_info) + else: + vl_info.vnfLinkPorts = [vnf_port_info] + + self._link_vnfc_cp_info(vnfc_res_infos, ext_port_infos, + vnf_port_infos, ext_cp_infos, server_reses, port_reses) # NOTE: The followings are not handled at the moment. 
    # - handle tosca.nodes.nfv.VnfExtCp type
@@ -505,40 +701,52 @@ class Openstack(object):
         # because the association of compute resource and port resource
         # is not identified.
+        # make new instantiatedVnfInfo and replace
+        inst_vnf_info = objects.VnfInstanceV2_InstantiatedVnfInfo(
+            flavourId=flavour_id,
+            vnfState='STARTED',
+        )
+        if storage_infos:
+            inst_vnf_info.virtualStorageResourceInfo = storage_infos
+
+        if vnfc_res_infos:
+            # NOTE: scale-in specification of tacker SOL003 v2 API is that
+            # newer VDU is selected for reduction. It is necessary to sort
+            # vnfc_res_infos at this point so that the conductor should
+            # choose VDUs from a head sequentially when making scale-in
+            # grant request.
+
+            def _get_key(vnfc):
+                return parser.isoparse(vnfc.metadata['creation_time'])
+
+            sorted_vnfc_res_infos = sorted(vnfc_res_infos, key=_get_key,
+                                           reverse=True)
+            inst_vnf_info.vnfcResourceInfo = sorted_vnfc_res_infos
+
+        if vnf_vl_res_infos:
+            inst_vnf_info.vnfVirtualLinkResourceInfo = vnf_vl_res_infos
+
+        if ext_vl_infos:
+            inst_vnf_info.extVirtualLinkInfo = ext_vl_infos
+
+        if ext_mgd_vl_infos:
+            inst_vnf_info.extManagedVirtualLinkInfo = ext_mgd_vl_infos
+
+        if ext_cp_infos:
+            inst_vnf_info.extCpInfo = ext_cp_infos
+
         # make vnfcInfo
         # NOTE: vnfcInfo only exists in SOL002
-        vnfc_infos = []
-        for vnfc_res_info in vnfc_res_infos:
-            vnfc_info = objects.VnfcInfoV2(
-                id=uuidutils.generate_uuid(),
-                vduId=vnfc_res_info.vduId,
-                vnfcResourceInfoId=vnfc_res_info.id,
-                vnfcState='STARTED'
-            )
-            vnfc_infos.append(vnfc_info)
-
-        if vnfc_infos:
-            inst_vnf_info.vnfcInfo = vnfc_infos
+        if vnfc_res_infos:
+            inst_vnf_info.vnfcInfo = [
+                objects.VnfcInfoV2(
+                    id=_make_combination_id(vnfc_res_info.vduId,
+                                            vnfc_res_info.id),
+                    vduId=vnfc_res_info.vduId,
+                    vnfcResourceInfoId=vnfc_res_info.id,
+                    vnfcState='STARTED'
+                )
+                for vnfc_res_info in sorted_vnfc_res_infos
+            ]
 
         inst.instantiatedVnfInfo = inst_vnf_info
-
-    def instantiate_rollback(self, req, inst, grant_req, grant, vnfd):
-        vim_info =
inst_utils.select_vim_info(inst.vimConnectionInfo) - heat_client = heat_utils.HeatClient(vim_info) - stack_name = heat_utils.get_stack_name(inst) - status, _ = heat_client.get_status(stack_name) - if status is not None: - heat_client.delete_stack(stack_name) - - def terminate(self, req, inst, grant_req, grant, vnfd): - if req.terminationType == 'GRACEFUL': - timeout = CONF.v2_vnfm.default_graceful_termination_timeout - if req.obj_attr_is_set('gracefulTerminationTimeout'): - timeout = req.gracefulTerminationTimeout - eventlet.sleep(timeout) - - # delete stack - vim_info = inst_utils.select_vim_info(inst.vimConnectionInfo) - heat_client = heat_utils.HeatClient(vim_info) - stack_name = heat_utils.get_stack_name(inst) - heat_client.delete_stack(stack_name) diff --git a/tacker/sol_refactored/infra_drivers/openstack/userdata_default.py b/tacker/sol_refactored/infra_drivers/openstack/userdata_default.py index 6322aae5d..f86e9dd85 100644 --- a/tacker/sol_refactored/infra_drivers/openstack/userdata_default.py +++ b/tacker/sol_refactored/infra_drivers/openstack/userdata_default.py @@ -36,14 +36,18 @@ class DefaultUserData(userdata_utils.AbstractUserData): if 'computeFlavourId' in vdu_value: vdu_value['computeFlavourId'] = ( userdata_utils.get_param_flavor( - vdu_name, req, vnfd, grant)) + vdu_name, flavour_id, vnfd, grant)) if 'vcImageId' in vdu_value: vdu_value['vcImageId'] = userdata_utils.get_param_image( - vdu_name, req, vnfd, grant) + vdu_name, flavour_id, vnfd, grant) if 'locationConstraints' in vdu_value: vdu_value['locationConstraints'] = ( userdata_utils.get_param_zone( vdu_name, grant_req, grant)) + if 'desired_capacity' in vdu_value: + vdu_value['desired_capacity'] = ( + userdata_utils.get_param_capacity( + vdu_name, inst, grant_req)) cps = nfv_dict.get('CP', {}) for cp_name, cp_value in cps.items(): @@ -84,3 +88,59 @@ class DefaultUserData(userdata_utils.AbstractUserData): fields['files'][key] = yaml.safe_dump(value) return fields + + @staticmethod + def 
scale(req, inst, grant_req, grant, tmp_csar_dir):
+        # scale is interested in 'desired_capacity' only.
+        # This method returns only 'desired_capacity' part in the
+        # 'nfv' dict. It is applied to json merge patch against
+        # the existing 'nfv' dict by the caller.
+        # NOTE: complete 'nfv' dict can not be made at the moment
+        # since InstantiateVnfRequest is necessary to make it.
+
+        vnfd = userdata_utils.get_vnfd(inst['vnfdId'], tmp_csar_dir)
+        flavour_id = inst['instantiatedVnfInfo']['flavourId']
+
+        hot_dict = vnfd.get_base_hot(flavour_id)
+        top_hot = hot_dict['template']
+
+        nfv_dict = userdata_utils.init_nfv_dict(top_hot)
+
+        vdus = nfv_dict.get('VDU', {})
+        new_vdus = {}
+        for vdu_name, vdu_value in vdus.items():
+            if 'desired_capacity' in vdu_value:
+                capacity = userdata_utils.get_param_capacity(
+                    vdu_name, inst, grant_req)
+                new_vdus[vdu_name] = {'desired_capacity': capacity}
+
+        fields = {'parameters': {'nfv': {'VDU': new_vdus}}}
+
+        return fields
+
+    @staticmethod
+    def scale_rollback(req, inst, grant_req, grant, tmp_csar_dir):
+        # NOTE: This method is not called by a userdata script but
+        # is called by the openstack infra_driver directly now.
+        # It is defined here because it is very similar to the
+        # scale method above.
+ + vnfd = userdata_utils.get_vnfd(inst['vnfdId'], tmp_csar_dir) + flavour_id = inst['instantiatedVnfInfo']['flavourId'] + + hot_dict = vnfd.get_base_hot(flavour_id) + top_hot = hot_dict['template'] + + nfv_dict = userdata_utils.init_nfv_dict(top_hot) + + vdus = nfv_dict.get('VDU', {}) + new_vdus = {} + for vdu_name, vdu_value in vdus.items(): + if 'desired_capacity' in vdu_value: + capacity = userdata_utils.get_current_capacity( + vdu_name, inst) + new_vdus[vdu_name] = {'desired_capacity': capacity} + + fields = {'parameters': {'nfv': {'VDU': new_vdus}}} + + return fields diff --git a/tacker/sol_refactored/infra_drivers/openstack/userdata_main.py b/tacker/sol_refactored/infra_drivers/openstack/userdata_main.py index d9c0fe6a2..76acb4317 100644 --- a/tacker/sol_refactored/infra_drivers/openstack/userdata_main.py +++ b/tacker/sol_refactored/infra_drivers/openstack/userdata_main.py @@ -20,7 +20,7 @@ import sys import traceback -def main(operation): +def main(): script_dict = pickle.load(sys.stdin.buffer) req = script_dict['request'] @@ -39,11 +39,8 @@ def main(operation): module = importlib.import_module(class_module) klass = getattr(module, userdata_class) - if operation == 'INSTANTIATE': - stack_dict = klass.instantiate( - req, inst, grant_req, grant, tmp_csar_dir) - else: - raise Exception("Unknown operation") + method = getattr(klass, grant_req['operation'].lower()) + stack_dict = method(req, inst, grant_req, grant, tmp_csar_dir) pickle.dump(stack_dict, sys.stdout.buffer) sys.stdout.flush() @@ -51,7 +48,7 @@ def main(operation): if __name__ == "__main__": try: - main(sys.argv[1]) + main() os._exit(0) except Exception: sys.stderr.write(traceback.format_exc()) diff --git a/tacker/sol_refactored/infra_drivers/openstack/userdata_utils.py b/tacker/sol_refactored/infra_drivers/openstack/userdata_utils.py index 26536b7f0..98b44fccb 100644 --- a/tacker/sol_refactored/infra_drivers/openstack/userdata_utils.py +++ 
b/tacker/sol_refactored/infra_drivers/openstack/userdata_utils.py @@ -20,23 +20,29 @@ from tacker.sol_refactored.common import vnfd_utils class AbstractUserData(metaclass=abc.ABCMeta): + """Definition of each method + + Args: + req: Request dict for each API + (ex. InstantiateVnfRequest for instantiate) + inst: VnfInstance dict + grant_req: GrantRequest dict + grant: Grant dict + tmp_csar_dir: directory path that csar contents are extracted + + Returns: + dict of parameters for create/update heat stack. + see the example of userdata_default.py. + """ @staticmethod @abc.abstractmethod def instantiate(req, inst, grant_req, grant, tmp_csar_dir): - """Definition of instantiate method + raise sol_ex.UserDataClassNotImplemented() - Args: - req: InstantiateVnfRequest dict - inst: VnfInstance dict - grant_req: GrantRequest dict - grant: Grant dict - tmp_csar_dir: directory path that csar contents are extracted - - Returns: - dict of parameters for create heat stack. - see the example of userdata_default.py. - """ + @staticmethod + @abc.abstractmethod + def scale(req, inst, grant_req, grant, tmp_csar_dir): raise sol_ex.UserDataClassNotImplemented() @@ -83,7 +89,7 @@ def init_nfv_dict(hot_template): return nfv -def get_param_flavor(vdu_name, req, vnfd, grant): +def get_param_flavor(vdu_name, flavour_id, vnfd, grant): # try to get from grant if 'vimAssets' in grant: assets = grant['vimAssets'] @@ -96,10 +102,10 @@ def get_param_flavor(vdu_name, req, vnfd, grant): # if specified in VNFD, use it # NOTE: if not found. parameter is set to None. 
# may be error when stack create - return vnfd.get_compute_flavor(req['flavourId'], vdu_name) + return vnfd.get_compute_flavor(flavour_id, vdu_name) -def get_param_image(vdu_name, req, vnfd, grant): +def get_param_image(vdu_name, flavour_id, vnfd, grant): # try to get from grant if 'vimAssets' in grant: assets = grant['vimAssets'] @@ -112,7 +118,7 @@ def get_param_image(vdu_name, req, vnfd, grant): # if specified in VNFD, use it # NOTE: if not found. parameter is set to None. # may be error when stack create - sw_images = vnfd.get_sw_image(req['flavourId']) + sw_images = vnfd.get_sw_image(flavour_id) for name, image in sw_images.items(): if name == vdu_name: return image @@ -133,6 +139,37 @@ def get_param_zone(vdu_name, grant_req, grant): return zone['zoneId'] +def get_current_capacity(vdu_name, inst): + count = 0 + inst_vnfcs = (inst.get('instantiatedVnfInfo', {}) + .get('vnfcResourceInfo', [])) + for inst_vnfc in inst_vnfcs: + if inst_vnfc['vduId'] == vdu_name: + count += 1 + + return count + + +def get_param_capacity(vdu_name, inst, grant_req): + # NOTE: refer grant_req here since interpretation of VNFD was done when + # making grant_req. 
+ count = get_current_capacity(vdu_name, inst) + + add_reses = grant_req.get('addResources', []) + for res_def in add_reses: + if (res_def['type'] == 'COMPUTE' and + res_def['resourceTemplateId'] == vdu_name): + count += 1 + + rm_reses = grant_req.get('removeResources', []) + for res_def in rm_reses: + if (res_def['type'] == 'COMPUTE' and + res_def['resourceTemplateId'] == vdu_name): + count -= 1 + + return count + + def _get_fixed_ips_from_extcp(extcp): fixed_ips = [] for cp_conf in extcp['cpConfig'].values(): diff --git a/tacker/sol_refactored/test-tools/cli.py b/tacker/sol_refactored/test-tools/cli.py index c6ed952f3..7868bd840 100644 --- a/tacker/sol_refactored/test-tools/cli.py +++ b/tacker/sol_refactored/test-tools/cli.py @@ -89,6 +89,12 @@ class Client(object): path, "POST", body=req_body, version="2.0.0") self.print(resp, body) + def scale(self, id, req_body): + path = self.path + '/' + id + '/scale' + resp, body = self.client.do_request( + path, "POST", body=req_body, version="2.0.0") + self.print(resp, body) + def retry(self, id): path = self.path + '/' + id + '/retry' resp, body = self.client.do_request(path, "POST", version="2.0.0") @@ -97,6 +103,7 @@ class Client(object): def rollback(self, id): path = self.path + '/' + id + '/rollback' resp, body = self.client.do_request(path, "POST", version="2.0.0") + self.print(resp, body) def fail(self, id): path = self.path + '/' + id + '/fail' @@ -112,6 +119,7 @@ def usage(): print(" inst delete {id}") print(" inst inst {id} body(path of content)") print(" inst term {id} body(path of content)") + print(" inst scale {id} body(path of content)") print(" subsc create body(path of content)") print(" subsc list [body(path of content)]") print(" subsc show {id}") @@ -137,7 +145,8 @@ if __name__ == '__main__': action = sys.argv[2] if resource == "inst": - if action not in ["create", "list", "show", "delete", "inst", "term"]: + if action not in ["create", "list", "show", "delete", "inst", "term", + "scale"]: usage() 
client = Client("/vnflcm/v2/vnf_instances") elif resource == "subsc": @@ -179,6 +188,10 @@ if __name__ == '__main__': if len(sys.argv) != 5: usage() client.term(sys.argv[3], get_body(sys.argv[4])) + elif action == "scale": + if len(sys.argv) != 5: + usage() + client.scale(sys.argv[3], get_body(sys.argv[4])) elif action == "retry": if len(sys.argv) != 4: usage() diff --git a/tacker/tests/functional/sol_v2/samples/sample1/contents/BaseHOT/simple/sample1.yaml b/tacker/tests/functional/sol_v2/samples/sample1/contents/BaseHOT/simple/sample1.yaml index 8d14dd503..1205bf028 100644 --- a/tacker/tests/functional/sol_v2/samples/sample1/contents/BaseHOT/simple/sample1.yaml +++ b/tacker/tests/functional/sol_v2/samples/sample1/contents/BaseHOT/simple/sample1.yaml @@ -11,7 +11,7 @@ resources: properties: min_size: 1 max_size: 3 - desired_capacity: 1 + desired_capacity: { get_param: [ nfv, VDU, VDU1, desired_capacity ] } resource: type: VDU1.yaml properties: @@ -26,20 +26,8 @@ resources: net4: { get_resource: internalVL2 } net5: { get_resource: internalVL3 } - VDU1_scale_out: - type: OS::Heat::ScalingPolicy - properties: - scaling_adjustment: 1 - auto_scaling_group_id: - get_resource: VDU1_scale_group - adjustment_type: change_in_capacity - VDU1_scale_in: - type: OS::Heat::ScalingPolicy - properties: - scaling_adjustment: -1 - auto_scaling_group_id: - get_resource: VDU1_scale_group - adjustment_type: change_in_capacity +# NOTE: Resource definition of OS::Heat::ScalingPolicy is omitted. +# It is not used by v2 scale implementation unlike v1. 
VDU2: type: OS::Nova::Server diff --git a/tacker/tests/functional/sol_v2/samples/sample1/contents/UserData/userdata.py b/tacker/tests/functional/sol_v2/samples/sample1/contents/UserData/userdata.py index 848e79bae..29283981a 100644 --- a/tacker/tests/functional/sol_v2/samples/sample1/contents/UserData/userdata.py +++ b/tacker/tests/functional/sol_v2/samples/sample1/contents/UserData/userdata.py @@ -60,14 +60,18 @@ class UserData(userdata_utils.AbstractUserData): if 'computeFlavourId' in vdu_value: vdu_value['computeFlavourId'] = ( userdata_utils.get_param_flavor( - vdu_name, req, vnfd, grant)) + vdu_name, flavour_id, vnfd, grant)) if 'vcImageId' in vdu_value: vdu_value['vcImageId'] = userdata_utils.get_param_image( - vdu_name, req, vnfd, grant) + vdu_name, flavour_id, vnfd, grant) if 'locationConstraints' in vdu_value: vdu_value['locationConstraints'] = ( userdata_utils.get_param_zone( vdu_name, grant_req, grant)) + if 'desired_capacity' in vdu_value: + vdu_value['desired_capacity'] = ( + userdata_utils.get_param_capacity( + vdu_name, inst, grant_req)) cps = nfv_dict.get('CP', {}) for cp_name, cp_value in cps.items(): diff --git a/tacker/tests/functional/sol_v2/samples/sample2/contents/BaseHOT/simple/sample2.yaml b/tacker/tests/functional/sol_v2/samples/sample2/contents/BaseHOT/simple/sample2.yaml index 6469a7bd3..cef290a67 100644 --- a/tacker/tests/functional/sol_v2/samples/sample2/contents/BaseHOT/simple/sample2.yaml +++ b/tacker/tests/functional/sol_v2/samples/sample2/contents/BaseHOT/simple/sample2.yaml @@ -11,7 +11,7 @@ resources: properties: min_size: 1 max_size: 3 - desired_capacity: 1 + desired_capacity: { get_param: [ nfv, VDU, VDU1, desired_capacity ] } resource: type: VDU1.yaml properties: @@ -19,20 +19,9 @@ resources: image: { get_param: [ nfv, VDU, VDU1, vcImageId ] } net5: { get_resource: internalVL3 } affinity: { get_resource: nfvi_node_affinity } - VDU1_scale_out: - type: OS::Heat::ScalingPolicy - properties: - scaling_adjustment: 1 - 
auto_scaling_group_id: - get_resource: VDU1_scale_group - adjustment_type: change_in_capacity - VDU1_scale_in: - type: OS::Heat::ScalingPolicy - properties: - scaling_adjustment: -1 - auto_scaling_group_id: - get_resource: VDU1_scale_group - adjustment_type: change_in_capacity + +# NOTE: Resource definition of OS::Heat::ScalingPolicy is omitted. +# It is not used by v2 scale implementation unlike v1. VDU2: type: OS::Nova::Server diff --git a/tacker/tests/unit/sol_refactored/common/test_lcm_op_occ_utils.py b/tacker/tests/unit/sol_refactored/common/test_lcm_op_occ_utils.py new file mode 100644 index 000000000..899ab3387 --- /dev/null +++ b/tacker/tests/unit/sol_refactored/common/test_lcm_op_occ_utils.py @@ -0,0 +1,1205 @@ +# Copyright (C) 2021 Nippon Telegraph and Telephone Corporation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from tacker import context +from tacker.sol_refactored.common import lcm_op_occ_utils as lcmocc_utils +from tacker.sol_refactored import objects +from tacker.tests import base + + +# instantiatedVnfInfo examples +# NOTE: +# - some identifiers are modified to make check easy. +# - some attributes which are not related to test update_lcmocc are omitted. 
+_inst_info_example_1 = { + # "flavourId", "vnfState", "scaleStatus", "maxScaleLevels" are omitted + # "extCpInfo": omitted + "extVirtualLinkInfo": [ + { + "id": "bbf0932a-6142-4ea8-93cd-8059dba594a1", + "resourceHandle": { + "resourceId": "3529d333-dbcc-4d93-9b64-210647712569" + }, + "extLinkPorts": [ + { + "id": "res_id_VDU2_CP1", + "resourceHandle": { + "vimConnectionId": "vim_connection_id", + "resourceId": "res_id_VDU2_CP1", + "vimLevelResourceType": "OS::Neutron::Port" + }, + "cpInstanceId": "cp-res_id_VDU2_CP1" + }, + { + "id": "res_id_VDU1_CP1_1", + "resourceHandle": { + "vimConnectionId": "vim_connection_id", + "resourceId": "res_id_VDU1_CP1_1", + "vimLevelResourceType": "OS::Neutron::Port" + }, + "cpInstanceId": "cp-res_id_VDU1_CP1_1" + } + ], + # "currentVnfExtCpData": omitted + }, + { + "id": "790949df-c7b3-4926-a559-3895412f1dfe", + "resourceHandle": { + "resourceId": "367e5b3b-34dc-47f2-85b8-c39e3272893a" + }, + "extLinkPorts": [ + { + "id": "res_id_VDU2_CP2", + "resourceHandle": { + "vimConnectionId": "vim_connection_id", + "resourceId": "res_id_VDU2_CP2", + "vimLevelResourceType": "OS::Neutron::Port" + }, + "cpInstanceId": "cp-res_id_VDU2_CP2" + }, + { + "id": "res_id_VDU1_CP2_1", + "resourceHandle": { + "vimConnectionId": "vim_connection_id", + "resourceId": "res_id_VDU1_CP2_1", + "vimLevelResourceType": "OS::Neutron::Port" + }, + "cpInstanceId": "cp-res_id_VDU1_CP2_1" + } + ], + # "currentVnfExtCpData": omitted + } + ], + "extManagedVirtualLinkInfo": [ + { + "id": "res_id_internalVL1", + "vnfVirtualLinkDescId": "internalVL1", + "networkResource": { + "resourceId": "res_id_internalVL1" + }, + "vnfLinkPorts": [ + { + "id": "res_id_VDU2_CP3", + "resourceHandle": { + "vimConnectionId": "vim_connection_id", + "resourceId": "res_id_VDU2_CP3", + "vimLevelResourceType": "OS::Neutron::Port" + }, + "cpInstanceId": "VDU2_CP3-res_id_VDU2", + "cpInstanceType": "VNFC_CP" + }, + { + "id": "res_id_VDU1_CP3_1", + "resourceHandle": { + "vimConnectionId": 
"vim_connection_id", + "resourceId": "res_id_VDU1_CP3_1", + "vimLevelResourceType": "OS::Neutron::Port" + }, + "cpInstanceId": "VDU1_CP3-res_id_VDU1_1", + "cpInstanceType": "VNFC_CP" + } + ] + } + ], + "vnfcResourceInfo": [ + { + "id": "res_id_VDU1_1", + "vduId": "VDU1", + "computeResource": { + "vimConnectionId": "vim_connection_id", + "resourceId": "res_id_VDU1_1", + "vimLevelResourceType": "OS::Nova::Server" + }, + "storageResourceIds": [ + "res_id_VirtualStorage_1" + ], + "vnfcCpInfo": [ + { + "id": "VDU1_CP1-res_id_VDU1_1", + "cpdId": "VDU1_CP1", + "vnfExtCpId": "cp-res_id_VDU1_CP1_1" + }, + { + "id": "VDU1_CP2-res_id_VDU1_1", + "cpdId": "VDU1_CP2", + "vnfExtCpId": "cp-res_id_VDU1_CP2_1" + }, + { + "id": "VDU1_CP3-res_id_VDU1_1", + "cpdId": "VDU1_CP3", + "vnfLinkPortId": "res_id_VDU1_CP3_1" + }, + { + "id": "VDU1_CP4-res_id_VDU1_1", + "cpdId": "VDU1_CP4", + "vnfLinkPortId": "res_id_VDU1_CP4_1" + }, + { + "id": "VDU1_CP5-res_id_VDU1_1", + "cpdId": "VDU1_CP5", + "vnfLinkPortId": "res_id_VDU1_CP5_1" + } + ], + # "metadata": omitted + }, + { + "id": "res_id_VDU2", + "vduId": "VDU2", + "computeResource": { + "vimConnectionId": "vim_connection_id", + "resourceId": "res_id_VDU2", + "vimLevelResourceType": "OS::Nova::Server" + }, + "vnfcCpInfo": [ + { + "id": "VDU2_CP1-res_id_VDU2", + "cpdId": "VDU2_CP1", + "vnfExtCpId": "cp-res_id_VDU2_CP1" + }, + { + "id": "VDU2_CP2-res_id_VDU2", + "cpdId": "VDU2_CP2", + "vnfExtCpId": "cp-res_id_VDU2_CP2" + }, + { + "id": "VDU2_CP3-res_id_VDU2", + "cpdId": "VDU2_CP3", + "vnfLinkPortId": "res_id_VDU2_CP3" + }, + { + "id": "VDU2_CP4-res_id_VDU2", + "cpdId": "VDU2_CP4", + "vnfLinkPortId": "res_id_VDU2_CP4" + }, + { + "id": "VDU2_CP5-res_id_VDU2", + "cpdId": "VDU2_CP5", + "vnfLinkPortId": "res_id_VDU2_CP5" + } + ], + # "metadata": omitted + } + ], + "vnfVirtualLinkResourceInfo": [ + { + "id": "res_id_internalVL3", + "vnfVirtualLinkDescId": "internalVL3", + "networkResource": { + "vimConnectionId": "vim_connection_id", + "resourceId": 
"res_id_internalVL3", + "vimLevelResourceType": "OS::Neutron::Net" + }, + "vnfLinkPorts": [ + { + "id": "res_id_VDU2_CP5", + "resourceHandle": { + "vimConnectionId": "vim_connection_id", + "resourceId": "res_id_VDU2_CP5", + "vimLevelResourceType": "OS::Neutron::Port" + }, + "cpInstanceId": "VDU2_CP5-res_id_VDU2", + "cpInstanceType": "VNFC_CP" + }, + { + "id": "res_id_VDU1_CP5_1", + "resourceHandle": { + "vimConnectionId": "vim_connection_id", + "resourceId": "res_id_VDU1_CP5_1", + "vimLevelResourceType": "OS::Neutron::Port" + }, + "cpInstanceId": "VDU1_CP5-res_id_VDU1_1", + "cpInstanceType": "VNFC_CP" + } + ] + }, + { + "id": "res_id_internalVL2", + "vnfVirtualLinkDescId": "internalVL2", + "networkResource": { + "vimConnectionId": "vim_connection_id", + "resourceId": "res_id_internalVL2", + "vimLevelResourceType": "OS::Neutron::Net" + }, + "vnfLinkPorts": [ + { + "id": "res_id_VDU2_CP4", + "resourceHandle": { + "vimConnectionId": "vim_connection_id", + "resourceId": "res_id_VDU2_CP4", + "vimLevelResourceType": "OS::Neutron::Port" + }, + "cpInstanceId": "VDU2_CP4-res_id_VDU2", + "cpInstanceType": "VNFC_CP" + }, + { + "id": "res_id_VDU1_CP4_1", + "resourceHandle": { + "vimConnectionId": "vim_connection_id", + "resourceId": "res_id_VDU1_CP4_1", + "vimLevelResourceType": "OS::Neutron::Port" + }, + "cpInstanceId": "VDU1_CP4-res_id_VDU1_1", + "cpInstanceType": "VNFC_CP" + } + ] + } + ], + "virtualStorageResourceInfo": [ + { + "id": "res_id_VirtualStorage_1", + "virtualStorageDescId": "VirtualStorage", + "storageResource": { + "vimConnectionId": "vim_connection_id", + "resourceId": "res_id_VirtualStorage_1", + "vimLevelResourceType": "OS::Cinder::Volume" + } + } + ], + # "vnfcInfo": omitted +} + +# example_2 is added a VDU1 to example_1. 
+_inst_info_example_2 = { + # "flavourId", "vnfState", "scaleStatus", "maxScaleLevels" are omitted + # "extCpInfo": omitted + "extVirtualLinkInfo": [ + { + "id": "bbf0932a-6142-4ea8-93cd-8059dba594a1", + "resourceHandle": { + "resourceId": "3529d333-dbcc-4d93-9b64-210647712569" + }, + "extLinkPorts": [ + { + "id": "res_id_VDU2_CP1", + "resourceHandle": { + "vimConnectionId": "vim_connection_id", + "resourceId": "res_id_VDU2_CP1", + "vimLevelResourceType": "OS::Neutron::Port" + }, + "cpInstanceId": "cp-res_id_VDU2_CP1" + }, + { + "id": "res_id_VDU1_CP1_1", + "resourceHandle": { + "vimConnectionId": "vim_connection_id", + "resourceId": "res_id_VDU1_CP1_1", + "vimLevelResourceType": "OS::Neutron::Port" + }, + "cpInstanceId": "cp-res_id_VDU1_CP1_1" + }, + { + "id": "res_id_VDU1_CP1_2", + "resourceHandle": { + "vimConnectionId": "vim_connection_id", + "resourceId": "res_id_VDU1_CP1_2", + "vimLevelResourceType": "OS::Neutron::Port" + }, + "cpInstanceId": "cp-res_id_VDU1_CP1_2" + } + ], + # "currentVnfExtCpData": omitted + }, + { + "id": "790949df-c7b3-4926-a559-3895412f1dfe", + "resourceHandle": { + "resourceId": "367e5b3b-34dc-47f2-85b8-c39e3272893a" + }, + "extLinkPorts": [ + { + "id": "res_id_VDU2_CP2", + "resourceHandle": { + "vimConnectionId": "vim_connection_id", + "resourceId": "res_id_VDU2_CP2", + "vimLevelResourceType": "OS::Neutron::Port" + }, + "cpInstanceId": "cp-res_id_VDU2_CP2" + }, + { + "id": "res_id_VDU1_CP2_1", + "resourceHandle": { + "vimConnectionId": "vim_connection_id", + "resourceId": "res_id_VDU1_CP2_1", + "vimLevelResourceType": "OS::Neutron::Port" + }, + "cpInstanceId": "cp-res_id_VDU1_CP2_1" + }, + { + "id": "res_id_VDU1_CP2_2", + "resourceHandle": { + "vimConnectionId": "vim_connection_id", + "resourceId": "res_id_VDU1_CP2_2", + "vimLevelResourceType": "OS::Neutron::Port" + }, + "cpInstanceId": "cp-res_id_VDU1_CP2_2" + } + ], + # "currentVnfExtCpData": omitted + } + ], + "extManagedVirtualLinkInfo": [ + { + "id": "res_id_internalVL1", + 
"vnfVirtualLinkDescId": "internalVL1", + "networkResource": { + "resourceId": "res_id_internalVL1" + }, + "vnfLinkPorts": [ + { + "id": "res_id_VDU2_CP3", + "resourceHandle": { + "vimConnectionId": "vim_connection_id", + "resourceId": "res_id_VDU2_CP3", + "vimLevelResourceType": "OS::Neutron::Port" + }, + "cpInstanceId": "VDU2_CP3-res_id_VDU2", + "cpInstanceType": "VNFC_CP" + }, + { + "id": "res_id_VDU1_CP3_1", + "resourceHandle": { + "vimConnectionId": "vim_connection_id", + "resourceId": "res_id_VDU1_CP3_1", + "vimLevelResourceType": "OS::Neutron::Port" + }, + "cpInstanceId": "VDU1_CP3-res_id_VDU1_1", + "cpInstanceType": "VNFC_CP" + }, + { + "id": "res_id_VDU1_CP3_2", + "resourceHandle": { + "vimConnectionId": "vim_connection_id", + "resourceId": "res_id_VDU1_CP3_2", + "vimLevelResourceType": "OS::Neutron::Port" + }, + "cpInstanceId": "VDU1_CP3-res_id_VDU1_2", + "cpInstanceType": "VNFC_CP" + } + ] + } + ], + "vnfcResourceInfo": [ + { + "id": "res_id_VDU1_2", + "vduId": "VDU1", + "computeResource": { + "vimConnectionId": "vim_connection_id", + "resourceId": "res_id_VDU1_2", + "vimLevelResourceType": "OS::Nova::Server" + }, + "storageResourceIds": [ + "res_id_VirtualStorage_2" + ], + "vnfcCpInfo": [ + { + "id": "VDU1_CP1-res_id_VDU1_2", + "cpdId": "VDU1_CP1", + "vnfExtCpId": "cp-res_id_VDU1_CP1_2" + }, + { + "id": "VDU1_CP2-res_id_VDU1_2", + "cpdId": "VDU1_CP2", + "vnfExtCpId": "cp-res_id_VDU1_CP2_2" + }, + { + "id": "VDU1_CP3-res_id_VDU1_2", + "cpdId": "VDU1_CP3", + "vnfLinkPortId": "res_id_VDU1_CP3_2" + }, + { + "id": "VDU1_CP4-res_id_VDU1_2", + "cpdId": "VDU1_CP4", + "vnfLinkPortId": "res_id_VDU1_CP4_2" + }, + { + "id": "VDU1_CP5-res_id_VDU1_2", + "cpdId": "VDU1_CP5", + "vnfLinkPortId": "res_id_VDU1_CP5_2" + } + ], + # "metadata": omitted + }, + { + "id": "res_id_VDU1_1", + "vduId": "VDU1", + "computeResource": { + "vimConnectionId": "vim_connection_id", + "resourceId": "res_id_VDU1_1", + "vimLevelResourceType": "OS::Nova::Server" + }, + "storageResourceIds": [ 
+ "res_id_VirtualStorage_1" + ], + "vnfcCpInfo": [ + { + "id": "VDU1_CP1-res_id_VDU1_1", + "cpdId": "VDU1_CP1", + "vnfExtCpId": "cp-res_id_VDU1_CP1_1" + }, + { + "id": "VDU1_CP2-res_id_VDU1_1", + "cpdId": "VDU1_CP2", + "vnfExtCpId": "cp-res_id_VDU1_CP2_1" + }, + { + "id": "VDU1_CP3-res_id_VDU1_1", + "cpdId": "VDU1_CP3", + "vnfLinkPortId": "res_id_VDU1_CP3_1" + }, + { + "id": "VDU1_CP4-res_id_VDU1_1", + "cpdId": "VDU1_CP4", + "vnfLinkPortId": "res_id_VDU1_CP4_1" + }, + { + "id": "VDU1_CP5-res_id_VDU1_1", + "cpdId": "VDU1_CP5", + "vnfLinkPortId": "res_id_VDU1_CP5_1" + } + ], + # "metadata": omitted + }, + { + "id": "res_id_VDU2", + "vduId": "VDU2", + "computeResource": { + "vimConnectionId": "vim_connection_id", + "resourceId": "res_id_VDU2", + "vimLevelResourceType": "OS::Nova::Server" + }, + "vnfcCpInfo": [ + { + "id": "VDU2_CP1-res_id_VDU2", + "cpdId": "VDU2_CP1", + "vnfExtCpId": "cp-res_id_VDU2_CP1" + }, + { + "id": "VDU2_CP2-res_id_VDU2", + "cpdId": "VDU2_CP2", + "vnfExtCpId": "cp-res_id_VDU2_CP2" + }, + { + "id": "VDU2_CP3-res_id_VDU2", + "cpdId": "VDU2_CP3", + "vnfLinkPortId": "res_id_VDU2_CP3" + }, + { + "id": "VDU2_CP4-res_id_VDU2", + "cpdId": "VDU2_CP4", + "vnfLinkPortId": "res_id_VDU2_CP4" + }, + { + "id": "VDU2_CP5-res_id_VDU2", + "cpdId": "VDU2_CP5", + "vnfLinkPortId": "res_id_VDU2_CP5" + } + ], + # "metadata": omitted + } + ], + "vnfVirtualLinkResourceInfo": [ + { + "id": "res_id_internalVL3", + "vnfVirtualLinkDescId": "internalVL3", + "networkResource": { + "vimConnectionId": "vim_connection_id", + "resourceId": "res_id_internalVL3", + "vimLevelResourceType": "OS::Neutron::Net" + }, + "vnfLinkPorts": [ + { + "id": "res_id_VDU2_CP5", + "resourceHandle": { + "vimConnectionId": "vim_connection_id", + "resourceId": "res_id_VDU2_CP5", + "vimLevelResourceType": "OS::Neutron::Port" + }, + "cpInstanceId": "VDU2_CP5-res_id_VDU2", + "cpInstanceType": "VNFC_CP" + }, + { + "id": "res_id_VDU1_CP5_1", + "resourceHandle": { + "vimConnectionId": "vim_connection_id", + 
"resourceId": "res_id_VDU1_CP5_1", + "vimLevelResourceType": "OS::Neutron::Port" + }, + "cpInstanceId": "VDU1_CP5-res_id_VDU1_1", + "cpInstanceType": "VNFC_CP" + }, + { + "id": "res_id_VDU1_CP5_2", + "resourceHandle": { + "vimConnectionId": "vim_connection_id", + "resourceId": "res_id_VDU1_CP5_2", + "vimLevelResourceType": "OS::Neutron::Port" + }, + "cpInstanceId": "VDU1_CP5-res_id_VDU1_2", + "cpInstanceType": "VNFC_CP" + } + ] + }, + { + "id": "res_id_internalVL2", + "vnfVirtualLinkDescId": "internalVL2", + "networkResource": { + "vimConnectionId": "vim_connection_id", + "resourceId": "res_id_internalVL2", + "vimLevelResourceType": "OS::Neutron::Net" + }, + "vnfLinkPorts": [ + { + "id": "res_id_VDU2_CP4", + "resourceHandle": { + "vimConnectionId": "vim_connection_id", + "resourceId": "res_id_VDU2_CP4", + "vimLevelResourceType": "OS::Neutron::Port" + }, + "cpInstanceId": "VDU2_CP4-res_id_VDU2", + "cpInstanceType": "VNFC_CP" + }, + { + "id": "res_id_VDU1_CP4_1", + "resourceHandle": { + "vimConnectionId": "vim_connection_id", + "resourceId": "res_id_VDU1_CP4_1", + "vimLevelResourceType": "OS::Neutron::Port" + }, + "cpInstanceId": "VDU1_CP4-res_id_VDU1_1", + "cpInstanceType": "VNFC_CP" + }, + { + "id": "res_id_VDU1_CP4_2", + "resourceHandle": { + "vimConnectionId": "vim_connection_id", + "resourceId": "res_id_VDU1_CP4_2", + "vimLevelResourceType": "OS::Neutron::Port" + }, + "cpInstanceId": "VDU1_CP4-res_id_VDU1_2", + "cpInstanceType": "VNFC_CP" + } + ] + } + ], + "virtualStorageResourceInfo": [ + { + "id": "res_id_VirtualStorage_1", + "virtualStorageDescId": "VirtualStorage", + "storageResource": { + "vimConnectionId": "vim_connection_id", + "resourceId": "res_id_VirtualStorage_1", + "vimLevelResourceType": "OS::Cinder::Volume" + } + }, + { + "id": "res_id_VirtualStorage_2", + "virtualStorageDescId": "VirtualStorage", + "storageResource": { + "vimConnectionId": "vim_connection_id", + "resourceId": "res_id_VirtualStorage_2", + "vimLevelResourceType": 
"OS::Cinder::Volume" + } + } + ], + # "vnfcInfo": omitted +} + +# expected results +_expected_resource_changes_instantiate = { + "affectedVnfcs": [ + { + "id": "res_id_VDU1_1", + "vduId": "VDU1", + "changeType": "ADDED", + "computeResource": { + "vimConnectionId": "vim_connection_id", + "resourceId": "res_id_VDU1_1", + "vimLevelResourceType": "OS::Nova::Server" + }, + "affectedVnfcCpIds": [ + "VDU1_CP1-res_id_VDU1_1", + "VDU1_CP2-res_id_VDU1_1", + "VDU1_CP3-res_id_VDU1_1", + "VDU1_CP4-res_id_VDU1_1", + "VDU1_CP5-res_id_VDU1_1" + ], + "addedStorageResourceIds": [ + "res_id_VirtualStorage_1" + ] + }, + { + "id": "res_id_VDU2", + "vduId": "VDU2", + "changeType": "ADDED", + "computeResource": { + "vimConnectionId": "vim_connection_id", + "resourceId": "res_id_VDU2", + "vimLevelResourceType": "OS::Nova::Server" + }, + "affectedVnfcCpIds": [ + "VDU2_CP1-res_id_VDU2", + "VDU2_CP2-res_id_VDU2", + "VDU2_CP3-res_id_VDU2", + "VDU2_CP4-res_id_VDU2", + "VDU2_CP5-res_id_VDU2" + ] + } + ], + "affectedVirtualLinks": [ + { + "id": "res_id_internalVL1", + "vnfVirtualLinkDescId": "internalVL1", + "changeType": "LINK_PORT_ADDED", + "networkResource": { + "resourceId": "res_id_internalVL1" + }, + "vnfLinkPortIds": [ + "res_id_VDU1_CP3_1", + "res_id_VDU2_CP3" + ] + }, + { + "id": "res_id_internalVL2", + "vnfVirtualLinkDescId": "internalVL2", + "changeType": "ADDED", + "networkResource": { + "vimConnectionId": "vim_connection_id", + "resourceId": "res_id_internalVL2", + "vimLevelResourceType": "OS::Neutron::Net" + }, + "vnfLinkPortIds": [ + "res_id_VDU1_CP4_1", + "res_id_VDU2_CP4" + ] + }, + { + "id": "res_id_internalVL3", + "vnfVirtualLinkDescId": "internalVL3", + "changeType": "ADDED", + "networkResource": { + "vimConnectionId": "vim_connection_id", + "resourceId": "res_id_internalVL3", + "vimLevelResourceType": "OS::Neutron::Net" + }, + "vnfLinkPortIds": [ + "res_id_VDU1_CP5_1", + "res_id_VDU2_CP5" + ] + } + ], + "affectedExtLinkPorts": [ + { + "id": "res_id_VDU1_CP1_1", + 
"changeType": "ADDED", + "extCpInstanceId": "cp-res_id_VDU1_CP1_1", + "resourceHandle": { + "resourceId": "res_id_VDU1_CP1_1", + "vimConnectionId": "vim_connection_id", + "vimLevelResourceType": "OS::Neutron::Port" + } + }, + { + "id": "res_id_VDU1_CP2_1", + "changeType": "ADDED", + "extCpInstanceId": "cp-res_id_VDU1_CP2_1", + "resourceHandle": { + "resourceId": "res_id_VDU1_CP2_1", + "vimConnectionId": "vim_connection_id", + "vimLevelResourceType": "OS::Neutron::Port" + } + }, + { + "id": "res_id_VDU2_CP1", + "changeType": "ADDED", + "extCpInstanceId": "cp-res_id_VDU2_CP1", + "resourceHandle": { + "resourceId": "res_id_VDU2_CP1", + "vimConnectionId": "vim_connection_id", + "vimLevelResourceType": "OS::Neutron::Port" + } + }, + { + "id": "res_id_VDU2_CP2", + "changeType": "ADDED", + "extCpInstanceId": "cp-res_id_VDU2_CP2", + "resourceHandle": { + "resourceId": "res_id_VDU2_CP2", + "vimConnectionId": "vim_connection_id", + "vimLevelResourceType": "OS::Neutron::Port" + } + }, + ], + "affectedVirtualStorages": [ + { + "id": "res_id_VirtualStorage_1", + "virtualStorageDescId": "VirtualStorage", + "changeType": "ADDED", + "storageResource": { + "vimConnectionId": "vim_connection_id", + "resourceId": "res_id_VirtualStorage_1", + "vimLevelResourceType": "OS::Cinder::Volume" + } + } + ] +} + +_expected_resource_changes_scale_out = { + "affectedVnfcs": [ + { + "id": "res_id_VDU1_2", + "vduId": "VDU1", + "changeType": "ADDED", + "computeResource": { + "vimConnectionId": "vim_connection_id", + "resourceId": "res_id_VDU1_2", + "vimLevelResourceType": "OS::Nova::Server" + }, + "affectedVnfcCpIds": [ + "VDU1_CP1-res_id_VDU1_2", + "VDU1_CP2-res_id_VDU1_2", + "VDU1_CP3-res_id_VDU1_2", + "VDU1_CP4-res_id_VDU1_2", + "VDU1_CP5-res_id_VDU1_2" + ], + "addedStorageResourceIds": [ + "res_id_VirtualStorage_2" + ] + } + ], + "affectedVirtualLinks": [ + { + "id": "res_id_internalVL1", + "vnfVirtualLinkDescId": "internalVL1", + "changeType": "LINK_PORT_ADDED", + "networkResource": { + 
"resourceId": "res_id_internalVL1" + }, + "vnfLinkPortIds": [ + "res_id_VDU1_CP3_2" + ] + }, + { + "id": "res_id_internalVL2", + "vnfVirtualLinkDescId": "internalVL2", + "changeType": "LINK_PORT_ADDED", + "networkResource": { + "vimConnectionId": "vim_connection_id", + "resourceId": "res_id_internalVL2", + "vimLevelResourceType": "OS::Neutron::Net" + }, + "vnfLinkPortIds": [ + "res_id_VDU1_CP4_2" + ] + }, + { + "id": "res_id_internalVL3", + "vnfVirtualLinkDescId": "internalVL3", + "changeType": "LINK_PORT_ADDED", + "networkResource": { + "vimConnectionId": "vim_connection_id", + "resourceId": "res_id_internalVL3", + "vimLevelResourceType": "OS::Neutron::Net" + }, + "vnfLinkPortIds": [ + "res_id_VDU1_CP5_2" + ] + } + ], + "affectedExtLinkPorts": [ + { + "id": "res_id_VDU1_CP1_2", + "changeType": "ADDED", + "extCpInstanceId": "cp-res_id_VDU1_CP1_2", + "resourceHandle": { + "resourceId": "res_id_VDU1_CP1_2", + "vimConnectionId": "vim_connection_id", + "vimLevelResourceType": "OS::Neutron::Port" + } + }, + { + "id": "res_id_VDU1_CP2_2", + "changeType": "ADDED", + "extCpInstanceId": "cp-res_id_VDU1_CP2_2", + "resourceHandle": { + "resourceId": "res_id_VDU1_CP2_2", + "vimConnectionId": "vim_connection_id", + "vimLevelResourceType": "OS::Neutron::Port" + } + } + ], + "affectedVirtualStorages": [ + { + "id": "res_id_VirtualStorage_2", + "virtualStorageDescId": "VirtualStorage", + "changeType": "ADDED", + "storageResource": { + "vimConnectionId": "vim_connection_id", + "resourceId": "res_id_VirtualStorage_2", + "vimLevelResourceType": "OS::Cinder::Volume" + } + } + ] +} + +_expected_resource_changes_scale_in = { + "affectedVnfcs": [ + { + "id": "res_id_VDU1_2", + "vduId": "VDU1", + "changeType": "REMOVED", + "computeResource": { + "vimConnectionId": "vim_connection_id", + "resourceId": "res_id_VDU1_2", + "vimLevelResourceType": "OS::Nova::Server" + }, + "affectedVnfcCpIds": [ + "VDU1_CP1-res_id_VDU1_2", + "VDU1_CP2-res_id_VDU1_2", + "VDU1_CP3-res_id_VDU1_2", + 
"VDU1_CP4-res_id_VDU1_2", + "VDU1_CP5-res_id_VDU1_2" + ], + "removedStorageResourceIds": [ + "res_id_VirtualStorage_2" + ] + } + ], + "affectedVirtualLinks": [ + { + "id": "res_id_internalVL1", + "vnfVirtualLinkDescId": "internalVL1", + "changeType": "LINK_PORT_REMOVED", + "networkResource": { + "resourceId": "res_id_internalVL1" + }, + "vnfLinkPortIds": [ + "res_id_VDU1_CP3_2" + ] + }, + { + "id": "res_id_internalVL2", + "vnfVirtualLinkDescId": "internalVL2", + "changeType": "LINK_PORT_REMOVED", + "networkResource": { + "vimConnectionId": "vim_connection_id", + "resourceId": "res_id_internalVL2", + "vimLevelResourceType": "OS::Neutron::Net" + }, + "vnfLinkPortIds": [ + "res_id_VDU1_CP4_2" + ] + }, + { + "id": "res_id_internalVL3", + "vnfVirtualLinkDescId": "internalVL3", + "changeType": "LINK_PORT_REMOVED", + "networkResource": { + "vimConnectionId": "vim_connection_id", + "resourceId": "res_id_internalVL3", + "vimLevelResourceType": "OS::Neutron::Net" + }, + "vnfLinkPortIds": [ + "res_id_VDU1_CP5_2" + ] + } + ], + "affectedExtLinkPorts": [ + { + "id": "res_id_VDU1_CP1_2", + "changeType": "REMOVED", + "extCpInstanceId": "cp-res_id_VDU1_CP1_2", + "resourceHandle": { + "resourceId": "res_id_VDU1_CP1_2", + "vimConnectionId": "vim_connection_id", + "vimLevelResourceType": "OS::Neutron::Port" + } + }, + { + "id": "res_id_VDU1_CP2_2", + "changeType": "REMOVED", + "extCpInstanceId": "cp-res_id_VDU1_CP2_2", + "resourceHandle": { + "resourceId": "res_id_VDU1_CP2_2", + "vimConnectionId": "vim_connection_id", + "vimLevelResourceType": "OS::Neutron::Port" + } + } + ], + "affectedVirtualStorages": [ + { + "id": "res_id_VirtualStorage_2", + "virtualStorageDescId": "VirtualStorage", + "changeType": "REMOVED", + "storageResource": { + "vimConnectionId": "vim_connection_id", + "resourceId": "res_id_VirtualStorage_2", + "vimLevelResourceType": "OS::Cinder::Volume" + } + } + ] +} + +_expected_resource_changes_terminate = { + "affectedVnfcs": [ + { + "id": "res_id_VDU1_1", + "vduId": 
"VDU1", + "changeType": "REMOVED", + "computeResource": { + "vimConnectionId": "vim_connection_id", + "resourceId": "res_id_VDU1_1", + "vimLevelResourceType": "OS::Nova::Server" + }, + "affectedVnfcCpIds": [ + "VDU1_CP1-res_id_VDU1_1", + "VDU1_CP2-res_id_VDU1_1", + "VDU1_CP3-res_id_VDU1_1", + "VDU1_CP4-res_id_VDU1_1", + "VDU1_CP5-res_id_VDU1_1" + ], + "removedStorageResourceIds": [ + "res_id_VirtualStorage_1" + ] + }, + { + "id": "res_id_VDU2", + "vduId": "VDU2", + "changeType": "REMOVED", + "computeResource": { + "vimConnectionId": "vim_connection_id", + "resourceId": "res_id_VDU2", + "vimLevelResourceType": "OS::Nova::Server" + }, + "affectedVnfcCpIds": [ + "VDU2_CP1-res_id_VDU2", + "VDU2_CP2-res_id_VDU2", + "VDU2_CP3-res_id_VDU2", + "VDU2_CP4-res_id_VDU2", + "VDU2_CP5-res_id_VDU2" + ] + } + ], + "affectedVirtualLinks": [ + { + "id": "res_id_internalVL1", + "vnfVirtualLinkDescId": "internalVL1", + "changeType": "LINK_PORT_REMOVED", + "networkResource": { + "resourceId": "res_id_internalVL1" + }, + "vnfLinkPortIds": [ + "res_id_VDU1_CP3_1", + "res_id_VDU2_CP3" + ] + }, + { + "id": "res_id_internalVL2", + "vnfVirtualLinkDescId": "internalVL2", + "changeType": "REMOVED", + "networkResource": { + "vimConnectionId": "vim_connection_id", + "resourceId": "res_id_internalVL2", + "vimLevelResourceType": "OS::Neutron::Net" + }, + "vnfLinkPortIds": [ + "res_id_VDU1_CP4_1", + "res_id_VDU2_CP4" + ] + }, + { + "id": "res_id_internalVL3", + "vnfVirtualLinkDescId": "internalVL3", + "changeType": "REMOVED", + "networkResource": { + "vimConnectionId": "vim_connection_id", + "resourceId": "res_id_internalVL3", + "vimLevelResourceType": "OS::Neutron::Net" + }, + "vnfLinkPortIds": [ + "res_id_VDU1_CP5_1", + "res_id_VDU2_CP5" + ] + } + ], + "affectedExtLinkPorts": [ + { + "id": "res_id_VDU1_CP1_1", + "changeType": "REMOVED", + "extCpInstanceId": "cp-res_id_VDU1_CP1_1", + "resourceHandle": { + "resourceId": "res_id_VDU1_CP1_1", + "vimConnectionId": "vim_connection_id", + 
"vimLevelResourceType": "OS::Neutron::Port" + } + }, + { + "id": "res_id_VDU1_CP2_1", + "changeType": "REMOVED", + "extCpInstanceId": "cp-res_id_VDU1_CP2_1", + "resourceHandle": { + "resourceId": "res_id_VDU1_CP2_1", + "vimConnectionId": "vim_connection_id", + "vimLevelResourceType": "OS::Neutron::Port" + } + }, + { + "id": "res_id_VDU2_CP1", + "changeType": "REMOVED", + "extCpInstanceId": "cp-res_id_VDU2_CP1", + "resourceHandle": { + "resourceId": "res_id_VDU2_CP1", + "vimConnectionId": "vim_connection_id", + "vimLevelResourceType": "OS::Neutron::Port" + } + }, + { + "id": "res_id_VDU2_CP2", + "changeType": "REMOVED", + "extCpInstanceId": "cp-res_id_VDU2_CP2", + "resourceHandle": { + "resourceId": "res_id_VDU2_CP2", + "vimConnectionId": "vim_connection_id", + "vimLevelResourceType": "OS::Neutron::Port" + } + } + ], + "affectedVirtualStorages": [ + { + "id": "res_id_VirtualStorage_1", + "virtualStorageDescId": "VirtualStorage", + "changeType": "REMOVED", + "storageResource": { + "vimConnectionId": "vim_connection_id", + "resourceId": "res_id_VirtualStorage_1", + "vimLevelResourceType": "OS::Cinder::Volume" + } + } + ] +} + + +class TestLcmOpOccUtils(base.BaseTestCase): + + def setUp(self): + super(TestLcmOpOccUtils, self).setUp() + objects.register_all() + self.context = context.get_admin_context() + + def _sort_resource_changes(self, result): + # sort lists before compare with an expected_result since + # order of list items is unpredictable. + # note that an expected_result is already sorted. 
+ def _get_key(obj): + return obj['id'] + + if 'affectedVnfcs' in result: + result['affectedVnfcs'].sort(key=_get_key) + for vnfc in result['affectedVnfcs']: + if 'affectedVnfcCpIds' in vnfc: + vnfc['affectedVnfcCpIds'].sort() + if 'removedStorageResourceIds' in vnfc: + vnfc['removedStorageResourceIds'].sort() + + if 'affectedVirtualLinks' in result: + result['affectedVirtualLinks'].sort(key=_get_key) + for vl in result['affectedVirtualLinks']: + if 'vnfLinkPortIds' in vl: + vl['vnfLinkPortIds'].sort() + + if 'affectedExtLinkPorts' in result: + result['affectedExtLinkPorts'].sort(key=_get_key) + + if 'affectedVirtualStorages' in result: + result['affectedVirtualStorages'].sort(key=_get_key) + + return result + + def test_update_lcmocc_instantiate(self): + # prepare + inst_saved = objects.VnfInstanceV2() + inst = objects.VnfInstanceV2() + inst.instantiatedVnfInfo = ( + objects.VnfInstanceV2_InstantiatedVnfInfo.from_dict( + _inst_info_example_1)) + lcmocc = objects.VnfLcmOpOccV2() + + # execute update_lcmocc + lcmocc_utils.update_lcmocc(lcmocc, inst_saved, inst) + + # check resourceChanges + lcmocc = lcmocc.to_dict() + self.assertEqual( + _expected_resource_changes_instantiate, + self._sort_resource_changes(lcmocc['resourceChanges'])) + + def test_update_lcmocc_scale_out(self): + # prepare + inst_saved = objects.VnfInstanceV2() + inst_saved.instantiatedVnfInfo = ( + objects.VnfInstanceV2_InstantiatedVnfInfo.from_dict( + _inst_info_example_1)) + inst = objects.VnfInstanceV2() + inst.instantiatedVnfInfo = ( + objects.VnfInstanceV2_InstantiatedVnfInfo.from_dict( + _inst_info_example_2)) + lcmocc = objects.VnfLcmOpOccV2() + + # execute update_lcmocc + lcmocc_utils.update_lcmocc(lcmocc, inst_saved, inst) + + # check resourceChanges + lcmocc = lcmocc.to_dict() + self.assertEqual( + _expected_resource_changes_scale_out, + self._sort_resource_changes(lcmocc['resourceChanges'])) + + def test_update_lcmocc_scale_in(self): + # prepare + inst_saved = objects.VnfInstanceV2() + 
inst_saved.instantiatedVnfInfo = ( + objects.VnfInstanceV2_InstantiatedVnfInfo.from_dict( + _inst_info_example_2)) + inst = objects.VnfInstanceV2() + inst.instantiatedVnfInfo = ( + objects.VnfInstanceV2_InstantiatedVnfInfo.from_dict( + _inst_info_example_1)) + lcmocc = objects.VnfLcmOpOccV2() + + # execute update_lcmocc + lcmocc_utils.update_lcmocc(lcmocc, inst_saved, inst) + + # check resourceChanges + lcmocc = lcmocc.to_dict() + self.assertEqual(_expected_resource_changes_scale_in, + self._sort_resource_changes(lcmocc['resourceChanges'])) + + def test_update_lcmocc_terminate(self): + # prepare + inst_saved = objects.VnfInstanceV2() + inst_saved.instantiatedVnfInfo = ( + objects.VnfInstanceV2_InstantiatedVnfInfo.from_dict( + _inst_info_example_1)) + inst = objects.VnfInstanceV2() + inst.instantiatedVnfInfo = objects.VnfInstanceV2_InstantiatedVnfInfo( + flavourId="SAMPLE_VNFD_ID", + vnfState='STOPPED') + lcmocc = objects.VnfLcmOpOccV2() + + # execute update_lcmocc + lcmocc_utils.update_lcmocc(lcmocc, inst_saved, inst) + + # check resourceChanges + lcmocc = lcmocc.to_dict() + self.assertEqual(_expected_resource_changes_terminate, + self._sort_resource_changes(lcmocc['resourceChanges'])) diff --git a/tacker/tests/unit/sol_refactored/common/test_vnfd_utils.py b/tacker/tests/unit/sol_refactored/common/test_vnfd_utils.py index 32680faff..d59fcb286 100644 --- a/tacker/tests/unit/sol_refactored/common/test_vnfd_utils.py +++ b/tacker/tests/unit/sol_refactored/common/test_vnfd_utils.py @@ -15,6 +15,7 @@ import os +from tacker.sol_refactored.common import exceptions as sol_ex from tacker.sol_refactored.common import vnfd_utils from tacker.tests import base @@ -100,9 +101,6 @@ class TestVnfd(base.BaseTestCase): result = self.vnfd_1.get_base_hot(SAMPLE_FLAVOUR_ID) # check keys and sampling data self.assertEqual(['VDU1.yaml'], list(result['files'].keys())) - self.assertEqual(1, - result['template']['resources']['VDU1_scale_out']['properties'] - ['scaling_adjustment']) 
self.assertEqual({'get_param': 'net3'}, result['files']['VDU1.yaml']['resources']['VDU1_CP3'] ['properties']['network']) @@ -159,3 +157,25 @@ class TestVnfd(base.BaseTestCase): result = self.vnfd_1.get_interface_script(SAMPLE_FLAVOUR_ID, "scale_end") self.assertEqual(None, result) + + def test_get_scale_vdu_and_num(self): + expected_result = {'VDU1': 1} + result = self.vnfd_1.get_scale_vdu_and_num(SAMPLE_FLAVOUR_ID, + 'VDU1_scale') + self.assertEqual(expected_result, result) + + def test_get_scale_vdu_and_num_no_delta(self): + self.assertRaises(sol_ex.DeltaMissingInVnfd, + self.vnfd_1.get_scale_vdu_and_num, SAMPLE_FLAVOUR_ID, + 'Invalid_scale') + + def test_get_scale_info_from_inst_level(self): + expected_result = {'VDU1_scale': {'scale_level': 2}} + result = self.vnfd_1.get_scale_info_from_inst_level( + SAMPLE_FLAVOUR_ID, 'instantiation_level_2') + self.assertEqual(expected_result, result) + + def test_get_max_scale_level(self): + result = self.vnfd_1.get_max_scale_level(SAMPLE_FLAVOUR_ID, + 'VDU1_scale') + self.assertEqual(2, result) diff --git a/tacker/tests/unit/sol_refactored/conductor/test_vnflcm_driver_v2.py b/tacker/tests/unit/sol_refactored/conductor/test_vnflcm_driver_v2.py index fcf4f12bd..62cadc5ce 100644 --- a/tacker/tests/unit/sol_refactored/conductor/test_vnflcm_driver_v2.py +++ b/tacker/tests/unit/sol_refactored/conductor/test_vnflcm_driver_v2.py @@ -126,11 +126,11 @@ _inst_req_example = { } } -# instantiatedVnfInfo example for terminate grant test +# instantiatedVnfInfo example for terminate/scale grant test # NOTE: # - some identifiers are modified to make check easy. -# - some attributes which are not related to make terminate grant -# retuest are omitted. +# - some attributes which are not related to make terminate/scale grant +# request are omitted. 
_inst_info_example = { "flavourId": "simple", "vnfState": "STARTED", @@ -386,7 +386,7 @@ _inst_info_example = { { "id": "259c5895-7be6-4bed-8a94-221c41b3d08f", "cpdId": "VDU1_CP1", - # when extLinkPorts of extVitualLinks specified, there is + # when extLinkPorts of extVirtualLinks specified, there is # no vnfExtCpId nor vnfLinkPortId. }, { @@ -534,6 +534,18 @@ class TestVnfLcmDriverV2(base.BaseTestCase): self.vnfd_1 = vnfd_utils.Vnfd(SAMPLE_VNFD_ID) self.vnfd_1.init_from_csar_dir(os.path.join(sample_dir, "sample1")) + def _grant_req_links(self, lcmocc_id, inst_id): + return { + 'vnfLcmOpOcc': { + 'href': '{}/v2/vnflcm/vnf_lcm_op_occs/{}'.format( + self.driver.endpoint, lcmocc_id) + }, + 'vnfInstance': { + 'href': '{}/v2/vnflcm/vnf_instances/{}'.format( + self.driver.endpoint, inst_id) + } + } + @mock.patch.object(nfvo_client.NfvoClient, 'grant') def test_instantiate_grant(self, mocked_grant): # prepare @@ -551,7 +563,7 @@ class TestVnfLcmDriverV2(base.BaseTestCase): lcmocc = objects.VnfLcmOpOccV2( # required fields id=uuidutils.generate_uuid(), - operationState=fields.LcmOperationStateType.PROCESSING, + operationState=fields.LcmOperationStateType.STARTING, stateEnteredTime=datetime.utcnow(), startTime=datetime.utcnow(), vnfInstanceId=inst.id, @@ -563,7 +575,7 @@ class TestVnfLcmDriverV2(base.BaseTestCase): mocked_grant.return_value = objects.GrantV1() # run instantiate_grant - grant_req, _ = self.driver.instantiate_grant( + grant_req, _ = self.driver.grant( self.context, lcmocc, inst, self.vnfd_1) # check grant_req is constructed according to intention @@ -575,7 +587,8 @@ class TestVnfLcmDriverV2(base.BaseTestCase): 'flavourId': SAMPLE_FLAVOUR_ID, 'operation': 'INSTANTIATE', 'isAutomaticInvocation': False, - 'instantiationLevelId': 'instantiation_level_2' + 'instantiationLevelId': 'instantiation_level_2', + '_links': self._grant_req_links(lcmocc.id, inst.id) } for key, value in expected_fixed_items.items(): self.assertEqual(value, grant_req[key]) @@ -641,7 +654,7 
@@ class TestVnfLcmDriverV2(base.BaseTestCase): lcmocc = objects.VnfLcmOpOccV2( # required fields id=uuidutils.generate_uuid(), - operationState=fields.LcmOperationStateType.PROCESSING, + operationState=fields.LcmOperationStateType.STARTING, stateEnteredTime=datetime.utcnow(), startTime=datetime.utcnow(), vnfInstanceId=inst.id, @@ -653,7 +666,7 @@ class TestVnfLcmDriverV2(base.BaseTestCase): mocked_grant.return_value = objects.GrantV1() # run terminate_grant - grant_req, _ = self.driver.terminate_grant( + grant_req, _ = self.driver.grant( self.context, lcmocc, inst, self.vnfd_1) # check grant_req is constructed according to intention @@ -663,7 +676,8 @@ class TestVnfLcmDriverV2(base.BaseTestCase): 'vnfLcmOpOccId': lcmocc.id, 'vnfdId': SAMPLE_VNFD_ID, 'operation': 'TERMINATE', - 'isAutomaticInvocation': False + 'isAutomaticInvocation': False, + '_links': self._grant_req_links(lcmocc.id, inst.id) } for key, value in expected_fixed_items.items(): self.assertEqual(value, grant_req[key]) @@ -711,3 +725,198 @@ class TestVnfLcmDriverV2(base.BaseTestCase): for key, value in check_reses.items(): for name, ids in value.items(): self.assertEqual(expected_res_ids[key][name], ids) + + def _scale_grant_prepare(self, scale_type): + inst = objects.VnfInstanceV2( + # required fields + id=uuidutils.generate_uuid(), + vnfdId=SAMPLE_VNFD_ID, + vnfProvider='provider', + vnfProductName='product name', + vnfSoftwareVersion='software version', + vnfdVersion='vnfd version', + instantiationState='INSTANTIATED' + ) + inst_info = objects.VnfInstanceV2_InstantiatedVnfInfo.from_dict( + _inst_info_example) + inst.instantiatedVnfInfo = inst_info + req = objects.ScaleVnfRequest.from_dict( + {"type": scale_type, + "aspectId": "VDU1_scale", + "numberOfSteps": 1}) + lcmocc = objects.VnfLcmOpOccV2( + # required fields + id=uuidutils.generate_uuid(), + operationState=fields.LcmOperationStateType.STARTING, + stateEnteredTime=datetime.utcnow(), + startTime=datetime.utcnow(), + vnfInstanceId=inst.id, + 
operation=fields.LcmOperationType.SCALE, + isAutomaticInvocation=False, + isCancelPending=False, + operationParams=req) + + return inst, lcmocc + + @mock.patch.object(nfvo_client.NfvoClient, 'grant') + def test_scale_grant_scale_out(self, mocked_grant): + # prepare + inst, lcmocc = self._scale_grant_prepare('SCALE_OUT') + mocked_grant.return_value = objects.GrantV1() + + # run scale_grant scale-out + grant_req, _ = self.driver.grant( + self.context, lcmocc, inst, self.vnfd_1) + + # check grant_req is constructed according to intention + grant_req = grant_req.to_dict() + expected_fixed_items = { + 'vnfInstanceId': inst.id, + 'vnfLcmOpOccId': lcmocc.id, + 'vnfdId': SAMPLE_VNFD_ID, + 'operation': 'SCALE', + 'isAutomaticInvocation': False, + '_links': self._grant_req_links(lcmocc.id, inst.id) + } + for key, value in expected_fixed_items.items(): + self.assertEqual(value, grant_req[key]) + + add_reses = grant_req['addResources'] + check_reses = { + 'COMPUTE': {'VDU1': []}, + 'STORAGE': {'VirtualStorage': []}, + 'LINKPORT': {'VDU1_CP1': [], 'VDU1_CP2': [], 'VDU1_CP3': [], + 'VDU1_CP4': [], 'VDU1_CP5': []} + } + expected_num = { + 'COMPUTE': {'VDU1': 1}, + 'STORAGE': {'VirtualStorage': 1}, + 'LINKPORT': {'VDU1_CP1': 1, 'VDU1_CP2': 1, 'VDU1_CP3': 1, + 'VDU1_CP4': 1, 'VDU1_CP5': 1} + } + for res in add_reses: + check_reses[res['type']][res['resourceTemplateId']].append( + res['id']) + + for key, value in check_reses.items(): + for name, ids in value.items(): + self.assertEqual(expected_num[key][name], len(ids)) + + @mock.patch.object(nfvo_client.NfvoClient, 'grant') + def test_scale_grant_scale_in(self, mocked_grant): + # prepare + inst, lcmocc = self._scale_grant_prepare('SCALE_IN') + mocked_grant.return_value = objects.GrantV1() + + # run scale_grant scale-in + grant_req, _ = self.driver.grant( + self.context, lcmocc, inst, self.vnfd_1) + + # check grant_req is constructed according to intention + grant_req = grant_req.to_dict() + expected_fixed_items = { + 
'vnfInstanceId': inst.id, + 'vnfLcmOpOccId': lcmocc.id, + 'vnfdId': SAMPLE_VNFD_ID, + 'operation': 'SCALE', + 'isAutomaticInvocation': False, + '_links': self._grant_req_links(lcmocc.id, inst.id) + } + for key, value in expected_fixed_items.items(): + self.assertEqual(value, grant_req[key]) + + rm_reses = grant_req['removeResources'] + check_reses = { + 'COMPUTE': {'VDU1': []}, + 'STORAGE': {'VirtualStorage': []}, + 'LINKPORT': {'VDU1_CP1': [], 'VDU1_CP2': [], 'VDU1_CP3': [], + 'VDU1_CP4': [], 'VDU1_CP5': []} + } + expected_res_ids = { + 'COMPUTE': { + 'VDU1': ['res_id_VDU1_1'] + }, + 'STORAGE': { + 'VirtualStorage': ['res_id_VirtualStorage_1'] + }, + 'LINKPORT': { + 'VDU1_CP1': ['res_id_VDU1_1_CP1'], + 'VDU1_CP2': ['res_id_VDU1_1_CP2'], + 'VDU1_CP3': ['res_id_VDU1_1_CP3'], + 'VDU1_CP4': ['res_id_VDU1_1_CP4'], + 'VDU1_CP5': ['res_id_VDU1_1_CP5'] + } + } + for res in rm_reses: + check_reses[res['type']][res['resourceTemplateId']].append( + res['resource']['resourceId']) + + for key, value in check_reses.items(): + for name, ids in value.items(): + self.assertEqual(expected_res_ids[key][name], ids) + + def test_make_inst_info_common_instantiate(self): + # prepare + inst_saved = objects.VnfInstanceV2( + # only set used members in the method + instantiatedVnfInfo=objects.VnfInstanceV2_InstantiatedVnfInfo() + ) + inst = inst_saved.obj_clone() + req = objects.InstantiateVnfRequestV2.from_dict(_inst_req_example) + lcmocc = objects.VnfLcmOpOccV2( + # only set used members in the method + operation=fields.LcmOperationType.INSTANTIATE, + operationParams=req) + + # run _make_inst_info_common + self.driver._make_inst_info_common( + lcmocc, inst_saved, inst, self.vnfd_1) + + inst = inst.to_dict() + expected_scale_status = [{'aspectId': 'VDU1_scale', 'scaleLevel': 2}] + expected_max_scale_levels = [ + {'aspectId': 'VDU1_scale', 'scaleLevel': 2}] + + self.assertEqual(expected_scale_status, + inst['instantiatedVnfInfo']['scaleStatus']) + self.assertEqual(expected_max_scale_levels, 
+ inst['instantiatedVnfInfo']['maxScaleLevels']) + + def test_make_inst_info_common_scale(self): + # prepare + inst_saved = objects.VnfInstanceV2( + # only set used members in the method + instantiatedVnfInfo=objects.VnfInstanceV2_InstantiatedVnfInfo() + ) + inst_saved.instantiatedVnfInfo.scaleStatus = [ + objects.ScaleInfoV2(aspectId='VDU1_scale', scaleLevel=2) + ] + inst_saved.instantiatedVnfInfo.maxScaleLevels = [ + objects.ScaleInfoV2(aspectId='VDU1_scale', scaleLevel=2) + ] + inst = objects.VnfInstanceV2( + # only set used members in the method + instantiatedVnfInfo=objects.VnfInstanceV2_InstantiatedVnfInfo() + ) + req = objects.ScaleVnfRequest.from_dict( + {"type": "SCALE_IN", + "aspectId": "VDU1_scale", + "numberOfSteps": 1}) + lcmocc = objects.VnfLcmOpOccV2( + # only set used members in the method + operation=fields.LcmOperationType.SCALE, + operationParams=req) + + # run _make_inst_info_common + self.driver._make_inst_info_common( + lcmocc, inst_saved, inst, self.vnfd_1) + + inst = inst.to_dict() + expected_scale_status = [{'aspectId': 'VDU1_scale', 'scaleLevel': 1}] + expected_max_scale_levels = [ + {'aspectId': 'VDU1_scale', 'scaleLevel': 2}] + + self.assertEqual(expected_scale_status, + inst['instantiatedVnfInfo']['scaleStatus']) + self.assertEqual(expected_max_scale_levels, + inst['instantiatedVnfInfo']['maxScaleLevels']) diff --git a/tacker/tests/unit/sol_refactored/controller/test_vnflcm_v2.py b/tacker/tests/unit/sol_refactored/controller/test_vnflcm_v2.py index f85983909..130ff885b 100644 --- a/tacker/tests/unit/sol_refactored/controller/test_vnflcm_v2.py +++ b/tacker/tests/unit/sol_refactored/controller/test_vnflcm_v2.py @@ -52,7 +52,7 @@ class TestVnflcmV2(db_base.SqlTestCase): instantiationState=inst_state ) - req = {"flavourId": "simple"} # instantate request + req = {"flavourId": "simple"} # instantiate request lcmocc = objects.VnfLcmOpOccV2( # required fields id=uuidutils.generate_uuid(), @@ -259,3 +259,93 @@ class 
TestVnflcmV2(db_base.SqlTestCase): # check grant_req and grant are deleted self.assertRaises(sol_ex.GrantRequestOrGrantNotFound, lcmocc_utils.get_grant_req_and_grant, self.context, lcmocc) + + def test_scale_not_instantiated(self): + inst_id, _ = self._create_inst_and_lcmocc('NOT_INSTANTIATED', + fields.LcmOperationStateType.COMPLETED) + body = {"aspectId": "aspect_1", "type": "SCALE_OUT"} + + self.assertRaises(sol_ex.VnfInstanceIsNotInstantiated, + self.controller.scale, request=self.request, id=inst_id, + body=body) + + def test_scale_lcmocc_in_progress(self): + inst_id, _ = self._create_inst_and_lcmocc('INSTANTIATED', + fields.LcmOperationStateType.FAILED_TEMP) + body = {"aspectId": "aspect_1", "type": "SCALE_OUT"} + + self.assertRaises(sol_ex.OtherOperationInProgress, + self.controller.scale, request=self.request, id=inst_id, + body=body) + + def _prepare_db_for_scale_param_check(self, scale_status, + max_scale_levels): + inst = objects.VnfInstanceV2( + # required fields + id=uuidutils.generate_uuid(), + vnfdId=uuidutils.generate_uuid(), + vnfProvider='provider', + vnfProductName='product name', + vnfSoftwareVersion='software version', + vnfdVersion='vnfd version', + instantiationState='INSTANTIATED' + ) + inst.instantiatedVnfInfo = objects.VnfInstanceV2_InstantiatedVnfInfo( + flavourId='small', + vnfState='STARTED', + scaleStatus=scale_status, + maxScaleLevels=max_scale_levels + ) + inst.create(self.context) + + return inst.id + + def test_scale_invalid_aspect_id(self): + scale_status = [ + objects.ScaleInfoV2( + aspectId="aspect_2", + scaleLevel=0 + ) + ] + max_scale_levels = [ + objects.ScaleInfoV2( + aspectId="aspect_2", + scaleLevel=3 + ) + ] + inst_id = self._prepare_db_for_scale_param_check(scale_status, + max_scale_levels) + body = {"aspectId": "aspect_1", "type": "SCALE_OUT"} + + self.assertRaises(sol_ex.InvalidScaleAspectId, + self.controller.scale, request=self.request, id=inst_id, + body=body) + + def test_scale_invalid_number_of_steps(self): + 
scale_status = [ + objects.ScaleInfoV2( + aspectId="aspect_1", + scaleLevel=1 + ) + ] + max_scale_levels = [ + objects.ScaleInfoV2( + aspectId="aspect_1", + scaleLevel=3 + ) + ] + inst_id = self._prepare_db_for_scale_param_check(scale_status, + max_scale_levels) + body = {"aspectId": "aspect_1", "type": "SCALE_OUT", + "numberOfSteps": 3} + + self.assertRaises(sol_ex.InvalidScaleNumberOfSteps, + self.controller.scale, request=self.request, id=inst_id, + body=body) + + body = {"aspectId": "aspect_1", "type": "SCALE_IN", + "numberOfSteps": 2} + + self.assertRaises(sol_ex.InvalidScaleNumberOfSteps, + self.controller.scale, request=self.request, id=inst_id, + body=body) diff --git a/tacker/tests/unit/sol_refactored/infra_drivers/openstack/test_openstack.py b/tacker/tests/unit/sol_refactored/infra_drivers/openstack/test_openstack.py new file mode 100644 index 000000000..27859daf6 --- /dev/null +++ b/tacker/tests/unit/sol_refactored/infra_drivers/openstack/test_openstack.py @@ -0,0 +1,1328 @@ +# Copyright (C) 2021 Nippon Telegraph and Telephone Corporation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import os + +from tacker import context +from tacker.sol_refactored.common import vnfd_utils +from tacker.sol_refactored.infra_drivers.openstack import openstack +from tacker.sol_refactored import objects +from tacker.sol_refactored.objects.v2 import fields +from tacker.tests import base + + +SAMPLE_VNFD_ID = "b1bb0ce7-ebca-4fa7-95ed-4840d7000000" +SAMPLE_FLAVOUR_ID = "simple" + +# instantiateVnfRequest example +_vim_connection_info_example = { + "vimId": "vim_id_1", + "vimType": "ETSINFV.OPENSTACK_KEYSTONE.V_3", + # "interfaceInfo": omitted + # "accessInfo": omitted +} + +_instantiate_req_example = { + "flavourId": SAMPLE_FLAVOUR_ID, + "extVirtualLinks": [ + { + "id": "id_ext_vl_1", + "resourceId": "res_id_ext_vl_1", + "extCps": [ + { + "cpdId": "VDU1_CP1", + "cpConfig": { + "VDU1_CP1_1": { + "cpProtocolData": [ + { + "layerProtocol": "IP_OVER_ETHERNET", + "ipOverEthernet": { + "ipAddresses": [ + { + "type": "IPV4", + "numDynamicAddresses": 1 + } + ] + } + } + ] + } + } + }, + { + "cpdId": "VDU2_CP1", + "cpConfig": { + "VDU2_CP1_1": { + "cpProtocolData": [ + { + "layerProtocol": "IP_OVER_ETHERNET", + "ipOverEthernet": { + "ipAddresses": [ + { + "type": "IPV4", + "fixedAddresses": [ + "10.10.0.102" + ] + } + ] + } + } + ] + } + } + } + ] + }, + { + "id": "id_ext_vl_2", + "resourceId": "res_id_id_ext_vl_2", + "extCps": [ + { + "cpdId": "VDU1_CP2", + "cpConfig": { + "VDU1_CP2_1": { + "cpProtocolData": [ + { + "layerProtocol": "IP_OVER_ETHERNET", + "ipOverEthernet": { + "ipAddresses": [ + { + "type": "IPV4", + "numDynamicAddresses": 1, + "subnetId": "res_id_subnet_1" + } + ] + } + } + ] + } + } + }, + { + "cpdId": "VDU2_CP2", + "cpConfig": { + "VDU2_CP2_1": { + "linkPortId": "link_port_id_VDU2_CP2" + } + } + } + ], + "extLinkPorts": [ + { + "id": "link_port_id_VDU2_CP2", + "resourceHandle": { + "resourceId": "res_id_VDU2_CP2" + } + } + ] + } + ], + "extManagedVirtualLinks": [ + { + "id": "id_ext_mgd_1", + "vnfVirtualLinkDescId": "internalVL1", + "resourceId": 
"res_id_internalVL1" + } + ], + "vimConnectionInfo": { + "vim1": _vim_connection_info_example + } +} + +# heat resources example +# NOTE: +# - following attributes which are not related to tests are omitted. +# updated_time, logical_resource_id, resource_status, resource_status_reason +# and "rel: self" in links. +# - some identifiers are modified to make check easy. +# - stack_id is based on real example. +_url = "http://127.0.0.1/heat-api/v1/57bcfdcbccbc4b85a9a4c94690a1164f/stacks/" +_stack_id = ("vnf-768c24d2-2ea6-4225-b1c7-79e42abfbde6/" + "1fa212ca-e904-4109-9a96-1900d35d2a5b") +_href = "".join((_url, _stack_id)) + +_stack_id_VDU1_scale = ( + "vnf-768c24d2-2ea6-4225-b1c7-79e42abfbde6-VDU1_scale_group-dv4kv7qtcwhw/" + "53ee92b6-8193-4df5-90f7-2738e61fba2c") +_href_VDU1_scale = "".join((_url, _stack_id_VDU1_scale)) + +_stack_id_VDU1_1 = ( + "vnf-768c24d2-2ea6-4225-b1c7-79e42abfbde6-VDU1_scale_group-dv4kv7qtcwhw-" + "bemybz4ugeso-mrajuhqw7ath/ea59d312-bab3-4ef2-897c-88b5cee117de") +_href_VDU1_1 = "".join((_url, _stack_id_VDU1_1)) + +_stack_id_VDU1_2 = ( + "vnf-768c24d2-2ea6-4225-b1c7-79e42abfbde6-VDU1_scale_group-dv4kv7qtcwhw-" + "myet4efobvvp-aptv6apap2h5/dd94d2ae-a02b-4fab-a492-514c422299ec") +_href_VDU1_2 = "".join((_url, _stack_id_VDU1_2)) + +_heat_reses_example = [ + { + "creation_time": "2021-12-10T00:40:46Z", + "resource_name": "VDU2", + "physical_resource_id": "res_id_VDU2", + "resource_type": "OS::Nova::Server", + "links": [ + { + "href": _href, + "rel": "stack" + } + ], + "required_by": [] + }, + { + "creation_time": "2021-12-10T00:40:46Z", + "resource_name": "VDU2_CP1", + "physical_resource_id": "res_id_VDU2_CP1", + "resource_type": "OS::Neutron::Port", + "links": [ + { + "href": _href, + "rel": "stack" + } + ], + "required_by": [ + "VDU2" + ] + }, + { + "creation_time": "2021-12-10T00:40:47Z", + "resource_name": "VDU2_CP5", + "physical_resource_id": "res_id_VDU2_CP5", + "resource_type": "OS::Neutron::Port", + "links": [ + { + "href": _href, + "rel": 
"stack" + } + ], + "required_by": [ + "VDU2" + ] + }, + { + "creation_time": "2021-12-10T00:40:49Z", + "resource_name": "internalVL3_subnet", + "physical_resource_id": "06f68f37-d37c-4310-8756-3884d9b8cb4b", + "resource_type": "OS::Neutron::Subnet", + "links": [ + { + "href": _href, + "rel": "stack" + } + ], + "required_by": [ + "VDU2", + "VDU2_CP5" + ] + }, + { + "creation_time": "2021-12-10T00:40:51Z", + "resource_name": "VDU2_CP3", + "physical_resource_id": "res_id_VDU2_CP3", + "resource_type": "OS::Neutron::Port", + "links": [ + { + "href": _href, + "rel": "stack" + } + ], + "required_by": [ + "VDU2" + ] + }, + { + "creation_time": "2021-12-10T00:40:53Z", + "resource_name": "VDU2_CP4", + "physical_resource_id": "res_id_VDU2_CP4", + "resource_type": "OS::Neutron::Port", + "links": [ + { + "href": _href, + "rel": "stack" + } + ], + "required_by": [ + "VDU2" + ] + }, + { + "creation_time": "2021-12-10T00:40:53Z", + "resource_name": "VDU1_scale_out", + "physical_resource_id": "234c78aefbaa4770ba5bbd8c7f624584", + "resource_type": "OS::Heat::ScalingPolicy", + "links": [ + { + "href": _href, + "rel": "stack" + } + ], + "required_by": [] + }, + { + "creation_time": "2021-12-10T00:40:53Z", + "resource_name": "VDU1_scale_in", + "physical_resource_id": "7cb738b392a64936a14f92bb79123a42", + "resource_type": "OS::Heat::ScalingPolicy", + "links": [ + { + "href": _href, + "rel": "stack" + } + ], + "required_by": [] + }, + { + "creation_time": "2021-12-10T00:40:53Z", + "resource_name": "VDU1_scale_group", + "physical_resource_id": "53ee92b6-8193-4df5-90f7-2738e61fba2c", + "resource_type": "OS::Heat::AutoScalingGroup", + "links": [ + { + "href": _href, + "rel": "stack" + }, + { + "href": _href_VDU1_scale, + "rel": "nested" + } + ], + "required_by": [ + "VDU1_scale_out", + "VDU1_scale_in" + ] + }, + { + "creation_time": "2021-12-10T00:40:55Z", + "resource_name": "internalVL3", + "physical_resource_id": "res_id_internalVL3", + "resource_type": "OS::Neutron::Net", + "links": [ + 
{ + "href": _href, + "rel": "stack" + } + ], + "required_by": [ + "internalVL3_subnet", + "VDU2_CP5", + "VDU1_scale_group" + ] + }, + { + "creation_time": "2021-12-10T00:40:56Z", + "resource_name": "internalVL2_subnet", + "physical_resource_id": "848bc969-cc5c-47b2-94de-469556b993fb", + "resource_type": "OS::Neutron::Subnet", + "links": [ + { + "href": _href, + "rel": "stack" + } + ], + "required_by": [ + "VDU2", + "VDU2_CP4" + ] + }, + { + "creation_time": "2021-12-10T00:40:58Z", + "resource_name": "internalVL2", + "physical_resource_id": "res_id_internalVL2", + "resource_type": "OS::Neutron::Net", + "links": [ + { + "href": _href, + "rel": "stack" + } + ], + "required_by": [ + "VDU2_CP4", + "internalVL2_subnet", + "VDU1_scale_group" + ] + }, + { + "creation_time": "2021-12-10T00:41:35Z", + "resource_name": "bemybz4ugeso", + "physical_resource_id": "ea59d312-bab3-4ef2-897c-88b5cee117de", + "resource_type": "VDU1.yaml", + "links": [ + { + "href": _href_VDU1_scale, + "rel": "stack" + }, + { + "href": _href_VDU1_1, + "rel": "nested" + } + ], + "required_by": [], + "parent_resource": "VDU1_scale_group" + }, + { + "creation_time": "2021-12-10T01:03:37Z", + "resource_name": "myet4efobvvp", + "physical_resource_id": "dd94d2ae-a02b-4fab-a492-514c422299ec", + "resource_type": "VDU1.yaml", + "links": [ + { + "href": _href_VDU1_scale, + "rel": "stack" + }, + { + "href": _href_VDU1_2, + "rel": "nested" + } + ], + "required_by": [], + "parent_resource": "VDU1_scale_group" + }, + { + "creation_time": "2021-12-10T00:41:43Z", + "resource_name": "VDU1", + "physical_resource_id": "res_id_VDU1_1", + "resource_type": "OS::Nova::Server", + "links": [ + { + "href": _href_VDU1_1, + "rel": "stack" + } + ], + "required_by": [], + "parent_resource": "bemybz4ugeso" + }, + { + "creation_time": "2021-12-10T00:41:45Z", + "resource_name": "VirtualStorage", + "physical_resource_id": "res_id_VirtualStorage_1", + "resource_type": "OS::Cinder::Volume", + "links": [ + { + "href": _href_VDU1_1, + 
"rel": "stack" + } + ], + "required_by": [ + "VDU1" + ], + "parent_resource": "bemybz4ugeso" + }, + { + "creation_time": "2021-12-10T00:41:45Z", + "resource_name": "multi", + "physical_resource_id": "0690bb1b-36d0-4684-851b-9c1b13a9a5de", + "resource_type": "OS::Cinder::VolumeType", + "links": [ + { + "href": _href_VDU1_1, + "rel": "stack" + } + ], + "required_by": [ + "VirtualStorage" + ], + "parent_resource": "bemybz4ugeso" + }, + { + "creation_time": "2021-12-10T00:41:45Z", + "resource_name": "VDU1_CP2", + "physical_resource_id": "res_id_VDU1_CP2_1", + "resource_type": "OS::Neutron::Port", + "links": [ + { + "href": _href_VDU1_1, + "rel": "stack" + } + ], + "required_by": [ + "VDU1" + ], + "parent_resource": "bemybz4ugeso" + }, + { + "creation_time": "2021-12-10T00:41:45Z", + "resource_name": "VDU1_CP1", + "physical_resource_id": "res_id_VDU1_CP1_1", + "resource_type": "OS::Neutron::Port", + "links": [ + { + "href": _href_VDU1_1, + "rel": "stack" + } + ], + "required_by": [ + "multi", + "VDU1" + ], + "parent_resource": "bemybz4ugeso" + }, + { + "creation_time": "2021-12-10T00:41:45Z", + "resource_name": "VDU1_CP4", + "physical_resource_id": "res_id_VDU1_CP4_1", + "resource_type": "OS::Neutron::Port", + "links": [ + { + "href": _href_VDU1_1, + "rel": "stack" + } + ], + "required_by": [ + "VDU1" + ], + "parent_resource": "bemybz4ugeso" + }, + { + "creation_time": "2021-12-10T00:41:45Z", + "resource_name": "VDU1_CP5", + "physical_resource_id": "res_id_VDU1_CP5_1", + "resource_type": "OS::Neutron::Port", + "links": [ + { + "href": _href_VDU1_1, + "rel": "stack" + } + ], + "required_by": [ + "VDU1" + ], + "parent_resource": "bemybz4ugeso" + }, + { + "creation_time": "2021-12-10T00:41:46Z", + "resource_name": "VDU1_CP3", + "physical_resource_id": "res_id_VDU1_CP3_1", + "resource_type": "OS::Neutron::Port", + "links": [ + { + "href": _href_VDU1_1, + "rel": "stack" + } + ], + "required_by": [ + "VDU1" + ], + "parent_resource": "bemybz4ugeso" + }, + { + "creation_time": 
"2021-12-10T01:03:49Z", + "resource_name": "VDU1", + "physical_resource_id": "res_id_VDU1_2", + "resource_type": "OS::Nova::Server", + "links": [ + { + "href": _href_VDU1_2, + "rel": "stack" + } + ], + "required_by": [], + "parent_resource": "myet4efobvvp" + }, + { + "creation_time": "2021-12-10T01:03:51Z", + "resource_name": "VirtualStorage", + "physical_resource_id": "res_id_VirtualStorage_2", + "resource_type": "OS::Cinder::Volume", + "links": [ + { + "href": _href_VDU1_2, + "rel": "stack" + } + ], + "required_by": [ + "VDU1" + ], + "parent_resource": "myet4efobvvp" + }, + { + "creation_time": "2021-12-10T01:03:53Z", + "resource_name": "multi", + "physical_resource_id": "03433e3a-a8c4-40b3-8802-6b114922feff", + "resource_type": "OS::Cinder::VolumeType", + "links": [ + { + "href": _href_VDU1_2, + "rel": "stack" + } + ], + "required_by": [ + "VirtualStorage" + ], + "parent_resource": "myet4efobvvp" + }, + { + "creation_time": "2021-12-10T01:03:53Z", + "resource_name": "VDU1_CP2", + "physical_resource_id": "res_id_VDU1_CP2_2", + "resource_type": "OS::Neutron::Port", + "links": [ + { + "href": _href_VDU1_2, + "rel": "stack" + } + ], + "required_by": [ + "VDU1" + ], + "parent_resource": "myet4efobvvp" + }, + { + "creation_time": "2021-12-10T01:03:53Z", + "resource_name": "VDU1_CP1", + "physical_resource_id": "res_id_VDU1_CP1_2", + "resource_type": "OS::Neutron::Port", + "links": [ + { + "href": _href_VDU1_2, + "rel": "stack" + } + ], + "required_by": [ + "multi", + "VDU1" + ], + "parent_resource": "myet4efobvvp" + }, + { + "creation_time": "2021-12-10T01:03:53Z", + "resource_name": "VDU1_CP4", + "physical_resource_id": "res_id_VDU1_CP4_2", + "resource_type": "OS::Neutron::Port", + "links": [ + { + "href": _href_VDU1_2, + "rel": "stack" + } + ], + "required_by": [ + "VDU1" + ], + "parent_resource": "myet4efobvvp" + }, + { + "creation_time": "2021-12-10T01:03:55Z", + "resource_name": "VDU1_CP5", + "physical_resource_id": "res_id_VDU1_CP5_2", + "resource_type": 
"OS::Neutron::Port", + "links": [ + { + "href": _href_VDU1_2, + "rel": "stack" + } + ], + "required_by": [ + "VDU1" + ], + "parent_resource": "myet4efobvvp" + }, + { + "creation_time": "2021-12-10T01:03:57Z", + "resource_name": "VDU1_CP3", + "physical_resource_id": "res_id_VDU1_CP3_2", + "resource_type": "OS::Neutron::Port", + "links": [ + { + "href": _href_VDU1_2, + "rel": "stack" + } + ], + "required_by": [ + "VDU1" + ], + "parent_resource": "myet4efobvvp" + } +] + +# expected results +_expected_inst_info = { + "flavourId": "simple", + "vnfState": "STARTED", + "extCpInfo": [ + { + 'id': 'cp-req-link_port_id_VDU2_CP2', + 'cpdId': 'VDU2_CP2', + 'cpConfigId': 'VDU2_CP2_1', + 'extLinkPortId': 'req-link_port_id_VDU2_CP2', + }, + { + "id": "cp-res_id_VDU1_CP1_1", + "cpdId": "VDU1_CP1", + "cpConfigId": "VDU1_CP1_1", + "cpProtocolInfo": [ + { + "layerProtocol": "IP_OVER_ETHERNET", + "ipOverEthernet": { + "ipAddresses": [ + { + "type": "IPV4", + "isDynamic": True + } + ] + } + } + ], + "extLinkPortId": "res_id_VDU1_CP1_1", + "associatedVnfcCpId": "VDU1_CP1-res_id_VDU1_1" + }, + { + "id": "cp-res_id_VDU1_CP1_2", + "cpdId": "VDU1_CP1", + "cpConfigId": "VDU1_CP1_1", + "cpProtocolInfo": [ + { + "layerProtocol": "IP_OVER_ETHERNET", + "ipOverEthernet": { + "ipAddresses": [ + { + "type": "IPV4", + "isDynamic": True + } + ] + } + } + ], + "extLinkPortId": "res_id_VDU1_CP1_2", + "associatedVnfcCpId": "VDU1_CP1-res_id_VDU1_2" + }, + { + "id": "cp-res_id_VDU1_CP2_1", + "cpdId": "VDU1_CP2", + "cpConfigId": "VDU1_CP2_1", + "cpProtocolInfo": [ + { + "layerProtocol": "IP_OVER_ETHERNET", + "ipOverEthernet": { + "ipAddresses": [ + { + "type": "IPV4", + "isDynamic": True, + "subnetId": "res_id_subnet_1" + } + ] + } + } + ], + "extLinkPortId": "res_id_VDU1_CP2_1", + "associatedVnfcCpId": "VDU1_CP2-res_id_VDU1_1" + }, + { + "id": "cp-res_id_VDU1_CP2_2", + "cpdId": "VDU1_CP2", + "cpConfigId": "VDU1_CP2_1", + "cpProtocolInfo": [ + { + "layerProtocol": "IP_OVER_ETHERNET", + "ipOverEthernet": { 
+ "ipAddresses": [ + { + "type": "IPV4", + "isDynamic": True, + "subnetId": "res_id_subnet_1" + } + ] + } + } + ], + "extLinkPortId": "res_id_VDU1_CP2_2", + "associatedVnfcCpId": "VDU1_CP2-res_id_VDU1_2" + }, + { + "id": "cp-res_id_VDU2_CP1", + "cpdId": "VDU2_CP1", + "cpConfigId": "VDU2_CP1_1", + "cpProtocolInfo": [ + { + "layerProtocol": "IP_OVER_ETHERNET", + "ipOverEthernet": { + "ipAddresses": [ + { + "type": "IPV4", + "addresses": [ + "10.10.0.102" + ] + } + ] + } + } + ], + "extLinkPortId": "res_id_VDU2_CP1", + "associatedVnfcCpId": "VDU2_CP1-res_id_VDU2" + } + ], + "extVirtualLinkInfo": [ + { + "id": "id_ext_vl_1", + "resourceHandle": { + "resourceId": "res_id_ext_vl_1" + }, + "extLinkPorts": [ + { + "id": "res_id_VDU1_CP1_1", + "resourceHandle": { + "vimConnectionId": "vim_id_1", + "resourceId": "res_id_VDU1_CP1_1", + "vimLevelResourceType": "OS::Neutron::Port" + }, + "cpInstanceId": "cp-res_id_VDU1_CP1_1" + }, + { + "id": "res_id_VDU1_CP1_2", + "resourceHandle": { + "vimConnectionId": "vim_id_1", + "resourceId": "res_id_VDU1_CP1_2", + "vimLevelResourceType": "OS::Neutron::Port" + }, + "cpInstanceId": "cp-res_id_VDU1_CP1_2" + }, + { + "id": "res_id_VDU2_CP1", + "resourceHandle": { + "vimConnectionId": "vim_id_1", + "resourceId": "res_id_VDU2_CP1", + "vimLevelResourceType": "OS::Neutron::Port" + }, + "cpInstanceId": "cp-res_id_VDU2_CP1" + } + ], + "currentVnfExtCpData": [ + { + "cpdId": "VDU1_CP1", + "cpConfig": { + "VDU1_CP1_1": { + "cpProtocolData": [ + { + "layerProtocol": "IP_OVER_ETHERNET", + "ipOverEthernet": { + "ipAddresses": [ + { + "type": "IPV4", + "numDynamicAddresses": 1 + } + ] + } + } + ] + } + } + }, + { + "cpdId": "VDU2_CP1", + "cpConfig": { + "VDU2_CP1_1": { + "cpProtocolData": [ + { + "layerProtocol": "IP_OVER_ETHERNET", + "ipOverEthernet": { + "ipAddresses": [ + { + "type": "IPV4", + "fixedAddresses": [ + "10.10.0.102" + ] + } + ] + } + } + ] + } + } + } + ] + }, + { + "id": "id_ext_vl_2", + "resourceHandle": { + "resourceId": 
"res_id_id_ext_vl_2" + }, + "extLinkPorts": [ + { + "id": "req-link_port_id_VDU2_CP2", + "resourceHandle": { + "resourceId": "res_id_VDU2_CP2", + }, + "cpInstanceId": "cp-req-link_port_id_VDU2_CP2" + }, + { + "id": "res_id_VDU1_CP2_1", + "resourceHandle": { + "vimConnectionId": "vim_id_1", + "resourceId": "res_id_VDU1_CP2_1", + "vimLevelResourceType": "OS::Neutron::Port" + }, + "cpInstanceId": "cp-res_id_VDU1_CP2_1" + }, + { + "id": "res_id_VDU1_CP2_2", + "resourceHandle": { + "vimConnectionId": "vim_id_1", + "resourceId": "res_id_VDU1_CP2_2", + "vimLevelResourceType": "OS::Neutron::Port" + }, + "cpInstanceId": "cp-res_id_VDU1_CP2_2" + } + ], + "currentVnfExtCpData": [ + { + "cpdId": "VDU1_CP2", + "cpConfig": { + "VDU1_CP2_1": { + "cpProtocolData": [ + { + "layerProtocol": "IP_OVER_ETHERNET", + "ipOverEthernet": { + "ipAddresses": [ + { + "type": "IPV4", + "numDynamicAddresses": 1, + "subnetId": "res_id_subnet_1" + } + ] + } + } + ] + } + } + }, + { + "cpdId": "VDU2_CP2", + "cpConfig": { + "VDU2_CP2_1": { + "linkPortId": "link_port_id_VDU2_CP2" + } + } + } + ] + } + ], + "extManagedVirtualLinkInfo": [ + { + "id": "id_ext_mgd_1", + "vnfVirtualLinkDescId": "internalVL1", + "networkResource": { + "resourceId": "res_id_internalVL1" + }, + "vnfLinkPorts": [ + { + "id": "res_id_VDU1_CP3_1", + "resourceHandle": { + "vimConnectionId": "vim_id_1", + "resourceId": "res_id_VDU1_CP3_1", + "vimLevelResourceType": "OS::Neutron::Port" + }, + "cpInstanceId": "VDU1_CP3-res_id_VDU1_1", + "cpInstanceType": "VNFC_CP" + }, + { + "id": "res_id_VDU1_CP3_2", + "resourceHandle": { + "vimConnectionId": "vim_id_1", + "resourceId": "res_id_VDU1_CP3_2", + "vimLevelResourceType": "OS::Neutron::Port" + }, + "cpInstanceId": "VDU1_CP3-res_id_VDU1_2", + "cpInstanceType": "VNFC_CP" + }, + { + "id": "res_id_VDU2_CP3", + "resourceHandle": { + "vimConnectionId": "vim_id_1", + "resourceId": "res_id_VDU2_CP3", + "vimLevelResourceType": "OS::Neutron::Port" + }, + "cpInstanceId": "VDU2_CP3-res_id_VDU2", + 
"cpInstanceType": "VNFC_CP" + } + ] + } + ], + "vnfcResourceInfo": [ + { + "id": "res_id_VDU1_2", + "vduId": "VDU1", + "computeResource": { + "vimConnectionId": "vim_id_1", + "resourceId": "res_id_VDU1_2", + "vimLevelResourceType": "OS::Nova::Server" + }, + "storageResourceIds": [ + "res_id_VirtualStorage_2" + ], + "vnfcCpInfo": [ + { + "id": "VDU1_CP1-res_id_VDU1_2", + "cpdId": "VDU1_CP1", + "vnfExtCpId": "cp-res_id_VDU1_CP1_2" + }, + { + "id": "VDU1_CP2-res_id_VDU1_2", + "cpdId": "VDU1_CP2", + "vnfExtCpId": "cp-res_id_VDU1_CP2_2" + }, + { + "id": "VDU1_CP3-res_id_VDU1_2", + "cpdId": "VDU1_CP3", + "vnfLinkPortId": "res_id_VDU1_CP3_2" + }, + { + "id": "VDU1_CP4-res_id_VDU1_2", + "cpdId": "VDU1_CP4", + "vnfLinkPortId": "res_id_VDU1_CP4_2" + }, + { + "id": "VDU1_CP5-res_id_VDU1_2", + "cpdId": "VDU1_CP5", + "vnfLinkPortId": "res_id_VDU1_CP5_2" + } + ], + "metadata": { + "creation_time": "2021-12-10T01:03:49Z", + "parent_stack_id": _stack_id_VDU1_scale, + "parent_resource_name": "myet4efobvvp" + } + }, + { + "id": "res_id_VDU1_1", + "vduId": "VDU1", + "computeResource": { + "vimConnectionId": "vim_id_1", + "resourceId": "res_id_VDU1_1", + "vimLevelResourceType": "OS::Nova::Server" + }, + "storageResourceIds": [ + "res_id_VirtualStorage_1" + ], + "vnfcCpInfo": [ + { + "id": "VDU1_CP1-res_id_VDU1_1", + "cpdId": "VDU1_CP1", + "vnfExtCpId": "cp-res_id_VDU1_CP1_1" + }, + { + "id": "VDU1_CP2-res_id_VDU1_1", + "cpdId": "VDU1_CP2", + "vnfExtCpId": "cp-res_id_VDU1_CP2_1" + }, + { + "id": "VDU1_CP3-res_id_VDU1_1", + "cpdId": "VDU1_CP3", + "vnfLinkPortId": "res_id_VDU1_CP3_1" + }, + { + "id": "VDU1_CP4-res_id_VDU1_1", + "cpdId": "VDU1_CP4", + "vnfLinkPortId": "res_id_VDU1_CP4_1" + }, + { + "id": "VDU1_CP5-res_id_VDU1_1", + "cpdId": "VDU1_CP5", + "vnfLinkPortId": "res_id_VDU1_CP5_1" + } + ], + "metadata": { + "creation_time": "2021-12-10T00:41:43Z", + "parent_stack_id": _stack_id_VDU1_scale, + "parent_resource_name": "bemybz4ugeso" + } + }, + { + "id": "res_id_VDU2", + "vduId": 
"VDU2", + "computeResource": { + "vimConnectionId": "vim_id_1", + "resourceId": "res_id_VDU2", + "vimLevelResourceType": "OS::Nova::Server" + }, + "vnfcCpInfo": [ + { + "id": "VDU2_CP1-res_id_VDU2", + "cpdId": "VDU2_CP1", + "vnfExtCpId": "cp-res_id_VDU2_CP1" + }, + { + "id": "VDU2_CP2-res_id_VDU2", + "cpdId": "VDU2_CP2", + # "vnfExtCpId" does not exist since it is specified by + # linkPortIds. + }, + { + "id": "VDU2_CP3-res_id_VDU2", + "cpdId": "VDU2_CP3", + "vnfLinkPortId": "res_id_VDU2_CP3" + }, + { + "id": "VDU2_CP4-res_id_VDU2", + "cpdId": "VDU2_CP4", + "vnfLinkPortId": "res_id_VDU2_CP4" + }, + { + "id": "VDU2_CP5-res_id_VDU2", + "cpdId": "VDU2_CP5", + "vnfLinkPortId": "res_id_VDU2_CP5" + } + ], + "metadata": { + "creation_time": "2021-12-10T00:40:46Z" + } + } + ], + "vnfVirtualLinkResourceInfo": [ + { + "id": "res_id_internalVL2", + "vnfVirtualLinkDescId": "internalVL2", + "networkResource": { + "vimConnectionId": "vim_id_1", + "resourceId": "res_id_internalVL2", + "vimLevelResourceType": "OS::Neutron::Net" + }, + "vnfLinkPorts": [ + { + "id": "res_id_VDU1_CP4_1", + "resourceHandle": { + "vimConnectionId": "vim_id_1", + "resourceId": "res_id_VDU1_CP4_1", + "vimLevelResourceType": "OS::Neutron::Port" + }, + "cpInstanceId": "VDU1_CP4-res_id_VDU1_1", + "cpInstanceType": "VNFC_CP" + }, + { + "id": "res_id_VDU1_CP4_2", + "resourceHandle": { + "vimConnectionId": "vim_id_1", + "resourceId": "res_id_VDU1_CP4_2", + "vimLevelResourceType": "OS::Neutron::Port" + }, + "cpInstanceId": "VDU1_CP4-res_id_VDU1_2", + "cpInstanceType": "VNFC_CP" + }, + { + "id": "res_id_VDU2_CP4", + "resourceHandle": { + "vimConnectionId": "vim_id_1", + "resourceId": "res_id_VDU2_CP4", + "vimLevelResourceType": "OS::Neutron::Port" + }, + "cpInstanceId": "VDU2_CP4-res_id_VDU2", + "cpInstanceType": "VNFC_CP" + } + ] + }, + { + "id": "res_id_internalVL3", + "vnfVirtualLinkDescId": "internalVL3", + "networkResource": { + "vimConnectionId": "vim_id_1", + "resourceId": "res_id_internalVL3", + 
"vimLevelResourceType": "OS::Neutron::Net" + }, + "vnfLinkPorts": [ + { + "id": "res_id_VDU1_CP5_1", + "resourceHandle": { + "vimConnectionId": "vim_id_1", + "resourceId": "res_id_VDU1_CP5_1", + "vimLevelResourceType": "OS::Neutron::Port" + }, + "cpInstanceId": "VDU1_CP5-res_id_VDU1_1", + "cpInstanceType": "VNFC_CP" + }, + { + "id": "res_id_VDU1_CP5_2", + "resourceHandle": { + "vimConnectionId": "vim_id_1", + "resourceId": "res_id_VDU1_CP5_2", + "vimLevelResourceType": "OS::Neutron::Port" + }, + "cpInstanceId": "VDU1_CP5-res_id_VDU1_2", + "cpInstanceType": "VNFC_CP" + }, + { + "id": "res_id_VDU2_CP5", + "resourceHandle": { + "vimConnectionId": "vim_id_1", + "resourceId": "res_id_VDU2_CP5", + "vimLevelResourceType": "OS::Neutron::Port" + }, + "cpInstanceId": "VDU2_CP5-res_id_VDU2", + "cpInstanceType": "VNFC_CP" + } + ] + } + ], + "virtualStorageResourceInfo": [ + { + "id": "res_id_VirtualStorage_1", + "virtualStorageDescId": "VirtualStorage", + "storageResource": { + "vimConnectionId": "vim_id_1", + "resourceId": "res_id_VirtualStorage_1", + "vimLevelResourceType": "OS::Cinder::Volume" + } + }, + { + "id": "res_id_VirtualStorage_2", + "virtualStorageDescId": "VirtualStorage", + "storageResource": { + "vimConnectionId": "vim_id_1", + "resourceId": "res_id_VirtualStorage_2", + "vimLevelResourceType": "OS::Cinder::Volume" + } + } + ], + "vnfcInfo": [ + { + "id": "VDU1-res_id_VDU1_2", + "vduId": "VDU1", + "vnfcResourceInfoId": "res_id_VDU1_2", + "vnfcState": "STARTED" + }, + { + "id": "VDU1-res_id_VDU1_1", + "vduId": "VDU1", + "vnfcResourceInfoId": "res_id_VDU1_1", + "vnfcState": "STARTED" + }, + { + "id": "VDU2-res_id_VDU2", + "vduId": "VDU2", + "vnfcResourceInfoId": "res_id_VDU2", + "vnfcState": "STARTED" + } + ] +} + + +class TestOpenstack(base.BaseTestCase): + + def setUp(self): + super(TestOpenstack, self).setUp() + objects.register_all() + self.driver = openstack.Openstack() + self.context = context.get_admin_context() + + cur_dir = os.path.dirname(__file__) + 
sample_dir = os.path.join(cur_dir, "../..", "samples") + + self.vnfd_1 = vnfd_utils.Vnfd(SAMPLE_VNFD_ID) + self.vnfd_1.init_from_csar_dir(os.path.join(sample_dir, "sample1")) + + def _check_inst_info(self, expected, result): + # sort lists before compare with an expected result since + # order of list items is unpredictable. + # note that an expected_result is already sorted. + def _get_key(obj): + return obj['id'] + + if "extCpInfo" in expected: + self.assertIn("extCpInfo", result) + result["extCpInfo"].sort(key=_get_key) + # assume len(cpProtocolInfo) == 1 + self.assertEqual(expected["extCpInfo"], result["extCpInfo"]) + + if "extVirtualLinkInfo" in expected: + self.assertIn("extVirtualLinkInfo", result) + for ext_vl in result["extVirtualLinkInfo"]: + if "extLinkPorts" in ext_vl: + ext_vl["extLinkPorts"].sort(key=_get_key) + # order of currentVnfExtCpData is same as order of + # instantiateVnfRequest + self.assertEqual(expected["extVirtualLinkInfo"], + result["extVirtualLinkInfo"]) + + if "extManagedVirtualLinkInfo" in expected: + self.assertIn("extManagedVirtualLinkInfo", result) + for ext_mgd in result["extManagedVirtualLinkInfo"]: + if "vnfLinkPorts" in ext_mgd: + ext_mgd["vnfLinkPorts"].sort(key=_get_key) + self.assertEqual(expected["extManagedVirtualLinkInfo"], + result["extManagedVirtualLinkInfo"]) + + # vnfcResourceInfo is sorted by creation_time (reverse) + if "vnfcResourceInfo" in expected: + self.assertIn("vnfcResourceInfo", result) + for vnfc in result["vnfcResourceInfo"]: + if "storageResourceIds" in vnfc: + vnfc["storageResourceIds"].sort() + if "vnfcCpInfo" in vnfc: + vnfc["vnfcCpInfo"].sort(key=_get_key) + self.assertEqual(expected["vnfcResourceInfo"], + result["vnfcResourceInfo"]) + + if "vnfVirtualLinkResourceInfo" in expected: + self.assertIn("vnfVirtualLinkResourceInfo", result) + result["vnfVirtualLinkResourceInfo"].sort(key=_get_key) + for vl_info in result["vnfVirtualLinkResourceInfo"]: + if "vnfLinkPorts" in vl_info: + 
vl_info["vnfLinkPorts"].sort(key=_get_key) + self.assertEqual(expected["vnfVirtualLinkResourceInfo"], + result["vnfVirtualLinkResourceInfo"]) + + if "virtualStorageResourceInfo" in expected: + self.assertIn("virtualStorageResourceInfo", result) + result["virtualStorageResourceInfo"].sort(key=_get_key) + self.assertEqual(expected["virtualStorageResourceInfo"], + result["virtualStorageResourceInfo"]) + + # order of vnfcInfo is same as vnfcResourceInfo + if "vnfcInfo" in expected: + self.assertIn("vnfcInfo", result) + self.assertEqual(expected["vnfcInfo"], result["vnfcInfo"]) + + def test_make_instantiated_vnf_info_new(self): + # prepare + req = objects.InstantiateVnfRequestV2.from_dict( + _instantiate_req_example) + inst = objects.VnfInstanceV2( + vimConnectionInfo=req.vimConnectionInfo + ) + grant_req = objects.GrantRequestV1( + operation=fields.LcmOperationType.INSTANTIATE + ) + grant = objects.GrantV1() + + # execute make_instantiated_vnf_info + self.driver._make_instantiated_vnf_info(req, inst, grant_req, grant, + self.vnfd_1, _heat_reses_example) + + # check + result = inst.to_dict()["instantiatedVnfInfo"] + self._check_inst_info(_expected_inst_info, result) + + def test_make_instantiated_vnf_info_update(self): + # prepare + req = None # not used + inst_info = objects.VnfInstanceV2_InstantiatedVnfInfo.from_dict( + _expected_inst_info) + vim_info = { + "vim1": objects.VimConnectionInfo.from_dict( + _vim_connection_info_example) + } + inst = objects.VnfInstanceV2( + instantiatedVnfInfo=inst_info, + vimConnectionInfo=vim_info + ) + grant_req = objects.GrantRequestV1( + operation=fields.LcmOperationType.SCALE + ) + grant = objects.GrantV1() + + # execute make_instantiated_vnf_info + self.driver._make_instantiated_vnf_info(req, inst, grant_req, grant, + self.vnfd_1, _heat_reses_example) + + # check + result = inst.to_dict()["instantiatedVnfInfo"] + self._check_inst_info(_expected_inst_info, result) diff --git 
a/tacker/tests/unit/sol_refactored/infra_drivers/openstack/test_userdata_utils.py b/tacker/tests/unit/sol_refactored/infra_drivers/openstack/test_userdata_utils.py index 967d47763..2d20768b8 100644 --- a/tacker/tests/unit/sol_refactored/infra_drivers/openstack/test_userdata_utils.py +++ b/tacker/tests/unit/sol_refactored/infra_drivers/openstack/test_userdata_utils.py @@ -23,10 +23,10 @@ SAMPLE_VNFD_ID = "b1bb0ce7-ebca-4fa7-95ed-4840d7000000" SAMPLE_FLAVOUR_ID = "simple" -class TestVnfd(base.BaseTestCase): +class TestUserDataUtils(base.BaseTestCase): def setUp(self): - super(TestVnfd, self).setUp() + super(TestUserDataUtils, self).setUp() cur_dir = os.path.dirname(__file__) sample_dir = os.path.join(cur_dir, "../..", "samples") @@ -58,7 +58,6 @@ class TestVnfd(base.BaseTestCase): self.assertEqual(expected_result, result) def test_get_param_flavor(self): - req = {'flavourId': SAMPLE_FLAVOUR_ID} flavor = 'm1.large' grant = { 'vimAssets': { @@ -69,17 +68,16 @@ class TestVnfd(base.BaseTestCase): } } - result = userdata_utils.get_param_flavor('VDU1', req, + result = userdata_utils.get_param_flavor('VDU1', SAMPLE_FLAVOUR_ID, self.vnfd_1, grant) self.assertEqual(flavor, result) # if not exist in grant, get from VNFD - result = userdata_utils.get_param_flavor('VDU2', req, + result = userdata_utils.get_param_flavor('VDU2', SAMPLE_FLAVOUR_ID, self.vnfd_1, grant) self.assertEqual('m1.tiny', result) def test_get_param_image(self): - req = {'flavourId': SAMPLE_FLAVOUR_ID} image_id = 'f30e149d-b3c7-497a-8b19-a092bc81e47b' grant = { 'vimAssets': { @@ -92,7 +90,7 @@ class TestVnfd(base.BaseTestCase): } } - result = userdata_utils.get_param_image('VDU2', req, + result = userdata_utils.get_param_image('VDU2', SAMPLE_FLAVOUR_ID, self.vnfd_1, grant) self.assertEqual(image_id, result) @@ -118,6 +116,40 @@ class TestVnfd(base.BaseTestCase): result = userdata_utils.get_param_zone('VDU1', grant_req, grant) self.assertEqual('nova', result) + def test_get_param_capacity(self): + # test 
get_current_capacity at the same time + grant_req = { + 'addResources': [ + {'id': 'dd60c89a-29a2-43bc-8cff-a534515523df', + 'type': 'COMPUTE', 'resourceTemplateId': 'VDU1'}, + {'id': '49b99140-c897-478c-83fa-ba3698912b18', + 'type': 'COMPUTE', 'resourceTemplateId': 'VDU1'}, + {'id': 'b03c4b75-ca17-4773-8a50-9a53df78a007', + 'type': 'COMPUTE', 'resourceTemplateId': 'VDU2'} + ], + 'removeResources': [ + {'id': '0837249d-ac2a-4963-bf98-bc0755eec663', + 'type': 'COMPUTE', 'resourceTemplateId': 'VDU1'}, + {'id': '3904e9d1-c0ec-4c3c-b29e-c8942a20f866', + 'type': 'COMPUTE', 'resourceTemplateId': 'VDU2'} + ] + } + inst = { + 'instantiatedVnfInfo': { + 'vnfcResourceInfo': [ + {'id': 'cdf36e11-f6ca-4c80-aaf1-0d2e764a2f3a', + 'vduId': 'VDU2'}, + {'id': 'c8cb522d-ddf8-4136-9c85-92bab8f2993d', + 'vduId': 'VDU1'} + ] + } + } + + result = userdata_utils.get_param_capacity('VDU1', inst, grant_req) + self.assertEqual(2, result) + result = userdata_utils.get_param_capacity('VDU2', inst, grant_req) + self.assertEqual(1, result) + def test_get_parama_network(self): res_id = "8fe7cc1a-e4ac-41b9-8b89-ed14689adb9c" req = { diff --git a/tacker/tests/unit/sol_refactored/samples/sample1/BaseHOT/simple/ut_sample1.yaml b/tacker/tests/unit/sol_refactored/samples/sample1/BaseHOT/simple/ut_sample1.yaml index a8e6d0317..50b94f452 100644 --- a/tacker/tests/unit/sol_refactored/samples/sample1/BaseHOT/simple/ut_sample1.yaml +++ b/tacker/tests/unit/sol_refactored/samples/sample1/BaseHOT/simple/ut_sample1.yaml @@ -24,20 +24,6 @@ resources: net4: { get_resource: internalVL2 } net5: { get_resource: internalVL3 } affinity: { get_resource: nfvi_node_affinity } - VDU1_scale_out: - type: OS::Heat::ScalingPolicy - properties: - scaling_adjustment: 1 - auto_scaling_group_id: - get_resource: VDU1_scale_group - adjustment_type: change_in_capacity - VDU1_scale_in: - type: OS::Heat::ScalingPolicy - properties: - scaling_adjustment: -1 - auto_scaling_group_id: - get_resource: VDU1_scale_group - adjustment_type: 
change_in_capacity VDU2: type: OS::Nova::Server diff --git a/tacker/tests/unit/sol_refactored/samples/sample1/Definitions/ut_sample1_df_simple.yaml b/tacker/tests/unit/sol_refactored/samples/sample1/Definitions/ut_sample1_df_simple.yaml index 2e988307a..7a78eb0a8 100644 --- a/tacker/tests/unit/sol_refactored/samples/sample1/Definitions/ut_sample1_df_simple.yaml +++ b/tacker/tests/unit/sol_refactored/samples/sample1/Definitions/ut_sample1_df_simple.yaml @@ -302,6 +302,12 @@ topology_template: max_scale_level: 2 step_deltas: - delta_1 + Invalid_scale: + name: Invalid_scale + description: Invalid scaling aspect + max_scale_level: 2 + step_deltas: + - delta_missing - VDU1_initial_delta: type: tosca.policies.nfv.VduInitialDelta @@ -326,6 +332,15 @@ topology_template: number_of_instances: 1 targets: [ VDU1 ] + - VDU1_scaling_aspect_deltas: + type: tosca.policies.nfv.VduScalingAspectDeltas + properties: + aspect: Invalid_scale + deltas: + delta_dummy: # delta_missing is missing + number_of_instances: 1 + targets: [ VDU2 ] + - instantiation_levels: type: tosca.policies.nfv.InstantiationLevels properties: diff --git a/tacker/tests/unit/sol_refactored/samples/sample1/UserData/userdata_default.py b/tacker/tests/unit/sol_refactored/samples/sample1/UserData/userdata_default.py index 6322aae5d..114f5aac6 100644 --- a/tacker/tests/unit/sol_refactored/samples/sample1/UserData/userdata_default.py +++ b/tacker/tests/unit/sol_refactored/samples/sample1/UserData/userdata_default.py @@ -36,14 +36,18 @@ class DefaultUserData(userdata_utils.AbstractUserData): if 'computeFlavourId' in vdu_value: vdu_value['computeFlavourId'] = ( userdata_utils.get_param_flavor( - vdu_name, req, vnfd, grant)) + vdu_name, flavour_id, vnfd, grant)) if 'vcImageId' in vdu_value: vdu_value['vcImageId'] = userdata_utils.get_param_image( - vdu_name, req, vnfd, grant) + vdu_name, flavour_id, vnfd, grant) if 'locationConstraints' in vdu_value: vdu_value['locationConstraints'] = ( userdata_utils.get_param_zone( 
vdu_name, grant_req, grant)) + if 'desired_capacity' in vdu_value: + vdu_value['desired_capacity'] = ( + userdata_utils.get_param_capacity( + vdu_name, inst, grant_req)) cps = nfv_dict.get('CP', {}) for cp_name, cp_value in cps.items(): @@ -84,3 +88,32 @@ class DefaultUserData(userdata_utils.AbstractUserData): fields['files'][key] = yaml.safe_dump(value) return fields + + @staticmethod + def scale(req, inst, grant_req, grant, tmp_csar_dir): + # scale is interested in 'desired_capacity' only. + # This method returns only 'desired_capacity' part in the + # 'nfv' dict. It is applied to json merge patch against + # the existing 'nfv' dict by the caller. + # NOTE: complete 'nfv' dict can not be made at the moment + # since InstantiateVnfRequest is necessary to make it. + + vnfd = userdata_utils.get_vnfd(inst['vnfdId'], tmp_csar_dir) + flavour_id = inst['instantiatedVnfInfo']['flavourId'] + + hot_dict = vnfd.get_base_hot(flavour_id) + top_hot = hot_dict['template'] + + nfv_dict = userdata_utils.init_nfv_dict(top_hot) + + vdus = nfv_dict.get('VDU', {}) + new_vdus = {} + for vdu_name, vdu_value in vdus.items(): + if 'desired_capacity' in vdu_value: + capacity = userdata_utils.get_param_capacity( + vdu_name, inst, grant_req) + new_vdus[vdu_name] = {'desired_capacity': capacity} + + fields = {'parameters': {'nfv': {'VDU': new_vdus}}} + + return fields