Browse Source

Merge "support scale VNF task of v2 API"

changes/33/825833/1
Zuul 4 months ago committed by Gerrit Code Review
parent
commit
433d9b3425
  1. 5
      releasenotes/notes/add-v2-scale-api-b60e8fe329f6038b.yaml
  2. 9
      tacker/sol_refactored/api/policies/vnflcm_v2.py
  3. 16
      tacker/sol_refactored/api/schemas/vnflcm_v2.py
  4. 17
      tacker/sol_refactored/common/exceptions.py
  5. 285
      tacker/sol_refactored/common/lcm_op_occ_utils.py
  6. 5
      tacker/sol_refactored/common/vnf_instance_utils.py
  7. 56
      tacker/sol_refactored/common/vnfd_utils.py
  8. 416
      tacker/sol_refactored/conductor/vnflcm_driver_v2.py
  9. 95
      tacker/sol_refactored/controller/vnflcm_v2.py
  10. 32
      tacker/sol_refactored/infra_drivers/openstack/heat_utils.py
  11. 780
      tacker/sol_refactored/infra_drivers/openstack/openstack.py
  12. 64
      tacker/sol_refactored/infra_drivers/openstack/userdata_default.py
  13. 11
      tacker/sol_refactored/infra_drivers/openstack/userdata_main.py
  14. 71
      tacker/sol_refactored/infra_drivers/openstack/userdata_utils.py
  15. 15
      tacker/sol_refactored/test-tools/cli.py
  16. 18
      tacker/tests/functional/sol_v2/samples/sample1/contents/BaseHOT/simple/sample1.yaml
  17. 8
      tacker/tests/functional/sol_v2/samples/sample1/contents/UserData/userdata.py
  18. 19
      tacker/tests/functional/sol_v2/samples/sample2/contents/BaseHOT/simple/sample2.yaml
  19. 1205
      tacker/tests/unit/sol_refactored/common/test_lcm_op_occ_utils.py
  20. 26
      tacker/tests/unit/sol_refactored/common/test_vnfd_utils.py
  21. 229
      tacker/tests/unit/sol_refactored/conductor/test_vnflcm_driver_v2.py
  22. 92
      tacker/tests/unit/sol_refactored/controller/test_vnflcm_v2.py
  23. 1328
      tacker/tests/unit/sol_refactored/infra_drivers/openstack/test_openstack.py
  24. 46
      tacker/tests/unit/sol_refactored/infra_drivers/openstack/test_userdata_utils.py
  25. 14
      tacker/tests/unit/sol_refactored/samples/sample1/BaseHOT/simple/ut_sample1.yaml
  26. 15
      tacker/tests/unit/sol_refactored/samples/sample1/Definitions/ut_sample1_df_simple.yaml
  27. 37
      tacker/tests/unit/sol_refactored/samples/sample1/UserData/userdata_default.py

5
releasenotes/notes/add-v2-scale-api-b60e8fe329f6038b.yaml

@ -0,0 +1,5 @@
---
features:
- |
Add version "2.0.0" of the Scale VNF API,
based on ETSI NFV specifications.

9
tacker/sol_refactored/api/policies/vnflcm_v2.py

@ -93,6 +93,15 @@ rules = [
'path': VNF_INSTANCES_ID_PATH + '/terminate'}
]
),
policy.DocumentedRuleDefault(
name=POLICY_NAME.format('scale'),
check_str=RULE_ANY,
description="Scale vnf instance.",
operations=[
{'method': 'POST',
'path': VNF_INSTANCES_ID_PATH + '/scale'}
]
),
# TODO(oda-g): add more lcm operations etc when implemented.

16
tacker/sol_refactored/api/schemas/vnflcm_v2.py

@ -77,6 +77,22 @@ TerminateVnfRequest_V200 = {
'additionalProperties': True,
}
# SOL003 5.5.2.5
# JSON schema for the Scale VNF request body (POST .../vnf_instances/{id}/scale).
ScaleVnfRequest_V200 = {
'type': 'object',
'properties': {
# scaling direction
'type': {
'type': 'string',
'enum': ['SCALE_OUT', 'SCALE_IN']
},
'aspectId': common_types.IdentifierInVnfd,
# number of scale steps to apply; at least one
'numberOfSteps': {'type': 'integer', 'minimum': 1},
'additionalParams': parameter_types.keyvalue_pairs,
},
'required': ['type', 'aspectId'],
'additionalProperties': True,
}
# SOL013 8.3.4
_SubscriptionAuthentication = {
'type': 'object',

17
tacker/sol_refactored/common/exceptions.py

@ -223,3 +223,20 @@ class GrantRequestOrGrantNotFound(SolHttpError404):
class RollbackNotSupported(SolHttpError422):
message = _("Rollback of %(op)s is not supported.")
# 422: a VDU definition lacks the required parent resource.
class UnexpectedParentResourceDefinition(SolHttpError422):
message = _("Parent resource is necessary for VDU definition.")
# 400: a scale request refers to an aspectId not defined in the VNFD.
class InvalidScaleAspectId(SolHttpError400):
message = _("Invalid aspectId '%(aspect_id)s'.")
# 400: a scale request's numberOfSteps is invalid.
class InvalidScaleNumberOfSteps(SolHttpError400):
message = _("Invalid numberOfSteps '%(num_steps)d'.")
# 400: a step_delta named in ScalingAspects has no corresponding entry
# in VduScalingAspectDeltas in the VNFD.
class DeltaMissingInVnfd(SolHttpError400):
message = _("Delta '%(delta)s' is not defined in "
"VduScalingAspectDeltas.")

285
tacker/sol_refactored/common/lcm_op_occ_utils.py

@ -117,78 +117,239 @@ def make_lcmocc_notif_data(subsc, lcmocc, endpoint):
return notif_data
def _make_instantiate_lcmocc(lcmocc, inst, change_type):
def _make_affected_vnfc(vnfc, change_type):
    """Build an AffectedVnfcV2 for *vnfc* with the given change type.

    change_type is 'ADDED' or 'REMOVED'; it decides which storage
    resource id attribute is populated.
    """
    affected = objects.AffectedVnfcV2(
        id=vnfc.id,
        vduId=vnfc.vduId,
        changeType=change_type,
        computeResource=vnfc.computeResource
    )
    if vnfc.obj_attr_is_set('vnfcCpInfo'):
        affected.affectedVnfcCpIds = [cp.id for cp in vnfc.vnfcCpInfo]
    if vnfc.obj_attr_is_set('storageResourceIds'):
        storage_ids = vnfc.storageResourceIds
        if change_type == 'ADDED':
            affected.addedStorageResourceIds = storage_ids
        else:  # 'REMOVED'
            affected.removedStorageResourceIds = storage_ids
    return affected
def _make_affected_vl(vl, change_type):
    """Build an AffectedVirtualLinkV2 for *vl* with the given change type."""
    affected = objects.AffectedVirtualLinkV2(
        id=vl.id,
        vnfVirtualLinkDescId=vl.vnfVirtualLinkDescId,
        changeType=change_type,
        networkResource=vl.networkResource
    )
    if vl.obj_attr_is_set('vnfLinkPorts'):
        affected.vnfLinkPortIds = [port.id for port in vl.vnfLinkPorts]
    return affected
def _make_affected_vls_link_port_change(vls_saved, vls, common_vls):
# Build AffectedVirtualLinkV2 entries (LINK_PORT_ADDED / LINK_PORT_REMOVED)
# for virtual links that exist both before and after the operation but
# whose vnfLinkPorts changed.
affected_vls = []
for vl_id in common_vls:
old_ports = set()
new_ports = set()
# collect port ids of the saved (pre-operation) virtual link.
for vl in vls_saved:
if vl.id == vl_id:
old_vl = vl
if vl.obj_attr_is_set('vnfLinkPorts'):
old_ports = {port.id for port in vl.vnfLinkPorts}
# collect port ids of the current (post-operation) virtual link.
for vl in vls:
if vl.id == vl_id:
new_vl = vl
if vl.obj_attr_is_set('vnfLinkPorts'):
new_ports = {port.id for port in vl.vnfLinkPorts}
add_ports = new_ports - old_ports
rm_ports = old_ports - new_ports
# assume add_ports and rm_ports never occur at the same time.
if add_ports:
affected_vl = objects.AffectedVirtualLinkV2(
id=new_vl.id,
vnfVirtualLinkDescId=new_vl.vnfVirtualLinkDescId,
changeType='LINK_PORT_ADDED',
networkResource=new_vl.networkResource,
vnfLinkPortIds=list(add_ports)
)
affected_vls.append(affected_vl)
elif rm_ports:
affected_vl = objects.AffectedVirtualLinkV2(
id=old_vl.id,
vnfVirtualLinkDescId=old_vl.vnfVirtualLinkDescId,
changeType='LINK_PORT_REMOVED',
networkResource=old_vl.networkResource,
vnfLinkPortIds=list(rm_ports)
)
affected_vls.append(affected_vl)
return affected_vls
def _make_affected_strg(strg, change_type):
    """Build an AffectedVirtualStorageV2 for *strg* with *change_type*."""
    affected = objects.AffectedVirtualStorageV2(
        id=strg.id,
        virtualStorageDescId=strg.virtualStorageDescId,
        changeType=change_type,
        storageResource=strg.storageResource
    )
    return affected
def _make_affected_ext_link_ports(inst_info_saved, inst_info):
# Diff external VL link port ids between the saved and the current
# instantiatedVnfInfo and build AffectedExtLinkPortV2 entries for the
# added and removed ports.
affected_ext_link_ports = []
ext_vl_ports_saved = set()
ext_vl_ports = set()
if inst_info_saved.obj_attr_is_set('extVirtualLinkInfo'):
for ext_vl in inst_info_saved.extVirtualLinkInfo:
if ext_vl.obj_attr_is_set('extLinkPorts'):
ext_vl_ports_saved |= {port.id
for port in ext_vl.extLinkPorts}
if inst_info.obj_attr_is_set('extVirtualLinkInfo'):
for ext_vl in inst_info.extVirtualLinkInfo:
if ext_vl.obj_attr_is_set('extLinkPorts'):
ext_vl_ports |= {port.id
for port in ext_vl.extLinkPorts}
add_ext_vl_ports = ext_vl_ports - ext_vl_ports_saved
rm_ext_vl_ports = ext_vl_ports_saved - ext_vl_ports
if add_ext_vl_ports:
# ports present only in the current info are reported as ADDED.
for ext_vl in inst_info.extVirtualLinkInfo:
if not ext_vl.obj_attr_is_set('extLinkPorts'):
continue
affected_ext_link_ports += [
objects.AffectedExtLinkPortV2(
id=port.id,
changeType='ADDED',
extCpInstanceId=port.cpInstanceId,
resourceHandle=port.resourceHandle
)
for port in ext_vl.extLinkPorts
if port.id in add_ext_vl_ports
]
if rm_ext_vl_ports:
# ports present only in the saved info are reported as REMOVED.
for ext_vl in inst_info_saved.extVirtualLinkInfo:
if not ext_vl.obj_attr_is_set('extLinkPorts'):
continue
affected_ext_link_ports += [
objects.AffectedExtLinkPortV2(
id=port.id,
changeType='REMOVED',
extCpInstanceId=port.cpInstanceId,
resourceHandle=port.resourceHandle
)
for port in ext_vl.extLinkPorts
if port.id in rm_ext_vl_ports
]
return affected_ext_link_ports
def update_lcmocc(lcmocc, inst_saved, inst):
# make ResourceChanges of lcmocc from instantiatedVnfInfo.
# NOTE: grant related info such as resourceDefinitionId, zoneId
# and so on are not included in lcmocc since such info are not
# included in instantiatedVnfInfo.
inst_info = inst.instantiatedVnfInfo
if inst_saved.obj_attr_is_set('instantiatedVnfInfo'):
inst_info_saved = inst_saved.instantiatedVnfInfo
else:
# dummy
inst_info_saved = objects.VnfInstanceV2_InstantiatedVnfInfo()
lcmocc_vncs = []
if inst_info.obj_attr_is_set('vnfcResourceInfo'):
for inst_vnc in inst_info.vnfcResourceInfo:
lcmocc_vnc = objects.AffectedVnfcV2(
id=inst_vnc.id,
vduId=inst_vnc.vduId,
changeType=change_type,
computeResource=inst_vnc.computeResource
)
if inst_vnc.obj_attr_is_set('vnfcCpInfo'):
cp_ids = [cp.id for cp in inst_vnc.vnfcCpInfo]
lcmocc_vnc.affectedVnfcCpIds = cp_ids
if inst_vnc.obj_attr_is_set('storageResourceIds'):
str_ids = inst_vnc.storageResourceIds
if change_type == 'ADDED':
lcmocc_vnc.addedStorageResourceIds = str_ids
else: # 'REMOVED'
lcmocc_vnc.removedStorageResourceIds = str_ids
lcmocc_vncs.append(lcmocc_vnc)
lcmocc_vls = []
if inst_info.obj_attr_is_set('vnfVirtualLinkResourceInfo'):
for inst_vl in inst_info.vnfVirtualLinkResourceInfo:
lcmocc_vl = objects.AffectedVirtualLinkV2(
id=inst_vl.id,
vnfVirtualLinkDescId=inst_vl.vnfVirtualLinkDescId,
changeType=change_type,
networkResource=inst_vl.networkResource
)
if inst_vl.obj_attr_is_set('vnfLinkPorts'):
port_ids = [port.id for port in inst_vl.vnfLinkPorts]
lcmocc_vl.vnfLinkPortIds = port_ids
lcmocc_vls.append(lcmocc_vl)
lcmocc_strs = []
if inst_info.obj_attr_is_set('virtualStorageResourceInfo'):
for inst_str in inst_info.virtualStorageResourceInfo:
lcmocc_str = objects.AffectedVirtualStorageV2(
id=inst_str.id,
virtualStorageDescId=inst_str.virtualStorageDescId,
changeType=change_type,
storageResource=inst_str.storageResource
)
lcmocc_strs.append(lcmocc_str)
inst_info = inst.instantiatedVnfInfo
if lcmocc_vncs or lcmocc_vls or lcmocc_strs:
# NOTE: objects may be re-created. so compare 'id' instead of object
# itself.
def _calc_diff(attr):
# NOTE: instantiatedVnfInfo object is dict compat
objs_saved = set()
if inst_info_saved.obj_attr_is_set(attr):
objs_saved = {obj.id for obj in inst_info_saved[attr]}
objs = set()
if inst_info.obj_attr_is_set(attr):
objs = {obj.id for obj in inst_info[attr]}
# return removed_objs, added_objs, common_objs
return objs_saved - objs, objs - objs_saved, objs_saved & objs
removed_vnfcs, added_vnfcs, _ = _calc_diff('vnfcResourceInfo')
affected_vnfcs = []
if removed_vnfcs:
affected_vnfcs += [_make_affected_vnfc(vnfc, 'REMOVED')
for vnfc in inst_info_saved.vnfcResourceInfo
if vnfc.id in removed_vnfcs]
if added_vnfcs:
affected_vnfcs += [_make_affected_vnfc(vnfc, 'ADDED')
for vnfc in inst_info.vnfcResourceInfo
if vnfc.id in added_vnfcs]
removed_vls, added_vls, common_vls = _calc_diff(
'vnfVirtualLinkResourceInfo')
affected_vls = []
if removed_vls:
affected_vls += [_make_affected_vl(vl, 'REMOVED')
for vl in inst_info_saved.vnfVirtualLinkResourceInfo
if vl.id in removed_vls]
if added_vls:
affected_vls += [_make_affected_vl(vl, 'ADDED')
for vl in inst_info.vnfVirtualLinkResourceInfo
if vl.id in added_vls]
if common_vls:
affected_vls += _make_affected_vls_link_port_change(
inst_info_saved.vnfVirtualLinkResourceInfo,
inst_info.vnfVirtualLinkResourceInfo, common_vls)
removed_mgd_vls, added_mgd_vls, common_mgd_vls = _calc_diff(
'extManagedVirtualLinkInfo')
if removed_mgd_vls:
affected_vls += [_make_affected_vl(vl, 'LINK_PORT_REMOVED')
for vl in inst_info_saved.extManagedVirtualLinkInfo
if vl.id in removed_mgd_vls]
if added_mgd_vls:
affected_vls += [_make_affected_vl(vl, 'LINK_PORT_ADDED')
for vl in inst_info.extManagedVirtualLinkInfo
if vl.id in added_mgd_vls]
if common_mgd_vls:
affected_vls += _make_affected_vls_link_port_change(
inst_info_saved.extManagedVirtualLinkInfo,
inst_info.extManagedVirtualLinkInfo, common_mgd_vls)
removed_strgs, added_strgs, _ = _calc_diff('virtualStorageResourceInfo')
affected_strgs = []
if removed_strgs:
affected_strgs += [
_make_affected_strg(strg, 'REMOVED')
for strg in inst_info_saved.virtualStorageResourceInfo
if strg.id in removed_strgs
]
if added_strgs:
affected_strgs += [_make_affected_strg(strg, 'ADDED')
for strg in inst_info.virtualStorageResourceInfo
if strg.id in added_strgs]
affected_ext_link_ports = _make_affected_ext_link_ports(
inst_info_saved, inst_info)
if (affected_vnfcs or affected_vls or affected_strgs or
affected_ext_link_ports):
change_info = objects.VnfLcmOpOccV2_ResourceChanges()
if lcmocc_vncs:
change_info.affectedVnfcs = lcmocc_vncs
if lcmocc_vls:
change_info.affectedVirtualLinks = lcmocc_vls
if lcmocc_strs:
change_info.affectedVirtualStorages = lcmocc_strs
if affected_vnfcs:
change_info.affectedVnfcs = affected_vnfcs
if affected_vls:
change_info.affectedVirtualLinks = affected_vls
if affected_strgs:
change_info.affectedVirtualStorages = affected_strgs
if affected_ext_link_ports:
change_info.affectedExtLinkPorts = affected_ext_link_ports
lcmocc.resourceChanges = change_info
def make_instantiate_lcmocc(lcmocc, inst):
_make_instantiate_lcmocc(lcmocc, inst, 'ADDED')
def make_terminate_lcmocc(lcmocc, inst):
_make_instantiate_lcmocc(lcmocc, inst, 'REMOVED')
def get_grant_req_and_grant(context, lcmocc):
grant_reqs = objects.GrantRequestV1.get_by_filter(context,
vnfLcmOpOccId=lcmocc.id)
@ -203,7 +364,7 @@ def get_grant_req_and_grant(context, lcmocc):
def check_lcmocc_in_progress(context, inst_id):
# if the controller or conductor executes an operation for the vnf
# instance (i.e. operationState is ...ING), other operation for
# the same vnf instance is exculed by the coordinator.
# the same vnf instance is excluded by the coordinator.
# check here is existence of lcmocc for the vnf instance with
# FAILED_TEMP operationState.
lcmoccs = objects.VnfLcmOpOccV2.get_by_filter(

5
tacker/sol_refactored/common/vnf_instance_utils.py

@ -46,8 +46,11 @@ def make_inst_links(inst, endpoint):
links.instantiate = objects.Link(href=self_href + "/instantiate")
else: # 'INSTANTIATED'
links.terminate = objects.Link(href=self_href + "/terminate")
links.scale = objects.Link(href=self_href + "/scale")
# TODO(oda-g): add when the operation supported
# links.scale = objects.Link(href = self_href + "/scale")
# links.heal = objects.Link(href=self_href + "/heal")
# links.changeExtConn = objects.Link(
# href=self_href + "/change_ext_conn")
# etc.
return links

56
tacker/sol_refactored/common/vnfd_utils.py

@ -358,3 +358,59 @@ class Vnfd(object):
raise sol_ex.SolHttpError422(sol_detail=msg)
return script
def get_scale_vdu_and_num(self, flavour_id, aspect_id):
    """Return {vdu_name: number_of_instances} for one step of *aspect_id*.

    number_of_instances is the count of VDU instances that one scale
    step of the aspect adds or removes. Returns {} when *aspect_id* is
    not defined in the VNFD.
    """
    aspects = self.get_policy_values_by_type(
        flavour_id, 'tosca.policies.nfv.ScalingAspects')
    delta = None
    for aspect in aspects:
        value = aspect['properties']['aspects'].get(aspect_id)
        if value is not None:
            # expect there is one delta.
            # NOTE: Tacker does not support non-uniform deltas defined in
            # ETSI NFV SOL001 8. Therefore, uniform delta corresponding
            # to number_of_instances can be set and number_of_instances is
            # the same regardless of scale_level.
            delta = value['step_deltas'][0]
            break
    if delta is None:
        return {}

    vdu_num_inst = {}
    aspect_deltas = self.get_policy_values_by_type(
        flavour_id, 'tosca.policies.nfv.VduScalingAspectDeltas')
    for aspect_delta in aspect_deltas:
        if aspect_delta.get('properties', {}).get('aspect') != aspect_id:
            continue
        num_inst = (aspect_delta['properties']['deltas']
                    .get(delta, {}).get('number_of_instances'))
        if num_inst is None:
            # NOTE: whether 'delta' defined in ScalingAspects exists in
            # VduScalingAspectDeltas is not checked at vnf package
            # loading; a missing delta is a mistake in the VNFD
            # definition.
            raise sol_ex.DeltaMissingInVnfd(delta=delta)
        for vdu_name in aspect_delta['targets']:
            vdu_num_inst[vdu_name] = num_inst
    return vdu_num_inst
def get_scale_info_from_inst_level(self, flavour_id, inst_level):
    """Return the scale_info defined for *inst_level*, or {} if none."""
    policies = self.get_policy_values_by_type(
        flavour_id, 'tosca.policies.nfv.InstantiationLevels')
    # use the first InstantiationLevels policy found, if any.
    for policy in policies:
        levels = policy['properties']['levels']
        return levels.get(inst_level, {}).get('scale_info', {})
    return {}
def get_max_scale_level(self, flavour_id, aspect_id):
    """Return max_scale_level of *aspect_id*, or 0 if it is not defined."""
    aspects = self.get_policy_values_by_type(
        flavour_id, 'tosca.policies.nfv.ScalingAspects')
    for aspect in aspects:
        value = aspect['properties']['aspects'].get(aspect_id)
        if value is not None:
            return value['max_scale_level']
    # should not occur for a valid aspect_id
    return 0

416
tacker/sol_refactored/conductor/vnflcm_driver_v2.py

@ -43,8 +43,32 @@ class VnfLcmDriverV2(object):
self.nfvo_client = nfvo_client.NfvoClient()
def grant(self, context, lcmocc, inst, vnfd):
# grant exchange
# NOTE: the api_version of NFVO supposes 1.4.0 at the moment.
# make common part of grant_req among operations
grant_req = objects.GrantRequestV1(
vnfInstanceId=inst.id,
vnfLcmOpOccId=lcmocc.id,
vnfdId=inst.vnfdId,
operation=lcmocc.operation,
isAutomaticInvocation=lcmocc.isAutomaticInvocation
)
grant_req._links = objects.GrantRequestV1_Links(
vnfLcmOpOcc=objects.Link(
href=lcmocc_utils.lcmocc_href(lcmocc.id, self.endpoint)),
vnfInstance=objects.Link(
href=inst_utils.inst_href(inst.id, self.endpoint)))
# make operation specific part of grant_req and check request
# parameters if necessary.
method = getattr(self, "%s_%s" % (lcmocc.operation.lower(), 'grant'))
return method(context, lcmocc, inst, vnfd)
method(grant_req, lcmocc.operationParams, inst, vnfd)
# NOTE: if not granted, 403 error raised.
grant = self.nfvo_client.grant(context, grant_req)
return grant_req, grant
def post_grant(self, context, lcmocc, inst, grant_req, grant, vnfd):
method = getattr(self,
@ -84,7 +108,64 @@ class VnfLcmDriverV2(object):
LOG.debug("execute %s of %s success.", operation, script)
def _make_inst_info_common(self, lcmocc, inst_saved, inst, vnfd):
# make vim independent part of instantiatedVnfInfo.
# scaleStatus and maxScaleLevels at the moment.
inst_info = inst.instantiatedVnfInfo
req = lcmocc.operationParams
if lcmocc.operation == v2fields.LcmOperationType.INSTANTIATE:
# create scaleStatus and maxScaleLevels
flavour_id = req.flavourId
if req.obj_attr_is_set('instantiationLevelId'):
inst_level = req.instantiationLevelId
else:
# fall back to the default level defined in the VNFD.
inst_level = vnfd.get_default_instantiation_level(flavour_id)
# make scaleStatus from tosca.policies.nfv.InstantiationLevels
# definition.
scale_info = vnfd.get_scale_info_from_inst_level(flavour_id,
inst_level)
scale_status = [
objects.ScaleInfoV2(
aspectId=aspect_id,
scaleLevel=value['scale_level']
)
for aspect_id, value in scale_info.items()
]
# maxScaleLevels mirrors scaleStatus, with the level taken from
# the VNFD ScalingAspects definition.
max_scale_levels = [
objects.ScaleInfoV2(
aspectId=obj.aspectId,
scaleLevel=vnfd.get_max_scale_level(flavour_id,
obj.aspectId)
)
for obj in scale_status
]
if scale_status:
inst_info.scaleStatus = scale_status
inst_info.maxScaleLevels = max_scale_levels
# for operations other than INSTANTIATE/TERMINATE, carry over the
# previously saved scaleStatus/maxScaleLevels.
elif lcmocc.operation != v2fields.LcmOperationType.TERMINATE:
inst_info_saved = inst_saved.instantiatedVnfInfo
if inst_info_saved.obj_attr_is_set('scaleStatus'):
inst_info.scaleStatus = inst_info_saved.scaleStatus
inst_info.maxScaleLevels = inst_info_saved.maxScaleLevels
if lcmocc.operation == v2fields.LcmOperationType.SCALE:
# adjust scaleStatus
# SCALE_OUT adds numberOfSteps; SCALE_IN subtracts it.
num_steps = req.numberOfSteps
if req.type == 'SCALE_IN':
num_steps *= -1
for aspect_info in inst_info.scaleStatus:
if aspect_info.aspectId == req.aspectId:
aspect_info.scaleLevel += num_steps
break
def process(self, context, lcmocc, inst, grant_req, grant, vnfd):
# save inst to use updating lcmocc after process done
inst_saved = inst.obj_clone()
# perform preamble LCM script
req = lcmocc.operationParams
operation = "%s_%s" % (lcmocc.operation.lower(), 'start')
@ -104,6 +185,9 @@ class VnfLcmDriverV2(object):
self._exec_mgmt_driver_script(operation,
flavour_id, req, inst, grant_req, grant, vnfd)
self._make_inst_info_common(lcmocc, inst_saved, inst, vnfd)
lcmocc_utils.update_lcmocc(lcmocc, inst_saved, inst)
def rollback(self, context, lcmocc, inst, grant_req, grant, vnfd):
method = getattr(self,
"%s_%s" % (lcmocc.operation.lower(), 'rollback'),
@ -114,38 +198,60 @@ class VnfLcmDriverV2(object):
raise sol_ex.RollbackNotSupported(op=lcmocc.operation)
def _get_link_ports(self, inst_req):
names = []
names = set()
if inst_req.obj_attr_is_set('extVirtualLinks'):
for ext_vl in inst_req.extVirtualLinks:
for ext_cp in ext_vl.extCps:
for cp_config in ext_cp.cpConfig.values():
if cp_config.obj_attr_is_set('linkPortId'):
names.append(ext_cp.cpdId)
names.add(ext_cp.cpdId)
if inst_req.obj_attr_is_set('extManagedVirtualLinks'):
for ext_mgd_vl in inst_req.extManagedVirtualLinks:
if ext_mgd_vl.obj_attr_is_set('vnfLinkPort'):
names.append(ext_mgd_vl.vnfVirtualLinkDescId)
names.add(ext_mgd_vl.vnfVirtualLinkDescId)
return names
def instantiate_grant(self, context, lcmocc, inst, vnfd):
req = lcmocc.operationParams
def _make_res_def_for_new_vdu(self, vdu_name, num_inst, cp_names,
                              storage_names):
    """Make add ResourceDefinitions for *num_inst* new VDU instances.

    Common part of instantiate and scale out. Each new VDU gets one
    COMPUTE definition plus LINKPORT/STORAGE definitions whose ids are
    derived from the VDU's generated resource id.
    """
    add_reses = []
    for _ in range(num_inst):
        vdu_res_id = uuidutils.generate_uuid()
        add_reses.append(objects.ResourceDefinitionV1(
            id=vdu_res_id,
            type='COMPUTE',
            resourceTemplateId=vdu_name))
        add_reses.extend(
            objects.ResourceDefinitionV1(
                id="{}-{}".format(cp_name, vdu_res_id),
                type='LINKPORT',
                resourceTemplateId=cp_name)
            for cp_name in cp_names)
        add_reses.extend(
            objects.ResourceDefinitionV1(
                id="{}-{}".format(storage_name, vdu_res_id),
                type='STORAGE',
                resourceTemplateId=storage_name)
            for storage_name in storage_names)
    return add_reses
def instantiate_grant(self, grant_req, req, inst, vnfd):
flavour_id = req.flavourId
if vnfd.get_vnfd_flavour(flavour_id) is None:
raise sol_ex.FlavourIdNotFound(flavour_id=flavour_id)
# grant exchange
# NOTE: the api_version of NFVO supposes 1.4.0 at the moment.
grant_req = objects.GrantRequestV1(
vnfInstanceId=inst.id,
vnfLcmOpOccId=lcmocc.id,
vnfdId=inst.vnfdId,
flavourId=flavour_id,
operation=lcmocc.operation,
isAutomaticInvocation=lcmocc.isAutomaticInvocation
)
grant_req.flavourId = flavour_id
if req.obj_attr_is_set('instantiationLevelId'):
inst_level = req.instantiationLevelId
@ -161,30 +267,8 @@ class VnfLcmDriverV2(object):
vdu_cp_names = vnfd.get_vdu_cps(flavour_id, name)
vdu_storage_names = vnfd.get_vdu_storages(node)
for _ in range(num):
res_def = objects.ResourceDefinitionV1(
id=uuidutils.generate_uuid(),
type='COMPUTE',
resourceTemplateId=name)
add_reses.append(res_def)
for cp_name in vdu_cp_names:
if cp_name in link_port_names:
continue
for _ in range(num):
res_def = objects.ResourceDefinitionV1(
id=uuidutils.generate_uuid(),
type='LINKPORT',
resourceTemplateId=cp_name)
add_reses.append(res_def)
for storage_name in vdu_storage_names:
for _ in range(num):
res_def = objects.ResourceDefinitionV1(
id=uuidutils.generate_uuid(),
type='STORAGE',
resourceTemplateId=storage_name)
add_reses.append(res_def)
add_reses += self._make_res_def_for_new_vdu(name, num,
set(vdu_cp_names) - link_port_names, vdu_storage_names)
ext_mgd_vls = []
if req.obj_attr_is_set('extManagedVirtualLinks'):
@ -232,17 +316,6 @@ class VnfLcmDriverV2(object):
if req.obj_attr_is_set('additionalParams'):
grant_req.additionalParams = req.additionalParams
grant_req._links = objects.GrantRequestV1_Links(
vnfLcmOpOcc=objects.Link(
href=lcmocc_utils.lcmocc_href(lcmocc.id, self.endpoint)),
vnfInstance=objects.Link(
href=inst_utils.inst_href(inst.id, self.endpoint)))
# NOTE: if not granted, 403 error raised.
grant = self.nfvo_client.grant(context, grant_req)
return grant_req, grant
def instantiate_post_grant(self, context, lcmocc, inst, grant_req,
grant, vnfd):
# set inst vimConnectionInfo
@ -288,7 +361,6 @@ class VnfLcmDriverV2(object):
raise sol_ex.SolException(sol_detail='not support vim type')
inst.instantiationState = 'INSTANTIATED'
lcmocc_utils.make_instantiate_lcmocc(lcmocc, inst)
def instantiate_rollback(self, context, lcmocc, inst, grant_req,
grant, vnfd):
@ -301,71 +373,63 @@ class VnfLcmDriverV2(object):
# only support openstack at the moment
raise sol_ex.SolException(sol_detail='not support vim type')
def terminate_grant(self, context, lcmocc, inst, vnfd):
# grant exchange
# NOTE: the api_version of NFVO supposes 1.4.0 at the moment.
grant_req = objects.GrantRequestV1(
vnfInstanceId=inst.id,
vnfLcmOpOccId=lcmocc.id,
vnfdId=inst.vnfdId,
operation=lcmocc.operation,
isAutomaticInvocation=lcmocc.isAutomaticInvocation
)
inst_info = inst.instantiatedVnfInfo
def _make_res_def_for_remove_vnfcs(self, inst_info, inst_vnfcs):
# common part of terminate and scale in
rm_reses = []
vnfc_cps = {}
if inst_info.obj_attr_is_set('vnfcResourceInfo'):
for inst_vnc in inst_info.vnfcResourceInfo:
res_def = objects.ResourceDefinitionV1(
id=uuidutils.generate_uuid(),
for inst_vnfc in inst_vnfcs:
vdu_res_id = uuidutils.generate_uuid()
rm_reses.append(
objects.ResourceDefinitionV1(
id=vdu_res_id,
type='COMPUTE',
resourceTemplateId=inst_vnc.vduId,
resource=inst_vnc.computeResource)
rm_reses.append(res_def)
if inst_vnc.obj_attr_is_set('vnfcCpInfo'):
for cp_info in inst_vnc.vnfcCpInfo:
if not (cp_info.obj_attr_is_set('vnfExtCpId') or
cp_info.obj_attr_is_set('vnfLinkPortId')):
# it means extLinkPorts of extVirtualLinks was
# specified. so it is not the resource to be
# deleted.
continue
res_def = objects.ResourceDefinitionV1(
id=uuidutils.generate_uuid(),
resourceTemplateId=cp_info.cpdId,
type='LINKPORT')
rm_reses.append(res_def)
if cp_info.obj_attr_is_set('vnfExtCpId'):
vnfc_cps[cp_info.vnfExtCpId] = res_def
else: # vnfLinkPortId
vnfc_cps[cp_info.vnfLinkPortId] = res_def
resourceTemplateId=inst_vnfc.vduId,
resource=inst_vnfc.computeResource
)
)
if inst_vnfc.obj_attr_is_set('vnfcCpInfo'):
for cp_info in inst_vnfc.vnfcCpInfo:
if not (cp_info.obj_attr_is_set('vnfExtCpId') or
cp_info.obj_attr_is_set('vnfLinkPortId')):
# it means extLinkPorts of extVirtualLinks was
# specified. so it is not the resource to be
# deleted.
continue
res_def = objects.ResourceDefinitionV1(
id="{}-{}".format(cp_info.cpdId, vdu_res_id),
resourceTemplateId=cp_info.cpdId,
type='LINKPORT')
rm_reses.append(res_def)
if cp_info.obj_attr_is_set('vnfExtCpId'):
vnfc_cps[cp_info.vnfExtCpId] = res_def
else: # vnfLinkPortId
vnfc_cps[cp_info.vnfLinkPortId] = res_def
if inst_vnfc.obj_attr_is_set('storageResourceIds'):
for storage_id in inst_vnfc.storageResourceIds:
for inst_str in inst_info.virtualStorageResourceInfo:
if inst_str.id == storage_id:
str_name = inst_str.virtualStorageDescId
rm_reses.append(
objects.ResourceDefinitionV1(
id="{}-{}".format(str_name, vdu_res_id),
type='STORAGE',
resourceTemplateId=str_name,
resource=inst_str.storageResource
)
)
break
# fill resourceHandle of ports
if inst_info.obj_attr_is_set('vnfVirtualLinkResourceInfo'):
for inst_vl in inst_info.vnfVirtualLinkResourceInfo:
res_def = objects.ResourceDefinitionV1(
id=uuidutils.generate_uuid(),
type='VL',
resourceTemplateId=inst_vl.vnfVirtualLinkDescId,
resource=inst_vl.networkResource)
rm_reses.append(res_def)
if inst_vl.obj_attr_is_set('vnfLinkPorts'):
for port in inst_vl.vnfLinkPorts:
if port.id in vnfc_cps:
res_def = vnfc_cps[port.id]
res_def.resource = port.resourceHandle
if inst_info.obj_attr_is_set('virtualStorageResourceInfo'):
for inst_str in inst_info.virtualStorageResourceInfo:
res_def = objects.ResourceDefinitionV1(
id=uuidutils.generate_uuid(),
type='STORAGE',
resourceTemplateId=inst_str.virtualStorageDescId,
resource=inst_str.storageResource)
rm_reses.append(res_def)
if inst_info.obj_attr_is_set('extVirtualLinkInfo'):
for ext_vl in inst_info.extVirtualLinkInfo:
if ext_vl.obj_attr_is_set('extLinkPorts'):
@ -384,19 +448,28 @@ class VnfLcmDriverV2(object):
res_def = vnfc_cps[port.id]
res_def.resource = port.resourceHandle
if rm_reses:
grant_req.removeResources = rm_reses
return rm_reses
grant_req._links = objects.GrantRequestV1_Links(
vnfLcmOpOcc=objects.Link(
href=lcmocc_utils.lcmocc_href(lcmocc.id, self.endpoint)),
vnfInstance=objects.Link(
href=inst_utils.inst_href(inst.id, self.endpoint)))
def terminate_grant(self, grant_req, req, inst, vnfd):
inst_info = inst.instantiatedVnfInfo
rm_reses = []
if inst_info.obj_attr_is_set('vnfcResourceInfo'):
rm_reses += self._make_res_def_for_remove_vnfcs(
inst_info, inst_info.vnfcResourceInfo)
# NOTE: if not granted, 403 error raised.
grant_res = self.nfvo_client.grant(context, grant_req)
if inst_info.obj_attr_is_set('vnfVirtualLinkResourceInfo'):
for inst_vl in inst_info.vnfVirtualLinkResourceInfo:
rm_reses.append(
objects.ResourceDefinitionV1(
id=uuidutils.generate_uuid(),
type='VL',
resourceTemplateId=inst_vl.vnfVirtualLinkDescId,
resource=inst_vl.networkResource
)
)
return grant_req, grant_res
if rm_reses:
grant_req.removeResources = rm_reses
def terminate_process(self, context, lcmocc, inst, grant_req,
grant, vnfd):
@ -410,7 +483,6 @@ class VnfLcmDriverV2(object):
raise sol_ex.SolException(sol_detail='not support vim type')
inst.instantiationState = 'NOT_INSTANTIATED'
lcmocc_utils.make_terminate_lcmocc(lcmocc, inst)
# reset instantiatedVnfInfo
# NOTE: reset after update lcmocc
@ -424,3 +496,113 @@ class VnfLcmDriverV2(object):
# reset vimConnectionInfo
inst.vimConnectionInfo = {}
def scale_grant(self, grant_req, req, inst, vnfd):
    """Make the scale specific part of the grant request."""
    flavour_id = inst.instantiatedVnfInfo.flavourId
    vdu_num_inst = vnfd.get_scale_vdu_and_num(flavour_id, req.aspectId)
    if not vdu_num_inst:
        # should not occur. just check for consistency.
        raise sol_ex.InvalidScaleAspectId(aspect_id=req.aspectId)

    if req.type == 'SCALE_OUT':
        self._make_scale_out_grant_request(grant_req, inst,
                                           req.numberOfSteps, vdu_num_inst)
    else:  # 'SCALE_IN'
        self._make_scale_in_grant_request(grant_req, inst,
                                          req.numberOfSteps, vdu_num_inst)

    if req.obj_attr_is_set('additionalParams'):
        grant_req.additionalParams = req.additionalParams
def _make_scale_out_grant_request(self, grant_req, inst, num_steps,
vdu_num_inst):
inst_info = inst.instantiatedVnfInfo
add_reses = []
# get one of vnfc for the vdu from inst.instantiatedVnfInfo
vdu_sample = {}
for vdu_name in vdu_num_inst.keys():
for inst_vnfc in inst_info.vnfcResourceInfo:
if inst_vnfc.vduId == vdu_name:
vdu_sample[vdu_name] = inst_vnfc
break
for vdu_name, inst_vnfc in vdu_sample.items():
num_inst = vdu_num_inst[vdu_name] * num_steps
vdu_cp_names = []
if inst_vnfc.obj_attr_is_set('vnfcCpInfo'):
# NOTE: it is expected that there are only dynamic ports
# for vdus which enable scaling.
vdu_cp_names = [cp_info.cpdId
for cp_info in inst_vnfc.vnfcCpInfo]
vdu_storage_names = []
if inst_vnfc.obj_attr_is_set('storageResourceIds'):
for storage_id in inst_vnfc.storageResourceIds:
for storage_res in inst_info.virtualStorageResourceInfo:
if storage_res.id == storage_id:
vdu_storage_names.append(
storage_res.virtualStorageDescId)
break
add_reses += self._make_res_def_for_new_vdu(vdu_name,
num_inst, vdu_cp_names, vdu_storage_names)
if add_reses:
grant_req.addResources = add_reses
def _make_scale_in_grant_request(self, grant_req, inst, num_steps,
vdu_num_inst):
inst_info = inst.instantiatedVnfInfo
rm_vnfcs = []
# select remove VDUs
# NOTE: scale-in specification of tacker SOL003 v2 API is that
# newer VDU is selected for reduction.
# It is expected that vnfcResourceInfo is sorted by creation_time
# of VDU, newer is earlier.
for vdu_name, num_inst in vdu_num_inst.items():
num_inst = num_inst * num_steps
count = 0
for inst_vnfc in inst_info.vnfcResourceInfo:
if inst_vnfc.vduId == vdu_name:
rm_vnfcs.append(inst_vnfc)
count += 1
if count == num_inst:
break
rm_reses = self._make_res_def_for_remove_vnfcs(inst_info, rm_vnfcs)
if rm_reses:
grant_req.removeResources = rm_reses
def scale_process(self, context, lcmocc, inst, grant_req,
                  grant, vnfd):
    """Execute the scale operation on the VIM."""
    req = lcmocc.operationParams
    vim_info = inst_utils.select_vim_info(inst.vimConnectionInfo)
    if vim_info.vimType != 'ETSINFV.OPENSTACK_KEYSTONE.V_3':
        # only support openstack at the moment
        raise sol_ex.SolException(sol_detail='not support vim type')
    openstack.Openstack().scale(req, inst, grant_req, grant, vnfd)
def scale_rollback(self, context, lcmocc, inst, grant_req,
                   grant, vnfd):
    """Roll back a failed scale operation (SCALE_OUT only)."""
    req = lcmocc.operationParams
    if req.type == 'SCALE_IN':
        raise sol_ex.RollbackNotSupported(op='SCALE IN')
    vim_info = inst_utils.select_vim_info(inst.vimConnectionInfo)
    if vim_info.vimType != 'ETSINFV.OPENSTACK_KEYSTONE.V_3':
        # only support openstack at the moment
        raise sol_ex.SolException(sol_detail='not support vim type')
    openstack.Openstack().scale_rollback(req, inst, grant_req, grant, vnfd)

95
tacker/sol_refactored/controller/vnflcm_v2.py

@ -149,6 +149,21 @@ class VnfLcmControllerV2(sol_wsgi.SolAPIController):
self.endpoint)
return sol_wsgi.SolResponse(204, None)
def _new_lcmocc(self, inst_id, operation, req_body):
    """Build a VnfLcmOpOccV2 in STARTING state for a new LCM operation.

    :param inst_id: id of the target VNF instance
    :param operation: LcmOperationType value
    :param req_body: validated request body, stored as operationParams
    :returns: un-persisted VnfLcmOpOccV2 (caller invokes create())
    """
    timestamp = datetime.utcnow()
    return objects.VnfLcmOpOccV2(
        id=uuidutils.generate_uuid(),
        operationState=v2fields.LcmOperationStateType.STARTING,
        stateEnteredTime=timestamp,
        startTime=timestamp,
        vnfInstanceId=inst_id,
        operation=operation,
        isAutomaticInvocation=False,
        isCancelPending=False,
        operationParams=req_body)
@validator.schema(schema.InstantiateVnfRequest_V200, '2.0.0')
@coordinate.lock_vnf_instance('{id}')
def instantiate(self, request, id, body):
@ -160,17 +175,8 @@ class VnfLcmControllerV2(sol_wsgi.SolAPIController):
lcmocc_utils.check_lcmocc_in_progress(context, id)
now = datetime.utcnow()
lcmocc = objects.VnfLcmOpOccV2(
id=uuidutils.generate_uuid(),
operationState=v2fields.LcmOperationStateType.STARTING,
stateEnteredTime=now,
startTime=now,
vnfInstanceId=id,
operation=v2fields.LcmOperationType.INSTANTIATE,
isAutomaticInvocation=False,
isCancelPending=False,
operationParams=body)
lcmocc = self._new_lcmocc(id, v2fields.LcmOperationType.INSTANTIATE,
body)
req_param = lcmocc.operationParams
# if there is partial vimConnectionInfo check and fulfill here.
@ -203,18 +209,63 @@ class VnfLcmControllerV2(sol_wsgi.SolAPIController):
lcmocc_utils.check_lcmocc_in_progress(context, id)
now = datetime.utcnow()
lcmocc = objects.VnfLcmOpOccV2(
id=uuidutils.generate_uuid(),
operationState=v2fields.LcmOperationStateType.STARTING,
stateEnteredTime=now,
startTime=now,
vnfInstanceId=id,
operation=v2fields.LcmOperationType.TERMINATE,
isAutomaticInvocation=False,
isCancelPending=False,
operationParams=body)
lcmocc = self._new_lcmocc(id, v2fields.LcmOperationType.TERMINATE,
body)
lcmocc.create(context)
self.conductor_rpc.start_lcm_op(context, lcmocc.id)
location = lcmocc_utils.lcmocc_href(lcmocc.id, self.endpoint)
return sol_wsgi.SolResponse(202, None, location=location)
def _get_current_scale_level(self, inst, aspect_id):
if (inst.obj_attr_is_set('instantiatedVnfInfo') and
inst.instantiatedVnfInfo.obj_attr_is_set('scaleStatus')):
for scale_info in inst.instantiatedVnfInfo.scaleStatus:
if scale_info.aspectId == aspect_id:
return scale_info.scaleLevel
def _get_max_scale_level(self, inst, aspect_id):
if (inst.obj_attr_is_set('instantiatedVnfInfo') and
inst.instantiatedVnfInfo.obj_attr_is_set('maxScaleLevels')):
for scale_info in inst.instantiatedVnfInfo.maxScaleLevels:
if scale_info.aspectId == aspect_id:
return scale_info.scaleLevel
@validator.schema(schema.ScaleVnfRequest_V200, '2.0.0')
@coordinate.lock_vnf_instance('{id}')
def scale(self, request, id, body):
    """Handle the SOL v2 "Scale VNF" request for instance *id*.

    Validates that the instance is INSTANTIATED and no other LCM
    operation is in progress, range-checks the requested scale step
    against the current and maximum scale levels of the aspect, then
    creates a VnfLcmOpOccV2 and hands it to the conductor.
    """
    context = request.context
    inst = inst_utils.get_inst(context, id)

    if inst.instantiationState != 'INSTANTIATED':
        raise sol_ex.VnfInstanceIsNotInstantiated(inst_id=id)

    lcmocc_utils.check_lcmocc_in_progress(context, id)

    # check parameters
    aspect_id = body['aspectId']
    if 'numberOfSteps' not in body:
        # set default value (1) defined by SOL specification for
        # the convenience of the following methods.
        body['numberOfSteps'] = 1

    # None means the aspect is not known for this instance.
    scale_level = self._get_current_scale_level(inst, aspect_id)
    max_scale_level = self._get_max_scale_level(inst, aspect_id)
    if scale_level is None or max_scale_level is None:
        raise sol_ex.InvalidScaleAspectId(aspect_id=aspect_id)

    # A SCALE_IN step moves the level down; the resulting level must
    # stay within [0, max_scale_level].
    num_steps = body['numberOfSteps']
    if body['type'] == 'SCALE_IN':
        num_steps *= -1
    scale_level += num_steps
    if scale_level < 0 or scale_level > max_scale_level:
        raise sol_ex.InvalidScaleNumberOfSteps(
            num_steps=body['numberOfSteps'])

    lcmocc = self._new_lcmocc(id, v2fields.LcmOperationType.SCALE,
                              body)

    lcmocc.create(context)
    self.conductor_rpc.start_lcm_op(context, lcmocc.id)
    # NOTE(review): the visible diff hunk ends here; presumably a 202
    # response with a Location header follows, as in terminate() — confirm
    # against the full file.

32
tacker/sol_refactored/infra_drivers/openstack/heat_utils.py

@ -123,6 +123,22 @@ class HeatClient(object):
"DELETE_COMPLETE", "DELETE_IN_PROGRESS", "DELETE_FAILED",
none_is_done=True)
def get_parameters(self, stack_name):
    """Return the 'parameters' dict of the given heat stack."""
    url = "stacks/{}".format(stack_name)
    _, body = self.client.do_request(url, "GET",
                                     expected_status=[200])
    return body["stack"]["parameters"]
def mark_unhealthy(self, stack_id, resource_name):
    """PATCH a heat resource so heat regards it as unhealthy.

    Heat will replace/remove the resource on the next stack update.
    """
    url = "stacks/{}/resources/{}".format(stack_id, resource_name)
    payload = {
        "mark_unhealthy": True,
        "resource_status_reason": "marked by tacker"
    }
    self.client.do_request(url, "PATCH",
                           expected_status=[200], body=payload)
def get_reses_by_types(heat_reses, types):
    """Return the heat resources whose 'resource_type' is in *types*."""
    matched = []
    for res in heat_reses:
        if res['resource_type'] in types:
            matched.append(res)
    return matched
@ -146,3 +162,19 @@ def get_port_reses(heat_reses):
def get_stack_name(inst):
    """Return the heat stack name used for the VNF instance."""
    return "vnf-{}".format(inst.id)
def get_resource_stack_id(heat_res):
    """Return "stack_name/stack_id" taken from the resource's 'stack' link.

    Returns None when the resource has no 'stack' link.
    """
    for link in heat_res.get('links', []):
        if link['rel'] != 'stack':
            continue
        segs = link['href'].split('/')
        # last two path segments are the stack name and stack id
        return "{}/{}".format(segs[-2], segs[-1])
    return None
def get_parent_resource(heat_res, heat_reses):
    """Return the resource named by heat_res's 'parent_resource', or None."""
    parent_name = heat_res.get('parent_resource')
    if not parent_name:
        return None
    for candidate in heat_reses:
        if candidate['resource_name'] == parent_name:
            return candidate
    return None

780
tacker/sol_refactored/infra_drivers/openstack/openstack.py

@ -14,7 +14,9 @@
# under the License.
from dateutil import parser
import eventlet
import json
import os
import pickle
import subprocess
@ -28,12 +30,40 @@ from tacker.sol_refactored.common import vnf_instance_utils as inst_utils
from tacker.sol_refactored.infra_drivers.openstack import heat_utils
from tacker.sol_refactored.infra_drivers.openstack import userdata_default
from tacker.sol_refactored import objects
from tacker.sol_refactored.objects.v2 import fields as v2fields
LOG = logging.getLogger(__name__)

CONF = config.CONF

# Prefixes used when composing ids of entries in instantiatedVnfInfo
# (see the NOTE below and the _make_*_id helpers).
LINK_PORT_PREFIX = 'req-'
CP_INFO_PREFIX = 'cp-'


# Id of the resources in instantiatedVnfInfo related methods.
# NOTE: instantiatedVnfInfo is re-created in each operation.
# Id of the resources in instantiatedVnfInfo is based on
# heat resource-id so that id is not changed at re-creation.
# Some ids are same as heat resource-id and some ids are
# combination of prefix and other ids.
def _make_link_port_id(link_port_id):
    """Return the instantiatedVnfInfo id for a request-specified link port.

    The 'req-' prefix distinguishes such ports from ports which are
    created by heat.
    """
    return LINK_PORT_PREFIX + link_port_id
def _is_link_port(link_port_id):
    # True if the id was produced by _make_link_port_id (i.e. the port
    # was specified in the request rather than created by heat).
    return link_port_id.startswith(LINK_PORT_PREFIX)
def _make_cp_info_id(link_port_id):
    """Return the vnfcCpInfo id derived from a link port id."""
    return CP_INFO_PREFIX + link_port_id
def _make_combination_id(a, b):
    # Compose a stable id from two component ids, joined with '-'.
    return '{}-{}'.format(a, b)
class Openstack(object):
@ -42,7 +72,7 @@ class Openstack(object):
def instantiate(self, req, inst, grant_req, grant, vnfd):
# make HOT
fields = self.make_hot(req, inst, grant_req, grant, vnfd)
fields = self._make_hot(req, inst, grant_req, grant, vnfd)
LOG.debug("stack fields: %s", fields)
@ -61,10 +91,115 @@ class Openstack(object):
heat_reses = heat_client.get_resources(stack_name)
# make instantiated_vnf_info
self.make_instantiated_vnf_info(req, inst, grant, vnfd, heat_reses)
self._make_instantiated_vnf_info(req, inst, grant_req, grant, vnfd,
heat_reses)
def instantiate_rollback(self, req, inst, grant_req, grant, vnfd):
    """Roll back instantiate by deleting the heat stack if it was created."""
    vim_info = inst_utils.select_vim_info(inst.vimConnectionInfo)
    client = heat_utils.HeatClient(vim_info)
    name = heat_utils.get_stack_name(inst)
    status, _ = client.get_status(name)
    # status None means the stack was never created; nothing to clean up.
    if status is not None:
        client.delete_stack(name)
def terminate(self, req, inst, grant_req, grant, vnfd):
    """Terminate the VNF by deleting its heat stack.

    For GRACEFUL termination, wait for the requested (or configured
    default) graceful termination timeout before deleting the stack.
    """
    if req.terminationType == 'GRACEFUL':
        if req.obj_attr_is_set('gracefulTerminationTimeout'):
            timeout = req.gracefulTerminationTimeout
        else:
            timeout = CONF.v2_vnfm.default_graceful_termination_timeout
        eventlet.sleep(timeout)

    # delete stack
    vim_info = inst_utils.select_vim_info(inst.vimConnectionInfo)
    client = heat_utils.HeatClient(vim_info)
    client.delete_stack(heat_utils.get_stack_name(inst))
def _update_nfv_dict(self, heat_client, stack_name, fields):
    """Merge the stack's current 'nfv' parameter into *fields*.

    The new 'nfv' value is applied to the stack's current one as a
    JSON merge patch, so parameters not mentioned in *fields* are kept.
    Returns *fields* (modified in place).
    """
    current = heat_client.get_parameters(stack_name)
    LOG.debug("ORIG parameters: %s", current)
    # NOTE: heat returns the 'nfv' parameter as a JSON string.
    base_nfv = json.loads(current.get('nfv', '{}'))
    new_params = fields['parameters']
    if 'nfv' in new_params:
        new_params['nfv'] = inst_utils.json_merge_patch(
            base_nfv, new_params['nfv'])
    LOG.debug("NEW parameters: %s", new_params)
    return fields
def scale(self, req, inst, grant_req, grant, vnfd):
    """Scale the VNF by updating its heat stack.

    For SCALE_IN, the servers chosen in grant_req.removeResources are
    first marked unhealthy so that the subsequent stack update removes
    exactly those servers. instantiatedVnfInfo is rebuilt from the
    resulting heat resources afterwards.
    """
    vim_info = inst_utils.select_vim_info(inst.vimConnectionInfo)
    heat_client = heat_utils.HeatClient(vim_info)

    # make HOT
    fields = self._make_hot(req, inst, grant_req, grant, vnfd)
    LOG.debug("stack fields: %s", fields)
    stack_name = fields.pop('stack_name')

    # mark unhealthy to servers to be removed if scale in
    if req.type == 'SCALE_IN':
        # compute resources granted for removal
        vnfc_res_ids = [res_def.resource.resourceId
                        for res_def in grant_req.removeResources
                        if res_def.type == 'COMPUTE']
        for vnfc in inst.instantiatedVnfInfo.vnfcResourceInfo:
            if vnfc.computeResource.resourceId in vnfc_res_ids:
                if 'parent_stack_id' not in vnfc.metadata:
                    # It means definition of VDU in the BaseHOT
                    # is inappropriate.
                    raise sol_ex.UnexpectedParentResourceDefinition()
                heat_client.mark_unhealthy(
                    vnfc.metadata['parent_stack_id'],
                    vnfc.metadata['parent_resource_name'])

    # update stack (merge 'nfv' with the stack's current parameters first)
    fields = self._update_nfv_dict(heat_client, stack_name, fields)
    heat_client.update_stack(stack_name, fields)

    # get stack resource
    heat_reses = heat_client.get_resources(stack_name)

    # make instantiated_vnf_info
    self._make_instantiated_vnf_info(req, inst, grant_req, grant, vnfd,
                                     heat_reses)
def scale_rollback(self, req, inst, grant_req, grant, vnfd):
    """Roll back a failed SCALE_OUT by removing the servers it added.

    Servers found in heat but not in instantiatedVnfInfo (i.e. created
    by the failed scale out; vnfc ids are based on heat resource ids)
    are marked unhealthy, then the stack is updated with the pre-scale
    'desired_capacity' so that heat removes them.
    """
    # NOTE: rollback is supported for scale out only
    vim_info = inst_utils.select_vim_info(inst.vimConnectionInfo)
    heat_client = heat_utils.HeatClient(vim_info)
    stack_name = heat_utils.get_stack_name(inst)
    heat_reses = heat_client.get_resources(stack_name)

    # mark unhealthy to added servers while scale out
    vnfc_ids = [vnfc.id
                for vnfc in inst.instantiatedVnfInfo.vnfcResourceInfo]
    for res in heat_utils.get_server_reses(heat_reses):
        if res['physical_resource_id'] not in vnfc_ids:
            metadata = self._make_vnfc_metadata(res, heat_reses)
            if 'parent_stack_id' not in metadata:
                # It means definition of VDU in the BaseHOT
                # is inappropriate.
                raise sol_ex.UnexpectedParentResourceDefinition()
            heat_client.mark_unhealthy(
                metadata['parent_stack_id'],
                metadata['parent_resource_name'])

    # update (put back) 'desired_capacity' parameter
    fields = self._update_nfv_dict(heat_client, stack_name,
        userdata_default.DefaultUserData.scale_rollback(
            req, inst, grant_req, grant, vnfd.csar_dir))

    heat_client.update_stack(stack_name, fields)

    # NOTE: instantiatedVnfInfo is not necessary to update since it
    # should be same as before scale API started.
def _make_hot(self, req, inst, grant_req, grant, vnfd):
if grant_req.operation == v2fields.LcmOperationType.INSTANTIATE:
flavour_id = req.flavourId
else:
flavour_id = inst.instantiatedVnfInfo.flavourId
def make_hot(self, req, inst, grant_req, grant, vnfd):
flavour_id = req