Merge "support scale VNF task of v2 API"

Zuul 2022-01-21 07:06:21 +00:00 committed by Gerrit Code Review
commit 433d9b3425
27 changed files with 4332 additions and 586 deletions


@ -0,0 +1,5 @@
---
features:
- |
Add version "2.0.0" of the Scale VNF API
based on ETSI NFV specifications.
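
For illustration, a hypothetical request against the new endpoint could look like the sketch below; the host, port, instance id, token, and exact header names are placeholders, not part of this change.

# Hypothetical sketch of calling the new v2 scale endpoint; URL, id,
# token and headers are placeholders for illustration only.
import json
import urllib.request

body = {
    "type": "SCALE_OUT",        # or "SCALE_IN"
    "aspectId": "vdu1_aspect",  # an aspect defined in the VNFD
    "numberOfSteps": 1,         # defaults to 1 when omitted
}
req = urllib.request.Request(
    "http://tacker-host:9890/vnflcm/v2/vnf_instances/<inst-id>/scale",
    data=json.dumps(body).encode(),
    headers={"Content-Type": "application/json",
             "X-Auth-Token": "<token>",
             "Version": "2.0.0"},
    method="POST")
# urllib.request.urlopen(req)  # expected to return 202 Accepted with a
#                              # Location header pointing at the lcmocc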


@ -93,6 +93,15 @@ rules = [
'path': VNF_INSTANCES_ID_PATH + '/terminate'}
]
),
policy.DocumentedRuleDefault(
name=POLICY_NAME.format('scale'),
check_str=RULE_ANY,
description="Scale vnf instance.",
operations=[
{'method': 'POST',
'path': VNF_INSTANCES_ID_PATH + '/scale'}
]
),
# TODO(oda-g): add more lcm operations etc when implemented.


@ -77,6 +77,22 @@ TerminateVnfRequest_V200 = {
'additionalProperties': True,
}
# SOL003 5.5.2.5
ScaleVnfRequest_V200 = {
'type': 'object',
'properties': {
'type': {
'type': 'string',
'enum': ['SCALE_OUT', 'SCALE_IN']
},
'aspectId': common_types.IdentifierInVnfd,
'numberOfSteps': {'type': 'integer', 'minimum': 1},
'additionalParams': parameter_types.keyvalue_pairs,
},
'required': ['type', 'aspectId'],
'additionalProperties': True,
}
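
As a rough sketch, the request body validation this schema performs can be reproduced with the jsonschema library; the two referenced definitions (common_types.IdentifierInVnfd and parameter_types.keyvalue_pairs) are simplified to plain 'string'/'object' types here for illustration.

import jsonschema

# Simplified copy of ScaleVnfRequest_V200 for illustration only.
scale_vnf_request_schema = {
    'type': 'object',
    'properties': {
        'type': {'type': 'string', 'enum': ['SCALE_OUT', 'SCALE_IN']},
        'aspectId': {'type': 'string'},          # simplified IdentifierInVnfd
        'numberOfSteps': {'type': 'integer', 'minimum': 1},
        'additionalParams': {'type': 'object'},  # simplified keyvalue_pairs
    },
    'required': ['type', 'aspectId'],
    'additionalProperties': True,
}

jsonschema.validate({'type': 'SCALE_OUT', 'aspectId': 'vdu1_aspect',
                     'numberOfSteps': 2}, scale_vnf_request_schema)  # passes
# jsonschema.validate({'type': 'SCALE_UP', 'aspectId': 'vdu1_aspect'},
#                     scale_vnf_request_schema)  # raises ValidationError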
# SOL013 8.3.4
_SubscriptionAuthentication = {
'type': 'object',


@ -223,3 +223,20 @@ class GrantRequestOrGrantNotFound(SolHttpError404):
class RollbackNotSupported(SolHttpError422):
message = _("Rollback of %(op)s is not supported.")
class UnexpectedParentResourceDefinition(SolHttpError422):
message = _("Parent resource is necessary for VDU definition.")
class InvalidScaleAspectId(SolHttpError400):
message = _("Invalid aspectId '%(aspect_id)s'.")
class InvalidScaleNumberOfSteps(SolHttpError400):
message = _("Invalid numberOfSteps '%(num_steps)d'.")
class DeltaMissingInVnfd(SolHttpError400):
message = _("Delta '%(delta)s' is not defined in "
"VduScalingAspectDeltas.")


@ -117,78 +117,239 @@ def make_lcmocc_notif_data(subsc, lcmocc, endpoint):
return notif_data
def _make_instantiate_lcmocc(lcmocc, inst, change_type):
def _make_affected_vnfc(vnfc, change_type):
affected_vnfc = objects.AffectedVnfcV2(
id=vnfc.id,
vduId=vnfc.vduId,
changeType=change_type,
computeResource=vnfc.computeResource
)
if vnfc.obj_attr_is_set('vnfcCpInfo'):
cp_ids = [cp.id for cp in vnfc.vnfcCpInfo]
affected_vnfc.affectedVnfcCpIds = cp_ids
if vnfc.obj_attr_is_set('storageResourceIds'):
str_ids = vnfc.storageResourceIds
if change_type == 'ADDED':
affected_vnfc.addedStorageResourceIds = str_ids
else: # 'REMOVED'
affected_vnfc.removedStorageResourceIds = str_ids
return affected_vnfc
def _make_affected_vl(vl, change_type):
affected_vl = objects.AffectedVirtualLinkV2(
id=vl.id,
vnfVirtualLinkDescId=vl.vnfVirtualLinkDescId,
changeType=change_type,
networkResource=vl.networkResource
)
if vl.obj_attr_is_set('vnfLinkPorts'):
affected_vl.vnfLinkPortIds = [port.id for port in vl.vnfLinkPorts]
return affected_vl
def _make_affected_vls_link_port_change(vls_saved, vls, common_vls):
affected_vls = []
for vl_id in common_vls:
old_ports = set()
new_ports = set()
for vl in vls_saved:
if vl.id == vl_id:
old_vl = vl
if vl.obj_attr_is_set('vnfLinkPorts'):
old_ports = {port.id for port in vl.vnfLinkPorts}
for vl in vls:
if vl.id == vl_id:
new_vl = vl
if vl.obj_attr_is_set('vnfLinkPorts'):
new_ports = {port.id for port in vl.vnfLinkPorts}
add_ports = new_ports - old_ports
rm_ports = old_ports - new_ports
# assume add_ports and rm_ports do not exist at the same time.
if add_ports:
affected_vl = objects.AffectedVirtualLinkV2(
id=new_vl.id,
vnfVirtualLinkDescId=new_vl.vnfVirtualLinkDescId,
changeType='LINK_PORT_ADDED',
networkResource=new_vl.networkResource,
vnfLinkPortIds=list(add_ports)
)
affected_vls.append(affected_vl)
elif rm_ports:
affected_vl = objects.AffectedVirtualLinkV2(
id=old_vl.id,
vnfVirtualLinkDescId=old_vl.vnfVirtualLinkDescId,
changeType='LINK_PORT_REMOVED',
networkResource=old_vl.networkResource,
vnfLinkPortIds=list(rm_ports)
)
affected_vls.append(affected_vl)
return affected_vls
def _make_affected_strg(strg, change_type):
return objects.AffectedVirtualStorageV2(
id=strg.id,
virtualStorageDescId=strg.virtualStorageDescId,
changeType=change_type,
storageResource=strg.storageResource
)
def _make_affected_ext_link_ports(inst_info_saved, inst_info):
affected_ext_link_ports = []
ext_vl_ports_saved = set()
ext_vl_ports = set()
if inst_info_saved.obj_attr_is_set('extVirtualLinkInfo'):
for ext_vl in inst_info_saved.extVirtualLinkInfo:
if ext_vl.obj_attr_is_set('extLinkPorts'):
ext_vl_ports_saved |= {port.id
for port in ext_vl.extLinkPorts}
if inst_info.obj_attr_is_set('extVirtualLinkInfo'):
for ext_vl in inst_info.extVirtualLinkInfo:
if ext_vl.obj_attr_is_set('extLinkPorts'):
ext_vl_ports |= {port.id
for port in ext_vl.extLinkPorts}
add_ext_vl_ports = ext_vl_ports - ext_vl_ports_saved
rm_ext_vl_ports = ext_vl_ports_saved - ext_vl_ports
if add_ext_vl_ports:
for ext_vl in inst_info.extVirtualLinkInfo:
if not ext_vl.obj_attr_is_set('extLinkPorts'):
continue
affected_ext_link_ports += [
objects.AffectedExtLinkPortV2(
id=port.id,
changeType='ADDED',
extCpInstanceId=port.cpInstanceId,
resourceHandle=port.resourceHandle
)
for port in ext_vl.extLinkPorts
if port.id in add_ext_vl_ports
]
if rm_ext_vl_ports:
for ext_vl in inst_info_saved.extVirtualLinkInfo:
if not ext_vl.obj_attr_is_set('extLinkPorts'):
continue
affected_ext_link_ports += [
objects.AffectedExtLinkPortV2(
id=port.id,
changeType='REMOVED',
extCpInstanceId=port.cpInstanceId,
resourceHandle=port.resourceHandle
)
for port in ext_vl.extLinkPorts
if port.id in rm_ext_vl_ports
]
return affected_ext_link_ports
def update_lcmocc(lcmocc, inst_saved, inst):
# make ResourceChanges of lcmocc from instantiatedVnfInfo.
# NOTE: grant related info such as resourceDefinitionId, zoneId
# and so on are not included in lcmocc since such info is not
# included in instantiatedVnfInfo.
if inst_saved.obj_attr_is_set('instantiatedVnfInfo'):
inst_info_saved = inst_saved.instantiatedVnfInfo
else:
# dummy
inst_info_saved = objects.VnfInstanceV2_InstantiatedVnfInfo()
inst_info = inst.instantiatedVnfInfo
lcmocc_vncs = []
if inst_info.obj_attr_is_set('vnfcResourceInfo'):
for inst_vnc in inst_info.vnfcResourceInfo:
lcmocc_vnc = objects.AffectedVnfcV2(
id=inst_vnc.id,
vduId=inst_vnc.vduId,
changeType=change_type,
computeResource=inst_vnc.computeResource
)
if inst_vnc.obj_attr_is_set('vnfcCpInfo'):
cp_ids = [cp.id for cp in inst_vnc.vnfcCpInfo]
lcmocc_vnc.affectedVnfcCpIds = cp_ids
if inst_vnc.obj_attr_is_set('storageResourceIds'):
str_ids = inst_vnc.storageResourceIds
if change_type == 'ADDED':
lcmocc_vnc.addedStorageResourceIds = str_ids
else: # 'REMOVED'
lcmocc_vnc.removedStorageResourceIds = str_ids
lcmocc_vncs.append(lcmocc_vnc)
# NOTE: objects may be re-created, so compare 'id' instead of the
# object itself.
def _calc_diff(attr):
# NOTE: instantiatedVnfInfo object is dict compat
objs_saved = set()
if inst_info_saved.obj_attr_is_set(attr):
objs_saved = {obj.id for obj in inst_info_saved[attr]}
objs = set()
if inst_info.obj_attr_is_set(attr):
objs = {obj.id for obj in inst_info[attr]}
lcmocc_vls = []
if inst_info.obj_attr_is_set('vnfVirtualLinkResourceInfo'):
for inst_vl in inst_info.vnfVirtualLinkResourceInfo:
lcmocc_vl = objects.AffectedVirtualLinkV2(
id=inst_vl.id,
vnfVirtualLinkDescId=inst_vl.vnfVirtualLinkDescId,
changeType=change_type,
networkResource=inst_vl.networkResource
)
if inst_vl.obj_attr_is_set('vnfLinkPorts'):
port_ids = [port.id for port in inst_vl.vnfLinkPorts]
lcmocc_vl.vnfLinkPortIds = port_ids
lcmocc_vls.append(lcmocc_vl)
# return removed_objs, added_objs, common_objs
return objs_saved - objs, objs - objs_saved, objs_saved & objs
lcmocc_strs = []
if inst_info.obj_attr_is_set('virtualStorageResourceInfo'):
for inst_str in inst_info.virtualStorageResourceInfo:
lcmocc_str = objects.AffectedVirtualStorageV2(
id=inst_str.id,
virtualStorageDescId=inst_str.virtualStorageDescId,
changeType=change_type,
storageResource=inst_str.storageResource
)
lcmocc_strs.append(lcmocc_str)
removed_vnfcs, added_vnfcs, _ = _calc_diff('vnfcResourceInfo')
affected_vnfcs = []
if removed_vnfcs:
affected_vnfcs += [_make_affected_vnfc(vnfc, 'REMOVED')
for vnfc in inst_info_saved.vnfcResourceInfo
if vnfc.id in removed_vnfcs]
if added_vnfcs:
affected_vnfcs += [_make_affected_vnfc(vnfc, 'ADDED')
for vnfc in inst_info.vnfcResourceInfo
if vnfc.id in added_vnfcs]
if lcmocc_vncs or lcmocc_vls or lcmocc_strs:
removed_vls, added_vls, common_vls = _calc_diff(
'vnfVirtualLinkResourceInfo')
affected_vls = []
if removed_vls:
affected_vls += [_make_affected_vl(vl, 'REMOVED')
for vl in inst_info_saved.vnfVirtualLinkResourceInfo
if vl.id in removed_vls]
if added_vls:
affected_vls += [_make_affected_vl(vl, 'ADDED')
for vl in inst_info.vnfVirtualLinkResourceInfo
if vl.id in added_vls]
if common_vls:
affected_vls += _make_affected_vls_link_port_change(
inst_info_saved.vnfVirtualLinkResourceInfo,
inst_info.vnfVirtualLinkResourceInfo, common_vls)
removed_mgd_vls, added_mgd_vls, common_mgd_vls = _calc_diff(
'extManagedVirtualLinkInfo')
if removed_mgd_vls:
affected_vls += [_make_affected_vl(vl, 'LINK_PORT_REMOVED')
for vl in inst_info_saved.extManagedVirtualLinkInfo
if vl.id in removed_mgd_vls]
if added_mgd_vls:
affected_vls += [_make_affected_vl(vl, 'LINK_PORT_ADDED')
for vl in inst_info.extManagedVirtualLinkInfo
if vl.id in added_mgd_vls]
if common_mgd_vls:
affected_vls += _make_affected_vls_link_port_change(
inst_info_saved.extManagedVirtualLinkInfo,
inst_info.extManagedVirtualLinkInfo, common_mgd_vls)
removed_strgs, added_strgs, _ = _calc_diff('virtualStorageResourceInfo')
affected_strgs = []
if removed_strgs:
affected_strgs += [
_make_affected_strg(strg, 'REMOVED')
for strg in inst_info_saved.virtualStorageResourceInfo
if strg.id in removed_strgs
]
if added_strgs:
affected_strgs += [_make_affected_strg(strg, 'ADDED')
for strg in inst_info.virtualStorageResourceInfo
if strg.id in added_strgs]
affected_ext_link_ports = _make_affected_ext_link_ports(
inst_info_saved, inst_info)
if (affected_vnfcs or affected_vls or affected_strgs or
affected_ext_link_ports):
change_info = objects.VnfLcmOpOccV2_ResourceChanges()
if lcmocc_vncs:
change_info.affectedVnfcs = lcmocc_vncs
if lcmocc_vls:
change_info.affectedVirtualLinks = lcmocc_vls
if lcmocc_strs:
change_info.affectedVirtualStorages = lcmocc_strs
if affected_vnfcs:
change_info.affectedVnfcs = affected_vnfcs
if affected_vls:
change_info.affectedVirtualLinks = affected_vls
if affected_strgs:
change_info.affectedVirtualStorages = affected_strgs
if affected_ext_link_ports:
change_info.affectedExtLinkPorts = affected_ext_link_ports
lcmocc.resourceChanges = change_info
def make_instantiate_lcmocc(lcmocc, inst):
_make_instantiate_lcmocc(lcmocc, inst, 'ADDED')
def make_terminate_lcmocc(lcmocc, inst):
_make_instantiate_lcmocc(lcmocc, inst, 'REMOVED')
def get_grant_req_and_grant(context, lcmocc):
grant_reqs = objects.GrantRequestV1.get_by_filter(context,
vnfLcmOpOccId=lcmocc.id)
@ -203,7 +364,7 @@ def get_grant_req_and_grant(context, lcmocc):
def check_lcmocc_in_progress(context, inst_id):
# if the controller or conductor executes an operation for the vnf
# instance (i.e. operationState is ...ING), other operation for
# the same vnf instance is exculed by the coordinator.
# the same vnf instance is excluded by the coordinator.
# check here is existence of lcmocc for the vnf instance with
# FAILED_TEMP operationState.
lcmoccs = objects.VnfLcmOpOccV2.get_by_filter(


@ -46,8 +46,11 @@ def make_inst_links(inst, endpoint):
links.instantiate = objects.Link(href=self_href + "/instantiate")
else: # 'INSTANTIATED'
links.terminate = objects.Link(href=self_href + "/terminate")
links.scale = objects.Link(href=self_href + "/scale")
# TODO(oda-g): add when the operation is supported
# links.scale = objects.Link(href = self_href + "/scale")
# links.heal = objects.Link(href=self_href + "/heal")
# links.changeExtConn = objects.Link(
# href=self_href + "/change_ext_conn")
# etc.
return links


@ -358,3 +358,59 @@ class Vnfd(object):
raise sol_ex.SolHttpError422(sol_detail=msg)
return script
def get_scale_vdu_and_num(self, flavour_id, aspect_id):
aspects = self.get_policy_values_by_type(flavour_id,
'tosca.policies.nfv.ScalingAspects')
delta = None
for aspect in aspects:
value = aspect['properties']['aspects'].get(aspect_id)
if value is not None:
# expect there is one delta.
# NOTE: Tacker does not support non-uniform deltas defined in
# ETSI NFV SOL001 8. Therefore, a uniform delta corresponding
# to number_of_instances can be set, and number_of_instances is
# the same regardless of scale_level.
delta = value['step_deltas'][0]
break
if delta is None:
return {}
aspect_deltas = self.get_policy_values_by_type(flavour_id,
'tosca.policies.nfv.VduScalingAspectDeltas')
vdu_num_inst = {}
for aspect_delta in aspect_deltas:
if aspect_delta.get('properties', {}).get('aspect') == aspect_id:
num_inst = (aspect_delta['properties']['deltas']
.get(delta, {}).get('number_of_instances'))
# NOTE: whether a 'delta' defined in ScalingAspects exists in
# VduScalingAspectDeltas is not checked when the vnf package
# is loaded; if it is missing, it is a mistake in the
# VNFD definition.
if num_inst is None:
raise sol_ex.DeltaMissingInVnfd(delta=delta)
for vdu_name in aspect_delta['targets']:
vdu_num_inst[vdu_name] = num_inst
return vdu_num_inst
def get_scale_info_from_inst_level(self, flavour_id, inst_level):
policies = self.get_policy_values_by_type(flavour_id,
'tosca.policies.nfv.InstantiationLevels')
for policy in policies:
return (policy['properties']['levels']
.get(inst_level, {})
.get('scale_info', {}))
return {}
def get_max_scale_level(self, flavour_id, aspect_id):
aspects = self.get_policy_values_by_type(flavour_id,
'tosca.policies.nfv.ScalingAspects')
for aspect in aspects:
value = aspect['properties']['aspects'].get(aspect_id)
if value is not None:
return value['max_scale_level']
# should not occur
return 0
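
For illustration, the shapes of the TOSCA policy values the three helpers above read, and the values they would return; the aspect, VDU, and level names below are made up.

# Made-up policy values in the shape returned by get_policy_values_by_type().
scaling_aspects = [{
    'properties': {'aspects': {
        'vdu1_aspect': {'max_scale_level': 2,
                        'step_deltas': ['delta_1']},  # one uniform delta
    }},
}]
vdu_scaling_aspect_deltas = [{
    'properties': {'aspect': 'vdu1_aspect',
                   'deltas': {'delta_1': {'number_of_instances': 1}}},
    'targets': ['VDU1'],
}]
instantiation_levels = [{
    'properties': {'levels': {
        'instantiation_level_1': {'scale_info': {
            'vdu1_aspect': {'scale_level': 0}}},
    }},
}]
# With a VNFD containing the above:
#   get_scale_vdu_and_num(flavour_id, 'vdu1_aspect')         -> {'VDU1': 1}
#   get_scale_info_from_inst_level(flavour_id, 'instantiation_level_1')
#       -> {'vdu1_aspect': {'scale_level': 0}}
#   get_max_scale_level(flavour_id, 'vdu1_aspect')           -> 2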


@ -43,8 +43,32 @@ class VnfLcmDriverV2(object):
self.nfvo_client = nfvo_client.NfvoClient()
def grant(self, context, lcmocc, inst, vnfd):
# grant exchange
# NOTE: the api_version of NFVO supposes 1.4.0 at the moment.
# make common part of grant_req among operations
grant_req = objects.GrantRequestV1(
vnfInstanceId=inst.id,
vnfLcmOpOccId=lcmocc.id,
vnfdId=inst.vnfdId,
operation=lcmocc.operation,
isAutomaticInvocation=lcmocc.isAutomaticInvocation
)
grant_req._links = objects.GrantRequestV1_Links(
vnfLcmOpOcc=objects.Link(
href=lcmocc_utils.lcmocc_href(lcmocc.id, self.endpoint)),
vnfInstance=objects.Link(
href=inst_utils.inst_href(inst.id, self.endpoint)))
# make operation specific part of grant_req and check request
# parameters if necessary.
method = getattr(self, "%s_%s" % (lcmocc.operation.lower(), 'grant'))
return method(context, lcmocc, inst, vnfd)
method(grant_req, lcmocc.operationParams, inst, vnfd)
# NOTE: if not granted, 403 error raised.
grant = self.nfvo_client.grant(context, grant_req)
return grant_req, grant
def post_grant(self, context, lcmocc, inst, grant_req, grant, vnfd):
method = getattr(self,
@ -84,7 +108,64 @@ class VnfLcmDriverV2(object):
LOG.debug("execute %s of %s success.", operation, script)
def _make_inst_info_common(self, lcmocc, inst_saved, inst, vnfd):
# make vim independent part of instantiatedVnfInfo.
# scaleStatus and maxScaleLevels at the moment.
inst_info = inst.instantiatedVnfInfo
req = lcmocc.operationParams
if lcmocc.operation == v2fields.LcmOperationType.INSTANTIATE:
# create scaleStatus and maxScaleLevels
flavour_id = req.flavourId
if req.obj_attr_is_set('instantiationLevelId'):
inst_level = req.instantiationLevelId
else:
inst_level = vnfd.get_default_instantiation_level(flavour_id)
# make scaleStatus from tosca.policies.nfv.InstantiationLevels
# definition.
scale_info = vnfd.get_scale_info_from_inst_level(flavour_id,
inst_level)
scale_status = [
objects.ScaleInfoV2(
aspectId=aspect_id,
scaleLevel=value['scale_level']
)
for aspect_id, value in scale_info.items()
]
max_scale_levels = [
objects.ScaleInfoV2(
aspectId=obj.aspectId,
scaleLevel=vnfd.get_max_scale_level(flavour_id,
obj.aspectId)
)
for obj in scale_status
]
if scale_status:
inst_info.scaleStatus = scale_status
inst_info.maxScaleLevels = max_scale_levels
elif lcmocc.operation != v2fields.LcmOperationType.TERMINATE:
inst_info_saved = inst_saved.instantiatedVnfInfo
if inst_info_saved.obj_attr_is_set('scaleStatus'):
inst_info.scaleStatus = inst_info_saved.scaleStatus
inst_info.maxScaleLevels = inst_info_saved.maxScaleLevels
if lcmocc.operation == v2fields.LcmOperationType.SCALE:
# adjust scaleStatus
num_steps = req.numberOfSteps
if req.type == 'SCALE_IN':
num_steps *= -1
for aspect_info in inst_info.scaleStatus:
if aspect_info.aspectId == req.aspectId:
aspect_info.scaleLevel += num_steps
break
def process(self, context, lcmocc, inst, grant_req, grant, vnfd):
# save inst to use updating lcmocc after process done
inst_saved = inst.obj_clone()
# perform preamble LCM script
req = lcmocc.operationParams
operation = "%s_%s" % (lcmocc.operation.lower(), 'start')
@ -104,6 +185,9 @@ class VnfLcmDriverV2(object):
self._exec_mgmt_driver_script(operation,
flavour_id, req, inst, grant_req, grant, vnfd)
self._make_inst_info_common(lcmocc, inst_saved, inst, vnfd)
lcmocc_utils.update_lcmocc(lcmocc, inst_saved, inst)
def rollback(self, context, lcmocc, inst, grant_req, grant, vnfd):
method = getattr(self,
"%s_%s" % (lcmocc.operation.lower(), 'rollback'),
@ -114,38 +198,60 @@ class VnfLcmDriverV2(object):
raise sol_ex.RollbackNotSupported(op=lcmocc.operation)
def _get_link_ports(self, inst_req):
names = []
names = set()
if inst_req.obj_attr_is_set('extVirtualLinks'):
for ext_vl in inst_req.extVirtualLinks:
for ext_cp in ext_vl.extCps:
for cp_config in ext_cp.cpConfig.values():
if cp_config.obj_attr_is_set('linkPortId'):
names.append(ext_cp.cpdId)
names.add(ext_cp.cpdId)
if inst_req.obj_attr_is_set('extManagedVirtualLinks'):
for ext_mgd_vl in inst_req.extManagedVirtualLinks:
if ext_mgd_vl.obj_attr_is_set('vnfLinkPort'):
names.append(ext_mgd_vl.vnfVirtualLinkDescId)
names.add(ext_mgd_vl.vnfVirtualLinkDescId)
return names
def instantiate_grant(self, context, lcmocc, inst, vnfd):
req = lcmocc.operationParams
def _make_res_def_for_new_vdu(self, vdu_name, num_inst, cp_names,
storage_names):
# common part of instantiate and scale out
add_reses = []
for _ in range(num_inst):
vdu_res_id = uuidutils.generate_uuid()
add_reses.append(
objects.ResourceDefinitionV1(
id=vdu_res_id,
type='COMPUTE',
resourceTemplateId=vdu_name
)
)
for cp_name in cp_names:
add_reses.append(
objects.ResourceDefinitionV1(
id="{}-{}".format(cp_name, vdu_res_id),
type='LINKPORT',
resourceTemplateId=cp_name
)
)
for storage_name in storage_names:
add_reses.append(
objects.ResourceDefinitionV1(
id="{}-{}".format(storage_name, vdu_res_id),
type='STORAGE',
resourceTemplateId=storage_name
)
)
return add_reses
def instantiate_grant(self, grant_req, req, inst, vnfd):
flavour_id = req.flavourId
if vnfd.get_vnfd_flavour(flavour_id) is None:
raise sol_ex.FlavourIdNotFound(flavour_id=flavour_id)
# grant exchange
# NOTE: the api_version of NFVO supposes 1.4.0 at the moment.
grant_req = objects.GrantRequestV1(
vnfInstanceId=inst.id,
vnfLcmOpOccId=lcmocc.id,
vnfdId=inst.vnfdId,
flavourId=flavour_id,
operation=lcmocc.operation,
isAutomaticInvocation=lcmocc.isAutomaticInvocation
)
grant_req.flavourId = flavour_id
if req.obj_attr_is_set('instantiationLevelId'):
inst_level = req.instantiationLevelId
@ -161,30 +267,8 @@ class VnfLcmDriverV2(object):
vdu_cp_names = vnfd.get_vdu_cps(flavour_id, name)
vdu_storage_names = vnfd.get_vdu_storages(node)
for _ in range(num):
res_def = objects.ResourceDefinitionV1(
id=uuidutils.generate_uuid(),
type='COMPUTE',
resourceTemplateId=name)
add_reses.append(res_def)
for cp_name in vdu_cp_names:
if cp_name in link_port_names:
continue
for _ in range(num):
res_def = objects.ResourceDefinitionV1(
id=uuidutils.generate_uuid(),
type='LINKPORT',
resourceTemplateId=cp_name)
add_reses.append(res_def)
for storage_name in vdu_storage_names:
for _ in range(num):
res_def = objects.ResourceDefinitionV1(
id=uuidutils.generate_uuid(),
type='STORAGE',
resourceTemplateId=storage_name)
add_reses.append(res_def)
add_reses += self._make_res_def_for_new_vdu(name, num,
set(vdu_cp_names) - link_port_names, vdu_storage_names)
ext_mgd_vls = []
if req.obj_attr_is_set('extManagedVirtualLinks'):
@ -232,17 +316,6 @@ class VnfLcmDriverV2(object):
if req.obj_attr_is_set('additionalParams'):
grant_req.additionalParams = req.additionalParams
grant_req._links = objects.GrantRequestV1_Links(
vnfLcmOpOcc=objects.Link(
href=lcmocc_utils.lcmocc_href(lcmocc.id, self.endpoint)),
vnfInstance=objects.Link(
href=inst_utils.inst_href(inst.id, self.endpoint)))
# NOTE: if not granted, 403 error raised.
grant = self.nfvo_client.grant(context, grant_req)
return grant_req, grant
def instantiate_post_grant(self, context, lcmocc, inst, grant_req,
grant, vnfd):
# set inst vimConnectionInfo
@ -288,7 +361,6 @@ class VnfLcmDriverV2(object):
raise sol_ex.SolException(sol_detail='not support vim type')
inst.instantiationState = 'INSTANTIATED'
lcmocc_utils.make_instantiate_lcmocc(lcmocc, inst)
def instantiate_rollback(self, context, lcmocc, inst, grant_req,
grant, vnfd):
@ -301,71 +373,63 @@ class VnfLcmDriverV2(object):
# only support openstack at the moment
raise sol_ex.SolException(sol_detail='not support vim type')
def terminate_grant(self, context, lcmocc, inst, vnfd):
# grant exchange
# NOTE: the api_version of NFVO supposes 1.4.0 at the moment.
grant_req = objects.GrantRequestV1(
vnfInstanceId=inst.id,
vnfLcmOpOccId=lcmocc.id,
vnfdId=inst.vnfdId,
operation=lcmocc.operation,
isAutomaticInvocation=lcmocc.isAutomaticInvocation
)
inst_info = inst.instantiatedVnfInfo
def _make_res_def_for_remove_vnfcs(self, inst_info, inst_vnfcs):
# common part of terminate and scale in
rm_reses = []
vnfc_cps = {}
if inst_info.obj_attr_is_set('vnfcResourceInfo'):
for inst_vnc in inst_info.vnfcResourceInfo:
res_def = objects.ResourceDefinitionV1(
id=uuidutils.generate_uuid(),
for inst_vnfc in inst_vnfcs:
vdu_res_id = uuidutils.generate_uuid()
rm_reses.append(
objects.ResourceDefinitionV1(
id=vdu_res_id,
type='COMPUTE',
resourceTemplateId=inst_vnc.vduId,
resource=inst_vnc.computeResource)
rm_reses.append(res_def)
resourceTemplateId=inst_vnfc.vduId,
resource=inst_vnfc.computeResource
)
)
if inst_vnc.obj_attr_is_set('vnfcCpInfo'):
for cp_info in inst_vnc.vnfcCpInfo:
if not (cp_info.obj_attr_is_set('vnfExtCpId') or
cp_info.obj_attr_is_set('vnfLinkPortId')):
# it means extLinkPorts of extVirtualLinks was
# specified. so it is not the resource to be
# deleted.
continue
res_def = objects.ResourceDefinitionV1(
id=uuidutils.generate_uuid(),
resourceTemplateId=cp_info.cpdId,
type='LINKPORT')
rm_reses.append(res_def)
if cp_info.obj_attr_is_set('vnfExtCpId'):
vnfc_cps[cp_info.vnfExtCpId] = res_def
else: # vnfLinkPortId
vnfc_cps[cp_info.vnfLinkPortId] = res_def
if inst_vnfc.obj_attr_is_set('vnfcCpInfo'):
for cp_info in inst_vnfc.vnfcCpInfo:
if not (cp_info.obj_attr_is_set('vnfExtCpId') or
cp_info.obj_attr_is_set('vnfLinkPortId')):
# it means extLinkPorts of extVirtualLinks was
# specified, so it is not a resource to be
# deleted.
continue
res_def = objects.ResourceDefinitionV1(
id="{}-{}".format(cp_info.cpdId, vdu_res_id),
resourceTemplateId=cp_info.cpdId,
type='LINKPORT')
rm_reses.append(res_def)
if cp_info.obj_attr_is_set('vnfExtCpId'):
vnfc_cps[cp_info.vnfExtCpId] = res_def
else: # vnfLinkPortId
vnfc_cps[cp_info.vnfLinkPortId] = res_def
if inst_vnfc.obj_attr_is_set('storageResourceIds'):
for storage_id in inst_vnfc.storageResourceIds:
for inst_str in inst_info.virtualStorageResourceInfo:
if inst_str.id == storage_id:
str_name = inst_str.virtualStorageDescId
rm_reses.append(
objects.ResourceDefinitionV1(
id="{}-{}".format(str_name, vdu_res_id),
type='STORAGE',
resourceTemplateId=str_name,
resource=inst_str.storageResource
)
)
break
# fill resourceHandle of ports
if inst_info.obj_attr_is_set('vnfVirtualLinkResourceInfo'):
for inst_vl in inst_info.vnfVirtualLinkResourceInfo:
res_def = objects.ResourceDefinitionV1(
id=uuidutils.generate_uuid(),
type='VL',
resourceTemplateId=inst_vl.vnfVirtualLinkDescId,
resource=inst_vl.networkResource)
rm_reses.append(res_def)
if inst_vl.obj_attr_is_set('vnfLinkPorts'):
for port in inst_vl.vnfLinkPorts:
if port.id in vnfc_cps:
res_def = vnfc_cps[port.id]
res_def.resource = port.resourceHandle
if inst_info.obj_attr_is_set('virtualStorageResourceInfo'):
for inst_str in inst_info.virtualStorageResourceInfo:
res_def = objects.ResourceDefinitionV1(
id=uuidutils.generate_uuid(),
type='STORAGE',
resourceTemplateId=inst_str.virtualStorageDescId,
resource=inst_str.storageResource)
rm_reses.append(res_def)
if inst_info.obj_attr_is_set('extVirtualLinkInfo'):
for ext_vl in inst_info.extVirtualLinkInfo:
if ext_vl.obj_attr_is_set('extLinkPorts'):
@ -384,20 +448,29 @@ class VnfLcmDriverV2(object):
res_def = vnfc_cps[port.id]
res_def.resource = port.resourceHandle
return rm_reses
def terminate_grant(self, grant_req, req, inst, vnfd):
inst_info = inst.instantiatedVnfInfo
rm_reses = []
if inst_info.obj_attr_is_set('vnfcResourceInfo'):
rm_reses += self._make_res_def_for_remove_vnfcs(
inst_info, inst_info.vnfcResourceInfo)
if inst_info.obj_attr_is_set('vnfVirtualLinkResourceInfo'):
for inst_vl in inst_info.vnfVirtualLinkResourceInfo:
rm_reses.append(
objects.ResourceDefinitionV1(
id=uuidutils.generate_uuid(),
type='VL',
resourceTemplateId=inst_vl.vnfVirtualLinkDescId,
resource=inst_vl.networkResource
)
)
if rm_reses:
grant_req.removeResources = rm_reses
grant_req._links = objects.GrantRequestV1_Links(
vnfLcmOpOcc=objects.Link(
href=lcmocc_utils.lcmocc_href(lcmocc.id, self.endpoint)),
vnfInstance=objects.Link(
href=inst_utils.inst_href(inst.id, self.endpoint)))
# NOTE: if not granted, 403 error raised.
grant_res = self.nfvo_client.grant(context, grant_req)
return grant_req, grant_res
def terminate_process(self, context, lcmocc, inst, grant_req,
grant, vnfd):
req = lcmocc.operationParams
@ -410,7 +483,6 @@ class VnfLcmDriverV2(object):
raise sol_ex.SolException(sol_detail='not support vim type')
inst.instantiationState = 'NOT_INSTANTIATED'
lcmocc_utils.make_terminate_lcmocc(lcmocc, inst)
# reset instantiatedVnfInfo
# NOTE: reset after update lcmocc
@ -424,3 +496,113 @@ class VnfLcmDriverV2(object):
# reset vimConnectionInfo
inst.vimConnectionInfo = {}
def scale_grant(self, grant_req, req, inst, vnfd):
flavour_id = inst.instantiatedVnfInfo.flavourId
scale_type = req.type
aspect_id = req.aspectId
num_steps = req.numberOfSteps
vdu_num_inst = vnfd.get_scale_vdu_and_num(flavour_id, aspect_id)
if not vdu_num_inst:
# should not occur. just check for consistency.
raise sol_ex.InvalidScaleAspectId(aspect_id=aspect_id)
if scale_type == 'SCALE_OUT':
self._make_scale_out_grant_request(grant_req, inst, num_steps,
vdu_num_inst)
else:
self._make_scale_in_grant_request(grant_req, inst, num_steps,
vdu_num_inst)
if req.obj_attr_is_set('additionalParams'):
grant_req.additionalParams = req.additionalParams
def _make_scale_out_grant_request(self, grant_req, inst, num_steps,
vdu_num_inst):
inst_info = inst.instantiatedVnfInfo
add_reses = []
# get one of vnfc for the vdu from inst.instantiatedVnfInfo
vdu_sample = {}
for vdu_name in vdu_num_inst.keys():
for inst_vnfc in inst_info.vnfcResourceInfo:
if inst_vnfc.vduId == vdu_name:
vdu_sample[vdu_name] = inst_vnfc
break
for vdu_name, inst_vnfc in vdu_sample.items():
num_inst = vdu_num_inst[vdu_name] * num_steps
vdu_cp_names = []
if inst_vnfc.obj_attr_is_set('vnfcCpInfo'):
# NOTE: it is expected that there are only dynamic ports
# for VDUs for which scaling is enabled.
vdu_cp_names = [cp_info.cpdId
for cp_info in inst_vnfc.vnfcCpInfo]
vdu_storage_names = []
if inst_vnfc.obj_attr_is_set('storageResourceIds'):
for storage_id in inst_vnfc.storageResourceIds:
for storage_res in inst_info.virtualStorageResourceInfo:
if storage_res.id == storage_id:
vdu_storage_names.append(
storage_res.virtualStorageDescId)
break
add_reses += self._make_res_def_for_new_vdu(vdu_name,
num_inst, vdu_cp_names, vdu_storage_names)
if add_reses:
grant_req.addResources = add_reses
def _make_scale_in_grant_request(self, grant_req, inst, num_steps,
vdu_num_inst):
inst_info = inst.instantiatedVnfInfo
rm_vnfcs = []
# select remove VDUs
# NOTE: the scale-in specification of the tacker SOL003 v2 API is
# that newer VDUs are selected for removal.
# It is expected that vnfcResourceInfo is sorted by the creation_time
# of the VDU, newest first.
for vdu_name, num_inst in vdu_num_inst.items():
num_inst = num_inst * num_steps
count = 0
for inst_vnfc in inst_info.vnfcResourceInfo:
if inst_vnfc.vduId == vdu_name:
rm_vnfcs.append(inst_vnfc)
count += 1
if count == num_inst:
break
rm_reses = self._make_res_def_for_remove_vnfcs(inst_info, rm_vnfcs)
if rm_reses:
grant_req.removeResources = rm_reses
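
A small standalone illustration of the selection performed above, with made-up vnfc entries assumed to be sorted newest first:

# Illustrative only: pick the newest vnfcs per VDU, mirroring the loop above.
vnfc_resource_info = [            # assumed sorted newest first
    {'id': 'vnfc-3', 'vduId': 'VDU1'},
    {'id': 'vnfc-2', 'vduId': 'VDU1'},
    {'id': 'vnfc-1', 'vduId': 'VDU1'},
]
vdu_num_inst = {'VDU1': 1}        # one instance removed per scale step
num_steps = 2

rm_vnfcs = []
for vdu_name, num_inst in vdu_num_inst.items():
    num_inst = num_inst * num_steps
    count = 0
    for vnfc in vnfc_resource_info:
        if vnfc['vduId'] == vdu_name:
            rm_vnfcs.append(vnfc['id'])
            count += 1
            if count == num_inst:
                break
print(rm_vnfcs)  # ['vnfc-3', 'vnfc-2']: the two newest VDU1 instances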
def scale_process(self, context, lcmocc, inst, grant_req,
grant, vnfd):
req = lcmocc.operationParams
vim_info = inst_utils.select_vim_info(inst.vimConnectionInfo)
if vim_info.vimType == 'ETSINFV.OPENSTACK_KEYSTONE.V_3':
driver = openstack.Openstack()
driver.scale(req, inst, grant_req, grant, vnfd)
else:
# only support openstack at the moment
raise sol_ex.SolException(sol_detail='not support vim type')
def scale_rollback(self, context, lcmocc, inst, grant_req,
grant, vnfd):
req = lcmocc.operationParams
if req.type == 'SCALE_IN':
raise sol_ex.RollbackNotSupported(op='SCALE IN')
vim_info = inst_utils.select_vim_info(inst.vimConnectionInfo)
if vim_info.vimType == 'ETSINFV.OPENSTACK_KEYSTONE.V_3':
driver = openstack.Openstack()
driver.scale_rollback(req, inst, grant_req, grant, vnfd)
else:
# only support openstack at the moment
raise sol_ex.SolException(sol_detail='not support vim type')


@ -149,6 +149,21 @@ class VnfLcmControllerV2(sol_wsgi.SolAPIController):
self.endpoint)
return sol_wsgi.SolResponse(204, None)
def _new_lcmocc(self, inst_id, operation, req_body):
now = datetime.utcnow()
lcmocc = objects.VnfLcmOpOccV2(
id=uuidutils.generate_uuid(),
operationState=v2fields.LcmOperationStateType.STARTING,
stateEnteredTime=now,
startTime=now,
vnfInstanceId=inst_id,
operation=operation,
isAutomaticInvocation=False,
isCancelPending=False,
operationParams=req_body)
return lcmocc
@validator.schema(schema.InstantiateVnfRequest_V200, '2.0.0')
@coordinate.lock_vnf_instance('{id}')
def instantiate(self, request, id, body):
@ -160,17 +175,8 @@ class VnfLcmControllerV2(sol_wsgi.SolAPIController):
lcmocc_utils.check_lcmocc_in_progress(context, id)
now = datetime.utcnow()
lcmocc = objects.VnfLcmOpOccV2(
id=uuidutils.generate_uuid(),
operationState=v2fields.LcmOperationStateType.STARTING,
stateEnteredTime=now,
startTime=now,
vnfInstanceId=id,
operation=v2fields.LcmOperationType.INSTANTIATE,
isAutomaticInvocation=False,
isCancelPending=False,
operationParams=body)
lcmocc = self._new_lcmocc(id, v2fields.LcmOperationType.INSTANTIATE,
body)
req_param = lcmocc.operationParams
# if there is partial vimConnectionInfo check and fulfill here.
@ -203,18 +209,63 @@ class VnfLcmControllerV2(sol_wsgi.SolAPIController):
lcmocc_utils.check_lcmocc_in_progress(context, id)
now = datetime.utcnow()
lcmocc = objects.VnfLcmOpOccV2(
id=uuidutils.generate_uuid(),
operationState=v2fields.LcmOperationStateType.STARTING,
stateEnteredTime=now,
startTime=now,
vnfInstanceId=id,
operation=v2fields.LcmOperationType.TERMINATE,
isAutomaticInvocation=False,
isCancelPending=False,
operationParams=body)
lcmocc = self._new_lcmocc(id, v2fields.LcmOperationType.TERMINATE,
body)
lcmocc.create(context)
self.conductor_rpc.start_lcm_op(context, lcmocc.id)
location = lcmocc_utils.lcmocc_href(lcmocc.id, self.endpoint)
return sol_wsgi.SolResponse(202, None, location=location)
def _get_current_scale_level(self, inst, aspect_id):
if (inst.obj_attr_is_set('instantiatedVnfInfo') and
inst.instantiatedVnfInfo.obj_attr_is_set('scaleStatus')):
for scale_info in inst.instantiatedVnfInfo.scaleStatus:
if scale_info.aspectId == aspect_id:
return scale_info.scaleLevel
def _get_max_scale_level(self, inst, aspect_id):
if (inst.obj_attr_is_set('instantiatedVnfInfo') and
inst.instantiatedVnfInfo.obj_attr_is_set('maxScaleLevels')):
for scale_info in inst.instantiatedVnfInfo.maxScaleLevels:
if scale_info.aspectId == aspect_id:
return scale_info.scaleLevel
@validator.schema(schema.ScaleVnfRequest_V200, '2.0.0')
@coordinate.lock_vnf_instance('{id}')
def scale(self, request, id, body):
context = request.context
inst = inst_utils.get_inst(context, id)
if inst.instantiationState != 'INSTANTIATED':
raise sol_ex.VnfInstanceIsNotInstantiated(inst_id=id)
lcmocc_utils.check_lcmocc_in_progress(context, id)
# check parameters
aspect_id = body['aspectId']
if 'numberOfSteps' not in body:
# set default value (1) defined by SOL specification for
# the convenience of the following methods.
body['numberOfSteps'] = 1
scale_level = self._get_current_scale_level(inst, aspect_id)
max_scale_level = self._get_max_scale_level(inst, aspect_id)
if scale_level is None or max_scale_level is None:
raise sol_ex.InvalidScaleAspectId(aspect_id=aspect_id)
num_steps = body['numberOfSteps']
if body['type'] == 'SCALE_IN':
num_steps *= -1
scale_level += num_steps
if scale_level < 0 or scale_level > max_scale_level:
raise sol_ex.InvalidScaleNumberOfSteps(
num_steps=body['numberOfSteps'])
lcmocc = self._new_lcmocc(id, v2fields.LcmOperationType.SCALE,
body)
lcmocc.create(context)
self.conductor_rpc.start_lcm_op(context, lcmocc.id)
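
To make the bound check above concrete, a standalone sketch of the same arithmetic with illustrative values:

def check_scale_bounds(current_level, max_level, scale_type, num_steps=1):
    # Mirrors the check in scale(): SCALE_IN moves the level down,
    # SCALE_OUT moves it up; the result must stay within [0, max_level].
    if scale_type == 'SCALE_IN':
        num_steps *= -1
    new_level = current_level + num_steps
    return 0 <= new_level <= max_level

assert check_scale_bounds(1, 2, 'SCALE_OUT', 1) is True
assert check_scale_bounds(1, 2, 'SCALE_OUT', 2) is False  # would exceed max
assert check_scale_bounds(1, 2, 'SCALE_IN', 2) is False   # would drop below 0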


@ -123,6 +123,22 @@ class HeatClient(object):
"DELETE_COMPLETE", "DELETE_IN_PROGRESS", "DELETE_FAILED",
none_is_done=True)
def get_parameters(self, stack_name):
path = "stacks/{}".format(stack_name)
resp, body = self.client.do_request(path, "GET",
expected_status=[200])
return body["stack"]["parameters"]
def mark_unhealthy(self, stack_id, resource_name):
path = "stacks/{}/resources/{}".format(stack_id, resource_name)
fields = {
"mark_unhealthy": True,
"resource_status_reason": "marked by tacker"
}
resp, body = self.client.do_request(path, "PATCH",
expected_status=[200], body=fields)
def get_reses_by_types(heat_reses, types):
return [res for res in heat_reses if res['resource_type'] in types]
@ -146,3 +162,19 @@ def get_port_reses(heat_reses):
def get_stack_name(inst):
return "vnf-" + inst.id
def get_resource_stack_id(heat_res):
# return the form "stack_name/stack_id"
for link in heat_res.get('links', []):
if link['rel'] == 'stack':
items = link['href'].split('/')
return "{}/{}".format(items[-2], items[-1])
def get_parent_resource(heat_res, heat_reses):
parent = heat_res.get('parent_resource')
if parent:
for res in heat_reses:
if res['resource_name'] == parent:
return res
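
A quick sketch of the 'stack' link parsing used above, with a made-up heat resource entry; the href is illustrative only.

def _resource_stack_id(heat_res):
    # same logic as get_resource_stack_id() above
    for link in heat_res.get('links', []):
        if link['rel'] == 'stack':
            items = link['href'].split('/')
            return "{}/{}".format(items[-2], items[-1])

heat_res = {
    'resource_name': 'VDU1',
    'links': [{'rel': 'stack',
               'href': ('http://heat-api/v1/tenant/stacks/'
                        'vnf-xxx-VDU1-abcd/1111-2222')}],
}
print(_resource_stack_id(heat_res))  # vnf-xxx-VDU1-abcd/1111-2222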


@ -14,7 +14,9 @@
# under the License.
from dateutil import parser
import eventlet
import json
import os
import pickle
import subprocess
@ -28,12 +30,40 @@ from tacker.sol_refactored.common import vnf_instance_utils as inst_utils
from tacker.sol_refactored.infra_drivers.openstack import heat_utils
from tacker.sol_refactored.infra_drivers.openstack import userdata_default
from tacker.sol_refactored import objects
from tacker.sol_refactored.objects.v2 import fields as v2fields
LOG = logging.getLogger(__name__)
CONF = config.CONF
LINK_PORT_PREFIX = 'req-'
CP_INFO_PREFIX = 'cp-'
# Methods related to the ids of resources in instantiatedVnfInfo.
# NOTE: instantiatedVnfInfo is re-created in each operation.
# The ids of resources in instantiatedVnfInfo are based on heat
# resource-ids so that they do not change on re-creation.
# Some ids are the same as the heat resource-id and some are a
# combination of a prefix and other ids.
def _make_link_port_id(link_port_id):
# prepend 'req-' to distinguish from ports which are
# created by heat.
return '{}{}'.format(LINK_PORT_PREFIX, link_port_id)
def _is_link_port(link_port_id):
return link_port_id.startswith(LINK_PORT_PREFIX)
def _make_cp_info_id(link_port_id):
return '{}{}'.format(CP_INFO_PREFIX, link_port_id)
def _make_combination_id(a, b):
return '{}-{}'.format(a, b)
class Openstack(object):
@ -42,7 +72,7 @@ class Openstack(object):
def instantiate(self, req, inst, grant_req, grant, vnfd):
# make HOT
fields = self.make_hot(req, inst, grant_req, grant, vnfd)
fields = self._make_hot(req, inst, grant_req, grant, vnfd)
LOG.debug("stack fields: %s", fields)
@ -61,10 +91,115 @@ class Openstack(object):
heat_reses = heat_client.get_resources(stack_name)
# make instantiated_vnf_info
self.make_instantiated_vnf_info(req, inst, grant, vnfd, heat_reses)
self._make_instantiated_vnf_info(req, inst, grant_req, grant, vnfd,
heat_reses)
def instantiate_rollback(self, req, inst, grant_req, grant, vnfd):
vim_info = inst_utils.select_vim_info(inst.vimConnectionInfo)
heat_client = heat_utils.HeatClient(vim_info)
stack_name = heat_utils.get_stack_name(inst)
status, _ = heat_client.get_status(stack_name)
if status is not None:
heat_client.delete_stack(stack_name)
def terminate(self, req, inst, grant_req, grant, vnfd):
if req.terminationType == 'GRACEFUL':
timeout = CONF.v2_vnfm.default_graceful_termination_timeout
if req.obj_attr_is_set('gracefulTerminationTimeout'):
timeout = req.gracefulTerminationTimeout
eventlet.sleep(timeout)
# delete stack
vim_info = inst_utils.select_vim_info(inst.vimConnectionInfo)
heat_client = heat_utils.HeatClient(vim_info)
stack_name = heat_utils.get_stack_name(inst)
heat_client.delete_stack(stack_name)
def _update_nfv_dict(self, heat_client, stack_name, fields):
parameters = heat_client.get_parameters(stack_name)
LOG.debug("ORIG parameters: %s", parameters)
# NOTE: parameters['nfv'] is string
orig_nfv_dict = json.loads(parameters.get('nfv', '{}'))
if 'nfv' in fields['parameters']:
fields['parameters']['nfv'] = inst_utils.json_merge_patch(
orig_nfv_dict, fields['parameters']['nfv'])
LOG.debug("NEW parameters: %s", fields['parameters'])
return fields
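
For reference, a minimal sketch of what the merge above does to the 'nfv' stack parameter; the dict layout ('VDU'/'desired_capacity') and the RFC 7396-style behaviour assumed for inst_utils.json_merge_patch are illustrative assumptions, not part of this change.

# Minimal RFC 7396-style merge sketch; assumes inst_utils.json_merge_patch
# behaves like this for the dicts involved here.
def _json_merge_patch(target, patch):
    if not isinstance(patch, dict):
        return patch
    result = dict(target) if isinstance(target, dict) else {}
    for key, value in patch.items():
        if value is None:
            result.pop(key, None)
        else:
            result[key] = _json_merge_patch(result.get(key, {}), value)
    return result

# Illustrative 'nfv' parameter layout (not taken from this change).
orig_nfv = {'VDU': {'VDU1': {'computeFlavourId': 'm1.tiny',
                             'desired_capacity': 1}}}
patch = {'VDU': {'VDU1': {'desired_capacity': 2}}}  # e.g. after scale out
print(_json_merge_patch(orig_nfv, patch))
# {'VDU': {'VDU1': {'computeFlavourId': 'm1.tiny', 'desired_capacity': 2}}}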
def scale(self, req, inst, grant_req, grant, vnfd):
vim_info = inst_utils.select_vim_info(inst.vimConnectionInfo)
heat_client = heat_utils.HeatClient(vim_info)
# make HOT
fields = self._make_hot(req, inst, grant_req, grant, vnfd)
LOG.debug("stack fields: %s", fields)
stack_name = fields.pop('stack_name')
# if scale in, mark the servers to be removed as unhealthy
if req.type == 'SCALE_IN':
vnfc_res_ids = [res_def.resource.resourceId
for res_def in grant_req.removeResources
if res_def.type == 'COMPUTE']
for vnfc in inst.instantiatedVnfInfo.vnfcResourceInfo:
if vnfc.computeResource.resourceId in vnfc_res_ids:
if 'parent_stack_id' not in vnfc.metadata:
# It means definition of VDU in the BaseHOT
# is inappropriate.
raise sol_ex.UnexpectedParentResourceDefinition()
heat_client.mark_unhealthy(
vnfc.metadata['parent_stack_id'],
vnfc.metadata['parent_resource_name'])
# update stack
fields = self._update_nfv_dict(heat_client, stack_name, fields)
heat_client.update_stack(stack_name, fields)
# get stack resource
heat_reses = heat_client.get_resources(stack_name)
# make instantiated_vnf_info
self._make_instantiated_vnf_info(req, inst, grant_req, grant, vnfd,
heat_reses)
def scale_rollback(self, req, inst, grant_req, grant, vnfd):
# NOTE: rollback is supported for scale out only
vim_info = inst_utils.select_vim_info(inst.vimConnectionInfo)
heat_client = heat_utils.HeatClient(vim_info)
stack_name = heat_utils.get_stack_name(inst)
heat_reses = heat_client.get_resources(stack_name)
# mark the servers added during scale out as unhealthy
vnfc_ids = [vnfc.id
for vnfc in inst.instantiatedVnfInfo.vnfcResourceInfo]
for res in heat_utils.get_server_reses(heat_reses):
if res['physical_resource_id'] not in vnfc_ids:
metadata = self._make_vnfc_metadata(res, heat_reses)
if 'parent_stack_id' not in metadata:
# It means definition of VDU in the BaseHOT
# is inappropriate.
raise sol_ex.UnexpectedParentResourceDefinition()
heat_client.mark_unhealthy(
metadata['parent_stack_id'],
metadata['parent_resource_name'])
# update (put back) 'desired_capacity' parameter
fields = self._update_nfv_dict(heat_client, stack_name,
userdata_default.DefaultUserData.scale_rollback(
req, inst, grant_req, grant, vnfd.csar_dir))
heat_client.update_stack(stack_name, fields)
# NOTE: it is not necessary to update instantiatedVnfInfo since it
# should be the same as before the scale API started.
def _make_hot(self, req, inst, grant_req, grant, vnfd):
if grant_req.operation == v2fields.LcmOperationType.INSTANTIATE:
flavour_id = req.flavourId
else:
flavour_id = inst.instantiatedVnfInfo.flavourId
def make_hot(self, req, inst, grant_req, grant, vnfd):
flavour_id = req.flavourId
hot_dict = vnfd.get_base_hot(flavour_id)
if not hot_dict:
raise sol_ex.BaseHOTNotDefined()
@ -77,15 +212,17 @@ class Openstack(object):
'lcm-operation-user-data-class')
if userdata is None and userdata_class is None:
LOG.debug("Processing default userdata instantiate")
LOG.debug("Processing default userdata %s", grant_req.operation)
# NOTE: objects used here are dict compat.
fields = userdata_default.DefaultUserData.instantiate(
req, inst, grant_req, grant, vnfd.csar_dir)
method = getattr(userdata_default.DefaultUserData,
grant_req.operation.lower())
fields = method(req, inst, grant_req, grant, vnfd.csar_dir)
elif userdata is None or userdata_class is None:
# Both must be specified.
raise sol_ex.UserdataMissing()
else:
LOG.debug("Processing %s %s instantiate", userdata, userdata_class)
LOG.debug("Processing %s %s %s", userdata, userdata_class,
grant_req.operation)
tmp_csar_dir = vnfd.make_tmp_csar_dir()
script_dict = {
@ -98,7 +235,7 @@ class Openstack(object):
script_path = os.path.join(
os.path.dirname(__file__), "userdata_main.py")
out = subprocess.run(["python3", script_path, "INSTANTIATE"],
out = subprocess.run(["python3", script_path],
input=pickle.dumps(script_dict),
capture_output=True)
@ -118,6 +255,20 @@ class Openstack(object):
return fields
def _get_checked_reses(self, nodes, reses):
names = list(nodes.keys())
def _check_res_in_vnfd(res):
if res['resource_name'] in names:
return True
else:
# should not occur. just check for consistency.
LOG.debug("%s not in VNFD definition.", res['resource_name'])
return False
return {res['physical_resource_id']: res
for res in reses if _check_res_in_vnfd(res)}
def _address_range_data_to_info(self, range_data):
obj = objects.ipOverEthernetAddressInfoV2_IpAddresses_AddressRange()
obj.minAddress = range_data.minAddress
@ -157,122 +308,10 @@ class Openstack(object):
return proto_info
def make_instantiated_vnf_info(self, req, inst, grant, vnfd, heat_reses):
flavour_id = req.flavourId
vim_info = inst_utils.select_vim_info(inst.vimConnectionInfo)
inst_vnf_info = objects.VnfInstanceV2_InstantiatedVnfInfo(
flavourId=flavour_id,
vnfState='STARTED',
)
# make virtualStorageResourceInfo
storages = vnfd.get_storage_nodes(flavour_id)
reses = heat_utils.get_storage_reses(heat_reses)
storage_infos = []
storage_info_to_heat_res = {}
for res in reses:
storage_name = res['resource_name']
if storage_name not in list(storages.keys()):
# should not occur. just check for consistency.
LOG.debug("%s not in VNFD storage definition.", storage_name)
continue
storage_info = objects.VirtualStorageResourceInfoV2(
id=uuidutils.generate_uuid(),
virtualStorageDescId=storage_name,
storageResource=objects.ResourceHandle(
resourceId=res['physical_resource_id'],
vimLevelResourceType=res['resource_type'],
vimConnectionId=vim_info.vimId,
)
)
storage_infos.append(storage_info)
storage_info_to_heat_res[storage_info.id] = res
if storage_infos:
inst_vnf_info.virtualStorageResourceInfo = storage_infos
# make vnfcResourceInfo
vdus = vnfd.get_vdu_nodes(flavour_id)
reses = heat_utils.get_server_reses(heat_reses)
vnfc_res_infos = []
vnfc_res_info_to_heat_res = {}
for res in reses:
vdu_name = res['resource_name']
if vdu_name not in list(vdus.keys()):
# should not occur. just check for consistency.
LOG.debug("%s not in VNFD VDU definition.", vdu_name)
continue
vnfc_res_info = objects.VnfcResourceInfoV2(
id=uuidutils.generate_uuid(),
vduId=vdu_name,
computeResource=objects.ResourceHandle(
resourceId=res['physical_resource_id'],
vimLevelResourceType=res['resource_type'],
vimConnectionId=vim_info.vimId,
),
)
vdu_cps = vnfd.get_vdu_cps(flavour_id, vdu_name)
cp_infos = []
for cp in vdu_cps:
cp_info = objects.VnfcResourceInfoV2_VnfcCpInfo(
id=uuidutils.generate_uuid(),
cpdId=cp,
# vnfExtCpId or vnfLinkPortId may be set later
)
cp_infos.append(cp_info)
if cp_infos:
vnfc_res_info.vnfcCpInfo = cp_infos
# find storages used by this
storage_ids = []
for storage_id, storage_res in storage_info_to_heat_res.items():
if (vdu_name in storage_res.get('required_by', []) and
res.get('parent_resource') ==
storage_res.get('parent_resource')):
storage_ids.append(storage_id)
if storage_ids:
vnfc_res_info.storageResourceIds = storage_ids
vnfc_res_infos.append(vnfc_res_info)
vnfc_res_info_to_heat_res[vnfc_res_info.id] = res
if vnfc_res_infos:
inst_vnf_info.vnfcResourceInfo = vnfc_res_infos
# make vnfVirtualLinkResourceInfo
vls = vnfd.get_virtual_link_nodes(flavour_id)
reses = heat_utils.get_network_reses(heat_reses)
vnf_vl_infos = []
vnf_vl_info_to_heat_res = {}
for res in reses:
vl_name = res['resource_name']
if vl_name not in list(vls.keys()):
# should not occur. just check for consistency.
LOG.debug("%s not in VNFD VL definition.", vl_name)
continue
vnf_vl_info = objects.VnfVirtualLinkResourceInfoV2(
id=uuidutils.generate_uuid(),
vnfVirtualLinkDescId=vl_name,
networkResource=objects.ResourceHandle(
resourceId=res['physical_resource_id'],
vimLevelResourceType=res['resource_type'],
vimConnectionId=vim_info.vimId,
),
# vnfLinkPorts set later
)
vnf_vl_infos.append(vnf_vl_info)
vnf_vl_info_to_heat_res[vnf_vl_info.id] = res
if vnf_vl_infos:
inst_vnf_info.vnfVirtualLinkResourceInfo = vnf_vl_infos
def _make_ext_vl_info_from_req(self, req, grant, ext_cp_infos):
# make extVirtualLinkInfo
ext_vls = []
req_ext_vls = []
ext_cp_infos = []
if grant.obj_attr_is_set('extVirtualLinks'):
req_ext_vls = grant.extVirtualLinks
elif req.obj_attr_is_set('extVirtualLinks'):
@ -282,7 +321,6 @@ class Openstack(object):
ext_vl = objects.ExtVirtualLinkInfoV2(
id=req_ext_vl.id,
resourceHandle=objects.ResourceHandle(
id=uuidutils.generate_uuid(),
resourceId=req_ext_vl.resourceId
),
currentVnfExtCpData=req_ext_vl.extCps
@ -301,11 +339,11 @@ class Openstack(object):
link_ports = []
for req_link_port in req_ext_vl.extLinkPorts:
link_port = objects.ExtLinkPortInfoV2(
id=req_link_port.id,
id=_make_link_port_id(req_link_port.id),
resourceHandle=req_link_port.resourceHandle,
)
ext_cp_info = objects.VnfExtCpInfoV2(
id=uuidutils.generate_uuid(),
id=_make_cp_info_id(link_port.id),
extLinkPortId=link_port.id
# associatedVnfcCpId may be set later
)
@ -315,7 +353,7 @@ class Openstack(object):
found = False
for key, cp_conf in ext_cp.cpConfig.items():
if (cp_conf.obj_attr_is_set('linkPortId') and
cp_conf.linkPortId == link_port.id):
cp_conf.linkPortId == req_link_port.id):
ext_cp_info.cpdId = ext_cp.cpdId
ext_cp_info.cpConfigId = key
# NOTE: cpProtocolInfo can't be filled
@ -329,10 +367,38 @@ class Openstack(object):
ext_vl.extLinkPorts = link_ports
if ext_vls:
inst_vnf_info.extVirtualLinkInfo = ext_vls
# ext_cp_infos set later
return ext_vls
def _make_ext_vl_info_from_inst(self, old_inst_vnf_info, ext_cp_infos):
# make extVirtualLinkInfo from old inst.extVirtualLinkInfo
ext_vls = []
old_cp_infos = []
if old_inst_vnf_info.obj_attr_is_set('extVirtualLinkInfo'):
ext_vls = old_inst_vnf_info.extVirtualLinkInfo
if old_inst_vnf_info.obj_attr_is_set('extCpInfo'):
old_cp_infos = old_inst_vnf_info.extCpInfo
for ext_vl in ext_vls:
if not ext_vl.obj_attr_is_set('extLinkPorts'):
continue
new_link_ports = []
for link_port in ext_vl.extLinkPorts:
if not _is_link_port(link_port.id):
# port created by heat. re-create later
continue
new_link_ports.append(link_port)
for ext_cp in old_cp_infos:
if ext_cp.id == link_port.cpInstanceId:
ext_cp_infos.append(ext_cp)
break
ext_vl.extLinkPorts = new_link_ports
return ext_vls
def _make_ext_mgd_vl_info_from_req(self, vnfd, flavour_id, req, grant):
# make extManagedVirtualLinkInfo
ext_mgd_vls = []
req_mgd_vls = []
@ -341,14 +407,20 @@ class Openstack(object):
elif req.obj_attr_is_set('extManagedVirtualLinks'):
req_mgd_vls = req.extManagedVirtualLinks
vls = vnfd.get_virtual_link_nodes(flavour_id)
for req_mgd_vl in req_mgd_vls:
vl_name = req_mgd_vl.vnfVirtualLinkDescId
if vl_name not in list(vls.keys()):
# should not occur. just check for consistency.
LOG.debug("%s not in VNFD VL definition.", vl_name)
continue
ext_mgd_vl = objects.ExtManagedVirtualLinkInfoV2(
id=req_mgd_vl.id,
vnfVirtualLinkDescId=req_mgd_vl.vnfVirtualLinkDescId,
vnfVirtualLinkDescId=vl_name,
networkResource=objects.ResourceHandle(
id=uuidutils.generate_uuid(),
resourceId=req_mgd_vl.resourceId
),
)
)
if req_mgd_vl.obj_attr_is_set('vimConnectionId'):
ext_mgd_vl.networkResource.vimConnectionId = (
@ -359,142 +431,266 @@ class Openstack(object):
ext_mgd_vls.append(ext_mgd_vl)
if not req_mgd_vl.obj_attr_is_set('vnfLinkPort'):
continue
link_ports = []
for req_link_port in req_mgd_vl.vnfLinkPort:
link_port = objects.VnfLinkPortInfoV2(
id=req_link_port.vnfLinkPortId,
resourceHandle=req_link_port.resourceHandle,
cpInstanceType='EXT_CP', # may be changed later
# cpInstanceId may set later
)
link_ports.append(link_port)
ext_mgd_vl.vnfLinkPort = link_ports
if ext_mgd_vls:
inst_vnf_info.extManagedVirtualLinkInfo = ext_mgd_vls
# make CP related infos
vdu_cps = vnfd.get_vducp_nodes(flavour_id)
reses = heat_utils.get_port_reses(heat_reses)
for res in reses:
cp_name = res['resource_name']
if cp_name not in list(vdu_cps.keys()):
# should not occur. just check for consistency.
LOG.debug("%s not in VNFD CP definition.", cp_name)
continue
vl_name = vnfd.get_vl_name_from_cp(flavour_id, vdu_cps[cp_name])
is_external = False
if vl_name is None: # extVirtualLink
is_external = True
# NOTE: the object is different from other vl types
vnf_link_port = objects.ExtLinkPortInfoV2(
id=uuidutils.generate_uuid(),
resourceHandle=objects.ResourceHandle(
resourceId=res['physical_resource_id'],
vimLevelResourceType=res['resource_type'],
vimConnectionId=vim_info.vimId,
if req_mgd_vl.obj_attr_is_set('vnfLinkPort'):
ext_mgd_vl.vnfLinkPort = [
objects.VnfLinkPortInfoV2(
id=_make_link_port_id(req_link_port.vnfLinkPortId),
resourceHandle=req_link_port.resourceHandle,
cpInstanceType='EXT_CP', # may be changed later
# cpInstanceId may be set later
)
)
ext_cp_info = objects.VnfExtCpInfoV2(
id=uuidutils.generate_uuid(),
extLinkPortId=vnf_link_port.id,
cpdId=cp_name
# associatedVnfcCpId may be set later
)
vnf_link_port.cpInstanceId = ext_cp_info.id
for req_link_port in req_mgd_vl.vnfLinkPort
]
found = False
for ext_vl in ext_vls:
for ext_cp in ext_vl.currentVnfExtCpData:
if ext_cp.cpdId == cp_name:
found = True
break
if found:
return ext_mgd_vls
def _make_ext_mgd_vl_info_from_inst(self, old_inst_vnf_info):
# make extManagedVirtualLinkInfo
ext_mgd_vls = []
if old_inst_vnf_info.obj_attr_is_set('extManagedVirtualLinkInfo'):
ext_mgd_vls = old_inst_vnf_info.extManagedVirtualLinkInfo
for ext_mgd_vl in ext_mgd_vls:
if ext_mgd_vl.obj_attr_is_set('vnfLinkPorts'):
ext_mgd_vl.vnfLinkPorts = [link_port
for link_port in ext_mgd_vl.vnfLinkPorts
if _is_link_port(link_port.id)]
return ext_mgd_vls
def _find_ext_vl_by_cp_name(self, cp_name, ext_vl_infos):
for ext_vl_info in ext_vl_infos:
for ext_cp_data in ext_vl_info.currentVnfExtCpData:
if ext_cp_data.cpdId == cp_name:
return ext_vl_info, ext_cp_data
return None, None
def _link_ext_port_info(self, ext_port_infos, ext_vl_infos, ext_cp_infos,
port_reses):
for ext_port_info in ext_port_infos:
res = port_reses[ext_port_info.id]
cp_name = res['resource_name']
ext_cp_info = objects.VnfExtCpInfoV2(
id=_make_cp_info_id(ext_port_info.id),
extLinkPortId=ext_port_info.id,
cpdId=cp_name
# associatedVnfcCpId may be set later
)
ext_port_info.cpInstanceId = ext_cp_info.id
ext_vl_info, ext_cp_data = self._find_ext_vl_by_cp_name(
cp_name, ext_vl_infos)
if ext_vl_info:
if ext_vl_info.obj_attr_is_set('extLinkPorts'):
ext_vl_info.extLinkPorts.append(ext_port_info)
else:
ext_vl_info.extLinkPorts = [ext_port_info]
for key, cp_conf in ext_cp_data.cpConfig.items():
# NOTE: it is assumed that there is one item
# (with cpProtocolData) of cpConfig at the moment.
if cp_conf.obj_attr_is_set('cpProtocolData'):
proto_infos = []
for proto_data in cp_conf.cpProtocolData:
proto_info = self._proto_data_to_info(
proto_data)
proto_infos.append(proto_info)
ext_cp_info.cpProtocolInfo = proto_infos
ext_cp_info.cpConfigId = key
break
if found:
if ext_vl.obj_attr_is_set('extLinkPorts'):
ext_vl.extLinkPorts.append(vnf_link_port)
else:
ext_vl.extLinkPorts = [vnf_link_port]
ext_cp_infos.append(ext_cp_info)
for key, cp_conf in ext_cp.cpConfig.items():
# NOTE: it is assumed that there is one item
# (with cpProtocolData) of cpConfig at the moment.
if cp_conf.obj_attr_is_set('cpProtocolData'):
proto_infos = []
for proto_data in cp_conf.cpProtocolData:
proto_info = self._proto_data_to_info(
proto_data)
proto_infos.append(proto_info)
ext_cp_info.cpProtocolInfo = proto_infos
ext_cp_info.cpConfigId = key
break
ext_cp_infos.append(ext_cp_info)
else:
# Internal VL or extManagedVirtualLink
vnf_link_port = objects.VnfLinkPortInfoV2(
id=uuidutils.generate_uuid(),
resourceHandle=objects.ResourceHandle(
resourceId=res['physical_resource_id'],
vimLevelResourceType=res['resource_type'],
vimConnectionId=vim_info.vimId,
cpInstanceType='EXT_CP' # may be changed later
)
)
is_internal = False
for vnf_vl_info in vnf_vl_infos:
if vnf_vl_info.vnfVirtualLinkDescId == vl_name:
# Internal VL
is_internal = True
if vnf_vl_info.obj_attr_is_set('vnfLinkPorts'):
vnf_vl_info.vnfLinkPorts.append(vnf_link_port)
else:
vnf_vl_info.vnfLinkPorts = [vnf_link_port]
if not is_internal:
# extManagedVirtualLink
for ext_mgd_vl in ext_mgd_vls:
# should be found
if ext_mgd_vl.vnfVirtualLinkDescId == vl_name:
if ext_mgd_vl.obj_attr_is_set('vnfLinkPorts'):
ext_mgd_vl.vnfLinkPorts.append(vnf_link_port)
else:
ext_mgd_vl.vnfLinkPorts = [vnf_link_port]
# link to vnfcResourceInfo.vnfcCpInfo
for vnfc_res_info in vnfc_res_infos:
if not vnfc_res_info.obj_attr_is_set('vnfcCpInfo'):
continue
vnfc_res = vnfc_res_info_to_heat_res[vnfc_res_info.id]
vdu_name = vnfc_res_info.vduId
if not (vdu_name in res.get('required_by', []) and
res.get('parent_resource') ==
vnfc_res.get('parent_resource')):
continue
def _find_vnfc_cp_info(self, port_res, vnfc_res_infos, server_reses):
for vnfc_res_info in vnfc_res_infos:
if not vnfc_res_info.obj_attr_is_set('vnfcCpInfo'):
continue
vnfc_res = server_reses[vnfc_res_info.id]
vdu_name = vnfc_res_info.vduId
cp_name = port_res['resource_name']
if (vdu_name in port_res.get('required_by', []) and
port_res.get('parent_resource') ==
vnfc_res.get('parent_resource')):
for vnfc_cp in vnfc_res_info.vnfcCpInfo:
if vnfc_cp.cpdId != cp_name:
continue
if is_external:
vnfc_cp.vnfExtCpId = vnf_link_port.cpInstanceId
for ext_cp_info in ext_cp_infos:
if ext_cp_info.extLinkPortId == vnf_link_port.id:
ext_cp_info.associatedVnfcCpId = vnfc_cp.id
break
else:
vnf_link_port.cpInstanceType = 'VNFC_CP'
vnf_link_port.cpInstanceId = vnfc_cp.id
vnfc_cp.vnfLinkPortId = vnf_link_port.id
break
if vnfc_cp.cpdId == cp_name:
return vnfc_cp
if ext_cp_infos:
inst_vnf_info.extCpInfo = ext_cp_infos
def _link_vnfc_cp_info(self, vnfc_res_infos, ext_port_infos,
vnf_port_infos, ext_cp_infos, server_reses, port_reses):
for ext_port_info in ext_port_infos:
port_res = port_reses[ext_port_info.id]
vnfc_cp = self._find_vnfc_cp_info(port_res, vnfc_res_infos,
server_reses)
if vnfc_cp:
# should be found
vnfc_cp.vnfExtCpId = ext_port_info.cpInstanceId
for ext_cp_info in ext_cp_infos:
if ext_cp_info.extLinkPortId == ext_port_info.id:
ext_cp_info.associatedVnfcCpId = vnfc_cp.id
break
for vnf_port_info in vnf_port_infos:
port_res = port_reses[vnf_port_info.id]
vnfc_cp = self._find_vnfc_cp_info(port_res, vnfc_res_infos,
server_reses)
if vnfc_cp:
# should be found
vnf_port_info.cpInstanceType = 'VNFC_CP'
vnf_port_info.cpInstanceId = vnfc_cp.id
vnfc_cp.vnfLinkPortId = vnf_port_info.id
def _make_vnfc_metadata(self, server_res, heat_reses):
metadata = {
'creation_time': server_res['creation_time'],
}
parent_res = heat_utils.get_parent_resource(server_res, heat_reses)
if parent_res:
metadata['parent_stack_id'] = (
heat_utils.get_resource_stack_id(parent_res))
metadata['parent_resource_name'] = parent_res['resource_name']
return metadata
def _make_instantiated_vnf_info(self, req, inst, grant_req, grant, vnfd,
heat_reses):
init = False
if grant_req.operation == v2fields.LcmOperationType.INSTANTIATE:
init = True
flavour_id = req.flavourId
else:
flavour_id = inst.instantiatedVnfInfo.flavourId
vim_info = inst_utils.select_vim_info(inst.vimConnectionInfo)
vducp_nodes = vnfd.get_vducp_nodes(flavour_id)
storage_reses = self._get_checked_reses(
vnfd.get_storage_nodes(flavour_id),
heat_utils.get_storage_reses(heat_reses))
server_reses = self._get_checked_reses(vnfd.get_vdu_nodes(flavour_id),
heat_utils.get_server_reses(heat_reses))
network_reses = self._get_checked_reses(
vnfd.get_virtual_link_nodes(flavour_id),
heat_utils.get_network_reses(heat_reses))
port_reses = self._get_checked_reses(vducp_nodes,
heat_utils.get_port_reses(heat_reses))
def _res_to_handle(res):
return objects.ResourceHandle(
resourceId=res['physical_resource_id'],
vimLevelResourceType=res['resource_type'],
vimConnectionId=vim_info.vimId)
storage_infos = [
objects.VirtualStorageResourceInfoV2(
id=res_id,
virtualStorageDescId=res['resource_name'],
storageResource=_res_to_handle(res)
)
for res_id, res in storage_reses.items()
]
vnfc_res_infos = [
objects.VnfcResourceInfoV2(
id=res_id,
vduId=res['resource_name'],
computeResource=_res_to_handle(res),
metadata=self._make_vnfc_metadata(res, heat_reses)
)
for res_id, res in server_reses.items()
]
for vnfc_res_info in vnfc_res_infos:
vdu_name = vnfc_res_info.vduId
server_res = server_reses[vnfc_res_info.id]
storage_ids = [storage_id
for storage_id, storage_res in storage_reses.items()
if (vdu_name in storage_res.get('required_by', []) and
server_res.get('parent_resource') ==
storage_res.get('parent_resource'))
]
if storage_ids:
vnfc_res_info.storageResourceIds = storage_ids
vdu_cps = vnfd.get_vdu_cps(flavour_id, vdu_name)
cp_infos = [
objects.VnfcResourceInfoV2_VnfcCpInfo(
id=_make_combination_id(cp, vnfc_res_info.id),
cpdId=cp,
# vnfExtCpId or vnfLinkPortId may be set later
)
for cp in vdu_cps
]
if cp_infos:
vnfc_res_info.vnfcCpInfo = cp_infos
vnf_vl_res_infos = [
objects.VnfVirtualLinkResourceInfoV2(
id=res_id,
vnfVirtualLinkDescId=res['resource_name'],
networkResource=_res_to_handle(res)
)
for res_id, res in network_reses.items()
]
ext_cp_infos = []
if init:
ext_vl_infos = self._make_ext_vl_info_from_req(
req, grant, ext_cp_infos)
ext_mgd_vl_infos = self._make_ext_mgd_vl_info_from_req(vnfd,
flavour_id, req, grant)
else:
old_inst_vnf_info = inst.instantiatedVnfInfo
ext_vl_infos = self._make_ext_vl_info_from_inst(
old_inst_vnf_info, ext_cp_infos)
ext_mgd_vl_infos = self._make_ext_mgd_vl_info_from_inst(
old_inst_vnf_info)
def _find_vl_name(port_res):
cp_name = port_res['resource_name']
return vnfd.get_vl_name_from_cp(flavour_id, vducp_nodes[cp_name])
ext_port_infos = [
objects.ExtLinkPortInfoV2(
id=res_id,
resourceHandle=_res_to_handle(res)
)
for res_id, res in port_reses.items()
if _find_vl_name(res) is None
]
self._link_ext_port_info(ext_port_infos, ext_vl_infos, ext_cp_infos,
port_reses)
vnf_port_infos = [
objects.VnfLinkPortInfoV2(
id=res_id,
resourceHandle=_res_to_handle(res),
cpInstanceType='EXT_CP' # may be changed later
)
for res_id, res in port_reses.items()
if _find_vl_name(res) is not None
]
vl_name_to_info = {info.vnfVirtualLinkDescId: info
for info in vnf_vl_res_infos + ext_mgd_vl_infos}
for vnf_port_info in vnf_port_infos:
port_res = port_reses[vnf_port_info.id]
vl_info = vl_name_to_info.get(_find_vl_name(port_res))
if vl_info is None:
# should not occur. just check for consistency.
continue
if vl_info.obj_attr_is_set('vnfLinkPorts'):
vl_info.vnfLinkPorts.append(vnf_port_info)
else:
vl_info.vnfLinkPorts = [vnf_port_info]
self._link_vnfc_cp_info(vnfc_res_infos, ext_port_infos,
vnf_port_infos, ext_cp_infos, server_reses, port_reses)
# NOTE: The following are not handled at the moment.
# - handle tosca.nodes.nfv.VnfExtCp type
@ -505,40 +701,52 @@ class Openstack(object):
# because the association of compute resource and port resource
# is not identified.
# make new instantiatedVnfInfo and replace
inst_vnf_info = objects.VnfInstanceV2_InstantiatedVnfInfo(
flavourId=flavour_id,
vnfState='STARTED',
)
if storage_infos:
inst_vnf_info.virtualStorageResourceInfo = storage_infos
if vnfc_res_infos:
# NOTE: the scale-in specification of the tacker SOL003 v2 API is
# that newer VDUs are selected for reduction. It is necessary to
# sort vnfc_res_infos at this point so that the conductor can
# choose VDUs sequentially from the head of the list when making
# the scale-in grant request.
def _get_key(vnfc):
return parser.isoparse(vnfc.metadata['creation_time'])
sorted_vnfc_res_infos = sorted(vnfc_res_infos, key=_get_key,
reverse=True)
inst_vnf_info.vnfcResourceInfo = sorted_vnfc_res_infos
if vnf_vl_res_infos:
inst_vnf_info.vnfVirtualLinkResourceInfo = vnf_vl_res_infos
if ext_vl_infos:
inst_vnf_info.extVirtualLinkInfo = ext_vl_infos
if ext_mgd_vl_infos:
inst_vnf_info.extManagedVirtualLinkInfo = ext_mgd_vl_infos
if ext_cp_infos:
inst_vnf_info.extCpInfo = ext_cp_infos
# make vnfcInfo
# NOTE: vnfcInfo only exists in SOL002
vnfc_infos = []
for vnfc_res_info in vnfc_res_infos:
vnfc_info = objects.VnfcInfoV2(
id=uuidutils.generate_uuid(),
vduId=vnfc_res_info.vduId,
vnfcResourceInfoId=vnfc_res_info.id,
vnfcState='STARTED'
)
vnfc_infos.append(vnfc_info)
if vnfc_infos:
inst_vnf_info.vnfcInfo = vnfc_infos
if vnfc_res_infos:
inst_vnf_info.vnfcInfo = [
objects.VnfcInfoV2(
id=_make_combination_id(vnfc_res_info.vduId,
vnfc_res_info.id),
vduId=vnfc_res_info.vduId,
vnfcResourceInfoId=vnfc_res_info.id,
vnfcState='STARTED'
)
for vnfc_res_info in sorted_vnfc_res_infos
]
inst.instantiatedVnfInfo = inst_vnf_info
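The descending sort above relies on the 'creation_time' kept in each vnfc's metadata, so the newest VDUs end up at the head of the list, which is where scale-in removals are taken from. A rough sketch of that ordering with made-up timestamps only (not taken from this change):

# Hedged sketch of the creation_time ordering; sample values only.
from dateutil import parser

metadata_times = ['2022-01-20T10:00:00Z', '2022-01-21T09:30:00Z']
ordered = sorted(metadata_times, key=parser.isoparse, reverse=True)
# ordered == ['2022-01-21T09:30:00Z', '2022-01-20T10:00:00Z']
# i.e. the newest VDU comes first and is chosen first for scale-in.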
def instantiate_rollback(self, req, inst, grant_req, grant, vnfd):
vim_info = inst_utils.select_vim_info(inst.vimConnectionInfo)
heat_client = heat_utils.HeatClient(vim_info)
stack_name = heat_utils.get_stack_name(inst)
status, _ = heat_client.get_status(stack_name)
if status is not None:
heat_client.delete_stack(stack_name)
def terminate(self, req, inst, grant_req, grant, vnfd):
if req.terminationType == 'GRACEFUL':
timeout = CONF.v2_vnfm.default_graceful_termination_timeout
if req.obj_attr_is_set('gracefulTerminationTimeout'):
timeout = req.gracefulTerminationTimeout
eventlet.sleep(timeout)
# delete stack
vim_info = inst_utils.select_vim_info(inst.vimConnectionInfo)
heat_client = heat_utils.HeatClient(vim_info)
stack_name = heat_utils.get_stack_name(inst)
heat_client.delete_stack(stack_name)

View File

@ -36,14 +36,18 @@ class DefaultUserData(userdata_utils.AbstractUserData):
if 'computeFlavourId' in vdu_value:
vdu_value['computeFlavourId'] = (
userdata_utils.get_param_flavor(
vdu_name, req, vnfd, grant))
vdu_name, flavour_id, vnfd, grant))
if 'vcImageId' in vdu_value:
vdu_value['vcImageId'] = userdata_utils.get_param_image(
vdu_name, req, vnfd, grant)
vdu_name, flavour_id, vnfd, grant)
if 'locationConstraints' in vdu_value:
vdu_value['locationConstraints'] = (
userdata_utils.get_param_zone(
vdu_name, grant_req, grant))
if 'desired_capacity' in vdu_value:
vdu_value['desired_capacity'] = (
userdata_utils.get_param_capacity(
vdu_name, inst, grant_req))
cps = nfv_dict.get('CP', {})
for cp_name, cp_value in cps.items():
@ -84,3 +88,59 @@ class DefaultUserData(userdata_utils.AbstractUserData):
fields['files'][key] = yaml.safe_dump(value)
return fields
@staticmethod
def scale(req, inst, grant_req, grant, tmp_csar_dir):
# scale is interested in 'desired_capacity' only.
# This method returns only the 'desired_capacity' part of the
# 'nfv' dict. The caller applies it to the existing 'nfv' dict
# as a json merge patch.
# NOTE: the complete 'nfv' dict cannot be made at this point
# since an InstantiateVnfRequest is necessary to make it.
vnfd = userdata_utils.get_vnfd(inst['vnfdId'], tmp_csar_dir)
flavour_id = inst['instantiatedVnfInfo']['flavourId']
hot_dict = vnfd.get_base_hot(flavour_id)
top_hot = hot_dict['template']
nfv_dict = userdata_utils.init_nfv_dict(top_hot)
vdus = nfv_dict.get('VDU', {})
new_vdus = {}
for vdu_name, vdu_value in vdus.items():
if 'desired_capacity' in vdu_value:
capacity = userdata_utils.get_param_capacity(
vdu_name, inst, grant_req)
new_vdus[vdu_name] = {'desired_capacity': capacity}
fields = {'parameters': {'nfv': {'VDU': new_vdus}}}
return fields
@staticmethod
def scale_rollback(req, inst, grant_req, grant, tmp_csar_dir):
# NOTE: This method is not called by a userdata script but
# is called by the openstack infra_driver directly now.
# It seems suitable to define it here since it is very
# similar to the scale method above.
vnfd = userdata_utils.get_vnfd(inst['vnfdId'], tmp_csar_dir)
flavour_id = inst['instantiatedVnfInfo']['flavourId']
hot_dict = vnfd.get_base_hot(flavour_id)
top_hot = hot_dict['template']
nfv_dict = userdata_utils.init_nfv_dict(top_hot)
vdus = nfv_dict.get('VDU', {})
new_vdus = {}
for vdu_name, vdu_value in vdus.items():
if 'desired_capacity' in vdu_value:
capacity = userdata_utils.get_current_capacity(
vdu_name, inst)
new_vdus[vdu_name] = {'desired_capacity': capacity}
fields = {'parameters': {'nfv': {'VDU': new_vdus}}}
return fields
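The 'desired_capacity' fragment returned by scale() above is merged into the stack's existing 'nfv' parameter by the caller as a json merge patch. The sketch below only illustrates that merge idea with a hypothetical helper and made-up values; it is not the actual caller code, and null-deletion handling is omitted.

# Hypothetical illustration of the merge-patch behaviour (sample data only).
def merge_patch(base, patch):
    # json-merge-patch style: dicts merge recursively, other values overwrite.
    for key, value in patch.items():
        if isinstance(value, dict) and isinstance(base.get(key), dict):
            merge_patch(base[key], value)
        else:
            base[key] = value
    return base

existing_nfv = {'VDU': {'VDU1': {'computeFlavourId': 'm1.tiny',
                                 'desired_capacity': 1}}}
scale_fragment = {'VDU': {'VDU1': {'desired_capacity': 2}}}
merge_patch(existing_nfv, scale_fragment)
# desired_capacity becomes 2 while computeFlavourId is left untouched.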

View File

@ -20,7 +20,7 @@ import sys
import traceback
def main(operation):
def main():
script_dict = pickle.load(sys.stdin.buffer)
req = script_dict['request']
@ -39,11 +39,8 @@ def main(operation):
module = importlib.import_module(class_module)
klass = getattr(module, userdata_class)
if operation == 'INSTANTIATE':
stack_dict = klass.instantiate(
req, inst, grant_req, grant, tmp_csar_dir)
else:
raise Exception("Unknown operation")
method = getattr(klass, grant_req['operation'].lower())
stack_dict = method(req, inst, grant_req, grant, tmp_csar_dir)
pickle.dump(stack_dict, sys.stdout.buffer)
sys.stdout.flush()
@ -51,7 +48,7 @@ def main(operation):
if __name__ == "__main__":
try:
main(sys.argv[1])
main()
os._exit(0)
except Exception:
sys.stderr.write(traceback.format_exc())
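Since the script now resolves the method from grant_req['operation'], any userdata class that defines a method named after the LCM operation is picked up without further changes to the script. A minimal sketch of that dispatch, using a hypothetical class and placeholder arguments:

# Hypothetical class and data, only to illustrate the getattr dispatch.
class SampleUserData:
    @staticmethod
    def instantiate(req, inst, grant_req, grant, tmp_csar_dir):
        return {'parameters': {'nfv': {}}}

    @staticmethod
    def scale(req, inst, grant_req, grant, tmp_csar_dir):
        return {'parameters': {'nfv': {'VDU': {}}}}


grant_req = {'operation': 'SCALE'}
method = getattr(SampleUserData, grant_req['operation'].lower())
stack_dict = method(None, {}, grant_req, {}, '/tmp/csar')
# stack_dict == {'parameters': {'nfv': {'VDU': {}}}}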

View File

@ -20,23 +20,29 @@ from tacker.sol_refactored.common import vnfd_utils
class AbstractUserData(metaclass=abc.ABCMeta):
"""Definition of each method
Args:
req: Request dict for each API
(ex. InstantiateVnfRequest for instantiate)
inst: VnfInstance dict
grant_req: GrantRequest dict
grant: Grant dict
tmp_csar_dir: directory path that csar contents are extracted
Returns:
dict of parameters for create/update heat stack.
see the example of userdata_default.py.
"""
@staticmethod
@abc.abstractmethod
def instantiate(req, inst, grant_req, grant, tmp_csar_dir):
"""Definition of instantiate method
raise sol_ex.UserDataClassNotImplemented()
Args:
req: InstantiateVnfRequest dict
inst: VnfInstance dict
grant_req: GrantRequest dict
grant: Grant dict
tmp_csar_dir: directory path that csar contents are extracted
Returns:
dict of parameters for create heat stack.
see the example of userdata_default.py.
"""
@staticmethod
@abc.abstractmethod
def scale(req, inst, grant_req, grant, tmp_csar_dir):
raise sol_ex.UserDataClassNotImplemented()
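For orientation, the dict returned by these methods roughly takes the shape sketched below, assuming the usual Heat stack fields ('template', 'parameters', 'files'); the contents are placeholders, not taken from userdata_default.py, and the exact keys depend on the base HOT and the userdata implementation.

# Rough, assumed shape of the return value (placeholder contents only).
fields = {
    'template': 'heat_template_version: 2013-05-23\n...',
    'parameters': {'nfv': {'VDU': {'VDU1': {'desired_capacity': 1}}}},
    'files': {'VDU1.yaml': 'heat_template_version: 2013-05-23\n...'},
}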
@ -83,7 +89,7 @@ def init_nfv_dict(hot_template):
return nfv
def get_param_flavor(vdu_name, req, vnfd, grant):
def get_param_flavor(vdu_name, flavour_id, vnfd, grant):
# try to get from grant
if 'vimAssets' in grant:
assets = grant['vimAssets']
@ -96,10 +102,10 @@ def get_param_flavor(vdu_name, req, vnfd, grant):
# if specified in VNFD, use it
# NOTE: if not found. parameter is set to None.
# may be error when stack create
return vnfd.get_compute_flavor(req['flavourId'], vdu_name)
return vnfd.get_compute_flavor(flavour_id, vdu_name)
def get_param_image(vdu_name, req, vnfd, grant):
def get_param_image(vdu_name, flavour_id, vnfd, grant):
# try to get from grant
if 'vimAssets' in grant:
assets = grant['vimAssets']
@ -112,7 +118,7 @@ def get_param_image(vdu_name, req, vnfd, grant):
# if specified in VNFD, use it
# NOTE: if not found. parameter is set to None.
# may be error when stack create
sw_images = vnfd.get_sw_image(req['flavourId'])
sw_images = vnfd.get_sw_image(flavour_id)
for name, image in sw_images.items():
if name == vdu_name:
return image
@ -133,6 +139,37 @@ def get_param_zone(vdu_name, grant_req, grant):
return zone['zoneId']
def get_current_capacity(vdu_name, inst):
count = 0
inst_vnfcs = (inst.get('instantiatedVnfInfo', {})
.get('vnfcResourceInfo', []))
for inst_vnfc in inst_vnfcs:
if inst_vnfc['vduId'] == vdu_name:
count += 1
return count
def get_param_capacity(vdu_name, inst, grant_req):
# NOTE: refer to grant_req here since the interpretation of the VNFD was
# done when making grant_req.
count = get_current_capacity(vdu_name, inst)
add_reses = grant_req.get('addResources', [])
for res_def in add_reses:
if (res_def['type'] == 'COMPUTE' and
res_def['resourceTemplateId'] == vdu_name):
count += 1
rm_reses = grant_req.get('removeResources', [])
for res_def in rm_reses:
if (res_def['type'] == 'COMPUTE' and
res_def['resourceTemplateId'] == vdu_name):
count -= 1
return count
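A small worked sketch of the calculation above, with made-up identifiers: the current count comes from vnfcResourceInfo and is then adjusted by the COMPUTE entries in addResources/removeResources of the grant request.

# Made-up sample data just to trace the arithmetic of get_param_capacity().
inst = {'instantiatedVnfInfo': {'vnfcResourceInfo': [
    {'id': 'vnfc-1', 'vduId': 'VDU1'},
    {'id': 'vnfc-2', 'vduId': 'VDU1'}]}}
grant_req = {'addResources': [
    {'id': 'res-1', 'type': 'COMPUTE', 'resourceTemplateId': 'VDU1'}]}
# current capacity 2 + 1 addition - 0 removals -> 3
# get_param_capacity('VDU1', inst, grant_req) would return 3 here.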
def _get_fixed_ips_from_extcp(extcp):
fixed_ips = []
for cp_conf in extcp['cpConfig'].values():

View File

@ -89,6 +89,12 @@ class Client(object):
path, "POST", body=req_body, version="2.0.0")
self.print(resp, body)
def scale(self, id, req_body):
path = self.path + '/' + id + '/scale'
resp, body = self.client.do_request(
path, "POST", body=req_body, version="2.0.0")
self.print(resp, body)
def retry(self, id):
path = self.path + '/' + id + '/retry'
resp, body = self.client.do_request(path, "POST", version="2.0.0")
@ -97,6 +103,7 @@ class Client(object):
def rollback(self, id):
path = self.path + '/' + id + '/rollback'
resp, body = self.client.do_request(path, "POST", version="2.0.0")
self.print(resp, body)
def fail(self, id):
path = self.path + '/' + id + '/fail'
@ -112,6 +119,7 @@ def usage():
print(" inst delete {id}")
print(" inst inst {id} body(path of content)")
print(" inst term {id} body(path of content)")
print(" inst scale {id} body(path of content)")
print(" subsc create body(path of content)")
print(" subsc list [body(path of content)]")
print(" subsc show {id}")
@ -137,7 +145,8 @@ if __name__ == '__main__':
action = sys.argv[2]
if resource == "inst":
if action not in ["create", "list", "show", "delete", "inst", "term"]:
if action not in ["create", "list", "show", "delete", "inst", "term",
"scale"]:
usage()
client = Client("/vnflcm/v2/vnf_instances")
elif resource == "subsc":
@ -179,6 +188,10 @@ if __name__ == '__main__':
if len(sys.argv) != 5:
usage()
client.term(sys.argv[3], get_body(sys.argv[4]))
elif action == "scale":
if len(sys.argv) != 5:
usage()
client.scale(sys.argv[3], get_body(sys.argv[4]))
elif action == "retry":
if len(sys.argv) != 4:
usage()
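A hedged usage sketch for the new "inst scale" action: the body argument is the path to a JSON file holding a ScaleVnfRequest. The aspect id, file name and instance id below are placeholders.

# Sample body file for "inst scale {id} body"; values are placeholders.
import json

body = {"type": "SCALE_OUT", "aspectId": "VDU1_scale", "numberOfSteps": 1}
with open("scale_body.json", "w") as f:
    json.dump(body, f)
# then: python3 <this tool> inst scale <vnf instance id> scale_body.json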

View File

@ -11,7 +11,7 @@ resources:
properties:
min_size: 1
max_size: 3
desired_capacity: 1
desired_capacity: { get_param: [ nfv, VDU, VDU1, desired_capacity ] }
resource:
type: VDU1.yaml
properties:
@ -26,20 +26,8 @@ resources:
net4: { get_resource: internalVL2 }
net5: { get_resource: internalVL3 }
VDU1_scale_out:
type: OS::Heat::ScalingPolicy
properties:
scaling_adjustment: 1
auto_scaling_group_id:
get_resource: VDU1_scale_group
adjustment_type: change_in_capacity
VDU1_scale_in:
type: OS::Heat::ScalingPolicy
properties:
scaling_adjustment: -1
auto_scaling_group_id:
get_resource: VDU1_scale_group
adjustment_type: change_in_capacity
# NOTE: Resource definition of OS::Heat::ScalingPolicy is omitted.
# Unlike v1, it is not used by the v2 scale implementation.
VDU2:
type: OS::Nova::Server

View File

@ -60,14 +60,18 @@ class UserData(userdata_utils.AbstractUserData):
if 'computeFlavourId' in vdu_value:
vdu_value['computeFlavourId'] = (
userdata_utils.get_param_flavor(
vdu_name, req, vnfd, grant))
vdu_name, flavour_id, vnfd, grant))
if 'vcImageId' in vdu_value:
vdu_value['vcImageId'] = userdata_utils.get_param_image(
vdu_name, req, vnfd, grant)
vdu_name, flavour_id, vnfd, grant)
if 'locationConstraints' in vdu_value:
vdu_value['locationConstraints'] = (
userdata_utils.get_param_zone(
vdu_name, grant_req, grant))
if 'desired_capacity' in vdu_value:
vdu_value['desired_capacity'] = (
userdata_utils.get_param_capacity(
vdu_name, inst, grant_req))
cps = nfv_dict.get('CP', {})
for cp_name, cp_value in cps.items():

View File

@ -11,7 +11,7 @@ resources:
properties:
min_size: 1
max_size: 3
desired_capacity: 1
desired_capacity: { get_param: [ nfv, VDU, VDU1, desired_capacity ] }
resource:
type: VDU1.yaml
properties:
@ -19,20 +19,9 @@ resources:
image: { get_param: [ nfv, VDU, VDU1, vcImageId ] }
net5: { get_resource: internalVL3 }
affinity: { get_resource: nfvi_node_affinity }
VDU1_scale_out:
type: OS::Heat::ScalingPolicy
properties:
scaling_adjustment: 1
auto_scaling_group_id:
get_resource: VDU1_scale_group
adjustment_type: change_in_capacity
VDU1_scale_in:
type: OS::Heat::ScalingPolicy
properties:
scaling_adjustment: -1
auto_scaling_group_id:
get_resource: VDU1_scale_group
adjustment_type: change_in_capacity
# NOTE: Resource definition of OS::Heat::ScalingPolicy is omitted.
# Unlike v1, it is not used by the v2 scale implementation.
VDU2:
type: OS::Nova::Server

File diff suppressed because it is too large

View File

@ -15,6 +15,7 @@
import os
from tacker.sol_refactored.common import exceptions as sol_ex
from tacker.sol_refactored.common import vnfd_utils
from tacker.tests import base
@ -100,9 +101,6 @@ class TestVnfd(base.BaseTestCase):
result = self.vnfd_1.get_base_hot(SAMPLE_FLAVOUR_ID)
# check keys and sampling data
self.assertEqual(['VDU1.yaml'], list(result['files'].keys()))
self.assertEqual(1,
result['template']['resources']['VDU1_scale_out']['properties']
['scaling_adjustment'])
self.assertEqual({'get_param': 'net3'},
result['files']['VDU1.yaml']['resources']['VDU1_CP3']
['properties']['network'])
@ -159,3 +157,25 @@ class TestVnfd(base.BaseTestCase):
result = self.vnfd_1.get_interface_script(SAMPLE_FLAVOUR_ID,
"scale_end")
self.assertEqual(None, result)
def test_get_scale_vdu_and_num(self):
expected_result = {'VDU1': 1}
result = self.vnfd_1.get_scale_vdu_and_num(SAMPLE_FLAVOUR_ID,
'VDU1_scale')
self.assertEqual(expected_result, result)
def test_get_scale_vdu_and_num_no_delta(self):
self.assertRaises(sol_ex.DeltaMissingInVnfd,
self.vnfd_1.get_scale_vdu_and_num, SAMPLE_FLAVOUR_ID,
'Invalid_scale')
def test_get_scale_info_from_inst_level(self):
expected_result = {'VDU1_scale': {'scale_level': 2}}
result = self.vnfd_1.get_scale_info_from_inst_level(
SAMPLE_FLAVOUR_ID, 'instantiation_level_2')
self.assertEqual(expected_result, result)
def test_get_max_scale_level(self):
result = self.vnfd_1.get_max_scale_level(SAMPLE_FLAVOUR_ID,
'VDU1_scale')
self.assertEqual(2, result)

View File

@ -126,11 +126,11 @@ _inst_req_example = {
}
}
# instantiatedVnfInfo example for terminate grant test
# instantiatedVnfInfo example for terminate/scale grant test
# NOTE:
# - some identifiers are modified to make check easy.
# - some attributes which are not related to make terminate grant
# retuest are omitted.
# - some attributes which are not related to make terminate/scale grant
# request are omitted.
_inst_info_example = {
"flavourId": "simple",
"vnfState": "STARTED",
@ -386,7 +386,7 @@ _inst_info_example = {
{
"id": "259c5895-7be6-4bed-8a94-221c41b3d08f",
"cpdId": "VDU1_CP1",
# when extLinkPorts of extVitualLinks specified, there is
# when extLinkPorts of extVirtualLinks specified, there is
# no vnfExtCpId nor vnfLinkPortId.
},
{
@ -534,6 +534,18 @@ class TestVnfLcmDriverV2(base.BaseTestCase):
self.vnfd_1 = vnfd_utils.Vnfd(SAMPLE_VNFD_ID)
self.vnfd_1.init_from_csar_dir(os.path.join(sample_dir, "sample1"))
def _grant_req_links(self, lcmocc_id, inst_id):
return {
'vnfLcmOpOcc': {
'href': '{}/v2/vnflcm/vnf_lcm_op_occs/{}'.format(
self.driver.endpoint, lcmocc_id)
},
'vnfInstance': {
'href': '{}/v2/vnflcm/vnf_instances/{}'.format(
self.driver.endpoint, inst_id)
}
}
@mock.patch.object(nfvo_client.NfvoClient, 'grant')
def test_instantiate_grant(self, mocked_grant):
# prepare
@ -551,7 +563,7 @@ class TestVnfLcmDriverV2(base.BaseTestCase):
lcmocc = objects.VnfLcmOpOccV2(
# required fields
id=uuidutils.generate_uuid(),
operationState=fields.LcmOperationStateType.PROCESSING,
operationState=fields.LcmOperationStateType.STARTING,
stateEnteredTime=datetime.utcnow(),
startTime=datetime.utcnow(),
vnfInstanceId=inst.id,
@ -563,7 +575,7 @@ class TestVnfLcmDriverV2(base.BaseTestCase):
mocked_grant.return_value = objects.GrantV1()
# run instantiate_grant
grant_req, _ = self.driver.instantiate_grant(
grant_req, _ = self.driver.grant(
self.context, lcmocc, inst, self.vnfd_1)
# check grant_req is constructed according to intention
@ -575,7 +587,8 @@ class TestVnfLcmDriverV2(base.BaseTestCase):
'flavourId': SAMPLE_FLAVOUR_ID,
'operation': 'INSTANTIATE',
'isAutomaticInvocation': False,
'instantiationLevelId': 'instantiation_level_2'
'instantiationLevelId': 'instantiation_level_2',
'_links': self._grant_req_links(lcmocc.id, inst.id)
}
for key, value in expected_fixed_items.items():
self.assertEqual(value, grant_req[key])
@ -641,7 +654,7 @@ class TestVnfLcmDriverV2(base.BaseTestCase):
lcmocc = objects.VnfLcmOpOccV2(
# required fields
id=uuidutils.generate_uuid(),
operationState=fields.LcmOperationStateType.PROCESSING,
operationState=fields.LcmOperationStateType.STARTING,
stateEnteredTime=datetime.utcnow(),
startTime=datetime.utcnow(),
vnfInstanceId=inst.id,
@ -653,7 +666,7 @@ class TestVnfLcmDriverV2(base.BaseTestCase):
mocked_grant.return_value = objects.GrantV1()
# run terminate_grant
grant_req, _ = self.driver.terminate_grant(
grant_req, _ = self.driver.grant(
self.context, lcmocc, inst, self.vnfd_1)
# check grant_req is constructed according to intention
@ -663,7 +676,8 @@ class TestVnfLcmDriverV2(base.BaseTestCase):
'vnfLcmOpOccId': lcmocc.id,
'vnfdId': SAMPLE_VNFD_ID,
'operation': 'TERMINATE',
'isAutomaticInvocation': False
'isAutomaticInvocation': False,
'_links': self._grant_req_links(lcmocc.id, inst.id)
}
for key, value in expected_fixed_items.items():
self.assertEqual(value, grant_req[key])
@ -711,3 +725,198 @@ class TestVnfLcmDriverV2(base.BaseTestCase):
for key, value in check_reses.items():
for name, ids in value.items():
self.assertEqual(expected_res_ids[key][name], ids)
def _scale_grant_prepare(self, scale_type):
inst = objects.VnfInstanceV2(
# required fields
id=uuidutils.generate_uuid(),
vnfdId=SAMPLE_VNFD_ID,
vnfProvider='provider',
vnfProductName='product name',
vnfSoftwareVersion='software version',
vnfdVersion='vnfd version',
instantiationState='INSTANTIATED'
)
inst_info = objects.VnfInstanceV2_InstantiatedVnfInfo.from_dict(
_inst_info_example)
inst.instantiatedVnfInfo = inst_info
req = objects.ScaleVnfRequest.from_dict(
{"type": scale_type,
"aspectId": "VDU1_scale",
"numberOfSteps": 1})
lcmocc = objects.VnfLcmOpOccV2(
# required fields
id=uuidutils.generate_uuid(),
operationState=fields.LcmOperationStateType.STARTING,
stateEnteredTime=datetime.utcnow(),
startTime=datetime.utcnow(),
vnfInstanceId=inst.id,
operation=fields.LcmOperationType.SCALE,
isAutomaticInvocation=False,
isCancelPending=False,
operationParams=req)
return inst, lcmocc
@mock.patch.object(nfvo_client.NfvoClient, 'grant')
def test_scale_grant_scale_out(self, mocked_grant):
# prepare
inst, lcmocc = self._scale_grant_prepare('SCALE_OUT')
mocked_grant.return_value = objects.GrantV1()
# run scale_grant scale-out
grant_req, _ = self.driver.grant(
self.context, lcmocc, inst, self.vnfd_1)
# check grant_req is constructed according to intention
grant_req = grant_req.to_dict()
expected_fixed_items = {
'vnfInstanceId': inst.id,
'vnfLcmOpOccId': lcmocc.id,
'vnfdId': SAMPLE_VNFD_ID,
'operation': 'SCALE',
'isAutomaticInvocation': False,
'_links': self._grant_req_links(lcmocc.id, inst.id)
}
for key, value in expected_fixed_items.items():
self.assertEqual(value, grant_req[key])
add_reses = grant_req['addResources']
check_reses = {
'COMPUTE': {'VDU1': []},
'STORAGE': {'VirtualStorage': []},
'LINKPORT': {'VDU1_CP1': [], 'VDU1_CP2': [], 'VDU1_CP3': [],
'VDU1_CP4': [], 'VDU1_CP5': []}
}
expected_num = {
'COMPUTE': {'VDU1': 1},
'STORAGE': {'VirtualStorage': 1},
'LINKPORT': {'VDU1_CP1': 1, 'VDU1_CP2': 1, 'VDU1_CP3': 1,
'VDU1_CP4': 1, 'VDU1_CP5': 1}
}
for res in add_reses:
check_reses[res['type']][res['resourceTemplateId']].append(
res['id'])
for key, value in check_reses.items():
for name, ids in value.items():
self.assertEqual(expected_num[key][name], len(ids))
@mock.patch.object(nfvo_client.NfvoClient, 'grant')
def test_scale_grant_scale_in(self, mocked_grant):
# prepare
inst, lcmocc = self._scale_grant_prepare('SCALE_IN')
mocked_grant.return_value = objects.GrantV1()
# run scale_grant scale-in
grant_req, _ = self.driver.grant(
self.context, lcmocc, inst, self.vnfd_1)
# check grant_req is constructed according to intention
grant_req = grant_req.to_dict()
expected_fixed_items = {
'vnfInstanceId': inst.id,
'vnfLcmOpOccId': lcmocc.id,
'vnfdId': SAMPLE_VNFD_ID,
'operation': 'SCALE',
'isAutomaticInvocation': False,
'_links': self._grant_req_links(lcmocc.id, inst.id)
}
for key, value in expected_fixed_items.items():
self.assertEqual(value, grant_req[key])
rm_reses = grant_req['removeResources']
check_reses = {
'COMPUTE': {'VDU1': []},
'STORAGE': {'VirtualStorage': []},
'LINKPORT': {'VDU1_CP1': [], 'VDU1_CP2': [], 'VDU1_CP3': [],
'VDU1_CP4': [], 'VDU1_CP5': []}
}
expected_res_ids = {
'COMPUTE': {
'VDU1': ['res_id_VDU1_1']
},
'STORAGE': {
'VirtualStorage': ['res_id_VirtualStorage_1']
},
'LINKPORT': {
'VDU1_CP1': ['res_id_VDU1_1_CP1'],
'VDU1_CP2': ['res_id_VDU1_1_CP2'],
'VDU1_CP3': ['res_id_VDU1_1_CP3'],
'VDU1_CP4': ['res_id_VDU1_1_CP4'],
'VDU1_CP5': ['res_id_VDU1_1_CP5']
}
}
for res in rm_reses:
check_reses[res['type']][res['resourceTemplateId']].append(
res['resource']['resourceId'])
for key, value in check_reses.items():
for name, ids in value.items():
self.assertEqual(expected_res_ids[key][name], ids)
def test_make_inst_info_common_instantiate(self):
# prepare
inst_saved = objects.VnfInstanceV2(
# only set used members in the method
instantiatedVnfInfo=objects.VnfInstanceV2_InstantiatedVnfInfo()
)
inst = inst_saved.obj_clone()
req = objects.InstantiateVnfRequestV2.from_dict(_inst_req_example)
lcmocc = objects.VnfLcmOpOccV2(
# only set used members in the method
operation=fields.LcmOperationType.INSTANTIATE,
operationParams=req)
# run _make_inst_info_common
self.driver._make_inst_info_common(
lcmocc, inst_saved, inst, self.vnfd_1)
inst = inst.to_dict()
expected_scale_status = [{'aspectId': 'VDU1_scale', 'scaleLevel': 2}]
expected_max_scale_levels = [
{'aspectId': 'VDU1_scale', 'scaleLevel': 2}]
self.assertEqual(expected_scale_status,
inst['instantiatedVnfInfo']['scaleStatus'])
self.assertEqual(expected_max_scale_levels,
inst['instantiatedVnfInfo']['maxScaleLevels'])
def test_make_inst_info_common_scale(self):
# prepare
inst_saved = objects.VnfInstanceV2(
# only set used members in the method
instantiatedVnfInfo=objects.VnfInstanceV2_InstantiatedVnfInfo()
)
inst_saved.instantiatedVnfInfo.scaleStatus = [
objects.ScaleInfoV2(aspectId='VDU1_scale', scaleLevel=2)
]
inst_saved.instantiatedVnfInfo.maxScaleLevels = [
objects.ScaleInfoV2(aspectId='VDU1_scale', scaleLevel=2)
]
inst = objects.VnfInstanceV2(
# only set used members in the method
instantiatedVnfInfo=objects.VnfInstanceV2_InstantiatedVnfInfo()
)
req = objects.ScaleVnfRequest.from_dict(
{"type": "SCALE_IN",
"aspectId": "VDU1_scale",
"numberOfSteps": 1})
lcmocc = objects.VnfLcmOpOccV2(
# only set used members in the method
operation=fields.LcmOperationType.SCALE,
operationParams=req)
# run _make_inst_info_common
self.driver._make_inst_info_common(
lcmocc, inst_saved, inst, self.vnfd_1)
inst = inst.to_dict()
expected_scale_status = [{'aspectId': 'VDU1_scale', 'scaleLevel': 1}]
expected_max_scale_levels = [
{'aspectId': 'VDU1_scale', 'scaleLevel': 2}]
self.assertEqual(expected_scale_status,
inst['instantiatedVnfInfo']['scaleStatus'])
self.assertEqual(expected_max_scale_levels,
inst['instantiatedVnfInfo']['maxScaleLevels'])

View File

@ -52,7 +52,7 @@ class TestVnflcmV2(db_base.SqlTestCase):
instantiationState=inst_state
)
req = {"flavourId": "simple"} # instantate request
req = {"flavourId": "simple"} # instantiate request
lcmocc = objects.VnfLcmOpOccV2(
# required fields
id=uuidutils.generate_uuid(),
@ -259,3 +259,93 @@ class TestVnflcmV2(db_base.SqlTestCase):
# check grant_req and grant are deleted
self.assertRaises(sol_ex.GrantRequestOrGrantNotFound,
lcmocc_utils.get_grant_req_and_grant, self.context, lcmocc)
def test_scale_not_instantiated(self):
inst_id, _ = self._create_inst_and_lcmocc('NOT_INSTANTIATED',
fields.LcmOperationStateType.COMPLETED)
body = {"aspectId": "aspect_1", "type": "SCALE_OUT"}
self.assertRaises(sol_ex.VnfInstanceIsNotInstantiated,
self.controller.scale, request=self.request, id=inst_id,
body=body)
def test_scale_lcmocc_in_progress(self):
inst_id, _ = self._create_inst_and_lcmocc('INSTANTIATED',
fields.LcmOperationStateType.FAILED_TEMP)
body = {"aspectId": "aspect_1", "type": "SCALE_OUT"}
self.assertRaises(sol_ex.OtherOperationInProgress,
self.controller.scale, request=self.request, id=inst_id,
body=body)
def _prepare_db_for_scale_param_check(self, scale_status,
max_scale_levels):
inst = objects.VnfInstanceV2(
# required fields
id=uuidutils.generate_uuid(),
vnfdId=uuidutils.generate_uuid(),
vnfProvider='provider',
vnfProductName='product name',
vnfSoftwareVersion='software version',
vnfdVersion='vnfd version',
instantiationState='INSTANTIATED'
)
inst.instantiatedVnfInfo = objects.VnfInstanceV2_InstantiatedVnfInfo(
flavourId='small',
vnfState='STARTED',
scaleStatus=scale_status,
maxScaleLevels=max_scale_levels
)
inst.create(self.context)
return inst.id
def test_scale_invalid_aspect_id(self):
scale_status = [
objects.ScaleInfoV2(
aspectId="aspect_2",
scaleLevel=0
)
]
max_scale_levels = [
objects.ScaleInfoV2(
aspectId="aspect_2",
scaleLevel=3
)
]
inst_id = self._prepare_db_for_scale_param_check(scale_status,
max_scale_levels)
body = {"aspectId": "aspect_1", "type": "SCALE_OUT"}
self.assertRaises(sol_ex.InvalidScaleAspectId,
self.controller.scale, request=self.request, id=inst_id,
body=body)
def test_scale_invalid_number_of_steps(self):
scale_status = [
objects.ScaleInfoV2(
aspectId="aspect_1",
scaleLevel=1
)
]
max_scale_levels = [
objects.ScaleInfoV2(
aspectId="aspect_1",
scaleLevel=3
)
]
inst_id = self._prepare_db_for_scale_param_check(scale_status,
max_scale_levels)
body = {"aspectId": "aspect_1", "type": "SCALE_OUT",
"numberOfSteps": 3}
self.assertRaises(sol_ex.InvalidScaleNumberOfSteps,
self.controller.scale, request=self.request, id=inst_id,
body=body)
body = {"aspectId": "aspect_1", "type": "SCALE_IN",
"numberOfSteps": 2}
self.assertRaises(sol_ex.InvalidScaleNumberOfSteps,
self.controller.scale, request=self.request, id=inst_id,
body=body)

File diff suppressed because it is too large

View File

@ -23,10 +23,10 @@ SAMPLE_VNFD_ID = "b1bb0ce7-ebca-4fa7-95ed-4840d7000000"
SAMPLE_FLAVOUR_ID = "simple"
class TestVnfd(base.BaseTestCase):
class TestUserDataUtils(base.BaseTestCase):
def setUp(self):
super(TestVnfd, self).setUp()
super(TestUserDataUtils, self).setUp()
cur_dir = os.path.dirname(__file__)
sample_dir = os.path.join(cur_dir, "../..", "samples")
@ -58,7 +58,6 @@ class TestVnfd(base.BaseTestCase):
self.assertEqual(expected_result, result)
def test_get_param_flavor(self):
req = {'flavourId': SAMPLE_FLAVOUR_ID}
flavor = 'm1.large'
grant = {
'vimAssets': {
@ -69,17 +68,16 @@ class TestVnfd(base.BaseTestCase):
}
}
result = userdata_utils.get_param_flavor('VDU1', req,
result = userdata_utils.get_param_flavor('VDU1', SAMPLE_FLAVOUR_ID,
self.vnfd_1, grant)
self.assertEqual(flavor, result)
# if not exist in grant, get from VNFD
result = userdata_utils.get_param_flavor('VDU2', req,
result = userdata_utils.get_param_flavor('VDU2', SAMPLE_FLAVOUR_ID,
self.vnfd_1, grant)
self.assertEqual('m1.tiny', result)
def test_get_param_image(self):
req = {'flavourId': SAMPLE_FLAVOUR_ID}
image_id = 'f30e149d-b3c7-497a-8b19-a092bc81e47b'
grant = {
'vimAssets': {
@ -92,7 +90,7 @@ class TestVnfd(base.BaseTestCase):
}
}
result = userdata_utils.get_param_image('VDU2', req,
result = userdata_utils.get_param_image('VDU2', SAMPLE_FLAVOUR_ID,
self.vnfd_1, grant)
self.assertEqual(image_id, result)
@ -118,6 +116,40 @@ class TestVnfd(base.BaseTestCase):
result = userdata_utils.get_param_zone('VDU1', grant_req, grant)
self.assertEqual('nova', result)
def test_get_param_capacity(self):
# test get_current_capacity at the same time
grant_req = {
'addResources': [
{'id': 'dd60c89a-29a2-43bc-8cff-a534515523df',
'type': 'COMPUTE', 'resourceTemplateId': 'VDU1'},
{'id': '49b99140-c897-478c-83fa-ba3698912b18',
'type': 'COMPUTE', 'resourceTemplateId': 'VDU1'},
{'id': 'b03c4b75-ca17-4773-8a50-9a53df78a007',
'type': 'COMPUTE', 'resourceTemplateId': 'VDU2'}
],
'removeResources': [
{'id': '0837249d-ac2a-4963-bf98-bc0755eec663',
'type': 'COMPUTE', 'resourceTemplateId': 'VDU1'},
{'id': '3904e9d1-c0ec-4c3c-b29e-c8942a20f866',
'type': 'COMPUTE', 'resourceTemplateId': 'VDU2'}
]
}
inst = {
'instantiatedVnfInfo': {
'vnfcResourceInfo': [
{'id': 'cdf36e11-f6ca-4c80-aaf1-0d2e764a2f3a',
'vduId': 'VDU2'},
{'id': 'c8cb522d-ddf8-4136-9c85-92bab8f2993d',
'vduId': 'VDU1'}
]
}
}
result = userdata_utils.get_param_capacity('VDU1', inst, grant_req)
self.assertEqual(2, result)
result = userdata_utils.get_param_capacity('VDU2', inst, grant_req)
self.assertEqual(1, result)
def test_get_parama_network(self):
res_id = "8fe7cc1a-e4ac-41b9-8b89-ed14689adb9c"
req = {

View File

@ -24,20 +24,6 @@ resources:
net4: { get_resource: internalVL2 }
net5: { get_resource: internalVL3 }
affinity: { get_resource: nfvi_node_affinity }
VDU1_scale_out:
type: OS::Heat::ScalingPolicy
properties:
scaling_adjustment: 1
auto_scaling_group_id:
get_resource: VDU1_scale_group
adjustment_type: change_in_capacity
VDU1_scale_in:
type: OS::Heat::ScalingPolicy
properties:
scaling_adjustment: -1
auto_scaling_group_id:
get_resource: VDU1_scale_group
adjustment_type: change_in_capacity
VDU2:
type: OS::Nova::Server

View File

@ -302,6 +302,12 @@ topology_template:
max_scale_level: 2
step_deltas:
- delta_1
Invalid_scale:
name: Invalid_scale
description: Invalid scaling aspect
max_scale_level: 2
step_deltas:
- delta_missing
- VDU1_initial_delta:
type: tosca.policies.nfv.VduInitialDelta
@ -326,6 +332,15 @@ topology_template:
number_of_instances: 1
targets: [ VDU1 ]
- VDU1_scaling_aspect_deltas:
type: tosca.policies.nfv.VduScalingAspectDeltas
properties:
aspect: Invalid_scale
deltas:
delta_dummy: # delta_missing is missing
number_of_instances: 1
targets: [ VDU2 ]
- instantiation_levels:
type: tosca.policies.nfv.InstantiationLevels
properties:

View File

@ -36,14 +36,18 @@ class DefaultUserData(userdata_utils.AbstractUserData):
if 'computeFlavourId' in vdu_value:
vdu_value['computeFlavourId'] = (
userdata_utils.get_param_flavor(
vdu_name, req, vnfd, grant))
vdu_name, flavour_id, vnfd, grant))
if 'vcImageId' in vdu_value:
vdu_value['vcImageId'] = userdata_utils.get_param_image(
vdu_name, req, vnfd, grant)
vdu_name, flavour_id, vnfd, grant)
if 'locationConstraints' in vdu_value:
vdu_value['locationConstraints'] = (
userdata_utils.get_param_zone(
vdu_name, grant_req, grant))
if 'desired_capacity' in vdu_value:
vdu_value['desired_capacity'] = (
userdata_utils.get_param_capacity(
vdu_name, inst, grant_req))
cps = nfv_dict.get('CP', {})
for cp_name, cp_value in cps.items():
@ -84,3 +88,32 @@ class DefaultUserData(userdata_utils.AbstractUserData):
fields['files'][key] = yaml.safe_dump(value)
return fields
@staticmethod
def scale(req, inst, grant_req, grant, tmp_csar_dir):
# scale is interested in 'desired_capacity' only.
# This method returns only the 'desired_capacity' part of the
# 'nfv' dict. The caller applies it to the existing 'nfv' dict
# as a json merge patch.
# NOTE: the complete 'nfv' dict cannot be made at this point
# since an InstantiateVnfRequest is necessary to make it.
vnfd = userdata_utils.get_vnfd(inst['vnfdId'], tmp_csar_dir)
flavour_id = inst['instantiatedVnfInfo']['flavourId']
hot_dict = vnfd.get_base_hot(flavour_id)
top_hot = hot_dict['template']
nfv_dict = userdata_utils.init_nfv_dict(top_hot)
vdus = nfv_dict.get('VDU', {})
new_vdus = {}
for vdu_name, vdu_value in vdus.items():
if 'desired_capacity' in vdu_value:
capacity = userdata_utils.get_param_capacity(
vdu_name, inst, grant_req)
new_vdus[vdu_name] = {'desired_capacity': capacity}
fields = {'parameters': {'nfv': {'VDU': new_vdus}}}
return fields