Merge "Refactor for Performance Improvement"
This commit is contained in:
commit
1d8a377459
@@ -0,0 +1,9 @@
+---
+upgrade:
+  - |
+    Improves the performance of Tacker by refactoring some code and
+    corrects the output level of several log messages.
+
+    In addition, the V2 code now calls Heat's API directly instead of
+    following a redirection, and the subscription filter applied when
+    sending notifications is improved in the V1 code.
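The Heat-related part of the note refers to how heat-api resolves a stack: a GET on stacks/{stack_name} is answered with a redirect to the canonical stacks/{stack_name}/{stack_id} URL, while a request that already carries both the name and the id is served directly. The sketch below only illustrates that idea and is not Tacker's implementation; do_request is a hypothetical HTTP helper standing in for the client used by the V2 driver.

    # Minimal sketch, assuming a `do_request(path, method, expected_status)` helper
    # that returns (response, parsed_body). Resolving the stack id once and reusing
    # the "name/id" form avoids the per-call 302 redirect issued by heat-api when a
    # stack is addressed by name only.

    def resolve_stack_id(do_request, stack_name):
        # One lookup; heat-api answers 404 if the stack does not exist yet.
        resp, body = do_request(f"stacks/{stack_name}", "GET",
                                expected_status=[200, 404])
        if resp.status_code == 404:
            return None
        return body["stack"]["id"]

    def stack_path(stack_name, stack_id=None):
        # With the id known, later calls can hit the non-redirecting URL directly.
        if stack_id:
            return f"stacks/{stack_name}/{stack_id}"
        return f"stacks/{stack_name}"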
@@ -128,9 +128,8 @@ def _get_pagination_max_limit():
         if max_limit == 0:
             raise ValueError()
     except ValueError:
-        LOG.warning("Invalid value for pagination_max_limit: %s. It "
-                    "should be an integer greater to 0",
-                    cfg.CONF.pagination_max_limit)
+        LOG.warning("pagination_max_limit: %s must be greater than 0",
+                    cfg.CONF.pagination_max_limit)
     return max_limit


@@ -552,9 +552,9 @@ class ExtensionManager(object):
                     new_ext = new_ext_class()
                     self.add_extension(new_ext)
             except Exception as exception:
-                LOG.warning("Extension file %(f)s wasn't loaded due to "
-                            "%(exception)s",
-                            {'f': f, 'exception': exception})
+                LOG.error("Extension file %(f)s wasn't loaded due to "
+                          "%(exception)s",
+                          {'f': f, 'exception': exception})

     def add_extension(self, ext):
         # Do nothing if the extension doesn't check out
@@ -562,7 +562,6 @@ class ExtensionManager(object):
             return

         alias = ext.get_alias()
-        LOG.info('Loaded extension: %s', alias)

         if alias in self.extensions:
             raise exceptions.DuplicatedExtension(alias=alias)
@@ -79,8 +79,8 @@ def Resource(controller, faults=None, deserializers=None, serializers=None):
             mapped_exc = api_common.convert_exception_to_http_exc(e, faults,
                                                                   language)
             if hasattr(mapped_exc, 'code') and 400 <= mapped_exc.code < 500:
-                LOG.info('%(action)s failed (client error): %(exc)s',
-                         {'action': action, 'exc': mapped_exc})
+                LOG.error('%(action)s failed (client error): %(exc)s',
+                          {'action': action, 'exc': mapped_exc})
             else:
                 LOG.exception('%(action)s failed: %(details)s',
                               {'action': action,
@@ -43,7 +43,7 @@ class TackerKeystoneContext(base.ConfigurableMiddleware):
         ctx = context.Context.from_environ(req.environ)

         if not ctx.user_id:
-            LOG.debug("X_USER_ID is not found in request")
+            LOG.error("X_USER_ID is not found in request")
             return webob.exc.HTTPUnauthorized()

         # Inject the context...
@@ -104,8 +104,8 @@ class _ClientCredentialsGrant(_OAuth2GrantBase):
         LOG.info(
             "Get Access Token, Connecting to <GET:{}>".format(
                 self.token_endpoint))
-        LOG.info("Request Headers={}".format(kwargs.get('headers')))
-        LOG.info("Request Body={}".format(kwargs.get('data')))
+        LOG.debug("[REQ] Headers={}".format(kwargs.get('headers')))
+        LOG.debug("[REQ] Body={}".format(kwargs.get('data')))

         response = basic_auth_request.post(
             self.token_endpoint,
@@ -114,8 +114,8 @@ class _ClientCredentialsGrant(_OAuth2GrantBase):
         response.raise_for_status()

         response_body = response.json()
-        LOG.info("[RES] Headers={}".format(response.headers))
-        LOG.info("[RES] Body={}".format(response_body))
+        LOG.debug("[RES] Headers={}".format(response.headers))
+        LOG.debug("[RES] Body={}".format(response_body))

         return response_body

@@ -279,7 +279,7 @@ class _AuthManager:
         client = _OAuth2Session(grant)
         client.apply_access_token_info()

-        LOG.info(
+        LOG.debug(
             "Add to Auth management, id=<{}>, type=<{}>, class=<{}>".format(
                 id, auth_type, client.__class__.__name__))

@@ -75,7 +75,8 @@ class RemoteCommandExecutor(object):

     def close_session(self):
         self.__ssh.close()
-        LOG.debug("Connection close")
+        LOG.info(f"The SSH connection to the remote"
+                 f" host {self.__host} has been closed.")

     def execute_command(self, cmd, input_data=None):
         try:
@@ -128,7 +128,7 @@ def load_paste_app(app_name):
        raise cfg.ConfigFilesNotFoundError(
            config_files=[cfg.CONF.api_paste_config])
    config_path = os.path.abspath(config_path)
-   LOG.info("Config paste file: %s", config_path)
+   LOG.debug("Config paste file: %s", config_path)

    try:
        app = deploy.loadapp("config:%s" % config_path, name=app_name)
@@ -574,7 +574,7 @@ def delete_csar_data(package_uuid):
        exc_message = encodeutils.exception_to_unicode(exc)
        msg = _('Failed to delete csar folder: '
                '%(csar_path)s, Error: %(exc)s')
-       LOG.warning(msg, {'csar_path': csar_path, 'exc': exc_message})
+       LOG.error(msg, {'csar_path': csar_path, 'exc': exc_message})


 class PreserveZipFilePermissions(zipfile.ZipFile):
@@ -45,8 +45,8 @@ class DriverManager(object):
             drivers[type_] = ext
         self._drivers = dict((type_, ext.obj)
                              for (type_, ext) in drivers.items())
-        LOG.info("Registered drivers from %(namespace)s: %(keys)s",
-                 {'namespace': namespace, 'keys': self._drivers.keys()})
+        LOG.debug("Registered drivers from %(namespace)s: %(keys)s",
+                  {'namespace': namespace, 'keys': self._drivers.keys()})

     @staticmethod
     def _driver_name(driver):
@@ -622,8 +622,8 @@ class MemoryUnit(object):
             unit = MemoryUnit.validate_unit(unit)
         else:
             unit = MemoryUnit.UNIT_SIZE_DEFAULT
-            LOG.info(_('A memory unit is not provided for size; using the '
-                       'default unit %(default)s.') % {'default': 'B'})
+            LOG.debug(_('A memory unit is not provided for size; using the '
+                        'default unit %(default)s.') % {'default': 'B'})
         result = re.sub(r'\s+', ' ', size).split(' ')
         if len(result) == 2:
             if result[1]:
@@ -632,9 +632,9 @@ class MemoryUnit(object):
                     MemoryUnit.UNIT_SIZE_DICT[unit_size] *
                     math.pow(MemoryUnit.UNIT_SIZE_DICT
                              [unit], -1))
-                LOG.info(_('Given size %(size)s is converted to %(num)s '
-                           '%(unit)s.') % {'size': size,
-                                           'num': converted, 'unit': unit})
+                LOG.debug(_('Given size %(size)s is converted to %(num)s '
+                            '%(unit)s.') % {'size': size,
+                                            'num': converted, 'unit': unit})
         else:
             msg = _('Size is not given for software image data.')
             LOG.error(msg)
@@ -257,9 +257,9 @@ def grant_error_common(function):
            vnf_lcm_op_occs.state_entered_time = timestamp
            vnf_lcm_op_occs.save()
        except Exception as e:
-           LOG.warning("Failed to update vnf_lcm_op_occ for vnf "
-                       "instance %(id)s. Error: %(error)s",
-                       {"id": vnf_instance.id, "error": e})
+           LOG.error("Failed to update vnf_lcm_op_occ for vnf "
+                     "instance %(id)s. Error: %(error)s",
+                     {"id": vnf_instance.id, "error": e})

        try:
            notification = {}
@@ -285,9 +285,9 @@ def grant_error_common(function):
            notification['_links']['vnfLcmOpOcc']['href'] = vnflcm_url
            self.send_notification(context, notification)
        except Exception as e:
-           LOG.warning("Failed notification for vnf "
-                       "instance %(id)s. Error: %(error)s",
-                       {"id": vnf_instance.id, "error": e})
+           LOG.error("Failed notification for vnf "
+                     "instance %(id)s. Error: %(error)s",
+                     {"id": vnf_instance.id, "error": e})

    return decorated_function

@@ -863,10 +863,10 @@ class Conductor(manager.Manager, v2_hook.ConductorV2Hook):
                 shutil.rmtree(csar_zip_temp_path)
                 os.remove(csar_path)
             except OSError:
-                LOG.warning("Failed to delete csar zip %(zip)s and"
-                            " folder $(folder)s for vnf package %(uuid)s.",
-                            {'zip': csar_path, 'folder': csar_zip_temp_path,
-                             'uuid': vnf_pack.id})
+                LOG.error("Failed to delete csar zip %(zip)s and"
+                          " folder $(folder)s for vnf package %(uuid)s.",
+                          {'zip': csar_path, 'folder': csar_zip_temp_path,
+                           'uuid': vnf_pack.id})

     def _get_vnf_link_ports_by_vl(self, vnf_info, ext_vl_id,
                                   resource_id):
@@ -1654,9 +1654,9 @@ class Conductor(manager.Manager, v2_hook.ConductorV2Hook):
         vnf_instance_id = vnf_instance.id

         try:
-            LOG.debug("Update vnf lcm %s %s",
-                      vnf_lcm_op_occs_id,
-                      operation_state)
+            LOG.info("Update vnf lcm %s %s",
+                     vnf_lcm_op_occs_id,
+                     operation_state)
             vnf_lcm_op_occ = objects.VnfLcmOpOcc.get_by_id(context,
                                                            vnf_lcm_op_occs_id)
             vnf_lcm_op_occ.operation_state = operation_state
@@ -1767,8 +1767,8 @@ class Conductor(manager.Manager, v2_hook.ConductorV2Hook):
         """

         try:
-            LOG.debug("send_notification start notification[%s]"
-                      % notification)
+            LOG.info("send_notification start notification[%s]"
+                     % notification)

             notification = utils.convert_snakecase_to_camelcase(notification)

@@ -1820,7 +1820,7 @@ class Conductor(manager.Manager, v2_hook.ConductorV2Hook):

         for num in range(CONF.vnf_lcm.retry_num):
             try:
-                LOG.info("send notify[%s]" %
+                LOG.debug("send notify[%s]" %
                          json.dumps(notification))
                 auth_client = auth.auth_manager.get_auth_client(
                     notification['subscriptionId'])
@@ -1830,7 +1830,7 @@ class Conductor(manager.Manager, v2_hook.ConductorV2Hook):
                     timeout=CONF.vnf_lcm.retry_timeout,
                     verify=CONF.vnf_lcm.verify_notification_ssl)
                 if response.status_code == 204:
-                    LOG.info(
+                    LOG.debug(
                         "send success notify[%s]",
                         json.dumps(notification))
                     break
@@ -1877,6 +1877,15 @@ class Conductor(manager.Manager, v2_hook.ConductorV2Hook):

         for subscription in vnf_lcm_subscriptions:
             if subscription.tenant_id == vnf_instance.get("tenant_id"):
+                if subscription.filter:
+                    filter_values = jsonutils.loads(subscription.filter)
+                    filter_vnfdids = filter_values.get(
+                        'vnfInstanceSubscriptionFilter', {}).get(
+                        'vnfdIds')
+                    if filter_vnfdids:
+                        if vnf_instance.get("vnfd_id") in filter_vnfdids:
+                            extract_vnf_lcm_subscriptions.append(subscription)
+                        continue
                 extract_vnf_lcm_subscriptions.append(subscription)

         return extract_vnf_lcm_subscriptions
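The hunk above is the improved V1 subscription filter: a notification is only forwarded to a subscription whose vnfInstanceSubscriptionFilter.vnfdIds list, if present, contains the instance's vnfd_id, while subscriptions without such a filter keep receiving every notification. A sketch of a subscription body that exercises this path is shown below; the callback URI is a placeholder and the vnfdId is the sample value used elsewhere in these tests.

    # Placeholder subscription request body; only the filter structure matters here.
    subscription_request = {
        "callbackUri": "http://notification-receiver.example/endpoint",
        "filter": {
            "vnfInstanceSubscriptionFilter": {
                # Only instances created from this VNFD trigger a notification.
                "vnfdIds": ["b1bb0ce7-ebca-4fa7-95ed-4840d7000000"],
            }
        }
    }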
@@ -213,8 +213,8 @@ class CommonDbMixin(object):
            query = self._model_query(context, model)
            return query.filter(model.name == name).one()
        except orm_exc.NoResultFound:
-           LOG.info("No result found for %(name)s in %(model)s table",
-                    {'name': name, 'model': model})
+           LOG.error("No result found for %(name)s in %(model)s table",
+                     {'name': name, 'model': model})

    def get_by_name(self, context, model, name):
        return self._get_by_name(context, model, name)
@@ -565,9 +565,9 @@ class VNFMPluginDb(vnfm.VNFMPluginBase, db_base.CommonDbMixin):
            self._update_vnf_status_db(
                context, vnf_info['id'], previous_statuses, 'ERROR')
        except Exception as e:
-           LOG.warning("Failed to revert scale info for vnf "
-                       "instance %(id)s. Error: %(error)s",
-                       {"id": vnf_info['id'], "error": e})
+           LOG.error("Failed to revert scale info for vnf "
+                     "instance %(id)s. Error: %(error)s",
+                     {"id": vnf_info['id'], "error": e})
            self._cos_db_plg.create_event(
                context, res_id=vnf_info['id'],
                res_type=constants.RES_TYPE_VNF,
@@ -799,7 +799,7 @@ class VNFMPluginDb(vnfm.VNFMPluginBase, db_base.CommonDbMixin):
                      filter(~VNF.status.in_(exclude_status)).
                      with_for_update().one())
        except orm_exc.NoResultFound:
-           LOG.warning('no vnf found %s', vnf_id)
+           LOG.error('no vnf found %s', vnf_id)
            return False

        vnf_db.update({'status': new_status})
@@ -52,8 +52,8 @@ def get_csar_data_iter(body):
        return data_iter
    except Exception as e:
        error = encodeutils.exception_to_unicode(e)
-       LOG.warning("Failed to open csar URL: %(url)s due to error: %(error)s",
-                   {"url": url, "error": error})
+       LOG.error("Failed to open csar URL: %(url)s due to error: %(error)s",
+                 {"url": url, "error": error})
        raise exceptions.VNFPackageURLInvalid(url=url)


@@ -73,10 +73,10 @@ def store_csar(context, package_uuid, body):
                                            context=context)
    except Exception as e:
        error = encodeutils.exception_to_unicode(e)
-       LOG.warning("Failed to store csar data in glance store for "
-                   "package %(uuid)s due to error: %(error)s",
-                   {"uuid": package_uuid,
-                    "error": error})
+       LOG.error("Failed to store csar data in glance store for "
+                 "package %(uuid)s due to error: %(error)s",
+                 {"uuid": package_uuid,
+                  "error": error})
        raise exceptions.UploadFailedToGlanceStore(uuid=package_uuid,
                                                   error=error)
    finally:
@@ -95,7 +95,7 @@ class Kubernetes_Driver(abstract_vim_driver.VimAbstractDriver):
            k8s_info = core_api_client.get_api_versions()
            LOG.info(k8s_info)
        except Exception as e:
-           LOG.info('VIM Kubernetes authentication is wrong.')
+           LOG.error('VIM Kubernetes authentication is wrong.')
            # delete temp file
            self.clean_authenticate_vim(auth_dict, file_descriptor)
            raise nfvo.VimUnauthorizedException(message=str(e))
@@ -175,9 +175,8 @@ class Kubernetes_Driver(abstract_vim_driver.VimAbstractDriver):
                LOG.debug('VIM key deleted successfully for vim %s',
                          vim_id)
            except Exception as exception:
-               LOG.warning('VIM key deletion failed for vim %s due to %s',
-                           vim_id,
-                           exception)
+               LOG.error('VIM key deletion failed for vim %s due to %s',
+                         vim_id, exception)
                raise
        else:
            raise nfvo.VimEncryptKeyError(vim_id=vim_id)
@@ -218,9 +217,8 @@ class Kubernetes_Driver(abstract_vim_driver.VimAbstractDriver):
                LOG.debug('VIM auth successfully stored for vim %s',
                          vim_id)
            except Exception as exception:
-               LOG.warning('VIM key creation failed for vim %s due to %s',
-                           vim_id,
-                           exception)
+               LOG.error('VIM key creation failed for vim %s due to %s',
+                         vim_id, exception)
                raise
        else:
            raise nfvo.VimEncryptKeyError(vim_id=vim_id)
@@ -159,7 +159,7 @@ class OpenStack_Driver(abstract_vim_driver.VimAbstractDriver,
        try:
            regions = self._find_regions(ks_client)
        except (exceptions.Unauthorized, exceptions.BadRequest) as e:
-           LOG.warning("Authorization failed for user")
+           LOG.error("Authorization failed for user")
            raise nfvo.VimUnauthorizedException(message=e.message)
        vim_obj['placement_attr'] = {'regions': regions}
        return vim_obj
@@ -204,9 +204,8 @@ class OpenStack_Driver(abstract_vim_driver.VimAbstractDriver,
                LOG.debug('VIM key deleted successfully for vim %s',
                          vim_id)
            except Exception as ex:
-               LOG.warning('VIM key deletion failed for vim %s due to %s',
-                           vim_id,
-                           ex)
+               LOG.error('VIM key deletion failed for vim %s due to %s',
+                         vim_id, ex)
                raise
        else:
            key_file = os.path.join(CONF.vim_keys.openstack, vim_id)
@@ -240,9 +239,8 @@ class OpenStack_Driver(abstract_vim_driver.VimAbstractDriver,
                LOG.debug('VIM auth successfully stored for vim %s',
                          vim_id)
            except Exception as ex:
-               LOG.warning('VIM key creation failed for vim %s due to %s',
-                           vim_id,
-                           ex)
+               LOG.error('VIM key creation failed for vim %s due to %s',
+                         vim_id, ex)
                raise

        else:
@@ -837,7 +835,7 @@ class NeutronClient(object):
        try:
            self.client.delete_sfc_flow_classifier(fc_id)
        except nc_exceptions.NotFound:
-           LOG.warning("fc %s not found", fc_id)
+           LOG.error("fc %s not found", fc_id)
            raise ValueError('fc %s not found' % fc_id)

    def port_pair_create(self, port_pair_dict):
@@ -861,7 +859,7 @@ class NeutronClient(object):
        try:
            self.client.delete_sfc_port_pair(port_pair_id)
        except nc_exceptions.NotFound:
-           LOG.warning('port pair %s not found', port_pair_id)
+           LOG.error('port pair %s not found', port_pair_id)
            raise ValueError('port pair %s not found' % port_pair_id)

    def port_pair_group_create(self, ppg_dict):
@@ -869,7 +867,7 @@ class NeutronClient(object):
            ppg = self.client.create_sfc_port_pair_group(
                {'port_pair_group': ppg_dict})
        except nc_exceptions.BadRequest as e:
-           LOG.warning('create port pair group returns %s', e)
+           LOG.error('create port pair group returns %s', e)
            raise ValueError(str(e))

        if ppg and len(ppg):
@@ -885,7 +883,7 @@ class NeutronClient(object):
        try:
            self.client.delete_sfc_port_pair_group(ppg_id)
        except nc_exceptions.NotFound:
-           LOG.warning('port pair group %s not found', ppg_id)
+           LOG.error('port pair group %s not found', ppg_id)
            raise ValueError('port pair group %s not found' % ppg_id)

    def port_chain_create(self, port_chain_dict):
@@ -893,7 +891,7 @@ class NeutronClient(object):
            pc = self.client.create_sfc_port_chain(
                {'port_chain': port_chain_dict})
        except nc_exceptions.BadRequest as e:
-           LOG.warning('create port chain returns %s', e)
+           LOG.error('create port chain returns %s', e)
            raise ValueError(str(e))

        if pc and len(pc):
@@ -932,7 +930,7 @@ class NeutronClient(object):
                    pp_id = port_pairs[j]
                    self.client.delete_sfc_port_pair(pp_id)
        except nc_exceptions.NotFound:
-           LOG.warning('port chain %s not found', port_chain_id)
+           LOG.error('port chain %s not found', port_chain_id)
            raise ValueError('port chain %s not found' % port_chain_id)

    def port_chain_update(self, port_chain_id, port_chain):
@@ -940,7 +938,7 @@ class NeutronClient(object):
            pc = self.client.update_sfc_port_chain(port_chain_id,
                                                   {'port_chain': port_chain})
        except nc_exceptions.BadRequest as e:
-           LOG.warning('update port chain returns %s', e)
+           LOG.error('update port chain returns %s', e)
            raise ValueError(str(e))
        if pc and len(pc):
            return pc['port_chain']['id']
@@ -971,5 +969,5 @@ class NeutronClient(object):

            return port_pair_group
        except nc_exceptions.NotFound:
-           LOG.warning('port pair group %s not found', ppg_id)
+           LOG.error('port pair group %s not found', ppg_id)
            raise ValueError('port pair group %s not found' % ppg_id)
@@ -47,7 +47,7 @@ class VNFFGNoop(abstract_vnffg_driver.VnffgAbstractDriver):
    @log.log
    def update_chain(self, chain_id, fc_ids, vnfs, auth_attr=None):
        if chain_id not in self._instances:
-           LOG.debug('Chain not found')
+           LOG.error('Chain not found')
            raise ValueError('No chain instance %s' % chain_id)

    @log.log
@@ -63,7 +63,7 @@ class VNFFGNoop(abstract_vnffg_driver.VnffgAbstractDriver):
    @log.log
    def update_flow_classifier(self, fc_id, fc, auth_attr=None):
        if fc_id not in self._instances:
-           LOG.debug('FC not found')
+           LOG.error('FC not found')
            raise ValueError('No FC instance %s' % fc_id)

    @log.log
@@ -234,7 +234,7 @@ class NfvoPlugin(nfvo_db_plugin.NfvoPluginDb, vnffg_db.VnffgPluginDbMixin,
                          vim_id, ex)
                return vim_obj
        except Exception as ex:
-           LOG.debug("Got exception when update_vim %s due to %s",
+           LOG.error("Got exception when update_vim %s due to %s",
                      vim_id, ex)
            with excutils.save_and_reraise_exception():
                if new_auth_created:
@@ -598,7 +598,7 @@ class NfvoPlugin(nfvo_db_plugin.NfvoPluginDb, vnffg_db.VnffgPluginDbMixin,

        f = fernet.Fernet(vim_key)
        if not f:
-           LOG.warning('Unable to decode VIM auth')
+           LOG.error('Unable to decode VIM auth')
            raise nfvo.VimNotFoundException(vim_id=vim_id)
        return f.decrypt(cred).decode('utf-8')

@@ -610,7 +610,7 @@ class NfvoPlugin(nfvo_db_plugin.NfvoPluginDb, vnffg_db.VnffgPluginDbMixin,
            with open(key_file, 'r') as f:
                return f.read()
        except Exception:
-           LOG.warning('VIM id invalid or key not found for %s', vim_id)
+           LOG.error('VIM id invalid or key not found for %s', vim_id)
            raise nfvo.VimKeyNotFoundException(vim_id=vim_id)

    def _vim_resource_name_to_id(self, context, resource, name, vnf_id):
@@ -908,7 +908,7 @@ class NfvoPlugin(nfvo_db_plugin.NfvoPluginDb, vnffg_db.VnffgPluginDbMixin,
                if value['get_input'] in paramvalues:
                    original[key] = paramvalues[value['get_input']]
                else:
-                   LOG.debug('Key missing Value: %s', key)
+                   LOG.error('Key missing Value: %s', key)
                    raise cs.InputValuesMissing(key=key)
            else:
                self._update_params(value, paramvalues)
@@ -41,7 +41,7 @@ def _get_vnfd_id(context, id):
            api.model_query(context, models.VnfPackageVnfd).\
                filter_by(package_uuid=id).first()
    except Exception:
-       LOG.info("select vnf_package_vnfd failed")
+       LOG.warning("select vnf_package_vnfd failed")
    if vnf_package_vnfd:
        return vnf_package_vnfd.vnfd_id
    else:
@@ -53,7 +53,7 @@ def _check_vnfd(context, id):
    try:
        vnfd = api.model_query(context, vnfm_db.VNFD).filter_by(id=id).first()
    except Exception:
-       LOG.info("select vnfd failed")
+       LOG.warning("select vnfd failed")
    if vnfd:
        return "TRUE"
    else:
@@ -65,7 +65,7 @@ def _vnfd_delete(context, id):
    try:
        api.model_query(context, vnfm_db.VNFD).filter_by(id=id).delete()
    except Exception:
-       LOG.info("delete vnfd failed")
+       LOG.warning("delete vnfd failed")


@db_api.context_manager.writer
@@ -77,7 +77,7 @@ def _vnfd_destroy(context, id):
            filter_by(id=id).\
            update(updated_values, synchronize_session=False)
    except Exception:
-       LOG.info("destroy vnfdfailed")
+       LOG.warning("destroy vnfd failed")


@base.TackerObjectRegistry.register
@@ -41,7 +41,7 @@ def _get_vnfd_id(context, id):
            api.model_query(context, models.VnfPackageVnfd).\
                filter_by(package_uuid=id).first()
    except Exception:
-       LOG.info("select vnfd_attribute failed")
+       LOG.warning("select vnfd_attribute failed")
    if vnf_package_vnfd:
        return vnf_package_vnfd.vnfd_id
    else:
@@ -55,7 +55,7 @@ def _check_vnfd_attribute(context, id):
        api.model_query(context, vnfm_db.VNFDAttribute).\
            filter_by(vnfd_id=id).first()
    except Exception:
-       LOG.info("select vnfd_attribute failed")
+       LOG.warning("select vnfd_attribute failed")
    if vnfd_attribute:
        return "TRUE"
    else:
@@ -68,7 +68,7 @@ def _vnfd_attribute_delete(context, id):
        api.model_query(context, vnfm_db.VNFDAttribute).\
            filter_by(vnfd_id=id).delete()
    except Exception:
-       LOG.info("delete vnfd_attribute failed")
+       LOG.warning("delete vnfd_attribute failed")


@base.TackerObjectRegistry.register
@@ -71,10 +71,10 @@ def authorize(context, action, target, do_raise=True, exc=None):
                             do_raise=do_raise, exc=exc, action=action)
    except policy.PolicyNotRegistered:
        with excutils.save_and_reraise_exception():
-           LOG.debug('Policy not registered')
+           LOG.error('Policy not registered')
    except Exception:
        with excutils.save_and_reraise_exception():
-           LOG.debug('Policy check for %(action)s failed with credentials '
+           LOG.error('Policy check for %(action)s failed with credentials '
                      '%(credentials)s',
                      {'action': action, 'credentials': credentials})

@@ -428,7 +428,7 @@ def enforce(context, action, target, plugin=None, pluralized=None):
    except policy.PolicyNotAuthorized:
        with excutils.save_and_reraise_exception():
            log_rule_list(rule)
-           LOG.debug("Failed policy check for '%s'", action)
+           LOG.error("Failed policy check for '%s'", action)
    return result


@@ -55,7 +55,7 @@ def lock_vnf_instance(inst_arg, delay=False):
            # NOTE: 'with lock' is not used since it can't handle
            # lock failed exception well.
            if not lock.acquire(blocking=blocking):
-               LOG.debug("Locking vnfInstance %s failed.", inst_id)
+               LOG.error("Locking vnfInstance %s failed.", inst_id)
                raise sol_ex.OtherOperationInProgress(inst_id=inst_id)

            try:
@@ -251,7 +251,7 @@ class BaseViewBuilder(object):
            loc += 1
            m = self.value_re.match(values[loc:])
            if m is None:
-               LOG.debug("value parse error, %s at loc %d", values, loc)
+               LOG.error("value parse error, %s at loc %d", values, loc)
                raise sol_ex.InvalidAttributeFilter(
                    sol_detail="value parse error")
            loc += m.end()
@@ -269,7 +269,7 @@ class BaseViewBuilder(object):
        while True:
            m = self.simpleFilterExpr_re.match(filter[loc:])
            if m is None:
-               LOG.debug("filter %s parse error at char %d", filter, loc)
+               LOG.error("filter %s parse error at char %d", filter, loc)
                raise sol_ex.InvalidAttributeFilter(
                    sol_detail="filter parse error")
            op = m.group(1)
@@ -286,7 +286,7 @@ class BaseViewBuilder(object):
        if loc == len(filter):
            return res
        if filter[loc] != ';':
-           LOG.debug("filter %s parse error at char %d "
+           LOG.error("filter %s parse error at char %d "
                      "(semicolon expected)", filter, loc)
            raise sol_ex.InvalidAttributeFilter(
                sol_detail="filter parse error. semicolon expected.")
@@ -46,7 +46,10 @@ class HeatClient(object):
            expected_status=[201], body=fields)

        if wait:
-           self.wait_stack_create(fields["stack_name"])
+           self.wait_stack_create(
+               f'{fields["stack_name"]}/{body["stack"]["id"]}')
+
+       return body['stack']['id']

    def update_stack(self, stack_name, fields, wait=True):
        path = f"stacks/{stack_name}"
@@ -75,6 +78,16 @@ class HeatClient(object):
        return (body["stack"]["stack_status"],
                body["stack"]["stack_status_reason"])

+   def get_stack_id(self, stack_name):
+       path = f"stacks/{stack_name}"
+       resp, body = self.client.do_request(path, "GET",
+                                           expected_status=[200, 404])
+
+       if resp.status_code == 404:
+           return None
+
+       return body["stack"]["id"]
+
    def get_resources(self, stack_name):
        # NOTE: Because it is necessary to get nested stack info, it is
        # necessary to specify 'nested_depth=2'.
@@ -91,20 +104,21 @@ class HeatClient(object):
        def _check_status():
            status, status_reason = self.get_status(stack_name)
            if status in complete_status:
-               LOG.info("%s %s done.", operation, stack_name)
+               LOG.info("%s %s done.", operation, stack_name.split('/')[0])
                raise loopingcall.LoopingCallDone()
            elif status in failed_status:
-               LOG.error("%s %s failed.", operation, stack_name)
+               LOG.error("%s %s failed.", operation, stack_name.split('/')[0])
                sol_title = "%s failed" % operation
                raise sol_ex.StackOperationFailed(sol_title=sol_title,
                                                  sol_detail=status_reason)
            elif status not in progress_status:
                LOG.error("%s %s failed. status: %s", operation,
-                         stack_name, status)
+                         stack_name.split('/')[0], status)
                sol_title = "%s failed" % operation
                raise sol_ex.StackOperationFailed(sol_title=sol_title,
                                                  sol_detail='Unknown error')
-           LOG.debug("%s %s %s", operation, stack_name, progress_status)
+           LOG.debug("%s %s %s", operation, stack_name.split('/')[0],
+                     progress_status)

        timer = loopingcall.FixedIntervalLoopingCall(_check_status)
        timer.start(interval=CHECK_INTERVAL).wait()
@@ -122,18 +136,10 @@ class HeatClient(object):
        # for some operations (ex. heal-all).
        # It is expected that it takes short time after "DELETE_COMPLETE".
        # So timeout after "DELETE_COMPLETE" is not specified.
-       self._wait_completion(stack_name, "Stack delete",
+       self._wait_completion(stack_name.split('/')[0], "Stack delete",
            [None], ["DELETE_IN_PROGRESS", "DELETE_COMPLETE"],
            ["DELETE_FAILED"])

-   def get_stack_resource(self, stack_name):
-       path = f"stacks/{stack_name}"
-       resp, body = self.client.do_request(path, "GET",
-                                           expected_status=[200, 404])
-       if resp.status_code == 404:
-           raise sol_ex.StackOperationFailed
-       return body
-
    def get_resource_info(self, stack_id, resource_name):
        path = f"stacks/{stack_id}/resources/{resource_name}"
        resp, body = self.client.do_request(path, "GET",
@@ -193,8 +199,13 @@ def get_port_reses(heat_reses):
    return get_reses_by_types(heat_reses, ['OS::Neutron::Port'])


-def get_stack_name(inst):
-   return "vnf-" + inst.id
+def get_stack_name(inst, stack_id=None):
+   stack_name = f"vnf-{inst.id}"
+   if inst.obj_attr_is_set('instantiatedVnfInfo') and not stack_id:
+       return f"{stack_name}/{inst.instantiatedVnfInfo.metadata['stack_id']}"
+   if stack_id:
+       return f"{stack_name}/{stack_id}"
+   return stack_name


def get_resource_stack_id(heat_res):
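With this helper every caller can address a stack by the non-redirecting "vnf-<instance id>/<stack id>" form: an explicitly passed stack_id wins, an already instantiated VNF falls back to the id stored in instantiatedVnfInfo.metadata, and only a brand-new instance uses the bare name. A rough illustration with made-up ids (not taken from the commit):

    # Hypothetical values, for illustration only:
    #   inst.id = "6a0a"    inst.instantiatedVnfInfo.metadata['stack_id'] = "111"
    #
    # get_stack_name(inst)                    # before instantiation -> "vnf-6a0a"
    # get_stack_name(inst)                    # after instantiation  -> "vnf-6a0a/111"
    # get_stack_name(inst, stack_id="222")    # explicit id          -> "vnf-6a0a/222"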
@@ -94,24 +94,24 @@ class Openstack(object):
        vim_info = inst_utils.select_vim_info(inst.vimConnectionInfo)
        heat_client = heat_utils.HeatClient(vim_info)
        stack_name = heat_utils.get_stack_name(inst)
-       status, _ = heat_client.get_status(stack_name)
-       if status is None:
+       stack_id = heat_client.get_stack_id(stack_name)
+       if stack_id is None:
            fields['stack_name'] = stack_name
-           heat_client.create_stack(fields)
+           stack_id = heat_client.create_stack(fields)
        else:
-           heat_client.update_stack(stack_name, fields)
+           heat_client.update_stack(f'{stack_name}/{stack_id}', fields)

        # make instantiated_vnf_info
        self._make_instantiated_vnf_info(req, inst, grant_req, grant, vnfd,
-                                        heat_client)
+                                        heat_client, stack_id=stack_id)

    def instantiate_rollback(self, req, inst, grant_req, grant, vnfd):
        vim_info = inst_utils.select_vim_info(inst.vimConnectionInfo)
        heat_client = heat_utils.HeatClient(vim_info)
        stack_name = heat_utils.get_stack_name(inst)
-       status, _ = heat_client.get_status(stack_name)
-       if status is not None:
-           heat_client.delete_stack(stack_name)
+       stack_id = heat_client.get_stack_id(stack_name)
+       if stack_id is not None:
+           heat_client.delete_stack(f'{stack_name}/{stack_id}')

    def terminate(self, req, inst, grant_req, grant, vnfd):
        if req.terminationType == 'GRACEFUL':
@@ -270,11 +270,11 @@ class Openstack(object):
            fields["template"] = heat_client.get_template(stack_name)
            if "files" not in fields:
                fields["files"] = heat_client.get_files(stack_name)
-           fields["stack_name"] = stack_name
+           fields["stack_name"] = stack_name.split('/')[0]

            # stack delete and create
            heat_client.delete_stack(stack_name)
-           heat_client.create_stack(fields)
+           stack_id = heat_client.create_stack(fields)
        else:
            # mark unhealthy to target resources.
            # As the target resources has been already selected in
@@ -301,10 +301,11 @@ class Openstack(object):

            # update stack
            heat_client.update_stack(stack_name, fields)
+           stack_id = inst.instantiatedVnfInfo.metadata['stack_id']

        # make instantiated_vnf_info
        self._make_instantiated_vnf_info(req, inst, grant_req, grant, vnfd,
-                                        heat_client)
+                                        heat_client, stack_id=stack_id)

    def change_vnfpkg(self, req, inst, grant_req, grant, vnfd):
        # make HOT
@@ -998,9 +999,11 @@ class Openstack(object):
            metadata[f'image-{vdu_name}'] = image

    def _make_instantiated_vnf_info(self, req, inst, grant_req, grant, vnfd,
-                                   heat_client, is_rollback=False):
+                                   heat_client, is_rollback=False, stack_id=None):
        # get heat resources
-       stack_name = heat_utils.get_stack_name(inst)
+       stack_id = stack_id if stack_id else inst.instantiatedVnfInfo.metadata[
+           'stack_id']
+       stack_name = heat_utils.get_stack_name(inst, stack_id)
        heat_reses = heat_client.get_resources(stack_name)
        nfv_dict = json.loads(heat_client.get_parameters(stack_name)['nfv'])

@@ -1225,4 +1228,7 @@ class Openstack(object):

        inst_vnf_info.vnfcInfo = vnfc_infos

+       # store stack_id into metadata
+       metadata = {"stack_id": stack_id}
+       inst_vnf_info.metadata = metadata
        inst.instantiatedVnfInfo = inst_vnf_info
@@ -31,7 +31,6 @@ class Subscription:
        return {
            "filter": {
                "vnfInstanceSubscriptionFilter": {
-                   "vnfdIds": ["b1bb0ce7-ebca-4fa7-95ed-4840d7000000"],
                    "vnfProductsFromProviders": [{
                        "vnfProvider": "Company",
                        "vnfProducts": [
@@ -326,6 +326,126 @@ class VnfLcmWithUserDataTest(vnflcm_base.BaseVnfLcmTest):
        resp, _ = self._show_subscription(subscription_id)
        self.assertEqual(404, resp.status_code)

+   def test_vnfdid_filter_in_subscription(self):
+       """Test notification filtering by vnfdIds in subscriptions.
+
+       In this test case, we do following steps.
+           - Create VNF package.
+           - Upload VNF package.
+           - Create subscription with vnf instance's vnfdid filter.
+           - Create subscription with other vnfdid filter.
+           - Create subscription without filter.
+           - Create VNF instance.
+           - Instantiate VNF.
+           - Terminate VNF
+           - Delete VNF
+           - Delete all subscriptions
+       """
+       # Pre Setting: Create vnf package.
+       sample_name = 'functional5'
+       csar_package_path = os.path.abspath(
+           os.path.join(
+               os.path.dirname(__file__),
+               "../../../etc/samples/etsi/nfv",
+               sample_name))
+       tempname, _ = vnflcm_base._create_csar_with_unique_vnfd_id(
+           csar_package_path)
+       # upload vnf package
+       vnf_package_id, vnfd_id = vnflcm_base._create_and_upload_vnf_package(
+           self.tacker_client, user_defined_data={
+               "key": sample_name}, temp_csar_path=tempname)
+
+       # Post Setting: Reserve deleting vnf package.
+       self.addCleanup(vnflcm_base._delete_vnf_package, self.tacker_client,
+                       vnf_package_id)
+
+       # Create subscription with vnf instance's vnfdid filter.
+       sub_id_1 = self._gen_sub_and_register_sub(
+           'with_vnfd_id_filter', vnfd_id)
+       self.addCleanup(
+           self._delete_subscription,
+           sub_id_1)
+       # Create subscription with other vnfdid filter.
+       sub_id_2 = self._gen_sub_and_register_sub(
+           'with_other_vnfd_id', uuidutils.generate_uuid())
+       self.addCleanup(
+           self._delete_subscription,
+           sub_id_2)
+       # Create subscription without filter.
+       sub_id_3 = self._gen_sub_and_register_sub(
+           'no_filter', uuidutils.generate_uuid())
+       self.addCleanup(
+           self._delete_subscription,
+           sub_id_3)
+
+       sub_id = self._gen_sub_and_register_sub(self._testMethodName, vnfd_id)
+       self.addCleanup(
+           self._delete_subscription,
+           sub_id)
+       # Create vnf instance
+       resp, vnf_instance = self._create_vnf_instance_from_body(
+           fake_vnflcm.VnfInstances.make_create_request_body(vnfd_id))
+       vnf_instance_id = vnf_instance['id']
+       self._wait_lcm_done(vnf_instance_id=vnf_instance_id)
+       self.assert_create_vnf(resp, vnf_instance, vnf_package_id)
+       self.addCleanup(self._delete_vnf_instance, vnf_instance_id)
+       vnflcm_base.FAKE_SERVER_MANAGER.clear_history(
+           os.path.join(vnflcm_base.MOCK_NOTIFY_CALLBACK_URL,
+                        "with_vnfd_id_filter"))
+       vnflcm_base.FAKE_SERVER_MANAGER.clear_history(
+           os.path.join(vnflcm_base.MOCK_NOTIFY_CALLBACK_URL,
+                        "with_other_vnfd_id"))
+       vnflcm_base.FAKE_SERVER_MANAGER.clear_history(
+           os.path.join(vnflcm_base.MOCK_NOTIFY_CALLBACK_URL,
+                        "no_filter"))
+
+       # Instantiate vnf instance
+       request_body = (
+           fake_vnflcm.VnfInstances.
+           make_inst_request_body_include_num_dynamic(
+               self.vim['tenant_id'], self.ext_networks,
+               self.ext_mngd_networks, self.ext_link_ports, self.ext_subnets))
+       resp, _ = self._instantiate_vnf_instance(vnf_instance_id, request_body)
+       self._wait_lcm_done('COMPLETED', vnf_instance_id=vnf_instance_id)
+       self.assert_instantiate_vnf(resp, vnf_instance_id, vnf_package_id)
+
+       # Show vnf instance
+       resp, vnf_instance = self._show_vnf_instance(vnf_instance_id)
+       self.assertEqual(200, resp.status_code)
+       self.assertEqual(vnf_instance["instantiationState"],
+                        "INSTANTIATED")
+
+       # check subscription
+       for name in ['with_vnfd_id_filter', 'with_other_vnfd_id', 'no_filter']:
+           self._check_subscription(name)
+
+       # Terminate VNF
+       stack = self._get_heat_stack(vnf_instance_id)
+       resources_list = self._get_heat_resource_list(stack.id)
+       resource_name_list = [r.resource_name for r in resources_list]
+       glance_image_id_list = self._get_glance_image_list_from_stack_resource(
+           stack.id, resource_name_list)
+
+       terminate_req_body = fake_vnflcm.VnfInstances.make_term_request_body()
+       resp, _ = self._terminate_vnf_instance(
+           vnf_instance_id, terminate_req_body)
+       self._wait_lcm_done('COMPLETED', vnf_instance_id=vnf_instance_id)
+       self.assert_terminate_vnf(resp, vnf_instance_id, stack.id,
+                                 resource_name_list, glance_image_id_list,
+                                 vnf_package_id)
+       # check subscription
+       for name in ['with_vnfd_id_filter', 'with_other_vnfd_id', 'no_filter']:
+           self._check_subscription(name)
+
+       # Delete VNF
+       resp, _ = self._delete_vnf_instance(vnf_instance_id)
+       self._wait_lcm_done(vnf_instance_id=vnf_instance_id)
+       self.assert_delete_vnf(resp, vnf_instance_id, vnf_package_id)
+
+       # Subscription delete
+       for subsc_id in [sub_id, sub_id_1, sub_id_2, sub_id_3]:
+           self._assert_subscription_deletion(subsc_id)
+
    def test_stack_update_in_scaling(self):
        """Test basic life cycle operations with sample VNFD.

@@ -2191,6 +2311,12 @@ class VnfLcmWithUserDataTest(vnflcm_base.BaseVnfLcmTest):
            self.tacker_client, vnf_pkg_id)
        self.assert_vnf_package_usage_state(vnf_pkg_info)

+   def _assert_subscription_deletion(self, sub_id):
+       resp, _ = self._delete_subscription(sub_id)
+       self.assertEqual(204, resp.status_code)
+       resp, _ = self._show_subscription(sub_id)
+       self.assertEqual(404, resp.status_code)
+
    def _assert_scale_vnf(
            self,
            resp,
@@ -2371,6 +2497,47 @@ class VnfLcmWithUserDataTest(vnflcm_base.BaseVnfLcmTest):
        if _links.get('grant') is not None:
            self.assertIsNotNone(_links.get('grant').get('href'))

+   def _gen_sub_and_register_sub(self, name, vnfd_id):
+       callback_url = os.path.join(vnflcm_base.MOCK_NOTIFY_CALLBACK_URL,
+                                   name)
+       request_body = fake_vnflcm.Subscription.make_create_request_body(
+           'http://localhost:{}{}'.format(
+               vnflcm_base.FAKE_SERVER_MANAGER.SERVER_PORT,
+               callback_url))
+       request_body['filter']['vnfInstanceSubscriptionFilter']['vnfdIds'] = [
+           vnfd_id]
+       if name == 'no_filter':
+           del request_body['filter']
+       vnflcm_base.FAKE_SERVER_MANAGER.set_callback(
+           'GET',
+           callback_url,
+           status_code=204
+       )
+       vnflcm_base.FAKE_SERVER_MANAGER.set_callback(
+           'POST',
+           callback_url,
+           status_code=204
+       )
+       resp, response_body = self._register_subscription(request_body)
+       self.assertEqual(201, resp.status_code)
+       self.assert_http_header_location_for_subscription(resp.headers)
+       self.assert_notification_get(callback_url)
+       subscription_id = response_body.get('id')
+       return subscription_id
+
+   def _check_subscription(self, name):
+       callback_url = os.path.join(
+           vnflcm_base.MOCK_NOTIFY_CALLBACK_URL,
+           name)
+       notify_mock_responses = vnflcm_base.FAKE_SERVER_MANAGER.get_history(
+           callback_url)
+       vnflcm_base.FAKE_SERVER_MANAGER.clear_history(
+           callback_url)
+       if name == 'with_other_vnfd_id':
+           self.assertEqual(0, len(notify_mock_responses))
+       else:
+           self.assertEqual(3, len(notify_mock_responses))
+
    def test_inst_chgextconn_term(self):
        """Test basic life cycle operations with sample VNFD.

@@ -32,7 +32,6 @@ class Subscription:
        return {
            "filter": {
                "vnfInstanceSubscriptionFilter": {
-                   "vnfdIds": ["b1bb0ce7-ebca-4fa7-95ed-4840d7000000"],
                    "vnfProductsFromProviders": [{
                        "vnfProvider": "Company",
                        "vnfProducts": [
@@ -31,7 +31,6 @@ class Subscription:
        return {
            "filter": {
                "vnfInstanceSubscriptionFilter": {
-                   "vnfdIds": ["b1bb0ce7-ebca-4fa7-95ed-4840d7000000"],
                    "vnfProductsFromProviders": [{
                        "vnfProvider": "Company",
                        "vnfProducts": [
@@ -118,8 +118,7 @@ class ChangeVnfPkgVnfLcmTest(test_vnflcm_basic_common.CommonVnfLcmTest):
        expected_inst_attrs.extend(additional_inst_attrs)
        resp_1, body_1 = self.show_vnf_instance(inst_id)
        stack_name = "vnf-{}".format(inst_id)
-       stack_id = self.heat_client.get_stack_resource(stack_name)['stack'][
-           'id']
+       stack_id = self.heat_client.get_stack_id(stack_name)
        image_id_1 = self.get_current_vdu_image(stack_id, stack_name, 'VDU2')
        storageResourceId_1 = [
            obj.get('storageResourceIds') for obj in body_1[
@@ -764,8 +764,7 @@ class VnfLcmTest(test_vnflcm_basic_common.CommonVnfLcmTest):
        network_stack_before_heal = [stack for stack in temp_stacks if
                                     (stack['resource_name'] == 'internalVL3')][0]

-       stack_id_before_heal = self.heat_client.get_stack_resource(stack_name)[
-           'stack']['id']
+       stack_id_before_heal = self.heat_client.get_stack_id(stack_name)
        heal_req = paramgen.heal_vnf_all_max_with_parameter(True)
        resp, body = self.heal_vnf_instance(inst_id, heal_req)
        self.assertEqual(202, resp.status_code)
@@ -774,8 +773,7 @@ class VnfLcmTest(test_vnflcm_basic_common.CommonVnfLcmTest):
        self.wait_lcmocc_complete(lcmocc_id)

        # check stack info
-       stack_id_after_heal = self.heat_client.get_stack_resource(stack_name)[
-           'stack']['id']
+       stack_id_after_heal = self.heat_client.get_stack_id(stack_name)
        self.assertNotEqual(stack_id_before_heal, stack_id_after_heal)
        stack_status, _ = self.heat_client.get_status(stack_name)
        self.assertEqual("CREATE_COMPLETE", stack_status)
@@ -1395,8 +1393,7 @@ class VnfLcmTest(test_vnflcm_basic_common.CommonVnfLcmTest):
        network_stack_before_heal = [stack for stack in temp_stacks if
                                     (stack['resource_name'] == 'internalVL3')][0]

-       stack_id_before_heal = self.heat_client.get_stack_resource(stack_name)[
-           'stack']['id']
+       stack_id_before_heal = self.heat_client.get_stack_id(stack_name)