Merge "Refactor for Performance Improvement"
This commit is contained in:
commit
1d8a377459
@ -0,0 +1,9 @@
|
||||
---
|
||||
upgrade:
|
||||
- |
|
||||
Improve performance of Tacker by refactoring some code, and corrects
|
||||
the output level of log.
|
||||
|
||||
At the same time, the redirection when V2 calls HEAT's API is
|
||||
removed instead of direct call, and the subcription filter when sends
|
||||
notification is improved in V1 code.
|
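The "redirection" the release note removes is Heat's answer to GET /stacks/{stack_name}: a 302 redirect to /stacks/{stack_name}/{stack_id}, i.e. an extra round trip per call. The sketch below illustrates the difference using the plain requests library and a placeholder endpoint, not Tacker's own HTTP client; it is an illustration of the idea, not code from this commit.

import requests

HEAT = "http://heat.example:8004/v1/demo-project"  # placeholder endpoint

def get_stack_via_redirect(session: requests.Session, name: str) -> dict:
    # Heat answers /stacks/{name} with a 302 to /stacks/{name}/{id};
    # following it means two HTTP requests per lookup.
    resp = session.get(f"{HEAT}/stacks/{name}", allow_redirects=True)
    resp.raise_for_status()
    return resp.json()["stack"]

def get_stack_direct(session: requests.Session, name: str,
                     stack_id: str) -> dict:
    # Addressing the stack as {name}/{id} reaches it in a single request,
    # which is what the V2 code now does by keeping the stack id around.
    resp = session.get(f"{HEAT}/stacks/{name}/{stack_id}")
    resp.raise_for_status()
    return resp.json()["stack"]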
@@ -128,9 +128,8 @@ def _get_pagination_max_limit():
             if max_limit == 0:
                 raise ValueError()
         except ValueError:
-            LOG.warning("Invalid value for pagination_max_limit: %s. It "
-                        "should be an integer greater to 0",
-                        cfg.CONF.pagination_max_limit)
+            LOG.warning("pagination_max_limit: %s must be greater than 0",
+                        cfg.CONF.pagination_max_limit)
     return max_limit

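For reference, the validation around the reworded warning reduces to the following self-contained sketch; the constant name and config plumbing are simplified assumptions, not the module's exact code.

import logging

LOG = logging.getLogger(__name__)
PAGINATION_INFINITE = "infinite"

def get_pagination_max_limit(raw_value: str) -> int:
    # Returns -1 for "infinite" or unparsable input, the integer otherwise.
    max_limit = -1
    if raw_value.lower() != PAGINATION_INFINITE:
        try:
            max_limit = int(raw_value)
            if max_limit == 0:
                raise ValueError()
        except ValueError:
            LOG.warning("pagination_max_limit: %s must be greater than 0",
                        raw_value)
    return max_limit

# get_pagination_max_limit("1000") -> 1000
# get_pagination_max_limit("abc")  -> -1 (warning logged)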
@@ -552,9 +552,9 @@ class ExtensionManager(object):
                     new_ext = new_ext_class()
                     self.add_extension(new_ext)
                 except Exception as exception:
-                    LOG.warning("Extension file %(f)s wasn't loaded due to "
-                                "%(exception)s",
-                                {'f': f, 'exception': exception})
+                    LOG.error("Extension file %(f)s wasn't loaded due to "
+                              "%(exception)s",
+                              {'f': f, 'exception': exception})

     def add_extension(self, ext):
         # Do nothing if the extension doesn't check out
@@ -562,7 +562,6 @@ class ExtensionManager(object):
             return

         alias = ext.get_alias()
-        LOG.info('Loaded extension: %s', alias)

         if alias in self.extensions:
             raise exceptions.DuplicatedExtension(alias=alias)
@@ -79,8 +79,8 @@ def Resource(controller, faults=None, deserializers=None, serializers=None):
             mapped_exc = api_common.convert_exception_to_http_exc(e, faults,
                                                                   language)
             if hasattr(mapped_exc, 'code') and 400 <= mapped_exc.code < 500:
-                LOG.info('%(action)s failed (client error): %(exc)s',
-                         {'action': action, 'exc': mapped_exc})
+                LOG.error('%(action)s failed (client error): %(exc)s',
+                          {'action': action, 'exc': mapped_exc})
             else:
                 LOG.exception('%(action)s failed: %(details)s',
                               {'action': action,
@@ -43,7 +43,7 @@ class TackerKeystoneContext(base.ConfigurableMiddleware):
         ctx = context.Context.from_environ(req.environ)

         if not ctx.user_id:
-            LOG.debug("X_USER_ID is not found in request")
+            LOG.error("X_USER_ID is not found in request")
             return webob.exc.HTTPUnauthorized()

         # Inject the context...
@ -104,8 +104,8 @@ class _ClientCredentialsGrant(_OAuth2GrantBase):
|
||||
LOG.info(
|
||||
"Get Access Token, Connecting to <GET:{}>".format(
|
||||
self.token_endpoint))
|
||||
LOG.info("Request Headers={}".format(kwargs.get('headers')))
|
||||
LOG.info("Request Body={}".format(kwargs.get('data')))
|
||||
LOG.debug("[REQ] Headers={}".format(kwargs.get('headers')))
|
||||
LOG.debug("[RES] Body={}".format(kwargs.get('data')))
|
||||
|
||||
response = basic_auth_request.post(
|
||||
self.token_endpoint,
|
||||
@ -114,8 +114,8 @@ class _ClientCredentialsGrant(_OAuth2GrantBase):
|
||||
response.raise_for_status()
|
||||
|
||||
response_body = response.json()
|
||||
LOG.info("[RES] Headers={}".format(response.headers))
|
||||
LOG.info("[RES] Body={}".format(response_body))
|
||||
LOG.debug("[RES] Headers={}".format(response.headers))
|
||||
LOG.debug("[RES] Body={}".format(response_body))
|
||||
|
||||
return response_body
|
||||
|
||||
@ -279,7 +279,7 @@ class _AuthManager:
|
||||
client = _OAuth2Session(grant)
|
||||
client.apply_access_token_info()
|
||||
|
||||
LOG.info(
|
||||
LOG.debug(
|
||||
"Add to Auth management, id=<{}>, type=<{}>, class=<{}>".format(
|
||||
id, auth_type, client.__class__.__name__))
|
||||
|
||||
|
@ -75,7 +75,8 @@ class RemoteCommandExecutor(object):
|
||||
|
||||
def close_session(self):
|
||||
self.__ssh.close()
|
||||
LOG.debug("Connection close")
|
||||
LOG.info(f"The SSH connection to the remote"
|
||||
f" host {self.__host} has been closed.")
|
||||
|
||||
def execute_command(self, cmd, input_data=None):
|
||||
try:
|
||||
|
@ -128,7 +128,7 @@ def load_paste_app(app_name):
|
||||
raise cfg.ConfigFilesNotFoundError(
|
||||
config_files=[cfg.CONF.api_paste_config])
|
||||
config_path = os.path.abspath(config_path)
|
||||
LOG.info("Config paste file: %s", config_path)
|
||||
LOG.debug("Config paste file: %s", config_path)
|
||||
|
||||
try:
|
||||
app = deploy.loadapp("config:%s" % config_path, name=app_name)
|
||||
|
@ -574,7 +574,7 @@ def delete_csar_data(package_uuid):
|
||||
exc_message = encodeutils.exception_to_unicode(exc)
|
||||
msg = _('Failed to delete csar folder: '
|
||||
'%(csar_path)s, Error: %(exc)s')
|
||||
LOG.warning(msg, {'csar_path': csar_path, 'exc': exc_message})
|
||||
LOG.error(msg, {'csar_path': csar_path, 'exc': exc_message})
|
||||
|
||||
|
||||
class PreserveZipFilePermissions(zipfile.ZipFile):
|
||||
|
@ -45,8 +45,8 @@ class DriverManager(object):
|
||||
drivers[type_] = ext
|
||||
self._drivers = dict((type_, ext.obj)
|
||||
for (type_, ext) in drivers.items())
|
||||
LOG.info("Registered drivers from %(namespace)s: %(keys)s",
|
||||
{'namespace': namespace, 'keys': self._drivers.keys()})
|
||||
LOG.debug("Registered drivers from %(namespace)s: %(keys)s",
|
||||
{'namespace': namespace, 'keys': self._drivers.keys()})
|
||||
|
||||
@staticmethod
|
||||
def _driver_name(driver):
|
||||
|
@@ -622,8 +622,8 @@ class MemoryUnit(object):
             unit = MemoryUnit.validate_unit(unit)
         else:
             unit = MemoryUnit.UNIT_SIZE_DEFAULT
-            LOG.info(_('A memory unit is not provided for size; using the '
-                       'default unit %(default)s.') % {'default': 'B'})
+            LOG.debug(_('A memory unit is not provided for size; using the '
+                        'default unit %(default)s.') % {'default': 'B'})
         result = re.sub(r'\s+', ' ', size).split(' ')
         if len(result) == 2:
             if result[1]:
@@ -632,9 +632,9 @@ class MemoryUnit(object):
                     MemoryUnit.UNIT_SIZE_DICT[unit_size] *
                     math.pow(MemoryUnit.UNIT_SIZE_DICT
                              [unit], -1))
-                LOG.info(_('Given size %(size)s is converted to %(num)s '
-                           '%(unit)s.') % {'size': size,
-                                           'num': converted, 'unit': unit})
+                LOG.debug(_('Given size %(size)s is converted to %(num)s '
+                            '%(unit)s.') % {'size': size,
+                                            'num': converted, 'unit': unit})
             else:
                 msg = _('Size is not given for software image data.')
                 LOG.error(msg)
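The conversion in this hunk is plain unit arithmetic: multiply by the source unit's size in bytes and divide by the target unit's. A small worked sketch with an abbreviated, illustrative unit table (the real UNIT_SIZE_DICT in the class defines more units and may use different base values):

import math

# abbreviated, illustrative unit table
UNIT_SIZE_DICT = {'B': 1, 'KB': 1000, 'MB': 1000 ** 2, 'GB': 1000 ** 3}

def convert(size_value: float, from_unit: str, to_unit: str) -> int:
    # Same shape as the expression in the hunk above.
    return int(size_value *
               UNIT_SIZE_DICT[from_unit] *
               math.pow(UNIT_SIZE_DICT[to_unit], -1))

print(convert(2, 'GB', 'MB'))    # 2000
print(convert(512, 'MB', 'GB'))  # 0 (int truncation, as in the original)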
@ -257,9 +257,9 @@ def grant_error_common(function):
|
||||
vnf_lcm_op_occs.state_entered_time = timestamp
|
||||
vnf_lcm_op_occs.save()
|
||||
except Exception as e:
|
||||
LOG.warning("Failed to update vnf_lcm_op_occ for vnf "
|
||||
"instance %(id)s. Error: %(error)s",
|
||||
{"id": vnf_instance.id, "error": e})
|
||||
LOG.error("Failed to update vnf_lcm_op_occ for vnf "
|
||||
"instance %(id)s. Error: %(error)s",
|
||||
{"id": vnf_instance.id, "error": e})
|
||||
|
||||
try:
|
||||
notification = {}
|
||||
@ -285,9 +285,9 @@ def grant_error_common(function):
|
||||
notification['_links']['vnfLcmOpOcc']['href'] = vnflcm_url
|
||||
self.send_notification(context, notification)
|
||||
except Exception as e:
|
||||
LOG.warning("Failed notification for vnf "
|
||||
"instance %(id)s. Error: %(error)s",
|
||||
{"id": vnf_instance.id, "error": e})
|
||||
LOG.error("Failed notification for vnf "
|
||||
"instance %(id)s. Error: %(error)s",
|
||||
{"id": vnf_instance.id, "error": e})
|
||||
|
||||
return decorated_function
|
||||
|
||||
@ -863,10 +863,10 @@ class Conductor(manager.Manager, v2_hook.ConductorV2Hook):
|
||||
shutil.rmtree(csar_zip_temp_path)
|
||||
os.remove(csar_path)
|
||||
except OSError:
|
||||
LOG.warning("Failed to delete csar zip %(zip)s and"
|
||||
" folder $(folder)s for vnf package %(uuid)s.",
|
||||
{'zip': csar_path, 'folder': csar_zip_temp_path,
|
||||
'uuid': vnf_pack.id})
|
||||
LOG.error("Failed to delete csar zip %(zip)s and"
|
||||
" folder $(folder)s for vnf package %(uuid)s.",
|
||||
{'zip': csar_path, 'folder': csar_zip_temp_path,
|
||||
'uuid': vnf_pack.id})
|
||||
|
||||
def _get_vnf_link_ports_by_vl(self, vnf_info, ext_vl_id,
|
||||
resource_id):
|
||||
@ -1654,9 +1654,9 @@ class Conductor(manager.Manager, v2_hook.ConductorV2Hook):
|
||||
vnf_instance_id = vnf_instance.id
|
||||
|
||||
try:
|
||||
LOG.debug("Update vnf lcm %s %s",
|
||||
vnf_lcm_op_occs_id,
|
||||
operation_state)
|
||||
LOG.info("Update vnf lcm %s %s",
|
||||
vnf_lcm_op_occs_id,
|
||||
operation_state)
|
||||
vnf_lcm_op_occ = objects.VnfLcmOpOcc.get_by_id(context,
|
||||
vnf_lcm_op_occs_id)
|
||||
vnf_lcm_op_occ.operation_state = operation_state
|
||||
@ -1767,8 +1767,8 @@ class Conductor(manager.Manager, v2_hook.ConductorV2Hook):
|
||||
"""
|
||||
|
||||
try:
|
||||
LOG.debug("send_notification start notification[%s]"
|
||||
% notification)
|
||||
LOG.info("send_notification start notification[%s]"
|
||||
% notification)
|
||||
|
||||
notification = utils.convert_snakecase_to_camelcase(notification)
|
||||
|
||||
@ -1820,7 +1820,7 @@ class Conductor(manager.Manager, v2_hook.ConductorV2Hook):
|
||||
|
||||
for num in range(CONF.vnf_lcm.retry_num):
|
||||
try:
|
||||
LOG.info("send notify[%s]" %
|
||||
LOG.debug("send notify[%s]" %
|
||||
json.dumps(notification))
|
||||
auth_client = auth.auth_manager.get_auth_client(
|
||||
notification['subscriptionId'])
|
||||
@ -1830,7 +1830,7 @@ class Conductor(manager.Manager, v2_hook.ConductorV2Hook):
|
||||
timeout=CONF.vnf_lcm.retry_timeout,
|
||||
verify=CONF.vnf_lcm.verify_notification_ssl)
|
||||
if response.status_code == 204:
|
||||
LOG.info(
|
||||
LOG.debug(
|
||||
"send success notify[%s]",
|
||||
json.dumps(notification))
|
||||
break
|
||||
@@ -1877,6 +1877,15 @@ class Conductor(manager.Manager, v2_hook.ConductorV2Hook):

         for subscription in vnf_lcm_subscriptions:
             if subscription.tenant_id == vnf_instance.get("tenant_id"):
+                if subscription.filter:
+                    filter_values = jsonutils.loads(subscription.filter)
+                    filter_vnfdids = filter_values.get(
+                        'vnfInstanceSubscriptionFilter', {}).get(
+                        'vnfdIds')
+                    if filter_vnfdids:
+                        if vnf_instance.get("vnfd_id") in filter_vnfdids:
+                            extract_vnf_lcm_subscriptions.append(subscription)
+                        continue
                 extract_vnf_lcm_subscriptions.append(subscription)

         return extract_vnf_lcm_subscriptions
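The block added above is the V1-side subscription filter improvement mentioned in the release note: a subscription whose vnfdIds filter does not contain the instance's VNFD ID is skipped; subscriptions with no vnfdIds filter still receive every notification. An illustrative example of the payload being inspected (field names follow the SOL003 subscription filter; the ID is the one used in the sample data elsewhere in this change):

import json

# What subscription.filter may deserialize to (illustrative content)
subscription_filter = json.dumps({
    "vnfInstanceSubscriptionFilter": {
        "vnfdIds": ["b1bb0ce7-ebca-4fa7-95ed-4840d7000000"]
    }
})

filter_values = json.loads(subscription_filter)
filter_vnfdids = filter_values.get(
    'vnfInstanceSubscriptionFilter', {}).get('vnfdIds')

# A notification is sent only when the instance's vnfd_id is in the list.
print("b1bb0ce7-ebca-4fa7-95ed-4840d7000000" in filter_vnfdids)  # True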
@ -213,8 +213,8 @@ class CommonDbMixin(object):
|
||||
query = self._model_query(context, model)
|
||||
return query.filter(model.name == name).one()
|
||||
except orm_exc.NoResultFound:
|
||||
LOG.info("No result found for %(name)s in %(model)s table",
|
||||
{'name': name, 'model': model})
|
||||
LOG.error("No result found for %(name)s in %(model)s table",
|
||||
{'name': name, 'model': model})
|
||||
|
||||
def get_by_name(self, context, model, name):
|
||||
return self._get_by_name(context, model, name)
|
||||
|
@ -565,9 +565,9 @@ class VNFMPluginDb(vnfm.VNFMPluginBase, db_base.CommonDbMixin):
|
||||
self._update_vnf_status_db(
|
||||
context, vnf_info['id'], previous_statuses, 'ERROR')
|
||||
except Exception as e:
|
||||
LOG.warning("Failed to revert scale info for vnf "
|
||||
"instance %(id)s. Error: %(error)s",
|
||||
{"id": vnf_info['id'], "error": e})
|
||||
LOG.error("Failed to revert scale info for vnf "
|
||||
"instance %(id)s. Error: %(error)s",
|
||||
{"id": vnf_info['id'], "error": e})
|
||||
self._cos_db_plg.create_event(
|
||||
context, res_id=vnf_info['id'],
|
||||
res_type=constants.RES_TYPE_VNF,
|
||||
@ -799,7 +799,7 @@ class VNFMPluginDb(vnfm.VNFMPluginBase, db_base.CommonDbMixin):
|
||||
filter(~VNF.status.in_(exclude_status)).
|
||||
with_for_update().one())
|
||||
except orm_exc.NoResultFound:
|
||||
LOG.warning('no vnf found %s', vnf_id)
|
||||
LOG.error('no vnf found %s', vnf_id)
|
||||
return False
|
||||
|
||||
vnf_db.update({'status': new_status})
|
||||
|
@ -52,8 +52,8 @@ def get_csar_data_iter(body):
|
||||
return data_iter
|
||||
except Exception as e:
|
||||
error = encodeutils.exception_to_unicode(e)
|
||||
LOG.warning("Failed to open csar URL: %(url)s due to error: %(error)s",
|
||||
{"url": url, "error": error})
|
||||
LOG.error("Failed to open csar URL: %(url)s due to error: %(error)s",
|
||||
{"url": url, "error": error})
|
||||
raise exceptions.VNFPackageURLInvalid(url=url)
|
||||
|
||||
|
||||
@ -73,10 +73,10 @@ def store_csar(context, package_uuid, body):
|
||||
context=context)
|
||||
except Exception as e:
|
||||
error = encodeutils.exception_to_unicode(e)
|
||||
LOG.warning("Failed to store csar data in glance store for "
|
||||
"package %(uuid)s due to error: %(error)s",
|
||||
{"uuid": package_uuid,
|
||||
"error": error})
|
||||
LOG.error("Failed to store csar data in glance store for "
|
||||
"package %(uuid)s due to error: %(error)s",
|
||||
{"uuid": package_uuid,
|
||||
"error": error})
|
||||
raise exceptions.UploadFailedToGlanceStore(uuid=package_uuid,
|
||||
error=error)
|
||||
finally:
|
||||
|
@ -95,7 +95,7 @@ class Kubernetes_Driver(abstract_vim_driver.VimAbstractDriver):
|
||||
k8s_info = core_api_client.get_api_versions()
|
||||
LOG.info(k8s_info)
|
||||
except Exception as e:
|
||||
LOG.info('VIM Kubernetes authentication is wrong.')
|
||||
LOG.error('VIM Kubernetes authentication is wrong.')
|
||||
# delete temp file
|
||||
self.clean_authenticate_vim(auth_dict, file_descriptor)
|
||||
raise nfvo.VimUnauthorizedException(message=str(e))
|
||||
@ -175,9 +175,8 @@ class Kubernetes_Driver(abstract_vim_driver.VimAbstractDriver):
|
||||
LOG.debug('VIM key deleted successfully for vim %s',
|
||||
vim_id)
|
||||
except Exception as exception:
|
||||
LOG.warning('VIM key deletion failed for vim %s due to %s',
|
||||
vim_id,
|
||||
exception)
|
||||
LOG.error('VIM key deletion failed for vim %s due to %s',
|
||||
vim_id, exception)
|
||||
raise
|
||||
else:
|
||||
raise nfvo.VimEncryptKeyError(vim_id=vim_id)
|
||||
@ -218,9 +217,8 @@ class Kubernetes_Driver(abstract_vim_driver.VimAbstractDriver):
|
||||
LOG.debug('VIM auth successfully stored for vim %s',
|
||||
vim_id)
|
||||
except Exception as exception:
|
||||
LOG.warning('VIM key creation failed for vim %s due to %s',
|
||||
vim_id,
|
||||
exception)
|
||||
LOG.error('VIM key creation failed for vim %s due to %s',
|
||||
vim_id, exception)
|
||||
raise
|
||||
else:
|
||||
raise nfvo.VimEncryptKeyError(vim_id=vim_id)
|
||||
|
@ -159,7 +159,7 @@ class OpenStack_Driver(abstract_vim_driver.VimAbstractDriver,
|
||||
try:
|
||||
regions = self._find_regions(ks_client)
|
||||
except (exceptions.Unauthorized, exceptions.BadRequest) as e:
|
||||
LOG.warning("Authorization failed for user")
|
||||
LOG.error("Authorization failed for user")
|
||||
raise nfvo.VimUnauthorizedException(message=e.message)
|
||||
vim_obj['placement_attr'] = {'regions': regions}
|
||||
return vim_obj
|
||||
@ -204,9 +204,8 @@ class OpenStack_Driver(abstract_vim_driver.VimAbstractDriver,
|
||||
LOG.debug('VIM key deleted successfully for vim %s',
|
||||
vim_id)
|
||||
except Exception as ex:
|
||||
LOG.warning('VIM key deletion failed for vim %s due to %s',
|
||||
vim_id,
|
||||
ex)
|
||||
LOG.error('VIM key deletion failed for vim %s due to %s',
|
||||
vim_id, ex)
|
||||
raise
|
||||
else:
|
||||
key_file = os.path.join(CONF.vim_keys.openstack, vim_id)
|
||||
@ -240,9 +239,8 @@ class OpenStack_Driver(abstract_vim_driver.VimAbstractDriver,
|
||||
LOG.debug('VIM auth successfully stored for vim %s',
|
||||
vim_id)
|
||||
except Exception as ex:
|
||||
LOG.warning('VIM key creation failed for vim %s due to %s',
|
||||
vim_id,
|
||||
ex)
|
||||
LOG.error('VIM key creation failed for vim %s due to %s',
|
||||
vim_id, ex)
|
||||
raise
|
||||
|
||||
else:
|
||||
@ -837,7 +835,7 @@ class NeutronClient(object):
|
||||
try:
|
||||
self.client.delete_sfc_flow_classifier(fc_id)
|
||||
except nc_exceptions.NotFound:
|
||||
LOG.warning("fc %s not found", fc_id)
|
||||
LOG.error("fc %s not found", fc_id)
|
||||
raise ValueError('fc %s not found' % fc_id)
|
||||
|
||||
def port_pair_create(self, port_pair_dict):
|
||||
@ -861,7 +859,7 @@ class NeutronClient(object):
|
||||
try:
|
||||
self.client.delete_sfc_port_pair(port_pair_id)
|
||||
except nc_exceptions.NotFound:
|
||||
LOG.warning('port pair %s not found', port_pair_id)
|
||||
LOG.error('port pair %s not found', port_pair_id)
|
||||
raise ValueError('port pair %s not found' % port_pair_id)
|
||||
|
||||
def port_pair_group_create(self, ppg_dict):
|
||||
@ -869,7 +867,7 @@ class NeutronClient(object):
|
||||
ppg = self.client.create_sfc_port_pair_group(
|
||||
{'port_pair_group': ppg_dict})
|
||||
except nc_exceptions.BadRequest as e:
|
||||
LOG.warning('create port pair group returns %s', e)
|
||||
LOG.error('create port pair group returns %s', e)
|
||||
raise ValueError(str(e))
|
||||
|
||||
if ppg and len(ppg):
|
||||
@ -885,7 +883,7 @@ class NeutronClient(object):
|
||||
try:
|
||||
self.client.delete_sfc_port_pair_group(ppg_id)
|
||||
except nc_exceptions.NotFound:
|
||||
LOG.warning('port pair group %s not found', ppg_id)
|
||||
LOG.error('port pair group %s not found', ppg_id)
|
||||
raise ValueError('port pair group %s not found' % ppg_id)
|
||||
|
||||
def port_chain_create(self, port_chain_dict):
|
||||
@ -893,7 +891,7 @@ class NeutronClient(object):
|
||||
pc = self.client.create_sfc_port_chain(
|
||||
{'port_chain': port_chain_dict})
|
||||
except nc_exceptions.BadRequest as e:
|
||||
LOG.warning('create port chain returns %s', e)
|
||||
LOG.error('create port chain returns %s', e)
|
||||
raise ValueError(str(e))
|
||||
|
||||
if pc and len(pc):
|
||||
@ -932,7 +930,7 @@ class NeutronClient(object):
|
||||
pp_id = port_pairs[j]
|
||||
self.client.delete_sfc_port_pair(pp_id)
|
||||
except nc_exceptions.NotFound:
|
||||
LOG.warning('port chain %s not found', port_chain_id)
|
||||
LOG.error('port chain %s not found', port_chain_id)
|
||||
raise ValueError('port chain %s not found' % port_chain_id)
|
||||
|
||||
def port_chain_update(self, port_chain_id, port_chain):
|
||||
@ -940,7 +938,7 @@ class NeutronClient(object):
|
||||
pc = self.client.update_sfc_port_chain(port_chain_id,
|
||||
{'port_chain': port_chain})
|
||||
except nc_exceptions.BadRequest as e:
|
||||
LOG.warning('update port chain returns %s', e)
|
||||
LOG.error('update port chain returns %s', e)
|
||||
raise ValueError(str(e))
|
||||
if pc and len(pc):
|
||||
return pc['port_chain']['id']
|
||||
@ -971,5 +969,5 @@ class NeutronClient(object):
|
||||
|
||||
return port_pair_group
|
||||
except nc_exceptions.NotFound:
|
||||
LOG.warning('port pair group %s not found', ppg_id)
|
||||
LOG.error('port pair group %s not found', ppg_id)
|
||||
raise ValueError('port pair group %s not found' % ppg_id)
|
||||
|
@ -47,7 +47,7 @@ class VNFFGNoop(abstract_vnffg_driver.VnffgAbstractDriver):
|
||||
@log.log
|
||||
def update_chain(self, chain_id, fc_ids, vnfs, auth_attr=None):
|
||||
if chain_id not in self._instances:
|
||||
LOG.debug('Chain not found')
|
||||
LOG.error('Chain not found')
|
||||
raise ValueError('No chain instance %s' % chain_id)
|
||||
|
||||
@log.log
|
||||
@ -63,7 +63,7 @@ class VNFFGNoop(abstract_vnffg_driver.VnffgAbstractDriver):
|
||||
@log.log
|
||||
def update_flow_classifier(self, fc_id, fc, auth_attr=None):
|
||||
if fc_id not in self._instances:
|
||||
LOG.debug('FC not found')
|
||||
LOG.error('FC not found')
|
||||
raise ValueError('No FC instance %s' % fc_id)
|
||||
|
||||
@log.log
|
||||
|
@ -234,7 +234,7 @@ class NfvoPlugin(nfvo_db_plugin.NfvoPluginDb, vnffg_db.VnffgPluginDbMixin,
|
||||
vim_id, ex)
|
||||
return vim_obj
|
||||
except Exception as ex:
|
||||
LOG.debug("Got exception when update_vim %s due to %s",
|
||||
LOG.error("Got exception when update_vim %s due to %s",
|
||||
vim_id, ex)
|
||||
with excutils.save_and_reraise_exception():
|
||||
if new_auth_created:
|
||||
@ -598,7 +598,7 @@ class NfvoPlugin(nfvo_db_plugin.NfvoPluginDb, vnffg_db.VnffgPluginDbMixin,
|
||||
|
||||
f = fernet.Fernet(vim_key)
|
||||
if not f:
|
||||
LOG.warning('Unable to decode VIM auth')
|
||||
LOG.error('Unable to decode VIM auth')
|
||||
raise nfvo.VimNotFoundException(vim_id=vim_id)
|
||||
return f.decrypt(cred).decode('utf-8')
|
||||
|
||||
@ -610,7 +610,7 @@ class NfvoPlugin(nfvo_db_plugin.NfvoPluginDb, vnffg_db.VnffgPluginDbMixin,
|
||||
with open(key_file, 'r') as f:
|
||||
return f.read()
|
||||
except Exception:
|
||||
LOG.warning('VIM id invalid or key not found for %s', vim_id)
|
||||
LOG.error('VIM id invalid or key not found for %s', vim_id)
|
||||
raise nfvo.VimKeyNotFoundException(vim_id=vim_id)
|
||||
|
||||
def _vim_resource_name_to_id(self, context, resource, name, vnf_id):
|
||||
@ -908,7 +908,7 @@ class NfvoPlugin(nfvo_db_plugin.NfvoPluginDb, vnffg_db.VnffgPluginDbMixin,
|
||||
if value['get_input'] in paramvalues:
|
||||
original[key] = paramvalues[value['get_input']]
|
||||
else:
|
||||
LOG.debug('Key missing Value: %s', key)
|
||||
LOG.error('Key missing Value: %s', key)
|
||||
raise cs.InputValuesMissing(key=key)
|
||||
else:
|
||||
self._update_params(value, paramvalues)
|
||||
|
@ -41,7 +41,7 @@ def _get_vnfd_id(context, id):
|
||||
api.model_query(context, models.VnfPackageVnfd).\
|
||||
filter_by(package_uuid=id).first()
|
||||
except Exception:
|
||||
LOG.info("select vnf_package_vnfd failed")
|
||||
LOG.warning("select vnf_package_vnfd failed")
|
||||
if vnf_package_vnfd:
|
||||
return vnf_package_vnfd.vnfd_id
|
||||
else:
|
||||
@ -53,7 +53,7 @@ def _check_vnfd(context, id):
|
||||
try:
|
||||
vnfd = api.model_query(context, vnfm_db.VNFD).filter_by(id=id).first()
|
||||
except Exception:
|
||||
LOG.info("select vnfd failed")
|
||||
LOG.warning("select vnfd failed")
|
||||
if vnfd:
|
||||
return "TRUE"
|
||||
else:
|
||||
@ -65,7 +65,7 @@ def _vnfd_delete(context, id):
|
||||
try:
|
||||
api.model_query(context, vnfm_db.VNFD).filter_by(id=id).delete()
|
||||
except Exception:
|
||||
LOG.info("delete vnfd failed")
|
||||
LOG.warning("delete vnfd failed")
|
||||
|
||||
|
||||
@db_api.context_manager.writer
|
||||
@ -77,7 +77,7 @@ def _vnfd_destroy(context, id):
|
||||
filter_by(id=id).\
|
||||
update(updated_values, synchronize_session=False)
|
||||
except Exception:
|
||||
LOG.info("destroy vnfdfailed")
|
||||
LOG.warning("destroy vnfd failed")
|
||||
|
||||
|
||||
@base.TackerObjectRegistry.register
|
||||
|
@ -41,7 +41,7 @@ def _get_vnfd_id(context, id):
|
||||
api.model_query(context, models.VnfPackageVnfd).\
|
||||
filter_by(package_uuid=id).first()
|
||||
except Exception:
|
||||
LOG.info("select vnfd_attribute failed")
|
||||
LOG.warning("select vnfd_attribute failed")
|
||||
if vnf_package_vnfd:
|
||||
return vnf_package_vnfd.vnfd_id
|
||||
else:
|
||||
@ -55,7 +55,7 @@ def _check_vnfd_attribute(context, id):
|
||||
api.model_query(context, vnfm_db.VNFDAttribute).\
|
||||
filter_by(vnfd_id=id).first()
|
||||
except Exception:
|
||||
LOG.info("select vnfd_attribute failed")
|
||||
LOG.warning("select vnfd_attribute failed")
|
||||
if vnfd_attribute:
|
||||
return "TRUE"
|
||||
else:
|
||||
@ -68,7 +68,7 @@ def _vnfd_attribute_delete(context, id):
|
||||
api.model_query(context, vnfm_db.VNFDAttribute).\
|
||||
filter_by(vnfd_id=id).delete()
|
||||
except Exception:
|
||||
LOG.info("delete vnfd_attribute failed")
|
||||
LOG.warning("delete vnfd_attribute failed")
|
||||
|
||||
|
||||
@base.TackerObjectRegistry.register
|
||||
|
@ -71,10 +71,10 @@ def authorize(context, action, target, do_raise=True, exc=None):
|
||||
do_raise=do_raise, exc=exc, action=action)
|
||||
except policy.PolicyNotRegistered:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.debug('Policy not registered')
|
||||
LOG.error('Policy not registered')
|
||||
except Exception:
|
||||
with excutils.save_and_reraise_exception():
|
||||
LOG.debug('Policy check for %(action)s failed with credentials '
|
||||
LOG.error('Policy check for %(action)s failed with credentials '
|
||||
'%(credentials)s',
|
||||
{'action': action, 'credentials': credentials})
|
||||
|
||||
@ -428,7 +428,7 @@ def enforce(context, action, target, plugin=None, pluralized=None):
|
||||
except policy.PolicyNotAuthorized:
|
||||
with excutils.save_and_reraise_exception():
|
||||
log_rule_list(rule)
|
||||
LOG.debug("Failed policy check for '%s'", action)
|
||||
LOG.error("Failed policy check for '%s'", action)
|
||||
return result
|
||||
|
||||
|
||||
|
@@ -55,7 +55,7 @@ def lock_vnf_instance(inst_arg, delay=False):
             # NOTE: 'with lock' is not used since it can't handle
             # lock failed exception well.
             if not lock.acquire(blocking=blocking):
-                LOG.debug("Locking vnfInstance %s failed.", inst_id)
+                LOG.error("Locking vnfInstance %s failed.", inst_id)
                 raise sol_ex.OtherOperationInProgress(inst_id=inst_id)

             try:
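The NOTE in this hunk explains why the lock is acquired explicitly rather than through a with statement: a failed (non-blocking) acquire needs to surface as a dedicated exception. A minimal sketch of that acquire/try/finally shape, with a stand-in lock and exception type rather than Tacker's own:

import threading

class OtherOperationInProgress(Exception):
    """Stand-in for sol_ex.OtherOperationInProgress."""

def run_locked(lock: threading.Lock, inst_id: str, func):
    # Explicit acquire so a failed acquire can be turned into a
    # domain-specific exception instead of silently blocking.
    if not lock.acquire(blocking=False):
        raise OtherOperationInProgress(inst_id)
    try:
        return func()
    finally:
        lock.release()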
@ -251,7 +251,7 @@ class BaseViewBuilder(object):
|
||||
loc += 1
|
||||
m = self.value_re.match(values[loc:])
|
||||
if m is None:
|
||||
LOG.debug("value parse error, %s at loc %d", values, loc)
|
||||
LOG.error("value parse error, %s at loc %d", values, loc)
|
||||
raise sol_ex.InvalidAttributeFilter(
|
||||
sol_detail="value parse error")
|
||||
loc += m.end()
|
||||
@ -269,7 +269,7 @@ class BaseViewBuilder(object):
|
||||
while True:
|
||||
m = self.simpleFilterExpr_re.match(filter[loc:])
|
||||
if m is None:
|
||||
LOG.debug("filter %s parse error at char %d", filter, loc)
|
||||
LOG.error("filter %s parse error at char %d", filter, loc)
|
||||
raise sol_ex.InvalidAttributeFilter(
|
||||
sol_detail="filter parse error")
|
||||
op = m.group(1)
|
||||
@ -286,7 +286,7 @@ class BaseViewBuilder(object):
|
||||
if loc == len(filter):
|
||||
return res
|
||||
if filter[loc] != ';':
|
||||
LOG.debug("filter %s parse error at char %d "
|
||||
LOG.error("filter %s parse error at char %d "
|
||||
"(semicolon expected)", filter, loc)
|
||||
raise sol_ex.InvalidAttributeFilter(
|
||||
sol_detail="filter parse error. semicolon expected.")
|
||||
|
@@ -46,7 +46,10 @@ class HeatClient(object):
                                              expected_status=[201], body=fields)

         if wait:
-            self.wait_stack_create(fields["stack_name"])
+            self.wait_stack_create(
+                f'{fields["stack_name"]}/{body["stack"]["id"]}')
+
+        return body['stack']['id']

     def update_stack(self, stack_name, fields, wait=True):
         path = f"stacks/{stack_name}"
@@ -75,6 +78,16 @@ class HeatClient(object):
         return (body["stack"]["stack_status"],
                 body["stack"]["stack_status_reason"])

+    def get_stack_id(self, stack_name):
+        path = f"stacks/{stack_name}"
+        resp, body = self.client.do_request(path, "GET",
+                                            expected_status=[200, 404])
+
+        if resp.status_code == 404:
+            return None
+
+        return body["stack"]["id"]
+
     def get_resources(self, stack_name):
         # NOTE: Because it is necessary to get nested stack info, it is
         # necessary to specify 'nested_depth=2'.
@@ -91,20 +104,21 @@ class HeatClient(object):
         def _check_status():
             status, status_reason = self.get_status(stack_name)
             if status in complete_status:
-                LOG.info("%s %s done.", operation, stack_name)
+                LOG.info("%s %s done.", operation, stack_name.split('/')[0])
                 raise loopingcall.LoopingCallDone()
             elif status in failed_status:
-                LOG.error("%s %s failed.", operation, stack_name)
+                LOG.error("%s %s failed.", operation, stack_name.split('/')[0])
                 sol_title = "%s failed" % operation
                 raise sol_ex.StackOperationFailed(sol_title=sol_title,
                                                   sol_detail=status_reason)
             elif status not in progress_status:
                 LOG.error("%s %s failed. status: %s", operation,
-                          stack_name, status)
+                          stack_name.split('/')[0], status)
                 sol_title = "%s failed" % operation
                 raise sol_ex.StackOperationFailed(sol_title=sol_title,
                                                   sol_detail='Unknown error')
-            LOG.debug("%s %s %s", operation, stack_name, progress_status)
+            LOG.debug("%s %s %s", operation, stack_name.split('/')[0],
+                      progress_status)

         timer = loopingcall.FixedIntervalLoopingCall(_check_status)
         timer.start(interval=CHECK_INTERVAL).wait()
@@ -122,18 +136,10 @@ class HeatClient(object):
         # for some operations (ex. heal-all).
         # It is expected that it takes short time after "DELETE_COMPLETE".
         # So timeout after "DELETE_COMPLETE" is not specified.
-        self._wait_completion(stack_name, "Stack delete",
+        self._wait_completion(stack_name.split('/')[0], "Stack delete",
             [None], ["DELETE_IN_PROGRESS", "DELETE_COMPLETE"],
            ["DELETE_FAILED"])

-    def get_stack_resource(self, stack_name):
-        path = f"stacks/{stack_name}"
-        resp, body = self.client.do_request(path, "GET",
-                                            expected_status=[200, 404])
-        if resp.status_code == 404:
-            raise sol_ex.StackOperationFailed
-        return body
-
     def get_resource_info(self, stack_id, resource_name):
         path = f"stacks/{stack_id}/resources/{resource_name}"
         resp, body = self.client.do_request(path, "GET",
@@ -193,8 +199,13 @@ def get_port_reses(heat_reses):
     return get_reses_by_types(heat_reses, ['OS::Neutron::Port'])


-def get_stack_name(inst):
-    return "vnf-" + inst.id
+def get_stack_name(inst, stack_id=None):
+    stack_name = f"vnf-{inst.id}"
+    if inst.obj_attr_is_set('instantiatedVnfInfo') and not stack_id:
+        return f"{stack_name}/{inst.instantiatedVnfInfo.metadata['stack_id']}"
+    if stack_id:
+        return f"{stack_name}/{stack_id}"
+    return stack_name


 def get_resource_stack_id(heat_res):
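With the change above, get_stack_name() returns either "vnf-<instance id>" or "vnf-<instance id>/<stack id>" depending on whether a stack ID is already known, which is what lets callers skip Heat's name-to-ID redirect. A self-contained sketch of how it resolves; the VnfInstance stand-in below only mimics the attributes the helper touches and is not Tacker's objects.VnfInstanceV2.

class InstantiatedVnfInfo:
    def __init__(self, stack_id):
        self.metadata = {'stack_id': stack_id}

class VnfInstance:  # illustrative stand-in
    def __init__(self, inst_id, inst_info=None):
        self.id = inst_id
        self.instantiatedVnfInfo = inst_info

    def obj_attr_is_set(self, name):
        return getattr(self, name, None) is not None

def get_stack_name(inst, stack_id=None):
    # same logic as the hunk above
    stack_name = f"vnf-{inst.id}"
    if inst.obj_attr_is_set('instantiatedVnfInfo') and not stack_id:
        return f"{stack_name}/{inst.instantiatedVnfInfo.metadata['stack_id']}"
    if stack_id:
        return f"{stack_name}/{stack_id}"
    return stack_name

inst = VnfInstance("inst-0001", InstantiatedVnfInfo("stack-uuid-0001"))
print(get_stack_name(inst))                     # vnf-inst-0001/stack-uuid-0001
print(get_stack_name(inst, "stack-uuid-0002"))  # vnf-inst-0001/stack-uuid-0002
print(get_stack_name(VnfInstance("inst-0002"))) # vnf-inst-0002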
@@ -94,24 +94,24 @@ class Openstack(object):
         vim_info = inst_utils.select_vim_info(inst.vimConnectionInfo)
         heat_client = heat_utils.HeatClient(vim_info)
         stack_name = heat_utils.get_stack_name(inst)
-        status, _ = heat_client.get_status(stack_name)
-        if status is None:
+        stack_id = heat_client.get_stack_id(stack_name)
+        if stack_id is None:
             fields['stack_name'] = stack_name
-            heat_client.create_stack(fields)
+            stack_id = heat_client.create_stack(fields)
         else:
-            heat_client.update_stack(stack_name, fields)
+            heat_client.update_stack(f'{stack_name}/{stack_id}', fields)

         # make instantiated_vnf_info
         self._make_instantiated_vnf_info(req, inst, grant_req, grant, vnfd,
-                                         heat_client)
+                                         heat_client, stack_id=stack_id)

     def instantiate_rollback(self, req, inst, grant_req, grant, vnfd):
         vim_info = inst_utils.select_vim_info(inst.vimConnectionInfo)
         heat_client = heat_utils.HeatClient(vim_info)
         stack_name = heat_utils.get_stack_name(inst)
-        status, _ = heat_client.get_status(stack_name)
-        if status is not None:
-            heat_client.delete_stack(stack_name)
+        stack_id = heat_client.get_stack_id(stack_name)
+        if stack_id is not None:
+            heat_client.delete_stack(f'{stack_name}/{stack_id}')

     def terminate(self, req, inst, grant_req, grant, vnfd):
         if req.terminationType == 'GRACEFUL':
@@ -270,11 +270,11 @@ class Openstack(object):
                 fields["template"] = heat_client.get_template(stack_name)
             if "files" not in fields:
                 fields["files"] = heat_client.get_files(stack_name)
-            fields["stack_name"] = stack_name
+            fields["stack_name"] = stack_name.split('/')[0]

             # stack delete and create
             heat_client.delete_stack(stack_name)
-            heat_client.create_stack(fields)
+            stack_id = heat_client.create_stack(fields)
         else:
             # mark unhealthy to target resources.
             # As the target resources has been already selected in
@@ -301,10 +301,11 @@ class Openstack(object):

             # update stack
             heat_client.update_stack(stack_name, fields)
+            stack_id = inst.instantiatedVnfInfo.metadata['stack_id']

         # make instantiated_vnf_info
         self._make_instantiated_vnf_info(req, inst, grant_req, grant, vnfd,
-                                         heat_client)
+                                         heat_client, stack_id=stack_id)

     def change_vnfpkg(self, req, inst, grant_req, grant, vnfd):
         # make HOT
@@ -998,9 +999,11 @@ class Openstack(object):
             metadata[f'image-{vdu_name}'] = image

     def _make_instantiated_vnf_info(self, req, inst, grant_req, grant, vnfd,
-                                    heat_client, is_rollback=False):
+                                    heat_client, is_rollback=False, stack_id=None):
         # get heat resources
-        stack_name = heat_utils.get_stack_name(inst)
+        stack_id = stack_id if stack_id else inst.instantiatedVnfInfo.metadata[
+            'stack_id']
+        stack_name = heat_utils.get_stack_name(inst, stack_id)
         heat_reses = heat_client.get_resources(stack_name)
         nfv_dict = json.loads(heat_client.get_parameters(stack_name)['nfv'])

@@ -1225,4 +1228,7 @@ class Openstack(object):

         inst_vnf_info.vnfcInfo = vnfc_infos

+        # store stack_id into metadata
+        metadata = {"stack_id": stack_id}
+        inst_vnf_info.metadata = metadata
         inst.instantiatedVnfInfo = inst_vnf_info
@ -31,7 +31,6 @@ class Subscription:
|
||||
return {
|
||||
"filter": {
|
||||
"vnfInstanceSubscriptionFilter": {
|
||||
"vnfdIds": ["b1bb0ce7-ebca-4fa7-95ed-4840d7000000"],
|
||||
"vnfProductsFromProviders": [{
|
||||
"vnfProvider": "Company",
|
||||
"vnfProducts": [
|
||||
|
@ -326,6 +326,126 @@ class VnfLcmWithUserDataTest(vnflcm_base.BaseVnfLcmTest):
|
||||
resp, _ = self._show_subscription(subscription_id)
|
||||
self.assertEqual(404, resp.status_code)
|
||||
|
||||
def test_vnfdid_filter_in_subscription(self):
|
||||
"""Test notification when virtual storage absent in VNFD.
|
||||
|
||||
In this test case, we do following steps.
|
||||
- Create VNF package.
|
||||
- Upload VNF package.
|
||||
- Create subscription with vnf instance's vnfdid filter.
|
||||
- Create subscription with other vnfdid filter.
|
||||
- Create subscription without filter.
|
||||
- Create VNF instance.
|
||||
- Instantiate VNF.
|
||||
- Terminate VNF
|
||||
- Delete VNF
|
||||
- Delete all subscriptions
|
||||
"""
|
||||
# Pre Setting: Create vnf package.
|
||||
sample_name = 'functional5'
|
||||
csar_package_path = os.path.abspath(
|
||||
os.path.join(
|
||||
os.path.dirname(__file__),
|
||||
"../../../etc/samples/etsi/nfv",
|
||||
sample_name))
|
||||
tempname, _ = vnflcm_base._create_csar_with_unique_vnfd_id(
|
||||
csar_package_path)
|
||||
# upload vnf package
|
||||
vnf_package_id, vnfd_id = vnflcm_base._create_and_upload_vnf_package(
|
||||
self.tacker_client, user_defined_data={
|
||||
"key": sample_name}, temp_csar_path=tempname)
|
||||
|
||||
# Post Setting: Reserve deleting vnf package.
|
||||
self.addCleanup(vnflcm_base._delete_vnf_package, self.tacker_client,
|
||||
vnf_package_id)
|
||||
|
||||
# Create subscription with vnf instance's vnfdid filter.
|
||||
sub_id_1 = self._gen_sub_and_register_sub(
|
||||
'with_vnfd_id_filter', vnfd_id)
|
||||
self.addCleanup(
|
||||
self._delete_subscription,
|
||||
sub_id_1)
|
||||
# Create subscription with other vnfdid filter.
|
||||
sub_id_2 = self._gen_sub_and_register_sub(
|
||||
'with_other_vnfd_id', uuidutils.generate_uuid())
|
||||
self.addCleanup(
|
||||
self._delete_subscription,
|
||||
sub_id_2)
|
||||
# Create subscription without filter.
|
||||
sub_id_3 = self._gen_sub_and_register_sub(
|
||||
'no_filter', uuidutils.generate_uuid())
|
||||
self.addCleanup(
|
||||
self._delete_subscription,
|
||||
sub_id_3)
|
||||
|
||||
sub_id = self._gen_sub_and_register_sub(self._testMethodName, vnfd_id)
|
||||
self.addCleanup(
|
||||
self._delete_subscription,
|
||||
sub_id)
|
||||
# Create vnf instance
|
||||
resp, vnf_instance = self._create_vnf_instance_from_body(
|
||||
fake_vnflcm.VnfInstances.make_create_request_body(vnfd_id))
|
||||
vnf_instance_id = vnf_instance['id']
|
||||
self._wait_lcm_done(vnf_instance_id=vnf_instance_id)
|
||||
self.assert_create_vnf(resp, vnf_instance, vnf_package_id)
|
||||
self.addCleanup(self._delete_vnf_instance, vnf_instance_id)
|
||||
vnflcm_base.FAKE_SERVER_MANAGER.clear_history(
|
||||
os.path.join(vnflcm_base.MOCK_NOTIFY_CALLBACK_URL,
|
||||
"with_vnfd_id_filter"))
|
||||
vnflcm_base.FAKE_SERVER_MANAGER.clear_history(
|
||||
os.path.join(vnflcm_base.MOCK_NOTIFY_CALLBACK_URL,
|
||||
"with_other_vnfd_id"))
|
||||
vnflcm_base.FAKE_SERVER_MANAGER.clear_history(
|
||||
os.path.join(vnflcm_base.MOCK_NOTIFY_CALLBACK_URL,
|
||||
"no_filter"))
|
||||
|
||||
# Instantiate vnf instance
|
||||
request_body = (
|
||||
fake_vnflcm.VnfInstances.
|
||||
make_inst_request_body_include_num_dynamic(
|
||||
self.vim['tenant_id'], self.ext_networks,
|
||||
self.ext_mngd_networks, self.ext_link_ports, self.ext_subnets))
|
||||
resp, _ = self._instantiate_vnf_instance(vnf_instance_id, request_body)
|
||||
self._wait_lcm_done('COMPLETED', vnf_instance_id=vnf_instance_id)
|
||||
self.assert_instantiate_vnf(resp, vnf_instance_id, vnf_package_id)
|
||||
|
||||
# Show vnf instance
|
||||
resp, vnf_instance = self._show_vnf_instance(vnf_instance_id)
|
||||
self.assertEqual(200, resp.status_code)
|
||||
self.assertEqual(vnf_instance["instantiationState"],
|
||||
"INSTANTIATED")
|
||||
|
||||
# check subscription
|
||||
for name in ['with_vnfd_id_filter', 'with_other_vnfd_id', 'no_filter']:
|
||||
self._check_subscription(name)
|
||||
|
||||
# Terminate VNF
|
||||
stack = self._get_heat_stack(vnf_instance_id)
|
||||
resources_list = self._get_heat_resource_list(stack.id)
|
||||
resource_name_list = [r.resource_name for r in resources_list]
|
||||
glance_image_id_list = self._get_glance_image_list_from_stack_resource(
|
||||
stack.id, resource_name_list)
|
||||
|
||||
terminate_req_body = fake_vnflcm.VnfInstances.make_term_request_body()
|
||||
resp, _ = self._terminate_vnf_instance(
|
||||
vnf_instance_id, terminate_req_body)
|
||||
self._wait_lcm_done('COMPLETED', vnf_instance_id=vnf_instance_id)
|
||||
self.assert_terminate_vnf(resp, vnf_instance_id, stack.id,
|
||||
resource_name_list, glance_image_id_list,
|
||||
vnf_package_id)
|
||||
# check subscription
|
||||
for name in ['with_vnfd_id_filter', 'with_other_vnfd_id', 'no_filter']:
|
||||
self._check_subscription(name)
|
||||
|
||||
# Delete VNF
|
||||
resp, _ = self._delete_vnf_instance(vnf_instance_id)
|
||||
self._wait_lcm_done(vnf_instance_id=vnf_instance_id)
|
||||
self.assert_delete_vnf(resp, vnf_instance_id, vnf_package_id)
|
||||
|
||||
# Subscription delete
|
||||
for subsc_id in [sub_id, sub_id_1, sub_id_2, sub_id_3]:
|
||||
self._assert_subscription_deletion(subsc_id)
|
||||
|
||||
def test_stack_update_in_scaling(self):
|
||||
"""Test basic life cycle operations with sample VNFD.
|
||||
|
||||
@ -2191,6 +2311,12 @@ class VnfLcmWithUserDataTest(vnflcm_base.BaseVnfLcmTest):
|
||||
self.tacker_client, vnf_pkg_id)
|
||||
self.assert_vnf_package_usage_state(vnf_pkg_info)
|
||||
|
||||
def _assert_subscription_deletion(self, sub_id):
|
||||
resp, _ = self._delete_subscription(sub_id)
|
||||
self.assertEqual(204, resp.status_code)
|
||||
resp, _ = self._show_subscription(sub_id)
|
||||
self.assertEqual(404, resp.status_code)
|
||||
|
||||
def _assert_scale_vnf(
|
||||
self,
|
||||
resp,
|
||||
@ -2371,6 +2497,47 @@ class VnfLcmWithUserDataTest(vnflcm_base.BaseVnfLcmTest):
|
||||
if _links.get('grant') is not None:
|
||||
self.assertIsNotNone(_links.get('grant').get('href'))
|
||||
|
||||
def _gen_sub_and_register_sub(self, name, vnfd_id):
|
||||
callback_url = os.path.join(vnflcm_base.MOCK_NOTIFY_CALLBACK_URL,
|
||||
name)
|
||||
request_body = fake_vnflcm.Subscription.make_create_request_body(
|
||||
'http://localhost:{}{}'.format(
|
||||
vnflcm_base.FAKE_SERVER_MANAGER.SERVER_PORT,
|
||||
callback_url))
|
||||
request_body['filter']['vnfInstanceSubscriptionFilter']['vnfdIds'] = [
|
||||
vnfd_id]
|
||||
if name == 'no_filter':
|
||||
del request_body['filter']
|
||||
vnflcm_base.FAKE_SERVER_MANAGER.set_callback(
|
||||
'GET',
|
||||
callback_url,
|
||||
status_code=204
|
||||
)
|
||||
vnflcm_base.FAKE_SERVER_MANAGER.set_callback(
|
||||
'POST',
|
||||
callback_url,
|
||||
status_code=204
|
||||
)
|
||||
resp, response_body = self._register_subscription(request_body)
|
||||
self.assertEqual(201, resp.status_code)
|
||||
self.assert_http_header_location_for_subscription(resp.headers)
|
||||
self.assert_notification_get(callback_url)
|
||||
subscription_id = response_body.get('id')
|
||||
return subscription_id
|
||||
|
||||
def _check_subscription(self, name):
|
||||
callback_url = os.path.join(
|
||||
vnflcm_base.MOCK_NOTIFY_CALLBACK_URL,
|
||||
name)
|
||||
notify_mock_responses = vnflcm_base.FAKE_SERVER_MANAGER.get_history(
|
||||
callback_url)
|
||||
vnflcm_base.FAKE_SERVER_MANAGER.clear_history(
|
||||
callback_url)
|
||||
if name == 'with_other_vnfd_id':
|
||||
self.assertEqual(0, len(notify_mock_responses))
|
||||
else:
|
||||
self.assertEqual(3, len(notify_mock_responses))
|
||||
|
||||
def test_inst_chgextconn_term(self):
|
||||
"""Test basic life cycle operations with sample VNFD.
|
||||
|
||||
|
@ -32,7 +32,6 @@ class Subscription:
|
||||
return {
|
||||
"filter": {
|
||||
"vnfInstanceSubscriptionFilter": {
|
||||
"vnfdIds": ["b1bb0ce7-ebca-4fa7-95ed-4840d7000000"],
|
||||
"vnfProductsFromProviders": [{
|
||||
"vnfProvider": "Company",
|
||||
"vnfProducts": [
|
||||
|
@ -31,7 +31,6 @@ class Subscription:
|
||||
return {
|
||||
"filter": {
|
||||
"vnfInstanceSubscriptionFilter": {
|
||||
"vnfdIds": ["b1bb0ce7-ebca-4fa7-95ed-4840d7000000"],
|
||||
"vnfProductsFromProviders": [{
|
||||
"vnfProvider": "Company",
|
||||
"vnfProducts": [
|
||||
|
@ -118,8 +118,7 @@ class ChangeVnfPkgVnfLcmTest(test_vnflcm_basic_common.CommonVnfLcmTest):
|
||||
expected_inst_attrs.extend(additional_inst_attrs)
|
||||
resp_1, body_1 = self.show_vnf_instance(inst_id)
|
||||
stack_name = "vnf-{}".format(inst_id)
|
||||
stack_id = self.heat_client.get_stack_resource(stack_name)['stack'][
|
||||
'id']
|
||||
stack_id = self.heat_client.get_stack_id(stack_name)
|
||||
image_id_1 = self.get_current_vdu_image(stack_id, stack_name, 'VDU2')
|
||||
storageResourceId_1 = [
|
||||
obj.get('storageResourceIds') for obj in body_1[
|
||||
|
@ -764,8 +764,7 @@ class VnfLcmTest(test_vnflcm_basic_common.CommonVnfLcmTest):
|
||||
network_stack_before_heal = [stack for stack in temp_stacks if
|
||||
(stack['resource_name'] == 'internalVL3')][0]
|
||||
|
||||
stack_id_before_heal = self.heat_client.get_stack_resource(stack_name)[
|
||||
'stack']['id']
|
||||
stack_id_before_heal = self.heat_client.get_stack_id(stack_name)
|
||||
heal_req = paramgen.heal_vnf_all_max_with_parameter(True)
|
||||
resp, body = self.heal_vnf_instance(inst_id, heal_req)
|
||||
self.assertEqual(202, resp.status_code)
|
||||
@ -774,8 +773,7 @@ class VnfLcmTest(test_vnflcm_basic_common.CommonVnfLcmTest):
|
||||
self.wait_lcmocc_complete(lcmocc_id)
|
||||
|
||||
# check stack info
|
||||
stack_id_after_heal = self.heat_client.get_stack_resource(stack_name)[
|
||||
'stack']['id']
|
||||
stack_id_after_heal = self.heat_client.get_stack_id(stack_name)
|
||||
self.assertNotEqual(stack_id_before_heal, stack_id_after_heal)
|
||||
stack_status, _ = self.heat_client.get_status(stack_name)
|
||||
self.assertEqual("CREATE_COMPLETE", stack_status)
|
||||
@ -1395,8 +1393,7 @@ class VnfLcmTest(test_vnflcm_basic_common.CommonVnfLcmTest):
|
||||
network_stack_before_heal = [stack for stack in temp_stacks if
|
||||
(stack['resource_name'] == 'internalVL3')][0]
|
||||
|
||||
stack_id_before_heal = self.heat_client.get_stack_resource(stack_name)[
|
||||
'stack']['id']
|
||||
stack_id_before_heal = self.heat_client.get_stack_id(stack_name)
|
||||
heal_req = paramgen.heal_vnf_all_max_with_parameter(True)
|
||||
resp, body = self.heal_vnf_instance(inst_id, heal_req)
|
||||
self.assertEqual(202, resp.status_code)
|
||||
@ -1405,8 +1402,7 @@ class VnfLcmTest(test_vnflcm_basic_common.CommonVnfLcmTest):
|
||||
self.wait_lcmocc_complete(lcmocc_id)
|
||||
|
||||
# check stack info
|
||||
stack_id_after_heal = self.heat_client.get_stack_resource(stack_name)[
|
||||
'stack']['id']
|
||||
stack_id_after_heal = self.heat_client.get_stack_id(stack_name)
|
||||
self.assertNotEqual(stack_id_before_heal, stack_id_after_heal)
|
||||
stack_status, _ = self.heat_client.get_status(stack_name)
|
||||
self.assertEqual("CREATE_COMPLETE", stack_status)
|
||||
@ -1479,8 +1475,7 @@ class VnfLcmTest(test_vnflcm_basic_common.CommonVnfLcmTest):
|
||||
network_stack_before_heal = [stack for stack in temp_stacks if
|
||||
(stack['resource_name'] == 'internalVL3')][0]
|
||||
|
||||
stack_id_before_heal = self.heat_client.get_stack_resource(stack_name)[
|
||||
'stack']['id']
|
||||
stack_id_before_heal = self.heat_client.get_stack_id(stack_name)
|
||||
heal_req = paramgen.heal_vnf_all_max_with_parameter(True)
|
||||
resp, body = self.heal_vnf_instance(inst_id, heal_req)
|
||||
self.assertEqual(202, resp.status_code)
|
||||
@ -1489,8 +1484,7 @@ class VnfLcmTest(test_vnflcm_basic_common.CommonVnfLcmTest):
|
||||
self.wait_lcmocc_complete(lcmocc_id)
|
||||
|
||||
# check stack info
|
||||
stack_id_after_heal = self.heat_client.get_stack_resource(stack_name)[
|
||||
'stack']['id']
|
||||
stack_id_after_heal = self.heat_client.get_stack_id(stack_name)
|
||||
self.assertNotEqual(stack_id_before_heal, stack_id_after_heal)
|
||||
stack_status, _ = self.heat_client.get_status(stack_name)
|
||||
self.assertEqual("CREATE_COMPLETE", stack_status)
|
||||
@ -1532,8 +1526,7 @@ class VnfLcmTest(test_vnflcm_basic_common.CommonVnfLcmTest):
|
||||
network_stack_after_heal['physical_resource_id'])
|
||||
|
||||
# 14. Change external connectivity
|
||||
stack_id = self.heat_client.get_stack_resource(stack_name)['stack'][
|
||||
'id']
|
||||
stack_id = self.heat_client.get_stack_id(stack_name)
|
||||
port_info = self.heat_client.get_resource_info(
|
||||
f"{stack_name}/{stack_id}", 'VDU2_CP2')
|
||||
before_physical_resource_id = port_info['physical_resource_id']
|
||||
|
@ -614,8 +614,7 @@ class CommonVnfLcmTest(base_v2.BaseSolV2Test):
|
||||
network_stack_before_heal = [stack for stack in temp_stacks if
|
||||
(stack['resource_name'] == 'internalVL3')][0]
|
||||
|
||||
stack_id_before_heal = self.heat_client.get_stack_resource(stack_name)[
|
||||
'stack']['id']
|
||||
stack_id_before_heal = self.heat_client.get_stack_id(stack_name)
|
||||
heal_req = paramgen.heal_vnf_all_max_with_parameter(True)
|
||||
resp, body = self.heal_vnf_instance(inst_id, heal_req)
|
||||
self.assertEqual(202, resp.status_code)
|
||||
@ -624,8 +623,7 @@ class CommonVnfLcmTest(base_v2.BaseSolV2Test):
|
||||
self.wait_lcmocc_complete(lcmocc_id)
|
||||
|
||||
# check stack info
|
||||
stack_id_after_heal = self.heat_client.get_stack_resource(stack_name)[
|
||||
'stack']['id']
|
||||
stack_id_after_heal = self.heat_client.get_stack_id(stack_name)
|
||||
self.assertNotEqual(stack_id_before_heal, stack_id_after_heal)
|
||||
stack_status, _ = self.heat_client.get_status(stack_name)
|
||||
self.assertEqual("CREATE_COMPLETE", stack_status)
|
||||
@ -816,8 +814,7 @@ class CommonVnfLcmTest(base_v2.BaseSolV2Test):
|
||||
before_physical_resource_id_1 = port_info['physical_resource_id']
|
||||
before_fixed_ips_1 = port_info['attributes']['fixed_ips']
|
||||
|
||||
stack_id_2 = self.heat_client.get_stack_resource(stack_name)['stack'][
|
||||
'id']
|
||||
stack_id_2 = self.heat_client.get_stack_id(stack_name)
|
||||
port_info = self.heat_client.get_resource_info(
|
||||
f"{stack_name}/{stack_id_2}", 'VDU2_CP2')
|
||||
before_physical_resource_id_2 = port_info['physical_resource_id']
|
||||
@ -836,8 +833,7 @@ class CommonVnfLcmTest(base_v2.BaseSolV2Test):
|
||||
after_physical_resource_id_1 = port_info['physical_resource_id']
|
||||
after_fixed_ips_1 = port_info['attributes']['fixed_ips']
|
||||
|
||||
stack_id_2 = self.heat_client.get_stack_resource(stack_name)['stack'][
|
||||
'id']
|
||||
stack_id_2 = self.heat_client.get_stack_id(stack_name)
|
||||
port_info = self.heat_client.get_resource_info(
|
||||
f"{stack_name}/{stack_id_2}", 'VDU2_CP2')
|
||||
after_physical_resource_id_2 = port_info['physical_resource_id']
|
||||
@ -1744,8 +1740,7 @@ class CommonVnfLcmTest(base_v2.BaseSolV2Test):
|
||||
# 3. Show VNF instance
|
||||
resp_1, body_1 = self.show_vnf_instance(inst_id)
|
||||
stack_name = "vnf-{}".format(inst_id)
|
||||
stack_id = self.heat_client.get_stack_resource(stack_name)['stack'][
|
||||
'id']
|
||||
stack_id = self.heat_client.get_stack_id(stack_name)
|
||||
image_id_1 = self.get_current_vdu_image(stack_id, stack_name, 'VDU2')
|
||||
|
||||
self.assertEqual(200, resp_1.status_code)
|
||||
@ -2316,7 +2311,8 @@ class CommonVnfLcmTest(base_v2.BaseSolV2Test):
|
||||
|
||||
# 9. Delete VNF instance
|
||||
# Delete Stack
|
||||
self.heat_client.delete_stack(f'vnf-{inst_id}')
|
||||
stack_id = self.heat_client.get_stack_id(f'vnf-{inst_id}')
|
||||
self.heat_client.delete_stack(f'vnf-{inst_id}/{stack_id}')
|
||||
|
||||
resp, body = self.delete_vnf_instance(inst_id)
|
||||
self.assertEqual(204, resp.status_code)
|
||||
|
@@ -207,7 +207,7 @@ class ResourceTestCase(base.BaseTestCase):
         with mock.patch.object(wsgi_resource, 'LOG') as log:
             res = resource.get('', extra_environ=environ, expect_errors=True)
         self.assertEqual(map_webob_exc.code, res.status_int)
-        self.assertEqual(expect_log_info, log.info.called)
+        self.assertEqual(expect_log_info, log.error.called)
         self.assertNotEqual(expect_log_info, log.exception.called)

     def test_4xx_error_logged_info_level(self):
@ -128,12 +128,14 @@ class TestConductor(SqlTestCase, unit_base.FixturedTestCase):
|
||||
def _create_vnf_package_vnfd(self):
|
||||
return fakes.get_vnf_package_vnfd()
|
||||
|
||||
def _create_subscriptions(self, auth_params=None):
|
||||
def _create_subscriptions(self, auth_params=None, filter_params=None):
|
||||
class DummyLcmSubscription:
|
||||
def __init__(self, auth_params=None):
|
||||
if auth_params:
|
||||
self.authentication = json.dumps(
|
||||
auth_params).encode()
|
||||
if filter_params:
|
||||
self.filter = json.dumps(filter_params).encode()
|
||||
|
||||
self.id = uuidsentinel.lcm_subscription_id
|
||||
self.tenant_id = uuidsentinel.tenant_id
|
||||
@ -3018,7 +3020,30 @@ class TestConductor(SqlTestCase, unit_base.FixturedTestCase):
|
||||
|
||||
mock_vnf_by_id.return_value = fakes.return_vnf_instance(
|
||||
fields.VnfInstanceState.INSTANTIATED)
|
||||
mock_subscriptions_get.return_value = self._create_subscriptions()
|
||||
sub_1 = self._create_subscriptions(
|
||||
filter_params={
|
||||
"vnfInstanceSubscriptionFilter": {
|
||||
"vnfdIds": [uuidsentinel.vnfd_id]
|
||||
}
|
||||
}
|
||||
)
|
||||
sub_2 = self._create_subscriptions()
|
||||
sub_2[0].tenant_id = uuidsentinel.tenant_id_2
|
||||
sub_3 = self._create_subscriptions()
|
||||
sub_4 = self._create_subscriptions(
|
||||
filter_params={
|
||||
"vnfInstanceSubscriptionFilter": {
|
||||
"vnfdIds": [uuidsentinel.vnfd_id_2]
|
||||
}
|
||||
}
|
||||
)
|
||||
sub_5 = self._create_subscriptions(
|
||||
filter_params={
|
||||
"vnfInstanceSubscriptionFilter": {}
|
||||
}
|
||||
)
|
||||
sub_sum = sub_1 + sub_2 + sub_3 + sub_4 + sub_5
|
||||
mock_subscriptions_get.return_value = sub_sum
|
||||
notification = {
|
||||
'vnfInstanceId': uuidsentinel.vnf_instance_id,
|
||||
'notificationType': 'VnfLcmOperationOccurrenceNotification',
|
||||
@ -3034,7 +3059,7 @@ class TestConductor(SqlTestCase, unit_base.FixturedTestCase):
|
||||
history = self.requests_mock.request_history
|
||||
req_count = nfvo_client._count_mock_history(
|
||||
history, "https://localhost")
|
||||
self.assertEqual(1, req_count)
|
||||
self.assertEqual(3, req_count)
|
||||
|
||||
@mock.patch.object(objects.VnfInstance, 'get_by_id')
|
||||
@mock.patch.object(objects.LccnSubscriptionRequest,
|
||||
|
@ -32,6 +32,7 @@ from tacker.tests import base
|
||||
|
||||
SAMPLE_VNFD_ID = "b1bb0ce7-ebca-4fa7-95ed-4840d7000000"
|
||||
SAMPLE_FLAVOUR_ID = "simple"
|
||||
STACK_ID = "d7aeba20-1b00-4bff-b050-6b42a262c84d"
|
||||
|
||||
# instantiateVnfRequest example
|
||||
_vim_connection_info_example = {
|
||||
@ -1243,6 +1244,9 @@ _inst_info_example = {
|
||||
}
|
||||
}
|
||||
],
|
||||
"metadata": {
|
||||
"stack_id": STACK_ID
|
||||
}
|
||||
# "vnfcInfo": omitted
|
||||
}
|
||||
|
||||
@ -1812,7 +1816,8 @@ _expected_inst_info = {
|
||||
"vnfcResourceInfoId": "res_id_VDU2",
|
||||
"vnfcState": "STARTED"
|
||||
}
|
||||
]
|
||||
],
|
||||
"metadata": {"stack_id": STACK_ID}
|
||||
}
|
||||
|
||||
_expected_inst_info_vnfc_updated = copy.copy(_expected_inst_info)
|
||||
@ -2255,7 +2260,10 @@ _expected_inst_info_change_ext_conn = {
|
||||
_expected_inst_info["vnfVirtualLinkResourceInfo"],
|
||||
"virtualStorageResourceInfo":
|
||||
_expected_inst_info["virtualStorageResourceInfo"],
|
||||
"vnfcInfo": _expected_inst_info["vnfcInfo"]
|
||||
"vnfcInfo": _expected_inst_info["vnfcInfo"],
|
||||
"metadata": {
|
||||
"stack_id": STACK_ID
|
||||
}
|
||||
}
|
||||
|
||||
mock_resource = {
|
||||
@ -2837,7 +2845,7 @@ class TestOpenstack(base.BaseTestCase):
|
||||
|
||||
# execute make_instantiated_vnf_info
|
||||
self.driver._make_instantiated_vnf_info(req, inst, grant_req, grant,
self.vnfd_1, heat_client)
self.vnfd_1, heat_client, stack_id=STACK_ID)

# check
result = inst.to_dict()["instantiatedVnfInfo"]
@ -2869,7 +2877,8 @@ class TestOpenstack(base.BaseTestCase):
heat_client.get_template.return_value = _heat_get_template_example

# execute make_instantiated_vnf_info
self.driver._make_instantiated_vnf_info(req, inst, grant_req, grant,
self.driver._make_instantiated_vnf_info(
req, inst, grant_req, grant,
self.vnfd_1, heat_client)

# check
@ -2904,13 +2913,15 @@ class TestOpenstack(base.BaseTestCase):
heat_client.get_template.return_value = _heat_get_template_example

# execute make_instantiated_vnf_info
self.driver._make_instantiated_vnf_info(req, inst, grant_req, grant,
self.driver._make_instantiated_vnf_info(
req, inst, grant_req, grant,
self.vnfd_1, heat_client)

# check
result = inst.to_dict()["instantiatedVnfInfo"]
self._check_inst_info(_expected_inst_info_change_ext_conn, result)

@mock.patch.object(openstack.heat_utils.HeatClient, 'get_stack_id')
@mock.patch.object(openstack.heat_utils.HeatClient, 'get_status')
@mock.patch.object(openstack.heat_utils.HeatClient, 'create_stack')
@mock.patch.object(openstack.heat_utils.HeatClient, 'update_stack')
@ -2918,7 +2929,8 @@ class TestOpenstack(base.BaseTestCase):
@mock.patch.object(openstack.heat_utils.HeatClient, 'get_parameters')
@mock.patch.object(openstack.heat_utils.HeatClient, 'get_template')
def test_instantiate(self, mock_template, mock_parameters, mock_resources,
mock_update_stack, mock_create_stack, mock_status):
mock_update_stack, mock_create_stack, mock_status,
mock_stack_id):
# prepare
req = objects.InstantiateVnfRequest.from_dict(_instantiate_req_example)
inst = objects.VnfInstanceV2(
@ -2936,22 +2948,24 @@ class TestOpenstack(base.BaseTestCase):
operation=fields.LcmOperationType.INSTANTIATE
)
grant = objects.GrantV1()
mock_status.return_value = (None, 'test')
mock_resources.return_value = _heat_reses_example
mock_parameters.return_value = _heat_get_parameters_example
mock_template.return_value = _heat_get_template_example
mock_stack_id.return_value = None
# execute
self.driver.instantiate(req, inst, grant_req, grant, self.vnfd_1)
mock_create_stack.assert_called_once()

mock_status.return_value = ('Create_Failed', 'test')
mock_stack_id.return_value = STACK_ID
# execute
self.driver.instantiate(req, inst, grant_req, grant, self.vnfd_1)
mock_update_stack.assert_called_once()

@mock.patch.object(openstack.heat_utils.HeatClient, 'get_stack_id')
@mock.patch.object(openstack.heat_utils.HeatClient, 'get_status')
@mock.patch.object(openstack.heat_utils.HeatClient, 'delete_stack')
def test_instantiate_rollback(self, mock_delete_stack, mock_status):
def test_instantiate_rollback(self, mock_delete_stack, mock_status,
mock_stack_id):
# prepare
req = objects.InstantiateVnfRequest.from_dict(_instantiate_req_example)
inst = objects.VnfInstanceV2(
@ -2969,13 +2983,13 @@ class TestOpenstack(base.BaseTestCase):
operation=fields.LcmOperationType.INSTANTIATE
)
grant = objects.GrantV1()
mock_status.return_value = (None, 'test')
mock_stack_id.return_value = None
# execute
self.driver.instantiate_rollback(
req, inst, grant_req, grant, self.vnfd_1)
mock_delete_stack.assert_not_called()

mock_status.return_value = ('Create_Failed', 'test')
mock_stack_id.return_value = STACK_ID
# execute
self.driver.instantiate_rollback(
req, inst, grant_req, grant, self.vnfd_1)

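These test hunks feed HeatClient.get_stack_id through mock_stack_id to choose between creating and updating the Heat stack. The sketch below is illustrative only, not code from this commit; the instantiate() helper and the update_stack() call signature are assumptions made for the example.

# Illustrative sketch -- assumes a heat_client exposing
# get_stack_id()/create_stack()/update_stack(), mirroring what
# mock_stack_id drives in test_instantiate above.
from unittest import mock


def instantiate(heat_client, stack_name, fields):
    """Create the stack on first instantiation, update it on a retry."""
    stack_id = heat_client.get_stack_id(stack_name)
    if stack_id is None:
        # No existing stack: fresh instantiation.
        heat_client.create_stack(fields)
    else:
        # A previously created (e.g. failed) stack exists: retry via update.
        heat_client.update_stack(stack_id, fields)


if __name__ == "__main__":
    client = mock.Mock()
    client.get_stack_id.return_value = None
    instantiate(client, "vnf-example", {"template": "..."})
    client.create_stack.assert_called_once()

    client.get_stack_id.return_value = "stack-uuid"
    instantiate(client, "vnf-example", {"template": "..."})
    client.update_stack.assert_called_once()
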
@ -272,7 +272,7 @@ class TestOpenStack(base.TestCase):
auth_attr=utils.get_vim_auth_obj())
log_msg = "at vnf_id {} because all parameters "\
"match the existing one.".format(vnf_id)
mock_log.warning.assert_called_with(log_msg)
mock_log.error.assert_called_with(log_msg)

@mock.patch('tacker.vnfm.infra_drivers.openstack.openstack.LOG')
def test_update_empty_param(self, mock_log):
@ -287,7 +287,7 @@ class TestOpenStack(base.TestCase):
auth_attr=utils.get_vim_auth_obj())
log_msg = "at vnf_id {} because the target "\
"yaml is empty.".format(vnf_id)
mock_log.warning.assert_called_with(log_msg)
mock_log.error.assert_called_with(log_msg)

def _get_expected_fields_tosca(self, template):
return {'stack_name':

@ -1435,7 +1435,7 @@ class TestOpenStack(base.FixturedTestCase):
self.assertRaises(vnfm.VNFDeleteWaitFailed,
self.openstack.delete_wait,
None, None, self.instance_uuid, None, None)
self.mock_log.warning.assert_called_once()
self.mock_log.error.assert_called_once()

def test_update_wait(self):
self._response_in_wait_until_stack_ready(["CREATE_COMPLETE"])
@ -1666,7 +1666,7 @@ class TestOpenStack(base.FixturedTestCase):
policy=fd_utils.get_dummy_policy_dict(),
region_name=None,
last_event_id=fd_utils.get_dummy_event()['id'])
mock_log.warning.assert_called_once()
mock_log.error.assert_called_once()

def _response_in_resource_metadata(self, metadata=None):
# response for heat_client's resource_metadata()
@ -1693,7 +1693,7 @@ class TestOpenStack(base.FixturedTestCase):
policy=fd_utils.get_dummy_policy_dict(),
region_name=None,
last_event_id=uuidsentinel.event_id)
self.mock_log.warning.assert_called_once()
self.mock_log.error.assert_called_once()

def test_scale_wait_without_resource_metadata(self):
dummy_event = fd_utils.get_dummy_event("CREATE_IN_PROGRESS")

@ -724,9 +724,9 @@ def convert_inst_req_info(heat_dict, inst_req_info, tosca):
aspect_vdu_dict, vdu_delta_dict,
heat_dict)
else:
LOG.info('Because instLevelId is not defined and '
'there is no default level in TOSCA, '
'the conversion of desired_capacity is skipped.')
LOG.debug('Because instLevelId is not defined and '
'there is no default level in TOSCA, '
'the conversion of desired_capacity is skipped.')


@log.log

@ -81,25 +81,25 @@ def revert_to_error_scale(function):
vim_connection_info,
error=True)
except Exception as e:
LOG.warning(traceback.format_exc())
LOG.warning("Failed to scale resource update "
"instance %(id)s. Error: %(error)s",
{"id": vnf_instance.id, "error": e})
LOG.error(traceback.format_exc())
LOG.error("Failed to scale resource update "
"instance %(id)s. Error: %(error)s",
{"id": vnf_instance.id, "error": e})

try:
self._vnfm_plugin._update_vnf_scaling_status_err(context,
vnf_info)
except Exception as e:
LOG.warning("Failed to revert scale info for event "
"instance %(id)s. Error: %(error)s",
{"id": vnf_instance.id, "error": e})
LOG.error("Failed to revert scale info for event "
"instance %(id)s. Error: %(error)s",
{"id": vnf_instance.id, "error": e})
try:
vnf_instance.task_state = None
self._vnf_instance_update(context, vnf_instance)
except Exception as e:
LOG.warning("Failed to revert instantiation info for vnf "
"instance %(id)s. Error: %(error)s",
{"id": vnf_instance.id, "error": e})
LOG.error("Failed to revert instantiation info for vnf "
"instance %(id)s. Error: %(error)s",
{"id": vnf_instance.id, "error": e})
problem = objects.ProblemDetails(status=500,
detail=str(ex))

@ -114,9 +114,9 @@ def revert_to_error_scale(function):
vnf_info['current_error_point']
vnf_lcm_op_occ.save()
except Exception as e:
LOG.warning("Failed to update vnf_lcm_op_occ for vnf "
"instance %(id)s. Error: %(error)s",
{"id": vnf_instance.id, "error": e})
LOG.error("Failed to update vnf_lcm_op_occ for vnf "
"instance %(id)s. Error: %(error)s",
{"id": vnf_instance.id, "error": e})

try:
notification = vnf_info['notification']
@ -138,9 +138,9 @@ def revert_to_error_scale(function):
resource_dict.get('affected_virtual_storages'))
self.rpc_api.send_notification(context, notification)
except Exception as e:
LOG.warning("Failed to revert scale info for vnf "
"instance %(id)s. Error: %(error)s",
{"id": vnf_instance.id, "error": e})
LOG.error("Failed to revert scale info for vnf "
"instance %(id)s. Error: %(error)s",
{"id": vnf_instance.id, "error": e})

return decorated_function

@ -170,9 +170,9 @@ def revert_to_error_task_state(function):
"id": vnf_instance.id,
"error": fields.VnfInstanceTaskState.ERROR})
except Exception as e:
LOG.warning("Failed to revert task state for vnf "
"instance %(id)s. Error: %(error)s",
{"id": vnf_instance.id, "error": e})
LOG.error("Failed to revert task state for vnf "
"instance %(id)s. Error: %(error)s",
{"id": vnf_instance.id, "error": e})

return decorated_function

@ -224,23 +224,23 @@ def revert_to_error_rollback(function):
vnf_info,
vnf_instance)
except Exception as e:
LOG.warning(traceback.format_exc())
LOG.warning("Failed to scale resource update "
"instance %(id)s. Error: %(error)s",
{"id": vnf_instance.id, "error": e})
LOG.error(traceback.format_exc())
LOG.error("Failed to scale resource update "
"instance %(id)s. Error: %(error)s",
{"id": vnf_instance.id, "error": e})

try:
self._update_vnf_rollback_status_err(context, vnf_info)
except Exception as e:
LOG.warning("Failed to revert scale info for event "
"instance %(id)s. Error: %(error)s",
{"id": vnf_instance.id, "error": e})
LOG.error("Failed to revert scale info for event "
"instance %(id)s. Error: %(error)s",
{"id": vnf_instance.id, "error": e})
try:
self._vnf_instance_update(context, vnf_instance)
except Exception as e:
LOG.warning("Failed to revert instantiation info for vnf "
"instance %(id)s. Error: %(error)s",
{"id": vnf_instance.id, "error": e})
LOG.error("Failed to revert instantiation info for vnf "
"instance %(id)s. Error: %(error)s",
{"id": vnf_instance.id, "error": e})
problem = objects.ProblemDetails(status=500,
detail=str(ex))

@ -254,9 +254,9 @@ def revert_to_error_rollback(function):
vnf_lcm_op_occ.error = problem
vnf_lcm_op_occ.save()
except Exception as e:
LOG.warning("Failed to update vnf_lcm_op_occ for vnf "
"instance %(id)s. Error: %(error)s",
{"id": vnf_instance.id, "error": e})
LOG.error("Failed to update vnf_lcm_op_occ for vnf "
"instance %(id)s. Error: %(error)s",
{"id": vnf_instance.id, "error": e})

try:
notification = vnf_info['notification']
@ -281,9 +281,9 @@ def revert_to_error_rollback(function):
'affected_virtual_storages'))
self.rpc_api.send_notification(context, notification)
except Exception as e:
LOG.warning("Failed to revert scale info for vnf "
"instance %(id)s. Error: %(error)s",
{"id": vnf_instance.id, "error": e})
LOG.error("Failed to revert scale info for vnf "
"instance %(id)s. Error: %(error)s",
{"id": vnf_instance.id, "error": e})
return decorated_function

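The hunks above only raise the log level of the revert paths in revert_to_error_scale, revert_to_error_task_state and revert_to_error_rollback from warning to error. A minimal sketch of the shared pattern, using placeholder names (LOG, _vnf_instance_update) rather than the module's real imports and helpers:

import functools
import logging

LOG = logging.getLogger(__name__)


def revert_to_error(function):
    """Log cleanup failures at error level and re-raise the original error."""
    @functools.wraps(function)
    def decorated_function(self, context, vnf_instance, *args, **kwargs):
        try:
            return function(self, context, vnf_instance, *args, **kwargs)
        except Exception:
            # Each revert step is best-effort: a failure is reported at
            # error level but must not hide the exception that got us here.
            try:
                vnf_instance.task_state = None
                self._vnf_instance_update(context, vnf_instance)
            except Exception as e:
                LOG.error("Failed to revert task state for vnf "
                          "instance %(id)s. Error: %(error)s",
                          {"id": vnf_instance.id, "error": e})
            raise
    return decorated_function
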
@ -74,7 +74,7 @@ class Parser(object):
a_file=False,
yaml_dict_tpl=self.vnfd_dict)
except Exception as e:
LOG.debug("tosca-parser error: %s", str(e))
LOG.error("tosca-parser error: %s", str(e))
raise vnfm.ToscaParserFailed(error_msg_details=str(e))

# Initiate a list tosca_kube_object which are defined from VDU
@ -207,7 +207,7 @@ class Parser(object):
# Because in Kubernetes environment, we can attach only one
# scaling policy to Deployment. If user provides more than one
# policy this error will happen when count > 1
LOG.debug("Tacker only support one scaling policy per VDU")
LOG.error("Tacker only support one scaling policy per VDU")
raise vnfm.InvalidKubernetesScalingPolicyNumber

return scaling_obj

@ -188,8 +188,8 @@ class Kubernetes(abstract_driver.VnfAbstractDriver,
self.STACK_RETRIES *
self.STACK_RETRY_WAIT),
stack=vnf_id)
LOG.warning("VNF Creation failed: %(reason)s",
{'reason': error_reason})
LOG.error("VNF Creation failed: %(reason)s",
{'reason': error_reason})
raise vnfm.VNFCreateWaitFailed(reason=error_reason)
elif stack_retries != 0 and status != 'Running':
raise vnfm.VNFCreateWaitFailed(reason=error_reason)
@ -2357,8 +2357,8 @@ class Kubernetes(abstract_driver.VnfAbstractDriver,

if status == 'Unknown':
error_reason = _("Pod status is found Unknown")
LOG.warning("CNF Healing failed: %(reason)s",
{'reason': error_reason})
LOG.error("CNF Healing failed: %(reason)s",
{'reason': error_reason})
raise vnfm.CNFHealWaitFailed(reason=error_reason)
elif status == 'Pending' or is_unmatch_pods_num:
time.sleep(self.STACK_RETRY_WAIT)

@ -95,7 +95,7 @@ class TOSCAToKubernetes(object):
if value['get_input'] in paramvalues:
original[key] = paramvalues[value['get_input']]
else:
LOG.debug('Key missing Value: %s', key)
LOG.error('Key missing Value: %s', key)
raise cs.InputValuesMissing(key=key)
else:
self._update_params(value, paramvalues)
@ -112,7 +112,7 @@ class TOSCAToKubernetes(object):
if 'get_input' in str(node):
self._update_params(node, param_vattrs_dict)
except Exception as e:
LOG.debug("Not Well Formed: %s", str(e))
LOG.error("Not Well Formed: %s", str(e))
raise vnfm.ParamYAMLNotWellFormed(
error_msg_details=str(e))
else:

@ -55,7 +55,7 @@ class VnfNoop(abstract_driver.VnfAbstractDriver):
@log.log
def update(self, plugin, context, vnf_id, vnf_dict, vnf):
if vnf_id not in self._instances:
LOG.debug('not found')
LOG.error('Not found')
raise ValueError('No instance %s' % vnf_id)

@log.log

@ -59,5 +59,5 @@ class GlanceClient(object):
try:
return self.connection.image.get_image(image_id)
except exc.HTTPNotFound:
LOG.warning("Image %(image)s created not found ",
{'image': image_id})
LOG.error("Image %(image)s created not found ",
{'image': image_id})

@ -660,7 +660,7 @@ class OpenStack(abstract_driver.VnfAbstractDriver,
raise exception_class(reason=error_reason)
elif stack_retries != 0 and status != wait_status:
error_reason = stack.stack_status_reason
LOG.warning(error_reason)
LOG.error(error_reason)
raise exception_class(reason=error_reason)

def _find_mgmt_ips(self, outputs):
@ -701,7 +701,7 @@ class OpenStack(abstract_driver.VnfAbstractDriver,
if not update_values:
error_reason = _("at vnf_id {} because all parameters "
"match the existing one.").format(vnf_id)
LOG.warning(error_reason)
LOG.error(error_reason)
raise vnfm.VNFUpdateInvalidInput(reason=error_reason)

# update vnf_dict
@ -719,7 +719,7 @@ class OpenStack(abstract_driver.VnfAbstractDriver,
elif not update_param_yaml and not update_config_yaml:
error_reason = _("at vnf_id {} because the target "
"yaml is empty.").format(vnf_id)
LOG.warning(error_reason)
LOG.error(error_reason)
raise vnfm.VNFUpdateInvalidInput(reason=error_reason)

# update config attribute
@ -893,7 +893,7 @@ class OpenStack(abstract_driver.VnfAbstractDriver,
"status %(status)s") % {
'stack': policy['instance_id'],
'status': rsc.resource_status}
LOG.warning(error_reason)
LOG.error(error_reason)
raise vnfm.VNFScaleWaitFailed(
vnf_id=policy['vnf']['id'],
reason=error_reason)
@ -916,7 +916,7 @@ class OpenStack(abstract_driver.VnfAbstractDriver,
"error %(error)s") % {
'stack': policy['instance_id'],
'error': str(e)}
LOG.warning(error_reason)
LOG.error(error_reason)
raise vnfm.VNFScaleWaitFailed(vnf_id=policy['vnf']['id'],
reason=error_reason)

@ -925,7 +925,7 @@ class OpenStack(abstract_driver.VnfAbstractDriver,
"VNF scaling failed to complete within %{wait}s seconds "
"while waiting for the stack %(stack)s to be "
"scaled.")
LOG.warning(error_reason, {
LOG.error(error_reason, {
'stack': stack_id,
'wait': (
self.STACK_RETRIES * self.STACK_RETRY_WAIT)})
@ -1121,11 +1121,11 @@ class OpenStack(abstract_driver.VnfAbstractDriver,
"name": vnf_resource.resource_name,
"id": vnf_instance.id})
except Exception:
LOG.info("Failed to delete resource '%(name)s' of type"
" %(type)s' for vnf %(id)s",
{"type": vnf_resource.resource_type,
"name": vnf_resource.resource_name,
"id": vnf_instance.id})
LOG.debug("Failed to delete resource '%(name)s' of type"
" %(type)s' for vnf %(id)s",
{"type": vnf_resource.resource_type,
"name": vnf_resource.resource_name,
"id": vnf_instance.id})

def instantiate_vnf(self, context, vnf_instance, vnfd_dict,
vim_connection_info, instantiate_vnf_req,

@ -140,13 +140,13 @@ class TOSCAToHOT(object):
elif key in paramvalues:
self._update_params(value, paramvalues[key], False)
else:
LOG.debug('Key missing Value: %s', key)
LOG.error('Key missing Value: %s', key)
raise cs.InputValuesMissing(key=key)
elif 'get_input' in value:
if value['get_input'] in paramvalues:
original[key] = paramvalues[value['get_input']]
else:
LOG.debug('Key missing Value: %s', key)
LOG.error('Key missing Value: %s', key)
raise cs.InputValuesMissing(key=key)
else:
self._update_params(value, paramvalues, True)
@ -159,7 +159,7 @@ class TOSCAToHOT(object):
param_vattrs_dict = yaml.safe_load(param_vattrs_yaml)
LOG.debug('param_vattrs_yaml', param_vattrs_dict)
except Exception as e:
LOG.debug("Not Well Formed: %s", str(e))
LOG.error("Not Well Formed: %s", str(e))
raise vnfm.ParamYAMLNotWellFormed(
error_msg_details=str(e))
else:
@ -263,7 +263,7 @@ class TOSCAToHOT(object):
try:
parsed_params = yaml.safe_load(dev_attrs['param_values'])
except Exception as e:
LOG.debug("Params not Well Formed: %s", str(e))
LOG.error("Params not Well Formed: %s", str(e))
raise vnfm.ParamYAMLNotWellFormed(error_msg_details=str(e))

appmonitoring_dict = \
@ -284,7 +284,7 @@ class TOSCAToHOT(object):
yaml_dict_tpl=vnfd_dict)

except Exception as e:
LOG.debug("tosca-parser error: %s", str(e))
LOG.error("tosca-parser error: %s", str(e))
raise vnfm.ToscaParserFailed(error_msg_details=str(e))

unique_id = uuidutils.generate_uuid()
@ -323,7 +323,7 @@ class TOSCAToHOT(object):
nested_resource_yaml

except Exception as e:
LOG.debug("heat-translator error: %s", str(e))
LOG.error("heat-translator error: %s", str(e))
raise vnfm.HeatTranslatorFailed(error_msg_details=str(e))

if self.nested_resources:
@ -362,7 +362,7 @@ class TOSCAToHOT(object):
unique_id=unique_id, inst_req_info=inst_req_info,
grant_info=grant_info, tosca=tosca)
except Exception as e:
LOG.debug("post_process_heat_template_for_scaling "
LOG.error("post_process_heat_template_for_scaling "
"error: %s", str(e))
raise

@ -107,7 +107,7 @@ def create_initial_param_dict(base_hot_dict):
if param_list[2] not in resource_info:
resource_info[param_list[2]] = {}

LOG.info('initial_param_dict: %s', initial_param_dict)
LOG.debug('initial_param_dict: %s', initial_param_dict)
return initial_param_dict


@ -178,7 +178,7 @@ def create_initial_param_server_port_dict(base_hot_dict):
if param_list[2] not in resource_info:
resource_info[param_list[2]] = {}

LOG.info('initial_param_dict: %s', initial_param_dict)
LOG.debug('initial_param_dict: %s', initial_param_dict)
return initial_param_dict


@ -210,7 +210,7 @@ def create_final_param_dict(param_dict, vdu_flavor_dict,
for fixed_ip in cpd_vl_dict.get(target_cp).get("fixed_ips"):
cps[target_cp]['fixed_ips'].append(fixed_ip)

LOG.info('final_param_dict: %s', final_param_dict)
LOG.debug('final_param_dict: %s', final_param_dict)
return final_param_dict


@ -244,7 +244,7 @@ def create_vdu_flavor_dict(vnfd_dict):
val[0]['size_of_storage'], 'GiB')
vdu_flavor_dict[vdu_name] = flavor_dict

LOG.info('vdu_flavor_dict: %s', vdu_flavor_dict)
LOG.debug('vdu_flavor_dict: %s', vdu_flavor_dict)
return vdu_flavor_dict


@ -259,7 +259,7 @@ def create_vdu_image_dict(grant_info):
for vnf_resource in resources:
vdu_image_dict[vdu_name] = vnf_resource.resource_identifier

LOG.info('vdu_image_dict: %s', vdu_image_dict)
LOG.debug('vdu_image_dict: %s', vdu_image_dict)
return vdu_image_dict


@ -285,7 +285,7 @@ def create_cpd_vl_dict(base_hot_dict, inst_req_info):
cpd_vl_dict[ext_cp.cpd_id] = vl_uuid
break

LOG.info('cpd_vl_dict: %s', cpd_vl_dict)
LOG.debug('cpd_vl_dict: %s', cpd_vl_dict)
return cpd_vl_dict


@ -337,7 +337,7 @@ def create_vdu_flavor_capability_name_dict(vnfd_dict):
"requested_additional"
"_capability_name"]

LOG.info('vdu_flavor_dict: %s', vdu_flavor_dict)
LOG.debug('vdu_flavor_dict: %s', vdu_flavor_dict)
return vdu_flavor_dict


@ -359,7 +359,7 @@ def create_sw_image_dict(vnfd_dict):
if 'name' in sw_image_data_props.keys():
sw_image_data[vdu_name] = sw_image_data_props['name']

LOG.info('sw_image_data: %s', sw_image_data)
LOG.debug('sw_image_data: %s', sw_image_data)
return sw_image_data


@ -382,7 +382,7 @@ def create_network_dict(inst_req_info, param_dict):
cp_data[ext_cp.cpd_id]["fixed_ips"] =\
_create_fixed_ips_list(ext_cp)

LOG.info('cp_data: %s', cp_data)
LOG.debug('cp_data: %s', cp_data)
return cp_data


@ -455,7 +455,7 @@ def create_desired_capacity_dict(base_hot_dict, vnfd_dict, inst_req_info):
for name, value in scale_group_dict['scaleGroupDict'].items():
param_dict[name] = value['default']

LOG.info("desired_capacity dict: %s", param_dict)
LOG.debug("desired_capacity dict: %s", param_dict)
return param_dict


@ -493,5 +493,5 @@ def get_desired_capacity_dict(base_hot_dict, vnfd_dict, inst_vnf_info):
if desired_capacity is not None:
param_dict[name] = desired_capacity

LOG.info("desired_capacity dict: %s", param_dict)
LOG.debug("desired_capacity dict: %s", param_dict)
return param_dict

@ -75,7 +75,7 @@ class VNFMonitorHTTPPing(abstract_driver.VNFMonitorAbstractDriver):
urlreq.urlopen(url, timeout=timeout)
return True
except urlerr.URLError:
LOG.warning('Unable to reach to the url %s', url)
LOG.error('Unable to reach to the url %s', url)
return 'failure'

@log.log

@ -89,7 +89,7 @@ class VNFMonitorPing(abstract_driver.VNFMonitorAbstractDriver):
linux_utils.execute(ping_cmd, check_exit_code=True)
return True
except RuntimeError:
LOG.warning("Cannot ping ip address: %s", mgmt_ip)
LOG.error("Cannot ping ip address: %s", mgmt_ip)
return 'failure'

@log.log

@ -63,13 +63,13 @@ class _Connect:
LOG.info("Connecting to <{ip}:{port}>, count=<{count}>".format(
ip=args[0], port=args[1], count=retry_cnt))
if 'headers' in kwargs:
LOG.info("[REQ] HEADERS={}".format(kwargs['headers']))
LOG.debug("[REQ] HEADERS={}".format(kwargs['headers']))

if 'data' in kwargs:
LOG.info("[REQ] BODY={}".format(kwargs['data']))
LOG.debug("[REQ] BODY={}".format(kwargs['data']))

elif 'json' in kwargs:
LOG.info("[REQ] BODY={}".format(kwargs['json']))
LOG.debug("[REQ] BODY={}".format(kwargs['json']))

try:
response = request_function(*args, **kwargs)

@ -184,7 +184,7 @@ class VNFMPlugin(vnfm_db.VNFMPluginDb, VNFMMgmtMixin):

service_types = vnfd_data.get('service_types')
if not attributes.is_attr_set(service_types):
LOG.debug('service type must be specified')
LOG.error('service type must be specified')
raise vnfm.ServiceTypesNotSpecified()
for service_type in service_types:
# TODO(yamahata):
@ -392,7 +392,7 @@ class VNFMPlugin(vnfm_db.VNFMPluginDb, VNFMMgmtMixin):
driver_name, 'create', plugin=self,
context=context, vnf=vnf_dict, auth_attr=vim_auth)
except Exception:
LOG.debug('Fail to create vnf %s in infra_driver, '
LOG.error('Fail to create vnf %s in infra_driver, '
'so delete this vnf',
vnf_dict['id'])
with excutils.save_and_reraise_exception():
@ -417,7 +417,7 @@ class VNFMPlugin(vnfm_db.VNFMPluginDb, VNFMMgmtMixin):

infra_driver, vim_auth = self._get_infra_driver(context, vnf_info)
if infra_driver not in self._vnf_manager:
LOG.debug('unknown vim driver '
LOG.error('unknown vim driver '
'%(infra_driver)s in %(drivers)s',
{'infra_driver': infra_driver,
'drivers': cfg.CONF.tacker.infra_driver})
@ -723,7 +723,7 @@ class VNFMPlugin(vnfm_db.VNFMPluginDb, VNFMMgmtMixin):
if vnf and vnf['vnf'].get('attributes').get('force'):
force_delete = vnf['vnf'].get('attributes').get('force')
if force_delete and not context.is_admin:
LOG.warning("force delete is admin only operation")
LOG.error("force delete is admin only operation")
raise exceptions.AdminRequired(reason="Admin only operation")

self._delete_vnf(context, vnf_id, force_delete=force_delete)
@ -748,7 +748,7 @@ class VNFMPlugin(vnfm_db.VNFMPluginDb, VNFMMgmtMixin):
policy=policy['name']
)

LOG.debug("Policy %s is validated successfully", policy['name'])
LOG.info("Policy %s is validated successfully", policy['name'])

def _get_status():
if policy['action'] == constants.ACTION_SCALE_IN:

@ -47,7 +47,7 @@ class VimClient(object):
try:
vim_info = nfvo_plugin.get_default_vim(context)
except Exception as ex:
LOG.debug('Fail to get default vim due to %s', ex)
LOG.error('Fail to get default vim due to %s', ex)
raise nfvo.VimDefaultNotDefined()
else:
try:
@ -131,7 +131,7 @@ class VimClient(object):

f = fernet.Fernet(vim_key)
if not f:
LOG.warning('Unable to decode VIM auth')
LOG.error('Unable to decode VIM auth')
raise nfvo.VimNotFoundException(vim_id=vim_id)
return f.decrypt(cred).decode('utf-8')

@ -143,5 +143,5 @@ class VimClient(object):
with open(key_file, 'r') as f:
return f.read()
except Exception:
LOG.warning('VIM id invalid or key not found for %s', vim_id)
LOG.error('VIM id invalid or key not found for %s', vim_id)
raise nfvo.VimKeyNotFoundException(vim_id=vim_id)

@ -569,7 +569,7 @@ class RequestDeserializer(object):
deserializer = self.get_body_deserializer(content_type)
except exception.InvalidContentType:
with excutils.save_and_reraise_exception():
LOG.debug("Unable to deserialize body as provided "
LOG.error("Unable to deserialize body as provided "
"Content-Type")

if isinstance(deserializer, ZipDeserializer):
@ -1014,7 +1014,7 @@ class Resource(Application):
msg_dict = dict(url=request.url, exception=e)
msg = _("%(url)s returned a fault: %(exception)s") % msg_dict

LOG.info(msg)
LOG.debug(msg)

return response