Remove log translations

Starting with the Pike series, OpenStack no longer supports log
translation, so this change removes the _() translation markers from
log messages throughout Tacker; user-facing strings such as exception
messages keep them.

See:
http://lists.openstack.org/pipermail/openstack-i18n/2016-November/002574.html
http://lists.openstack.org/pipermail/openstack-dev/2017-March/113365.html

Change-Id: I4440a1d6c332e48845fceadb464dd34ab11e12d2
Author: Ngo Quoc Cuong, 2017-07-04 23:52:35 -04:00
parent 17e1a85e2b
commit d0872fec2d
36 changed files with 249 additions and 258 deletions
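
Every hunk below applies the same mechanical pattern: the _() wrapper imported
from tacker._i18n is dropped from LOG.debug/info/warning/error/exception calls,
while _() is kept for user-facing strings such as exception messages. A minimal
before/after sketch, standalone and for illustration only, using the stdlib
logging module in place of oslo_log and a dummy _ in place of tacker._i18n._:

    import logging

    LOG = logging.getLogger(__name__)

    def _(msg):
        # Dummy marker standing in for tacker._i18n._ in this sketch only.
        return msg

    def run_command_old(cmd):
        # Pre-Pike style: the log message is wrapped in _() for translation.
        LOG.debug(_("Running command: %s"), cmd)

    def run_command_new(cmd):
        # Pike style: plain format string; arguments stay outside the string,
        # so interpolation remains lazy and only happens when DEBUG is enabled.
        LOG.debug("Running command: %s", cmd)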

View File

@ -43,7 +43,7 @@ def create_process(cmd, root_helper=None, addl_env=None,
cmd = map(str, cmd)
if debuglog:
LOG.debug(_("Running command: %s"), cmd)
LOG.debug("Running command: %s", cmd)
env = os.environ.copy()
if addl_env:
env.update(addl_env)

View File

@ -19,6 +19,7 @@ from tacker.vnfm.monitor_drivers.token import Token
from tacker import wsgi
# check alarm url with db --> move to plugin
LOG = logging.getLogger(__name__)
OPTS = [
@ -41,7 +42,7 @@ def config_opts():
class AlarmReceiver(wsgi.Middleware):
def process_request(self, req):
LOG.debug(_('Process request: %s'), req)
LOG.debug('Process request: %s', req)
if req.method != 'POST':
return
url = req.url

View File

@ -102,8 +102,8 @@ def _get_pagination_max_limit():
if max_limit == 0:
raise ValueError()
except ValueError:
LOG.warning(_("Invalid value for pagination_max_limit: %s. It "
"should be an integer greater to 0"),
LOG.warning("Invalid value for pagination_max_limit: %s. It "
"should be an integer greater to 0",
cfg.CONF.pagination_max_limit)
return max_limit

View File

@ -277,8 +277,7 @@ class ExtensionMiddleware(wsgi.Middleware):
(resource.parent["collection_name"],
resource.parent["member_name"]))
LOG.debug(_('Extended resource: %s'),
resource.collection)
LOG.debug('Extended resource: %s', resource.collection)
for action, method in (resource.collection_actions).items():
conditions = dict(method=[method])
path = "/%s/%s" % (resource.collection, action)
@ -299,7 +298,7 @@ class ExtensionMiddleware(wsgi.Middleware):
action_controllers = self._action_ext_controllers(application,
self.ext_mgr, mapper)
for action in self.ext_mgr.get_actions():
LOG.debug(_('Extended action: %s'), action.action_name)
LOG.debug('Extended action: %s', action.action_name)
controller = action_controllers[action.collection]
controller.add_action(action.action_name, action.handler)
@ -307,7 +306,7 @@ class ExtensionMiddleware(wsgi.Middleware):
req_controllers = self._request_ext_controllers(application,
self.ext_mgr, mapper)
for request_ext in self.ext_mgr.get_request_extensions():
LOG.debug(_('Extended request: %s'), request_ext.key)
LOG.debug('Extended request: %s', request_ext.key)
controller = req_controllers[request_ext.key]
controller.add_handler(request_ext.handler)
@ -405,7 +404,7 @@ class ExtensionManager(object):
return cls._instance
def __init__(self, path):
LOG.info(_('Initializing extension manager.'))
LOG.info('Initializing extension manager.')
self.path = path
self.extensions = {}
self._load_all_extensions()
@ -485,8 +484,8 @@ class ExtensionManager(object):
else:
attr_map[resource] = resource_attrs
except AttributeError:
LOG.exception(_("Error fetching extended attributes for "
"extension '%s'"), ext.get_name())
LOG.exception("Error fetching extended attributes for "
"extension '%s'", ext.get_name())
processed_exts.add(ext_name)
del exts_to_process[ext_name]
if len(processed_exts) == processed_ext_count:
@ -494,8 +493,8 @@ class ExtensionManager(object):
break
if exts_to_process:
# NOTE(salv-orlando): Consider whether this error should be fatal
LOG.error(_("It was impossible to process the following "
"extensions: %s because of missing requirements."),
LOG.error("It was impossible to process the following "
"extensions: %s because of missing requirements.",
','.join(exts_to_process.keys()))
# Extending extensions' attributes map.
@ -505,13 +504,13 @@ class ExtensionManager(object):
def _check_extension(self, extension):
"""Checks for required methods in extension objects."""
try:
LOG.debug(_('Ext name: %s'), extension.get_name())
LOG.debug(_('Ext alias: %s'), extension.get_alias())
LOG.debug(_('Ext description: %s'), extension.get_description())
LOG.debug(_('Ext namespace: %s'), extension.get_namespace())
LOG.debug(_('Ext updated: %s'), extension.get_updated())
LOG.debug('Ext name: %s', extension.get_name())
LOG.debug('Ext alias: %s', extension.get_alias())
LOG.debug('Ext description: %s', extension.get_description())
LOG.debug('Ext namespace: %s', extension.get_namespace())
LOG.debug('Ext updated: %s', extension.get_updated())
except AttributeError as ex:
LOG.exception(_("Exception loading extension: %s"), ex)
LOG.exception("Exception loading extension: %s", ex)
return False
return True
@ -529,7 +528,7 @@ class ExtensionManager(object):
if os.path.exists(path):
self._load_all_extensions_from_path(path)
else:
LOG.error(_("Extension path '%s' doesn't exist!"), path)
LOG.error("Extension path '%s' doesn't exist!", path)
def _load_all_extensions_from_path(self, path):
# Sorting the extension list makes the order in which they
@ -537,7 +536,7 @@ class ExtensionManager(object):
# Tacker Servers
for f in sorted(os.listdir(path)):
try:
LOG.debug(_('Loading extension file: %s'), f)
LOG.debug('Loading extension file: %s', f)
mod_name, file_ext = os.path.splitext(os.path.split(f)[-1])
ext_path = os.path.join(path, f)
if file_ext.lower() == '.py' and not mod_name.startswith('_'):
@ -545,16 +544,16 @@ class ExtensionManager(object):
ext_name = mod_name[0].upper() + mod_name[1:]
new_ext_class = getattr(mod, ext_name, None)
if not new_ext_class:
LOG.warning(_('Did not find expected name '
'"%(ext_name)s" in %(file)s'),
LOG.warning('Did not find expected name '
'"%(ext_name)s" in %(file)s',
{'ext_name': ext_name,
'file': ext_path})
continue
new_ext = new_ext_class()
self.add_extension(new_ext)
except Exception as exception:
LOG.warning(_("Extension file %(f)s wasn't loaded due to "
"%(exception)s"),
LOG.warning("Extension file %(f)s wasn't loaded due to "
"%(exception)s",
{'f': f, 'exception': exception})
def add_extension(self, ext):
@ -563,7 +562,7 @@ class ExtensionManager(object):
return
alias = ext.get_alias()
LOG.info(_('Loaded extension: %s'), alias)
LOG.info('Loaded extension: %s', alias)
if alias in self.extensions:
raise exceptions.DuplicatedExtension(alias=alias)

View File

@ -73,8 +73,8 @@ class Controller(object):
_("Native pagination depend on native sorting")
)
if not self._allow_sorting:
LOG.info(_("Allow sorting is enabled because native "
"pagination requires native sorting"))
LOG.info("Allow sorting is enabled because native "
"pagination requires native sorting")
self._allow_sorting = True
if parent:
@ -331,8 +331,7 @@ class Controller(object):
obj_deleter(request.context, obj['id'], **kwargs)
except Exception:
# broad catch as our only purpose is to log the exception
LOG.exception(_("Unable to undo add for "
"%(resource)s %(id)s"),
LOG.exception("Unable to undo add for %(resource)s %(id)s",
{'resource': self._resource,
'id': obj['id']})
# TODO(salvatore-orlando): The object being processed when the
@ -508,8 +507,8 @@ class Controller(object):
if not body:
raise webob.exc.HTTPBadRequest(_("Resource body required"))
LOG.debug(_("Request body: %(body)s"), {'body':
strutils.mask_password(body)})
LOG.debug("Request body: %(body)s",
{'body': strutils.mask_password(body)})
prep_req_body = lambda x: Controller.prepare_request_body(
context,
x if resource in x else {resource: x},

View File

@ -79,16 +79,12 @@ def Resource(controller, faults=None, deserializers=None, serializers=None):
mapped_exc = api_common.convert_exception_to_http_exc(e, faults,
language)
if hasattr(mapped_exc, 'code') and 400 <= mapped_exc.code < 500:
LOG.info(_('%(action)s failed (client error): %(exc)s'),
LOG.info('%(action)s failed (client error): %(exc)s',
{'action': action, 'exc': mapped_exc})
else:
LOG.exception(
_('%(action)s failed: %(details)s'),
{
'action': action,
'details': extract_exc_details(e),
}
)
LOG.exception('%(action)s failed: %(details)s',
{'action': action,
'details': extract_exc_details(e)})
raise mapped_exc
status = action_status.get(action, 200)

View File

@ -32,7 +32,7 @@ class TackerKeystoneContext(wsgi.Middleware):
# Determine the user ID
user_id = req.headers.get('X_USER_ID')
if not user_id:
LOG.debug(_("X_USER_ID is not found in request"))
LOG.debug("X_USER_ID is not found in request")
return webob.exc.HTTPUnauthorized()
# Determine the tenant

View File

@ -64,27 +64,27 @@ class RemoteCommandExecutor(object):
self.__ssh.set_missing_host_key_policy(paramiko.WarningPolicy())
self.__ssh.connect(self.__host, username=self.__user,
password=self.__password, timeout=self.__timeout)
LOG.info(_("Connected to %s") % self.__host)
LOG.info("Connected to %s", self.__host)
except paramiko.AuthenticationException:
LOG.error(_("Authentication failed when connecting to %s")
% self.__host)
LOG.error("Authentication failed when connecting to %s",
self.__host)
raise exceptions.NotAuthorized
except paramiko.SSHException:
LOG.error(_("Could not connect to %s. Giving up") % self.__host)
LOG.error("Could not connect to %s. Giving up", self.__host)
raise
def close_session(self):
self.__ssh.close()
LOG.debug(_("Connection close"))
LOG.debug("Connection close")
def execute_command(self, cmd, input_data=None):
try:
stdin, stdout, stderr = self.__ssh.exec_command(cmd)
if input_data:
stdin.write(input_data)
LOG.debug(_("Input data written successfully"))
LOG.debug("Input data written successfully")
stdin.flush()
LOG.debug(_("Input data flushed"))
LOG.debug("Input data flushed")
stdin.channel.shutdown_write()
# NOTE (dkushwaha): There might be a case, when server can take
@ -96,11 +96,10 @@ class RemoteCommandExecutor(object):
cmd_err = stderr.readlines()
return_code = stdout.channel.recv_exit_status()
except paramiko.SSHException:
LOG.error(_("Command execution failed at %s. Giving up")
% self.__host)
LOG.error("Command execution failed at %s. Giving up", self.__host)
raise
result = CommandResult(cmd, cmd_out, cmd_err, return_code)
LOG.debug(_("Remote command execution result: %s"), result)
LOG.debug("Remote command execution result: %s", result)
return result
def __del__(self):

View File

@ -111,7 +111,7 @@ def setup_logging(conf):
"""
product_name = "tacker"
logging.setup(conf, product_name)
LOG.info(_("Logging enabled!"))
LOG.info("Logging enabled!")
def load_paste_app(app_name):
@ -127,7 +127,7 @@ def load_paste_app(app_name):
raise cfg.ConfigFilesNotFoundError(
config_files=[cfg.CONF.api_paste_config])
config_path = os.path.abspath(config_path)
LOG.info(_("Config paste file: %s"), config_path)
LOG.info("Config paste file: %s", config_path)
try:
app = deploy.loadapp("config:%s" % config_path, name=app_name)

View File

@ -44,7 +44,7 @@ class DriverManager(object):
self._drivers = dict((type_, ext.obj)
for (type_, ext) in drivers.items())
LOG.info(_("Registered drivers from %(namespace)s: %(keys)s"),
LOG.info("Registered drivers from %(namespace)s: %(keys)s",
{'namespace': namespace, 'keys': self._drivers.keys()})
@staticmethod

View File

@ -30,7 +30,7 @@ def log(method):
"method_name": method.__name__,
"args": strutils.mask_password(args[1:]),
"kwargs": strutils.mask_password(kwargs)}
LOG.debug(_('%(class_name)s method %(method_name)s'
' called with arguments %(args)s %(kwargs)s'), data)
LOG.debug('%(class_name)s method %(method_name)s'
' called with arguments %(args)s %(kwargs)s', data)
return method(*args, **kwargs)
return wrapper

View File

@ -65,7 +65,7 @@ class CommonServicesPluginDb(common_services.CommonServicesPluginBase,
timestamp=tstamp)
context.session.add(event_db)
except Exception as e:
LOG.exception(_("create event error: %s"), str(e))
LOG.exception("create event error: %s", str(e))
raise common_services.EventCreationFailureException(
error_str=str(e))
return self._make_event_dict(event_db)

View File

@ -212,5 +212,5 @@ class CommonDbMixin(object):
query = self._model_query(context, model)
return query.filter(model.name == name).one()
except orm_exc.NoResultFound:
LOG.info(_("No result found for %(name)s in %(model)s table"),
LOG.info("No result found for %(name)s in %(model)s table",
{'name': name, 'model': model})

View File

@ -155,7 +155,7 @@ class NSPluginDb(network_service.NSPluginBase, db_base.CommonDbMixin):
return dict((arg.key, arg.value) for arg in dev_attrs_db)
def _make_ns_dict(self, ns_db, fields=None):
LOG.debug(_('ns_db %s'), ns_db)
LOG.debug('ns_db %s', ns_db)
res = {}
key_list = ('id', 'tenant_id', 'nsd_id', 'name', 'description',
'vnf_ids', 'status', 'mgmt_urls', 'error_reason',
@ -166,7 +166,7 @@ class NSPluginDb(network_service.NSPluginBase, db_base.CommonDbMixin):
def create_nsd(self, context, nsd):
vnfds = nsd['vnfds']
nsd = nsd['nsd']
LOG.debug(_('nsd %s'), nsd)
LOG.debug('nsd %s', nsd)
tenant_id = self._get_tenant_id_for_create(context, nsd)
try:
@ -191,11 +191,11 @@ class NSPluginDb(network_service.NSPluginBase, db_base.CommonDbMixin):
raise exceptions.DuplicateEntity(
_type="nsd",
entry=e.columns)
LOG.debug(_('nsd_db %(nsd_db)s %(attributes)s '),
LOG.debug('nsd_db %(nsd_db)s %(attributes)s ',
{'nsd_db': nsd_db,
'attributes': nsd_db.attributes})
nsd_dict = self._make_nsd_dict(nsd_db)
LOG.debug(_('nsd_dict %s'), nsd_dict)
LOG.debug('nsd_dict %s', nsd_dict)
self._cos_db_plg.create_event(
context, res_id=nsd_dict['id'],
res_type=constants.RES_TYPE_NSD,
@ -240,7 +240,7 @@ class NSPluginDb(network_service.NSPluginBase, db_base.CommonDbMixin):
# reference implementation. needs to be overrided by subclass
def create_ns(self, context, ns):
LOG.debug(_('ns %s'), ns)
LOG.debug('ns %s', ns)
ns = ns['ns']
tenant_id = self._get_tenant_id_for_create(context, ns)
nsd_id = ns['nsd_id']
@ -279,7 +279,7 @@ class NSPluginDb(network_service.NSPluginBase, db_base.CommonDbMixin):
def create_ns_post(self, context, ns_id, mistral_obj,
vnfd_dict, error_reason):
LOG.debug(_('ns ID %s'), ns_id)
LOG.debug('ns ID %s', ns_id)
output = ast.literal_eval(mistral_obj.output)
mgmt_urls = dict()
vnf_ids = dict()

View File

@ -21,7 +21,6 @@ from oslo_log import log as logging
from six import iteritems
from sqlalchemy import orm
from sqlalchemy.orm import exc as orm_exc
from tacker._i18n import _
from tacker.db import db_base
from tacker.db import model_base
from tacker.db import models_v1
@ -219,7 +218,7 @@ class VnffgPluginDbMixin(vnffg.VNFFGPluginBase, db_base.CommonDbMixin):
def create_vnffgd(self, context, vnffgd):
template = vnffgd['vnffgd']
LOG.debug(_('template %s'), template)
LOG.debug('template %s', template)
tenant_id = self._get_tenant_id_for_create(context, template)
with context.session.begin(subtransactions=True):
@ -232,7 +231,7 @@ class VnffgPluginDbMixin(vnffg.VNFFGPluginBase, db_base.CommonDbMixin):
template=template.get('template'))
context.session.add(template_db)
LOG.debug(_('template_db %(template_db)s'),
LOG.debug('template_db %(template_db)s',
{'template_db': template_db})
return self._make_template_dict(template_db)
@ -322,7 +321,7 @@ class VnffgPluginDbMixin(vnffg.VNFFGPluginBase, db_base.CommonDbMixin):
# called internally, not by REST API
def _create_vnffg_pre(self, context, vnffg):
vnffg = vnffg['vnffg']
LOG.debug(_('vnffg %s'), vnffg)
LOG.debug('vnffg %s', vnffg)
tenant_id = self._get_tenant_id_for_create(context, vnffg)
name = vnffg.get('name')
vnffg_id = vnffg.get('id') or str(uuid.uuid4())
@ -332,7 +331,7 @@ class VnffgPluginDbMixin(vnffg.VNFFGPluginBase, db_base.CommonDbMixin):
with context.session.begin(subtransactions=True):
template_db = self._get_resource(context, VnffgTemplate,
template_id)
LOG.debug(_('vnffg template %s'), template_db)
LOG.debug('vnffg template %s', template_db)
if vnffg.get('attributes') and \
vnffg['attributes'].get('param_values'):
@ -346,13 +345,13 @@ class VnffgPluginDbMixin(vnffg.VNFFGPluginBase, db_base.CommonDbMixin):
vnf_members = self._get_vnffg_property(template_db.template,
'constituent_vnfs')
LOG.debug(_('Constituent VNFs: %s'), vnf_members)
LOG.debug('Constituent VNFs: %s', vnf_members)
vnf_mapping = self._get_vnf_mapping(context, vnffg.get(
'vnf_mapping'), vnf_members)
LOG.debug(_('VNF Mapping: %s'), vnf_mapping)
LOG.debug('VNF Mapping: %s', vnf_mapping)
# create NFP dict
nfp_dict = self._create_nfp_pre(template_db)
LOG.debug(_('NFP: %s'), nfp_dict)
LOG.debug('NFP: %s', nfp_dict)
vnffg_db = Vnffg(id=vnffg_id,
tenant_id=tenant_id,
name=name,
@ -377,7 +376,7 @@ class VnffgPluginDbMixin(vnffg.VNFFGPluginBase, db_base.CommonDbMixin):
chain = self._create_port_chain(context, vnf_mapping, template_db,
nfp_dict['name'])
LOG.debug(_('chain: %s'), chain)
LOG.debug('chain: %s', chain)
sfc_db = VnffgChain(id=sfc_id,
tenant_id=tenant_id,
status=constants.PENDING_CREATE,
@ -398,7 +397,7 @@ class VnffgPluginDbMixin(vnffg.VNFFGPluginBase, db_base.CommonDbMixin):
match = self._policy_to_acl_criteria(context, template_db,
nfp_dict['name'],
vnf_mapping)
LOG.debug(_('acl_match %s'), match)
LOG.debug('acl_match %s', match)
match_db_table = ACLMatchCriteria(
id=str(uuid.uuid4()),
@ -502,7 +501,7 @@ class VnffgPluginDbMixin(vnffg.VNFFGPluginBase, db_base.CommonDbMixin):
attr_val = VnffgPluginDbMixin._search_value(
template['node_templates'][nfp], attribute)
if attr_val is None:
LOG.debug(_('NFP %(nfp)s, attr %(attr)s'),
LOG.debug('NFP %(nfp)s, attr %(attr)s',
{'nfp': template['node_templates'][nfp],
'attr': attribute})
raise nfvo.NfpAttributeNotFoundException(attribute=attribute)
@ -546,14 +545,14 @@ class VnffgPluginDbMixin(vnffg.VNFFGPluginBase, db_base.CommonDbMixin):
# that match VNFD
if vnf_mapping is None or vnfd not in vnf_mapping.keys():
# find suitable VNFs from vnfd_id
LOG.debug(_('Searching VNFS with id %s'), vnfd_id)
LOG.debug('Searching VNFS with id %s', vnfd_id)
vnf_list = vnfm_plugin.get_vnfs(context,
{'vnfd_id': [vnfd_id]},
fields=['id'])
if len(vnf_list) == 0:
raise nfvo.VnffgInvalidMappingException(vnfd_name=vnfd)
else:
LOG.debug(_('Matching VNFs found %s'), vnf_list)
LOG.debug('Matching VNFs found %s', vnf_list)
vnf_list = [vnf['id'] for vnf in vnf_list]
if len(vnf_list) > 1:
new_mapping[vnfd] = random.choice(vnf_list)
@ -581,7 +580,7 @@ class VnffgPluginDbMixin(vnffg.VNFFGPluginBase, db_base.CommonDbMixin):
:param vnfs: List of VNF instance IDs
:return: None
"""
LOG.debug(_('validating vim for vnfs %s'), vnfs)
LOG.debug('validating vim for vnfs %s', vnfs)
vnfm_plugin = manager.TackerManager.get_service_plugins()['VNFM']
vim_id = None
for vnf in vnfs:
@ -670,9 +669,8 @@ class VnffgPluginDbMixin(vnffg.VNFFGPluginBase, db_base.CommonDbMixin):
# instance_id = None means error on creation
def _create_vnffg_post(self, context, sfc_instance_id,
fc_instance_id, vnffg_dict):
LOG.debug(_('SFC created instance is %s'), sfc_instance_id)
LOG.debug(_('Flow Classifier created instance is %s'),
fc_instance_id)
LOG.debug('SFC created instance is %s', sfc_instance_id)
LOG.debug('Flow Classifier created instance is %s', fc_instance_id)
nfp_dict = self.get_nfp(context, vnffg_dict['forwarding_paths'])
sfc_id = nfp_dict['chain_id']
classifier_id = nfp_dict['classifier_id']
@ -723,8 +721,8 @@ class VnffgPluginDbMixin(vnffg.VNFFGPluginBase, db_base.CommonDbMixin):
nfp_query.update({'status': status})
def _make_vnffg_dict(self, vnffg_db, fields=None):
LOG.debug(_('vnffg_db %s'), vnffg_db)
LOG.debug(_('vnffg_db nfp %s'), vnffg_db.forwarding_paths)
LOG.debug('vnffg_db %s', vnffg_db)
LOG.debug('vnffg_db nfp %s', vnffg_db.forwarding_paths)
res = {
'forwarding_paths': vnffg_db.forwarding_paths[0]['id']
}
@ -917,8 +915,8 @@ class VnffgPluginDbMixin(vnffg.VNFFGPluginBase, db_base.CommonDbMixin):
if entry[key]}
def _make_classifier_dict(self, classifier_db, fields=None):
LOG.debug(_('classifier_db %s'), classifier_db)
LOG.debug(_('classifier_db match %s'), classifier_db.match)
LOG.debug('classifier_db %s', classifier_db)
LOG.debug('classifier_db match %s', classifier_db.match)
res = {
'match': self._make_acl_match_dict(classifier_db.match)
}
@ -928,7 +926,7 @@ class VnffgPluginDbMixin(vnffg.VNFFGPluginBase, db_base.CommonDbMixin):
return self._fields(res, fields)
def _make_nfp_dict(self, nfp_db, fields=None):
LOG.debug(_('nfp_db %s'), nfp_db)
LOG.debug('nfp_db %s', nfp_db)
res = {'chain_id': nfp_db.chain['id'],
'classifier_id': nfp_db.classifier['id']}
key_list = ('name', 'id', 'tenant_id', 'symmetrical', 'status',
@ -937,7 +935,7 @@ class VnffgPluginDbMixin(vnffg.VNFFGPluginBase, db_base.CommonDbMixin):
return self._fields(res, fields)
def _make_chain_dict(self, chain_db, fields=None):
LOG.debug(_('chain_db %s'), chain_db)
LOG.debug('chain_db %s', chain_db)
res = {}
key_list = ('id', 'tenant_id', 'symmetrical', 'status', 'chain',
'path_id', 'nfp_id', 'instance_id')

View File

@ -216,8 +216,8 @@ class VNFMPluginDb(vnfm.VNFMPluginBase, db_base.CommonDbMixin):
return dict((arg.key, arg.value) for arg in dev_attrs_db)
def _make_vnf_dict(self, vnf_db, fields=None):
LOG.debug(_('vnf_db %s'), vnf_db)
LOG.debug(_('vnf_db attributes %s'), vnf_db.attributes)
LOG.debug('vnf_db %s', vnf_db)
LOG.debug('vnf_db attributes %s', vnf_db.attributes)
res = {
'vnfd':
self._make_vnfd_dict(vnf_db.vnfd),
@ -239,14 +239,14 @@ class VNFMPluginDb(vnfm.VNFMPluginBase, db_base.CommonDbMixin):
def create_vnfd(self, context, vnfd):
vnfd = vnfd['vnfd']
LOG.debug(_('vnfd %s'), vnfd)
LOG.debug('vnfd %s', vnfd)
tenant_id = self._get_tenant_id_for_create(context, vnfd)
service_types = vnfd.get('service_types')
mgmt_driver = vnfd.get('mgmt_driver')
template_source = vnfd.get("template_source")
if (not attributes.is_attr_set(service_types)):
LOG.debug(_('service types unspecified'))
LOG.debug('service types unspecified')
raise vnfm.ServiceTypesNotSpecified()
try:
@ -280,11 +280,11 @@ class VNFMPluginDb(vnfm.VNFMPluginBase, db_base.CommonDbMixin):
raise exceptions.DuplicateEntity(
_type="vnfd",
entry=e.columns)
LOG.debug(_('vnfd_db %(vnfd_db)s %(attributes)s '),
LOG.debug('vnfd_db %(vnfd_db)s %(attributes)s ',
{'vnfd_db': vnfd_db,
'attributes': vnfd_db.attributes})
vnfd_dict = self._make_vnfd_dict(vnfd_db)
LOG.debug(_('vnfd_dict %s'), vnfd_dict)
LOG.debug('vnfd_dict %s', vnfd_dict)
self._cos_db_plg.create_event(
context, res_id=vnfd_dict['id'],
res_type=constants.RES_TYPE_VNFD,
@ -352,7 +352,7 @@ class VNFMPluginDb(vnfm.VNFMPluginBase, db_base.CommonDbMixin):
def choose_vnfd(self, context, service_type,
required_attributes=None):
required_attributes = required_attributes or []
LOG.debug(_('required_attributes %s'), required_attributes)
LOG.debug('required_attributes %s', required_attributes)
with context.session.begin(subtransactions=True):
query = (
context.session.query(VNFD).
@ -368,7 +368,7 @@ class VNFMPluginDb(vnfm.VNFMPluginBase, db_base.CommonDbMixin):
VNFD.id ==
VNFDAttribute.vnfd_id,
VNFDAttribute.key == key)))
LOG.debug(_('statements %s'), query)
LOG.debug('statements %s', query)
vnfd_db = query.first()
if vnfd_db:
return self._make_vnfd_dict(vnfd_db)
@ -388,7 +388,7 @@ class VNFMPluginDb(vnfm.VNFMPluginBase, db_base.CommonDbMixin):
# called internally, not by REST API
def _create_vnf_pre(self, context, vnf):
LOG.debug(_('vnf %s'), vnf)
LOG.debug('vnf %s', vnf)
tenant_id = self._get_tenant_id_for_create(context, vnf)
vnfd_id = vnf['vnfd_id']
name = vnf.get('name')
@ -435,7 +435,7 @@ class VNFMPluginDb(vnfm.VNFMPluginBase, db_base.CommonDbMixin):
# intsance_id = None means error on creation
def _create_vnf_post(self, context, vnf_id, instance_id,
mgmt_url, vnf_dict):
LOG.debug(_('vnf_dict %s'), vnf_dict)
LOG.debug('vnf_dict %s', vnf_dict)
with context.session.begin(subtransactions=True):
query = (self._model_query(context, VNF).
filter(VNF.id == vnf_id).
@ -656,7 +656,7 @@ class VNFMPluginDb(vnfm.VNFMPluginBase, db_base.CommonDbMixin):
filter(~VNF.status.in_(exclude_status)).
with_lockmode('update').one())
except orm_exc.NoResultFound:
LOG.warning(_('no vnf found %s'), vnf_id)
LOG.warning('no vnf found %s', vnf_id)
return False
vnf_db.update({'status': new_status})

View File

@ -133,11 +133,11 @@ class TackerManager(object):
plugin_providers = cfg.CONF.service_plugins
if 'commonservices' not in plugin_providers:
plugin_providers.append('commonservices')
LOG.debug(_("Loading service plugins: %s"), plugin_providers)
LOG.debug("Loading service plugins: %s", plugin_providers)
for provider in plugin_providers:
if provider == '':
continue
LOG.info(_("Loading Plugin: %s"), provider)
LOG.info("Loading Plugin: %s", provider)
plugin_inst = self._get_plugin_instance('tacker.service_plugins',
provider)
@ -156,8 +156,8 @@ class TackerManager(object):
# hasattr(plugin_inst, 'agent_notifiers')):
# self.plugin.agent_notifiers.update(plugin_inst.agent_notifiers)
LOG.debug(_("Successfully loaded %(type)s plugin. "
"Description: %(desc)s"),
LOG.debug("Successfully loaded %(type)s plugin. "
"Description: %(desc)s",
{"type": plugin_inst.get_plugin_type(),
"desc": plugin_inst.get_plugin_description()})

View File

@ -154,7 +154,7 @@ class OpenStack_Driver(abstract_vim_driver.VimAbstractDriver,
try:
keystone_version = self.keystone.get_version(auth_url)
except Exception as e:
LOG.error(_('VIM Auth URL invalid'))
LOG.error('VIM Auth URL invalid')
raise nfvo.VimConnectionException(message=e.message)
return keystone_version
@ -186,7 +186,7 @@ class OpenStack_Driver(abstract_vim_driver.VimAbstractDriver,
try:
regions_list = self._find_regions(ks_client)
except (exceptions.Unauthorized, exceptions.BadRequest) as e:
LOG.warning(_("Authorization failed for user"))
LOG.warning("Authorization failed for user")
raise nfvo.VimUnauthorizedException(message=e.message)
vim_obj['placement_attr'] = {'regions': regions_list}
return vim_obj
@ -309,7 +309,7 @@ class OpenStack_Driver(abstract_vim_driver.VimAbstractDriver,
try:
resources = getattr(client, "%s" % cmd)(**cmd_args)[vim_res_name]
LOG.debug(_('resources output %s'), resources)
LOG.debug('resources output %s', resources)
except Exception:
raise nfvo.VimGetResourceException(
cmd=cmd, name=resource_name, type=resource_type)
@ -351,13 +351,13 @@ class OpenStack_Driver(abstract_vim_driver.VimAbstractDriver,
return None
if not auth_attr:
LOG.warning(_("auth information required for n-sfc driver"))
LOG.warning("auth information required for n-sfc driver")
return None
if symmetrical:
LOG.warning(_("n-sfc driver does not support symmetrical"))
LOG.warning("n-sfc driver does not support symmetrical")
raise NotImplementedError('symmetrical chain not supported')
LOG.debug(_('fc passed is %s'), fc)
LOG.debug('fc passed is %s', fc)
sfc_classifier_params = {}
for field in fc:
if field in FC_MAP:
@ -368,10 +368,10 @@ class OpenStack_Driver(abstract_vim_driver.VimAbstractDriver,
raise ValueError('protocol %s not supported' % fc[field])
sfc_classifier_params['protocol'] = protocol
else:
LOG.warning(_("flow classifier %s not supported by "
"networking-sfc driver"), field)
LOG.warning("flow classifier %s not supported by "
"networking-sfc driver", field)
LOG.debug(_('sfc_classifier_params is %s'), sfc_classifier_params)
LOG.debug('sfc_classifier_params is %s', sfc_classifier_params)
if len(sfc_classifier_params) > 0:
neutronclient_ = NeutronClient(auth_attr)
@ -384,11 +384,11 @@ class OpenStack_Driver(abstract_vim_driver.VimAbstractDriver,
def create_chain(self, name, fc_id, vnfs, symmetrical=False,
auth_attr=None):
if not auth_attr:
LOG.warning(_("auth information required for n-sfc driver"))
LOG.warning("auth information required for n-sfc driver")
return None
if symmetrical:
LOG.warning(_("n-sfc driver does not support symmetrical"))
LOG.warning("n-sfc driver does not support symmetrical")
raise NotImplementedError('symmetrical chain not supported')
neutronclient_ = NeutronClient(auth_attr)
@ -404,16 +404,16 @@ class OpenStack_Driver(abstract_vim_driver.VimAbstractDriver,
'port pair group for %s' % vnf['name']
port_pair_group['port_pairs'] = []
if CONNECTION_POINT not in vnf:
LOG.warning(_("Chain creation failed due to missing "
LOG.warning("Chain creation failed due to missing "
"connection point info in VNF "
"%(vnfname)s"), {'vnfname': vnf['name']})
"%(vnfname)s", {'vnfname': vnf['name']})
return None
cp_list = vnf[CONNECTION_POINT]
num_cps = len(cp_list)
if num_cps != 1 and num_cps != 2:
LOG.warning(_("Chain creation failed due to wrong number of "
LOG.warning("Chain creation failed due to wrong number of "
"connection points: expected [1 | 2], got "
"%(cps)d"), {'cps': num_cps})
"%(cps)d", {'cps': num_cps})
return None
port_pair = {}
port_pair['name'] = vnf['name'] + '-connection-points'
@ -426,16 +426,16 @@ class OpenStack_Driver(abstract_vim_driver.VimAbstractDriver,
port_pair['egress'] = cp_list[1]
port_pair_id = neutronclient_.port_pair_create(port_pair)
if not port_pair_id:
LOG.warning(_("Chain creation failed due to port pair creation"
" failed for vnf %(vnf)s"), {'vnf': vnf['name']})
LOG.warning("Chain creation failed due to port pair creation"
" failed for vnf %(vnf)s", {'vnf': vnf['name']})
return None
port_pair_group['port_pairs'].append(port_pair_id)
port_pair_group_id = \
neutronclient_.port_pair_group_create(port_pair_group)
if not port_pair_group_id:
LOG.warning(_("Chain creation failed due to port pair group "
LOG.warning("Chain creation failed due to port pair group "
"creation failed for vnf "
"%(vnf)s"), {'vnf': vnf['name']})
"%(vnf)s", {'vnf': vnf['name']})
return None
port_pair_group_list.append(port_pair_group_id)
@ -455,12 +455,12 @@ class OpenStack_Driver(abstract_vim_driver.VimAbstractDriver,
# it will look it up (or reconstruct) from
# networking-sfc DB --- but the caveat is that
# the VNF name MUST be unique
LOG.warning(_("n-sfc driver does not support sf chain update"))
LOG.warning("n-sfc driver does not support sf chain update")
raise NotImplementedError('sf chain update not supported')
def delete_chain(self, chain_id, auth_attr=None):
if not auth_attr:
LOG.warning(_("auth information required for n-sfc driver"))
LOG.warning("auth information required for n-sfc driver")
return None
neutronclient_ = NeutronClient(auth_attr)
@ -469,11 +469,11 @@ class OpenStack_Driver(abstract_vim_driver.VimAbstractDriver,
def update_flow_classifier(self, fc_id, fc,
symmetrical=False, auth_attr=None):
if not auth_attr:
LOG.warning(_("auth information required for n-sfc driver"))
LOG.warning("auth information required for n-sfc driver")
return None
if symmetrical:
LOG.warning(_("n-sfc driver does not support symmetrical"))
LOG.warning("n-sfc driver does not support symmetrical")
raise NotImplementedError('symmetrical chain not supported')
# for now, the only parameters allowed for flow-classifier-update
@ -483,7 +483,7 @@ class OpenStack_Driver(abstract_vim_driver.VimAbstractDriver,
sfc_classifier_params['name'] = fc['name']
sfc_classifier_params['description'] = fc['description']
LOG.debug(_('sfc_classifier_params is %s'), sfc_classifier_params)
LOG.debug('sfc_classifier_params is %s', sfc_classifier_params)
neutronclient_ = NeutronClient(auth_attr)
return neutronclient_.flow_classifier_update(fc_id,
@ -491,7 +491,7 @@ class OpenStack_Driver(abstract_vim_driver.VimAbstractDriver,
def delete_flow_classifier(self, fc_id, auth_attr=None):
if not auth_attr:
LOG.warning(_("auth information required for n-sfc driver"))
LOG.warning("auth information required for n-sfc driver")
raise EnvironmentError('auth attribute required for'
' networking-sfc driver')
@ -500,7 +500,7 @@ class OpenStack_Driver(abstract_vim_driver.VimAbstractDriver,
def get_mistral_client(self, auth_dict):
if not auth_dict:
LOG.warning(_("auth dict required to instantiate mistral client"))
LOG.warning("auth dict required to instantiate mistral client")
raise EnvironmentError('auth dict required for'
' mistral workflow driver')
return mistral_client.MistralClient(
@ -547,7 +547,7 @@ class NeutronClient(object):
self.client = neutron_client.Client(session=sess)
def flow_classifier_create(self, fc_dict):
LOG.debug(_("fc_dict passed is {fc_dict}").format(fc_dict=fc_dict))
LOG.debug("fc_dict passed is {fc_dict}".format(fc_dict=fc_dict))
fc = self.client.create_flow_classifier({'flow_classifier': fc_dict})
if fc:
return fc['flow_classifier']['id']
@ -562,14 +562,14 @@ class NeutronClient(object):
try:
self.client.delete_flow_classifier(fc_id)
except nc_exceptions.NotFound:
LOG.warning(_("fc %s not found"), fc_id)
LOG.warning("fc %s not found", fc_id)
raise ValueError('fc %s not found' % fc_id)
def port_pair_create(self, port_pair_dict):
try:
pp = self.client.create_port_pair({'port_pair': port_pair_dict})
except nc_exceptions.BadRequest as e:
LOG.error(_("create port pair returns %s"), e)
LOG.error("create port pair returns %s", e)
raise ValueError(str(e))
if pp and len(pp):
@ -581,7 +581,7 @@ class NeutronClient(object):
try:
self.client.delete_port_pair(port_pair_id)
except nc_exceptions.NotFound:
LOG.warning(_('port pair %s not found'), port_pair_id)
LOG.warning('port pair %s not found', port_pair_id)
raise ValueError('port pair %s not found' % port_pair_id)
def port_pair_group_create(self, ppg_dict):
@ -589,7 +589,7 @@ class NeutronClient(object):
ppg = self.client.create_port_pair_group(
{'port_pair_group': ppg_dict})
except nc_exceptions.BadRequest as e:
LOG.warning(_('create port pair group returns %s'), e)
LOG.warning('create port pair group returns %s', e)
raise ValueError(str(e))
if ppg and len(ppg):
@ -601,7 +601,7 @@ class NeutronClient(object):
try:
self.client.delete_port_pair_group(ppg_id)
except nc_exceptions.NotFound:
LOG.warning(_('port pair group %s not found'), ppg_id)
LOG.warning('port pair group %s not found', ppg_id)
raise ValueError('port pair group %s not found' % ppg_id)
def port_chain_create(self, port_chain_dict):
@ -609,7 +609,7 @@ class NeutronClient(object):
pc = self.client.create_port_chain(
{'port_chain': port_chain_dict})
except nc_exceptions.BadRequest as e:
LOG.warning(_('create port chain returns %s'), e)
LOG.warning('create port chain returns %s', e)
raise ValueError(str(e))
if pc and len(pc):
@ -634,5 +634,5 @@ class NeutronClient(object):
pp_id = port_pairs[j]
self.client.delete_port_pair(pp_id)
except nc_exceptions.NotFound:
LOG.warning(_('port chain %s not found'), port_chain_id)
LOG.warning('port chain %s not found', port_chain_id)
raise ValueError('port chain %s not found' % port_chain_id)

View File

@ -48,7 +48,7 @@ class VNFFGNoop(abstract_vnffg_driver.VnffgAbstractDriver):
@log.log
def update_chain(self, chain_id, fc_ids, vnfs, auth_attr=None):
if chain_id not in self._instances:
LOG.debug(_('Chain not found'))
LOG.debug('Chain not found')
raise ValueError('No chain instance %s' % chain_id)
@log.log
@ -64,7 +64,7 @@ class VNFFGNoop(abstract_vnffg_driver.VnffgAbstractDriver):
@log.log
def update_flow_classifier(self, fc_id, fc, auth_attr=None):
if fc_id not in self._instances:
LOG.debug(_('FC not found'))
LOG.debug('FC not found')
raise ValueError('No FC instance %s' % fc_id)
@log.log

View File

@ -101,7 +101,7 @@ class NfvoPlugin(nfvo_db_plugin.NfvoPluginDb, vnffg_db.VnffgPluginDbMixin,
@log.log
def create_vim(self, context, vim):
LOG.debug(_('Create vim called with parameters %s'),
LOG.debug('Create vim called with parameters %s',
strutils.mask_password(vim))
vim_obj = vim['vim']
vim_type = vim_obj['type']
@ -212,7 +212,7 @@ class NfvoPlugin(nfvo_db_plugin.NfvoPluginDb, vnffg_db.VnffgPluginDbMixin,
'template'
)
LOG.debug(_('template yaml: %s'), template)
LOG.debug('template yaml: %s', template)
toscautils.updateimports(template)
@ -220,7 +220,7 @@ class NfvoPlugin(nfvo_db_plugin.NfvoPluginDb, vnffg_db.VnffgPluginDbMixin,
tosca_template.ToscaTemplate(
a_file=False, yaml_dict_tpl=template)
except Exception as e:
LOG.exception(_("tosca-parser error: %s"), str(e))
LOG.exception("tosca-parser error: %s", str(e))
raise nfvo.ToscaParserFailed(error_msg_details=str(e))
@log.log
@ -321,7 +321,7 @@ class NfvoPlugin(nfvo_db_plugin.NfvoPluginDb, vnffg_db.VnffgPluginDbMixin,
vnffg_dict = super(NfvoPlugin, self)._update_vnffg_pre(context,
vnffg_id)
new_vnffg = vnffg['vnffg']
LOG.debug(_('vnffg update: %s'), vnffg)
LOG.debug('vnffg update: %s', vnffg)
nfp = super(NfvoPlugin, self).get_nfp(context,
vnffg_dict['forwarding_paths'])
sfc = super(NfvoPlugin, self).get_sfc(context, nfp['chain_id'])
@ -346,7 +346,7 @@ class NfvoPlugin(nfvo_db_plugin.NfvoPluginDb, vnffg_db.VnffgPluginDbMixin,
'vnf_mapping'],
template_db,
nfp['name'])
LOG.debug(_('chain update: %s'), chain)
LOG.debug('chain update: %s', chain)
sfc['chain'] = chain
sfc['symmetrical'] = new_vnffg['symmetrical']
vim_obj = self._get_vim_from_vnf(context,
@ -464,7 +464,7 @@ class NfvoPlugin(nfvo_db_plugin.NfvoPluginDb, vnffg_db.VnffgPluginDbMixin,
f = fernet.Fernet(vim_key)
if not f:
LOG.warning(_('Unable to decode VIM auth'))
LOG.warning('Unable to decode VIM auth')
raise nfvo.VimNotFoundException(
'Unable to decode VIM auth key')
return f.decrypt(cred)
@ -472,10 +472,10 @@ class NfvoPlugin(nfvo_db_plugin.NfvoPluginDb, vnffg_db.VnffgPluginDbMixin,
@staticmethod
def _find_vim_key(vim_id):
key_file = os.path.join(CONF.vim_keys.openstack, vim_id)
LOG.debug(_('Attempting to open key file for vim id %s'), vim_id)
LOG.debug('Attempting to open key file for vim id %s', vim_id)
with open(key_file, 'r') as f:
return f.read()
LOG.warning(_('VIM id invalid or key not found for %s'), vim_id)
LOG.warning('VIM id invalid or key not found for %s', vim_id)
def _vim_resource_name_to_id(self, context, resource, name, vnf_id):
"""Converts a VIM resource name to its ID
@ -501,7 +501,7 @@ class NfvoPlugin(nfvo_db_plugin.NfvoPluginDb, vnffg_db.VnffgPluginDbMixin,
if isinstance(template, dict):
nsd_data['attributes']['nsd'] = yaml.safe_dump(
template)
LOG.debug(_('nsd %s'), nsd_data)
LOG.debug('nsd %s', nsd_data)
self._parse_template_input(context, nsd)
return super(NfvoPlugin, self).create_nsd(
@ -512,7 +512,7 @@ class NfvoPlugin(nfvo_db_plugin.NfvoPluginDb, vnffg_db.VnffgPluginDbMixin,
nsd_yaml = nsd_dict['attributes'].get('nsd')
inner_nsd_dict = yaml.safe_load(nsd_yaml)
nsd['vnfds'] = dict()
LOG.debug(_('nsd_dict: %s'), inner_nsd_dict)
LOG.debug('nsd_dict: %s', inner_nsd_dict)
vnfm_plugin = manager.TackerManager.get_service_plugins()['VNFM']
vnfd_imports = inner_nsd_dict['imports']
@ -542,7 +542,7 @@ class NfvoPlugin(nfvo_db_plugin.NfvoPluginDb, vnffg_db.VnffgPluginDbMixin,
ToscaTemplate(a_file=False,
yaml_dict_tpl=inner_nsd_dict)
except Exception as e:
LOG.exception(_("tosca-parser error: %s"), str(e))
LOG.exception("tosca-parser error: %s", str(e))
raise nfvo.ToscaParserFailed(error_msg_details=str(e))
finally:
for file_path in new_files:
@ -559,7 +559,7 @@ class NfvoPlugin(nfvo_db_plugin.NfvoPluginDb, vnffg_db.VnffgPluginDbMixin,
nsd_dict['name'] = inner_nsd_dict['metadata'].get(
'template_name', '')
LOG.debug(_('nsd %s'), nsd)
LOG.debug('nsd %s', nsd)
def _get_vnfd_id(self, vnfd_name, onboarded_vnfds):
for vnfd in onboarded_vnfds:
@ -648,7 +648,7 @@ class NfvoPlugin(nfvo_db_plugin.NfvoPluginDb, vnffg_db.VnffgPluginDbMixin,
workflow=workflow,
auth_dict=self.get_auth_dict(context))
except Exception as ex:
LOG.error(_('Error while executing workflow: %s'), ex)
LOG.error('Error while executing workflow: %s', ex)
self._vim_drivers.invoke(driver_type,
'delete_workflow',
workflow_id=workflow['id'],
@ -666,7 +666,7 @@ class NfvoPlugin(nfvo_db_plugin.NfvoPluginDb, vnffg_db.VnffgPluginDbMixin,
'get_execution',
execution_id=execution_id,
auth_dict=self.get_auth_dict(context)).state
LOG.debug(_('status: %s'), exec_state)
LOG.debug('status: %s', exec_state)
if exec_state == 'SUCCESS' or exec_state == 'ERROR':
break
mistral_retries = mistral_retries - 1
@ -740,7 +740,7 @@ class NfvoPlugin(nfvo_db_plugin.NfvoPluginDb, vnffg_db.VnffgPluginDbMixin,
kwargs={
'ns': ns})
except nfvo.NoTasksException:
LOG.warning(_("No VNF deletion task(s)."))
LOG.warning("No VNF deletion task(s).")
if workflow:
try:
mistral_execution = self._vim_drivers.invoke(
@ -750,7 +750,7 @@ class NfvoPlugin(nfvo_db_plugin.NfvoPluginDb, vnffg_db.VnffgPluginDbMixin,
auth_dict=self.get_auth_dict(context))
except Exception as ex:
LOG.error(_('Error while executing workflow: %s'), ex)
LOG.error('Error while executing workflow: %s', ex)
self._vim_drivers.invoke(driver_type,
'delete_workflow',
workflow_id=workflow['id'],
@ -769,7 +769,7 @@ class NfvoPlugin(nfvo_db_plugin.NfvoPluginDb, vnffg_db.VnffgPluginDbMixin,
'get_execution',
execution_id=execution_id,
auth_dict=self.get_auth_dict(context)).state
LOG.debug(_('status: %s'), exec_state)
LOG.debug('status: %s', exec_state)
if exec_state == 'SUCCESS' or exec_state == 'ERROR':
break
mistral_retries -= 1

View File

@ -99,7 +99,7 @@ def updateimports(template):
template['imports'].append(nfvfile)
LOG.debug(_("%s"), path)
LOG.debug(path)
@log.log
@ -276,7 +276,7 @@ def post_process_heat_template(heat_tpl, mgmt_ports, metadata,
heat_dict['outputs'].update(output)
else:
heat_dict['outputs'] = output
LOG.debug(_('Added output for %s'), outputname)
LOG.debug('Added output for %s', outputname)
if metadata:
for vdu_name, metadata_dict in metadata['vdus'].items():
heat_dict['resources'][vdu_name]['properties']['metadata'] =\
@ -396,9 +396,9 @@ def populate_flavor_extra_specs(es_dict, properties, flavor_extra_input):
error_msg_details=(mval + ":Invalid Input"))
es_dict['hw:mem_page_size'] = mval
if 'numa_nodes' in properties and 'numa_node_count' in properties:
LOG.warning(_('Both numa_nodes and numa_node_count have been'
LOG.warning('Both numa_nodes and numa_node_count have been'
'specified; numa_node definitions will be ignored and'
'numa_node_count will be applied'))
'numa_node_count will be applied')
if 'numa_node_count' in properties:
es_dict['hw:numa_nodes'] = \
properties['numa_node_count'].value

View File

@ -56,7 +56,7 @@ class DeviceNoop(abstract_driver.DeviceAbstractDriver):
@log.log
def update(self, plugin, context, vnf_id, vnf_dict, vnf):
if vnf_id not in self._instances:
LOG.debug(_('not found'))
LOG.debug('not found')
raise ValueError('No instance %s' % vnf_id)
@log.log

View File

@ -47,8 +47,8 @@ class HeatClient(object):
try:
self.stacks.delete(stack_id)
except heatException.HTTPNotFound:
LOG.warning(_("Stack %(stack)s created by service chain driver is "
"not found at cleanup"), {'stack': stack_id})
LOG.warning("Stack %(stack)s created by service chain driver is "
"not found at cleanup", {'stack': stack_id})
def get(self, stack_id):
return self.stacks.get(stack_id)

View File

@ -95,7 +95,7 @@ class OpenStack(abstract_driver.DeviceAbstractDriver,
@log.log
def create(self, plugin, context, vnf, auth_attr):
LOG.debug(_('vnf %s'), vnf)
LOG.debug('vnf %s', vnf)
region_name = vnf.get('placement_attr', {}).get('region_name', None)
heatclient = hc.HeatClient(auth_attr, region_name)
@ -115,9 +115,9 @@ class OpenStack(abstract_driver.DeviceAbstractDriver,
fields['stack_name'] = name
# service context is ignored
LOG.debug(_('service_context: %s'), vnf.get('service_context', []))
LOG.debug(_('fields: %s'), fields)
LOG.debug(_('template: %s'), fields['template'])
LOG.debug('service_context: %s', vnf.get('service_context', []))
LOG.debug('fields: %s', fields)
LOG.debug('template: %s', fields['template'])
stack = heatclient.create(fields)
return stack
@ -137,17 +137,17 @@ class OpenStack(abstract_driver.DeviceAbstractDriver,
try:
stack = heatclient.get(vnf_id)
except Exception:
LOG.warning(_("VNF Instance setup may not have "
LOG.warning("VNF Instance setup may not have "
"happened because Heat API request failed "
"while waiting for the stack %(stack)s to be "
"created"), {'stack': vnf_id})
"created", {'stack': vnf_id})
# continue to avoid temporary connection error to target
# VIM
status = stack.stack_status
LOG.debug(_('status: %s'), status)
LOG.debug('status: %s', status)
stack_retries = stack_retries - 1
LOG.debug(_('stack status: %(stack)s %(status)s'),
LOG.debug('stack status: %(stack)s %(status)s',
{'stack': str(stack), 'status': status})
if stack_retries == 0 and status != 'CREATE_COMPLETE':
error_reason = _("Resource creation is not completed within"
@ -156,7 +156,7 @@ class OpenStack(abstract_driver.DeviceAbstractDriver,
wait=(self.STACK_RETRIES *
self.STACK_RETRY_WAIT),
stack=vnf_id)
LOG.warning(_("VNF Creation failed: %(reason)s"),
LOG.warning("VNF Creation failed: %(reason)s",
{'reason': error_reason})
raise vnfm.VNFCreateWaitFailed(reason=error_reason)
@ -165,7 +165,7 @@ class OpenStack(abstract_driver.DeviceAbstractDriver,
raise vnfm.VNFCreateWaitFailed(reason=error_reason)
def _find_mgmt_ips(outputs):
LOG.debug(_('outputs %s'), outputs)
LOG.debug('outputs %s', outputs)
mgmt_ips = dict((output['output_key'][len(OUTPUT_PREFIX):],
output['output_value'])
for output in outputs
@ -246,10 +246,10 @@ class OpenStack(abstract_driver.DeviceAbstractDriver,
except heatException.HTTPNotFound:
return
except Exception:
LOG.warning(_("VNF Instance cleanup may not have "
LOG.warning("VNF Instance cleanup may not have "
"happened because Heat API request failed "
"while waiting for the stack %(stack)s to be "
"deleted"), {'stack': vnf_id})
"deleted", {'stack': vnf_id})
# Just like create wait, ignore the exception to
# avoid temporary connection error.
status = stack.stack_status

View File

@ -95,7 +95,7 @@ class TOSCAToHOT(object):
self.vnfd_yaml = self.attributes.pop('vnfd', None)
if self.vnfd_yaml is None:
# TODO(kangaraj-manickam) raise user level exception
LOG.info(_("VNFD is not provided, so no vnf is created !!"))
LOG.info("VNFD is not provided, so no vnf is created !!")
return
LOG.debug('vnfd_yaml %s', self.vnfd_yaml)

View File

@ -70,16 +70,15 @@ class Keystone(object):
@staticmethod
def create_key_dir(path):
if not os.access(path, os.F_OK):
LOG.info(_(
'[fernet_tokens] key_repository does not appear to exist; '
'attempting to create it'))
LOG.info('[fernet_tokens] key_repository does not appear to '
'exist; attempting to create it')
try:
os.makedirs(path, 0o700)
except OSError:
LOG.error(_(
LOG.error(
'Failed to create [fernet_tokens] key_repository: either'
'it already exists or you don\'t have sufficient'
'permissions to create it'))
'permissions to create it')
def create_fernet_key(self):
fernet_key = fernet.Fernet.generate_key()

View File

@ -33,9 +33,9 @@ class DeviceMgmtNoop(abstract_driver.DeviceMGMTAbstractDriver):
return 'Tacker VNFMgmt Noop Driver'
def mgmt_url(self, plugin, context, vnf):
LOG.debug(_('mgmt_url %s'), vnf)
LOG.debug('mgmt_url %s', vnf)
return 'noop-mgmt-url'
def mgmt_call(self, plugin, context, vnf, kwargs):
LOG.debug(_('mgmt_call %(vnf)s %(kwargs)s'),
LOG.debug('mgmt_call %(vnf)s %(kwargs)s',
{'vnf': vnf, 'kwargs': kwargs})

View File

@ -49,7 +49,7 @@ class DeviceMgmtOpenWRT(abstract_driver.DeviceMGMTAbstractDriver):
return 'Tacker VNFMgmt OpenWRT Driver'
def mgmt_url(self, plugin, context, vnf):
LOG.debug(_('mgmt_url %s'), vnf)
LOG.debug('mgmt_url %s', vnf)
return vnf.get('mgmt_url', '')
@log.log
@ -58,16 +58,16 @@ class DeviceMgmtOpenWRT(abstract_driver.DeviceMGMTAbstractDriver):
password = cfg.CONF.openwrt.password
try:
cmd = "uci import %s; /etc/init.d/%s restart" % (service, service)
LOG.debug(_('execute command: %(cmd)s on mgmt_ip_address '
'%(mgmt_ip)s'),
LOG.debug('execute command: %(cmd)s on mgmt_ip_address '
'%(mgmt_ip)s',
{'cmd': cmd,
'mgmt_ip': mgmt_ip_address})
commander = cmd_executer.RemoteCommandExecutor(
user, password, mgmt_ip_address)
commander.execute_command(cmd, input_data=config)
except Exception as ex:
LOG.error(_("While executing command on remote "
"%(mgmt_ip)s: %(exception)s"),
LOG.error("While executing command on remote "
"%(mgmt_ip)s: %(exception)s",
{'mgmt_ip': mgmt_ip_address,
'exception': ex})
raise exceptions.MgmtDriverException()
@ -96,8 +96,8 @@ class DeviceMgmtOpenWRT(abstract_driver.DeviceMGMTAbstractDriver):
continue
mgmt_ip_address = mgmt_url.get(vdu, '')
if not mgmt_ip_address:
LOG.warning(_('tried to configure unknown mgmt '
'address on VNF %(vnf)s VDU %(vdu)s'),
LOG.warning('tried to configure unknown mgmt '
'address on VNF %(vnf)s VDU %(vdu)s',
{'vnf': vnf.get('name'),
'vdu': vdu})
continue

View File

@ -60,7 +60,7 @@ class VNFMonitorCeilometer(
# -name/action-name?key=8785'
host = cfg.CONF.ceilometer.host
port = cfg.CONF.ceilometer.port
LOG.info(_("Tacker in heat listening on %(host)s:%(port)s"),
LOG.info("Tacker in heat listening on %(host)s:%(port)s",
{'host': host,
'port': port})
origin = "http://%(host)s:%(port)s/v1.0/vnfs" % {

View File

@ -48,7 +48,7 @@ class VNFMonitorHTTPPing(abstract_driver.VNFMonitorAbstractDriver):
return 'Tacker HTTP Ping Driver for VNF'
def monitor_url(self, plugin, context, vnf):
LOG.debug(_('monitor_url %s'), vnf)
LOG.debug('monitor_url %s', vnf)
return vnf.get('monitor_url', '')
def _is_pingable(self, mgmt_ip='', retry=5, timeout=5, port=80, **kwargs):

View File

@ -47,7 +47,7 @@ class VNFMonitorPing(abstract_driver.VNFMonitorAbstractDriver):
return 'Tacker VNFMonitor Ping Driver'
def monitor_url(self, plugin, context, vnf):
LOG.debug(_('monitor_url %s'), vnf)
LOG.debug('monitor_url %s', vnf)
return vnf.get('monitor_url', '')
def _is_pingable(self, mgmt_ip="", count=5, timeout=1, interval='0.2',

View File

@ -161,11 +161,11 @@ class VNFMPlugin(vnfm_db.VNFMPluginDb, VNFMMgmtMixin):
raise exceptions.Invalid('Not a valid template: '
'tosca_definitions_version is missing.')
LOG.debug(_('vnfd %s'), vnfd_data)
LOG.debug('vnfd %s', vnfd_data)
service_types = vnfd_data.get('service_types')
if not attributes.is_attr_set(service_types):
LOG.debug(_('service type must be specified'))
LOG.debug('service type must be specified')
raise vnfm.ServiceTypesNotSpecified()
for service_type in service_types:
# TODO(yamahata):
@ -189,7 +189,7 @@ class VNFMPlugin(vnfm_db.VNFMPluginDb, VNFMMgmtMixin):
return
inner_vnfd_dict = yaml.safe_load(vnfd_yaml)
LOG.debug(_('vnfd_dict: %s'), inner_vnfd_dict)
LOG.debug('vnfd_dict: %s', inner_vnfd_dict)
# Prepend the tacker_defs.yaml import file with the full
# path to the file
@ -199,7 +199,7 @@ class VNFMPlugin(vnfm_db.VNFMPluginDb, VNFMMgmtMixin):
tosca = ToscaTemplate(a_file=False,
yaml_dict_tpl=inner_vnfd_dict)
except Exception as e:
LOG.exception(_("tosca-parser error: %s"), str(e))
LOG.exception("tosca-parser error: %s", str(e))
raise vnfm.ToscaParserFailed(error_msg_details=str(e))
if ('description' not in vnfd_dict or
@ -214,7 +214,7 @@ class VNFMPlugin(vnfm_db.VNFMPluginDb, VNFMMgmtMixin):
vnfd_dict['mgmt_driver'] = toscautils.get_mgmt_driver(
tosca)
LOG.debug(_('vnfd %s'), vnfd)
LOG.debug('vnfd %s', vnfd)
def add_vnf_to_monitor(self, context, vnf_dict):
dev_attrs = vnf_dict['attributes']
@ -305,7 +305,7 @@ class VNFMPlugin(vnfm_db.VNFMPluginDb, VNFMMgmtMixin):
try:
self.mgmt_call(context, vnf_dict, kwargs)
except exceptions.MgmtDriverException:
LOG.error(_('VNF configuration failed'))
LOG.error('VNF configuration failed')
new_status = constants.ERROR
self.set_vnf_error_status_reason(context, vnf_id,
'Unable to configure VDU')
@ -325,7 +325,7 @@ class VNFMPlugin(vnfm_db.VNFMPluginDb, VNFMMgmtMixin):
vnf_dict = self._create_vnf_pre(
context, vnf) if not vnf.get('id') else vnf
vnf_id = vnf_dict['id']
LOG.debug(_('vnf_dict %s'), vnf_dict)
LOG.debug('vnf_dict %s', vnf_dict)
self.mgmt_create_pre(context, vnf_dict)
self.add_alarm_url_to_vnf(context, vnf_dict)
try:
@ -381,8 +381,8 @@ class VNFMPlugin(vnfm_db.VNFMPluginDb, VNFMMgmtMixin):
self._report_deprecated_yaml_str()
infra_driver, vim_auth = self._get_infra_driver(context, vnf_info)
if infra_driver not in self._vnf_manager:
LOG.debug(_('unknown vim driver '
'%(infra_driver)s in %(drivers)s'),
LOG.debug('unknown vim driver '
'%(infra_driver)s in %(drivers)s',
{'infra_driver': infra_driver,
'drivers': cfg.CONF.tacker.infra_driver})
raise vnfm.InvalidInfraDriver(vim_name=infra_driver)
@ -422,7 +422,7 @@ class VNFMPlugin(vnfm_db.VNFMPluginDb, VNFMMgmtMixin):
region_name=region_name)
self.mgmt_call(context, vnf_dict, kwargs)
except exceptions.MgmtDriverException as e:
LOG.error(_('VNF configuration failed'))
LOG.error('VNF configuration failed')
new_status = constants.ERROR
self._vnf_monitor.delete_hosting_vnf(vnf_dict['id'])
self.set_vnf_error_status_reason(context, vnf_dict['id'],
@ -489,7 +489,7 @@ class VNFMPlugin(vnfm_db.VNFMPluginDb, VNFMMgmtMixin):
e = e_
vnf_dict['status'] = constants.ERROR
vnf_dict['error_reason'] = six.text_type(e)
LOG.exception(_('_delete_vnf_wait'))
LOG.exception('_delete_vnf_wait')
self.set_vnf_error_status_reason(context, vnf_dict['id'],
vnf_dict['error_reason'])
@ -553,7 +553,7 @@ class VNFMPlugin(vnfm_db.VNFMPluginDb, VNFMMgmtMixin):
policy=policy['id']
)
LOG.debug(_("Policy %s is validated successfully"), policy['id'])
LOG.debug("Policy %s is validated successfully", policy['id'])
def _get_status():
if policy['action'] == constants.ACTION_SCALE_IN:
@ -570,7 +570,7 @@ class VNFMPlugin(vnfm_db.VNFMPluginDb, VNFMMgmtMixin):
policy,
[constants.ACTIVE],
status)
LOG.debug(_("Policy %(policy)s vnf is at %(status)s"),
LOG.debug("Policy %(policy)s vnf is at %(status)s",
{'policy': policy['id'],
'status': status})
return result
@ -583,7 +583,7 @@ class VNFMPlugin(vnfm_db.VNFMPluginDb, VNFMMgmtMixin):
[status],
new_status,
mgmt_url)
LOG.debug(_("Policy %(policy)s vnf is at %(status)s"),
LOG.debug("Policy %(policy)s vnf is at %(status)s",
{'policy': policy['id'],
'status': new_status})
return result
@ -600,11 +600,11 @@ class VNFMPlugin(vnfm_db.VNFMPluginDb, VNFMMgmtMixin):
policy=policy,
region_name=region_name
)
LOG.debug(_("Policy %s action is started successfully"),
LOG.debug("Policy %s action is started successfully",
policy['id'])
return last_event_id
except Exception as e:
LOG.error(_("Policy %s action is failed to start"),
LOG.error("Policy %s action is failed to start",
policy)
with excutils.save_and_reraise_exception():
vnf['status'] = constants.ERROR
@ -617,7 +617,7 @@ class VNFMPlugin(vnfm_db.VNFMPluginDb, VNFMMgmtMixin):
# wait
def _vnf_policy_action_wait():
try:
LOG.debug(_("Policy %s action is in progress"),
LOG.debug("Policy %s action is in progress",
policy['id'])
mgmt_url = self._vnf_manager.invoke(
infra_driver,
@ -629,12 +629,12 @@ class VNFMPlugin(vnfm_db.VNFMPluginDb, VNFMMgmtMixin):
region_name=region_name,
last_event_id=last_event_id
)
LOG.debug(_("Policy %s action is completed successfully"),
LOG.debug("Policy %s action is completed successfully",
policy['id'])
_handle_vnf_scaling_post(constants.ACTIVE, mgmt_url)
# TODO(kanagaraj-manickam): Add support for config and mgmt
except Exception as e:
LOG.error(_("Policy %s action is failed to complete"),
LOG.error("Policy %s action is failed to complete",
policy['id'])
with excutils.save_and_reraise_exception():
self.set_vnf_error_status_reason(
@ -750,7 +750,7 @@ class VNFMPlugin(vnfm_db.VNFMPluginDb, VNFMMgmtMixin):
if not policy_:
if action not in constants.DEFAULT_ALARM_ACTIONS:
policy_ = self.get_vnf_policy(context, action, vnf_id)
LOG.debug(_("Trigger %s is validated successfully"), trigger)
LOG.debug("Trigger %s is validated successfully", trigger)
return policy_, action_
# validate url
@ -781,7 +781,7 @@ class VNFMPlugin(vnfm_db.VNFMPluginDb, VNFMMgmtMixin):
vnf_dict = trigger['vnf']
if trigger['action_name'] in constants.DEFAULT_ALARM_ACTIONS:
action = trigger['action_name']
LOG.debug(_('vnf for monitoring: %s'), vnf_dict)
LOG.debug('vnf for monitoring: %s', vnf_dict)
self._vnf_action.invoke(
action, 'execute_action', plugin=self, context=context,
vnf_dict=vnf_dict, args={})
@ -791,8 +791,8 @@ class VNFMPlugin(vnfm_db.VNFMPluginDb, VNFMMgmtMixin):
bckend_policy_type = bckend_policy['type']
if bckend_policy_type == constants.POLICY_SCALING:
if vnf_dict['status'] != constants.ACTIVE:
LOG.info(_("Scaling Policy action skipped due to status:"
" %(status)s for vnf: %(vnfid)s"),
LOG.info("Scaling Policy action skipped due to status:"
" %(status)s for vnf: %(vnfid)s",
{"status": vnf_dict['status'],
"vnfid": vnf_dict['id']})
return

View File

@ -44,7 +44,7 @@ class VNFActionLogOnly(abstract_action.AbstractPolicyAction):
def execute_action(self, plugin, context, vnf_dict, args):
vnf_id = vnf_dict['id']
LOG.error(_('vnf %s dead'), vnf_id)
LOG.error('vnf %s dead', vnf_id)
_log_monitor_events(context,
vnf_dict,
"ActionLogOnly invoked")
@ -69,4 +69,4 @@ class VNFActionLogAndKill(abstract_action.AbstractPolicyAction):
if vnf_dict['attributes'].get('monitoring_policy'):
plugin._vnf_monitor.mark_dead(vnf_dict['id'])
plugin.delete_vnf(context, vnf_id)
LOG.error(_('vnf %s dead'), vnf_id)
LOG.error('vnf %s dead', vnf_id)

View File

@ -46,14 +46,14 @@ class VNFActionRespawn(abstract_action.AbstractPolicyAction):
def execute_action(self, plugin, context, vnf_dict, args):
vnf_id = vnf_dict['id']
LOG.info(_('vnf %s is dead and needs to be respawned'), vnf_id)
LOG.info('vnf %s is dead and needs to be respawned', vnf_id)
attributes = vnf_dict['attributes']
vim_id = vnf_dict['vim_id']
def _update_failure_count():
failure_count = int(attributes.get('failure_count', '0')) + 1
failure_count_str = str(failure_count)
LOG.debug(_("vnf %(vnf_id)s failure count %(failure_count)s"),
LOG.debug("vnf %(vnf_id)s failure count %(failure_count)s",
{'vnf_id': vnf_id, 'failure_count': failure_count_str})
attributes['failure_count'] = failure_count_str
attributes['dead_instance_id_' + failure_count_str] = vnf_dict[
@ -69,13 +69,13 @@ class VNFActionRespawn(abstract_action.AbstractPolicyAction):
heatclient = hc.HeatClient(auth_attr=vim_auth,
region_name=region_name)
heatclient.delete(vnf_dict['instance_id'])
LOG.debug(_("Heat stack %s delete initiated"), vnf_dict[
'instance_id'])
LOG.debug("Heat stack %s delete initiated",
vnf_dict['instance_id'])
_log_monitor_events(context, vnf_dict, "ActionRespawnHeat invoked")
def _respawn_vnf():
update_vnf_dict = plugin.create_vnf_sync(context, vnf_dict)
LOG.info(_('respawned new vnf %s'), update_vnf_dict['id'])
LOG.info('respawned new vnf %s', update_vnf_dict['id'])
plugin.config_vnf(context, update_vnf_dict)
return update_vnf_dict
@ -87,8 +87,8 @@ class VNFActionRespawn(abstract_action.AbstractPolicyAction):
_delete_heat_stack(vim_res['vim_auth'])
updated_vnf = _respawn_vnf()
plugin.add_vnf_to_monitor(context, updated_vnf)
LOG.debug(_("VNF %s added to monitor thread"), updated_vnf[
'id'])
LOG.debug("VNF %s added to monitor thread",
updated_vnf['id'])
if vnf_dict['attributes'].get('alarming_policy'):
_delete_heat_stack(vim_res['vim_auth'])
vnf_dict['attributes'].pop('alarming_policy')

View File

@ -39,8 +39,8 @@ class VimClient(object):
constants.NFVO)
if not vim_id:
LOG.debug(_('VIM id not provided. Attempting to find default '
'VIM information'))
LOG.debug('VIM id not provided. Attempting to find default '
'VIM information')
try:
vim_info = nfvo_plugin.get_default_vim(context)
except Exception as ex:
@ -52,7 +52,7 @@ class VimClient(object):
mask_password=False)
except Exception:
raise nfvo.VimNotFoundException(vim_id=vim_id)
LOG.debug(_('VIM info found for vim id %s'), vim_id)
LOG.debug('VIM info found for vim id %s', vim_id)
if region_name and not self.region_valid(vim_info['placement_attr']
['regions'], region_name):
raise nfvo.VimRegionNotFoundException(region_name=region_name)
@ -101,7 +101,7 @@ class VimClient(object):
f = fernet.Fernet(vim_key)
if not f:
LOG.warning(_('Unable to decode VIM auth'))
LOG.warning('Unable to decode VIM auth')
raise nfvo.VimNotFoundException(
'Unable to decode VIM auth key')
return f.decrypt(cred)
@ -109,7 +109,7 @@ class VimClient(object):
@staticmethod
def _find_vim_key(vim_id):
key_file = os.path.join(CONF.vim_keys.openstack, vim_id)
LOG.debug(_('Attempting to open key file for vim id %s'), vim_id)
LOG.debug('Attempting to open key file for vim id %s', vim_id)
with open(key_file, 'r') as f:
return f.read()
LOG.warning(_('VIM id invalid or key not found for %s'), vim_id)
LOG.warning('VIM id invalid or key not found for %s', vim_id)

View File

@ -145,7 +145,7 @@ class Server(object):
family = info[0]
bind_addr = info[-1]
except Exception:
LOG.exception(_("Unable to listen on %(host)s:%(port)s"),
LOG.exception("Unable to listen on %(host)s:%(port)s",
{'host': host, 'port': port})
sys.exit(1)
@ -355,7 +355,7 @@ class Request(webob.Request):
def get_content_type(self):
allowed_types = ("application/json")
if "Content-Type" not in self.headers:
LOG.debug(_("Missing Content-Type"))
LOG.debug("Missing Content-Type")
return None
_type = self.content_type
if _type in allowed_types:
@ -533,23 +533,23 @@ class RequestDeserializer(object):
try:
content_type = request.best_match_content_type()
except exception.InvalidContentType:
LOG.debug(_("Unrecognized Content-Type provided in request"))
LOG.debug("Unrecognized Content-Type provided in request")
return {}
if content_type is None:
LOG.debug(_("No Content-Type provided in request"))
LOG.debug("No Content-Type provided in request")
return {}
if not len(request.body) > 0:
LOG.debug(_("Empty body provided in request"))
LOG.debug("Empty body provided in request")
return {}
try:
deserializer = self.get_body_deserializer(content_type)
except exception.InvalidContentType:
with excutils.save_and_reraise_exception():
LOG.debug(_("Unable to deserialize body as provided "
"Content-Type"))
LOG.debug("Unable to deserialize body as provided "
"Content-Type")
return deserializer.deserialize(request.body, action)
@ -780,28 +780,28 @@ class Resource(Application):
def __call__(self, request):
"""WSGI method that controls (de)serialization and method dispatch."""
LOG.info(_("%(method)s %(url)s"), {"method": request.method,
LOG.info("%(method)s %(url)s", {"method": request.method,
"url": request.url})
try:
action, args, accept = self.deserializer.deserialize(request)
except exception.InvalidContentType:
msg = _("Unsupported Content-Type")
LOG.exception(_("InvalidContentType: %s"), msg)
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
LOG.exception("InvalidContentType: Unsupported Content-Type")
return Fault(webob.exc.HTTPBadRequest(
explanation=_("Unsupported Content-Type")))
except exception.MalformedRequestBody:
msg = _("Malformed request body")
LOG.exception(_("MalformedRequestBody: %s"), msg)
return Fault(webob.exc.HTTPBadRequest(explanation=msg))
LOG.exception("MalformedRequestBody: Malformed request body")
return Fault(webob.exc.HTTPBadRequest(
explanation=_("Malformed request body")))
try:
action_result = self.dispatch(request, action, args)
except webob.exc.HTTPException as ex:
LOG.info(_("HTTP exception thrown: %s"), ex)
LOG.info("HTTP exception thrown: %s", ex)
action_result = Fault(ex,
self._fault_body_function)
except Exception:
LOG.exception(_("Internal error"))
LOG.exception("Internal error")
# Do not include the traceback to avoid returning it to clients.
action_result = Fault(webob.exc.HTTPServerError(),
self._fault_body_function)