diff --git a/gbpservice/_i18n.py b/gbpservice/_i18n.py index 1f62d1ca2..dd01c2910 100644 --- a/gbpservice/_i18n.py +++ b/gbpservice/_i18n.py @@ -27,16 +27,6 @@ _C = _translators.contextual_form # The plural translation function using the name "_P" _P = _translators.plural_form -# Translators for log levels. -# -# The abbreviated names are meant to reflect the usual use of a short -# name like '_'. The "L" is for "log" and the other letter comes from -# the level. -_LI = _translators.log_info -_LW = _translators.log_warning -_LE = _translators.log_error -_LC = _translators.log_critical - def get_available_languages(): return oslo_i18n.get_available_languages(DOMAIN) diff --git a/gbpservice/common/utils.py b/gbpservice/common/utils.py index 2a0dc511f..8e8a60996 100644 --- a/gbpservice/common/utils.py +++ b/gbpservice/common/utils.py @@ -17,7 +17,6 @@ from oslo_utils import importutils from stevedore import driver from gbpservice._i18n import _ -from gbpservice._i18n import _LE LOG = logging.getLogger(__name__) cfg.CONF.import_group('keystone_authtoken', 'keystonemiddleware.auth_token') @@ -42,8 +41,8 @@ def load_plugin(namespace, plugin): try: plugin_class = importutils.import_class(plugin) except ImportError as e2: - LOG.exception(_LE("Error loading plugin by name, %s"), e1) - LOG.exception(_LE("Error loading plugin by class, %s"), e2) + LOG.exception("Error loading plugin by name, %s", e1) + LOG.exception("Error loading plugin by class, %s", e2) raise ImportError(_("Plugin not found.")) return plugin_class() diff --git a/gbpservice/contrib/nfp/config_orchestrator/common/common.py b/gbpservice/contrib/nfp/config_orchestrator/common/common.py index a7249a893..74e5690e2 100644 --- a/gbpservice/contrib/nfp/config_orchestrator/common/common.py +++ b/gbpservice/contrib/nfp/config_orchestrator/common/common.py @@ -10,7 +10,6 @@ # License for the specific language governing permissions and limitations # under the License. 
-from gbpservice._i18n import _LE from gbpservice.contrib.nfp.config_orchestrator.common import ( topics as a_topics) from gbpservice.nfp.core import log as nfp_logging @@ -115,7 +114,7 @@ def get_dhcp_agent_host(config): if agents: return agents[0].get('host', None) except Exception as exc: - LOG.error(_LE("Failed to get dhcp agent host : %(exc)s"), + LOG.error("Failed to get dhcp agent host : %(exc)s", {'exc': exc}) @@ -168,8 +167,8 @@ def get_network_function_details(context, network_function_id): return network_function_details['network_function'] except Exception as e: - LOG.error(_LE("Failed to get network function details of " - "network_function_id %(network_function_id)s : %(ex)s "), + LOG.error("Failed to get network function details of " + "network_function_id %(network_function_id)s : %(ex)s ", {'ex': e, 'network_function_id': network_function_id}) @@ -193,7 +192,7 @@ def get_network_function_map(context, network_function_id): LOG.debug(msg) return request_data except Exception as e: - LOG.error(_LE("Failed to get network function map of " - "network_function_id %(network_function_id)s : %(ex)s "), + LOG.error("Failed to get network function map of " + "network_function_id %(network_function_id)s : %(ex)s ", {'ex': e, 'network_function_id': network_function_id}) return request_data diff --git a/gbpservice/contrib/nfp/config_orchestrator/handlers/config/firewall.py b/gbpservice/contrib/nfp/config_orchestrator/handlers/config/firewall.py index 51a94b1d1..e1e47c07a 100644 --- a/gbpservice/contrib/nfp/config_orchestrator/handlers/config/firewall.py +++ b/gbpservice/contrib/nfp/config_orchestrator/handlers/config/firewall.py @@ -13,7 +13,6 @@ import ast import copy -from gbpservice._i18n import _LI from gbpservice.contrib.nfp.config_orchestrator.common import common from gbpservice.nfp.common import constants as const from gbpservice.nfp.common import data_formatter as df @@ -158,8 +157,8 @@ class FwAgent(firewall_db.Firewall_db_mixin): nf_id = 
self._fetch_nf_from_resource_desc(firewall["description"]) nfp_context['log_context']['meta_id'] = nf_id nf = common.get_network_function_details(context, nf_id) - LOG.info(_LI("Received RPC CREATE FIREWALL for " - "Firewall: %(firewall)s"), + LOG.info("Received RPC CREATE FIREWALL for " + "Firewall: %(firewall)s", {'firewall': firewall}) body = self._data_wrapper(context, firewall, host, nf, 'CREATE') transport.send_request_to_configurator(self._conf, @@ -172,8 +171,8 @@ class FwAgent(firewall_db.Firewall_db_mixin): nf_id = self._fetch_nf_from_resource_desc(firewall["description"]) nfp_context['log_context']['meta_id'] = nf_id nf = common.get_network_function_details(context, nf_id) - LOG.info(_LI("Received RPC DELETE FIREWALL for " - "Firewall: %(firewall)s"), + LOG.info("Received RPC DELETE FIREWALL for " + "Firewall: %(firewall)s", {'firewall': firewall}) body = self._data_wrapper(context, firewall, host, nf, 'DELETE') transport.send_request_to_configurator(self._conf, diff --git a/gbpservice/contrib/nfp/config_orchestrator/handlers/config/loadbalancerv2.py b/gbpservice/contrib/nfp/config_orchestrator/handlers/config/loadbalancerv2.py index af38e1cbf..424265d6e 100644 --- a/gbpservice/contrib/nfp/config_orchestrator/handlers/config/loadbalancerv2.py +++ b/gbpservice/contrib/nfp/config_orchestrator/handlers/config/loadbalancerv2.py @@ -13,7 +13,6 @@ import ast import copy -from gbpservice._i18n import _LI from gbpservice.contrib.nfp.config_orchestrator.common import common from gbpservice.contrib.nfp.config_orchestrator.common import lbv2_constants from gbpservice.nfp.common import constants as const @@ -277,7 +276,7 @@ class Lbv2Agent(loadbalancer_dbv2.LoadBalancerPluginDbv2): def create_loadbalancer(self, context, loadbalancer, driver_name, allocate_vip=True): nfp_context = module_context.init() - LOG.info(_LI("Received RPC CREATE LOADBALANCER for LB:%(lb)s"), + LOG.info("Received RPC CREATE LOADBALANCER for LB:%(lb)s", {'lb': loadbalancer}) # Fetch nf_id from 
description of the resource nf_id = self._fetch_nf_from_resource_desc(loadbalancer["description"]) @@ -306,8 +305,8 @@ class Lbv2Agent(loadbalancer_dbv2.LoadBalancerPluginDbv2): def delete_loadbalancer(self, context, loadbalancer, delete_vip_port=True): nfp_context = module_context.init() - LOG.info(_LI("Received RPC DELETE LOADBALANCER for LB:" - "%(lb)s"), {'lb': loadbalancer}) + LOG.info("Received RPC DELETE LOADBALANCER for LB:" + "%(lb)s", {'lb': loadbalancer}) # Fetch nf_id from description of the resource nf_id = self._fetch_nf_from_resource_desc(loadbalancer["description"]) nfp_context['log_context']['meta_id'] = nf_id @@ -320,7 +319,7 @@ class Lbv2Agent(loadbalancer_dbv2.LoadBalancerPluginDbv2): @log_helpers.log_method_call def create_listener(self, context, listener): nfp_context = module_context.init() - LOG.info(_LI("Received RPC CREATE LISTENER for Listener:%(listener)s"), + LOG.info("Received RPC CREATE LISTENER for Listener:%(listener)s", {'listener': listener}) loadbalancer = listener['loadbalancer'] # Fetch nf_id from description of the resource @@ -348,7 +347,7 @@ class Lbv2Agent(loadbalancer_dbv2.LoadBalancerPluginDbv2): @log_helpers.log_method_call def delete_listener(self, context, listener): nfp_context = module_context.init() - LOG.info(_LI("Received RPC DELETE LISTENER for Listener:%(listener)s"), + LOG.info("Received RPC DELETE LISTENER for Listener:%(listener)s", {'listener': listener}) loadbalancer = listener['loadbalancer'] # Fetch nf_id from description of the resource @@ -363,7 +362,7 @@ class Lbv2Agent(loadbalancer_dbv2.LoadBalancerPluginDbv2): @log_helpers.log_method_call def create_pool(self, context, pool): nfp_context = module_context.init() - LOG.info(_LI("Received RPC CREATE POOL for Pool:%(pool)s"), + LOG.info("Received RPC CREATE POOL for Pool:%(pool)s", {'pool': pool}) loadbalancer = pool['loadbalancer'] # Fetch nf_id from description of the resource @@ -391,7 +390,7 @@ class 
Lbv2Agent(loadbalancer_dbv2.LoadBalancerPluginDbv2): @log_helpers.log_method_call def delete_pool(self, context, pool): nfp_context = module_context.init() - LOG.info(_LI("Received RPC DELETE POOL for Pool:%(pool)s"), + LOG.info("Received RPC DELETE POOL for Pool:%(pool)s", {'pool': pool}) loadbalancer = pool['loadbalancer'] # Fetch nf_id from description of the resource @@ -406,7 +405,7 @@ class Lbv2Agent(loadbalancer_dbv2.LoadBalancerPluginDbv2): @log_helpers.log_method_call def create_member(self, context, member): nfp_context = module_context.init() - LOG.info(_LI("Received RPC CREATE MEMBER for Member:%(member)s"), + LOG.info("Received RPC CREATE MEMBER for Member:%(member)s", {'member': member}) loadbalancer = member['pool']['loadbalancer'] # Fetch nf_id from description of the resource @@ -434,7 +433,7 @@ class Lbv2Agent(loadbalancer_dbv2.LoadBalancerPluginDbv2): @log_helpers.log_method_call def delete_member(self, context, member): nfp_context = module_context.init() - LOG.info(_LI("Received RPC DELETE MEMBER for Member:%(member)s"), + LOG.info("Received RPC DELETE MEMBER for Member:%(member)s", {'member': member}) loadbalancer = member['pool']['loadbalancer'] # Fetch nf_id from description of the resource @@ -449,7 +448,7 @@ class Lbv2Agent(loadbalancer_dbv2.LoadBalancerPluginDbv2): @log_helpers.log_method_call def create_healthmonitor(self, context, healthmonitor): nfp_context = module_context.init() - LOG.info(_LI("Received RPC CREATE HEALTH MONITOR for HM:%(hm)s"), + LOG.info("Received RPC CREATE HEALTH MONITOR for HM:%(hm)s", {'hm': healthmonitor}) loadbalancer = healthmonitor['pool']['loadbalancer'] # Fetch nf_id from description of the resource @@ -478,7 +477,7 @@ class Lbv2Agent(loadbalancer_dbv2.LoadBalancerPluginDbv2): @log_helpers.log_method_call def delete_healthmonitor(self, context, healthmonitor): nfp_context = module_context.init() - LOG.info(_LI("Received RPC DELETE HEALTH MONITOR for HM:%(hm)s"), + LOG.info("Received RPC DELETE HEALTH 
MONITOR for HM:%(hm)s", {'hm': healthmonitor}) loadbalancer = healthmonitor['pool']['loadbalancer'] # Fetch nf_id from description of the resource diff --git a/gbpservice/contrib/nfp/config_orchestrator/handlers/config/vpn.py b/gbpservice/contrib/nfp/config_orchestrator/handlers/config/vpn.py index d7b75c9f2..86930f379 100644 --- a/gbpservice/contrib/nfp/config_orchestrator/handlers/config/vpn.py +++ b/gbpservice/contrib/nfp/config_orchestrator/handlers/config/vpn.py @@ -13,7 +13,6 @@ import ast import copy -from gbpservice._i18n import _LI from gbpservice.contrib.nfp.config_orchestrator.common import common from gbpservice.nfp.common import constants as const from gbpservice.nfp.common import data_formatter as df @@ -149,7 +148,7 @@ class VpnAgent(vpn_db.VPNPluginDb, vpn_db.VPNPluginRpcDbMixin): @log_helpers.log_method_call def vpnservice_updated(self, context, **kwargs): nfp_context = module_context.init() - LOG.info(_LI("Received RPC VPN SERVICE UPDATED with data:%(data)s"), + LOG.info("Received RPC VPN SERVICE UPDATED with data:%(data)s", {'data': kwargs}) # Fetch nf_id from description of the resource nf_id = self._fetch_nf_from_resource_desc(kwargs[ diff --git a/gbpservice/contrib/nfp/config_orchestrator/handlers/notification/handler.py b/gbpservice/contrib/nfp/config_orchestrator/handlers/notification/handler.py index 21e606590..1d822fd91 100644 --- a/gbpservice/contrib/nfp/config_orchestrator/handlers/notification/handler.py +++ b/gbpservice/contrib/nfp/config_orchestrator/handlers/notification/handler.py @@ -13,7 +13,6 @@ import sys import traceback -from gbpservice._i18n import _LI from gbpservice.contrib.nfp.config_orchestrator.common import ( lbv2_constants as lbv2_const) from gbpservice.contrib.nfp.config_orchestrator.common import ( @@ -41,8 +40,8 @@ class RpcHandler(object): def network_function_notification(self, context, notification_data): module_context.init() try: - LOG.info(_LI("Received NETWORK FUNCTION NOTIFICATION:" - "%(notification)s"), + 
LOG.info("Received NETWORK FUNCTION NOTIFICATION:" + "%(notification)s", {'notification': notification_data['notification']}) if notification_data['info']['service_type'] is not None: handler = NaasNotificationHandler(self.conf, self.sc) @@ -78,9 +77,9 @@ class FirewallNotifier(object): firewall_id = resource_data['firewall_id'] status = resource_data['status'] - LOG.info(_LI("Received firewall configuration create complete API, " - "making an RPC call set firewall status for " - "firewall:%(firewall)s and status: %(status)s"), + LOG.info("Received firewall configuration create complete API, " + "making an RPC call set firewall status for " + "firewall:%(firewall)s and status: %(status)s", {'firewall': firewall_id, 'status': status}) @@ -103,9 +102,9 @@ class FirewallNotifier(object): resource_data = notification['data'] firewall_id = resource_data['firewall_id'] - LOG.info(_LI("Received firewall_configuration_delete_complete API, " - "making an RPC call firewall_deleted for firewall:" - "%(firewall)s "), + LOG.info("Received firewall_configuration_delete_complete API, " + "making an RPC call firewall_deleted for firewall:" + "%(firewall)s ", {'firewall': firewall_id}) # RPC call to plugin to update firewall deleted @@ -143,9 +142,9 @@ class LoadbalancerV2Notifier(object): obj_p_status = resource_data['provisioning_status'] obj_o_status = resource_data['operating_status'] - LOG.info(_LI("Received LB's update_status API. Making an " - "update_status RPC call to plugin for %(obj_type)s:" - "%(obj_id)s with status: %(status)s"), + LOG.info("Received LB's update_status API. Making an " + "update_status RPC call to plugin for %(obj_type)s:" + "%(obj_id)s with status: %(status)s", {'obj_type': obj_type, 'obj_id': obj_id, 'status': obj_p_status}) @@ -192,9 +191,9 @@ class VpnNotifier(object): nfp_context['log_context'] = logging_context status = resource_data['status'] - LOG.info(_LI("Received VPN's update_status API. 
" - "Making an update_status RPC cast to plugin for object" - "with status: %(status)s"), + LOG.info("Received VPN's update_status API. " + "Making an update_status RPC cast to plugin for object" + "with status: %(status)s", {'status': status}) rpcClient = transport.RPCClient(a_topics.VPN_NFP_PLUGIN_TOPIC) rpcClient.cctxt.cast(context, 'update_status', diff --git a/gbpservice/contrib/nfp/configurator/agents/agent_base.py b/gbpservice/contrib/nfp/configurator/agents/agent_base.py index ccb84f386..0f6cc811a 100644 --- a/gbpservice/contrib/nfp/configurator/agents/agent_base.py +++ b/gbpservice/contrib/nfp/configurator/agents/agent_base.py @@ -11,7 +11,6 @@ # under the License. -from gbpservice._i18n import _LI from gbpservice.contrib.nfp.configurator.lib import constants as const from gbpservice.nfp.core import log as nfp_logging from gbpservice.nfp.core import module as nfp_api @@ -105,7 +104,7 @@ class AgentBaseRPCManager(object): # Multiple request data blobs needs batch processing. Send batch # processing event or do direct processing of single request data blob if (len(sa_req_list) > 1): - LOG.info(_LI("Creating event PROCESS BATCH")) + LOG.info("Creating event PROCESS BATCH") args_dict = { 'sa_req_list': sa_req_list, 'notification_data': notification_data diff --git a/gbpservice/contrib/nfp/configurator/agents/firewall.py b/gbpservice/contrib/nfp/configurator/agents/firewall.py index 8b4a08d25..a255e7b29 100644 --- a/gbpservice/contrib/nfp/configurator/agents/firewall.py +++ b/gbpservice/contrib/nfp/configurator/agents/firewall.py @@ -16,7 +16,6 @@ import oslo_messaging as messaging import requests import six -from gbpservice._i18n import _LI from gbpservice.contrib.nfp.configurator.agents import agent_base from gbpservice.contrib.nfp.configurator.lib import constants as common_const from gbpservice.contrib.nfp.configurator.lib import fw_constants as const @@ -62,9 +61,9 @@ class FwaasRpcSender(agent_base.AgentBaseEventHandler): 'notification_type': ( 
'set_firewall_status')}}] } - LOG.info(_LI("Sending Notification 'Set Firewall Status' to " - "Orchestrator for firewall: %(fw_id)s with status:" - "%(status)s"), + LOG.info("Sending Notification 'Set Firewall Status' to " + "Orchestrator for firewall: %(fw_id)s with status:" + "%(status)s", {'fw_id': firewall_id, 'status': status}) self.notify._notification(msg) @@ -86,8 +85,8 @@ class FwaasRpcSender(agent_base.AgentBaseEventHandler): 'notification_type': ( 'firewall_deleted')}}] } - LOG.info(_LI("Sending Notification 'Firewall Deleted' to " - "Orchestrator for firewall: %(fw_id)s "), + LOG.info("Sending Notification 'Firewall Deleted' to " + "Orchestrator for firewall: %(fw_id)s ", {'fw_id': firewall_id}) self.notify._notification(msg) @@ -153,7 +152,7 @@ class FWaasRpcManager(agent_base.AgentBaseRPCManager): """ - LOG.info(_LI("Received request 'Create Firewall'.")) + LOG.info("Received request 'Create Firewall'.") self._create_event(context, firewall, host, const.FIREWALL_CREATE_EVENT) @@ -161,7 +160,7 @@ class FWaasRpcManager(agent_base.AgentBaseRPCManager): """ Receives request to update firewall from configurator """ - LOG.info(_LI("Received request 'Update Firewall'.")) + LOG.info("Received request 'Update Firewall'.") self._create_event(context, firewall, host, const.FIREWALL_UPDATE_EVENT) @@ -169,7 +168,7 @@ class FWaasRpcManager(agent_base.AgentBaseRPCManager): """ Receives request to delete firewall from configurator """ - LOG.info(_LI("Received request 'Delete Firewall'.")) + LOG.info("Received request 'Delete Firewall'.") self._create_event(context, firewall, host, const.FIREWALL_DELETE_EVENT) @@ -256,8 +255,8 @@ class FWaasEventHandler(nfp_api.NfpEventHandler): service_vendor = agent_info['service_vendor'] service_feature = agent_info.get('service_feature', '') driver = self._get_driver(service_vendor, service_feature) - LOG.info(_LI("Invoking driver with service vendor:" - "%(service_vendor)s "), + LOG.info("Invoking driver with service vendor:" + 
"%(service_vendor)s ", {'service_vendor': service_vendor}) self.method = getattr(driver, "%s" % (ev.id.lower())) self.invoke_driver_for_plugin_api(ev) @@ -435,7 +434,7 @@ def load_drivers(conf): driver_obj = driver_name(conf=conf) drivers[service_type] = driver_obj - LOG.info(_LI("Firewall loaded drivers:%(drivers)s"), + LOG.info("Firewall loaded drivers:%(drivers)s", {'drivers': drivers}) return drivers diff --git a/gbpservice/contrib/nfp/configurator/agents/generic_config.py b/gbpservice/contrib/nfp/configurator/agents/generic_config.py index cf394b680..a13fddaf6 100644 --- a/gbpservice/contrib/nfp/configurator/agents/generic_config.py +++ b/gbpservice/contrib/nfp/configurator/agents/generic_config.py @@ -14,7 +14,6 @@ import copy import os import six -from gbpservice._i18n import _LI from gbpservice.contrib.nfp.configurator.agents import agent_base from gbpservice.contrib.nfp.configurator.lib import ( generic_config_constants as gen_cfg_const) @@ -142,8 +141,8 @@ class GenericConfigRpcManager(agent_base.AgentBaseRPCManager): Returns: None """ - LOG.info(_LI("Received configure health monitor api for nfds:" - "%(nfds)s"), + LOG.info("Received configure health monitor api for nfds:" + "%(nfds)s", {'nfds': resource_data['nfds']}) resource_data['fail_count'] = 0 self._send_event(context, @@ -161,8 +160,8 @@ class GenericConfigRpcManager(agent_base.AgentBaseRPCManager): Returns: None """ - LOG.info(_LI("Received clear health monitor api for nfds:" - "%(nfds)s"), + LOG.info("Received clear health monitor api for nfds:" + "%(nfds)s", {'nfds': resource_data['nfds']}) event_key = resource_data['nfds'][0]['vmid'] poll_event_id = gen_cfg_const.EVENT_CONFIGURE_HEALTHMONITOR @@ -470,8 +469,8 @@ def load_drivers(conf): for service_type, driver_name in six.iteritems(drivers): driver_obj = driver_name(conf=conf) drivers[service_type] = driver_obj - LOG.info(_LI("Generic config agent loaded drivers drivers:" - "%(drivers)s"), + LOG.info("Generic config agent loaded drivers 
drivers:" + "%(drivers)s", {'drivers': drivers}) return drivers diff --git a/gbpservice/contrib/nfp/configurator/agents/loadbalancer_v2.py b/gbpservice/contrib/nfp/configurator/agents/loadbalancer_v2.py index 113669ec7..5c3f1b5fa 100644 --- a/gbpservice/contrib/nfp/configurator/agents/loadbalancer_v2.py +++ b/gbpservice/contrib/nfp/configurator/agents/loadbalancer_v2.py @@ -13,7 +13,6 @@ import os import six -from gbpservice._i18n import _LI from gbpservice.contrib.nfp.configurator.agents import agent_base from gbpservice.contrib.nfp.configurator.lib import data_filter from gbpservice.contrib.nfp.configurator.lib import lbv2_constants as lb_const @@ -64,9 +63,9 @@ class LBaaSV2RpcSender(data_filter.Filter): operating_status, obj_type: obj}}] } - LOG.info(_LI("Sending Notification 'Update Status' " - "for resource: %(resource)s with Provisioning status:" - "%(p_status)s and Operating status:%(o_status)s"), + LOG.info("Sending Notification 'Update Status' " + "for resource: %(resource)s with Provisioning status:" + "%(p_status)s and Operating status:%(o_status)s", {'resource': agent_info['resource'], 'p_status': provisioning_status, 'o_status': operating_status}) @@ -90,8 +89,8 @@ class LBaaSV2RpcSender(data_filter.Filter): 'update_pool_stats'), 'pool': pool_id}}] } - LOG.info(_LI("Sending Notification 'Update Pool Stats' " - "for pool: %(pool_id)s with stats:%(stats)s"), + LOG.info("Sending Notification 'Update Pool Stats' " + "for pool: %(pool_id)s with stats:%(stats)s", {'pool_id': pool_id, 'stats': stats}) self.notify._notification(msg) @@ -149,8 +148,8 @@ class LBaaSv2RpcManager(agent_base.AgentBaseRPCManager): Returns: None """ - LOG.info(_LI("Received request 'Create Loadbalancer' for LB:%(lb)s " - "with driver:%(driver_name)s"), + LOG.info("Received request 'Create Loadbalancer' for LB:%(lb)s " + "with driver:%(driver_name)s", {'lb': loadbalancer['id'], 'driver_name': driver_name}) arg_dict = {'context': context, @@ -177,8 +176,8 @@ class 
LBaaSv2RpcManager(agent_base.AgentBaseRPCManager): lb_const.OLD_LOADBALANCER: old_loadbalancer, lb_const.LOADBALANCER: loadbalancer, } - LOG.info(_LI("Received request 'Update Loadbalancer' for LB:%(lb)s " - "with new Param:%(new_val)s and old Param:%(old_val)s"), + LOG.info("Received request 'Update Loadbalancer' for LB:%(lb)s " + "with new Param:%(new_val)s and old Param:%(old_val)s", {'lb': loadbalancer['id'], 'new_val': new_val, 'old_val': old_val}) @@ -195,7 +194,7 @@ class LBaaSv2RpcManager(agent_base.AgentBaseRPCManager): Returns: None """ - LOG.info(_LI("Received request 'Delete Loadbalancer' for LB:%(lb)s "), + LOG.info("Received request 'Delete Loadbalancer' for LB:%(lb)s ", {'lb': loadbalancer['id']}) arg_dict = {'context': context, @@ -214,7 +213,7 @@ class LBaaSv2RpcManager(agent_base.AgentBaseRPCManager): Returns: None """ - LOG.info(_LI("Received request 'Create Listener' for LB:%(lb)s "), + LOG.info("Received request 'Create Listener' for LB:%(lb)s ", {'lb': listener['loadbalancer_id']}) arg_dict = {'context': context, lb_const.LISTENER: listener, @@ -235,9 +234,9 @@ class LBaaSv2RpcManager(agent_base.AgentBaseRPCManager): """ old_val, new_val = self.get_diff_of_dict(old_listener, listener) - LOG.info(_LI("Received request 'Update Listener' for Listener:" - "%(listener)s in LB:%(lb_id)s with new Param:" - "%(new_val)s and old Param:%(old_val)s"), + LOG.info("Received request 'Update Listener' for Listener:" + "%(listener)s in LB:%(lb_id)s with new Param:" + "%(new_val)s and old Param:%(old_val)s", {'lb_id': listener['loadbalancer_id'], 'listener': listener['id'], 'old_val': old_val, @@ -260,7 +259,7 @@ class LBaaSv2RpcManager(agent_base.AgentBaseRPCManager): Returns: None """ - LOG.info(_LI("Received request 'Delete Listener' for LB:%(lb)s "), + LOG.info("Received request 'Delete Listener' for LB:%(lb)s ", {'lb': listener['loadbalancer_id']}) arg_dict = {'context': context, lb_const.LISTENER: listener, @@ -279,7 +278,7 @@ class 
LBaaSv2RpcManager(agent_base.AgentBaseRPCManager): Returns: None """ - LOG.info(_LI("Received request 'Create Pool' for Pool:%(pool_id)s "), + LOG.info("Received request 'Create Pool' for Pool:%(pool_id)s ", {'pool_id': pool['id']}) arg_dict = {'context': context, lb_const.POOL: pool @@ -301,9 +300,9 @@ class LBaaSv2RpcManager(agent_base.AgentBaseRPCManager): """ old_val, new_val = self.get_diff_of_dict(old_pool, pool) - LOG.info(_LI("Received request 'Update Pool' for Pool:%(pool)s " - "in LB:%(lb_id)s with new Param:%(new_val)s and " - "old Param:%(old_val)s"), + LOG.info("Received request 'Update Pool' for Pool:%(pool)s " + "in LB:%(lb_id)s with new Param:%(new_val)s and " + "old Param:%(old_val)s", {'pool': pool['id'], 'lb_id': pool['loadbalancer_id'], 'old_val': old_val, @@ -326,7 +325,7 @@ class LBaaSv2RpcManager(agent_base.AgentBaseRPCManager): Returns: None """ - LOG.info(_LI("Received request 'Delete Pool' for Pool:%(pool_id)s "), + LOG.info("Received request 'Delete Pool' for Pool:%(pool_id)s ", {'pool_id': pool['id']}) arg_dict = {'context': context, lb_const.POOL: pool, @@ -345,7 +344,7 @@ class LBaaSv2RpcManager(agent_base.AgentBaseRPCManager): Returns: None """ - LOG.info(_LI("Received request 'Create Member' for Pool:%(pool_id)s "), + LOG.info("Received request 'Create Member' for Pool:%(pool_id)s ", {'pool_id': member['pool_id']}) arg_dict = {'context': context, lb_const.MEMBER: member, @@ -366,9 +365,9 @@ class LBaaSv2RpcManager(agent_base.AgentBaseRPCManager): """ old_val, new_val = self.get_diff_of_dict(old_member, member) - LOG.info(_LI("Received request 'Update Member' for Member:" - "%(member_id)s in Pool:%(pool_id)s with new Param:" - "%(new_val)s and old Param:%(old_val)s"), + LOG.info("Received request 'Update Member' for Member:" + "%(member_id)s in Pool:%(pool_id)s with new Param:" + "%(new_val)s and old Param:%(old_val)s", {'pool_id': member['pool_id'], 'member_id': member['id'], 'old_val': old_val, @@ -391,8 +390,8 @@ class 
LBaaSv2RpcManager(agent_base.AgentBaseRPCManager): Returns: None """ - LOG.info(_LI("Received request 'Delete Member' for Pool:" - "%(pool_id)s "), + LOG.info("Received request 'Delete Member' for Pool:" + "%(pool_id)s ", {'pool_id': member['pool_id']}) arg_dict = {'context': context, lb_const.MEMBER: member, @@ -412,8 +411,8 @@ class LBaaSv2RpcManager(agent_base.AgentBaseRPCManager): Returns: None """ - LOG.info(_LI("Received request 'Create Pool Health Monitor' for" - "Health monitor:%(hm)s"), + LOG.info("Received request 'Create Pool Health Monitor' for" + "Health monitor:%(hm)s", {'hm': healthmonitor['id']}) arg_dict = {'context': context, lb_const.HEALTHMONITOR: healthmonitor @@ -437,9 +436,9 @@ class LBaaSv2RpcManager(agent_base.AgentBaseRPCManager): """ old_val, new_val = self.get_diff_of_dict( old_healthmonitor, healthmonitor) - LOG.info(_LI("Received request 'Update Pool Health Monitor' for " - "Health monitor:%(hm)s with new Param:%(new_val)s and " - "old Param:%(old_val)s"), + LOG.info("Received request 'Update Pool Health Monitor' for " + "Health monitor:%(hm)s with new Param:%(new_val)s and " + "old Param:%(old_val)s", {'hm': healthmonitor['id'], 'old_val': old_val, 'new_val': new_val}) @@ -463,8 +462,8 @@ class LBaaSv2RpcManager(agent_base.AgentBaseRPCManager): Returns: None """ - LOG.info(_LI("Received request 'Delete Pool Health Monitor' for " - "Health monitor:%(hm)s"), + LOG.info("Received request 'Delete Pool Health Monitor' for " + "Health monitor:%(hm)s", {'hm': healthmonitor['id']}) arg_dict = {'context': context, lb_const.HEALTHMONITOR: healthmonitor @@ -484,7 +483,7 @@ class LBaaSv2RpcManager(agent_base.AgentBaseRPCManager): Returns: None """ - LOG.info(_LI("Received request 'Agent Updated' ")) + LOG.info("Received request 'Agent Updated' ") arg_dict = {'context': context, 'payload': payload} self._send_event(lb_const.EVENT_AGENT_UPDATED_V2, arg_dict) diff --git a/gbpservice/contrib/nfp/configurator/agents/vpn.py 
b/gbpservice/contrib/nfp/configurator/agents/vpn.py index 921b283fb..83d955379 100644 --- a/gbpservice/contrib/nfp/configurator/agents/vpn.py +++ b/gbpservice/contrib/nfp/configurator/agents/vpn.py @@ -14,7 +14,6 @@ import os import six -from gbpservice._i18n import _LI from gbpservice.contrib.nfp.configurator.agents import agent_base from gbpservice.contrib.nfp.configurator.drivers.base import base_driver from gbpservice.contrib.nfp.configurator.lib import data_filter @@ -57,7 +56,7 @@ class VpnaasRpcSender(data_filter.Filter): Returns: Dictionary of vpn service type which matches with the filters. """ - LOG.info(_LI("Sending RPC for GET VPN SERVICES with %(filters)s "), + LOG.info("Sending RPC for GET VPN SERVICES with %(filters)s ", {'filters': filters}) return self.call( context, @@ -75,8 +74,8 @@ class VpnaasRpcSender(data_filter.Filter): Returns: dictionary of vpnservice """ - LOG.info(_LI("Sending RPC for GET VPN SERVICECONTEXT with " - "Filters:%(filters)s "), + LOG.info("Sending RPC for GET VPN SERVICECONTEXT with " + "Filters:%(filters)s ", {'filters': filters}) return self.call( context, @@ -88,8 +87,8 @@ class VpnaasRpcSender(data_filter.Filter): Get list of ipsec conns with filters specified. 
""" - LOG.info(_LI("Sending RPC for GET IPSEC CONNS with Filters:" - "%(filters)s "), + LOG.info("Sending RPC for GET IPSEC CONNS with Filters:" + "%(filters)s ", {'filters': filters}) return self.call( context, @@ -111,8 +110,8 @@ class VpnaasRpcSender(data_filter.Filter): 'notification_type': ( 'update_status')}}] } - LOG.info(_LI("Sending Notification 'Update Status' with " - "status:%(status)s "), + LOG.info("Sending Notification 'Update Status' with " + "status:%(status)s ", {'status': status}) self._notify._notification(msg) @@ -127,8 +126,8 @@ class VpnaasRpcSender(data_filter.Filter): 'notification_type': ( 'ipsec_site_conn_deleted')}}] } - LOG.info(_LI("Sending Notification 'Ipsec Site Conn Deleted' " - "for resource:%(resource_id)s "), + LOG.info("Sending Notification 'Ipsec Site Conn Deleted' " + "for resource:%(resource_id)s ", {'resource_id': resource_id}) self._notify._notification(msg) @@ -172,8 +171,8 @@ class VPNaasRpcManager(agent_base.AgentBaseRPCManager): Returns: None """ - LOG.info(_LI("Received request 'VPN Service Updated'." - "for API '%(api)s'"), + LOG.info("Received request 'VPN Service Updated'." 
+ "for API '%(api)s'", {'api': resource_data.get('reason', '')}) arg_dict = {'context': context, 'resource_data': resource_data} @@ -243,8 +242,8 @@ class VPNaasEventHandler(nfp_api.NfpEventHandler): service_vendor = agent_info['service_vendor'] service_feature = agent_info['service_feature'] driver = self._get_driver(service_vendor, service_feature) - LOG.info(_LI("Invoking driver with service vendor:" - "%(service_vendor)s "), + LOG.info("Invoking driver with service vendor:" + "%(service_vendor)s ", {'service_vendor': service_vendor}) setattr(VPNaasEventHandler, "service_driver", driver) self._vpnservice_updated(ev, driver) diff --git a/gbpservice/contrib/nfp/configurator/drivers/firewall/vyos/vyos_fw_driver.py b/gbpservice/contrib/nfp/configurator/drivers/firewall/vyos/vyos_fw_driver.py index 9ee3106ef..e4a2bb1c8 100644 --- a/gbpservice/contrib/nfp/configurator/drivers/firewall/vyos/vyos_fw_driver.py +++ b/gbpservice/contrib/nfp/configurator/drivers/firewall/vyos/vyos_fw_driver.py @@ -16,7 +16,6 @@ import time from oslo_serialization import jsonutils -from gbpservice._i18n import _LI from gbpservice.contrib.nfp.configurator.drivers.base import base_driver from gbpservice.contrib.nfp.configurator.drivers.firewall.vyos import ( vyos_fw_constants as const) @@ -135,8 +134,8 @@ class FwGenericConfigDriver(base_driver.BaseDriver): self.port, 'change_auth') data = {} - LOG.info(_LI("Initiating POST request to configure Authentication " - "service at mgmt ip:%(mgmt_ip)s"), + LOG.info("Initiating POST request to configure Authentication " + "service at mgmt ip:%(mgmt_ip)s", {'mgmt_ip': mgmt_ip}) err_msg = ("Change Auth POST request to the VyOS firewall " "service at %s failed. 
" % url) @@ -188,8 +187,8 @@ class FwGenericConfigDriver(base_driver.BaseDriver): 'add_static_ip') data = jsonutils.dumps(static_ips_info) - LOG.info(_LI("Initiating POST request to add static IPs for primary " - "service at mgmt ip:%(mgmt_ip)s"), + LOG.info("Initiating POST request to add static IPs for primary " + "service at mgmt ip:%(mgmt_ip)s", {'mgmt_ip': mgmt_ip}) err_msg = ("Static IP POST request to the VyOS firewall " "service at %s failed. " % url) @@ -267,8 +266,8 @@ class FwGenericConfigDriver(base_driver.BaseDriver): url = const.request_url % (mgmt_ip, self.port, 'add_rule') data = jsonutils.dumps(rule_info) - LOG.info(_LI("Initiating POST request to add persistent rule to " - "primary service at mgmt ip: %(mgmt_ip)s"), + LOG.info("Initiating POST request to add persistent rule to " + "primary service at mgmt ip: %(mgmt_ip)s", {'mgmt_ip': mgmt_ip}) err_msg = ("Add persistent rule POST request to the VyOS firewall " "service at %s failed. " % url) @@ -322,8 +321,8 @@ class FwGenericConfigDriver(base_driver.BaseDriver): 'del_static_ip') data = jsonutils.dumps(static_ips_info) - LOG.info(_LI("Initiating POST request to remove static IPs for " - "primary service at mgmt ip: %(mgmt_ip)s"), + LOG.info("Initiating POST request to remove static IPs for " + "primary service at mgmt ip: %(mgmt_ip)s", {'mgmt_ip': mgmt_ip}) err_msg = ("Static IP DELETE request to the VyOS firewall " @@ -374,8 +373,8 @@ class FwGenericConfigDriver(base_driver.BaseDriver): if result_static_ips != common_const.STATUS_SUCCESS: return result_static_ips else: - LOG.info(_LI("Successfully removed static IPs. " - "Result: %(result_static_ips)s"), + LOG.info("Successfully removed static IPs. 
" + "Result: %(result_static_ips)s", {'result_static_ips': result_static_ips}) rule_info = dict( @@ -384,8 +383,8 @@ class FwGenericConfigDriver(base_driver.BaseDriver): mgmt_ip = resource_data['mgmt_ip'] - LOG.info(_LI("Initiating DELETE persistent rule for primary " - "service at mgmt ip: %(mgmt_ip)s"), + LOG.info("Initiating DELETE persistent rule for primary " + "service at mgmt ip: %(mgmt_ip)s", {'mgmt_ip': mgmt_ip}) url = const.request_url % (mgmt_ip, self.port, 'delete_rule') data = jsonutils.dumps(rule_info) @@ -447,8 +446,8 @@ class FwGenericConfigDriver(base_driver.BaseDriver): route_info.append({'source_cidr': source_cidr, 'gateway_ip': gateway_ip}) data = jsonutils.dumps(route_info) - LOG.info(_LI("Initiating POST request to configure route of primary " - "service at mgmt ip: %(mgmt_ip)s"), + LOG.info("Initiating POST request to configure route of primary " + "service at mgmt ip: %(mgmt_ip)s", {'mgmt_ip': mgmt_ip}) err_msg = ("Configure routes POST request to the VyOS firewall " @@ -497,8 +496,8 @@ class FwGenericConfigDriver(base_driver.BaseDriver): for source_cidr in source_cidrs: route_info.append({'source_cidr': source_cidr}) data = jsonutils.dumps(route_info) - LOG.info(_LI("Initiating Delete route to primary " - "service at mgmt ip: %(mgmt_ip)s"), + LOG.info("Initiating Delete route to primary " + "service at mgmt ip: %(mgmt_ip)s", {'mgmt_ip': mgmt_ip}) err_msg = ("Routes DELETE request to the VyOS firewall " @@ -558,8 +557,8 @@ class FwaasDriver(FwGenericConfigDriver): headers = self._parse_vm_context(context['agent_info']['context']) resource_data = self.parse.parse_data(common_const.FIREWALL, context) - LOG.info(_LI("Processing request 'Create Firewall' in FWaaS Driver " - "for Firewall ID: %(f_id)s"), + LOG.info("Processing request 'Create Firewall' in FWaaS Driver " + "for Firewall ID: %(f_id)s", {'f_id': firewall['id']}) mgmt_ip = resource_data.get('mgmt_ip') url = const.request_url % (mgmt_ip, @@ -580,7 +579,7 @@ class 
FwaasDriver(FwGenericConfigDriver): return common_const.STATUS_ERROR if resp is common_const.STATUS_SUCCESS: - LOG.info(_LI("Configured firewall successfully at URL: %(url)s "), + LOG.info("Configured firewall successfully at URL: %(url)s ", {'url': url}) return common_const.STATUS_ACTIVE @@ -604,8 +603,8 @@ class FwaasDriver(FwGenericConfigDriver): """ headers = self._parse_vm_context(context['agent_info']['context']) - LOG.info(_LI("Processing request 'Update Firewall' in FWaaS Driver " - "for Firewall ID:%(f_id)s"), + LOG.info("Processing request 'Update Firewall' in FWaaS Driver " + "for Firewall ID:%(f_id)s", {'f_id': firewall['id']}) resource_data = self.parse.parse_data(common_const.FIREWALL, context) mgmt_ip = resource_data.get('mgmt_ip') @@ -650,8 +649,8 @@ class FwaasDriver(FwGenericConfigDriver): """ headers = self._parse_vm_context(context['agent_info']['context']) - LOG.info(_LI("Processing request 'Delete Firewall' in FWaaS Driver " - "for Firewall ID:%(f_id)s"), + LOG.info("Processing request 'Delete Firewall' in FWaaS Driver " + "for Firewall ID:%(f_id)s", {'f_id': firewall['id']}) resource_data = self.parse.parse_data(common_const.FIREWALL, context) mgmt_ip = resource_data.get('mgmt_ip') diff --git a/gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/local_cert_manager.py b/gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/local_cert_manager.py index d3920d81d..05f7525c8 100644 --- a/gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/local_cert_manager.py +++ b/gbpservice/contrib/nfp/configurator/drivers/loadbalancer/v2/haproxy/local_cert_manager.py @@ -19,8 +19,6 @@ import uuid from octavia.certificates.common import local as local_common from octavia.certificates.manager import cert_mgr from octavia.common import exceptions -from octavia.i18n import _LE -from octavia.i18n import _LI from oslo_config import cfg from gbpservice.nfp.core import log as nfp_logging @@ -55,9 +53,9 @@ class 
LocalCertManager(cert_mgr.CertManager): cert_ref = str(uuid.uuid4()) filename_base = os.path.join(CONF.certificates.storage_path, cert_ref) - LOG.info(_LI( + LOG.info( "Storing certificate data on the local filesystem." - )) + ) try: filename_certificate = "{0}.crt".format(filename_base, cert_ref) with open(filename_certificate, 'w') as cert_file: @@ -78,7 +76,7 @@ class LocalCertManager(cert_mgr.CertManager): with open(filename_pkp, 'w') as pass_file: pass_file.write(private_key_passphrase) except IOError as ioe: - LOG.error(_LE("Failed to store certificate.")) + LOG.error("Failed to store certificate.") raise exceptions.CertificateStorageException(message=ioe.message) return cert_ref @@ -94,9 +92,9 @@ class LocalCertManager(cert_mgr.CertManager): certificate data :raises CertificateStorageException: if certificate retrieval fails """ - LOG.info(_LI( - "Loading certificate {0} from the local filesystem." - ).format(cert_ref)) + LOG.info( + "Loading certificate {0} from the local filesystem.".format( + cert_ref)) filename_base = os.path.join(CONF.certificates.storage_path, cert_ref) @@ -111,9 +109,8 @@ class LocalCertManager(cert_mgr.CertManager): with open(filename_certificate, 'r') as cert_file: cert_data['certificate'] = cert_file.read() except IOError: - LOG.error(_LE( - "Failed to read certificate for {0}." - ).format(cert_ref)) + LOG.error( + "Failed to read certificate for {0}.".format(cert_ref)) raise exceptions.CertificateStorageException( msg="Certificate could not be read." ) @@ -121,9 +118,8 @@ class LocalCertManager(cert_mgr.CertManager): with open(filename_private_key, 'r') as key_file: cert_data['private_key'] = key_file.read() except IOError: - LOG.error(_LE( - "Failed to read private key for {0}." - ).format(cert_ref)) + LOG.error( + "Failed to read private key for {0}.".format(cert_ref)) raise exceptions.CertificateStorageException( msg="Private Key could not be read." 
) @@ -151,9 +147,9 @@ class LocalCertManager(cert_mgr.CertManager): :raises CertificateStorageException: if certificate deletion fails """ - LOG.info(_LI( - "Deleting certificate {0} from the local filesystem." - ).format(cert_ref)) + LOG.info( + "Deleting certificate {0} from the local filesystem.".format( + cert_ref)) filename_base = os.path.join(CONF.certificates.storage_path, cert_ref) @@ -170,7 +166,6 @@ class LocalCertManager(cert_mgr.CertManager): if os.path.exists(filename_pkp): os.remove(filename_pkp) except IOError as ioe: - LOG.error(_LE( - "Failed to delete certificate {0}." - ).format(cert_ref)) + LOG.error( + "Failed to delete certificate {0}.".format(cert_ref)) raise exceptions.CertificateStorageException(message=ioe.message) diff --git a/gbpservice/contrib/nfp/configurator/drivers/vpn/vyos/vyos_vpn_driver.py b/gbpservice/contrib/nfp/configurator/drivers/vpn/vyos/vyos_vpn_driver.py index b820fd44e..0dc662885 100644 --- a/gbpservice/contrib/nfp/configurator/drivers/vpn/vyos/vyos_vpn_driver.py +++ b/gbpservice/contrib/nfp/configurator/drivers/vpn/vyos/vyos_vpn_driver.py @@ -16,7 +16,6 @@ import requests import six import time -from neutron._i18n import _LI from gbpservice.contrib.nfp.configurator.drivers.base import base_driver from gbpservice.contrib.nfp.configurator.drivers.vpn.vyos import ( @@ -354,8 +353,8 @@ class VpnGenericConfigDriver(base_driver.BaseDriver): 'change_auth') data = {} - LOG.info(_LI("Initiating POST request to configure Authentication " - "service at mgmt ip:%(mgmt_ip)s"), + LOG.info("Initiating POST request to configure Authentication " + "service at mgmt ip:%(mgmt_ip)s", {'mgmt_ip': mgmt_ip}) err_msg = ("Change Auth POST request to the VyOS firewall " "service at %s failed. 
" % url) diff --git a/gbpservice/contrib/nfp/configurator/modules/configurator.py b/gbpservice/contrib/nfp/configurator/modules/configurator.py index 2856aabc0..7c03b72e5 100644 --- a/gbpservice/contrib/nfp/configurator/modules/configurator.py +++ b/gbpservice/contrib/nfp/configurator/modules/configurator.py @@ -12,7 +12,6 @@ from oslo_log import helpers as log_helpers -from gbpservice._i18n import _LI from gbpservice.contrib.nfp.configurator.lib import constants as const from gbpservice.contrib.nfp.configurator.lib import demuxer from gbpservice.contrib.nfp.configurator.lib import utils @@ -139,9 +138,9 @@ class ConfiguratorRpcManager(object): log_info = request_data.get('info') logging_context = log_info['context'].get('logging_context', {}) nfp_context['log_context'] = logging_context - LOG.info(_LI("Received RPC CREATE NETWORK FUNCTION DEVICE CONFIG " - "for %(service_type)s, NFI: %(nfi)s, " - "NF_ID: %(nf_id)s"), + LOG.info("Received RPC CREATE NETWORK FUNCTION DEVICE CONFIG " + "for %(service_type)s, NFI: %(nfi)s, " + "NF_ID: %(nf_id)s", {'service_type': request_data['info']['service_type'], 'nfi': request_data['info']['context']['nfi_id'], 'nf_id': request_data['info']['context']['nf_id']}) @@ -173,9 +172,9 @@ class ConfiguratorRpcManager(object): log_info = request_data.get('info') logging_context = log_info['context'].get('logging_context', {}) nfp_context['log_context'] = logging_context - LOG.info(_LI("Received RPC DELETE NETWORK FUNCTION DEVICE CONFIG " - "for %(service_type)s, NFI: %(nfi)s, " - "NF_ID: %(nf_id)s"), + LOG.info("Received RPC DELETE NETWORK FUNCTION DEVICE CONFIG " + "for %(service_type)s, NFI: %(nfi)s, " + "NF_ID: %(nf_id)s", {'service_type': request_data['info']['service_type'], 'nfi': request_data['info']['context']['nfi_id'], 'nf_id': request_data['info']['context']['nf_id']}) @@ -207,9 +206,9 @@ class ConfiguratorRpcManager(object): log_info = request_data.get('info') logging_context = log_info['context'].get('logging_context', {}) 
nfp_context['log_context'] = logging_context - LOG.info(_LI("Received RPC UPDATE NETWORK FUNCTION DEVICE CONFIG " - "for %(service_type)s, NFI: %(nfi)s, " - "NF_ID: %(nf_id)s"), + LOG.info("Received RPC UPDATE NETWORK FUNCTION DEVICE CONFIG " + "for %(service_type)s, NFI: %(nfi)s, " + "NF_ID: %(nf_id)s", {'service_type': request_data['info']['service_type'], 'nfi': request_data['info']['context']['nfi_id'], 'nf_id': request_data['info']['context']['nf_id']}) @@ -241,8 +240,8 @@ class ConfiguratorRpcManager(object): log_info = request_data.get('info') logging_context = log_info['context'].get('logging_context', {}) nfp_context['log_context'] = logging_context - LOG.info(_LI("Received RPC CREATE NETWORK FUNCTION CONFIG " - "for %(service_type)s "), + LOG.info("Received RPC CREATE NETWORK FUNCTION CONFIG " + "for %(service_type)s ", {'service_type': request_data['info']['service_type']}) self._invoke_service_agent('create', request_data) @@ -272,8 +271,8 @@ class ConfiguratorRpcManager(object): log_info = request_data.get('info') logging_context = log_info['context'].get('logging_context', {}) nfp_context['log_context'] = logging_context - LOG.info(_LI("Received RPC DELETE NETWORK FUNCTION CONFIG " - "for %(service_type)s "), + LOG.info("Received RPC DELETE NETWORK FUNCTION CONFIG " + "for %(service_type)s ", {'service_type': request_data['info']['service_type']}) self._invoke_service_agent('delete', request_data) @@ -303,8 +302,8 @@ class ConfiguratorRpcManager(object): log_info = request_data.get('info') logging_context = log_info['context'].get('logging_context', {}) nfp_context['log_context'] = logging_context - LOG.info(_LI("Received RPC UPDATE NETWORK FUNCTION CONFIG " - "for %(service_type)s "), + LOG.info("Received RPC UPDATE NETWORK FUNCTION CONFIG " + "for %(service_type)s ", {'service_type': request_data['info']['service_type']}) self._invoke_service_agent('update', request_data) @@ -326,7 +325,7 @@ class ConfiguratorRpcManager(object): """ 
module_context.init() - LOG.info(_LI("Received RPC GET NOTIFICATIONS ")) + LOG.info("Received RPC GET NOTIFICATIONS ") events = self.sc.get_stashed_events() notifications = [] for event in events: diff --git a/gbpservice/contrib/nfp/service_plugins/firewall/nfp_fwaas_plugin.py b/gbpservice/contrib/nfp/service_plugins/firewall/nfp_fwaas_plugin.py index 95a52fc30..f1106751f 100644 --- a/gbpservice/contrib/nfp/service_plugins/firewall/nfp_fwaas_plugin.py +++ b/gbpservice/contrib/nfp/service_plugins/firewall/nfp_fwaas_plugin.py @@ -13,7 +13,6 @@ from keystoneclient import exceptions as k_exceptions from keystoneclient.v2_0 import client as keyclient -from gbpservice._i18n import _LE from gbpservice.common import utils from gbpservice.contrib.nfp.config_orchestrator.common import topics from gbpservice.nfp.core import log as nfp_logging @@ -264,10 +263,10 @@ def _resource_owner_tenant_id(): return tenant.id except k_exceptions.NotFound: with excutils.save_and_reraise_exception(reraise=True): - LOG.error(_LE('No tenant with name %s exists.'), tenant) + LOG.error('No tenant with name %s exists.', tenant) except k_exceptions.NoUniqueMatch: with excutils.save_and_reraise_exception(reraise=True): - LOG.error(_LE('Multiple tenants matches found for %s'), tenant) + LOG.error('Multiple tenants matches found for %s', tenant) def _get_router_for_floatingip(self, context, internal_port, diff --git a/gbpservice/contrib/nfp_service/reference_configurator/controllers/controller.py b/gbpservice/contrib/nfp_service/reference_configurator/controllers/controller.py index bb930913b..f803803f2 100644 --- a/gbpservice/contrib/nfp_service/reference_configurator/controllers/controller.py +++ b/gbpservice/contrib/nfp_service/reference_configurator/controllers/controller.py @@ -18,9 +18,6 @@ import subprocess import time import yaml -from gbpservice._i18n import _LE -from gbpservice._i18n import _LI - from oslo_log import log as logging import oslo_serialization.jsonutils as jsonutils @@ -54,7 
+51,7 @@ class Controller(rest.RestController): out2 = subprocess.Popen('dhclient eth0', shell=True, stdout=subprocess.PIPE).stdout.read() output = "%s\n%s\n%s" % (ip_a, out1, out2) - LOG.info(_LI("Dhclient on eth0, result: %(output)s"), + LOG.info("Dhclient on eth0, result: %(output)s", {'output': output}) except Exception as err: msg = ( @@ -161,8 +158,8 @@ class Controller(rest.RestController): return {'failure_desc': {'msg': msg}} def _configure_healthmonitor(self, config_data): - LOG.info(_LI("Configures healthmonitor with configuration " - "data : %(healthmonitor_data)s "), + LOG.info("Configures healthmonitor with configuration " + "data : %(healthmonitor_data)s ", {'healthmonitor_data': config_data}) def _configure_interfaces(self, config_data): @@ -173,10 +170,10 @@ class Controller(rest.RestController): out3 = subprocess.Popen('cat /etc/network/interfaces', shell=True, stdout=subprocess.PIPE).stdout.read() output = "%s\n%s\n%s" % (out1, out2, out3) - LOG.info(_LI("Dhclient on eth0, result: %(initial_data)s"), + LOG.info("Dhclient on eth0, result: %(initial_data)s", {'initial_data': output}) - LOG.info(_LI("Configures interfaces with configuration " - "data : %(interface_data)s "), + LOG.info("Configures interfaces with configuration " + "data : %(interface_data)s ", {'interface_data': config_data}) def get_source_cidrs_and_gateway_ip(self, route_info): @@ -190,8 +187,8 @@ class Controller(rest.RestController): return source_cidrs, gateway_ip def _add_routes(self, route_info): - LOG.info(_LI("Configuring routes with configuration " - "data : %(route_data)s "), + LOG.info("Configuring routes with configuration " + "data : %(route_data)s ", {'route_data': route_info['resource_data']}) source_cidrs, gateway_ip = self.get_source_cidrs_and_gateway_ip( route_info) @@ -205,8 +202,8 @@ class Controller(rest.RestController): try: interface_number_string = source_interface.split("eth", 1)[1] except IndexError: - LOG.error(_LE("Retrieved wrong interface %(interface)s 
for " - "configuring routes"), + LOG.error("Retrieved wrong interface %(interface)s for " + "configuring routes", {'interface': source_interface}) try: routing_table_number = 20 + int(interface_number_string) @@ -222,7 +219,7 @@ class Controller(rest.RestController): routing_table_number, gateway_ip) default_route_commands.append(ip_route_command) output = "%s\n%s" % (out1, out2) - LOG.info(_LI("Static route configuration result: %(output)s"), + LOG.info("Static route configuration result: %(output)s", {'output': output}) except Exception as ex: raise Exception(_("Failed to add static routes: %(ex)s") % { @@ -231,7 +228,7 @@ class Controller(rest.RestController): try: out = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE).stdout.read() - LOG.info(_LI("Static route configuration result: %(output)s"), + LOG.info("Static route configuration result: %(output)s", {'output': out}) except Exception as ex: raise Exception(_("Failed to add static routes: %(ex)s") % { @@ -269,9 +266,9 @@ class Controller(rest.RestController): "IP Address")) def _apply_user_config(self, config_data): - LOG.info(_LI("Applying user config with configuration " - "type : %(config_type)s and " - "configuration data : %(config_data)s "), + LOG.info("Applying user config with configuration " + "type : %(config_type)s and " + "configuration data : %(config_data)s ", {'config_type': config_data['resource'], 'config_data': config_data['resource_data']}) service_config = config_data['resource_data'][ diff --git a/gbpservice/contrib/nfp_service/reference_configurator/scripts/configure_fw_rules.py b/gbpservice/contrib/nfp_service/reference_configurator/scripts/configure_fw_rules.py index 354faef3d..32d7b3620 100644 --- a/gbpservice/contrib/nfp_service/reference_configurator/scripts/configure_fw_rules.py +++ b/gbpservice/contrib/nfp_service/reference_configurator/scripts/configure_fw_rules.py @@ -15,8 +15,6 @@ from subprocess import PIPE from subprocess import Popen import sys -from 
oslo_log._i18n import _LE -from oslo_log._i18n import _LI from oslo_log import log as logging from oslo_serialization import jsonutils @@ -29,10 +27,10 @@ class ConfigureIPtables(object): ps = Popen(["sysctl", "net.ipv4.ip_forward"], stdout=PIPE) output = ps.communicate()[0] if "0" in output: - LOG.info(_LI("Enabling IP forwarding ...")) + LOG.info("Enabling IP forwarding ...") call(["sysctl", "-w", "net.ipv4.ip_forward=1"]) else: - LOG.info(_LI("IP forwarding already enabled")) + LOG.info("IP forwarding already enabled") try: self.rules_json = jsonutils.loads(json_blob) except ValueError: @@ -44,7 +42,7 @@ class ConfigureIPtables(object): # check if chain is present if not create new chain if "testchain" not in output: - LOG.info(_LI("Creating new chain ...")) + LOG.info("Creating new chain ...") call(["iptables", "-F"]) call(["iptables", "-N", "testchain"]) call( @@ -57,10 +55,10 @@ class ConfigureIPtables(object): # return # Update chain with new rules - LOG.info(_LI("Updating chain with new rules ...")) + LOG.info("Updating chain with new rules ...") count = 0 for rule in self.rules_json.get('rules'): - LOG.info(_LI("adding rule %(count)d"), {'count': count}) + LOG.info("adding rule %(count)d", {'count': count}) try: action_values = ["LOG", "ACCEPT"] action = rule['action'].upper() @@ -82,14 +80,14 @@ class ConfigureIPtables(object): "-j", action], stdout=PIPE) output = ps.communicate()[0] if output: - LOG.error(_LE("Unable to add rule to chain due to: %(msg)s"), + LOG.error("Unable to add rule to chain due to: %(msg)s", {'msg': output}) count = count + 1 ps = Popen(["iptables", "-A", "testchain", "-m", "state", "--state", "ESTABLISHED,RELATED", "-j", "ACCEPT"], stdout=PIPE) output = ps.communicate()[0] if output: - LOG.error(_LE("Unable to add rule to chain due to: %(output)s"), + LOG.error("Unable to add rule to chain due to: %(output)s", {'output': output}) diff --git a/gbpservice/network/neutronv2/local_api.py b/gbpservice/network/neutronv2/local_api.py 
index eb744db8a..753cecc23 100644 --- a/gbpservice/network/neutronv2/local_api.py +++ b/gbpservice/network/neutronv2/local_api.py @@ -24,8 +24,6 @@ from neutron_lib.plugins import directory from oslo_log import log as logging from oslo_utils import excutils -from gbpservice._i18n import _LE -from gbpservice._i18n import _LW from gbpservice.neutron.extensions import group_policy as gp_ext from gbpservice.neutron.extensions import servicechain as sc_ext from gbpservice.neutron.services.grouppolicy.common import exceptions as exc @@ -237,7 +235,7 @@ class LocalAPI(object): # plugins are loaded to grab and store plugin. l3_plugin = directory.get_plugin(nl_const.L3) if not l3_plugin: - LOG.error(_LE("No L3 router service plugin found.")) + LOG.error("No L3 router service plugin found.") raise exc.GroupPolicyDeploymentError() return l3_plugin @@ -248,7 +246,7 @@ class LocalAPI(object): # plugins are loaded to grab and store plugin. qos_plugin = directory.get_plugin(pconst.QOS) if not qos_plugin: - LOG.error(_LE("No QoS service plugin found.")) + LOG.error("No QoS service plugin found.") raise exc.GroupPolicyDeploymentError() return qos_plugin @@ -258,7 +256,7 @@ class LocalAPI(object): # plugins are loaded to grab and store plugin. group_policy_plugin = directory.get_plugin(pconst.GROUP_POLICY) if not group_policy_plugin: - LOG.error(_LE("No GroupPolicy service plugin found.")) + LOG.error("No GroupPolicy service plugin found.") raise exc.GroupPolicyDeploymentError() return group_policy_plugin @@ -268,7 +266,7 @@ class LocalAPI(object): # plugins are loaded to grab and store plugin. 
servicechain_plugin = directory.get_plugin(pconst.SERVICECHAIN) if not servicechain_plugin: - LOG.error(_LE("No Servicechain service plugin found.")) + LOG.error("No Servicechain service plugin found.") raise exc.GroupPolicyDeploymentError() return servicechain_plugin @@ -374,7 +372,7 @@ class LocalAPI(object): self._delete_resource(self._core_plugin, plugin_context, 'port', port_id) except n_exc.PortNotFound: - LOG.warning(_LW('Port %s already deleted'), port_id) + LOG.warning('Port %s already deleted', port_id) def _get_subnet(self, plugin_context, subnet_id): return self._get_resource(self._core_plugin, plugin_context, 'subnet', @@ -398,7 +396,7 @@ class LocalAPI(object): self._delete_resource(self._core_plugin, plugin_context, 'subnet', subnet_id) except n_exc.SubnetNotFound: - LOG.warning(_LW('Subnet %s already deleted'), subnet_id) + LOG.warning('Subnet %s already deleted', subnet_id) def _get_network(self, plugin_context, network_id): return self._get_resource(self._core_plugin, plugin_context, 'network', @@ -422,7 +420,7 @@ class LocalAPI(object): self._delete_resource(self._core_plugin, plugin_context, 'network', network_id) except n_exc.NetworkNotFound: - LOG.warning(_LW('Network %s already deleted'), network_id) + LOG.warning('Network %s already deleted', network_id) def _get_router(self, plugin_context, router_id): return self._get_resource(self._l3_plugin, plugin_context, 'router', @@ -452,7 +450,7 @@ class LocalAPI(object): self._l3_plugin.remove_router_interface(plugin_context, router_id, interface_info) except l3.RouterInterfaceNotFoundForSubnet: - LOG.warning(_LW('Router interface already deleted for subnet %s'), + LOG.warning('Router interface already deleted for subnet %s', interface_info) return @@ -472,7 +470,7 @@ class LocalAPI(object): self._delete_resource(self._l3_plugin, plugin_context, 'router', router_id) except l3.RouterNotFound: - LOG.warning(_LW('Router %s already deleted'), router_id) + LOG.warning('Router %s already deleted', 
router_id) def _get_sg(self, plugin_context, sg_id): return self._get_resource( @@ -496,7 +494,7 @@ class LocalAPI(object): self._delete_resource(self._core_plugin, plugin_context, 'security_group', sg_id) except ext_sg.SecurityGroupNotFound: - LOG.warning(_LW('Security Group %s already deleted'), sg_id) + LOG.warning('Security Group %s already deleted', sg_id) def _get_sg_rule(self, plugin_context, sg_rule_id): return self._get_resource( @@ -513,7 +511,7 @@ class LocalAPI(object): return self._create_resource(self._core_plugin, plugin_context, 'security_group_rule', attrs) except ext_sg.SecurityGroupRuleExists as ex: - LOG.warning(_LW('Security Group already exists %s'), ex.message) + LOG.warning('Security Group already exists %s', ex.message) return def _update_sg_rule(self, plugin_context, sg_rule_id, attrs): @@ -526,7 +524,7 @@ class LocalAPI(object): self._delete_resource(self._core_plugin, plugin_context, 'security_group_rule', sg_rule_id) except ext_sg.SecurityGroupRuleNotFound: - LOG.warning(_LW('Security Group Rule %s already deleted'), + LOG.warning('Security Group Rule %s already deleted', sg_rule_id) def _get_fip(self, plugin_context, fip_id): @@ -551,7 +549,7 @@ class LocalAPI(object): self._delete_resource(self._l3_plugin, plugin_context, 'floatingip', fip_id) except l3.FloatingIPNotFound: - LOG.warning(_LW('Floating IP %s Already deleted'), fip_id) + LOG.warning('Floating IP %s Already deleted', fip_id) def _get_address_scope(self, plugin_context, address_scope_id): return self._get_resource(self._core_plugin, plugin_context, @@ -575,7 +573,7 @@ class LocalAPI(object): self._delete_resource(self._core_plugin, plugin_context, 'address_scope', address_scope_id) except address_scope.AddressScopeNotFound: - LOG.warning(_LW('Address Scope %s already deleted'), + LOG.warning('Address Scope %s already deleted', address_scope_id) def _get_subnetpool(self, plugin_context, subnetpool_id): @@ -600,7 +598,7 @@ class LocalAPI(object): 
self._delete_resource(self._core_plugin, plugin_context, 'subnetpool', subnetpool_id) except n_exc.SubnetpoolNotFound: - LOG.warning(_LW('Subnetpool %s already deleted'), subnetpool_id) + LOG.warning('Subnetpool %s already deleted', subnetpool_id) def _get_l2_policy(self, plugin_context, l2p_id): return self._get_resource(self._group_policy_plugin, plugin_context, @@ -619,7 +617,7 @@ class LocalAPI(object): self._delete_resource(self._qos_plugin, plugin_context, 'policy', qos_policy_id) except n_exc.QosPolicyNotFound: - LOG.warning(_LW('QoS Policy %s already deleted'), qos_policy_id) + LOG.warning('QoS Policy %s already deleted', qos_policy_id) def _get_qos_rules(self, plugin_context, filters=None): filters = filters or {} @@ -639,7 +637,7 @@ class LocalAPI(object): 'policy_bandwidth_limit_rule', rule_id, qos_policy_id) except n_exc.QosRuleNotFound: - LOG.warning(_LW('QoS Rule %s already deleted'), rule_id) + LOG.warning('QoS Rule %s already deleted', rule_id) def _get_l2_policies(self, plugin_context, filters=None): filters = filters or {} @@ -659,7 +657,7 @@ class LocalAPI(object): self._delete_resource(self._group_policy_plugin, plugin_context, 'l2_policy', l2p_id, False) except gp_ext.L2PolicyNotFound: - LOG.warning(_LW('L2 Policy %s already deleted'), l2p_id) + LOG.warning('L2 Policy %s already deleted', l2p_id) def _get_l3_policy(self, plugin_context, l3p_id): return self._get_resource(self._group_policy_plugin, plugin_context, @@ -683,7 +681,7 @@ class LocalAPI(object): self._delete_resource(self._group_policy_plugin, plugin_context, 'l3_policy', l3p_id, False) except gp_ext.L3PolicyNotFound: - LOG.warning(_LW('L3 Policy %s already deleted'), l3p_id) + LOG.warning('L3 Policy %s already deleted', l3p_id) def _get_external_segment(self, plugin_context, es_id): return self._get_resource(self._group_policy_plugin, plugin_context, @@ -707,7 +705,7 @@ class LocalAPI(object): self._delete_resource(self._group_policy_plugin, plugin_context, 'external_segment', 
es_id, False) except gp_ext.ExternalSegmentNotFound: - LOG.warning(_LW('External Segment %s already deleted'), es_id) + LOG.warning('External Segment %s already deleted', es_id) def _get_external_policy(self, plugin_context, ep_id): return self._get_resource(self._group_policy_plugin, plugin_context, @@ -731,7 +729,7 @@ class LocalAPI(object): self._delete_resource(self._group_policy_plugin, plugin_context, 'external_policy', ep_id, False) except gp_ext.ExternalPolicyNotFound: - LOG.warning(_LW('External Policy %s already deleted'), ep_id) + LOG.warning('External Policy %s already deleted', ep_id) def _get_policy_rule_set(self, plugin_context, prs_id): return self._get_resource(self._group_policy_plugin, plugin_context, @@ -755,7 +753,7 @@ class LocalAPI(object): self._delete_resource(self._group_policy_plugin, plugin_context, 'policy_rule_set', prs_id, False) except gp_ext.PolicyRuleSetNotFound: - LOG.warning(_LW('Policy Rule Set %s already deleted'), prs_id) + LOG.warning('Policy Rule Set %s already deleted', prs_id) def _get_servicechain_instance(self, plugin_context, sci_id): return self._get_resource(self._servicechain_plugin, plugin_context, @@ -780,7 +778,7 @@ class LocalAPI(object): self._delete_resource(self._servicechain_plugin, plugin_context, 'servicechain_instance', sci_id, False) except sc_ext.ServiceChainInstanceNotFound: - LOG.warning(_LW("servicechain %s already deleted"), sci_id) + LOG.warning("servicechain %s already deleted", sci_id) def _get_servicechain_spec(self, plugin_context, scs_id): return self._get_resource(self._servicechain_plugin, plugin_context, @@ -804,7 +802,7 @@ class LocalAPI(object): self._delete_resource(self._servicechain_plugin, plugin_context, 'servicechain_spec', scs_id) except sc_ext.ServiceChainSpecNotFound: - LOG.warning(_LW("servicechain spec %s already deleted"), scs_id) + LOG.warning("servicechain spec %s already deleted", scs_id) def _get_policy_target(self, plugin_context, pt_id): return 
self._get_resource(self._group_policy_plugin, plugin_context, @@ -828,7 +826,7 @@ class LocalAPI(object): self._delete_resource(self._group_policy_plugin, plugin_context, 'policy_target', pt_id, False) except gp_ext.PolicyTargetNotFound: - LOG.warning(_LW('Policy Rule Set %s already deleted'), pt_id) + LOG.warning('Policy Rule Set %s already deleted', pt_id) def _get_policy_target_group(self, plugin_context, ptg_id): return self._get_resource(self._group_policy_plugin, plugin_context, @@ -853,4 +851,4 @@ class LocalAPI(object): self._delete_resource(self._group_policy_plugin, plugin_context, 'policy_target_group', ptg_id) except sc_ext.ServiceChainSpecNotFound: - LOG.warning(_LW("Policy Target Group %s already deleted"), ptg_id) + LOG.warning("Policy Target Group %s already deleted", ptg_id) diff --git a/gbpservice/neutron/db/servicechain_db.py b/gbpservice/neutron/db/servicechain_db.py index 336219548..59a8432cd 100644 --- a/gbpservice/neutron/db/servicechain_db.py +++ b/gbpservice/neutron/db/servicechain_db.py @@ -26,7 +26,6 @@ from sqlalchemy.ext.orderinglist import ordering_list from sqlalchemy import orm from sqlalchemy.orm import exc -from gbpservice._i18n import _LE from gbpservice.neutron.extensions import servicechain as schain from gbpservice.neutron.services.servicechain.common import exceptions as s_exc @@ -153,7 +152,7 @@ class ServiceChainDbPlugin(schain.ServiceChainPluginBase, # plugins are loaded to grab and store plugin. 
grouppolicy_plugin = directory.get_plugin(pconst.GROUP_POLICY) if not grouppolicy_plugin: - LOG.error(_LE("No Grouppolicy service plugin found.")) + LOG.error("No Grouppolicy service plugin found.") raise s_exc.ServiceChainDeploymentError() return grouppolicy_plugin diff --git a/gbpservice/neutron/extensions/group_policy.py b/gbpservice/neutron/extensions/group_policy.py index 1711ba13f..6664295f2 100644 --- a/gbpservice/neutron/extensions/group_policy.py +++ b/gbpservice/neutron/extensions/group_policy.py @@ -17,12 +17,12 @@ from neutron.api import extensions as neutron_extensions from neutron.api.v2 import attributes as attr from neutron.api.v2 import resource_helper from neutron.plugins.common import constants -from neutron.services import service_base from neutron_lib.api import converters as conv from neutron_lib.api import extensions from neutron_lib.api import validators as valid from neutron_lib import constants as nlib_const from neutron_lib import exceptions as nexc +from neutron_lib.services import base as service_base from oslo_config import cfg from oslo_log import log as logging from oslo_utils import uuidutils diff --git a/gbpservice/neutron/extensions/servicechain.py b/gbpservice/neutron/extensions/servicechain.py index b89c7baf8..977dd916f 100644 --- a/gbpservice/neutron/extensions/servicechain.py +++ b/gbpservice/neutron/extensions/servicechain.py @@ -16,11 +16,11 @@ from neutron.api import extensions as neutron_extensions from neutron.api.v2 import attributes as attr from neutron.api.v2 import resource_helper from neutron.plugins.common import constants -from neutron.services import service_base from neutron_lib.api import converters as conv from neutron_lib.api import extensions from neutron_lib.api import validators as valid from neutron_lib import exceptions as nexc +from neutron_lib.services import base as service_base from oslo_config import cfg from oslo_log import log as logging import six diff --git 
a/gbpservice/neutron/plugins/ml2plus/drivers/apic_aim/apic_mapper.py b/gbpservice/neutron/plugins/ml2plus/drivers/apic_aim/apic_mapper.py index 1caa78904..eb73d0235 100644 --- a/gbpservice/neutron/plugins/ml2plus/drivers/apic_aim/apic_mapper.py +++ b/gbpservice/neutron/plugins/ml2plus/drivers/apic_aim/apic_mapper.py @@ -15,7 +15,6 @@ from oslo_log import log -from gbpservice._i18n import _LE from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import exceptions LOG = log.getLogger(__name__) @@ -108,6 +107,6 @@ class APICNameMapper(object): if self._map(session, "", type_tag, prefix) == name[:pos]: return name[pos:] elif enforce: - LOG.error(_LE("Attempted to reverse-map invalid APIC name '%s'"), + LOG.error("Attempted to reverse-map invalid APIC name '%s'", name) raise exceptions.InternalError() diff --git a/gbpservice/neutron/plugins/ml2plus/drivers/apic_aim/cache.py b/gbpservice/neutron/plugins/ml2plus/drivers/apic_aim/cache.py index c13abd30a..80febff00 100644 --- a/gbpservice/neutron/plugins/ml2plus/drivers/apic_aim/cache.py +++ b/gbpservice/neutron/plugins/ml2plus/drivers/apic_aim/cache.py @@ -20,7 +20,6 @@ from keystoneclient.v3 import client as ksc_client from oslo_config import cfg from oslo_log import log as logging -from gbpservice._i18n import _LW LOG = logging.getLogger(__name__) @@ -47,7 +46,7 @@ class ProjectNameCache(object): auth = ksc_auth.load_from_conf_options(cfg.CONF, AUTH_GROUP) LOG.debug("Got auth: %s", auth) if not auth: - LOG.warning(_LW('No auth_plugin configured in %s'), + LOG.warning('No auth_plugin configured in %s', AUTH_GROUP) session = ksc_session.Session.load_from_conf_options( cfg.CONF, AUTH_GROUP, auth=auth) diff --git a/gbpservice/neutron/plugins/ml2plus/drivers/apic_aim/extension_driver.py b/gbpservice/neutron/plugins/ml2plus/drivers/apic_aim/extension_driver.py index 3b0fe5b9e..d175cdb27 100644 --- a/gbpservice/neutron/plugins/ml2plus/drivers/apic_aim/extension_driver.py +++ 
b/gbpservice/neutron/plugins/ml2plus/drivers/apic_aim/extension_driver.py @@ -22,7 +22,6 @@ from neutron_lib.plugins import directory from oslo_log import log from oslo_utils import excutils -from gbpservice._i18n import _LI from gbpservice.neutron import extensions as extensions_pkg from gbpservice.neutron.extensions import cisco_apic from gbpservice.neutron.plugins.ml2plus import driver_api as api_plus @@ -38,11 +37,11 @@ class ApicExtensionDriver(api_plus.ExtensionDriver, extn_db.ExtensionDbMixin): def __init__(self): - LOG.info(_LI("APIC AIM ED __init__")) + LOG.info("APIC AIM ED __init__") self._mechanism_driver = None def initialize(self): - LOG.info(_LI("APIC AIM ED initializing")) + LOG.info("APIC AIM ED initializing") extensions.append_api_extensions_path(extensions_pkg.__path__) @property diff --git a/gbpservice/neutron/plugins/ml2plus/drivers/apic_aim/mechanism_driver.py b/gbpservice/neutron/plugins/ml2plus/drivers/apic_aim/mechanism_driver.py index caa2bd781..70d5e2aa8 100644 --- a/gbpservice/neutron/plugins/ml2plus/drivers/apic_aim/mechanism_driver.py +++ b/gbpservice/neutron/plugins/ml2plus/drivers/apic_aim/mechanism_driver.py @@ -52,9 +52,6 @@ from oslo_log import log import oslo_messaging from oslo_utils import importutils -from gbpservice._i18n import _LE -from gbpservice._i18n import _LI -from gbpservice._i18n import _LW from gbpservice.network.neutronv2 import local_api from gbpservice.neutron.extensions import cisco_apic from gbpservice.neutron.extensions import cisco_apic_l3 as a_l3 @@ -165,10 +162,10 @@ class ApicMechanismDriver(api_plus.MechanismDriver, self.md.delete_link(*args, **kwargs) def __init__(self): - LOG.info(_LI("APIC AIM MD __init__")) + LOG.info("APIC AIM MD __init__") def initialize(self): - LOG.info(_LI("APIC AIM MD initializing")) + LOG.info("APIC AIM MD initializing") self.project_name_cache = cache.ProjectNameCache() self.name_mapper = apic_mapper.APICNameMapper() self.aim = aim_manager.AimManager() @@ -1011,9 +1008,9 @@ 
class ApicMechanismDriver(api_plus.MechanismDriver, # REVISIT: Delete intf_vrf if no longer used? else: # This should never happen. - LOG.error(_LE("Interface topology %(intf_topology)s and " - "router topology %(router_topology)s have " - "different VRFs, but neither is shared"), + LOG.error("Interface topology %(intf_topology)s and " + "router topology %(router_topology)s have " + "different VRFs, but neither is shared", {'intf_topology': intf_topology, 'router_topology': router_topology}) raise exceptions.InternalError() @@ -1611,8 +1608,8 @@ class ApicMechanismDriver(api_plus.MechanismDriver, LOG.debug("Bound using segment: %s", segment) return True else: - LOG.warning(_LW("Refusing to bind port %(port)s to dead " - "agent: %(agent)s"), + LOG.warning("Refusing to bind port %(port)s to dead " + "agent: %(agent)s", {'port': current['id'], 'agent': agent}) def _opflex_bind_port(self, context, segment, agent): @@ -1691,7 +1688,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver, api.PHYSICAL_NETWORK: segment[api.PHYSICAL_NETWORK]} dyn_seg = context.allocate_dynamic_segment(seg_args) - LOG.info(_LI('Allocated dynamic-segment %(s)s for port %(p)s'), + LOG.info('Allocated dynamic-segment %(s)s for port %(p)s', {'s': dyn_seg, 'p': context.current['id']}) dyn_seg['aim_ml2_created'] = True context.continue_binding(segment[api.ID], [dyn_seg]) @@ -1970,8 +1967,8 @@ class ApicMechanismDriver(api_plus.MechanismDriver, def _move_topology(self, aim_ctx, topology, old_vrf, new_vrf, nets_to_notify): - LOG.info(_LI("Moving routed networks %(topology)s from VRF " - "%(old_vrf)s to VRF %(new_vrf)s"), + LOG.info("Moving routed networks %(topology)s from VRF " + "%(old_vrf)s to VRF %(new_vrf)s", {'topology': topology.keys(), 'old_vrf': old_vrf, 'new_vrf': new_vrf}) @@ -2189,7 +2186,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver, display_name=aim_utils.sanitize_display_name('CommonTenant')) tenant = self.aim.get(aim_ctx, attrs) if not tenant: - LOG.info(_LI("Creating 
common tenant")) + LOG.info("Creating common tenant") tenant = self.aim.create(aim_ctx, attrs) return tenant @@ -2200,7 +2197,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver, if not vrf: attrs.display_name = ( aim_utils.sanitize_display_name('CommonUnroutedVRF')) - LOG.info(_LI("Creating common unrouted VRF")) + LOG.info("Creating common unrouted VRF") vrf = self.aim.create(aim_ctx, attrs) return vrf @@ -2213,7 +2210,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver, name=filter_name, display_name=dname) if not self.aim.get(aim_ctx, filter): - LOG.info(_LI("Creating common Any Filter")) + LOG.info("Creating common Any Filter") self.aim.create(aim_ctx, filter) dname = aim_utils.sanitize_display_name("AnyFilterEntry") @@ -2222,7 +2219,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver, name=ANY_FILTER_ENTRY_NAME, display_name=dname) if not self.aim.get(aim_ctx, entry): - LOG.info(_LI("Creating common Any FilterEntry")) + LOG.info("Creating common Any FilterEntry") self.aim.create(aim_ctx, entry) return filter @@ -2232,7 +2229,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver, if not vrf: attrs.display_name = ( aim_utils.sanitize_display_name('DefaultRoutedVRF')) - LOG.info(_LI("Creating default VRF for %s"), attrs.tenant_name) + LOG.info("Creating default VRF for %s", attrs.tenant_name) vrf = self.aim.create(aim_ctx, attrs) return vrf @@ -2504,8 +2501,8 @@ class ApicMechanismDriver(api_plus.MechanismDriver, .filter(extn_db_sn.snat_host_pool.is_(True)) .all()) if not snat_subnets: - LOG.info(_LI('No subnet in external network %s is marked as ' - 'SNAT-pool'), + LOG.info('No subnet in external network %s is marked as ' + 'SNAT-pool', ext_network['id']) return for snat_subnet in snat_subnets: @@ -2524,8 +2521,8 @@ class ApicMechanismDriver(api_plus.MechanismDriver, snat_ip = port['fixed_ips'][0]['ip_address'] break except n_exceptions.IpAddressGenerationFailure: - LOG.info(_LI('No more addresses available in subnet %s ' - 'for SNAT IP 
allocation'), + LOG.info('No more addresses available in subnet %s ' + 'for SNAT IP allocation', snat_subnet['id']) else: snat_ip = snat_port.fixed_ips[0].ip_address @@ -2569,8 +2566,8 @@ class ApicMechanismDriver(api_plus.MechanismDriver, try: self.plugin.delete_port(e_context, p[0]) except n_exceptions.NeutronException as ne: - LOG.warning(_LW('Failed to delete SNAT port %(port)s: ' - '%(ex)s'), + LOG.warning('Failed to delete SNAT port %(port)s: ' + '%(ex)s', {'port': p, 'ex': ne}) def check_floatingip_external_address(self, context, floatingip): @@ -2625,7 +2622,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver, epg = self.get_epg_for_network(session, network) if not epg: - LOG.info(_LI('Network %s does not map to any EPG'), network['id']) + LOG.info('Network %s does not map to any EPG', network['id']) return if segment: @@ -2682,8 +2679,8 @@ class ApicMechanismDriver(api_plus.MechanismDriver, aim_ctx, aim_infra.HostLink, host_name=host, interface_name=interface) if not host_link or not host_link[0].path: - LOG.warning(_LW('No host link information found for host: ' - '%(host)s, interface: %(interface)s'), + LOG.warning('No host link information found for host: ' + '%(host)s, interface: %(interface)s', {'host': host, 'interface': interface}) continue host_link = host_link[0].path @@ -2697,7 +2694,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver, host_link = self.aim.find(aim_ctx, aim_infra.HostLink, host_name=host) if not host_link or not host_link[0].path: - LOG.warning(_LW('No host link information found for host %s'), + LOG.warning('No host link information found for host %s', host) return host_link = host_link[0].path @@ -2721,7 +2718,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver, port_context.current['id']) .first()) if not ports: - LOG.info(_LI('Releasing dynamic-segment %(s)s for port %(p)s'), + LOG.info('Releasing dynamic-segment %(s)s for port %(p)s', {'s': btm, 'p': port_context.current['id']}) 
port_context.release_dynamic_segment(btm[api.ID]) @@ -2871,7 +2868,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver, # this could be caused by concurrent transactions except db_exc.DBDuplicateEntry as e: LOG.debug(e) - LOG.info(_LI('Releasing domain %(d)s for port %(p)s'), + LOG.info('Releasing domain %(d)s for port %(p)s', {'d': domain, 'p': port['id']}) def _get_non_opflex_segments_on_host(self, context, host): diff --git a/gbpservice/neutron/plugins/ml2plus/managers.py b/gbpservice/neutron/plugins/ml2plus/managers.py index e58dcfecf..0b7a99108 100644 --- a/gbpservice/neutron/plugins/ml2plus/managers.py +++ b/gbpservice/neutron/plugins/ml2plus/managers.py @@ -13,8 +13,6 @@ # License for the specific language governing permissions and limitations # under the License. -from gbpservice._i18n import _LE -from gbpservice._i18n import _LI from gbpservice.neutron.plugins.ml2plus import driver_api from neutron.db import api as db_api @@ -62,8 +60,8 @@ class MechanismManager(managers.MechanismManager): 'method': method_name}, exc_info=e) LOG.exception( - _LE("Mechanism driver '%(name)s' failed in " - "%(method)s"), + "Mechanism driver '%(name)s' failed in " + "%(method)s", {'name': driver.name, 'method': method_name} ) errors.append(e) @@ -81,8 +79,8 @@ class MechanismManager(managers.MechanismManager): try: driver.obj.ensure_tenant(plugin_context, tenant_id) except Exception: - LOG.exception(_LE("Mechanism driver '%s' failed in " - "ensure_tenant"), driver.name) + LOG.exception("Mechanism driver '%s' failed in " + "ensure_tenant", driver.name) raise ml2_exc.MechanismDriverError(method="ensure_tenant") def create_subnetpool_precommit(self, context): @@ -197,8 +195,8 @@ class ExtensionManager(managers.ExtensionManager): result) except Exception: with excutils.save_and_reraise_exception(): - LOG.info(_LI("Extension driver '%(name)s' failed in " - "%(method)s"), + LOG.info("Extension driver '%(name)s' failed in " + "%(method)s", {'name': driver.name, 'method': 
method_name}) # Overrides ML2 implementation to avoid eating retriable diff --git a/gbpservice/neutron/plugins/ml2plus/patch_neutron.py b/gbpservice/neutron/plugins/ml2plus/patch_neutron.py index 50a783e5b..b0a32d0e8 100644 --- a/gbpservice/neutron/plugins/ml2plus/patch_neutron.py +++ b/gbpservice/neutron/plugins/ml2plus/patch_neutron.py @@ -86,7 +86,6 @@ def notify(resource, event, trigger, **kwargs): registry.notify = notify -from neutron._i18n import _LE from neutron.callbacks import events from neutron.callbacks import exceptions from oslo_log import log as logging @@ -112,12 +111,12 @@ def _notify_loop(resource, event, trigger, **kwargs): event.startswith(events.PRECOMMIT) ) if not abortable_event: - LOG.exception(_LE("Error during notification for " - "%(callback)s %(resource)s, %(event)s"), + LOG.exception("Error during notification for " + "%(callback)s %(resource)s, %(event)s", {'callback': callback_id, 'resource': resource, 'event': event}) else: - LOG.error(_LE("Callback %(callback)s raised %(error)s"), + LOG.error("Callback %(callback)s raised %(error)s", {'callback': callback_id, 'error': e}) errors.append(exceptions.NotificationError(callback_id, e)) return errors @@ -197,11 +196,9 @@ def commit_reservation(context, reservation_id): quota.QUOTAS.get_driver().commit_reservation = commit_reservation -from neutron._i18n import _LI from oslo_db.sqlalchemy import exc_filters -exc_filters._LE = _LI exc_filters.LOG.exception = exc_filters.LOG.debug diff --git a/gbpservice/neutron/plugins/ml2plus/plugin.py b/gbpservice/neutron/plugins/ml2plus/plugin.py index 5ce863849..4e5b788ef 100644 --- a/gbpservice/neutron/plugins/ml2plus/plugin.py +++ b/gbpservice/neutron/plugins/ml2plus/plugin.py @@ -16,8 +16,6 @@ # The following is imported at the beginning to ensure # that the patches are applied before any of the # modules save a reference to the functions being patched -from gbpservice._i18n import _LE -from gbpservice._i18n import _LI from gbpservice.neutron import 
extensions as gbp_extensions from gbpservice.neutron.extensions import patch # noqa from gbpservice.neutron.plugins.ml2plus import patch_neutron # noqa @@ -135,7 +133,7 @@ class Ml2PlusPlugin(ml2_plugin.Ml2Plugin, security_group=securitygroups_db.SecurityGroup, security_group_rule=securitygroups_db.SecurityGroupRule) def __init__(self): - LOG.info(_LI("Ml2Plus initializing")) + LOG.info("Ml2Plus initializing") registry._get_callback_manager()._notify_loop = ( patch_neutron._notify_loop) # First load drivers, then initialize DB, then initialize drivers @@ -179,9 +177,9 @@ class Ml2PlusPlugin(ml2_plugin.Ml2Plugin, registry.subscribe(self._subnet_delete_after_delete_handler, resources.SUBNET, events.AFTER_DELETE) except AttributeError: - LOG.info(_LI("Detected older version of Neutron, ML2Plus plugin " - "is not subscribed to subnet_precommit_delete and " - "subnet_after_delete events")) + LOG.info("Detected older version of Neutron, ML2Plus plugin " + "is not subscribed to subnet_precommit_delete and " + "subnet_after_delete events") self._setup_dhcp() self._start_rpc_notifiers() self.add_agent_status_check_worker(self.agent_health_check) @@ -193,7 +191,7 @@ class Ml2PlusPlugin(ml2_plugin.Ml2Plugin, cfg.CONF.ml2plus.refresh_subnetpool_db_obj) self.refresh_address_scope_db_obj = ( cfg.CONF.ml2plus.refresh_address_scope_db_obj) - LOG.info(_LI("Modular L2 Plugin (extended) initialization complete")) + LOG.info("Modular L2 Plugin (extended) initialization complete") db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( attributes.SUBNETPOOLS, ['_ml2_md_extend_subnetpool_dict']) @@ -412,8 +410,8 @@ class Ml2PlusPlugin(ml2_plugin.Ml2Plugin, self.mechanism_manager.create_subnetpool_postcommit(mech_context) except ml2_exc.MechanismDriverError: with excutils.save_and_reraise_exception(): - LOG.error(_LE("mechanism_manager.create_subnetpool_postcommit " - "failed, deleting subnetpool '%s'"), + LOG.error("mechanism_manager.create_subnetpool_postcommit " + "failed, 
deleting subnetpool '%s'", result['id']) self.delete_subnetpool(context, result['id']) return result @@ -476,9 +474,9 @@ class Ml2PlusPlugin(ml2_plugin.Ml2Plugin, mech_context) except ml2_exc.MechanismDriverError: with excutils.save_and_reraise_exception(): - LOG.error(_LE("mechanism_manager.create_address_scope_" - "postcommit failed, deleting address_scope" - " '%s'"), + LOG.error("mechanism_manager.create_address_scope_" + "postcommit failed, deleting address_scope" + " '%s'", result['id']) self.delete_address_scope(context, result['id']) return result diff --git a/gbpservice/neutron/services/apic_aim/l3_plugin.py b/gbpservice/neutron/services/apic_aim/l3_plugin.py index 995189075..5dff7f6e6 100644 --- a/gbpservice/neutron/services/apic_aim/l3_plugin.py +++ b/gbpservice/neutron/services/apic_aim/l3_plugin.py @@ -29,8 +29,6 @@ from oslo_log import log as logging from oslo_utils import excutils from sqlalchemy import inspect -from gbpservice._i18n import _LE -from gbpservice._i18n import _LI from gbpservice.neutron import extensions as extensions_pkg from gbpservice.neutron.extensions import cisco_apic_l3 as l3_ext from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import ( @@ -61,7 +59,7 @@ class ApicL3Plugin(common_db_mixin.CommonDbMixin, @resource_registry.tracked_resources(router=l3_db.Router, floatingip=l3_db.FloatingIP) def __init__(self): - LOG.info(_LI("APIC AIM L3 Plugin __init__")) + LOG.info("APIC AIM L3 Plugin __init__") extensions.append_api_extensions_path(extensions_pkg.__path__) self._mechanism_driver = None super(ApicL3Plugin, self).__init__() @@ -84,7 +82,7 @@ class ApicL3Plugin(common_db_mixin.CommonDbMixin, self._include_router_extn_attr(session, router_res) except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_LE("APIC AIM extend_router_dict failed")) + LOG.exception("APIC AIM extend_router_dict failed") db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( l3.ROUTERS, ['_extend_router_dict_apic']) @@ 
-257,8 +255,8 @@ class ApicL3Plugin(common_db_mixin.CommonDbMixin, .create_floatingip(context, floatingip)) break except exceptions.IpAddressGenerationFailure: - LOG.info(_LI('No more floating IP addresses available ' - 'in subnet %s'), + LOG.info('No more floating IP addresses available ' + 'in subnet %s', ext_sn) if not result: diff --git a/gbpservice/neutron/services/grouppolicy/drivers/chain_mapping.py b/gbpservice/neutron/services/grouppolicy/drivers/chain_mapping.py index 3235fb8f9..fecef7df1 100644 --- a/gbpservice/neutron/services/grouppolicy/drivers/chain_mapping.py +++ b/gbpservice/neutron/services/grouppolicy/drivers/chain_mapping.py @@ -20,7 +20,6 @@ from oslo_serialization import jsonutils from oslo_utils import excutils import sqlalchemy as sa -from gbpservice._i18n import _LE from gbpservice.common import utils from gbpservice.network.neutronv2 import local_api from gbpservice.neutron.db.grouppolicy import group_policy_mapping_db as gpdb @@ -104,10 +103,10 @@ class ChainMappingDriver(api.PolicyDriver, local_api.LocalAPI, return tenant.id except k_exceptions.NotFound: with excutils.save_and_reraise_exception(reraise=reraise): - LOG.error(_LE('No tenant with name %s exists.'), tenant) + LOG.error('No tenant with name %s exists.', tenant) except k_exceptions.NoUniqueMatch: with excutils.save_and_reraise_exception(reraise=reraise): - LOG.error(_LE('Multiple tenants matches found for %s'), + LOG.error('Multiple tenants matches found for %s', tenant) @staticmethod @@ -290,7 +289,7 @@ class ChainMappingDriver(api.PolicyDriver, local_api.LocalAPI, context.current['status_details'] = ptg_status[0][ 'status_details'] except Exception: - LOG.error(_LE('Failed to update ptg status')) + LOG.error('Failed to update ptg status') @log.log_method_call def _delete_policy_target_group_postcommit(self, context): diff --git a/gbpservice/neutron/services/grouppolicy/drivers/cisco/apic/aim_mapping.py 
b/gbpservice/neutron/services/grouppolicy/drivers/cisco/apic/aim_mapping.py index 0888e4a68..7e087005e 100644 --- a/gbpservice/neutron/services/grouppolicy/drivers/cisco/apic/aim_mapping.py +++ b/gbpservice/neutron/services/grouppolicy/drivers/cisco/apic/aim_mapping.py @@ -29,9 +29,6 @@ from oslo_log import helpers as log from oslo_log import log as logging from oslo_utils import excutils -from gbpservice._i18n import _LE -from gbpservice._i18n import _LI -from gbpservice._i18n import _LW from gbpservice.network.neutronv2 import local_api from gbpservice.neutron.db.grouppolicy import group_policy_db as gpdb from gbpservice.neutron.db.grouppolicy import group_policy_mapping_db as gpmdb @@ -154,7 +151,7 @@ class AIMMappingDriver(nrd.CommonNeutronBase, aim_rpc.AIMMappingRPCMixin): @log.log_method_call def initialize(self): - LOG.info(_LI("APIC AIM Policy Driver initializing")) + LOG.info("APIC AIM Policy Driver initializing") super(AIMMappingDriver, self).initialize() self._apic_aim_mech_driver = None self._apic_segmentation_label_driver = None @@ -163,16 +160,16 @@ class AIMMappingDriver(nrd.CommonNeutronBase, aim_rpc.AIMMappingRPCMixin): self._name_mapper = None self.create_auto_ptg = cfg.CONF.aim_mapping.create_auto_ptg if self.create_auto_ptg: - LOG.info(_LI('Auto PTG creation configuration set, ' - 'this will result in automatic creation of a PTG ' - 'per L2 Policy')) + LOG.info('Auto PTG creation configuration set, ' + 'this will result in automatic creation of a PTG ' + 'per L2 Policy') self.create_per_l3p_implicit_contracts = ( cfg.CONF.aim_mapping.create_per_l3p_implicit_contracts) self.advertise_mtu = cfg.CONF.aim_mapping.advertise_mtu local_api.QUEUE_OUT_OF_PROCESS_NOTIFICATIONS = True if self.create_per_l3p_implicit_contracts: - LOG.info(_LI('Implicit AIM contracts will be created ' - 'for l3_policies which do not have them.')) + LOG.info('Implicit AIM contracts will be created ' + 'for l3_policies which do not have them.') 
self._create_per_l3p_implicit_contracts() @log.log_method_call @@ -453,11 +450,11 @@ class AIMMappingDriver(nrd.CommonNeutronBase, aim_rpc.AIMMappingRPCMixin): context._plugin).delete_policy_target_group( context._plugin_context, auto_ptg['id']) except gpolicy.PolicyTargetGroupNotFound: - LOG.info(_LI("Auto PTG with ID %(id)s for " - "for L2P %(l2p)s not found. If create_auto_ptg " - "configuration was not set at the time of the L2P " - "creation, you can safely ignore this, else this " - "could potentially be indication of an error."), + LOG.info("Auto PTG with ID %(id)s for " + "for L2P %(l2p)s not found. If create_auto_ptg " + "configuration was not set at the time of the L2P " + "creation, you can safely ignore this, else this " + "could potentially be indication of an error.", {'id': auto_ptg_id, 'l2p': l2p_id}) super(AIMMappingDriver, self).delete_l2_policy_precommit(context) @@ -1062,9 +1059,9 @@ class AIMMappingDriver(nrd.CommonNeutronBase, aim_rpc.AIMMappingRPCMixin): ok_to_bind = True break if not ok_to_bind: - LOG.warning(_LW("Failed to bind the port due to " - "allowed_vm_names rules %(rules)s " - "for VM: %(vm)s"), + LOG.warning("Failed to bind the port due to " + "allowed_vm_names rules %(rules)s " + "for VM: %(vm)s", {'rules': l3p['allowed_vm_names'], 'vm': vm.name}) return ok_to_bind @@ -1548,9 +1545,9 @@ class AIMMappingDriver(nrd.CommonNeutronBase, aim_rpc.AIMMappingRPCMixin): # default EPG and delete them # create=True, delete=True is not a valid combination if create and delete: - LOG.error(_LE("Incorrect use of internal method " - "_process_contracts_for_default_epg(), create and " - "delete cannot be True at the same time")) + LOG.error("Incorrect use of internal method " + "_process_contracts_for_default_epg(), create and " + "delete cannot be True at the same time") raise session = context._plugin_context.session aim_ctx = aim_context.AimContext(session) @@ -1792,8 +1789,8 @@ class AIMMappingDriver(nrd.CommonNeutronBase, 
aim_rpc.AIMMappingRPCMixin): if not epg: # Something is wrong, default EPG doesn't exist. # TODO(ivar): should rise an exception - LOG.error(_LE("Default EPG doesn't exist for " - "port %s"), port['id']) + LOG.error("Default EPG doesn't exist for " + "port %s", port['id']) return epg def _get_subnet_details(self, plugin_context, port, details): @@ -2155,9 +2152,9 @@ class AIMMappingDriver(nrd.CommonNeutronBase, aim_rpc.AIMMappingRPCMixin): intf_port = self._create_port(plugin_context, attrs) except n_exc.NeutronException: with excutils.save_and_reraise_exception(): - LOG.exception(_LE('Failed to create explicit router ' - 'interface port in subnet ' - '%(subnet)s'), + LOG.exception('Failed to create explicit router ' + 'interface port in subnet ' + '%(subnet)s', {'subnet': subnet['id']}) interface_info = {'port_id': intf_port['id'], NO_VALIDATE: True} @@ -2167,9 +2164,9 @@ class AIMMappingDriver(nrd.CommonNeutronBase, aim_rpc.AIMMappingRPCMixin): except n_exc.BadRequest: self._delete_port(plugin_context, intf_port['id']) with excutils.save_and_reraise_exception(): - LOG.exception(_LE('Attaching router %(router)s to ' - '%(subnet)s with explicit port ' - '%(port) failed'), + LOG.exception('Attaching router %(router)s to ' + '%(subnet)s with explicit port ' + '%(port) failed', {'subnet': subnet['id'], 'router': router_id, 'port': intf_port['id']}) @@ -2185,8 +2182,8 @@ class AIMMappingDriver(nrd.CommonNeutronBase, aim_rpc.AIMMappingRPCMixin): self._add_router_interface(plugin_context, router_id, interface_info) except n_exc.BadRequest as e: - LOG.exception(_LE("Adding subnet to router failed, exception:" - "%s"), e) + LOG.exception("Adding subnet to router failed, exception:" + "%s", e) raise exc.GroupPolicyInternalError() def _detach_router_from_subnets(self, plugin_context, router_id, sn_ids): diff --git a/gbpservice/neutron/services/grouppolicy/drivers/cisco/apic/aim_mapping_rpc.py b/gbpservice/neutron/services/grouppolicy/drivers/cisco/apic/aim_mapping_rpc.py 
index 6d0b4a0b7..cf80581e1 100644 --- a/gbpservice/neutron/services/grouppolicy/drivers/cisco/apic/aim_mapping_rpc.py +++ b/gbpservice/neutron/services/grouppolicy/drivers/cisco/apic/aim_mapping_rpc.py @@ -17,8 +17,6 @@ from neutron.plugins.ml2 import rpc as ml2_rpc from opflexagent import rpc as o_rpc from oslo_log import log -from gbpservice._i18n import _LE -from gbpservice._i18n import _LW from gbpservice.neutron.services.grouppolicy.drivers.cisco.apic import ( nova_client as nclient) from gbpservice.neutron.services.grouppolicy.drivers.cisco.apic import ( @@ -65,8 +63,8 @@ class AIMMappingRPCMixin(ha_ip_db.HAIPOwnerDbMixin): return self._retrieve_vrf_details(context, **kwargs) except Exception as e: vrf = kwargs.get('vrf_id') - LOG.error(_LE("An exception has occurred while retrieving vrf " - "gbp details for %s"), vrf) + LOG.error("An exception has occurred while retrieving vrf " + "gbp details for %s", vrf) LOG.exception(e) return {'l3_policy_id': vrf} @@ -82,8 +80,8 @@ class AIMMappingRPCMixin(ha_ip_db.HAIPOwnerDbMixin): return self._get_gbp_details(context, kwargs, kwargs.get('host')) except Exception as e: device = kwargs.get('device') - LOG.error(_LE("An exception has occurred while retrieving device " - "gbp details for %s"), device) + LOG.error("An exception has occurred while retrieving device " + "gbp details for %s", device) LOG.exception(e) return {'device': device} @@ -101,8 +99,8 @@ class AIMMappingRPCMixin(ha_ip_db.HAIPOwnerDbMixin): None, None).get_device_details(context, **request)} return result except Exception as e: - LOG.error(_LE("An exception has occurred while requesting device " - "gbp details for %s"), request.get('device')) + LOG.error("An exception has occurred while requesting device " + "gbp details for %s", request.get('device')) LOG.exception(e) return None @@ -137,8 +135,8 @@ class AIMMappingRPCMixin(ha_ip_db.HAIPOwnerDbMixin): port_context = core_plugin.get_bound_port_context(context, port_id, host) if not port_context: - 
LOG.warning(_LW("Device %(device)s requested by agent " - "%(agent_id)s not found in database"), + LOG.warning("Device %(device)s requested by agent " + "%(agent_id)s not found in database", {'device': port_id, 'agent_id': request.get('agent_id')}) return {'device': request.get('device')} diff --git a/gbpservice/neutron/services/grouppolicy/drivers/cisco/apic/nova_client.py b/gbpservice/neutron/services/grouppolicy/drivers/cisco/apic/nova_client.py index 012b21a1c..89d029096 100644 --- a/gbpservice/neutron/services/grouppolicy/drivers/cisco/apic/nova_client.py +++ b/gbpservice/neutron/services/grouppolicy/drivers/cisco/apic/nova_client.py @@ -14,8 +14,6 @@ from neutron.notifiers import nova as n_nova from novaclient import exceptions as nova_exceptions from oslo_log import log as logging -from gbpservice._i18n import _LW - LOG = logging.getLogger(__name__) @@ -39,7 +37,7 @@ class NovaClient(object): try: return self.client.servers.get(server_id) except nova_exceptions.NotFound: - LOG.warning(_LW("Nova returned NotFound for server: %s"), + LOG.warning("Nova returned NotFound for server: %s", server_id) except Exception as e: LOG.exception(e) diff --git a/gbpservice/neutron/services/grouppolicy/drivers/extensions/aim_mapping_extension_driver.py b/gbpservice/neutron/services/grouppolicy/drivers/extensions/aim_mapping_extension_driver.py index fcf2583a6..f9afc79f3 100644 --- a/gbpservice/neutron/services/grouppolicy/drivers/extensions/aim_mapping_extension_driver.py +++ b/gbpservice/neutron/services/grouppolicy/drivers/extensions/aim_mapping_extension_driver.py @@ -13,7 +13,6 @@ from neutron_lib.plugins import directory from oslo_log import log as logging -from gbpservice._i18n import _LI from gbpservice.neutron.db.grouppolicy.extensions import ( apic_auto_ptg_db as auto_ptg_db) from gbpservice.neutron.db.grouppolicy.extensions import ( @@ -34,7 +33,7 @@ class AIMExtensionDriver(api.ExtensionDriver, _extension_dict = cisco_apic_gbp.EXTENDED_ATTRIBUTES_2_0 def 
__init__(self): - LOG.info(_LI("AIM Extension __init__")) + LOG.info("AIM Extension __init__") self._policy_driver = None @property diff --git a/gbpservice/neutron/services/grouppolicy/drivers/extensions/apic_reuse_bd_driver.py b/gbpservice/neutron/services/grouppolicy/drivers/extensions/apic_reuse_bd_driver.py index c022b238b..ed56494db 100644 --- a/gbpservice/neutron/services/grouppolicy/drivers/extensions/apic_reuse_bd_driver.py +++ b/gbpservice/neutron/services/grouppolicy/drivers/extensions/apic_reuse_bd_driver.py @@ -12,7 +12,6 @@ from oslo_log import log as logging -from gbpservice._i18n import _LI from gbpservice.neutron.db.grouppolicy.extensions import ( apic_reuse_bd_db as db) from gbpservice.neutron.db.grouppolicy import group_policy_db as gp_db @@ -30,7 +29,7 @@ class ApicReuseBdExtensionDriver(api.ExtensionDriver, _extension_dict = ext.EXTENDED_ATTRIBUTES_2_0 def __init__(self): - LOG.info(_LI("ApicReuseBdExtensionDriver __init__")) + LOG.info("ApicReuseBdExtensionDriver __init__") def initialize(self): pass diff --git a/gbpservice/neutron/services/grouppolicy/drivers/extensions/proxy_group_driver.py b/gbpservice/neutron/services/grouppolicy/drivers/extensions/proxy_group_driver.py index b66f07271..71b846bd7 100644 --- a/gbpservice/neutron/services/grouppolicy/drivers/extensions/proxy_group_driver.py +++ b/gbpservice/neutron/services/grouppolicy/drivers/extensions/proxy_group_driver.py @@ -15,7 +15,6 @@ from neutron_lib.api import validators from oslo_config import cfg from oslo_log import log as logging -from gbpservice._i18n import _LW from gbpservice.neutron.db.grouppolicy.extensions import group_proxy_db as db from gbpservice.neutron.db.grouppolicy import group_policy_db as gp_db from gbpservice.neutron.extensions import driver_proxy_group @@ -115,10 +114,10 @@ class ProxyGroupDriver(api.ExtensionDriver): data['ip_version'], data['proxy_subnet_prefix_length'], data['ip_pool']) if data['proxy_ip_pool']: - LOG.warning(_LW("Since use_subnetpools 
setting is turned on, " - "proxy_ip_pool %s will be ignored. " - "Proxy subnets will be allocated from same " - "subnetpool as group subnets"), + LOG.warning("Since use_subnetpools setting is turned on, " + "proxy_ip_pool %s will be ignored. " + "Proxy subnets will be allocated from same " + "subnetpool as group subnets", data['proxy_ip_pool']) else: gp_db.GroupPolicyDbPlugin.validate_ip_pool( diff --git a/gbpservice/neutron/services/grouppolicy/drivers/implicit_policy.py b/gbpservice/neutron/services/grouppolicy/drivers/implicit_policy.py index 42d3d380f..00bb2fd26 100644 --- a/gbpservice/neutron/services/grouppolicy/drivers/implicit_policy.py +++ b/gbpservice/neutron/services/grouppolicy/drivers/implicit_policy.py @@ -17,8 +17,6 @@ from oslo_log import log as logging from oslo_utils import excutils import sqlalchemy as sa -from gbpservice._i18n import _LI -from gbpservice._i18n import _LW from gbpservice.network.neutronv2 import local_api from gbpservice.neutron.extensions import driver_proxy_group as pg_ext from gbpservice.neutron.extensions import group_policy as gbp_ext @@ -130,17 +128,17 @@ class ImplicitPolicyBase(api.PolicyDriver, local_api.LocalAPI): filter) l3p = l3ps and l3ps[0] if not l3p: - LOG.warning(_LW( + LOG.warning( "Caught DefaultL3PolicyAlreadyExists, " "but default L3 policy not concurrently " - "created for tenant %s"), tenant_id) + "created for tenant %s", tenant_id) ctxt.reraise = True except exc.OverlappingIPPoolsInSameTenantNotAllowed: with excutils.save_and_reraise_exception(): - LOG.info(_LI("Caught " - "OverlappingIPPoolsinSameTenantNotAllowed " - "during creation of default L3 policy for " - "tenant %s"), tenant_id) + LOG.info("Caught " + "OverlappingIPPoolsinSameTenantNotAllowed " + "during creation of default L3 policy for " + "tenant %s", tenant_id) context.current['l3_policy_id'] = l3p['id'] def _use_implicit_l3_policy(self, context): @@ -204,9 +202,9 @@ class ImplicitPolicyBase(api.PolicyDriver, local_api.LocalAPI): try: 
self._delete_l2_policy(context._plugin_context, l2p_id) except gbp_ext.L2PolicyInUse: - LOG.info(_LI( + LOG.info( "Cannot delete implicit L2 Policy %s because it's " - "in use."), l2p_id) + "in use.", l2p_id) def _validate_default_external_segment(self, context): # REVISIT(ivar): find a better way to retrieve the default ES diff --git a/gbpservice/neutron/services/grouppolicy/drivers/resource_mapping.py b/gbpservice/neutron/services/grouppolicy/drivers/resource_mapping.py index fa38c0e73..99cd46049 100755 --- a/gbpservice/neutron/services/grouppolicy/drivers/resource_mapping.py +++ b/gbpservice/neutron/services/grouppolicy/drivers/resource_mapping.py @@ -33,9 +33,6 @@ from oslo_utils import excutils import sqlalchemy as sa from sqlalchemy.orm import exc as sa_exc -from gbpservice._i18n import _LE -from gbpservice._i18n import _LI -from gbpservice._i18n import _LW from gbpservice.common import utils from gbpservice.network.neutronv2 import local_api from gbpservice.neutron.db.grouppolicy import group_policy_db as gpdb @@ -318,9 +315,9 @@ class ImplicitResourceOperations(local_api.LocalAPI, filters={'address_scope_id': [address_scope_id]}) if subpools: - LOG.warning(_LW("Cannot delete implicitly created " - "address_scope %(id)s since it has " - "associated subnetpools: %(pools)s"), + LOG.warning("Cannot delete implicitly created " + "address_scope %(id)s since it has " + "associated subnetpools: %(pools)s", {'id': address_scope_id, 'pools': subpools}) else: self._delete_address_scope(plugin_context, address_scope_id) @@ -358,9 +355,9 @@ class ImplicitResourceOperations(local_api.LocalAPI, filters={'subnetpool_id': [subnetpool_id]}) if subnets: - LOG.warning(_LW("Cannot delete implicitly created " - "subnetpool %(id)s since it has " - "associated subnets: %(subnets)s"), + LOG.warning("Cannot delete implicitly created " + "subnetpool %(id)s since it has " + "associated subnets: %(subnets)s", {'id': subnetpool_id, 'subnets': subnets}) else: 
self._delete_subnetpool(plugin_context, subnetpool_id) @@ -631,10 +628,10 @@ class ImplicitResourceOperations(local_api.LocalAPI, except Exception as e: if isinstance(e, oslo_db_excp.RetryRequest): raise e - LOG.info(_LI("Allocating subnet from subnetpool %(sp)s " - "failed. Allocation will be attempted " - "from any other configured " - "subnetpool(s). Exception: %(excp)s"), + LOG.info("Allocating subnet from subnetpool %(sp)s " + "failed. Allocation will be attempted " + "from any other configured " + "subnetpool(s). Exception: %(excp)s", {'sp': pool['id'], 'excp': type(e)}) last = e continue @@ -725,7 +722,7 @@ class ImplicitResourceOperations(local_api.LocalAPI, context.set_port_id(port_id) return except n_exc.IpAddressGenerationFailure as ex: - LOG.warning(_LW("No more address available in subnet %s"), + LOG.warning("No more address available in subnet %s", subnet['id']) last = ex raise last @@ -735,7 +732,7 @@ class ImplicitResourceOperations(local_api.LocalAPI, try: self._delete_port(plugin_context, port_id) except n_exc.PortNotFound: - LOG.warning(_LW("Port %s is missing"), port_id) + LOG.warning("Port %s is missing", port_id) def _reject_invalid_router_access(self, context): # Validate if the explicit router(s) belong to the tenant. 
@@ -782,8 +779,8 @@ class ImplicitResourceOperations(local_api.LocalAPI, self._add_router_interface(plugin_context, router_id, interface_info) except n_exc.BadRequest as e: - LOG.exception(_LE("Adding subnet to router failed, exception:" - "%s"), e) + LOG.exception("Adding subnet to router failed, exception:" + "%s", e) raise exc.GroupPolicyInternalError() def _add_router_interface_for_subnet(self, context, router_id, subnet_id): @@ -1109,9 +1106,9 @@ class ImplicitResourceOperations(local_api.LocalAPI, context, l2_policy_id) fip_ids = [] if not external_segments: - LOG.error(_LE("Network Service Policy to allocate Floating IP " - "could not be applied because l3policy does " - "not have an attached external segment")) + LOG.error("Network Service Policy to allocate Floating IP " + "could not be applied because l3policy does " + "not have an attached external segment") return fip_ids tenant_id = context.current['tenant_id'] @@ -1153,7 +1150,7 @@ class ImplicitResourceOperations(local_api.LocalAPI, # FIP allocated, no need to try further allocation break except n_exc.IpAddressGenerationFailure as ex: - LOG.warning(_LW("Floating allocation failed: %s"), + LOG.warning("Floating allocation failed: %s", ex.message) if fip_id: fip_ids.append(fip_id) @@ -1261,10 +1258,10 @@ class ImplicitResourceOperations(local_api.LocalAPI, filters={'name': [ gpip.default_external_segment_name]})) if not external_segments: - LOG.error(_LE( + LOG.error( "Network Service Policy to allocate Floating " "IP could not be associated because l3policy " - "does not have an attached external segment")) + "does not have an attached external segment") raise exc.NSPRequiresES() for es in external_segments: if not es['nat_pools']: @@ -1286,9 +1283,9 @@ class ImplicitResourceOperations(local_api.LocalAPI, free_ip = self._get_last_free_ip(context._plugin_context, context.current['subnets']) if not free_ip: - LOG.error(_LE("Reserving IP Addresses failed for Network " - "Service Policy. 
No more IP Addresses on " - "subnet")) + LOG.error("Reserving IP Addresses failed for Network " + "Service Policy. No more IP Addresses on " + "subnet") return # TODO(Magesh):Fetch subnet from PTG to which NSP is attached self._remove_ip_from_allocation_pool( @@ -1640,7 +1637,7 @@ class ResourceMappingDriver(api.PolicyDriver, ImplicitResourceOperations, policy_target = context._plugin.get_policy_target( context._plugin_context, pt_id) except gp_ext.PolicyTargetNotFound: - LOG.warning(_LW("Attempted to fetch deleted Service Target (QoS)")) + LOG.warning("Attempted to fetch deleted Service Target (QoS)") else: port_id = policy_target['port_id'] port = {attributes.PORT: {'qos_policy_id': None}} @@ -1703,16 +1700,16 @@ class ResourceMappingDriver(api.PolicyDriver, ImplicitResourceOperations, return tenant.id except k_exceptions.NotFound: with excutils.save_and_reraise_exception(reraise=True): - LOG.error(_LE('No tenant with name %s exists.'), tenant) + LOG.error('No tenant with name %s exists.', tenant) except k_exceptions.NoUniqueMatch: with excutils.save_and_reraise_exception(reraise=True): - LOG.error(_LE('Multiple tenants matches found for %s'), tenant) + LOG.error('Multiple tenants matches found for %s', tenant) except k_exceptions.AuthorizationFailure: - LOG.error(_LE("User: %(user)s dont have permissions"), + LOG.error("User: %(user)s dont have permissions", {'user': user}) except k_exceptions.Unauthorized: - LOG.error(_LE("Wrong credentials provided: user: %(user)s, " - "password: %(pwd)s, tenant: %(tenant)s"), + LOG.error("Wrong credentials provided: user: %(user)s, " + "password: %(pwd)s, tenant: %(tenant)s", {'user': user, 'pwd': pwd, 'tenant': tenant}) @log.log_method_call @@ -1940,7 +1937,7 @@ class ResourceMappingDriver(api.PolicyDriver, ImplicitResourceOperations, context.nsp_cleanup_ipaddress, context.nsp_cleanup_fips) except sa_exc.ObjectDeletedError as err: - LOG.warning(_LW("Object already got deleted. 
Error: %(err)s"), + LOG.warning("Object already got deleted. Error: %(err)s", {'err': err}) # Cleanup SGs self._unset_sg_rules_for_subnets( @@ -2549,7 +2546,7 @@ class ResourceMappingDriver(api.PolicyDriver, ImplicitResourceOperations, context._plugin_context, subnet_id, router_id) except n_exc.InvalidInput: # This exception is not expected. - LOG.exception(_LE("adding subnet to router failed")) + LOG.exception("adding subnet to router failed") for subnet_id in subnet_ids: self._delete_subnet(context._plugin_context, subnet_id) raise exc.GroupPolicyInternalError() @@ -2598,7 +2595,7 @@ class ResourceMappingDriver(api.PolicyDriver, ImplicitResourceOperations, except n_exc.InvalidInput: # This exception is not expected. # TODO(ivar): find a better way to rollback - LOG.exception(_LE("adding subnet to router failed")) + LOG.exception("adding subnet to router failed") for subnet_id in subnet_ids: self._delete_subnet(context._plugin_context, subnet_id) raise exc.GroupPolicyInternalError() @@ -2688,7 +2685,7 @@ class ResourceMappingDriver(api.PolicyDriver, ImplicitResourceOperations, pt = context._plugin.get_policy_target(context._plugin_context, pt_id) except gp_ext.PolicyTargetNotFound: - LOG.warning(_LW("PT %s doesn't exist anymore"), pt_id) + LOG.warning("PT %s doesn't exist anymore", pt_id) return try: port_id = pt['port_id'] @@ -2702,14 +2699,14 @@ class ResourceMappingDriver(api.PolicyDriver, ImplicitResourceOperations, port[ext_sg.SECURITYGROUPS] = new_sg_list self._update_port(context._plugin_context, port_id, port) except n_exc.PortNotFound: - LOG.warning(_LW("Port %s is missing"), port_id) + LOG.warning("Port %s is missing", port_id) def _disassoc_sgs_from_pt(self, context, pt_id, sg_list): try: pt = context._plugin.get_policy_target(context._plugin_context, pt_id) except gp_ext.PolicyTargetNotFound: - LOG.warning(_LW("PT %s doesn't exist anymore"), pt_id) + LOG.warning("PT %s doesn't exist anymore", pt_id) return port_id = pt['port_id'] 
self._disassoc_sgs_from_port(context._plugin_context, port_id, sg_list) @@ -2726,7 +2723,7 @@ class ResourceMappingDriver(api.PolicyDriver, ImplicitResourceOperations, port[ext_sg.SECURITYGROUPS] = new_sg_list self._update_port(plugin_context, port_id, port) except n_exc.PortNotFound: - LOG.warning(_LW("Port %s is missing"), port_id) + LOG.warning("Port %s is missing", port_id) def _generate_list_of_sg_from_ptg(self, context, ptg_id): ptg = context._plugin.get_policy_target_group( diff --git a/gbpservice/neutron/services/grouppolicy/extension_manager.py b/gbpservice/neutron/services/grouppolicy/extension_manager.py index bb18fe1c3..8273f7642 100644 --- a/gbpservice/neutron/services/grouppolicy/extension_manager.py +++ b/gbpservice/neutron/services/grouppolicy/extension_manager.py @@ -17,8 +17,6 @@ from oslo_log import log from oslo_utils import excutils import stevedore -from gbpservice._i18n import _LE -from gbpservice._i18n import _LI from gbpservice.neutron.services.grouppolicy.common import exceptions as gp_exc @@ -33,14 +31,14 @@ class ExtensionManager(stevedore.named.NamedExtensionManager): # the order in which the drivers are called. 
self.ordered_ext_drivers = [] - LOG.info(_LI("Configured extension driver names: %s"), + LOG.info("Configured extension driver names: %s", cfg.CONF.group_policy.extension_drivers) super(ExtensionManager, self).__init__( 'gbpservice.neutron.group_policy.extension_drivers', cfg.CONF.group_policy.extension_drivers, invoke_on_load=True, name_order=True) - LOG.info(_LI("Loaded extension driver names: %s"), self.names()) + LOG.info("Loaded extension driver names: %s", self.names()) self._register_drivers() def _register_drivers(self): @@ -51,13 +49,13 @@ class ExtensionManager(stevedore.named.NamedExtensionManager): """ for ext in self: self.ordered_ext_drivers.append(ext) - LOG.info(_LI("Registered extension drivers: %s"), + LOG.info("Registered extension drivers: %s", [driver.name for driver in self.ordered_ext_drivers]) def initialize(self): # Initialize each driver in the list. for driver in self.ordered_ext_drivers: - LOG.info(_LI("Initializing extension driver '%s'"), driver.name) + LOG.info("Initializing extension driver '%s'", driver.name) driver.obj.initialize() def extension_aliases(self): @@ -65,7 +63,7 @@ class ExtensionManager(stevedore.named.NamedExtensionManager): for driver in self.ordered_ext_drivers: alias = driver.obj.extension_alias exts.append(alias) - LOG.info(_LI("Got %(alias)s extension from driver '%(drv)s'"), + LOG.info("Got %(alias)s extension from driver '%(drv)s'", {'alias': alias, 'drv': driver.name}) return exts @@ -77,13 +75,13 @@ class ExtensionManager(stevedore.named.NamedExtensionManager): except (gp_exc.GroupPolicyException, n_exc.NeutronException): with excutils.save_and_reraise_exception(): LOG.exception( - _LE("Extension driver '%(name)s' " - "failed in %(method)s"), + "Extension driver '%(name)s' " + "failed in %(method)s", {'name': driver.name, 'method': method_name} ) except Exception: - LOG.exception(_LE("Extension driver '%(name)s' " - "failed in %(method)s"), + LOG.exception("Extension driver '%(name)s' " + "failed in 
%(method)s", {'name': driver.name, 'method': method_name}) # We are replacing a non-GBP/non-Neutron exception here raise gp_exc.GroupPolicyDriverError(method=method_name) diff --git a/gbpservice/neutron/services/grouppolicy/plugin.py b/gbpservice/neutron/services/grouppolicy/plugin.py index 57ddfba97..a3fc1f023 100644 --- a/gbpservice/neutron/services/grouppolicy/plugin.py +++ b/gbpservice/neutron/services/grouppolicy/plugin.py @@ -24,8 +24,6 @@ from oslo_log import helpers as log from oslo_log import log as logging from oslo_utils import excutils -from gbpservice._i18n import _LE -from gbpservice._i18n import _LW from gbpservice.common import utils as gbp_utils from gbpservice.neutron.db.grouppolicy import group_policy_db as gpdb from gbpservice.neutron.db.grouppolicy import group_policy_mapping_db @@ -78,7 +76,7 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin): # plugins are loaded to grab and store plugin. servicechain_plugin = directory.get_plugin(pconst.SERVICECHAIN) if not servicechain_plugin: - LOG.error(_LE("No Servicechain service plugin found.")) + LOG.error("No Servicechain service plugin found.") raise gp_exc.GroupPolicyDeploymentError() return servicechain_plugin @@ -484,8 +482,8 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin): policy_context) except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_LE("create_policy_target_postcommit " - "failed, deleting policy_target %s"), + LOG.exception("create_policy_target_postcommit " + "failed, deleting policy_target %s", result['id']) self.delete_policy_target(context, result['id']) @@ -536,8 +534,8 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin): self.policy_driver_manager.delete_policy_target_postcommit( policy_context) except Exception: - LOG.exception(_LE("delete_policy_target_postcommit failed " - "for policy_target %s"), + LOG.exception("delete_policy_target_postcommit failed " + "for 
policy_target %s", policy_target_id) @log.log_method_call @@ -583,8 +581,8 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin): policy_context) except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_LE("create_policy_target_group_postcommit " - "failed, deleting policy_target_group %s"), + LOG.exception("create_policy_target_group_postcommit " + "failed, deleting policy_target_group %s", result['id']) self.delete_policy_target_group(context, result['id']) @@ -666,7 +664,7 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin): self.delete_policy_target_group( context, policy_target_group['proxy_group_id']) except gpex.PolicyTargetGroupNotFound: - LOG.warning(_LW('PTG %s already deleted'), + LOG.warning('PTG %s already deleted', policy_target_group['proxy_group_id']) with session.begin(subtransactions=True): @@ -684,8 +682,8 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin): self.policy_driver_manager.delete_policy_target_group_postcommit( policy_context) except Exception: - LOG.exception(_LE("delete_policy_target_group_postcommit failed " - "for policy_target_group %s"), + LOG.exception("delete_policy_target_group_postcommit failed " + "for policy_target_group %s", policy_target_group_id) @log.log_method_call @@ -731,8 +729,8 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin): pdm.create_application_policy_group_postcommit(policy_context) except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_LE("create_application_policy_group_postcommit " - "failed, deleting APG %s"), + LOG.exception("create_application_policy_group_postcommit " + "failed, deleting APG %s", result['id']) self.delete_application_policy_group(context, result['id']) @@ -792,8 +790,8 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin): try: pdm.delete_application_policy_group_postcommit(policy_context) except Exception: 
- LOG.exception(_LE("delete_application_policy_group_postcommit " - "failed for application_policy_group %s"), + LOG.exception("delete_application_policy_group_postcommit " + "failed for application_policy_group %s", application_policy_group_id) @log.log_method_call @@ -836,8 +834,8 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin): policy_context) except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_LE("create_l2_policy_postcommit " - "failed, deleting l2_policy %s"), + LOG.exception("create_l2_policy_postcommit " + "failed, deleting l2_policy %s", result['id']) self.delete_l2_policy(context, result['id']) @@ -886,8 +884,8 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin): self.policy_driver_manager.delete_l2_policy_postcommit( policy_context) except Exception: - LOG.exception(_LE("delete_l2_policy_postcommit failed " - "for l2_policy %s"), l2_policy_id) + LOG.exception("delete_l2_policy_postcommit failed " + "for l2_policy %s", l2_policy_id) @log.log_method_call @db_api.retry_if_session_inactive() @@ -932,9 +930,9 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin): policy_context) except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_LE( + LOG.exception( "create_network_service_policy_postcommit " - "failed, deleting network_service_policy %s"), + "failed, deleting network_service_policy %s", result['id']) self.delete_network_service_policy(context, result['id']) @@ -991,9 +989,9 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin): pdm = self.policy_driver_manager pdm.delete_network_service_policy_postcommit(policy_context) except Exception: - LOG.exception(_LE( + LOG.exception( "delete_network_service_policy_postcommit failed " - "for network_service_policy %s"), network_service_policy_id) + "for network_service_policy %s", network_service_policy_id) @log.log_method_call 
@db_api.retry_if_session_inactive() @@ -1036,8 +1034,8 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin): policy_context) except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_LE("create_l3_policy_postcommit " - "failed, deleting l3_policy %s"), + LOG.exception("create_l3_policy_postcommit " + "failed, deleting l3_policy %s", result['id']) self.delete_l3_policy(context, result['id']) @@ -1091,8 +1089,8 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin): self.policy_driver_manager.delete_l3_policy_postcommit( policy_context) except Exception: - LOG.exception(_LE("delete_l3_policy_postcommit failed " - "for l3_policy %s"), l3_policy_id) + LOG.exception("delete_l3_policy_postcommit failed " + "for l3_policy %s", l3_policy_id) return True @log.log_method_call @@ -1137,9 +1135,9 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin): policy_context) except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_LE( + LOG.exception( "policy_driver_manager.create_policy_classifier_postcommit" - " failed, deleting policy_classifier %s"), result['id']) + " failed, deleting policy_classifier %s", result['id']) self.delete_policy_classifier(context, result['id']) return self.get_policy_classifier(context, result['id']) @@ -1188,8 +1186,8 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin): self.policy_driver_manager.delete_policy_classifier_postcommit( policy_context) except Exception: - LOG.exception(_LE("delete_policy_classifier_postcommit failed " - "for policy_classifier %s"), id) + LOG.exception("delete_policy_classifier_postcommit failed " + "for policy_classifier %s", id) @log.log_method_call @db_api.retry_if_session_inactive() @@ -1233,9 +1231,9 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin): policy_context) except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_LE( 
+ LOG.exception( "policy_driver_manager.create_policy_action_postcommit " - "failed, deleting policy_action %s"), result['id']) + "failed, deleting policy_action %s", result['id']) self.delete_policy_action(context, result['id']) return self.get_policy_action(context, result['id']) @@ -1284,8 +1282,8 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin): self.policy_driver_manager.delete_policy_action_postcommit( policy_context) except Exception: - LOG.exception(_LE("delete_policy_action_postcommit failed " - "for policy_action %s"), id) + LOG.exception("delete_policy_action_postcommit failed " + "for policy_action %s", id) @log.log_method_call @db_api.retry_if_session_inactive() @@ -1327,9 +1325,9 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin): policy_context) except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_LE( + LOG.exception( "policy_driver_manager.create_policy_rule_postcommit" - " failed, deleting policy_rule %s"), result['id']) + " failed, deleting policy_rule %s", result['id']) self.delete_policy_rule(context, result['id']) return self.get_policy_rule(context, result['id']) @@ -1377,8 +1375,8 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin): self.policy_driver_manager.delete_policy_rule_postcommit( policy_context) except Exception: - LOG.exception(_LE("delete_policy_rule_postcommit failed " - "for policy_rule %s"), id) + LOG.exception("delete_policy_rule_postcommit failed " + "for policy_rule %s", id) @log.log_method_call @db_api.retry_if_session_inactive() @@ -1421,9 +1419,9 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin): policy_context) except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_LE( + LOG.exception( "policy_driver_manager.create_policy_rule_set_postcommit " - "failed, deleting policy_rule_set %s"), result['id']) + "failed, deleting policy_rule_set %s", result['id']) 
self.delete_policy_rule_set(context, result['id']) return self.get_policy_rule_set(context, result['id']) @@ -1471,8 +1469,8 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin): self.policy_driver_manager.delete_policy_rule_set_postcommit( policy_context) except Exception: - LOG.exception(_LE("delete_policy_rule_set_postcommit failed " - "for policy_rule_set %s"), id) + LOG.exception("delete_policy_rule_set_postcommit failed " + "for policy_rule_set %s", id) @log.log_method_call @db_api.retry_if_session_inactive() @@ -1518,9 +1516,9 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin): create_external_segment_postcommit(policy_context)) except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_LE("create_external_segment_postcommit " - "failed, deleting external_segment " - "%s"), result['id']) + LOG.exception("create_external_segment_postcommit " + "failed, deleting external_segment " + "%s", result['id']) self.delete_external_segment(context, result['id']) return self.get_external_segment(context, result['id']) @@ -1577,8 +1575,8 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin): (self.policy_driver_manager. 
delete_external_segment_postcommit(policy_context)) except Exception: - LOG.exception(_LE("delete_external_segment_postcommit failed " - "for external_segment %s"), + LOG.exception("delete_external_segment_postcommit failed " + "for external_segment %s", external_segment_id) return True @@ -1623,9 +1621,9 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin): create_external_policy_postcommit(policy_context)) except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_LE("create_external_policy_postcommit " - "failed, deleting external_policy " - "%s"), result['id']) + LOG.exception("create_external_policy_postcommit " + "failed, deleting external_policy " + "%s", result['id']) self.delete_external_policy(context, result['id']) return self.get_external_policy(context, result['id']) @@ -1678,8 +1676,8 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin): self.policy_driver_manager.delete_external_policy_postcommit( policy_context) except Exception: - LOG.exception(_LE("delete_external_policy_postcommit failed " - "for external_policy %s"), external_policy_id) + LOG.exception("delete_external_policy_postcommit failed " + "for external_policy %s", external_policy_id) @log.log_method_call @db_api.retry_if_session_inactive() @@ -1719,9 +1717,9 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin): create_nat_pool_postcommit(policy_context)) except Exception: with excutils.save_and_reraise_exception(): - LOG.exception(_LE( + LOG.exception( "create_nat_pool_postcommit failed, deleting " - "nat_pool %s"), result['id']) + "nat_pool %s", result['id']) self.delete_nat_pool(context, result['id']) return self.get_nat_pool(context, result['id']) @@ -1766,8 +1764,8 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin): self.policy_driver_manager.delete_nat_pool_postcommit( policy_context) except Exception: - LOG.exception(_LE("delete_nat_pool_postcommit failed " 
- "for nat_pool %s"), + LOG.exception("delete_nat_pool_postcommit failed " + "for nat_pool %s", nat_pool_id) @log.log_method_call diff --git a/gbpservice/neutron/services/grouppolicy/policy_driver_manager.py b/gbpservice/neutron/services/grouppolicy/policy_driver_manager.py index c0e79b57a..190cd7fb2 100644 --- a/gbpservice/neutron/services/grouppolicy/policy_driver_manager.py +++ b/gbpservice/neutron/services/grouppolicy/policy_driver_manager.py @@ -18,8 +18,6 @@ from oslo_policy import policy as oslo_policy from oslo_utils import excutils import stevedore -from gbpservice._i18n import _LE -from gbpservice._i18n import _LI from gbpservice.neutron.services.grouppolicy.common import exceptions as gp_exc from gbpservice.neutron.services.grouppolicy import group_policy_driver_api @@ -69,14 +67,14 @@ class PolicyDriverManager(stevedore.named.NamedExtensionManager): self.ordered_policy_drivers = [] self.reverse_ordered_policy_drivers = [] - LOG.info(_LI("Configured policy driver names: %s"), + LOG.info("Configured policy driver names: %s", cfg.CONF.group_policy.policy_drivers) super(PolicyDriverManager, self).__init__('gbpservice.neutron.group_policy.policy_drivers', cfg.CONF.group_policy.policy_drivers, invoke_on_load=True, name_order=True) - LOG.info(_LI("Loaded policy driver names: %s"), self.names()) + LOG.info("Loaded policy driver names: %s", self.names()) self._register_policy_drivers() def _register_policy_drivers(self): @@ -90,7 +88,7 @@ class PolicyDriverManager(stevedore.named.NamedExtensionManager): self.ordered_policy_drivers.append(ext) self.reverse_ordered_policy_drivers = self.ordered_policy_drivers[::-1] - LOG.info(_LI("Registered policy drivers: %s"), + LOG.info("Registered policy drivers: %s", [driver.name for driver in self.ordered_policy_drivers]) def initialize(self): @@ -100,7 +98,7 @@ class PolicyDriverManager(stevedore.named.NamedExtensionManager): # set it to True such that the drivers can override it. 
self.native_bulk_support = False for driver in self.ordered_policy_drivers: - LOG.info(_LI("Initializing policy driver '%s'"), driver.name) + LOG.info("Initializing policy driver '%s'", driver.name) driver.obj.initialize() self.native_bulk_support &= getattr(driver.obj, 'native_bulk_support', True) @@ -143,15 +141,15 @@ class PolicyDriverManager(stevedore.named.NamedExtensionManager): e, oslo_policy.PolicyNotAuthorized): with excutils.save_and_reraise_exception(): LOG.exception( - _LE("Policy driver '%(name)s' failed in" - " %(method)s"), + "Policy driver '%(name)s' failed in" + " %(method)s", {'name': driver.name, 'method': method_name} ) else: error = True # We are eating a non-GBP/non-Neutron exception here LOG.exception( - _LE("Policy driver '%(name)s' failed in %(method)s"), + "Policy driver '%(name)s' failed in %(method)s", {'name': driver.name, 'method': method_name}) if not continue_on_failure: break @@ -173,8 +171,8 @@ class PolicyDriverManager(stevedore.named.NamedExtensionManager): "ensure_tenant, operation will " "be retried", {'driver': driver.name}) else: - LOG.exception(_LE("Policy driver '%s' failed in " - "ensure_tenant"), driver.name) + LOG.exception("Policy driver '%s' failed in " + "ensure_tenant", driver.name) raise gp_exc.GroupPolicyDriverError( method="ensure_tenant") diff --git a/gbpservice/neutron/services/servicechain/plugins/ncp/node_driver_manager.py b/gbpservice/neutron/services/servicechain/plugins/ncp/node_driver_manager.py index 61bc0615d..d17882908 100644 --- a/gbpservice/neutron/services/servicechain/plugins/ncp/node_driver_manager.py +++ b/gbpservice/neutron/services/servicechain/plugins/ncp/node_driver_manager.py @@ -15,7 +15,6 @@ from oslo_config import cfg from oslo_log import log as logging import stevedore -from gbpservice._i18n import _LI from gbpservice.neutron.services.servicechain.plugins.ncp import config # noqa from gbpservice.neutron.services.servicechain.plugins.ncp import model @@ -33,13 +32,13 @@ class 
NodeDriverManager(stevedore.named.NamedExtensionManager): # Ordered list of node drivers. self.ordered_drivers = [] names = cfg.CONF.node_composition_plugin.node_drivers - LOG.info(_LI("Configured service chain node driver names: %s"), names) + LOG.info("Configured service chain node driver names: %s", names) super(NodeDriverManager, self).__init__( 'gbpservice.neutron.servicechain.ncp_drivers', names, invoke_on_load=True, name_order=True) - LOG.info(_LI( - "Loaded service chain node driver names: %s"), self.names()) + LOG.info( + "Loaded service chain node driver names: %s", self.names()) self._register_drivers() def _register_drivers(self): @@ -47,14 +46,14 @@ class NodeDriverManager(stevedore.named.NamedExtensionManager): for ext in self: self.drivers[ext.name] = ext self.ordered_drivers.append(ext) - LOG.info(_LI("Registered service chain node drivers: %s"), + LOG.info("Registered service chain node drivers: %s", [driver.name for driver in self.ordered_drivers]) def initialize(self): """Initialize all the service chain node drivers.""" self.native_bulk_support = True for driver in self.ordered_drivers: - LOG.info(_LI("Initializing service chain node drivers '%s'"), + LOG.info("Initializing service chain node drivers '%s'", driver.name) driver.obj.initialize(driver.name) self.native_bulk_support &= getattr(driver.obj, diff --git a/gbpservice/neutron/services/servicechain/plugins/ncp/node_drivers/heat_node_driver.py b/gbpservice/neutron/services/servicechain/plugins/ncp/node_drivers/heat_node_driver.py index 0c255c0ba..80b1fa0e4 100644 --- a/gbpservice/neutron/services/servicechain/plugins/ncp/node_drivers/heat_node_driver.py +++ b/gbpservice/neutron/services/servicechain/plugins/ncp/node_drivers/heat_node_driver.py @@ -24,7 +24,6 @@ from oslo_log import log as logging from oslo_serialization import jsonutils import sqlalchemy as sa -from gbpservice._i18n import _LE from gbpservice.neutron.services.servicechain.plugins.ncp import ( exceptions as exc) from 
gbpservice.neutron.services.servicechain.plugins.ncp import driver_base @@ -374,15 +373,15 @@ class HeatNodeDriver(driver_base.NodeDriverBase): 'DELETE_IN_PROGRESS']: return except Exception: - LOG.exception(_LE("Retrieving the stack %(stack)s failed."), + LOG.exception("Retrieving the stack %(stack)s failed.", {'stack': stack_id}) return else: time.sleep(STACK_ACTION_RETRY_WAIT) time_waited = time_waited + STACK_ACTION_RETRY_WAIT if time_waited >= STACK_ACTION_WAIT_TIME: - LOG.error(_LE("Stack %(action)s not completed within " - "%(wait)s seconds"), + LOG.error("Stack %(action)s not completed within " + "%(wait)s seconds", {'action': action, 'wait': STACK_ACTION_WAIT_TIME, 'stack': stack_id}) diff --git a/gbpservice/neutron/services/servicechain/plugins/ncp/node_drivers/nfp_node_driver.py b/gbpservice/neutron/services/servicechain/plugins/ncp/node_drivers/nfp_node_driver.py index 4983efae7..f773e98bb 100644 --- a/gbpservice/neutron/services/servicechain/plugins/ncp/node_drivers/nfp_node_driver.py +++ b/gbpservice/neutron/services/servicechain/plugins/ncp/node_drivers/nfp_node_driver.py @@ -31,9 +31,6 @@ from oslo_utils import excutils import sqlalchemy as sa from gbpservice._i18n import _ -from gbpservice._i18n import _LE -from gbpservice._i18n import _LI -from gbpservice._i18n import _LW from gbpservice.common import utils from gbpservice.network.neutronv2 import local_api from gbpservice.neutron.services.grouppolicy.common import constants as gconst @@ -166,9 +163,9 @@ class NFPClientApi(object): self.client = n_rpc.get_client(target) def create_network_function(self, context, network_function): - LOG.info(_LI("Sending RPC CREATE NETWORK FUNCTION to Service " - "Orchestrator for tenant:%(tenant_id)s with " - "service profile:%(service_profile_id)s"), + LOG.info("Sending RPC CREATE NETWORK FUNCTION to Service " + "Orchestrator for tenant:%(tenant_id)s with " + "service profile:%(service_profile_id)s", {'tenant_id': network_function['tenant_id'], 
'service_profile_id': network_function[ 'service_profile']['id']}) @@ -181,9 +178,9 @@ class NFPClientApi(object): def delete_network_function(self, context, network_function_id, network_function_data): - LOG.info(_LI("Sending RPC DELETE NETWORK FUNCTION to Service " - "Orchestrator for NF:" - "%(network_function_id)s"), + LOG.info("Sending RPC DELETE NETWORK FUNCTION to Service " + "Orchestrator for NF:" + "%(network_function_id)s", {'network_function_id': network_function_id}) cctxt = self.client.prepare(version=self.RPC_API_VERSION) return cctxt.cast( @@ -193,9 +190,9 @@ class NFPClientApi(object): network_function_data=network_function_data) def update_network_function(self, context, network_function_id, config): - LOG.info(_LI("Sending RPC UPDATE NETWORK FUNCTION to Service " - "Orchestrator for NF:" - "%(network_function_id)s"), + LOG.info("Sending RPC UPDATE NETWORK FUNCTION to Service " + "Orchestrator for NF:" + "%(network_function_id)s", {'network_function_id': network_function_id}) cctxt = self.client.prepare(version=self.RPC_API_VERSION) return cctxt.cast( @@ -215,9 +212,9 @@ class NFPClientApi(object): def consumer_ptg_added_notification(self, context, network_function_id, policy_target_group): - LOG.info(_LI("Sending RPC CONSUMER PTG ADDED NOTIFICATION to Service " - "Orchestrator for NF:" - "%(network_function_id)s"), + LOG.info("Sending RPC CONSUMER PTG ADDED NOTIFICATION to Service " + "Orchestrator for NF:" + "%(network_function_id)s", {'network_function_id': network_function_id}) cctxt = self.client.prepare(version=self.RPC_API_VERSION) return cctxt.cast(context, @@ -227,8 +224,8 @@ class NFPClientApi(object): def consumer_ptg_removed_notification(self, context, network_function_id, policy_target_group): - LOG.info(_LI("Sending RPC CONSUMER PTG REMOVED NOTIFICATION to " - " Service Orchestrator for NF:%(network_function_id)s"), + LOG.info("Sending RPC CONSUMER PTG REMOVED NOTIFICATION to " + " Service Orchestrator for NF:%(network_function_id)s", 
{'network_function_id': network_function_id}) cctxt = self.client.prepare(version=self.RPC_API_VERSION) return cctxt.cast(context, @@ -238,8 +235,8 @@ class NFPClientApi(object): def policy_target_added_notification(self, context, network_function_id, policy_target): - LOG.info(_LI("Sending RPC POLICY TARGET ADDED NOTIFICATION to " - "Service Orchestrator for NF:%(network_function_id)s"), + LOG.info("Sending RPC POLICY TARGET ADDED NOTIFICATION to " + "Service Orchestrator for NF:%(network_function_id)s", {'network_function_id': network_function_id}) cctxt = self.client.prepare(version=self.RPC_API_VERSION) return cctxt.cast(context, @@ -249,8 +246,8 @@ class NFPClientApi(object): def policy_target_removed_notification(self, context, network_function_id, policy_target): - LOG.info(_LI("Sending RPC POLICY TARGET REMOVED NOTIFICATION to " - "Service Orchestrator for NF:%(network_function_id)s"), + LOG.info("Sending RPC POLICY TARGET REMOVED NOTIFICATION to " + "Service Orchestrator for NF:%(network_function_id)s", {'network_function_id': network_function_id}) cctxt = self.client.prepare(version=self.RPC_API_VERSION) return cctxt.cast(context, @@ -259,7 +256,7 @@ class NFPClientApi(object): policy_target=policy_target) def get_plumbing_info(self, context, node_driver_ctxt): - LOG.info(_LI("Sending RPC GET PLUMBING INFO to Service Orchestrator ")) + LOG.info("Sending RPC GET PLUMBING INFO to Service Orchestrator ") request_info = dict(profile=node_driver_ctxt.current_profile, tenant_id=node_driver_ctxt.provider['tenant_id'], provider=node_driver_ctxt.provider) @@ -388,9 +385,9 @@ class NFPNodeDriver(driver_base.NodeDriverBase): nfp_context = NFPContext.get_nfp_context(context.instance['id']) if nfp_context: if len(nfp_context['sc_gateway_type_nodes']): - LOG.info(_LI( + LOG.info( "Not requesting plumber for PTs for service type " - "%(service_type)s"), {'service_type': service_type}) + "%(service_type)s", {'service_type': service_type}) if not nfp_context['update']: 
nfp_context['sc_gateway_type_nodes'].append( gateway_type_node) @@ -421,9 +418,9 @@ class NFPNodeDriver(driver_base.NodeDriverBase): plumbing_request = self.nfp_notifier.get_plumbing_info( context._plugin_context, context) - LOG.info(_LI("Requesting plumber for PTs for " - "service type %(service_type)s with " - "%(plumbing_request)s "), + LOG.info("Requesting plumber for PTs for " + "service type %(service_type)s with " + "%(plumbing_request)s ", {'plumbing_request': plumbing_request, 'service_type': service_type}) return plumbing_request @@ -508,11 +505,11 @@ class NFPNodeDriver(driver_base.NodeDriverBase): context.plugin_session, context.current_node['id'], context.instance['id'], network_function_id, status, status_details) - LOG.info(_LI("Processed create NF in node driver." + LOG.info("Processed create NF in node driver." "servicechain_instance_id: %(sci_id)s, " - "servicechain_node_id: %(scn_id)s"), { - 'sci_id': context.instance['id'], - 'scn_id': context.current_node['id']}) + "servicechain_node_id: %(scn_id)s", { + 'sci_id': context.instance['id'], + 'scn_id': context.current_node['id']}) def _wait_for_node_operation_completion(self, context, network_function_id, operation): @@ -618,7 +615,7 @@ class NFPNodeDriver(driver_base.NodeDriverBase): self._delete_network_function(context, network_function_id) except Exception: # NFPContext.clear_nfp_context(context.instance['id']) - LOG.exception(_LE("Delete Network service Failed")) + LOG.exception("Delete Network service Failed") exc_type, exc_value, exc_traceback = sys.exc_info() message = "Traceback: %s" % (exc_value) LOG.error(message) @@ -772,8 +769,8 @@ class NFPNodeDriver(driver_base.NodeDriverBase): time_waited = time_waited + 5 if network_function: - LOG.error(_LE("Delete network function %(network_function)s " - "failed"), + LOG.error("Delete network function %(network_function)s " + "failed", {'network_function': network_function_id}) raise NodeInstanceDeleteFailed() @@ -794,38 +791,38 @@ class 
NFPNodeDriver(driver_base.NodeDriverBase): "time waited: %s", (network_function_id, operation, time_waited, network_function['status'])) if not network_function: - LOG.error(_LE("Failed to retrieve network function")) + LOG.error("Failed to retrieve network function") eventlet.sleep(5) time_waited = time_waited + 5 continue else: if time_waited == 0: - LOG.info(_LI("STARTED POLLING for %(operation)s network " - "function for NF:%(network_function_id)s " - "with initial result: %(result)s "), + LOG.info("STARTED POLLING for %(operation)s network " + "function for NF:%(network_function_id)s " + "with initial result: %(result)s ", {'operation': operation, 'network_function_id': network_function_id, 'result': network_function}) if (network_function['status'] == nfp_constants.ACTIVE or network_function['status'] == nfp_constants.ERROR): - LOG.info(_LI("COMPLETED POLLING for %(operation)s network " - "function for NF:%(network_function_id)s "), + LOG.info("COMPLETED POLLING for %(operation)s network " + "function for NF:%(network_function_id)s ", {'network_function_id': network_function_id, 'operation': operation}) break eventlet.sleep(5) time_waited = time_waited + 5 - LOG.info(_LI("Got %(operation)s network function result for NF:" - "%(network_function_id)s with status:%(status)s"), + LOG.info("Got %(operation)s network function result for NF:" + "%(network_function_id)s with status:%(status)s", {'network_function_id': network_function_id, 'operation': operation, 'status': network_function['status']}) if network_function['status'] != nfp_constants.ACTIVE: - LOG.error(_LE("%(operation)s network function:" - "%(network_function)s " - "failed. Status: %(status)s"), + LOG.error("%(operation)s network function:" + "%(network_function)s " + "failed. 
Status: %(status)s", {'network_function': network_function_id, 'status': network_function['status'], 'operation': operation}) @@ -852,11 +849,11 @@ class NFPNodeDriver(driver_base.NodeDriverBase): return tenant.id except k_exceptions.NotFound: with excutils.save_and_reraise_exception(reraise=True): - LOG.error(_LE('No tenant with name %(tenant)s exists.'), + LOG.error('No tenant with name %(tenant)s exists.', {'tenant': tenant}) except k_exceptions.NoUniqueMatch: with excutils.save_and_reraise_exception(reraise=True): - LOG.error(_LE('Multiple tenants matches found for %(tenant)s'), + LOG.error('Multiple tenants matches found for %(tenant)s', {'tenant': tenant}) def _get_resource_owner_context(self, plugin_context): @@ -890,11 +887,11 @@ class NFPNodeDriver(driver_base.NodeDriverBase): network_function_id, context.current_node['config']]) except Exception: - LOG.exception(_LE("Update Network service Failed for " - "network function: %(nf_id)s"), + LOG.exception("Update Network service Failed for " + "network function: %(nf_id)s", {'nf_id': network_function_id}) else: - LOG.info(_LI("No action to take on update")) + LOG.info("No action to take on update") def _get_service_chain_specs(self, context): current_specs = context.relevant_specs @@ -970,8 +967,8 @@ class NFPNodeDriver(driver_base.NodeDriverBase): if (service_details['device_type'] != 'None' and ( not provider_service_targets or (service_type in [pconst.FIREWALL, pconst.VPN] and not consumer_service_targets))): - LOG.error(_LE("Service Targets are not created for the Node " - "of service_type %(service_type)s"), + LOG.error("Service Targets are not created for the Node " + "of service_type %(service_type)s", {'service_type': service_type}) raise Exception(_("Service Targets are not created " "for the Node")) @@ -1097,8 +1094,8 @@ class NFPNodeDriver(driver_base.NodeDriverBase): break if not redirect_prs: - LOG.error(_LE("Redirect rule doesn't exist in policy target rule " - " set")) + LOG.error("Redirect 
rule doesn't exist in policy target rule " + " set") return consuming_ptgs_details, consuming_eps_details consuming_ptg_ids = redirect_prs['consuming_policy_target_groups'] @@ -1224,9 +1221,9 @@ class NFPNodeDriver(driver_base.NodeDriverBase): def _create_network_function(self, context): nfp_create_nf_data = self._get_nfp_network_function(context) - LOG.info(_LI("Received Call CREATE NETWORK FUNCTION for tenant: " - "%(tenant_id)s with service profile:" - "%(service_profile)s"), + LOG.info("Received Call CREATE NETWORK FUNCTION for tenant: " + "%(tenant_id)s with service profile:" + "%(service_profile)s", {'tenant_id': nfp_create_nf_data['tenant_id'], 'service_profile': nfp_create_nf_data['service_profile']}) self._queue_notification(context, 'create_network_function', @@ -1239,9 +1236,9 @@ class NFPNodeDriver(driver_base.NodeDriverBase): if nfp_delete_nf_data['consumer'].get('pt'): self._detach_port_from_pts(context, nfp_delete_nf_data['consumer']['pt']) - LOG.info(_LI("Received Call DELETE NETWORK FUNCTION for tenant: " - "%(tenant_id)s with service profile:" - "%(service_profile)s"), + LOG.info("Received Call DELETE NETWORK FUNCTION for tenant: " + "%(tenant_id)s with service profile:" + "%(service_profile)s", {'tenant_id': nfp_delete_nf_data['tenant_id'], 'service_profile': nfp_delete_nf_data['service_profile']}) self._queue_notification(context, 'delete_network_function', @@ -1261,8 +1258,8 @@ class NFPNodeDriver(driver_base.NodeDriverBase): pt['port_id'])) except Exception: - LOG.warning(_LW("Failed to disassociate port from" - " pt: %(pt)s, Error: %(exc)s"), {'pt': pt, 'exc': exc}) + LOG.warning("Failed to disassociate port from" + " pt: %(pt)s, Error: %(exc)s", {'pt': pt, 'exc': exc}) def _update_ptg(self, context): if hasattr(context, 'provider') and context.provider['description']: diff --git a/gbpservice/neutron/services/servicechain/plugins/ncp/node_drivers/openstack_heat_api_client.py 
b/gbpservice/neutron/services/servicechain/plugins/ncp/node_drivers/openstack_heat_api_client.py index 9e6bbd1b7..fb2a2cc2b 100644 --- a/gbpservice/neutron/services/servicechain/plugins/ncp/node_drivers/openstack_heat_api_client.py +++ b/gbpservice/neutron/services/servicechain/plugins/ncp/node_drivers/openstack_heat_api_client.py @@ -14,7 +14,6 @@ from heatclient import client as heat_client from heatclient import exc as heat_exc from oslo_log import log as logging -from gbpservice._i18n import _LW LOG = logging.getLogger(__name__) @@ -56,9 +55,9 @@ class HeatClient(object): try: self.stacks.delete(stack_id) except heat_exc.HTTPNotFound: - LOG.warning(_LW( + LOG.warning( "Stack %(stack)s created by service chain driver is " - "not found at cleanup"), {'stack': stack_id}) + "not found at cleanup", {'stack': stack_id}) def get(self, stack_id): return self.stacks.get(stack_id) diff --git a/gbpservice/neutron/services/servicechain/plugins/ncp/node_plumbers/admin_owned_resources_apic_tscp.py b/gbpservice/neutron/services/servicechain/plugins/ncp/node_plumbers/admin_owned_resources_apic_tscp.py index c02bc4e67..8a48af60e 100644 --- a/gbpservice/neutron/services/servicechain/plugins/ncp/node_plumbers/admin_owned_resources_apic_tscp.py +++ b/gbpservice/neutron/services/servicechain/plugins/ncp/node_plumbers/admin_owned_resources_apic_tscp.py @@ -16,7 +16,6 @@ from oslo_config import cfg from oslo_log import log as logging from oslo_utils import excutils -from gbpservice._i18n import _LE from gbpservice.common import utils from gbpservice.neutron.services.servicechain.plugins.ncp.node_plumbers import( traffic_stitching_plumber as tscp) @@ -77,10 +76,10 @@ class AdminOwnedResourcesApicTSCP(tscp.TrafficStitchingPlumber): return tenant.id except k_exceptions.NotFound: with excutils.save_and_reraise_exception(reraise=True): - LOG.error(_LE('No tenant with name %s exists.'), tenant) + LOG.error('No tenant with name %s exists.', tenant) except k_exceptions.NoUniqueMatch: with 
excutils.save_and_reraise_exception(reraise=True): - LOG.error(_LE('Multiple tenants matches found for %s'), tenant) + LOG.error('Multiple tenants matches found for %s', tenant) def _get_resource_owner_context(self, context): resource_owner_context = context.elevated() diff --git a/gbpservice/neutron/services/servicechain/plugins/ncp/node_plumbers/traffic_stitching_plumber.py b/gbpservice/neutron/services/servicechain/plugins/ncp/node_plumbers/traffic_stitching_plumber.py index e6189d2e2..109c834ca 100644 --- a/gbpservice/neutron/services/servicechain/plugins/ncp/node_plumbers/traffic_stitching_plumber.py +++ b/gbpservice/neutron/services/servicechain/plugins/ncp/node_plumbers/traffic_stitching_plumber.py @@ -15,9 +15,6 @@ from neutron_lib.plugins import directory from oslo_config import cfg from oslo_log import log as logging -from gbpservice._i18n import _LE -from gbpservice._i18n import _LI -from gbpservice._i18n import _LW from gbpservice.neutron.extensions import driver_proxy_group as pg_ext from gbpservice.neutron.extensions import group_policy from gbpservice.neutron.services.grouppolicy.common import exceptions as exc @@ -47,8 +44,8 @@ class TrafficStitchingPlumber(plumber_base.NodePlumberBase): # Verify that proxy_group extension is loaded if pg_ext.PROXY_GROUP not in cfg.CONF.group_policy.extension_drivers: - LOG.error(_LE("proxy_group GBP driver extension is mandatory for " - "traffic stitching plumber.")) + LOG.error("proxy_group GBP driver extension is mandatory for " + "traffic stitching plumber.") raise exc.GroupPolicyDeploymentError() @property @@ -82,7 +79,7 @@ class TrafficStitchingPlumber(plumber_base.NodePlumberBase): management, 'management') # Create proper PTs based on the service type jump_ptg = None - LOG.info(_LI("Plumbing service of type '%s'"), + LOG.info("Plumbing service of type '%s'", info['plumbing_type']) if info['plumbing_type'] == common.PLUMBING_TYPE_ENDPOINT: # No stitching needed, only provider side PT is created. 
@@ -124,7 +121,7 @@ class TrafficStitchingPlumber(plumber_base.NodePlumberBase): context, part_context, info['consumer'], jump_ptg, 'consumer') else: - LOG.warning(_LW("Unsupported plumbing type %s"), + LOG.warning("Unsupported plumbing type %s", info['plumbing_type']) # Replace current "provider" with jump ptg if needed provider = jump_ptg or provider diff --git a/gbpservice/neutron/services/servicechain/plugins/ncp/plugin.py b/gbpservice/neutron/services/servicechain/plugins/ncp/plugin.py index c937c5492..6883f146e 100644 --- a/gbpservice/neutron/services/servicechain/plugins/ncp/plugin.py +++ b/gbpservice/neutron/services/servicechain/plugins/ncp/plugin.py @@ -18,9 +18,6 @@ from oslo_log import helpers as log from oslo_log import log as logging from oslo_utils import excutils -from gbpservice._i18n import _LE -from gbpservice._i18n import _LI -from gbpservice._i18n import _LW from gbpservice.common import utils from gbpservice.neutron.db import servicechain_db from gbpservice.neutron.services.grouppolicy.common import constants as gp_cts @@ -66,7 +63,7 @@ class NodeCompositionPlugin(servicechain_db.ServiceChainDbPlugin, self.plumber = utils.load_plugin( PLUMBER_NAMESPACE, plumber_klass) self.plumber.initialize() - LOG.info(_LI("Initialized node plumber '%s'"), plumber_klass) + LOG.info("Initialized node plumber '%s'", plumber_klass) @log.log_method_call def create_servicechain_instance(self, context, servicechain_instance): @@ -96,8 +93,8 @@ class NodeCompositionPlugin(servicechain_db.ServiceChainDbPlugin, except Exception: # Some node could not be deployed with excutils.save_and_reraise_exception(): - LOG.error(_LE("Node deployment failed, " - "deleting servicechain_instance %s"), + LOG.error("Node deployment failed, " + "deleting servicechain_instance %s", instance['id']) self.delete_servicechain_instance(context, instance['id']) @@ -127,8 +124,8 @@ class NodeCompositionPlugin(servicechain_db.ServiceChainDbPlugin, except Exception: # Some node could not be 
deployed with excutils.save_and_reraise_exception(): - LOG.error(_LE("Node deployment failed, " - "servicechain_instance %s is in ERROR state"), + LOG.error("Node deployment failed, " + "servicechain_instance %s is in ERROR state", instance['id']) @log.log_method_call @@ -257,7 +254,7 @@ class NodeCompositionPlugin(servicechain_db.ServiceChainDbPlugin, try: update['driver'].update(update['context']) except exc.NodeDriverError as ex: - LOG.error(_LE("Node Update failed, %s"), + LOG.error("Node Update failed, %s", ex.message) return updated_sc_node @@ -398,8 +395,8 @@ class NodeCompositionPlugin(servicechain_db.ServiceChainDbPlugin, old_policy_target_group, current_policy_target_group) except exc.NodeDriverError as ex: - LOG.error(_LE("Node Update on policy target group modification" - " failed, %s"), ex.message) + LOG.error("Node Update on policy target group modification" + " failed, %s", ex.message) def _update_chains_pt_modified(self, context, policy_target, instance_id, action): @@ -412,8 +409,8 @@ class NodeCompositionPlugin(servicechain_db.ServiceChainDbPlugin, 'update_policy_target_' + action)( update['context'], policy_target) except exc.NodeDriverError as ex: - LOG.error(_LE("Node Update on policy target modification " - "failed, %s"), ex.message) + LOG.error("Node Update on policy target modification " + "failed, %s", ex.message) def _update_chains_consumer_modified(self, context, policy_target_group, instance_id, action): @@ -426,9 +423,9 @@ class NodeCompositionPlugin(servicechain_db.ServiceChainDbPlugin, 'update_node_consumer_ptg_' + action)( update['context'], policy_target_group) except exc.NodeDriverError as ex: - LOG.error(_LE( + LOG.error( "Node Update on policy target group modification " - "failed, %s"), ex.message) + "failed, %s", ex.message) def notify_chain_parameters_updated(self, context, servicechain_instance_id): @@ -447,8 +444,8 @@ class NodeCompositionPlugin(servicechain_db.ServiceChainDbPlugin, getattr(update['driver'], 
'notify_chain_parameters_updated')(update['context']) except exc.NodeDriverError as ex: - LOG.error(_LE("Node Update on GBP parameter update " - "failed, %s"), ex.message) + LOG.error("Node Update on GBP parameter update " + "failed, %s", ex.message) def _get_instance_nodes(self, context, instance): context = utils.admin_context(context) @@ -500,7 +497,7 @@ class NodeCompositionPlugin(servicechain_db.ServiceChainDbPlugin, deployers = self._get_scheduled_drivers(context, resource, 'get') except Exception: - LOG.warning(_LW("Failed to get node driver")) + LOG.warning("Failed to get node driver") # Invoke drivers only if status attributes are requested if not fields or STATUS_SET.intersection(set(fields)): @@ -548,8 +545,8 @@ class NodeCompositionPlugin(servicechain_db.ServiceChainDbPlugin, result['status'] = 'ACTIVE' result['status_details'] = 'node deployment completed' except Exception as exc: - LOG.error(_LE("Failed to get servicechain instance status " - "from node driver, Error: %(exc)s"), {'exc': exc}) + LOG.error("Failed to get servicechain instance status " + "from node driver, Error: %(exc)s", {'exc': exc}) return return result result = {'status': 'ACTIVE', 'status_details': ''} @@ -574,7 +571,7 @@ class NodeCompositionPlugin(servicechain_db.ServiceChainDbPlugin, try: driver.delete(destroy['context']) except exc.NodeDriverError: - LOG.error(_LE("Node destroy failed, for node %s "), + LOG.error("Node destroy failed, for node %s ", driver['context'].current_node['id']) except Exception as e: if db_api.is_retriable(e): diff --git a/gbpservice/neutron/services/servicechain/plugins/sharing.py b/gbpservice/neutron/services/servicechain/plugins/sharing.py index cb9f6bce6..107efa2ca 100644 --- a/gbpservice/neutron/services/servicechain/plugins/sharing.py +++ b/gbpservice/neutron/services/servicechain/plugins/sharing.py @@ -14,7 +14,6 @@ from neutron.plugins.common import constants as pconst from neutron_lib.plugins import directory from oslo_log import log as 
logging -from gbpservice._i18n import _LE from gbpservice.neutron.services.grouppolicy.common import exceptions as gp_exc from gbpservice.neutron.services.grouppolicy import plugin as gbp_plugin @@ -42,7 +41,7 @@ class SharingMixin(object): # plugins are loaded to grab and store plugin. gbp_plugin = directory.get_plugin(pconst.GROUP_POLICY) if not gbp_plugin: - LOG.error(_LE("No group policy service plugin found.")) + LOG.error("No group policy service plugin found.") raise gp_exc.GroupPolicyDeploymentError() return gbp_plugin diff --git a/gbpservice/neutron/tests/unit/__init__.py b/gbpservice/neutron/tests/unit/__init__.py index daf401880..67c9b2086 100644 --- a/gbpservice/neutron/tests/unit/__init__.py +++ b/gbpservice/neutron/tests/unit/__init__.py @@ -9,7 +9,6 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -from neutron._i18n import _LI from neutron.agent import securitygroups_rpc from neutron.api import extensions from neutron.quota import resource @@ -21,11 +20,6 @@ from gbpservice.network.neutronv2 import local_api # The following is to avoid excessive logging in the UTs -extensions._LW = extensions._LI -l3_agent_scheduler._LW = _LI -securitygroups_rpc._LW = securitygroups_rpc._LI -resource_registry._LW = resource_registry._LI -local_api._LW = _LI extensions.LOG.warning = extensions.LOG.info resource_registry.LOG.warning = resource_registry.LOG.info l3_agent_scheduler.LOG.warning = l3_agent_scheduler.LOG.info diff --git a/gbpservice/neutron/tests/unit/plugins/ml2plus/__init__.py b/gbpservice/neutron/tests/unit/plugins/ml2plus/__init__.py index 0f11b3e13..a7cfcc26b 100644 --- a/gbpservice/neutron/tests/unit/plugins/ml2plus/__init__.py +++ b/gbpservice/neutron/tests/unit/plugins/ml2plus/__init__.py @@ -27,11 +27,8 @@ if not hasattr(sa_utils, '_get_unique_keys'): sa_utils._get_unique_keys = sa_utils.get_unique_keys -from neutron._i18n 
import _LI - from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import cache # The following is to avoid excessive logging in the UTs -cache._LW = _LI cache.LOG.warning = cache.LOG.info diff --git a/gbpservice/neutron/tests/unit/plugins/ml2plus/drivers/mechanism_logger.py b/gbpservice/neutron/tests/unit/plugins/ml2plus/drivers/mechanism_logger.py index f4981ab9f..6d73d982c 100644 --- a/gbpservice/neutron/tests/unit/plugins/ml2plus/drivers/mechanism_logger.py +++ b/gbpservice/neutron/tests/unit/plugins/ml2plus/drivers/mechanism_logger.py @@ -17,7 +17,6 @@ from neutron.tests.unit.plugins.ml2.drivers import ( mechanism_logger as ml2_logger) from oslo_log import log -from gbpservice._i18n import _LI from gbpservice.neutron.plugins.ml2plus import driver_api LOG = log.getLogger(__name__) @@ -31,10 +30,10 @@ class LoggerPlusMechanismDriver(driver_api.MechanismDriver, """ def initialize(self): - LOG.info(_LI("initialize called")) + LOG.info("initialize called") def ensure_tenant(self, plugin_context, tenant_id): - LOG.info(_LI("ensure_tenant called with tenant_id %s"), tenant_id) + LOG.info("ensure_tenant called with tenant_id %s", tenant_id) def _log_subnetpool_call(self, method_name, context): LOG.info(_("%(method)s called with subnetpool settings %(current)s " diff --git a/gbpservice/neutron/tests/unit/services/grouppolicy/test_grouppolicy_plugin.py b/gbpservice/neutron/tests/unit/services/grouppolicy/test_grouppolicy_plugin.py index 4ad9e166a..19154c1a2 100644 --- a/gbpservice/neutron/tests/unit/services/grouppolicy/test_grouppolicy_plugin.py +++ b/gbpservice/neutron/tests/unit/services/grouppolicy/test_grouppolicy_plugin.py @@ -18,7 +18,6 @@ from oslo_config import cfg from oslo_log import log as logging import webob.exc -from gbpservice._i18n import _LW from gbpservice.neutron.db.grouppolicy import group_policy_mapping_db as gpmdb from gbpservice.neutron.extensions import group_policy as gpolicy from gbpservice.neutron.services.grouppolicy import config @@ 
-164,7 +163,7 @@ class GroupPolicyPluginTestCase(GroupPolicyPluginTestBase): def tearDown(self): policy_drivers = cfg.CONF.group_policy.policy_drivers - LOG.warning(_LW("PDs used in this test: %s"), + LOG.warning("PDs used in this test: %s", policy_drivers) # Always reset configuration to dummy driver. Any # test which requires to configure a different diff --git a/gbpservice/nfp/common/exceptions.py b/gbpservice/nfp/common/exceptions.py index a3945eedc..a247fa8ad 100644 --- a/gbpservice/nfp/common/exceptions.py +++ b/gbpservice/nfp/common/exceptions.py @@ -17,7 +17,6 @@ from oslo_config import cfg from oslo_log import log as logging from gbpservice._i18n import _ -from gbpservice._i18n import _LE LOG = logging.getLogger(__name__) @@ -66,9 +65,9 @@ class NFPException(Exception): exc_info = sys.exc_info() # kwargs doesn't match a variable in the message # log the issue and the kwargs - LOG.exception(_LE('Exception in string format operation')) + LOG.exception('Exception in string format operation') for name, value in kwargs.items(): - LOG.error(_LE("%(name)s: %(value)s"), + LOG.error("%(name)s: %(value)s", {'name': name, 'value': value}) if CONF.fatal_exception_format_errors: six.reraise(*exc_info) diff --git a/gbpservice/nfp/orchestrator/config_drivers/heat_client.py b/gbpservice/nfp/orchestrator/config_drivers/heat_client.py index e51956236..a2544cacb 100644 --- a/gbpservice/nfp/orchestrator/config_drivers/heat_client.py +++ b/gbpservice/nfp/orchestrator/config_drivers/heat_client.py @@ -12,7 +12,6 @@ from heatclient import client as heat_client from heatclient import exc as heat_exc -from gbpservice._i18n import _LW from gbpservice.nfp.core import log as nfp_logging LOG = nfp_logging.getLogger(__name__) @@ -64,8 +63,8 @@ class HeatClient(object): try: self.stacks.delete(stack_id) except heat_exc.HTTPNotFound: - LOG.warning(_LW("Stack %(stack)s created by service chain driver " - "is not found at cleanup"), {'stack': stack_id}) + LOG.warning("Stack %(stack)s 
created by service chain driver " + "is not found at cleanup", {'stack': stack_id}) def get(self, stack_id): return self.stacks.get(stack_id) diff --git a/gbpservice/nfp/orchestrator/config_drivers/heat_driver.py b/gbpservice/nfp/orchestrator/config_drivers/heat_driver.py index 45e6446f2..45fb174de 100644 --- a/gbpservice/nfp/orchestrator/config_drivers/heat_driver.py +++ b/gbpservice/nfp/orchestrator/config_drivers/heat_driver.py @@ -21,9 +21,6 @@ from oslo_config import cfg from oslo_serialization import jsonutils import yaml -from gbpservice._i18n import _LE -from gbpservice._i18n import _LI -from gbpservice._i18n import _LW from gbpservice.neutron.services.grouppolicy.common import constants as gconst from gbpservice.neutron.services.servicechain.plugins.ncp import plumber_base from gbpservice.nfp.common import constants as nfp_constants @@ -205,7 +202,7 @@ class HeatDriver(object): try: self._assign_admin_user_to_project(tenant_id) except Exception: - LOG.exception(_LE("Failed to assign admin user to project")) + LOG.exception("Failed to assign admin user to project") return None ''' nfp_context = module_context.get() @@ -223,7 +220,7 @@ class HeatDriver(object): auth_token=auth_token, timeout_mins=timeout_mins) except Exception: - LOG.exception(_LE("Failed to create heatclient object")) + LOG.exception("Failed to create heatclient object") return None return heat_client @@ -318,8 +315,8 @@ class HeatDriver(object): provider_subnet = subnet break if not provider_subnet: - LOG.error(_LE("Unable to get provider subnet for provider " - "policy target group %(provider_ptg)s"), + LOG.error("Unable to get provider subnet for provider " + "policy target group %(provider_ptg)s", {"provider_ptg": provider}) return lb_vip, lb_vip_name if service_type == pconst.LOADBALANCERV2: @@ -520,8 +517,8 @@ class HeatDriver(object): break if not redirect_prs: - LOG.error(_LE("Redirect rule doesn't exist in policy target rule " - " set")) + LOG.error("Redirect rule doesn't exist in 
policy target rule " + " set") return None, None return (redirect_prs['consuming_policy_target_groups'], redirect_prs['consuming_external_policies']) @@ -663,8 +660,8 @@ class HeatDriver(object): provider_cidr = subnet['cidr'] break if not provider_cidr: - LOG.error(_LE("Unable to get provider cidr for provider " - "policy target group %(provider_ptg)s"), + LOG.error("Unable to get provider cidr for provider " + "policy target group %(provider_ptg)s", {"provider_ptg": provider}) return None @@ -747,7 +744,7 @@ class HeatDriver(object): svc_mgmt_ptgs = gcm.retry(self.gbp_client.get_policy_target_groups, auth_token, filters) if not svc_mgmt_ptgs: - LOG.error(_LE("Service Management Group is not created by Admin")) + LOG.error("Service Management Group is not created by Admin") return None else: mgmt_subnet_id = svc_mgmt_ptgs[0]['subnets'][0] @@ -843,8 +840,8 @@ class HeatDriver(object): auth_token, filters={'port_id': [consumer_port['id']]}) if not stitching_pts: - LOG.error(_LE("Policy target is not created for the " - "stitching port")) + LOG.error("Policy target is not created for the " + "stitching port") return None stitching_ptg_id = ( stitching_pts[0]['policy_target_group_id']) @@ -901,9 +898,9 @@ class HeatDriver(object): stack_template_str.startswith('{') else yaml.load(stack_template_str)) except Exception: - LOG.error(_LE( + LOG.error( "Unable to load stack template for service chain " - "node: %(node_id)s"), {'node_id': service_chain_node}) + "node: %(node_id)s", {'node_id': service_chain_node}) return None, None config_param_values = service_chain_instance.get( 'config_param_values', '{}') @@ -911,7 +908,7 @@ class HeatDriver(object): try: config_param_values = jsonutils.loads(config_param_values) except Exception: - LOG.error(_LE("Unable to load config parameters")) + LOG.error("Unable to load config parameters") return None, None is_template_aws_version = stack_template.get( @@ -1010,8 +1007,8 @@ class HeatDriver(object): if parameter in 
config_param_values: stack_params[parameter] = config_param_values[parameter] - LOG.info(_LI('Final stack_template : %(stack_data)s, ' - 'stack_params : %(params)s'), + LOG.info('Final stack_template : %(stack_data)s, ' + 'stack_params : %(params)s', {'stack_data': stack_template, 'params': stack_params}) return (stack_template, stack_params) @@ -1021,9 +1018,9 @@ class HeatDriver(object): self.neutron_client.get_networks, token, filters={'name': [INTERNET_OUT_EXT_NET_NAME]}) if not ext_net: - LOG.error(_LE("'internet_out_network_name' not configured" - " in [heat_driver] or Network %(network)s is" - " not found"), + LOG.error("'internet_out_network_name' not configured" + " in [heat_driver] or Network %(network)s is" + " not found", {'network': INTERNET_OUT_EXT_NET_NAME}) return None # There is a case where consumer port has multiple fips @@ -1035,8 +1032,8 @@ class HeatDriver(object): return ncm.retry(self.neutron_client.get_floating_ips, token, **filters)[0]['floating_ip_address'] except Exception: - LOG.error(_LE("Floating IP for VPN Service has either exhausted" - " or has been disassociated Manually")) + LOG.error("Floating IP for VPN Service has either exhausted" + " or has been disassociated Manually") return None def _update_node_config(self, auth_token, tenant_id, service_profile, @@ -1057,7 +1054,7 @@ class HeatDriver(object): provider_subnet = subnet break if not provider_cidr: - LOG.error(_LE("No provider cidr availabale")) + LOG.error("No provider cidr availabale") return None, None service_type = service_profile['service_type'] service_details = transport.parse_service_flavor_string( @@ -1072,9 +1069,9 @@ class HeatDriver(object): stack_template_str.startswith('{') else yaml.load(stack_template_str)) except Exception: - LOG.error(_LE( + LOG.error( "Unable to load stack template for service chain " - "node: %(node_id)s"), {'node_id': service_chain_node}) + "node: %(node_id)s", {'node_id': service_chain_node}) return None, None config_param_values = 
service_chain_instance.get( 'config_param_values', '{}') @@ -1082,7 +1079,7 @@ class HeatDriver(object): try: config_param_values = jsonutils.loads(config_param_values) except Exception: - LOG.error(_LE("Unable to load config parameters")) + LOG.error("Unable to load config parameters") return None, None is_template_aws_version = stack_template.get( @@ -1200,8 +1197,8 @@ class HeatDriver(object): auth_token, filters={'port_id': [consumer_port['id']]}) if not stitching_pts: - LOG.error(_LE("Policy target is not created for the " - "stitching port")) + LOG.error("Policy target is not created for the " + "stitching port") return None, None stitching_ptg_id = ( stitching_pts[0]['policy_target_group_id']) @@ -1219,9 +1216,9 @@ class HeatDriver(object): auth_token, filters={'name': [INTERNET_OUT_EXT_NET_NAME]}) if not ext_net: - LOG.error(_LE("'internet_out_network_name' not configured" - " in [heat_driver] or Network %(network)s is" - " not found"), + LOG.error("'internet_out_network_name' not configured" + " in [heat_driver] or Network %(network)s is" + " not found", {'network': INTERNET_OUT_EXT_NET_NAME}) return None, None filters = {'port_id': [consumer_port['id']], @@ -1231,8 +1228,8 @@ class HeatDriver(object): self.neutron_client.get_floating_ips, auth_token, filters=filters) if not floatingips: - LOG.error(_LE("Floating IP for VPN Service has been " - "disassociated Manually")) + LOG.error("Floating IP for VPN Service has been " + "disassociated Manually") return None, None for fip in floatingips: if consumer_port['fixed_ips'][0]['ip_address'] == fip[ @@ -1253,9 +1250,9 @@ class HeatDriver(object): ';mgmt_gw_ip=' + mgmt_gw_ip + ';network_function_id=' + network_function['id']) except Exception as e: - LOG.error(_LE("Problem in preparing description, some of " - "the fields might not have initialized. " - "Error: %(error)s"), {'error': e}) + LOG.error("Problem in preparing description, some of " + "the fields might not have initialized. 
" + "Error: %(error)s", {'error': e}) return None, None siteconn_keys = self._get_site_conn_keys( stack_template[resources_key], @@ -1287,8 +1284,8 @@ class HeatDriver(object): if parameter in config_param_values: stack_params[parameter] = config_param_values[parameter] - LOG.info(_LI('Final stack_template : %(stack_data)s, ' - 'stack_params : %(params)s'), + LOG.info('Final stack_template : %(stack_data)s, ' + 'stack_params : %(params)s', {'stack_data': stack_template, 'params': stack_params}) return (stack_template, stack_params) @@ -1392,7 +1389,7 @@ class HeatDriver(object): admin_token, policy_target['policy_target_group_id'])) elif port_classification == nfp_constants.PROVIDER: - LOG.info(_LI("provider info: %(p_info)s"), + LOG.info("provider info: %(p_info)s", {'p_info': port_id}) with nfp_ctx_mgr.NeutronContextManager as ncm: provider_port = ncm.retry(self.neutron_client.get_port, @@ -1438,7 +1435,7 @@ class HeatDriver(object): elif stack.stack_status == 'CREATE_COMPLETE': return elif stack.stack_status == 'DELETE_COMPLETE': - LOG.info(_LI("Stack %(stack)s is deleted"), + LOG.info("Stack %(stack)s is deleted", {'stack': stack_id}) if action == "delete": return @@ -1453,17 +1450,17 @@ class HeatDriver(object): 'DELETE_IN_PROGRESS']: return except heat_exc.HTTPNotFound: - LOG.warning(_LW( + LOG.warning( "Stack %(stack)s created by service chain " "driver is not found while waiting for %(action)s " - "to complete"), + "to complete", {'stack': stack_id, 'action': action}) if action == "create" or action == "update": operation_failed = True else: return except Exception: - LOG.exception(_LE("Retrieving the stack %(stack)s failed."), + LOG.exception("Retrieving the stack %(stack)s failed.", {'stack': stack_id}) if action == "create" or action == "update": operation_failed = True @@ -1474,8 +1471,8 @@ class HeatDriver(object): if ignore_error: return else: - LOG.error(_LE("Stack %(stack_name)s %(action)s failed for " - "tenant %(stack_owner)s"), + LOG.error("Stack 
%(stack_name)s %(action)s failed for " + "tenant %(stack_owner)s", {'stack_name': stack.stack_name, 'stack_owner': stack.stack_owner, 'action': action}) @@ -1484,8 +1481,8 @@ class HeatDriver(object): time.sleep(STACK_ACTION_RETRY_WAIT) time_waited = time_waited + STACK_ACTION_RETRY_WAIT if time_waited >= wait_timeout: - LOG.error(_LE("Stack %(action)s not completed within " - "%(wait)s seconds"), + LOG.error("Stack %(action)s not completed within " + "%(wait)s seconds", {'action': action, 'wait': wait_timeout, 'stack': stack_id}) @@ -1499,10 +1496,10 @@ class HeatDriver(object): pass return else: - LOG.error(_LE( + LOG.error( "Stack %(stack_name)s %(action)s not " "completed within %(time)s seconds where " - "stack owner is %(stack_owner)s"), + "stack owner is %(stack_owner)s", {'stack_name': stack.stack_name, 'action': action, 'time': wait_timeout, @@ -1529,7 +1526,7 @@ class HeatDriver(object): elif stack.stack_status == 'UPDATE_COMPLETE': return success_status elif stack.stack_status == 'DELETE_COMPLETE': - LOG.info(_LI("Stack %(stack)s is deleted"), + LOG.info("Stack %(stack)s is deleted", {'stack': stack_id}) return failure_status elif stack.stack_status == 'CREATE_FAILED': @@ -1562,7 +1559,7 @@ class HeatDriver(object): elif stack.stack_status == 'UPDATE_COMPLETE': return success_status elif stack.stack_status == 'DELETE_COMPLETE': - LOG.info(_LI("Stack %(stack)s is deleted"), + LOG.info("Stack %(stack)s is deleted", {'stack': stack_id}) return failure_status elif stack.stack_status == 'CREATE_FAILED': @@ -1589,7 +1586,7 @@ class HeatDriver(object): elif stack.stack_status == 'CREATE_COMPLETE': return failure_status elif stack.stack_status == 'DELETE_COMPLETE': - LOG.info(_LI("Stack %(stack)s is deleted"), + LOG.info("Stack %(stack)s is deleted", {'stack': stack_id}) if network_function: self._post_stack_cleanup(network_function) @@ -1683,8 +1680,8 @@ class HeatDriver(object): stack = hcm.retry(heatclient.create, stack_name, stack_template, stack_params) 
stack_id = stack['stack']['id'] - LOG.info(_LI("Created stack with ID %(stack_id)s and " - "name %(stack_name)s for provider PTG %(provider)s"), + LOG.info("Created stack with ID %(stack_id)s and " + "name %(stack_name)s for provider PTG %(provider)s", {'stack_id': stack_id, 'stack_name': stack_name, 'provider': provider['id']}) @@ -1735,8 +1732,8 @@ class HeatDriver(object): stack_template, stack_params) stack_id = stack['stack']['id'] - LOG.info(_LI("Created stack with ID %(stack_id)s and " - "name %(stack_name)s for provider PTG %(provider)s"), + LOG.info("Created stack with ID %(stack_id)s and " + "name %(stack_name)s for provider PTG %(provider)s", {'stack_id': stack_id, 'stack_name': stack_name, 'provider': provider['id']}) @@ -1755,8 +1752,8 @@ class HeatDriver(object): except Exception as err: # Log the error and continue with VM delete in case of *aas # cleanup failure - LOG.exception(_LE("Cleaning up the service chain stack failed " - "with Error: %(error)s"), {'error': err}) + LOG.exception("Cleaning up the service chain stack failed " + "with Error: %(error)s", {'error': err}) return None return stack_id @@ -1783,8 +1780,8 @@ class HeatDriver(object): return None if not base_mode_support and not mgmt_ip: - LOG.error(_LE("Service information is not available with Service " - "Orchestrator on node update")) + LOG.error("Service information is not available with Service " + "Orchestrator on node update") return None stack_template, stack_params = self._update_node_config( @@ -1866,8 +1863,8 @@ class HeatDriver(object): pt_added_or_removed=True) return stack_id except Exception: - LOG.exception(_LE("Processing policy target %(operation)s " - " failed"), {'operation': operation}) + LOG.exception("Processing policy target %(operation)s " + " failed", {'operation': operation}) return None def notify_chain_parameters_updated(self, network_function_details): @@ -1902,7 +1899,7 @@ class HeatDriver(object): return None return stack_id except Exception: - 
LOG.exception(_LE( + LOG.exception( "Processing policy target group " - "%(operation)s failed"), {'operation': operation}) + "%(operation)s failed", {'operation': operation}) return None diff --git a/gbpservice/nfp/orchestrator/db/nfp_db.py b/gbpservice/nfp/orchestrator/db/nfp_db.py index 058f9cffe..354491421 100644 --- a/gbpservice/nfp/orchestrator/db/nfp_db.py +++ b/gbpservice/nfp/orchestrator/db/nfp_db.py @@ -15,7 +15,6 @@ from oslo_serialization import jsonutils from oslo_utils import uuidutils from sqlalchemy.orm import exc -from gbpservice._i18n import _LW from gbpservice.nfp.common import exceptions as nfp_exc from gbpservice.nfp.orchestrator.db import common_db_mixin from gbpservice.nfp.orchestrator.db import nfp_db_model @@ -686,8 +685,8 @@ class NFPDbBase(common_db_mixin.CommonDbMixin): return self._get_gw_info_dict(session.query(svc_gw).filter( svc_gw.network_function_id == nf_id).one()) except exc.NoResultFound: - LOG.warning(_LW("Gateway detail doesn't exist for Network Function" - " %s "), nf_id) + LOG.warning("Gateway detail doesn't exist for Network Function" + " %s ", nf_id) raise def _get_gw_info_dict(self, gw): diff --git a/gbpservice/nfp/orchestrator/drivers/orchestration_driver.py b/gbpservice/nfp/orchestrator/drivers/orchestration_driver.py index ab608e322..0e7f7035b 100644 --- a/gbpservice/nfp/orchestrator/drivers/orchestration_driver.py +++ b/gbpservice/nfp/orchestrator/drivers/orchestration_driver.py @@ -12,8 +12,6 @@ import ast from collections import defaultdict -from gbpservice._i18n import _LE -from gbpservice._i18n import _LW from gbpservice.nfp.common import constants as nfp_constants from gbpservice.nfp.common import data_formatter as df @@ -131,8 +129,8 @@ class OrchestrationDriver(object): network_handler) device_data['interfaces'] = [mgmt_interface] except Exception as e: - LOG.exception(_LE('Failed to get interfaces for device creation.' 
- 'Error: %(error)s'), {'error': e}) + LOG.exception('Failed to get interfaces for device creation.' + 'Error: %(error)s', {'error': e}) def _delete_interfaces(self, device_data, interfaces, network_handler=None): @@ -148,8 +146,8 @@ class OrchestrationDriver(object): if attr in nfp_constants.METADATA_SUPPORTED_ATTRIBUTES: provider_metadata[attr] = ast.literal_eval(metadata[attr]) except Exception as e: - LOG.error(_LE('Wrong metadata: %(metadata)s provided for ' - 'image name: %(image_name)s. Error: %(error)s'), + LOG.error('Wrong metadata: %(metadata)s provided for ' + 'image name: %(image_name)s. Error: %(error)s', {'image_name': image_name, 'metadata': metadata, 'error': e}) return None @@ -211,8 +209,8 @@ class OrchestrationDriver(object): LOG.debug("No provider metadata specified in image," " proceeding with default values") except Exception: - LOG.error(_LE("Error while getting metadata for image name:" - "%(image_name)s, proceeding with default values"), + LOG.error("Error while getting metadata for image name:" + "%(image_name)s, proceeding with default values", {'image_name': image_name}) return provider_metadata @@ -235,8 +233,8 @@ class OrchestrationDriver(object): LOG.debug("No provider metadata specified in image," " proceeding with default values") except Exception: - LOG.error(_LE("Error while getting metadata for image name: " - "%(image_name)s, proceeding with default values"), + LOG.error("Error while getting metadata for image name: " + "%(image_name)s, proceeding with default values", {'image_name': image_name}) return provider_metadata @@ -274,8 +272,8 @@ class OrchestrationDriver(object): image_id = nova.get_image_id(token, admin_tenant_id, image_name) return image_id except Exception as e: - LOG.error(_LE('Failed to get image id for device creation.' - ' image name: %(image_name)s. Error: %(error)s'), + LOG.error('Failed to get image id for device creation.' + ' image name: %(image_name)s. 
Error: %(error)s', {'image_name': image_name, 'error': e}) def create_instance(self, nova, token, admin_tenant_id, @@ -291,8 +289,8 @@ class OrchestrationDriver(object): server_grp_id=server_grp_id) return instance_id except Exception as e: - LOG.error(_LE('Failed to create instance.' - 'Error: %(error)s'), {'error': e}) + LOG.error('Failed to create instance.' + 'Error: %(error)s', {'error': e}) def get_neutron_port_details(self, network_handler, token, port_id): try: @@ -314,8 +312,8 @@ class OrchestrationDriver(object): exc_type, exc_value, exc_traceback = sys.exc_info() LOG.error(traceback.format_exception(exc_type, exc_value, exc_traceback)) - LOG.error(_LE('Failed to get management port details. ' - 'Error: %(error)s'), {'error': e}) + LOG.error('Failed to get management port details. ' + 'Error: %(error)s', {'error': e}) @_set_network_handler def create_network_function_device(self, device_data, @@ -383,8 +381,8 @@ class OrchestrationDriver(object): interfaces_to_attach, device_data) except Exception as e: - LOG.error(_LE('Failed to fetch list of interfaces to attach' - ' for device creation %(error)s'), {'error': e}) + LOG.error('Failed to fetch list of interfaces to attach' + ' for device creation %(error)s', {'error': e}) self._delete_interfaces(device_data, interfaces, network_handler=network_handler) return None @@ -479,25 +477,25 @@ class OrchestrationDriver(object): server_grp_id_result=None): interfaces = device_data.pop('interfaces', None) if not interfaces: - LOG.exception(_LE('Failed to get interfaces for device creation.')) + LOG.exception('Failed to get interfaces for device creation.') return None, _, _ image_id = image_id_result.get('result', None) if not image_id: - LOG.error(_LE('Failed to get image id for device creation.')) + LOG.error('Failed to get image id for device creation.') self._delete_interfaces(device_data, interfaces, network_handler=network_handler) return None, _, _ if server_grp_id_result and not 
server_grp_id_result.get('result'): - LOG.error(_LE('Validation failed for Nova anti-affinity ' - 'server group.')) + LOG.error('Validation failed for Nova anti-affinity ' + 'server group.') return None, _, _ provider_metadata = provider_metadata_result.get('result', None) if not provider_metadata: - LOG.warning(_LW('Failed to get provider metadata for' - ' device creation.')) + LOG.warning('Failed to get provider metadata for' + ' device creation.') provider_metadata = {} return interfaces, image_id, provider_metadata @@ -559,8 +557,8 @@ class OrchestrationDriver(object): admin_tenant_id = device_data['admin_tenant_id'] instance_id = instance_id_result.get('result', None) if not instance_id: - LOG.error(_LE('Failed to create instance with device data:' - '%(data)s.'), + LOG.error('Failed to create instance with device data:' + '%(data)s.', {'data': device_data}) self._delete_interfaces(device_data, interfaces, network_handler=network_handler) @@ -569,7 +567,7 @@ class OrchestrationDriver(object): mgmt_neutron_port_info = port_details_result.get('result', None) if not mgmt_neutron_port_info: - LOG.error(_LE('Failed to get management port details. ')) + LOG.error('Failed to get management port details. ') with nfp_ctx_mgr.NovaContextManager as ncm: ncm.retry(self.compute_handler_nova.delete_instance, token, @@ -646,8 +644,8 @@ class OrchestrationDriver(object): interfaces, network_handler=network_handler) except Exception as e: - LOG.error(_LE('Failed to delete the management data port(s). ' - 'Error: %(error)s'), {'error': e}) + LOG.error('Failed to delete the management data port(s). ' + 'Error: %(error)s', {'error': e}) def get_network_function_device_status(self, device_data, ignore_failure=False): @@ -789,8 +787,8 @@ class OrchestrationDriver(object): executor.fire() except Exception as e: - LOG.error(_LE('Failed to plug interface(s) to the device.' - 'Error: %(error)s'), {'error': e}) + LOG.error('Failed to plug interface(s) to the device.' 
+ 'Error: %(error)s', {'error': e}) return None else: return True @@ -924,7 +922,7 @@ class OrchestrationDriver(object): network_handler = self.network_handlers[nfp_constants.NEUTRON_MODE] network_handler.delete_port(token, port_id) except Exception as exc: - LOG.error(_LE("Failed to delete port %(port_id)s. Error: %(exc)s"), + LOG.error("Failed to delete port %(port_id)s. Error: %(exc)s", {"port_id": port_id, 'exc': exc}) def _get_port_from_pt(self, device_data, pt_id): @@ -942,7 +940,7 @@ class OrchestrationDriver(object): for pt in device_data['consumer']['pt']: if pt['id'] == pt_id: return pt['port_id'] - LOG.error(_LE('Policy Target %(pt_id) not found in provided data'), + LOG.error('Policy Target %(pt_id)s not found in provided data', {'pt_id': pt_id}) return port_id @@ -1003,8 +1001,8 @@ class OrchestrationDriver(object): 'port_classification', 'port_model']) ): - LOG.error(_LE('Incomplete device data received for delete ' - 'network function device.')) + LOG.error('Incomplete device data received for delete ' + 'network function device.') return None token = self._get_token(device_data.get('token')) @@ -1027,8 +1025,8 @@ class OrchestrationDriver(object): devices_data['provider']) ) except Exception: - LOG.error(_LE('Failed to get provider port details' - ' for get device config info operation')) + LOG.error('Failed to get provider port details' + ' for get device config info operation') return None elif port['port_classification'] == nfp_constants.CONSUMER: try: @@ -1038,8 +1036,8 @@ class OrchestrationDriver(object): devices_data['consumer']) ) except Exception: - LOG.error(_LE('Failed to get consumer port details' - ' for get device config info operation')) + LOG.error('Failed to get consumer port details' + ' for get device config info operation') return None device_data.update({ diff --git a/gbpservice/nfp/orchestrator/modules/device_orchestrator.py b/gbpservice/nfp/orchestrator/modules/device_orchestrator.py index c95776651..3664cd06d 100644 --- 
a/gbpservice/nfp/orchestrator/modules/device_orchestrator.py +++ b/gbpservice/nfp/orchestrator/modules/device_orchestrator.py @@ -14,8 +14,6 @@ import oslo_messaging as messaging from gbpservice._i18n import _ -from gbpservice._i18n import _LE -from gbpservice._i18n import _LI from gbpservice.nfp.common import constants as nfp_constants from gbpservice.nfp.common import topics as nsf_topics from gbpservice.nfp.common import utils as nfp_utils @@ -112,14 +110,14 @@ class RpcHandler(object): NFI = event_data.get('network_function_instance_id') if NFD and NF and NFI: - LOG.info(_LI("Created event %(event_name)s with" - " NF:%(nf)s ,NFI:%(nfi)s and NFD:%(nfd)s"), + LOG.info("Created event %(event_name)s with" + " NF:%(nf)s ,NFI:%(nfi)s and NFD:%(nfd)s", {'event_name': event_id, 'nf': NF, 'nfi': NFI, 'nfd': NFD}) else: - LOG.info(_LI("Created event %(event_name)s "), + LOG.info("Created event %(event_name)s ", {'event_name': event_id}) def _create_event(self, event_id, event_data=None, key=None, @@ -182,7 +180,7 @@ class RpcHandler(object): event_id = self.rpc_event_mapping[resource][0] if result.lower() != 'success': - LOG.info(_LI("RPC Handler response data:%(data)s"), + LOG.info("RPC Handler response data:%(data)s", {'data': data}) if is_delete_request: # Ignore any deletion errors, generate SUCCESS event @@ -323,20 +321,20 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler): NFI = event_data.get('network_function_instance_id') if NFD and NF and NFI: - LOG.info(_LI("Received event %(event_name)s with " - "NF:%(nf)s ,NFI:%(nfi)s and NFD:%(nfd)s"), + LOG.info("Received event %(event_name)s with " + "NF:%(nf)s ,NFI:%(nfi)s and NFD:%(nfd)s", {'event_name': event.id, 'nf': NF, 'nfi': NFI, 'nfd': NFD}) else: - LOG.info(_LI("Received event %(event_name)s "), + LOG.info("Received event %(event_name)s ", {'event_name': event.id}) event_handler = self.event_method_mapping(event.id) event_handler(event) except Exception as e: - LOG.error(_LE("error in processing event: 
%(event_id)s for " - "event data %(event_data)s. error: %(error)s"), + LOG.error("error in processing event: %(event_id)s for " + "event data %(event_data)s. error: %(error)s", {'event_id': event.id, 'event_data': event.data, 'error': e}) _, _, tb = sys.exc_info() @@ -356,13 +354,13 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler): nf = None nfi = None if nf and nfi: - LOG.info(_LI("Created event %(event_name)s with NF:%(nf)s and " - "NFI:%(nfi)s "), + LOG.info("Created event %(event_name)s with NF:%(nf)s and " + "NFI:%(nfi)s ", {'event_name': event_id, 'nf': nf, 'nfi': nfi}) else: - LOG.info(_LI("Created event %(event_name)s "), + LOG.info("Created event %(event_name)s ", {'event_name': event_id}) def _create_event(self, event_id, event_data=None, @@ -407,11 +405,11 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler): self._controller.event_complete(ev) def event_cancelled(self, ev, reason): - LOG.info(_LI("Poll event %(event_id)s cancelled."), + LOG.info("Poll event %(event_id)s cancelled.", {'event_id': ev.id}) if ev.id == 'DEVICE_SPAWNING': - LOG.info(_LI("Device is not up still after 10secs of launch")) + LOG.info("Device is not up still after 10secs of launch") # create event DEVICE_NOT_UP device = self._prepare_failure_case_device_data(ev.data) self._create_event(event_id='DEVICE_NOT_UP', @@ -420,10 +418,10 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler): self._update_network_function_device_db(device, 'DEVICE_NOT_UP') if ev.id == 'DEVICE_BEING_DELETED': - LOG.info(_LI("Device is not deleted completely." - " Continuing further cleanup of resources." - " Possibly there could be stale port resources" - " on Compute")) + LOG.info("Device is not deleted completely." + " Continuing further cleanup of resources." 
+ " Possibly there could be stale port resources" + " on Compute") device = ev.data orchestration_driver = self._get_orchestration_driver( device['service_details']['service_vendor']) @@ -745,15 +743,15 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler): nfd_request = self._prepare_failure_case_device_data(nfp_context) service_details = nfp_context['service_details'] - LOG.info(_LI("Received event CREATE NETWORK FUNCTION " - "DEVICE request.")) + LOG.info("Received event CREATE NETWORK FUNCTION " + "DEVICE request.") orchestration_driver = self._get_orchestration_driver( service_details['service_vendor']) device_data = self._prepare_device_data_from_nfp_context(nfp_context) - LOG.info(_LI("Creating new device:%(device)s"), + LOG.info("Creating new device:%(device)s", {'device': nfd_request}) device_data['volume_support'] = ( self.config.device_orchestrator.volume_support) @@ -765,7 +763,7 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler): orchestration_driver.create_network_function_device( device_data)) if not driver_device_info: - LOG.info(_LI("Device creation failed")) + LOG.info("Device creation failed") self._create_event(event_id='DEVICE_ERROR', event_data=nfd_request, is_internal_event=True) @@ -824,8 +822,8 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler): orchestration_driver.get_network_function_device_status(device)) if is_device_up == nfp_constants.ACTIVE: - LOG.info(_LI("Device with NFD:%(id)s came up for " - "tenant:%(tenant)s "), + LOG.info("Device with NFD:%(id)s came up for " + "tenant:%(tenant)s ", {'id': network_function_device['id'], 'tenant': tenant_id}) self._post_device_up_event_graph(nfp_context) @@ -916,9 +914,9 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler): self._update_network_function_device_db( network_function_device, nfp_constants.ACTIVE) - LOG.info(_LI( + LOG.info( "Configuration completed for device with NFD:%(device_id)s. 
" - "Updated DB status to ACTIVE."), + "Updated DB status to ACTIVE.", {'device_id': network_function_device['id']}) LOG.debug("Device detail:%s", network_function_device) @@ -1331,8 +1329,8 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler): self._controller.event_complete(event, result="SUCCESS") return device = self._prepare_device_data_fast(network_function_details) - LOG.info(_LI("Recieved DELETE NETWORK FUNCTION " - "DEVICE request ")) + LOG.info("Received DELETE NETWORK FUNCTION " + "DEVICE request ") device['event_desc'] = event.desc.to_dict() self._create_event(event_id='DELETE_CONFIGURATION', event_data=device, @@ -1474,7 +1472,7 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler): # Error Handling def handle_device_create_error(self, event): device = event.data - LOG.error(_LE("Device creation failed, for device %(device)s"), + LOG.error("Device creation failed, for device %(device)s", {'device': device}) device['network_function_device_id'] = device.get('id') self._create_event(event_id='DEVICE_CREATE_FAILED', @@ -1563,8 +1561,8 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler): def handle_driver_error(self, event): device = event.data - LOG.error(_LE("Exception occured in driver, driver returned None " - " for device %(device)s"), {'device': device}) + LOG.error("Exception occurred in driver, driver returned None " + " for device %(device)s", {'device': device}) status = nfp_constants.ERROR desc = 'Exception in driver, driver return None' self._update_network_function_device_db(device, status, desc) @@ -1631,8 +1629,8 @@ class NDOConfiguratorRpcApi(object): def create_network_function_device_config(self, device_data, config_params): self._update_params(device_data, config_params, operation='create') - LOG.info(_LI("Sending create NFD config request to configurator " - "for NF:%(nf_id)s "), + LOG.info("Sending create NFD config request to configurator " + "for NF:%(nf_id)s ", {'nf_id': config_params['info']['context']['nf_id']}) 
transport.send_request_to_configurator(self.conf, @@ -1645,7 +1643,7 @@ class NDOConfiguratorRpcApi(object): config_params): self._update_params(device_data, config_params, operation='delete') config_params['info']['context']['nfp_context'] = device_data - LOG.info(_LI("Sending delete NFD config request to configurator ")) + LOG.info("Sending delete NFD config request to configurator ") transport.send_request_to_configurator(self.conf, self.context, diff --git a/gbpservice/nfp/orchestrator/modules/service_orchestrator.py b/gbpservice/nfp/orchestrator/modules/service_orchestrator.py index f5a03642d..eac64e029 100644 --- a/gbpservice/nfp/orchestrator/modules/service_orchestrator.py +++ b/gbpservice/nfp/orchestrator/modules/service_orchestrator.py @@ -17,9 +17,6 @@ from oslo_log import helpers as log_helpers import oslo_messaging from gbpservice._i18n import _ -from gbpservice._i18n import _LE -from gbpservice._i18n import _LI -from gbpservice._i18n import _LW from gbpservice.nfp.common import constants as nfp_constants from gbpservice.nfp.common import exceptions as nfp_exc from gbpservice.nfp.common import topics as nfp_rpc_topics @@ -124,8 +121,8 @@ class RpcHandler(object): Function Instance ''' module_context.init(network_function) - LOG.info(_LI("Received RPC call for CREATE NETWORK FUNCTION for " - "tenant:%(tenant_id)s"), + LOG.info("Received RPC call for CREATE NETWORK FUNCTION for " + "tenant:%(tenant_id)s", {'tenant_id': network_function[ 'resource_owner_context']['tenant_id']}) @@ -151,7 +148,7 @@ class RpcHandler(object): Returns the Network functions from DB ''' module_context.init() - LOG.info(_LI("Received RPC call for GET NETWORK FUNCTIONS ")) + LOG.info("Received RPC call for GET NETWORK FUNCTIONS ") service_orchestrator = ServiceOrchestrator(self._controller, self.conf) return service_orchestrator.get_network_functions( context, filters) @@ -166,8 +163,8 @@ class RpcHandler(object): ''' module_context.init() - LOG.info(_LI("Received RPC call for 
UPDATE NETWORK FUNCTION for NF:" - "%(network_function_id)s"), + LOG.info("Received RPC call for UPDATE NETWORK FUNCTION for NF:" + "%(network_function_id)s", {'network_function_id': network_function_id}) service_orchestrator = ServiceOrchestrator(self._controller, self.conf) service_orchestrator.update_network_function( @@ -182,8 +179,8 @@ class RpcHandler(object): Results in an Event for async processing of Network Function Instance. ''' module_context.init() - LOG.info(_LI("Received RPC call for DELETE NETWORK FUNCTION for NF:" - "%(network_function_id)s"), + LOG.info("Received RPC call for DELETE NETWORK FUNCTION for NF:" + "%(network_function_id)s", {'network_function_id': network_function_id}) service_orchestrator = ServiceOrchestrator(self._controller, self.conf) @@ -199,9 +196,9 @@ class RpcHandler(object): Results in an Event for async processing of Network Function Instance. ''' module_context.init() - LOG.info(_LI("Received RPC call for POLICY TARGET ADDED NOTIFICATION " - "for NF:" - " %(network_function_id)s"), + LOG.info("Received RPC call for POLICY TARGET ADDED NOTIFICATION " + "for NF:" + " %(network_function_id)s", {'network_function_id': network_function_id}) service_orchestrator = ServiceOrchestrator(self._controller, self.conf) service_orchestrator.handle_policy_target_added( @@ -216,8 +213,8 @@ class RpcHandler(object): Results in an Event for async processing of Network Function Instance. ''' module_context.init() - LOG.info(_LI("Received RPC call for POLICY TARGET REMOVED " - "NOTIFICATION for NF:%(network_function_id)s"), + LOG.info("Received RPC call for POLICY TARGET REMOVED " + "NOTIFICATION for NF:%(network_function_id)s", {'network_function_id': network_function_id}) service_orchestrator = ServiceOrchestrator(self._controller, self.conf) service_orchestrator.handle_policy_target_removed( @@ -232,8 +229,8 @@ class RpcHandler(object): Results in an Event for async processing of Network Function Instance. 
''' module_context.init() - LOG.info(_LI("Received RPC call CONSUMER PTG ADDED NOTIFICATION " - "for NF:%(network_function_id)s"), + LOG.info("Received RPC call CONSUMER PTG ADDED NOTIFICATION " + "for NF:%(network_function_id)s", {'network_function_id': network_function_id}) service_orchestrator = ServiceOrchestrator(self._controller, self.conf) service_orchestrator.handle_consumer_ptg_added( @@ -248,8 +245,8 @@ class RpcHandler(object): Results in an Event for async processing of Network Function Instance. ''' module_context.init() - LOG.info(_LI("Received RPC call for CONSUMER PTG REMOVED NOTIFICATION " - "for NF:%(network_function_id)s"), + LOG.info("Received RPC call for CONSUMER PTG REMOVED NOTIFICATION " + "for NF:%(network_function_id)s", {'network_function_id': network_function_id}) service_orchestrator = ServiceOrchestrator(self._controller, self.conf) service_orchestrator.handle_consumer_ptg_removed( @@ -334,13 +331,13 @@ class RpcHandlerConfigurator(object): NF = None NFI = None if NF and NFI: - LOG.info(_LI("Created event %(event_name)s with NF:%(nf)s " - "and NFI:%(nfi)s "), + LOG.info("Created event %(event_name)s with NF:%(nf)s " + "and NFI:%(nfi)s ", {'event_name': event_id, 'nf': NF, 'nfi': NFI}) else: - LOG.info(_LI("Created event %(event_name)s "), + LOG.info("Created event %(event_name)s ", {'event_name': event_id}) def _create_event(self, event_id, event_data=None, @@ -588,20 +585,20 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler): NF = None NFI = None if NF and NFI: - LOG.info(_LI("Received event %(event_name)s with NF:%(nf)s and " - "NFI:%(nfi)s "), + LOG.info("Received event %(event_name)s with NF:%(nf)s and " + "NFI:%(nfi)s ", {'event_name': event.id, 'nf': NF, 'nfi': NFI}) else: - LOG.info(_LI("Received event %(event_name)s "), + LOG.info("Received event %(event_name)s ", {'event_name': event.id}) try: event_handler = self.event_method_mapping(event.id) event_handler(event) except Exception as e: - LOG.exception(_LE("Error in 
processing event: %(event_id)s for " - "event data %(event_data)s. Error: %(error)s"), + LOG.exception("Error in processing event: %(event_id)s for " + "event data %(event_data)s. Error: %(error)s", {'event_id': event.id, 'event_data': event.data, 'error': e}) _, _, tb = sys.exc_info() @@ -609,27 +606,26 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler): raise e def handle_poll_event(self, event): - LOG.info(_LI("Received poll event %(id)s"), + LOG.info("Received poll event %(id)s", {'id': event.id}) try: event_handler = self.event_method_mapping(event.id) return event_handler(event) except Exception: - LOG.exception(_LE("Error in processing poll event: " - "%(event_id)s"), {'event_id': event.id}) + LOG.exception("Error in processing poll event: " + "%(event_id)s", {'event_id': event.id}) def event_cancelled(self, event, reason): nfp_context = event.context if event.id == 'CHECK_USER_CONFIG_COMPLETE': network_function = nfp_context['network_function'] - LOG.info(_LI("Applying user config failed for " - "NF:%(network_function_id)s " - "with reason %(reason)s" - ""), {'network_function_id': network_function[ - 'id'], - 'reason': str(reason)}) + LOG.info("Applying user config failed for " + "NF:%(network_function_id)s " + "with reason %(reason)s" + " ", {'network_function_id': network_function[ + 'id'], 'reason': str(reason)}) operation = nfp_context['log_context'].get('path') - LOG.error(_LE("[Event:Service%(operation)sFailed]"), + LOG.error("[Event:Service%(operation)sFailed]", {'operation': operation.capitalize()}) LOG.event('%s network function failed.' 
% operation.capitalize(), stats_type=nfp_constants.error_event) @@ -649,13 +645,13 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler): elif event.id == 'APPLY_USER_CONFIG_IN_PROGRESS' or ( event.id == 'UPDATE_USER_CONFIG_STILL_IN_PROGRESS'): request_data = event.data - LOG.info(_LI("Applying user config failed for " - "NF: %(network_function_id)s data:" - "%(data)s with reason %(reason)s" - ""), {'data': request_data, - 'network_function_id': request_data[ - 'network_function_id'], - 'reason': str(reason)}) + LOG.info("Applying user config failed for " + "NF: %(network_function_id)s data:" + "%(data)s with reason %(reason)s" + "", {'data': request_data, + 'network_function_id': request_data[ + 'network_function_id'], + 'reason': str(reason)}) updated_network_function = {'status': nfp_constants.ERROR} with nfp_ctx_mgr.DbContextManager as dcm: @@ -666,7 +662,7 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler): updated_network_function) operation = nfp_context['log_context'].get('path') - LOG.error(_LE("[Event:Service%(operation)sFailed]"), + LOG.error("[Event:Service%(operation)sFailed]", {'operation': operation.capitalize()}) LOG.event('%s network function failed.' % operation.capitalize(), stats_type=nfp_constants.error_event) @@ -702,13 +698,13 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler): NF = None NFI = None if NF and NFI: - LOG.info(_LI("Created event %(event_name)s with NF:%(nf)s and " - "NFI:%(nfi)s "), + LOG.info("Created event %(event_name)s with NF:%(nf)s and " + "NFI:%(nfi)s ", {'event_name': event_id, 'nf': NF, 'nfi': NFI}) else: - LOG.info(_LI("Created event %(event_name)s "), + LOG.info("Created event %(event_name)s ", {'event_name': event_id}) # REVISIT(ashu): Merge this _create_event, and above one to have # single function. 
@@ -788,9 +784,9 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler): tag_str, config_str = self.config_driver.parse_template_config_string( service_config_str) if not config_str: - LOG.error(_LE('Exception while parsing config string, config ' - 'string: %(config_str)s is improper for ' - 'network_function id: %(network_function_id)s'), + LOG.error('Exception while parsing config string, config ' + 'string: %(config_str)s is improper for ' + 'network_function id: %(network_function_id)s', {'config_str': service_config_str, 'network_function_id': network_function_id}) self.handle_driver_error(network_function_id) @@ -823,9 +819,9 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler): network_function_id = network_function_data[ 'network_function_details']['network_function']['id'] if not config_str: - LOG.error(_LE('Exception while parsing config string, config ' - 'string: %(config_str)s is improper for ' - 'network_function id: %(network_function_id)s'), + LOG.error('Exception while parsing config string, config ' + 'string: %(config_str)s is improper for ' + 'network_function id: %(network_function_id)s', {'config_str': service_config_str, 'network_function_id': network_function_id}) self.handle_driver_error(network_function_id) @@ -877,11 +873,11 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler): def _report_logging_info(self, nf, nfi, service_type, service_vendor): - LOG.info(_LI("[TenantID:%(tenant_id)s, " - "ServiceChainID:%(service_chain_id)s, " - "ServiceInstanceID:%(service_instance_id)s, " - "ServiceType:%(service_type)s, " - "ServiceProvider:%(service_provider)s]"), + LOG.info("[TenantID:%(tenant_id)s, " + "ServiceChainID:%(service_chain_id)s, " + "ServiceInstanceID:%(service_instance_id)s, " + "ServiceType:%(service_type)s, " + "ServiceProvider:%(service_provider)s]", {'tenant_id': nf['tenant_id'], 'service_chain_id': nf['service_chain_id'], 'service_instance_id': nfi['id'], @@ -961,7 +957,7 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler): 
nfp_context['log_context']['meta_id'] = network_function['id'] nfp_context['log_context']['auth_token'] = context.auth_token - LOG.info(_LI("[Event:ServiceCreateInitiated]")) + LOG.info("[Event:ServiceCreateInitiated]") LOG.event("Started create network function.", stats_type=nfp_constants.request_event) @@ -973,8 +969,8 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler): nfp_context['service_details'] = service_details nfp_context['share_existing_device'] = False nfp_context['base_mode'] = base_mode_support - LOG.info(_LI("Handling RPC call CREATE NETWORK FUNCTION for " - "%(service_type)s with tenant:%(tenant_id)s"), + LOG.info("Handling RPC call CREATE NETWORK FUNCTION for " + "%(service_type)s with tenant:%(tenant_id)s", {'tenant_id': tenant_id, 'service_type': service_profile['service_type']}) if base_mode_support: @@ -1009,7 +1005,7 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler): network_function_id, {'service_config': user_config, 'status': nfp_constants.PENDING_UPDATE}) - LOG.info(_LI("[Event:ServiceUpdateInitiated]")) + LOG.info("[Event:ServiceUpdateInitiated]") LOG.event("Started update network function.", stats_type=nfp_constants.request_event) @@ -1054,7 +1050,7 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler): with nfp_ctx_mgr.DbContextManager: self.db_handler.delete_network_function( self.db_session, network_function_id) - LOG.info(_LI("[Event:ServiceDeleteCompleted]")) + LOG.info("[Event:ServiceDeleteCompleted]") LOG.event("Completed delete network function.", stats_type=nfp_constants.response_event) @@ -1087,7 +1083,7 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler): network_function_id, network_function) nfp_context.update(network_function_details) - LOG.info(_LI("[Event:ServiceDeleteInitiated]")) + LOG.info("[Event:ServiceDeleteInitiated]") LOG.event("Started delete network function.", stats_type=nfp_constants.request_event) if not base_mode_support: @@ -1231,8 +1227,8 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler): 
nfp_context['log_context']['nfi_id'] = nfi['id'] - LOG.info(_LI("Creating event CREATE NETWORK FUNCTION DEVICE " - "for NF: %(network_function_id)s"), + LOG.info("Creating event CREATE NETWORK FUNCTION DEVICE " + "for NF: %(network_function_id)s", {'network_function_id': network_function['id']}) ev = self._controller.new_event( @@ -1385,10 +1381,10 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler): neutron_resource_desc = ( self.config_driver.get_neutron_resource_description(nfp_context)) if not neutron_resource_desc: - LOG.error(_LE( + LOG.error( "Preparing neutron resource description failed in " "config driver, marking user config as Failed for " - "network function: %(nf)s"), {'nf': network_function}) + "network function: %(nf)s", {'nf': network_function}) nfp_context['network_function_id'] = network_function['id'] binding_key = nfp_context['service_details'][ 'service_vendor'].lower() + network_function['id'] @@ -1488,7 +1484,7 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler): request_data['network_function_id']) network_function = network_function_details['network_function'] - LOG.info(_LI("[Event:ServiceUpdateInitiated]")) + LOG.info("[Event:ServiceUpdateInitiated]") LOG.event("Started update network function.", stats_type=nfp_constants.request_event) nfi = network_function_details.get('network_function_instance', None) @@ -1570,7 +1566,7 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler): nfp_context = event.context operation = nfp_context['log_context'].get('path') - LOG.error(_LE("[Event:Service%(operation)sFailed]"), + LOG.error("[Event:Service%(operation)sFailed]", {'operation': operation.capitalize()}) LOG.event('%s network function failed.' 
% operation.capitalize(), stats_type=nfp_constants.error_event) @@ -1578,9 +1574,9 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler): # Trigger RPC to notify the Create_Service caller with status def handle_driver_error(self, network_function_id): - LOG.error(_LE("Error occurred while processing network function " - "CRUD operations, marking network function: %(nf_id)s " - "as ERROR to initiate cleanup."), + LOG.error("Error occurred while processing network function " + "CRUD operations, marking network function: %(nf_id)s " + "as ERROR to initiate cleanup.", {'nf_id': network_function_id}) network_function_details = self.get_network_function_details( network_function_id) @@ -1592,7 +1588,7 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler): network_function_id, network_function) nfp_context = module_context.get() operation = nfp_context['log_context'].get('path') - LOG.error(_LE("[Event:Service%(operation)sFailed]"), + LOG.error("[Event:Service%(operation)sFailed]", {'operation': operation.capitalize()}) LOG.event('%s network function failed.' 
% operation.capitalize(), stats_type=nfp_constants.error_event) @@ -1664,10 +1660,10 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler): request_data['config_policy_id'], request_data['tenant_id'], request_data['network_function_details']) if config_status == nfp_constants.ERROR: - LOG.info(_LI("Applying user config failed for " - "NF:%(network_function_id)s "), { - 'network_function_id': - request_data['network_function_id']}) + LOG.info("Applying user config failed for " + "NF:%(network_function_id)s ", { + 'network_function_id': + request_data['network_function_id']}) updated_network_function = {'status': nfp_constants.ERROR} with nfp_ctx_mgr.DbContextManager as dcm: dcm.lock( @@ -1676,7 +1672,7 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler): request_data['network_function_id'], updated_network_function) operation = event.context['log_context'].get('path') - LOG.error(_LE("[Event:Service%(operation)sFailed]"), + LOG.error("[Event:Service%(operation)sFailed]", {'operation': operation.capitalize()}) LOG.event('%s network function failed.' % operation.capitalize(), stats_type=nfp_constants.error_event) @@ -1710,8 +1706,8 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler): max_times=self.UPDATE_USER_CONFIG_MAXRETRY) return STOP_POLLING updated_network_function = {'status': nfp_constants.ACTIVE} - LOG.info(_LI("Applying user config is successfull moving " - "NF:%(network_function_id)s to ACTIVE"), + LOG.info("Applying user config is successfull moving " + "NF:%(network_function_id)s to ACTIVE", {'network_function_id': request_data['network_function_id']}) with nfp_ctx_mgr.DbContextManager as dcm: @@ -1722,7 +1718,7 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler): updated_network_function) operation = event.context['log_context'].get('path') - LOG.info(_LI("[Event:Service%(operation)sCompleted]"), + LOG.info("[Event:Service%(operation)sCompleted]", {'operation': operation.capitalize()}) LOG.event('Completed %s network function.' 
% operation, stats_type=nfp_constants.response_event) @@ -1751,8 +1747,8 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler): network_function = nfp_context['network_function'] updated_network_function = {'status': nfp_constants.ACTIVE} - LOG.info(_LI("Applying user config is successfull moving " - "NF: %(network_function_id)s to ACTIVE"), + LOG.info("Applying user config is successfull moving " + "NF: %(network_function_id)s to ACTIVE", {'network_function_id': network_function['id']}) with nfp_ctx_mgr.DbContextManager as dcm: dcm.lock(self.db_session, self.db_handler.update_network_function, @@ -1760,7 +1756,7 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler): updated_network_function) operation = nfp_context['log_context'].get('path') - LOG.info(_LI("[Event:Service%(operation)sCompleted]"), + LOG.info("[Event:Service%(operation)sCompleted]", {'operation': operation.capitalize()}) LOG.event('Completed %s network function.' % operation, stats_type=nfp_constants.response_event) @@ -1787,10 +1783,10 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler): config_status = self.config_driver.check_config_complete(nfp_context) if config_status == nfp_constants.ERROR: - LOG.info(_LI("Applying user config failed for " - "NF: %(network_function_id)s"), { - 'network_function_id': - network_function['id']}) + LOG.info("Applying user config failed for " + "NF: %(network_function_id)s", { + 'network_function_id': + network_function['id']}) # Complete the original event APPLY_USER_CONFIG here event_desc = nfp_context.pop('event_desc', None) apply_config_event = self._controller.new_event( @@ -1833,8 +1829,8 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler): network_function) except Exception as err: # REVISIT: May be we need a count before removing the poll event - LOG.error(_LE("Error: %(err)s while verifying configuration " - "delete completion."), {'err': err}) + LOG.error("Error: %(err)s while verifying configuration " + "delete completion.", {'err': err}) 
self._create_event('USER_CONFIG_DELETE_FAILED', event_data=request_data, is_internal_event=True) self._controller.event_complete(event) @@ -1850,8 +1846,8 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler): # Trigger RPC to notify the Create_Service caller with status elif config_status == nfp_constants.COMPLETED: updated_network_function = {'status': nfp_constants.ACTIVE} - LOG.info(_LI("Applying user config is successfull moving " - "NF:%(network_function_id)s to ACTIVE"), + LOG.info("Applying user config is successfull moving " + "NF:%(network_function_id)s to ACTIVE", {'network_function_id': request_data['network_function_id']}) with nfp_ctx_mgr.DbContextManager as dcm: @@ -1862,7 +1858,7 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler): updated_network_function) operation = event.context['log_context'].get('path') - LOG.info(_LI("[Event:Service%(operation)sCompleted]"), + LOG.info("[Event:Service%(operation)sCompleted]", {'operation': operation.capitalize()}) LOG.event('Completed %s network function.' 
% operation, stats_type=nfp_constants.response_event) @@ -1890,8 +1886,8 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler): request_data['config_policy_id'], request_data['tenant_id']) except Exception as err: # REVISIT: May be we need a count before removing the poll event - LOG.error(_LE("Error: %(err)s while verifying configuration " - "delete completion."), {'err': err}) + LOG.error("Error: %(err)s while verifying configuration " + "delete completion.", {'err': err}) # self._create_event('USER_CONFIG_DELETE_FAILED', # event_data=event_data, is_internal_event=True) self._controller.event_complete(event) @@ -1954,8 +1950,8 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler): self.db_handler.update_network_function, network_function_id, network_function) - LOG.info(_LI("Applying user config is successfull moving " - "NF: %(network_function_id)s to ACTIVE"), + LOG.info("Applying user config is successfull moving " + "NF: %(network_function_id)s to ACTIVE", {'network_function_id': network_function_id}) else: @@ -1981,8 +1977,8 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler): def handle_update_user_config_failed(self, event): event_data = event.data network_function_id = event_data['network_function_id'] - LOG.error(_LE("NSO: updating user config failed, moving " - "network function %(network_function_id)s to ERROR"), + LOG.error("NSO: updating user config failed, moving " + "network function %(network_function_id)s to ERROR", {'network_function_id': network_function_id}) self.handle_user_config_failed(event) @@ -1998,7 +1994,7 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler): updated_network_function) # Trigger RPC to notify the Create_Service caller with status operation = event.context['log_context'].get('path') - LOG.error(_LE("[Event:Service%(operation)sFailed]"), + LOG.error("[Event:Service%(operation)sFailed]", {'operation': operation.capitalize()}) LOG.event('%s network function failed.' 
% operation.capitalize(), stats_type=nfp_constants.error_event) @@ -2024,9 +2020,9 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler): self.config_driver.is_update_config_supported( request_data['service_type'])): updated_network_function.update({'status': nfp_constants.ACTIVE}) - LOG.warning(_LW( + LOG.warning( "Failed to delete old stack id: %(stack_id)s in" - "firewall update case, Need to manually delete it"), + "firewall update case, Need to manually delete it", {"stack_id": request_data['config_policy_id']}) with nfp_ctx_mgr.DbContextManager as dcm: @@ -2042,7 +2038,7 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler): results = event.result for result in results: if result.result.lower() != 'success': - LOG.error(_LE("Event: %(result_id)s failed"), + LOG.error("Event: %(result_id)s failed", {'result_id': result.id}) network_function_details = event.context @@ -2065,7 +2061,7 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler): with nfp_ctx_mgr.DbContextManager: self.db_handler.delete_network_function( self.db_session, nf['id']) - LOG.info(_LI("[Event:ServiceDeleteCompleted]")) + LOG.info("[Event:ServiceDeleteCompleted]") LOG.event("Completed delete network function.", stats_type=nfp_constants.response_event) @@ -2076,7 +2072,7 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler): nf_id=nf_id, service_type=service_type) - LOG.info(_LI("Deleted NF:%(nf_id)s "), + LOG.info("Deleted NF:%(nf_id)s ", {'nf_id': nf['id']}) self._controller.event_complete(event) @@ -2095,7 +2091,7 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler): with nfp_ctx_mgr.DbContextManager: self.db_handler.delete_network_function( self.db_session, nfi['network_function_id']) - LOG.info(_LI("[Event:ServiceDeleteCompleted]")) + LOG.info("[Event:ServiceDeleteCompleted]") LOG.event("Completed delete network function.", stats_type=nfp_constants.response_event) @@ -2105,7 +2101,7 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler): nf_id=nf_id, service_type=service_type) - 
LOG.info(_LI("Deleted NF:%(nf_id)s "), + LOG.info("Deleted NF:%(nf_id)s ", {'nf_id': nf_id}) # Inform delete service caller with delete completed RPC @@ -2120,13 +2116,13 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler): self.db_session, network_function_id) return network_function except nfp_exc.NetworkFunctionNotFound: - LOG.warning(_LW("Failed to retrieve Network Function details for" - " %(network_function)s"), + LOG.warning("Failed to retrieve Network Function details for" + " %(network_function)s", {'network_function': network_function_id}) return None except Exception: - LOG.exception(_LE("Failed to retrieve Network Function details for" - " %(network_function)s"), + LOG.exception("Failed to retrieve Network Function details for" + " %(network_function)s", {'network_function': network_function_id}) return None @@ -2431,8 +2427,8 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler): self.db_session, port_id) return port_info except Exception: - LOG.exception(_LE("Failed to retrieve Port Info for" - " %(port_id)s"), + LOG.exception("Failed to retrieve Port Info for" + " %(port_id)s", {'port_id': port_id}) return None @@ -2615,7 +2611,7 @@ class NSOConfiguratorRpcApi(object): config_tag) self._update_params(user_config_data, config_params, operation='create') - LOG.info(_LI("Sending create heat config request to configurator ")) + LOG.info("Sending create heat config request to configurator ") LOG.debug("Sending create heat config request to configurator " "with config_params = %s", config_params) @@ -2632,7 +2628,7 @@ class NSOConfiguratorRpcApi(object): config_tag) self._update_params(user_config_data, config_params, operation='delete') - LOG.info(_LI("Sending delete heat config request to configurator ")) + LOG.info("Sending delete heat config request to configurator ") LOG.debug("Sending delete heat config request to configurator " " with config_params = %s", config_params) @@ -2648,7 +2644,7 @@ class NSOConfiguratorRpcApi(object): config_tag) 
self._update_params(user_config_data, config_params, operation='update') - LOG.info(_LI("Sending update heat config request to configurator. ")) + LOG.info("Sending update heat config request to configurator. ") transport.send_request_to_configurator(self.conf, self.context, @@ -2662,8 +2658,8 @@ class NSOConfiguratorRpcApi(object): config_tag) self._update_params(user_config_data, config_params, operation='pt_add') - LOG.info(_LI("Sending Policy Target and heat config request to " - "configurator .")) + LOG.info("Sending Policy Target and heat config request to " + "configurator .") transport.send_request_to_configurator(self.conf, self.context, @@ -2677,8 +2673,8 @@ class NSOConfiguratorRpcApi(object): config_tag) self._update_params(user_config_data, config_params, operation='pt_remove') - LOG.info(_LI("Sending Policy Target remove heat config request to " - "configurator. ")) + LOG.info("Sending Policy Target remove heat config request to " + "configurator. ") transport.send_request_to_configurator(self.conf, self.context, @@ -2692,8 +2688,8 @@ class NSOConfiguratorRpcApi(object): config_tag) self._update_params(user_config_data, config_params, operation='consumer_add') - LOG.info(_LI("Sending consumer and heat config request to " - "configurator .")) + LOG.info("Sending consumer and heat config request to " + "configurator .") transport.send_request_to_configurator(self.conf, self.context, @@ -2707,8 +2703,8 @@ class NSOConfiguratorRpcApi(object): config_tag) self._update_params(user_config_data, config_params, operation='consumer_remove') - LOG.info(_LI("Sending consumer remove heat config request to " - "configurator .")) + LOG.info("Sending consumer remove heat config request to " + "configurator .") transport.send_request_to_configurator(self.conf, self.context,