Preparing for Pike support

1. Get rid of log translation (since oslo doesn't support it
anymore)

2. Use the service-based class from neutron_lib

Change-Id: I15cf50984313bc217a4b38707c19b635e4eca039
Author: Sumit Naiksatam
Date:   2017-10-24 13:52:55 -07:00
parent 42ccdb6e0d
commit 599f9c70e2
62 changed files with 787 additions and 901 deletions
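
For context, the two migration patterns applied throughout the diffs below can be sketched as follows. This is an illustrative, hypothetical module, not a file from this commit: log calls drop the `_LI`/`_LE` markers and use plain strings, while user-facing exception messages keep the `_()` translation marker, and the service plugin base class is imported from neutron_lib rather than neutron (`neutron_lib.services.base.ServicePluginBase` is assumed here as the Pike-era location; `ExamplePlugin` and its methods are invented for illustration).

    from neutron_lib.services import base as service_base  # assumed Pike-era path
    from oslo_log import log as logging

    from gbpservice._i18n import _

    LOG = logging.getLogger(__name__)


    class ExamplePlugin(service_base.ServicePluginBase):
        """Hypothetical plugin illustrating both migration patterns."""

        def get_plugin_type(self):
            return 'EXAMPLE'

        def get_plugin_description(self):
            return 'Illustrative plugin, not part of this commit'

        def create_firewall(self, context, firewall):
            # Before: LOG.info(_LI("Received RPC CREATE FIREWALL ..."), ...)
            # After: a plain, untranslated log message.
            LOG.info("Received RPC CREATE FIREWALL for Firewall: %(fw)s",
                     {'fw': firewall['id']})
            # Exception messages shown to users keep the _() marker.
            raise NotImplementedError(_("Not implemented in this sketch."))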

View File

@@ -27,16 +27,6 @@ _C = _translators.contextual_form
 # The plural translation function using the name "_P"
 _P = _translators.plural_form
-# Translators for log levels.
-#
-# The abbreviated names are meant to reflect the usual use of a short
-# name like '_'. The "L" is for "log" and the other letter comes from
-# the level.
-_LI = _translators.log_info
-_LW = _translators.log_warning
-_LE = _translators.log_error
-_LC = _translators.log_critical
 def get_available_languages():
     return oslo_i18n.get_available_languages(DOMAIN)

View File

@@ -17,7 +17,6 @@ from oslo_utils import importutils
 from stevedore import driver
 from gbpservice._i18n import _
-from gbpservice._i18n import _LE
 LOG = logging.getLogger(__name__)
 cfg.CONF.import_group('keystone_authtoken', 'keystonemiddleware.auth_token')
@@ -42,8 +41,8 @@ def load_plugin(namespace, plugin):
         try:
             plugin_class = importutils.import_class(plugin)
         except ImportError as e2:
-            LOG.exception(_LE("Error loading plugin by name, %s"), e1)
-            LOG.exception(_LE("Error loading plugin by class, %s"), e2)
+            LOG.exception("Error loading plugin by name, %s", e1)
+            LOG.exception("Error loading plugin by class, %s", e2)
             raise ImportError(_("Plugin not found."))
     return plugin_class()

View File

@@ -10,7 +10,6 @@
 # License for the specific language governing permissions and limitations
 # under the License.
-from gbpservice._i18n import _LE
 from gbpservice.contrib.nfp.config_orchestrator.common import (
     topics as a_topics)
 from gbpservice.nfp.core import log as nfp_logging
@@ -115,7 +114,7 @@ def get_dhcp_agent_host(config):
         if agents:
             return agents[0].get('host', None)
     except Exception as exc:
-        LOG.error(_LE("Failed to get dhcp agent host : %(exc)s"),
+        LOG.error("Failed to get dhcp agent host : %(exc)s",
                   {'exc': exc})
@@ -168,8 +167,8 @@ def get_network_function_details(context, network_function_id):
         return network_function_details['network_function']
     except Exception as e:
-        LOG.error(_LE("Failed to get network function details of "
-                  "network_function_id %(network_function_id)s : %(ex)s "),
+        LOG.error("Failed to get network function details of "
+                  "network_function_id %(network_function_id)s : %(ex)s ",
                   {'ex': e, 'network_function_id': network_function_id})
@@ -193,7 +192,7 @@ def get_network_function_map(context, network_function_id):
         LOG.debug(msg)
         return request_data
     except Exception as e:
-        LOG.error(_LE("Failed to get network function map of "
-                  "network_function_id %(network_function_id)s : %(ex)s "),
+        LOG.error("Failed to get network function map of "
+                  "network_function_id %(network_function_id)s : %(ex)s ",
                   {'ex': e, 'network_function_id': network_function_id})
         return request_data

View File

@@ -13,7 +13,6 @@
 import ast
 import copy
-from gbpservice._i18n import _LI
 from gbpservice.contrib.nfp.config_orchestrator.common import common
 from gbpservice.nfp.common import constants as const
 from gbpservice.nfp.common import data_formatter as df
@@ -158,8 +157,8 @@ class FwAgent(firewall_db.Firewall_db_mixin):
         nf_id = self._fetch_nf_from_resource_desc(firewall["description"])
         nfp_context['log_context']['meta_id'] = nf_id
         nf = common.get_network_function_details(context, nf_id)
-        LOG.info(_LI("Received RPC CREATE FIREWALL for "
-                     "Firewall: %(firewall)s"),
+        LOG.info("Received RPC CREATE FIREWALL for "
+                 "Firewall: %(firewall)s",
                  {'firewall': firewall})
         body = self._data_wrapper(context, firewall, host, nf, 'CREATE')
         transport.send_request_to_configurator(self._conf,
@@ -172,8 +171,8 @@ class FwAgent(firewall_db.Firewall_db_mixin):
         nf_id = self._fetch_nf_from_resource_desc(firewall["description"])
         nfp_context['log_context']['meta_id'] = nf_id
         nf = common.get_network_function_details(context, nf_id)
-        LOG.info(_LI("Received RPC DELETE FIREWALL for "
-                     "Firewall: %(firewall)s"),
+        LOG.info("Received RPC DELETE FIREWALL for "
+                 "Firewall: %(firewall)s",
                  {'firewall': firewall})
         body = self._data_wrapper(context, firewall, host, nf, 'DELETE')
         transport.send_request_to_configurator(self._conf,

View File

@@ -13,7 +13,6 @@
 import ast
 import copy
-from gbpservice._i18n import _LI
 from gbpservice.contrib.nfp.config_orchestrator.common import common
 from gbpservice.contrib.nfp.config_orchestrator.common import lbv2_constants
 from gbpservice.nfp.common import constants as const
@@ -277,7 +276,7 @@ class Lbv2Agent(loadbalancer_dbv2.LoadBalancerPluginDbv2):
     def create_loadbalancer(self, context, loadbalancer, driver_name,
                             allocate_vip=True):
         nfp_context = module_context.init()
-        LOG.info(_LI("Received RPC CREATE LOADBALANCER for LB:%(lb)s"),
+        LOG.info("Received RPC CREATE LOADBALANCER for LB:%(lb)s",
                  {'lb': loadbalancer})
         # Fetch nf_id from description of the resource
         nf_id = self._fetch_nf_from_resource_desc(loadbalancer["description"])
@@ -306,8 +305,8 @@ class Lbv2Agent(loadbalancer_dbv2.LoadBalancerPluginDbv2):
     def delete_loadbalancer(self, context, loadbalancer,
                             delete_vip_port=True):
         nfp_context = module_context.init()
-        LOG.info(_LI("Received RPC DELETE LOADBALANCER for LB:"
-                     "%(lb)s"), {'lb': loadbalancer})
+        LOG.info("Received RPC DELETE LOADBALANCER for LB:"
+                 "%(lb)s", {'lb': loadbalancer})
         # Fetch nf_id from description of the resource
         nf_id = self._fetch_nf_from_resource_desc(loadbalancer["description"])
         nfp_context['log_context']['meta_id'] = nf_id
@@ -320,7 +319,7 @@ class Lbv2Agent(loadbalancer_dbv2.LoadBalancerPluginDbv2):
     @log_helpers.log_method_call
     def create_listener(self, context, listener):
         nfp_context = module_context.init()
-        LOG.info(_LI("Received RPC CREATE LISTENER for Listener:%(listener)s"),
+        LOG.info("Received RPC CREATE LISTENER for Listener:%(listener)s",
                  {'listener': listener})
         loadbalancer = listener['loadbalancer']
         # Fetch nf_id from description of the resource
@@ -348,7 +347,7 @@ class Lbv2Agent(loadbalancer_dbv2.LoadBalancerPluginDbv2):
     @log_helpers.log_method_call
     def delete_listener(self, context, listener):
         nfp_context = module_context.init()
-        LOG.info(_LI("Received RPC DELETE LISTENER for Listener:%(listener)s"),
+        LOG.info("Received RPC DELETE LISTENER for Listener:%(listener)s",
                  {'listener': listener})
         loadbalancer = listener['loadbalancer']
         # Fetch nf_id from description of the resource
@@ -363,7 +362,7 @@ class Lbv2Agent(loadbalancer_dbv2.LoadBalancerPluginDbv2):
     @log_helpers.log_method_call
     def create_pool(self, context, pool):
         nfp_context = module_context.init()
-        LOG.info(_LI("Received RPC CREATE POOL for Pool:%(pool)s"),
+        LOG.info("Received RPC CREATE POOL for Pool:%(pool)s",
                  {'pool': pool})
         loadbalancer = pool['loadbalancer']
         # Fetch nf_id from description of the resource
@@ -391,7 +390,7 @@ class Lbv2Agent(loadbalancer_dbv2.LoadBalancerPluginDbv2):
     @log_helpers.log_method_call
     def delete_pool(self, context, pool):
         nfp_context = module_context.init()
-        LOG.info(_LI("Received RPC DELETE POOL for Pool:%(pool)s"),
+        LOG.info("Received RPC DELETE POOL for Pool:%(pool)s",
                  {'pool': pool})
         loadbalancer = pool['loadbalancer']
         # Fetch nf_id from description of the resource
@@ -406,7 +405,7 @@ class Lbv2Agent(loadbalancer_dbv2.LoadBalancerPluginDbv2):
     @log_helpers.log_method_call
     def create_member(self, context, member):
         nfp_context = module_context.init()
-        LOG.info(_LI("Received RPC CREATE MEMBER for Member:%(member)s"),
+        LOG.info("Received RPC CREATE MEMBER for Member:%(member)s",
                  {'member': member})
         loadbalancer = member['pool']['loadbalancer']
         # Fetch nf_id from description of the resource
@@ -434,7 +433,7 @@ class Lbv2Agent(loadbalancer_dbv2.LoadBalancerPluginDbv2):
     @log_helpers.log_method_call
     def delete_member(self, context, member):
         nfp_context = module_context.init()
-        LOG.info(_LI("Received RPC DELETE MEMBER for Member:%(member)s"),
+        LOG.info("Received RPC DELETE MEMBER for Member:%(member)s",
                  {'member': member})
         loadbalancer = member['pool']['loadbalancer']
         # Fetch nf_id from description of the resource
@@ -449,7 +448,7 @@ class Lbv2Agent(loadbalancer_dbv2.LoadBalancerPluginDbv2):
     @log_helpers.log_method_call
     def create_healthmonitor(self, context, healthmonitor):
         nfp_context = module_context.init()
-        LOG.info(_LI("Received RPC CREATE HEALTH MONITOR for HM:%(hm)s"),
+        LOG.info("Received RPC CREATE HEALTH MONITOR for HM:%(hm)s",
                  {'hm': healthmonitor})
         loadbalancer = healthmonitor['pool']['loadbalancer']
         # Fetch nf_id from description of the resource
@@ -478,7 +477,7 @@ class Lbv2Agent(loadbalancer_dbv2.LoadBalancerPluginDbv2):
     @log_helpers.log_method_call
     def delete_healthmonitor(self, context, healthmonitor):
         nfp_context = module_context.init()
-        LOG.info(_LI("Received RPC DELETE HEALTH MONITOR for HM:%(hm)s"),
+        LOG.info("Received RPC DELETE HEALTH MONITOR for HM:%(hm)s",
                  {'hm': healthmonitor})
         loadbalancer = healthmonitor['pool']['loadbalancer']
         # Fetch nf_id from description of the resource

View File

@@ -13,7 +13,6 @@
 import ast
 import copy
-from gbpservice._i18n import _LI
 from gbpservice.contrib.nfp.config_orchestrator.common import common
 from gbpservice.nfp.common import constants as const
 from gbpservice.nfp.common import data_formatter as df
@@ -149,7 +148,7 @@ class VpnAgent(vpn_db.VPNPluginDb, vpn_db.VPNPluginRpcDbMixin):
     @log_helpers.log_method_call
     def vpnservice_updated(self, context, **kwargs):
         nfp_context = module_context.init()
-        LOG.info(_LI("Received RPC VPN SERVICE UPDATED with data:%(data)s"),
+        LOG.info("Received RPC VPN SERVICE UPDATED with data:%(data)s",
                  {'data': kwargs})
         # Fetch nf_id from description of the resource
         nf_id = self._fetch_nf_from_resource_desc(kwargs[

View File

@@ -13,7 +13,6 @@
 import sys
 import traceback
-from gbpservice._i18n import _LI
 from gbpservice.contrib.nfp.config_orchestrator.common import (
     lbv2_constants as lbv2_const)
 from gbpservice.contrib.nfp.config_orchestrator.common import (
@@ -41,8 +40,8 @@ class RpcHandler(object):
     def network_function_notification(self, context, notification_data):
         module_context.init()
         try:
-            LOG.info(_LI("Received NETWORK FUNCTION NOTIFICATION:"
-                         "%(notification)s"),
+            LOG.info("Received NETWORK FUNCTION NOTIFICATION:"
+                     "%(notification)s",
                      {'notification': notification_data['notification']})
             if notification_data['info']['service_type'] is not None:
                 handler = NaasNotificationHandler(self.conf, self.sc)
@@ -78,9 +77,9 @@ class FirewallNotifier(object):
         firewall_id = resource_data['firewall_id']
         status = resource_data['status']
-        LOG.info(_LI("Received firewall configuration create complete API, "
-                     "making an RPC call set firewall status for "
-                     "firewall:%(firewall)s and status: %(status)s"),
+        LOG.info("Received firewall configuration create complete API, "
+                 "making an RPC call set firewall status for "
+                 "firewall:%(firewall)s and status: %(status)s",
                  {'firewall': firewall_id,
                   'status': status})
@@ -103,9 +102,9 @@ class FirewallNotifier(object):
         resource_data = notification['data']
         firewall_id = resource_data['firewall_id']
-        LOG.info(_LI("Received firewall_configuration_delete_complete API, "
-                     "making an RPC call firewall_deleted for firewall:"
-                     "%(firewall)s "),
+        LOG.info("Received firewall_configuration_delete_complete API, "
+                 "making an RPC call firewall_deleted for firewall:"
+                 "%(firewall)s ",
                  {'firewall': firewall_id})
         # RPC call to plugin to update firewall deleted
@@ -143,9 +142,9 @@ class LoadbalancerV2Notifier(object):
         obj_p_status = resource_data['provisioning_status']
         obj_o_status = resource_data['operating_status']
-        LOG.info(_LI("Received LB's update_status API. Making an "
-                     "update_status RPC call to plugin for %(obj_type)s:"
-                     "%(obj_id)s with status: %(status)s"),
+        LOG.info("Received LB's update_status API. Making an "
+                 "update_status RPC call to plugin for %(obj_type)s:"
+                 "%(obj_id)s with status: %(status)s",
                  {'obj_type': obj_type,
                   'obj_id': obj_id,
                   'status': obj_p_status})
@@ -192,9 +191,9 @@ class VpnNotifier(object):
         nfp_context['log_context'] = logging_context
         status = resource_data['status']
-        LOG.info(_LI("Received VPN's update_status API. "
-                     "Making an update_status RPC cast to plugin for object"
-                     "with status: %(status)s"),
+        LOG.info("Received VPN's update_status API. "
+                 "Making an update_status RPC cast to plugin for object"
+                 "with status: %(status)s",
                  {'status': status})
         rpcClient = transport.RPCClient(a_topics.VPN_NFP_PLUGIN_TOPIC)
         rpcClient.cctxt.cast(context, 'update_status',

View File

@@ -11,7 +11,6 @@
 # under the License.
-from gbpservice._i18n import _LI
 from gbpservice.contrib.nfp.configurator.lib import constants as const
 from gbpservice.nfp.core import log as nfp_logging
 from gbpservice.nfp.core import module as nfp_api
@@ -105,7 +104,7 @@ class AgentBaseRPCManager(object):
         # Multiple request data blobs needs batch processing. Send batch
         # processing event or do direct processing of single request data blob
         if (len(sa_req_list) > 1):
-            LOG.info(_LI("Creating event PROCESS BATCH"))
+            LOG.info("Creating event PROCESS BATCH")
             args_dict = {
                 'sa_req_list': sa_req_list,
                 'notification_data': notification_data

View File

@@ -16,7 +16,6 @@ import oslo_messaging as messaging
 import requests
 import six
-from gbpservice._i18n import _LI
 from gbpservice.contrib.nfp.configurator.agents import agent_base
 from gbpservice.contrib.nfp.configurator.lib import constants as common_const
 from gbpservice.contrib.nfp.configurator.lib import fw_constants as const
@@ -62,9 +61,9 @@ class FwaasRpcSender(agent_base.AgentBaseEventHandler):
                     'notification_type': (
                         'set_firewall_status')}}]
         }
-        LOG.info(_LI("Sending Notification 'Set Firewall Status' to "
-                     "Orchestrator for firewall: %(fw_id)s with status:"
-                     "%(status)s"),
+        LOG.info("Sending Notification 'Set Firewall Status' to "
+                 "Orchestrator for firewall: %(fw_id)s with status:"
+                 "%(status)s",
                  {'fw_id': firewall_id,
                   'status': status})
         self.notify._notification(msg)
@@ -86,8 +85,8 @@ class FwaasRpcSender(agent_base.AgentBaseEventHandler):
                     'notification_type': (
                         'firewall_deleted')}}]
         }
-        LOG.info(_LI("Sending Notification 'Firewall Deleted' to "
-                     "Orchestrator for firewall: %(fw_id)s "),
+        LOG.info("Sending Notification 'Firewall Deleted' to "
+                 "Orchestrator for firewall: %(fw_id)s ",
                  {'fw_id': firewall_id})
         self.notify._notification(msg)
@@ -153,7 +152,7 @@ class FWaasRpcManager(agent_base.AgentBaseRPCManager):
         """
-        LOG.info(_LI("Received request 'Create Firewall'."))
+        LOG.info("Received request 'Create Firewall'.")
         self._create_event(context, firewall,
                            host, const.FIREWALL_CREATE_EVENT)
@@ -161,7 +160,7 @@ class FWaasRpcManager(agent_base.AgentBaseRPCManager):
         """ Receives request to update firewall from configurator
         """
-        LOG.info(_LI("Received request 'Update Firewall'."))
+        LOG.info("Received request 'Update Firewall'.")
         self._create_event(context, firewall,
                            host, const.FIREWALL_UPDATE_EVENT)
@@ -169,7 +168,7 @@ class FWaasRpcManager(agent_base.AgentBaseRPCManager):
         """ Receives request to delete firewall from configurator
         """
-        LOG.info(_LI("Received request 'Delete Firewall'."))
+        LOG.info("Received request 'Delete Firewall'.")
         self._create_event(context, firewall,
                            host, const.FIREWALL_DELETE_EVENT)
@@ -256,8 +255,8 @@ class FWaasEventHandler(nfp_api.NfpEventHandler):
         service_vendor = agent_info['service_vendor']
         service_feature = agent_info.get('service_feature', '')
         driver = self._get_driver(service_vendor, service_feature)
-        LOG.info(_LI("Invoking driver with service vendor:"
-                     "%(service_vendor)s "),
+        LOG.info("Invoking driver with service vendor:"
+                 "%(service_vendor)s ",
                  {'service_vendor': service_vendor})
         self.method = getattr(driver, "%s" % (ev.id.lower()))
         self.invoke_driver_for_plugin_api(ev)
@@ -435,7 +434,7 @@ def load_drivers(conf):
         driver_obj = driver_name(conf=conf)
         drivers[service_type] = driver_obj
-    LOG.info(_LI("Firewall loaded drivers:%(drivers)s"),
+    LOG.info("Firewall loaded drivers:%(drivers)s",
              {'drivers': drivers})
     return drivers

View File

@@ -14,7 +14,6 @@ import copy
 import os
 import six
-from gbpservice._i18n import _LI
 from gbpservice.contrib.nfp.configurator.agents import agent_base
 from gbpservice.contrib.nfp.configurator.lib import (
     generic_config_constants as gen_cfg_const)
@@ -142,8 +141,8 @@ class GenericConfigRpcManager(agent_base.AgentBaseRPCManager):
         Returns: None
         """
-        LOG.info(_LI("Received configure health monitor api for nfds:"
-                     "%(nfds)s"),
+        LOG.info("Received configure health monitor api for nfds:"
+                 "%(nfds)s",
                  {'nfds': resource_data['nfds']})
         resource_data['fail_count'] = 0
         self._send_event(context,
@@ -161,8 +160,8 @@ class GenericConfigRpcManager(agent_base.AgentBaseRPCManager):
         Returns: None
         """
-        LOG.info(_LI("Received clear health monitor api for nfds:"
-                     "%(nfds)s"),
+        LOG.info("Received clear health monitor api for nfds:"
+                 "%(nfds)s",
                  {'nfds': resource_data['nfds']})
         event_key = resource_data['nfds'][0]['vmid']
         poll_event_id = gen_cfg_const.EVENT_CONFIGURE_HEALTHMONITOR
@@ -470,8 +469,8 @@ def load_drivers(conf):
     for service_type, driver_name in six.iteritems(drivers):
         driver_obj = driver_name(conf=conf)
         drivers[service_type] = driver_obj
-    LOG.info(_LI("Generic config agent loaded drivers drivers:"
-                 "%(drivers)s"),
+    LOG.info("Generic config agent loaded drivers drivers:"
+             "%(drivers)s",
             {'drivers': drivers})
    return drivers

View File

@@ -13,7 +13,6 @@
 import os
 import six
-from gbpservice._i18n import _LI
 from gbpservice.contrib.nfp.configurator.agents import agent_base
 from gbpservice.contrib.nfp.configurator.lib import data_filter
 from gbpservice.contrib.nfp.configurator.lib import lbv2_constants as lb_const
@@ -64,9 +63,9 @@ class LBaaSV2RpcSender(data_filter.Filter):
                     operating_status,
                     obj_type: obj}}]
         }
-        LOG.info(_LI("Sending Notification 'Update Status' "
-                     "for resource: %(resource)s with Provisioning status:"
-                     "%(p_status)s and Operating status:%(o_status)s"),
+        LOG.info("Sending Notification 'Update Status' "
+                 "for resource: %(resource)s with Provisioning status:"
+                 "%(p_status)s and Operating status:%(o_status)s",
                  {'resource': agent_info['resource'],
                   'p_status': provisioning_status,
                   'o_status': operating_status})
@@ -90,8 +89,8 @@ class LBaaSV2RpcSender(data_filter.Filter):
                         'update_pool_stats'),
                     'pool': pool_id}}]
         }
-        LOG.info(_LI("Sending Notification 'Update Pool Stats' "
-                     "for pool: %(pool_id)s with stats:%(stats)s"),
+        LOG.info("Sending Notification 'Update Pool Stats' "
+                 "for pool: %(pool_id)s with stats:%(stats)s",
                  {'pool_id': pool_id,
                   'stats': stats})
         self.notify._notification(msg)
@@ -149,8 +148,8 @@ class LBaaSv2RpcManager(agent_base.AgentBaseRPCManager):
         Returns: None
         """
-        LOG.info(_LI("Received request 'Create Loadbalancer' for LB:%(lb)s "
-                     "with driver:%(driver_name)s"),
+        LOG.info("Received request 'Create Loadbalancer' for LB:%(lb)s "
+                 "with driver:%(driver_name)s",
                  {'lb': loadbalancer['id'],
                   'driver_name': driver_name})
         arg_dict = {'context': context,
@@ -177,8 +176,8 @@ class LBaaSv2RpcManager(agent_base.AgentBaseRPCManager):
                     lb_const.OLD_LOADBALANCER: old_loadbalancer,
                     lb_const.LOADBALANCER: loadbalancer,
                     }
-        LOG.info(_LI("Received request 'Update Loadbalancer' for LB:%(lb)s "
-                     "with new Param:%(new_val)s and old Param:%(old_val)s"),
+        LOG.info("Received request 'Update Loadbalancer' for LB:%(lb)s "
+                 "with new Param:%(new_val)s and old Param:%(old_val)s",
                  {'lb': loadbalancer['id'],
                   'new_val': new_val,
                   'old_val': old_val})
@@ -195,7 +194,7 @@ class LBaaSv2RpcManager(agent_base.AgentBaseRPCManager):
         Returns: None
         """
-        LOG.info(_LI("Received request 'Delete Loadbalancer' for LB:%(lb)s "),
+        LOG.info("Received request 'Delete Loadbalancer' for LB:%(lb)s ",
                  {'lb': loadbalancer['id']})
         arg_dict = {'context': context,
@@ -214,7 +213,7 @@ class LBaaSv2RpcManager(agent_base.AgentBaseRPCManager):
         Returns: None
         """
-        LOG.info(_LI("Received request 'Create Listener' for LB:%(lb)s "),
+        LOG.info("Received request 'Create Listener' for LB:%(lb)s ",
                  {'lb': listener['loadbalancer_id']})
         arg_dict = {'context': context,
                     lb_const.LISTENER: listener,
@@ -235,9 +234,9 @@ class LBaaSv2RpcManager(agent_base.AgentBaseRPCManager):
         """
         old_val, new_val = self.get_diff_of_dict(old_listener, listener)
-        LOG.info(_LI("Received request 'Update Listener' for Listener:"
-                     "%(listener)s in LB:%(lb_id)s with new Param:"
-                     "%(new_val)s and old Param:%(old_val)s"),
+        LOG.info("Received request 'Update Listener' for Listener:"
+                 "%(listener)s in LB:%(lb_id)s with new Param:"
+                 "%(new_val)s and old Param:%(old_val)s",
                  {'lb_id': listener['loadbalancer_id'],
                   'listener': listener['id'],
                   'old_val': old_val,
@@ -260,7 +259,7 @@ class LBaaSv2RpcManager(agent_base.AgentBaseRPCManager):
         Returns: None
         """
-        LOG.info(_LI("Received request 'Delete Listener' for LB:%(lb)s "),
+        LOG.info("Received request 'Delete Listener' for LB:%(lb)s ",
                  {'lb': listener['loadbalancer_id']})
         arg_dict = {'context': context,
                     lb_const.LISTENER: listener,
@@ -279,7 +278,7 @@ class LBaaSv2RpcManager(agent_base.AgentBaseRPCManager):
         Returns: None
         """
-        LOG.info(_LI("Received request 'Create Pool' for Pool:%(pool_id)s "),
+        LOG.info("Received request 'Create Pool' for Pool:%(pool_id)s ",
                  {'pool_id': pool['id']})
         arg_dict = {'context': context,
                     lb_const.POOL: pool
@@ -301,9 +300,9 @@ class LBaaSv2RpcManager(agent_base.AgentBaseRPCManager):
         """
         old_val, new_val = self.get_diff_of_dict(old_pool, pool)
-        LOG.info(_LI("Received request 'Update Pool' for Pool:%(pool)s "
-                     "in LB:%(lb_id)s with new Param:%(new_val)s and "
-                     "old Param:%(old_val)s"),
+        LOG.info("Received request 'Update Pool' for Pool:%(pool)s "
+                 "in LB:%(lb_id)s with new Param:%(new_val)s and "
+                 "old Param:%(old_val)s",
                  {'pool': pool['id'],
                   'lb_id': pool['loadbalancer_id'],
                   'old_val': old_val,
@@ -326,7 +325,7 @@ class LBaaSv2RpcManager(agent_base.AgentBaseRPCManager):
         Returns: None
         """
-        LOG.info(_LI("Received request 'Delete Pool' for Pool:%(pool_id)s "),
+        LOG.info("Received request 'Delete Pool' for Pool:%(pool_id)s ",
                  {'pool_id': pool['id']})
         arg_dict = {'context': context,
                     lb_const.POOL: pool,
@@ -345,7 +344,7 @@ class LBaaSv2RpcManager(agent_base.AgentBaseRPCManager):
         Returns: None
         """
-        LOG.info(_LI("Received request 'Create Member' for Pool:%(pool_id)s "),
+        LOG.info("Received request 'Create Member' for Pool:%(pool_id)s ",
                  {'pool_id': member['pool_id']})
         arg_dict = {'context': context,
                     lb_const.MEMBER: member,
@@ -366,9 +365,9 @@ class LBaaSv2RpcManager(agent_base.AgentBaseRPCManager):
         """
         old_val, new_val = self.get_diff_of_dict(old_member, member)
-        LOG.info(_LI("Received request 'Update Member' for Member:"
-                     "%(member_id)s in Pool:%(pool_id)s with new Param:"
-                     "%(new_val)s and old Param:%(old_val)s"),
+        LOG.info("Received request 'Update Member' for Member:"
+                 "%(member_id)s in Pool:%(pool_id)s with new Param:"
+                 "%(new_val)s and old Param:%(old_val)s",
                  {'pool_id': member['pool_id'],
                   'member_id': member['id'],
                   'old_val': old_val,
@@ -391,8 +390,8 @@ class LBaaSv2RpcManager(agent_base.AgentBaseRPCManager):
         Returns: None
         """
-        LOG.info(_LI("Received request 'Delete Member' for Pool:"
-                     "%(pool_id)s "),
+        LOG.info("Received request 'Delete Member' for Pool:"
+                 "%(pool_id)s ",
                  {'pool_id': member['pool_id']})
         arg_dict = {'context': context,
                     lb_const.MEMBER: member,
@@ -412,8 +411,8 @@ class LBaaSv2RpcManager(agent_base.AgentBaseRPCManager):
         Returns: None
         """
-        LOG.info(_LI("Received request 'Create Pool Health Monitor' for"
-                     "Health monitor:%(hm)s"),
+        LOG.info("Received request 'Create Pool Health Monitor' for"
+                 "Health monitor:%(hm)s",
                  {'hm': healthmonitor['id']})
         arg_dict = {'context': context,
                     lb_const.HEALTHMONITOR: healthmonitor
@@ -437,9 +436,9 @@ class LBaaSv2RpcManager(agent_base.AgentBaseRPCManager):
         """
         old_val, new_val = self.get_diff_of_dict(
             old_healthmonitor, healthmonitor)
-        LOG.info(_LI("Received request 'Update Pool Health Monitor' for "
-                     "Health monitor:%(hm)s with new Param:%(new_val)s and "
-                     "old Param:%(old_val)s"),
+        LOG.info("Received request 'Update Pool Health Monitor' for "
+                 "Health monitor:%(hm)s with new Param:%(new_val)s and "
+                 "old Param:%(old_val)s",
                  {'hm': healthmonitor['id'],
                   'old_val': old_val,
                   'new_val': new_val})
@@ -463,8 +462,8 @@ class LBaaSv2RpcManager(agent_base.AgentBaseRPCManager):
         Returns: None
         """
-        LOG.info(_LI("Received request 'Delete Pool Health Monitor' for "
-                     "Health monitor:%(hm)s"),
+        LOG.info("Received request 'Delete Pool Health Monitor' for "
+                 "Health monitor:%(hm)s",
                  {'hm': healthmonitor['id']})
         arg_dict = {'context': context,
                     lb_const.HEALTHMONITOR: healthmonitor
@@ -484,7 +483,7 @@ class LBaaSv2RpcManager(agent_base.AgentBaseRPCManager):
         Returns: None
         """
-        LOG.info(_LI("Received request 'Agent Updated' "))
+        LOG.info("Received request 'Agent Updated' ")
         arg_dict = {'context': context,
                     'payload': payload}
         self._send_event(lb_const.EVENT_AGENT_UPDATED_V2, arg_dict)

View File

@@ -14,7 +14,6 @@
 import os
 import six
-from gbpservice._i18n import _LI
 from gbpservice.contrib.nfp.configurator.agents import agent_base
 from gbpservice.contrib.nfp.configurator.drivers.base import base_driver
 from gbpservice.contrib.nfp.configurator.lib import data_filter
@@ -57,7 +56,7 @@ class VpnaasRpcSender(data_filter.Filter):
         Returns: Dictionary of vpn service type which matches with the filters.
         """
-        LOG.info(_LI("Sending RPC for GET VPN SERVICES with %(filters)s "),
+        LOG.info("Sending RPC for GET VPN SERVICES with %(filters)s ",
                  {'filters': filters})
         return self.call(
             context,
@@ -75,8 +74,8 @@ class VpnaasRpcSender(data_filter.Filter):
         Returns: dictionary of vpnservice
         """
-        LOG.info(_LI("Sending RPC for GET VPN SERVICECONTEXT with "
-                     "Filters:%(filters)s "),
+        LOG.info("Sending RPC for GET VPN SERVICECONTEXT with "
+                 "Filters:%(filters)s ",
                  {'filters': filters})
         return self.call(
             context,
@@ -88,8 +87,8 @@ class VpnaasRpcSender(data_filter.Filter):
        Get list of ipsec conns with filters
        specified.
        """
-        LOG.info(_LI("Sending RPC for GET IPSEC CONNS with Filters:"
-                     "%(filters)s "),
+        LOG.info("Sending RPC for GET IPSEC CONNS with Filters:"
+                 "%(filters)s ",
                  {'filters': filters})
         return self.call(
             context,
@@ -111,8 +110,8 @@ class VpnaasRpcSender(data_filter.Filter):
                     'notification_type': (
                         'update_status')}}]
         }
-        LOG.info(_LI("Sending Notification 'Update Status' with "
-                     "status:%(status)s "),
+        LOG.info("Sending Notification 'Update Status' with "
+                 "status:%(status)s ",
                  {'status': status})
         self._notify._notification(msg)
@@ -127,8 +126,8 @@ class VpnaasRpcSender(data_filter.Filter):
                     'notification_type': (
                         'ipsec_site_conn_deleted')}}]
         }
-        LOG.info(_LI("Sending Notification 'Ipsec Site Conn Deleted' "
-                     "for resource:%(resource_id)s "),
+        LOG.info("Sending Notification 'Ipsec Site Conn Deleted' "
+                 "for resource:%(resource_id)s ",
                  {'resource_id': resource_id})
         self._notify._notification(msg)
@@ -172,8 +171,8 @@ class VPNaasRpcManager(agent_base.AgentBaseRPCManager):
         Returns: None
         """
-        LOG.info(_LI("Received request 'VPN Service Updated'."
-                     "for API '%(api)s'"),
+        LOG.info("Received request 'VPN Service Updated'."
+                 "for API '%(api)s'",
                  {'api': resource_data.get('reason', '')})
         arg_dict = {'context': context,
                     'resource_data': resource_data}
@@ -243,8 +242,8 @@ class VPNaasEventHandler(nfp_api.NfpEventHandler):
         service_vendor = agent_info['service_vendor']
         service_feature = agent_info['service_feature']
         driver = self._get_driver(service_vendor, service_feature)
-        LOG.info(_LI("Invoking driver with service vendor:"
-                     "%(service_vendor)s "),
+        LOG.info("Invoking driver with service vendor:"
+                 "%(service_vendor)s ",
                  {'service_vendor': service_vendor})
         setattr(VPNaasEventHandler, "service_driver", driver)
         self._vpnservice_updated(ev, driver)

View File

@@ -16,7 +16,6 @@ import time
 from oslo_serialization import jsonutils
-from gbpservice._i18n import _LI
 from gbpservice.contrib.nfp.configurator.drivers.base import base_driver
 from gbpservice.contrib.nfp.configurator.drivers.firewall.vyos import (
     vyos_fw_constants as const)
@@ -135,8 +134,8 @@ class FwGenericConfigDriver(base_driver.BaseDriver):
                                   self.port,
                                   'change_auth')
         data = {}
-        LOG.info(_LI("Initiating POST request to configure Authentication "
-                     "service at mgmt ip:%(mgmt_ip)s"),
+        LOG.info("Initiating POST request to configure Authentication "
+                 "service at mgmt ip:%(mgmt_ip)s",
                  {'mgmt_ip': mgmt_ip})
         err_msg = ("Change Auth POST request to the VyOS firewall "
                    "service at %s failed. " % url)
@@ -188,8 +187,8 @@ class FwGenericConfigDriver(base_driver.BaseDriver):
                                   'add_static_ip')
         data = jsonutils.dumps(static_ips_info)
-        LOG.info(_LI("Initiating POST request to add static IPs for primary "
-                     "service at mgmt ip:%(mgmt_ip)s"),
+        LOG.info("Initiating POST request to add static IPs for primary "
+                 "service at mgmt ip:%(mgmt_ip)s",
                  {'mgmt_ip': mgmt_ip})
         err_msg = ("Static IP POST request to the VyOS firewall "
                    "service at %s failed. " % url)
@@ -267,8 +266,8 @@ class FwGenericConfigDriver(base_driver.BaseDriver):
         url = const.request_url % (mgmt_ip,
                                    self.port, 'add_rule')
         data = jsonutils.dumps(rule_info)
-        LOG.info(_LI("Initiating POST request to add persistent rule to "
-                     "primary service at mgmt ip: %(mgmt_ip)s"),
+        LOG.info("Initiating POST request to add persistent rule to "
+                 "primary service at mgmt ip: %(mgmt_ip)s",
                  {'mgmt_ip': mgmt_ip})
         err_msg = ("Add persistent rule POST request to the VyOS firewall "
                    "service at %s failed. " % url)
@@ -322,8 +321,8 @@ class FwGenericConfigDriver(base_driver.BaseDriver):
                                   'del_static_ip')
         data = jsonutils.dumps(static_ips_info)
-        LOG.info(_LI("Initiating POST request to remove static IPs for "
-                     "primary service at mgmt ip: %(mgmt_ip)s"),
+        LOG.info("Initiating POST request to remove static IPs for "
+                 "primary service at mgmt ip: %(mgmt_ip)s",
                  {'mgmt_ip': mgmt_ip})
         err_msg = ("Static IP DELETE request to the VyOS firewall "
@@ -374,8 +373,8 @@ class FwGenericConfigDriver(base_driver.BaseDriver):
         if result_static_ips != common_const.STATUS_SUCCESS:
             return result_static_ips
         else:
-            LOG.info(_LI("Successfully removed static IPs. "
-                         "Result: %(result_static_ips)s"),
+            LOG.info("Successfully removed static IPs. "
+                     "Result: %(result_static_ips)s",
                      {'result_static_ips': result_static_ips})
         rule_info = dict(
@@ -384,8 +383,8 @@ class FwGenericConfigDriver(base_driver.BaseDriver):
         mgmt_ip = resource_data['mgmt_ip']
-        LOG.info(_LI("Initiating DELETE persistent rule for primary "
-                     "service at mgmt ip: %(mgmt_ip)s"),
+        LOG.info("Initiating DELETE persistent rule for primary "
+                 "service at mgmt ip: %(mgmt_ip)s",
                  {'mgmt_ip': mgmt_ip})
         url = const.request_url % (mgmt_ip, self.port, 'delete_rule')
         data = jsonutils.dumps(rule_info)
@@ -447,8 +446,8 @@ class FwGenericConfigDriver(base_driver.BaseDriver):
             route_info.append({'source_cidr': source_cidr,
                                'gateway_ip': gateway_ip})
         data = jsonutils.dumps(route_info)
-        LOG.info(_LI("Initiating POST request to configure route of primary "
-                     "service at mgmt ip: %(mgmt_ip)s"),
+        LOG.info("Initiating POST request to configure route of primary "
+                 "service at mgmt ip: %(mgmt_ip)s",
                  {'mgmt_ip': mgmt_ip})
         err_msg = ("Configure routes POST request to the VyOS firewall "
@@ -497,8 +496,8 @@ class FwGenericConfigDriver(base_driver.BaseDriver):
         for source_cidr in source_cidrs:
             route_info.append({'source_cidr': source_cidr})
         data = jsonutils.dumps(route_info)
-        LOG.info(_LI("Initiating Delete route to primary "
-                     "service at mgmt ip: %(mgmt_ip)s"),
+        LOG.info("Initiating Delete route to primary "
+                 "service at mgmt ip: %(mgmt_ip)s",
                  {'mgmt_ip': mgmt_ip})
         err_msg = ("Routes DELETE request to the VyOS firewall "
@@ -558,8 +557,8 @@ class FwaasDriver(FwGenericConfigDriver):
         headers = self._parse_vm_context(context['agent_info']['context'])
         resource_data = self.parse.parse_data(common_const.FIREWALL, context)
-        LOG.info(_LI("Processing request 'Create Firewall' in FWaaS Driver "
-                     "for Firewall ID: %(f_id)s"),
+        LOG.info("Processing request 'Create Firewall' in FWaaS Driver "
+                 "for Firewall ID: %(f_id)s",
                  {'f_id': firewall['id']})
         mgmt_ip = resource_data.get('mgmt_ip')
         url = const.request_url % (mgmt_ip,
@@ -580,7 +579,7 @@ class FwaasDriver(FwGenericConfigDriver):
             return common_const.STATUS_ERROR
         if resp is common_const.STATUS_SUCCESS:
-            LOG.info(_LI("Configured firewall successfully at URL: %(url)s "),
+            LOG.info("Configured firewall successfully at URL: %(url)s ",
                      {'url': url})
             return common_const.STATUS_ACTIVE
@@ -604,8 +603,8 @@ class FwaasDriver(FwGenericConfigDriver):
         """
         headers = self._parse_vm_context(context['agent_info']['context'])
-        LOG.info(_LI("Processing request 'Update Firewall' in FWaaS Driver "
-                     "for Firewall ID:%(f_id)s"),
+        LOG.info("Processing request 'Update Firewall' in FWaaS Driver "
+                 "for Firewall ID:%(f_id)s",
                  {'f_id': firewall['id']})
         resource_data = self.parse.parse_data(common_const.FIREWALL, context)
         mgmt_ip = resource_data.get('mgmt_ip')
@@ -650,8 +649,8 @@ class FwaasDriver(FwGenericConfigDriver):
         """
         headers = self._parse_vm_context(context['agent_info']['context'])
-        LOG.info(_LI("Processing request 'Delete Firewall' in FWaaS Driver "
-                     "for Firewall ID:%(f_id)s"),
+        LOG.info("Processing request 'Delete Firewall' in FWaaS Driver "
+                 "for Firewall ID:%(f_id)s",
                  {'f_id': firewall['id']})
         resource_data = self.parse.parse_data(common_const.FIREWALL, context)
         mgmt_ip = resource_data.get('mgmt_ip')

View File

@@ -19,8 +19,6 @@ import uuid
 from octavia.certificates.common import local as local_common
 from octavia.certificates.manager import cert_mgr
 from octavia.common import exceptions
-from octavia.i18n import _LE
-from octavia.i18n import _LI
 from oslo_config import cfg
 from gbpservice.nfp.core import log as nfp_logging
@@ -55,9 +53,9 @@ class LocalCertManager(cert_mgr.CertManager):
         cert_ref = str(uuid.uuid4())
         filename_base = os.path.join(CONF.certificates.storage_path, cert_ref)
-        LOG.info(_LI(
+        LOG.info(
             "Storing certificate data on the local filesystem."
-        ))
+        )
         try:
             filename_certificate = "{0}.crt".format(filename_base, cert_ref)
             with open(filename_certificate, 'w') as cert_file:
@@ -78,7 +76,7 @@ class LocalCertManager(cert_mgr.CertManager):
                 with open(filename_pkp, 'w') as pass_file:
                     pass_file.write(private_key_passphrase)
         except IOError as ioe:
-            LOG.error(_LE("Failed to store certificate."))
+            LOG.error("Failed to store certificate.")
             raise exceptions.CertificateStorageException(message=ioe.message)
         return cert_ref
@@ -94,9 +92,9 @@ class LocalCertManager(cert_mgr.CertManager):
                  certificate data
         :raises CertificateStorageException: if certificate retrieval fails
         """
-        LOG.info(_LI(
-            "Loading certificate {0} from the local filesystem."
-        ).format(cert_ref))
+        LOG.info(
+            "Loading certificate {0} from the local filesystem.".format(
+                cert_ref))
         filename_base = os.path.join(CONF.certificates.storage_path, cert_ref)
@@ -111,9 +109,8 @@ class LocalCertManager(cert_mgr.CertManager):
             with open(filename_certificate, 'r') as cert_file:
                 cert_data['certificate'] = cert_file.read()
         except IOError:
-            LOG.error(_LE(
-                "Failed to read certificate for {0}."
-            ).format(cert_ref))
+            LOG.error(
+                "Failed to read certificate for {0}.".format(cert_ref))
             raise exceptions.CertificateStorageException(
                 msg="Certificate could not be read."
             )
@@ -121,9 +118,8 @@ class LocalCertManager(cert_mgr.CertManager):
             with open(filename_private_key, 'r') as key_file:
                 cert_data['private_key'] = key_file.read()
         except IOError:
-            LOG.error(_LE(
-                "Failed to read private key for {0}."
-            ).format(cert_ref))
+            LOG.error(
+                "Failed to read private key for {0}.".format(cert_ref))
             raise exceptions.CertificateStorageException(
                 msg="Private Key could not be read."
            )
@@ -151,9 +147,9 @@ class LocalCertManager(cert_mgr.CertManager):
         :raises CertificateStorageException: if certificate deletion fails
         """
-        LOG.info(_LI(
-            "Deleting certificate {0} from the local filesystem."
-        ).format(cert_ref))
+        LOG.info(
+            "Deleting certificate {0} from the local filesystem.".format(
+                cert_ref))
         filename_base = os.path.join(CONF.certificates.storage_path, cert_ref)
@@ -170,7 +166,6 @@ class LocalCertManager(cert_mgr.CertManager):
             if os.path.exists(filename_pkp):
                 os.remove(filename_pkp)
         except IOError as ioe:
-            LOG.error(_LE(
-                "Failed to delete certificate {0}."
-            ).format(cert_ref))
+            LOG.error(
+                "Failed to delete certificate {0}.".format(cert_ref))
             raise exceptions.CertificateStorageException(message=ioe.message)

View File

@@ -16,7 +16,6 @@ import requests
 import six
 import time
-from neutron._i18n import _LI
 from gbpservice.contrib.nfp.configurator.drivers.base import base_driver
 from gbpservice.contrib.nfp.configurator.drivers.vpn.vyos import (
@@ -354,8 +353,8 @@ class VpnGenericConfigDriver(base_driver.BaseDriver):
                                   'change_auth')
         data = {}
-        LOG.info(_LI("Initiating POST request to configure Authentication "
-                     "service at mgmt ip:%(mgmt_ip)s"),
+        LOG.info("Initiating POST request to configure Authentication "
+                 "service at mgmt ip:%(mgmt_ip)s",
                  {'mgmt_ip': mgmt_ip})
         err_msg = ("Change Auth POST request to the VyOS firewall "
                    "service at %s failed. " % url)

View File

@ -12,7 +12,6 @@
from oslo_log import helpers as log_helpers from oslo_log import helpers as log_helpers
from gbpservice._i18n import _LI
from gbpservice.contrib.nfp.configurator.lib import constants as const from gbpservice.contrib.nfp.configurator.lib import constants as const
from gbpservice.contrib.nfp.configurator.lib import demuxer from gbpservice.contrib.nfp.configurator.lib import demuxer
from gbpservice.contrib.nfp.configurator.lib import utils from gbpservice.contrib.nfp.configurator.lib import utils
@ -139,9 +138,9 @@ class ConfiguratorRpcManager(object):
log_info = request_data.get('info') log_info = request_data.get('info')
logging_context = log_info['context'].get('logging_context', {}) logging_context = log_info['context'].get('logging_context', {})
nfp_context['log_context'] = logging_context nfp_context['log_context'] = logging_context
LOG.info(_LI("Received RPC CREATE NETWORK FUNCTION DEVICE CONFIG " LOG.info("Received RPC CREATE NETWORK FUNCTION DEVICE CONFIG "
"for %(service_type)s, NFI: %(nfi)s, " "for %(service_type)s, NFI: %(nfi)s, "
"NF_ID: %(nf_id)s"), "NF_ID: %(nf_id)s",
{'service_type': request_data['info']['service_type'], {'service_type': request_data['info']['service_type'],
'nfi': request_data['info']['context']['nfi_id'], 'nfi': request_data['info']['context']['nfi_id'],
'nf_id': request_data['info']['context']['nf_id']}) 'nf_id': request_data['info']['context']['nf_id']})
@ -173,9 +172,9 @@ class ConfiguratorRpcManager(object):
log_info = request_data.get('info') log_info = request_data.get('info')
logging_context = log_info['context'].get('logging_context', {}) logging_context = log_info['context'].get('logging_context', {})
nfp_context['log_context'] = logging_context nfp_context['log_context'] = logging_context
LOG.info(_LI("Received RPC DELETE NETWORK FUNCTION DEVICE CONFIG " LOG.info("Received RPC DELETE NETWORK FUNCTION DEVICE CONFIG "
"for %(service_type)s, NFI: %(nfi)s, " "for %(service_type)s, NFI: %(nfi)s, "
"NF_ID: %(nf_id)s"), "NF_ID: %(nf_id)s",
{'service_type': request_data['info']['service_type'], {'service_type': request_data['info']['service_type'],
'nfi': request_data['info']['context']['nfi_id'], 'nfi': request_data['info']['context']['nfi_id'],
'nf_id': request_data['info']['context']['nf_id']}) 'nf_id': request_data['info']['context']['nf_id']})
@ -207,9 +206,9 @@ class ConfiguratorRpcManager(object):
log_info = request_data.get('info') log_info = request_data.get('info')
logging_context = log_info['context'].get('logging_context', {}) logging_context = log_info['context'].get('logging_context', {})
nfp_context['log_context'] = logging_context nfp_context['log_context'] = logging_context
LOG.info(_LI("Received RPC UPDATE NETWORK FUNCTION DEVICE CONFIG " LOG.info("Received RPC UPDATE NETWORK FUNCTION DEVICE CONFIG "
"for %(service_type)s, NFI: %(nfi)s, " "for %(service_type)s, NFI: %(nfi)s, "
"NF_ID: %(nf_id)s"), "NF_ID: %(nf_id)s",
{'service_type': request_data['info']['service_type'], {'service_type': request_data['info']['service_type'],
'nfi': request_data['info']['context']['nfi_id'], 'nfi': request_data['info']['context']['nfi_id'],
'nf_id': request_data['info']['context']['nf_id']}) 'nf_id': request_data['info']['context']['nf_id']})
@ -241,8 +240,8 @@ class ConfiguratorRpcManager(object):
log_info = request_data.get('info') log_info = request_data.get('info')
logging_context = log_info['context'].get('logging_context', {}) logging_context = log_info['context'].get('logging_context', {})
nfp_context['log_context'] = logging_context nfp_context['log_context'] = logging_context
LOG.info(_LI("Received RPC CREATE NETWORK FUNCTION CONFIG " LOG.info("Received RPC CREATE NETWORK FUNCTION CONFIG "
"for %(service_type)s "), "for %(service_type)s ",
{'service_type': request_data['info']['service_type']}) {'service_type': request_data['info']['service_type']})
self._invoke_service_agent('create', request_data) self._invoke_service_agent('create', request_data)
@ -272,8 +271,8 @@ class ConfiguratorRpcManager(object):
log_info = request_data.get('info') log_info = request_data.get('info')
logging_context = log_info['context'].get('logging_context', {}) logging_context = log_info['context'].get('logging_context', {})
nfp_context['log_context'] = logging_context nfp_context['log_context'] = logging_context
LOG.info(_LI("Received RPC DELETE NETWORK FUNCTION CONFIG " LOG.info("Received RPC DELETE NETWORK FUNCTION CONFIG "
"for %(service_type)s "), "for %(service_type)s ",
{'service_type': request_data['info']['service_type']}) {'service_type': request_data['info']['service_type']})
self._invoke_service_agent('delete', request_data) self._invoke_service_agent('delete', request_data)
@ -303,8 +302,8 @@ class ConfiguratorRpcManager(object):
log_info = request_data.get('info') log_info = request_data.get('info')
logging_context = log_info['context'].get('logging_context', {}) logging_context = log_info['context'].get('logging_context', {})
nfp_context['log_context'] = logging_context nfp_context['log_context'] = logging_context
LOG.info(_LI("Received RPC UPDATE NETWORK FUNCTION CONFIG " LOG.info("Received RPC UPDATE NETWORK FUNCTION CONFIG "
"for %(service_type)s "), "for %(service_type)s ",
{'service_type': request_data['info']['service_type']}) {'service_type': request_data['info']['service_type']})
self._invoke_service_agent('update', request_data) self._invoke_service_agent('update', request_data)
@ -326,7 +325,7 @@ class ConfiguratorRpcManager(object):
""" """
module_context.init() module_context.init()
LOG.info(_LI("Received RPC GET NOTIFICATIONS ")) LOG.info("Received RPC GET NOTIFICATIONS ")
events = self.sc.get_stashed_events() events = self.sc.get_stashed_events()
notifications = [] notifications = []
for event in events: for event in events:
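
The rewrite in this file is the mechanical pattern applied across the whole series: the _LI/_LE/_LW markers around log messages are dropped, while the %-style interpolation arguments stay with the logging call so formatting remains lazy. A minimal before/after sketch, assuming only oslo.log (the service_type value is illustrative, not from this repo):

    from oslo_log import log as logging

    LOG = logging.getLogger(__name__)

    service_type = 'firewall'  # illustrative value

    # Before (Ocata and earlier):
    #   LOG.info(_LI("Received RPC CREATE NETWORK FUNCTION CONFIG "
    #                "for %(service_type)s "),
    #            {'service_type': service_type})
    # After (Pike): same call, plain string, arguments unchanged, so
    # interpolation still happens only if the record is emitted.
    LOG.info("Received RPC CREATE NETWORK FUNCTION CONFIG "
             "for %(service_type)s ",
             {'service_type': service_type})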


@ -13,7 +13,6 @@
from keystoneclient import exceptions as k_exceptions from keystoneclient import exceptions as k_exceptions
from keystoneclient.v2_0 import client as keyclient from keystoneclient.v2_0 import client as keyclient
from gbpservice._i18n import _LE
from gbpservice.common import utils from gbpservice.common import utils
from gbpservice.contrib.nfp.config_orchestrator.common import topics from gbpservice.contrib.nfp.config_orchestrator.common import topics
from gbpservice.nfp.core import log as nfp_logging from gbpservice.nfp.core import log as nfp_logging
@ -264,10 +263,10 @@ def _resource_owner_tenant_id():
return tenant.id return tenant.id
except k_exceptions.NotFound: except k_exceptions.NotFound:
with excutils.save_and_reraise_exception(reraise=True): with excutils.save_and_reraise_exception(reraise=True):
LOG.error(_LE('No tenant with name %s exists.'), tenant) LOG.error('No tenant with name %s exists.', tenant)
except k_exceptions.NoUniqueMatch: except k_exceptions.NoUniqueMatch:
with excutils.save_and_reraise_exception(reraise=True): with excutils.save_and_reraise_exception(reraise=True):
LOG.error(_LE('Multiple tenants matches found for %s'), tenant) LOG.error('Multiple tenants matches found for %s', tenant)
def _get_router_for_floatingip(self, context, internal_port, def _get_router_for_floatingip(self, context, internal_port,
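
The keystone lookups above keep their error logging inside save_and_reraise_exception so the original traceback is preserved; only the marker goes away. A self-contained sketch of the same pattern (find_tenant_id is a hypothetical helper mirroring _resource_owner_tenant_id):

    from keystoneclient import exceptions as k_exceptions
    from oslo_log import log as logging
    from oslo_utils import excutils

    LOG = logging.getLogger(__name__)

    def find_tenant_id(keystone, name):
        # Log at error level, then let the context manager re-raise.
        try:
            return keystone.tenants.find(name=name).id
        except k_exceptions.NotFound:
            with excutils.save_and_reraise_exception(reraise=True):
                LOG.error('No tenant with name %s exists.', name)
        except k_exceptions.NoUniqueMatch:
            with excutils.save_and_reraise_exception(reraise=True):
                LOG.error('Multiple tenants matches found for %s', name)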


@ -18,9 +18,6 @@ import subprocess
import time import time
import yaml import yaml
from gbpservice._i18n import _LE
from gbpservice._i18n import _LI
from oslo_log import log as logging from oslo_log import log as logging
import oslo_serialization.jsonutils as jsonutils import oslo_serialization.jsonutils as jsonutils
@ -54,7 +51,7 @@ class Controller(rest.RestController):
out2 = subprocess.Popen('dhclient eth0', shell=True, out2 = subprocess.Popen('dhclient eth0', shell=True,
stdout=subprocess.PIPE).stdout.read() stdout=subprocess.PIPE).stdout.read()
output = "%s\n%s\n%s" % (ip_a, out1, out2) output = "%s\n%s\n%s" % (ip_a, out1, out2)
LOG.info(_LI("Dhclient on eth0, result: %(output)s"), LOG.info("Dhclient on eth0, result: %(output)s",
{'output': output}) {'output': output})
except Exception as err: except Exception as err:
msg = ( msg = (
@ -161,8 +158,8 @@ class Controller(rest.RestController):
return {'failure_desc': {'msg': msg}} return {'failure_desc': {'msg': msg}}
def _configure_healthmonitor(self, config_data): def _configure_healthmonitor(self, config_data):
LOG.info(_LI("Configures healthmonitor with configuration " LOG.info("Configures healthmonitor with configuration "
"data : %(healthmonitor_data)s "), "data : %(healthmonitor_data)s ",
{'healthmonitor_data': config_data}) {'healthmonitor_data': config_data})
def _configure_interfaces(self, config_data): def _configure_interfaces(self, config_data):
@ -173,10 +170,10 @@ class Controller(rest.RestController):
out3 = subprocess.Popen('cat /etc/network/interfaces', shell=True, out3 = subprocess.Popen('cat /etc/network/interfaces', shell=True,
stdout=subprocess.PIPE).stdout.read() stdout=subprocess.PIPE).stdout.read()
output = "%s\n%s\n%s" % (out1, out2, out3) output = "%s\n%s\n%s" % (out1, out2, out3)
LOG.info(_LI("Dhclient on eth0, result: %(initial_data)s"), LOG.info("Dhclient on eth0, result: %(initial_data)s",
{'initial_data': output}) {'initial_data': output})
LOG.info(_LI("Configures interfaces with configuration " LOG.info("Configures interfaces with configuration "
"data : %(interface_data)s "), "data : %(interface_data)s ",
{'interface_data': config_data}) {'interface_data': config_data})
def get_source_cidrs_and_gateway_ip(self, route_info): def get_source_cidrs_and_gateway_ip(self, route_info):
@ -190,8 +187,8 @@ class Controller(rest.RestController):
return source_cidrs, gateway_ip return source_cidrs, gateway_ip
def _add_routes(self, route_info): def _add_routes(self, route_info):
LOG.info(_LI("Configuring routes with configuration " LOG.info("Configuring routes with configuration "
"data : %(route_data)s "), "data : %(route_data)s ",
{'route_data': route_info['resource_data']}) {'route_data': route_info['resource_data']})
source_cidrs, gateway_ip = self.get_source_cidrs_and_gateway_ip( source_cidrs, gateway_ip = self.get_source_cidrs_and_gateway_ip(
route_info) route_info)
@ -205,8 +202,8 @@ class Controller(rest.RestController):
try: try:
interface_number_string = source_interface.split("eth", 1)[1] interface_number_string = source_interface.split("eth", 1)[1]
except IndexError: except IndexError:
LOG.error(_LE("Retrieved wrong interface %(interface)s for " LOG.error("Retrieved wrong interface %(interface)s for "
"configuring routes"), "configuring routes",
{'interface': source_interface}) {'interface': source_interface})
try: try:
routing_table_number = 20 + int(interface_number_string) routing_table_number = 20 + int(interface_number_string)
@ -222,7 +219,7 @@ class Controller(rest.RestController):
routing_table_number, gateway_ip) routing_table_number, gateway_ip)
default_route_commands.append(ip_route_command) default_route_commands.append(ip_route_command)
output = "%s\n%s" % (out1, out2) output = "%s\n%s" % (out1, out2)
LOG.info(_LI("Static route configuration result: %(output)s"), LOG.info("Static route configuration result: %(output)s",
{'output': output}) {'output': output})
except Exception as ex: except Exception as ex:
raise Exception(_("Failed to add static routes: %(ex)s") % { raise Exception(_("Failed to add static routes: %(ex)s") % {
@ -231,7 +228,7 @@ class Controller(rest.RestController):
try: try:
out = subprocess.Popen(command, shell=True, out = subprocess.Popen(command, shell=True,
stdout=subprocess.PIPE).stdout.read() stdout=subprocess.PIPE).stdout.read()
LOG.info(_LI("Static route configuration result: %(output)s"), LOG.info("Static route configuration result: %(output)s",
{'output': out}) {'output': out})
except Exception as ex: except Exception as ex:
raise Exception(_("Failed to add static routes: %(ex)s") % { raise Exception(_("Failed to add static routes: %(ex)s") % {
@ -269,9 +266,9 @@ class Controller(rest.RestController):
"IP Address")) "IP Address"))
def _apply_user_config(self, config_data): def _apply_user_config(self, config_data):
LOG.info(_LI("Applying user config with configuration " LOG.info("Applying user config with configuration "
"type : %(config_type)s and " "type : %(config_type)s and "
"configuration data : %(config_data)s "), "configuration data : %(config_data)s ",
{'config_type': config_data['resource'], {'config_type': config_data['resource'],
'config_data': config_data['resource_data']}) 'config_data': config_data['resource_data']})
service_config = config_data['resource_data'][ service_config = config_data['resource_data'][
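
Note the asymmetry this file preserves: log-only strings lose their markers, but exception text that can reach API users keeps the _() wrapper (see the raise Exception(_("Failed to add static routes: ...")) lines above). A minimal sketch of that split, assuming the project's _ from gbpservice._i18n (report_route_result is hypothetical):

    from oslo_log import log as logging

    from gbpservice._i18n import _

    LOG = logging.getLogger(__name__)

    def report_route_result(output):
        # Operator-facing log: plain, untranslated string.
        LOG.info("Static route configuration result: %(output)s",
                 {'output': output})
        if not output:
            # User-facing error: still marked for translation.
            raise Exception(_("Failed to add static routes: %(ex)s") % {
                'ex': 'empty command output'})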


@ -15,8 +15,6 @@ from subprocess import PIPE
from subprocess import Popen from subprocess import Popen
import sys import sys
from oslo_log._i18n import _LE
from oslo_log._i18n import _LI
from oslo_log import log as logging from oslo_log import log as logging
from oslo_serialization import jsonutils from oslo_serialization import jsonutils
@ -29,10 +27,10 @@ class ConfigureIPtables(object):
ps = Popen(["sysctl", "net.ipv4.ip_forward"], stdout=PIPE) ps = Popen(["sysctl", "net.ipv4.ip_forward"], stdout=PIPE)
output = ps.communicate()[0] output = ps.communicate()[0]
if "0" in output: if "0" in output:
LOG.info(_LI("Enabling IP forwarding ...")) LOG.info("Enabling IP forwarding ...")
call(["sysctl", "-w", "net.ipv4.ip_forward=1"]) call(["sysctl", "-w", "net.ipv4.ip_forward=1"])
else: else:
LOG.info(_LI("IP forwarding already enabled")) LOG.info("IP forwarding already enabled")
try: try:
self.rules_json = jsonutils.loads(json_blob) self.rules_json = jsonutils.loads(json_blob)
except ValueError: except ValueError:
@ -44,7 +42,7 @@ class ConfigureIPtables(object):
# check if chain is present if not create new chain # check if chain is present if not create new chain
if "testchain" not in output: if "testchain" not in output:
LOG.info(_LI("Creating new chain ...")) LOG.info("Creating new chain ...")
call(["iptables", "-F"]) call(["iptables", "-F"])
call(["iptables", "-N", "testchain"]) call(["iptables", "-N", "testchain"])
call( call(
@ -57,10 +55,10 @@ class ConfigureIPtables(object):
# return # return
# Update chain with new rules # Update chain with new rules
LOG.info(_LI("Updating chain with new rules ...")) LOG.info("Updating chain with new rules ...")
count = 0 count = 0
for rule in self.rules_json.get('rules'): for rule in self.rules_json.get('rules'):
LOG.info(_LI("adding rule %(count)d"), {'count': count}) LOG.info("adding rule %(count)d", {'count': count})
try: try:
action_values = ["LOG", "ACCEPT"] action_values = ["LOG", "ACCEPT"]
action = rule['action'].upper() action = rule['action'].upper()
@ -82,14 +80,14 @@ class ConfigureIPtables(object):
"-j", action], stdout=PIPE) "-j", action], stdout=PIPE)
output = ps.communicate()[0] output = ps.communicate()[0]
if output: if output:
LOG.error(_LE("Unable to add rule to chain due to: %(msg)s"), LOG.error("Unable to add rule to chain due to: %(msg)s",
{'msg': output}) {'msg': output})
count = count + 1 count = count + 1
ps = Popen(["iptables", "-A", "testchain", "-m", "state", "--state", ps = Popen(["iptables", "-A", "testchain", "-m", "state", "--state",
"ESTABLISHED,RELATED", "-j", "ACCEPT"], stdout=PIPE) "ESTABLISHED,RELATED", "-j", "ACCEPT"], stdout=PIPE)
output = ps.communicate()[0] output = ps.communicate()[0]
if output: if output:
LOG.error(_LE("Unable to add rule to chain due to: %(output)s"), LOG.error("Unable to add rule to chain due to: %(output)s",
{'output': output}) {'output': output})
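
The checks above rely on iptables printing nothing on success, so any captured output is logged as a failure. A condensed sketch of that idiom (as in the original, only stdout is captured):

    from subprocess import PIPE, Popen

    from oslo_log import log as logging

    LOG = logging.getLogger(__name__)

    ps = Popen(["iptables", "-A", "testchain", "-m", "state",
                "--state", "ESTABLISHED,RELATED", "-j", "ACCEPT"],
               stdout=PIPE)
    output = ps.communicate()[0]
    if output:
        LOG.error("Unable to add rule to chain due to: %(output)s",
                  {'output': output})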


@ -24,8 +24,6 @@ from neutron_lib.plugins import directory
from oslo_log import log as logging from oslo_log import log as logging
from oslo_utils import excutils from oslo_utils import excutils
from gbpservice._i18n import _LE
from gbpservice._i18n import _LW
from gbpservice.neutron.extensions import group_policy as gp_ext from gbpservice.neutron.extensions import group_policy as gp_ext
from gbpservice.neutron.extensions import servicechain as sc_ext from gbpservice.neutron.extensions import servicechain as sc_ext
from gbpservice.neutron.services.grouppolicy.common import exceptions as exc from gbpservice.neutron.services.grouppolicy.common import exceptions as exc
@ -237,7 +235,7 @@ class LocalAPI(object):
# plugins are loaded to grab and store plugin. # plugins are loaded to grab and store plugin.
l3_plugin = directory.get_plugin(nl_const.L3) l3_plugin = directory.get_plugin(nl_const.L3)
if not l3_plugin: if not l3_plugin:
LOG.error(_LE("No L3 router service plugin found.")) LOG.error("No L3 router service plugin found.")
raise exc.GroupPolicyDeploymentError() raise exc.GroupPolicyDeploymentError()
return l3_plugin return l3_plugin
@ -248,7 +246,7 @@ class LocalAPI(object):
# plugins are loaded to grab and store plugin. # plugins are loaded to grab and store plugin.
qos_plugin = directory.get_plugin(pconst.QOS) qos_plugin = directory.get_plugin(pconst.QOS)
if not qos_plugin: if not qos_plugin:
LOG.error(_LE("No QoS service plugin found.")) LOG.error("No QoS service plugin found.")
raise exc.GroupPolicyDeploymentError() raise exc.GroupPolicyDeploymentError()
return qos_plugin return qos_plugin
@ -258,7 +256,7 @@ class LocalAPI(object):
# plugins are loaded to grab and store plugin. # plugins are loaded to grab and store plugin.
group_policy_plugin = directory.get_plugin(pconst.GROUP_POLICY) group_policy_plugin = directory.get_plugin(pconst.GROUP_POLICY)
if not group_policy_plugin: if not group_policy_plugin:
LOG.error(_LE("No GroupPolicy service plugin found.")) LOG.error("No GroupPolicy service plugin found.")
raise exc.GroupPolicyDeploymentError() raise exc.GroupPolicyDeploymentError()
return group_policy_plugin return group_policy_plugin
@ -268,7 +266,7 @@ class LocalAPI(object):
# plugins are loaded to grab and store plugin. # plugins are loaded to grab and store plugin.
servicechain_plugin = directory.get_plugin(pconst.SERVICECHAIN) servicechain_plugin = directory.get_plugin(pconst.SERVICECHAIN)
if not servicechain_plugin: if not servicechain_plugin:
LOG.error(_LE("No Servicechain service plugin found.")) LOG.error("No Servicechain service plugin found.")
raise exc.GroupPolicyDeploymentError() raise exc.GroupPolicyDeploymentError()
return servicechain_plugin return servicechain_plugin
@ -374,7 +372,7 @@ class LocalAPI(object):
self._delete_resource(self._core_plugin, self._delete_resource(self._core_plugin,
plugin_context, 'port', port_id) plugin_context, 'port', port_id)
except n_exc.PortNotFound: except n_exc.PortNotFound:
LOG.warning(_LW('Port %s already deleted'), port_id) LOG.warning('Port %s already deleted', port_id)
def _get_subnet(self, plugin_context, subnet_id): def _get_subnet(self, plugin_context, subnet_id):
return self._get_resource(self._core_plugin, plugin_context, 'subnet', return self._get_resource(self._core_plugin, plugin_context, 'subnet',
@ -398,7 +396,7 @@ class LocalAPI(object):
self._delete_resource(self._core_plugin, plugin_context, 'subnet', self._delete_resource(self._core_plugin, plugin_context, 'subnet',
subnet_id) subnet_id)
except n_exc.SubnetNotFound: except n_exc.SubnetNotFound:
LOG.warning(_LW('Subnet %s already deleted'), subnet_id) LOG.warning('Subnet %s already deleted', subnet_id)
def _get_network(self, plugin_context, network_id): def _get_network(self, plugin_context, network_id):
return self._get_resource(self._core_plugin, plugin_context, 'network', return self._get_resource(self._core_plugin, plugin_context, 'network',
@ -422,7 +420,7 @@ class LocalAPI(object):
self._delete_resource(self._core_plugin, plugin_context, self._delete_resource(self._core_plugin, plugin_context,
'network', network_id) 'network', network_id)
except n_exc.NetworkNotFound: except n_exc.NetworkNotFound:
LOG.warning(_LW('Network %s already deleted'), network_id) LOG.warning('Network %s already deleted', network_id)
def _get_router(self, plugin_context, router_id): def _get_router(self, plugin_context, router_id):
return self._get_resource(self._l3_plugin, plugin_context, 'router', return self._get_resource(self._l3_plugin, plugin_context, 'router',
@ -452,7 +450,7 @@ class LocalAPI(object):
self._l3_plugin.remove_router_interface(plugin_context, router_id, self._l3_plugin.remove_router_interface(plugin_context, router_id,
interface_info) interface_info)
except l3.RouterInterfaceNotFoundForSubnet: except l3.RouterInterfaceNotFoundForSubnet:
LOG.warning(_LW('Router interface already deleted for subnet %s'), LOG.warning('Router interface already deleted for subnet %s',
interface_info) interface_info)
return return
@ -472,7 +470,7 @@ class LocalAPI(object):
self._delete_resource(self._l3_plugin, plugin_context, 'router', self._delete_resource(self._l3_plugin, plugin_context, 'router',
router_id) router_id)
except l3.RouterNotFound: except l3.RouterNotFound:
LOG.warning(_LW('Router %s already deleted'), router_id) LOG.warning('Router %s already deleted', router_id)
def _get_sg(self, plugin_context, sg_id): def _get_sg(self, plugin_context, sg_id):
return self._get_resource( return self._get_resource(
@ -496,7 +494,7 @@ class LocalAPI(object):
self._delete_resource(self._core_plugin, plugin_context, self._delete_resource(self._core_plugin, plugin_context,
'security_group', sg_id) 'security_group', sg_id)
except ext_sg.SecurityGroupNotFound: except ext_sg.SecurityGroupNotFound:
LOG.warning(_LW('Security Group %s already deleted'), sg_id) LOG.warning('Security Group %s already deleted', sg_id)
def _get_sg_rule(self, plugin_context, sg_rule_id): def _get_sg_rule(self, plugin_context, sg_rule_id):
return self._get_resource( return self._get_resource(
@ -513,7 +511,7 @@ class LocalAPI(object):
return self._create_resource(self._core_plugin, plugin_context, return self._create_resource(self._core_plugin, plugin_context,
'security_group_rule', attrs) 'security_group_rule', attrs)
except ext_sg.SecurityGroupRuleExists as ex: except ext_sg.SecurityGroupRuleExists as ex:
LOG.warning(_LW('Security Group already exists %s'), ex.message) LOG.warning('Security Group already exists %s', ex.message)
return return
def _update_sg_rule(self, plugin_context, sg_rule_id, attrs): def _update_sg_rule(self, plugin_context, sg_rule_id, attrs):
@ -526,7 +524,7 @@ class LocalAPI(object):
self._delete_resource(self._core_plugin, plugin_context, self._delete_resource(self._core_plugin, plugin_context,
'security_group_rule', sg_rule_id) 'security_group_rule', sg_rule_id)
except ext_sg.SecurityGroupRuleNotFound: except ext_sg.SecurityGroupRuleNotFound:
LOG.warning(_LW('Security Group Rule %s already deleted'), LOG.warning('Security Group Rule %s already deleted',
sg_rule_id) sg_rule_id)
def _get_fip(self, plugin_context, fip_id): def _get_fip(self, plugin_context, fip_id):
@ -551,7 +549,7 @@ class LocalAPI(object):
self._delete_resource(self._l3_plugin, plugin_context, self._delete_resource(self._l3_plugin, plugin_context,
'floatingip', fip_id) 'floatingip', fip_id)
except l3.FloatingIPNotFound: except l3.FloatingIPNotFound:
LOG.warning(_LW('Floating IP %s Already deleted'), fip_id) LOG.warning('Floating IP %s Already deleted', fip_id)
def _get_address_scope(self, plugin_context, address_scope_id): def _get_address_scope(self, plugin_context, address_scope_id):
return self._get_resource(self._core_plugin, plugin_context, return self._get_resource(self._core_plugin, plugin_context,
@ -575,7 +573,7 @@ class LocalAPI(object):
self._delete_resource(self._core_plugin, plugin_context, self._delete_resource(self._core_plugin, plugin_context,
'address_scope', address_scope_id) 'address_scope', address_scope_id)
except address_scope.AddressScopeNotFound: except address_scope.AddressScopeNotFound:
LOG.warning(_LW('Address Scope %s already deleted'), LOG.warning('Address Scope %s already deleted',
address_scope_id) address_scope_id)
def _get_subnetpool(self, plugin_context, subnetpool_id): def _get_subnetpool(self, plugin_context, subnetpool_id):
@ -600,7 +598,7 @@ class LocalAPI(object):
self._delete_resource(self._core_plugin, plugin_context, self._delete_resource(self._core_plugin, plugin_context,
'subnetpool', subnetpool_id) 'subnetpool', subnetpool_id)
except n_exc.SubnetpoolNotFound: except n_exc.SubnetpoolNotFound:
LOG.warning(_LW('Subnetpool %s already deleted'), subnetpool_id) LOG.warning('Subnetpool %s already deleted', subnetpool_id)
def _get_l2_policy(self, plugin_context, l2p_id): def _get_l2_policy(self, plugin_context, l2p_id):
return self._get_resource(self._group_policy_plugin, plugin_context, return self._get_resource(self._group_policy_plugin, plugin_context,
@ -619,7 +617,7 @@ class LocalAPI(object):
self._delete_resource(self._qos_plugin, self._delete_resource(self._qos_plugin,
plugin_context, 'policy', qos_policy_id) plugin_context, 'policy', qos_policy_id)
except n_exc.QosPolicyNotFound: except n_exc.QosPolicyNotFound:
LOG.warning(_LW('QoS Policy %s already deleted'), qos_policy_id) LOG.warning('QoS Policy %s already deleted', qos_policy_id)
def _get_qos_rules(self, plugin_context, filters=None): def _get_qos_rules(self, plugin_context, filters=None):
filters = filters or {} filters = filters or {}
@ -639,7 +637,7 @@ class LocalAPI(object):
'policy_bandwidth_limit_rule', 'policy_bandwidth_limit_rule',
rule_id, qos_policy_id) rule_id, qos_policy_id)
except n_exc.QosRuleNotFound: except n_exc.QosRuleNotFound:
LOG.warning(_LW('QoS Rule %s already deleted'), rule_id) LOG.warning('QoS Rule %s already deleted', rule_id)
def _get_l2_policies(self, plugin_context, filters=None): def _get_l2_policies(self, plugin_context, filters=None):
filters = filters or {} filters = filters or {}
@ -659,7 +657,7 @@ class LocalAPI(object):
self._delete_resource(self._group_policy_plugin, self._delete_resource(self._group_policy_plugin,
plugin_context, 'l2_policy', l2p_id, False) plugin_context, 'l2_policy', l2p_id, False)
except gp_ext.L2PolicyNotFound: except gp_ext.L2PolicyNotFound:
LOG.warning(_LW('L2 Policy %s already deleted'), l2p_id) LOG.warning('L2 Policy %s already deleted', l2p_id)
def _get_l3_policy(self, plugin_context, l3p_id): def _get_l3_policy(self, plugin_context, l3p_id):
return self._get_resource(self._group_policy_plugin, plugin_context, return self._get_resource(self._group_policy_plugin, plugin_context,
@ -683,7 +681,7 @@ class LocalAPI(object):
self._delete_resource(self._group_policy_plugin, self._delete_resource(self._group_policy_plugin,
plugin_context, 'l3_policy', l3p_id, False) plugin_context, 'l3_policy', l3p_id, False)
except gp_ext.L3PolicyNotFound: except gp_ext.L3PolicyNotFound:
LOG.warning(_LW('L3 Policy %s already deleted'), l3p_id) LOG.warning('L3 Policy %s already deleted', l3p_id)
def _get_external_segment(self, plugin_context, es_id): def _get_external_segment(self, plugin_context, es_id):
return self._get_resource(self._group_policy_plugin, plugin_context, return self._get_resource(self._group_policy_plugin, plugin_context,
@ -707,7 +705,7 @@ class LocalAPI(object):
self._delete_resource(self._group_policy_plugin, plugin_context, self._delete_resource(self._group_policy_plugin, plugin_context,
'external_segment', es_id, False) 'external_segment', es_id, False)
except gp_ext.ExternalSegmentNotFound: except gp_ext.ExternalSegmentNotFound:
LOG.warning(_LW('External Segment %s already deleted'), es_id) LOG.warning('External Segment %s already deleted', es_id)
def _get_external_policy(self, plugin_context, ep_id): def _get_external_policy(self, plugin_context, ep_id):
return self._get_resource(self._group_policy_plugin, plugin_context, return self._get_resource(self._group_policy_plugin, plugin_context,
@ -731,7 +729,7 @@ class LocalAPI(object):
self._delete_resource(self._group_policy_plugin, plugin_context, self._delete_resource(self._group_policy_plugin, plugin_context,
'external_policy', ep_id, False) 'external_policy', ep_id, False)
except gp_ext.ExternalPolicyNotFound: except gp_ext.ExternalPolicyNotFound:
LOG.warning(_LW('External Policy %s already deleted'), ep_id) LOG.warning('External Policy %s already deleted', ep_id)
def _get_policy_rule_set(self, plugin_context, prs_id): def _get_policy_rule_set(self, plugin_context, prs_id):
return self._get_resource(self._group_policy_plugin, plugin_context, return self._get_resource(self._group_policy_plugin, plugin_context,
@ -755,7 +753,7 @@ class LocalAPI(object):
self._delete_resource(self._group_policy_plugin, plugin_context, self._delete_resource(self._group_policy_plugin, plugin_context,
'policy_rule_set', prs_id, False) 'policy_rule_set', prs_id, False)
except gp_ext.PolicyRuleSetNotFound: except gp_ext.PolicyRuleSetNotFound:
LOG.warning(_LW('Policy Rule Set %s already deleted'), prs_id) LOG.warning('Policy Rule Set %s already deleted', prs_id)
def _get_servicechain_instance(self, plugin_context, sci_id): def _get_servicechain_instance(self, plugin_context, sci_id):
return self._get_resource(self._servicechain_plugin, plugin_context, return self._get_resource(self._servicechain_plugin, plugin_context,
@ -780,7 +778,7 @@ class LocalAPI(object):
self._delete_resource(self._servicechain_plugin, plugin_context, self._delete_resource(self._servicechain_plugin, plugin_context,
'servicechain_instance', sci_id, False) 'servicechain_instance', sci_id, False)
except sc_ext.ServiceChainInstanceNotFound: except sc_ext.ServiceChainInstanceNotFound:
LOG.warning(_LW("servicechain %s already deleted"), sci_id) LOG.warning("servicechain %s already deleted", sci_id)
def _get_servicechain_spec(self, plugin_context, scs_id): def _get_servicechain_spec(self, plugin_context, scs_id):
return self._get_resource(self._servicechain_plugin, plugin_context, return self._get_resource(self._servicechain_plugin, plugin_context,
@ -804,7 +802,7 @@ class LocalAPI(object):
self._delete_resource(self._servicechain_plugin, plugin_context, self._delete_resource(self._servicechain_plugin, plugin_context,
'servicechain_spec', scs_id) 'servicechain_spec', scs_id)
except sc_ext.ServiceChainSpecNotFound: except sc_ext.ServiceChainSpecNotFound:
LOG.warning(_LW("servicechain spec %s already deleted"), scs_id) LOG.warning("servicechain spec %s already deleted", scs_id)
def _get_policy_target(self, plugin_context, pt_id): def _get_policy_target(self, plugin_context, pt_id):
return self._get_resource(self._group_policy_plugin, plugin_context, return self._get_resource(self._group_policy_plugin, plugin_context,
@ -828,7 +826,7 @@ class LocalAPI(object):
self._delete_resource(self._group_policy_plugin, plugin_context, self._delete_resource(self._group_policy_plugin, plugin_context,
'policy_target', pt_id, False) 'policy_target', pt_id, False)
except gp_ext.PolicyTargetNotFound: except gp_ext.PolicyTargetNotFound:
LOG.warning(_LW('Policy Rule Set %s already deleted'), pt_id) LOG.warning('Policy Rule Set %s already deleted', pt_id)
def _get_policy_target_group(self, plugin_context, ptg_id): def _get_policy_target_group(self, plugin_context, ptg_id):
return self._get_resource(self._group_policy_plugin, plugin_context, return self._get_resource(self._group_policy_plugin, plugin_context,
@ -853,4 +851,4 @@ class LocalAPI(object):
self._delete_resource(self._group_policy_plugin, plugin_context, self._delete_resource(self._group_policy_plugin, plugin_context,
'policy_target_group', ptg_id) 'policy_target_group', ptg_id)
except sc_ext.ServiceChainSpecNotFound: except sc_ext.ServiceChainSpecNotFound:
LOG.warning(_LW("Policy Target Group %s already deleted"), ptg_id) LOG.warning("Policy Target Group %s already deleted", ptg_id)


@ -26,7 +26,6 @@ from sqlalchemy.ext.orderinglist import ordering_list
from sqlalchemy import orm from sqlalchemy import orm
from sqlalchemy.orm import exc from sqlalchemy.orm import exc
from gbpservice._i18n import _LE
from gbpservice.neutron.extensions import servicechain as schain from gbpservice.neutron.extensions import servicechain as schain
from gbpservice.neutron.services.servicechain.common import exceptions as s_exc from gbpservice.neutron.services.servicechain.common import exceptions as s_exc
@ -153,7 +152,7 @@ class ServiceChainDbPlugin(schain.ServiceChainPluginBase,
# plugins are loaded to grab and store plugin. # plugins are loaded to grab and store plugin.
grouppolicy_plugin = directory.get_plugin(pconst.GROUP_POLICY) grouppolicy_plugin = directory.get_plugin(pconst.GROUP_POLICY)
if not grouppolicy_plugin: if not grouppolicy_plugin:
LOG.error(_LE("No Grouppolicy service plugin found.")) LOG.error("No Grouppolicy service plugin found.")
raise s_exc.ServiceChainDeploymentError() raise s_exc.ServiceChainDeploymentError()
return grouppolicy_plugin return grouppolicy_plugin


@ -17,12 +17,12 @@ from neutron.api import extensions as neutron_extensions
from neutron.api.v2 import attributes as attr from neutron.api.v2 import attributes as attr
from neutron.api.v2 import resource_helper from neutron.api.v2 import resource_helper
from neutron.plugins.common import constants from neutron.plugins.common import constants
from neutron.services import service_base
from neutron_lib.api import converters as conv from neutron_lib.api import converters as conv
from neutron_lib.api import extensions from neutron_lib.api import extensions
from neutron_lib.api import validators as valid from neutron_lib.api import validators as valid
from neutron_lib import constants as nlib_const from neutron_lib import constants as nlib_const
from neutron_lib import exceptions as nexc from neutron_lib import exceptions as nexc
from neutron_lib.services import base as service_base
from oslo_config import cfg from oslo_config import cfg
from oslo_log import log as logging from oslo_log import log as logging
from oslo_utils import uuidutils from oslo_utils import uuidutils


@ -16,11 +16,11 @@ from neutron.api import extensions as neutron_extensions
from neutron.api.v2 import attributes as attr from neutron.api.v2 import attributes as attr
from neutron.api.v2 import resource_helper from neutron.api.v2 import resource_helper
from neutron.plugins.common import constants from neutron.plugins.common import constants
from neutron.services import service_base
from neutron_lib.api import converters as conv from neutron_lib.api import converters as conv
from neutron_lib.api import extensions from neutron_lib.api import extensions
from neutron_lib.api import validators as valid from neutron_lib.api import validators as valid
from neutron_lib import exceptions as nexc from neutron_lib import exceptions as nexc
from neutron_lib.services import base as service_base
from oslo_config import cfg from oslo_config import cfg
from oslo_log import log as logging from oslo_log import log as logging
import six import six
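
The two import hunks above implement the second half of the commit message: ServicePluginBase now comes from neutron_lib.services.base, and the alias is kept as service_base so the rest of each extension module is untouched. A sketch of a plugin written against the new location (the class body is illustrative, not from this repo):

    from neutron_lib.services import base as service_base


    class ExampleServicePlugin(service_base.ServicePluginBase):
        # Illustrative plugin: only the abstract hooks are filled in.
        supported_extension_aliases = []

        def get_plugin_type(self):
            return 'EXAMPLE'

        def get_plugin_description(self):
            return "Example plugin on neutron_lib's service base class"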


@ -15,7 +15,6 @@
from oslo_log import log from oslo_log import log
from gbpservice._i18n import _LE
from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import exceptions from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import exceptions
LOG = log.getLogger(__name__) LOG = log.getLogger(__name__)
@ -108,6 +107,6 @@ class APICNameMapper(object):
if self._map(session, "", type_tag, prefix) == name[:pos]: if self._map(session, "", type_tag, prefix) == name[:pos]:
return name[pos:] return name[pos:]
elif enforce: elif enforce:
LOG.error(_LE("Attempted to reverse-map invalid APIC name '%s'"), LOG.error("Attempted to reverse-map invalid APIC name '%s'",
name) name)
raise exceptions.InternalError() raise exceptions.InternalError()


@ -20,7 +20,6 @@ from keystoneclient.v3 import client as ksc_client
from oslo_config import cfg from oslo_config import cfg
from oslo_log import log as logging from oslo_log import log as logging
from gbpservice._i18n import _LW
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
@ -47,7 +46,7 @@ class ProjectNameCache(object):
auth = ksc_auth.load_from_conf_options(cfg.CONF, AUTH_GROUP) auth = ksc_auth.load_from_conf_options(cfg.CONF, AUTH_GROUP)
LOG.debug("Got auth: %s", auth) LOG.debug("Got auth: %s", auth)
if not auth: if not auth:
LOG.warning(_LW('No auth_plugin configured in %s'), LOG.warning('No auth_plugin configured in %s',
AUTH_GROUP) AUTH_GROUP)
session = ksc_session.Session.load_from_conf_options( session = ksc_session.Session.load_from_conf_options(
cfg.CONF, AUTH_GROUP, auth=auth) cfg.CONF, AUTH_GROUP, auth=auth)


@ -22,7 +22,6 @@ from neutron_lib.plugins import directory
from oslo_log import log from oslo_log import log
from oslo_utils import excutils from oslo_utils import excutils
from gbpservice._i18n import _LI
from gbpservice.neutron import extensions as extensions_pkg from gbpservice.neutron import extensions as extensions_pkg
from gbpservice.neutron.extensions import cisco_apic from gbpservice.neutron.extensions import cisco_apic
from gbpservice.neutron.plugins.ml2plus import driver_api as api_plus from gbpservice.neutron.plugins.ml2plus import driver_api as api_plus
@ -38,11 +37,11 @@ class ApicExtensionDriver(api_plus.ExtensionDriver,
extn_db.ExtensionDbMixin): extn_db.ExtensionDbMixin):
def __init__(self): def __init__(self):
LOG.info(_LI("APIC AIM ED __init__")) LOG.info("APIC AIM ED __init__")
self._mechanism_driver = None self._mechanism_driver = None
def initialize(self): def initialize(self):
LOG.info(_LI("APIC AIM ED initializing")) LOG.info("APIC AIM ED initializing")
extensions.append_api_extensions_path(extensions_pkg.__path__) extensions.append_api_extensions_path(extensions_pkg.__path__)
@property @property


@ -52,9 +52,6 @@ from oslo_log import log
import oslo_messaging import oslo_messaging
from oslo_utils import importutils from oslo_utils import importutils
from gbpservice._i18n import _LE
from gbpservice._i18n import _LI
from gbpservice._i18n import _LW
from gbpservice.network.neutronv2 import local_api from gbpservice.network.neutronv2 import local_api
from gbpservice.neutron.extensions import cisco_apic from gbpservice.neutron.extensions import cisco_apic
from gbpservice.neutron.extensions import cisco_apic_l3 as a_l3 from gbpservice.neutron.extensions import cisco_apic_l3 as a_l3
@ -165,10 +162,10 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
self.md.delete_link(*args, **kwargs) self.md.delete_link(*args, **kwargs)
def __init__(self): def __init__(self):
LOG.info(_LI("APIC AIM MD __init__")) LOG.info("APIC AIM MD __init__")
def initialize(self): def initialize(self):
LOG.info(_LI("APIC AIM MD initializing")) LOG.info("APIC AIM MD initializing")
self.project_name_cache = cache.ProjectNameCache() self.project_name_cache = cache.ProjectNameCache()
self.name_mapper = apic_mapper.APICNameMapper() self.name_mapper = apic_mapper.APICNameMapper()
self.aim = aim_manager.AimManager() self.aim = aim_manager.AimManager()
@ -1011,9 +1008,9 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
# REVISIT: Delete intf_vrf if no longer used? # REVISIT: Delete intf_vrf if no longer used?
else: else:
# This should never happen. # This should never happen.
LOG.error(_LE("Interface topology %(intf_topology)s and " LOG.error("Interface topology %(intf_topology)s and "
"router topology %(router_topology)s have " "router topology %(router_topology)s have "
"different VRFs, but neither is shared"), "different VRFs, but neither is shared",
{'intf_topology': intf_topology, {'intf_topology': intf_topology,
'router_topology': router_topology}) 'router_topology': router_topology})
raise exceptions.InternalError() raise exceptions.InternalError()
@ -1611,8 +1608,8 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
LOG.debug("Bound using segment: %s", segment) LOG.debug("Bound using segment: %s", segment)
return True return True
else: else:
LOG.warning(_LW("Refusing to bind port %(port)s to dead " LOG.warning("Refusing to bind port %(port)s to dead "
"agent: %(agent)s"), "agent: %(agent)s",
{'port': current['id'], 'agent': agent}) {'port': current['id'], 'agent': agent})
def _opflex_bind_port(self, context, segment, agent): def _opflex_bind_port(self, context, segment, agent):
@ -1691,7 +1688,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
api.PHYSICAL_NETWORK: api.PHYSICAL_NETWORK:
segment[api.PHYSICAL_NETWORK]} segment[api.PHYSICAL_NETWORK]}
dyn_seg = context.allocate_dynamic_segment(seg_args) dyn_seg = context.allocate_dynamic_segment(seg_args)
LOG.info(_LI('Allocated dynamic-segment %(s)s for port %(p)s'), LOG.info('Allocated dynamic-segment %(s)s for port %(p)s',
{'s': dyn_seg, 'p': context.current['id']}) {'s': dyn_seg, 'p': context.current['id']})
dyn_seg['aim_ml2_created'] = True dyn_seg['aim_ml2_created'] = True
context.continue_binding(segment[api.ID], [dyn_seg]) context.continue_binding(segment[api.ID], [dyn_seg])
@ -1970,8 +1967,8 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
def _move_topology(self, aim_ctx, topology, old_vrf, new_vrf, def _move_topology(self, aim_ctx, topology, old_vrf, new_vrf,
nets_to_notify): nets_to_notify):
LOG.info(_LI("Moving routed networks %(topology)s from VRF " LOG.info("Moving routed networks %(topology)s from VRF "
"%(old_vrf)s to VRF %(new_vrf)s"), "%(old_vrf)s to VRF %(new_vrf)s",
{'topology': topology.keys(), {'topology': topology.keys(),
'old_vrf': old_vrf, 'old_vrf': old_vrf,
'new_vrf': new_vrf}) 'new_vrf': new_vrf})
@ -2189,7 +2186,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
display_name=aim_utils.sanitize_display_name('CommonTenant')) display_name=aim_utils.sanitize_display_name('CommonTenant'))
tenant = self.aim.get(aim_ctx, attrs) tenant = self.aim.get(aim_ctx, attrs)
if not tenant: if not tenant:
LOG.info(_LI("Creating common tenant")) LOG.info("Creating common tenant")
tenant = self.aim.create(aim_ctx, attrs) tenant = self.aim.create(aim_ctx, attrs)
return tenant return tenant
@ -2200,7 +2197,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
if not vrf: if not vrf:
attrs.display_name = ( attrs.display_name = (
aim_utils.sanitize_display_name('CommonUnroutedVRF')) aim_utils.sanitize_display_name('CommonUnroutedVRF'))
LOG.info(_LI("Creating common unrouted VRF")) LOG.info("Creating common unrouted VRF")
vrf = self.aim.create(aim_ctx, attrs) vrf = self.aim.create(aim_ctx, attrs)
return vrf return vrf
@ -2213,7 +2210,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
name=filter_name, name=filter_name,
display_name=dname) display_name=dname)
if not self.aim.get(aim_ctx, filter): if not self.aim.get(aim_ctx, filter):
LOG.info(_LI("Creating common Any Filter")) LOG.info("Creating common Any Filter")
self.aim.create(aim_ctx, filter) self.aim.create(aim_ctx, filter)
dname = aim_utils.sanitize_display_name("AnyFilterEntry") dname = aim_utils.sanitize_display_name("AnyFilterEntry")
@ -2222,7 +2219,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
name=ANY_FILTER_ENTRY_NAME, name=ANY_FILTER_ENTRY_NAME,
display_name=dname) display_name=dname)
if not self.aim.get(aim_ctx, entry): if not self.aim.get(aim_ctx, entry):
LOG.info(_LI("Creating common Any FilterEntry")) LOG.info("Creating common Any FilterEntry")
self.aim.create(aim_ctx, entry) self.aim.create(aim_ctx, entry)
return filter return filter
@ -2232,7 +2229,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
if not vrf: if not vrf:
attrs.display_name = ( attrs.display_name = (
aim_utils.sanitize_display_name('DefaultRoutedVRF')) aim_utils.sanitize_display_name('DefaultRoutedVRF'))
LOG.info(_LI("Creating default VRF for %s"), attrs.tenant_name) LOG.info("Creating default VRF for %s", attrs.tenant_name)
vrf = self.aim.create(aim_ctx, attrs) vrf = self.aim.create(aim_ctx, attrs)
return vrf return vrf
@ -2504,8 +2501,8 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
.filter(extn_db_sn.snat_host_pool.is_(True)) .filter(extn_db_sn.snat_host_pool.is_(True))
.all()) .all())
if not snat_subnets: if not snat_subnets:
LOG.info(_LI('No subnet in external network %s is marked as ' LOG.info('No subnet in external network %s is marked as '
'SNAT-pool'), 'SNAT-pool',
ext_network['id']) ext_network['id'])
return return
for snat_subnet in snat_subnets: for snat_subnet in snat_subnets:
@ -2524,8 +2521,8 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
snat_ip = port['fixed_ips'][0]['ip_address'] snat_ip = port['fixed_ips'][0]['ip_address']
break break
except n_exceptions.IpAddressGenerationFailure: except n_exceptions.IpAddressGenerationFailure:
LOG.info(_LI('No more addresses available in subnet %s ' LOG.info('No more addresses available in subnet %s '
'for SNAT IP allocation'), 'for SNAT IP allocation',
snat_subnet['id']) snat_subnet['id'])
else: else:
snat_ip = snat_port.fixed_ips[0].ip_address snat_ip = snat_port.fixed_ips[0].ip_address
@ -2569,8 +2566,8 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
try: try:
self.plugin.delete_port(e_context, p[0]) self.plugin.delete_port(e_context, p[0])
except n_exceptions.NeutronException as ne: except n_exceptions.NeutronException as ne:
LOG.warning(_LW('Failed to delete SNAT port %(port)s: ' LOG.warning('Failed to delete SNAT port %(port)s: '
'%(ex)s'), '%(ex)s',
{'port': p, 'ex': ne}) {'port': p, 'ex': ne})
def check_floatingip_external_address(self, context, floatingip): def check_floatingip_external_address(self, context, floatingip):
@ -2625,7 +2622,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
epg = self.get_epg_for_network(session, network) epg = self.get_epg_for_network(session, network)
if not epg: if not epg:
LOG.info(_LI('Network %s does not map to any EPG'), network['id']) LOG.info('Network %s does not map to any EPG', network['id'])
return return
if segment: if segment:
@ -2682,8 +2679,8 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
aim_ctx, aim_infra.HostLink, host_name=host, aim_ctx, aim_infra.HostLink, host_name=host,
interface_name=interface) interface_name=interface)
if not host_link or not host_link[0].path: if not host_link or not host_link[0].path:
LOG.warning(_LW('No host link information found for host: ' LOG.warning('No host link information found for host: '
'%(host)s, interface: %(interface)s'), '%(host)s, interface: %(interface)s',
{'host': host, 'interface': interface}) {'host': host, 'interface': interface})
continue continue
host_link = host_link[0].path host_link = host_link[0].path
@ -2697,7 +2694,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
host_link = self.aim.find(aim_ctx, aim_infra.HostLink, host_link = self.aim.find(aim_ctx, aim_infra.HostLink,
host_name=host) host_name=host)
if not host_link or not host_link[0].path: if not host_link or not host_link[0].path:
LOG.warning(_LW('No host link information found for host %s'), LOG.warning('No host link information found for host %s',
host) host)
return return
host_link = host_link[0].path host_link = host_link[0].path
@ -2721,7 +2718,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
port_context.current['id']) port_context.current['id'])
.first()) .first())
if not ports: if not ports:
LOG.info(_LI('Releasing dynamic-segment %(s)s for port %(p)s'), LOG.info('Releasing dynamic-segment %(s)s for port %(p)s',
{'s': btm, 'p': port_context.current['id']}) {'s': btm, 'p': port_context.current['id']})
port_context.release_dynamic_segment(btm[api.ID]) port_context.release_dynamic_segment(btm[api.ID])
@ -2871,7 +2868,7 @@ class ApicMechanismDriver(api_plus.MechanismDriver,
# this could be caused by concurrent transactions # this could be caused by concurrent transactions
except db_exc.DBDuplicateEntry as e: except db_exc.DBDuplicateEntry as e:
LOG.debug(e) LOG.debug(e)
LOG.info(_LI('Releasing domain %(d)s for port %(p)s'), LOG.info('Releasing domain %(d)s for port %(p)s',
{'d': domain, 'p': port['id']}) {'d': domain, 'p': port['id']})
def _get_non_opflex_segments_on_host(self, context, host): def _get_non_opflex_segments_on_host(self, context, host):


@ -13,8 +13,6 @@
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
from gbpservice._i18n import _LE
from gbpservice._i18n import _LI
from gbpservice.neutron.plugins.ml2plus import driver_api from gbpservice.neutron.plugins.ml2plus import driver_api
from neutron.db import api as db_api from neutron.db import api as db_api
@ -62,8 +60,8 @@ class MechanismManager(managers.MechanismManager):
'method': method_name}, 'method': method_name},
exc_info=e) exc_info=e)
LOG.exception( LOG.exception(
_LE("Mechanism driver '%(name)s' failed in " "Mechanism driver '%(name)s' failed in "
"%(method)s"), "%(method)s",
{'name': driver.name, 'method': method_name} {'name': driver.name, 'method': method_name}
) )
errors.append(e) errors.append(e)
@ -81,8 +79,8 @@ class MechanismManager(managers.MechanismManager):
try: try:
driver.obj.ensure_tenant(plugin_context, tenant_id) driver.obj.ensure_tenant(plugin_context, tenant_id)
except Exception: except Exception:
LOG.exception(_LE("Mechanism driver '%s' failed in " LOG.exception("Mechanism driver '%s' failed in "
"ensure_tenant"), driver.name) "ensure_tenant", driver.name)
raise ml2_exc.MechanismDriverError(method="ensure_tenant") raise ml2_exc.MechanismDriverError(method="ensure_tenant")
def create_subnetpool_precommit(self, context): def create_subnetpool_precommit(self, context):
@ -197,8 +195,8 @@ class ExtensionManager(managers.ExtensionManager):
result) result)
except Exception: except Exception:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.info(_LI("Extension driver '%(name)s' failed in " LOG.info("Extension driver '%(name)s' failed in "
"%(method)s"), "%(method)s",
{'name': driver.name, 'method': method_name}) {'name': driver.name, 'method': method_name})
# Overrides ML2 implementation to avoid eating retriable # Overrides ML2 implementation to avoid eating retriable
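
LOG.exception in the hunks above attaches the active traceback automatically, so after the marker removal the message is just a plain format string. A standalone sketch of the manager's error handling (call_driver is hypothetical):

    from oslo_log import log as logging

    LOG = logging.getLogger(__name__)

    def call_driver(driver, method_name, *args, **kwargs):
        try:
            getattr(driver, method_name)(*args, **kwargs)
        except Exception:
            LOG.exception("Mechanism driver '%(name)s' failed in "
                          "%(method)s",
                          {'name': driver.__class__.__name__,
                           'method': method_name})
            raise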


@ -86,7 +86,6 @@ def notify(resource, event, trigger, **kwargs):
registry.notify = notify registry.notify = notify
from neutron._i18n import _LE
from neutron.callbacks import events from neutron.callbacks import events
from neutron.callbacks import exceptions from neutron.callbacks import exceptions
from oslo_log import log as logging from oslo_log import log as logging
@ -112,12 +111,12 @@ def _notify_loop(resource, event, trigger, **kwargs):
event.startswith(events.PRECOMMIT) event.startswith(events.PRECOMMIT)
) )
if not abortable_event: if not abortable_event:
LOG.exception(_LE("Error during notification for " LOG.exception("Error during notification for "
"%(callback)s %(resource)s, %(event)s"), "%(callback)s %(resource)s, %(event)s",
{'callback': callback_id, {'callback': callback_id,
'resource': resource, 'event': event}) 'resource': resource, 'event': event})
else: else:
LOG.error(_LE("Callback %(callback)s raised %(error)s"), LOG.error("Callback %(callback)s raised %(error)s",
{'callback': callback_id, 'error': e}) {'callback': callback_id, 'error': e})
errors.append(exceptions.NotificationError(callback_id, e)) errors.append(exceptions.NotificationError(callback_id, e))
return errors return errors
@ -197,11 +196,9 @@ def commit_reservation(context, reservation_id):
quota.QUOTAS.get_driver().commit_reservation = commit_reservation quota.QUOTAS.get_driver().commit_reservation = commit_reservation
from neutron._i18n import _LI
from oslo_db.sqlalchemy import exc_filters from oslo_db.sqlalchemy import exc_filters
exc_filters._LE = _LI
exc_filters.LOG.exception = exc_filters.LOG.debug exc_filters.LOG.exception = exc_filters.LOG.debug
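
With the markers gone there is nothing left for the old exc_filters._LE = _LI re-binding to patch, so only the severity downgrade of oslo.db's exception logging survives:

    from oslo_db.sqlalchemy import exc_filters

    # Keep oslo.db's retriable-exception reports at debug rather than
    # exception level; the _LE re-binding is no longer needed.
    exc_filters.LOG.exception = exc_filters.LOG.debug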


@ -16,8 +16,6 @@
# The following is imported at the beginning to ensure # The following is imported at the beginning to ensure
# that the patches are applied before any of the # that the patches are applied before any of the
# modules save a reference to the functions being patched # modules save a reference to the functions being patched
from gbpservice._i18n import _LE
from gbpservice._i18n import _LI
from gbpservice.neutron import extensions as gbp_extensions from gbpservice.neutron import extensions as gbp_extensions
from gbpservice.neutron.extensions import patch # noqa from gbpservice.neutron.extensions import patch # noqa
from gbpservice.neutron.plugins.ml2plus import patch_neutron # noqa from gbpservice.neutron.plugins.ml2plus import patch_neutron # noqa
@ -135,7 +133,7 @@ class Ml2PlusPlugin(ml2_plugin.Ml2Plugin,
security_group=securitygroups_db.SecurityGroup, security_group=securitygroups_db.SecurityGroup,
security_group_rule=securitygroups_db.SecurityGroupRule) security_group_rule=securitygroups_db.SecurityGroupRule)
def __init__(self): def __init__(self):
LOG.info(_LI("Ml2Plus initializing")) LOG.info("Ml2Plus initializing")
registry._get_callback_manager()._notify_loop = ( registry._get_callback_manager()._notify_loop = (
patch_neutron._notify_loop) patch_neutron._notify_loop)
# First load drivers, then initialize DB, then initialize drivers # First load drivers, then initialize DB, then initialize drivers
@ -179,9 +177,9 @@ class Ml2PlusPlugin(ml2_plugin.Ml2Plugin,
registry.subscribe(self._subnet_delete_after_delete_handler, registry.subscribe(self._subnet_delete_after_delete_handler,
resources.SUBNET, events.AFTER_DELETE) resources.SUBNET, events.AFTER_DELETE)
except AttributeError: except AttributeError:
LOG.info(_LI("Detected older version of Neutron, ML2Plus plugin " LOG.info("Detected older version of Neutron, ML2Plus plugin "
"is not subscribed to subnet_precommit_delete and " "is not subscribed to subnet_precommit_delete and "
"subnet_after_delete events")) "subnet_after_delete events")
self._setup_dhcp() self._setup_dhcp()
self._start_rpc_notifiers() self._start_rpc_notifiers()
self.add_agent_status_check_worker(self.agent_health_check) self.add_agent_status_check_worker(self.agent_health_check)
@ -193,7 +191,7 @@ class Ml2PlusPlugin(ml2_plugin.Ml2Plugin,
cfg.CONF.ml2plus.refresh_subnetpool_db_obj) cfg.CONF.ml2plus.refresh_subnetpool_db_obj)
self.refresh_address_scope_db_obj = ( self.refresh_address_scope_db_obj = (
cfg.CONF.ml2plus.refresh_address_scope_db_obj) cfg.CONF.ml2plus.refresh_address_scope_db_obj)
LOG.info(_LI("Modular L2 Plugin (extended) initialization complete")) LOG.info("Modular L2 Plugin (extended) initialization complete")
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
attributes.SUBNETPOOLS, ['_ml2_md_extend_subnetpool_dict']) attributes.SUBNETPOOLS, ['_ml2_md_extend_subnetpool_dict'])
@ -412,8 +410,8 @@ class Ml2PlusPlugin(ml2_plugin.Ml2Plugin,
self.mechanism_manager.create_subnetpool_postcommit(mech_context) self.mechanism_manager.create_subnetpool_postcommit(mech_context)
except ml2_exc.MechanismDriverError: except ml2_exc.MechanismDriverError:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.error(_LE("mechanism_manager.create_subnetpool_postcommit " LOG.error("mechanism_manager.create_subnetpool_postcommit "
"failed, deleting subnetpool '%s'"), "failed, deleting subnetpool '%s'",
result['id']) result['id'])
self.delete_subnetpool(context, result['id']) self.delete_subnetpool(context, result['id'])
return result return result
@ -476,9 +474,9 @@ class Ml2PlusPlugin(ml2_plugin.Ml2Plugin,
mech_context) mech_context)
except ml2_exc.MechanismDriverError: except ml2_exc.MechanismDriverError:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.error(_LE("mechanism_manager.create_address_scope_" LOG.error("mechanism_manager.create_address_scope_"
"postcommit failed, deleting address_scope" "postcommit failed, deleting address_scope"
" '%s'"), " '%s'",
result['id']) result['id'])
self.delete_address_scope(context, result['id']) self.delete_address_scope(context, result['id'])
return result return result
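
The two postcommit hunks above share a rollback idiom: log the failure, delete the half-created resource, and let save_and_reraise_exception re-raise the original MechanismDriverError. A generic, self-contained sketch (create_then_rollback is hypothetical):

    from oslo_log import log as logging
    from oslo_utils import excutils

    LOG = logging.getLogger(__name__)

    def create_then_rollback(create, rollback, resource_id):
        try:
            create()
        except Exception:
            with excutils.save_and_reraise_exception():
                LOG.error("postcommit failed, deleting resource '%s'",
                          resource_id)
                rollback()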


@ -29,8 +29,6 @@ from oslo_log import log as logging
from oslo_utils import excutils from oslo_utils import excutils
from sqlalchemy import inspect from sqlalchemy import inspect
from gbpservice._i18n import _LE
from gbpservice._i18n import _LI
from gbpservice.neutron import extensions as extensions_pkg from gbpservice.neutron import extensions as extensions_pkg
from gbpservice.neutron.extensions import cisco_apic_l3 as l3_ext from gbpservice.neutron.extensions import cisco_apic_l3 as l3_ext
from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import ( from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import (
@ -61,7 +59,7 @@ class ApicL3Plugin(common_db_mixin.CommonDbMixin,
@resource_registry.tracked_resources(router=l3_db.Router, @resource_registry.tracked_resources(router=l3_db.Router,
floatingip=l3_db.FloatingIP) floatingip=l3_db.FloatingIP)
def __init__(self): def __init__(self):
LOG.info(_LI("APIC AIM L3 Plugin __init__")) LOG.info("APIC AIM L3 Plugin __init__")
extensions.append_api_extensions_path(extensions_pkg.__path__) extensions.append_api_extensions_path(extensions_pkg.__path__)
self._mechanism_driver = None self._mechanism_driver = None
super(ApicL3Plugin, self).__init__() super(ApicL3Plugin, self).__init__()
@ -84,7 +82,7 @@ class ApicL3Plugin(common_db_mixin.CommonDbMixin,
self._include_router_extn_attr(session, router_res) self._include_router_extn_attr(session, router_res)
except Exception: except Exception:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.exception(_LE("APIC AIM extend_router_dict failed")) LOG.exception("APIC AIM extend_router_dict failed")
db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs( db_base_plugin_v2.NeutronDbPluginV2.register_dict_extend_funcs(
l3.ROUTERS, ['_extend_router_dict_apic']) l3.ROUTERS, ['_extend_router_dict_apic'])
@ -257,8 +255,8 @@ class ApicL3Plugin(common_db_mixin.CommonDbMixin,
.create_floatingip(context, floatingip)) .create_floatingip(context, floatingip))
break break
except exceptions.IpAddressGenerationFailure: except exceptions.IpAddressGenerationFailure:
LOG.info(_LI('No more floating IP addresses available ' LOG.info('No more floating IP addresses available '
'in subnet %s'), 'in subnet %s',
ext_sn) ext_sn)
if not result: if not result:


@ -20,7 +20,6 @@ from oslo_serialization import jsonutils
from oslo_utils import excutils from oslo_utils import excutils
import sqlalchemy as sa import sqlalchemy as sa
from gbpservice._i18n import _LE
from gbpservice.common import utils from gbpservice.common import utils
from gbpservice.network.neutronv2 import local_api from gbpservice.network.neutronv2 import local_api
from gbpservice.neutron.db.grouppolicy import group_policy_mapping_db as gpdb from gbpservice.neutron.db.grouppolicy import group_policy_mapping_db as gpdb
@ -104,10 +103,10 @@ class ChainMappingDriver(api.PolicyDriver, local_api.LocalAPI,
return tenant.id return tenant.id
except k_exceptions.NotFound: except k_exceptions.NotFound:
with excutils.save_and_reraise_exception(reraise=reraise): with excutils.save_and_reraise_exception(reraise=reraise):
LOG.error(_LE('No tenant with name %s exists.'), tenant) LOG.error('No tenant with name %s exists.', tenant)
except k_exceptions.NoUniqueMatch: except k_exceptions.NoUniqueMatch:
with excutils.save_and_reraise_exception(reraise=reraise): with excutils.save_and_reraise_exception(reraise=reraise):
LOG.error(_LE('Multiple tenants matches found for %s'), LOG.error('Multiple tenants matches found for %s',
tenant) tenant)
@staticmethod @staticmethod
@ -290,7 +289,7 @@ class ChainMappingDriver(api.PolicyDriver, local_api.LocalAPI,
context.current['status_details'] = ptg_status[0][ context.current['status_details'] = ptg_status[0][
'status_details'] 'status_details']
except Exception: except Exception:
LOG.error(_LE('Failed to update ptg status')) LOG.error('Failed to update ptg status')
@log.log_method_call @log.log_method_call
def _delete_policy_target_group_postcommit(self, context): def _delete_policy_target_group_postcommit(self, context):


@@ -29,9 +29,6 @@ from oslo_log import helpers as log
 from oslo_log import log as logging
 from oslo_utils import excutils
-from gbpservice._i18n import _LE
-from gbpservice._i18n import _LI
-from gbpservice._i18n import _LW
 from gbpservice.network.neutronv2 import local_api
 from gbpservice.neutron.db.grouppolicy import group_policy_db as gpdb
 from gbpservice.neutron.db.grouppolicy import group_policy_mapping_db as gpmdb
@@ -154,7 +151,7 @@ class AIMMappingDriver(nrd.CommonNeutronBase, aim_rpc.AIMMappingRPCMixin):
     @log.log_method_call
     def initialize(self):
-        LOG.info(_LI("APIC AIM Policy Driver initializing"))
+        LOG.info("APIC AIM Policy Driver initializing")
         super(AIMMappingDriver, self).initialize()
         self._apic_aim_mech_driver = None
         self._apic_segmentation_label_driver = None
@@ -163,16 +160,16 @@ class AIMMappingDriver(nrd.CommonNeutronBase, aim_rpc.AIMMappingRPCMixin):
         self._name_mapper = None
         self.create_auto_ptg = cfg.CONF.aim_mapping.create_auto_ptg
         if self.create_auto_ptg:
-            LOG.info(_LI('Auto PTG creation configuration set, '
+            LOG.info('Auto PTG creation configuration set, '
                      'this will result in automatic creation of a PTG '
-                     'per L2 Policy'))
+                     'per L2 Policy')
         self.create_per_l3p_implicit_contracts = (
             cfg.CONF.aim_mapping.create_per_l3p_implicit_contracts)
         self.advertise_mtu = cfg.CONF.aim_mapping.advertise_mtu
         local_api.QUEUE_OUT_OF_PROCESS_NOTIFICATIONS = True
         if self.create_per_l3p_implicit_contracts:
-            LOG.info(_LI('Implicit AIM contracts will be created '
-                         'for l3_policies which do not have them.'))
+            LOG.info('Implicit AIM contracts will be created '
+                     'for l3_policies which do not have them.')
             self._create_per_l3p_implicit_contracts()

     @log.log_method_call
@@ -453,11 +450,11 @@ class AIMMappingDriver(nrd.CommonNeutronBase, aim_rpc.AIMMappingRPCMixin):
                     context._plugin).delete_policy_target_group(
                         context._plugin_context, auto_ptg['id'])
             except gpolicy.PolicyTargetGroupNotFound:
-                LOG.info(_LI("Auto PTG with ID %(id)s for "
+                LOG.info("Auto PTG with ID %(id)s for "
                          "for L2P %(l2p)s not found. If create_auto_ptg "
                          "configuration was not set at the time of the L2P "
                          "creation, you can safely ignore this, else this "
-                         "could potentially be indication of an error."),
+                         "could potentially be indication of an error.",
                          {'id': auto_ptg_id, 'l2p': l2p_id})
         super(AIMMappingDriver, self).delete_l2_policy_precommit(context)
@@ -1062,9 +1059,9 @@ class AIMMappingDriver(nrd.CommonNeutronBase, aim_rpc.AIMMappingRPCMixin):
                 ok_to_bind = True
                 break
         if not ok_to_bind:
-            LOG.warning(_LW("Failed to bind the port due to "
+            LOG.warning("Failed to bind the port due to "
                         "allowed_vm_names rules %(rules)s "
-                        "for VM: %(vm)s"),
+                        "for VM: %(vm)s",
                         {'rules': l3p['allowed_vm_names'],
                          'vm': vm.name})
         return ok_to_bind
@@ -1548,9 +1545,9 @@ class AIMMappingDriver(nrd.CommonNeutronBase, aim_rpc.AIMMappingRPCMixin):
         # default EPG and delete them
         # create=True, delete=True is not a valid combination
         if create and delete:
-            LOG.error(_LE("Incorrect use of internal method "
+            LOG.error("Incorrect use of internal method "
                       "_process_contracts_for_default_epg(), create and "
-                      "delete cannot be True at the same time"))
+                      "delete cannot be True at the same time")
             raise
         session = context._plugin_context.session
         aim_ctx = aim_context.AimContext(session)
@@ -1792,8 +1789,8 @@ class AIMMappingDriver(nrd.CommonNeutronBase, aim_rpc.AIMMappingRPCMixin):
         if not epg:
             # Something is wrong, default EPG doesn't exist.
             # TODO(ivar): should rise an exception
-            LOG.error(_LE("Default EPG doesn't exist for "
-                          "port %s"), port['id'])
+            LOG.error("Default EPG doesn't exist for "
+                      "port %s", port['id'])
         return epg

     def _get_subnet_details(self, plugin_context, port, details):
@@ -2155,9 +2152,9 @@ class AIMMappingDriver(nrd.CommonNeutronBase, aim_rpc.AIMMappingRPCMixin):
                 intf_port = self._create_port(plugin_context, attrs)
             except n_exc.NeutronException:
                 with excutils.save_and_reraise_exception():
-                    LOG.exception(_LE('Failed to create explicit router '
+                    LOG.exception('Failed to create explicit router '
                                   'interface port in subnet '
-                                  '%(subnet)s'),
+                                  '%(subnet)s',
                                   {'subnet': subnet['id']})
             interface_info = {'port_id': intf_port['id'],
                               NO_VALIDATE: True}
@@ -2167,9 +2164,9 @@ class AIMMappingDriver(nrd.CommonNeutronBase, aim_rpc.AIMMappingRPCMixin):
             except n_exc.BadRequest:
                 self._delete_port(plugin_context, intf_port['id'])
                 with excutils.save_and_reraise_exception():
-                    LOG.exception(_LE('Attaching router %(router)s to '
+                    LOG.exception('Attaching router %(router)s to '
                                   '%(subnet)s with explicit port '
-                                  '%(port) failed'),
+                                  '%(port) failed',
                                   {'subnet': subnet['id'],
                                    'router': router_id,
                                    'port': intf_port['id']})
@@ -2185,8 +2182,8 @@ class AIMMappingDriver(nrd.CommonNeutronBase, aim_rpc.AIMMappingRPCMixin):
             self._add_router_interface(plugin_context, router_id,
                                        interface_info)
         except n_exc.BadRequest as e:
-            LOG.exception(_LE("Adding subnet to router failed, exception:"
-                              "%s"), e)
+            LOG.exception("Adding subnet to router failed, exception:"
+                          "%s", e)
             raise exc.GroupPolicyInternalError()

     def _detach_router_from_subnets(self, plugin_context, router_id, sn_ids):
@@ -17,8 +17,6 @@ from neutron.plugins.ml2 import rpc as ml2_rpc
 from opflexagent import rpc as o_rpc
 from oslo_log import log
-from gbpservice._i18n import _LE
-from gbpservice._i18n import _LW
 from gbpservice.neutron.services.grouppolicy.drivers.cisco.apic import (
     nova_client as nclient)
 from gbpservice.neutron.services.grouppolicy.drivers.cisco.apic import (
@@ -65,8 +63,8 @@ class AIMMappingRPCMixin(ha_ip_db.HAIPOwnerDbMixin):
             return self._retrieve_vrf_details(context, **kwargs)
         except Exception as e:
             vrf = kwargs.get('vrf_id')
-            LOG.error(_LE("An exception has occurred while retrieving vrf "
-                          "gbp details for %s"), vrf)
+            LOG.error("An exception has occurred while retrieving vrf "
+                      "gbp details for %s", vrf)
             LOG.exception(e)
             return {'l3_policy_id': vrf}
@@ -82,8 +80,8 @@ class AIMMappingRPCMixin(ha_ip_db.HAIPOwnerDbMixin):
             return self._get_gbp_details(context, kwargs, kwargs.get('host'))
         except Exception as e:
             device = kwargs.get('device')
-            LOG.error(_LE("An exception has occurred while retrieving device "
-                          "gbp details for %s"), device)
+            LOG.error("An exception has occurred while retrieving device "
+                      "gbp details for %s", device)
             LOG.exception(e)
             return {'device': device}
@@ -101,8 +99,8 @@ class AIMMappingRPCMixin(ha_ip_db.HAIPOwnerDbMixin):
                 None, None).get_device_details(context, **request)}
             return result
         except Exception as e:
-            LOG.error(_LE("An exception has occurred while requesting device "
-                          "gbp details for %s"), request.get('device'))
+            LOG.error("An exception has occurred while requesting device "
+                      "gbp details for %s", request.get('device'))
             LOG.exception(e)
             return None
@@ -137,8 +135,8 @@ class AIMMappingRPCMixin(ha_ip_db.HAIPOwnerDbMixin):
         port_context = core_plugin.get_bound_port_context(context, port_id,
                                                           host)
         if not port_context:
-            LOG.warning(_LW("Device %(device)s requested by agent "
-                            "%(agent_id)s not found in database"),
+            LOG.warning("Device %(device)s requested by agent "
+                        "%(agent_id)s not found in database",
                         {'device': port_id,
                          'agent_id': request.get('agent_id')})
             return {'device': request.get('device')}
@@ -14,8 +14,6 @@ from neutron.notifiers import nova as n_nova
 from novaclient import exceptions as nova_exceptions
 from oslo_log import log as logging
-from gbpservice._i18n import _LW
-
 LOG = logging.getLogger(__name__)
@@ -39,7 +37,7 @@ class NovaClient(object):
         try:
             return self.client.servers.get(server_id)
         except nova_exceptions.NotFound:
-            LOG.warning(_LW("Nova returned NotFound for server: %s"),
+            LOG.warning("Nova returned NotFound for server: %s",
                         server_id)
         except Exception as e:
             LOG.exception(e)
@@ -13,7 +13,6 @@
 from neutron_lib.plugins import directory
 from oslo_log import log as logging
-from gbpservice._i18n import _LI
 from gbpservice.neutron.db.grouppolicy.extensions import (
     apic_auto_ptg_db as auto_ptg_db)
 from gbpservice.neutron.db.grouppolicy.extensions import (
@@ -34,7 +33,7 @@ class AIMExtensionDriver(api.ExtensionDriver,
     _extension_dict = cisco_apic_gbp.EXTENDED_ATTRIBUTES_2_0

     def __init__(self):
-        LOG.info(_LI("AIM Extension __init__"))
+        LOG.info("AIM Extension __init__")
         self._policy_driver = None

     @property
@@ -12,7 +12,6 @@
 from oslo_log import log as logging
-from gbpservice._i18n import _LI
 from gbpservice.neutron.db.grouppolicy.extensions import (
     apic_reuse_bd_db as db)
 from gbpservice.neutron.db.grouppolicy import group_policy_db as gp_db
@@ -30,7 +29,7 @@ class ApicReuseBdExtensionDriver(api.ExtensionDriver,
     _extension_dict = ext.EXTENDED_ATTRIBUTES_2_0

     def __init__(self):
-        LOG.info(_LI("ApicReuseBdExtensionDriver __init__"))
+        LOG.info("ApicReuseBdExtensionDriver __init__")

     def initialize(self):
         pass
@@ -15,7 +15,6 @@ from neutron_lib.api import validators
 from oslo_config import cfg
 from oslo_log import log as logging
-from gbpservice._i18n import _LW
 from gbpservice.neutron.db.grouppolicy.extensions import group_proxy_db as db
 from gbpservice.neutron.db.grouppolicy import group_policy_db as gp_db
 from gbpservice.neutron.extensions import driver_proxy_group
@@ -115,10 +114,10 @@ class ProxyGroupDriver(api.ExtensionDriver):
                 data['ip_version'], data['proxy_subnet_prefix_length'],
                 data['ip_pool'])
             if data['proxy_ip_pool']:
-                LOG.warning(_LW("Since use_subnetpools setting is turned on, "
+                LOG.warning("Since use_subnetpools setting is turned on, "
                             "proxy_ip_pool %s will be ignored. "
                             "Proxy subnets will be allocated from same "
-                            "subnetpool as group subnets"),
+                            "subnetpool as group subnets",
                             data['proxy_ip_pool'])
         else:
             gp_db.GroupPolicyDbPlugin.validate_ip_pool(
@@ -17,8 +17,6 @@ from oslo_log import log as logging
 from oslo_utils import excutils
 import sqlalchemy as sa
-from gbpservice._i18n import _LI
-from gbpservice._i18n import _LW
 from gbpservice.network.neutronv2 import local_api
 from gbpservice.neutron.extensions import driver_proxy_group as pg_ext
 from gbpservice.neutron.extensions import group_policy as gbp_ext
@@ -130,17 +128,17 @@ class ImplicitPolicyBase(api.PolicyDriver, local_api.LocalAPI):
                     filter)
                 l3p = l3ps and l3ps[0]
                 if not l3p:
-                    LOG.warning(_LW(
+                    LOG.warning(
                         "Caught DefaultL3PolicyAlreadyExists, "
                         "but default L3 policy not concurrently "
-                        "created for tenant %s"), tenant_id)
+                        "created for tenant %s", tenant_id)
                     ctxt.reraise = True
         except exc.OverlappingIPPoolsInSameTenantNotAllowed:
             with excutils.save_and_reraise_exception():
-                LOG.info(_LI("Caught "
+                LOG.info("Caught "
                          "OverlappingIPPoolsinSameTenantNotAllowed "
                          "during creation of default L3 policy for "
-                         "tenant %s"), tenant_id)
+                         "tenant %s", tenant_id)
         context.current['l3_policy_id'] = l3p['id']

     def _use_implicit_l3_policy(self, context):
@@ -204,9 +202,9 @@ class ImplicitPolicyBase(api.PolicyDriver, local_api.LocalAPI):
         try:
             self._delete_l2_policy(context._plugin_context, l2p_id)
         except gbp_ext.L2PolicyInUse:
-            LOG.info(_LI(
+            LOG.info(
                 "Cannot delete implicit L2 Policy %s because it's "
-                "in use."), l2p_id)
+                "in use.", l2p_id)

     def _validate_default_external_segment(self, context):
         # REVISIT(ivar): find a better way to retrieve the default ES
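Many of the reworked log calls above sit inside excutils.save_and_reraise_exception() blocks. For context, a minimal sketch of that oslo.utils idiom (the flaky_cleanup() helper is hypothetical): the context manager captures the in-flight exception, lets the body log or roll back, then re-raises the original exception with its traceback intact unless reraise is cleared:

    from oslo_log import log as logging
    from oslo_utils import excutils

    LOG = logging.getLogger(__name__)

    def flaky_cleanup(resource_id):
        # Hypothetical stand-in for a driver call that may raise.
        raise RuntimeError("cleanup failed for %s" % resource_id)

    def guarded_delete(resource_id):
        try:
            flaky_cleanup(resource_id)
        except Exception:
            with excutils.save_and_reraise_exception() as ctxt:
                LOG.exception("Error while deleting %s", resource_id)
                # Setting ctxt.reraise = False here would swallow the
                # error instead of re-raising it.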
@@ -33,9 +33,6 @@ from oslo_utils import excutils
 import sqlalchemy as sa
 from sqlalchemy.orm import exc as sa_exc
-from gbpservice._i18n import _LE
-from gbpservice._i18n import _LI
-from gbpservice._i18n import _LW
 from gbpservice.common import utils
 from gbpservice.network.neutronv2 import local_api
 from gbpservice.neutron.db.grouppolicy import group_policy_db as gpdb
@@ -318,9 +315,9 @@ class ImplicitResourceOperations(local_api.LocalAPI,
                 filters={'address_scope_id':
                          [address_scope_id]})
             if subpools:
-                LOG.warning(_LW("Cannot delete implicitly created "
+                LOG.warning("Cannot delete implicitly created "
                             "address_scope %(id)s since it has "
-                            "associated subnetpools: %(pools)s"),
+                            "associated subnetpools: %(pools)s",
                             {'id': address_scope_id, 'pools': subpools})
             else:
                 self._delete_address_scope(plugin_context, address_scope_id)
@@ -358,9 +355,9 @@ class ImplicitResourceOperations(local_api.LocalAPI,
                 filters={'subnetpool_id':
                          [subnetpool_id]})
             if subnets:
-                LOG.warning(_LW("Cannot delete implicitly created "
+                LOG.warning("Cannot delete implicitly created "
                             "subnetpool %(id)s since it has "
-                            "associated subnets: %(subnets)s"),
+                            "associated subnets: %(subnets)s",
                             {'id': subnetpool_id, 'subnets': subnets})
             else:
                 self._delete_subnetpool(plugin_context, subnetpool_id)
@@ -631,10 +628,10 @@ class ImplicitResourceOperations(local_api.LocalAPI,
             except Exception as e:
                 if isinstance(e, oslo_db_excp.RetryRequest):
                     raise e
-                LOG.info(_LI("Allocating subnet from subnetpool %(sp)s "
+                LOG.info("Allocating subnet from subnetpool %(sp)s "
                          "failed. Allocation will be attempted "
                          "from any other configured "
-                         "subnetpool(s). Exception: %(excp)s"),
+                         "subnetpool(s). Exception: %(excp)s",
                          {'sp': pool['id'], 'excp': type(e)})
                 last = e
                 continue
@@ -725,7 +722,7 @@ class ImplicitResourceOperations(local_api.LocalAPI,
                 context.set_port_id(port_id)
                 return
             except n_exc.IpAddressGenerationFailure as ex:
-                LOG.warning(_LW("No more address available in subnet %s"),
+                LOG.warning("No more address available in subnet %s",
                             subnet['id'])
                 last = ex
         raise last
@@ -735,7 +732,7 @@ class ImplicitResourceOperations(local_api.LocalAPI,
         try:
             self._delete_port(plugin_context, port_id)
         except n_exc.PortNotFound:
-            LOG.warning(_LW("Port %s is missing"), port_id)
+            LOG.warning("Port %s is missing", port_id)

     def _reject_invalid_router_access(self, context):
         # Validate if the explicit router(s) belong to the tenant.
@@ -782,8 +779,8 @@ class ImplicitResourceOperations(local_api.LocalAPI,
             self._add_router_interface(plugin_context, router_id,
                                        interface_info)
         except n_exc.BadRequest as e:
-            LOG.exception(_LE("Adding subnet to router failed, exception:"
-                              "%s"), e)
+            LOG.exception("Adding subnet to router failed, exception:"
+                          "%s", e)
             raise exc.GroupPolicyInternalError()

     def _add_router_interface_for_subnet(self, context, router_id, subnet_id):
@@ -1109,9 +1106,9 @@ class ImplicitResourceOperations(local_api.LocalAPI,
             context, l2_policy_id)
         fip_ids = []
         if not external_segments:
-            LOG.error(_LE("Network Service Policy to allocate Floating IP "
+            LOG.error("Network Service Policy to allocate Floating IP "
                       "could not be applied because l3policy does "
-                      "not have an attached external segment"))
+                      "not have an attached external segment")
             return fip_ids

         tenant_id = context.current['tenant_id']
@@ -1153,7 +1150,7 @@ class ImplicitResourceOperations(local_api.LocalAPI,
                     # FIP allocated, no need to try further allocation
                     break
                 except n_exc.IpAddressGenerationFailure as ex:
-                    LOG.warning(_LW("Floating allocation failed: %s"),
+                    LOG.warning("Floating allocation failed: %s",
                                 ex.message)
         if fip_id:
             fip_ids.append(fip_id)
@@ -1261,10 +1258,10 @@ class ImplicitResourceOperations(local_api.LocalAPI,
                     filters={'name': [
                         gpip.default_external_segment_name]}))
             if not external_segments:
-                LOG.error(_LE(
+                LOG.error(
                     "Network Service Policy to allocate Floating "
                     "IP could not be associated because l3policy "
-                    "does not have an attached external segment"))
+                    "does not have an attached external segment")
                 raise exc.NSPRequiresES()
             for es in external_segments:
                 if not es['nat_pools']:
@@ -1286,9 +1283,9 @@ class ImplicitResourceOperations(local_api.LocalAPI,
             free_ip = self._get_last_free_ip(context._plugin_context,
                                              context.current['subnets'])
             if not free_ip:
-                LOG.error(_LE("Reserving IP Addresses failed for Network "
+                LOG.error("Reserving IP Addresses failed for Network "
                           "Service Policy. No more IP Addresses on "
-                          "subnet"))
+                          "subnet")
                 return
             # TODO(Magesh):Fetch subnet from PTG to which NSP is attached
             self._remove_ip_from_allocation_pool(
@@ -1640,7 +1637,7 @@ class ResourceMappingDriver(api.PolicyDriver, ImplicitResourceOperations,
             policy_target = context._plugin.get_policy_target(
                 context._plugin_context, pt_id)
         except gp_ext.PolicyTargetNotFound:
-            LOG.warning(_LW("Attempted to fetch deleted Service Target (QoS)"))
+            LOG.warning("Attempted to fetch deleted Service Target (QoS)")
         else:
             port_id = policy_target['port_id']
             port = {attributes.PORT: {'qos_policy_id': None}}
@@ -1703,16 +1700,16 @@ class ResourceMappingDriver(api.PolicyDriver, ImplicitResourceOperations,
             return tenant.id
         except k_exceptions.NotFound:
             with excutils.save_and_reraise_exception(reraise=True):
-                LOG.error(_LE('No tenant with name %s exists.'), tenant)
+                LOG.error('No tenant with name %s exists.', tenant)
         except k_exceptions.NoUniqueMatch:
             with excutils.save_and_reraise_exception(reraise=True):
-                LOG.error(_LE('Multiple tenants matches found for %s'), tenant)
+                LOG.error('Multiple tenants matches found for %s', tenant)
         except k_exceptions.AuthorizationFailure:
-            LOG.error(_LE("User: %(user)s dont have permissions"),
+            LOG.error("User: %(user)s dont have permissions",
                       {'user': user})
         except k_exceptions.Unauthorized:
-            LOG.error(_LE("Wrong credentials provided: user: %(user)s, "
-                          "password: %(pwd)s, tenant: %(tenant)s"),
+            LOG.error("Wrong credentials provided: user: %(user)s, "
+                      "password: %(pwd)s, tenant: %(tenant)s",
                       {'user': user, 'pwd': pwd, 'tenant': tenant})

     @log.log_method_call
@@ -1940,7 +1937,7 @@ class ResourceMappingDriver(api.PolicyDriver, ImplicitResourceOperations,
                 context.nsp_cleanup_ipaddress,
                 context.nsp_cleanup_fips)
         except sa_exc.ObjectDeletedError as err:
-            LOG.warning(_LW("Object already got deleted. Error: %(err)s"),
+            LOG.warning("Object already got deleted. Error: %(err)s",
                         {'err': err})
         # Cleanup SGs
         self._unset_sg_rules_for_subnets(
@@ -2549,7 +2546,7 @@ class ResourceMappingDriver(api.PolicyDriver, ImplicitResourceOperations,
                     context._plugin_context, subnet_id, router_id)
             except n_exc.InvalidInput:
                 # This exception is not expected.
-                LOG.exception(_LE("adding subnet to router failed"))
+                LOG.exception("adding subnet to router failed")
                 for subnet_id in subnet_ids:
                     self._delete_subnet(context._plugin_context, subnet_id)
                 raise exc.GroupPolicyInternalError()
@@ -2598,7 +2595,7 @@ class ResourceMappingDriver(api.PolicyDriver, ImplicitResourceOperations,
             except n_exc.InvalidInput:
                 # This exception is not expected.
                 # TODO(ivar): find a better way to rollback
-                LOG.exception(_LE("adding subnet to router failed"))
+                LOG.exception("adding subnet to router failed")
                 for subnet_id in subnet_ids:
                     self._delete_subnet(context._plugin_context, subnet_id)
                 raise exc.GroupPolicyInternalError()
@@ -2688,7 +2685,7 @@ class ResourceMappingDriver(api.PolicyDriver, ImplicitResourceOperations,
             pt = context._plugin.get_policy_target(context._plugin_context,
                                                    pt_id)
         except gp_ext.PolicyTargetNotFound:
-            LOG.warning(_LW("PT %s doesn't exist anymore"), pt_id)
+            LOG.warning("PT %s doesn't exist anymore", pt_id)
             return
         try:
             port_id = pt['port_id']
@@ -2702,14 +2699,14 @@ class ResourceMappingDriver(api.PolicyDriver, ImplicitResourceOperations,
             port[ext_sg.SECURITYGROUPS] = new_sg_list
             self._update_port(context._plugin_context, port_id, port)
         except n_exc.PortNotFound:
-            LOG.warning(_LW("Port %s is missing"), port_id)
+            LOG.warning("Port %s is missing", port_id)

     def _disassoc_sgs_from_pt(self, context, pt_id, sg_list):
         try:
             pt = context._plugin.get_policy_target(context._plugin_context,
                                                    pt_id)
         except gp_ext.PolicyTargetNotFound:
-            LOG.warning(_LW("PT %s doesn't exist anymore"), pt_id)
+            LOG.warning("PT %s doesn't exist anymore", pt_id)
             return
         port_id = pt['port_id']
         self._disassoc_sgs_from_port(context._plugin_context, port_id, sg_list)
@@ -2726,7 +2723,7 @@ class ResourceMappingDriver(api.PolicyDriver, ImplicitResourceOperations,
             port[ext_sg.SECURITYGROUPS] = new_sg_list
             self._update_port(plugin_context, port_id, port)
         except n_exc.PortNotFound:
-            LOG.warning(_LW("Port %s is missing"), port_id)
+            LOG.warning("Port %s is missing", port_id)

     def _generate_list_of_sg_from_ptg(self, context, ptg_id):
         ptg = context._plugin.get_policy_target_group(
@@ -17,8 +17,6 @@ from oslo_log import log
 from oslo_utils import excutils
 import stevedore
-from gbpservice._i18n import _LE
-from gbpservice._i18n import _LI
 from gbpservice.neutron.services.grouppolicy.common import exceptions as gp_exc
@@ -33,14 +31,14 @@ class ExtensionManager(stevedore.named.NamedExtensionManager):
         # the order in which the drivers are called.
         self.ordered_ext_drivers = []

-        LOG.info(_LI("Configured extension driver names: %s"),
+        LOG.info("Configured extension driver names: %s",
                  cfg.CONF.group_policy.extension_drivers)
         super(ExtensionManager, self).__init__(
             'gbpservice.neutron.group_policy.extension_drivers',
             cfg.CONF.group_policy.extension_drivers,
             invoke_on_load=True,
             name_order=True)
-        LOG.info(_LI("Loaded extension driver names: %s"), self.names())
+        LOG.info("Loaded extension driver names: %s", self.names())
         self._register_drivers()

     def _register_drivers(self):
@@ -51,13 +49,13 @@ class ExtensionManager(stevedore.named.NamedExtensionManager):
         """
         for ext in self:
             self.ordered_ext_drivers.append(ext)
-        LOG.info(_LI("Registered extension drivers: %s"),
+        LOG.info("Registered extension drivers: %s",
                  [driver.name for driver in self.ordered_ext_drivers])

     def initialize(self):
         # Initialize each driver in the list.
         for driver in self.ordered_ext_drivers:
-            LOG.info(_LI("Initializing extension driver '%s'"), driver.name)
+            LOG.info("Initializing extension driver '%s'", driver.name)
             driver.obj.initialize()

     def extension_aliases(self):
@@ -65,7 +63,7 @@ class ExtensionManager(stevedore.named.NamedExtensionManager):
         for driver in self.ordered_ext_drivers:
             alias = driver.obj.extension_alias
             exts.append(alias)
-            LOG.info(_LI("Got %(alias)s extension from driver '%(drv)s'"),
+            LOG.info("Got %(alias)s extension from driver '%(drv)s'",
                      {'alias': alias, 'drv': driver.name})
         return exts
@@ -77,13 +75,13 @@ class ExtensionManager(stevedore.named.NamedExtensionManager):
             except (gp_exc.GroupPolicyException, n_exc.NeutronException):
                 with excutils.save_and_reraise_exception():
                     LOG.exception(
-                        _LE("Extension driver '%(name)s' "
-                            "failed in %(method)s"),
+                        "Extension driver '%(name)s' "
+                        "failed in %(method)s",
                         {'name': driver.name, 'method': method_name}
                     )
             except Exception:
-                LOG.exception(_LE("Extension driver '%(name)s' "
-                                  "failed in %(method)s"),
+                LOG.exception("Extension driver '%(name)s' "
+                              "failed in %(method)s",
                               {'name': driver.name, 'method': method_name})
                 # We are replacing a non-GBP/non-Neutron exception here
                 raise gp_exc.GroupPolicyDriverError(method=method_name)
@@ -24,8 +24,6 @@ from oslo_log import helpers as log
 from oslo_log import log as logging
 from oslo_utils import excutils
-from gbpservice._i18n import _LE
-from gbpservice._i18n import _LW
 from gbpservice.common import utils as gbp_utils
 from gbpservice.neutron.db.grouppolicy import group_policy_db as gpdb
 from gbpservice.neutron.db.grouppolicy import group_policy_mapping_db
@@ -78,7 +76,7 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
         # plugins are loaded to grab and store plugin.
         servicechain_plugin = directory.get_plugin(pconst.SERVICECHAIN)
         if not servicechain_plugin:
-            LOG.error(_LE("No Servicechain service plugin found."))
+            LOG.error("No Servicechain service plugin found.")
             raise gp_exc.GroupPolicyDeploymentError()
         return servicechain_plugin
@@ -484,8 +482,8 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
                     policy_context)
             except Exception:
                 with excutils.save_and_reraise_exception():
-                    LOG.exception(_LE("create_policy_target_postcommit "
-                                      "failed, deleting policy_target %s"),
+                    LOG.exception("create_policy_target_postcommit "
+                                  "failed, deleting policy_target %s",
                                   result['id'])
                     self.delete_policy_target(context, result['id'])
@@ -536,8 +534,8 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
             self.policy_driver_manager.delete_policy_target_postcommit(
                 policy_context)
         except Exception:
-            LOG.exception(_LE("delete_policy_target_postcommit failed "
-                              "for policy_target %s"),
+            LOG.exception("delete_policy_target_postcommit failed "
+                          "for policy_target %s",
                           policy_target_id)

     @log.log_method_call
@@ -583,8 +581,8 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
                     policy_context)
             except Exception:
                 with excutils.save_and_reraise_exception():
-                    LOG.exception(_LE("create_policy_target_group_postcommit "
-                                      "failed, deleting policy_target_group %s"),
+                    LOG.exception("create_policy_target_group_postcommit "
+                                  "failed, deleting policy_target_group %s",
                                   result['id'])
                     self.delete_policy_target_group(context, result['id'])
@@ -666,7 +664,7 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
                 self.delete_policy_target_group(
                     context, policy_target_group['proxy_group_id'])
             except gpex.PolicyTargetGroupNotFound:
-                LOG.warning(_LW('PTG %s already deleted'),
+                LOG.warning('PTG %s already deleted',
                             policy_target_group['proxy_group_id'])

         with session.begin(subtransactions=True):
@@ -684,8 +682,8 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
             self.policy_driver_manager.delete_policy_target_group_postcommit(
                 policy_context)
         except Exception:
-            LOG.exception(_LE("delete_policy_target_group_postcommit failed "
-                              "for policy_target_group %s"),
+            LOG.exception("delete_policy_target_group_postcommit failed "
+                          "for policy_target_group %s",
                           policy_target_group_id)

     @log.log_method_call
@@ -731,8 +729,8 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
             pdm.create_application_policy_group_postcommit(policy_context)
         except Exception:
             with excutils.save_and_reraise_exception():
-                LOG.exception(_LE("create_application_policy_group_postcommit "
-                                  "failed, deleting APG %s"),
+                LOG.exception("create_application_policy_group_postcommit "
+                              "failed, deleting APG %s",
                               result['id'])
                 self.delete_application_policy_group(context, result['id'])
@@ -792,8 +790,8 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
         try:
             pdm.delete_application_policy_group_postcommit(policy_context)
         except Exception:
-            LOG.exception(_LE("delete_application_policy_group_postcommit "
-                              "failed for application_policy_group %s"),
+            LOG.exception("delete_application_policy_group_postcommit "
+                          "failed for application_policy_group %s",
                           application_policy_group_id)

     @log.log_method_call
@@ -836,8 +834,8 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
                     policy_context)
             except Exception:
                 with excutils.save_and_reraise_exception():
-                    LOG.exception(_LE("create_l2_policy_postcommit "
-                                      "failed, deleting l2_policy %s"),
+                    LOG.exception("create_l2_policy_postcommit "
+                                  "failed, deleting l2_policy %s",
                                   result['id'])
                     self.delete_l2_policy(context, result['id'])
@@ -886,8 +884,8 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
             self.policy_driver_manager.delete_l2_policy_postcommit(
                 policy_context)
         except Exception:
-            LOG.exception(_LE("delete_l2_policy_postcommit failed "
-                              "for l2_policy %s"), l2_policy_id)
+            LOG.exception("delete_l2_policy_postcommit failed "
+                          "for l2_policy %s", l2_policy_id)

     @log.log_method_call
     @db_api.retry_if_session_inactive()
@@ -932,9 +930,9 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
                     policy_context)
             except Exception:
                 with excutils.save_and_reraise_exception():
-                    LOG.exception(_LE(
+                    LOG.exception(
                         "create_network_service_policy_postcommit "
-                        "failed, deleting network_service_policy %s"),
+                        "failed, deleting network_service_policy %s",
                         result['id'])
                     self.delete_network_service_policy(context, result['id'])
@@ -991,9 +989,9 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
             pdm = self.policy_driver_manager
             pdm.delete_network_service_policy_postcommit(policy_context)
         except Exception:
-            LOG.exception(_LE(
+            LOG.exception(
                 "delete_network_service_policy_postcommit failed "
-                "for network_service_policy %s"), network_service_policy_id)
+                "for network_service_policy %s", network_service_policy_id)

     @log.log_method_call
     @db_api.retry_if_session_inactive()
@@ -1036,8 +1034,8 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
                     policy_context)
             except Exception:
                 with excutils.save_and_reraise_exception():
-                    LOG.exception(_LE("create_l3_policy_postcommit "
-                                      "failed, deleting l3_policy %s"),
+                    LOG.exception("create_l3_policy_postcommit "
+                                  "failed, deleting l3_policy %s",
                                   result['id'])
                     self.delete_l3_policy(context, result['id'])
@@ -1091,8 +1089,8 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
             self.policy_driver_manager.delete_l3_policy_postcommit(
                 policy_context)
         except Exception:
-            LOG.exception(_LE("delete_l3_policy_postcommit failed "
-                              "for l3_policy %s"), l3_policy_id)
+            LOG.exception("delete_l3_policy_postcommit failed "
+                          "for l3_policy %s", l3_policy_id)
         return True

     @log.log_method_call
@@ -1137,9 +1135,9 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
                     policy_context)
             except Exception:
                 with excutils.save_and_reraise_exception():
-                    LOG.exception(_LE(
+                    LOG.exception(
                         "policy_driver_manager.create_policy_classifier_postcommit"
-                        " failed, deleting policy_classifier %s"), result['id'])
+                        " failed, deleting policy_classifier %s", result['id'])
                     self.delete_policy_classifier(context, result['id'])

         return self.get_policy_classifier(context, result['id'])
@@ -1188,8 +1186,8 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
             self.policy_driver_manager.delete_policy_classifier_postcommit(
                 policy_context)
         except Exception:
-            LOG.exception(_LE("delete_policy_classifier_postcommit failed "
-                              "for policy_classifier %s"), id)
+            LOG.exception("delete_policy_classifier_postcommit failed "
+                          "for policy_classifier %s", id)

     @log.log_method_call
     @db_api.retry_if_session_inactive()
@@ -1233,9 +1231,9 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
                     policy_context)
             except Exception:
                 with excutils.save_and_reraise_exception():
-                    LOG.exception(_LE(
+                    LOG.exception(
                         "policy_driver_manager.create_policy_action_postcommit "
-                        "failed, deleting policy_action %s"), result['id'])
+                        "failed, deleting policy_action %s", result['id'])
                     self.delete_policy_action(context, result['id'])

         return self.get_policy_action(context, result['id'])
@@ -1284,8 +1282,8 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
             self.policy_driver_manager.delete_policy_action_postcommit(
                 policy_context)
         except Exception:
-            LOG.exception(_LE("delete_policy_action_postcommit failed "
-                              "for policy_action %s"), id)
+            LOG.exception("delete_policy_action_postcommit failed "
+                          "for policy_action %s", id)

     @log.log_method_call
     @db_api.retry_if_session_inactive()
@@ -1327,9 +1325,9 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
                     policy_context)
             except Exception:
                 with excutils.save_and_reraise_exception():
-                    LOG.exception(_LE(
+                    LOG.exception(
                         "policy_driver_manager.create_policy_rule_postcommit"
-                        " failed, deleting policy_rule %s"), result['id'])
+                        " failed, deleting policy_rule %s", result['id'])
                     self.delete_policy_rule(context, result['id'])

         return self.get_policy_rule(context, result['id'])
@@ -1377,8 +1375,8 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
             self.policy_driver_manager.delete_policy_rule_postcommit(
                 policy_context)
         except Exception:
-            LOG.exception(_LE("delete_policy_rule_postcommit failed "
-                              "for policy_rule %s"), id)
+            LOG.exception("delete_policy_rule_postcommit failed "
+                          "for policy_rule %s", id)

     @log.log_method_call
     @db_api.retry_if_session_inactive()
@@ -1421,9 +1419,9 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
                     policy_context)
             except Exception:
                 with excutils.save_and_reraise_exception():
-                    LOG.exception(_LE(
+                    LOG.exception(
                         "policy_driver_manager.create_policy_rule_set_postcommit "
-                        "failed, deleting policy_rule_set %s"), result['id'])
+                        "failed, deleting policy_rule_set %s", result['id'])
                     self.delete_policy_rule_set(context, result['id'])

         return self.get_policy_rule_set(context, result['id'])
@@ -1471,8 +1469,8 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
             self.policy_driver_manager.delete_policy_rule_set_postcommit(
                 policy_context)
         except Exception:
-            LOG.exception(_LE("delete_policy_rule_set_postcommit failed "
-                              "for policy_rule_set %s"), id)
+            LOG.exception("delete_policy_rule_set_postcommit failed "
+                          "for policy_rule_set %s", id)

     @log.log_method_call
     @db_api.retry_if_session_inactive()
@@ -1518,9 +1516,9 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
                     create_external_segment_postcommit(policy_context))
             except Exception:
                 with excutils.save_and_reraise_exception():
-                    LOG.exception(_LE("create_external_segment_postcommit "
+                    LOG.exception("create_external_segment_postcommit "
                                   "failed, deleting external_segment "
-                                  "%s"), result['id'])
+                                  "%s", result['id'])
                     self.delete_external_segment(context, result['id'])

         return self.get_external_segment(context, result['id'])
@@ -1577,8 +1575,8 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
             (self.policy_driver_manager.
              delete_external_segment_postcommit(policy_context))
         except Exception:
-            LOG.exception(_LE("delete_external_segment_postcommit failed "
-                              "for external_segment %s"),
+            LOG.exception("delete_external_segment_postcommit failed "
+                          "for external_segment %s",
                           external_segment_id)
         return True
@@ -1623,9 +1621,9 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
                     create_external_policy_postcommit(policy_context))
             except Exception:
                 with excutils.save_and_reraise_exception():
-                    LOG.exception(_LE("create_external_policy_postcommit "
+                    LOG.exception("create_external_policy_postcommit "
                                   "failed, deleting external_policy "
-                                  "%s"), result['id'])
+                                  "%s", result['id'])
                     self.delete_external_policy(context, result['id'])

         return self.get_external_policy(context, result['id'])
@@ -1678,8 +1676,8 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
             self.policy_driver_manager.delete_external_policy_postcommit(
                 policy_context)
         except Exception:
-            LOG.exception(_LE("delete_external_policy_postcommit failed "
-                              "for external_policy %s"), external_policy_id)
+            LOG.exception("delete_external_policy_postcommit failed "
+                          "for external_policy %s", external_policy_id)

     @log.log_method_call
     @db_api.retry_if_session_inactive()
@@ -1719,9 +1717,9 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
                     create_nat_pool_postcommit(policy_context))
             except Exception:
                 with excutils.save_and_reraise_exception():
-                    LOG.exception(_LE(
+                    LOG.exception(
                         "create_nat_pool_postcommit failed, deleting "
-                        "nat_pool %s"), result['id'])
+                        "nat_pool %s", result['id'])
                     self.delete_nat_pool(context, result['id'])

         return self.get_nat_pool(context, result['id'])
@@ -1766,8 +1764,8 @@ class GroupPolicyPlugin(group_policy_mapping_db.GroupPolicyMappingDbPlugin):
             self.policy_driver_manager.delete_nat_pool_postcommit(
                 policy_context)
         except Exception:
-            LOG.exception(_LE("delete_nat_pool_postcommit failed "
-                              "for nat_pool %s"),
+            LOG.exception("delete_nat_pool_postcommit failed "
+                          "for nat_pool %s",
                           nat_pool_id)

     @log.log_method_call
@@ -18,8 +18,6 @@ from oslo_policy import policy as oslo_policy
 from oslo_utils import excutils
 import stevedore
-from gbpservice._i18n import _LE
-from gbpservice._i18n import _LI
 from gbpservice.neutron.services.grouppolicy.common import exceptions as gp_exc
 from gbpservice.neutron.services.grouppolicy import group_policy_driver_api
@@ -69,14 +67,14 @@ class PolicyDriverManager(stevedore.named.NamedExtensionManager):
         self.ordered_policy_drivers = []
         self.reverse_ordered_policy_drivers = []

-        LOG.info(_LI("Configured policy driver names: %s"),
+        LOG.info("Configured policy driver names: %s",
                  cfg.CONF.group_policy.policy_drivers)
         super(PolicyDriverManager,
               self).__init__('gbpservice.neutron.group_policy.policy_drivers',
                              cfg.CONF.group_policy.policy_drivers,
                              invoke_on_load=True,
                              name_order=True)
-        LOG.info(_LI("Loaded policy driver names: %s"), self.names())
+        LOG.info("Loaded policy driver names: %s", self.names())
         self._register_policy_drivers()

     def _register_policy_drivers(self):
@@ -90,7 +88,7 @@ class PolicyDriverManager(stevedore.named.NamedExtensionManager):
             self.ordered_policy_drivers.append(ext)
         self.reverse_ordered_policy_drivers = self.ordered_policy_drivers[::-1]

-        LOG.info(_LI("Registered policy drivers: %s"),
+        LOG.info("Registered policy drivers: %s",
                  [driver.name for driver in self.ordered_policy_drivers])

     def initialize(self):
@@ -100,7 +98,7 @@ class PolicyDriverManager(stevedore.named.NamedExtensionManager):
         # set it to True such that the drivers can override it.
         self.native_bulk_support = False
         for driver in self.ordered_policy_drivers:
-            LOG.info(_LI("Initializing policy driver '%s'"), driver.name)
+            LOG.info("Initializing policy driver '%s'", driver.name)
             driver.obj.initialize()
             self.native_bulk_support &= getattr(driver.obj,
                                                 'native_bulk_support', True)
@@ -143,15 +141,15 @@ class PolicyDriverManager(stevedore.named.NamedExtensionManager):
                         e, oslo_policy.PolicyNotAuthorized):
                     with excutils.save_and_reraise_exception():
                         LOG.exception(
-                            _LE("Policy driver '%(name)s' failed in"
-                                " %(method)s"),
+                            "Policy driver '%(name)s' failed in"
+                            " %(method)s",
                             {'name': driver.name, 'method': method_name}
                         )
                 else:
                     error = True
                     # We are eating a non-GBP/non-Neutron exception here
                     LOG.exception(
-                        _LE("Policy driver '%(name)s' failed in %(method)s"),
+                        "Policy driver '%(name)s' failed in %(method)s",
                         {'name': driver.name, 'method': method_name})
                     if not continue_on_failure:
                         break
@@ -173,8 +171,8 @@ class PolicyDriverManager(stevedore.named.NamedExtensionManager):
                                 "ensure_tenant, operation will "
                                 "be retried", {'driver': driver.name})
                 else:
-                    LOG.exception(_LE("Policy driver '%s' failed in "
-                                      "ensure_tenant"), driver.name)
+                    LOG.exception("Policy driver '%s' failed in "
+                                  "ensure_tenant", driver.name)
                     raise gp_exc.GroupPolicyDriverError(
                         method="ensure_tenant")
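The two manager classes above are thin wrappers over stevedore's NamedExtensionManager: drivers are loaded from an entry-point namespace, instantiated on load, and kept in configured order, which defines their precedence. A condensed sketch of the pattern (the namespace and driver names below are hypothetical):

    from oslo_log import log as logging
    from stevedore import named

    LOG = logging.getLogger(__name__)

    class DriverManager(named.NamedExtensionManager):
        def __init__(self, namespace, names):
            # invoke_on_load=True instantiates each plugin class;
            # name_order=True preserves the configured ordering.
            super(DriverManager, self).__init__(
                namespace, names, invoke_on_load=True, name_order=True)
            self.ordered_drivers = [ext for ext in self]
            LOG.info("Loaded drivers: %s", self.names())

        def initialize(self):
            # Mirrors the initialize() contract the managers above use.
            for driver in self.ordered_drivers:
                driver.obj.initialize()

    # Usage (hypothetical entry points):
    # mgr = DriverManager('example.gbp.drivers', ['driver_a', 'driver_b'])
    # mgr.initialize()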
@@ -15,7 +15,6 @@ from oslo_config import cfg
 from oslo_log import log as logging
 import stevedore
-from gbpservice._i18n import _LI
 from gbpservice.neutron.services.servicechain.plugins.ncp import config  # noqa
 from gbpservice.neutron.services.servicechain.plugins.ncp import model
@@ -33,13 +32,13 @@ class NodeDriverManager(stevedore.named.NamedExtensionManager):
         # Ordered list of node drivers.
         self.ordered_drivers = []
         names = cfg.CONF.node_composition_plugin.node_drivers
-        LOG.info(_LI("Configured service chain node driver names: %s"), names)
+        LOG.info("Configured service chain node driver names: %s", names)
         super(NodeDriverManager,
               self).__init__(
                   'gbpservice.neutron.servicechain.ncp_drivers', names,
                   invoke_on_load=True, name_order=True)
-        LOG.info(_LI(
-            "Loaded service chain node driver names: %s"), self.names())
+        LOG.info(
+            "Loaded service chain node driver names: %s", self.names())
         self._register_drivers()

     def _register_drivers(self):
@@ -47,14 +46,14 @@ class NodeDriverManager(stevedore.named.NamedExtensionManager):
         for ext in self:
             self.drivers[ext.name] = ext
             self.ordered_drivers.append(ext)
-        LOG.info(_LI("Registered service chain node drivers: %s"),
+        LOG.info("Registered service chain node drivers: %s",
                  [driver.name for driver in self.ordered_drivers])

     def initialize(self):
         """Initialize all the service chain node drivers."""
         self.native_bulk_support = True
         for driver in self.ordered_drivers:
-            LOG.info(_LI("Initializing service chain node drivers '%s'"),
+            LOG.info("Initializing service chain node drivers '%s'",
                      driver.name)
             driver.obj.initialize(driver.name)
             self.native_bulk_support &= getattr(driver.obj,
@@ -24,7 +24,6 @@ from oslo_log import log as logging
 from oslo_serialization import jsonutils
 import sqlalchemy as sa
-from gbpservice._i18n import _LE
 from gbpservice.neutron.services.servicechain.plugins.ncp import (
     exceptions as exc)
 from gbpservice.neutron.services.servicechain.plugins.ncp import driver_base
@@ -374,15 +373,15 @@ class HeatNodeDriver(driver_base.NodeDriverBase):
                             'DELETE_IN_PROGRESS']:
                         return
                 except Exception:
-                    LOG.exception(_LE("Retrieving the stack %(stack)s failed."),
+                    LOG.exception("Retrieving the stack %(stack)s failed.",
                                   {'stack': stack_id})
                     return
                 else:
                     time.sleep(STACK_ACTION_RETRY_WAIT)
                     time_waited = time_waited + STACK_ACTION_RETRY_WAIT
                     if time_waited >= STACK_ACTION_WAIT_TIME:
-                        LOG.error(_LE("Stack %(action)s not completed within "
-                                      "%(wait)s seconds"),
+                        LOG.error("Stack %(action)s not completed within "
+                                  "%(wait)s seconds",
                                   {'action': action,
                                    'wait': STACK_ACTION_WAIT_TIME,
                                    'stack': stack_id})


@@ -31,9 +31,6 @@ from oslo_utils import excutils
 import sqlalchemy as sa
 
 from gbpservice._i18n import _
-from gbpservice._i18n import _LE
-from gbpservice._i18n import _LI
-from gbpservice._i18n import _LW
 from gbpservice.common import utils
 from gbpservice.network.neutronv2 import local_api
 from gbpservice.neutron.services.grouppolicy.common import constants as gconst
@@ -166,9 +163,9 @@ class NFPClientApi(object):
         self.client = n_rpc.get_client(target)
 
     def create_network_function(self, context, network_function):
-        LOG.info(_LI("Sending RPC CREATE NETWORK FUNCTION to Service "
+        LOG.info("Sending RPC CREATE NETWORK FUNCTION to Service "
                  "Orchestrator for tenant:%(tenant_id)s with "
-                 "service profile:%(service_profile_id)s"),
+                 "service profile:%(service_profile_id)s",
                  {'tenant_id': network_function['tenant_id'],
                   'service_profile_id': network_function[
                       'service_profile']['id']})
@@ -181,9 +178,9 @@ class NFPClientApi(object):
     def delete_network_function(self, context, network_function_id,
                                 network_function_data):
-        LOG.info(_LI("Sending RPC DELETE NETWORK FUNCTION to Service "
+        LOG.info("Sending RPC DELETE NETWORK FUNCTION to Service "
                  "Orchestrator for NF:"
-                 "%(network_function_id)s"),
+                 "%(network_function_id)s",
                  {'network_function_id': network_function_id})
         cctxt = self.client.prepare(version=self.RPC_API_VERSION)
         return cctxt.cast(
@@ -193,9 +190,9 @@ class NFPClientApi(object):
             network_function_data=network_function_data)
 
     def update_network_function(self, context, network_function_id, config):
-        LOG.info(_LI("Sending RPC UPDATE NETWORK FUNCTION to Service "
+        LOG.info("Sending RPC UPDATE NETWORK FUNCTION to Service "
                  "Orchestrator for NF:"
-                 "%(network_function_id)s"),
+                 "%(network_function_id)s",
                  {'network_function_id': network_function_id})
         cctxt = self.client.prepare(version=self.RPC_API_VERSION)
         return cctxt.cast(
@@ -215,9 +212,9 @@ class NFPClientApi(object):
     def consumer_ptg_added_notification(self, context, network_function_id,
                                         policy_target_group):
-        LOG.info(_LI("Sending RPC CONSUMER PTG ADDED NOTIFICATION to Service "
+        LOG.info("Sending RPC CONSUMER PTG ADDED NOTIFICATION to Service "
                  "Orchestrator for NF:"
-                 "%(network_function_id)s"),
+                 "%(network_function_id)s",
                  {'network_function_id': network_function_id})
         cctxt = self.client.prepare(version=self.RPC_API_VERSION)
         return cctxt.cast(context,
@@ -227,8 +224,8 @@ class NFPClientApi(object):
     def consumer_ptg_removed_notification(self, context, network_function_id,
                                           policy_target_group):
-        LOG.info(_LI("Sending RPC CONSUMER PTG REMOVED NOTIFICATION to "
-                 " Service Orchestrator for NF:%(network_function_id)s"),
+        LOG.info("Sending RPC CONSUMER PTG REMOVED NOTIFICATION to "
+                 " Service Orchestrator for NF:%(network_function_id)s",
                  {'network_function_id': network_function_id})
         cctxt = self.client.prepare(version=self.RPC_API_VERSION)
         return cctxt.cast(context,
@@ -238,8 +235,8 @@ class NFPClientApi(object):
     def policy_target_added_notification(self, context, network_function_id,
                                          policy_target):
-        LOG.info(_LI("Sending RPC POLICY TARGET ADDED NOTIFICATION to "
-                 "Service Orchestrator for NF:%(network_function_id)s"),
+        LOG.info("Sending RPC POLICY TARGET ADDED NOTIFICATION to "
+                 "Service Orchestrator for NF:%(network_function_id)s",
                  {'network_function_id': network_function_id})
         cctxt = self.client.prepare(version=self.RPC_API_VERSION)
         return cctxt.cast(context,
@@ -249,8 +246,8 @@ class NFPClientApi(object):
     def policy_target_removed_notification(self, context, network_function_id,
                                            policy_target):
-        LOG.info(_LI("Sending RPC POLICY TARGET REMOVED NOTIFICATION to "
-                 "Service Orchestrator for NF:%(network_function_id)s"),
+        LOG.info("Sending RPC POLICY TARGET REMOVED NOTIFICATION to "
+                 "Service Orchestrator for NF:%(network_function_id)s",
                  {'network_function_id': network_function_id})
         cctxt = self.client.prepare(version=self.RPC_API_VERSION)
         return cctxt.cast(context,
@@ -259,7 +256,7 @@ class NFPClientApi(object):
             policy_target=policy_target)
 
     def get_plumbing_info(self, context, node_driver_ctxt):
-        LOG.info(_LI("Sending RPC GET PLUMBING INFO to Service Orchestrator "))
+        LOG.info("Sending RPC GET PLUMBING INFO to Service Orchestrator ")
         request_info = dict(profile=node_driver_ctxt.current_profile,
                             tenant_id=node_driver_ctxt.provider['tenant_id'],
                             provider=node_driver_ctxt.provider)
@@ -388,9 +385,9 @@ class NFPNodeDriver(driver_base.NodeDriverBase):
         nfp_context = NFPContext.get_nfp_context(context.instance['id'])
         if nfp_context:
             if len(nfp_context['sc_gateway_type_nodes']):
-                LOG.info(_LI(
+                LOG.info(
                     "Not requesting plumber for PTs for service type "
-                    "%(service_type)s"), {'service_type': service_type})
+                    "%(service_type)s", {'service_type': service_type})
                 if not nfp_context['update']:
                     nfp_context['sc_gateway_type_nodes'].append(
                         gateway_type_node)
@@ -421,9 +418,9 @@ class NFPNodeDriver(driver_base.NodeDriverBase):
             plumbing_request = self.nfp_notifier.get_plumbing_info(
                 context._plugin_context, context)
-            LOG.info(_LI("Requesting plumber for PTs for "
+            LOG.info("Requesting plumber for PTs for "
                      "service type %(service_type)s with "
-                     "%(plumbing_request)s "),
+                     "%(plumbing_request)s ",
                      {'plumbing_request': plumbing_request,
                       'service_type': service_type})
             return plumbing_request
@@ -508,11 +505,11 @@ class NFPNodeDriver(driver_base.NodeDriverBase):
             context.plugin_session, context.current_node['id'],
             context.instance['id'], network_function_id,
             status, status_details)
-        LOG.info(_LI("Processed create NF in node driver."
+        LOG.info("Processed create NF in node driver."
                  "servicechain_instance_id: %(sci_id)s, "
-                 "servicechain_node_id: %(scn_id)s"), {
+                 "servicechain_node_id: %(scn_id)s", {
                      'sci_id': context.instance['id'],
                      'scn_id': context.current_node['id']})
 
     def _wait_for_node_operation_completion(self, context, network_function_id,
                                             operation):
@@ -618,7 +615,7 @@ class NFPNodeDriver(driver_base.NodeDriverBase):
             self._delete_network_function(context, network_function_id)
         except Exception:
             # NFPContext.clear_nfp_context(context.instance['id'])
-            LOG.exception(_LE("Delete Network service Failed"))
+            LOG.exception("Delete Network service Failed")
             exc_type, exc_value, exc_traceback = sys.exc_info()
             message = "Traceback: %s" % (exc_value)
             LOG.error(message)
@@ -772,8 +769,8 @@ class NFPNodeDriver(driver_base.NodeDriverBase):
             time_waited = time_waited + 5
 
         if network_function:
-            LOG.error(_LE("Delete network function %(network_function)s "
-                      "failed"),
+            LOG.error("Delete network function %(network_function)s "
+                      "failed",
                       {'network_function': network_function_id})
             raise NodeInstanceDeleteFailed()
@@ -794,38 +791,38 @@ class NFPNodeDriver(driver_base.NodeDriverBase):
                       "time waited: %s", (network_function_id, operation,
                       time_waited, network_function['status']))
             if not network_function:
-                LOG.error(_LE("Failed to retrieve network function"))
+                LOG.error("Failed to retrieve network function")
                 eventlet.sleep(5)
                 time_waited = time_waited + 5
                 continue
             else:
                 if time_waited == 0:
-                    LOG.info(_LI("STARTED POLLING for %(operation)s network "
+                    LOG.info("STARTED POLLING for %(operation)s network "
                              "function for NF:%(network_function_id)s "
-                             "with initial result: %(result)s "),
+                             "with initial result: %(result)s ",
                              {'operation': operation,
                               'network_function_id': network_function_id,
                               'result': network_function})
             if (network_function['status'] == nfp_constants.ACTIVE or
                     network_function['status'] == nfp_constants.ERROR):
-                LOG.info(_LI("COMPLETED POLLING for %(operation)s network "
-                         "function for NF:%(network_function_id)s "),
+                LOG.info("COMPLETED POLLING for %(operation)s network "
+                         "function for NF:%(network_function_id)s ",
                          {'network_function_id': network_function_id,
                           'operation': operation})
                 break
             eventlet.sleep(5)
             time_waited = time_waited + 5
-        LOG.info(_LI("Got %(operation)s network function result for NF:"
-                 "%(network_function_id)s with status:%(status)s"),
+        LOG.info("Got %(operation)s network function result for NF:"
+                 "%(network_function_id)s with status:%(status)s",
                  {'network_function_id': network_function_id,
                   'operation': operation,
                   'status': network_function['status']})
         if network_function['status'] != nfp_constants.ACTIVE:
-            LOG.error(_LE("%(operation)s network function:"
+            LOG.error("%(operation)s network function:"
                       "%(network_function)s "
-                      "failed. Status: %(status)s"),
+                      "failed. Status: %(status)s",
                       {'network_function': network_function_id,
                        'status': network_function['status'],
                        'operation': operation})
@@ -852,11 +849,11 @@ class NFPNodeDriver(driver_base.NodeDriverBase):
             return tenant.id
         except k_exceptions.NotFound:
             with excutils.save_and_reraise_exception(reraise=True):
-                LOG.error(_LE('No tenant with name %(tenant)s exists.'),
+                LOG.error('No tenant with name %(tenant)s exists.',
                           {'tenant': tenant})
         except k_exceptions.NoUniqueMatch:
             with excutils.save_and_reraise_exception(reraise=True):
-                LOG.error(_LE('Multiple tenants matches found for %(tenant)s'),
+                LOG.error('Multiple tenants matches found for %(tenant)s',
                           {'tenant': tenant})
 
     def _get_resource_owner_context(self, plugin_context):
@@ -890,11 +887,11 @@ class NFPNodeDriver(driver_base.NodeDriverBase):
                 network_function_id,
                 context.current_node['config']])
         except Exception:
-            LOG.exception(_LE("Update Network service Failed for "
-                          "network function: %(nf_id)s"),
+            LOG.exception("Update Network service Failed for "
+                          "network function: %(nf_id)s",
                           {'nf_id': network_function_id})
         else:
-            LOG.info(_LI("No action to take on update"))
+            LOG.info("No action to take on update")
 
     def _get_service_chain_specs(self, context):
         current_specs = context.relevant_specs
@@ -970,8 +967,8 @@ class NFPNodeDriver(driver_base.NodeDriverBase):
         if (service_details['device_type'] != 'None' and (
             not provider_service_targets or (service_type in
             [pconst.FIREWALL, pconst.VPN] and not consumer_service_targets))):
-            LOG.error(_LE("Service Targets are not created for the Node "
-                      "of service_type %(service_type)s"),
+            LOG.error("Service Targets are not created for the Node "
+                      "of service_type %(service_type)s",
                       {'service_type': service_type})
             raise Exception(_("Service Targets are not created "
                               "for the Node"))
@@ -1097,8 +1094,8 @@ class NFPNodeDriver(driver_base.NodeDriverBase):
                 break
 
         if not redirect_prs:
-            LOG.error(_LE("Redirect rule doesn't exist in policy target rule "
-                      " set"))
+            LOG.error("Redirect rule doesn't exist in policy target rule "
+                      " set")
             return consuming_ptgs_details, consuming_eps_details
 
         consuming_ptg_ids = redirect_prs['consuming_policy_target_groups']
@@ -1224,9 +1221,9 @@ class NFPNodeDriver(driver_base.NodeDriverBase):
 
     def _create_network_function(self, context):
         nfp_create_nf_data = self._get_nfp_network_function(context)
-        LOG.info(_LI("Received Call CREATE NETWORK FUNCTION for tenant: "
+        LOG.info("Received Call CREATE NETWORK FUNCTION for tenant: "
                  "%(tenant_id)s with service profile:"
-                 "%(service_profile)s"),
+                 "%(service_profile)s",
                  {'tenant_id': nfp_create_nf_data['tenant_id'],
                   'service_profile': nfp_create_nf_data['service_profile']})
         self._queue_notification(context, 'create_network_function',
@@ -1239,9 +1236,9 @@ class NFPNodeDriver(driver_base.NodeDriverBase):
         if nfp_delete_nf_data['consumer'].get('pt'):
             self._detach_port_from_pts(context,
                                        nfp_delete_nf_data['consumer']['pt'])
-        LOG.info(_LI("Received Call DELETE NETWORK FUNCTION for tenant: "
+        LOG.info("Received Call DELETE NETWORK FUNCTION for tenant: "
                  "%(tenant_id)s with service profile:"
-                 "%(service_profile)s"),
+                 "%(service_profile)s",
                  {'tenant_id': nfp_delete_nf_data['tenant_id'],
                   'service_profile': nfp_delete_nf_data['service_profile']})
         self._queue_notification(context, 'delete_network_function',
@@ -1261,8 +1258,8 @@ class NFPNodeDriver(driver_base.NodeDriverBase):
                     pt['port_id']))
             except Exception:
-                LOG.warning(_LW("Failed to disassociate port from"
-                            " pt: %(pt)s, Error: %(exc)s"), {'pt': pt, 'exc': exc})
+                LOG.warning("Failed to disassociate port from"
+                            " pt: %(pt)s, Error: %(exc)s", {'pt': pt, 'exc': exc})
 
     def _update_ptg(self, context):
         if hasattr(context, 'provider') and context.provider['description']:
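The `_wait_for_*_completion` hunks above share one poll-and-timeout shape: fetch the status, stop on ACTIVE or ERROR, otherwise sleep and retry. A condensed, self-contained sketch of that loop, with `time.sleep` standing in for `eventlet.sleep` and a caller-supplied status function as the assumption:

    import time

    ACTIVE, ERROR = 'ACTIVE', 'ERROR'

    def wait_for_nf_operation(get_status, nf_id, operation,
                              interval=5, timeout=300):
        # Poll until the network function reaches a terminal state.
        time_waited = 0
        while time_waited < timeout:
            status = get_status(nf_id)  # assumed: returns e.g. 'PENDING'
            if status in (ACTIVE, ERROR):
                return status
            time.sleep(interval)
            time_waited += interval
        raise RuntimeError("%s timed out for NF:%s" % (operation, nf_id))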


@@ -14,7 +14,6 @@ from heatclient import client as heat_client
 from heatclient import exc as heat_exc
 from oslo_log import log as logging
 
-from gbpservice._i18n import _LW
 
 LOG = logging.getLogger(__name__)
@@ -56,9 +55,9 @@ class HeatClient(object):
         try:
             self.stacks.delete(stack_id)
         except heat_exc.HTTPNotFound:
-            LOG.warning(_LW(
+            LOG.warning(
                 "Stack %(stack)s created by service chain driver is "
-                "not found at cleanup"), {'stack': stack_id})
+                "not found at cleanup", {'stack': stack_id})
 
     def get(self, stack_id):
         return self.stacks.get(stack_id)


@@ -16,7 +16,6 @@ from oslo_config import cfg
 from oslo_log import log as logging
 from oslo_utils import excutils
 
-from gbpservice._i18n import _LE
 from gbpservice.common import utils
 from gbpservice.neutron.services.servicechain.plugins.ncp.node_plumbers import(
     traffic_stitching_plumber as tscp)
@@ -77,10 +76,10 @@ class AdminOwnedResourcesApicTSCP(tscp.TrafficStitchingPlumber):
             return tenant.id
         except k_exceptions.NotFound:
             with excutils.save_and_reraise_exception(reraise=True):
-                LOG.error(_LE('No tenant with name %s exists.'), tenant)
+                LOG.error('No tenant with name %s exists.', tenant)
         except k_exceptions.NoUniqueMatch:
             with excutils.save_and_reraise_exception(reraise=True):
-                LOG.error(_LE('Multiple tenants matches found for %s'), tenant)
+                LOG.error('Multiple tenants matches found for %s', tenant)
 
     def _get_resource_owner_context(self, context):
         resource_owner_context = context.elevated()


@@ -15,9 +15,6 @@ from neutron_lib.plugins import directory
 from oslo_config import cfg
 from oslo_log import log as logging
 
-from gbpservice._i18n import _LE
-from gbpservice._i18n import _LI
-from gbpservice._i18n import _LW
 from gbpservice.neutron.extensions import driver_proxy_group as pg_ext
 from gbpservice.neutron.extensions import group_policy
 from gbpservice.neutron.services.grouppolicy.common import exceptions as exc
@@ -47,8 +44,8 @@ class TrafficStitchingPlumber(plumber_base.NodePlumberBase):
         # Verify that proxy_group extension is loaded
         if pg_ext.PROXY_GROUP not in cfg.CONF.group_policy.extension_drivers:
-            LOG.error(_LE("proxy_group GBP driver extension is mandatory for "
-                      "traffic stitching plumber."))
+            LOG.error("proxy_group GBP driver extension is mandatory for "
+                      "traffic stitching plumber.")
             raise exc.GroupPolicyDeploymentError()
 
     @property
@@ -82,7 +79,7 @@ class TrafficStitchingPlumber(plumber_base.NodePlumberBase):
                 management, 'management')
         # Create proper PTs based on the service type
         jump_ptg = None
-        LOG.info(_LI("Plumbing service of type '%s'"),
+        LOG.info("Plumbing service of type '%s'",
                  info['plumbing_type'])
         if info['plumbing_type'] == common.PLUMBING_TYPE_ENDPOINT:
             # No stitching needed, only provider side PT is created.
@@ -124,7 +121,7 @@ class TrafficStitchingPlumber(plumber_base.NodePlumberBase):
                 context, part_context, info['consumer'],
                 jump_ptg, 'consumer')
         else:
-            LOG.warning(_LW("Unsupported plumbing type %s"),
+            LOG.warning("Unsupported plumbing type %s",
                         info['plumbing_type'])
         # Replace current "provider" with jump ptg if needed
         provider = jump_ptg or provider


@@ -18,9 +18,6 @@ from oslo_log import helpers as log
 from oslo_log import log as logging
 from oslo_utils import excutils
 
-from gbpservice._i18n import _LE
-from gbpservice._i18n import _LI
-from gbpservice._i18n import _LW
 from gbpservice.common import utils
 from gbpservice.neutron.db import servicechain_db
 from gbpservice.neutron.services.grouppolicy.common import constants as gp_cts
@@ -66,7 +63,7 @@ class NodeCompositionPlugin(servicechain_db.ServiceChainDbPlugin,
             self.plumber = utils.load_plugin(
                 PLUMBER_NAMESPACE, plumber_klass)
         self.plumber.initialize()
-        LOG.info(_LI("Initialized node plumber '%s'"), plumber_klass)
+        LOG.info("Initialized node plumber '%s'", plumber_klass)
 
     @log.log_method_call
     def create_servicechain_instance(self, context, servicechain_instance):
@@ -96,8 +93,8 @@ class NodeCompositionPlugin(servicechain_db.ServiceChainDbPlugin,
             except Exception:
                 # Some node could not be deployed
                 with excutils.save_and_reraise_exception():
-                    LOG.error(_LE("Node deployment failed, "
-                              "deleting servicechain_instance %s"),
+                    LOG.error("Node deployment failed, "
+                              "deleting servicechain_instance %s",
                               instance['id'])
                     self.delete_servicechain_instance(context, instance['id'])
@@ -127,8 +124,8 @@ class NodeCompositionPlugin(servicechain_db.ServiceChainDbPlugin,
             except Exception:
                 # Some node could not be deployed
                 with excutils.save_and_reraise_exception():
-                    LOG.error(_LE("Node deployment failed, "
-                              "servicechain_instance %s is in ERROR state"),
+                    LOG.error("Node deployment failed, "
+                              "servicechain_instance %s is in ERROR state",
                               instance['id'])
 
     @log.log_method_call
@@ -257,7 +254,7 @@ class NodeCompositionPlugin(servicechain_db.ServiceChainDbPlugin,
                 try:
                     update['driver'].update(update['context'])
                 except exc.NodeDriverError as ex:
-                    LOG.error(_LE("Node Update failed, %s"),
+                    LOG.error("Node Update failed, %s",
                               ex.message)
 
         return updated_sc_node
@@ -398,8 +395,8 @@ class NodeCompositionPlugin(servicechain_db.ServiceChainDbPlugin,
                     old_policy_target_group,
                     current_policy_target_group)
             except exc.NodeDriverError as ex:
-                LOG.error(_LE("Node Update on policy target group modification"
-                          " failed, %s"), ex.message)
+                LOG.error("Node Update on policy target group modification"
+                          " failed, %s", ex.message)
 
     def _update_chains_pt_modified(self, context, policy_target, instance_id,
                                    action):
@@ -412,8 +409,8 @@ class NodeCompositionPlugin(servicechain_db.ServiceChainDbPlugin,
                         'update_policy_target_' + action)(
                             update['context'], policy_target)
             except exc.NodeDriverError as ex:
-                LOG.error(_LE("Node Update on policy target modification "
-                          "failed, %s"), ex.message)
+                LOG.error("Node Update on policy target modification "
+                          "failed, %s", ex.message)
 
     def _update_chains_consumer_modified(self, context, policy_target_group,
                                          instance_id, action):
@@ -426,9 +423,9 @@ class NodeCompositionPlugin(servicechain_db.ServiceChainDbPlugin,
                     'update_node_consumer_ptg_' + action)(
                         update['context'], policy_target_group)
             except exc.NodeDriverError as ex:
-                LOG.error(_LE(
+                LOG.error(
                     "Node Update on policy target group modification "
-                    "failed, %s"), ex.message)
+                    "failed, %s", ex.message)
 
     def notify_chain_parameters_updated(self, context,
                                         servicechain_instance_id):
@@ -447,8 +444,8 @@ class NodeCompositionPlugin(servicechain_db.ServiceChainDbPlugin,
                 getattr(update['driver'],
                         'notify_chain_parameters_updated')(update['context'])
             except exc.NodeDriverError as ex:
-                LOG.error(_LE("Node Update on GBP parameter update "
-                          "failed, %s"), ex.message)
+                LOG.error("Node Update on GBP parameter update "
+                          "failed, %s", ex.message)
 
     def _get_instance_nodes(self, context, instance):
         context = utils.admin_context(context)
@@ -500,7 +497,7 @@ class NodeCompositionPlugin(servicechain_db.ServiceChainDbPlugin,
                 deployers = self._get_scheduled_drivers(context, resource,
                                                         'get')
             except Exception:
-                LOG.warning(_LW("Failed to get node driver"))
+                LOG.warning("Failed to get node driver")
 
         # Invoke drivers only if status attributes are requested
         if not fields or STATUS_SET.intersection(set(fields)):
@@ -548,8 +545,8 @@ class NodeCompositionPlugin(servicechain_db.ServiceChainDbPlugin,
                 result['status'] = 'ACTIVE'
                 result['status_details'] = 'node deployment completed'
             except Exception as exc:
-                LOG.error(_LE("Failed to get servicechain instance status "
-                          "from node driver, Error: %(exc)s"), {'exc': exc})
+                LOG.error("Failed to get servicechain instance status "
+                          "from node driver, Error: %(exc)s", {'exc': exc})
                 return
             return result
         result = {'status': 'ACTIVE', 'status_details': ''}
@@ -574,7 +571,7 @@ class NodeCompositionPlugin(servicechain_db.ServiceChainDbPlugin,
             try:
                 driver.delete(destroy['context'])
             except exc.NodeDriverError:
-                LOG.error(_LE("Node destroy failed, for node %s "),
+                LOG.error("Node destroy failed, for node %s ",
                           driver['context'].current_node['id'])
             except Exception as e:
                 if db_api.is_retriable(e):


@@ -14,7 +14,6 @@ from neutron.plugins.common import constants as pconst
 from neutron_lib.plugins import directory
 from oslo_log import log as logging
 
-from gbpservice._i18n import _LE
 from gbpservice.neutron.services.grouppolicy.common import exceptions as gp_exc
 from gbpservice.neutron.services.grouppolicy import plugin as gbp_plugin
@@ -42,7 +41,7 @@ class SharingMixin(object):
         # plugins are loaded to grab and store plugin.
         gbp_plugin = directory.get_plugin(pconst.GROUP_POLICY)
         if not gbp_plugin:
-            LOG.error(_LE("No group policy service plugin found."))
+            LOG.error("No group policy service plugin found.")
             raise gp_exc.GroupPolicyDeploymentError()
         return gbp_plugin


@@ -9,7 +9,6 @@
 # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
 # License for the specific language governing permissions and limitations
 # under the License.
-from neutron._i18n import _LI
 from neutron.agent import securitygroups_rpc
 from neutron.api import extensions
 from neutron.quota import resource
@@ -21,11 +20,6 @@ from gbpservice.network.neutronv2 import local_api
 
 # The following is to avoid excessive logging in the UTs
-extensions._LW = extensions._LI
-l3_agent_scheduler._LW = _LI
-securitygroups_rpc._LW = securitygroups_rpc._LI
-resource_registry._LW = resource_registry._LI
-local_api._LW = _LI
 extensions.LOG.warning = extensions.LOG.info
 resource_registry.LOG.warning = resource_registry.LOG.info
 l3_agent_scheduler.LOG.warning = l3_agent_scheduler.LOG.info
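With the translation markers gone, the unit-test noise suppression no longer needs the `_LW = _LI` aliases; rebinding a module logger's `warning` to `info` is enough on its own. A small, self-contained illustration of the surviving technique (module name is hypothetical):

    import logging

    LOG = logging.getLogger('chatty_module')

    # Downgrade warnings to info so they do not clutter captured UT output.
    LOG.warning = LOG.info
    LOG.warning("emitted at INFO level now: %s", "demo")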


@@ -27,11 +27,8 @@ if not hasattr(sa_utils, '_get_unique_keys'):
     sa_utils._get_unique_keys = sa_utils.get_unique_keys
 
-from neutron._i18n import _LI
-
 from gbpservice.neutron.plugins.ml2plus.drivers.apic_aim import cache
 
 # The following is to avoid excessive logging in the UTs
-cache._LW = _LI
 cache.LOG.warning = cache.LOG.info


@@ -17,7 +17,6 @@ from neutron.tests.unit.plugins.ml2.drivers import (
     mechanism_logger as ml2_logger)
 from oslo_log import log
 
-from gbpservice._i18n import _LI
 from gbpservice.neutron.plugins.ml2plus import driver_api
 
 LOG = log.getLogger(__name__)
@@ -31,10 +30,10 @@ class LoggerPlusMechanismDriver(driver_api.MechanismDriver,
     """
 
     def initialize(self):
-        LOG.info(_LI("initialize called"))
+        LOG.info("initialize called")
 
     def ensure_tenant(self, plugin_context, tenant_id):
-        LOG.info(_LI("ensure_tenant called with tenant_id %s"), tenant_id)
+        LOG.info("ensure_tenant called with tenant_id %s", tenant_id)
 
     def _log_subnetpool_call(self, method_name, context):
         LOG.info(_("%(method)s called with subnetpool settings %(current)s "


@@ -18,7 +18,6 @@ from oslo_config import cfg
 from oslo_log import log as logging
 import webob.exc
 
-from gbpservice._i18n import _LW
 from gbpservice.neutron.db.grouppolicy import group_policy_mapping_db as gpmdb
 from gbpservice.neutron.extensions import group_policy as gpolicy
 from gbpservice.neutron.services.grouppolicy import config
@@ -164,7 +163,7 @@ class GroupPolicyPluginTestCase(GroupPolicyPluginTestBase):
 
     def tearDown(self):
         policy_drivers = cfg.CONF.group_policy.policy_drivers
-        LOG.warning(_LW("PDs used in this test: %s"),
+        LOG.warning("PDs used in this test: %s",
                     policy_drivers)
         # Always reset configuration to dummy driver. Any
         # test which requires to configure a different


@@ -17,7 +17,6 @@ from oslo_config import cfg
 from oslo_log import log as logging
 
 from gbpservice._i18n import _
-from gbpservice._i18n import _LE
 
 LOG = logging.getLogger(__name__)
@@ -66,9 +65,9 @@ class NFPException(Exception):
                 exc_info = sys.exc_info()
                 # kwargs doesn't match a variable in the message
                 # log the issue and the kwargs
-                LOG.exception(_LE('Exception in string format operation'))
+                LOG.exception('Exception in string format operation')
                 for name, value in kwargs.items():
-                    LOG.error(_LE("%(name)s: %(value)s"),
+                    LOG.error("%(name)s: %(value)s",
                               {'name': name, 'value': value})
                 if CONF.fatal_exception_format_errors:
                     six.reraise(*exc_info)
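The class around this hunk follows the usual OpenStack defensive-formatting pattern: try `message % kwargs`, and on a mismatch log the bad kwargs instead of raising from the constructor. A trimmed sketch of that pattern; the `fatal_exception_format_errors` config check is omitted here for brevity, and the class body is reconstructed from context rather than copied:

    import logging

    LOG = logging.getLogger(__name__)

    class NFPException(Exception):
        message = "An unknown exception occurred."

        def __init__(self, **kwargs):
            try:
                msg = self.message % kwargs
            except Exception:
                # kwargs doesn't match a variable in the message;
                # log the issue and the kwargs, then fall back.
                LOG.exception('Exception in string format operation')
                for name, value in kwargs.items():
                    LOG.error("%(name)s: %(value)s",
                              {'name': name, 'value': value})
                msg = self.message
            super(NFPException, self).__init__(msg)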


@@ -12,7 +12,6 @@
 from heatclient import client as heat_client
 from heatclient import exc as heat_exc
 
-from gbpservice._i18n import _LW
 from gbpservice.nfp.core import log as nfp_logging
 
 LOG = nfp_logging.getLogger(__name__)
@@ -64,8 +63,8 @@ class HeatClient(object):
         try:
             self.stacks.delete(stack_id)
         except heat_exc.HTTPNotFound:
-            LOG.warning(_LW("Stack %(stack)s created by service chain driver "
-                        "is not found at cleanup"), {'stack': stack_id})
+            LOG.warning("Stack %(stack)s created by service chain driver "
+                        "is not found at cleanup", {'stack': stack_id})
 
     def get(self, stack_id):
         return self.stacks.get(stack_id)
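Both `HeatClient` wrappers touched by this commit treat a missing stack as a benign race during cleanup: `HTTPNotFound` on delete is logged as a warning and swallowed rather than propagated. Roughly, under the same imports the file already uses (the free function is an illustrative restructuring, not the class method itself):

    from heatclient import exc as heat_exc

    def delete_stack(stacks, stack_id, log):
        # A stack that is already gone is not an error during cleanup.
        try:
            stacks.delete(stack_id)
        except heat_exc.HTTPNotFound:
            log.warning("Stack %(stack)s created by service chain driver "
                        "is not found at cleanup", {'stack': stack_id})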


@@ -21,9 +21,6 @@ from oslo_config import cfg
 from oslo_serialization import jsonutils
 import yaml
 
-from gbpservice._i18n import _LE
-from gbpservice._i18n import _LI
-from gbpservice._i18n import _LW
 from gbpservice.neutron.services.grouppolicy.common import constants as gconst
 from gbpservice.neutron.services.servicechain.plugins.ncp import plumber_base
 from gbpservice.nfp.common import constants as nfp_constants
@@ -205,7 +202,7 @@ class HeatDriver(object):
             try:
                 self._assign_admin_user_to_project(tenant_id)
             except Exception:
-                LOG.exception(_LE("Failed to assign admin user to project"))
+                LOG.exception("Failed to assign admin user to project")
                 return None
         '''
         nfp_context = module_context.get()
@@ -223,7 +220,7 @@ class HeatDriver(object):
                 auth_token=auth_token,
                 timeout_mins=timeout_mins)
         except Exception:
-            LOG.exception(_LE("Failed to create heatclient object"))
+            LOG.exception("Failed to create heatclient object")
             return None
 
         return heat_client
@@ -318,8 +315,8 @@ class HeatDriver(object):
                 provider_subnet = subnet
                 break
         if not provider_subnet:
-            LOG.error(_LE("Unable to get provider subnet for provider "
-                      "policy target group %(provider_ptg)s"),
+            LOG.error("Unable to get provider subnet for provider "
+                      "policy target group %(provider_ptg)s",
                       {"provider_ptg": provider})
             return lb_vip, lb_vip_name
         if service_type == pconst.LOADBALANCERV2:
@@ -520,8 +517,8 @@ class HeatDriver(object):
                 break
 
         if not redirect_prs:
-            LOG.error(_LE("Redirect rule doesn't exist in policy target rule "
-                      " set"))
+            LOG.error("Redirect rule doesn't exist in policy target rule "
+                      " set")
             return None, None
         return (redirect_prs['consuming_policy_target_groups'],
                 redirect_prs['consuming_external_policies'])
@@ -663,8 +660,8 @@ class HeatDriver(object):
                 provider_cidr = subnet['cidr']
                 break
         if not provider_cidr:
-            LOG.error(_LE("Unable to get provider cidr for provider "
-                      "policy target group %(provider_ptg)s"),
+            LOG.error("Unable to get provider cidr for provider "
+                      "policy target group %(provider_ptg)s",
                       {"provider_ptg": provider})
             return None
@@ -747,7 +744,7 @@ class HeatDriver(object):
         svc_mgmt_ptgs = gcm.retry(self.gbp_client.get_policy_target_groups,
                                   auth_token, filters)
         if not svc_mgmt_ptgs:
-            LOG.error(_LE("Service Management Group is not created by Admin"))
+            LOG.error("Service Management Group is not created by Admin")
             return None
         else:
             mgmt_subnet_id = svc_mgmt_ptgs[0]['subnets'][0]
@@ -843,8 +840,8 @@ class HeatDriver(object):
                     auth_token,
                     filters={'port_id': [consumer_port['id']]})
                 if not stitching_pts:
-                    LOG.error(_LE("Policy target is not created for the "
-                              "stitching port"))
+                    LOG.error("Policy target is not created for the "
+                              "stitching port")
                     return None
                 stitching_ptg_id = (
                     stitching_pts[0]['policy_target_group_id'])
@@ -901,9 +898,9 @@ class HeatDriver(object):
                               stack_template_str.startswith('{') else
                               yaml.load(stack_template_str))
         except Exception:
-            LOG.error(_LE(
+            LOG.error(
                 "Unable to load stack template for service chain "
-                "node: %(node_id)s"), {'node_id': service_chain_node})
+                "node: %(node_id)s", {'node_id': service_chain_node})
             return None, None
         config_param_values = service_chain_instance.get(
             'config_param_values', '{}')
@@ -911,7 +908,7 @@ class HeatDriver(object):
         try:
             config_param_values = jsonutils.loads(config_param_values)
         except Exception:
-            LOG.error(_LE("Unable to load config parameters"))
+            LOG.error("Unable to load config parameters")
             return None, None
 
         is_template_aws_version = stack_template.get(
@@ -1010,8 +1007,8 @@ class HeatDriver(object):
             if parameter in config_param_values:
                 stack_params[parameter] = config_param_values[parameter]
 
-        LOG.info(_LI('Final stack_template : %(stack_data)s, '
-                 'stack_params : %(params)s'),
+        LOG.info('Final stack_template : %(stack_data)s, '
+                 'stack_params : %(params)s',
                  {'stack_data': stack_template, 'params': stack_params})
         return (stack_template, stack_params)
@@ -1021,9 +1018,9 @@ class HeatDriver(object):
             self.neutron_client.get_networks,
             token, filters={'name': [INTERNET_OUT_EXT_NET_NAME]})
         if not ext_net:
-            LOG.error(_LE("'internet_out_network_name' not configured"
+            LOG.error("'internet_out_network_name' not configured"
                       " in [heat_driver] or Network %(network)s is"
-                      " not found"),
+                      " not found",
                       {'network': INTERNET_OUT_EXT_NET_NAME})
             return None
         # There is a case where consumer port has multiple fips
@@ -1035,8 +1032,8 @@ class HeatDriver(object):
             return ncm.retry(self.neutron_client.get_floating_ips, token,
                              **filters)[0]['floating_ip_address']
         except Exception:
-            LOG.error(_LE("Floating IP for VPN Service has either exhausted"
-                      " or has been disassociated Manually"))
+            LOG.error("Floating IP for VPN Service has either exhausted"
+                      " or has been disassociated Manually")
             return None
 
     def _update_node_config(self, auth_token, tenant_id, service_profile,
@@ -1057,7 +1054,7 @@ class HeatDriver(object):
                 provider_subnet = subnet
                 break
         if not provider_cidr:
-            LOG.error(_LE("No provider cidr availabale"))
+            LOG.error("No provider cidr availabale")
            return None, None
         service_type = service_profile['service_type']
         service_details = transport.parse_service_flavor_string(
@@ -1072,9 +1069,9 @@ class HeatDriver(object):
                               stack_template_str.startswith('{') else
                               yaml.load(stack_template_str))
         except Exception:
-            LOG.error(_LE(
+            LOG.error(
                 "Unable to load stack template for service chain "
-                "node: %(node_id)s"), {'node_id': service_chain_node})
+                "node: %(node_id)s", {'node_id': service_chain_node})
             return None, None
         config_param_values = service_chain_instance.get(
             'config_param_values', '{}')
@@ -1082,7 +1079,7 @@ class HeatDriver(object):
         try:
             config_param_values = jsonutils.loads(config_param_values)
         except Exception:
-            LOG.error(_LE("Unable to load config parameters"))
+            LOG.error("Unable to load config parameters")
             return None, None
 
         is_template_aws_version = stack_template.get(
@@ -1200,8 +1197,8 @@ class HeatDriver(object):
                     auth_token,
                     filters={'port_id': [consumer_port['id']]})
                 if not stitching_pts:
-                    LOG.error(_LE("Policy target is not created for the "
-                              "stitching port"))
+                    LOG.error("Policy target is not created for the "
+                              "stitching port")
                     return None, None
                 stitching_ptg_id = (
                     stitching_pts[0]['policy_target_group_id'])
@@ -1219,9 +1216,9 @@ class HeatDriver(object):
                     auth_token,
                     filters={'name': [INTERNET_OUT_EXT_NET_NAME]})
                 if not ext_net:
-                    LOG.error(_LE("'internet_out_network_name' not configured"
+                    LOG.error("'internet_out_network_name' not configured"
                               " in [heat_driver] or Network %(network)s is"
-                              " not found"),
+                              " not found",
                               {'network': INTERNET_OUT_EXT_NET_NAME})
                     return None, None
                 filters = {'port_id': [consumer_port['id']],
@@ -1231,8 +1228,8 @@ class HeatDriver(object):
                     self.neutron_client.get_floating_ips,
                     auth_token, filters=filters)
                 if not floatingips:
-                    LOG.error(_LE("Floating IP for VPN Service has been "
-                              "disassociated Manually"))
+                    LOG.error("Floating IP for VPN Service has been "
+                              "disassociated Manually")
                     return None, None
                 for fip in floatingips:
                     if consumer_port['fixed_ips'][0]['ip_address'] == fip[
@@ -1253,9 +1250,9 @@ class HeatDriver(object):
                         ';mgmt_gw_ip=' + mgmt_gw_ip +
                         ';network_function_id=' + network_function['id'])
                 except Exception as e:
-                    LOG.error(_LE("Problem in preparing description, some of "
+                    LOG.error("Problem in preparing description, some of "
                               "the fields might not have initialized. "
-                              "Error: %(error)s"), {'error': e})
+                              "Error: %(error)s", {'error': e})
                     return None, None
                 siteconn_keys = self._get_site_conn_keys(
                     stack_template[resources_key],
@@ -1287,8 +1284,8 @@ class HeatDriver(object):
             if parameter in config_param_values:
                 stack_params[parameter] = config_param_values[parameter]
 
-        LOG.info(_LI('Final stack_template : %(stack_data)s, '
-                 'stack_params : %(params)s'),
+        LOG.info('Final stack_template : %(stack_data)s, '
+                 'stack_params : %(params)s',
                  {'stack_data': stack_template, 'params': stack_params})
         return (stack_template, stack_params)
@@ -1392,7 +1389,7 @@ class HeatDriver(object):
                 admin_token,
                 policy_target['policy_target_group_id']))
         elif port_classification == nfp_constants.PROVIDER:
-            LOG.info(_LI("provider info: %(p_info)s"),
+            LOG.info("provider info: %(p_info)s",
                      {'p_info': port_id})
             with nfp_ctx_mgr.NeutronContextManager as ncm:
                 provider_port = ncm.retry(self.neutron_client.get_port,
@@ -1438,7 +1435,7 @@ class HeatDriver(object):
             elif stack.stack_status == 'CREATE_COMPLETE':
                 return
             elif stack.stack_status == 'DELETE_COMPLETE':
-                LOG.info(_LI("Stack %(stack)s is deleted"),
+                LOG.info("Stack %(stack)s is deleted",
                          {'stack': stack_id})
                 if action == "delete":
                     return
@@ -1453,17 +1450,17 @@ class HeatDriver(object):
                         'DELETE_IN_PROGRESS']:
                     return
             except heat_exc.HTTPNotFound:
-                LOG.warning(_LW(
+                LOG.warning(
                     "Stack %(stack)s created by service chain "
                     "driver is not found while waiting for %(action)s "
-                    "to complete"),
+                    "to complete",
                     {'stack': stack_id, 'action': action})
                 if action == "create" or action == "update":
                     operation_failed = True
                 else:
                     return
             except Exception:
-                LOG.exception(_LE("Retrieving the stack %(stack)s failed."),
+                LOG.exception("Retrieving the stack %(stack)s failed.",
                               {'stack': stack_id})
                 if action == "create" or action == "update":
                     operation_failed = True
@@ -1474,8 +1471,8 @@ class HeatDriver(object):
                 if ignore_error:
                     return
                 else:
-                    LOG.error(_LE("Stack %(stack_name)s %(action)s failed for "
-                              "tenant %(stack_owner)s"),
+                    LOG.error("Stack %(stack_name)s %(action)s failed for "
+                              "tenant %(stack_owner)s",
                               {'stack_name': stack.stack_name,
                                'stack_owner': stack.stack_owner,
                                'action': action})
@@ -1484,8 +1481,8 @@ class HeatDriver(object):
                 time.sleep(STACK_ACTION_RETRY_WAIT)
                 time_waited = time_waited + STACK_ACTION_RETRY_WAIT
                 if time_waited >= wait_timeout:
-                    LOG.error(_LE("Stack %(action)s not completed within "
-                              "%(wait)s seconds"),
+                    LOG.error("Stack %(action)s not completed within "
+                              "%(wait)s seconds",
                               {'action': action,
                                'wait': wait_timeout,
                                'stack': stack_id})
@@ -1499,10 +1496,10 @@ class HeatDriver(object):
                         pass
                     return
                 else:
-                    LOG.error(_LE(
+                    LOG.error(
                         "Stack %(stack_name)s %(action)s not "
                         "completed within %(time)s seconds where "
-                        "stack owner is %(stack_owner)s"),
+                        "stack owner is %(stack_owner)s",
                         {'stack_name': stack.stack_name,
                          'action': action,
                          'time': wait_timeout,
@@ -1529,7 +1526,7 @@ class HeatDriver(object):
             elif stack.stack_status == 'UPDATE_COMPLETE':
                 return success_status
             elif stack.stack_status == 'DELETE_COMPLETE':
-                LOG.info(_LI("Stack %(stack)s is deleted"),
+                LOG.info("Stack %(stack)s is deleted",
                          {'stack': stack_id})
                 return failure_status
             elif stack.stack_status == 'CREATE_FAILED':
@@ -1562,7 +1559,7 @@ class HeatDriver(object):
             elif stack.stack_status == 'UPDATE_COMPLETE':
                 return success_status
             elif stack.stack_status == 'DELETE_COMPLETE':
-                LOG.info(_LI("Stack %(stack)s is deleted"),
+                LOG.info("Stack %(stack)s is deleted",
                          {'stack': stack_id})
                 return failure_status
             elif stack.stack_status == 'CREATE_FAILED':
@@ -1589,7 +1586,7 @@ class HeatDriver(object):
             elif stack.stack_status == 'CREATE_COMPLETE':
                 return failure_status
             elif stack.stack_status == 'DELETE_COMPLETE':
-                LOG.info(_LI("Stack %(stack)s is deleted"),
+                LOG.info("Stack %(stack)s is deleted",
                          {'stack': stack_id})
                 if network_function:
                     self._post_stack_cleanup(network_function)
@@ -1683,8 +1680,8 @@ class HeatDriver(object):
             stack = hcm.retry(heatclient.create, stack_name,
                               stack_template, stack_params)
             stack_id = stack['stack']['id']
-            LOG.info(_LI("Created stack with ID %(stack_id)s and "
-                     "name %(stack_name)s for provider PTG %(provider)s"),
+            LOG.info("Created stack with ID %(stack_id)s and "
+                     "name %(stack_name)s for provider PTG %(provider)s",
                      {'stack_id': stack_id, 'stack_name': stack_name,
                       'provider': provider['id']})
@@ -1735,8 +1732,8 @@ class HeatDriver(object):
                               stack_template, stack_params)
             stack_id = stack['stack']['id']
 
-            LOG.info(_LI("Created stack with ID %(stack_id)s and "
-                     "name %(stack_name)s for provider PTG %(provider)s"),
+            LOG.info("Created stack with ID %(stack_id)s and "
+                     "name %(stack_name)s for provider PTG %(provider)s",
                      {'stack_id': stack_id, 'stack_name': stack_name,
                       'provider': provider['id']})
@@ -1755,8 +1752,8 @@ class HeatDriver(object):
         except Exception as err:
             # Log the error and continue with VM delete in case of *aas
             # cleanup failure
-            LOG.exception(_LE("Cleaning up the service chain stack failed "
-                          "with Error: %(error)s"), {'error': err})
+            LOG.exception("Cleaning up the service chain stack failed "
+                          "with Error: %(error)s", {'error': err})
             return None
 
         return stack_id
@@ -1783,8 +1780,8 @@ class HeatDriver(object):
             return None
 
         if not base_mode_support and not mgmt_ip:
-            LOG.error(_LE("Service information is not available with Service "
-                      "Orchestrator on node update"))
+            LOG.error("Service information is not available with Service "
+                      "Orchestrator on node update")
             return None
 
         stack_template, stack_params = self._update_node_config(
@@ -1866,8 +1863,8 @@ class HeatDriver(object):
                 pt_added_or_removed=True)
             return stack_id
         except Exception:
-            LOG.exception(_LE("Processing policy target %(operation)s "
-                          " failed"), {'operation': operation})
+            LOG.exception("Processing policy target %(operation)s "
+                          " failed", {'operation': operation})
             return None
 
     def notify_chain_parameters_updated(self, network_function_details):
@@ -1902,7 +1899,7 @@ class HeatDriver(object):
                 return None
             return stack_id
         except Exception:
-            LOG.exception(_LE(
+            LOG.exception(
                 "Processing policy target group "
-                "%(operation)s failed"), {'operation': operation})
+                "%(operation)s failed", {'operation': operation})
             return None
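The stack-wait hunks above all branch on Heat's terminal statuses; for a create or update wait, `DELETE_COMPLETE` counts as a failure just like the `*_FAILED` states. A compact sketch of that classification (the status strings are Heat's; the function name is illustrative):

    TERMINAL_SUCCESS = {'CREATE_COMPLETE', 'UPDATE_COMPLETE'}
    TERMINAL_FAILURE = {'CREATE_FAILED', 'UPDATE_FAILED',
                        'DELETE_FAILED', 'DELETE_COMPLETE'}

    def classify_stack_status(stack_status):
        # Map a Heat stack status onto success/failure/pending.
        if stack_status in TERMINAL_SUCCESS:
            return 'success'
        if stack_status in TERMINAL_FAILURE:
            return 'failure'
        return 'pending'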


@@ -15,7 +15,6 @@ from oslo_serialization import jsonutils
 from oslo_utils import uuidutils
 from sqlalchemy.orm import exc

-from gbpservice._i18n import _LW
 from gbpservice.nfp.common import exceptions as nfp_exc
 from gbpservice.nfp.orchestrator.db import common_db_mixin
 from gbpservice.nfp.orchestrator.db import nfp_db_model
@@ -686,8 +685,8 @@ class NFPDbBase(common_db_mixin.CommonDbMixin):
             return self._get_gw_info_dict(session.query(svc_gw).filter(
                 svc_gw.network_function_id == nf_id).one())
         except exc.NoResultFound:
-            LOG.warning(_LW("Gateway detail doesn't exist for Network Function"
-                            " %s "), nf_id)
+            LOG.warning("Gateway detail doesn't exist for Network Function"
+                        " %s ", nf_id)
             raise

     def _get_gw_info_dict(self, gw):
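The warn-and-reraise shape above is otherwise untouched; only the _LW marker goes away, while _() remains reserved for user-facing exception messages. A hedged sketch of the same pattern (the session, model, and nf_id names are stand-ins, not this repo's API):

    import logging

    from sqlalchemy.orm import exc

    LOG = logging.getLogger(__name__)

    def get_gateway_record(session, svc_gw, nf_id):
        try:
            return session.query(svc_gw).filter(
                svc_gw.network_function_id == nf_id).one()
        except exc.NoResultFound:
            # Log untranslated for operators, then let the caller
            # decide how to handle the missing row.
            LOG.warning("Gateway detail doesn't exist for Network Function"
                        " %s ", nf_id)
            raise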


@@ -12,8 +12,6 @@
 import ast
 from collections import defaultdict

-from gbpservice._i18n import _LE
-from gbpservice._i18n import _LW
 from gbpservice.nfp.common import constants as nfp_constants
 from gbpservice.nfp.common import data_formatter as df
@@ -131,8 +129,8 @@ class OrchestrationDriver(object):
                 network_handler)
             device_data['interfaces'] = [mgmt_interface]
         except Exception as e:
-            LOG.exception(_LE('Failed to get interfaces for device creation.'
-                              'Error: %(error)s'), {'error': e})
+            LOG.exception('Failed to get interfaces for device creation.'
+                          'Error: %(error)s', {'error': e})

     def _delete_interfaces(self, device_data, interfaces,
                            network_handler=None):
@@ -148,8 +146,8 @@ class OrchestrationDriver(object):
                 if attr in nfp_constants.METADATA_SUPPORTED_ATTRIBUTES:
                     provider_metadata[attr] = ast.literal_eval(metadata[attr])
         except Exception as e:
-            LOG.error(_LE('Wrong metadata: %(metadata)s provided for '
-                          'image name: %(image_name)s. Error: %(error)s'),
+            LOG.error('Wrong metadata: %(metadata)s provided for '
+                      'image name: %(image_name)s. Error: %(error)s',
                       {'image_name': image_name, 'metadata': metadata,
                        'error': e})
             return None
@@ -211,8 +209,8 @@ class OrchestrationDriver(object):
                 LOG.debug("No provider metadata specified in image,"
                           " proceeding with default values")
         except Exception:
-            LOG.error(_LE("Error while getting metadata for image name:"
-                          "%(image_name)s, proceeding with default values"),
+            LOG.error("Error while getting metadata for image name:"
+                      "%(image_name)s, proceeding with default values",
                       {'image_name': image_name})
         return provider_metadata
@@ -235,8 +233,8 @@ class OrchestrationDriver(object):
                 LOG.debug("No provider metadata specified in image,"
                           " proceeding with default values")
         except Exception:
-            LOG.error(_LE("Error while getting metadata for image name: "
-                          "%(image_name)s, proceeding with default values"),
+            LOG.error("Error while getting metadata for image name: "
+                      "%(image_name)s, proceeding with default values",
                       {'image_name': image_name})
         return provider_metadata
@@ -274,8 +272,8 @@ class OrchestrationDriver(object):
             image_id = nova.get_image_id(token, admin_tenant_id, image_name)
             return image_id
         except Exception as e:
-            LOG.error(_LE('Failed to get image id for device creation.'
-                          ' image name: %(image_name)s. Error: %(error)s'),
+            LOG.error('Failed to get image id for device creation.'
+                      ' image name: %(image_name)s. Error: %(error)s',
                       {'image_name': image_name, 'error': e})

     def create_instance(self, nova, token, admin_tenant_id,
@@ -291,8 +289,8 @@ class OrchestrationDriver(object):
                 server_grp_id=server_grp_id)
             return instance_id
         except Exception as e:
-            LOG.error(_LE('Failed to create instance.'
-                          'Error: %(error)s'), {'error': e})
+            LOG.error('Failed to create instance.'
+                      'Error: %(error)s', {'error': e})

     def get_neutron_port_details(self, network_handler, token, port_id):
         try:
@@ -314,8 +312,8 @@ class OrchestrationDriver(object):
             exc_type, exc_value, exc_traceback = sys.exc_info()
             LOG.error(traceback.format_exception(exc_type, exc_value,
                                                  exc_traceback))
-            LOG.error(_LE('Failed to get management port details. '
-                          'Error: %(error)s'), {'error': e})
+            LOG.error('Failed to get management port details. '
+                      'Error: %(error)s', {'error': e})

     @_set_network_handler
     def create_network_function_device(self, device_data,
@@ -383,8 +381,8 @@ class OrchestrationDriver(object):
                 interfaces_to_attach,
                 device_data)
         except Exception as e:
-            LOG.error(_LE('Failed to fetch list of interfaces to attach'
-                          ' for device creation %(error)s'), {'error': e})
+            LOG.error('Failed to fetch list of interfaces to attach'
+                      ' for device creation %(error)s', {'error': e})
             self._delete_interfaces(device_data, interfaces,
                                     network_handler=network_handler)
             return None
@@ -479,25 +477,25 @@ class OrchestrationDriver(object):
                             server_grp_id_result=None):
         interfaces = device_data.pop('interfaces', None)
         if not interfaces:
-            LOG.exception(_LE('Failed to get interfaces for device creation.'))
+            LOG.exception('Failed to get interfaces for device creation.')
             return None, _, _
         image_id = image_id_result.get('result', None)
         if not image_id:
-            LOG.error(_LE('Failed to get image id for device creation.'))
+            LOG.error('Failed to get image id for device creation.')
             self._delete_interfaces(device_data, interfaces,
                                     network_handler=network_handler)
             return None, _, _
         if server_grp_id_result and not server_grp_id_result.get('result'):
-            LOG.error(_LE('Validation failed for Nova anti-affinity '
-                          'server group.'))
+            LOG.error('Validation failed for Nova anti-affinity '
+                      'server group.')
             return None, _, _
         provider_metadata = provider_metadata_result.get('result', None)
         if not provider_metadata:
-            LOG.warning(_LW('Failed to get provider metadata for'
-                            ' device creation.'))
+            LOG.warning('Failed to get provider metadata for'
+                        ' device creation.')
             provider_metadata = {}
         return interfaces, image_id, provider_metadata
@@ -559,8 +557,8 @@ class OrchestrationDriver(object):
         admin_tenant_id = device_data['admin_tenant_id']
         instance_id = instance_id_result.get('result', None)
         if not instance_id:
-            LOG.error(_LE('Failed to create instance with device data:'
-                          '%(data)s.'),
+            LOG.error('Failed to create instance with device data:'
+                      '%(data)s.',
                       {'data': device_data})
             self._delete_interfaces(device_data, interfaces,
                                     network_handler=network_handler)
@@ -569,7 +567,7 @@ class OrchestrationDriver(object):
         mgmt_neutron_port_info = port_details_result.get('result', None)
         if not mgmt_neutron_port_info:
-            LOG.error(_LE('Failed to get management port details. '))
+            LOG.error('Failed to get management port details. ')
             with nfp_ctx_mgr.NovaContextManager as ncm:
                 ncm.retry(self.compute_handler_nova.delete_instance,
                           token,
@@ -646,8 +644,8 @@ class OrchestrationDriver(object):
                 interfaces,
                 network_handler=network_handler)
         except Exception as e:
-            LOG.error(_LE('Failed to delete the management data port(s). '
-                          'Error: %(error)s'), {'error': e})
+            LOG.error('Failed to delete the management data port(s). '
+                      'Error: %(error)s', {'error': e})

     def get_network_function_device_status(self, device_data,
                                            ignore_failure=False):
@@ -789,8 +787,8 @@ class OrchestrationDriver(object):
                 executor.fire()
         except Exception as e:
-            LOG.error(_LE('Failed to plug interface(s) to the device.'
-                          'Error: %(error)s'), {'error': e})
+            LOG.error('Failed to plug interface(s) to the device.'
+                      'Error: %(error)s', {'error': e})
             return None
         else:
             return True
@@ -924,7 +922,7 @@ class OrchestrationDriver(object):
             network_handler = self.network_handlers[nfp_constants.NEUTRON_MODE]
             network_handler.delete_port(token, port_id)
         except Exception as exc:
-            LOG.error(_LE("Failed to delete port %(port_id)s. Error: %(exc)s"),
+            LOG.error("Failed to delete port %(port_id)s. Error: %(exc)s",
                       {"port_id": port_id, 'exc': exc})

     def _get_port_from_pt(self, device_data, pt_id):
@@ -942,7 +940,7 @@ class OrchestrationDriver(object):
         for pt in device_data['consumer']['pt']:
             if pt['id'] == pt_id:
                 return pt['port_id']
-        LOG.error(_LE('Policy Target %(pt_id) not found in provided data'),
+        LOG.error('Policy Target %(pt_id)s not found in provided data',
                   {'pt_id': pt_id})
         return port_id
@@ -1003,8 +1001,8 @@ class OrchestrationDriver(object):
                     'port_classification',
                     'port_model'])
        ):
-            LOG.error(_LE('Incomplete device data received for delete '
-                          'network function device.'))
+            LOG.error('Incomplete device data received for delete '
+                      'network function device.')
             return None
         token = self._get_token(device_data.get('token'))
@@ -1027,8 +1025,8 @@ class OrchestrationDriver(object):
                         devices_data['provider'])
                 )
             except Exception:
-                LOG.error(_LE('Failed to get provider port details'
-                              ' for get device config info operation'))
+                LOG.error('Failed to get provider port details'
+                          ' for get device config info operation')
                 return None
         elif port['port_classification'] == nfp_constants.CONSUMER:
             try:
@@ -1038,8 +1036,8 @@ class OrchestrationDriver(object):
                         devices_data['consumer'])
                 )
             except Exception:
-                LOG.error(_LE('Failed to get consumer port details'
-                              ' for get device config info operation'))
+                LOG.error('Failed to get consumer port details'
+                          ' for get device config info operation')
                 return None
         device_data.update({
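With dict-style arguments every placeholder must carry a conversion type: %(pt_id)s, not a bare %(pt_id), which fails with "ValueError: incomplete format" when the record is formatted (logging then reports the failure instead of the message). A small self-contained check of both forms, with an illustrative id:

    import logging

    logging.basicConfig(level=logging.ERROR)
    LOG = logging.getLogger(__name__)
    pt_id = 'illustrative-pt-id'

    # Broken: missing the trailing conversion character, so the record
    # cannot be formatted and logging prints an error in its place.
    LOG.error('Policy Target %(pt_id) not found in provided data',
              {'pt_id': pt_id})

    # Correct: %(name)s (or %(name)d, %(name)r, ...) interpolates from
    # the dict as expected.
    LOG.error('Policy Target %(pt_id)s not found in provided data',
              {'pt_id': pt_id})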


@@ -14,8 +14,6 @@
 import oslo_messaging as messaging

 from gbpservice._i18n import _
-from gbpservice._i18n import _LE
-from gbpservice._i18n import _LI
 from gbpservice.nfp.common import constants as nfp_constants
 from gbpservice.nfp.common import topics as nsf_topics
 from gbpservice.nfp.common import utils as nfp_utils
@@ -112,14 +110,14 @@ class RpcHandler(object):
         NFI = event_data.get('network_function_instance_id')
         if NFD and NF and NFI:
-            LOG.info(_LI("Created event %(event_name)s with"
-                         " NF:%(nf)s ,NFI:%(nfi)s and NFD:%(nfd)s"),
+            LOG.info("Created event %(event_name)s with"
+                     " NF:%(nf)s ,NFI:%(nfi)s and NFD:%(nfd)s",
                      {'event_name': event_id,
                       'nf': NF,
                       'nfi': NFI,
                       'nfd': NFD})
         else:
-            LOG.info(_LI("Created event %(event_name)s "),
+            LOG.info("Created event %(event_name)s ",
                      {'event_name': event_id})

     def _create_event(self, event_id, event_data=None, key=None,
@@ -182,7 +180,7 @@ class RpcHandler(object):
         event_id = self.rpc_event_mapping[resource][0]
         if result.lower() != 'success':
-            LOG.info(_LI("RPC Handler response data:%(data)s"),
+            LOG.info("RPC Handler response data:%(data)s",
                      {'data': data})
             if is_delete_request:
                 # Ignore any deletion errors, generate SUCCESS event
@@ -323,20 +321,20 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
             NFI = event_data.get('network_function_instance_id')
             if NFD and NF and NFI:
-                LOG.info(_LI("Received event %(event_name)s with "
-                             "NF:%(nf)s ,NFI:%(nfi)s and NFD:%(nfd)s"),
+                LOG.info("Received event %(event_name)s with "
+                         "NF:%(nf)s ,NFI:%(nfi)s and NFD:%(nfd)s",
                          {'event_name': event.id,
                           'nf': NF,
                           'nfi': NFI,
                           'nfd': NFD})
             else:
-                LOG.info(_LI("Received event %(event_name)s "),
+                LOG.info("Received event %(event_name)s ",
                          {'event_name': event.id})
             event_handler = self.event_method_mapping(event.id)
             event_handler(event)
         except Exception as e:
-            LOG.error(_LE("error in processing event: %(event_id)s for "
-                          "event data %(event_data)s. error: %(error)s"),
+            LOG.error("Error in processing event: %(event_id)s for "
+                      "event data %(event_data)s. Error: %(error)s",
                       {'event_id': event.id, 'event_data': event.data,
                        'error': e})
             _, _, tb = sys.exc_info()
@@ -356,13 +354,13 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
             nf = None
             nfi = None
         if nf and nfi:
-            LOG.info(_LI("Created event %(event_name)s with NF:%(nf)s and "
-                         "NFI:%(nfi)s "),
+            LOG.info("Created event %(event_name)s with NF:%(nf)s and "
+                     "NFI:%(nfi)s ",
                      {'event_name': event_id,
                       'nf': nf,
                       'nfi': nfi})
         else:
-            LOG.info(_LI("Created event %(event_name)s "),
+            LOG.info("Created event %(event_name)s ",
                      {'event_name': event_id})

     def _create_event(self, event_id, event_data=None,
@@ -407,11 +405,11 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
         self._controller.event_complete(ev)

     def event_cancelled(self, ev, reason):
-        LOG.info(_LI("Poll event %(event_id)s cancelled."),
+        LOG.info("Poll event %(event_id)s cancelled.",
                  {'event_id': ev.id})
         if ev.id == 'DEVICE_SPAWNING':
-            LOG.info(_LI("Device is not up still after 10secs of launch"))
+            LOG.info("Device is still not up after 10secs of launch")
             # create event DEVICE_NOT_UP
             device = self._prepare_failure_case_device_data(ev.data)
             self._create_event(event_id='DEVICE_NOT_UP',
@@ -420,10 +418,10 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
             self._update_network_function_device_db(device,
                                                     'DEVICE_NOT_UP')
         if ev.id == 'DEVICE_BEING_DELETED':
-            LOG.info(_LI("Device is not deleted completely."
-                         " Continuing further cleanup of resources."
-                         " Possibly there could be stale port resources"
-                         " on Compute"))
+            LOG.info("Device is not deleted completely."
+                     " Continuing further cleanup of resources."
+                     " Possibly there could be stale port resources"
+                     " on Compute")
             device = ev.data
             orchestration_driver = self._get_orchestration_driver(
                 device['service_details']['service_vendor'])
@@ -745,15 +743,15 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
         nfd_request = self._prepare_failure_case_device_data(nfp_context)
         service_details = nfp_context['service_details']

-        LOG.info(_LI("Received event CREATE NETWORK FUNCTION "
-                     "DEVICE request."))
+        LOG.info("Received event CREATE NETWORK FUNCTION "
+                 "DEVICE request.")

         orchestration_driver = self._get_orchestration_driver(
             service_details['service_vendor'])

         device_data = self._prepare_device_data_from_nfp_context(nfp_context)

-        LOG.info(_LI("Creating new device:%(device)s"),
+        LOG.info("Creating new device:%(device)s",
                  {'device': nfd_request})

         device_data['volume_support'] = (
             self.config.device_orchestrator.volume_support)
@@ -765,7 +763,7 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
             orchestration_driver.create_network_function_device(
                 device_data))
         if not driver_device_info:
-            LOG.info(_LI("Device creation failed"))
+            LOG.info("Device creation failed")
             self._create_event(event_id='DEVICE_ERROR',
                                event_data=nfd_request,
                                is_internal_event=True)
@@ -824,8 +822,8 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
             orchestration_driver.get_network_function_device_status(device))

         if is_device_up == nfp_constants.ACTIVE:
-            LOG.info(_LI("Device with NFD:%(id)s came up for "
-                         "tenant:%(tenant)s "),
+            LOG.info("Device with NFD:%(id)s came up for "
+                     "tenant:%(tenant)s ",
                      {'id': network_function_device['id'],
                       'tenant': tenant_id})
             self._post_device_up_event_graph(nfp_context)
@@ -916,9 +914,9 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
         self._update_network_function_device_db(
             network_function_device, nfp_constants.ACTIVE)
-        LOG.info(_LI(
-            "Configuration completed for device with NFD:%(device_id)s. "
-            "Updated DB status to ACTIVE."),
+        LOG.info(
+            "Configuration completed for device with NFD:%(device_id)s. "
+            "Updated DB status to ACTIVE.",
             {'device_id': network_function_device['id']})
         LOG.debug("Device detail:%s",
                   network_function_device)
@@ -1331,8 +1329,8 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
             self._controller.event_complete(event, result="SUCCESS")
             return
         device = self._prepare_device_data_fast(network_function_details)
-        LOG.info(_LI("Recieved DELETE NETWORK FUNCTION "
-                     "DEVICE request "))
+        LOG.info("Received DELETE NETWORK FUNCTION "
+                 "DEVICE request ")
         device['event_desc'] = event.desc.to_dict()
         self._create_event(event_id='DELETE_CONFIGURATION',
                            event_data=device,
@@ -1474,7 +1472,7 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
     # Error Handling
     def handle_device_create_error(self, event):
         device = event.data
-        LOG.error(_LE("Device creation failed, for device %(device)s"),
+        LOG.error("Device creation failed, for device %(device)s",
                   {'device': device})
         device['network_function_device_id'] = device.get('id')
         self._create_event(event_id='DEVICE_CREATE_FAILED',
@@ -1563,8 +1561,8 @@ class DeviceOrchestrator(nfp_api.NfpEventHandler):
     def handle_driver_error(self, event):
         device = event.data
-        LOG.error(_LE("Exception occured in driver, driver returned None "
-                      " for device %(device)s"), {'device': device})
+        LOG.error("Exception occurred in driver, driver returned None "
+                  " for device %(device)s", {'device': device})
         status = nfp_constants.ERROR
         desc = 'Exception in driver, driver return None'
         self._update_network_function_device_db(device, status, desc)
@@ -1631,8 +1629,8 @@ class NDOConfiguratorRpcApi(object):
     def create_network_function_device_config(self, device_data,
                                               config_params):
         self._update_params(device_data, config_params, operation='create')
-        LOG.info(_LI("Sending create NFD config request to configurator "
-                     "for NF:%(nf_id)s "),
+        LOG.info("Sending create NFD config request to configurator "
+                 "for NF:%(nf_id)s ",
                  {'nf_id': config_params['info']['context']['nf_id']})

         transport.send_request_to_configurator(self.conf,
@@ -1645,7 +1643,7 @@ class NDOConfiguratorRpcApi(object):
                                                config_params):
         self._update_params(device_data, config_params, operation='delete')
         config_params['info']['context']['nfp_context'] = device_data
-        LOG.info(_LI("Sending delete NFD config request to configurator "))
+        LOG.info("Sending delete NFD config request to configurator ")
         transport.send_request_to_configurator(self.conf,
                                                self.context,
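The dispatch-and-log shape used by the orchestrators above survives the marker removal unchanged: map an event id to a handler method, and let LOG.exception (which logs at ERROR and appends the active traceback) record failures with plain strings. A toy sketch of that shape, with illustrative names rather than this repo's classes:

    import logging

    logging.basicConfig(level=logging.INFO)
    LOG = logging.getLogger(__name__)

    class Event(object):
        def __init__(self, event_id, data=None):
            self.id = event_id
            self.data = data

    class ToyOrchestrator(object):
        # Mirrors the handle_event() shape above with stand-in names.

        def event_method_mapping(self, event_id):
            return {'CREATE_NETWORK_FUNCTION': self.create_network_function}[
                event_id]

        def create_network_function(self, event):
            LOG.info("Received event %(event_name)s ",
                     {'event_name': event.id})

        def handle_event(self, event):
            try:
                event_handler = self.event_method_mapping(event.id)
                event_handler(event)
            except Exception as e:
                # LOG.exception logs at ERROR and appends the active
                # traceback, so no sys.exc_info() plumbing is needed.
                LOG.exception("Error in processing event: %(event_id)s for "
                              "event data %(event_data)s. Error: %(error)s",
                              {'event_id': event.id, 'event_data': event.data,
                               'error': e})

    ToyOrchestrator().handle_event(Event('CREATE_NETWORK_FUNCTION'))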


@ -17,9 +17,6 @@ from oslo_log import helpers as log_helpers
import oslo_messaging import oslo_messaging
from gbpservice._i18n import _ from gbpservice._i18n import _
from gbpservice._i18n import _LE
from gbpservice._i18n import _LI
from gbpservice._i18n import _LW
from gbpservice.nfp.common import constants as nfp_constants from gbpservice.nfp.common import constants as nfp_constants
from gbpservice.nfp.common import exceptions as nfp_exc from gbpservice.nfp.common import exceptions as nfp_exc
from gbpservice.nfp.common import topics as nfp_rpc_topics from gbpservice.nfp.common import topics as nfp_rpc_topics
@ -124,8 +121,8 @@ class RpcHandler(object):
Function Instance Function Instance
''' '''
module_context.init(network_function) module_context.init(network_function)
LOG.info(_LI("Received RPC call for CREATE NETWORK FUNCTION for " LOG.info("Received RPC call for CREATE NETWORK FUNCTION for "
"tenant:%(tenant_id)s"), "tenant:%(tenant_id)s",
{'tenant_id': network_function[ {'tenant_id': network_function[
'resource_owner_context']['tenant_id']}) 'resource_owner_context']['tenant_id']})
@ -151,7 +148,7 @@ class RpcHandler(object):
Returns the Network functions from DB Returns the Network functions from DB
''' '''
module_context.init() module_context.init()
LOG.info(_LI("Received RPC call for GET NETWORK FUNCTIONS ")) LOG.info("Received RPC call for GET NETWORK FUNCTIONS ")
service_orchestrator = ServiceOrchestrator(self._controller, self.conf) service_orchestrator = ServiceOrchestrator(self._controller, self.conf)
return service_orchestrator.get_network_functions( return service_orchestrator.get_network_functions(
context, filters) context, filters)
@ -166,8 +163,8 @@ class RpcHandler(object):
''' '''
module_context.init() module_context.init()
LOG.info(_LI("Received RPC call for UPDATE NETWORK FUNCTION for NF:" LOG.info("Received RPC call for UPDATE NETWORK FUNCTION for NF:"
"%(network_function_id)s"), "%(network_function_id)s",
{'network_function_id': network_function_id}) {'network_function_id': network_function_id})
service_orchestrator = ServiceOrchestrator(self._controller, self.conf) service_orchestrator = ServiceOrchestrator(self._controller, self.conf)
service_orchestrator.update_network_function( service_orchestrator.update_network_function(
@ -182,8 +179,8 @@ class RpcHandler(object):
Results in an Event for async processing of Network Function Instance. Results in an Event for async processing of Network Function Instance.
''' '''
module_context.init() module_context.init()
LOG.info(_LI("Received RPC call for DELETE NETWORK FUNCTION for NF:" LOG.info("Received RPC call for DELETE NETWORK FUNCTION for NF:"
"%(network_function_id)s"), "%(network_function_id)s",
{'network_function_id': network_function_id}) {'network_function_id': network_function_id})
service_orchestrator = ServiceOrchestrator(self._controller, self.conf) service_orchestrator = ServiceOrchestrator(self._controller, self.conf)
@ -199,9 +196,9 @@ class RpcHandler(object):
Results in an Event for async processing of Network Function Instance. Results in an Event for async processing of Network Function Instance.
''' '''
module_context.init() module_context.init()
LOG.info(_LI("Received RPC call for POLICY TARGET ADDED NOTIFICATION " LOG.info("Received RPC call for POLICY TARGET ADDED NOTIFICATION "
"for NF:" "for NF:"
" %(network_function_id)s"), " %(network_function_id)s",
{'network_function_id': network_function_id}) {'network_function_id': network_function_id})
service_orchestrator = ServiceOrchestrator(self._controller, self.conf) service_orchestrator = ServiceOrchestrator(self._controller, self.conf)
service_orchestrator.handle_policy_target_added( service_orchestrator.handle_policy_target_added(
@ -216,8 +213,8 @@ class RpcHandler(object):
Results in an Event for async processing of Network Function Instance. Results in an Event for async processing of Network Function Instance.
''' '''
module_context.init() module_context.init()
LOG.info(_LI("Received RPC call for POLICY TARGET REMOVED " LOG.info("Received RPC call for POLICY TARGET REMOVED "
"NOTIFICATION for NF:%(network_function_id)s"), "NOTIFICATION for NF:%(network_function_id)s",
{'network_function_id': network_function_id}) {'network_function_id': network_function_id})
service_orchestrator = ServiceOrchestrator(self._controller, self.conf) service_orchestrator = ServiceOrchestrator(self._controller, self.conf)
service_orchestrator.handle_policy_target_removed( service_orchestrator.handle_policy_target_removed(
@ -232,8 +229,8 @@ class RpcHandler(object):
Results in an Event for async processing of Network Function Instance. Results in an Event for async processing of Network Function Instance.
''' '''
module_context.init() module_context.init()
LOG.info(_LI("Received RPC call CONSUMER PTG ADDED NOTIFICATION " LOG.info("Received RPC call CONSUMER PTG ADDED NOTIFICATION "
"for NF:%(network_function_id)s"), "for NF:%(network_function_id)s",
{'network_function_id': network_function_id}) {'network_function_id': network_function_id})
service_orchestrator = ServiceOrchestrator(self._controller, self.conf) service_orchestrator = ServiceOrchestrator(self._controller, self.conf)
service_orchestrator.handle_consumer_ptg_added( service_orchestrator.handle_consumer_ptg_added(
@ -248,8 +245,8 @@ class RpcHandler(object):
Results in an Event for async processing of Network Function Instance. Results in an Event for async processing of Network Function Instance.
''' '''
module_context.init() module_context.init()
LOG.info(_LI("Received RPC call for CONSUMER PTG REMOVED NOTIFICATION " LOG.info("Received RPC call for CONSUMER PTG REMOVED NOTIFICATION "
"for NF:%(network_function_id)s"), "for NF:%(network_function_id)s",
{'network_function_id': network_function_id}) {'network_function_id': network_function_id})
service_orchestrator = ServiceOrchestrator(self._controller, self.conf) service_orchestrator = ServiceOrchestrator(self._controller, self.conf)
service_orchestrator.handle_consumer_ptg_removed( service_orchestrator.handle_consumer_ptg_removed(
@ -334,13 +331,13 @@ class RpcHandlerConfigurator(object):
NF = None NF = None
NFI = None NFI = None
if NF and NFI: if NF and NFI:
LOG.info(_LI("Created event %(event_name)s with NF:%(nf)s " LOG.info("Created event %(event_name)s with NF:%(nf)s "
"and NFI:%(nfi)s "), "and NFI:%(nfi)s ",
{'event_name': event_id, {'event_name': event_id,
'nf': NF, 'nf': NF,
'nfi': NFI}) 'nfi': NFI})
else: else:
LOG.info(_LI("Created event %(event_name)s "), LOG.info("Created event %(event_name)s ",
{'event_name': event_id}) {'event_name': event_id})
def _create_event(self, event_id, event_data=None, def _create_event(self, event_id, event_data=None,
@ -588,20 +585,20 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler):
NF = None NF = None
NFI = None NFI = None
if NF and NFI: if NF and NFI:
LOG.info(_LI("Received event %(event_name)s with NF:%(nf)s and " LOG.info("Received event %(event_name)s with NF:%(nf)s and "
"NFI:%(nfi)s "), "NFI:%(nfi)s ",
{'event_name': event.id, {'event_name': event.id,
'nf': NF, 'nf': NF,
'nfi': NFI}) 'nfi': NFI})
else: else:
LOG.info(_LI("Received event %(event_name)s "), LOG.info("Received event %(event_name)s ",
{'event_name': event.id}) {'event_name': event.id})
try: try:
event_handler = self.event_method_mapping(event.id) event_handler = self.event_method_mapping(event.id)
event_handler(event) event_handler(event)
except Exception as e: except Exception as e:
LOG.exception(_LE("Error in processing event: %(event_id)s for " LOG.exception("Error in processing event: %(event_id)s for "
"event data %(event_data)s. Error: %(error)s"), "event data %(event_data)s. Error: %(error)s",
{'event_id': event.id, 'event_data': event.data, {'event_id': event.id, 'event_data': event.data,
'error': e}) 'error': e})
_, _, tb = sys.exc_info() _, _, tb = sys.exc_info()
@ -609,27 +606,26 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler):
raise e raise e
def handle_poll_event(self, event): def handle_poll_event(self, event):
LOG.info(_LI("Received poll event %(id)s"), LOG.info("Received poll event %(id)s",
{'id': event.id}) {'id': event.id})
try: try:
event_handler = self.event_method_mapping(event.id) event_handler = self.event_method_mapping(event.id)
return event_handler(event) return event_handler(event)
except Exception: except Exception:
LOG.exception(_LE("Error in processing poll event: " LOG.exception("Error in processing poll event: "
"%(event_id)s"), {'event_id': event.id}) "%(event_id)s", {'event_id': event.id})
def event_cancelled(self, event, reason): def event_cancelled(self, event, reason):
nfp_context = event.context nfp_context = event.context
if event.id == 'CHECK_USER_CONFIG_COMPLETE': if event.id == 'CHECK_USER_CONFIG_COMPLETE':
network_function = nfp_context['network_function'] network_function = nfp_context['network_function']
LOG.info(_LI("Applying user config failed for " LOG.info("Applying user config failed for "
"NF:%(network_function_id)s " "NF:%(network_function_id)s "
"with reason %(reason)s" "with reason %(reason)s"
""), {'network_function_id': network_function[ " ", {'network_function_id': network_function[
'id'], 'id'], 'reason': str(reason)})
'reason': str(reason)})
operation = nfp_context['log_context'].get('path') operation = nfp_context['log_context'].get('path')
LOG.error(_LE("[Event:Service%(operation)sFailed]"), LOG.error("[Event:Service%(operation)sFailed]",
{'operation': operation.capitalize()}) {'operation': operation.capitalize()})
LOG.event('%s network function failed.' % operation.capitalize(), LOG.event('%s network function failed.' % operation.capitalize(),
stats_type=nfp_constants.error_event) stats_type=nfp_constants.error_event)
@ -649,13 +645,13 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler):
elif event.id == 'APPLY_USER_CONFIG_IN_PROGRESS' or ( elif event.id == 'APPLY_USER_CONFIG_IN_PROGRESS' or (
event.id == 'UPDATE_USER_CONFIG_STILL_IN_PROGRESS'): event.id == 'UPDATE_USER_CONFIG_STILL_IN_PROGRESS'):
request_data = event.data request_data = event.data
LOG.info(_LI("Applying user config failed for " LOG.info("Applying user config failed for "
"NF: %(network_function_id)s data:" "NF: %(network_function_id)s data:"
"%(data)s with reason %(reason)s" "%(data)s with reason %(reason)s"
""), {'data': request_data, "", {'data': request_data,
'network_function_id': request_data[ 'network_function_id': request_data[
'network_function_id'], 'network_function_id'],
'reason': str(reason)}) 'reason': str(reason)})
updated_network_function = {'status': nfp_constants.ERROR} updated_network_function = {'status': nfp_constants.ERROR}
with nfp_ctx_mgr.DbContextManager as dcm: with nfp_ctx_mgr.DbContextManager as dcm:
@ -666,7 +662,7 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler):
updated_network_function) updated_network_function)
operation = nfp_context['log_context'].get('path') operation = nfp_context['log_context'].get('path')
LOG.error(_LE("[Event:Service%(operation)sFailed]"), LOG.error("[Event:Service%(operation)sFailed]",
{'operation': operation.capitalize()}) {'operation': operation.capitalize()})
LOG.event('%s network function failed.' % operation.capitalize(), LOG.event('%s network function failed.' % operation.capitalize(),
stats_type=nfp_constants.error_event) stats_type=nfp_constants.error_event)
@ -702,13 +698,13 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler):
NF = None NF = None
NFI = None NFI = None
if NF and NFI: if NF and NFI:
LOG.info(_LI("Created event %(event_name)s with NF:%(nf)s and " LOG.info("Created event %(event_name)s with NF:%(nf)s and "
"NFI:%(nfi)s "), "NFI:%(nfi)s ",
{'event_name': event_id, {'event_name': event_id,
'nf': NF, 'nf': NF,
'nfi': NFI}) 'nfi': NFI})
else: else:
LOG.info(_LI("Created event %(event_name)s "), LOG.info("Created event %(event_name)s ",
{'event_name': event_id}) {'event_name': event_id})
# REVISIT(ashu): Merge this _create_event, and above one to have # REVISIT(ashu): Merge this _create_event, and above one to have
# single function. # single function.
@ -788,9 +784,9 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler):
tag_str, config_str = self.config_driver.parse_template_config_string( tag_str, config_str = self.config_driver.parse_template_config_string(
service_config_str) service_config_str)
if not config_str: if not config_str:
LOG.error(_LE('Exception while parsing config string, config ' LOG.error('Exception while parsing config string, config '
'string: %(config_str)s is improper for ' 'string: %(config_str)s is improper for '
'network_function id: %(network_function_id)s'), 'network_function id: %(network_function_id)s',
{'config_str': service_config_str, {'config_str': service_config_str,
'network_function_id': network_function_id}) 'network_function_id': network_function_id})
self.handle_driver_error(network_function_id) self.handle_driver_error(network_function_id)
@ -823,9 +819,9 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler):
network_function_id = network_function_data[ network_function_id = network_function_data[
'network_function_details']['network_function']['id'] 'network_function_details']['network_function']['id']
if not config_str: if not config_str:
LOG.error(_LE('Exception while parsing config string, config ' LOG.error('Exception while parsing config string, config '
'string: %(config_str)s is improper for ' 'string: %(config_str)s is improper for '
'network_function id: %(network_function_id)s'), 'network_function id: %(network_function_id)s',
{'config_str': service_config_str, {'config_str': service_config_str,
'network_function_id': network_function_id}) 'network_function_id': network_function_id})
self.handle_driver_error(network_function_id) self.handle_driver_error(network_function_id)
@ -877,11 +873,11 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler):
def _report_logging_info(self, nf, nfi, service_type, def _report_logging_info(self, nf, nfi, service_type,
service_vendor): service_vendor):
LOG.info(_LI("[TenantID:%(tenant_id)s, " LOG.info("[TenantID:%(tenant_id)s, "
"ServiceChainID:%(service_chain_id)s, " "ServiceChainID:%(service_chain_id)s, "
"ServiceInstanceID:%(service_instance_id)s, " "ServiceInstanceID:%(service_instance_id)s, "
"ServiceType:%(service_type)s, " "ServiceType:%(service_type)s, "
"ServiceProvider:%(service_provider)s]"), "ServiceProvider:%(service_provider)s]",
{'tenant_id': nf['tenant_id'], {'tenant_id': nf['tenant_id'],
'service_chain_id': nf['service_chain_id'], 'service_chain_id': nf['service_chain_id'],
'service_instance_id': nfi['id'], 'service_instance_id': nfi['id'],
@ -961,7 +957,7 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler):
nfp_context['log_context']['meta_id'] = network_function['id'] nfp_context['log_context']['meta_id'] = network_function['id']
nfp_context['log_context']['auth_token'] = context.auth_token nfp_context['log_context']['auth_token'] = context.auth_token
LOG.info(_LI("[Event:ServiceCreateInitiated]")) LOG.info("[Event:ServiceCreateInitiated]")
LOG.event("Started create network function.", LOG.event("Started create network function.",
stats_type=nfp_constants.request_event) stats_type=nfp_constants.request_event)
@ -973,8 +969,8 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler):
nfp_context['service_details'] = service_details nfp_context['service_details'] = service_details
nfp_context['share_existing_device'] = False nfp_context['share_existing_device'] = False
nfp_context['base_mode'] = base_mode_support nfp_context['base_mode'] = base_mode_support
LOG.info(_LI("Handling RPC call CREATE NETWORK FUNCTION for " LOG.info("Handling RPC call CREATE NETWORK FUNCTION for "
"%(service_type)s with tenant:%(tenant_id)s"), "%(service_type)s with tenant:%(tenant_id)s",
{'tenant_id': tenant_id, {'tenant_id': tenant_id,
'service_type': service_profile['service_type']}) 'service_type': service_profile['service_type']})
if base_mode_support: if base_mode_support:
@ -1009,7 +1005,7 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler):
network_function_id, network_function_id,
{'service_config': user_config, {'service_config': user_config,
'status': nfp_constants.PENDING_UPDATE}) 'status': nfp_constants.PENDING_UPDATE})
LOG.info(_LI("[Event:ServiceUpdateInitiated]")) LOG.info("[Event:ServiceUpdateInitiated]")
LOG.event("Started update network function.", LOG.event("Started update network function.",
stats_type=nfp_constants.request_event) stats_type=nfp_constants.request_event)
@ -1054,7 +1050,7 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler):
with nfp_ctx_mgr.DbContextManager: with nfp_ctx_mgr.DbContextManager:
self.db_handler.delete_network_function( self.db_handler.delete_network_function(
self.db_session, network_function_id) self.db_session, network_function_id)
LOG.info(_LI("[Event:ServiceDeleteCompleted]")) LOG.info("[Event:ServiceDeleteCompleted]")
LOG.event("Completed delete network function.", LOG.event("Completed delete network function.",
stats_type=nfp_constants.response_event) stats_type=nfp_constants.response_event)
@ -1087,7 +1083,7 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler):
network_function_id, network_function) network_function_id, network_function)
nfp_context.update(network_function_details) nfp_context.update(network_function_details)
LOG.info(_LI("[Event:ServiceDeleteInitiated]")) LOG.info("[Event:ServiceDeleteInitiated]")
LOG.event("Started delete network function.", LOG.event("Started delete network function.",
stats_type=nfp_constants.request_event) stats_type=nfp_constants.request_event)
if not base_mode_support: if not base_mode_support:
@ -1231,8 +1227,8 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler):
nfp_context['log_context']['nfi_id'] = nfi['id'] nfp_context['log_context']['nfi_id'] = nfi['id']
LOG.info(_LI("Creating event CREATE NETWORK FUNCTION DEVICE " LOG.info("Creating event CREATE NETWORK FUNCTION DEVICE "
"for NF: %(network_function_id)s"), "for NF: %(network_function_id)s",
{'network_function_id': network_function['id']}) {'network_function_id': network_function['id']})
ev = self._controller.new_event( ev = self._controller.new_event(
@ -1385,10 +1381,10 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler):
neutron_resource_desc = ( neutron_resource_desc = (
self.config_driver.get_neutron_resource_description(nfp_context)) self.config_driver.get_neutron_resource_description(nfp_context))
if not neutron_resource_desc: if not neutron_resource_desc:
LOG.error(_LE( LOG.error(
"Preparing neutron resource description failed in " "Preparing neutron resource description failed in "
"config driver, marking user config as Failed for " "config driver, marking user config as Failed for "
"network function: %(nf)s"), {'nf': network_function}) "network function: %(nf)s", {'nf': network_function})
nfp_context['network_function_id'] = network_function['id'] nfp_context['network_function_id'] = network_function['id']
binding_key = nfp_context['service_details'][ binding_key = nfp_context['service_details'][
'service_vendor'].lower() + network_function['id'] 'service_vendor'].lower() + network_function['id']
@ -1488,7 +1484,7 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler):
request_data['network_function_id']) request_data['network_function_id'])
network_function = network_function_details['network_function'] network_function = network_function_details['network_function']
LOG.info(_LI("[Event:ServiceUpdateInitiated]")) LOG.info("[Event:ServiceUpdateInitiated]")
LOG.event("Started update network function.", LOG.event("Started update network function.",
stats_type=nfp_constants.request_event) stats_type=nfp_constants.request_event)
nfi = network_function_details.get('network_function_instance', None) nfi = network_function_details.get('network_function_instance', None)
@ -1570,7 +1566,7 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler):
nfp_context = event.context nfp_context = event.context
operation = nfp_context['log_context'].get('path') operation = nfp_context['log_context'].get('path')
LOG.error(_LE("[Event:Service%(operation)sFailed]"), LOG.error("[Event:Service%(operation)sFailed]",
{'operation': operation.capitalize()}) {'operation': operation.capitalize()})
LOG.event('%s network function failed.' % operation.capitalize(), LOG.event('%s network function failed.' % operation.capitalize(),
stats_type=nfp_constants.error_event) stats_type=nfp_constants.error_event)
@ -1578,9 +1574,9 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler):
# Trigger RPC to notify the Create_Service caller with status # Trigger RPC to notify the Create_Service caller with status
def handle_driver_error(self, network_function_id): def handle_driver_error(self, network_function_id):
LOG.error(_LE("Error occurred while processing network function " LOG.error("Error occurred while processing network function "
"CRUD operations, marking network function: %(nf_id)s " "CRUD operations, marking network function: %(nf_id)s "
"as ERROR to initiate cleanup."), "as ERROR to initiate cleanup.",
{'nf_id': network_function_id}) {'nf_id': network_function_id})
network_function_details = self.get_network_function_details( network_function_details = self.get_network_function_details(
network_function_id) network_function_id)
@ -1592,7 +1588,7 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler):
network_function_id, network_function) network_function_id, network_function)
nfp_context = module_context.get() nfp_context = module_context.get()
operation = nfp_context['log_context'].get('path') operation = nfp_context['log_context'].get('path')
LOG.error(_LE("[Event:Service%(operation)sFailed]"), LOG.error("[Event:Service%(operation)sFailed]",
{'operation': operation.capitalize()}) {'operation': operation.capitalize()})
LOG.event('%s network function failed.' % operation.capitalize(), LOG.event('%s network function failed.' % operation.capitalize(),
stats_type=nfp_constants.error_event) stats_type=nfp_constants.error_event)
@ -1664,10 +1660,10 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler):
request_data['config_policy_id'], request_data['tenant_id'], request_data['config_policy_id'], request_data['tenant_id'],
request_data['network_function_details']) request_data['network_function_details'])
if config_status == nfp_constants.ERROR: if config_status == nfp_constants.ERROR:
LOG.info(_LI("Applying user config failed for " LOG.info("Applying user config failed for "
"NF:%(network_function_id)s "), { "NF:%(network_function_id)s ", {
'network_function_id': 'network_function_id':
request_data['network_function_id']}) request_data['network_function_id']})
updated_network_function = {'status': nfp_constants.ERROR} updated_network_function = {'status': nfp_constants.ERROR}
with nfp_ctx_mgr.DbContextManager as dcm: with nfp_ctx_mgr.DbContextManager as dcm:
dcm.lock( dcm.lock(
@ -1676,7 +1672,7 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler):
request_data['network_function_id'], request_data['network_function_id'],
updated_network_function) updated_network_function)
operation = event.context['log_context'].get('path') operation = event.context['log_context'].get('path')
LOG.error(_LE("[Event:Service%(operation)sFailed]"), LOG.error("[Event:Service%(operation)sFailed]",
{'operation': operation.capitalize()}) {'operation': operation.capitalize()})
LOG.event('%s network function failed.' % operation.capitalize(), LOG.event('%s network function failed.' % operation.capitalize(),
stats_type=nfp_constants.error_event) stats_type=nfp_constants.error_event)
@ -1710,8 +1706,8 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler):
max_times=self.UPDATE_USER_CONFIG_MAXRETRY) max_times=self.UPDATE_USER_CONFIG_MAXRETRY)
return STOP_POLLING return STOP_POLLING
updated_network_function = {'status': nfp_constants.ACTIVE} updated_network_function = {'status': nfp_constants.ACTIVE}
LOG.info(_LI("Applying user config is successfull moving " LOG.info("Applying user config is successfull moving "
"NF:%(network_function_id)s to ACTIVE"), "NF:%(network_function_id)s to ACTIVE",
{'network_function_id': {'network_function_id':
request_data['network_function_id']}) request_data['network_function_id']})
with nfp_ctx_mgr.DbContextManager as dcm: with nfp_ctx_mgr.DbContextManager as dcm:
@ -1722,7 +1718,7 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler):
updated_network_function) updated_network_function)
operation = event.context['log_context'].get('path') operation = event.context['log_context'].get('path')
LOG.info(_LI("[Event:Service%(operation)sCompleted]"), LOG.info("[Event:Service%(operation)sCompleted]",
{'operation': operation.capitalize()}) {'operation': operation.capitalize()})
LOG.event('Completed %s network function.' % operation, LOG.event('Completed %s network function.' % operation,
stats_type=nfp_constants.response_event) stats_type=nfp_constants.response_event)
@ -1751,8 +1747,8 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler):
network_function = nfp_context['network_function'] network_function = nfp_context['network_function']
updated_network_function = {'status': nfp_constants.ACTIVE} updated_network_function = {'status': nfp_constants.ACTIVE}
LOG.info(_LI("Applying user config is successfull moving " LOG.info("Applying user config is successfull moving "
"NF: %(network_function_id)s to ACTIVE"), "NF: %(network_function_id)s to ACTIVE",
{'network_function_id': network_function['id']}) {'network_function_id': network_function['id']})
with nfp_ctx_mgr.DbContextManager as dcm: with nfp_ctx_mgr.DbContextManager as dcm:
dcm.lock(self.db_session, self.db_handler.update_network_function, dcm.lock(self.db_session, self.db_handler.update_network_function,
@ -1760,7 +1756,7 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler):
updated_network_function) updated_network_function)
operation = nfp_context['log_context'].get('path') operation = nfp_context['log_context'].get('path')
LOG.info(_LI("[Event:Service%(operation)sCompleted]"), LOG.info("[Event:Service%(operation)sCompleted]",
{'operation': operation.capitalize()}) {'operation': operation.capitalize()})
LOG.event('Completed %s network function.' % operation, LOG.event('Completed %s network function.' % operation,
stats_type=nfp_constants.response_event) stats_type=nfp_constants.response_event)
@ -1787,10 +1783,10 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler):
config_status = self.config_driver.check_config_complete(nfp_context) config_status = self.config_driver.check_config_complete(nfp_context)
if config_status == nfp_constants.ERROR: if config_status == nfp_constants.ERROR:
LOG.info(_LI("Applying user config failed for " LOG.info("Applying user config failed for "
"NF: %(network_function_id)s"), { "NF: %(network_function_id)s", {
'network_function_id': 'network_function_id':
network_function['id']}) network_function['id']})
# Complete the original event APPLY_USER_CONFIG here # Complete the original event APPLY_USER_CONFIG here
event_desc = nfp_context.pop('event_desc', None) event_desc = nfp_context.pop('event_desc', None)
apply_config_event = self._controller.new_event( apply_config_event = self._controller.new_event(
@ -1833,8 +1829,8 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler):
network_function) network_function)
except Exception as err: except Exception as err:
# REVISIT: May be we need a count before removing the poll event # REVISIT: May be we need a count before removing the poll event
LOG.error(_LE("Error: %(err)s while verifying configuration " LOG.error("Error: %(err)s while verifying configuration "
"delete completion."), {'err': err}) "delete completion.", {'err': err})
self._create_event('USER_CONFIG_DELETE_FAILED', self._create_event('USER_CONFIG_DELETE_FAILED',
event_data=request_data, is_internal_event=True) event_data=request_data, is_internal_event=True)
self._controller.event_complete(event) self._controller.event_complete(event)
@ -1850,8 +1846,8 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler):
# Trigger RPC to notify the Create_Service caller with status # Trigger RPC to notify the Create_Service caller with status
elif config_status == nfp_constants.COMPLETED: elif config_status == nfp_constants.COMPLETED:
updated_network_function = {'status': nfp_constants.ACTIVE} updated_network_function = {'status': nfp_constants.ACTIVE}
LOG.info(_LI("Applying user config is successfull moving " LOG.info("Applying user config is successfull moving "
"NF:%(network_function_id)s to ACTIVE"), "NF:%(network_function_id)s to ACTIVE",
{'network_function_id': {'network_function_id':
request_data['network_function_id']}) request_data['network_function_id']})
with nfp_ctx_mgr.DbContextManager as dcm: with nfp_ctx_mgr.DbContextManager as dcm:
@ -1862,7 +1858,7 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler):
updated_network_function) updated_network_function)
operation = event.context['log_context'].get('path') operation = event.context['log_context'].get('path')
LOG.info(_LI("[Event:Service%(operation)sCompleted]"), LOG.info("[Event:Service%(operation)sCompleted]",
{'operation': operation.capitalize()}) {'operation': operation.capitalize()})
LOG.event('Completed %s network function.' % operation, LOG.event('Completed %s network function.' % operation,
stats_type=nfp_constants.response_event) stats_type=nfp_constants.response_event)
@ -1890,8 +1886,8 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler):
request_data['config_policy_id'], request_data['tenant_id']) request_data['config_policy_id'], request_data['tenant_id'])
except Exception as err: except Exception as err:
# REVISIT: May be we need a count before removing the poll event # REVISIT: May be we need a count before removing the poll event
LOG.error(_LE("Error: %(err)s while verifying configuration " LOG.error("Error: %(err)s while verifying configuration "
"delete completion."), {'err': err}) "delete completion.", {'err': err})
# self._create_event('USER_CONFIG_DELETE_FAILED', # self._create_event('USER_CONFIG_DELETE_FAILED',
# event_data=event_data, is_internal_event=True) # event_data=event_data, is_internal_event=True)
self._controller.event_complete(event) self._controller.event_complete(event)
@ -1954,8 +1950,8 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler):
self.db_handler.update_network_function, self.db_handler.update_network_function,
network_function_id, network_function_id,
network_function) network_function)
LOG.info(_LI("Applying user config is successfull moving " LOG.info("Applying user config is successfull moving "
"NF: %(network_function_id)s to ACTIVE"), "NF: %(network_function_id)s to ACTIVE",
{'network_function_id': {'network_function_id':
network_function_id}) network_function_id})
else: else:
@ -1981,8 +1977,8 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler):
def handle_update_user_config_failed(self, event): def handle_update_user_config_failed(self, event):
event_data = event.data event_data = event.data
network_function_id = event_data['network_function_id'] network_function_id = event_data['network_function_id']
LOG.error(_LE("NSO: updating user config failed, moving " LOG.error("NSO: updating user config failed, moving "
"network function %(network_function_id)s to ERROR"), "network function %(network_function_id)s to ERROR",
{'network_function_id': network_function_id}) {'network_function_id': network_function_id})
self.handle_user_config_failed(event) self.handle_user_config_failed(event)
@ -1998,7 +1994,7 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler):
updated_network_function) updated_network_function)
# Trigger RPC to notify the Create_Service caller with status # Trigger RPC to notify the Create_Service caller with status
operation = event.context['log_context'].get('path') operation = event.context['log_context'].get('path')
LOG.error(_LE("[Event:Service%(operation)sFailed]"), LOG.error("[Event:Service%(operation)sFailed]",
{'operation': operation.capitalize()}) {'operation': operation.capitalize()})
LOG.event('%s network function failed.' % operation.capitalize(), LOG.event('%s network function failed.' % operation.capitalize(),
stats_type=nfp_constants.error_event) stats_type=nfp_constants.error_event)
@ -2024,9 +2020,9 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler):
self.config_driver.is_update_config_supported( self.config_driver.is_update_config_supported(
request_data['service_type'])): request_data['service_type'])):
updated_network_function.update({'status': nfp_constants.ACTIVE}) updated_network_function.update({'status': nfp_constants.ACTIVE})
LOG.warning(_LW( LOG.warning(
"Failed to delete old stack id: %(stack_id)s in" "Failed to delete old stack id: %(stack_id)s in"
"firewall update case, Need to manually delete it"), "firewall update case, Need to manually delete it",
{"stack_id": request_data['config_policy_id']}) {"stack_id": request_data['config_policy_id']})
with nfp_ctx_mgr.DbContextManager as dcm: with nfp_ctx_mgr.DbContextManager as dcm:
@@ -2042,7 +2038,7 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler):
         results = event.result
         for result in results:
             if result.result.lower() != 'success':
-                LOG.error(_LE("Event: %(result_id)s failed"),
+                LOG.error("Event: %(result_id)s failed",
                           {'result_id': result.id})
         network_function_details = event.context
@@ -2065,7 +2061,7 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler):
         with nfp_ctx_mgr.DbContextManager:
             self.db_handler.delete_network_function(
                 self.db_session, nf['id'])
-        LOG.info(_LI("[Event:ServiceDeleteCompleted]"))
+        LOG.info("[Event:ServiceDeleteCompleted]")
         LOG.event("Completed delete network function.",
                   stats_type=nfp_constants.response_event)
@@ -2076,7 +2072,7 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler):
             nf_id=nf_id,
             service_type=service_type)
-        LOG.info(_LI("Deleted NF:%(nf_id)s "),
+        LOG.info("Deleted NF:%(nf_id)s ",
                  {'nf_id': nf['id']})
         self._controller.event_complete(event)
@@ -2095,7 +2091,7 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler):
         with nfp_ctx_mgr.DbContextManager:
             self.db_handler.delete_network_function(
                 self.db_session, nfi['network_function_id'])
-        LOG.info(_LI("[Event:ServiceDeleteCompleted]"))
+        LOG.info("[Event:ServiceDeleteCompleted]")
         LOG.event("Completed delete network function.",
                   stats_type=nfp_constants.response_event)
@@ -2105,7 +2101,7 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler):
             nf_id=nf_id,
             service_type=service_type)
-        LOG.info(_LI("Deleted NF:%(nf_id)s "),
+        LOG.info("Deleted NF:%(nf_id)s ",
                  {'nf_id': nf_id})
         # Inform delete service caller with delete completed RPC
@@ -2120,13 +2116,13 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler):
                 self.db_session, network_function_id)
             return network_function
         except nfp_exc.NetworkFunctionNotFound:
-            LOG.warning(_LW("Failed to retrieve Network Function details for"
-                            " %(network_function)s"),
+            LOG.warning("Failed to retrieve Network Function details for"
+                        " %(network_function)s",
                         {'network_function': network_function_id})
             return None
         except Exception:
-            LOG.exception(_LE("Failed to retrieve Network Function details for"
-                              " %(network_function)s"),
+            LOG.exception("Failed to retrieve Network Function details for"
+                          " %(network_function)s",
                           {'network_function': network_function_id})
             return None
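The hunk above also illustrates the logging split this module relies on: an expected NetworkFunctionNotFound miss is logged at WARNING with no traceback, while any other failure goes through LOG.exception, which records the traceback as well. A self-contained sketch of that shape, with a plain dict lookup standing in for the DB handler call (names here are illustrative):

    import logging

    logging.basicConfig(level=logging.DEBUG)
    LOG = logging.getLogger(__name__)

    def get_network_function(db, network_function_id):
        try:
            return db[network_function_id]  # stand-in for the DB handler lookup
        except KeyError:
            # Expected miss (parallels nfp_exc.NetworkFunctionNotFound):
            # WARNING, no traceback.
            LOG.warning("Failed to retrieve Network Function details for"
                        " %(network_function)s",
                        {'network_function': network_function_id})
            return None
        except Exception:
            # Anything else is unexpected: LOG.exception also records the
            # traceback, which plain LOG.error would not.
            LOG.exception("Failed to retrieve Network Function details for"
                          " %(network_function)s",
                          {'network_function': network_function_id})
            return None

    print(get_network_function({}, 'nf-123'))  # logs a warning, returns None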
@@ -2431,8 +2427,8 @@ class ServiceOrchestrator(nfp_api.NfpEventHandler):
                 self.db_session, port_id)
             return port_info
         except Exception:
-            LOG.exception(_LE("Failed to retrieve Port Info for"
-                              " %(port_id)s"),
+            LOG.exception("Failed to retrieve Port Info for"
+                          " %(port_id)s",
                           {'port_id': port_id})
             return None
@@ -2615,7 +2611,7 @@ class NSOConfiguratorRpcApi(object):
             config_tag)
         self._update_params(user_config_data,
                             config_params, operation='create')
-        LOG.info(_LI("Sending create heat config request to configurator "))
+        LOG.info("Sending create heat config request to configurator ")
         LOG.debug("Sending create heat config request to configurator "
                   "with config_params = %s",
                   config_params)
@@ -2632,7 +2628,7 @@ class NSOConfiguratorRpcApi(object):
             config_tag)
         self._update_params(user_config_data,
                             config_params, operation='delete')
-        LOG.info(_LI("Sending delete heat config request to configurator "))
+        LOG.info("Sending delete heat config request to configurator ")
         LOG.debug("Sending delete heat config request to configurator "
                   " with config_params = %s",
                   config_params)
@@ -2648,7 +2644,7 @@ class NSOConfiguratorRpcApi(object):
             config_tag)
         self._update_params(user_config_data,
                             config_params, operation='update')
-        LOG.info(_LI("Sending update heat config request to configurator. "))
+        LOG.info("Sending update heat config request to configurator. ")
         transport.send_request_to_configurator(self.conf,
                                                self.context,
@@ -2662,8 +2658,8 @@ class NSOConfiguratorRpcApi(object):
             config_tag)
         self._update_params(user_config_data,
                             config_params, operation='pt_add')
-        LOG.info(_LI("Sending Policy Target and heat config request to "
-                     "configurator ."))
+        LOG.info("Sending Policy Target and heat config request to "
+                 "configurator .")
         transport.send_request_to_configurator(self.conf,
                                                self.context,
@@ -2677,8 +2673,8 @@ class NSOConfiguratorRpcApi(object):
             config_tag)
         self._update_params(user_config_data,
                             config_params, operation='pt_remove')
-        LOG.info(_LI("Sending Policy Target remove heat config request to "
-                     "configurator. "))
+        LOG.info("Sending Policy Target remove heat config request to "
+                 "configurator. ")
         transport.send_request_to_configurator(self.conf,
                                                self.context,
@@ -2692,8 +2688,8 @@ class NSOConfiguratorRpcApi(object):
             config_tag)
         self._update_params(user_config_data,
                             config_params, operation='consumer_add')
-        LOG.info(_LI("Sending consumer and heat config request to "
-                     "configurator ."))
+        LOG.info("Sending consumer and heat config request to "
+                 "configurator .")
         transport.send_request_to_configurator(self.conf,
                                                self.context,
@@ -2707,8 +2703,8 @@ class NSOConfiguratorRpcApi(object):
             config_tag)
         self._update_params(user_config_data,
                             config_params, operation='consumer_remove')
-        LOG.info(_LI("Sending consumer remove heat config request to "
-                     "configurator ."))
+        LOG.info("Sending consumer remove heat config request to "
+                 "configurator .")
         transport.send_request_to_configurator(self.conf,
                                                self.context,
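Each of the NSOConfiguratorRpcApi helpers above follows the same shape: stamp the operation into the config params via _update_params, log an INFO line, then hand off to transport.send_request_to_configurator. A minimal sketch of that flow with the transport stubbed out (the function bodies and names below are illustrative, not the real gbpservice transport API):

    import logging

    logging.basicConfig(level=logging.INFO)
    LOG = logging.getLogger(__name__)

    def send_request_to_configurator(conf, context, config_params):
        # Stub standing in for the real transport helper.
        LOG.info("dispatching operation=%(operation)s",
                 {'operation': config_params.get('operation')})

    def policy_target_add_user_config(conf, context, config_params):
        # Mirrors _update_params(..., operation='pt_add') in spirit.
        config_params['operation'] = 'pt_add'
        # Lazy %-style arguments (as in the diff) defer formatting until the
        # record is actually emitted, unlike eager '%' interpolation.
        LOG.info("Sending Policy Target and heat config request to "
                 "configurator.")
        send_request_to_configurator(conf, context, config_params)

    policy_target_add_user_config(conf=None, context=None, config_params={})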