Remove log translations

Log messages are no longer being translated. This removes all use of
the _LE, _LI, and _LW translation markers to simplify logging and to
avoid confusion with new contributions.

See:
http://lists.openstack.org/pipermail/openstack-i18n/2016-November/002574.html
http://lists.openstack.org/pipermail/openstack-dev/2017-March/113365.html
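
For illustration, a minimal before/after sketch of the pattern being
removed, drawn from the hunks below (the "interface" value here is a
hypothetical stand-in for cfg.CONF.df.metadata_interface):

    from oslo_log import log
    from dragonflow._i18n import _, _LI   # _LI is deleted by this commit

    LOG = log.getLogger(__name__)
    interface = 'tap-metadata'  # hypothetical example value

    # Before: the log message is wrapped in a lazy-translation marker.
    LOG.info(_LI("Device %s already exists"), interface)

    # After: a plain string; oslo.log still formats arguments lazily.
    LOG.info("Device %s already exists", interface)

    # Unchanged: user-facing exception text keeps the "_" marker.
    raise ImportError(_("Class not found."))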

Change-Id: If51139eaa6f22165d052a6e6004ff52c93f536f3
shihanzhang 2017-03-21 12:44:10 +08:00
parent 4ee02ce4df
commit 89e2be40e2
54 changed files with 357 additions and 436 deletions

View File

@@ -27,16 +27,6 @@ _C = _translators.contextual_form
# The plural translation function using the name "_P"
_P = _translators.plural_form
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
def get_available_languages():
return oslo_i18n.get_available_languages(DOMAIN)

View File

@@ -15,6 +15,7 @@ import prettytable
import six
import textwrap
from dragonflow._i18n import _
from dragonflow.common import exceptions

View File

@@ -18,7 +18,6 @@ from neutron.common import config
from neutron.conf.agent.metadata import config as metadata_conf
from neutron import wsgi
from dragonflow._i18n import _LI
from dragonflow import conf as cfg
from dragonflow.controller import metadata_service_app
@@ -34,7 +33,7 @@ def environment_setup():
bridge = cfg.CONF.df.integration_bridge
interface = cfg.CONF.df.metadata_interface
if ip_lib.device_exists(interface):
LOG.info(_LI("Device %s already exists"), interface)
LOG.info("Device %s already exists", interface)
# Destroy the environment when the device exists.
# We can re-initialize the environment correctly.
environment_destroy()

View File

@@ -26,7 +26,7 @@ from oslo_utils import reflection
import six
from stevedore import driver
from dragonflow._i18n import _, _LE
from dragonflow._i18n import _
DF_PUBSUB_DRIVER_NAMESPACE = 'dragonflow.pubsub_driver'
DF_NB_DB_DRIVER_NAMESPACE = 'dragonflow.nb_db_driver'
@@ -72,10 +72,10 @@ def load_driver(driver_cfg, namespace):
try:
class_to_load = importutils.import_class(driver_cfg)
except (ImportError, ValueError):
LOG.error(_LE("Error loading class %(class)s by alias e: %(e)s"),
LOG.error("Error loading class %(class)s by alias e: %(e)s",
{'class': driver_cfg, 'e': e1_info},
exc_info=e1_info)
LOG.error(_LE("Error loading class by class name"),
LOG.error("Error loading class by class name",
exc_info=True)
raise ImportError(_("Class not found."))
return class_to_load()
@@ -109,7 +109,7 @@ class DFDaemon(object):
def daemonize(self, run):
if self.is_daemonize:
LOG.error(_LE("already daemonized"))
LOG.error("already daemonized")
return
self.is_daemonize = True
if self.is_not_light:
@@ -194,8 +194,8 @@ class wrap_func_retry(object):
if remaining > 0:
ectxt.reraise = not self._is_exception_expected(e)
else:
LOG.exception(_LE('Function exceeded '
'retry limit.'))
LOG.exception('Function exceeded '
'retry limit.')
LOG.debug("Performing retry for function %s",
reflection.get_callable_name(f))
# NOTE(vsergeyev): We are using patched time module, so

View File

@@ -22,7 +22,6 @@ from ryu.lib.packet import arp
from ryu.lib.packet import packet
from ryu.ofproto import ether
from dragonflow._i18n import _LE, _LI, _LW
from dragonflow import conf as cfg
from dragonflow.controller.common import constants as controller_const
from dragonflow.controller.common import utils
@@ -58,7 +57,7 @@ class ActivePortDetectionApp(df_base_app.DFlowApp):
pkt = packet.Packet(msg.data)
arp_pkt = pkt.get_protocol(arp.arp)
if arp_pkt is None:
LOG.error(_LE("No support for non ARP protocol"))
LOG.error("No support for non ARP protocol")
return
if (arp_pkt.opcode == arp.ARP_REQUEST and
@@ -79,8 +78,8 @@ class ActivePortDetectionApp(df_base_app.DFlowApp):
ips.add(ip)
else:
# IPv6 addresses are not supported yet
LOG.info(_LI("Don't support IPv6 addresses for now. IPv6"
" address %s will be ignored."), ip)
LOG.info("Don't support IPv6 addresses for now. IPv6"
" address %s will be ignored.", ip)
return ips
@@ -180,8 +179,8 @@ class ActivePortDetectionApp(df_base_app.DFlowApp):
found_lport = lport
break
if found_lport is None:
LOG.info(_LI("There is no logical port matched this "
"ofport(%s)."), ofport)
LOG.info("There is no logical port matched this "
"ofport(%s).", ofport)
return
network_id = found_lport.get_lswitch_id()
@@ -191,8 +190,8 @@ class ActivePortDetectionApp(df_base_app.DFlowApp):
old_active_port = self.db_store.get_active_port(key)
if (not old_active_port or self._if_old_active_port_need_update(
old_active_port, ip, mac, found_lport)):
LOG.info(_LI("Detected new active node. ip=%(ip)s, "
"mac=%(mac)s, lport_id=%(lport_id)s"),
LOG.info("Detected new active node. ip=%(ip)s, "
"mac=%(mac)s, lport_id=%(lport_id)s",
{'ip': ip, 'mac': mac, 'lport_id': found_lport_id})
if old_active_port:
self.nb_api.update_active_port(
@@ -274,8 +273,8 @@ class ActivePortDetectionApp(df_base_app.DFlowApp):
ip,
lport.get_external_value('ofport'))
else:
LOG.warning(_LW("Couldn't find a valid mac to detect active "
"port in lport %s."), lport.get_id())
LOG.warning("Couldn't find a valid mac to detect active "
"port in lport %s.", lport.get_id())
def _periodic_send_arp_request(self):
"""Spawn a thread to periodically to detect active node among

View File

@@ -15,7 +15,6 @@
from oslo_log import log
from dragonflow._i18n import _LI
from dragonflow.controller.common import constants as const
from dragonflow.controller.common import cookies
from dragonflow.controller.common import utils as cookie
@@ -51,12 +50,12 @@ class Aging(df_base_app.DFlowApp, ofswitch.OpenFlowSwitchMixin):
be called
"""
def ovs_sync_started(self):
LOG.info(_LI("start aging"))
LOG.info("start aging")
canary_flow = self.get_canary_flow()
if not canary_flow:
self.do_aging = False
cookie.set_aging_cookie(const.GLOBAL_INIT_AGING_COOKIE)
LOG.info(_LI("no canary table, don't do aging"))
LOG.info("no canary table, don't do aging")
else:
self.do_aging = True
self._renew_aging_cookie(canary_flow.cookie)
@@ -69,14 +68,14 @@ class Aging(df_base_app.DFlowApp, ofswitch.OpenFlowSwitchMixin):
def ovs_sync_finished(self):
if self.do_aging:
self._start_aging()
LOG.info(_LI("do aging"))
LOG.info("do aging")
def _start_aging(self):
old_cookie = cookie.get_xor_cookie(cookie.get_aging_cookie())
self.cleanup_flows(old_cookie, self.aging_mask)
def _renew_aging_cookie(self, cur_c):
LOG.info(_LI("renew cookie, current cookie is %x"), cur_c)
LOG.info("renew cookie, current cookie is %x", cur_c)
new_c = cookie.get_xor_cookie(cur_c)
cookie.set_aging_cookie(new_c & self.aging_mask)
return cookie.get_aging_cookie()

View File

@@ -15,7 +15,7 @@
from oslo_log import log
from ryu.ofproto import ether
from dragonflow._i18n import _LI
from dragonflow._i18n import _
from dragonflow import conf as cfg
from dragonflow.controller.common import constants as const
from dragonflow.controller import df_base_app
@@ -34,7 +34,7 @@ class ChassisSNATApp(df_base_app.DFlowApp, snat_mixin.SNATApp_mixin):
"""
def __init__(self, *args, **kwargs):
super(ChassisSNATApp, self).__init__(*args, **kwargs)
LOG.info(_LI("Loading SNAT application ... "))
LOG.info("Loading SNAT application ... ")
self.external_network_bridge = (
cfg.CONF.df_dnat_app.external_network_bridge)
self.ex_peer_patch_port = (
@@ -91,21 +91,21 @@ class ChassisSNATApp(df_base_app.DFlowApp, snat_mixin.SNATApp_mixin):
:param lport: local logical port which is being removed
"""
LOG.info(_LI("SNAT application: remove local port %(lport)s"),
LOG.info("SNAT application: remove local port %(lport)s",
{'lport': lport})
if self.external_host_mac is not None:
# remove VM specific flows
if self.is_data_port(lport):
self.remove_lport_based_flows(lport)
else:
LOG.info(_LI('SNAT application: not a compute port, skipped'))
LOG.info('SNAT application: not a compute port, skipped')
def add_local_port(self, lport):
"""override add_local_port method to install sNAT related flows
:param lport: local logical port which is being added
"""
LOG.info(_LI("SNAT application: add local port %(lport)s"),
LOG.info("SNAT application: add local port %(lport)s",
{'lport': lport})
if self.external_host_mac is not None:
@@ -115,7 +115,7 @@ class ChassisSNATApp(df_base_app.DFlowApp, snat_mixin.SNATApp_mixin):
self.install_lport_based_flows(lport)
else:
LOG.info(_LI('SNAT application: not a compute port, skipped'))
LOG.info('SNAT application: not a compute port, skipped')
def install_strategy_based_flows(self):

View File

@@ -13,7 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
from dragonflow._i18n import _LI
from dragonflow.controller.common import constants as const
from dragonflow.controller import df_base_app
from oslo_log import log
@@ -26,8 +25,8 @@ class ClassifierApp(df_base_app.DFlowApp):
def add_local_port(self, lport):
ofport = lport.get_external_value('ofport')
LOG.info(_LI("Add local ovs port %(ovs_port)s, logical port "
"%(lport)s for classification"),
LOG.info("Add local ovs port %(ovs_port)s, logical port "
"%(lport)s for classification",
{'ovs_port': ofport, 'lport': lport})
self._make_ingress_classification_flow(lport, ofport)
self._make_ingress_dispatch_flow(lport, ofport)

View File

@@ -14,7 +14,7 @@ import collections
from oslo_log import log
from dragonflow._i18n import _LI, _LE
from dragonflow._i18n import _
from dragonflow.common import exceptions
@@ -73,24 +73,24 @@ def register_cookie_bits(name, length, is_local=False, app_name=None):
shift = _cookie_max_bits_global
max_bits = _cookie_max_bits_local
if not app_name:
raise TypeError(_LE("app_name must be provided "
"if is_local is True"))
raise TypeError(_(
"app_name must be provided if is_local is True"))
if (app_name, name) in _cookies:
LOG.info(_LI("Cookie for %(app_name)s/%(name)s already registered."),
LOG.info("Cookie for %(app_name)s/%(name)s already registered.",
{"app_name": app_name, "name": name})
return
start = _cookies_used_bits[app_name]
if start + length > max_bits:
LOG.error(_LE("Out of cookie space: "
"offset: %(offset)d length: %(length)d"),
LOG.error("Out of cookie space: "
"offset: %(offset)d length: %(length)d",
{"offset": start, "length": length})
raise exceptions.OutOfCookieSpaceException()
_cookies_used_bits[app_name] = start + length
start += shift
mask = (_cookie_mask_all >> (_cookie_max_bits - length)) << start
_cookies[(app_name, name)] = CookieBitPair(start, mask)
LOG.info(_LI("Registered cookie for %(app_name)s/%(name)s, "
"mask: %(mask)x, offset: %(offset)d, length: %(length)d"),
LOG.info("Registered cookie for %(app_name)s/%(name)s, "
"mask: %(mask)x, offset: %(offset)d, length: %(length)d",
{"app_name": app_name, "name": name,
"mask": mask, "offset": start, "length": length})
@@ -117,8 +117,8 @@ def get_cookie(name, value, old_cookie=0, old_mask=0,
app_name = GLOBAL_APP_NAME
else:
if not app_name:
raise TypeError(_LE("app_name must be provided "
"if is_local is True"))
raise TypeError(
_("app_name must be provided if is_local is True"))
pair = _cookies[(app_name, name)]
mask_overlap = old_mask & pair.mask
if mask_overlap != 0:

View File

@@ -17,7 +17,6 @@ from neutron.agent.common import utils
from oslo_log import log
from ryu.lib import addrconv
from dragonflow._i18n import _LE
from dragonflow.common import exceptions
from dragonflow.controller.common import constants as const
from dragonflow.controller.common import cookies
@@ -83,4 +82,4 @@ def delete_conntrack_entries_by_filter(ethertype='IPv4', protocol=None,
extra_ok_codes=[1])
LOG.debug("Successfully executed conntrack command %s", cmd)
except RuntimeError:
LOG.exception(_LE("Failed execute conntrack command %s"), cmd)
LOG.exception("Failed execute conntrack command %s", cmd)

View File

@@ -18,7 +18,7 @@ from ryu.lib.packet import ethernet
from ryu.lib.packet import packet
from ryu.ofproto import ether
from dragonflow._i18n import _LE
from dragonflow._i18n import _
from dragonflow.controller.common import constants
from dragonflow.controller.common import cookies
from dragonflow.db import db_store2
@@ -214,8 +214,7 @@ def register_event(model, event):
'''
if event not in model.get_events():
raise RuntimeError(
_LE('{0} is not an event of {1}').format(event, model),
)
_('{0} is not an event of {1}').format(event, model))
def decorator(func):
if not hasattr(func, '_register_events'):

View File

@@ -22,7 +22,6 @@ from oslo_log import log
from ryu.base import app_manager
from ryu import cfg as ryu_cfg
from dragonflow._i18n import _LI, _LW
from dragonflow.common import constants
from dragonflow.common import utils as df_utils
from dragonflow import conf as cfg
@@ -286,7 +285,7 @@ class DfLocalController(object):
self.sync_finished = True
except Exception as e:
self.sync_finished = False
LOG.warning(_LW("run_db_poll - suppressing exception"))
LOG.warning("run_db_poll - suppressing exception")
LOG.exception(e)
def update_chassis(self, chassis):
@@ -301,7 +300,7 @@ class DfLocalController(object):
self._logical_port_process(port, None)
def delete_chassis(self, chassis):
LOG.info(_LI("Deleting remote ports in remote chassis %s"), chassis.id)
LOG.info("Deleting remote ports in remote chassis %s", chassis.id)
# Chassis is deleted, there is no reason to keep the remote port
# in it.
remote_ports = self.db_store.get_ports_by_chassis(chassis.id)
@@ -316,15 +315,15 @@ class DfLocalController(object):
lswitch.inner_obj):
return
LOG.info(_LI("Adding/Updating Logical Switch = %s"), lswitch)
LOG.info("Adding/Updating Logical Switch = %s", lswitch)
self.db_store.set_lswitch(lswitch.get_id(), lswitch)
self.open_flow_app.notify_update_logical_switch(lswitch)
def delete_lswitch(self, lswitch_id):
lswitch = self.db_store.get_lswitch(lswitch_id)
LOG.info(_LI("Removing Logical Switch = %s"), lswitch_id)
LOG.info("Removing Logical Switch = %s", lswitch_id)
if lswitch is None:
LOG.warning(_LW("Try to delete a nonexistent lswitch(%s)"),
LOG.warning("Try to delete a nonexistent lswitch(%s)",
lswitch_id)
return
self.open_flow_app.notify_remove_logical_switch(lswitch)
@@ -352,7 +351,7 @@ class DfLocalController(object):
def _logical_port_process(self, lport, original_lport=None):
lswitch = self.db_store.get_lswitch(lport.get_lswitch_id())
if not lswitch:
LOG.warning(_LW("Could not find lswitch for lport: %s"),
LOG.warning("Could not find lswitch for lport: %s",
lport.get_id())
return
lport.set_external_value('local_network_id',
@@ -376,18 +375,17 @@ class DfLocalController(object):
if ofport:
lport.set_external_value('ofport', ofport)
if original_lport is None:
LOG.info(_LI("Adding new local logical port = %s"), lport)
LOG.info("Adding new local logical port = %s", lport)
self.open_flow_app.notify_add_local_port(lport)
else:
LOG.info(_LI("Updating local logical port = %(port)s, "
"original port = %(original_port)s"),
LOG.info("Updating local logical port = %(port)s, "
"original port = %(original_port)s",
{'port': lport,
'original_port': original_lport})
self.open_flow_app.notify_update_local_port(lport,
original_lport)
else:
LOG.info(_LI("Local logical port %s was not created yet"),
lport)
LOG.info("Local logical port %s was not created yet", lport)
return
else:
lport.set_external_value('is_local', False)
@@ -411,11 +409,11 @@ class DfLocalController(object):
if ofport:
lport.set_external_value('ofport', ofport)
if original_lport is None:
LOG.info(_LI("Adding new remote logical port = %s"), lport)
LOG.info("Adding new remote logical port = %s", lport)
self.open_flow_app.notify_add_remote_port(lport)
else:
LOG.info(_LI("Updating remote logical port = %(port)s, "
"original port = %(original_port)s"),
LOG.info("Updating remote logical port = %(port)s, "
"original port = %(original_port)s",
{'port': lport,
'original_port': original_lport})
self.open_flow_app.notify_update_remote_port(
@@ -423,8 +421,7 @@ class DfLocalController(object):
else:
# The tunnel port online event will update the remote logical
# port. Log this warning first.
LOG.warning(_LW("No tunnel for remote logical port %s"),
lport)
LOG.warning("No tunnel for remote logical port %s", lport)
return
if original_lport is None:
@@ -451,12 +448,12 @@ class DfLocalController(object):
if lport is None:
return
if lport.get_external_value('is_local'):
LOG.info(_LI("Removing local logical port = %s"), lport)
LOG.info("Removing local logical port = %s", lport)
if lport.get_external_value('ofport') is not None:
self.open_flow_app.notify_remove_local_port(lport)
self.db_store.delete_port(lport.get_id(), True)
else:
LOG.info(_LI("Removing remote logical port = %s"), lport)
LOG.info("Removing remote logical port = %s", lport)
if lport.get_external_value('ofport') is not None:
self.open_flow_app.notify_remove_remote_port(lport)
self.db_store.delete_port(lport.get_id(), False)
@@ -475,17 +472,17 @@ class DfLocalController(object):
def delete_lrouter(self, lrouter_id):
router = self.db_store.get_router(lrouter_id)
if router is None:
LOG.warning(_LW("Try to delete a nonexistent router(%s)"),
LOG.warning("Try to delete a nonexistent router(%s)",
lrouter_id)
return
LOG.info(_LI("Removing router = %s"), lrouter_id)
LOG.info("Removing router = %s", lrouter_id)
self.open_flow_app.notify_delete_router(router)
self.db_store.delete_router(lrouter_id)
def update_secgroup(self, secgroup):
old_secgroup = self.db_store.get_security_group(secgroup.get_id())
if old_secgroup is None:
LOG.info(_LI("Security Group created = %s"), secgroup)
LOG.info("Security Group created = %s", secgroup)
self._add_new_security_group(secgroup)
return
if not df_utils.is_valid_version(
@@ -576,12 +573,12 @@ class DfLocalController(object):
self.db_store.delete_security_group(secgroup.get_id())
def _add_new_security_group_rule(self, secgroup, secgroup_rule):
LOG.info(_LI("Adding new secgroup rule = %s"), secgroup_rule)
LOG.info("Adding new secgroup rule = %s", secgroup_rule)
self.open_flow_app.notify_add_security_group_rule(
secgroup, secgroup_rule)
def _delete_security_group_rule(self, secgroup, secgroup_rule):
LOG.info(_LI("Removing secgroup rule = %s"), secgroup_rule)
LOG.info("Removing secgroup rule = %s", secgroup_rule)
self.open_flow_app.notify_remove_security_group_rule(
secgroup, secgroup_rule)
@@ -609,18 +606,18 @@ class DfLocalController(object):
if not floatingip:
return
self.open_flow_app.notify_delete_floatingip(floatingip)
LOG.info(_LI("Floatingip is deleted. Floatingip = %s"), floatingip)
LOG.info("Floatingip is deleted. Floatingip = %s", floatingip)
self.db_store.delete_floatingip(floatingip_id)
def update_publisher(self, publisher):
self.db_store.update_publisher(publisher.get_id(), publisher)
LOG.info(_LI('Registering to new publisher: %s'), str(publisher))
LOG.info('Registering to new publisher: %s', str(publisher))
self.nb_api.subscriber.register_listen_address(publisher.get_uri())
def delete_publisher(self, uuid):
publisher = self.db_store.get_publisher(uuid)
if publisher:
LOG.info(_LI('Deleting publisher: %s'), str(publisher))
LOG.info('Deleting publisher: %s', str(publisher))
self.nb_api.subscriber.unregister_listen_address(
publisher.get_uri()
)
@@ -629,14 +626,14 @@ class DfLocalController(object):
def _associate_floatingip(self, floatingip):
self.db_store.update_floatingip(floatingip.get_id(), floatingip)
self.open_flow_app.notify_associate_floatingip(floatingip)
LOG.info(_LI("Floatingip is associated with port. Floatingip = %s"),
LOG.info("Floatingip is associated with port. Floatingip = %s",
floatingip)
def _disassociate_floatingip(self, floatingip):
self.db_store.delete_floatingip(floatingip.get_id())
self.open_flow_app.notify_disassociate_floatingip(floatingip)
LOG.info(_LI("Floatingip is disassociated from port. "
"Floatingip = %s"), floatingip)
LOG.info("Floatingip is disassociated from port. "
"Floatingip = %s", floatingip)
def _update_floatingip(self, old_floatingip, new_floatingip):
if new_floatingip.get_lport_id() != old_floatingip.get_lport_id():
@@ -663,8 +660,8 @@ class DfLocalController(object):
lport_id = active_port.get_detected_lport_id()
lport = self.db_store.get_local_port(lport_id,
active_port.get_topic())
LOG.info(_LI("Active port updated. Active port = %(new)s, "
"old active port = %(old)s"),
LOG.info("Active port updated. Active port = %(new)s, "
"old active port = %(old)s",
{'new': active_port, 'old': old_active_port})
self.db_store.update_active_port(active_port.get_id(),
active_port)
@@ -672,14 +669,14 @@ class DfLocalController(object):
self.open_flow_app.notify_update_active_port(active_port,
old_active_port)
else:
LOG.info(_LI("The logical port is not ready for the "
"active node: %s"), active_port)
LOG.info("The logical port is not ready for the "
"active node: %s", active_port)
def delete_activeport(self, active_port_key):
active_port = self.db_store.get_active_port(active_port_key)
if active_port is not None:
self.db_store.delete_active_port(active_port_key)
LOG.info(_LI("Active node was removed. Active node = %s"),
LOG.info("Active node was removed. Active node = %s",
active_port)
lport_id = active_port.get_detected_lport_id()
lport = self.db_store.get_local_port(lport_id,

View File

@@ -21,7 +21,6 @@ from neutron.common import config as common_config
from oslo_log import log as logging
from oslo_serialization import jsonutils
from dragonflow._i18n import _LW
from dragonflow.common import exceptions
from dragonflow.common import utils as df_utils
from dragonflow import conf as cfg
@@ -93,9 +92,8 @@ class PublisherService(object):
self._update_timestamp_in_db()
eventlet.sleep(0)
except Exception as e:
LOG.warning(_LW("Exception in main loop: {}, {}").format(
e, traceback.format_exc()
))
LOG.warning("Exception in main loop: {}, {}").format(
e, traceback.format_exc())
# Ignore
def _update_timestamp_in_db(self):

View File

@@ -33,7 +33,6 @@ from ryu.ofproto import ether
from dragonflow.common import utils as df_utils
from dragonflow import conf as cfg
from dragonflow._i18n import _LI, _LE, _LW
from dragonflow.controller.common import constants as const
from dragonflow.controller import df_base_app
@@ -81,14 +80,14 @@ class DHCPApp(df_base_app.DFlowApp):
pkt_ip = pkt.get_protocol(ipv4.ipv4)
if not pkt_ip:
LOG.error(_LE("No support for non IPv4 protocol"))
LOG.error("No support for non IPv4 protocol")
return
unique_key = msg.match.get('reg6')
port_data = self.unique_key_to_dhcp_app_port_data.get(unique_key)
if not port_data:
LOG.error(
_LE("No lport found for unique key %s for dhcp req"),
"No lport found for unique key %s for dhcp req",
unique_key)
return
@@ -97,18 +96,18 @@ class DHCPApp(df_base_app.DFlowApp):
self._block_port_dhcp_traffic(
unique_key,
self.block_hard_timeout)
LOG.warning(_LW("pass rate limit for %(port_id)s blocking DHCP "
"traffic for %(time)s sec"),
LOG.warning("pass rate limit for %(port_id)s blocking DHCP "
"traffic for %(time)s sec",
{'port_id': lport.get_id(),
'time': self.block_hard_timeout})
return
if not self.db_store.get_port(lport.get_id()):
LOG.error(_LE("Port %s no longer found."), lport.get_id())
LOG.error("Port %s no longer found.", lport.get_id())
return
try:
self._handle_dhcp_request(pkt, lport)
except Exception:
LOG.exception(_LE("Unable to handle packet %s"), msg)
LOG.exception("Unable to handle packet %s", msg)
def _handle_dhcp_request(self, packet, lport):
dhcp_packet = packet.get_protocol(dhcp.dhcp)
@@ -120,8 +119,8 @@ class DHCPApp(df_base_app.DFlowApp):
dhcp_packet,
dhcp.DHCP_OFFER,
lport)
LOG.info(_LI("sending DHCP offer for port IP %(port_ip)s "
"port id %(port_id)s"),
LOG.info("sending DHCP offer for port IP %(port_ip)s "
"port id %(port_id)s",
{'port_ip': lport.get_ip(), 'port_id': lport.get_id()})
elif dhcp_message_type == dhcp.DHCP_REQUEST:
send_packet = self._create_dhcp_packet(
@@ -129,12 +128,12 @@ class DHCPApp(df_base_app.DFlowApp):
dhcp_packet,
dhcp.DHCP_ACK,
lport)
LOG.info(_LI("sending DHCP ACK for port IP %(port_ip)s "
"port id %(tunnel_id)s"),
LOG.info("sending DHCP ACK for port IP %(port_ip)s "
"port id %(tunnel_id)s",
{'port_ip': lport.get_ip(),
'tunnel_id': lport.get_id()})
else:
LOG.error(_LE("DHCP message type %d not handled"),
LOG.error("DHCP message type %d not handled",
dhcp_message_type)
if send_packet:
unique_key = lport.get_unique_key()
@@ -146,7 +145,7 @@ class DHCPApp(df_base_app.DFlowApp):
subnet = self._get_subnet_by_port(lport)
if subnet is None:
LOG.error(_LE("No subnet found for port <%s>"), lport.get_id())
LOG.error("No subnet found for port <%s>", lport.get_id())
return
pkt_type_packed = struct.pack('!B', pkt_type)
@@ -281,7 +280,7 @@ class DHCPApp(df_base_app.DFlowApp):
subnet = self._get_subnet_by_port(lport)
if subnet:
return subnet.enable_dhcp()
LOG.warning(_LW("No subnet found for port %s"), lport.get_id())
LOG.warning("No subnet found for port %s", lport.get_id())
return False
def _get_port_mtu(self, lport):
@@ -300,7 +299,7 @@ class DHCPApp(df_base_app.DFlowApp):
def remove_local_port(self, lport):
if not netaddr.valid_ipv4(lport.get_ip()):
LOG.warning(_LW("No support for non IPv4 protocol"))
LOG.warning("No support for non IPv4 protocol")
return
unique_key = lport.get_unique_key()
@@ -323,7 +322,7 @@ class DHCPApp(df_base_app.DFlowApp):
def add_local_port(self, lport):
if not netaddr.valid_ipv4(lport.get_ip()):
LOG.warning(_LW("No support for non IPv4 protocol"))
LOG.warning("No support for non IPv4 protocol")
return
if not lport.is_vm_port():
@@ -347,7 +346,7 @@ class DHCPApp(df_base_app.DFlowApp):
self.unique_key_to_dhcp_app_port_data[unique_key] = (port_rate_limiter,
lport)
LOG.info(_LI("Register VM as DHCP client::port <%s>"), lport.get_id())
LOG.info("Register VM as DHCP client::port <%s>", lport.get_id())
parser = self.parser
ofproto = self.ofproto

View File

@@ -13,7 +13,7 @@
from oslo_log import log
from oslo_utils import importutils
from dragonflow._i18n import _, _LE
from dragonflow._i18n import _
from dragonflow.common import exceptions
LOG = log.getLogger(__name__)
@@ -34,7 +34,7 @@ class AppDispatcher(object):
app = app_class(*args, **kwargs)
self.apps.append(app)
except ImportError as e:
LOG.exception(_LE("Error loading application by class, %s"), e)
LOG.exception("Error loading application by class, %s", e)
raise ImportError(_("Application class not found."))
def dispatch(self, method, *args, **kwargs):
@@ -46,8 +46,8 @@ class AppDispatcher(object):
handler(*args, **kwargs)
except Exception as e:
app_name = app.__class__.__name__
LOG.exception(_LE("Dragonflow application '%(name)s' "
"failed in %(method)s"),
LOG.exception("Dragonflow application '%(name)s' "
"failed in %(method)s",
{'name': app_name, 'method': method})
errors.append(e)

View File

@@ -25,7 +25,6 @@ from ryu.lib.packet import ipv4
from ryu.lib.packet import packet
from ryu.ofproto import ether
from dragonflow._i18n import _LW
from dragonflow.common import utils as df_utils
from dragonflow import conf as cfg
from dragonflow.controller.common import arp_responder
@@ -91,8 +90,8 @@ class DNATApp(df_base_app.DFlowApp):
LOG.debug("Get an invalid TTL packet at table %s",
const.INGRESS_NAT_TABLE)
if self.ingress_ttl_invalid_handler_rate_limit():
LOG.warning(_LW("Get more than %(rate)s TTL invalid "
"packets per second at table %(table)s"),
LOG.warning("Get more than %(rate)s TTL invalid "
"packets per second at table %(table)s",
{'rate': self.conf.dnat_ttl_invalid_max_rate,
'table': const.INGRESS_NAT_TABLE})
return
@@ -104,8 +103,8 @@ class DNATApp(df_base_app.DFlowApp):
return
if self.ingress_icmp_error_rate_limit():
LOG.warning(_LW("Get more than %(rate)s ICMP error messages "
"per second at table %(table)s"),
LOG.warning("Get more than %(rate)s ICMP error messages "
"per second at table %(table)s",
{'rate': self.conf.dnat_icmp_error_max_rate,
'table': const.INGRESS_NAT_TABLE})
return
@@ -123,8 +122,8 @@ class DNATApp(df_base_app.DFlowApp):
LOG.debug("Get an invalid TTL packet at table %s",
const.EGRESS_NAT_TABLE)
if self.egress_ttl_invalid_handler_rate_limit():
LOG.warning(_LW("Get more than %(rate)s TTL invalid "
"packets per second at table %(table)s"),
LOG.warning("Get more than %(rate)s TTL invalid "
"packets per second at table %(table)s",
{'rate': self.conf.dnat_ttl_invalid_max_rate,
'table': const.EGRESS_NAT_TABLE})
return
@@ -139,14 +138,14 @@ class DNATApp(df_base_app.DFlowApp):
unique_key = msg.match.get('reg6')
self.dispatch_packet(icmp_ttl_pkt, unique_key)
else:
LOG.warning(_LW("The invalid TTL packet's destination mac %s "
"can't be recognized."), e_pkt.src)
LOG.warning("The invalid TTL packet's destination mac %s "
"can't be recognized.", e_pkt.src)
return
if self.external_bridge_mac:
if self.ingress_icmp_error_rate_limit():
LOG.warning(_LW("Get more than %(rate)s ICMP error messages "
"per second at table %(table)s"),
LOG.warning("Get more than %(rate)s ICMP error messages "
"per second at table %(table)s",
{'rate': self.conf.dnat_icmp_error_max_rate,
'table': const.INGRESS_NAT_TABLE})
return

View File

@@ -21,7 +21,6 @@ from ryu.lib.packet import ipv6
from ryu.lib.packet import packet
from ryu.ofproto import ether
from dragonflow._i18n import _LE
from dragonflow.controller.common import constants as const
from dragonflow.controller import df_base_app
from dragonflow.controller import l3_app_base
@@ -51,14 +50,14 @@ class L3App(df_base_app.DFlowApp, l3_app_base.L3AppMixin):
pkt = packet.Packet(msg.data)
pkt_ip = pkt.get_protocol(ipv4.ipv4) or pkt.get_protocol(ipv6.ipv6)
if pkt_ip is None:
LOG.error(_LE("Received Non IP Packet"))
LOG.error("Received Non IP Packet")
return
pkt_ethernet = pkt.get_protocol(ethernet.ethernet)
network_id = msg.match.get('metadata')
try:
self._get_route(pkt_ip, pkt_ethernet, network_id, msg)
except Exception as e:
LOG.error(_LE("L3 App PacketIn exception raised"))
LOG.error("L3 App PacketIn exception raised")
LOG.error(e)
def _get_route(self, pkt_ip, pkt_ethernet, network_id, msg):

View File

@@ -26,7 +26,6 @@ from ryu.lib.packet import tcp
from ryu.lib.packet import udp
from ryu.ofproto import ether
from dragonflow._i18n import _LI, _LW
from dragonflow.common import exceptions
from dragonflow.common import utils as df_utils
from dragonflow import conf as cfg
@@ -74,11 +73,10 @@ class L3AppMixin(object):
LOG.debug("Get an invalid TTL packet at table %s",
const.L3_LOOKUP_TABLE)
if self.ttl_invalid_handler_rate_limit():
LOG.warning(
_LW("Get more than %(rate)s TTL invalid "
"packets per second at table %(table)s"),
{'rate': self.conf.router_ttl_invalid_max_rate,
'table': const.L3_LOOKUP_TABLE})
LOG.warning("Get more than %(rate)s TTL invalid packets per "
"second at table %(table)s",
{'rate': self.conf.router_ttl_invalid_max_rate,
'table': const.L3_LOOKUP_TABLE})
return True
pkt = packet.Packet(msg.data)
@@ -91,8 +89,8 @@ class L3AppMixin(object):
unique_key = msg.match.get('reg6')
self.dispatch_packet(icmp_ttl_pkt, unique_key)
else:
LOG.warning(_LW("The invalid TTL packet's destination mac %s "
"can't be recognized."), e_pkt.dst)
LOG.warning("The invalid TTL packet's destination mac %s "
"can't be recognized.", e_pkt.dst)
return True
if msg.match.get('reg7'):
@@ -102,8 +100,8 @@ class L3AppMixin(object):
# concrete.
if self.port_icmp_unreach_respond_rate_limit():
LOG.warning(
_LW("Get more than %(rate)s packets to router port "
"per second at table %(table)s"),
"Get more than %(rate)s packets to router port "
"per second at table %(table)s",
{'rate': self.conf.router_port_unreach_max_rate,
'table': const.L3_LOOKUP_TABLE})
return True
@@ -127,11 +125,11 @@ class L3AppMixin(object):
def router_updated(self, router, original_router):
if not original_router:
LOG.info(_LI("Logical Router created = %s"), router)
LOG.info("Logical Router created = %s", router)
self._add_new_lrouter(router)
return
LOG.info(_LI("Logical router updated = %s"), router)
LOG.info("Logical router updated = %s", router)
self._update_router_interfaces(original_router, router)
self._update_router_attributes(original_router, router)
@@ -236,7 +234,7 @@ class L3AppMixin(object):
@param lport_mac: The mac address of lport which will act as nexthop
@param route: The extra route dict
"""
LOG.info(_LI('Add extra route %(route)s to router'), route)
LOG.info('Add extra route %(route)s to router', route)
ofproto = self.ofproto
parser = self.parser
@@ -276,7 +274,7 @@ class L3AppMixin(object):
@param router_if_mac: The mac address of related router port
@param route: The extra route dict
"""
LOG.info(_LI('Delete extra route %(route)s from router'), route)
LOG.info('Delete extra route %(route)s from router', route)
ofproto = self.ofproto
@@ -365,7 +363,7 @@ class L3AppMixin(object):
return match
def _add_new_router_port(self, router, router_port):
LOG.info(_LI("Adding new logical router interface = %s"),
LOG.info("Adding new logical router interface = %s",
router_port)
local_network_id = self.db_store.get_unique_key_by_id(
models.LogicalSwitch.table_name, router_port.get_lswitch_id())
@@ -433,7 +431,7 @@ class L3AppMixin(object):
self._add_subnet_send_to_snat(local_network_id, mac, tunnel_key)
def _delete_router_port(self, router, router_port):
LOG.info(_LI("Removing logical router interface = %s"),
LOG.info("Removing logical router interface = %s",
router_port)
local_network_id = self.db_store.get_unique_key_by_id(
models.LogicalSwitch.table_name, router_port.get_lswitch_id())

View File

@@ -28,7 +28,7 @@ import six
import six.moves.urllib.parse as urlparse
import webob
from dragonflow._i18n import _, _LW, _LE
from dragonflow._i18n import _
from dragonflow.common import exceptions
from dragonflow.common import utils as df_utils
from dragonflow import conf as cfg
@@ -412,7 +412,7 @@ class BaseMetadataProxyHandler(object):
LOG.debug("Request: %s", req)
return self.proxy_request(req)
except Exception:
LOG.exception(_LE("Unexpected error."))
LOG.exception("Unexpected error.")
msg = _('An unknown error has occurred. '
'Please try your request again.')
explanation = six.text_type(msg)
@@ -437,10 +437,9 @@ class BaseMetadataProxyHandler(object):
LOG.debug(str(resp))
return self.create_response(req, resp, content)
elif resp.status == 403:
LOG.warning(_LW(
LOG.warning(
'The remote metadata server responded with Forbidden. This '
'response usually occurs when shared secrets do not match.'
))
'response usually occurs when shared secrets do not match.')
return webob.exc.HTTPForbidden()
elif resp.status == 400:
return webob.exc.HTTPBadRequest()
@@ -449,7 +448,7 @@ class BaseMetadataProxyHandler(object):
elif resp.status == 409:
return webob.exc.HTTPConflict()
elif resp.status == 500:
msg = _LW(
msg = (
'Remote metadata server experienced an internal server error.'
)
LOG.warning(msg)

View File

@@ -20,7 +20,6 @@ from ryu.app.ofctl import service
from ryu.base import app_manager
import ryu.exception as ryu_exc
from dragonflow._i18n import _LE
from dragonflow.controller.common import constants as const
LOG = log.getLogger(__name__)
@@ -44,13 +43,12 @@ class OpenFlowSwitchMixin(object):
try:
result = ofctl_api.send_msg(self._app, msg, reply_cls, reply_multi)
except ryu_exc.RyuException as e:
m = _LE("ofctl request %(request)s error %(error)s") % {
m = "ofctl request %(request)s error %(error)s" % {
"request": msg,
"error": e,
}
LOG.error(_LE("exception occurred, %s"), m)
"error": e}
LOG.error("exception occurred, %s", m)
except eventlet.timeout.Timeout as e:
LOG.error(_LE("exception occurred, %s"), e)
LOG.error("exception occurred, %s", e)
finally:
timeout.cancel()
LOG.debug("ofctl request %(request)s result %(result)s",
@@ -70,7 +68,7 @@ class OpenFlowSwitchMixin(object):
reply_cls=ofpp.OFPFlowStatsReply,
reply_multi=True)
if replies is None:
LOG.error(_LE("_send_msg failed when dump_flows"))
LOG.error("_send_msg failed when dump_flows")
return []
flows = []
for rep in replies:
@@ -82,7 +80,7 @@ class OpenFlowSwitchMixin(object):
try:
self.delete_flows(cookie=match_c, cookie_mask=match_cmask)
except Exception as e:
LOG.error(_LE("exception occurred when cleanup_flows %s"), e)
LOG.error("exception occurred when cleanup_flows %s", e)
@staticmethod
def _match(ofpp, match, **match_kwargs):

View File

@@ -15,7 +15,6 @@
from oslo_log import log
from dragonflow._i18n import _LE
from dragonflow.controller import df_base_app
@@ -48,8 +47,8 @@ class PortQosApp(df_base_app.DFlowApp):
qos = self._get_qos_policy(qos_id)
if not qos:
LOG.error(_LE("Unable to get QoS %(qos)s when adding/updating "
"local port %(port)s. It may have been deleted."),
LOG.error("Unable to get QoS %(qos)s when adding/updating "
"local port %(port)s. It may have been deleted.",
{'qos': qos_id, 'port': lport.get_id()})
self.vswitch_api.clear_port_qos(lport.get_id())
return

View File

@@ -19,7 +19,6 @@ from oslo_log import log
from ryu.lib.packet import arp
from ryu.ofproto import ether
from dragonflow._i18n import _LI
from dragonflow.controller.common import constants as const
from dragonflow.controller import df_base_app
@@ -70,7 +69,7 @@ class PortSecApp(df_base_app.DFlowApp):
def _install_flows_check_valid_ip_and_mac(self, unique_key, ip, mac):
if netaddr.IPNetwork(ip).version == 6:
LOG.info(_LI("IPv6 addresses are not supported yet"))
LOG.info("IPv6 addresses are not supported yet")
return
parser = self.parser
@@ -98,7 +97,7 @@ class PortSecApp(df_base_app.DFlowApp):
def _uninstall_flows_check_valid_ip_and_mac(self, unique_key, ip, mac):
if netaddr.IPNetwork(ip).version == 6:
LOG.info(_LI("IPv6 addresses are not supported yet"))
LOG.info("IPv6 addresses are not supported yet")
return
parser = self.parser

View File

@@ -13,7 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
from dragonflow._i18n import _LI, _LE
from dragonflow.common import utils
from dragonflow import conf as cfg
from dragonflow.controller.common import constants as const
@@ -46,7 +45,7 @@ class ProviderNetworksApp(df_base_app.DFlowApp):
try:
return helpers.parse_mappings(bridge_mappings)
except ValueError:
LOG.exception(_LE("Failed to parse bridge mapping"))
LOG.exception("Failed to parse bridge mapping")
raise
def _setup_physical_bridges(self, bridge_mappings):
@@ -58,8 +57,8 @@ class ProviderNetworksApp(df_base_app.DFlowApp):
:param bridge_mappings: map physical network names to bridge names.
'''
for physical_network, bridge in bridge_mappings.items():
LOG.info(_LI("Mapping physical network %(physical_network)s to "
"bridge %(bridge)s"),
LOG.info("Mapping physical network %(physical_network)s to "
"bridge %(bridge)s",
{'physical_network': physical_network,
'bridge': bridge})
int_ofport = self.vswitch_api.create_patch_port(
@@ -83,7 +82,7 @@ class ProviderNetworksApp(df_base_app.DFlowApp):
port_count = self.logical_networks.get_local_port_count(
network_id=network_id,
network_type=network_type)
LOG.info(_LI("adding %(net_type)s local port %(lport)s"),
LOG.info("adding %(net_type)s local port %(lport)s",
{'net_type': network_type,
'lport': lport})
if port_count == 0:

View File

@@ -25,7 +25,6 @@ from ryu.ofproto import ofproto_parser
from ryu.ofproto import ofproto_v1_3
from ryu import utils
from dragonflow._i18n import _LE, _LI
from dragonflow.controller import dispatcher
@@ -196,8 +195,8 @@ class RyuDFAdapter(ofp_handler.OFPHandler):
handler = self.table_handlers[table_id]
handler(event)
else:
LOG.info(_LI("No handler for table id %(table)s with message "
"%(msg)"), {'table': table_id, 'msg': msg})
LOG.info("No handler for table id %(table)s with message "
"%(msg)", {'table': table_id, 'msg': msg})
@handler.set_ev_handler(ofp_event.EventOFPErrorMsg,
handler.MAIN_DISPATCHER)
@@ -208,11 +207,11 @@ class RyuDFAdapter(ofp_handler.OFPHandler):
ryu_msg = ofproto_parser.msg(
self._datapath, version, msg_type,
msg_len - ofproto_common.OFP_HEADER_SIZE, xid, msg.data)
LOG.error(_LE('OFPErrorMsg received: %s'), ryu_msg)
LOG.error('OFPErrorMsg received: %s', ryu_msg)
except Exception:
LOG.error(_LE('Unrecognized OFPErrorMsg received: '
'type=0x%(type)02x code=0x%(code)02x '
'message=%(msg)s'),
LOG.error('Unrecognized OFPErrorMsg received: '
'type=0x%(type)02x code=0x%(code)02x '
'message=%(msg)s',
{'type': msg.type, 'code': msg.code,
'msg': utils.hex_array(msg.data)})
@@ -247,8 +246,8 @@ class RyuDFAdapter(ofp_handler.OFPHandler):
ofproto = dp.ofproto
if cur_config.packet_in_mask[0] & 1 << ofproto.OFPR_INVALID_TTL != 0:
LOG.info(_LI('SW config for TTL error packet in has already '
'been set'))
LOG.info('SW config for TTL error packet in has already '
'been set')
return
packet_in_mask = (cur_config.packet_in_mask[0] |
@@ -258,4 +257,4 @@ class RyuDFAdapter(ofp_handler.OFPHandler):
[cur_config.port_status_mask[0], cur_config.port_status_mask[1]],
[cur_config.flow_removed_mask[0], cur_config.flow_removed_mask[1]])
dp.send_msg(m)
LOG.info(_LI('Set SW config for TTL error packet in.'))
LOG.info('Set SW config for TTL error packet in.')

View File

@@ -20,7 +20,6 @@ from neutron_lib import constants as n_const
from oslo_log import log
from ryu.ofproto import ether
from dragonflow._i18n import _LI, _LW, _LE
from dragonflow.controller.common import constants as const
from dragonflow.controller.common import utils
from dragonflow.controller import df_base_app
@@ -125,8 +124,7 @@ class SGApp(df_base_app.DFlowApp):
result_base['eth_type'] = ether.ETH_TYPE_IP
elif ethertype == n_const.IPv6:
LOG.warning(
_LW("IPv6 in security group rules is not yet supported")
)
"IPv6 in security group rules is not yet supported")
result_base['eth_type'] = ether.ETH_TYPE_IPV6
return [result_base]
protocol_name = secgroup_rule.get_protocol()
@@ -203,8 +201,8 @@ class SGApp(df_base_app.DFlowApp):
if netaddr.IPNetwork(fixed_ip).version == 4:
ips.add(fixed_ip)
else:
LOG.warning(_LW("No support for non IPv4 protocol, the IP"
"address %(ip)s of %(lport)s was ignored."),
LOG.warning("No support for non IPv4 protocol, the IP"
"address %(ip)s of %(lport)s was ignored.",
{'ip': fixed_ip, 'lport': lport.get_id()})
allowed_address_pairs = lport.get_allowed_address_pairs()
@@ -215,9 +213,9 @@ class SGApp(df_base_app.DFlowApp):
ips.add(ip)
else:
LOG.warning(
_LW("No support for non IPv4 protocol, the address "
"%(ip)s in allowed address pairs of lport "
"%(lport)s was ignored."),
"No support for non IPv4 protocol, the address "
"%(ip)s in allowed address pairs of lport "
"%(lport)s was ignored.",
{'ip': ip, 'lport': lport.get_id()})
return ips
@@ -524,10 +522,10 @@ class SGApp(df_base_app.DFlowApp):
ipv4_match_item = "ipv4_dst"
elif secgroup_rule.get_ethertype() == n_const.IPv6:
# not support yet
LOG.info(_LI("IPv6 rules are not supported yet"))
LOG.info("IPv6 rules are not supported yet")
return
else:
LOG.error(_LE("wrong ethernet type"))
LOG.error("wrong ethernet type")
return
actions = [parser.NXActionConjunction(clause=1,
@@ -628,9 +626,9 @@ class SGApp(df_base_app.DFlowApp):
match=match)
elif ethertype == n_const.IPv6:
# not support yet
LOG.info(_LI("IPv6 rules are not supported yet"))
LOG.info("IPv6 rules are not supported yet")
else:
LOG.error(_LE("wrong ethernet type"))
LOG.error("wrong ethernet type")
def _uninstall_security_group_rule_flows(self, secgroup_rule):
# uninstall rule flows by its cookie
@@ -644,7 +642,7 @@ class SGApp(df_base_app.DFlowApp):
rule_id = self._get_security_rule_mapping(secgroup_rule.get_id())
if rule_id is None:
LOG.error(_LE("the rule_id of the security group rule %s is none"),
LOG.error("the rule_id of the security group rule %s is none",
rule_id)
return
@@ -911,7 +909,7 @@ class SGApp(df_base_app.DFlowApp):
def remove_local_port(self, lport):
if not netaddr.valid_ipv4(lport.get_ip()):
LOG.warning(_LW("No support for non IPv4 protocol"))
LOG.warning("No support for non IPv4 protocol")
return
secgroups = lport.get_security_groups()
@@ -985,7 +983,7 @@ class SGApp(df_base_app.DFlowApp):
return
if not netaddr.valid_ipv4(lport.get_ip()):
LOG.warning(_LW("No support for non IPv4 protocol"))
LOG.warning("No support for non IPv4 protocol")
return
for secgroup_id in secgroups:
@@ -1012,7 +1010,7 @@ class SGApp(df_base_app.DFlowApp):
secgroup_id)
return
LOG.info(_LI("Add a rule %(rule)s to security group %(secgroup)s"),
LOG.info("Add a rule %(rule)s to security group %(secgroup)s",
{'rule': secgroup_rule, 'secgroup': secgroup_id})
# update the record of rules each of which specifies a same security
@@ -1035,7 +1033,7 @@ class SGApp(df_base_app.DFlowApp):
secgroup_id)
return
LOG.info(_LI("Remove a rule %(rule)s to security group %(secgroup)s"),
LOG.info("Remove a rule %(rule)s to security group %(secgroup)s",
{'rule': secgroup_rule, 'secgroup': secgroup.get_id()})
conj_id, priority = \

View File

@@ -17,7 +17,6 @@ from oslo_log import log
from ryu.ofproto import ether
from ryu.ofproto import nicira_ext
from dragonflow._i18n import _LW
from dragonflow.common import constants as df_common_const
from dragonflow.common import utils as df_utils
from dragonflow.controller.common import arp_responder
@@ -62,7 +61,7 @@ class SNATApp_mixin(object):
def is_data_port(self, lport):
if lport.get_device_owner() == '':
LOG.warning(_LW("SNAT application is operating in test mode"))
LOG.warning("SNAT application is operating in test mode")
return True
else:
return df_utils.is_port_owner_of_type(

View File

@@ -12,7 +12,6 @@
from oslo_log import log
from dragonflow._i18n import _LI, _LE, _LW
from dragonflow.common import constants
from dragonflow.controller import df_db_objects_refresh
from dragonflow.db import models as db_models
@@ -51,9 +50,9 @@ class Topology(object):
@return : None
"""
if ovs_port is None:
LOG.error(_LE("ovs_port is None"))
LOG.error("ovs_port is None")
return
LOG.info(_LI("Ovs port updated: %s"), ovs_port)
LOG.info("Ovs port updated: %s", ovs_port)
# ignore port that misses some parameters
if not self._check_ovs_port_integrity(ovs_port):
return
@@ -67,7 +66,7 @@ class Topology(object):
self.ovs_ports[port_id] = ovs_port
port_type = ovs_port.get_type()
if port_type not in self.ovs_port_type:
LOG.info(_LI("Unmanaged port online: %s"), ovs_port)
LOG.info("Unmanaged port online: %s", ovs_port)
return
handler_name = '_' + port_type + '_port_' + action
@@ -77,8 +76,8 @@ class Topology(object):
if handler is not None:
handler(ovs_port)
except Exception:
LOG.exception(_LE(
"Exception occurred when handling port online event"))
LOG.exception(
"Exception occurred when handling port online event")
def ovs_port_deleted(self, ovs_port_id):
"""
@@ -95,7 +94,7 @@ class Topology(object):
port_type = ovs_port.get_type()
if port_type not in self.ovs_port_type:
LOG.info(_LI("Unmanaged port offline: %s"), ovs_port)
LOG.info("Unmanaged port offline: %s", ovs_port)
return
handler_name = '_' + port_type + '_port_deleted'
@@ -105,10 +104,10 @@ class Topology(object):
if handler is not None:
handler(ovs_port)
else:
LOG.info(_LI("%s is None."), handler_name)
LOG.info("%s is None.", handler_name)
except Exception:
LOG.exception(_LE("Exception occurred when handling "
"ovs port update event"))
LOG.exception("Exception occurred when handling "
"ovs port update event")
finally:
del self.ovs_ports[ovs_port_id]
@@ -154,8 +153,8 @@ class Topology(object):
else:
self.controller.delete_lport(lport.get_id())
except Exception:
LOG.exception(_LE("Failed to process logical port"
"when %(action)s tunnel %(lport)s"),
LOG.exception("Failed to process logical port"
"when %(action)s tunnel %(lport)s",
{'action': action, 'lport': lport})
def _tunnel_port_updated(self, ovs_port):
@@ -173,7 +172,7 @@ class Topology(object):
lport_id = ovs_port.get_iface_id()
lport = self._get_lport(lport_id)
if lport is None:
LOG.warning(_LW("No logical port found for ovs port: %s"),
LOG.warning("No logical port found for ovs port: %s",
ovs_port)
return
topic = lport.get_topic()
@@ -190,12 +189,12 @@ class Topology(object):
# If the logical port is not in db store or its ofport is not
# valid. It has not been applied to dragonflow apps. We need to
# update it in dragonflow controller.
LOG.info(_LI("A local logical port(%s) is online"), lport)
LOG.info("A local logical port(%s) is online", lport)
try:
self.controller.update_lport(lport)
except Exception:
LOG.exception(_LE('Failed to process logical port online '
'event: %s'), lport)
LOG.exception('Failed to process logical port online '
'event: %s', lport)
def _vm_port_deleted(self, ovs_port):
ovs_port_id = ovs_port.get_id()
@@ -212,12 +211,12 @@ class Topology(object):
topic = lport.get_topic()
LOG.info(_LI("The logical port(%s) is offline"), lport)
LOG.info("The logical port(%s) is offline", lport)
try:
self.controller.delete_lport(lport_id)
except Exception:
LOG.exception(_LE(
'Failed to process logical port offline event %s'), lport_id)
LOG.exception(
'Failed to process logical port offline event %s', lport_id)
finally:
self.controller.notify_port_status(
ovs_port, constants.PORT_STATUS_DOWN)
@@ -230,7 +229,7 @@ class Topology(object):
return
if topic not in self.topic_subscribed:
LOG.info(_LI("Subscribe topic: %(topic)s by lport: %(id)s"),
LOG.info("Subscribe topic: %(topic)s by lport: %(id)s",
{"topic": topic, "id": lport_id})
self.nb_api.subscriber.register_topic(topic)
self._pull_tenant_topology_from_db(topic)
@@ -244,7 +243,7 @@ class Topology(object):
port_ids = self.topic_subscribed[topic]
port_ids.remove(lport_id)
if len(port_ids) == 0:
LOG.info(_LI("Unsubscribe topic: %(topic)s by lport: %(id)s"),
LOG.info("Unsubscribe topic: %(topic)s by lport: %(id)s",
{"topic": topic, "id": lport_id})
del self.topic_subscribed[topic]
self.nb_api.subscriber.unregister_topic(topic)
@@ -287,7 +286,7 @@ class Topology(object):
lport_id = ovs_port.get_iface_id()
lport = self._get_lport(lport_id)
if lport is None:
LOG.warning(_LW("No logical port found for ovs port: %s"),
LOG.warning("No logical port found for ovs port: %s",
ovs_port)
continue
topic = lport.get_topic()

View File

@@ -16,7 +16,6 @@
from oslo_log import log
from ryu.lib import mac as mac_api
from dragonflow._i18n import _LI, _LW
from dragonflow import conf as cfg
from dragonflow.controller.common import constants as const
from dragonflow.controller.common import logical_networks
@@ -42,11 +41,11 @@ class TunnelingApp(df_base_app.DFlowApp):
def add_local_port(self, lport):
network_type = lport.get_external_value('network_type')
if network_type not in self.tunnel_types:
LOG.warning(_LW("added unsupported network %(net_type)s lport"),
LOG.warning("added unsupported network %(net_type)s lport",
{'net_type': network_type})
return
network_id = lport.get_external_value('local_network_id')
LOG.info(_LI("adding %(net_type)s lport %(lport)s"),
LOG.info("adding %(net_type)s lport %(lport)s",
{'net_type': network_type,
'lport': lport})
port_count = self.local_networks.get_local_port_count(
@@ -64,7 +63,7 @@ class TunnelingApp(df_base_app.DFlowApp):
def remove_local_port(self, lport):
network_type = lport.get_external_value('network_type')
if network_type not in self.tunnel_types:
LOG.warning(_LW("removed unsupported network %(net_type)s lport"),
LOG.warning("removed unsupported network %(net_type)s lport",
{'net_type': network_type})
return
network_id = lport.get_external_value('local_network_id')
@@ -118,7 +117,7 @@ class TunnelingApp(df_base_app.DFlowApp):
segmentation_id = lport.get_external_value('segmentation_id')
self._add_egress_dispatch_flow(lport, segmentation_id)
network_id = lport.get_external_value('local_network_id')
LOG.info(_LI("adding remote %(net_type)s lport %(lport)s"),
LOG.info("adding remote %(net_type)s lport %(lport)s",
{'net_type': network_type,
'lport': lport})
self.local_networks.add_remote_port(port_id=lport.get_id(),

View File

@@ -24,7 +24,6 @@ from oslo_log import log
from oslo_serialization import jsonutils
from oslo_utils import excutils
from dragonflow._i18n import _LI, _LW, _LE
import dragonflow.common.exceptions as df_exceptions
from dragonflow.common import utils as df_utils
from dragonflow.db import db_common
@@ -200,7 +199,7 @@ class NbApi(object):
def register_notification_callback(self, controller):
self.controller = controller
LOG.info(_LI("DB configuration sync finished, waiting for changes"))
LOG.info("DB configuration sync finished, waiting for changes")
if not self.use_pubsub:
self.driver.register_notification_callback(
self.db_change_callback)
@@ -260,8 +259,8 @@ class NbApi(object):
def apply_db_change(self, table, key, action, value):
# determine if the action is allowed or not
if action not in DB_ACTION_LIST:
LOG.warning(_LW('Unknown action %(action)s for table '
'%(table)s'), {'action': action, 'table': table})
LOG.warning('Unknown action %(action)s for table '
'%(table)s', {'action': action, 'table': table})
return
if action == 'sync':
@@ -299,13 +298,9 @@ class NbApi(object):
ovs_port = db_models.OvsPort(value)
self.controller.ovs_port_deleted(ovs_port)
elif 'log' == action:
message = _LI(
'Log event (Info): '
'table: %(table)s '
'key: %(key)s '
'action: %(action)s '
'value: %(value)s'
)
message = ('Log event (Info): table: %(table)s key: %(key)s '
'action: %(action)s value: %(value)s')
LOG.info(message, {
'table': str(table),
'key': str(key),
@@ -313,7 +308,7 @@ class NbApi(object):
'value': str(value),
})
else:
LOG.warning(_LW('Unknown table %s'), table)
LOG.warning('Unknown table %s', table)
def create_security_group(self, id, topic, **columns):
secgroup = {}
@@ -779,7 +774,7 @@ class NbApi(object):
)
return db_models.Publisher(publisher_value)
except Exception:
LOG.exception(_LE('Could not get publisher %s'), uuid)
LOG.exception('Could not get publisher %s', uuid)
return None
def get_publishers(self, topic=None):
@@ -859,7 +854,7 @@ class NbApi(object):
db_models.QosPolicy.table_name, policy_id, topic)
return db_models.QosPolicy(qospolicy_value)
except Exception:
LOG.exception(_LE('Could not get qos policy %s'), policy_id)
LOG.exception('Could not get qos policy %s', policy_id)
return None
def create_active_port(self, id, topic, **columns):
@@ -959,7 +954,7 @@ class NbApi(object):
except df_exceptions.DBKeyNotFound:
with excutils.save_and_reraise_exception():
LOG.warning(
_LW('Could not find object %(id)s to delete in %(table)s'),
'Could not find object %(id)s to delete in %(table)s',
extra={'id': id, 'table': model.table_name})
self._send_db_change_event(model.table_name, obj.id, 'delete',
@@ -982,7 +977,7 @@ class NbApi(object):
)
except df_exceptions.DBKeyNotFound:
LOG.exception(
_LE('Could not get object %(id)s from table %(table)s'),
'Could not get object %(id)s from table %(table)s',
extra={'id': id, 'table': model.table_name})
else:
return model.from_json(serialized_obj)

View File

@@ -18,7 +18,6 @@ import time
from oslo_config import cfg
from oslo_log import log
from dragonflow._i18n import _LE, _LW
from dragonflow.common import utils as df_utils
LOG = log.getLogger(__name__)
@@ -121,8 +120,8 @@ class DBConsistencyManager(object):
try:
self.handle_data_comparison(topics, handler, direct)
except Exception as e:
LOG.exception(_LE("Exception occurred when"
"handling db comparison: %s"), e)
LOG.exception("Exception occurred when"
"handling db comparison: %s", e)
def _verify_object(self, handler, action, df_object, local_object=None):
"""Verify the object status and judge whether to create/update/delete
@ -170,7 +169,7 @@ class DBConsistencyManager(object):
handler.handle_delete(obj_id)
del handler.cache[obj_id]
else:
LOG.warning(_LW('Unknown action %s in db consistent'), action)
LOG.warning('Unknown action %s in db consistent', action)
def _compare_df_and_local_data(self, handler, topic, direct):
"""Compare specific resource type df objects and local objects
@ -193,7 +192,7 @@ class DBConsistencyManager(object):
df_version = _get_version(df_object)
if df_version is None:
LOG.error(_LE("Version is None in df_object: %s"), df_object)
LOG.error("Version is None in df_object: %s", df_object)
continue
local_object = local_object_map.pop(df_id, None)
if local_object:

View File

@@ -13,7 +13,7 @@ import collections
import itertools
import threading
from dragonflow._i18n import _LE
from dragonflow._i18n import _
from dragonflow.utils import radix_tree
@@ -146,7 +146,7 @@ class _ModelCache(object):
obj_id = _take_one(keys)
if obj_id is not None and _take_one(keys) is not None:
raise ValueError(_LE('More than one result available'))
raise ValueError(_('More than one result available'))
else:
obj_id = obj.id

View File

@@ -15,7 +15,6 @@ from cassandra import policies
from cassandra import query
from oslo_log import log
from dragonflow._i18n import _LE
from dragonflow.common import exceptions as df_exceptions
from dragonflow import conf as cfg
from dragonflow.db import db_api
@@ -59,7 +58,7 @@ def _parse_hosts(hosts):
if len(ports) > 0 and port not in ports:
raise df_exceptions.InvalidDBHostConfiguration(host=host_str)
else:
LOG.error(_LE("The host string %s is invalid."), host_str)
LOG.error("The host string %s is invalid.", host_str)
return (ips, ports[0])

View File

@@ -20,7 +20,6 @@ import urllib3
from urllib3 import connection
from urllib3 import exceptions
from dragonflow._i18n import _LE
from dragonflow.common import exceptions as df_exceptions
from dragonflow.db import db_api
@@ -99,7 +98,7 @@ def _parse_hosts(hosts):
host_port = host_str.strip().split(':')
host_ports.append((host_port[0], int(host_port[1])))
else:
LOG.error(_LE("The host string %s is invalid."), host_str)
LOG.error("The host string %s is invalid.", host_str)
return tuple(host_ports)
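The Cassandra and etcd hunks share one shape: split each 'ip:port' string, collect the pairs, and log malformed entries with the now-plain message. A self-contained sketch that skips the extra validation the real drivers perform:

from oslo_log import log

LOG = log.getLogger(__name__)


def parse_hosts(hosts):
    """Parse ['ip:port', ...] into a tuple of (ip, port) pairs."""
    host_ports = []
    for host_str in hosts:
        parts = host_str.strip().split(':')
        if len(parts) == 2:
            host_ports.append((parts[0], int(parts[1])))
        else:
            # Malformed entries are logged and skipped.
            LOG.error("The host string %s is invalid.", host_str)
    return tuple(host_ports)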


@ -16,7 +16,6 @@ from oslo_log import log
from redis import client as redis_client
from redis import exceptions
from dragonflow._i18n import _LE, _LW
from dragonflow.common import exceptions as df_exceptions
from dragonflow.db import db_api
from dragonflow.db.drivers import redis_mgt
@ -66,15 +65,15 @@ class RedisDbDriver(db_api.DbApi):
try:
self._execute_cmd("DEL", tmp_key)
except Exception:
LOG.exception(_LE("exception when delete_table: "
"%(key)s "), {'key': local_key})
LOG.exception("exception when delete_table: "
"%(key)s ", {'key': local_key})
def _handle_db_conn_error(self, ip_port, local_key=None):
self.redis_mgt.remove_node_from_master_list(ip_port)
self._update_server_list()
if local_key is not None:
LOG.exception(_LE("update server list, key: %(key)s"),
LOG.exception("update server list, key: %(key)s",
{'key': local_key})
def _sync_master_list(self):
@ -106,7 +105,7 @@ class RedisDbDriver(db_api.DbApi):
def _execute_cmd(self, oper, local_key, value=None):
if not self._is_oper_valid(oper):
LOG.warning(_LW("invalid oper: %(oper)s"),
LOG.warning("invalid oper: %(oper)s",
{'oper': oper})
return None
@ -134,8 +133,8 @@ class RedisDbDriver(db_api.DbApi):
alreadysync = True
continue
self._handle_db_conn_error(ip_port, local_key)
LOG.exception(_LE("connection error while sending "
"request to db: %(e)s"), {'e': e})
LOG.exception("connection error while sending "
"request to db: %(e)s", {'e': e})
raise e
except exceptions.ResponseError as e:
if not alreadysync:
@ -155,12 +154,12 @@ class RedisDbDriver(db_api.DbApi):
if client is None:
# maybe there is a fast failover
self._handle_db_conn_error(ip_port, local_key)
LOG.exception(_LE("no client available: "
"%(ip_port)s, %(e)s"),
LOG.exception("no client available: "
"%(ip_port)s, %(e)s",
{'ip_port': resp[2], 'e': e})
raise e
else:
LOG.exception(_LE("error not handled: %(e)s"),
LOG.exception("error not handled: %(e)s",
{'e': e})
raise e
except Exception as e:
@ -169,8 +168,8 @@ class RedisDbDriver(db_api.DbApi):
alreadysync = True
continue
self._handle_db_conn_error(ip_port, local_key)
LOG.exception(_LE("exception while sending request to "
"db: %(e)s"), {'e': e})
LOG.exception("exception while sending request to "
"db: %(e)s", {'e': e})
raise e
def _find_key_without_topic(self, table, key):
@ -194,7 +193,7 @@ class RedisDbDriver(db_api.DbApi):
if res is not None:
return res
except Exception:
LOG.exception(_LE("exception when get_key: %(key)s"),
LOG.exception("exception when get_key: %(key)s",
{'key': local_key})
raise df_exceptions.DBKeyNotFound(key=key)
@ -209,7 +208,7 @@ class RedisDbDriver(db_api.DbApi):
return res
except Exception:
LOG.exception(_LE("exception when set_key: %(key)s"),
LOG.exception("exception when set_key: %(key)s",
{'key': local_key})
def create_key(self, table, key, value, topic=None):
@ -230,7 +229,7 @@ class RedisDbDriver(db_api.DbApi):
return res
except Exception:
LOG.exception(_LE("exception when delete_key: %(key)s"),
LOG.exception("exception when delete_key: %(key)s",
{'key': local_key})
def get_all_entries(self, table, topic=None):
@ -247,8 +246,7 @@ class RedisDbDriver(db_api.DbApi):
res.append(self._execute_cmd("GET", tmp_key))
return res
except Exception:
LOG.exception(_LE("exception when get_all_entries: "
"%(key)s"),
LOG.exception("exception when get_all_entries: %(key)s",
{'key': local_key})
else:
@ -265,7 +263,7 @@ class RedisDbDriver(db_api.DbApi):
return res
except Exception as e:
self._handle_db_conn_error(ip_port, local_key)
LOG.exception(_LE("exception when mget: %(key)s, %(e)s"),
LOG.exception("exception when mget: %(key)s, %(e)s",
{'key': local_key, 'e': e})
def get_all_keys(self, table, topic=None):
@ -281,8 +279,7 @@ class RedisDbDriver(db_api.DbApi):
return [self._strip_table_name_from_key(key) for key in res]
except Exception as e:
self._handle_db_conn_error(ip_port, local_key)
LOG.exception(_LE("exception when get_all_keys: "
"%(key)s, %(e)s"),
LOG.exception("exception when get_all_keys: %(key)s, %(e)s",
{'key': local_key, 'e': e})
else:
@ -298,8 +295,7 @@ class RedisDbDriver(db_api.DbApi):
except Exception as e:
self._handle_db_conn_error(ip_port, local_key)
LOG.exception(_LE("exception when get_all_keys: "
"%(key)s, %(e)s"),
LOG.exception("exception when get_all_keys: %(key)s, %(e)s",
{'key': local_key, 'e': e})
def _strip_table_name_from_key(self, key):
@ -317,14 +313,14 @@ class RedisDbDriver(db_api.DbApi):
return client.incr(local_key)
except Exception as e:
self._handle_db_conn_error(ip_port, local_key)
LOG.exception(_LE("exception when incr: %(key)s, %(e)s"),
LOG.exception("exception when incr: %(key)s, %(e)s",
{'key': local_key, 'e': e})
def allocate_unique_key(self, table):
try:
return self._allocate_unique_key(table)
except Exception as e:
LOG.error(_LE("allocate_unique_key exception: %(e)s"),
LOG.error("allocate_unique_key exception: %(e)s",
{'e': e})
return
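For multi-parameter messages the driver keeps named placeholders and passes a mapping, which reads better than positional %s and is still only formatted if the record is emitted. A sketch modeled on the incr path above; client is any redis.StrictRedis-like object:

from oslo_log import log

LOG = log.getLogger(__name__)


def incr_key(client, local_key):
    """Increment a counter key, logging key and error details on failure."""
    try:
        return client.incr(local_key)
    except Exception as e:
        # Named placeholders plus a mapping: deferred, and self-documenting.
        LOG.exception('exception when incr: %(key)s, %(e)s',
                      {'key': local_key, 'e': e})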


@ -22,7 +22,6 @@ from oslo_serialization import jsonutils
import redis
import six
from dragonflow._i18n import _LI, _LE, _LW
from dragonflow.common import utils as df_utils
from dragonflow.db import db_common
from dragonflow.db.drivers import redis_calckey
@ -87,8 +86,8 @@ class RedisMgt(object):
self.default_node = redis.StrictRedis(host, port)
RedisMgt.check_connection(self.default_node)
except Exception as e:
LOG.exception(_LE("exception happened "
"when connect to default node, %s"), e)
LOG.exception("exception happened "
"when connect to default node, %s", e)
def _init_node(self, host, port):
node = redis.StrictRedis(host, port)
@ -101,9 +100,8 @@ class RedisMgt(object):
disconnect()
self.default_node.connection_pool.reset()
except Exception as e:
LOG.exception(_LE("exception happened "
"when release default node, %(e)s"),
{'e': e})
LOG.exception("exception happened "
"when release default node, %(e)s", {'e': e})
def _release_node(self, node):
node.connection_pool.get_connection(None, None).disconnect()
@ -132,16 +130,16 @@ class RedisMgt(object):
node = self._init_node(ip_port[0], ip_port[1])
info = self._get_cluster_info(node)
if info['cluster_state'] != 'ok':
LOG.warning(_LW("redis cluster state failed"))
LOG.warning("redis cluster state failed")
else:
new_nodes.update(self._get_cluster_nodes(node))
self._release_node(node)
break
except Exception:
LOG.exception(_LE("exception happened "
"when get cluster topology, %(ip)s:"
"%(port)s"),
LOG.exception("exception happened "
"when get cluster topology, %(ip)s:"
"%(port)s",
{'ip': ip_port[0], 'port': ip_port[1]})
return new_nodes
@ -238,8 +236,8 @@ class RedisMgt(object):
def remove_node_from_master_list(self, ip_port):
if ip_port is not None:
# remove the node by ip_port
LOG.info(_LI("remove node %(ip_port)s from "
"redis master list"),
LOG.info("remove node %(ip_port)s from "
"redis master list",
{'ip_port': ip_port})
self.master_list = [node for node in self.master_list
if node['ip_port'] != ip_port]
@ -288,7 +286,7 @@ class RedisMgt(object):
# this means a tmp status
# one master one slave
changed = RET_CODE.NODES_CHANGE
LOG.info(_LI("master nodes not equals to slave nodes"))
LOG.info("master nodes not equals to slave nodes")
else:
if cnt != len(old_nodes):
changed = RET_CODE.NODES_CHANGE
@ -299,10 +297,10 @@ class RedisMgt(object):
# should be recovered manually. Assumed that no scale-down in
# cluster.
# Do not have to notify changes.
LOG.warning(_LW("redis cluster nodes less than local, "
"maybe there is a partition in db "
"cluster, nodes:%(new)s, "
"local nodes:%(local)s"),
LOG.warning("redis cluster nodes less than local, "
"maybe there is a partition in db "
"cluster, nodes:%(new)s, "
"local nodes:%(local)s",
{'new': new_nodes, 'local': old_nodes})
return changed
@ -312,7 +310,7 @@ class RedisMgt(object):
changed = self._check_nodes_change(self.cluster_nodes, new_nodes)
if changed == RET_CODE.SLOTS_CHANGE:
LOG.info(_LI("redis_failover_callback:SLOTS_CHANGE"))
LOG.info("redis_failover_callback:SLOTS_CHANGE")
# update local nodes
# don't need re-sync
self.cluster_nodes = new_nodes
@ -320,7 +318,7 @@ class RedisMgt(object):
self.redis_set_master_list_to_syncstring(self.master_list)
elif changed == RET_CODE.NODES_CHANGE:
LOG.info(_LI("redis_failover_callback:NODES_CHANGE"))
LOG.info("redis_failover_callback:NODES_CHANGE")
# update local nodes
self.cluster_nodes = new_nodes
self.master_list = self._parse_to_masterlist()
@ -357,7 +355,7 @@ class RedisMgt(object):
self._release_node(node)
return True
except Exception:
LOG.exception(_LE("check master nodes connection failed"))
LOG.exception("check master nodes connection failed")
return False
def run(self):
@ -378,9 +376,8 @@ class RedisMgt(object):
self.redis_failover_callback(nodes)
except Exception:
LOG.exception(_LE("exception happened "
"when receive messages from plugin"
))
LOG.exception("exception happened "
"when receive messages from plugin")
def redis_get_master_list_from_syncstring(self, syncstring):
try:
@ -388,21 +385,19 @@ class RedisMgt(object):
local_list = msgpack.Unpacker(six.BytesIO(syncstring)).unpack()
if local_list:
self.master_list = local_list
LOG.info(_LI("get new master from syncstring master=%s"),
LOG.info("get new master from syncstring master=%s",
self.master_list)
return True
return False
except Exception:
LOG.exception(_LE("exception happened "
"when get new master from syncstring"
))
LOG.exception("exception happened "
"when get new master from syncstring")
def redis_set_master_list_to_syncstring(self, master_list):
try:
RedisMgt.global_sharedlist.raw = msgpack.packb(master_list)
except Exception:
LOG.exception(_LE("exception happened "
"when set new master to syncstring"
))
LOG.exception("exception happened "
"when set new master to syncstring")


@ -14,7 +14,7 @@ from jsonmodels import fields
import netaddr
import six
from dragonflow._i18n import _LE
from dragonflow._i18n import _
from dragonflow.db import model_framework
from dragonflow.db import model_proxy
@ -34,9 +34,8 @@ def _create_ref(proxy_type, value, lazy):
obj_id = value.id
else:
raise ValueError(
_LE('Reference field should only be initialized by ID or '
'model instance/reference'),
)
_('Reference field should only be initialized by ID or '
'model instance/reference'))
return proxy_type(id=obj_id, lazy=lazy)
@ -176,11 +175,8 @@ class EnumField(fields.StringField):
super(EnumField, self).validate(value)
if value is not None and value not in self._valid_values:
raise errors.ValidationError(
_LE('{value} is not one of: [{valid_values}]').format(
value=value,
valid_values=', '.join(self._valid_values),
),
)
_('{value} is not one of: [{valid_values}]').format(
value=value, valid_values=', '.join(self._valid_values)))
class EnumListField(fields.ListField):
@ -195,7 +191,7 @@ class EnumListField(fields.ListField):
def validate(self, value):
if self.required and not value:
raise errors.ValidationError(_LE('Field is required!'))
raise errors.ValidationError(_('Field is required!'))
if value is None:
return
@ -203,8 +199,6 @@ class EnumListField(fields.ListField):
for elem in value:
if elem not in self._valid_values:
raise errors.ValidationError(
_LE('{value} is not one of: [{valid_values}]').format(
_('{value} is not one of: [{valid_values}]').format(
value=value,
valid_values=', '.join(self._valid_values),
),
)
valid_values=', '.join(self._valid_values)))


@ -19,7 +19,7 @@ from oslo_log import log
from oslo_serialization import jsonutils
import six
from dragonflow._i18n import _LE
from dragonflow._i18n import _
from dragonflow.db.models import legacy
LOG = log.getLogger(__name__)
@ -75,7 +75,7 @@ class _CommonBase(models.Base):
changed_fields = set()
for key, _ in other.iterate_over_set_fields():
for key, value in other.iterate_over_set_fields():
old_value = getattr(self, key)
new_value = getattr(other, key)
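This hunk also renames the throwaway loop variable: with _ now imported as the translation function, binding _ inside a function would shadow it for the rest of that scope. A sketch of the hazard, assuming the (name, field) pairs that iterate_over_set_fields yields here:

from dragonflow._i18n import _


def changed_field_names(old, new):
    """Collect the names of fields whose values differ between two models."""
    changed = set()
    # 'value', not '_': binding '_' here would shadow the translation
    # function, so a later raise SomeError(_('...')) in this scope would
    # call the loop leftover instead of translating.
    for key, value in new.iterate_over_set_fields():
        if getattr(old, key) != getattr(new, key):
            changed.add(key)
    return changed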
@ -100,7 +100,7 @@ class _CommonBase(models.Base):
cb(self, *args, **kwargs)
except Exception:
LOG.exception(
_LE('Error while calling %(func)r(*%(_args)r, **%(kw)r)'),
'Error while calling %(func)r(*%(_args)r, **%(kw)r)',
extra={'func': cb, '_args': args, 'kw': kwargs},
)
@ -187,7 +187,7 @@ class _CommonBase(models.Base):
@classmethod
def dependencies(cls):
deps = []
for _, field in cls.iterate_over_fields():
for key, field in cls.iterate_over_fields():
if isinstance(field, fields.ListField):
types = field.items_types
else:
@ -363,7 +363,7 @@ def iter_models_by_dependency_order():
# If we still have unsorted models yet nothing is independent, we have
# dependency cycle
if not independent_models:
raise RuntimeError(_LE('Models form a dependency cycle'))
raise RuntimeError(_('Models form a dependency cycle'))
# Move independent models to sorted list
for model in independent_models:


@ -11,7 +11,7 @@
# under the License.
import six
from dragonflow._i18n import _LE
from dragonflow._i18n import _
from dragonflow.db import db_store2
@ -65,7 +65,7 @@ class _ModelProxyBase(object):
@id.setter
def id_setter(self, value):
raise RuntimeError(_LE('Setting ID of model-proxy is not allowed'))
raise RuntimeError(_('Setting ID of model-proxy is not allowed'))
def to_struct(self):
return {'id': self._id}


@ -26,7 +26,6 @@ from oslo_utils import timeutils
import six
from sqlalchemy.orm import exc as orm_exc
from dragonflow._i18n import _LW
from dragonflow.common import exceptions as df_exc
from dragonflow.db.neutron import models
@ -180,8 +179,8 @@ def _test_and_create_object(uuid):
if row.lock and timeutils.is_older_than(
row.created_at, cfg.CONF.df.distributed_lock_ttl):
# reset the lock if it is timeout
LOG.warning(_LW('The lock for object %(id)s is reset '
'due to timeout.'), {'id': uuid})
LOG.warning('The lock for object %(id)s is reset '
'due to timeout.', {'id': uuid})
_lock_free_update(session, uuid, lock_state=True,
session_id=row.session_id)
except orm_exc.NoResultFound:


@ -21,7 +21,6 @@ from oslo_log import log as logging
from oslo_serialization import jsonutils
import six
from dragonflow._i18n import _LE, _LI
from dragonflow.common import exceptions
from dragonflow.common import utils as df_utils
from dragonflow.db import db_common
@ -40,7 +39,7 @@ def pack_message(message):
try:
data = msgpack.packb(message, encoding='utf-8')
except Exception:
LOG.exception(_LE("Error in pack_message: "))
LOG.exception("Error in pack_message: ")
return data
@ -49,7 +48,7 @@ def unpack_message(message):
try:
entry = msgpack.unpackb(message, encoding='utf-8')
except Exception:
LOG.exception(_LE("Error in unpack_message: "))
LOG.exception("Error in unpack_message: ")
return entry
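pack_message and unpack_message swallow serialization errors and rely on LOG.exception, which logs at ERROR and appends the active traceback, so nothing is lost by dropping _LE. A sketch of the pack side; the encoding keyword matches the msgpack releases of this era, and data is initialized up front so the failure path cannot hit an unbound local (a latent issue in the hunk above):

import msgpack
from oslo_log import log

LOG = log.getLogger(__name__)


def pack_message(message):
    """Serialize a pub/sub message, returning None on failure."""
    data = None
    try:
        data = msgpack.packb(message, encoding='utf-8')
    except Exception:
        # Must be called from an except block; the traceback is appended.
        LOG.exception("Error in pack_message: ")
    return data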
@ -228,14 +227,14 @@ class SubscriberAgentBase(SubscriberApi):
self.daemon.stop()
def register_topic(self, topic):
LOG.info(_LI('Register topic %s'), topic)
LOG.info('Register topic %s', topic)
if topic not in self.topic_list:
self.topic_list.append(topic)
return True
return False
def unregister_topic(self, topic):
LOG.info(_LI('Unregister topic %s'), topic)
LOG.info('Unregister topic %s', topic)
self.topic_list.remove(topic)
def set_subscriber_for_failover(self, sub, callback):
@ -270,7 +269,7 @@ class TableMonitor(object):
eventlet.sleep(self._polling_time)
cache = self._poll_once(cache)
except Exception:
LOG.exception(_LE("Error when polling table %s"),
LOG.exception("Error when polling table %s",
self._table_name)
def _poll_once(self, old_cache):
@ -325,7 +324,7 @@ class StalePublisherMonitor(TableMonitor):
continue
last_activity_timestamp = publisher['last_activity_timestamp']
if last_activity_timestamp < time.time() - self._timeout:
LOG.info(_LI('Removing publisher %s'), publisher_json)
LOG.info('Removing publisher %s', publisher_json)
try:
self._driver.delete_key(self._table_name, entry_key)
except exceptions.DBKeyNotFound:


@ -17,7 +17,6 @@ from oslo_log import log as logging
from oslo_serialization import jsonutils
import redis
from dragonflow._i18n import _LE, _LW, _LI
from dragonflow import conf as cfg
from dragonflow.db.drivers import redis_mgt
from dragonflow.db import pub_sub_api
@ -79,11 +78,11 @@ class RedisPublisherAgent(pub_sub_api.PublisherApi):
self._update_client()
def _sync_master_list(self):
LOG.info(_LI("publish connection old masterlist %s"),
LOG.info("publish connection old masterlist %s",
self.redis_mgt.master_list)
result = self.redis_mgt.redis_get_master_list_from_syncstring(
redis_mgt.RedisMgt.global_sharedlist.raw)
LOG.info(_LI("publish connection new masterlist %s"),
LOG.info("publish connection new masterlist %s",
self.redis_mgt.master_list)
if result:
self._update_client()
@ -106,7 +105,7 @@ class RedisPublisherAgent(pub_sub_api.PublisherApi):
if not alreadysync:
self._sync_master_list()
alreadysync = True
LOG.exception(_LE("publish error remote:%(remote)s "),
LOG.exception("publish error remote:%(remote)s ",
{'remote': self.remote})
continue
self.redis_mgt.remove_node_from_master_list(self.remote)
@ -121,7 +120,7 @@ class RedisPublisherAgent(pub_sub_api.PublisherApi):
self.redis_mgt.daemonize()
else:
LOG.warning(_LW("redis mgt is none"))
LOG.warning("redis mgt is none")
class RedisSubscriberAgent(pub_sub_api.SubscriberAgentBase):
@ -174,7 +173,7 @@ class RedisSubscriberAgent(pub_sub_api.SubscriberAgentBase):
if self.redis_mgt is not None:
self.redis_mgt.register_ha_topic()
else:
LOG.warning(_LW("redis mgt is none"))
LOG.warning("redis mgt is none")
def run(self):
while True:
@ -202,21 +201,21 @@ class RedisSubscriberAgent(pub_sub_api.SubscriberAgentBase):
self.redis_mgt.redis_failover_callback(
value)
else:
LOG.warning(_LW("receive unknown message in "
"subscriber %(type)s"),
LOG.warning("receive unknown message in "
"subscriber %(type)s",
{'type': data['type']})
else:
LOG.warning(_LW("pubsub lost connection %(ip)s:"
"%(port)s"),
LOG.warning("pubsub lost connection %(ip)s:"
"%(port)s",
{'ip': self.ip,
'port': self.plugin_updates_port})
eventlet.sleep(1)
except Exception as e:
LOG.warning(_LW("subscriber listening task lost "
"connection "
"%(e)s"), {'e': e})
LOG.warning("subscriber listening task lost "
"connection "
"%(e)s", {'e': e})
try:
connection = self.pub_sub.connection
@ -236,9 +235,8 @@ class RedisSubscriberAgent(pub_sub_api.SubscriberAgentBase):
self.db_changes_callback(None, None, 'dbrestart',
True, None)
else:
LOG.warning(_LW("there is no more db node "
"available"))
LOG.warning("there is no more db node available")
LOG.exception(_LE("reconnect error %(ip)s:%(port)s"),
LOG.exception("reconnect error %(ip)s:%(port)s",
{'ip': self.ip,
'port': self.plugin_updates_port})


@ -21,7 +21,6 @@ import time
from oslo_config import cfg
from oslo_log import log
from dragonflow._i18n import _LE, _LI, _LW
from dragonflow.common import constants
from dragonflow.common import utils as df_utils
from dragonflow.db import db_common
@ -45,8 +44,8 @@ class RedisPortStatusNotifier(port_status_api.PortStatusDriver):
self.create_heart_beat_reporter(cfg.CONF.host)
else:
if not cfg.CONF.df.enable_df_pub_sub:
LOG.warning(_LW("RedisPortStatusNotifier cannot "
"work when enable_df_pub_sub is disabled"))
LOG.warning("RedisPortStatusNotifier cannot "
"work when enable_df_pub_sub is disabled")
return
self.nb_api.publisher.initialize()
@ -58,7 +57,7 @@ class RedisPortStatusNotifier(port_status_api.PortStatusDriver):
else:
ppid = listener.get_ppid()
my_ppid = os.getppid()
LOG.info(_LI("Listener %(l)s exists, my ppid is %(ppid)s"),
LOG.info("Listener %(l)s exists, my ppid is %(ppid)s",
{'l': listener, 'ppid': my_ppid})
# FIXME(wangjian): if api_worker is 1, the old ppid could be
# equal to my_ppid. I tried to set api_worker=1, still multiple
@ -70,7 +69,7 @@ class RedisPortStatusNotifier(port_status_api.PortStatusDriver):
def _create_heart_beat_reporter(self, host):
self.nb_api.register_listener_callback(self.port_status_callback,
'listener_' + host)
LOG.info(_LI("Register listener %s"), host)
LOG.info("Register listener %s", host)
self.heart_beat_reporter = HeartBeatReporter(self.nb_api)
self.heart_beat_reporter.daemonize()
@ -97,16 +96,16 @@ class RedisPortStatusNotifier(port_status_api.PortStatusDriver):
elif listeners_num == 1:
selected = listeners[0]
else:
LOG.warning(_LW("No neutron listener found"))
LOG.warning("No neutron listener found")
return
topic = selected.get_topic()
update = db_common.DbUpdate(table, key, action, value, topic=topic)
LOG.info(_LI("Publish to neutron %s"), topic)
LOG.info("Publish to neutron %s", topic)
self.nb_api.publisher.send_event(update)
def port_status_callback(self, table, key, action, value, topic=None):
if models.LogicalPort.table_name == table and 'update' == action:
LOG.info(_LI("Process port %s status update event"), str(key))
LOG.info("Process port %s status update event", str(key))
if constants.PORT_STATUS_UP == value:
self.mech_driver.set_port_status_up(key)
if constants.PORT_STATUS_DOWN == value:
@ -147,5 +146,4 @@ class HeartBeatReporter(object):
timestamp=timestamp,
ppid=ppid)
except Exception:
LOG.exception(_LE(
"Failed to report heart beat for %s"), listener)
LOG.exception("Failed to report heart beat for %s", listener)


@ -17,7 +17,6 @@ from eventlet.green import zmq
from oslo_config import cfg
from oslo_log import log as logging
from dragonflow._i18n import _LI, _LE
from dragonflow.common import exceptions
from dragonflow.db import db_common
from dragonflow.db import pub_sub_api
@ -32,8 +31,8 @@ class ZMQPubSub(pub_sub_api.PubSubApi):
super(ZMQPubSub, self).__init__()
transport = cfg.CONF.df.publisher_transport
if transport not in SUPPORTED_TRANSPORTS:
message = _LE("zmq_pub_sub: Unsupported publisher_transport value "
"%(transport)s, expected %(expected)s")
message = ("zmq_pub_sub: Unsupported publisher_transport value "
"%(transport)s, expected %(expected)s")
LOG.error(message, {
'transport': transport,
'expected': SUPPORTED_TRANSPORTS
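The driver builds one plain message string and hands LOG.error the parameter mapping. A sketch of the validation pattern; the transport set and the ValueError are illustrative stand-ins for the driver's actual constants and its dragonflow exception type:

from oslo_log import log

LOG = log.getLogger(__name__)

# Illustrative only; the real driver defines its own supported set.
SUPPORTED_TRANSPORTS = {'tcp', 'epgm'}


def check_publisher_transport(transport):
    """Reject unsupported transports, logging before raising."""
    if transport not in SUPPORTED_TRANSPORTS:
        params = {'transport': transport,
                  'expected': SUPPORTED_TRANSPORTS}
        message = ("zmq_pub_sub: Unsupported publisher_transport value "
                   "%(transport)s, expected %(expected)s")
        LOG.error(message, params)
        # Stand-in for the driver's own configuration exception.
        raise ValueError(message % params)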
@ -160,7 +159,7 @@ class ZMQSubscriberAgentBase(pub_sub_api.SubscriberAgentBase):
def run(self):
self.sub_socket = self.connect()
LOG.info(_LI("Starting Subscriber on ports %(endpoints)s"),
LOG.info("Starting Subscriber on ports %(endpoints)s",
{'endpoints': self.uri_list})
while True:
try:


@ -27,7 +27,7 @@ from neutron_lib import exceptions as n_exc
from neutron_lib.plugins import directory
from oslo_log import log
from dragonflow._i18n import _, _LI, _LE
from dragonflow._i18n import _
from dragonflow.common import constants as df_common_const
from dragonflow.common import exceptions as df_exceptions
from dragonflow.common import utils as df_utils
@ -54,7 +54,7 @@ class DFMechDriver(driver_api.MechanismDriver):
'net-mtu']
def initialize(self):
LOG.info(_LI("Starting DFMechDriver"))
LOG.info("Starting DFMechDriver")
self.nb_api = None
# When set to True, Nova plugs the VIF directly into the ovs bridge
@ -156,12 +156,12 @@ class DFMechDriver(driver_api.MechanismDriver):
self.nb_api.create_security_group(id=sg_id, topic=tenant_id,
name=sg_name, rules=rules,
version=sg_version)
LOG.info(_LI("DFMechDriver: create security group %s"), sg_name)
LOG.info("DFMechDriver: create security group %s", sg_name)
elif event == events.AFTER_UPDATE:
self.nb_api.update_security_group(id=sg_id, topic=tenant_id,
name=sg_name, rules=rules,
version=sg_version)
LOG.info(_LI("DFMechDriver: update security group %s"), sg_name)
LOG.info("DFMechDriver: update security group %s", sg_name)
return sg
@ -172,7 +172,7 @@ class DFMechDriver(driver_api.MechanismDriver):
tenant_id = sg['tenant_id']
self.nb_api.delete_security_group(sg_id, topic=tenant_id)
LOG.info(_LI("DFMechDriver: delete security group %s"), sg_id)
LOG.info("DFMechDriver: delete security group %s", sg_id)
@lock_db.wrap_db_lock(lock_db.RESOURCE_ML2_SECURITY_GROUP_RULE_CREATE)
def create_security_group_rule(self, resource, event, trigger, **kwargs):
@ -189,7 +189,7 @@ class DFMechDriver(driver_api.MechanismDriver):
self.nb_api.add_security_group_rules(sg_id, tenant_id,
sg_rules=[sg_rule],
sg_version=sg_version)
LOG.info(_LI("DFMechDriver: create security group rule in group %s"),
LOG.info("DFMechDriver: create security group rule in group %s",
sg_id)
return sg_rule
@ -205,7 +205,7 @@ class DFMechDriver(driver_api.MechanismDriver):
self.nb_api.delete_security_group_rule(sg_id, sgr_id, tenant_id,
sg_version=sg_version)
LOG.info(_LI("DFMechDriver: delete security group rule %s"), sgr_id)
LOG.info("DFMechDriver: delete security group rule %s", sgr_id)
def create_network_precommit(self, context):
# TODO(xiaohhui): Multi-provider networks are not supported yet.
@ -231,7 +231,7 @@ class DFMechDriver(driver_api.MechanismDriver):
subnets=[],
qos_policy_id=network.get('qos_policy_id'))
LOG.info(_LI("DFMechDriver: create network %s"), network['id'])
LOG.info("DFMechDriver: create network %s", network['id'])
return network
@lock_db.wrap_db_lock(lock_db.RESOURCE_ML2_NETWORK_OR_PORT)
@ -248,7 +248,7 @@ class DFMechDriver(driver_api.MechanismDriver):
"been deleted concurrently", network_id)
return
LOG.info(_LI("DFMechDriver: delete network %s"), network_id)
LOG.info("DFMechDriver: delete network %s", network_id)
@lock_db.wrap_db_lock(lock_db.RESOURCE_ML2_NETWORK_OR_PORT)
def update_network_postcommit(self, context):
@ -265,7 +265,7 @@ class DFMechDriver(driver_api.MechanismDriver):
version=network['revision_number'],
qos_policy_id=network.get('qos_policy_id'))
LOG.info(_LI("DFMechDriver: update network %s"), network['id'])
LOG.info("DFMechDriver: update network %s", network['id'])
return network
def _get_dhcp_port_for_subnet(self, context, subnet_id):
@ -371,7 +371,7 @@ class DFMechDriver(driver_api.MechanismDriver):
subnet)
except Exception:
LOG.exception(
_LE("Failed to create dhcp port for subnet %s"), subnet['id'])
"Failed to create dhcp port for subnet %s", subnet['id'])
return None
self.nb_api.add_subnet(
@ -387,7 +387,7 @@ class DFMechDriver(driver_api.MechanismDriver):
dns_nameservers=subnet.get('dns_nameservers', []),
host_routes=subnet.get('host_routes', []))
LOG.info(_LI("DFMechDriver: create subnet %s"), subnet['id'])
LOG.info("DFMechDriver: create subnet %s", subnet['id'])
return subnet
def _update_subnet_dhcp_centralized(self, context, subnet):
@ -449,8 +449,7 @@ class DFMechDriver(driver_api.MechanismDriver):
new_subnet)
except Exception:
LOG.exception(
_LE("Failed to create dhcp port for subnet %s"),
new_subnet['id'])
"Failed to create dhcp port for subnet %s", new_subnet['id'])
return None
self.nb_api.update_subnet(
@ -466,7 +465,7 @@ class DFMechDriver(driver_api.MechanismDriver):
dns_nameservers=new_subnet.get('dns_nameservers', []),
host_routes=new_subnet.get('host_routes', []))
LOG.info(_LI("DFMechDriver: update subnet %s"), new_subnet['id'])
LOG.info("DFMechDriver: update subnet %s", new_subnet['id'])
return new_subnet
@lock_db.wrap_db_lock(lock_db.RESOURCE_ML2_SUBNET)
@ -493,7 +492,7 @@ class DFMechDriver(driver_api.MechanismDriver):
"been deleted concurrently", net_id)
return
LOG.info(_LI("DFMechDriver: delete subnet %s"), subnet_id)
LOG.info("DFMechDriver: delete subnet %s", subnet_id)
def _filter_unsupported_allowed_address_pairs(self,
allowed_address_pairs):
@ -562,7 +561,7 @@ class DFMechDriver(driver_api.MechanismDriver):
qos_policy_id=port.get('qos_policy_id'),
extra_dhcp_opts=port.get(edo_ext.EXTRADHCPOPTS, []))
LOG.info(_LI("DFMechDriver: create port %s"), port['id'])
LOG.info("DFMechDriver: create port %s", port['id'])
return port
def _is_dhcp_port_after_subnet_delete(self, port):
@ -665,7 +664,7 @@ class DFMechDriver(driver_api.MechanismDriver):
qos_policy_id=updated_port.get('qos_policy_id'),
extra_dhcp_opts=updated_port.get(edo_ext.EXTRADHCPOPTS, []))
LOG.info(_LI("DFMechDriver: update port %s"), updated_port['id'])
LOG.info("DFMechDriver: update port %s", updated_port['id'])
return updated_port
@lock_db.wrap_db_lock(lock_db.RESOURCE_ML2_NETWORK_OR_PORT)
@ -681,7 +680,7 @@ class DFMechDriver(driver_api.MechanismDriver):
"been deleted concurrently", port_id)
return
LOG.info(_LI("DFMechDriver: delete port %s"), port_id)
LOG.info("DFMechDriver: delete port %s", port_id)
def bind_port(self, context):
"""Set porting binding data for use with nova."""


@ -35,7 +35,6 @@ from oslo_log import log
from oslo_utils import excutils
from oslo_utils import importutils
from dragonflow._i18n import _LE, _LI
from dragonflow.common import exceptions as df_exceptions
from dragonflow.db.neutron import lockedobjects_db as lock_db
from dragonflow.neutron.common import constants as df_const
@ -250,7 +249,7 @@ class DFL3RouterPlugin(service_base.ServicePluginBase,
self.nb_api.delete_floatingip(id=id,
topic=floatingip['tenant_id'])
except df_exceptions.DBKeyNotFound:
LOG.exception(_LE("floatingip %s is not found in DF DB"), id)
LOG.exception("floatingip %s is not found in DF DB", id)
def get_floatingip(self, context, id, fields=None):
with context.session.begin(subtransactions=True):
@ -298,9 +297,9 @@ class DFL3RouterPlugin(service_base.ServicePluginBase,
router_port_info['tenant_id'],
router_version=router_version)
except df_exceptions.DBKeyNotFound:
LOG.exception(_LE("logical router %s is not found in DF DB, "
"suppressing delete_lrouter_port "
"exception"), router_id)
LOG.exception("logical router %s is not found in DF DB, "
"suppressing delete_lrouter_port "
"exception", router_id)
return router_port_info
def get_number_of_agents_for_scheduling(self, context):
@ -315,9 +314,9 @@ class DFL3RouterPlugin(service_base.ServicePluginBase,
max_agents = cfg.CONF.max_l3_agents_per_router
if max_agents:
if max_agents > num_agents:
LOG.info(_LI("Number of active agents lower than "
"max_l3_agents_per_router. L3 agents "
"available: %s"), num_agents)
LOG.info("Number of active agents lower than "
"max_l3_agents_per_router. L3 agents "
"available: %s", num_agents)
else:
num_agents = max_agents


@ -19,7 +19,6 @@ from oslo_config import cfg
from oslo_log import log
from ovs import vlog
from dragonflow._i18n import _LW
from dragonflow.common import constants
from dragonflow.ovsdb import impl_idl
from dragonflow.ovsdb import objects
@ -115,10 +114,10 @@ class OvsApi(object):
@staticmethod
def _check_ofport(port_name, ofport):
if ofport is None:
LOG.warning(_LW("Can't find ofport for port %s."), port_name)
LOG.warning("Can't find ofport for port %s.", port_name)
return False
if ofport < OFPORT_RANGE_MIN or ofport > OFPORT_RANGE_MAX:
LOG.warning(_LW("ofport %(ofport)s for port %(port)s is invalid."),
LOG.warning("ofport %(ofport)s for port %(port)s is invalid.",
{'ofport': ofport, 'port': port_name})
return False


@ -30,7 +30,6 @@ from ryu.lib.packet import ipv4
from ryu.lib.packet import ipv6
from ryu.lib.packet import packet
from dragonflow._i18n import _LI, _LE
from dragonflow.common import utils as d_utils
from dragonflow import conf as cfg
from dragonflow.tests.common import utils as test_utils
@ -356,10 +355,9 @@ class LogicalPortTap(object):
def delete(self):
self._disconnect_tap_device_to_vswitch(self.integration_bridge,
self.tap.name)
LOG.info(_LI('Closing tap interface {} ({})').format(
LOG.info('Closing tap interface {} ({})'.format(
self.tap.name,
self.tap.fileno(),
))
self.tap.fileno()))
self.tap.close()
delete_tap_device(self.tap.name)
@ -368,7 +366,7 @@ class LogicalPortTap(object):
:param buf: Raw packet data to send
:type buf: String (decoded)
"""
LOG.info(_LI('send: via {}: {}').format(
LOG.info('send: via {}: {}'.format(
self.tap.name,
packet_raw_data_to_hex(buf)))
if self.is_blocking:
@ -387,7 +385,7 @@ class LogicalPortTap(object):
else:
fd = self.tap.fileno()
buf = os.read(fd, self.tap.mtu)
LOG.info(_LI('receive: via {}: {}').format(
LOG.info('receive: via {}: {}'.format(
self.tap.name,
packet_raw_data_to_hex(buf)))
return buf
@ -546,7 +544,7 @@ class Policy(object):
:type exception: Exception
"""
LOG.exception(_LE('Adding exception:'))
LOG.exception('Adding exception:')
self.exceptions.append(exception)
self.stop()
@ -864,7 +862,7 @@ class LogAction(Action):
"""Action to log the received packet."""
def __call__(self, policy, rule, port_thread, buf):
pkt = packet.Packet(buf)
LOG.info(_LI('LogAction: Got packet: {}').format(str(pkt)))
LOG.info('LogAction: Got packet: {}'.format(str(pkt)))
class SendAction(Action):
@ -1032,10 +1030,9 @@ class PortThread(object):
buf = tap.read()
self.packet_handler(self, buf)
except Exception as e:
LOG.info(_LI('Reading from {}/{} failed: {}').format(
LOG.info('Reading from {}/{} failed: {}'.format(
tap.tap.name,
self.port.name,
e))
self.port.name, e))
break
try:
tap.set_blocking(True)
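These test helpers interpolate eagerly with str.format(), which is tolerable in tests but at odds with the delayed-interpolation style that the H904 check (enabled in the tox.ini hunk at the end of this change) encourages. Both forms side by side:

from oslo_log import log

LOG = log.getLogger(__name__)


def report_read_failure(tap_name, port_name, error):
    # Eager: the string is built even if INFO records are filtered out.
    LOG.info('Reading from {}/{} failed: {}'.format(
        tap_name, port_name, error))
    # Lazy: arguments are only formatted when the record is emitted
    # (the H904-friendly form).
    LOG.info('Reading from %s/%s failed: %s', tap_name, port_name, error)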


@ -25,7 +25,7 @@ import numpy
from oslo_log import log
from oslo_utils import importutils
from dragonflow._i18n import _, _LE
from dragonflow._i18n import _
from dragonflow import conf as cfg
from dragonflow.db import api_nb
@ -124,7 +124,7 @@ def run_client(nb_api):
events.append((time.time(), lport))
def signal_handler(signal, frame):
LOG.error(_LE('You pressed Ctrl+C!'))
LOG.error('You pressed Ctrl+C!')
finished()
def print_status(signal, frame):
@ -142,7 +142,7 @@ def run_client(nb_api):
try:
nb_api.register_notification_callback(callback_handler)
except Exception as e:
LOG.error(_LE('Exception: '), e)
LOG.error('Exception: %s', e)
finished()


@ -18,8 +18,6 @@ from oslo_log import log
import ryu.lib.packet
from ryu.ofproto import inet
from dragonflow._i18n import _LI
from dragonflow import conf as cfg
from dragonflow.controller.common import constants
from dragonflow.tests.common import app_testing_objects
@ -40,8 +38,8 @@ class TestApps(test_base.DFTestBase):
port1 = subnet1.create_port()
port2 = subnet2.create_port()
topology.create_router([subnet1.subnet_id, subnet2.subnet_id])
LOG.info(_LI('Port1 name: {}').format(port1.tap.tap.name))
LOG.info(_LI('Port2 name: {}').format(port2.tap.tap.name))
LOG.info('Port1 name: {}'.format(port1.tap.tap.name))
LOG.info('Port2 name: {}'.format(port2.tap.tap.name))
test_utils.print_command(['ip', 'addr'])
test_utils.print_command(['ovs-vsctl', 'show'], True)
test_utils.print_command(
@ -1546,7 +1544,7 @@ class TestSGApp(test_base.DFTestBase):
self.policy.wait(const.DEFAULT_RESOURCE_READY_TIMEOUT)
ovs = test_utils.OvsFlowsParser()
LOG.info(_LI("flows are: %s"),
LOG.info("flows are: %s",
ovs.get_ovs_flows(self.integration_bridge))
if len(self.policy.exceptions) > 0:


@ -17,7 +17,6 @@ import string
from neutron.common import config as common_config
from oslo_log import log
from dragonflow._i18n import _LE
from dragonflow import conf as cfg
from dragonflow.db import api_nb
from dragonflow.tests import base
@ -38,8 +37,8 @@ class DFTestBase(base.BaseTestCase):
try:
self.neutron = clients.get_neutron_client_from_env()
except KeyError as e:
message = _LE('Cannot find environment variable %s. '
'Have you sourced openrc?')
message = ('Cannot find environment variable %s. '
'Have you sourced openrc?')
LOG.error(message, e.args[0])
self.fail(message % e.args[0])
else:
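With the marker gone the message is a plain %-template, so one string can feed both the log call (interpolated lazily by the logger) and the eager interpolation handed to fail(). A standalone sketch of that pattern, with AssertionError standing in for self.fail:

import os

from oslo_log import log

LOG = log.getLogger(__name__)


def require_env(name):
    """Return an environment variable, logging and failing if it is unset."""
    try:
        return os.environ[name]
    except KeyError as e:
        message = ('Cannot find environment variable %s. '
                   'Have you sourced openrc?')
        LOG.error(message, e.args[0])              # lazy: logger interpolates
        raise AssertionError(message % e.args[0])  # eager: built right now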


@ -15,7 +15,6 @@ from neutron.agent.linux import ip_lib
from oslo_config import cfg
from oslo_log import log
from dragonflow._i18n import _LE
from dragonflow.cmd.eventlet import df_metadata_service
from dragonflow.conf import df_metadata_service as df_metadata_service_conf
from dragonflow.tests.fullstack import test_base
@ -57,7 +56,7 @@ class TestMetadataService(test_base.DFTestBase):
try:
utils.execute(cmd, run_as_root=True, check_exit_code=[0])
except Exception:
LOG.exception(_LE("Failed to delete metadata test port"))
LOG.exception("Failed to delete metadata test port")
ip = cfg.CONF.df_metadata.ip
cmd = ["ip", "rule", "del", "from", ip, "table",
@ -65,7 +64,6 @@ class TestMetadataService(test_base.DFTestBase):
try:
utils.execute(cmd, run_as_root=True)
except Exception:
LOG.exception(_LE(
"Failed to delete metadata test routing rule"
))
LOG.exception(
"Failed to delete metadata test routing rule")
super(TestMetadataService, self).tearDown()


@ -17,7 +17,6 @@ from neutron.agent.common import utils as agent_utils
from neutronclient.common import exceptions
from oslo_log import log
from dragonflow._i18n import _LW
from dragonflow.tests.common import clients
from dragonflow.tests.common import constants as const
from dragonflow.tests.common import utils
@ -32,8 +31,8 @@ def find_first_network(nclient, params):
if networks_count == 0:
return None
if networks_count > 1:
message = _LW("More than one network (%(count)d) found matching: "
"%(args)s")
message = ("More than one network (%(count)d) found matching: "
"%(args)s")
LOG.warning(message, {'args': params, 'count': networks_count})
return networks[0]


@ -15,7 +15,6 @@ import time
from neutron_lib import constants as n_const
from oslo_log import log
from dragonflow._i18n import _LI
from dragonflow.controller.common import constants as const
from dragonflow.tests.common import constants as test_const
from dragonflow.tests.common import utils
@ -245,7 +244,7 @@ class TestOVSFlowsForSecurityGroup(test_base.DFTestBase):
direction='egress'):
found_egress_conntrack_invalied_drop_flow = True
LOG.info(_LI("default flows are: %s"),
LOG.info("default flows are: %s",
ovs.get_ovs_flows(self.integration_bridge))
self.assertTrue(found_ingress_skip_flow)
@ -311,8 +310,8 @@ class TestOVSFlowsForSecurityGroup(test_base.DFTestBase):
ingress_associating_flow, egress_associating_flow = \
self._find_associating_flows(flows_after_change, unique_key_hex)
LOG.info(_LI("flows after associating a port and a security group"
" are: %s"),
LOG.info("flows after associating a port and a security group"
" are: %s",
ovs.get_ovs_flows(self.integration_bridge))
self.assertIsNotNone(ingress_associating_flow)
@ -383,7 +382,7 @@ class TestOVSFlowsForSecurityGroup(test_base.DFTestBase):
ovs = utils.OvsFlowsParser()
flows = ovs.dump(self.integration_bridge)
LOG.info(_LI("flows after adding rules are: %s"),
LOG.info("flows after adding rules are: %s",
ovs.get_ovs_flows(self.integration_bridge))
# Check if the rule flows were installed.


@ -51,7 +51,8 @@ commands = sphinx-build -a -E -W -d doc/build/doctrees -b html doc/source doc/bu
# H404 multi line docstring should start with a summary
# H405 multi line docstring summary not separated with an empty line
# N530 Direct neutron imports not allowed
ignore = E126,H404,H405,N530
# N531 log message does not translate
ignore = E126,H404,H405,N530,N531
# H904: Delay string interpolations at logging calls
# H203: Use assertIs(Not)None to check for None
enable-extensions=H904,H203