Reduce logging overhead by cleaning up logging calls
Deferred formatting to the logging system
------------------------------------------

In general,

    logger.debug('xxx %s yyy %d zzz' % (a, b))

can be written as:

    logger.debug('xxx %s yyy %d zzz', a, b)

This is faster, as Python logging doesn't bother doing the format
operation (which is slow) if the log message won't be logged. We
measured that a significant amount of CPU time in the BGP service was
attributable to these formatting operations.

Removed unneeded calls to str() and repr()
------------------------------------------

since formatting operations %s and %r will make these calls when (and
if) the formatting is performed.

Signed-off-by: Jerry Cen <zhiweic@google.com>
Acked-by: YAMAMOTO Takashi <yamamoto@valinux.co.jp>
Signed-off-by: FUJITA Tomonori <fujita.tomonori@lab.ntt.co.jp>
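A minimal sketch (illustrative, not part of the commit) of why the deferred
form is cheaper: the logging module only interpolates the arguments when the
record actually passes the level check, so %s/%r never call str()/repr() on
messages that are filtered out.

    import logging

    logging.basicConfig(level=logging.INFO)   # DEBUG records are dropped
    logger = logging.getLogger("demo")

    class Expensive(object):
        def __repr__(self):
            print("repr() called")            # visible side effect for the demo
            return "<Expensive>"

    obj = Expensive()

    # Eager: the message string (and repr()) is built even though DEBUG is off.
    logger.debug("value: %r" % obj)

    # Deferred: logging checks the level first; repr() is never called here.
    logger.debug("value: %r", obj)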
parent 1dd28e459e
commit b636219007
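The diff below applies this change mechanically across the tree; schematically
(an illustrative pair, mirroring the patterns in the hunks that follow):

    # before: formats (and calls str()) even when DEBUG is disabled
    LOG.debug('Added vrf %s' % str(vrf))

    # after: arguments are passed through and formatted only if needed
    LOG.debug('Added vrf %s', vrf)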
@@ -41,14 +41,14 @@ class BMPStation(app_manager.RyuApp):

 def start(self):
 super(BMPStation, self).start()
-self.logger.debug("listening on %s:%s" % (self.server_host,
-self.server_port))
+self.logger.debug("listening on %s:%s", self.server_host,
+self.server_port)

 return hub.spawn(StreamServer((self.server_host, self.server_port),
 self.loop).serve_forever)

 def loop(self, sock, addr):
-self.logger.debug("BMP client connected, ip=%s, port=%s" % addr)
+self.logger.debug("BMP client connected, ip=%s, port=%s", addr)
 is_active = True
 buf = bytearray()
 required_len = bmp.BMPMessage._HDR_LEN
@@ -62,7 +62,7 @@ class BMPStation(app_manager.RyuApp):
 while len(buf) >= required_len:
 version, len_, _ = bmp.BMPMessage.parse_header(buf)
 if version != bmp.VERSION:
-self.logger.error("unsupported bmp version: %d" % version)
+self.logger.error("unsupported bmp version: %d", version)
 is_active = False
 break

@@ -83,7 +83,7 @@ class BMPStation(app_manager.RyuApp):
 (e, self.failed_pkt_count))
 else:
 t = time.strftime("%Y %b %d %H:%M:%S", time.localtime())
-self.logger.debug("%s | %s | %s\n" % (t, addr[0], msg))
+self.logger.debug("%s | %s | %s\n", t, addr[0], msg)
 self.output_fd.write("%s | %s | %s\n\n" % (t, addr[0],
 msg))
 self.output_fd.flush()
@@ -91,5 +91,5 @@ class BMPStation(app_manager.RyuApp):

 required_len = bmp.BMPMessage._HDR_LEN

-self.logger.debug("BMP client disconnected, ip=%s, port=%s" % addr)
+self.logger.debug("BMP client disconnected, ip=%s, port=%s", addr)
 sock.close()
@@ -49,7 +49,7 @@ class OfctlService(app_manager.RyuApp):
 ev_cls = ofp_event.ofp_msg_to_ev_cls(msg_cls)
 self._observing_events.setdefault(ev_cls, 0)
 if self._observing_events[ev_cls] == 0:
-self.logger.debug('ofctl: start observing %s' % (ev_cls,))
+self.logger.debug('ofctl: start observing %s', ev_cls)
 self.register_handler(ev_cls, self._handle_reply)
 self.observe_event(ev_cls)
 self._observing_events[ev_cls] += 1
@@ -62,7 +62,7 @@ class OfctlService(app_manager.RyuApp):
 if self._observing_events[ev_cls] == 0:
 self.unregister_handler(ev_cls, self._handle_reply)
 self.unobserve_event(ev_cls)
-self.logger.debug('ofctl: stop observing %s' % (ev_cls,))
+self.logger.debug('ofctl: stop observing %s', ev_cls)

 @staticmethod
 def _is_error(msg):
@@ -76,15 +76,15 @@ class OfctlService(app_manager.RyuApp):
 assert isinstance(id, numbers.Integral)
 old_info = self._switches.get(id, None)
 new_info = _SwitchInfo(datapath=datapath)
-self.logger.debug('add dpid %s datapath %s new_info %s old_info %s' %
-(id, datapath, new_info, old_info))
+self.logger.debug('add dpid %s datapath %s new_info %s old_info %s',
+id, datapath, new_info, old_info)
 self._switches[id] = new_info

 @set_ev_cls(ofp_event.EventOFPStateChange, DEAD_DISPATCHER)
 def _handle_dead(self, ev):
 datapath = ev.datapath
 id = datapath.id
-self.logger.debug('del dpid %s datapath %s' % (id, datapath))
+self.logger.debug('del dpid %s datapath %s', id, datapath)
 if id is None:
 return
 try:
@@ -92,7 +92,7 @@ class OfctlService(app_manager.RyuApp):
 except KeyError:
 return
 if info.datapath is datapath:
-self.logger.debug('forget info %s' % (info,))
+self.logger.debug('forget info %s', info)
 self._switches.pop(id)

 @set_ev_cls(event.GetDatapathRequest, MAIN_DISPATCHER)
@@ -103,7 +103,7 @@ class OfctlService(app_manager.RyuApp):
 datapath = self._switches[id].datapath
 except KeyError:
 datapath = None
-self.logger.debug('dpid %s -> datapath %s' % (id, datapath))
+self.logger.debug('dpid %s -> datapath %s', id, datapath)
 rep = event.Reply(result=datapath)
 self.reply_to_request(req, rep)

@@ -138,12 +138,12 @@ class OfctlService(app_manager.RyuApp):
 try:
 si = self._switches[datapath.id]
 except KeyError:
-self.logger.error('unknown dpid %s' % (datapath.id,))
+self.logger.error('unknown dpid %s', datapath.id)
 return
 try:
 xid = si.barriers.pop(msg.xid)
 except KeyError:
-self.logger.error('unknown barrier xid %s' % (msg.xid,))
+self.logger.error('unknown barrier xid %s', msg.xid)
 return
 result = si.results.pop(xid)
 req = si.xids.pop(xid)
@@ -169,19 +169,18 @@ class OfctlService(app_manager.RyuApp):
 try:
 si = self._switches[datapath.id]
 except KeyError:
-self.logger.error('unknown dpid %s' % (datapath.id,))
+self.logger.error('unknown dpid %s', datapath.id)
 return
 try:
 req = si.xids[msg.xid]
 except KeyError:
-self.logger.error('unknown error xid %s' % (msg.xid,))
+self.logger.error('unknown error xid %s', msg.xid)
 return
 if ((not isinstance(ev, ofp_event.EventOFPErrorMsg)) and
 (req.reply_cls is None or not isinstance(ev.msg, req.reply_cls))):
-self.logger.error('unexpected reply %s for xid %s' %
-(ev, msg.xid,))
+self.logger.error('unexpected reply %s for xid %s', ev, msg.xid)
 return
 try:
 si.results[msg.xid].append(ev.msg)
 except KeyError:
-self.logger.error('unknown error xid %s' % (msg.xid,))
+self.logger.error('unknown error xid %s', msg.xid)
@@ -92,7 +92,7 @@ class WebSocketTopology(app_manager.RyuApp):
 try:
 getattr(rpc_server, func_name)(msg)
 except SocketError:
-self.logger.debug('WebSocket disconnected: %s' % rpc_client.ws)
+self.logger.debug('WebSocket disconnected: %s', rpc_client.ws)
 disconnected_clients.append(rpc_client)
 except InvalidReplyError as e:
 self.logger.error(e)
@@ -284,12 +284,12 @@ class RyuApp(object):
 if name in SERVICE_BRICKS:
 if isinstance(ev, EventRequestBase):
 ev.src = self.name
-LOG.debug("EVENT %s->%s %s" %
-(self.name, name, ev.__class__.__name__))
+LOG.debug("EVENT %s->%s %s",
+self.name, name, ev.__class__.__name__)
 SERVICE_BRICKS[name]._send_event(ev, state)
 else:
-LOG.debug("EVENT LOST %s->%s %s" %
-(self.name, name, ev.__class__.__name__))
+LOG.debug("EVENT LOST %s->%s %s",
+self.name, name, ev.__class__.__name__)

 def send_event_to_observers(self, ev, state=None):
 """
@@ -440,11 +440,11 @@ class AppManager(object):

 @staticmethod
 def _report_brick(name, app):
-LOG.debug("BRICK %s" % name)
+LOG.debug("BRICK %s", name)
 for ev_cls, list_ in app.observers.items():
-LOG.debug(" PROVIDES %s TO %s" % (ev_cls.__name__, list_))
+LOG.debug(" PROVIDES %s TO %s", ev_cls.__name__, list_)
 for ev_cls in app.event_handlers.keys():
-LOG.debug(" CONSUMES %s" % (ev_cls.__name__,))
+LOG.debug(" CONSUMES %s", ev_cls.__name__)

 @staticmethod
 def report_bricks():
@@ -186,7 +186,7 @@ class RPCReplyListener(SessionListener): # internal use
 with self._lock:
 try:
 rpc = self._id2rpc[id] # the corresponding rpc
-logger.debug("Delivering to %r" % rpc)
+logger.debug("Delivering to %r", rpc)
 rpc.deliver_reply(raw)
 except KeyError:
 raise OperationError("Unknown 'message-id': %s", id)
@@ -269,14 +269,14 @@ class RPC(object):

 *op* is the operation to be requested as an :class:`~xml.etree.ElementTree.Element`
 """
-logger.info('Requesting %r' % self.__class__.__name__)
+logger.info('Requesting %r', self.__class__.__name__)
 req = self._wrap(op)
 self._session.send(req)
 if self._async:
 logger.debug('Async request, returning %r', self)
 return self
 else:
-logger.debug('Sync request, will wait for timeout=%r' % self._timeout)
+logger.debug('Sync request, will wait for timeout=%r', self._timeout)
 self._event.wait(self._timeout)
 if self._event.isSet():
 if self._error:
@@ -38,30 +38,30 @@ class Session(Thread):
 self._server_capabilities = None # yet
 self._id = None # session-id
 self._connected = False # to be set/cleared by subclass implementation
-logger.debug('%r created: client_capabilities=%r' %
-(self, self._client_capabilities))
+logger.debug('%r created: client_capabilities=%r',
+self, self._client_capabilities)

 def _dispatch_message(self, raw):
 try:
 root = parse_root(raw)
 except Exception as e:
-logger.error('error parsing dispatch message: %s' % e)
+logger.error('error parsing dispatch message: %s', e)
 return
 with self._lock:
 listeners = list(self._listeners)
 for l in listeners:
-logger.debug('dispatching message to %r: %s' % (l, raw))
+logger.debug('dispatching message to %r: %s', l, raw)
 l.callback(root, raw) # no try-except; fail loudly if you must!

 def _dispatch_error(self, err):
 with self._lock:
 listeners = list(self._listeners)
 for l in listeners:
-logger.debug('dispatching error to %r' % l)
+logger.debug('dispatching error to %r', l)
 try: # here we can be more considerate with catching exceptions
 l.errback(err)
 except Exception as e:
-logger.warning('error dispatching to %r: %r' % (l, e))
+logger.warning('error dispatching to %r: %r', l, e)

 def _post_connect(self):
 "Greeting stuff"
@@ -88,8 +88,8 @@ class Session(Thread):
 raise error[0]
 #if ':base:1.0' not in self.server_capabilities:
 # raise MissingCapabilityError(':base:1.0')
-logger.info('initialized: session-id=%s | server_capabilities=%s' %
-(self._id, self._server_capabilities))
+logger.info('initialized: session-id=%s | server_capabilities=%s',
+self._id, self._server_capabilities)

 def add_listener(self, listener):
 """Register a listener that will be notified of incoming messages and
@@ -97,7 +97,7 @@ class Session(Thread):

 :type listener: :class:`SessionListener`
 """
-logger.debug('installing listener %r' % listener)
+logger.debug('installing listener %r', listener)
 if not isinstance(listener, SessionListener):
 raise SessionError("Listener must be a SessionListener type")
 with self._lock:
@@ -109,7 +109,7 @@ class Session(Thread):

 :type listener: :class:`SessionListener`
 """
-logger.debug('discarding listener %r' % listener)
+logger.debug('discarding listener %r', listener)
 with self._lock:
 self._listeners.discard(listener)

@@ -134,7 +134,7 @@ class Session(Thread):
 """Send the supplied *message* (xml string) to NETCONF server."""
 if not self.connected:
 raise TransportError('Not connected to NETCONF server')
-logger.debug('queueing %s' % message)
+logger.debug('queueing %s', message)
 self._q.put(message)

 ### Properties
@@ -218,8 +218,8 @@ class SSHSession(Session):
 for cls in (paramiko.RSAKey, paramiko.DSSKey):
 try:
 key = cls.from_private_key_file(key_filename, password)
-logger.debug("Trying key %s from %s" %
-(hexlify(key.get_fingerprint()), key_filename))
+logger.debug("Trying key %s from %s",
+hexlify(key.get_fingerprint()), key_filename)
 self._transport.auth_publickey(username, key)
 return
 except Exception as e:
@@ -229,7 +229,7 @@ class SSHSession(Session):
 if allow_agent:
 for key in paramiko.Agent().get_keys():
 try:
-logger.debug("Trying SSH agent key %s" %
+logger.debug("Trying SSH agent key %s",
 hexlify(key.get_fingerprint()))
 self._transport.auth_publickey(username, key)
 return
@@ -256,8 +256,8 @@ class SSHSession(Session):
 for cls, filename in keyfiles:
 try:
 key = cls.from_private_key_file(filename, password)
-logger.debug("Trying discovered key %s in %s" %
-(hexlify(key.get_fingerprint()), filename))
+logger.debug("Trying discovered key %s in %s",
+hexlify(key.get_fingerprint()), filename)
 self._transport.auth_publickey(username, key)
 return
 except Exception as e:
@@ -78,7 +78,7 @@ def rpc_server(socket, protocol, dispatcher):
 log.exception(e)
 continue

-log.debug('Received message %s from %r' % (message[-1], message[0]))
+log.debug('Received message %s from %r', message[-1], message[0])

 # assuming protocol is threadsafe and dispatcher is theadsafe, as long
 # as its immutable
@@ -91,11 +91,11 @@ def rpc_server(socket, protocol, dispatcher):
 response = e.error_respond()
 else:
 response = dispatcher.dispatch(request)
-log.debug('Response okay: %r' % response)
+log.debug('Response okay: %r', response)

 # send reply
 message[-1] = response.serialize()
-log.debug('Replying %s to %r' % (message[-1], message[0]))
+log.debug('Replying %s to %r', message[-1], message[0])
 socket.send_multipart(message)

 gevent.spawn(handle_client, message)
@@ -117,7 +117,7 @@ def to_actions(dp, acts):
 parser.OFPInstructionWriteMetadata(
 metadata, metadata_mask))
 else:
-LOG.error('Unknown action type: %s' % action_type)
+LOG.error('Unknown action type: %s', action_type)

 inst.append(parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS,
 actions))
@@ -125,7 +125,7 @@ def to_actions(dp, acts):
 meter_id = int(a.get('meter_id'))
 inst.append(parser.OFPInstructionMeter(meter_id))
 else:
-LOG.error('Unknown action type: %s' % action_type)
+LOG.error('Unknown action type: %s', action_type)

 inst.append(parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS,
 actions))
@@ -29,7 +29,7 @@ def packet_in_filter(cls, args=None, logging=False):
 pkt = packet.Packet(ev.msg.data)
 if not packet_in_handler.pkt_in_filter.filter(pkt):
 if logging:
-LOG.debug('The packet is discarded by %s: %s' % (cls, pkt))
+LOG.debug('The packet is discarded by %s: %s', cls, pkt)
 return
 return packet_in_handler(self, ev)
 pkt_in_filter = cls(args)
@@ -82,8 +82,8 @@ class sFlowV5(object):
 pack_str = cls._PACK_STR_IPV6
 min_len = cls._MIN_LEN_V6
 else:
-LOG.info("Unknown address_type. sFlowV5.address_type=%d"
-% address_type)
+LOG.info("Unknown address_type. sFlowV5.address_type=%d",
+address_type)
 return None

 (version, address_type, agent_address, sub_agent_id, sequence_number,
@@ -137,8 +137,8 @@ class sFlowV5Sample(object):
 # TODO:
 # sample_format == 3 : Expanded Flow Sample
 # sample_format == 4 : Expanded Counter Sample
-LOG.info("Unknown format. sFlowV5Sample.sample_format=%d"
-% sample_format)
+LOG.info("Unknown format. sFlowV5Sample.sample_format=%d",
+sample_format)
 pack_str = '!%sc' % sample_length
 sample = struct.unpack_from(pack_str, buf, offset)

@@ -240,8 +240,8 @@ class sFlowV5FlowRecord(object):
 # flow_data_format == 1010 : Extended MPLS FEC
 # flow_data_format == 1011 : Extended MPLS LVP FEC
 # flow_data_format == 1012 : Extended VLAN tunnel
-LOG.info("Unknown format. sFlowV5FlowRecord.flow_data_format=%d"
-% flow_data_format)
+LOG.info("Unknown format. sFlowV5FlowRecord.flow_data_format=%d",
+flow_data_format)
 pack_str = '!%sc' % flow_data_length
 flow_data = struct.unpack_from(pack_str, buf, offset)
@@ -88,7 +88,7 @@ def update_neighbor(neigh_ip_address, changes):
 def _update_med(neigh_ip_address, value):
 neigh_conf = _get_neighbor_conf(neigh_ip_address)
 neigh_conf.multi_exit_disc = value
-LOG.info('MED value for neigh: %s updated to %s' % (neigh_conf, value))
+LOG.info('MED value for neigh: %s updated to %s', neigh_conf, value)
 return True
@@ -171,7 +171,7 @@ class RyuBGPSpeaker(RyuApp):
 call('core.start', waiter=waiter, **common_settings)
 waiter.wait()

-LOG.debug('Core started %s' % CORE_MANAGER.started)
+LOG.debug('Core started %s', CORE_MANAGER.started)
 # Core manager started add configured neighbor and vrfs
 if CORE_MANAGER.started:
 # Add neighbors.
@@ -194,7 +194,7 @@ class RyuBGPSpeaker(RyuApp):
 try:
 bgp_neighbor[neighbors.IP_ADDRESS] = ip
 call('neighbor.create', **bgp_neighbor)
-LOG.debug('Added neighbor %s' % ip)
+LOG.debug('Added neighbor %s', ip)
 except RuntimeConfigError as re:
 LOG.error(re)
 LOG.error(traceback.format_exc())
@@ -211,7 +211,7 @@ class RyuBGPSpeaker(RyuApp):
 try:
 vrf[vrfs.VRF_NAME] = vrfname
 call('vrf.create', **vrf)
-LOG.debug('Added vrf %s' % str(vrf))
+LOG.debug('Added vrf %s', vrf)
 except RuntimeConfigError as e:
 LOG.error(e)
 continue
@@ -226,7 +226,7 @@ class RyuBGPSpeaker(RyuApp):
 for prefix in networks:
 try:
 call('network.add', prefix=prefix)
-LOG.debug('Added network %s' % str(prefix))
+LOG.debug('Added network %s', prefix)
 except RuntimeConfigError as e:
 LOG.error(e)
 continue
@@ -262,8 +262,7 @@ class Activity(object):
 # changed size during iteration
 child_activities = self._child_activity_map.items()
 for child_name, child_activity in child_activities:
-LOG.debug('%s: Stopping child activity %s ' %
-(self.name, child_name))
+LOG.debug('%s: Stopping child activity %s ', self.name, child_name)
 if child_activity.started:
 child_activity.stop()

@@ -273,8 +272,8 @@ class Activity(object):
 child_threads = self._child_thread_map.items()
 for thread_name, thread in child_threads:
 if not name or thread_name is name:
-LOG.debug('%s: Stopping child thread %s' %
-(self.name, thread_name))
+LOG.debug('%s: Stopping child thread %s',
+self.name, thread_name)
 thread.kill()
 del self._child_thread_map[thread_name]

@@ -283,14 +282,13 @@ class Activity(object):
 """
 asso_sockets = self._asso_socket_map.items()
 for sock_name, sock in asso_sockets:
-LOG.debug('%s: Closing socket %s - %s' %
-(self.name, sock_name, sock))
+LOG.debug('%s: Closing socket %s - %s', self.name, sock_name, sock)
 sock.close()

 def _stop_timers(self):
 timers = self._timers.items()
 for timer_name, timer in timers:
-LOG.debug('%s: Stopping timer %s' % (self.name, timer_name))
+LOG.debug('%s: Stopping timer %s', self.name, timer_name)
 timer.stop()

 def stop(self):
@@ -304,7 +302,7 @@ class Activity(object):
 raise ActivityException(desc='Cannot call stop when activity is '
 'not started or has been stopped already.')

-LOG.debug('Stopping activity %s.' % (self.name))
+LOG.debug('Stopping activity %s.', self.name)
 self._stop_timers()
 self._stop_child_activities()
 self._stop_child_threads()
@@ -316,7 +314,7 @@ class Activity(object):
 self._child_activity_map = weakref.WeakValueDictionary()
 self._child_thread_map = weakref.WeakValueDictionary()
 self._timers = weakref.WeakValueDictionary()
-LOG.debug('Stopping activity %s finished.' % self.name)
+LOG.debug('Stopping activity %s finished.', self.name)

 def _canonicalize_ip(self, ip):
 addr = netaddr.IPAddress(ip)
@@ -396,8 +394,7 @@ class Activity(object):
 successful, uses the socket instance to start `client_factory`.
 The socket is bound to `bind_address` if specified.
 """
-LOG.debug('Connect TCP called for %s:%s' % (peer_addr[0],
-peer_addr[1]))
+LOG.debug('Connect TCP called for %s:%s', peer_addr[0], peer_addr[1])
 if netaddr.valid_ipv4(peer_addr[0]):
 family = socket.AF_INET
 else:
@@ -72,9 +72,9 @@ class BMPClient(Activity):
 self._handle_bmp_session)
 except socket.error:
 self._connect_retry_event.set()
-LOG.info('Will try to reconnect to %s after %s secs: %s' %
-(self.server_address, self._connect_retry_time,
-self._connect_retry_event.is_set()))
+LOG.info('Will try to reconnect to %s after %s secs: %s',
+self.server_address, self._connect_retry_time,
+self._connect_retry_event.is_set())

 self.pause(self._connect_retry_time)
@@ -269,8 +269,7 @@ class CoreService(Factory, Activity):
 # Update to new RT filters
 self._peer_manager.set_peer_to_rtfilter_map(new_peer_to_rtfilter_map)
 self._rt_mgr.peer_to_rtfilter_map = new_peer_to_rtfilter_map
-LOG.debug('Updated RT filters: %s' %
-(str(self._rt_mgr.peer_to_rtfilter_map)))
+LOG.debug('Updated RT filters: %s', self._rt_mgr.peer_to_rtfilter_map)
 # Update interested RTs i.e. RTs on the path that will be installed
 # into global tables
 self._rt_mgr.update_interested_rts()
@@ -289,8 +288,8 @@ class CoreService(Factory, Activity):
 self._spawn('rt_filter_chg_%s' % peer,
 self._rt_mgr.on_rt_filter_chg_sync_peer,
 peer, new_rts, old_rts, table)
-LOG.debug('RT Filter change handler launched for route_family %s'
-% table.route_family)
+LOG.debug('RT Filter change handler launched for route_family %s',
+table.route_family)

 def _compute_rtfilter_map(self):
 """Returns neighbor's RT filter (permit/allow filter based on RT).
@@ -471,8 +470,7 @@ class CoreService(Factory, Activity):
 if (host, port) in self.bmpclients:
 bmpclient = self.bmpclients[(host, port)]
 if bmpclient.started:
-LOG.warn("bmpclient is already running for %s:%s" % (host,
-port))
+LOG.warn("bmpclient is already running for %s:%s", host, port)
 return False
 bmpclient = BMPClient(self, host, port)
 self.bmpclients[(host, port)] = bmpclient
@@ -481,7 +479,7 @@ class CoreService(Factory, Activity):

 def stop_bmp(self, host, port):
 if (host, port) not in self.bmpclients:
-LOG.warn("no bmpclient is running for %s:%s" % (host, port))
+LOG.warn("no bmpclient is running for %s:%s", host, port)
 return False

 bmpclient = self.bmpclients[(host, port)]
@@ -66,8 +66,7 @@ class ConfigurationManager(CommonConfListener, VrfsConfListener,
 # import new rts
 if re_import:
 LOG.debug(
-"RE-importing prefixes from VPN table to VRF %s"
-% repr(vrf_table)
+"RE-importing prefixes from VPN table to VRF %r", vrf_table
 )
 self._table_manager.import_all_vpn_paths_to_vrf(vrf_table)
 else:
@@ -62,8 +62,8 @@ class PeerManager(object):

 Cleans up the paths in global tables that was received from this peer.
 """
-LOG.debug('Cleaning obsolete paths whose source/version: %s/%s' %
-(peer.ip_address, peer.version_num))
+LOG.debug('Cleaning obsolete paths whose source/version: %s/%s',
+peer.ip_address, peer.version_num)
 # Launch clean-up for each global tables.
 self._table_manager.clean_stale_routes(peer)

@@ -158,8 +158,8 @@ class PeerManager(object):
 requests are ignored. Raises appropriate error in other cases. If
 `peer_ip` is equal to 'all' makes refresh request to all valid peers.
 """
-LOG.debug('Route refresh requested for peer %s and route families %s'
-% (peer_ip, route_families))
+LOG.debug('Route refresh requested for peer %s and route families %s',
+peer_ip, route_families)
 if not SUPPORTED_GLOBAL_RF.intersection(route_families):
 raise ValueError('Given route family(s) % is not supported.' %
 route_families)
@@ -91,7 +91,7 @@ class TableCoreManager(object):
 vpn_clone = best_path.clone_to_vpn(vrf_conf.route_dist,
 for_withdrawal=True)
 self.learn_path(vpn_clone)
-LOG.debug('VRF with RD %s marked for removal' % vrf_conf.route_dist)
+LOG.debug('VRF with RD %s marked for removal', vrf_conf.route_dist)

 def import_all_vpn_paths_to_vrf(self, vrf_table, import_rts=None):
 """Imports Vpnv4/6 paths from Global/VPN table into given Vrfv4/6
@@ -302,8 +302,8 @@ class TableCoreManager(object):
 path.nexthop,
 gen_lbl=True
 )
-LOG.debug('Re-installed NC paths with current policy for table %s.' %
-str(vrf_table))
+LOG.debug('Re-installed NC paths with current policy for table %s.',
+vrf_table)

 def _remove_links_to_vrf_table(self, vrf_table):
 """Removes any links to given `vrf_table`."""
@@ -361,8 +361,8 @@ class TableCoreManager(object):
 self._tables[table_id] = vrf_table

 assert vrf_table is not None
-LOG.debug('Added new VrfTable with rd: %s and add_fmly: %s' %
-(vrf_conf.route_dist, route_family))
+LOG.debug('Added new VrfTable with rd: %s and add_fmly: %s',
+vrf_conf.route_dist, route_family)

 import_rts = vrf_conf.import_rts
 # If VRF is configured with import RT, we put this table
@@ -381,8 +381,8 @@ class TableCoreManager(object):
 table_set = set()
 self._tables_for_rt[rt_rf_id] = table_set
 table_set.add(vrf_table)
-LOG.debug('Added VrfTable %s to import RT table list: %s' %
-(vrf_table, rt))
+LOG.debug('Added VrfTable %s to import RT table list: %s',
+vrf_table, rt)

 def _clean_global_uninteresting_paths(self):
 """Marks paths that do not have any route targets of interest
@@ -398,7 +398,7 @@ class TableCoreManager(object):
 """
 uninteresting_dest_count = 0
 interested_rts = self._rt_mgr.global_interested_rts
-LOG.debug('Cleaning uninteresting paths. Global interested RTs %s' %
+LOG.debug('Cleaning uninteresting paths. Global interested RTs %s',
 interested_rts)
 for route_family in [RF_IPv4_VPN, RF_IPv6_VPN, RF_RTC_UC]:
 # TODO(PH): We currently do not install RT_NLRI paths based on
@@ -409,8 +409,8 @@ class TableCoreManager(object):
 uninteresting_dest_count += \
 table.clean_uninteresting_paths(interested_rts)

-LOG.debug('Found %s number of destinations had uninteresting paths.' %
-str(uninteresting_dest_count))
+LOG.debug('Found %s number of destinations had uninteresting paths.',
+uninteresting_dest_count)

 def import_single_vpn_path_to_all_vrfs(self, vpn_path, path_rts=None):
 """Imports *vpnv4_path* to qualifying VRF tables.
@@ -420,11 +420,11 @@ class TableCoreManager(object):
 """
 assert (vpn_path.route_family in
 (Vpnv4Path.ROUTE_FAMILY, Vpnv6Path.ROUTE_FAMILY))
-LOG.debug('Importing path %s to qualifying VRFs' % vpn_path)
+LOG.debug('Importing path %s to qualifying VRFs', vpn_path)

 # If this path has no RTs we are done.
 if not path_rts:
-LOG.info('Encountered a path with no RTs: %s' % vpn_path)
+LOG.info('Encountered a path with no RTs: %s', vpn_path)
 return

 # We match path RTs with all VRFs that are interested in them.
@@ -454,7 +454,7 @@ class TableCoreManager(object):
 dest_changed(update_vrf_dest)
 else:
 # If we do not have any VRF with import RT that match with path RT
-LOG.debug('No VRF table found that imports RTs: %s' % path_rts)
+LOG.debug('No VRF table found that imports RTs: %s', path_rts)

 def add_to_vrf(self, route_dist, prefix, next_hop, route_family):
 """Adds `prefix` to VRF identified by `route_dist` with given
@@ -123,14 +123,14 @@ class Table(object):
 Old paths have source version number that is less than current peer
 version number. Also removes sent paths to this peer.
 """
-LOG.debug('Cleaning paths from table %s for peer %s' % (self, peer))
+LOG.debug('Cleaning paths from table %s for peer %s', self, peer)
 for dest in self.itervalues():
 # Remove paths learned from this source
 paths_deleted = dest.remove_old_paths_from_source(peer)
 # Remove sent paths to this peer
 had_sent = dest.remove_sent_route(peer)
 if had_sent:
-LOG.debug('Removed sent route %s for %s' % (dest.nlri, peer))
+LOG.debug('Removed sent route %s for %s', dest.nlri, peer)
 # If any paths are removed we enqueue respective destination for
 # future processing.
 if paths_deleted:
@@ -143,8 +143,8 @@ class Table(object):
 - `interested_rts`: (set) of RT that are of interest/that need to
 be preserved
 """
-LOG.debug('Cleaning table %s for given interested RTs %s' %
-(self, interested_rts))
+LOG.debug('Cleaning table %s for given interested RTs %s',
+self, interested_rts)
 uninteresting_dest_count = 0
 for dest in self.itervalues():
 added_withdraw = \
@@ -233,8 +233,8 @@ class NonVrfPathProcessingMixin(object):
 withdraw_clone = sent_path.clone(for_withdrawal=True)
 outgoing_route = OutgoingRoute(withdraw_clone)
 sent_route.sent_peer.enque_outgoing_msg(outgoing_route)
-LOG.debug('Sending withdrawal to %s for %s' %
-(sent_route.sent_peer, outgoing_route))
+LOG.debug('Sending withdrawal to %s for %s',
+sent_route.sent_peer, outgoing_route)

 # Have to clear sent_route list for this destination as
 # best path is removed.
@@ -243,7 +243,7 @@ class NonVrfPathProcessingMixin(object):
 def _new_best_path(self, new_best_path):
 old_best_path = self._best_path
 self._best_path = new_best_path
-LOG.debug('New best path selected for destination %s' % (self))
+LOG.debug('New best path selected for destination %s', self)

 # If old best path was withdrawn
 if (old_best_path and old_best_path not in self._known_path_list
@@ -264,8 +264,8 @@ class NonVrfPathProcessingMixin(object):
 withdraw_clone = sent_path.clone(for_withdrawal=True)
 outgoing_route = OutgoingRoute(withdraw_clone)
 sent_route.sent_peer.enque_outgoing_msg(outgoing_route)
-LOG.debug('Sending withdrawal to %s for %s' %
-(sent_route.sent_peer, outgoing_route))
+LOG.debug('Sending withdrawal to %s for %s',
+sent_route.sent_peer, outgoing_route)
 self._sent_routes = {}

@@ -509,7 +509,7 @@ class Destination(object):
 stopped by the same policies.
 """

-LOG.debug('Removing %s withdrawals' % len(self._withdraw_list))
+LOG.debug('Removing %s withdrawals', len(self._withdraw_list))

 # If we have no withdrawals, we have nothing to do.
 if not self._withdraw_list:
@@ -64,7 +64,7 @@ class VrfTable(Table):

 def init_import_maps(self, import_maps):
 LOG.debug(
-"Initializing import maps (%s) for %s" % (import_maps, repr(self))
+"Initializing import maps (%s) for %r", import_maps, self
 )
 del self._import_maps[:]
 importmap_manager = self._core_service.importmap_manager
@@ -306,7 +306,7 @@ class VrfDest(Destination):
 tm.learn_path(gpath)

 def _new_best_path(self, best_path):
-LOG.debug('New best path selected for destination %s' % (self))
+LOG.debug('New best path selected for destination %s', self)

 old_best_path = self._best_path
 assert (best_path != old_best_path)
@@ -335,8 +335,8 @@ class VrfDest(Destination):
 gpath = best_path.clone_to_vpn(self._route_dist)
 tm = self._core_service.table_manager
 tm.learn_path(gpath)
-LOG.debug('VRF table %s has new best path: %s' %
-(self._route_dist, self.best_path))
+LOG.debug('VRF table %s has new best path: %s',
+self._route_dist, self.best_path)

 def _remove_withdrawals(self):
 """Removes withdrawn paths.
@@ -348,7 +348,7 @@ class VrfDest(Destination):
 stopped by the same policies.
 """

-LOG.debug('Removing %s withdrawals' % len(self._withdraw_list))
+LOG.debug('Removing %s withdrawals', len(self._withdraw_list))

 # If we have not withdrawals, we have nothing to do.
 if not self._withdraw_list:
@@ -170,7 +170,7 @@ class RpcSession(Activity):
 # Wait for request/response/notification from peer.
 msg_buff = self._recv()
 if len(msg_buff) == 0:
-LOG.info('Peer %r disconnected.' % self._socket)
+LOG.info('Peer %r disconnected.', self._socket)
 break
 messages = self.feed_and_get_messages(msg_buff)
 for msg in messages:
@@ -186,7 +186,7 @@ class RpcSession(Activity):
 elif msg[0] == RPC_MSG_NOTIFY:
 _handle_notification(msg)
 else:
-LOG.error('Invalid message type: %r' % msg)
+LOG.error('Invalid message type: %r', msg)
 self.pause(0)

 def _process_outgoing_msg(self, sink_iter):
@@ -362,15 +362,15 @@ def _handle_response(response):


 def _handle_notification(notification):
-LOG.debug('Notification from NetworkController<<: %s %s' %
-(notification[RPC_IDX_NTF_SYM], notification[RPC_IDX_NTF_PARAM]))
+LOG.debug('Notification from NetworkController<<: %s %s',
+notification[RPC_IDX_NTF_SYM], notification[RPC_IDX_NTF_PARAM])
 operation, params = notification[1], notification[2]
 return api.base.call(operation, **params[0])


 def _handle_request(request):
-LOG.debug('Request from NetworkController<<: %s %s' %
-(request[RPC_IDX_REQ_SYM], request[RPC_IDX_REQ_PARAM]))
+LOG.debug('Request from NetworkController<<: %s %s',
+request[RPC_IDX_REQ_SYM], request[RPC_IDX_REQ_PARAM])
 operation, params = request[2], request[3]
 kwargs = {}
 if len(params) > 0:
@@ -115,7 +115,7 @@ Hello, this is Ryu BGP speaker (version %s).

 def check_channel_pty_request(self, chan, term, width, height,
 pixelwidth, pixelheight, modes):
-LOG.debug("termtype: %s" % (term, ))
+LOG.debug("termtype: %s", term)
 self.TERM = term
 return True

@@ -161,7 +161,7 @@ Hello, this is Ryu BGP speaker (version %s).
 elif c == 'D':
 self._movcursor(self.curpos - 1)
 else:
-LOG.error("unknown CSI sequence. do nothing: %c" % c)
+LOG.error("unknown CSI sequence. do nothing: %c", c)

 def _handle_esc_seq(self):
 c = self.chan.recv(1)
@@ -317,7 +317,7 @@ Hello, this is Ryu BGP speaker (version %s).

 def _execute_cmd(self, cmds):
 result, cmd = self.root(cmds)
-LOG.debug("result: %s" % str(result))
+LOG.debug("result: %s", result)
 self.prompted = False
 self._startnewline()
 output = result.value.replace('\n', '\n\r').rstrip()
@@ -351,7 +351,7 @@ Hello, this is Ryu BGP speaker (version %s).
 if len(c) == 0:
 break

-LOG.debug("ord:%d, hex:0x%x" % (ord(c), ord(c)))
+LOG.debug("ord:%d, hex:0x%x", ord(c), ord(c))
 self.promptlen = len(self.PROMPT) if self.prompted else 0
 if c == '?':
 cmpleter = self.root
@@ -448,9 +448,9 @@ Hello, this is Ryu BGP speaker (version %s).
 LOG.debug("blank buf. just start a new line.")
 self._startnewline()

-LOG.debug("curpos: %d, buf: %s, prompted: %s" % (self.curpos,
-self.buf,
-self.prompted))
+LOG.debug("curpos: %d, buf: %s, prompted: %s", self.curpos,
+self.buf,
+self.prompted)

 LOG.info("session end")
@@ -218,8 +218,8 @@ class PeerState(object):
 'neighbor.down', {'ip_address': self.peer.ip_address}
 )

-LOG.debug('Peer %s BGP FSM went from %s to %s' %
-(self.peer.ip_address, old_state, self.bgp_state))
+LOG.debug('Peer %s BGP FSM went from %s to %s',
+self.peer.ip_address, old_state, self.bgp_state)

 def incr(self, counter_name, incr_by=1):
 if counter_name not in self.counters:
@@ -398,7 +398,7 @@ class Peer(Source, Sink, NeighborConfListener, Activity):
 @in_filters.setter
 def in_filters(self, filters):
 self._in_filters = [f.clone() for f in filters]
-LOG.debug('set in-filter : %s' % filters)
+LOG.debug('set in-filter : %s', filters)
 self.on_update_in_filter()

 @property
@@ -408,7 +408,7 @@ class Peer(Source, Sink, NeighborConfListener, Activity):
 @out_filters.setter
 def out_filters(self, filters):
 self._out_filters = [f.clone() for f in filters]
-LOG.debug('set out-filter : %s' % filters)
+LOG.debug('set out-filter : %s', filters)
 self.on_update_out_filter()

 @property
@@ -508,7 +508,7 @@ class Peer(Source, Sink, NeighborConfListener, Activity):
 # If we do not have any protocol bound and configuration asks us to
 # enable this peer, we try to establish connection again.
 if enabled:
-LOG.info('%s enabled' % self)
+LOG.info('%s enabled', self)
 if self._protocol and self._protocol.started:
 LOG.error('Tried to enable neighbor that is already enabled')
 else:
@@ -518,7 +518,7 @@ class Peer(Source, Sink, NeighborConfListener, Activity):
 self._connect_retry_event.set()
 LOG.debug('Starting connect loop as neighbor is enabled.')
 else:
-LOG.info('%s disabled' % self)
+LOG.info('%s disabled', self)
 if self._protocol:
 # Stopping protocol will eventually trigger connection_lost
 # handler which will do some clean-up.
@@ -547,11 +547,11 @@ class Peer(Source, Sink, NeighborConfListener, Activity):
 def _on_update_connect_mode(self, mode):
 if mode is not CONNECT_MODE_PASSIVE and \
 'peer.connect_loop' not in self._child_thread_map:
-LOG.debug("start connect loop. (mode: %s)" % mode)
+LOG.debug("start connect loop. (mode: %s)", mode)
 self._spawn('peer.connect_loop', self._connect_loop,
 self._client_factory)
 elif mode is CONNECT_MODE_PASSIVE:
-LOG.debug("stop connect loop. (mode: %s)" % mode)
+LOG.debug("stop connect loop. (mode: %s)", mode)
 self._stop_child_threads('peer.connect_loop')

 def on_update_connect_mode(self, conf_evt):
@@ -585,22 +585,21 @@ class Peer(Source, Sink, NeighborConfListener, Activity):
 def on_update_in_filter(self):
 LOG.debug('on_update_in_filter fired')
 for received_path in self._adj_rib_in.itervalues():
-LOG.debug('received_path: %s' % received_path)
+LOG.debug('received_path: %s', received_path)
 path = received_path.path
 nlri_str = path.nlri.formatted_nlri_str
 block, blocked_reason = self._apply_in_filter(path)
 if block == received_path.filtered:
-LOG.debug('block situation not changed: %s' % block)
+LOG.debug('block situation not changed: %s', block)
 continue
 elif block:
 # path wasn't blocked, but must be blocked by this update
 path = path.clone(for_withdrawal=True)
-LOG.debug('withdraw %s because of in filter update'
-% nlri_str)
+LOG.debug('withdraw %s because of in filter update', nlri_str)
 else:
 # path was blocked, but mustn't be blocked by this update
-LOG.debug('learn blocked %s because of in filter update'
-% nlri_str)
+LOG.debug('learn blocked %s because of in filter update',
+nlri_str)
 received_path.filtered = block
 tm = self._core_service.table_manager
 tm.learn_path(path)
@@ -608,24 +607,24 @@ class Peer(Source, Sink, NeighborConfListener, Activity):
 def on_update_out_filter(self):
 LOG.debug('on_update_out_filter fired')
 for sent_path in self._adj_rib_out.itervalues():
-LOG.debug('sent_path: %s' % sent_path)
+LOG.debug('sent_path: %s', sent_path)
 path = sent_path.path
 nlri_str = path.nlri.formatted_nlri_str
 block, blocked_reason = self._apply_out_filter(path)
 if block == sent_path.filtered:
-LOG.debug('block situation not changed: %s' % block)
+LOG.debug('block situation not changed: %s', block)
 continue
 elif block:
 # path wasn't blocked, but must be blocked by this update
 withdraw_clone = path.clone(for_withdrawal=True)
 outgoing_route = OutgoingRoute(withdraw_clone)
-LOG.debug('send withdraw %s because of out filter update'
-% nlri_str)
+LOG.debug('send withdraw %s because of out filter update',
+nlri_str)
 else:
 # path was blocked, but mustn't be blocked by this update
 outgoing_route = OutgoingRoute(path)
-LOG.debug('send blocked %s because of out filter update'
-% nlri_str)
+LOG.debug('send blocked %s because of out filter update',
+nlri_str)
 sent_path.filtered = block
 self.enque_outgoing_msg(outgoing_route)

@@ -633,7 +632,7 @@ class Peer(Source, Sink, NeighborConfListener, Activity):
 # resend sent_route in case of filter matching
 LOG.debug('on_update_attribute_maps fired')
 for sent_path in self._adj_rib_out.itervalues():
-LOG.debug('resend path: %s' % sent_path)
+LOG.debug('resend path: %s', sent_path)
 path = sent_path.path
 self.enque_outgoing_msg(OutgoingRoute(path))

@@ -642,7 +641,7 @@ class Peer(Source, Sink, NeighborConfListener, Activity):
 self._neigh_conf.remote_as)

 def _run(self, client_factory):
-LOG.debug('Started peer %s' % self)
+LOG.debug('Started peer %s', self)
 self._client_factory = client_factory

 # Tries actively to establish session if CONNECT_MODE is not PASSIVE
@@ -661,8 +660,8 @@ class Peer(Source, Sink, NeighborConfListener, Activity):
 """
 assert rr_msg.type == BGP_MSG_ROUTE_REFRESH
 self._protocol.send(rr_msg)
-LOG.debug('RouteRefresh %s>> %s' %
-(self._neigh_conf.ip_address, rr_msg))
+LOG.debug('RouteRefresh %s>> %s',
+self._neigh_conf.ip_address, rr_msg)
 # Collect update statistics for sent refresh request.
 if rr_msg.demarcation == 0:
 self.state.incr(PeerCounterNames.SENT_REFRESH)
@@ -699,8 +698,7 @@ class Peer(Source, Sink, NeighborConfListener, Activity):
 # Collect update statistics.
 self.state.incr(PeerCounterNames.SENT_UPDATES)
 else:
-LOG.debug('prefix : %s is not sent by filter : %s'
-% (path.nlri, blocked_cause))
+LOG.debug('prefix : %s is not sent by filter : %s', path.nlri, blocked_cause)

 # We have to create sent_route for every OutgoingRoute which is
 # not a withdraw or was for route-refresh msg.
@@ -740,8 +738,8 @@ class Peer(Source, Sink, NeighborConfListener, Activity):
 # EOR are enqueued as plain Update messages.
 elif isinstance(outgoing_msg, BGPUpdate):
 self._protocol.send(outgoing_msg)
-LOG.debug('Update %s>> %s' % (self._neigh_conf.ip_address,
-outgoing_msg))
+LOG.debug('Update %s>> %s', self._neigh_conf.ip_address,
+outgoing_msg)
 self.state.incr(PeerCounterNames.SENT_UPDATES)

 def request_route_refresh(self, *route_families):
@@ -1069,8 +1067,7 @@ class Peer(Source, Sink, NeighborConfListener, Activity):
 '%s to %s' % (self, bind_addr,
 peer_address))
 else:
-LOG.debug('%s trying to connect to %s' % (self,
-peer_address))
+LOG.debug('%s trying to connect to %s', self, peer_address)
 tcp_conn_timeout = self._common_conf.tcp_conn_timeout
 try:
 password = self._neigh_conf.password
@@ -1084,10 +1081,10 @@ class Peer(Source, Sink, NeighborConfListener, Activity):
 LOG.debug('Socket could not be created in time (%s secs),'
 ' reason %s' % (tcp_conn_timeout,
 traceback.format_exc()))
-LOG.info('Will try to reconnect to %s after %s secs: %s' %
-(self._neigh_conf.ip_address,
-self._common_conf.bgp_conn_retry_time,
-self._connect_retry_event.is_set()))
+LOG.info('Will try to reconnect to %s after %s secs: %s',
+self._neigh_conf.ip_address,
+self._common_conf.bgp_conn_retry_time,
+self._connect_retry_event.is_set())

 self.pause(self._common_conf.bgp_conn_retry_time)

@@ -1101,12 +1098,12 @@ class Peer(Source, Sink, NeighborConfListener, Activity):
 # Stop connect_loop retry timer as we are now connected
 if self._protocol and self._connect_retry_event.is_set():
 self._connect_retry_event.clear()
-LOG.debug('Connect retry event for %s is cleared' % self)
+LOG.debug('Connect retry event for %s is cleared', self)

 if self._protocol and self.outgoing_msg_event.is_set():
 # Start processing sink.
 self.outgoing_msg_event.set()
-LOG.debug('Processing of outgoing msg. started for %s.' % self)
+LOG.debug('Processing of outgoing msg. started for %s.', self)

 def _send_collision_err_and_stop(self, protocol):
 code = BGP_ERROR_CEASE
@@ -1123,7 +1120,7 @@ class Peer(Source, Sink, NeighborConfListener, Activity):
 peer. If another protocol is already bound, connection collision
 resolution takes place.
 """
-LOG.debug('Trying to bind protocol %s to peer %s' % (proto, self))
+LOG.debug('Trying to bind protocol %s to peer %s', proto, self)
 # Validate input.
 if not isinstance(proto, BgpProtocol):
 raise ValueError('Currently only supports valid instances of'
@@ -1170,7 +1167,7 @@ class Peer(Source, Sink, NeighborConfListener, Activity):
 reactive_proto = self._protocol
 proactive_proto = proto

-LOG.debug('Pro-active/Active protocol %s' % proactive_proto)
+LOG.debug('Pro-active/Active protocol %s', proactive_proto)
 # We compare bgp local and remote router id and keep the protocol
 # that was initiated by peer with highest id.
 if proto.is_local_router_id_greater():
@@ -1392,14 +1389,14 @@ class Peer(Source, Sink, NeighborConfListener, Activity):

 # Create path instances for each NLRI from the update message.
 for msg_nlri in msg_nlri_list:
-LOG.debug('NLRI: %s' % msg_nlri)
+LOG.debug('NLRI: %s', msg_nlri)
 new_path = bgp_utils.create_path(
 self,
 msg_nlri,
 pattrs=umsg_pattrs,
 nexthop=next_hop
 )
-LOG.debug('Extracted paths from Update msg.: %s' % new_path)
+LOG.debug('Extracted paths from Update msg.: %s', new_path)

 block, blocked_cause = self._apply_in_filter(new_path)

@@ -1413,8 +1410,7 @@ class Peer(Source, Sink, NeighborConfListener, Activity):
 tm = self._core_service.table_manager
 tm.learn_path(new_path)
 else:
-LOG.debug('prefix : %s is blocked by in-bound filter : %s'
-% (msg_nlri, blocked_cause))
+LOG.debug('prefix : %s is blocked by in-bound filter: %s', msg_nlri, blocked_cause)

 # If update message had any qualifying new paths, do some book-keeping.
 if msg_nlri_list:
@@ -1476,8 +1472,7 @@ class Peer(Source, Sink, NeighborConfListener, Activity):
 tm = self._core_service.table_manager
 tm.learn_path(w_path)
 else:
-LOG.debug('prefix : %s is blocked by in-bound filter : %s'
-% (nlri_str, blocked_cause))
+LOG.debug('prefix : %s is blocked by in-bound filter: %s', nlri_str, blocked_cause)

 def _extract_and_handle_mpbgp_new_paths(self, update_msg):
 """Extracts new paths advertised in the given update message's
@@ -1554,7 +1549,7 @@ class Peer(Source, Sink, NeighborConfListener, Activity):
 pattrs=umsg_pattrs,
 nexthop=next_hop
 )
-LOG.debug('Extracted paths from Update msg.: %s' % new_path)
+LOG.debug('Extracted paths from Update msg.: %s', new_path)

 block, blocked_cause = self._apply_in_filter(new_path)

@@ -1572,8 +1567,7 @@ class Peer(Source, Sink, NeighborConfListener, Activity):
 tm = self._core_service.table_manager
 tm.learn_path(new_path)
 else:
-LOG.debug('prefix : %s is blocked by in-bound filter : %s'
-% (msg_nlri, blocked_cause))
+LOG.debug('prefix : %s is blocked by in-bound filter: %s', msg_nlri, blocked_cause)

 # If update message had any qualifying new paths, do some book-keeping.
 if msg_nlri_list:
@@ -1634,15 +1628,14 @@ class Peer(Source, Sink, NeighborConfListener, Activity):
 tm = self._core_service.table_manager
 tm.learn_path(w_path)
 else:
-LOG.debug('prefix : %s is blocked by in-bound filter : %s'
-% (w_nlri, blocked_cause))
+LOG.debug('prefix : %s is blocked by in-bound filter: %s', w_nlri, blocked_cause)

 def _handle_eor(self, route_family):
 """Currently we only handle EOR for RTC address-family.

 We send non-rtc initial updates if not already sent.
 """
-LOG.debug('Handling EOR for %s' % route_family)
+LOG.debug('Handling EOR for %s', route_family)
 # assert (route_family in SUPPORTED_GLOBAL_RF)
 # assert self.is_mbgp_cap_valid(route_family)

@@ -1718,7 +1711,7 @@ class Peer(Source, Sink, NeighborConfListener, Activity):
 'err-refresh-stale-path-timer', rst,
 self._core_service.table_manager.clean_stale_routes, self,
 route_family)
-LOG.debug('Refresh Stale Path timer set (%s sec).' % rst)
+LOG.debug('Refresh Stale Path timer set (%s sec).', rst)

 def _handle_route_refresh_msg(self, msg):
 afi = msg.afi
@@ -1742,7 +1735,7 @@ class Peer(Source, Sink, NeighborConfListener, Activity):
 tm.clean_stale_routes(self, route_family)

 else:
-LOG.error('Route refresh message has invalid demarcation %s' %
+LOG.error('Route refresh message has invalid demarcation %s',
 demarcation)

 def _handle_route_refresh_req(self, afi, safi):
@@ -1866,7 +1859,7 @@ class Peer(Source, Sink, NeighborConfListener, Activity):
 to various conditions: like bgp state, transmit side loop, local and
 remote AS path, community attribute, etc.
 """
-LOG.debug('Peer %s asked to communicate path' % self)
+LOG.debug('Peer %s asked to communicate path', self)
 if not path:
 raise ValueError('Invalid path %s given.' % path)

@@ -1901,7 +1894,7 @@ class Peer(Source, Sink, NeighborConfListener, Activity):
 # peers AS, if so we do not send UPDATE message to this peer.
 as_path = path.get_pattr(BGP_ATTR_TYPE_AS_PATH)
 if as_path and as_path.has_matching_leftmost(self.remote_as):
-LOG.debug('Skipping sending path as AS_PATH has peer AS %s' %
+LOG.debug('Skipping sending path as AS_PATH has peer AS %s',
 self.remote_as)
 return

@@ -1976,8 +1969,8 @@ class Peer(Source, Sink, NeighborConfListener, Activity):
 # its sink.
 outgoing_route = OutgoingRoute(path)
 self.enque_outgoing_msg(outgoing_route)
-LOG.debug('Enqueued outgoing route %s for peer %s' %
-(outgoing_route.path.nlri, self))
+LOG.debug('Enqueued outgoing route %s for peer %s',
+outgoing_route.path.nlri, self)

 def connection_made(self):
 """Protocols connection established handler
@@ -365,7 +365,7 @@ def _cmp_by_origin(path1, path2):
 elif origin.value == BGP_ATTR_ORIGIN_INCOMPLETE:
 return 1
 else:
-LOG.error('Invalid origin value encountered %s.' % origin)
+LOG.error('Invalid origin value encountered %s.', origin)
 return 0

 origin1 = path1.get_pattr(BGP_ATTR_TYPE_ORIGIN)
@@ -8,7 +8,7 @@ class SignalBus(object):

 def emit_signal(self, identifier, data):
 identifier = _to_tuple(identifier)
-LOG.debug('SIGNAL: %s emited with data: %s ' % (identifier, data))
+LOG.debug('SIGNAL: %s emited with data: %s ', identifier, data)
 for func, filter_func in self._listeners.get(identifier, []):
 if not filter_func or filter_func(data):
 func(identifier, data)
@@ -320,7 +320,7 @@ class BgpProtocol(Protocol, Activity):
 # We should get default marker since we are not supporting any
 # authentication.
 if (auth != BgpProtocol.MESSAGE_MARKER):
-LOG.error('Invalid message marker received: %s' % auth)
+LOG.error('Invalid message marker received: %s', auth)
 raise bgp.NotSync()

 # Check if we have valid bgp message length.
@@ -372,8 +372,8 @@ class BgpProtocol(Protocol, Activity):
 self._send_with_lock(notification)
 self._signal_bus.bgp_error(self._peer, code, subcode, reason)
 if len(self._localname):
-LOG.error('Sent notification to %r >> %s' % (self._localname,
-notification))
+LOG.error('Sent notification to %r >> %s', self._localname,
+notification)
 self._socket.close()

 def _send_with_lock(self, msg):
@@ -393,12 +393,11 @@ class BgpProtocol(Protocol, Activity):
 self._send_with_lock(msg)

 if msg.type == BGP_MSG_NOTIFICATION:
-LOG.error('Sent notification to %s >> %s' %
-(self._remotename, msg))
+LOG.error('Sent notification to %s >> %s', self._remotename, msg)

 self._signal_bus.bgp_notification_sent(self._peer, msg)
 else:
-LOG.debug('Sent msg to %s >> %s' % (self._remotename, msg))
+LOG.debug('Sent msg to %s >> %s', self._remotename, msg)

 def stop(self):
 Activity.stop(self)
@@ -430,7 +429,7 @@ class BgpProtocol(Protocol, Activity):
 message except for *Open* and *Notification* message. On receiving
 *Notification* message we close connection with peer.
 """
-LOG.debug('Received msg from %s << %s' % (self._remotename, msg))
+LOG.debug('Received msg from %s << %s', self._remotename, msg)

 # If we receive open message we try to bind to protocol
 if (msg.type == BGP_MSG_OPEN):
@@ -520,7 +519,7 @@ class BgpProtocol(Protocol, Activity):
 def _expired(self):
 """Hold timer expired event handler.
 """
-LOG.info('Negotiated hold time %s expired.' % self._holdtime)
+LOG.info('Negotiated hold time %s expired.', self._holdtime)
 code = BGP_ERROR_HOLD_TIMER_EXPIRED
 subcode = BGP_ERROR_SUB_HOLD_TIMER_EXPIRED
 self.send_notification(code, subcode)
|
@ -96,7 +96,7 @@ def from_inet_ptoi(bgp_id):
|
||||
packed_byte = socket.inet_pton(socket.AF_INET, bgp_id)
|
||||
four_byte_id = long(packed_byte.encode('hex'), 16)
|
||||
except ValueError:
|
||||
LOG.debug('Invalid bgp id given for conversion to integer value %s' %
|
||||
LOG.debug('Invalid bgp id given for conversion to integer value %s',
|
||||
bgp_id)
|
||||
|
||||
return four_byte_id
|
||||
|
@@ -138,8 +138,8 @@ class RouteTargetManager(object):
 self.add_rt_nlri(removed_rt, is_withdraw=True)

 def on_rt_filter_chg_sync_peer(self, peer, new_rts, old_rts, table):
-LOG.debug('RT Filter changed for peer %s, new_rts %s, old_rts %s ' %
-(peer, new_rts, old_rts))
+LOG.debug('RT Filter changed for peer %s, new_rts %s, old_rts %s ',
+peer, new_rts, old_rts)
 for dest in table.itervalues():
 # If this destination does not have best path, we ignore it
 if not dest.best_path:
@@ -195,8 +195,8 @@ class RouteTargetManager(object):
 # Update current interested RTs for next iteration
 self._global_interested_rts = curr_global_rts

-LOG.debug('Global Interested RT changed, new RTs %s, removed RTs %s' %
-(new_global_rts, removed_global_rts))
+LOG.debug('Global Interested RT changed, new RTs %s, removed RTs %s',
+new_global_rts, removed_global_rts)
 tm = self._core_service.table_manager
 tm.on_interesting_rts_change(new_global_rts, removed_global_rts)
@@ -310,8 +310,8 @@ class VRRPV2StateMaster(VRRPState):
 # should not reach here.
 # In fact this can be happned due to event scheduling
 vrrp_router = self.vrrp_router
-vrrp_router.logger.debug('%s master_down %s %s' % (
-self.__class__.__name__, ev.__class__.__name__, vrrp_router.state))
+vrrp_router.logger.debug('%s master_down %s %s',
+self.__class__.__name__, ev.__class__.__name__, vrrp_router.state)

 def _adver(self):
 vrrp_router = self.vrrp_router
@@ -398,8 +398,8 @@ class VRRPV2StateBackup(VRRPState):
 # should not reach here
 # In fact this can be happned due to event scheduling
 vrrp_router = self.vrrp_router
-vrrp_router.logger.debug('%s adver %s %s' % (
-self.__class__.__name__, ev.__class__.__name__, vrrp_router.state))
+vrrp_router.logger.debug('%s adver %s %s',
+self.__class__.__name__, ev.__class__.__name__, vrrp_router.state)

 def preempt_delay(self, ev):
 self.vrrp_router.logger.warn('%s preempt_delay',
@@ -513,8 +513,8 @@ class VRRPV3StateMaster(VRRPState):
 # should not reach here
 # In fact this can be happned due to event scheduling
 vrrp_router = self.vrrp_router
-vrrp_router.logger.debug('%s master_down %s %s' % (
-self.__class__.__name__, ev.__class__.__name__, vrrp_router.state))
+vrrp_router.logger.debug('%s master_down %s %s',
+self.__class__.__name__, ev.__class__.__name__, vrrp_router.state)

 def _adver(self):
 vrrp_router = self.vrrp_router
@@ -613,8 +613,8 @@ class VRRPV3StateBackup(VRRPState):
 # should not reach here
 # In fact this can be happned due to event scheduling
 vrrp_router = self.vrrp_router
-vrrp_router.logger.debug('adver %s %s %s' % (
-self.__class__.__name__, ev.__class__.__name__, vrrp_router.state))
+vrrp_router.logger.debug('adver %s %s %s',
+self.__class__.__name__, ev.__class__.__name__, vrrp_router.state)

 def preempt_delay(self, ev):
 self.vrrp_router.logger.warn('%s preempt_delay',
@@ -115,7 +115,7 @@ def hex_array(data):
 try:
 return to_hex[type(data)](data)
 except KeyError:
-LOG.exception('%s is invalid data type' % type(data))
+LOG.exception('%s is invalid data type', type(data))
 return None