Add H904 to pep8 check
H904 ("Delay string interpolations at logging calls") is the former N342 check. It flags eager string formatting in logging calls, i.e. building the message with % before handing it to the logger instead of passing the format string and its arguments separately. Enabling it keeps the way we log uniform.

Change-Id: I4d16a7db530d9fa5a641140e03680307f878f0bb
Partial-Bug: #1663864
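For illustration only (not part of the commit), a minimal sketch of the pattern H904 enforces; the logger name and port_id value are made up:

    import logging

    logging.basicConfig(level=logging.INFO)
    LOG = logging.getLogger(__name__)

    port_id = "3f2a"  # hypothetical value, for illustration only

    # Flagged by H904: the message is interpolated eagerly with %, even when
    # the log level is disabled, and the logger never sees the format string.
    LOG.info("Removing port %s" % port_id)

    # H904-compliant: format string and arguments are passed separately, so
    # interpolation is delayed until the record is actually emitted.
    LOG.info("Removing port %s", port_id)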
@@ -68,9 +68,9 @@ def load_driver(driver_cfg, namespace):
     try:
         class_to_load = importutils.import_class(driver_cfg)
     except (ImportError, ValueError):
-        LOG.error(_LE("Error loading class %(class)s by alias e: %(e)s")
-                  % {'class': driver_cfg, 'e': e1_info},
-                  exc_info=e1_info)
+        LOG.error(_LE("Error loading class %(class)s by alias e: %(e)s"),
+                  {'class': driver_cfg, 'e': e1_info},
+                  exc_info=e1_info)
         LOG.error(_LE("Error loading class by class name"),
                   exc_info=True)
         raise ImportError(_("Class not found."))
@@ -183,9 +183,9 @@ class DfLocalController(object):
 
     def delete_lswitch(self, lswitch_id):
         lswitch = self.db_store.get_lswitch(lswitch_id)
-        LOG.info(_LI("Removing Logical Switch = %s") % lswitch_id)
+        LOG.info(_LI("Removing Logical Switch = %s"), lswitch_id)
         if lswitch is None:
-            LOG.warning(_LW("Try to delete a nonexistent lswitch(%s)") %
-                        lswitch_id)
+            LOG.warning(_LW("Try to delete a nonexistent lswitch(%s)"),
+                        lswitch_id)
             return
         self.open_flow_app.notify_remove_logical_switch(lswitch)
@@ -239,19 +239,18 @@ class DfLocalController(object):
             if ofport:
                 lport.set_external_value('ofport', ofport)
                 if original_lport is None:
-                    LOG.info(_LI("Adding new local logical port = %s") %
-                             str(lport))
+                    LOG.info(_LI("Adding new local logical port = %s"), lport)
                     self.open_flow_app.notify_add_local_port(lport)
                 else:
                     LOG.info(_LI("Updating local logical port = %(port)s, "
-                                 "original port = %(original_port)s") %
-                             {'port': str(lport),
-                              'original_port': str(original_lport)})
+                                 "original port = %(original_port)s"),
+                             {'port': lport,
+                              'original_port': original_lport})
                     self.open_flow_app.notify_update_local_port(lport,
                                                                 original_lport)
             else:
-                LOG.info(_LI("Local logical port %s was not created yet") %
-                         str(lport))
+                LOG.info(_LI("Local logical port %s was not created yet"),
+                         lport)
                 return
         else:
             lport.set_external_value('is_local', False)
@@ -274,14 +273,13 @@ class DfLocalController(object):
         if ofport:
             lport.set_external_value('ofport', ofport)
             if original_lport is None:
-                LOG.info(_LI("Adding new remote logical port = %s") %
-                         str(lport))
+                LOG.info(_LI("Adding new remote logical port = %s"), lport)
                 self.open_flow_app.notify_add_remote_port(lport)
             else:
                 LOG.info(_LI("Updating remote logical port = %(port)s, "
-                             "original port = %(original_port)s") %
-                         {'port': str(lport),
-                          'original_port': str(original_lport)})
+                             "original port = %(original_port)s"),
+                         {'port': lport,
+                          'original_port': original_lport})
                 self.open_flow_app.notify_update_remote_port(
                     lport, original_lport)
         else:
@@ -297,7 +295,7 @@ class DfLocalController(object):
     def update_lport(self, lport):
        chassis = lport.get_chassis()
        if not self._is_physical_chassis(chassis):
-            LOG.debug(("Port %s has not been bound or it is a vPort") %
+            LOG.debug(("Port %s has not been bound or it is a vPort"),
                       lport.get_id())
            return
        original_lport = self.db_store.get_port(lport.get_id())
@@ -315,14 +313,12 @@ class DfLocalController(object):
         if lport is None:
             return
         if lport.get_external_value('is_local'):
-            LOG.info(_LI("Removing local logical port = %s") %
-                     str(lport))
+            LOG.info(_LI("Removing local logical port = %s"), lport)
             if lport.get_external_value('ofport') is not None:
                 self.open_flow_app.notify_remove_local_port(lport)
             self.db_store.delete_port(lport.get_id(), True)
         else:
-            LOG.info(_LI("Removing remote logical port = %s") %
-                     str(lport))
+            LOG.info(_LI("Removing remote logical port = %s"), lport)
             if lport.get_external_value('ofport') is not None:
                 self.open_flow_app.notify_remove_remote_port(lport)
             self.db_store.delete_port(lport.get_id(), False)
@@ -354,8 +350,7 @@ class DfLocalController(object):
     def update_secgroup(self, secgroup):
         old_secgroup = self.db_store.get_security_group(secgroup.get_id())
         if old_secgroup is None:
-            LOG.info(_LI("Security Group created = %s") %
-                     secgroup)
+            LOG.info(_LI("Security Group created = %s"), secgroup)
             self._add_new_security_group(secgroup)
             return
         if not df_utils.is_valid_version(
@@ -452,14 +447,12 @@ class DfLocalController(object):
         self.db_store.delete_security_group(secgroup.get_id())
 
     def _add_new_security_group_rule(self, secgroup, secgroup_rule):
-        LOG.info(_LI("Adding new secgroup rule = %s") %
-                 secgroup_rule)
+        LOG.info(_LI("Adding new secgroup rule = %s"), secgroup_rule)
         self.open_flow_app.notify_add_security_group_rule(
             secgroup, secgroup_rule)
 
     def _delete_security_group_rule(self, secgroup, secgroup_rule):
-        LOG.info(_LI("Removing secgroup rule = %s") %
-                 secgroup_rule)
+        LOG.info(_LI("Removing secgroup rule = %s"), secgroup_rule)
         self.open_flow_app.notify_remove_security_group_rule(
             secgroup, secgroup_rule)
 
@@ -487,8 +480,7 @@ class DfLocalController(object):
         if not floatingip:
             return
         self.open_flow_app.notify_delete_floatingip(floatingip)
-        LOG.info(_LI("Floatingip is deleted. Floatingip = %s") %
-                 str(floatingip))
+        LOG.info(_LI("Floatingip is deleted. Floatingip = %s"), floatingip)
         self.db_store.delete_floatingip(floatingip_id)
 
     def update_publisher(self, publisher):
@@ -508,14 +500,14 @@ class DfLocalController(object):
     def _associate_floatingip(self, floatingip):
         self.db_store.update_floatingip(floatingip.get_id(), floatingip)
         self.open_flow_app.notify_associate_floatingip(floatingip)
-        LOG.info(_LI("Floatingip is associated with port. Floatingip = %s") %
-                 str(floatingip))
+        LOG.info(_LI("Floatingip is associated with port. Floatingip = %s"),
+                 floatingip)
 
     def _disassociate_floatingip(self, floatingip):
         self.db_store.delete_floatingip(floatingip.get_id())
         self.open_flow_app.notify_disassociate_floatingip(floatingip)
-        LOG.info(_LI("Floatingip is disassociated from port."
-                     " Floatingip = %s") % str(floatingip))
+        LOG.info(_LI("Floatingip is disassociated from port. "
+                     "Floatingip = %s"), floatingip)
 
     def _update_floatingip(self, old_floatingip, new_floatingip):
         if new_floatingip.get_lport_id() != old_floatingip.get_lport_id():
@@ -96,10 +96,10 @@ class DHCPApp(df_base_app.DFlowApp):
                 self._block_port_dhcp_traffic(
                     ofport,
                     self.block_hard_timeout)
-                LOG.warning(_LW("pass rate limit for %(port_id)s blocking DHCP"
-                                " traffic for %(time)s sec") %
-                            {'port_id': lport.get_id(),
-                             'time': self.block_hard_timeout})
+                LOG.warning(_LW("pass rate limit for %(port_id)s blocking DHCP "
+                                "traffic for %(time)s sec"),
+                            {'port_id': lport.get_id(),
+                             'time': self.block_hard_timeout})
                 return
         if not self.db_store.get_port(lport.get_id()):
             LOG.error(_LE("Port %s no longer found."), lport.get_id())
@@ -119,19 +119,19 @@ class DHCPApp(df_base_app.DFlowApp):
                 dhcp_packet,
                 dhcp.DHCP_OFFER,
                 lport)
-            LOG.info(_LI("sending DHCP offer for port IP %(port_ip)s"
-                         " port id %(port_id)s")
-                     % {'port_ip': lport.get_ip(), 'port_id': lport.get_id()})
+            LOG.info(_LI("sending DHCP offer for port IP %(port_ip)s "
+                         "port id %(port_id)s"),
+                     {'port_ip': lport.get_ip(), 'port_id': lport.get_id()})
         elif dhcp_message_type == dhcp.DHCP_REQUEST:
             send_packet = self._create_dhcp_packet(
                 packet,
                 dhcp_packet,
                 dhcp.DHCP_ACK,
                 lport)
-            LOG.info(_LI("sending DHCP ACK for port IP %(port_ip)s"
-                         " port id %(tunnel_id)s")
-                     % {'port_ip': lport.get_ip(),
-                        'tunnel_id': lport.get_id()})
+            LOG.info(_LI("sending DHCP ACK for port IP %(port_ip)s "
+                         "port id %(tunnel_id)s"),
+                     {'port_ip': lport.get_ip(),
+                      'tunnel_id': lport.get_id()})
         else:
             LOG.error(_LE("DHCP message type %d not handled"),
                       dhcp_message_type)
@@ -144,8 +144,7 @@ class DHCPApp(df_base_app.DFlowApp):
 
         subnet = self._get_subnet_by_port(lport)
         if subnet is None:
-            LOG.error(_LE("No subnet found for port <%s>") %
-                      lport.get_id())
+            LOG.error(_LE("No subnet found for port <%s>"), lport.get_id())
             return
 
         pkt_type_packed = struct.pack('!B', pkt_type)
@@ -394,8 +394,8 @@ class L3ProactiveApp(df_base_app.DFlowApp):
         return
 
     def _delete_router_route(self, router, route):
-        LOG.debug('Delete extra route %(route)s from router %(router)s' %
-                  {'route': route, 'router': str(router)})
+        LOG.debug('Delete extra route %(route)s from router %(router)s',
+                  {'route': route, 'router': router})
 
         self._delete_route_process(router, route)
         self._del_from_route_cache(ROUTE_ADDED, router.get_id(), route)
@@ -56,7 +56,7 @@ class Topology(object):
         if ovs_port is None:
             LOG.error(_LE("ovs_port is None"))
             return
-        LOG.info(_LI("Ovs port updated: %s") % str(ovs_port))
+        LOG.info(_LI("Ovs port updated: %s"), ovs_port)
         port_id = ovs_port.get_id()
         old_port = self.ovs_ports.get(port_id)
         if old_port is None:
@@ -179,8 +179,8 @@ class Topology(object):
         lport_id = ovs_port.get_iface_id()
         lport = self._get_lport(lport_id)
         if lport is None:
-            LOG.warning(_LW("No logical port found for ovs port: %s")
-                        % str(ovs_port))
+            LOG.warning(_LW("No logical port found for ovs port: %s"),
+                        ovs_port)
             return
         topic = lport.get_topic()
         if not topic:
@@ -211,7 +211,7 @@ class Topology(object):
             self.controller.bridge_port_updated(ovs_port)
         except Exception:
             LOG.exception(_LE('Failed to process bridge port online '
-                              'event: %s') % str(ovs_port))
+                              'event: %s'), ovs_port)
 
     def _vm_port_deleted(self, ovs_port):
         ovs_port_id = ovs_port.get_id()
@@ -228,12 +228,12 @@ class Topology(object):
 
         topic = lport.get_topic()
 
-        LOG.info(_LI("The logical port(%s) is offline") % str(lport))
+        LOG.info(_LI("The logical port(%s) is offline"), lport)
         try:
             self.controller.delete_lport(lport_id)
         except Exception:
             LOG.exception(_LE(
-                'Failed to process logical port offline event %s') % lport_id)
+                'Failed to process logical port offline event %s'), lport_id)
         finally:
             # publish vm port down event.
             if cfg.CONF.df.enable_port_status_notifier:
@@ -248,7 +248,7 @@ class Topology(object):
             return
 
         if topic not in self.topic_subscribed:
-            LOG.info(_LI("Subscribe topic: %(topic)s by lport: %(id)s") %
+            LOG.info(_LI("Subscribe topic: %(topic)s by lport: %(id)s"),
                      {"topic": topic, "id": lport_id})
             self.nb_api.subscriber.register_topic(topic)
             self._pull_tenant_topology_from_db(topic)
@@ -262,7 +262,7 @@ class Topology(object):
         port_ids = self.topic_subscribed[topic]
         port_ids.remove(lport_id)
         if len(port_ids) == 0:
-            LOG.info(_LI("Unsubscribe topic: %(topic)s by lport: %(id)s") %
+            LOG.info(_LI("Unsubscribe topic: %(topic)s by lport: %(id)s"),
                      {"topic": topic, "id": lport_id})
             del self.topic_subscribed[topic]
             self.nb_api.subscriber.unregister_topic(topic)
@@ -73,8 +73,8 @@ class RedisDbDriver(db_api.DbApi):
             self._update_server_list()
 
         if local_key is not None:
-            LOG.exception(_LE("update server list, key: %(key)s")
-                          % {'key': local_key})
+            LOG.exception(_LE("update server list, key: %(key)s"),
+                          {'key': local_key})
 
     def _sync_master_list(self):
         if self.is_neutron_server:
@@ -105,8 +105,8 @@ class RedisDbDriver(db_api.DbApi):
 
     def _execute_cmd(self, oper, local_key, value=None):
         if not self._is_oper_valid(oper):
-            LOG.warning(_LW("invalid oper: %(oper)s")
-                        % {'oper': oper})
+            LOG.warning(_LW("invalid oper: %(oper)s"),
+                        {'oper': oper})
             return None
 
         ip_port = self.redis_mgt.get_ip_by_key(local_key)
@@ -134,7 +134,7 @@ class RedisDbDriver(db_api.DbApi):
                     continue
                 self._handle_db_conn_error(ip_port, local_key)
                 LOG.exception(_LE("connection error while sending "
-                                  "request to db: %(e)s") % {'e': e})
+                                  "request to db: %(e)s"), {'e': e})
                 raise e
             except exceptions.ResponseError as e:
                 if not alreadysync:
@@ -155,12 +155,12 @@ class RedisDbDriver(db_api.DbApi):
                         # maybe there is a fast failover
                         self._handle_db_conn_error(ip_port, local_key)
                         LOG.exception(_LE("no client available: "
-                                          "%(ip_port)s, %(e)s")
-                                      % {'ip_port': resp[2], 'e': e})
+                                          "%(ip_port)s, %(e)s"),
+                                      {'ip_port': resp[2], 'e': e})
                         raise e
                 else:
-                    LOG.exception(_LE("error not handled: %(e)s")
-                                  % {'e': e})
+                    LOG.exception(_LE("error not handled: %(e)s"),
+                                  {'e': e})
                     raise e
             except Exception as e:
                 if not alreadysync:
@@ -169,7 +169,7 @@ class RedisDbDriver(db_api.DbApi):
                     continue
                 self._handle_db_conn_error(ip_port, local_key)
                 LOG.exception(_LE("exception while sending request to "
-                                  "db: %(e)s") % {'e': e})
+                                  "db: %(e)s"), {'e': e})
                 raise e
 
     def get_key(self, table, key, topic=None):
@@ -182,8 +182,8 @@ class RedisDbDriver(db_api.DbApi):
                 if len(local_keys) == 1:
                     return self._execute_cmd("GET", local_keys[0])
             except Exception:
-                LOG.exception(_LE("exception when get_key: %(key)s ")
-                              % {'key': local_key})
+                LOG.exception(_LE("exception when get_key: %(key)s"),
+                              {'key': local_key})
 
         else:
             local_key = self._uuid_to_key(table, key, topic)
@@ -191,8 +191,8 @@ class RedisDbDriver(db_api.DbApi):
                 # return nil if not found
                 return self._execute_cmd("GET", local_key)
             except Exception:
-                LOG.exception(_LE("exception when get_key: %(key)s ")
-                              % {'key': local_key})
+                LOG.exception(_LE("exception when get_key: %(key)s"),
+                              {'key': local_key})
 
     def set_key(self, table, key, value, topic=None):
         local_key = self._uuid_to_key(table, key, topic)
@@ -204,8 +204,8 @@ class RedisDbDriver(db_api.DbApi):
 
             return res
         except Exception:
-            LOG.exception(_LE("exception when set_key: %(key)s ")
-                          % {'key': local_key})
+            LOG.exception(_LE("exception when set_key: %(key)s"),
+                          {'key': local_key})
 
     def create_key(self, table, key, value, topic=None):
         return self.set_key(table, key, value, topic)
@@ -221,8 +221,8 @@ class RedisDbDriver(db_api.DbApi):
 
             return res
         except Exception:
-            LOG.exception(_LE("exception when delete_key: %(key)s ")
-                          % {'key': local_key})
+            LOG.exception(_LE("exception when delete_key: %(key)s"),
+                          {'key': local_key})
 
     def get_all_entries(self, table, topic=None):
         res = []
@@ -239,8 +239,8 @@ class RedisDbDriver(db_api.DbApi):
                 return res
             except Exception:
                 LOG.exception(_LE("exception when get_all_entries: "
-                                  "%(key)s ")
-                              % {'key': local_key})
+                                  "%(key)s"),
+                              {'key': local_key})
 
         else:
             local_key = self._uuid_to_key(table, '*', topic)
@@ -256,8 +256,8 @@ class RedisDbDriver(db_api.DbApi):
             return res
         except Exception as e:
             self._handle_db_conn_error(ip_port, local_key)
-            LOG.exception(_LE("exception when mget: %(key)s, %(e)s")
-                          % {'key': local_key, 'e': e})
+            LOG.exception(_LE("exception when mget: %(key)s, %(e)s"),
+                          {'key': local_key, 'e': e})
 
     def get_all_keys(self, table, topic=None):
         res = []
@@ -273,8 +273,8 @@ class RedisDbDriver(db_api.DbApi):
             except Exception as e:
                 self._handle_db_conn_error(ip_port, local_key)
                 LOG.exception(_LE("exception when get_all_keys: "
-                                  "%(key)s, %(e)s")
-                              % {'key': local_key, 'e': e})
+                                  "%(key)s, %(e)s"),
+                              {'key': local_key, 'e': e})
 
         else:
             local_key = self._uuid_to_key(table, '*', topic)
@@ -290,8 +290,8 @@ class RedisDbDriver(db_api.DbApi):
         except Exception as e:
             self._handle_db_conn_error(ip_port, local_key)
             LOG.exception(_LE("exception when get_all_keys: "
-                              "%(key)s, %(e)s")
-                          % {'key': local_key, 'e': e})
+                              "%(key)s, %(e)s"),
+                          {'key': local_key, 'e': e})
 
     def _strip_table_name_from_key(self, key):
         regex = '^{.*}\\.(.*)$'
@@ -308,15 +308,15 @@ class RedisDbDriver(db_api.DbApi):
             return client.incr(local_key)
         except Exception as e:
             self._handle_db_conn_error(ip_port, local_key)
-            LOG.exception(_LE("exception when incr: %(key)s, %(e)s")
-                          % {'key': local_key, 'e': e})
+            LOG.exception(_LE("exception when incr: %(key)s, %(e)s"),
+                          {'key': local_key, 'e': e})
 
     def allocate_unique_key(self, table):
         try:
             return self._allocate_unique_key(table)
         except Exception as e:
-            LOG.error(_LE("allocate_unique_key exception: %(e)s")
-                      % {'e': e})
+            LOG.error(_LE("allocate_unique_key exception: %(e)s"),
+                      {'e': e})
             return
 
     def register_notification_callback(self, callback, topics=None):
@@ -102,8 +102,8 @@ class RedisMgt(object):
                 self.default_node.connection_pool.reset()
             except Exception as e:
                 LOG.exception(_LE("exception happened "
-                                  "when release default node, %(e)s")
-                              % {'e': e})
+                                  "when release default node, %(e)s"),
+                              {'e': e})
 
     def _release_node(self, node):
         node.connection_pool.get_connection(None, None).disconnect()
@@ -141,8 +141,8 @@ class RedisMgt(object):
         except Exception:
             LOG.exception(_LE("exception happened "
                               "when get cluster topology, %(ip)s:"
-                              "%(port)s")
-                          % {'ip': ip_port[0], 'port': ip_port[1]})
+                              "%(port)s"),
+                          {'ip': ip_port[0], 'port': ip_port[1]})
 
         return new_nodes
 
@@ -239,8 +239,8 @@ class RedisMgt(object):
         if ip_port is not None:
             # remove the node by ip_port
             LOG.info(_LI("remove node %(ip_port)s from "
-                         "redis master list")
-                     % {'ip_port': ip_port})
+                         "redis master list"),
+                     {'ip_port': ip_port})
             self.master_list = [node for node in self.master_list
                                 if node['ip_port'] != ip_port]
 
@@ -302,8 +302,8 @@ class RedisMgt(object):
             LOG.warning(_LW("redis cluster nodes less than local, "
                             "maybe there is a partition in db "
                             "cluster, nodes:%(new)s, "
-                            "local nodes:%(local)s")
-                        % {'new': new_nodes, 'local': old_nodes})
+                            "local nodes:%(local)s"),
+                        {'new': new_nodes, 'local': old_nodes})
 
         return changed
 
@@ -388,8 +388,8 @@ class RedisMgt(object):
             local_list = msgpack.Unpacker(six.BytesIO(syncstring)).unpack()
             if local_list:
                 self.master_list = local_list
-                LOG.info(_LI("get new master from syncstring master=%s")
-                         % self.master_list)
+                LOG.info(_LI("get new master from syncstring master=%s"),
+                         self.master_list)
                 return True
 
         return False
@@ -34,9 +34,9 @@ class ZMQPubSub(pub_sub_api.PubSubApi):
         if transport not in SUPPORTED_TRANSPORTS:
             message = _LE("zmq_pub_sub: Unsupported publisher_transport value "
                           "%(transport)s, expected %(expected)s")
-            LOG.error(message % {
+            LOG.error(message, {
                 'transport': transport,
-                'expected': str(SUPPORTED_TRANSPORTS)
+                'expected': SUPPORTED_TRANSPORTS
             })
             raise exceptions.UnsupportedTransportException(transport=transport)
         self.subscriber = ZMQSubscriberAgent()
@@ -80,7 +80,7 @@ class ZMQPublisherAgentBase(pub_sub_api.PublisherApi):
         update.topic = topic
         data = pub_sub_api.pack_message(update.to_dict())
         self.socket.send_multipart([topic, data])
-        LOG.debug("sending %s" % update)
+        LOG.debug("sending %s", update)
 
     def close(self):
         if self.socket:
@@ -104,7 +104,7 @@ class ZMQPublisherAgent(ZMQPublisherAgentBase):
     def _connect(self):
         context = zmq.Context()
         self.socket = context.socket(zmq.PUB)
-        LOG.debug("about to bind to network socket: %s" % self._endpoint)
+        LOG.debug("about to bind to network socket: %s", self._endpoint)
         self.socket.bind(self._endpoint)
 
 
@@ -116,7 +116,7 @@ class ZMQPublisherMultiprocAgent(ZMQPublisherAgentBase):
     def _connect(self):
         context = zmq.Context()
         self.socket = context.socket(zmq.PUSH)
-        LOG.debug("about to connect to IPC socket: %s" % self.ipc_socket)
+        LOG.debug("about to connect to IPC socket: %s", self.ipc_socket)
         self.socket.connect('ipc://%s' % self.ipc_socket)
 
     def send_event(self, update, topic=None):
@@ -160,8 +160,8 @@ class ZMQSubscriberAgentBase(pub_sub_api.SubscriberAgentBase):
 
     def run(self):
         self.sub_socket = self.connect()
-        LOG.info(_LI("Starting Subscriber on ports %(endpoints)s ")
-                 % {'endpoints': str(self.uri_list)})
+        LOG.info(_LI("Starting Subscriber on ports %(endpoints)s"),
+                 {'endpoints': self.uri_list})
         while True:
             try:
                 eventlet.sleep(0)
@@ -187,7 +187,7 @@ class ZMQSubscriberMultiprocAgent(ZMQSubscriberAgentBase):
         context = zmq.Context()
         inproc_server = context.socket(zmq.PULL)
         ipc_socket = cfg.CONF.df.publisher_multiproc_socket
-        LOG.debug("about to bind to IPC socket: %s" % ipc_socket)
+        LOG.debug("about to bind to IPC socket: %s", ipc_socket)
         inproc_server.bind('ipc://%s' % ipc_socket)
         return inproc_server
 
@@ -198,7 +198,7 @@ class ZMQSubscriberAgent(ZMQSubscriberAgentBase):
         socket = context.socket(zmq.SUB)
         for uri in self.uri_list:
             #TODO(gampel) handle exp zmq.EINVAL,zmq.EPROTONOSUPPORT
-            LOG.debug("about to connect to network publisher at %s" % uri)
+            LOG.debug("about to connect to network publisher at %s", uri)
             socket.connect(uri)
         for topic in self.topic_list:
             socket.setsockopt(zmq.SUBSCRIBE, topic)
@@ -174,7 +174,7 @@ class DFMechDriver(driver_api.MechanismDriver):
         tenant_id = sg['tenant_id']
 
         self.nb_api.delete_security_group(sg_id, topic=tenant_id)
-        LOG.info(_LI("DFMechDriver: delete security group %s") % sg_id)
+        LOG.info(_LI("DFMechDriver: delete security group %s"), sg_id)
 
     @lock_db.wrap_db_lock(lock_db.RESOURCE_ML2_SECURITY_GROUP_RULE_CREATE)
     def create_security_group_rule(self, resource, event, trigger, **kwargs):
@@ -247,7 +247,7 @@ class DFMechDriver(driver_api.MechanismDriver):
                                        topic=tenant_id)
         except df_exceptions.DBKeyNotFound:
             LOG.debug("lswitch %s is not found in DF DB, might have "
-                      "been deleted concurrently" % network_id)
+                      "been deleted concurrently", network_id)
             return
 
         LOG.info(_LI("DFMechDriver: delete network %s"), network_id)
@@ -492,7 +492,7 @@ class DFMechDriver(driver_api.MechanismDriver):
                                        nw_version=network['revision_number'])
         except df_exceptions.DBKeyNotFound:
             LOG.debug("network %s is not found in DB, might have "
-                      "been deleted concurrently" % net_id)
+                      "been deleted concurrently", net_id)
             return
 
         LOG.info(_LI("DFMechDriver: delete subnet %s"), subnet_id)
@@ -680,7 +680,7 @@ class DFMechDriver(driver_api.MechanismDriver):
             self.nb_api.delete_lport(id=port_id, topic=topic)
         except df_exceptions.DBKeyNotFound:
             LOG.debug("port %s is not found in DF DB, might have "
-                      "been deleted concurrently" % port_id)
+                      "been deleted concurrently", port_id)
             return
 
         LOG.info(_LI("DFMechDriver: delete port %s"), port_id)
@@ -147,7 +147,7 @@ class DFL3RouterPlugin(service_base.ServicePluginBase,
                 gateway=gw_info
             )
         except df_exceptions.DBKeyNotFound:
-            LOG.debug("router %s is not found in DF DB" % router_id)
+            LOG.debug("router %s is not found in DF DB", router_id)
 
         return router
 
@@ -160,7 +160,7 @@ class DFL3RouterPlugin(service_base.ServicePluginBase,
             self.nb_api.delete_lrouter(id=router_id,
                                        topic=router['tenant_id'])
         except df_exceptions.DBKeyNotFound:
-            LOG.debug("router %s is not found in DF DB" % router_id)
+            LOG.debug("router %s is not found in DF DB", router_id)
         return ret_val
 
     def _get_floatingip_port(self, context, floatingip_id):
@@ -252,7 +252,7 @@ class DFL3RouterPlugin(service_base.ServicePluginBase,
             self.nb_api.delete_floatingip(id=id,
                                           topic=floatingip['tenant_id'])
         except df_exceptions.DBKeyNotFound:
-            LOG.exception(_LE("floatingip %s is not found in DF DB") % id)
+            LOG.exception(_LE("floatingip %s is not found in DF DB"), id)
 
     def get_floatingip(self, context, id, fields=None):
         with context.session.begin(subtransactions=True):
@@ -302,5 +302,5 @@ class DFL3RouterPlugin(service_base.ServicePluginBase,
         except df_exceptions.DBKeyNotFound:
             LOG.exception(_LE("logical router %s is not found in DF DB, "
                               "suppressing delete_lrouter_port "
-                              "exception") % router_id)
+                              "exception"), router_id)
         return router_port_info
@@ -34,7 +34,7 @@ def find_first_network(nclient, params):
     if networks_count > 1:
         message = _LW("More than one network (%(count)d) found matching: "
                       "%(args)s")
-        LOG.warning(message % {'args': params, 'count': networks_count})
+        LOG.warning(message, {'args': params, 'count': networks_count})
     return networks[0]
 
 
@@ -2,8 +2,7 @@
 # of appearance. Changing the order has an impact on the overall integration
 # process, which may cause wedges in the gate later.
 
-hacking<0.12,>=0.11.0 # Apache-2.0
-
+hacking!=0.13.0,<0.14,>=0.12.0 # Apache-2.0
 
 coverage>=4.0 # Apache-2.0
 keystoneauth1>=2.18.0 # Apache-2.0
tox.ini
@@ -53,9 +53,10 @@ commands = sphinx-build -W -b html doc/source doc/build/html
 # E265 block comment should start with '# '
 # H404 multi line docstring should start with a summary
 # H405 multi line docstring summary not separated with an empty line
-# H904 Wrap long lines in parentheses instead of a backslash
 # N530 Direct neutron imports not allowed
-ignore = E126,E128,E129,E265,H404,H405,H904,N530
+ignore = E126,E128,E129,E265,H404,H405,N530
+# H904: Delay string interpolations at logging calls
+enable-extensions=H904
 show-source = true
 # TODO(dougw) neutron/tests/unit/vmware exclusion is a temporary services split hack
 exclude = .venv,.git,.tox,dist,doc,*lib/python*,*egg,build,tools,.ropeproject,rally-scenarios,neutron/tests/unit/vmware*