LOG.warn -> LOG.warning

Python 3 deprecated the logger.warn method; see:
https://docs.python.org/3/library/logging.html#logging.warning
We therefore prefer to use warning, to avoid a DeprecationWarning.

In addition to this there will be a hacking rule to prevent this
(https://review.openstack.org/#/c/262257/)

TrivialFix

Change-Id: Iec8499951cd10a910a4d8ecdac95f5bb53a2e86e
This commit is contained in:
Gary Kotton 2015-12-30 23:43:47 -08:00
parent 69fa193ff9
commit 994ddd79c7
14 changed files with 114 additions and 108 deletions

View File

@@ -101,7 +101,7 @@ class ApiClientBase(object):
api_providers are configured. api_providers are configured.
''' '''
if not self._api_providers: if not self._api_providers:
LOG.warn(_LW("[%d] no API providers currently available."), rid) LOG.warning(_LW("[%d] no API providers currently available."), rid)
return None return None
if self._conn_pool.empty(): if self._conn_pool.empty():
LOG.debug("[%d] Waiting to acquire API client connection.", rid) LOG.debug("[%d] Waiting to acquire API client connection.", rid)
@@ -149,10 +149,10 @@ class ApiClientBase(object):
priority = http_conn.priority priority = http_conn.priority
if bad_state: if bad_state:
# Reconnect to provider. # Reconnect to provider.
LOG.warn(_LW("[%(rid)d] Connection returned in bad state, " LOG.warning(_LW("[%(rid)d] Connection returned in bad state, "
"reconnecting to %(conn)s"), "reconnecting to %(conn)s"),
{'rid': rid, {'rid': rid,
'conn': api_client.ctrl_conn_to_str(http_conn)}) 'conn': api_client.ctrl_conn_to_str(http_conn)})
http_conn = self._create_connection(*self._conn_params(http_conn)) http_conn = self._create_connection(*self._conn_params(http_conn))
elif service_unavail: elif service_unavail:
# http_conn returned a service unaviable response, put other # http_conn returned a service unaviable response, put other

View File

@@ -210,8 +210,8 @@ class GetApiProvidersRequestEventlet(EventletApiRequest):
ret.append(_provider_from_listen_addr(addr)) ret.append(_provider_from_listen_addr(addr))
return ret return ret
except Exception as e: except Exception as e:
LOG.warn(_LW("[%(rid)d] Failed to parse API provider: %(e)s"), LOG.warning(_LW("[%(rid)d] Failed to parse API provider: %(e)s"),
{'rid': self._rid(), 'e': e}) {'rid': self._rid(), 'e': e})
# intentionally fall through # intentionally fall through
return None return None

View File

@@ -122,9 +122,9 @@ class ApiRequest(object):
conn.request(self._method, url, self._body, headers) conn.request(self._method, url, self._body, headers)
except Exception as e: except Exception as e:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
LOG.warn(_LW("[%(rid)d] Exception issuing request: " LOG.warning(_LW("[%(rid)d] Exception issuing request: "
"%(e)s"), "%(e)s"),
{'rid': self._rid(), 'e': e}) {'rid': self._rid(), 'e': e})
response = conn.getresponse() response = conn.getresponse()
response.body = response.read() response.body = response.read()
@@ -181,10 +181,10 @@ class ApiRequest(object):
# queue. # queue.
if (response.status == httplib.INTERNAL_SERVER_ERROR and if (response.status == httplib.INTERNAL_SERVER_ERROR and
response.status > httplib.NOT_IMPLEMENTED): response.status > httplib.NOT_IMPLEMENTED):
LOG.warn(_LW("[%(rid)d] Request '%(method)s %(url)s' " LOG.warning(_LW("[%(rid)d] Request '%(method)s %(url)s' "
"received: %(status)s"), "received: %(status)s"),
{'rid': self._rid(), 'method': self._method, {'rid': self._rid(), 'method': self._method,
'url': self._url, 'status': response.status}) 'url': self._url, 'status': response.status})
raise Exception(_('Server error return: %s'), response.status) raise Exception(_('Server error return: %s'), response.status)
return response return response
except socket.error: except socket.error:
@@ -197,10 +197,11 @@ class ApiRequest(object):
msg = str(e) msg = str(e)
if response is None: if response is None:
elapsed_time = time.time() - issued_time elapsed_time = time.time() - issued_time
LOG.warn(_LW("[%(rid)d] Failed request '%(conn)s': '%(msg)s' " LOG.warning(_LW("[%(rid)d] Failed request '%(conn)s': '%(msg)s' "
"(%(elapsed)s seconds)"), "(%(elapsed)s seconds)"),
{'rid': self._rid(), 'conn': self._request_str(conn, url), {'rid': self._rid(),
'msg': msg, 'elapsed': elapsed_time}) 'conn': self._request_str(conn, url),
'msg': msg, 'elapsed': elapsed_time})
self._request_error = e self._request_error = e
is_conn_error = True is_conn_error = True
return e return e
@@ -231,8 +232,8 @@ class ApiRequest(object):
url = value url = value
break break
if not url: if not url:
LOG.warn(_LW("[%d] Received redirect status without location " LOG.warning(_LW("[%d] Received redirect status without location "
"header field"), self._rid()) "header field"), self._rid())
return (conn, None) return (conn, None)
# Accept location with the following format: # Accept location with the following format:
# 1. /path, redirect to same node # 1. /path, redirect to same node
@@ -248,13 +249,14 @@ class ApiRequest(object):
url = result.path url = result.path
return (conn, url) # case 1 return (conn, url) # case 1
else: else:
LOG.warn(_LW("[%(rid)d] Received invalid redirect location: " LOG.warning(_LW("[%(rid)d] Received invalid redirect "
"'%(url)s'"), {'rid': self._rid(), 'url': url}) "location: '%(url)s'"),
{'rid': self._rid(), 'url': url})
return (conn, None) # case 3 return (conn, None) # case 3
elif result.scheme not in ["http", "https"] or not result.hostname: elif result.scheme not in ["http", "https"] or not result.hostname:
LOG.warn(_LW("[%(rid)d] Received malformed redirect " LOG.warning(_LW("[%(rid)d] Received malformed redirect "
"location: %(url)s"), "location: %(url)s"),
{'rid': self._rid(), 'url': url}) {'rid': self._rid(), 'url': url})
return (conn, None) # case 3 return (conn, None) # case 3
# case 2, redirect location includes a scheme # case 2, redirect location includes a scheme
# so setup a new connection and authenticate # so setup a new connection and authenticate

View File

@@ -67,8 +67,8 @@ def get_nsx_switch_ids(session, cluster, neutron_network_id):
# more than once for each network in Neutron's lifetime # more than once for each network in Neutron's lifetime
nsx_switches = switchlib.get_lswitches(cluster, neutron_network_id) nsx_switches = switchlib.get_lswitches(cluster, neutron_network_id)
if not nsx_switches: if not nsx_switches:
LOG.warn(_LW("Unable to find NSX switches for Neutron network %s"), LOG.warning(_LW("Unable to find NSX switches for Neutron network "
neutron_network_id) "%s"), neutron_network_id)
return return
nsx_switch_ids = [] nsx_switch_ids = []
with session.begin(subtransactions=True): with session.begin(subtransactions=True):
@@ -114,8 +114,8 @@ def get_nsx_switch_and_port_id(session, cluster, neutron_port_id):
# NOTE(salv-orlando): Not handling the case where more than one # NOTE(salv-orlando): Not handling the case where more than one
# port is found with the same neutron port tag # port is found with the same neutron port tag
if not nsx_ports: if not nsx_ports:
LOG.warn(_LW("Unable to find NSX port for Neutron port %s"), LOG.warning(_LW("Unable to find NSX port for Neutron port %s"),
neutron_port_id) neutron_port_id)
# This method is supposed to return a tuple # This method is supposed to return a tuple
return None, None return None, None
nsx_port = nsx_ports[0] nsx_port = nsx_ports[0]
@@ -154,12 +154,12 @@ def get_nsx_security_group_id(session, cluster, neutron_id):
# NOTE(salv-orlando): Not handling the case where more than one # NOTE(salv-orlando): Not handling the case where more than one
# security profile is found with the same neutron port tag # security profile is found with the same neutron port tag
if not nsx_sec_profiles: if not nsx_sec_profiles:
LOG.warn(_LW("Unable to find NSX security profile for Neutron " LOG.warning(_LW("Unable to find NSX security profile for Neutron "
"security group %s"), neutron_id) "security group %s"), neutron_id)
return return
elif len(nsx_sec_profiles) > 1: elif len(nsx_sec_profiles) > 1:
LOG.warn(_LW("Multiple NSX security profiles found for Neutron " LOG.warning(_LW("Multiple NSX security profiles found for Neutron "
"security group %s"), neutron_id) "security group %s"), neutron_id)
nsx_sec_profile = nsx_sec_profiles[0] nsx_sec_profile = nsx_sec_profiles[0]
nsx_id = nsx_sec_profile['uuid'] nsx_id = nsx_sec_profile['uuid']
with session.begin(subtransactions=True): with session.begin(subtransactions=True):
@@ -191,8 +191,8 @@ def get_nsx_router_id(session, cluster, neutron_router_id):
# NOTE(salv-orlando): Not handling the case where more than one # NOTE(salv-orlando): Not handling the case where more than one
# port is found with the same neutron port tag # port is found with the same neutron port tag
if not nsx_routers: if not nsx_routers:
LOG.warn(_LW("Unable to find NSX router for Neutron router %s"), LOG.warning(_LW("Unable to find NSX router for Neutron router %s"),
neutron_router_id) neutron_router_id)
return return
nsx_router = nsx_routers[0] nsx_router = nsx_routers[0]
nsx_router_id = nsx_router['uuid'] nsx_router_id = nsx_router['uuid']
@@ -248,11 +248,12 @@ def get_nsx_device_statuses(cluster, tenant_id):
except api_exc.NsxApiException: except api_exc.NsxApiException:
# Do not make a NSX API exception fatal # Do not make a NSX API exception fatal
if tenant_id: if tenant_id:
LOG.warn(_LW("Unable to retrieve operational status for gateway " LOG.warning(_LW("Unable to retrieve operational status for "
"devices belonging to tenant: %s"), tenant_id) "gateway devices belonging to tenant: %s"),
tenant_id)
else: else:
LOG.warn(_LW("Unable to retrieve operational status for " LOG.warning(_LW("Unable to retrieve operational status for "
"gateway devices")) "gateway devices"))
def _convert_bindings_to_nsx_transport_zones(bindings): def _convert_bindings_to_nsx_transport_zones(bindings):

View File

@@ -406,8 +406,8 @@ class NsxSynchronizer():
neutron_router_mappings[neutron_router_id] = ( neutron_router_mappings[neutron_router_id] = (
self._nsx_cache[lr_uuid]) self._nsx_cache[lr_uuid])
else: else:
LOG.warn(_LW("Unable to find Neutron router id for " LOG.warning(_LW("Unable to find Neutron router id for "
"NSX logical router: %s"), lr_uuid) "NSX logical router: %s"), lr_uuid)
# Fetch neutron routers from database # Fetch neutron routers from database
filters = ({} if scan_missing else filters = ({} if scan_missing else
{'id': neutron_router_mappings.keys()}) {'id': neutron_router_mappings.keys()})
@@ -541,14 +541,14 @@ class NsxSynchronizer():
# be emitted. # be emitted.
num_requests = page_size / (MAX_PAGE_SIZE + 1) + 1 num_requests = page_size / (MAX_PAGE_SIZE + 1) + 1
if num_requests > 1: if num_requests > 1:
LOG.warn(_LW("Requested page size is %(cur_chunk_size)d. " LOG.warning(_LW("Requested page size is %(cur_chunk_size)d. "
"It might be necessary to do %(num_requests)d " "It might be necessary to do %(num_requests)d "
"round-trips to NSX for fetching data. Please " "round-trips to NSX for fetching data. Please "
"tune sync parameters to ensure chunk size " "tune sync parameters to ensure chunk size "
"is less than %(max_page_size)d"), "is less than %(max_page_size)d"),
{'cur_chunk_size': page_size, {'cur_chunk_size': page_size,
'num_requests': num_requests, 'num_requests': num_requests,
'max_page_size': MAX_PAGE_SIZE}) 'max_page_size': MAX_PAGE_SIZE})
# Only the first request might return the total size, # Only the first request might return the total size,
# subsequent requests will definitely not # subsequent requests will definitely not
results, cursor, total_size = nsxlib.get_single_query_page( results, cursor, total_size = nsxlib.get_single_query_page(

View File

@@ -58,7 +58,7 @@ def lsn_get_for_network(context, network_id, raise_on_err=True):
raise p_exc.LsnNotFound(entity='network', raise p_exc.LsnNotFound(entity='network',
entity_id=network_id) entity_id=network_id)
else: else:
LOG.warn(msg, network_id) LOG.warning(msg, network_id)
def lsn_port_add_for_lsn(context, lsn_port_id, subnet_id, mac, lsn_id): def lsn_port_add_for_lsn(context, lsn_port_id, subnet_id, mac, lsn_id):

View File

@@ -75,9 +75,9 @@ class LsnManager(object):
raise p_exc.LsnNotFound(entity='network', raise p_exc.LsnNotFound(entity='network',
entity_id=network_id) entity_id=network_id)
else: else:
LOG.warn(_LW('Unable to find Logical Service Node for ' LOG.warning(_LW('Unable to find Logical Service Node for '
'the requested network %s.'), 'the requested network %s.'),
network_id) network_id)
def lsn_create(self, context, network_id): def lsn_create(self, context, network_id):
"""Create a LSN associated to the network.""" """Create a LSN associated to the network."""
@@ -92,7 +92,8 @@ class LsnManager(object):
try: try:
lsn_api.lsn_delete(self.cluster, lsn_id) lsn_api.lsn_delete(self.cluster, lsn_id)
except (n_exc.NotFound, api_exc.NsxApiException): except (n_exc.NotFound, api_exc.NsxApiException):
LOG.warn(_LW('Unable to delete Logical Service Node %s'), lsn_id) LOG.warning(_LW('Unable to delete Logical Service Node %s'),
lsn_id)
def lsn_delete_by_network(self, context, network_id): def lsn_delete_by_network(self, context, network_id):
"""Delete a LSN associated to the network.""" """Delete a LSN associated to the network."""
@@ -117,10 +118,10 @@ class LsnManager(object):
entity='subnet', entity='subnet',
entity_id=subnet_id) entity_id=subnet_id)
else: else:
LOG.warn(_LW('Unable to find Logical Service Node Port ' LOG.warning(_LW('Unable to find Logical Service Node Port '
'for LSN %(lsn_id)s and subnet ' 'for LSN %(lsn_id)s and subnet '
'%(subnet_id)s'), '%(subnet_id)s'),
{'lsn_id': lsn_id, 'subnet_id': subnet_id}) {'lsn_id': lsn_id, 'subnet_id': subnet_id})
return (lsn_id, None) return (lsn_id, None)
else: else:
return (lsn_id, lsn_port_id) return (lsn_id, lsn_port_id)
@@ -144,10 +145,10 @@ class LsnManager(object):
entity='MAC', entity='MAC',
entity_id=mac) entity_id=mac)
else: else:
LOG.warn(_LW('Unable to find Logical Service Node ' LOG.warning(_LW('Unable to find Logical Service Node '
'Port for LSN %(lsn_id)s and mac address ' 'Port for LSN %(lsn_id)s and mac address '
'%(mac)s'), '%(mac)s'),
{'lsn_id': lsn_id, 'mac': mac}) {'lsn_id': lsn_id, 'mac': mac})
return (lsn_id, None) return (lsn_id, None)
else: else:
return (lsn_id, lsn_port_id) return (lsn_id, lsn_port_id)
@@ -169,7 +170,7 @@ class LsnManager(object):
try: try:
lsn_api.lsn_port_delete(self.cluster, lsn_id, lsn_port_id) lsn_api.lsn_port_delete(self.cluster, lsn_id, lsn_port_id)
except (n_exc.NotFound, api_exc.NsxApiException): except (n_exc.NotFound, api_exc.NsxApiException):
LOG.warn(_LW('Unable to delete LSN Port %s'), lsn_port_id) LOG.warning(_LW('Unable to delete LSN Port %s'), lsn_port_id)
def lsn_port_dispose(self, context, network_id, mac_address): def lsn_port_dispose(self, context, network_id, mac_address):
"""Delete a LSN port given the network and the mac address.""" """Delete a LSN port given the network and the mac address."""
@@ -186,11 +187,12 @@ class LsnManager(object):
self.cluster, network_id, lswitch_port_id) self.cluster, network_id, lswitch_port_id)
except (n_exc.PortNotFoundOnNetwork, except (n_exc.PortNotFoundOnNetwork,
api_exc.NsxApiException): api_exc.NsxApiException):
LOG.warn(_LW("Metadata port not found while attempting " LOG.warning(_LW("Metadata port not found while attempting "
"to delete it from network %s"), network_id) "to delete it from network %s"),
network_id)
else: else:
LOG.warn(_LW("Unable to find Logical Services Node " LOG.warning(_LW("Unable to find Logical Services Node "
"Port with MAC %s"), mac_address) "Port with MAC %s"), mac_address)
def lsn_port_dhcp_setup( def lsn_port_dhcp_setup(
self, context, network_id, port_id, port_data, subnet_config=None): self, context, network_id, port_id, port_data, subnet_config=None):

View File

@@ -121,7 +121,7 @@ class DhcpMetadataAccess(object):
# This becomes ineffective, as all new networks creations # This becomes ineffective, as all new networks creations
# are handled by Logical Services Nodes in NSX # are handled by Logical Services Nodes in NSX
cfg.CONF.set_override('network_auto_schedule', False) cfg.CONF.set_override('network_auto_schedule', False)
LOG.warn(_LW('network_auto_schedule has been disabled')) LOG.warning(_LW('network_auto_schedule has been disabled'))
notifier = combined.DhcpAgentNotifyAPI(self.safe_reference, notifier = combined.DhcpAgentNotifyAPI(self.safe_reference,
lsn_manager) lsn_manager)
self.supported_extension_aliases.append(lsn.EXT_ALIAS) self.supported_extension_aliases.append(lsn.EXT_ALIAS)

View File

@@ -97,8 +97,8 @@ def handle_router_metadata_access(plugin, context, router_id, interface=None):
LOG.debug("Metadata access network is disabled") LOG.debug("Metadata access network is disabled")
return return
if not cfg.CONF.allow_overlapping_ips: if not cfg.CONF.allow_overlapping_ips:
LOG.warn(_LW("Overlapping IPs must be enabled in order to setup " LOG.warning(_LW("Overlapping IPs must be enabled in order to setup "
"the metadata access network")) "the metadata access network"))
return return
ctx_elevated = context.elevated() ctx_elevated = context.elevated()
device_filter = {'device_id': [router_id], device_filter = {'device_id': [router_id],

View File

@@ -579,12 +579,12 @@ def delete_nat_rules_by_match(cluster, router_id, rule_type,
min_rules=min_num_expected, min_rules=min_num_expected,
max_rules=max_num_expected) max_rules=max_num_expected)
else: else:
LOG.warn(_LW("Found %(actual_rule_num)d matching NAT rules, which " LOG.warning(_LW("Found %(actual_rule_num)d matching NAT rules, "
"is not in the expected range (%(min_exp_rule_num)d," "which is not in the expected range "
"%(max_exp_rule_num)d)"), "(%(min_exp_rule_num)d,%(max_exp_rule_num)d)"),
{'actual_rule_num': num_rules_to_delete, {'actual_rule_num': num_rules_to_delete,
'min_exp_rule_num': min_num_expected, 'min_exp_rule_num': min_num_expected,
'max_exp_rule_num': max_num_expected}) 'max_exp_rule_num': max_num_expected})
for rule_id in to_delete_ids: for rule_id in to_delete_ids:
delete_router_nat_rule(cluster, router_id, rule_id) delete_router_nat_rule(cluster, router_id, rule_id)

View File

@@ -142,8 +142,8 @@ def delete_security_profile(cluster, spid):
except exceptions.NotFound: except exceptions.NotFound:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
# This is not necessarily an error condition # This is not necessarily an error condition
LOG.warn(_LW("Unable to find security profile %s on NSX backend"), LOG.warning(_LW("Unable to find security profile %s on NSX "
spid) "backend"), spid)
def summarize_security_group_rules(logical_port_rules): def summarize_security_group_rules(logical_port_rules):

View File

@@ -246,7 +246,7 @@ def get_ports(cluster, networks=None, devices=None, tenants=None):
if not ports: if not ports:
ports = nsxlib.get_all_query_pages(lport_query_path, cluster) ports = nsxlib.get_all_query_pages(lport_query_path, cluster)
except exception.NotFound: except exception.NotFound:
LOG.warn(_LW("Lswitch %s not found in NSX"), lswitch) LOG.warning(_LW("Lswitch %s not found in NSX"), lswitch)
ports = None ports = None
if ports: if ports:
@@ -280,11 +280,11 @@ def get_port_by_neutron_tag(cluster, lswitch_uuid, neutron_port_id):
num_results = len(res["results"]) num_results = len(res["results"])
if num_results >= 1: if num_results >= 1:
if num_results > 1: if num_results > 1:
LOG.warn(_LW("Found '%(num_ports)d' ports with " LOG.warning(_LW("Found '%(num_ports)d' ports with "
"q_port_id tag: '%(neutron_port_id)s'. " "q_port_id tag: '%(neutron_port_id)s'. "
"Only 1 was expected."), "Only 1 was expected."),
{'num_ports': num_results, {'num_ports': num_results,
'neutron_port_id': neutron_port_id}) 'neutron_port_id': neutron_port_id})
return res["results"][0] return res["results"][0]

View File

@@ -514,7 +514,7 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
nsx_switch_id, nsx_port_id = nsx_utils.get_nsx_switch_and_port_id( nsx_switch_id, nsx_port_id = nsx_utils.get_nsx_switch_and_port_id(
context.session, self.cluster, port_data['id']) context.session, self.cluster, port_data['id'])
if not nsx_port_id: if not nsx_port_id:
LOG.warn( LOG.warning(
_LW("Neutron port %(port_id)s not found on NSX backend. " _LW("Neutron port %(port_id)s not found on NSX backend. "
"Terminating delete operation. A dangling router port " "Terminating delete operation. A dangling router port "
"might have been left on router %(router_id)s"), "might have been left on router %(router_id)s"),
@@ -1057,19 +1057,19 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
nsx_switch_ids = nsx_utils.get_nsx_switch_ids( nsx_switch_ids = nsx_utils.get_nsx_switch_ids(
context.session, self.cluster, id) context.session, self.cluster, id)
if not nsx_switch_ids or len(nsx_switch_ids) < 1: if not nsx_switch_ids or len(nsx_switch_ids) < 1:
LOG.warn(_LW("Unable to find NSX mappings for neutron " LOG.warning(_LW("Unable to find NSX mappings for neutron "
"network:%s"), id) "network:%s"), id)
try: try:
switchlib.update_lswitch(self.cluster, switchlib.update_lswitch(self.cluster,
nsx_switch_ids[0], nsx_switch_ids[0],
network['network']['name']) network['network']['name'])
except api_exc.NsxApiException as e: except api_exc.NsxApiException as e:
LOG.warn(_LW("Logical switch update on NSX backend failed. " LOG.warning(_LW("Logical switch update on NSX backend failed. "
"Neutron network id:%(net_id)s; " "Neutron network id:%(net_id)s; "
"NSX lswitch id:%(lswitch_id)s;" "NSX lswitch id:%(lswitch_id)s;"
"Error:%(error)s"), "Error:%(error)s"),
{'net_id': id, 'lswitch_id': nsx_switch_ids[0], {'net_id': id, 'lswitch_id': nsx_switch_ids[0],
'error': e}) 'error': e})
return net return net
@@ -1473,8 +1473,9 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
# As setting gateway failed, the router must be deleted # As setting gateway failed, the router must be deleted
# in order to ensure atomicity # in order to ensure atomicity
router_id = router_db['id'] router_id = router_db['id']
LOG.warn(_LW("Failed to set gateway info for router being " LOG.warning(_LW("Failed to set gateway info for router "
"created:%s - removing router"), router_id) "being created:%s - removing router"),
router_id)
self.delete_router(context, router_id) self.delete_router(context, router_id)
LOG.info(_LI("Create router failed while setting external " LOG.info(_LI("Create router failed while setting external "
"gateway. Router:%s has been removed from " "gateway. Router:%s has been removed from "
@@ -1605,10 +1606,10 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
context.session, router_id) context.session, router_id)
except db_exc.DBError as d_exc: except db_exc.DBError as d_exc:
# Do not make this error fatal # Do not make this error fatal
LOG.warn(_LW("Unable to remove NSX mapping for Neutron router " LOG.warning(_LW("Unable to remove NSX mapping for Neutron router "
"%(router_id)s because of the following exception:" "%(router_id)s because of the following exception:"
"%(d_exc)s"), {'router_id': router_id, "%(d_exc)s"), {'router_id': router_id,
'd_exc': str(d_exc)}) 'd_exc': str(d_exc)})
# Perform the actual delete on the Neutron DB # Perform the actual delete on the Neutron DB
super(NsxPluginV2, self).delete_router(context, router_id) super(NsxPluginV2, self).delete_router(context, router_id)
@@ -2071,8 +2072,8 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
except api_exc.NsxApiException: except api_exc.NsxApiException:
# Consider backend failures as non-fatal, but still warn # Consider backend failures as non-fatal, but still warn
# because this might indicate something dodgy is going on # because this might indicate something dodgy is going on
LOG.warn(_LW("Unable to update name on NSX backend " LOG.warning(_LW("Unable to update name on NSX backend "
"for network gateway: %s"), id) "for network gateway: %s"), id)
return super(NsxPluginV2, self).update_network_gateway( return super(NsxPluginV2, self).update_network_gateway(
context, id, network_gateway) context, id, network_gateway)
@@ -2278,10 +2279,10 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
try: try:
l2gwlib.delete_gateway_device(self.cluster, nsx_device_id) l2gwlib.delete_gateway_device(self.cluster, nsx_device_id)
except n_exc.NotFound: except n_exc.NotFound:
LOG.warn(_LW("Removal of gateway device: %(neutron_id)s failed on " LOG.warning(_LW("Removal of gateway device: %(neutron_id)s failed "
"NSX backend (NSX id:%(nsx_id)s) because the NSX " "on NSX backend (NSX id:%(nsx_id)s) because the "
"resource was not found"), "NSX resource was not found"),
{'neutron_id': device_id, 'nsx_id': nsx_device_id}) {'neutron_id': device_id, 'nsx_id': nsx_device_id})
except api_exc.NsxApiException: except api_exc.NsxApiException:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
# In this case a 500 should be returned # In this case a 500 should be returned

View File

@@ -354,7 +354,7 @@ class LsnManagerTestCase(base.BaseTestCase):
def _test_lsn_delete_by_network_with_exc(self, exc): def _test_lsn_delete_by_network_with_exc(self, exc):
self.mock_lsn_api.lsn_for_network_get.side_effect = exc self.mock_lsn_api.lsn_for_network_get.side_effect = exc
with mock.patch.object(lsn_man.LOG, 'warn') as l: with mock.patch.object(lsn_man.LOG, 'warning') as l:
self.manager.lsn_delete_by_network(mock.ANY, self.net_id) self.manager.lsn_delete_by_network(mock.ANY, self.net_id)
self.assertEqual(1, l.call_count) self.assertEqual(1, l.call_count)
@@ -428,7 +428,7 @@ class LsnManagerTestCase(base.BaseTestCase):
def _test_lsn_port_delete_with_exc(self, exc): def _test_lsn_port_delete_with_exc(self, exc):
self.mock_lsn_api.lsn_port_delete.side_effect = exc self.mock_lsn_api.lsn_port_delete.side_effect = exc
with mock.patch.object(lsn_man.LOG, 'warn') as l: with mock.patch.object(lsn_man.LOG, 'warning') as l:
self.manager.lsn_port_delete(mock.ANY, mock.ANY, mock.ANY) self.manager.lsn_port_delete(mock.ANY, mock.ANY, mock.ANY)
self.assertEqual(1, self.mock_lsn_api.lsn_port_delete.call_count) self.assertEqual(1, self.mock_lsn_api.lsn_port_delete.call_count)
self.assertEqual(1, l.call_count) self.assertEqual(1, l.call_count)
@@ -656,7 +656,7 @@ class LsnManagerTestCase(base.BaseTestCase):
def test_lsn_port_dispose_api_error(self): def test_lsn_port_dispose_api_error(self):
self.mock_lsn_api.lsn_port_delete.side_effect = ( self.mock_lsn_api.lsn_port_delete.side_effect = (
exception.NsxApiException) exception.NsxApiException)
with mock.patch.object(lsn_man.LOG, 'warn') as l: with mock.patch.object(lsn_man.LOG, 'warning') as l:
self.manager.lsn_port_dispose(mock.ANY, self.net_id, self.mac) self.manager.lsn_port_dispose(mock.ANY, self.net_id, self.mac)
self.assertEqual(1, l.call_count) self.assertEqual(1, l.call_count)