LOG.warn -> LOG.warning

Python 3 deprecated the logger.warn method, see:
https://docs.python.org/3/library/logging.html#logging.warning
so we prefer to use warning to avoid a DeprecationWarning.
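
For illustration only (not part of this change), a minimal standalone snippet showing the behaviour: on Python 3, Logger.warn is a deprecated alias that emits a DeprecationWarning before delegating to Logger.warning, so the direct spelling is preferred.

    import logging
    import warnings

    logging.basicConfig(level=logging.WARNING)
    LOG = logging.getLogger(__name__)

    # DeprecationWarning is hidden by default; surface it for the demo.
    warnings.simplefilter("always", DeprecationWarning)

    # Deprecated alias: emits DeprecationWarning on Python 3, then logs anyway.
    LOG.warn("old spelling: %s", "avoid")

    # Preferred spelling used throughout this change.
    LOG.warning("new spelling: %s", "use this")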

In addition, a hacking rule will be added to prevent new occurrences
(https://review.openstack.org/#/c/262257/)
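
As a rough, hypothetical sketch of what such a hacking check usually looks like (the function name, N352 code and message below are illustrative assumptions, not the actual rule under review):

    import re

    _LOG_WARN_RE = re.compile(r"\bLOG\.warn\(")

    def check_no_log_warn(logical_line):
        """Hypothetical hacking check rejecting LOG.warn.

        Hacking/flake8 style checks receive each logical line and yield an
        (offset, message) tuple per violation.
        """
        match = _LOG_WARN_RE.search(logical_line)
        if match:
            yield (match.start(),
                   "N352: LOG.warn is deprecated, use LOG.warning instead")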

TrivialFix

Change-Id: Iec8499951cd10a910a4d8ecdac95f5bb53a2e86e
Gary Kotton 2015-12-30 23:43:47 -08:00
parent 69fa193ff9
commit 994ddd79c7
14 changed files with 114 additions and 108 deletions


@@ -101,7 +101,7 @@ class ApiClientBase(object):
api_providers are configured.
'''
if not self._api_providers:
LOG.warn(_LW("[%d] no API providers currently available."), rid)
LOG.warning(_LW("[%d] no API providers currently available."), rid)
return None
if self._conn_pool.empty():
LOG.debug("[%d] Waiting to acquire API client connection.", rid)
@@ -149,10 +149,10 @@ class ApiClientBase(object):
priority = http_conn.priority
if bad_state:
# Reconnect to provider.
LOG.warn(_LW("[%(rid)d] Connection returned in bad state, "
"reconnecting to %(conn)s"),
{'rid': rid,
'conn': api_client.ctrl_conn_to_str(http_conn)})
LOG.warning(_LW("[%(rid)d] Connection returned in bad state, "
"reconnecting to %(conn)s"),
{'rid': rid,
'conn': api_client.ctrl_conn_to_str(http_conn)})
http_conn = self._create_connection(*self._conn_params(http_conn))
elif service_unavail:
# http_conn returned a service unavailable response, put other


@@ -210,8 +210,8 @@ class GetApiProvidersRequestEventlet(EventletApiRequest):
ret.append(_provider_from_listen_addr(addr))
return ret
except Exception as e:
LOG.warn(_LW("[%(rid)d] Failed to parse API provider: %(e)s"),
{'rid': self._rid(), 'e': e})
LOG.warning(_LW("[%(rid)d] Failed to parse API provider: %(e)s"),
{'rid': self._rid(), 'e': e})
# intentionally fall through
return None


@@ -122,9 +122,9 @@ class ApiRequest(object):
conn.request(self._method, url, self._body, headers)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.warn(_LW("[%(rid)d] Exception issuing request: "
"%(e)s"),
{'rid': self._rid(), 'e': e})
LOG.warning(_LW("[%(rid)d] Exception issuing request: "
"%(e)s"),
{'rid': self._rid(), 'e': e})
response = conn.getresponse()
response.body = response.read()
@@ -181,10 +181,10 @@ class ApiRequest(object):
# queue.
if (response.status == httplib.INTERNAL_SERVER_ERROR and
response.status > httplib.NOT_IMPLEMENTED):
LOG.warn(_LW("[%(rid)d] Request '%(method)s %(url)s' "
"received: %(status)s"),
{'rid': self._rid(), 'method': self._method,
'url': self._url, 'status': response.status})
LOG.warning(_LW("[%(rid)d] Request '%(method)s %(url)s' "
"received: %(status)s"),
{'rid': self._rid(), 'method': self._method,
'url': self._url, 'status': response.status})
raise Exception(_('Server error return: %s'), response.status)
return response
except socket.error:
@@ -197,10 +197,11 @@ class ApiRequest(object):
msg = str(e)
if response is None:
elapsed_time = time.time() - issued_time
LOG.warn(_LW("[%(rid)d] Failed request '%(conn)s': '%(msg)s' "
"(%(elapsed)s seconds)"),
{'rid': self._rid(), 'conn': self._request_str(conn, url),
'msg': msg, 'elapsed': elapsed_time})
LOG.warning(_LW("[%(rid)d] Failed request '%(conn)s': '%(msg)s' "
"(%(elapsed)s seconds)"),
{'rid': self._rid(),
'conn': self._request_str(conn, url),
'msg': msg, 'elapsed': elapsed_time})
self._request_error = e
is_conn_error = True
return e
@@ -231,8 +232,8 @@ class ApiRequest(object):
url = value
break
if not url:
LOG.warn(_LW("[%d] Received redirect status without location "
"header field"), self._rid())
LOG.warning(_LW("[%d] Received redirect status without location "
"header field"), self._rid())
return (conn, None)
# Accept location with the following format:
# 1. /path, redirect to same node
@@ -248,13 +249,14 @@ class ApiRequest(object):
url = result.path
return (conn, url) # case 1
else:
LOG.warn(_LW("[%(rid)d] Received invalid redirect location: "
"'%(url)s'"), {'rid': self._rid(), 'url': url})
LOG.warning(_LW("[%(rid)d] Received invalid redirect "
"location: '%(url)s'"),
{'rid': self._rid(), 'url': url})
return (conn, None) # case 3
elif result.scheme not in ["http", "https"] or not result.hostname:
LOG.warn(_LW("[%(rid)d] Received malformed redirect "
"location: %(url)s"),
{'rid': self._rid(), 'url': url})
LOG.warning(_LW("[%(rid)d] Received malformed redirect "
"location: %(url)s"),
{'rid': self._rid(), 'url': url})
return (conn, None) # case 3
# case 2, redirect location includes a scheme
# so setup a new connection and authenticate


@@ -67,8 +67,8 @@ def get_nsx_switch_ids(session, cluster, neutron_network_id):
# more than once for each network in Neutron's lifetime
nsx_switches = switchlib.get_lswitches(cluster, neutron_network_id)
if not nsx_switches:
LOG.warn(_LW("Unable to find NSX switches for Neutron network %s"),
neutron_network_id)
LOG.warning(_LW("Unable to find NSX switches for Neutron network "
"%s"), neutron_network_id)
return
nsx_switch_ids = []
with session.begin(subtransactions=True):
@@ -114,8 +114,8 @@ def get_nsx_switch_and_port_id(session, cluster, neutron_port_id):
# NOTE(salv-orlando): Not handling the case where more than one
# port is found with the same neutron port tag
if not nsx_ports:
LOG.warn(_LW("Unable to find NSX port for Neutron port %s"),
neutron_port_id)
LOG.warning(_LW("Unable to find NSX port for Neutron port %s"),
neutron_port_id)
# This method is supposed to return a tuple
return None, None
nsx_port = nsx_ports[0]
@@ -154,12 +154,12 @@ def get_nsx_security_group_id(session, cluster, neutron_id):
# NOTE(salv-orlando): Not handling the case where more than one
# security profile is found with the same neutron port tag
if not nsx_sec_profiles:
LOG.warn(_LW("Unable to find NSX security profile for Neutron "
"security group %s"), neutron_id)
LOG.warning(_LW("Unable to find NSX security profile for Neutron "
"security group %s"), neutron_id)
return
elif len(nsx_sec_profiles) > 1:
LOG.warn(_LW("Multiple NSX security profiles found for Neutron "
"security group %s"), neutron_id)
LOG.warning(_LW("Multiple NSX security profiles found for Neutron "
"security group %s"), neutron_id)
nsx_sec_profile = nsx_sec_profiles[0]
nsx_id = nsx_sec_profile['uuid']
with session.begin(subtransactions=True):
@@ -191,8 +191,8 @@ def get_nsx_router_id(session, cluster, neutron_router_id):
# NOTE(salv-orlando): Not handling the case where more than one
# port is found with the same neutron port tag
if not nsx_routers:
LOG.warn(_LW("Unable to find NSX router for Neutron router %s"),
neutron_router_id)
LOG.warning(_LW("Unable to find NSX router for Neutron router %s"),
neutron_router_id)
return
nsx_router = nsx_routers[0]
nsx_router_id = nsx_router['uuid']
@@ -248,11 +248,12 @@ def get_nsx_device_statuses(cluster, tenant_id):
except api_exc.NsxApiException:
# Do not make a NSX API exception fatal
if tenant_id:
LOG.warn(_LW("Unable to retrieve operational status for gateway "
"devices belonging to tenant: %s"), tenant_id)
LOG.warning(_LW("Unable to retrieve operational status for "
"gateway devices belonging to tenant: %s"),
tenant_id)
else:
LOG.warn(_LW("Unable to retrieve operational status for "
"gateway devices"))
LOG.warning(_LW("Unable to retrieve operational status for "
"gateway devices"))
def _convert_bindings_to_nsx_transport_zones(bindings):


@@ -406,8 +406,8 @@ class NsxSynchronizer():
neutron_router_mappings[neutron_router_id] = (
self._nsx_cache[lr_uuid])
else:
LOG.warn(_LW("Unable to find Neutron router id for "
"NSX logical router: %s"), lr_uuid)
LOG.warning(_LW("Unable to find Neutron router id for "
"NSX logical router: %s"), lr_uuid)
# Fetch neutron routers from database
filters = ({} if scan_missing else
{'id': neutron_router_mappings.keys()})
@@ -541,14 +541,14 @@ class NsxSynchronizer():
# be emitted.
num_requests = page_size / (MAX_PAGE_SIZE + 1) + 1
if num_requests > 1:
LOG.warn(_LW("Requested page size is %(cur_chunk_size)d. "
"It might be necessary to do %(num_requests)d "
"round-trips to NSX for fetching data. Please "
"tune sync parameters to ensure chunk size "
"is less than %(max_page_size)d"),
{'cur_chunk_size': page_size,
'num_requests': num_requests,
'max_page_size': MAX_PAGE_SIZE})
LOG.warning(_LW("Requested page size is %(cur_chunk_size)d. "
"It might be necessary to do %(num_requests)d "
"round-trips to NSX for fetching data. Please "
"tune sync parameters to ensure chunk size "
"is less than %(max_page_size)d"),
{'cur_chunk_size': page_size,
'num_requests': num_requests,
'max_page_size': MAX_PAGE_SIZE})
# Only the first request might return the total size,
# subsequent requests will definitely not
results, cursor, total_size = nsxlib.get_single_query_page(


@@ -58,7 +58,7 @@ def lsn_get_for_network(context, network_id, raise_on_err=True):
raise p_exc.LsnNotFound(entity='network',
entity_id=network_id)
else:
LOG.warn(msg, network_id)
LOG.warning(msg, network_id)
def lsn_port_add_for_lsn(context, lsn_port_id, subnet_id, mac, lsn_id):


@@ -75,9 +75,9 @@ class LsnManager(object):
raise p_exc.LsnNotFound(entity='network',
entity_id=network_id)
else:
LOG.warn(_LW('Unable to find Logical Service Node for '
'the requested network %s.'),
network_id)
LOG.warning(_LW('Unable to find Logical Service Node for '
'the requested network %s.'),
network_id)
def lsn_create(self, context, network_id):
"""Create a LSN associated to the network."""
@@ -92,7 +92,8 @@ class LsnManager(object):
try:
lsn_api.lsn_delete(self.cluster, lsn_id)
except (n_exc.NotFound, api_exc.NsxApiException):
LOG.warn(_LW('Unable to delete Logical Service Node %s'), lsn_id)
LOG.warning(_LW('Unable to delete Logical Service Node %s'),
lsn_id)
def lsn_delete_by_network(self, context, network_id):
"""Delete a LSN associated to the network."""
@@ -117,10 +118,10 @@ class LsnManager(object):
entity='subnet',
entity_id=subnet_id)
else:
LOG.warn(_LW('Unable to find Logical Service Node Port '
'for LSN %(lsn_id)s and subnet '
'%(subnet_id)s'),
{'lsn_id': lsn_id, 'subnet_id': subnet_id})
LOG.warning(_LW('Unable to find Logical Service Node Port '
'for LSN %(lsn_id)s and subnet '
'%(subnet_id)s'),
{'lsn_id': lsn_id, 'subnet_id': subnet_id})
return (lsn_id, None)
else:
return (lsn_id, lsn_port_id)
@@ -144,10 +145,10 @@ class LsnManager(object):
entity='MAC',
entity_id=mac)
else:
LOG.warn(_LW('Unable to find Logical Service Node '
'Port for LSN %(lsn_id)s and mac address '
'%(mac)s'),
{'lsn_id': lsn_id, 'mac': mac})
LOG.warning(_LW('Unable to find Logical Service Node '
'Port for LSN %(lsn_id)s and mac address '
'%(mac)s'),
{'lsn_id': lsn_id, 'mac': mac})
return (lsn_id, None)
else:
return (lsn_id, lsn_port_id)
@@ -169,7 +170,7 @@ class LsnManager(object):
try:
lsn_api.lsn_port_delete(self.cluster, lsn_id, lsn_port_id)
except (n_exc.NotFound, api_exc.NsxApiException):
LOG.warn(_LW('Unable to delete LSN Port %s'), lsn_port_id)
LOG.warning(_LW('Unable to delete LSN Port %s'), lsn_port_id)
def lsn_port_dispose(self, context, network_id, mac_address):
"""Delete a LSN port given the network and the mac address."""
@@ -186,11 +187,12 @@ class LsnManager(object):
self.cluster, network_id, lswitch_port_id)
except (n_exc.PortNotFoundOnNetwork,
api_exc.NsxApiException):
LOG.warn(_LW("Metadata port not found while attempting "
"to delete it from network %s"), network_id)
LOG.warning(_LW("Metadata port not found while attempting "
"to delete it from network %s"),
network_id)
else:
LOG.warn(_LW("Unable to find Logical Services Node "
"Port with MAC %s"), mac_address)
LOG.warning(_LW("Unable to find Logical Services Node "
"Port with MAC %s"), mac_address)
def lsn_port_dhcp_setup(
self, context, network_id, port_id, port_data, subnet_config=None):


@@ -121,7 +121,7 @@ class DhcpMetadataAccess(object):
# This becomes ineffective, as all new networks creations
# are handled by Logical Services Nodes in NSX
cfg.CONF.set_override('network_auto_schedule', False)
LOG.warn(_LW('network_auto_schedule has been disabled'))
LOG.warning(_LW('network_auto_schedule has been disabled'))
notifier = combined.DhcpAgentNotifyAPI(self.safe_reference,
lsn_manager)
self.supported_extension_aliases.append(lsn.EXT_ALIAS)


@@ -97,8 +97,8 @@ def handle_router_metadata_access(plugin, context, router_id, interface=None):
LOG.debug("Metadata access network is disabled")
return
if not cfg.CONF.allow_overlapping_ips:
LOG.warn(_LW("Overlapping IPs must be enabled in order to setup "
"the metadata access network"))
LOG.warning(_LW("Overlapping IPs must be enabled in order to setup "
"the metadata access network"))
return
ctx_elevated = context.elevated()
device_filter = {'device_id': [router_id],


@@ -579,12 +579,12 @@ def delete_nat_rules_by_match(cluster, router_id, rule_type,
min_rules=min_num_expected,
max_rules=max_num_expected)
else:
LOG.warn(_LW("Found %(actual_rule_num)d matching NAT rules, which "
"is not in the expected range (%(min_exp_rule_num)d,"
"%(max_exp_rule_num)d)"),
{'actual_rule_num': num_rules_to_delete,
'min_exp_rule_num': min_num_expected,
'max_exp_rule_num': max_num_expected})
LOG.warning(_LW("Found %(actual_rule_num)d matching NAT rules, "
"which is not in the expected range "
"(%(min_exp_rule_num)d,%(max_exp_rule_num)d)"),
{'actual_rule_num': num_rules_to_delete,
'min_exp_rule_num': min_num_expected,
'max_exp_rule_num': max_num_expected})
for rule_id in to_delete_ids:
delete_router_nat_rule(cluster, router_id, rule_id)


@@ -142,8 +142,8 @@ def delete_security_profile(cluster, spid):
except exceptions.NotFound:
with excutils.save_and_reraise_exception():
# This is not necessarily an error condition
LOG.warn(_LW("Unable to find security profile %s on NSX backend"),
spid)
LOG.warning(_LW("Unable to find security profile %s on NSX "
"backend"), spid)
def summarize_security_group_rules(logical_port_rules):


@@ -246,7 +246,7 @@ def get_ports(cluster, networks=None, devices=None, tenants=None):
if not ports:
ports = nsxlib.get_all_query_pages(lport_query_path, cluster)
except exception.NotFound:
LOG.warn(_LW("Lswitch %s not found in NSX"), lswitch)
LOG.warning(_LW("Lswitch %s not found in NSX"), lswitch)
ports = None
if ports:
@@ -280,11 +280,11 @@ def get_port_by_neutron_tag(cluster, lswitch_uuid, neutron_port_id):
num_results = len(res["results"])
if num_results >= 1:
if num_results > 1:
LOG.warn(_LW("Found '%(num_ports)d' ports with "
"q_port_id tag: '%(neutron_port_id)s'. "
"Only 1 was expected."),
{'num_ports': num_results,
'neutron_port_id': neutron_port_id})
LOG.warning(_LW("Found '%(num_ports)d' ports with "
"q_port_id tag: '%(neutron_port_id)s'. "
"Only 1 was expected."),
{'num_ports': num_results,
'neutron_port_id': neutron_port_id})
return res["results"][0]


@@ -514,7 +514,7 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
nsx_switch_id, nsx_port_id = nsx_utils.get_nsx_switch_and_port_id(
context.session, self.cluster, port_data['id'])
if not nsx_port_id:
LOG.warn(
LOG.warning(
_LW("Neutron port %(port_id)s not found on NSX backend. "
"Terminating delete operation. A dangling router port "
"might have been left on router %(router_id)s"),
@@ -1057,19 +1057,19 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
nsx_switch_ids = nsx_utils.get_nsx_switch_ids(
context.session, self.cluster, id)
if not nsx_switch_ids or len(nsx_switch_ids) < 1:
LOG.warn(_LW("Unable to find NSX mappings for neutron "
"network:%s"), id)
LOG.warning(_LW("Unable to find NSX mappings for neutron "
"network:%s"), id)
try:
switchlib.update_lswitch(self.cluster,
nsx_switch_ids[0],
network['network']['name'])
except api_exc.NsxApiException as e:
LOG.warn(_LW("Logical switch update on NSX backend failed. "
"Neutron network id:%(net_id)s; "
"NSX lswitch id:%(lswitch_id)s;"
"Error:%(error)s"),
{'net_id': id, 'lswitch_id': nsx_switch_ids[0],
'error': e})
LOG.warning(_LW("Logical switch update on NSX backend failed. "
"Neutron network id:%(net_id)s; "
"NSX lswitch id:%(lswitch_id)s;"
"Error:%(error)s"),
{'net_id': id, 'lswitch_id': nsx_switch_ids[0],
'error': e})
return net
@@ -1473,8 +1473,9 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
# As setting gateway failed, the router must be deleted
# in order to ensure atomicity
router_id = router_db['id']
LOG.warn(_LW("Failed to set gateway info for router being "
"created:%s - removing router"), router_id)
LOG.warning(_LW("Failed to set gateway info for router "
"being created:%s - removing router"),
router_id)
self.delete_router(context, router_id)
LOG.info(_LI("Create router failed while setting external "
"gateway. Router:%s has been removed from "
@@ -1605,10 +1606,10 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
context.session, router_id)
except db_exc.DBError as d_exc:
# Do not make this error fatal
LOG.warn(_LW("Unable to remove NSX mapping for Neutron router "
"%(router_id)s because of the following exception:"
"%(d_exc)s"), {'router_id': router_id,
'd_exc': str(d_exc)})
LOG.warning(_LW("Unable to remove NSX mapping for Neutron router "
"%(router_id)s because of the following exception:"
"%(d_exc)s"), {'router_id': router_id,
'd_exc': str(d_exc)})
# Perform the actual delete on the Neutron DB
super(NsxPluginV2, self).delete_router(context, router_id)
@@ -2071,8 +2072,8 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
except api_exc.NsxApiException:
# Consider backend failures as non-fatal, but still warn
# because this might indicate something dodgy is going on
LOG.warn(_LW("Unable to update name on NSX backend "
"for network gateway: %s"), id)
LOG.warning(_LW("Unable to update name on NSX backend "
"for network gateway: %s"), id)
return super(NsxPluginV2, self).update_network_gateway(
context, id, network_gateway)
@@ -2278,10 +2279,10 @@ class NsxPluginV2(addr_pair_db.AllowedAddressPairsMixin,
try:
l2gwlib.delete_gateway_device(self.cluster, nsx_device_id)
except n_exc.NotFound:
LOG.warn(_LW("Removal of gateway device: %(neutron_id)s failed on "
"NSX backend (NSX id:%(nsx_id)s) because the NSX "
"resource was not found"),
{'neutron_id': device_id, 'nsx_id': nsx_device_id})
LOG.warning(_LW("Removal of gateway device: %(neutron_id)s failed "
"on NSX backend (NSX id:%(nsx_id)s) because the "
"NSX resource was not found"),
{'neutron_id': device_id, 'nsx_id': nsx_device_id})
except api_exc.NsxApiException:
with excutils.save_and_reraise_exception():
# In this case a 500 should be returned


@@ -354,7 +354,7 @@ class LsnManagerTestCase(base.BaseTestCase):
def _test_lsn_delete_by_network_with_exc(self, exc):
self.mock_lsn_api.lsn_for_network_get.side_effect = exc
with mock.patch.object(lsn_man.LOG, 'warn') as l:
with mock.patch.object(lsn_man.LOG, 'warning') as l:
self.manager.lsn_delete_by_network(mock.ANY, self.net_id)
self.assertEqual(1, l.call_count)
@@ -428,7 +428,7 @@ class LsnManagerTestCase(base.BaseTestCase):
def _test_lsn_port_delete_with_exc(self, exc):
self.mock_lsn_api.lsn_port_delete.side_effect = exc
with mock.patch.object(lsn_man.LOG, 'warn') as l:
with mock.patch.object(lsn_man.LOG, 'warning') as l:
self.manager.lsn_port_delete(mock.ANY, mock.ANY, mock.ANY)
self.assertEqual(1, self.mock_lsn_api.lsn_port_delete.call_count)
self.assertEqual(1, l.call_count)
@@ -656,7 +656,7 @@ class LsnManagerTestCase(base.BaseTestCase):
def test_lsn_port_dispose_api_error(self):
self.mock_lsn_api.lsn_port_delete.side_effect = (
exception.NsxApiException)
with mock.patch.object(lsn_man.LOG, 'warn') as l:
with mock.patch.object(lsn_man.LOG, 'warning') as l:
self.manager.lsn_port_dispose(mock.ANY, self.net_id, self.mac)
self.assertEqual(1, l.call_count)