Replace deprecated LOG.warn with LOG.warning

Python 3 deprecates the logger.warn method (see:
https://docs.python.org/3/library/logging.html#logging.warning),
so we prefer logger.warning to avoid a DeprecationWarning.

Change-Id: I49544d1370e7fe083eb23752227567d8d6f24020
Closes-Bug: #1508442
This commit is contained in:
zhang.lei 2016-03-08 14:54:24 +08:00
parent df5f907185
commit de41db2755
11 changed files with 141 additions and 123 deletions

View File

@ -22,6 +22,7 @@ Octavia Specific Commandments
- [O338] Change assertEqual(A in B, True), assertEqual(True, A in B),
assertEqual(A in B, False) or assertEqual(False, A in B) to the more
specific assertIn/NotIn(A, B)
- [O339] LOG.warn() is not allowed. Use LOG.warning()
Creating Unit Tests
-------------------

View File

@ -58,9 +58,9 @@ def unwrap_envelope(envelope, key):
expected_hmc = envelope[-hash_len:]
calculated_hmc = get_hmac(payload, key)
if not secretutils.constant_time_compare(expected_hmc, calculated_hmc):
LOG.warn(_LW('calculated hmac: %(s1)s not equal to msg hmac: '
'%(s2)s dropping packet'), {'s1': to_hex(calculated_hmc),
's2': to_hex(expected_hmc)})
LOG.warning(_LW('calculated hmac: %(s1)s not equal to msg hmac: '
'%(s2)s dropping packet'), {'s1': to_hex(calculated_hmc),
's2': to_hex(expected_hmc)})
fmt = 'calculated hmac: {0} not equal to msg hmac: {1} dropping packet'
raise exceptions.InvalidHMACException(fmt.format(
to_hex(calculated_hmc), to_hex(expected_hmc)))

View File

@ -240,7 +240,7 @@ class AmphoraAPIClient(object):
)
r = _request(**reqargs)
except (requests.ConnectionError, requests.Timeout):
LOG.warn(_LW("Could not connect to instance. Retrying."))
LOG.warning(_LW("Could not connect to instance. Retrying."))
time.sleep(CONF.haproxy_amphora.connection_retry_interval)
if a >= CONF.haproxy_amphora.connection_max_retries:
raise driver_except.TimeOutException()

View File

@ -41,7 +41,7 @@ def _extract_amp_image_id_by_tag(client, image_tag):
image_id = images[-1]['id']
num_images = len(images)
if num_images > 1:
LOG.warn(
LOG.warning(
_LW("A single Glance image should be tagged with %(tag)s tag, "
"but %(num)d found. Using %(image_id)s."),
{'tag': image_tag, 'num': num_images, 'image_id': image_id}
@ -52,7 +52,7 @@ def _extract_amp_image_id_by_tag(client, image_tag):
def _get_image_uuid(client, image_id, image_tag):
if image_id:
if image_tag:
LOG.warn(
LOG.warning(
_LW("Both amp_image_id and amp_image_tag options defined. "
"Using the former."))
return image_id
@ -145,8 +145,8 @@ class VirtualMachineManager(compute_base.ComputeBase):
try:
self.manager.delete(server=compute_id)
except nova_exceptions.NotFound:
LOG.warn(_LW("Nova instance with id: %s not found. "
"Assuming already deleted."), compute_id)
LOG.warning(_LW("Nova instance with id: %s not found. "
"Assuming already deleted."), compute_id)
except Exception:
LOG.exception(_LE("Error deleting nova virtual machine."))
raise exceptions.ComputeDeleteException()
@ -249,8 +249,8 @@ class VirtualMachineManager(compute_base.ComputeBase):
self.server_groups.delete(server_group_id)
except nova_exceptions.NotFound:
LOG.warn(_LW("Server group instance with id: %s not found. "
"Assuming already deleted."), server_group_id)
LOG.warning(_LW("Server group instance with id: %s not found. "
"Assuming already deleted."), server_group_id)
except Exception:
LOG.exception(_LE("Error delete server group instance."))
raise exceptions.ServerGroupObjectDeleteException()

View File

@ -107,11 +107,11 @@ class UpdateHealthDb(object):
last_update=(datetime.
datetime.utcnow()))
else:
LOG.warn(_LW('Amphora %(id)s health message reports %(found)i '
'listeners when %(expected)i expected'),
{'id': health['id'],
'found': len(listeners),
'expected': expected_listener_count})
LOG.warning(_LW('Amphora %(id)s health message reports %(found)i '
'listeners when %(expected)i expected'),
{'id': health['id'],
'found': len(listeners),
'expected': expected_listener_count})
# We got a heartbeat so lb is healthy until proven otherwise
lb_status = constants.ONLINE
@ -129,9 +129,9 @@ class UpdateHealthDb(object):
if lb_status == constants.ONLINE:
lb_status = constants.DEGRADED
else:
LOG.warn(_LW('Listener %(list)s reported status of '
'%(status)s'), {'list': listener_id,
'status': listener.get('status')})
LOG.warning(_LW('Listener %(list)s reported status of '
'%(status)s'), {'list': listener_id,
'status': listener.get('status')})
try:
if listener_status is not None:
@ -154,9 +154,9 @@ class UpdateHealthDb(object):
pool_status = constants.ERROR
lb_status = constants.ERROR
else:
LOG.warn(_LW('Pool %(pool)s reported status of '
'%(status)s'), {'pool': pool_id,
'status': pool.get('status')})
LOG.warning(_LW('Pool %(pool)s reported status of '
'%(status)s'), {'pool': pool_id,
'status': pool.get('status')})
members = pool['members']
for member_id, status in six.iteritems(members):
@ -173,9 +173,9 @@ class UpdateHealthDb(object):
elif status == constants.NO_CHECK:
member_status = constants.NO_MONITOR
else:
LOG.warn(_LW('Member %(mem)s reported status of '
'%(status)s'), {'mem': member_id,
'status': status})
LOG.warning(_LW('Member %(mem)s reported status of '
'%(status)s'), {'mem': member_id,
'status': status})
try:
if member_status is not None:

View File

@ -58,15 +58,15 @@ class ListenersUpdate(BaseAmphoraTask):
def revert(self, loadbalancer, *args, **kwargs):
"""Handle failed listeners updates."""
LOG.warn(_LW("Reverting listeners updates."))
LOG.warning(_LW("Reverting listeners updates."))
for listener in loadbalancer.listeners:
try:
self.listener_repo.update(db_apis.get_session(),
id=listener.id,
provisioning_status=constants.ERROR)
except Exception:
LOG.warn(_LW("Failed to update listener %s provisioning "
"status..."), listener.id)
LOG.warning(_LW("Failed to update listener %s provisioning "
"status..."), listener.id)
return None
@ -81,7 +81,7 @@ class ListenerStop(BaseAmphoraTask):
def revert(self, listener, *args, **kwargs):
"""Handle a failed listener stop."""
LOG.warn(_LW("Reverting listener stop."))
LOG.warning(_LW("Reverting listener stop."))
self.listener_repo.update(db_apis.get_session(), id=listener.id,
provisioning_status=constants.ERROR)
return None
@ -98,7 +98,7 @@ class ListenerStart(BaseAmphoraTask):
def revert(self, listener, *args, **kwargs):
"""Handle a failed listener start."""
LOG.warn(_LW("Reverting listener start."))
LOG.warning(_LW("Reverting listener start."))
self.listener_repo.update(db_apis.get_session(), id=listener.id,
provisioning_status=constants.ERROR)
return None
@ -116,15 +116,15 @@ class ListenersStart(BaseAmphoraTask):
def revert(self, listeners, *args, **kwargs):
"""Handle failed listeners starts."""
LOG.warn(_LW("Reverting listeners starts."))
LOG.warning(_LW("Reverting listeners starts."))
for listener in listeners:
try:
self.listener_repo.update(db_apis.get_session(),
id=listener.id,
provisioning_status=constants.ERROR)
except Exception:
LOG.warn(_LW("Failed to update listener %s provisioning "
"status..."), listener.id)
LOG.warning(_LW("Failed to update listener %s provisioning "
"status..."), listener.id)
return None
@ -139,7 +139,7 @@ class ListenerDelete(BaseAmphoraTask):
def revert(self, listener, *args, **kwargs):
"""Handle a failed listener delete."""
LOG.warn(_LW("Reverting listener delete."))
LOG.warning(_LW("Reverting listener delete."))
self.listener_repo.update(db_apis.get_session(), id=listener.id,
provisioning_status=constants.ERROR)
@ -172,7 +172,7 @@ class AmphoraFinalize(BaseAmphoraTask):
"""Handle a failed amphora finalize."""
if isinstance(result, failure.Failure):
return
LOG.warn(_LW("Reverting amphora finalize."))
LOG.warning(_LW("Reverting amphora finalize."))
self.amphora_repo.update(db_apis.get_session(), id=amphora.id,
status=constants.ERROR)
@ -192,7 +192,7 @@ class AmphoraPostNetworkPlug(BaseAmphoraTask):
"""Handle a failed post network plug."""
if isinstance(result, failure.Failure):
return
LOG.warn(_LW("Reverting post network plug."))
LOG.warning(_LW("Reverting post network plug."))
self.amphora_repo.update(db_apis.get_session(), id=amphora.id,
status=constants.ERROR)
@ -211,7 +211,7 @@ class AmphoraePostNetworkPlug(BaseAmphoraTask):
"""Handle a failed post network plug."""
if isinstance(result, failure.Failure):
return
LOG.warn(_LW("Reverting post network plug."))
LOG.warning(_LW("Reverting post network plug."))
for amphora in six.moves.filter(
lambda amp: amp.status == constants.AMPHORA_ALLOCATED,
loadbalancer.amphorae):
@ -233,7 +233,7 @@ class AmphoraPostVIPPlug(BaseAmphoraTask):
"""Handle a failed amphora vip plug notification."""
if isinstance(result, failure.Failure):
return
LOG.warn(_LW("Reverting post vip plug."))
LOG.warning(_LW("Reverting post vip plug."))
self.loadbalancer_repo.update(db_apis.get_session(),
id=loadbalancer.id,
provisioning_status=constants.ERROR)
@ -270,7 +270,7 @@ class AmphoraUpdateVRRPInterface(BaseAmphoraTask):
"""Handle a failed amphora vip plug notification."""
if isinstance(result, failure.Failure):
return
LOG.warn(_LW("Reverting Get Amphora VRRP Interface."))
LOG.warning(_LW("Reverting Get Amphora VRRP Interface."))
for amp in six.moves.filter(
lambda amp: amp.status == constants.AMPHORA_ALLOCATED,
loadbalancer.amphorae):

View File

@ -104,9 +104,9 @@ class ComputeCreate(BaseComputeTask):
if isinstance(result, failure.Failure):
return
compute_id = result
LOG.warn(_LW("Reverting compute create for amphora with id"
"%(amp)s and compute id: %(comp)s"),
{'amp': amphora_id, 'comp': compute_id})
LOG.warning(_LW("Reverting compute create for amphora with id"
"%(amp)s and compute id: %(comp)s"),
{'amp': amphora_id, 'comp': compute_id})
try:
self.compute.delete(compute_id)
except Exception:
@ -207,8 +207,8 @@ class NovaServerGroupCreate(BaseComputeTask):
:param result: here it refers to server group id
"""
server_group_id = result
LOG.warn(_LW("Reverting server group create with id:%s"),
server_group_id)
LOG.warning(_LW("Reverting server group create with id:%s"),
server_group_id)
self.compute.delete_server_group(server_group_id)

View File

@ -112,7 +112,8 @@ class CreateAmphoraInDB(BaseDatabaseTask):
# executed after this failed so we will need to do something and
# result is the amphora's id
LOG.warn(_LW("Reverting create amphora in DB for amp id %s "), result)
LOG.warning(_LW("Reverting create amphora in DB for amp id %s "),
result)
# Delete the amphora for now. May want to just update status later
self.amphora_repo.delete(db_apis.get_session(), id=result)
@ -157,8 +158,8 @@ class DeleteHealthMonitorInDB(BaseDatabaseTask):
:returns: None
"""
LOG.warn(_LW("Reverting mark health monitor delete in DB "
"for health monitor on pool with id %s"), pool_id)
LOG.warning(_LW("Reverting mark health monitor delete in DB "
"for health monitor on pool with id %s"), pool_id)
# TODO(johnsom) fix this
# self.health_mon_repo.update(db_apis.get_session(), health_mon.id,
# provisioning_status=constants.ERROR)
@ -200,8 +201,8 @@ class DeleteMemberInDB(BaseDatabaseTask):
:returns: None
"""
LOG.warn(_LW("Reverting delete in DB "
"for member id %s"), member.id)
LOG.warning(_LW("Reverting delete in DB "
"for member id %s"), member.id)
# TODO(johnsom) fix this
# self.member_repo.update(db_apis.get_session(), member.id,
# operating_status=constants.ERROR)
@ -225,9 +226,8 @@ class DeleteListenerInDB(BaseDatabaseTask):
:returns: None
"""
LOG.warn(_LW(
"Reverting mark listener delete in DB for listener id %s"),
listener_id)
LOG.warning(_LW("Reverting mark listener delete in DB "
"for listener id %s"), listener_id)
class DeletePoolInDB(BaseDatabaseTask):
@ -252,8 +252,8 @@ class DeletePoolInDB(BaseDatabaseTask):
:returns: None
"""
LOG.warn(_LW("Reverting delete in DB "
"for pool id %s"), pool_id)
LOG.warning(_LW("Reverting delete in DB "
"for pool id %s"), pool_id)
# TODO(johnsom) Fix this
# self.pool_repo.update(db_apis.get_session(), pool.id,
# operating_status=constants.ERROR)
@ -281,8 +281,8 @@ class DeleteL7PolicyInDB(BaseDatabaseTask):
:returns: None
"""
LOG.warn(_LW("Reverting delete in DB "
"for l7policy id %s"), l7policy_id)
LOG.warning(_LW("Reverting delete in DB "
"for l7policy id %s"), l7policy_id)
# TODO(sbalukoff) Fix this
# self.listener_repo.update(db_apis.get_session(), l7policy.listener.id,
# operating_status=constants.ERROR)
@ -310,8 +310,8 @@ class DeleteL7RuleInDB(BaseDatabaseTask):
:returns: None
"""
LOG.warn(_LW("Reverting delete in DB "
"for l7rule id %s"), l7rule_id)
LOG.warning(_LW("Reverting delete in DB "
"for l7rule id %s"), l7rule_id)
# TODO(sbalukoff) Fix this
# self.listener_repo.update(db_apis.get_session(),
# l7rule.l7policy.listener.id,
@ -445,9 +445,9 @@ class _MarkAmphoraRoleAndPriorityInDB(BaseDatabaseTask):
if isinstance(result, failure.Failure):
return
LOG.warn(_LW("Reverting amphora role in DB for amp "
"id %(amp)s"),
{'amp': amphora.id})
LOG.warning(_LW("Reverting amphora role in DB for amp "
"id %(amp)s"),
{'amp': amphora.id})
self.amphora_repo.update(db_apis.get_session(), amphora.id,
role=None,
vrrp_priority=None)
@ -513,9 +513,9 @@ class MarkAmphoraAllocatedInDB(BaseDatabaseTask):
if isinstance(result, failure.Failure):
return
LOG.warn(_LW("Reverting mark amphora ready in DB for amp "
"id %(amp)s and compute id %(comp)s"),
{'amp': amphora.id, 'comp': amphora.compute_id})
LOG.warning(_LW("Reverting mark amphora ready in DB for amp "
"id %(amp)s and compute id %(comp)s"),
{'amp': amphora.id, 'comp': amphora.compute_id})
self.amphora_repo.update(db_apis.get_session(), amphora.id,
status=constants.ERROR)
@ -538,9 +538,9 @@ class MarkAmphoraBootingInDB(BaseDatabaseTask):
if isinstance(result, failure.Failure):
return
LOG.warn(_LW("Reverting mark amphora booting in DB for amp "
"id %(amp)s and compute id %(comp)s"),
{'amp': amphora_id, 'comp': compute_id})
LOG.warning(_LW("Reverting mark amphora booting in DB for amp "
"id %(amp)s and compute id %(comp)s"),
{'amp': amphora_id, 'comp': compute_id})
self.amphora_repo.update(db_apis.get_session(), amphora_id,
status=constants.ERROR,
compute_id=compute_id)
@ -564,9 +564,9 @@ class MarkAmphoraDeletedInDB(BaseDatabaseTask):
def revert(self, amphora, *args, **kwargs):
"""Mark the amphora as broken and ready to be cleaned up."""
LOG.warn(_LW("Reverting mark amphora deleted in DB "
"for amp id %(amp)s and compute id %(comp)s"),
{'amp': amphora.id, 'comp': amphora.compute_id})
LOG.warning(_LW("Reverting mark amphora deleted in DB "
"for amp id %(amp)s and compute id %(comp)s"),
{'amp': amphora.id, 'comp': amphora.compute_id})
self.amphora_repo.update(db_apis.get_session(), amphora.id,
status=constants.ERROR)
@ -589,9 +589,9 @@ class MarkAmphoraPendingDeleteInDB(BaseDatabaseTask):
def revert(self, amphora, *args, **kwargs):
"""Mark the amphora as broken and ready to be cleaned up."""
LOG.warn(_LW("Reverting mark amphora pending delete in DB "
"for amp id %(amp)s and compute id %(comp)s"),
{'amp': amphora.id, 'comp': amphora.compute_id})
LOG.warning(_LW("Reverting mark amphora pending delete in DB "
"for amp id %(amp)s and compute id %(comp)s"),
{'amp': amphora.id, 'comp': amphora.compute_id})
self.amphora_repo.update(db_apis.get_session(), amphora.id,
status=constants.ERROR)
@ -614,9 +614,9 @@ class MarkAmphoraPendingUpdateInDB(BaseDatabaseTask):
def revert(self, amphora, *args, **kwargs):
"""Mark the amphora as broken and ready to be cleaned up."""
LOG.warn(_LW("Reverting mark amphora pending update in DB "
"for amp id %(amp)s and compute id %(comp)s"),
{'amp': amphora.id, 'comp': amphora.compute_id})
LOG.warning(_LW("Reverting mark amphora pending update in DB "
"for amp id %(amp)s and compute id %(comp)s"),
{'amp': amphora.id, 'comp': amphora.compute_id})
self.amphora_repo.update(db_apis.get_session(), amphora.id,
status=constants.ERROR)
@ -642,9 +642,9 @@ class MarkAmphoraReadyInDB(BaseDatabaseTask):
def revert(self, amphora, *args, **kwargs):
"""Mark the amphora as broken and ready to be cleaned up."""
LOG.warn(_LW("Reverting mark amphora ready in DB for amp "
"id %(amp)s and compute id %(comp)s"),
{'amp': amphora.id, 'comp': amphora.compute_id})
LOG.warning(_LW("Reverting mark amphora ready in DB for amp "
"id %(amp)s and compute id %(comp)s"),
{'amp': amphora.id, 'comp': amphora.compute_id})
self.amphora_repo.update(db_apis.get_session(), amphora.id,
status=constants.ERROR,
compute_id=amphora.compute_id,
@ -723,11 +723,11 @@ class MarkLBActiveInDB(BaseDatabaseTask):
db_apis.get_session(), listener.id,
provisioning_status=constants.ERROR)
except Exception:
LOG.warn(_LW("Error updating listener %s provisioning "
"status"), listener.id)
LOG.warning(_LW("Error updating listener %s provisioning "
"status"), listener.id)
LOG.warn(_LW("Reverting mark load balancer deleted in DB "
"for load balancer id %s"), loadbalancer.id)
LOG.warning(_LW("Reverting mark load balancer deleted in DB "
"for load balancer id %s"), loadbalancer.id)
self.loadbalancer_repo.update(db_apis.get_session(),
loadbalancer.id,
provisioning_status=constants.ERROR)
@ -744,9 +744,9 @@ class UpdateLBServerGroupInDB(BaseDatabaseTask):
server_group_id=server_group_id)
def revert(self, loadbalancer_id, server_group_id, *args, **kwargs):
LOG.warn(_LW('Reverting Server Group updated with id: %(s1)s for '
'load balancer id: %(s2)s '),
{'s1': server_group_id, 's2': loadbalancer_id})
LOG.warning(_LW('Reverting Server Group updated with id: %(s1)s for '
'load balancer id: %(s2)s '),
{'s1': server_group_id, 's2': loadbalancer_id})
self.loadbalancer_repo.update(db_apis.get_session(),
id=loadbalancer_id,
server_group_id=None)
@ -770,8 +770,8 @@ class MarkLBDeletedInDB(BaseDatabaseTask):
def revert(self, loadbalancer, *args, **kwargs):
"""Mark the load balancer as broken and ready to be cleaned up."""
LOG.warn(_LW("Reverting mark load balancer deleted in DB "
"for load balancer id %s"), loadbalancer.id)
LOG.warning(_LW("Reverting mark load balancer deleted in DB "
"for load balancer id %s"), loadbalancer.id)
self.loadbalancer_repo.update(db_apis.get_session(),
loadbalancer.id,
provisioning_status=constants.ERROR)
@ -796,8 +796,8 @@ class MarkLBPendingDeleteInDB(BaseDatabaseTask):
def revert(self, loadbalancer, *args, **kwargs):
"""Mark the load balancer as broken and ready to be cleaned up."""
LOG.warn(_LW("Reverting mark load balancer pending delete in DB "
"for load balancer id %s"), loadbalancer.id)
LOG.warning(_LW("Reverting mark load balancer pending delete in DB "
"for load balancer id %s"), loadbalancer.id)
self.loadbalancer_repo.update(db_apis.get_session(),
loadbalancer.id,
provisioning_status=constants.ERROR)
@ -825,12 +825,12 @@ class MarkLBAndListenersActiveInDB(BaseDatabaseTask):
def revert(self, loadbalancer, listeners, *args, **kwargs):
"""Mark the load balancer and listeners as broken."""
LOG.warn(_LW("Reverting mark load balancer "
"and listeners active in DB "
"for load balancer id %(LB)s and "
"listener ids: %(list)s"),
{'LB': loadbalancer.id,
'list': ', '.join([l.id for l in listeners])})
LOG.warning(_LW("Reverting mark load balancer "
"and listeners active in DB "
"for load balancer id %(LB)s and "
"listener ids: %(list)s"),
{'LB': loadbalancer.id,
'list': ', '.join([l.id for l in listeners])})
self.loadbalancer_repo.update(db_apis.get_session(),
loadbalancer.id,
provisioning_status=constants.ERROR)
@ -839,8 +839,8 @@ class MarkLBAndListenersActiveInDB(BaseDatabaseTask):
self.listener_repo.update(db_apis.get_session(), listener.id,
provisioning_status=constants.ERROR)
except Exception:
LOG.warn(_LW("Failed to update listener %s provisioning "
"status..."), listener.id)
LOG.warning(_LW("Failed to update listener %s provisioning "
"status..."), listener.id)
class MarkListenerActiveInDB(BaseDatabaseTask):
@ -866,8 +866,8 @@ class MarkListenerActiveInDB(BaseDatabaseTask):
:returns: None
"""
LOG.warn(_LW("Reverting mark listener deleted in DB "
"for listener id %s"), listener.id)
LOG.warning(_LW("Reverting mark listener deleted in DB "
"for listener id %s"), listener.id)
self.listener_repo.update(db_apis.get_session(), listener.id,
provisioning_status=constants.ERROR)
@ -895,8 +895,8 @@ class MarkListenerDeletedInDB(BaseDatabaseTask):
:returns: None
"""
LOG.warn(_LW("Reverting mark listener deleted in DB "
"for listener id %s"), listener.id)
LOG.warning(_LW("Reverting mark listener deleted in DB "
"for listener id %s"), listener.id)
self.listener_repo.update(db_apis.get_session(), listener.id,
provisioning_status=constants.ERROR)
@ -918,8 +918,8 @@ class MarkListenerPendingDeleteInDB(BaseDatabaseTask):
def revert(self, listener, *args, **kwargs):
"""Mark the listener as broken and ready to be cleaned up."""
LOG.warn(_LW("Reverting mark listener pending delete in DB "
"for listener id %s"), listener.id)
LOG.warning(_LW("Reverting mark listener pending delete in DB "
"for listener id %s"), listener.id)
self.listener_repo.update(db_apis.get_session(), listener.id,
provisioning_status=constants.ERROR)
@ -948,8 +948,8 @@ class UpdateLoadbalancerInDB(BaseDatabaseTask):
:returns: None
"""
LOG.warn(_LW("Reverting update loadbalancer in DB "
"for loadbalancer id %s"), loadbalancer.id)
LOG.warning(_LW("Reverting update loadbalancer in DB "
"for loadbalancer id %s"), loadbalancer.id)
self.loadbalancer_repo.update(db_apis.get_session(),
loadbalancer.id,
@ -980,8 +980,8 @@ class UpdateHealthMonInDB(BaseDatabaseTask):
:returns: None
"""
LOG.warn(_LW("Reverting update health monitor in DB "
"for health monitor id %s"), health_mon.pool_id)
LOG.warning(_LW("Reverting update health monitor in DB "
"for health monitor id %s"), health_mon.pool_id)
# TODO(johnsom) fix this to set the upper ojects to ERROR
self.health_mon_repo.update(db_apis.get_session(), health_mon.pool_id,
enabled=0)
@ -1011,8 +1011,8 @@ class UpdateListenerInDB(BaseDatabaseTask):
:returns: None
"""
LOG.warn(_LW("Reverting update listener in DB "
"for listener id %s"), listener.id)
LOG.warning(_LW("Reverting update listener in DB "
"for listener id %s"), listener.id)
# TODO(johnsom) fix this to set the upper ojects to ERROR
self.listener_repo.update(db_apis.get_session(), listener.id,
enabled=0)
@ -1042,8 +1042,8 @@ class UpdateMemberInDB(BaseDatabaseTask):
:returns: None
"""
LOG.warn(_LW("Reverting update member in DB "
"for member id %s"), member.id)
LOG.warning(_LW("Reverting update member in DB "
"for member id %s"), member.id)
# TODO(johnsom) fix this to set the upper ojects to ERROR
self.member_repo.update(db_apis.get_session(), member.id,
enabled=0)
@ -1074,8 +1074,8 @@ class UpdatePoolInDB(BaseDatabaseTask):
:returns: None
"""
LOG.warn(_LW("Reverting update pool in DB "
"for pool id %s"), pool.id)
LOG.warning(_LW("Reverting update pool in DB "
"for pool id %s"), pool.id)
# TODO(johnsom) fix this to set the upper ojects to ERROR
self.repos.update_pool_and_sp(db_apis.get_session(),
pool.id, {'enabled': 0}, None)
@ -1105,8 +1105,8 @@ class UpdateL7PolicyInDB(BaseDatabaseTask):
:returns: None
"""
LOG.warn(_LW("Reverting update l7policy in DB "
"for l7policy id %s"), l7policy.id)
LOG.warning(_LW("Reverting update l7policy in DB "
"for l7policy id %s"), l7policy.id)
# TODO(sbalukoff) fix this to set the upper objects to ERROR
self.l7policy_repo.update(db_apis.get_session(), l7policy.id,
enabled=0)
@ -1136,8 +1136,8 @@ class UpdateL7RuleInDB(BaseDatabaseTask):
:returns: None
"""
LOG.warn(_LW("Reverting update l7rule in DB "
"for l7rule id %s"), l7rule.id)
LOG.warning(_LW("Reverting update l7rule in DB "
"for l7rule id %s"), l7rule.id)
# TODO(sbalukoff) fix this to set appropriate upper objects to ERROR
self.l7policy_repo.update(db_apis.get_session(), l7rule.l7policy.id,
enabled=0)

View File

@ -159,7 +159,7 @@ class PlugNetworks(BaseNetworkTask):
def revert(self, amphora, delta, *args, **kwargs):
"""Handle a failed network plug by removing all nics added."""
LOG.warn(_LW("Unable to plug networks for amp id %s"), amphora.id)
LOG.warning(_LW("Unable to plug networks for amp id %s"), amphora.id)
if not delta:
return
@ -254,8 +254,8 @@ class HandleNetworkDeltas(BaseNetworkTask):
if isinstance(result, failure.Failure):
return
for amp_id, delta in six.iteritems(deltas):
LOG.warn(_LW("Unable to plug networks for amp id %s"),
delta.amphora_id)
LOG.warning(_LW("Unable to plug networks for amp id %s"),
delta.amphora_id)
if not delta:
return
@ -284,8 +284,8 @@ class PlugVIP(BaseNetworkTask):
if isinstance(result, failure.Failure):
return
LOG.warn(_LW("Unable to plug VIP for loadbalancer id %s"),
loadbalancer.id)
LOG.warning(_LW("Unable to plug VIP for loadbalancer id %s"),
loadbalancer.id)
self.network_driver.unplug_vip(loadbalancer, loadbalancer.vip)
@ -324,7 +324,7 @@ class AllocateVIP(BaseNetworkTask):
LOG.exception(_LE("Unable to allocate VIP"))
return
vip = result
LOG.warn(_LW("Deallocating vip %s"), vip.ip_address)
LOG.warning(_LW("Deallocating vip %s"), vip.ip_address)
self.network_driver.deallocate_vip(vip)

View File

@ -200,6 +200,15 @@ def assert_equal_in(logical_line):
"contents.")
def no_log_warn(logical_line):
    """Flag use of the deprecated LOG.warn().

    O339
    """
    msg = "O339:Use LOG.warning() rather than LOG.warn()"
    if logical_line.startswith('LOG.warn('):
        yield 0, msg
def factory(register):
register(assert_true_instance)
register(assert_equal_or_not_none)
@ -210,3 +219,4 @@ def factory(register):
register(assert_equal_true_or_false)
register(no_mutable_default_args)
register(assert_equal_in)
register(no_log_warn)

View File

@ -128,3 +128,10 @@ class HackingTestCase(base.BaseTestCase):
self.assertEqual(0, len(list(checks.assert_equal_true_or_false(
"self.assertFalse()"))))
def test_no_log_warn(self):
    """O339: LOG.warn() is flagged; LOG.warning() is not."""
    flagged = list(checks.no_log_warn("LOG.warn()"))
    self.assertEqual(1, len(flagged))
    clean = list(checks.no_log_warn("LOG.warning()"))
    self.assertEqual(0, len(clean))