Handle log message interpolation by the logger

According to the OpenStack oslo.i18n guidelines [1], log messages
should have their variables interpolated by the logger itself rather
than pre-formatted by the caller.

[1]: http://docs.openstack.org/developer/oslo.i18n/guidelines.html#adding-variables-to-log-messages

Change-Id: I532fe2537fcd2ae6b4344d214a308b1a26416745
Erik Olof Gunnar Andersson 2017-06-07 15:28:31 -07:00
parent 69a084ca45
commit adfe2fec38
18 changed files with 200 additions and 211 deletions
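
For context, here is a minimal, self-contained sketch of the pattern this
commit applies throughout the diffs below. The interface name and logger
setup are illustrative only, not taken from the Octavia code:

    import logging

    logging.basicConfig(level=logging.DEBUG)
    LOG = logging.getLogger(__name__)

    interface = 'eth1'  # illustrative value

    # Discouraged: str.format() builds the message eagerly, even when
    # DEBUG is disabled, and the logger only ever sees the final string.
    LOG.debug('Plugging interface {0}'.format(interface))

    # Preferred: the logger interpolates the arguments itself, and only
    # when the record is actually going to be emitted.
    LOG.debug('Plugging interface %s', interface)

    # Named placeholders with a dict work the same way; the oslo.i18n
    # guidelines recommend them when a message has more than one
    # variable, since translators may need to reorder the values.
    LOG.debug('Plugging interface %(iface)s', {'iface': interface})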

View File

@ -107,8 +107,8 @@ class Keepalived(object):
subprocess.check_output(init_enable_cmd.split(),
stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
LOG.debug("Failed to enable octavia-keepalived service: "
"%(err)s", {'err': e})
LOG.debug('Failed to enable octavia-keepalived service: '
'%(err)s', {'err': e})
return flask.make_response(flask.jsonify(dict(
message="Error enabling octavia-keepalived service",
details=e.output)), 500)
@ -134,8 +134,7 @@ class Keepalived(object):
try:
subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
LOG.debug("Failed to {0} keepalived service: {1}".format(action,
e))
LOG.debug('Failed to %s keepalived service: %s', action, e)
return flask.make_response(flask.jsonify(dict(
message="Failed to {0} keepalived service".format(action),
details=e.output)), 500)

View File

@ -148,7 +148,7 @@ class Listener(object):
init_system = util.get_os_init_system()
LOG.debug('Found init system: {0}'.format(init_system))
LOG.debug('Found init system: %s', init_system)
init_path = util.init_path(listener_id, init_system)

View File

@ -184,8 +184,7 @@ class BaseOS(object):
try:
subprocess.check_output(cmd.split(), stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
LOG.error('Failed to if up {0} due to '
'error: {1}'.format(interface, str(e)))
LOG.error('Failed to if up %s due to error: %s', interface, e)
raise exceptions.HTTPException(
response=flask.make_response(flask.jsonify(dict(
message='Error plugging {0}'.format(what),

View File

@ -183,10 +183,9 @@ class Plug(object):
# Note, eth0 is skipped because that is the VIP interface
netns_interface = 'eth{0}'.format(len(netns.get_links()))
LOG.info('Plugged interface {0} will become {1} in the '
'namespace {2}'.format(default_netns_interface,
netns_interface,
consts.AMPHORA_NAMESPACE))
LOG.info('Plugged interface %s will become %s in the namespace %s',
default_netns_interface, netns_interface,
consts.AMPHORA_NAMESPACE)
interface_file_path = self._osutils.get_network_interface_file(
netns_interface)
self._osutils.write_port_interface_file(

View File

@ -72,25 +72,22 @@ def run_sender(cmd_queue):
except IOError as e:
# Missing PID file, skip health heartbeat
if e.errno == errno.ENOENT:
LOG.error('Missing keepalived PID file {0}, skipping '
'health heartbeat.'.format(keepalived_pid_path))
LOG.error('Missing keepalived PID file %s, skipping health '
'heartbeat.', keepalived_pid_path)
else:
LOG.error('Failed to check keepalived and haproxy status '
'due to exception {0}, skipping health '
'heartbeat.'.format(str(e)))
LOG.error('Failed to check keepalived and haproxy status due '
'to exception %s, skipping health heartbeat.', e)
except OSError as e:
# Keepalived is not running, skip health heartbeat
if e.errno == errno.ESRCH:
LOG.error('Keepalived is configured but not running, skipping '
'health heartbeat.'.format(keepalived_pid_path))
LOG.error('Keepalived is configured but not running, '
'skipping health heartbeat.')
else:
LOG.error('Failed to check keepalived and haproxy status '
'due to exception {0}, skipping health '
'heartbeat.'.format(str(e)))
LOG.error('Failed to check keepalived and haproxy status due '
'to exception %s, skipping health heartbeat.', e)
except Exception as e:
LOG.error('Failed to check keepalived and haproxy status '
'due to exception {0}, skipping health '
'heartbeat.'.format(str(e)))
LOG.error('Failed to check keepalived and haproxy status due to '
'exception %s, skipping health heartbeat.', e)
try:
cmd = cmd_queue.get_nowait()

View File

@ -136,9 +136,9 @@ class HaproxyAmphoraLoadBalancerDriver(
load_balancer.vip.ip_address,
net_info)
except exc.Conflict:
LOG.warning(('VIP with MAC {mac} already exists on '
'amphora, skipping post_vip_plug').format(
mac=port.mac_address))
LOG.warning('VIP with MAC %(mac)s already exists on amphora, '
'skipping post_vip_plug',
{'mac': port.mac_address})
def post_network_plug(self, amphora, port):
fixed_ips = []
@ -156,9 +156,9 @@ class HaproxyAmphoraLoadBalancerDriver(
try:
self.client.plug_network(amphora, port_info)
except exc.Conflict:
LOG.warning(('Network with MAC {mac} already exists on '
'amphora, skipping post_network_plug').format(
mac=port.mac_address))
LOG.warning('Network with MAC %(mac)s already exists on amphora, '
'skipping post_network_plug',
{'mac': port.mac_address})
def _process_tls_certificates(self, listener):
"""Processes TLS data from the listener.
@ -253,7 +253,7 @@ class AmphoraAPIClient(object):
LOG.debug("request url %s", path)
_request = getattr(self.session, method.lower())
_url = self._base_url(amp.lb_network_ip) + path
LOG.debug("request url " + _url)
LOG.debug("request url %s", _url)
timeout_tuple = (CONF.haproxy_amphora.rest_request_conn_timeout,
CONF.haproxy_amphora.rest_request_read_timeout)
reqargs = {
@ -276,8 +276,8 @@ class AmphoraAPIClient(object):
message="A true SSLContext object is not available"
)
r = _request(**reqargs)
LOG.debug("Connected to amphora. Response: {resp}".format(
resp=r))
LOG.debug('Connected to amphora. Response: %(resp)s',
{'resp': r})
# Give a 404 response one retry. Flask/werkzeug is
# returning 404 on startup.
if r.status_code == 404 and retry_attempt is False:

View File

@ -163,7 +163,7 @@ class UDPStatusGetter(object):
"""
(data, srcaddr) = self.sock.recvfrom(UDP_MAX_SIZE)
LOG.debug("Received packet from {}".format(srcaddr))
LOG.debug('Received packet from %s', srcaddr)
obj = status_message.unwrap_envelope(data, self.key)
return obj, srcaddr

View File

@ -60,8 +60,8 @@ class BaseController(rest.RestController):
"""Gets an object from the database and returns it."""
db_obj = repo.get(session, id=id)
if not db_obj:
LOG.exception("{name} {id} not found".format(
name=data_model._name(), id=id))
LOG.exception('%(name)s %(id)s not found',
{'name': data_model._name(), 'id': id})
raise exceptions.NotFound(
resource=data_model._name(), id=id)
return db_obj

View File

@ -60,8 +60,8 @@ class BaseController(rest.RestController):
"""Gets an object from the database and returns it."""
db_obj = repo.get(session, id=id)
if not db_obj:
LOG.exception("{name} {id} not found".format(
name=data_model._name(), id=id))
LOG.exception('%(name)s %(id)s not found',
{'name': data_model._name(), 'id': id})
raise exceptions.NotFound(
resource=data_model._name(), id=id)
return db_obj

View File

@ -55,8 +55,7 @@ class BarbicanCertManager(cert_mgr.CertManager):
"""
connection = self.auth.get_barbican_client(project_id)
LOG.info("Storing certificate container '{0}' in "
"Barbican.".format(name))
LOG.info("Storing certificate container '%s' in Barbican.", name)
certificate_secret = None
private_key_secret = None
@ -103,14 +102,14 @@ class BarbicanCertManager(cert_mgr.CertManager):
old_ref = i.secret_ref
try:
i.delete()
LOG.info("Deleted secret {0} ({1}) during "
"rollback.".format(i.name, old_ref))
LOG.info('Deleted secret %s (%s) during rollback.',
i.name, old_ref)
except Exception:
LOG.warning("Failed to delete {0} ({1}) during "
"rollback. This might not be a "
"problem.".format(i.name, old_ref))
LOG.warning('Failed to delete %s (%s) during '
'rollback. This might not be a problem.',
i.name, old_ref)
with excutils.save_and_reraise_exception():
LOG.error("Error storing certificate data: {0}".format(str(e)))
LOG.error('Error storing certificate data: %s', e)
def get_cert(self, project_id, cert_ref, resource_ref=None,
check_only=False, service_name='Octavia'):
@ -127,8 +126,7 @@ class BarbicanCertManager(cert_mgr.CertManager):
"""
connection = self.auth.get_barbican_client(project_id)
LOG.info("Loading certificate container {0} from "
"Barbican.".format(cert_ref))
LOG.info('Loading certificate container %s from Barbican.', cert_ref)
try:
if check_only:
cert_container = connection.containers.get(
@ -143,7 +141,7 @@ class BarbicanCertManager(cert_mgr.CertManager):
return barbican_common.BarbicanCert(cert_container)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error("Error getting {0}: {1}".format(cert_ref, str(e)))
LOG.error('Error getting %s: %s', cert_ref, e)
def delete_cert(self, project_id, cert_ref, resource_ref=None,
service_name='Octavia'):
@ -157,8 +155,7 @@ class BarbicanCertManager(cert_mgr.CertManager):
"""
connection = self.auth.get_barbican_client(project_id)
LOG.info("Deregistering as a consumer of {0} in "
"Barbican.".format(cert_ref))
LOG.info('Deregistering as a consumer of %s in Barbican.', cert_ref)
try:
connection.containers.remove_consumer(
container_ref=cert_ref,
@ -167,5 +164,5 @@ class BarbicanCertManager(cert_mgr.CertManager):
)
except Exception as e:
with excutils.save_and_reraise_exception():
LOG.error("Error deregistering as a consumer of {0}: "
"{1}".format(cert_ref, str(e)))
LOG.error('Error deregistering as a consumer of %s: %s',
cert_ref, e)

View File

@ -614,10 +614,13 @@ class MarkAmphoraAllocatedInDB(BaseDatabaseTask):
:returns: None
"""
LOG.info(("Mark ALLOCATED in DB for amphora: %(amp)s with "
"compute id %(comp)s for load balancer: %(lb)s"),
{"amp": amphora.id, "comp": amphora.compute_id,
"lb": loadbalancer_id})
LOG.info('Mark ALLOCATED in DB for amphora: %(amp)s with '
'compute id %(comp)s for load balancer: %(lb)s',
{
'amp': amphora.id,
'comp': amphora.compute_id,
'lb': loadbalancer_id
})
self.amphora_repo.update(db_apis.get_session(), amphora.id,
status=constants.AMPHORA_ALLOCATED,
compute_id=amphora.compute_id,
@ -2266,8 +2269,8 @@ class DecrementHealthMonitorQuota(BaseDatabaseTask):
except Exception:
with excutils.save_and_reraise_exception():
LOG.error('Failed to decrement health monitor quota for '
'project: {proj} the project may have excess '
'quota in use.'.format(proj=health_mon.project_id))
'project: %(proj)s the project may have excess '
'quota in use.', {'proj': health_mon.project_id})
lock_session.rollback()
def revert(self, health_mon, result, *args, **kwargs):
@ -2277,9 +2280,9 @@ class DecrementHealthMonitorQuota(BaseDatabaseTask):
:returns: None
"""
LOG.warning('Reverting decrement quota for health monitor '
'on project {proj} Project quota counts may be '
'incorrect.'.format(proj=health_mon.project_id))
LOG.warning('Reverting decrement quota for health monitor on project'
' %(proj)s Project quota counts may be incorrect.',
{'proj': health_mon.project_id})
# Increment the quota back if this task wasn't the failure
if not isinstance(result, failure.Failure):
@ -2324,9 +2327,9 @@ class DecrementListenerQuota(BaseDatabaseTask):
lock_session.commit()
except Exception:
with excutils.save_and_reraise_exception():
LOG.error('Failed to decrement listener quota for '
'project: {proj} the project may have excess '
'quota in use.'.format(proj=listener.project_id))
LOG.error('Failed to decrement listener quota for project: '
'%(proj)s the project may have excess quota in use.',
{'proj': listener.project_id})
lock_session.rollback()
def revert(self, listener, result, *args, **kwargs):
@ -2336,9 +2339,9 @@ class DecrementListenerQuota(BaseDatabaseTask):
:returns: None
"""
LOG.warning('Reverting decrement quota for listener '
'on project {proj} Project quota counts may be '
'incorrect.'.format(proj=listener.project_id))
LOG.warning('Reverting decrement quota for listener on project '
'%(proj)s Project quota counts may be incorrect.',
{'proj': listener.project_id})
# Increment the quota back if this task wasn't the failure
if not isinstance(result, failure.Failure):
@ -2384,8 +2387,8 @@ class DecrementLoadBalancerQuota(BaseDatabaseTask):
except Exception:
with excutils.save_and_reraise_exception():
LOG.error('Failed to decrement load balancer quota for '
'project: {proj} the project may have excess '
'quota in use.'.format(proj=loadbalancer.project_id))
'project: %(proj)s the project may have excess '
'quota in use.', {'proj': loadbalancer.project_id})
lock_session.rollback()
def revert(self, loadbalancer, result, *args, **kwargs):
@ -2395,9 +2398,9 @@ class DecrementLoadBalancerQuota(BaseDatabaseTask):
:returns: None
"""
LOG.warning('Reverting decrement quota for load balancer '
'on project {proj} Project quota counts may be '
'incorrect.'.format(proj=loadbalancer.project_id))
LOG.warning('Reverting decrement quota for load balancer on project '
'%(proj)s Project quota counts may be incorrect.',
{'proj': loadbalancer.project_id})
# Increment the quota back if this task wasn't the failure
if not isinstance(result, failure.Failure):
@ -2442,9 +2445,9 @@ class DecrementMemberQuota(BaseDatabaseTask):
lock_session.commit()
except Exception:
with excutils.save_and_reraise_exception():
LOG.error('Failed to decrement member quota for '
'project: {proj} the project may have excess '
'quota in use.'.format(proj=member.project_id))
LOG.error('Failed to decrement member quota for project: '
'%(proj)s the project may have excess quota in use.',
{'proj': member.project_id})
lock_session.rollback()
def revert(self, member, result, *args, **kwargs):
@ -2454,9 +2457,9 @@ class DecrementMemberQuota(BaseDatabaseTask):
:returns: None
"""
LOG.warning('Reverting decrement quota for member '
'on project {proj} Project quota counts may be '
'incorrect.'.format(proj=member.project_id))
LOG.warning('Reverting decrement quota for member on project %(proj)s '
'Project quota counts may be incorrect.',
{'proj': member.project_id})
# Increment the quota back if this task wasn't the failure
if not isinstance(result, failure.Failure):
@ -2513,9 +2516,9 @@ class DecrementPoolQuota(BaseDatabaseTask):
lock_session.commit()
except Exception:
with excutils.save_and_reraise_exception():
LOG.error('Failed to decrement pool quota for '
'project: {proj} the project may have excess '
'quota in use.'.format(proj=pool.project_id))
LOG.error('Failed to decrement pool quota for project: '
'%(proj)s the project may have excess quota in use.',
{'proj': pool.project_id})
lock_session.rollback()
def revert(self, pool, pool_child_count, result, *args, **kwargs):
@ -2525,9 +2528,9 @@ class DecrementPoolQuota(BaseDatabaseTask):
:returns: None
"""
LOG.warning('Reverting decrement quota for pool '
'on project {proj} Project quota counts may be '
'incorrect.'.format(proj=pool.project_id))
LOG.warning('Reverting decrement quota for pool on project %(proj)s '
'Project quota counts may be incorrect.',
{'proj': pool.project_id})
# Increment the quota back if this task wasn't the failure
if not isinstance(result, failure.Failure):

View File

@ -412,9 +412,9 @@ class PlugPorts(BaseNetworkTask):
def execute(self, amphora, ports):
for port in ports:
LOG.debug('Plugging port ID: {port_id} into compute instance: '
'{compute_id}.'.format(port_id=port.id,
compute_id=amphora.compute_id))
LOG.debug('Plugging port ID: %(port_id)s into compute instance: '
'%(compute_id)s.',
{'port_id': port.id, 'compute_id': amphora.compute_id})
self.network_driver.plug_port(amphora, port)
@ -423,9 +423,9 @@ class PlugVIPPort(BaseNetworkTask):
def execute(self, amphora, amphorae_network_config):
vrrp_port = amphorae_network_config.get(amphora.id).vrrp_port
LOG.debug('Plugging VIP VRRP port ID: {port_id} into compute '
'instance: {compute_id}.'.format(
port_id=vrrp_port.id, compute_id=amphora.compute_id))
LOG.debug('Plugging VIP VRRP port ID: %(port_id)s into compute '
'instance: %(compute_id)s.',
{'port_id': vrrp_port.id, 'compute_id': amphora.compute_id})
self.network_driver.plug_port(amphora, vrrp_port)
def revert(self, result, amphora, amphorae_network_config,
@ -435,14 +435,14 @@ class PlugVIPPort(BaseNetworkTask):
vrrp_port = amphorae_network_config.get(amphora.id).vrrp_port
self.network_driver.unplug_port(amphora, vrrp_port)
except Exception:
LOG.warning(('Failed to unplug vrrp port: {port} from amphora: '
'{amp}').format(port=vrrp_port.id, amp=amphora.id))
LOG.warning('Failed to unplug vrrp port: %(port)s from amphora: '
'%(amp)s', {'port': vrrp_port.id, 'amp': amphora.id})
class WaitForPortDetach(BaseNetworkTask):
"""Task to wait for the neutron ports to detach from an amphora."""
def execute(self, amphora):
LOG.debug('Waiting for ports to detach from amphora: '
'{amp_id}.'.format(amp_id=amphora.id))
LOG.debug('Waiting for ports to detach from amphora: %(amp_id)s.',
{'amp_id': amphora.id})
self.network_driver.wait_for_port_detach(amphora)

View File

@ -287,8 +287,8 @@ class Repositories(object):
:param count: Number of objects we're going to create (default=1)
:returns: True if quota is met, False if quota was available
"""
LOG.debug('Checking quota for project: {proj} object: {obj}'.format(
proj=project_id, obj=str(_class)))
LOG.debug('Checking quota for project: %(proj)s object: %(obj)s',
{'proj': project_id, 'obj': _class})
# Under noauth everything is admin, so no quota
if CONF.auth_strategy == consts.NOAUTH:
@ -422,8 +422,8 @@ class Repositories(object):
else:
return True
except db_exception.DBDeadlock:
LOG.warning(('Quota project lock timed out for project: '
'{proj}').format(proj=project_id))
LOG.warning('Quota project lock timed out for project: %(proj)s',
{'proj': project_id})
raise exceptions.ProjectBusyException()
return False
@ -436,9 +436,9 @@ class Repositories(object):
:param quantity: Quantity of quota to decrement
:returns: None
"""
LOG.debug('Decrementing quota by: {quant} for project: {proj} '
'object: {obj}'.format(quant=quantity, proj=project_id,
obj=str(_class)))
LOG.debug('Decrementing quota by: %(quant)s for project: %(proj)s '
'object: %(obj)s',
{'quant': quantity, 'proj': project_id, 'obj': _class})
# Lock the project record in the database to block other quota checks
try:
@ -446,10 +446,10 @@ class Repositories(object):
project_id=project_id).with_for_update().first()
if not quotas:
if not CONF.auth_strategy == consts.NOAUTH:
LOG.error('Quota decrement on {clss} called on project: '
'{proj} with no quota record in the '
'database.'.format(clss=type(_class),
proj=project_id))
LOG.error('Quota decrement on %(clss)s called on '
'project: %(proj)s with no quota record in '
'the database.',
{'clss': type(_class), 'proj': project_id})
return
if _class == data_models.LoadBalancer:
if (quotas.in_use_load_balancer is not None and
@ -458,10 +458,10 @@ class Repositories(object):
quotas.in_use_load_balancer - quantity)
else:
if not CONF.auth_strategy == consts.NOAUTH:
LOG.warning('Quota decrement on {clss} called on '
'project: {proj} that would cause a '
'negative quota.'.format(clss=type(_class),
proj=project_id))
LOG.warning('Quota decrement on %(clss)s called on '
'project: %(proj)s that would cause a '
'negative quota.',
{'clss': type(_class), 'proj': project_id})
if _class == data_models.Listener:
if (quotas.in_use_listener is not None and
quotas.in_use_listener > 0):
@ -469,10 +469,10 @@ class Repositories(object):
quotas.in_use_listener - quantity)
else:
if not CONF.auth_strategy == consts.NOAUTH:
LOG.warning('Quota decrement on {clss} called on '
'project: {proj} that would cause a '
'negative quota.'.format(clss=type(_class),
proj=project_id))
LOG.warning('Quota decrement on %(clss)s called on '
'project: %(proj)s that would cause a '
'negative quota.',
{'clss': type(_class), 'proj': project_id})
if _class == data_models.Pool:
if (quotas.in_use_pool is not None and
quotas.in_use_pool > 0):
@ -480,10 +480,10 @@ class Repositories(object):
quotas.in_use_pool - quantity)
else:
if not CONF.auth_strategy == consts.NOAUTH:
LOG.warning('Quota decrement on {clss} called on '
'project: {proj} that would cause a '
'negative quota.'.format(clss=type(_class),
proj=project_id))
LOG.warning('Quota decrement on %(clss)s called on '
'project: %(proj)s that would cause a '
'negative quota.',
{'clss': type(_class), 'proj': project_id})
if _class == data_models.HealthMonitor:
if (quotas.in_use_health_monitor is not None and
quotas.in_use_health_monitor > 0):
@ -491,10 +491,10 @@ class Repositories(object):
quotas.in_use_health_monitor - quantity)
else:
if not CONF.auth_strategy == consts.NOAUTH:
LOG.warning('Quota decrement on {clss} called on '
'project: {proj} that would cause a '
'negative quota.'.format(clss=type(_class),
proj=project_id))
LOG.warning('Quota decrement on %(clss)s called on '
'project: %(proj)s that would cause a '
'negative quota.',
{'clss': type(_class), 'proj': project_id})
if _class == data_models.Member:
if (quotas.in_use_member is not None and
quotas.in_use_member > 0):
@ -502,13 +502,13 @@ class Repositories(object):
quotas.in_use_member - quantity)
else:
if not CONF.auth_strategy == consts.NOAUTH:
LOG.warning('Quota decrement on {clss} called on '
'project: {proj} that would cause a '
'negative quota.'.format(clss=type(_class),
proj=project_id))
LOG.warning('Quota decrement on %(clss)s called on '
'project: %(proj)s that would cause a '
'negative quota.',
{'clss': type(_class), 'proj': project_id})
except db_exception.DBDeadlock:
LOG.warning(('Quota project lock timed out for project: '
'{proj}').format(proj=project_id))
LOG.warning('Quota project lock timed out for project: %(proj)s',
{'proj': project_id})
raise exceptions.ProjectBusyException()
def create_load_balancer_tree(self, session, lock_session, lb_dict):

View File

@ -94,8 +94,8 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
new_port = self.neutron_client.create_port(port)
new_port = utils.convert_port_dict_to_model(new_port)
LOG.debug('Created vip port: {port_id} for amphora: {amp}'.format(
port_id=new_port.id, amp=amphora.id))
LOG.debug('Created vip port: %(port_id)s for amphora: %(amp)s',
{'port_id': new_port.id, 'amp': amphora.id})
interface = self.plug_port(amphora, new_port)
except Exception:
@ -269,8 +269,8 @@ class AllowedAddressPairsDriver(neutron_base.BaseNeutronDriver):
self.neutron_client.delete_port(amphora.vrrp_port_id)
except (neutron_client_exceptions.NotFound,
neutron_client_exceptions.PortNotFoundClient):
LOG.debug('VIP instance port {0} already deleted. '
'Skipping.'.format(amphora.vrrp_port_id))
LOG.debug('VIP instance port %s already deleted. Skipping.',
amphora.vrrp_port_id)
try:
port = self.get_port(vip.port_id)

View File

@ -49,18 +49,20 @@ class BaseNeutronDriver(base.AbstractNetworkDriver):
def _check_extension_enabled(self, extension_alias):
if extension_alias in self._check_extension_cache:
status = self._check_extension_cache[extension_alias]
LOG.debug('Neutron extension {ext} cached as {status}'.format(
ext=extension_alias,
status='enabled' if status else 'disabled'))
LOG.debug('Neutron extension %(ext)s cached as %(status)s',
{
'ext': extension_alias,
'status': 'enabled' if status else 'disabled'
})
else:
try:
self.neutron_client.show_extension(extension_alias)
LOG.debug('Neutron extension {ext} found enabled'.format(
ext=extension_alias))
LOG.debug('Neutron extension %(ext)s found enabled',
{'ext': extension_alias})
self._check_extension_cache[extension_alias] = True
except neutron_client_exceptions.NotFound:
LOG.debug('Neutron extension {ext} is not enabled'.format(
ext=extension_alias))
LOG.debug('Neutron extension %(ext)s is not enabled',
{'ext': extension_alias})
self._check_extension_cache[extension_alias] = False
return self._check_extension_cache[extension_alias]

View File

@ -900,8 +900,8 @@ class NetworkScenarioTest(ScenarioTest):
"failed to reach status: {st}"
.format(fp=floating_ip, cst=floating_ip['status'],
st=status))
LOG.info("FloatingIP: {fp} is at status: {st}"
.format(fp=floating_ip, st=status))
LOG.info('FloatingIP: %(fp)s is at status: %(st)s',
{'fp': floating_ip, 'st': status})
def _check_tenant_network_connectivity(self, server,
username,

View File

@ -186,8 +186,8 @@ class BaseTestCase(manager.NetworkScenarioTest):
server = server['server']
self.servers_keypairs[server['id']] = keypair
LOG.info(('servers_keypairs looks like this(format): {0}'.format(
self.servers_keypairs)))
LOG.info('servers_keypairs looks like this(format): %s',
self.servers_keypairs)
if (config.network.public_network_id and not
config.network.project_networks_reachable):
@ -216,11 +216,11 @@ class BaseTestCase(manager.NetworkScenarioTest):
def _stop_server(self, name):
for sname, value in six.iteritems(self.servers):
if sname == name:
LOG.info(('STOPPING SERVER: {0}'.format(sname)))
LOG.info('STOPPING SERVER: %s', sname)
self.servers_client.stop_server(value)
waiters.wait_for_server_status(self.servers_client,
value, 'SHUTOFF')
LOG.info(('STOPPING SERVER COMPLETED!'))
LOG.info('STOPPING SERVER COMPLETED!')
def _start_server(self, name):
for sname, value in six.iteritems(self.servers):
@ -287,8 +287,8 @@ class BaseTestCase(manager.NetworkScenarioTest):
self.assertTrue(self.listener)
self.addCleanup(self._cleanup_listener, self.listener['id'],
load_balancer_id)
LOG.info(('Waiting for lb status on create listener id: {0}'.format(
self.listener['id'])))
LOG.info('Waiting for lb status on create listener id: %s',
self.listener['id'])
self._wait_for_load_balancer_status(load_balancer_id)
return self.listener
@ -334,8 +334,8 @@ class BaseTestCase(manager.NetworkScenarioTest):
**create_pool_kwargs)
self.assertTrue(self.pool)
self.addCleanup(self._cleanup_pool, self.pool['id'], load_balancer_id)
LOG.info(('Waiting for lb status on create pool id: {0}'.format(
self.pool['id'])))
LOG.info('Waiting for lb status on create pool id: %s',
self.pool['id'])
self._wait_for_load_balancer_status(load_balancer_id)
return self.pool
@ -391,7 +391,7 @@ class BaseTestCase(manager.NetworkScenarioTest):
lb_id=load_balancer_id,
pool_id=pool_id,
**create_member_kwargs)
LOG.info(('Waiting for lb status on create member...'))
LOG.info('Waiting for lb status on create member...')
self._wait_for_load_balancer_status(load_balancer_id)
self.members.append(member)
self.assertTrue(self.members)
@ -399,8 +399,7 @@ class BaseTestCase(manager.NetworkScenarioTest):
def _assign_floating_ip_to_lb_vip(self, lb):
public_network_id = config.network.public_network_id
LOG.info(('assign_floating_ip_to_lb_vip lb: {0} type: {1}'.format(
lb, type(lb))))
LOG.info('assign_floating_ip_to_lb_vip lb: %s type: %s', lb, type(lb))
port_id = lb['vip']['port_id']
floating_ip = self._create_floating_ip(
@ -444,8 +443,8 @@ class BaseTestCase(manager.NetworkScenarioTest):
"failed to reach status: {st}"
.format(fp=floating_ip, cst=floating_ip['status'],
st=status))
LOG.info("FloatingIP: {fp} is at status: {st}"
.format(fp=floating_ip, st=status))
LOG.info('FloatingIP: %(fp)s is at status: %(st)s',
{'fp': floating_ip, 'st': status})
def _create_load_balancer(self, ip_version=4, persistence_type=None):
"""Create a load balancer.
@ -462,8 +461,7 @@ class BaseTestCase(manager.NetworkScenarioTest):
**self.create_lb_kwargs)
lb_id = self.load_balancer['id']
self.addCleanup(self._cleanup_load_balancer, lb_id)
LOG.info(('Waiting for lb status on create load balancer id: {0}'
.format(lb_id)))
LOG.info('Waiting for lb status on create load balancer id: %s', lb_id)
self.load_balancer = self._wait_for_load_balancer_status(
load_balancer_id=lb_id,
provisioning_status='ACTIVE',
@ -513,8 +511,8 @@ class BaseTestCase(manager.NetworkScenarioTest):
lb_client.create_load_balancer_over_quota(
**self.create_lb_kwargs)
LOG.info(('Waiting for lb status on create load balancer id: {0}'
.format(lb_id)))
LOG.info('Waiting for lb status on create load balancer id: %s',
lb_id)
self.load_balancer = self._wait_for_load_balancer_status(
load_balancer_id=lb_id,
provisioning_status='ACTIVE',
@ -537,9 +535,9 @@ class BaseTestCase(manager.NetworkScenarioTest):
else:
raise e
LOG.info(('provisioning_status: {0} operating_status: {1}'.format(
lb.get('provisioning_status'),
lb.get('operating_status'))))
LOG.info('provisioning_status: %s operating_status: %s',
lb.get('provisioning_status'),
lb.get('operating_status'))
if delete and lb.get('provisioning_status') == 'DELETED':
break
@ -602,11 +600,11 @@ class BaseTestCase(manager.NetworkScenarioTest):
lambda x: six.b(x) if type(x) == six.text_type else x, members))
LOG.info(_('Checking all members are balanced...'))
self._wait_for_http_service(self.vip_ip)
LOG.info(_('Connection to {vip} is valid').format(vip=self.vip_ip))
LOG.info(_('Connection to %(vip)s is valid'), {'vip': self.vip_ip})
counters = self._send_concurrent_requests(self.vip_ip)
for member, counter in six.iteritems(counters):
LOG.info(_('Member {member} saw {counter} requests.').format(
member=member, counter=counter))
LOG.info(_('Member %(member)s saw %(counter)s requests.'),
{'member': member, 'counter': counter})
self.assertGreater(counter, 0,
'Member %s never balanced' % member)
for member in members:
@ -624,18 +622,18 @@ class BaseTestCase(manager.NetworkScenarioTest):
def _wait_for_http_service(self, check_ip, port=80):
def try_connect(check_ip, port):
try:
LOG.info(('checking connection to ip: {0} port: {1}'.format(
check_ip, port)))
LOG.info('checking connection to ip: %s port: %d',
check_ip, port)
resp = urllib2.urlopen("http://{0}:{1}/".format(check_ip,
port))
if resp.getcode() == 200:
return True
return False
except IOError as e:
LOG.info(('Got IOError in check connection: {0}'.format(e)))
LOG.info('Got IOError in check connection: %s', e)
return False
except error.HTTPError as e:
LOG.info(('Got HTTPError in check connection: {0}'.format(e)))
LOG.info('Got HTTPError in check connection: %s', e)
return False
timeout = config.validation.ping_timeout
@ -660,7 +658,7 @@ class BaseTestCase(manager.NetworkScenarioTest):
# of success and continue connection tries
except (error.HTTPError, error.URLError,
socket.timeout, socket.error) as e:
LOG.info(('Got Error in sending request: {0}'.format(e)))
LOG.info('Got Error in sending request: %s', e)
continue
return counters
@ -698,7 +696,7 @@ class BaseTestCase(manager.NetworkScenarioTest):
for ct in client_threads:
timeout -= ct.join(timeout)
if timeout <= 0:
LOG.error("Client thread {0} timed out".format(ct.name))
LOG.error('Client thread %s timed out', ct.name)
return dict()
for server in list(ct.counters):
if server not in total_counters:
@ -822,9 +820,8 @@ class BaseTestCase(manager.NetworkScenarioTest):
proc = subprocess.Popen(args, **subprocess_args)
stdout, stderr = proc.communicate()
if proc.returncode != 0:
LOG.error(("Command {0} returned with exit status {1},"
"output {2}, error {3}").format(cmd, proc.returncode,
stdout, stderr))
LOG.error('Command %s returned with exit status %s,output %s, '
'error %s', cmd, proc.returncode, stdout, stderr)
return stdout
def _set_quotas(self, project_id=None, load_balancer=20, listener=20,
@ -862,8 +859,8 @@ class BaseTestCase(manager.NetworkScenarioTest):
load_balancer_id = self.load_balancer['id']
if cleanup:
self.addCleanup(self._cleanup_load_balancer, load_balancer_id)
LOG.info(('Waiting for lb status on create load balancer id: {0}'
.format(load_balancer_id)))
LOG.info('Waiting for lb status on create load balancer id: %s',
load_balancer_id)
self.load_balancer = self._wait_for_load_balancer_status(
load_balancer_id)

View File

@ -185,8 +185,8 @@ class BaseTestCase(manager.NetworkScenarioTest):
server = server['server']
self.servers_keypairs[server['id']] = keypair
LOG.info(('servers_keypairs looks like this(format): {0}'.format(
self.servers_keypairs)))
LOG.info('servers_keypairs looks like this(format): %s',
self.servers_keypairs)
if (config.network.public_network_id and not
config.network.project_networks_reachable):
@ -215,11 +215,11 @@ class BaseTestCase(manager.NetworkScenarioTest):
def _stop_server(self, name):
for sname, value in six.iteritems(self.servers):
if sname == name:
LOG.info(('STOPPING SERVER: {0}'.format(sname)))
LOG.info('STOPPING SERVER: %s', sname)
self.servers_client.stop_server(value)
waiters.wait_for_server_status(self.servers_client,
value, 'SHUTOFF')
LOG.info(('STOPPING SERVER COMPLETED!'))
LOG.info('STOPPING SERVER COMPLETED!')
def _start_server(self, name):
for sname, value in six.iteritems(self.servers):
@ -286,8 +286,8 @@ class BaseTestCase(manager.NetworkScenarioTest):
self.assertTrue(self.listener)
self.addCleanup(self._cleanup_listener, self.listener['id'],
load_balancer_id)
LOG.info(('Waiting for lb status on create listener id: {0}'.format(
self.listener['id'])))
LOG.info('Waiting for lb status on create listener id: %s',
self.listener['id'])
self._wait_for_load_balancer_status(load_balancer_id)
return self.listener
@ -333,8 +333,8 @@ class BaseTestCase(manager.NetworkScenarioTest):
**create_pool_kwargs)
self.assertTrue(self.pool)
self.addCleanup(self._cleanup_pool, self.pool['id'], load_balancer_id)
LOG.info(('Waiting for lb status on create pool id: {0}'.format(
self.pool['id'])))
LOG.info('Waiting for lb status on create pool id: %s',
self.pool['id'])
self._wait_for_load_balancer_status(load_balancer_id)
return self.pool
@ -385,7 +385,7 @@ class BaseTestCase(manager.NetworkScenarioTest):
lb_id=load_balancer_id,
pool_id=pool_id,
**create_member_kwargs)
LOG.info(('Waiting for lb status on create member...'))
LOG.info('Waiting for lb status on create member...')
self._wait_for_load_balancer_status(load_balancer_id)
self.members.append(member)
self.assertTrue(self.members)
@ -393,8 +393,7 @@ class BaseTestCase(manager.NetworkScenarioTest):
def _assign_floating_ip_to_lb_vip(self, lb):
public_network_id = config.network.public_network_id
LOG.info(('assign_floating_ip_to_lb_vip lb: {0} type: {1}'.format(
lb, type(lb))))
LOG.info('assign_floating_ip_to_lb_vip lb: %s type: %s', lb, type(lb))
port_id = lb['vip']['port_id']
floating_ip = self._create_floating_ip(
@ -438,8 +437,8 @@ class BaseTestCase(manager.NetworkScenarioTest):
"failed to reach status: {st}"
.format(fp=floating_ip, cst=floating_ip['status'],
st=status))
LOG.info("FloatingIP: {fp} is at status: {st}"
.format(fp=floating_ip, st=status))
LOG.info('FloatingIP: %(fp)s is at status: %(st)s',
{'fp': floating_ip, 'st': status})
def _create_load_balancer(self, ip_version=4, persistence_type=None):
"""Create a load balancer.
@ -454,8 +453,7 @@ class BaseTestCase(manager.NetworkScenarioTest):
**self.create_lb_kwargs)
lb_id = self.load_balancer['id']
self.addCleanup(self._cleanup_load_balancer, lb_id)
LOG.info(('Waiting for lb status on create load balancer id: {0}'
.format(lb_id)))
LOG.info('Waiting for lb status on create load balancer id: %s', lb_id)
self.load_balancer = self._wait_for_load_balancer_status(
load_balancer_id=lb_id,
provisioning_status='ACTIVE',
@ -505,8 +503,7 @@ class BaseTestCase(manager.NetworkScenarioTest):
lb_client.create_load_balancer_over_quota(
**self.create_lb_kwargs)
LOG.info(('Waiting for lb status on create load balancer id: {0}'
.format(lb_id)))
LOG.info('Waiting for lb status on create load balancer id: %s', lb_id)
self.load_balancer = self._wait_for_load_balancer_status(
load_balancer_id=lb_id,
provisioning_status='ACTIVE',
@ -529,9 +526,9 @@ class BaseTestCase(manager.NetworkScenarioTest):
else:
raise e
LOG.info(('provisioning_status: {0} operating_status: {1}'.format(
lb.get('provisioning_status'),
lb.get('operating_status'))))
LOG.info('provisioning_status: %s operating_status: %s',
lb.get('provisioning_status'),
lb.get('operating_status'))
if delete and lb.get('provisioning_status') == 'DELETED':
break
@ -592,11 +589,11 @@ class BaseTestCase(manager.NetworkScenarioTest):
members = members or ['server1_0', 'server1_1']
LOG.info(_('Checking all members are balanced...'))
self._wait_for_http_service(self.vip_ip)
LOG.info(_('Connection to {vip} is valid').format(vip=self.vip_ip))
LOG.info(_('Connection to %(vip)s is valid'), {'vip': self.vip_ip})
counters = self._send_concurrent_requests(self.vip_ip)
for member, counter in six.iteritems(counters):
LOG.info(_('Member {member} saw {counter} requests.').format(
member=member, counter=counter))
LOG.info(_('Member %(member)s saw %(counter)s requests.'),
{'member': member, 'counter': counter})
self.assertGreater(counter, 0,
'Member %s never balanced' % member)
for member in members:
@ -614,18 +611,18 @@ class BaseTestCase(manager.NetworkScenarioTest):
def _wait_for_http_service(self, check_ip, port=80):
def try_connect(check_ip, port):
try:
LOG.info(('checking connection to ip: {0} port: {1}'.format(
check_ip, port)))
LOG.info('checking connection to ip: %s port: %s',
check_ip, port)
resp = urllib2.urlopen("http://{0}:{1}/".format(check_ip,
port))
if resp.getcode() == 200:
return True
return False
except IOError as e:
LOG.info(('Got IOError in check connection: {0}'.format(e)))
LOG.info('Got IOError in check connection: %s', e)
return False
except error.HTTPError as e:
LOG.info(('Got HTTPError in check connection: {0}'.format(e)))
LOG.info('Got HTTPError in check connection: %s', e)
return False
timeout = config.validation.ping_timeout
@ -650,7 +647,7 @@ class BaseTestCase(manager.NetworkScenarioTest):
# of success and continue connection tries
except (error.HTTPError, error.URLError,
socket.timeout, socket.error) as e:
LOG.info(('Got Error in sending request: {0}'.format(e)))
LOG.info('Got Error in sending request: %s', e)
continue
return counters
@ -688,7 +685,7 @@ class BaseTestCase(manager.NetworkScenarioTest):
for ct in client_threads:
timeout -= ct.join(timeout)
if timeout <= 0:
LOG.error("Client thread {0} timed out".format(ct.name))
LOG.error("Client thread %s timed out", ct.name)
return dict()
for server in list(ct.counters):
if server not in total_counters:
@ -699,9 +696,9 @@ class BaseTestCase(manager.NetworkScenarioTest):
def _traffic_validation_after_stopping_server(self):
"""Check that the requests are sent to the only ACTIVE server."""
LOG.info(('Starting traffic_validation_after_stopping_server...'))
LOG.info('Starting traffic_validation_after_stopping_server...')
counters = self._send_requests(self.vip_ip, ["server1", "server2"])
LOG.info(('Counters is: {0}'.format(counters)))
LOG.info('Counters is: %s', counters)
# Assert that no traffic is sent to server1.
for member, counter in six.iteritems(counters):
@ -825,9 +822,8 @@ class BaseTestCase(manager.NetworkScenarioTest):
proc = subprocess.Popen(args, **subprocess_args)
stdout, stderr = proc.communicate()
if proc.returncode != 0:
LOG.error(("Command {0} returned with exit status {1},"
"output {2}, error {3}").format(cmd, proc.returncode,
stdout, stderr))
LOG.error('Command %s returned with exit status %s, output %s, '
'error %s', cmd, proc.returncode, stdout, stderr)
return stdout
def _set_quotas(self, project_id=None, load_balancer=20, listener=20,
@ -865,8 +861,8 @@ class BaseTestCase(manager.NetworkScenarioTest):
load_balancer_id = self.load_balancer['id']
if cleanup:
self.addCleanup(self._cleanup_load_balancer, load_balancer_id)
LOG.info(('Waiting for lb status on create load balancer id: {0}'
.format(load_balancer_id)))
LOG.info('Waiting for lb status on create load balancer id: %s',
load_balancer_id)
self.load_balancer = self._wait_for_load_balancer_status(
load_balancer_id)