Fix some LOG.debug invocations

LOG.debug("Hello %s" % xyz) should be LOG.debug("Hello %s", xyz).
This allows the logging package to skip creating the
formatted log message if the message is not going to be emitted because
of the current log level.
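
As a quick illustration of the difference, here is a minimal, self-contained
sketch (not part of this patch; LOG and xyz simply mirror the example above):

    import logging

    LOG = logging.getLogger(__name__)
    logging.basicConfig(level=logging.INFO)  # DEBUG records are filtered out

    xyz = "world"

    # Eager: "Hello %s" % xyz builds the string before LOG.debug() is even
    # called, so the formatting cost is paid although nothing is emitted.
    LOG.debug("Hello %s" % xyz)

    # Lazy: the format string and argument are passed separately; the logging
    # package interpolates them only if a handler actually emits the record.
    LOG.debug("Hello %s", xyz)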

Change-Id: Ie0c91cd940017fd80d1d84b0e56780a1da980858
Andreas Jaeger 2014-08-31 10:54:56 +02:00
parent 1a9d060600
commit 24cb089fee
19 changed files with 53 additions and 48 deletions


@@ -335,7 +335,7 @@ def remove_invalid_options(context, search_options, allowed_search_options):
     unknown_options = [opt for opt in search_options
                        if opt not in allowed_search_options]
     bad_options = ", ".join(unknown_options)
-    LOG.debug("Removing options '%(bad_options)s' from query" %
+    LOG.debug("Removing options '%(bad_options)s' from query",
               {"bad_options": bad_options})
     for opt in unknown_options:
         del search_options[opt]


@@ -155,7 +155,7 @@ class QuotaSetsController(object):
                 LOG.warn(msg)
                 raise webob.exc.HTTPBadRequest(explanation=msg)
-        LOG.debug("force update quotas: %s" % force_update)
+        LOG.debug("force update quotas: %s", force_update)
         if len(bad_keys) > 0:
             msg = _("Bad key(s) %s in quota_set") % ",".join(bad_keys)


@@ -112,7 +112,8 @@ class APIRouter(base_wsgi.Router):
                 continue
             LOG.debug('Extension %(ext_name)s extending resource: '
-                      '%(collection)s' % locals())
+                      '%(collection)s',
+                      {'ext_name': ext_name, 'collection': collection})
             resource = self.resources[collection]
             resource.register_actions(controller)


@@ -143,7 +143,7 @@ class ShareServerController(wsgi.Controller):
             msg = _("Share server's actual status is %(status)s, allowed "
                     "statuses for deletion are %(allowed_statuses)s.") % (data)
             raise exc.HTTPForbidden(explanation=msg)
-        LOG.debug("Deleting share server with id: %s." % id)
+        LOG.debug("Deleting share server with id: %s.", id)
         try:
             self.share_api.delete_share_server(context, share_server)
         except exception.ShareServerInUse as e:


@@ -98,7 +98,7 @@ def novaclient(context):
                      service_name=service_name,
                      endpoint_type=endpoint_type)
-    LOG.debug('Novaclient connection created using URL: %s' % url)
+    LOG.debug('Novaclient connection created using URL: %s', url)
     extensions = [assisted_volume_snapshots]


@@ -144,12 +144,15 @@ class Manager(base.Base):
             ticks_to_skip = self._ticks_to_skip[task_name]
             if ticks_to_skip > 0:
                 LOG.debug("Skipping %(full_task_name)s, %(ticks_to_skip)s"
-                          " ticks left until next run", locals())
+                          " ticks left until next run",
+                          {'full_task_name': full_task_name,
+                           'ticks_to_skip': ticks_to_skip})
                 self._ticks_to_skip[task_name] -= 1
                 continue
             self._ticks_to_skip[task_name] = task._ticks_between_runs
-            LOG.debug("Running periodic task %(full_task_name)s", locals())
+            LOG.debug("Running periodic task %(full_task_name)s",
+                      {'full_task_name': full_task_name})
             try:
                 task(self, context)


@@ -146,14 +146,14 @@ class FilterScheduler(driver.Scheduler):
         if not hosts:
             return None
-        LOG.debug("Filtered share %(hosts)s" % {"hosts": hosts})
+        LOG.debug("Filtered share %(hosts)s", {"hosts": hosts})
         # weighted_host = WeightedHost() ... the best
         #                 host for the job.
         weighed_hosts = self.host_manager.get_weighed_hosts(hosts,
                                                             filter_properties)
         best_host = weighed_hosts[0]
-        LOG.debug("Choosing for share: %(best_host)s"
-                  % {"best_host": best_host})
+        LOG.debug("Choosing for share: %(best_host)s",
+                  {"best_host": best_host})
         # NOTE(rushiagr): updating the available space parameters at same place
         best_host.obj.consume_from_share(share_properties)
         return best_host


@@ -37,7 +37,7 @@ class RetryFilter(filters.BaseHostFilter):
         pass_msg = "passes" if passes else "fails"
         LOG.debug("Host %(host)s %(pass_msg)s. Previously tried hosts: "
-                  "%(hosts)s" %
+                  "%(hosts)s",
                   {"host": host, "pass_msg": pass_msg, "hosts": hosts})
         # Host passes if it's not in the list of previously attempted hosts:


@@ -238,11 +238,12 @@ class HostManager(object):
         """Update the per-service capabilities based on this notification."""
         if service_name not in ('share'):
             LOG.debug('Ignoring %(service_name)s service update '
-                      'from %(host)s', locals())
+                      'from %(host)s',
+                      {'service_name': service_name, 'host': host})
             return
         LOG.debug("Received %(service_name)s service update from "
-                  "%(host)s." % {"service_name": service_name, "host": host})
+                  "%(host)s.", {"service_name": service_name, "host": host})
         # Copy the capabilities, so we don't modify the original dict
         capab_copy = dict(capabilities)


@@ -363,7 +363,7 @@ class Service(object):
         except exception.NotFound:
             self._create_service_ref(ctxt)
-        LOG.debug("Creating RPC server for service %s." % self.topic)
+        LOG.debug("Creating RPC server for service %s.", self.topic)
         target = messaging.Target(topic=self.topic, server=self.host)
         endpoints = [self.manager]
@@ -607,9 +607,9 @@ def wait():
         # should use secret flag when switch over to openstack-common
         if ("_password" in flag or "_key" in flag or
                 (flag == "sql_connection" and "mysql:" in flag_get)):
-            LOG.debug('%(flag)s : FLAG SET ' % {"flag": flag})
+            LOG.debug('%(flag)s : FLAG SET ', {"flag": flag})
         else:
-            LOG.debug('%(flag)s : %(flag_get)s' %
+            LOG.debug('%(flag)s : %(flag_get)s',
                       {"flag": flag, "flag_get": flag_get})
     try:
         _launcher.wait()


@@ -325,7 +325,7 @@ class API(base.Base):
         # NOTE(vponomaryov): we do not need 'all_tenants' opt anymore
         search_opts.pop('all_tenants', None)
         if search_opts:
-            LOG.debug("Searching for shares by: %s" % str(search_opts))
+            LOG.debug("Searching for shares by: %s", str(search_opts))
             results = []
             for s in shares:
                 # values in search_opts can be only strings
@@ -353,7 +353,7 @@ class API(base.Base):
             context, context.project_id)
         if search_opts:
-            LOG.debug("Searching by: %s" % str(search_opts))
+            LOG.debug("Searching by: %s", str(search_opts))
             results = []
             not_found = object()


@@ -250,7 +250,7 @@ class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver):
         try:
             if not self._is_device_mounted(share, server_details, volume):
                 LOG.debug("Mounting '%(dev)s' to path '%(path)s' on "
-                          "server '%(server)s'." % log_data)
+                          "server '%(server)s'.", log_data)
                 mount_cmd = ['sudo mkdir -p', mount_path, '&&']
                 mount_cmd.extend(['sudo mount', volume['mountpoint'],
                                   mount_path])
@@ -279,7 +279,7 @@ class GenericShareDriver(driver.ExecuteMixin, driver.ShareDriver):
         }
         if self._is_device_mounted(share, server_details):
             LOG.debug("Unmounting path '%(path)s' on server "
-                      "'%(server)s'." % log_data)
+                      "'%(server)s'.", log_data)
             unmount_cmd = ['sudo umount', mount_path, '&& sudo rmdir',
                            mount_path]
             self._ssh_exec(server_details, unmount_cmd)


@@ -224,7 +224,7 @@ class NetAppClusteredShareDriver(driver.ShareDriver):
     def setup_server(self, network_info, metadata=None):
         """Creates and configures new vserver."""
-        LOG.debug('Creating server %s' % network_info['server_id'])
+        LOG.debug('Creating server %s', network_info['server_id'])
         vserver_name = self._vserver_create_if_not_exists(network_info)
         return {'vserver_name': vserver_name}
@@ -311,7 +311,7 @@ class NetAppClusteredShareDriver(driver.ShareDriver):
             self._client.send_request('net-vlan-create', args)
         except naapi.NaApiError as e:
             if e.code == '13130':
-                LOG.debug("Vlan %(vlan)s already exists on port %(port)s" %
+                LOG.debug("Vlan %(vlan)s already exists on port %(port)s",
                           {'vlan': vlan, 'port': port})
             else:
                 raise exception.NetAppException(
@@ -320,8 +320,8 @@ class NetAppClusteredShareDriver(driver.ShareDriver):
                     {'vlan': vlan, 'port': port, 'err_msg': e.message})
         iface_name = (self.configuration.netapp_lif_name_template %
                       {'node': node, 'net_allocation_id': allocation_id})
-        LOG.debug('Creating LIF %(lif)r for vserver %(vserver)s '
-                  % {'lif': iface_name, 'vserver': vserver_name})
+        LOG.debug('Creating LIF %(lif)r for vserver %(vserver)s ',
+                  {'lif': iface_name, 'vserver': vserver_name})
         args = {'address': ip,
                 'administrative-status': 'up',
                 'data-protocols': [
@@ -390,7 +390,7 @@ class NetAppClusteredShareDriver(driver.ShareDriver):
             self.api_version, vserver=vserver_name,
             configuration=self.configuration)
         if not self._vserver_exists(vserver_name):
-            LOG.debug('Vserver %s does not exist, creating' % vserver_name)
+            LOG.debug('Vserver %s does not exist, creating', vserver_name)
             self._create_vserver(vserver_name)
         nodes = self._get_cluster_nodes()
@@ -547,7 +547,7 @@ class NetAppClusteredShareDriver(driver.ShareDriver):
             'domain': sec_service_data['domain'],
         }
         try:
-            LOG.debug("Trying to setup cifs server with data: %s" % data)
+            LOG.debug("Trying to setup cifs server with data: %s", data)
             vserver_client.send_request('cifs-server-create', data)
         except naapi.NaApiError as e:
             msg = _("Failed to create CIFS server entry. %s.") % e.message
@@ -585,7 +585,7 @@ class NetAppClusteredShareDriver(driver.ShareDriver):
     def get_available_aggregates_for_vserver(self, vserver, vserver_client):
         """Returns aggregate list for the vserver."""
-        LOG.debug('Finding available aggreagates for vserver %s' % vserver)
+        LOG.debug('Finding available aggreagates for vserver %s', vserver)
         response = vserver_client.send_request('vserver-get')
         vserver_info = response.get_child_by_name('attributes')\
             .get_child_by_name('vserver-info')
@@ -603,7 +603,7 @@ class NetAppClusteredShareDriver(driver.ShareDriver):
             aggr_name = aggr_elem.get_child_content('aggr-name')
             aggr_size = int(aggr_elem.get_child_content('aggr-availsize'))
             aggr_dict[aggr_name] = aggr_size
-        LOG.debug("Found available aggregates: %r" % aggr_dict)
+        LOG.debug("Found available aggregates: %r", aggr_dict)
         return aggr_dict
 
     @ensure_vserver
@@ -637,8 +637,8 @@ class NetAppClusteredShareDriver(driver.ShareDriver):
         aggregate = max(aggregates, key=lambda m: aggregates[m])
         LOG.debug('Creating volume %(share_name)s on '
-                  'aggregate %(aggregate)s'
-                  % {'share_name': share_name, 'aggregate': aggregate})
+                  'aggregate %(aggregate)s',
+                  {'share_name': share_name, 'aggregate': aggregate})
         args = {'containing-aggr-name': aggregate,
                 'size': str(share['size']) + 'g',
                 'volume': share_name,
@@ -653,7 +653,7 @@ class NetAppClusteredShareDriver(driver.ShareDriver):
         parent_share_name = self._get_valid_share_name(snapshot['share_id'])
         parent_snapshot_name = self._get_valid_snapshot_name(snapshot['id'])
-        LOG.debug('Creating volume from snapshot %s' % snapshot['id'])
+        LOG.debug('Creating volume from snapshot %s', snapshot['id'])
         args = {'volume': share_name,
                 'parent-volume': parent_share_name,
                 'parent-snapshot': parent_snapshot_name,
@@ -686,14 +686,14 @@ class NetAppClusteredShareDriver(driver.ShareDriver):
         """Sends share offline. Required before deleting a share."""
         share_name = self._get_valid_share_name(share['id'])
         args = {'name': share_name}
-        LOG.debug('Offline volume %s' % share_name)
+        LOG.debug('Offline volume %s', share_name)
         vserver_client.send_request('volume-offline', args)
 
     def _delete_share(self, share, vserver_client):
         """Destroys share on a target OnTap device."""
         share_name = self._get_valid_share_name(share['id'])
         args = {'name': share_name}
-        LOG.debug('Deleting share %s' % share_name)
+        LOG.debug('Deleting share %s', share_name)
         vserver_client.send_request('volume-destroy', args)
 
     @ensure_vserver
@@ -741,7 +741,7 @@ class NetAppClusteredShareDriver(driver.ShareDriver):
         snapshot_name = self._get_valid_snapshot_name(snapshot['id'])
         args = {'volume': share_name,
                 'snapshot': snapshot_name}
-        LOG.debug('Creating snapshot %s' % snapshot_name)
+        LOG.debug('Creating snapshot %s', snapshot_name)
         vserver_client.send_request('snapshot-create', args)
 
     def _remove_export(self, share, vserver_client):
@@ -766,7 +766,7 @@ class NetAppClusteredShareDriver(driver.ShareDriver):
         self._is_snapshot_busy(share_name, snapshot_name, vserver_client)
         args = {'snapshot': snapshot_name,
                 'volume': share_name}
-        LOG.debug('Deleting snapshot %s' % snapshot_name)
+        LOG.debug('Deleting snapshot %s', snapshot_name)
         vserver_client.send_request('snapshot-delete', args)
 
     def _is_snapshot_busy(self, share_name, snapshot_name, vserver_client):
@@ -784,7 +784,7 @@ class NetAppClusteredShareDriver(driver.ShareDriver):
        """Unmounts share (required before deleting)."""
         share_name = self._get_valid_share_name(share['id'])
         args = {'volume-name': share_name}
-        LOG.debug('Unmounting volume %s' % share_name)
+        LOG.debug('Unmounting volume %s', share_name)
         vserver_client.send_request('volume-unmount', args)
 
     @ensure_vserver
@@ -1025,7 +1025,7 @@ class NetAppClusteredNFSHelper(NetAppNASHelperBase):
         req_bodies[1]['rules']['exports-rule-info-2']['pathname'] = (
             '/vol' + volume_path)
-        LOG.debug('Appending nfs rules %r' % rules)
+        LOG.debug('Appending nfs rules %r', rules)
         try:
             if self.nfs_exports_with_prefix:
                 self._client.send_request(
@@ -1060,7 +1060,7 @@ class NetAppClusteredNFSHelper(NetAppNASHelperBase):
                 }
             }
         }
-        LOG.debug('Deleting NFS rules for share %s' % share['id'])
+        LOG.debug('Deleting NFS rules for share %s', share['id'])
         self._client.send_request('nfs-exportfs-delete-rules', args)
 
     def allow_access(self, context, share, access):
@@ -1123,8 +1123,8 @@ class NetAppClusteredNFSHelper(NetAppNASHelperBase):
         for allowed_host in allowed_hosts:
             if 'exports-hostname-info' in allowed_host.get_name():
                 existing_rules.append(allowed_host.get_child_content('name'))
-        LOG.debug('Found existing rules %(rules)r for share %(share)s'
-                  % {'rules': existing_rules, 'share': share['id']})
+        LOG.debug('Found existing rules %(rules)r for share %(share)s',
+                  {'rules': existing_rules, 'share': share['id']})
         return existing_rules


@@ -228,7 +228,7 @@ class ServiceInstanceManager(object):
         if not description:
             description = "This security group is intended "\
                           "to be used by share service."
-        LOG.debug("Creating security group with name '%s'." % name)
+        LOG.debug("Creating security group with name '%s'.", name)
         sg = self.compute_api.security_group_create(
             context, name, description)
         for protocol, ports in constants.SERVICE_INSTANCE_SECGROUP_DATA:
@@ -487,7 +487,7 @@ class ServiceInstanceManager(object):
             if e.kwargs['code'] != 400:
                 raise
             LOG.debug('Subnet %(subnet_id)s is already attached to the '
-                      'router %(router_id)s.' %
+                      'router %(router_id)s.',
                       {'subnet_id': service_subnet['id'],
                        'router_id': router['id']})
@@ -655,7 +655,7 @@ class ServiceInstanceManager(object):
             if e.kwargs['code'] != 404:
                 raise
             LOG.debug('Subnet %(subnet_id)s is not attached to the '
-                      'router %(router_id)s.' %
+                      'router %(router_id)s.',
                       {'subnet_id': subnet_id,
                        'router_id': router_id})
         self.neutron_api.update_subnet(subnet_id, '')


@@ -289,7 +289,7 @@ class ShareManager(manager.SchedulerDependentManager):
             if share_server and not share_server.shares:
                 LOG.debug("Scheduled deletion of share-server "
                           "with id '%s' automatically by "
-                          "deletion of last share." % share_server['id'])
+                          "deletion of last share.", share_server['id'])
                 self.delete_share_server(context, share_server)
 
     def create_snapshot(self, context, share_id, snapshot_id):


@@ -58,7 +58,7 @@ def get_all_types(context, inactive=0, search_opts={}):
     vol_types = db.volume_type_get_all(context, inactive)
     if search_opts:
-        LOG.debug("Searching by: %s" % search_opts)
+        LOG.debug("Searching by: %s", search_opts)
         def _check_extra_specs_match(vol_type, searchdict):
             for k, v in six.iteritems(searchdict):


@@ -27,4 +27,4 @@ class LoginTest(integrated_helpers._IntegratedTestBase):
         """Simple check - we list shares - so we know we're logged in."""
         shares = self.api.get_shares()
         for share in shares:
-            LOG.debug("share: %s" % share)
+            LOG.debug("share: %s", share)


@@ -84,7 +84,7 @@ def find_config(config_path):
 def fetchfile(url, target):
-    LOG.debug('Fetching %s' % url)
+    LOG.debug('Fetching %s', url)
     execute('curl', '--fail', url, '-o', target)


@@ -101,7 +101,7 @@ def cinderclient(context):
                      service_name=service_name,
                      endpoint_type=endpoint_type)
-    LOG.debug('Cinderclient connection created using URL: %s' % url)
+    LOG.debug('Cinderclient connection created using URL: %s', url)
     c = cinder_client.Client(context.user_id,
                              context.auth_token,