Completed first pass at converting all localized strings with multiple format substitutions.

Ed Leafe 2011-01-18 21:00:28 -05:00
parent 8e6684a58e
commit 68c9c89300
36 changed files with 333 additions and 272 deletions
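
The pattern applied across the diff below is sketched here for reference: translatable strings that interpolated several positional %s arguments are rewritten to use named %(var)s placeholders filled from locals(), so a translator can reorder the substitutions in a localized message. This is an illustrative sketch mirroring the attach_volume hunk in the CloudController changes further down; the gettext stub, logger setup, and function name are assumptions added only to make the snippet self-contained.

import gettext
import logging

_ = gettext.gettext                  # stand-in for the _() normally installed by gettext setup
LOG = logging.getLogger(__name__)


def attach_volume_example(volume_id, instance_id, device):
    # Before: positional placeholders; the argument order is fixed, so a
    # translator cannot rearrange the values in a localized string.
    LOG.debug(_("Attach volume %s to instance %s at %s"),
              volume_id, instance_id, device)

    # After: named placeholders interpolated from locals(); a translation may
    # place %(volume_id)s, %(instance_id)s and %(device)s in any order.
    msg = _("Attach volume %(volume_id)s to instance %(instance_id)s"
            " at %(device)s") % locals()
    LOG.debug(msg)

Because % locals() only sees simple local names, several hunks first bind attribute or dict lookups to throwaway locals (for example pid = context.project_id or mp = volume_ref['mountpoint']) before formatting the message.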

View File

@ -55,7 +55,7 @@ def run_app(paste_config_file):
if config is None:
LOG.debug(_("No paste configuration for app: %s"), api)
continue
LOG.debug(_("App Config: %s\n%r"), api, config)
LOG.debug(_("App Config: %(api)s\n%(config)r") % locals())
wsgi.paste_config_to_flags(config, {
"verbose": FLAGS.verbose,
"%s_host" % api: config.get('host', '0.0.0.0'),

View File

@ -120,9 +120,9 @@ def main():
mac = argv[2]
ip = argv[3]
hostname = argv[4]
LOG.debug(_("Called %s for mac %s with ip %s and "
"hostname %s on interface %s"),
action, mac, ip, hostname, interface)
msg = _("Called %(action)s for mac %(mac)s with ip %(ip)s and"
" hostname %(hostname)s on interface %(interface)s") % locals()
LOG.debug(msg)
globals()[action + '_lease'](mac, ip, hostname, interface)
else:
print init_leases(interface)

View File

@ -26,7 +26,7 @@ def process_todo_nodes(app, doctree, fromdocname):
# reading through docutils for the proper way to construct an empty list
lists = []
for i in xrange(5):
lists.append(nodes.bullet_list("", nodes.Text('','')));
lists.append(nodes.bullet_list("", nodes.Text('','')))
lists[i].remove(lists[i][0])
lists[i].set_class('todo_list')
@ -42,7 +42,8 @@ def process_todo_nodes(app, doctree, fromdocname):
# Create a reference
newnode = nodes.reference('', '')
link = _('%s, line %d') % (filename, todo_info['lineno']);
line_info = todo_info['lineno']
link = _('%(filename)s, line %(line_info)d') % locals()
innernode = nodes.emphasis(link, link)
newnode['refdocname'] = todo_info['docname']

View File

@ -131,9 +131,11 @@ class Lockout(wsgi.Middleware):
# NOTE(vish): To use incr, failures has to be a string.
self.mc.set(failures_key, '1', time=FLAGS.lockout_window * 60)
elif failures >= FLAGS.lockout_attempts:
LOG.warn(_('Access key %s has had %d failed authentications'
' and will be locked out for %d minutes.'),
access_key, failures, FLAGS.lockout_minutes)
lock_mins = FLAGS.lockout_minutes
msg = _('Access key %(access_key)s has had %(failures)d'
' failed authentications and will be locked out'
' for %(lock_mins)d minutes.') % locals()
LOG.warn(msg)
self.mc.set(failures_key, str(failures),
time=FLAGS.lockout_minutes * 60)
return res
@ -179,8 +181,10 @@ class Authenticate(wsgi.Middleware):
project=project,
remote_address=remote_address)
req.environ['ec2.context'] = ctxt
LOG.audit(_('Authenticated Request For %s:%s)'), user.name,
project.name, context=req.environ['ec2.context'])
uname = user.name
pname = project.name
msg = _('Authenticated Request For %(uname)s:%(pname)s') % locals()
LOG.audit(msg, context=req.environ['ec2.context'])
return self.application
@ -206,7 +210,7 @@ class Requestify(wsgi.Middleware):
LOG.debug(_('action: %s'), action)
for key, value in args.items():
LOG.debug(_('arg: %s\t\tval: %s'), key, value)
LOG.debug(_('arg: %(key)s\t\tval: %(value)s') % locals())
# Success!
api_request = apirequest.APIRequest(self.controller, action, args)
@ -277,8 +281,8 @@ class Authorizer(wsgi.Middleware):
if self._matches_any_role(context, allowed_roles):
return self.application
else:
LOG.audit(_("Unauthorized request for controller=%s "
"and action=%s"), controller, action, context=context)
LOG.audit(_('Unauthorized request for controller=%(controller)s '
'and action=%(action)s') % locals(), context=context)
raise webob.exc.HTTPUnauthorized()
def _matches_any_role(self, context, roles):

View File

@ -111,19 +111,23 @@ class AdminController(object):
"""Add or remove a role for a user and project."""
if operation == 'add':
if project:
LOG.audit(_("Adding role %s to user %s for project %s"), role,
user, project, context=context)
msg = _("Adding role %(role)s to user %(user)s"
" for project %(project)s") % locals()
LOG.audit(msg, context=context)
else:
LOG.audit(_("Adding sitewide role %s to user %s"), role, user,
context=context)
msg = _("Adding sitewide role %(role)s to"
" user %(user)s") % locals()
LOG.audit(msg, context=context)
manager.AuthManager().add_role(user, role, project)
elif operation == 'remove':
if project:
LOG.audit(_("Removing role %s from user %s for project %s"),
role, user, project, context=context)
msg = _("Removing role %(role)s from user %(user)s"
" for project %(project)s") % locals()
LOG.audit(msg, context=context)
else:
LOG.audit(_("Removing sitewide role %s from user %s"), role,
user, context=context)
msg = _("Removing sitewide role %(role)s"
" from user %(user)s") % locals()
LOG.audit(msg, context=context)
manager.AuthManager().remove_role(user, role, project)
else:
raise exception.ApiError(_('operation must be add or remove'))
@ -139,8 +143,9 @@ class AdminController(object):
project = name
project = manager.AuthManager().get_project(project)
user = manager.AuthManager().get_user(name)
LOG.audit(_("Getting x509 for user: %s on project: %s"), name,
project, context=context)
msg = _("Getting x509 for user: %(name)s"
" on project: %(project)s") % locals()
LOG.audit(msg, context=context)
return user_dict(user, base64.b64encode(project.get_credentials(user)))
def describe_project(self, context, name, **kwargs):
@ -156,8 +161,9 @@ class AdminController(object):
def register_project(self, context, name, manager_user, description=None,
member_users=None, **kwargs):
"""Creates a new project"""
LOG.audit(_("Create project %s managed by %s"), name, manager_user,
context=context)
msg = _("Create project %(name)s managed by"
" %(manager_user)s") % locals()
LOG.audit(msg, context=context)
return project_dict(
manager.AuthManager().create_project(
name,
@ -181,12 +187,13 @@ class AdminController(object):
**kwargs):
"""Add or remove a user from a project."""
if operation == 'add':
LOG.audit(_("Adding user %s to project %s"), user, project,
context=context)
msg = _("Adding user %(user)s to project %(project)s") % locals()
LOG.audit(msg, context=context)
manager.AuthManager().add_to_project(user, project)
elif operation == 'remove':
LOG.audit(_("Removing user %s from project %s"), user, project,
context=context)
msg = _("Removing user %(user)s from"
" project %(project)s") % locals()
LOG.audit(msg, context=context)
manager.AuthManager().remove_from_project(user, project)
else:
raise exception.ApiError(_('operation must be add or remove'))

View File

@ -93,8 +93,10 @@ class APIRequest(object):
method = getattr(self.controller,
_camelcase_to_underscore(self.action))
except AttributeError:
_error = _('Unsupported API request: controller = %s,'
'action = %s') % (self.controller, self.action)
controller = self.controller
action = self.action
_error = _('Unsupported API request: controller = %(controller)s,'
' action = %(action)s') % locals()
LOG.exception(_error)
# TODO: Raise custom exception, trap in apiserver,
# and reraise as 400 error.

View File

@ -601,8 +601,9 @@ class CloudController(object):
def attach_volume(self, context, volume_id, instance_id, device, **kwargs):
volume_id = ec2_id_to_id(volume_id)
instance_id = ec2_id_to_id(instance_id)
LOG.audit(_("Attach volume %s to instance %s at %s"), volume_id,
instance_id, device, context=context)
msg = _("Attach volume %(volume_id)s to instance %(instance_id)s"
" at %(device)s") % locals()
LOG.audit(msg, context=context)
self.compute_api.attach_volume(context,
instance_id=instance_id,
volume_id=volume_id,
@ -751,8 +752,8 @@ class CloudController(object):
return {'releaseResponse': ["Address released."]}
def associate_address(self, context, instance_id, public_ip, **kwargs):
LOG.audit(_("Associate address %s to instance %s"), public_ip,
instance_id, context=context)
LOG.audit(_("Associate address %(public_ip)s to"
" instance %(instance_id)s") % locals(), context=context)
instance_id = ec2_id_to_id(instance_id)
self.compute_api.associate_floating_ip(context,
instance_id=instance_id,
@ -840,8 +841,9 @@ class CloudController(object):
if image_location is None and 'name' in kwargs:
image_location = kwargs['name']
image_id = self.image_service.register(context, image_location)
LOG.audit(_("Registered image %s with id %s"), image_location,
image_id, context=context)
msg = _("Registered image %(image_location)s"
" with id %(image_id)s") % locals()
LOG.audit(msg, context=context)
return {'imageId': image_id}
def describe_image_attribute(self, context, image_id, attribute, **kwargs):

View File

@ -132,9 +132,9 @@ class Controller(wsgi.Controller):
if image_id in mapping:
return mapping[image_id]
raise exception.NotFound(
_("No entry for image '%s' in mapping file '%s'") %
(image_id, mapping_filename))
msg = _("No entry for image '%(image_id)s'"
" in mapping file '%(mapping_filename)s'") % locals()
raise exception.NotFound(msg)
def create(self, req):
""" Creates a new server for a given user """

View File

@ -473,8 +473,8 @@ class LdapDriver(object):
raise exception.NotFound("The group at dn %s doesn't exist" %
group_dn)
if self.__is_in_group(uid, group_dn):
raise exception.Duplicate(_("User %s is already a member of "
"the group %s") % (uid, group_dn))
raise exception.Duplicate(_("User %(uid)s is already a member of "
"the group %(group_dn)s") % locals())
attr = [(self.ldap.MOD_ADD, 'member', self.__uid_to_dn(uid))]
self.conn.modify_s(group_dn, attr)

View File

@ -272,16 +272,22 @@ class AuthManager(object):
project = self.get_project(project_id)
if project == None:
LOG.audit(_("failed authorization: no project named %s (user=%s)"),
project_id, user.name)
pjid = project_id
uname = user.name
LOG.audit(_("failed authorization: no project named %(pjid)s"
" (user=%(uname)s)") % locals())
raise exception.NotFound(_('No project called %s could be found')
% project_id)
if not self.is_admin(user) and not self.is_project_member(user,
project):
LOG.audit(_("Failed authorization: user %s not admin and not "
"member of project %s"), user.name, project.name)
raise exception.NotFound(_('User %s is not a member of project %s')
% (user.id, project.id))
uname = user.name
uid = user.id
pjname = project.name
pjid = project.id
LOG.audit(_("Failed authorization: user %(uname)s not admin"
" and not member of project %(pjname)s") % locals())
raise exception.NotFound(_('User %(uid)s is not a member of'
' project %(pjid)s') % locals())
if check_type == 's3':
sign = signer.Signer(user.secret.encode())
expected_signature = sign.s3_authorization(headers, verb, path)
@ -408,14 +414,16 @@ class AuthManager(object):
raise exception.NotFound(_("The %s role can not be found") % role)
if project is not None and role in FLAGS.global_roles:
raise exception.NotFound(_("The %s role is global only") % role)
uid = User.safe_id(user)
pid = Project.safe_id(project)
if project:
LOG.audit(_("Adding role %s to user %s in project %s"), role,
User.safe_id(user), Project.safe_id(project))
LOG.audit(_("Adding role %(role)s to user %(uid)s"
" in project %(pid)s") % locals())
else:
LOG.audit(_("Adding sitewide role %s to user %s"), role,
User.safe_id(user))
LOG.audit(_("Adding sitewide role %(role)s to user %(uid)s")
% locals())
with self.driver() as drv:
drv.add_role(User.safe_id(user), role, Project.safe_id(project))
drv.add_role(uid, role, pid)
def remove_role(self, user, role, project=None):
"""Removes role for user
@ -434,14 +442,16 @@ class AuthManager(object):
@type project: Project or project_id
@param project: Project in which to remove local role.
"""
uid = User.safe_id(user)
pid = Project.safe_id(project)
if project:
LOG.audit(_("Removing role %s from user %s on project %s"),
role, User.safe_id(user), Project.safe_id(project))
LOG.audit(_("Removing role %(role)s from user %(uid)s"
" on project %(pid)s") % locals())
else:
LOG.audit(_("Removing sitewide role %s from user %s"), role,
User.safe_id(user))
LOG.audit(_("Removing sitewide role %(role)s"
" from user %(uid)s") % locals())
with self.driver() as drv:
drv.remove_role(User.safe_id(user), role, Project.safe_id(project))
drv.remove_role(uid, role, pid)
@staticmethod
def get_roles(project_roles=True):
@ -502,8 +512,8 @@ class AuthManager(object):
description,
member_users)
if project_dict:
LOG.audit(_("Created project %s with manager %s"), name,
manager_user)
LOG.audit(_("Created project %(name)s with"
" manager %(manager_user)s") % locals())
project = Project(**project_dict)
return project
@ -530,11 +540,12 @@ class AuthManager(object):
def add_to_project(self, user, project):
"""Add user to project"""
LOG.audit(_("Adding user %s to project %s"), User.safe_id(user),
Project.safe_id(project))
uid = User.safe_id(user)
pid = Project.safe_id(project)
LOG.audit(_("Adding user %(uid)s to project %(pid)s") % locals())
with self.driver() as drv:
return drv.add_to_project(User.safe_id(user),
Project.safe_id(project))
Project.safe_id(project))
def is_project_manager(self, user, project):
"""Checks if user is project manager"""
@ -550,11 +561,11 @@ class AuthManager(object):
def remove_from_project(self, user, project):
"""Removes a user from a project"""
LOG.audit(_("Remove user %s from project %s"), User.safe_id(user),
Project.safe_id(project))
uid = User.safe_id(user)
pid = Project.safe_id(project)
LOG.audit(_("Remove user %(uid)s from project %(pid)s") % locals())
with self.driver() as drv:
return drv.remove_from_project(User.safe_id(user),
Project.safe_id(project))
return drv.remove_from_project(uid, pid)
@staticmethod
def get_project_vpn_data(project):
@ -634,7 +645,10 @@ class AuthManager(object):
user_dict = drv.create_user(name, access, secret, admin)
if user_dict:
rv = User(**user_dict)
LOG.audit(_("Created user %s (admin: %r)"), rv.name, rv.admin)
rvname = rv.name
rvadmin = rv.admin
LOG.audit(_("Created user %(rvname)s"
" (admin: %(rvadmin)r)") % locals())
return rv
def delete_user(self, user):
@ -656,7 +670,8 @@ class AuthManager(object):
if secret_key:
LOG.audit(_("Secret Key change for user %s"), uid)
if admin is not None:
LOG.audit(_("Admin status set to %r for user %s"), admin, uid)
LOG.audit(_("Admin status set to %(admin)r"
" for user %(uid)s") % locals())
with self.driver() as drv:
drv.modify_user(uid, access_key, secret_key, admin)

View File

@ -92,8 +92,9 @@ class API(base.Base):
type_data = instance_types.INSTANCE_TYPES[instance_type]
num_instances = quota.allowed_instances(context, max_count, type_data)
if num_instances < min_count:
LOG.warn(_("Quota exceeeded for %s, tried to run %s instances"),
context.project_id, min_count)
pid = context.project_id
LOG.warn(_("Quota exceeded for %(pid)s,"
" tried to run %(min_count)s instances") % locals())
raise quota.QuotaError(_("Instance quota exceeded. You can only "
"run %s more instances of this type.") %
num_instances, "InstanceLimitExceeded")
@ -183,8 +184,10 @@ class API(base.Base):
instance = self.update(context, instance_id, **updates)
instances.append(instance)
LOG.debug(_("Casting to scheduler for %s/%s's instance %s"),
context.project_id, context.user_id, instance_id)
pid = context.project_id
uid = context.user_id
LOG.debug(_("Casting to scheduler for %(pid)s/%(uid)s's"
" instance %(instance_id)s") % locals())
rpc.cast(context,
FLAGS.scheduler_topic,
{"method": "run_instance",

View File

@ -77,8 +77,8 @@ def checks_instance_lock(function):
LOG.info(_("check_instance_lock: decorating: |%s|"), function,
context=context)
LOG.info(_("check_instance_lock: arguments: |%s| |%s| |%s|"),
self, context, instance_id, context=context)
LOG.info(_("check_instance_lock: arguments: |%(self)s| |%(context)s|"
" |%(instance_id)s|") % locals(), context=context)
locked = self.get_lock(context, instance_id)
admin = context.is_admin
LOG.info(_("check_instance_lock: locked: |%s|"), locked,
@ -278,11 +278,11 @@ class ComputeManager(manager.Manager):
LOG.audit(_("Rebooting instance %s"), instance_id, context=context)
if instance_ref['state'] != power_state.RUNNING:
state = instance_ref['state']
running = power_state.RUNNING
LOG.warn(_('trying to reboot a non-running '
'instance: %s (state: %s excepted: %s)'),
instance_id,
instance_ref['state'],
power_state.RUNNING,
'instance: %(instance_id)s (state: %(state)s '
'expected: %(running)s)') % locals(),
context=context)
self.db.instance_set_state(context,
@ -307,9 +307,11 @@ class ComputeManager(manager.Manager):
LOG.audit(_('instance %s: snapshotting'), instance_id,
context=context)
if instance_ref['state'] != power_state.RUNNING:
state = instance_ref['state']
running = power_state.RUNNING
LOG.warn(_('trying to snapshot a non-running '
'instance: %s (state: %s excepted: %s)'),
instance_id, instance_ref['state'], power_state.RUNNING)
'instance: %(instance_id)s (state: %(state)s '
'expected: %(running)s)') % locals())
self.driver.snapshot(instance_ref, image_id)
@ -517,8 +519,8 @@ class ComputeManager(manager.Manager):
"""Attach a volume to an instance."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
LOG.audit(_("instance %s: attaching volume %s to %s"), instance_id,
volume_id, mountpoint, context=context)
LOG.audit(_("instance %(instance_id)s: attaching volume %(volume_id)s"
" to %(mountpoint)s") % locals(), context=context)
dev_path = self.volume_manager.setup_compute_volume(context,
volume_id)
try:
@ -533,8 +535,8 @@ class ComputeManager(manager.Manager):
# NOTE(vish): The inline callback eats the exception info so we
# log the traceback here and reraise the same
# ecxception below.
LOG.exception(_("instance %s: attach failed %s, removing"),
instance_id, mountpoint, context=context)
LOG.exception(_("instance %(instance_id)s: attach failed"
" %(mountpoint)s, removing") % locals(), context=context)
self.volume_manager.remove_compute_volume(context,
volume_id)
raise exc
@ -548,9 +550,9 @@ class ComputeManager(manager.Manager):
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
volume_ref = self.db.volume_get(context, volume_id)
LOG.audit(_("Detach volume %s from mountpoint %s on instance %s"),
volume_id, volume_ref['mountpoint'], instance_id,
context=context)
mp = volume_ref['mountpoint']
LOG.audit(_("Detach volume %(volume_id)s from mountpoint %(mp)s"
" on instance %(instance_id)s") % locals(), context=context)
if instance_ref['name'] not in self.driver.list_instances():
LOG.warn(_("Detaching volume from unknown instance %s"),
instance_id, context=context)

View File

@ -352,8 +352,9 @@ class Instance(object):
rd += rd_bytes
wr += wr_bytes
except TypeError:
LOG.error(_('Cannot get blockstats for "%s" on "%s"'),
disk, self.instance_id)
iid = self.instance_id
LOG.error(_('Cannot get blockstats for "%(disk)s"'
' on "%(iid)s"') % locals())
raise
return '%d:%d' % (rd, wr)
@ -374,8 +375,9 @@ class Instance(object):
rx += stats[0]
tx += stats[4]
except TypeError:
LOG.error(_('Cannot get ifstats for "%s" on "%s"'),
interface, self.instance_id)
iid = self.instance_id
LOG.error(_('Cannot get ifstats for "%(interface)s"'
' on "%(iid)s"') % locals())
raise
return '%d:%d' % (rx, tx)

View File

@ -40,6 +40,7 @@ for i in xrange(FLAGS.sql_max_retries):
models.register_models()
break
except OperationalError:
LOG.exception(_("Data store %s is unreachable."
" Trying again in %d seconds."),
FLAGS.sql_connection, FLAGS.sql_retry_interval)
fconn = FLAGS.sql_connection
fint = FLAGS.sql_retry_interval
LOG.exception(_("Data store %(fconn)s is unreachable."
" Trying again in %(fint)d seconds.") % locals())

View File

@ -247,7 +247,8 @@ def service_get_by_args(context, host, binary):
filter_by(deleted=can_read_deleted(context)).\
first()
if not result:
raise exception.NotFound(_('No service for %s, %s') % (host, binary))
raise exception.NotFound(_('No service for %(host)s, %(binary)s')
% locals())
return result
@ -934,8 +935,8 @@ def key_pair_get(context, user_id, name, session=None):
filter_by(deleted=can_read_deleted(context)).\
first()
if not result:
raise exception.NotFound(_('no keypair for user %s, name %s') %
(user_id, name))
raise exception.NotFound(_('no keypair for user %(user_id)s,'
' name %(name)s') % locals())
return result
@ -1536,8 +1537,8 @@ def security_group_get_by_name(context, project_id, group_name):
first()
if not result:
raise exception.NotFound(
_('No security group named %s for project: %s')
% (group_name, project_id))
_('No security group named %(group_name)s'
' for project: %(project_id)s') % locals())
return result
@ -1921,8 +1922,8 @@ def console_pool_get(context, pool_id):
filter_by(id=pool_id).\
first()
if not result:
raise exception.NotFound(_("No console pool with id %(pool_id)s") %
{'pool_id': pool_id})
raise exception.NotFound(_("No console pool with id %(pool_id)s")
% locals())
return result
@ -1938,12 +1939,9 @@ def console_pool_get_by_host_type(context, compute_host, host,
options(joinedload('consoles')).\
first()
if not result:
raise exception.NotFound(_('No console pool of type %(type)s '
raise exception.NotFound(_('No console pool of type %(console_type)s '
'for compute host %(compute_host)s '
'on proxy host %(host)s') %
{'type': console_type,
'compute_host': compute_host,
'host': host})
'on proxy host %(host)s') % locals())
return result
@ -1981,9 +1979,7 @@ def console_get_by_pool_instance(context, pool_id, instance_id):
first()
if not result:
raise exception.NotFound(_('No console for instance %(instance_id)s '
'in pool %(pool_id)s') %
{'instance_id': instance_id,
'pool_id': pool_id})
'in pool %(pool_id)s') % locals())
return result
@ -2004,9 +2000,7 @@ def console_get(context, console_id, instance_id=None):
query = query.filter_by(instance_id=instance_id)
result = query.options(joinedload('pool')).first()
if not result:
idesc = (_("on instance %s") % instance_id) if instance_id else ""
idesc = (_("on instance %s") % instance_id) if instance_id else ""
raise exception.NotFound(_("No console with id %(console_id)s"
" %(instance)s") %
{'instance': idesc,
'console_id': console_id})
" %(idesc)s") % locals())
return result

View File

@ -33,8 +33,9 @@ class ProcessExecutionError(IOError):
description = _("Unexpected error while running command.")
if exit_code is None:
exit_code = '-'
message = _("%s\nCommand: %s\nExit code: %s\nStdout: %r\nStderr: %r")\
% (description, cmd, exit_code, stdout, stderr)
message = _("%(description)s\nCommand: %(cmd)s\n"
"Exit code: %(exit_code)s\nStdout: %(stdout)r\n"
"Stderr: %(stderr)r") % locals()
IOError.__init__(self, message)

View File

@ -45,8 +45,9 @@ class Exchange(object):
self._routes = {}
def publish(self, message, routing_key=None):
LOG.debug(_('(%s) publish (key: %s) %s'),
self.name, routing_key, message)
nm = self.name
LOG.debug(_('(%(nm)s) publish (key: %(routing_key)s)'
' %(message)s') % locals())
routing_key = routing_key.split('.')[0]
if routing_key in self._routes:
for f in self._routes[routing_key]:
@ -92,8 +93,8 @@ class Backend(base.BaseBackend):
def queue_bind(self, queue, exchange, routing_key, **kwargs):
global EXCHANGES
global QUEUES
LOG.debug(_('Binding %s to %s with key %s'),
queue, exchange, routing_key)
LOG.debug(_('Binding %(queue)s to %(exchange)s with'
' key %(routing_key)s') % locals())
EXCHANGES[exchange].bind(QUEUES[queue].push, routing_key)
def declare_consumer(self, queue, callback, *args, **kwargs):
@ -117,7 +118,7 @@ class Backend(base.BaseBackend):
content_type=content_type,
content_encoding=content_encoding)
message.result = True
LOG.debug(_('Getting from %s: %s'), queue, message)
LOG.debug(_('Getting from %(queue)s: %(message)s') % locals())
return message
def prepare_message(self, message_data, delivery_mode,

View File

@ -198,8 +198,9 @@ class NetworkManager(manager.Manager):
raise exception.Error(_("IP %s leased that isn't associated") %
address)
if instance_ref['mac_address'] != mac:
raise exception.Error(_("IP %s leased to bad mac %s vs %s") %
(address, instance_ref['mac_address'], mac))
inst_addr = instance_ref['mac_address']
raise exception.Error(_("IP %(address)s leased to bad"
" mac %(inst_addr)s vs %(mac)s") % locals())
now = datetime.datetime.utcnow()
self.db.fixed_ip_update(context,
fixed_ip_ref['address'],
@ -218,8 +219,9 @@ class NetworkManager(manager.Manager):
raise exception.Error(_("IP %s released that isn't associated") %
address)
if instance_ref['mac_address'] != mac:
raise exception.Error(_("IP %s released from bad mac %s vs %s") %
(address, instance_ref['mac_address'], mac))
inst_addr = instance_ref['mac_address']
raise exception.Error(_("IP %(address)s released from"
" bad mac %(inst_addr)s vs %(mac)s") % locals())
if not fixed_ip_ref['leased']:
LOG.warn(_("IP %s released that was not leased"), address,
context=context)

View File

@ -180,7 +180,7 @@ class S3(ErrorHandlingResource):
def render_GET(self, request): # pylint: disable-msg=R0201
"""Renders the GET request for a list of buckets as XML"""
LOG.debug(_('List of buckets requested'), context=request.context)
buckets = [b for b in bucket.Bucket.all() \
buckets = [b for b in bucket.Bucket.all()
if b.is_authorized(request.context)]
render_xml(request, {"ListAllMyBucketsResult": {
@ -268,12 +268,14 @@ class ObjectResource(ErrorHandlingResource):
Raises NotAuthorized if user in request context is not
authorized to delete the object.
"""
LOG.debug(_("Getting object: %s / %s"), self.bucket.name, self.name)
bname = self.bucket.name
nm = self.name
LOG.debug(_("Getting object: %(bname)s / %(nm)s") % locals())
if not self.bucket.is_authorized(request.context):
LOG.audit(_("Unauthorized attempt to get object %s from bucket "
"%s"), self.name, self.bucket.name,
context=request.context)
LOG.audit(_("Unauthorized attempt to get object %(nm)s"
" from bucket %(bname)s") % locals(),
context=request.context)
raise exception.NotAuthorized()
obj = self.bucket[urllib.unquote(self.name)]
@ -289,12 +291,13 @@ class ObjectResource(ErrorHandlingResource):
Raises NotAuthorized if user in request context is not
authorized to delete the object.
"""
LOG.debug(_("Putting object: %s / %s"), self.bucket.name, self.name)
nm = self.name
bname = self.bucket.name
LOG.debug(_("Putting object: %(bname)s / %(nm)s") % locals())
if not self.bucket.is_authorized(request.context):
LOG.audit(_("Unauthorized attempt to upload object %s to bucket "
"%s"),
self.name, self.bucket.name, context=request.context)
LOG.audit(_("Unauthorized attempt to upload object %(nm)s to"
" bucket %(bname)s") % locals(), context=request.context)
raise exception.NotAuthorized()
key = urllib.unquote(self.name)
@ -310,8 +313,9 @@ class ObjectResource(ErrorHandlingResource):
Raises NotAuthorized if user in request context is not
authorized to delete the object.
"""
LOG.debug(_("Deleting object: %s / %s"), self.bucket.name, self.name,
nm = self.name
bname = self.bucket.name
LOG.debug(_("Deleting object: %(bname)s / %(nm)s") % locals(),
context=request.context)
if not self.bucket.is_authorized(request.context):
@ -391,7 +395,7 @@ class ImagesResource(resource.Resource):
if not image_path.startswith(FLAGS.images_path) or \
os.path.exists(image_path):
LOG.audit(_("Not authorized to upload image: invalid directory "
"%s"),
"%s"),
image_path, context=request.context)
raise exception.NotAuthorized()
@ -425,8 +429,8 @@ class ImagesResource(resource.Resource):
if operation:
# operation implies publicity toggle
newstatus = (operation == 'add')
LOG.audit(_("Toggling publicity flag of image %s %r"), image_id,
newstatus, context=request.context)
LOG.audit(_("Toggling publicity flag of image %(image_id)s"
" %(newstatus)r") % locals(), context=request.context)
image_object.set_public(newstatus)
else:
# other attributes imply update

View File

@ -89,15 +89,16 @@ class Consumer(messaging.Consumer):
self.failed_connection = False
break
except: # Catching all because carrot sucks
LOG.exception(_("AMQP server on %s:%d is unreachable."
" Trying again in %d seconds.") % (
FLAGS.rabbit_host,
FLAGS.rabbit_port,
FLAGS.rabbit_retry_interval))
fl_host = FLAGS.rabbit_host
fl_port = FLAGS.rabbit_port
fl_intv = FLAGS.rabbit_retry_interval
LOG.exception(_("AMQP server on %(fl_host)s:%(fl_port)d is"
" unreachable. Trying again in %(fl_intv)d seconds.")
% locals())
self.failed_connection = True
if self.failed_connection:
LOG.exception(_("Unable to connect to AMQP server "
"after %d tries. Shutting down."),
"after %d tries. Shutting down."),
FLAGS.rabbit_max_retries)
sys.exit(1)
@ -152,7 +153,7 @@ class TopicConsumer(Consumer):
class AdapterConsumer(TopicConsumer):
"""Calls methods on a proxy object based on method and args"""
def __init__(self, connection=None, topic="broadcast", proxy=None):
LOG.debug(_('Initing the Adapter Consumer for %s') % (topic))
LOG.debug(_('Initing the Adapter Consumer for %s') % topic)
self.proxy = proxy
super(AdapterConsumer, self).__init__(connection=connection,
topic=topic)
@ -167,7 +168,7 @@ class AdapterConsumer(TopicConsumer):
Example: {'method': 'echo', 'args': {'value': 42}}
"""
LOG.debug(_('received %s') % (message_data))
LOG.debug(_('received %s') % message_data)
msg_id = message_data.pop('_msg_id', None)
ctxt = _unpack_context(message_data)
@ -180,7 +181,7 @@ class AdapterConsumer(TopicConsumer):
# messages stay in the queue indefinitely, so for now
# we just log the message and send an error string
# back to the caller
LOG.warn(_('no method for message: %s') % (message_data))
LOG.warn(_('no method for message: %s') % message_data)
msg_reply(msg_id, _('No method for message: %s') % message_data)
return

View File

@ -66,4 +66,4 @@ class SchedulerManager(manager.Manager):
db.queue_get_for(context, topic, host),
{"method": method,
"args": kwargs})
LOG.debug(_("Casting to %s %s for %s"), topic, host, method)
LOG.debug(_("Casting to %(topic)s %(host)s for %(method)s") % locals())

View File

@ -216,10 +216,11 @@ class Service(object):
from nova.db.sqlalchemy import models
models.register_models()
except OperationalError:
logging.exception(_("Data store %s is unreachable."
" Trying again in %d seconds.") %
(FLAGS.sql_connection,
FLAGS.sql_retry_interval))
fl_conn = FLAGS.sql_connection
fl_intv = FLAGS.sql_retry_interval
logging.exception(_("Data store %(fl_conn)s is"
" unreachable. Trying again in %(fl_intv)d"
" seconds.") % locals())
time.sleep(FLAGS.sql_retry_interval)
@ -232,10 +233,10 @@ def serve(*services):
name = '_'.join(x.binary for x in services)
logging.debug(_("Serving %s"), name)
logging.debug(_("Full set of FLAGS:"))
for flag in FLAGS:
logging.debug("%s : %s" % (flag, FLAGS.get(flag, None)))
flag_get = FLAGS.get(flag, None)
logging.debug("%(flag)s : %(flag_get)s" % locals())
for x in services:
x.start()

View File

@ -86,7 +86,7 @@ class RpcTestCase(test.TestCase):
@staticmethod
def echo(context, queue, value):
"""Calls echo in the passed queue"""
LOG.debug(_("Nested received %s, %s"), queue, value)
LOG.debug(_("Nested received %(queue)s, %(value)s") % locals())
ret = rpc.call(context,
queue,
{"method": "echo",

View File

@ -138,7 +138,7 @@ def execute(cmd, process_input=None, addl_env=None, check_exit_code=True):
result = obj.communicate()
obj.stdin.close()
if obj.returncode:
LOG.debug(_("Result was %s") % (obj.returncode))
LOG.debug(_("Result was %s") % obj.returncode)
if check_exit_code and obj.returncode != 0:
(stdout, stderr) = result
raise ProcessExecutionError(exit_code=obj.returncode,
@ -214,9 +214,11 @@ def get_my_linklocal(interface):
else:
return 'fe00::'
except IndexError as ex:
LOG.warn(_("Couldn't get Link Local IP of %s :%s"), interface, ex)
LOG.warn(_("Couldn't get Link Local IP of %(interface)s :%(ex)s")
% locals())
except ProcessExecutionError as ex:
LOG.warn(_("Couldn't get Link Local IP of %s :%s"), interface, ex)
LOG.warn(_("Couldn't get Link Local IP of %(interface)s :%(ex)s")
% locals())
except:
return 'fe00::'

View File

@ -129,7 +129,7 @@ class HyperVConnection(object):
vm = self._lookup(instance.name)
if vm is not None:
raise exception.Duplicate(_('Attempt to create duplicate vm %s') %
instance.name)
instance.name)
user = manager.AuthManager().get_user(instance['user_id'])
project = manager.AuthManager().get_project(instance['project_id'])
@ -159,7 +159,7 @@ class HyperVConnection(object):
vs_gs_data = self._conn.Msvm_VirtualSystemGlobalSettingData.new()
vs_gs_data.ElementName = instance['name']
(job, ret_val) = vs_man_svc.DefineVirtualSystem(
[], None, vs_gs_data.GetText_(1))[1:]
[], None, vs_gs_data.GetText_(1))[1:]
if ret_val == WMI_JOB_STATUS_STARTED:
success = self._check_job_status(job)
else:
@ -184,17 +184,17 @@ class HyperVConnection(object):
memsetting.Limit = mem
(job, ret_val) = vs_man_svc.ModifyVirtualSystemResources(
vm.path_(), [memsetting.GetText_(1)])
vm.path_(), [memsetting.GetText_(1)])
LOG.debug(_('Set memory for vm %s...'), instance.name)
procsetting = vmsetting.associators(
wmi_result_class='Msvm_ProcessorSettingData')[0]
wmi_result_class='Msvm_ProcessorSettingData')[0]
vcpus = long(instance['vcpus'])
procsetting.VirtualQuantity = vcpus
procsetting.Reservation = vcpus
procsetting.Limit = vcpus
(job, ret_val) = vs_man_svc.ModifyVirtualSystemResources(
vm.path_(), [procsetting.GetText_(1)])
vm.path_(), [procsetting.GetText_(1)])
LOG.debug(_('Set vcpus for vm %s...'), instance.name)
def _create_disk(self, vm_name, vhdfile):
@ -205,19 +205,19 @@ class HyperVConnection(object):
vms = self._conn.MSVM_ComputerSystem(ElementName=vm_name)
vm = vms[0]
vmsettings = vm.associators(
wmi_result_class='Msvm_VirtualSystemSettingData')
wmi_result_class='Msvm_VirtualSystemSettingData')
rasds = vmsettings[0].associators(
wmi_result_class='MSVM_ResourceAllocationSettingData')
wmi_result_class='MSVM_ResourceAllocationSettingData')
ctrller = [r for r in rasds
if r.ResourceSubType == 'Microsoft Emulated IDE Controller'\
and r.Address == "0"]
and r.Address == "0"]
#Find the default disk drive object for the vm and clone it.
diskdflt = self._conn.query(
"SELECT * FROM Msvm_ResourceAllocationSettingData \
WHERE ResourceSubType LIKE 'Microsoft Synthetic Disk Drive'\
AND InstanceID LIKE '%Default%'")[0]
"SELECT * FROM Msvm_ResourceAllocationSettingData \
WHERE ResourceSubType LIKE 'Microsoft Synthetic Disk Drive'\
AND InstanceID LIKE '%Default%'")[0]
diskdrive = self._clone_wmi_obj(
'Msvm_ResourceAllocationSettingData', diskdflt)
'Msvm_ResourceAllocationSettingData', diskdflt)
#Set the IDE ctrller as parent.
diskdrive.Parent = ctrller[0].path_()
diskdrive.Address = 0
@ -263,17 +263,18 @@ class HyperVConnection(object):
default_nic_data = [n for n in emulatednics_data
if n.InstanceID.rfind('Default') > 0]
new_nic_data = self._clone_wmi_obj(
'Msvm_EmulatedEthernetPortSettingData',
default_nic_data[0])
'Msvm_EmulatedEthernetPortSettingData',
default_nic_data[0])
#Create a port on the vswitch.
(new_port, ret_val) = switch_svc.CreateSwitchPort(vm_name, vm_name,
"", extswitch.path_())
if ret_val != 0:
LOG.error(_('Failed creating a port on the external vswitch'))
raise Exception(_('Failed creating port for %s'),
vm_name)
LOG.debug(_("Created switch port %s on switch %s"),
vm_name, extswitch.path_())
vm_name)
ext_path = extswitch.path_()
LOG.debug(_("Created switch port %(vm_name)s on switch %(ext_path)s")
% locals())
#Connect the new nic to the new port.
new_nic_data.Connection = [new_port]
new_nic_data.ElementName = vm_name + ' nic'
@ -283,7 +284,7 @@ class HyperVConnection(object):
new_resources = self._add_virt_resource(new_nic_data, vm)
if new_resources is None:
raise Exception(_('Failed to add nic to VM %s'),
vm_name)
vm_name)
LOG.info(_("Created nic for %s "), vm_name)
def _add_virt_resource(self, res_setting_data, target_vm):
@ -319,8 +320,10 @@ class HyperVConnection(object):
if job.JobState != WMI_JOB_STATE_COMPLETED:
LOG.debug(_("WMI job failed: %s"), job.ErrorSummaryDescription)
return False
LOG.debug(_("WMI job succeeded: %s, Elapsed=%s "), job.Description,
job.ElapsedTime)
desc = job.Description
elap = job.ElapsedTime
LOG.debug(_("WMI job succeeded: %(desc)s, Elapsed=%(elap)s ")
% locals())
return True
def _find_external_network(self):
@ -386,7 +389,9 @@ class HyperVConnection(object):
vhdfile = self._cim_conn.CIM_DataFile(Name=disk)
for vf in vhdfile:
vf.Delete()
LOG.debug(_("Del: disk %s vm %s"), vhdfile, instance.name)
instance_name = instance.name
LOG.debug(_("Del: disk %(vhdfile)s vm %(instance_name)s")
% locals())
def get_info(self, instance_id):
"""Get information about the VM"""
@ -402,12 +407,14 @@ class HyperVConnection(object):
summary_info = vs_man_svc.GetSummaryInformation(
[4, 100, 103, 105], settings_paths)[1]
info = summary_info[0]
LOG.debug(_("Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, \
cpu_time=%s"), instance_id,
str(HYPERV_POWER_STATE[info.EnabledState]),
str(info.MemoryUsage),
str(info.NumberOfProcessors),
str(info.UpTime))
state = str(HYPERV_POWER_STATE[info.EnabledState])
memusage = str(info.MemoryUsage)
numprocs = str(info.NumberOfProcessors)
uptime = str(info.UpTime)
LOG.debug(_("Got Info for vm %(instance_id)s: state=%(state)s,"
" mem=%(memusage)s, num_cpu=%(numprocs)s,"
" cpu_time=%(uptime)s") % locals())
return {'state': HYPERV_POWER_STATE[info.EnabledState],
'max_mem': info.MemoryUsage,
@ -441,22 +448,22 @@ class HyperVConnection(object):
#already in the state requested
success = True
if success:
LOG.info(_("Successfully changed vm state of %s to %s"), vm_name,
req_state)
LOG.info(_("Successfully changed vm state of %(vm_name)s"
" to %(req_state)s") % locals())
else:
LOG.error(_("Failed to change vm state of %s to %s"), vm_name,
req_state)
raise Exception(_("Failed to change vm state of %s to %s"),
vm_name, req_state)
msg = _("Failed to change vm state of %(vm_name)s"
" to %(req_state)s") % locals()
LOG.error(msg)
raise Exception(msg)
def attach_volume(self, instance_name, device_path, mountpoint):
vm = self._lookup(instance_name)
if vm is None:
raise exception.NotFound('Cannot attach volume to missing %s vm' %
instance_name)
raise exception.NotFound('Cannot attach volume to missing %s vm'
% instance_name)
def detach_volume(self, instance_name, mountpoint):
vm = self._lookup(instance_name)
if vm is None:
raise exception.NotFound('Cannot detach volume from missing %s ' %
instance_name)
raise exception.NotFound('Cannot detach volume from missing %s '
% instance_name)

View File

@ -67,7 +67,7 @@ def _fetch_image_no_curl(url, path, headers):
urlopened = urllib2.urlopen(request)
urlretrieve(urlopened, path)
LOG.debug(_("Finished retreving %s -- placed in %s"), url, path)
LOG.debug(_("Finished retrieving %(url)s -- placed in %(path)s") % locals())
def _fetch_s3_image(image, path, user, project):

View File

@ -236,8 +236,9 @@ class LibvirtConnection(object):
def _cleanup(self, instance):
target = os.path.join(FLAGS.instances_path, instance['name'])
LOG.info(_('instance %s: deleting instance files %s'),
instance['name'], target)
instance_name = instance['name']
LOG.info(_('instance %(instance_name)s: deleting instance files'
' %(target)s') % locals())
if os.path.exists(target):
shutil.rmtree(target)
@ -418,7 +419,7 @@ class LibvirtConnection(object):
virsh_output = virsh_output[0].strip()
if virsh_output.startswith('/dev/'):
LOG.info(_('cool, it\'s a device'))
LOG.info(_("cool, it's a device"))
out, err = utils.execute("sudo dd if=%s iflag=nonblock" %
virsh_output, check_exit_code=False)
return out
@ -426,7 +427,7 @@ class LibvirtConnection(object):
return ''
def _append_to_file(self, data, fpath):
LOG.info(_('data: %r, fpath: %r'), data, fpath)
LOG.info(_('data: %(data)r, fpath: %(fpath)r') % locals())
fp = open(fpath, 'a+')
fp.write(data)
return fpath
@ -434,7 +435,7 @@ class LibvirtConnection(object):
def _dump_file(self, fpath):
fp = open(fpath, 'r+')
contents = fp.read()
LOG.info(_('Contents of file %s: %r'), fpath, contents)
LOG.info(_('Contents of file %(fpath)s: %(contents)r') % locals())
return contents
@exception.wrap_exception
@ -623,21 +624,22 @@ class LibvirtConnection(object):
'dns': network_ref['dns'],
'ra_server': ra_server}
if key or net:
inst_name = inst['name']
img_id = inst.image_id
if key:
LOG.info(_('instance %s: injecting key into image %s'),
inst['name'], inst.image_id)
LOG.info(_('instance %(inst_name)s: injecting key into'
' image %(img_id)s') % locals())
if net:
LOG.info(_('instance %s: injecting net into image %s'),
inst['name'], inst.image_id)
LOG.info(_('instance %(inst_name)s: injecting net into'
' image %(img_id)s') % locals())
try:
disk.inject_data(basepath('disk'), key, net,
partition=target_partition,
nbd=FLAGS.use_cow_images)
except Exception as e:
# This could be a windows image, or a vmdk format disk
LOG.warn(_('instance %s: ignoring error injecting data'
' into image %s (%s)'),
inst['name'], inst.image_id, e)
LOG.warn(_('instance %(inst_name)s: ignoring error injecting'
' data into image %(img_id)s (%(e)s)') % locals())
if FLAGS.libvirt_type == 'uml':
utils.execute('sudo chown root %s' % basepath('disk'))

View File

@ -69,7 +69,9 @@ LOG = logging.getLogger("nova.virt.xenapi.fake")
def log_db_contents(msg=None):
LOG.debug(_("%s: _db_content => %s"), msg or "", pformat(_db_content))
text = msg or ""
content = pformat(_db_content)
LOG.debug(_("%(text)s: _db_content => %(content)s") % locals())
def reset():
@ -280,7 +282,7 @@ class SessionBase(object):
if impl is not None:
def callit(*params):
LOG.debug(_('Calling %s %s'), name, impl)
LOG.debug(_('Calling %(name)s %(impl)s') % locals())
self._check_session(params)
return impl(*params)
return callit

View File

@ -124,7 +124,8 @@ class VMHelper(HelperBase):
'pae': 'true', 'viridian': 'true'}
LOG.debug(_('Created VM %s...'), instance.name)
vm_ref = session.call_xenapi('VM.create', rec)
LOG.debug(_('Created VM %s as %s.'), instance.name, vm_ref)
instance_name = instance.name
LOG.debug(_('Created VM %(instance_name)s as %(vm_ref)s.') % locals())
return vm_ref
@classmethod
@ -144,10 +145,11 @@ class VMHelper(HelperBase):
vbd_rec['qos_algorithm_type'] = ''
vbd_rec['qos_algorithm_params'] = {}
vbd_rec['qos_supported_algorithms'] = []
LOG.debug(_('Creating VBD for VM %s, VDI %s ... '), vm_ref, vdi_ref)
LOG.debug(_('Creating VBD for VM %(vm_ref)s,'
' VDI %(vdi_ref)s ... ') % locals())
vbd_ref = session.call_xenapi('VBD.create', vbd_rec)
LOG.debug(_('Created VBD %s for VM %s, VDI %s.'), vbd_ref, vm_ref,
vdi_ref)
LOG.debug(_('Created VBD %(vbd_ref)s for VM %(vm_ref)s,'
' VDI %(vdi_ref)s.') % locals())
return vbd_ref
@classmethod
@ -199,11 +201,11 @@ class VMHelper(HelperBase):
vif_rec['other_config'] = {}
vif_rec['qos_algorithm_type'] = ''
vif_rec['qos_algorithm_params'] = {}
LOG.debug(_('Creating VIF for VM %s, network %s.'), vm_ref,
network_ref)
LOG.debug(_('Creating VIF for VM %(vm_ref)s,'
' network %(network_ref)s.') % locals())
vif_ref = session.call_xenapi('VIF.create', vif_rec)
LOG.debug(_('Created VIF %s for VM %s, network %s.'), vif_ref,
vm_ref, network_ref)
LOG.debug(_('Created VIF %(vif_ref)s for VM %(vm_ref)s,'
' network %(network_ref)s.') % locals())
return vif_ref
@classmethod
@ -213,7 +215,8 @@ class VMHelper(HelperBase):
"""
#TODO(sirp): Add quiesce and VSS locking support when Windows support
# is added
LOG.debug(_("Snapshotting VM %s with label '%s'..."), vm_ref, label)
LOG.debug(_("Snapshotting VM %(vm_ref)s with label '%(label)s'...")
% locals())
vm_vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref)
vm_vdi_uuid = vm_vdi_rec["uuid"]
@ -226,8 +229,8 @@ class VMHelper(HelperBase):
template_vdi_rec = get_vdi_for_vm_safely(session, template_vm_ref)[1]
template_vdi_uuid = template_vdi_rec["uuid"]
LOG.debug(_('Created snapshot %s from VM %s.'), template_vm_ref,
vm_ref)
LOG.debug(_('Created snapshot %(template_vm_ref)s from'
' VM %(vm_ref)s.') % locals())
parent_uuid = wait_for_vhd_coalesce(
session, instance_id, sr_ref, vm_vdi_ref, original_parent_uuid)
@ -240,8 +243,8 @@ class VMHelper(HelperBase):
""" Requests that the Glance plugin bundle the specified VDIs and
push them into Glance using the specified human-friendly name.
"""
logging.debug(_("Asking xapi to upload %s as ID %s"),
vdi_uuids, image_id)
logging.debug(_("Asking xapi to upload %(vdi_uuids)s as"
" ID %(image_id)s") % locals())
params = {'vdi_uuids': vdi_uuids,
'image_id': image_id,
@ -259,7 +262,7 @@ class VMHelper(HelperBase):
"""
url = images.image_url(image)
access = AuthManager().get_access_key(user, project)
LOG.debug(_("Asking xapi to fetch %s as %s"), url, access)
LOG.debug(_("Asking xapi to fetch %(url)s as %(access)s") % locals())
fn = (type != ImageType.KERNEL_RAMDISK) and 'get_vdi' or 'get_kernel'
args = {}
args['src_url'] = url
@ -431,16 +434,17 @@ def wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref,
def _poll_vhds():
attempts['counter'] += 1
if attempts['counter'] > max_attempts:
msg = (_("VHD coalesce attempts exceeded (%d > %d), giving up...")
% (attempts['counter'], max_attempts))
counter = attempts['counter']
msg = (_("VHD coalesce attempts exceeded (%(counter)d >"
" %(max_attempts)d), giving up...") % locals())
raise exception.Error(msg)
scan_sr(session, instance_id, sr_ref)
parent_uuid = get_vhd_parent_uuid(session, vdi_ref)
if original_parent_uuid and (parent_uuid != original_parent_uuid):
LOG.debug(_("Parent %s doesn't match original parent %s, "
"waiting for coalesce..."), parent_uuid,
original_parent_uuid)
LOG.debug(_("Parent %(parent_uuid)s doesn't match original parent"
" %(original_parent_uuid)s, waiting for coalesce...")
% locals())
else:
# Breakout of the loop (normally) and return the parent_uuid
raise utils.LoopingCallDone(parent_uuid)
@ -458,8 +462,8 @@ def get_vdi_for_vm_safely(session, vm_ref):
else:
num_vdis = len(vdi_refs)
if num_vdis != 1:
raise Exception(_("Unexpected number of VDIs (%s) found for "
"VM %s") % (num_vdis, vm_ref))
raise Exception(_("Unexpected number of VDIs (%(num_vdis)s) found"
" for VM %(vm_ref)s") % locals())
vdi_ref = vdi_refs[0]
vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref)

View File

@ -103,7 +103,9 @@ class VMOps(object):
network_ref, instance.mac_address)
LOG.debug(_('Starting VM %s...'), vm_ref)
self._session.call_xenapi('VM.start', vm_ref, False, False)
LOG.info(_('Spawning VM %s created %s.'), instance.name, vm_ref)
instance_name = instance.name
LOG.info(_('Spawning VM %(instance_name)s created %(vm_ref)s.')
% locals())
# NOTE(armando): Do we really need to do this in virt?
timer = utils.LoopingCall(f=None)
@ -195,7 +197,8 @@ class VMOps(object):
template_vm_ref, template_vdi_uuids = VMHelper.create_snapshot(
self._session, instance.id, vm_ref, label)
except self.XenAPI.Failure, exc:
logging.error(_("Unable to Snapshot %s: %s"), vm_ref, exc)
logging.error(_("Unable to Snapshot %(vm_ref)s: %(exc)s")
% locals())
return
try:

View File

@ -71,7 +71,7 @@ class VolumeHelper(HelperBase):
session.get_xenapi_host(),
record,
'0', label, description, 'iscsi', '', False, {})
LOG.debug(_('Introduced %s as %s.'), label, sr_ref)
LOG.debug(_('Introduced %(label)s as %(sr_ref)s.') % locals())
return sr_ref
except cls.XenAPI.Failure, exc:
LOG.exception(exc)
@ -98,20 +98,20 @@ class VolumeHelper(HelperBase):
try:
pbds = session.get_xenapi().SR.get_PBDs(sr_ref)
except cls.XenAPI.Failure, exc:
LOG.warn(_('Ignoring exception %s when getting PBDs for %s'),
exc, sr_ref)
LOG.warn(_('Ignoring exception %(exc)s when getting PBDs'
' for %(sr_ref)s') % locals())
for pbd in pbds:
try:
session.get_xenapi().PBD.unplug(pbd)
except cls.XenAPI.Failure, exc:
LOG.warn(_('Ignoring exception %s when unplugging PBD %s'),
exc, pbd)
LOG.warn(_('Ignoring exception %(exc)s when unplugging'
' PBD %(pbd)s') % locals())
try:
session.get_xenapi().SR.forget(sr_ref)
LOG.debug(_("Forgetting SR %s done."), sr_ref)
except cls.XenAPI.Failure, exc:
LOG.warn(_('Ignoring exception %s when forgetting SR %s'), exc,
sr_ref)
LOG.warn(_('Ignoring exception %(exc)s when forgetting'
' SR %(sr_ref)s') % locals())
@classmethod
def introduce_vdi(cls, session, sr_ref):
@ -172,8 +172,8 @@ class VolumeHelper(HelperBase):
(volume_id is None) or \
(target_host is None) or \
(target_iqn is None):
raise StorageError(_('Unable to obtain target information %s, %s')
% (device_path, mountpoint))
raise StorageError(_('Unable to obtain target information'
' %(device_path)s, %(mountpoint)s') % locals())
volume_info = {}
volume_info['deviceNumber'] = device_number
volume_info['volumeId'] = volume_id

View File

@ -48,8 +48,8 @@ class VolumeOps(object):
raise exception.NotFound(_('Instance %s not found')
% instance_name)
# NOTE: No Resource Pool concept so far
LOG.debug(_("Attach_volume: %s, %s, %s"),
instance_name, device_path, mountpoint)
LOG.debug(_("Attach_volume: %(instance_name)s, %(device_path)s,"
" %(mountpoint)s") % locals())
# Create the iSCSI SR, and the PDB through which hosts access SRs.
# But first, retrieve target info, like Host, IQN, LUN and SCSIID
vol_rec = VolumeHelper.parse_volume_info(device_path, mountpoint)
@ -66,9 +66,8 @@ class VolumeOps(object):
except StorageError, exc:
LOG.exception(exc)
VolumeHelper.destroy_iscsi_storage(self._session, sr_ref)
raise Exception(_('Unable to create VDI on SR %s for instance %s')
% (sr_ref,
instance_name))
raise Exception(_('Unable to create VDI on SR %(sr_ref)s for'
' instance %(instance_name)s') % locals())
else:
try:
vbd_ref = VMHelper.create_vbd(self._session,
@ -78,9 +77,8 @@ class VolumeOps(object):
except self.XenAPI.Failure, exc:
LOG.exception(exc)
VolumeHelper.destroy_iscsi_storage(self._session, sr_ref)
raise Exception(_('Unable to use SR %s for instance %s')
% (sr_ref,
instance_name))
raise Exception(_('Unable to use SR %(sr_ref)s for'
' instance %(instance_name)s') % locals())
else:
try:
task = self._session.call_xenapi('Async.VBD.plug',
@ -92,8 +90,8 @@ class VolumeOps(object):
sr_ref)
raise Exception(_('Unable to attach volume to instance %s')
% instance_name)
LOG.info(_('Mountpoint %s attached to instance %s'),
mountpoint, instance_name)
LOG.info(_('Mountpoint %(mountpoint)s attached to'
' instance %(instance_name)s') % locals())
def detach_volume(self, instance_name, mountpoint):
"""Detach volume storage to VM instance"""
@ -103,7 +101,8 @@ class VolumeOps(object):
raise exception.NotFound(_('Instance %s not found')
% instance_name)
# Detach VBD from VM
LOG.debug(_("Detach_volume: %s, %s"), instance_name, mountpoint)
LOG.debug(_("Detach_volume: %(instance_name)s, %(mountpoint)s")
% locals())
device_number = VolumeHelper.mountpoint_to_number(mountpoint)
try:
vbd_ref = VMHelper.find_vbd_by_number(self._session,
@ -125,5 +124,5 @@ class VolumeOps(object):
LOG.exception(exc)
# Forget SR
VolumeHelper.destroy_iscsi_storage(self._session, sr_ref)
LOG.info(_('Mountpoint %s detached from instance %s'),
mountpoint, instance_name)
LOG.info(_('Mountpoint %(mountpoint)s detached from'
' instance %(instance_name)s') % locals())

View File

@ -287,19 +287,14 @@ class XenAPISession(object):
return
elif status == "success":
result = self._session.xenapi.task.get_result(task)
LOG.info(_("Task [%s] %s status: success %s") % (
name,
task,
result))
LOG.info(_("Task [%(name)s] %(task)s status:"
" success %(result)s") % locals())
done.send(_parse_xmlrpc_value(result))
else:
error_info = self._session.xenapi.task.get_error_info(task)
action["error"] = str(error_info)
LOG.warn(_("Task [%s] %s status: %s %s") % (
name,
task,
status,
error_info))
LOG.warn(_("Task [%(name)s] %(task)s status:"
" %(status)s %(error_info)s") % locals())
done.send_exception(self.XenAPI.Failure(error_info))
db.instance_action_create(context.get_admin_context(), action)
except self.XenAPI.Failure, exc:

View File

@ -41,10 +41,11 @@ class API(base.Base):
def create(self, context, size, name, description):
if quota.allowed_volumes(context, 1, size) < 1:
LOG.warn(_("Quota exceeeded for %s, tried to create %sG volume"),
context.project_id, size)
pid = context.project_id
LOG.warn(_("Quota exceeded for %(pid)s, tried to create"
" %(size)sG volume") % locals())
raise quota.QuotaError(_("Volume quota exceeded. You cannot "
"create a volume of size %s") % size)
"create a volume of size %s") % size)
options = {
'size': size,

View File

@ -99,8 +99,10 @@ class VolumeManager(manager.Manager):
# before passing it to the driver.
volume_ref['host'] = self.host
LOG.debug(_("volume %s: creating lv of size %sG"), volume_ref['name'],
volume_ref['size'])
vol_name = volume_ref['name']
vol_size = volume_ref['size']
LOG.debug(_("volume %(vol_name)s: creating lv of size %(vol_size)sG")
% locals())
self.driver.create_volume(volume_ref)
LOG.debug(_("volume %s: creating export"), volume_ref['name'])

View File

@ -64,7 +64,8 @@ class Server(object):
def start(self, application, port, host='0.0.0.0', backlog=128):
"""Run a WSGI server with the given application."""
logging.audit(_("Starting %s on %s:%s"), sys.argv[0], host, port)
arg0 = sys.argv[0]
logging.audit(_("Starting %(arg0)s on %(host)s:%(port)s") % locals())
socket = eventlet.listen((host, port), backlog=backlog)
self.pool.spawn_n(self._run, application, socket)