debug level logs should not be translated

According to the OpenStack translation policy available at
https://wiki.openstack.org/wiki/LoggingStandards, debug messages
should not be translated. As mentioned in several changes in Nova
by garyk, this helps prioritize log translation.

Change-Id: I770dfc2fd474123a8ccc37311ef69d0c03e85545
Closes-Bug: #1318713
Christian Berendt 2014-05-12 17:39:14 +02:00
parent 322f61f5c7
commit 94920c5551
29 changed files with 96 additions and 97 deletions
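As an illustration of the convention described in the commit message, the
following minimal sketch (hypothetical code, not taken from any file in this
change) shows the intended split: debug-level messages are plain strings,
while messages at warning level and above keep the _() translation marker.
In the hunks shown here the change only drops the _() wrapper; each call
site's existing formatting style, whether % interpolation or a separate
logging argument, is left unchanged.

    # Hypothetical example of the logging convention; not part of this commit.
    from heat.openstack.common.gettextutils import _
    from heat.openstack.common import log as logging

    logger = logging.getLogger(__name__)

    def handle_create(resource_name):
        # Debug messages are developer-facing, so they stay untranslated.
        logger.debug("Creating resource %s" % resource_name)
        # Warning level and above remain wrapped in _() for translation.
        logger.warning(_("Resource %s already exists") % resource_name)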


@@ -165,7 +165,7 @@ class KeystoneClientV2(object):
if r.name == cfg.CONF.heat_stack_user_role]
if len(stack_user_role) == 1:
role_id = stack_user_role[0]
logger.debug(_("Adding user %(user)s to role %(role)s") % {
logger.debug("Adding user %(user)s to role %(role)s" % {
'user': user.id, 'role': role_id})
self.client.roles.add_user_role(user.id, role_id,
self.context.tenant_id)


@@ -155,7 +155,7 @@ class CloudDns(resource.Resource):
"""Create a Rackspace CloudDns Instance."""
# There is no check_create_complete as the pyrax create for DNS is
# synchronous.
logger.debug(_("CloudDns handle_create called."))
logger.debug("CloudDns handle_create called.")
args = dict((k, v) for k, v in self.properties.items())
for rec in args[self.RECORDS] or {}:
# only pop the priority for the correct types
@@ -167,7 +167,7 @@ class CloudDns(resource.Resource):
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
"""Update a Rackspace CloudDns Instance."""
logger.debug(_("CloudDns handle_update called."))
logger.debug("CloudDns handle_update called.")
if not self.resource_id:
raise exception.Error(_('Update called on a non-existent domain'))
if prop_diff:
@@ -190,7 +190,7 @@ class CloudDns(resource.Resource):
def handle_delete(self):
"""Delete a Rackspace CloudDns Instance."""
logger.debug(_("CloudDns handle_delete called."))
logger.debug("CloudDns handle_delete called.")
if self.resource_id:
try:
dom = self.cloud_dns().get(self.resource_id)


@@ -488,7 +488,7 @@ class CloudLoadBalancer(resource.Resource):
lb_name = (self.properties.get(self.NAME) or
self.physical_resource_name())
logger.debug(_("Creating loadbalancer: %s") % {lb_name: lb_body})
logger.debug("Creating loadbalancer: %s" % {lb_name: lb_body})
loadbalancer = self.clb.create(lb_name, **lb_body)
self.resource_id_set(str(loadbalancer.id))


@@ -113,12 +113,12 @@ class CloudServer(server.Server):
self._managed_cloud_started_event_sent = True
if 'rax_service_level_automation' not in server.metadata:
logger.debug(_("Managed Cloud server does not have the "
"rax_service_level_automation metadata tag yet"))
logger.debug("Managed Cloud server does not have the "
"rax_service_level_automation metadata tag yet")
return False
mc_status = server.metadata['rax_service_level_automation']
logger.debug(_("Managed Cloud automation status: %s") % mc_status)
logger.debug("Managed Cloud automation status: %s" % mc_status)
if mc_status == self.MC_STATUS_IN_PROGRESS:
return False
@@ -142,12 +142,12 @@ class CloudServer(server.Server):
self._rack_connect_started_event_sent = True
if 'rackconnect_automation_status' not in server.metadata:
logger.debug(_("RackConnect server does not have the "
"rackconnect_automation_status metadata tag yet"))
logger.debug("RackConnect server does not have the "
"rackconnect_automation_status metadata tag yet")
return False
rc_status = server.metadata['rackconnect_automation_status']
logger.debug(_("RackConnect automation status: %s") % rc_status)
logger.debug("RackConnect automation status: %s" % rc_status)
if rc_status == self.RC_STATUS_DEPLOYING:
return False


@@ -125,10 +125,10 @@ class EC2Token(wsgi.Middleware):
last_failure = None
for auth_uri in self._conf_get('allowed_auth_uris'):
try:
logger.debug(_("Attempt authorize on %s") % auth_uri)
logger.debug("Attempt authorize on %s" % auth_uri)
return self._authorize(req, auth_uri)
except HeatAPIException as e:
logger.debug(_("Authorize failed: %s") % e.__class__)
logger.debug("Authorize failed: %s" % e.__class__)
last_failure = e
raise last_failure or exception.HeatAccessDeniedError()


@@ -220,7 +220,7 @@ class WatchController(object):
# FIXME : Don't yet handle filtering by Dimensions
filter_result = dict((k, v) for (k, v) in parms.iteritems() if k in
("MetricName", "Namespace"))
logger.debug(_("filter parameters : %s") % filter_result)
logger.debug("filter parameters : %s" % filter_result)
try:
# Engine does not currently support query by namespace/metric
@@ -324,7 +324,7 @@ class WatchController(object):
logger.error(msg)
return exception.HeatInvalidParameterValueError(msg)
logger.debug(_("setting %(name)s to %(state)s") % {
logger.debug("setting %(name)s to %(state)s" % {
'name': name, 'state': state_map[state]})
try:
self.rpc_client.set_watch_state(con, watch_name=name,


@@ -22,7 +22,6 @@ import re
import webob
from heat.common import wsgi
from heat.openstack.common.gettextutils import _
from heat.openstack.common import log as logging
@@ -46,9 +45,9 @@ class VersionNegotiationFilter(wsgi.Middleware):
# See if a version identifier is in the URI passed to
# us already. If so, simply return the right version
# API controller
msg = _("Processing request: %(method)s %(path)s Accept: "
"%(accept)s") % ({'method': req.method,
'path': req.path, 'accept': req.accept})
msg = ("Processing request: %(method)s %(path)s Accept: "
"%(accept)s" % {'method': req.method,
'path': req.path, 'accept': req.accept})
logger.debug(msg)
# If the request is for /versions, just return the versions container
@@ -61,17 +60,17 @@ class VersionNegotiationFilter(wsgi.Middleware):
minor_version = req.environ['api.minor_version']
if (major_version == 1 and minor_version == 0):
logger.debug(_("Matched versioned URI. "
"Version: %(major_version)d.%(minor_version)d")
logger.debug("Matched versioned URI. "
"Version: %(major_version)d.%(minor_version)d"
% {'major_version': major_version,
'minor_version': minor_version})
# Strip the version from the path
req.path_info_pop()
return None
else:
logger.debug(_("Unknown version in versioned URI: "
logger.debug("Unknown version in versioned URI: "
"%(major_version)d.%(minor_version)d. "
"Returning version choices.")
"Returning version choices."
% {'major_version': major_version,
'minor_version': minor_version})
return self.versions_app
@@ -85,22 +84,22 @@ class VersionNegotiationFilter(wsgi.Middleware):
major_version = req.environ['api.major_version']
minor_version = req.environ['api.minor_version']
if (major_version == 1 and minor_version == 0):
logger.debug(_("Matched versioned media type. Version: "
"%(major_version)d.%(minor_version)d")
logger.debug("Matched versioned media type. Version: "
"%(major_version)d.%(minor_version)d"
% {'major_version': major_version,
'minor_version': minor_version})
return None
else:
logger.debug(_("Unknown version in accept header: "
"%(major_version)d.%(minor_version)d..."
"returning version choices.")
logger.debug("Unknown version in accept header: "
"%(major_version)d.%(minor_version)d..."
"returning version choices."
% {'major_version': major_version,
'minor_version': minor_version})
return self.versions_app
else:
if req.accept not in ('*/*', ''):
logger.debug(_("Unknown accept header: %s..."
"returning HTTP not found."), req.accept)
logger.debug("Unknown accept header: %s..."
"returning HTTP not found.", req.accept)
return webob.exc.HTTPNotFound()
return None


@@ -42,7 +42,7 @@ class AuthProtocol(object):
Authenticate send downstream on success. Reject request if
we can't authenticate.
"""
LOG.debug(_('Authenticating user token'))
LOG.debug('Authenticating user token')
context = local.store.context
authenticated = self.rpc_client.authenticated_to_backend(context)
if authenticated:


@@ -100,7 +100,7 @@ class KeystoneClientV3(object):
else:
logger.warning(_('stack_user_domain ID not set in heat.conf '
'falling back to using default'))
logger.debug(_('Using stack domain %s') % self.stack_domain_id)
logger.debug('Using stack domain %s' % self.stack_domain_id)
@property
def client(self):
@@ -296,7 +296,7 @@ class KeystoneClientV3(object):
name=self._get_username(username), password=password,
default_project=self.context.tenant_id)
# Add user to heat_stack_user_role
logger.debug(_("Adding user %(user)s to role %(role)s") % {
logger.debug("Adding user %(user)s to role %(role)s" % {
'user': user.id, 'role': role_id})
self.client.roles.grant(role=role_id, user=user.id,
project=self.context.tenant_id)
@@ -338,7 +338,7 @@ class KeystoneClientV3(object):
name=self._get_username(username), password=password,
default_project=project_id, domain=self.stack_domain_id)
# Add to stack user role
logger.debug(_("Adding user %(user)s to role %(role)s") % {
logger.debug("Adding user %(user)s to role %(role)s" % {
'user': user.id, 'role': role_id})
self.domain_admin_client.roles.grant(role=role_id, user=user.id,
project=project_id)


@@ -299,7 +299,7 @@ class Server(object):
break
eventlet.greenio.shutdown_safe(self.sock)
self.sock.close()
self.logger.debug(_('Exited'))
self.logger.debug('Exited')
def wait(self):
"""Wait until all servers have completed running."""


@@ -40,7 +40,7 @@ class ParameterGroups(object):
Validate that a parameter belongs to only one Parameter Group
and that each parameter name references a valid parameter.
'''
logger.debug(_('Validating Parameter Groups.'))
logger.debug('Validating Parameter Groups.')
logger.debug(self.parameter_names)
if self.parameter_groups is not None:
#Loop through groups and validate parameters


@@ -362,7 +362,7 @@ class Stack(collections.Mapping):
dup_names = set(self.parameters.keys()) & set(self.keys())
if dup_names:
logger.debug(_("Duplicate names %s") % dup_names)
logger.debug("Duplicate names %s" % dup_names)
raise StackValidationFailed(message=_("Duplicate names %s") %
dup_names)
@@ -515,7 +515,7 @@ class Stack(collections.Mapping):
self._backup_name(),
owner_id=self.id)
if s is not None:
logger.debug(_('Loaded existing backup stack'))
logger.debug('Loaded existing backup stack')
return self.load(self.context, stack=s)
elif create_if_missing:
templ = Template.load(self.context, self.t.id)
@@ -523,7 +523,7 @@ class Stack(collections.Mapping):
prev = type(self)(self.context, self.name, templ, self.env,
owner_id=self.id)
prev.store(backup=True)
logger.debug(_('Created new backup stack'))
logger.debug('Created new backup stack')
return prev
else:
return None
@@ -572,7 +572,7 @@ class Stack(collections.Mapping):
if self.status != self.COMPLETE:
if (action == self.ROLLBACK and
self.state == (self.UPDATE, self.IN_PROGRESS)):
logger.debug(_("Starting update rollback for %s") % self.name)
logger.debug("Starting update rollback for %s" % self.name)
else:
self.state_set(action, self.FAILED,
'State invalid for %s' % action)
@@ -623,7 +623,7 @@ class Stack(collections.Mapping):
yield self.update_task(oldstack, action=self.ROLLBACK)
return
else:
logger.debug(_('Deleting backup stack'))
logger.debug('Deleting backup stack')
backup_stack.delete(backup=True)
# flip the template to the newstack values


@@ -540,7 +540,7 @@ class Resource(object):
yield
except UpdateReplace:
with excutils.save_and_reraise_exception():
logger.debug(_("Resource %s update requires replacement") %
logger.debug("Resource %s update requires replacement" %
self.name)
except Exception as ex:
logger.exception(_('update %(resource)s : %(err)s') %


@@ -668,7 +668,7 @@ class AutoScalingGroup(InstanceGroup, CooldownMixin):
return
if new_capacity == capacity:
logger.debug(_('no change in capacity %d') % capacity)
logger.debug('no change in capacity %d' % capacity)
return
# send a notification before, on-error and on-success.


@@ -190,8 +190,8 @@ class ElasticIpAssociation(resource.Resource):
server = self.nova().servers.get(self.properties[self.INSTANCE_ID])
server.add_floating_ip(self.properties[self.EIP])
self.resource_id_set(self.properties[self.EIP])
logger.debug(_('ElasticIpAssociation '
'%(instance)s.add_floating_ip(%(eip)s)'),
logger.debug('ElasticIpAssociation '
'%(instance)s.add_floating_ip(%(eip)s)',
{'instance': self.properties[self.INSTANCE_ID],
'eip': self.properties[self.EIP]})
elif self.properties[self.ALLOCATION_ID]:


@@ -731,7 +731,7 @@ class Instance(resource.Resource):
raise exception.NotFound(_('Failed to find instance %s') %
self.resource_id)
else:
logger.debug(_("suspending instance %s") % self.resource_id)
logger.debug("suspending instance %s" % self.resource_id)
# We want the server.suspend to happen after the volume
# detachement has finished, so pass both tasks and the server
suspend_runner = scheduler.TaskRunner(server.suspend)
@@ -753,8 +753,8 @@ class Instance(resource.Resource):
return True
nova_utils.refresh_server(server)
logger.debug(_("%(name)s check_suspend_complete "
"status = %(status)s"),
logger.debug("%(name)s check_suspend_complete "
"status = %(status)s",
{'name': self.name,
'status': server.status})
if server.status in list(nova_utils.deferred_server_statuses +
@@ -787,7 +787,7 @@ class Instance(resource.Resource):
raise exception.NotFound(_('Failed to find instance %s') %
self.resource_id)
else:
logger.debug(_("resuming instance %s") % self.resource_id)
logger.debug("resuming instance %s" % self.resource_id)
server.resume()
return server, scheduler.TaskRunner(self._attach_volumes_task())


@@ -413,7 +413,7 @@ class LoadBalancer(stack_resource.StackResource):
client = self.nova()
for i in instances:
ip = nova_utils.server_to_ipaddress(client, i) or '0.0.0.0'
logger.debug(_('haproxy server:%s') % ip)
logger.debug('haproxy server:%s' % ip)
servers.append('%sserver server%d %s:%s %s' % (spaces, n,
ip, inst_port,
check))


@@ -253,7 +253,7 @@ class Port(neutron.NeutronResource):
self._prepare_list_properties(props)
logger.debug(_('updating port with %s') % props)
logger.debug('updating port with %s' % props)
self.neutron().update_port(self.resource_id, {'port': props})
def check_update_complete(self, *args):


@@ -258,7 +258,7 @@ class OSDBInstance(resource.Resource):
try:
instance = self.trove().instances.get(self.resource_id)
except troveclient.exceptions.NotFound:
logger.debug(_("Database instance %s not found.") %
logger.debug("Database instance %s not found." %
self.resource_id)
self.resource_id_set(None)
else:


@@ -108,8 +108,8 @@ class S3Bucket(resource.Resource):
"""Create a bucket."""
container = self.physical_resource_name()
headers = self.tags_to_headers()
logger.debug(_('S3Bucket create container %(container)s with headers '
'%(headers)s') % {
logger.debug('S3Bucket create container %(container)s with headers '
'%(headers)s' % {
'container': container, 'headers': headers})
if self.properties[self.WEBSITE_CONFIGURATION] is not None:
sc = self.properties[self.WEBSITE_CONFIGURATION]
@@ -140,7 +140,7 @@ class S3Bucket(resource.Resource):
def handle_delete(self):
"""Perform specified delete policy."""
logger.debug(_('S3Bucket delete container %s') % self.resource_id)
logger.debug('S3Bucket delete container %s' % self.resource_id)
if self.resource_id is not None:
try:
self.swift().delete_container(self.resource_id)


@@ -968,7 +968,7 @@ class Server(stack_user.StackUser):
raise exception.NotFound(_('Failed to find server %s') %
self.resource_id)
else:
logger.debug(_('suspending server %s') % self.resource_id)
logger.debug('suspending server %s' % self.resource_id)
# We want the server.suspend to happen after the volume
# detachement has finished, so pass both tasks and the server
suspend_runner = scheduler.TaskRunner(server.suspend)
@@ -985,8 +985,8 @@ class Server(stack_user.StackUser):
return True
nova_utils.refresh_server(server)
logger.debug(_('%(name)s check_suspend_complete status '
'= %(status)s') % {
logger.debug('%(name)s check_suspend_complete status '
'= %(status)s' % {
'name': self.name, 'status': server.status})
if server.status in list(nova_utils.deferred_server_statuses +
['ACTIVE']):
@@ -1014,7 +1014,7 @@ class Server(stack_user.StackUser):
raise exception.NotFound(_('Failed to find server %s') %
self.resource_id)
else:
logger.debug(_('resuming server %s') % self.resource_id)
logger.debug('resuming server %s' % self.resource_id)
server.resume()
return server


@@ -105,9 +105,9 @@ class SwiftContainer(resource.Resource):
if self.properties.get(key) is not None:
container_headers[key] = self.properties[key]
logger.debug(_('SwiftContainer create container %(container)s with '
logger.debug('SwiftContainer create container %(container)s with '
'container headers %(container_headers)s and '
'account headers %(account_headers)s') % {
'account headers %(account_headers)s' % {
'container': container,
'account_headers': account_headers,
'container_headers': container_headers})
@@ -121,7 +121,7 @@ class SwiftContainer(resource.Resource):
def handle_delete(self):
"""Perform specified delete policy."""
logger.debug(_('SwiftContainer delete container %s') %
logger.debug('SwiftContainer delete container %s' %
self.resource_id)
if self.resource_id is not None:
try:


@@ -217,7 +217,7 @@ class VolumeAttachTask(object):
vol = self.clients.cinder().volumes.get(self.volume_id)
while vol.status == 'available' or vol.status == 'attaching':
logger.debug(_('%(name)s - volume status: %(status)s') % {
logger.debug('%(name)s - volume status: %(status)s' % {
'name': str(self), 'status': vol.status})
yield
vol.get()
@@ -280,7 +280,7 @@ class VolumeDetachTask(object):
try:
while vol.status in ('in-use', 'detaching'):
logger.debug(_('%s - volume still in use') % str(self))
logger.debug('%s - volume still in use' % str(self))
yield
vol.get()


@@ -278,7 +278,7 @@ class WaitCondition(resource.Resource):
meta = handle.metadata_get(refresh=True)
# Note, can't use a dict generator on python 2.6, hence:
res = dict([(k, meta[k]['Data']) for k in meta])
logger.debug(_('%(name)s.GetAtt(%(key)s) == %(res)s') %
logger.debug('%(name)s.GetAtt(%(key)s) == %(res)s' %
{'name': self.name,
'key': key,
'res': res})


@@ -128,7 +128,7 @@ class TaskRunner(object):
def _sleep(self, wait_time):
"""Sleep for the specified number of seconds."""
if ENABLE_SLEEP and wait_time is not None:
logger.debug(_('%s sleeping') % str(self))
logger.debug('%s sleeping' % str(self))
eventlet.sleep(wait_time)
def __call__(self, wait_time=1, timeout=None):
@@ -151,7 +151,7 @@ class TaskRunner(object):
"""
assert self._runner is None, "Task already started"
logger.debug(_('%s starting') % str(self))
logger.debug('%s starting' % str(self))
if timeout is not None:
self._timeout = Timeout(self, timeout)
@@ -163,7 +163,7 @@ class TaskRunner(object):
else:
self._runner = False
self._done = True
logger.debug(_('%s done (not resumable)') % str(self))
logger.debug('%s done (not resumable)' % str(self))
def step(self):
"""
@@ -184,13 +184,13 @@ class TaskRunner(object):
# Clean up in case task swallows exception without exiting
self.cancel()
else:
logger.debug(_('%s running') % str(self))
logger.debug('%s running' % str(self))
try:
next(self._runner)
except StopIteration:
self._done = True
logger.debug(_('%s complete') % str(self))
logger.debug('%s complete' % str(self))
return self._done
@@ -207,7 +207,7 @@ class TaskRunner(object):
def cancel(self):
"""Cancel the task and mark it as done."""
if not self.done():
logger.debug(_('%s cancelled') % str(self))
logger.debug('%s cancelled' % str(self))
try:
if self.started():
self._runner.close()


@@ -197,7 +197,7 @@ class StackWatch(object):
# Retrieve the stored credentials & create context
# Require tenant_safe=False to the stack_get to defeat tenant
# scoping otherwise we fail to retrieve the stack
logger.debug(_("Periodic watcher task for stack %s") % sid)
logger.debug("Periodic watcher task for stack %s" % sid)
admin_context = context.get_admin_context()
stack = db_api.stack_get(admin_context, sid, tenant_safe=False,
eager_load=True)
@@ -290,7 +290,7 @@ class EngineService(service.Service):
self.stack_watch = StackWatch(self.thread_group_mgr)
self.listener = EngineListener(host, self.engine_id,
self.thread_group_mgr)
logger.debug(_("Starting listener for engine %s") % self.engine_id)
logger.debug("Starting listener for engine %s" % self.engine_id)
self.listener.start()
def start(self):
@@ -727,7 +727,7 @@ class EngineService(service.Service):
elif stack_lock.StackLock.engine_alive(cnxt, acquire_result):
stop_result = remote_stop(acquire_result)
if stop_result is None:
logger.debug(_("Successfully stopped remote task on engine %s")
logger.debug("Successfully stopped remote task on engine %s"
% acquire_result)
else:
raise exception.StopActionFailed(stack_name=stack.name,
@@ -972,7 +972,7 @@ class EngineService(service.Service):
Handle request to perform suspend action on a stack
'''
def _stack_suspend(stack):
logger.debug(_("suspending stack %s") % stack.name)
logger.debug("suspending stack %s" % stack.name)
stack.suspend()
s = self._get_stack(cnxt, stack_identity)
@@ -987,7 +987,7 @@ class EngineService(service.Service):
Handle request to perform a resume action on a stack
'''
def _stack_resume(stack):
logger.debug(_("resuming stack %s") % stack.name)
logger.debug("resuming stack %s" % stack.name)
stack.resume()
s = self._get_stack(cnxt, stack_identity)


@@ -66,16 +66,16 @@ class StackLock(object):
lock_engine_id = db_api.stack_lock_create(self.stack.id,
self.engine_id)
if lock_engine_id is None:
logger.debug(_("Engine %(engine)s acquired lock on stack "
"%(stack)s") % {'engine': self.engine_id,
'stack': self.stack.id})
logger.debug("Engine %(engine)s acquired lock on stack "
"%(stack)s" % {'engine': self.engine_id,
'stack': self.stack.id})
return
if lock_engine_id == self.engine_id or \
self.engine_alive(self.context, lock_engine_id):
logger.debug(_("Lock on stack %(stack)s is owned by engine "
"%(engine)s") % {'stack': self.stack.id,
'engine': lock_engine_id})
logger.debug("Lock on stack %(stack)s is owned by engine "
"%(engine)s" % {'stack': self.stack.id,
'engine': lock_engine_id})
raise exception.ActionInProgress(stack_name=self.stack.name,
action=self.stack.action)
else:
@@ -117,6 +117,6 @@ class StackLock(object):
logger.warning(_("Lock was already released on stack %s!")
% stack_id)
else:
logger.debug(_("Engine %(engine)s released lock on stack "
"%(stack)s") % {'engine': self.engine_id,
'stack': stack_id})
logger.debug("Engine %(engine)s released lock on stack "
"%(stack)s" % {'engine': self.engine_id,
'stack': stack_id})


@@ -76,7 +76,7 @@ class StackUpdate(object):
def _remove_backup_resource(self, prev_res):
if prev_res.state not in ((prev_res.INIT, prev_res.COMPLETE),
(prev_res.DELETE, prev_res.COMPLETE)):
logger.debug(_("Deleting backup resource %s") % prev_res.name)
logger.debug("Deleting backup resource %s" % prev_res.name)
yield prev_res.destroy()
@staticmethod
@@ -100,18 +100,18 @@ class StackUpdate(object):
# Swap in the backup resource if it is in a valid state,
# instead of creating a new resource
if prev_res.status == prev_res.COMPLETE:
logger.debug(_("Swapping in backup Resource %s") %
logger.debug("Swapping in backup Resource %s" %
res_name)
self._exchange_stacks(self.existing_stack[res_name],
prev_res)
return
logger.debug(_("Deleting backup Resource %s") % res_name)
logger.debug("Deleting backup Resource %s" % res_name)
yield prev_res.destroy()
# Back up existing resource
if res_name in self.existing_stack:
logger.debug(_("Backing up existing Resource %s") % res_name)
logger.debug("Backing up existing Resource %s" % res_name)
existing_res = self.existing_stack[res_name]
self.previous_stack[res_name] = existing_res
existing_res.state_set(existing_res.UPDATE, existing_res.COMPLETE)


@@ -210,7 +210,7 @@ class WatchRule(object):
data = 0
for d in self.watch_data:
if d.created_at < self.now - self.timeperiod:
logger.debug(_('ignoring %s') % str(d.data))
logger.debug('ignoring %s' % str(d.data))
continue
data = data + float(d.data[self.rule['MetricName']]['Value'])
@@ -285,7 +285,7 @@ class WatchRule(object):
dims = dims[0]
sample['resource_metadata'] = dims
sample['resource_id'] = dims.get('InstanceId')
logger.debug(_('new sample:%(k)s data:%(sample)s') % {
logger.debug('new sample:%(k)s data:%(sample)s' % {
'k': k, 'sample': sample})
clients.ceilometer().samples.create(**sample)
@@ -298,7 +298,7 @@ class WatchRule(object):
return
if self.state == self.SUSPENDED:
logger.debug(_('Ignoring metric data for %s, SUSPENDED state')
logger.debug('Ignoring metric data for %s, SUSPENDED state'
% self.name)
return []
@@ -308,8 +308,8 @@ class WatchRule(object):
# options, e.g --haproxy try to push multiple metrics when we
# actually only care about one (the one we're alarming on)
# so just ignore any data which doesn't contain MetricName
logger.debug(_('Ignoring metric data (only accept %(metric)s) '
': %(data)s') % {
logger.debug('Ignoring metric data (only accept %(metric)s) '
': %(data)s' % {
'metric': self.rule['MetricName'], 'data': data})
return
@@ -318,7 +318,7 @@ class WatchRule(object):
'watch_rule_id': self.id
}
wd = db_api.watch_data_create(None, watch_data)
logger.debug(_('new watch:%(name)s data:%(data)s')
logger.debug('new watch:%(name)s data:%(data)s'
% {'name': self.name, 'data': str(wd.data)})
def state_set(self, state):
@@ -344,8 +344,8 @@ class WatchRule(object):
if state != self.state:
actions = self.rule_actions(state)
if actions:
logger.debug(_("Overriding state %(self_state)s for watch "
"%(name)s with %(state)s") % {
logger.debug("Overriding state %(self_state)s for watch "
"%(name)s with %(state)s" % {
'self_state': self.state, 'name': self.name,
'state': state})
else: