debug level logs should not be translated
According to the OpenStack translation policy available at
https://wiki.openstack.org/wiki/LoggingStandards, debug messages should not
be translated. As mentioned in several changes in Nova by garyk, this is
intended to help prioritize log translation.

Change-Id: I770dfc2fd474123a8ccc37311ef69d0c03e85545
Closes-Bug: #1318713
parent 322f61f5c7
commit 94920c5551
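A minimal sketch of the pattern this change applies (the function name and messages below are illustrative, not taken from the patch): debug messages are logged untranslated, while warning-and-above messages keep the _() wrapper so they remain translatable.

    # Illustrative sketch only; uses stdlib gettext as a stand-in for
    # heat.openstack.common.gettextutils._
    import logging
    from gettext import gettext as _

    logger = logging.getLogger(__name__)

    def assign_role(user_id, role_id, failed=False):
        # Untranslated: debug output is aimed at developers, not end users.
        logger.debug("Adding user %(user)s to role %(role)s" %
                     {'user': user_id, 'role': role_id})
        if failed:
            # Warning and above stay wrapped in _() and remain translatable.
            logger.warning(_("Could not add user %(user)s to role %(role)s") %
                           {'user': user_id, 'role': role_id})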
@@ -165,7 +165,7 @@ class KeystoneClientV2(object):
 if r.name == cfg.CONF.heat_stack_user_role]
 if len(stack_user_role) == 1:
 role_id = stack_user_role[0]
-logger.debug(_("Adding user %(user)s to role %(role)s") % {
+logger.debug("Adding user %(user)s to role %(role)s" % {
 'user': user.id, 'role': role_id})
 self.client.roles.add_user_role(user.id, role_id,
 self.context.tenant_id)
@@ -155,7 +155,7 @@ class CloudDns(resource.Resource):
 """Create a Rackspace CloudDns Instance."""
 # There is no check_create_complete as the pyrax create for DNS is
 # synchronous.
-logger.debug(_("CloudDns handle_create called."))
+logger.debug("CloudDns handle_create called.")
 args = dict((k, v) for k, v in self.properties.items())
 for rec in args[self.RECORDS] or {}:
 # only pop the priority for the correct types
@@ -167,7 +167,7 @@ class CloudDns(resource.Resource):

 def handle_update(self, json_snippet, tmpl_diff, prop_diff):
 """Update a Rackspace CloudDns Instance."""
-logger.debug(_("CloudDns handle_update called."))
+logger.debug("CloudDns handle_update called.")
 if not self.resource_id:
 raise exception.Error(_('Update called on a non-existent domain'))
 if prop_diff:
@@ -190,7 +190,7 @@ class CloudDns(resource.Resource):

 def handle_delete(self):
 """Delete a Rackspace CloudDns Instance."""
-logger.debug(_("CloudDns handle_delete called."))
+logger.debug("CloudDns handle_delete called.")
 if self.resource_id:
 try:
 dom = self.cloud_dns().get(self.resource_id)
@@ -488,7 +488,7 @@ class CloudLoadBalancer(resource.Resource):

 lb_name = (self.properties.get(self.NAME) or
 self.physical_resource_name())
-logger.debug(_("Creating loadbalancer: %s") % {lb_name: lb_body})
+logger.debug("Creating loadbalancer: %s" % {lb_name: lb_body})
 loadbalancer = self.clb.create(lb_name, **lb_body)
 self.resource_id_set(str(loadbalancer.id))

@@ -113,12 +113,12 @@ class CloudServer(server.Server):
 self._managed_cloud_started_event_sent = True

 if 'rax_service_level_automation' not in server.metadata:
-logger.debug(_("Managed Cloud server does not have the "
-"rax_service_level_automation metadata tag yet"))
+logger.debug("Managed Cloud server does not have the "
+"rax_service_level_automation metadata tag yet")
 return False

 mc_status = server.metadata['rax_service_level_automation']
-logger.debug(_("Managed Cloud automation status: %s") % mc_status)
+logger.debug("Managed Cloud automation status: %s" % mc_status)

 if mc_status == self.MC_STATUS_IN_PROGRESS:
 return False
@@ -142,12 +142,12 @@ class CloudServer(server.Server):
 self._rack_connect_started_event_sent = True

 if 'rackconnect_automation_status' not in server.metadata:
-logger.debug(_("RackConnect server does not have the "
-"rackconnect_automation_status metadata tag yet"))
+logger.debug("RackConnect server does not have the "
+"rackconnect_automation_status metadata tag yet")
 return False

 rc_status = server.metadata['rackconnect_automation_status']
-logger.debug(_("RackConnect automation status: %s") % rc_status)
+logger.debug("RackConnect automation status: %s" % rc_status)

 if rc_status == self.RC_STATUS_DEPLOYING:
 return False
@@ -125,10 +125,10 @@ class EC2Token(wsgi.Middleware):
 last_failure = None
 for auth_uri in self._conf_get('allowed_auth_uris'):
 try:
-logger.debug(_("Attempt authorize on %s") % auth_uri)
+logger.debug("Attempt authorize on %s" % auth_uri)
 return self._authorize(req, auth_uri)
 except HeatAPIException as e:
-logger.debug(_("Authorize failed: %s") % e.__class__)
+logger.debug("Authorize failed: %s" % e.__class__)
 last_failure = e
 raise last_failure or exception.HeatAccessDeniedError()

@@ -220,7 +220,7 @@ class WatchController(object):
 # FIXME : Don't yet handle filtering by Dimensions
 filter_result = dict((k, v) for (k, v) in parms.iteritems() if k in
 ("MetricName", "Namespace"))
-logger.debug(_("filter parameters : %s") % filter_result)
+logger.debug("filter parameters : %s" % filter_result)

 try:
 # Engine does not currently support query by namespace/metric
@@ -324,7 +324,7 @@ class WatchController(object):
 logger.error(msg)
 return exception.HeatInvalidParameterValueError(msg)

-logger.debug(_("setting %(name)s to %(state)s") % {
+logger.debug("setting %(name)s to %(state)s" % {
 'name': name, 'state': state_map[state]})
 try:
 self.rpc_client.set_watch_state(con, watch_name=name,
@@ -22,7 +22,6 @@ import re
 import webob

 from heat.common import wsgi
-from heat.openstack.common.gettextutils import _
 from heat.openstack.common import log as logging


@@ -46,9 +45,9 @@ class VersionNegotiationFilter(wsgi.Middleware):
 # See if a version identifier is in the URI passed to
 # us already. If so, simply return the right version
 # API controller
-msg = _("Processing request: %(method)s %(path)s Accept: "
-"%(accept)s") % ({'method': req.method,
+msg = ("Processing request: %(method)s %(path)s Accept: "
+"%(accept)s" % {'method': req.method,
 'path': req.path, 'accept': req.accept})
 logger.debug(msg)

 # If the request is for /versions, just return the versions container
@@ -61,17 +60,17 @@ class VersionNegotiationFilter(wsgi.Middleware):
 minor_version = req.environ['api.minor_version']

 if (major_version == 1 and minor_version == 0):
-logger.debug(_("Matched versioned URI. "
-"Version: %(major_version)d.%(minor_version)d")
+logger.debug("Matched versioned URI. "
+"Version: %(major_version)d.%(minor_version)d"
 % {'major_version': major_version,
 'minor_version': minor_version})
 # Strip the version from the path
 req.path_info_pop()
 return None
 else:
-logger.debug(_("Unknown version in versioned URI: "
+logger.debug("Unknown version in versioned URI: "
 "%(major_version)d.%(minor_version)d. "
-"Returning version choices.")
+"Returning version choices."
 % {'major_version': major_version,
 'minor_version': minor_version})
 return self.versions_app
@@ -85,22 +84,22 @@ class VersionNegotiationFilter(wsgi.Middleware):
 major_version = req.environ['api.major_version']
 minor_version = req.environ['api.minor_version']
 if (major_version == 1 and minor_version == 0):
-logger.debug(_("Matched versioned media type. Version: "
-"%(major_version)d.%(minor_version)d")
+logger.debug("Matched versioned media type. Version: "
+"%(major_version)d.%(minor_version)d"
 % {'major_version': major_version,
 'minor_version': minor_version})
 return None
 else:
-logger.debug(_("Unknown version in accept header: "
+logger.debug("Unknown version in accept header: "
 "%(major_version)d.%(minor_version)d..."
-"returning version choices.")
+"returning version choices."
 % {'major_version': major_version,
 'minor_version': minor_version})
 return self.versions_app
 else:
 if req.accept not in ('*/*', ''):
-logger.debug(_("Unknown accept header: %s..."
-"returning HTTP not found."), req.accept)
+logger.debug("Unknown accept header: %s..."
+"returning HTTP not found.", req.accept)
 return webob.exc.HTTPNotFound()
 return None

@@ -42,7 +42,7 @@ class AuthProtocol(object):
 Authenticate send downstream on success. Reject request if
 we can't authenticate.
 """
-LOG.debug(_('Authenticating user token'))
+LOG.debug('Authenticating user token')
 context = local.store.context
 authenticated = self.rpc_client.authenticated_to_backend(context)
 if authenticated:
@@ -100,7 +100,7 @@ class KeystoneClientV3(object):
 else:
 logger.warning(_('stack_user_domain ID not set in heat.conf '
 'falling back to using default'))
-logger.debug(_('Using stack domain %s') % self.stack_domain_id)
+logger.debug('Using stack domain %s' % self.stack_domain_id)

 @property
 def client(self):
@@ -296,7 +296,7 @@ class KeystoneClientV3(object):
 name=self._get_username(username), password=password,
 default_project=self.context.tenant_id)
 # Add user to heat_stack_user_role
-logger.debug(_("Adding user %(user)s to role %(role)s") % {
+logger.debug("Adding user %(user)s to role %(role)s" % {
 'user': user.id, 'role': role_id})
 self.client.roles.grant(role=role_id, user=user.id,
 project=self.context.tenant_id)
@@ -338,7 +338,7 @@ class KeystoneClientV3(object):
 name=self._get_username(username), password=password,
 default_project=project_id, domain=self.stack_domain_id)
 # Add to stack user role
-logger.debug(_("Adding user %(user)s to role %(role)s") % {
+logger.debug("Adding user %(user)s to role %(role)s" % {
 'user': user.id, 'role': role_id})
 self.domain_admin_client.roles.grant(role=role_id, user=user.id,
 project=project_id)
@@ -299,7 +299,7 @@ class Server(object):
 break
 eventlet.greenio.shutdown_safe(self.sock)
 self.sock.close()
-self.logger.debug(_('Exited'))
+self.logger.debug('Exited')

 def wait(self):
 """Wait until all servers have completed running."""
@@ -40,7 +40,7 @@ class ParameterGroups(object):
 Validate that a parameter belongs to only one Parameter Group
 and that each parameter name references a valid parameter.
 '''
-logger.debug(_('Validating Parameter Groups.'))
+logger.debug('Validating Parameter Groups.')
 logger.debug(self.parameter_names)
 if self.parameter_groups is not None:
 #Loop through groups and validate parameters
@@ -362,7 +362,7 @@ class Stack(collections.Mapping):
 dup_names = set(self.parameters.keys()) & set(self.keys())

 if dup_names:
-logger.debug(_("Duplicate names %s") % dup_names)
+logger.debug("Duplicate names %s" % dup_names)
 raise StackValidationFailed(message=_("Duplicate names %s") %
 dup_names)

@@ -515,7 +515,7 @@ class Stack(collections.Mapping):
 self._backup_name(),
 owner_id=self.id)
 if s is not None:
-logger.debug(_('Loaded existing backup stack'))
+logger.debug('Loaded existing backup stack')
 return self.load(self.context, stack=s)
 elif create_if_missing:
 templ = Template.load(self.context, self.t.id)
@@ -523,7 +523,7 @@ class Stack(collections.Mapping):
 prev = type(self)(self.context, self.name, templ, self.env,
 owner_id=self.id)
 prev.store(backup=True)
-logger.debug(_('Created new backup stack'))
+logger.debug('Created new backup stack')
 return prev
 else:
 return None
@@ -572,7 +572,7 @@ class Stack(collections.Mapping):
 if self.status != self.COMPLETE:
 if (action == self.ROLLBACK and
 self.state == (self.UPDATE, self.IN_PROGRESS)):
-logger.debug(_("Starting update rollback for %s") % self.name)
+logger.debug("Starting update rollback for %s" % self.name)
 else:
 self.state_set(action, self.FAILED,
 'State invalid for %s' % action)
@@ -623,7 +623,7 @@ class Stack(collections.Mapping):
 yield self.update_task(oldstack, action=self.ROLLBACK)
 return
 else:
-logger.debug(_('Deleting backup stack'))
+logger.debug('Deleting backup stack')
 backup_stack.delete(backup=True)

 # flip the template to the newstack values
@@ -540,7 +540,7 @@ class Resource(object):
 yield
 except UpdateReplace:
 with excutils.save_and_reraise_exception():
-logger.debug(_("Resource %s update requires replacement") %
+logger.debug("Resource %s update requires replacement" %
 self.name)
 except Exception as ex:
 logger.exception(_('update %(resource)s : %(err)s') %
@@ -668,7 +668,7 @@ class AutoScalingGroup(InstanceGroup, CooldownMixin):
 return

 if new_capacity == capacity:
-logger.debug(_('no change in capacity %d') % capacity)
+logger.debug('no change in capacity %d' % capacity)
 return

 # send a notification before, on-error and on-success.
@@ -190,8 +190,8 @@ class ElasticIpAssociation(resource.Resource):
 server = self.nova().servers.get(self.properties[self.INSTANCE_ID])
 server.add_floating_ip(self.properties[self.EIP])
 self.resource_id_set(self.properties[self.EIP])
-logger.debug(_('ElasticIpAssociation '
-'%(instance)s.add_floating_ip(%(eip)s)'),
+logger.debug('ElasticIpAssociation '
+'%(instance)s.add_floating_ip(%(eip)s)',
 {'instance': self.properties[self.INSTANCE_ID],
 'eip': self.properties[self.EIP]})
 elif self.properties[self.ALLOCATION_ID]:
@@ -731,7 +731,7 @@ class Instance(resource.Resource):
 raise exception.NotFound(_('Failed to find instance %s') %
 self.resource_id)
 else:
-logger.debug(_("suspending instance %s") % self.resource_id)
+logger.debug("suspending instance %s" % self.resource_id)
 # We want the server.suspend to happen after the volume
 # detachement has finished, so pass both tasks and the server
 suspend_runner = scheduler.TaskRunner(server.suspend)
@@ -753,8 +753,8 @@ class Instance(resource.Resource):
 return True

 nova_utils.refresh_server(server)
-logger.debug(_("%(name)s check_suspend_complete "
-"status = %(status)s"),
+logger.debug("%(name)s check_suspend_complete "
+"status = %(status)s",
 {'name': self.name,
 'status': server.status})
 if server.status in list(nova_utils.deferred_server_statuses +
@@ -787,7 +787,7 @@ class Instance(resource.Resource):
 raise exception.NotFound(_('Failed to find instance %s') %
 self.resource_id)
 else:
-logger.debug(_("resuming instance %s") % self.resource_id)
+logger.debug("resuming instance %s" % self.resource_id)
 server.resume()
 return server, scheduler.TaskRunner(self._attach_volumes_task())

@@ -413,7 +413,7 @@ class LoadBalancer(stack_resource.StackResource):
 client = self.nova()
 for i in instances:
 ip = nova_utils.server_to_ipaddress(client, i) or '0.0.0.0'
-logger.debug(_('haproxy server:%s') % ip)
+logger.debug('haproxy server:%s' % ip)
 servers.append('%sserver server%d %s:%s %s' % (spaces, n,
 ip, inst_port,
 check))
@@ -253,7 +253,7 @@ class Port(neutron.NeutronResource):

 self._prepare_list_properties(props)

-logger.debug(_('updating port with %s') % props)
+logger.debug('updating port with %s' % props)
 self.neutron().update_port(self.resource_id, {'port': props})

 def check_update_complete(self, *args):
@@ -258,7 +258,7 @@ class OSDBInstance(resource.Resource):
 try:
 instance = self.trove().instances.get(self.resource_id)
 except troveclient.exceptions.NotFound:
-logger.debug(_("Database instance %s not found.") %
+logger.debug("Database instance %s not found." %
 self.resource_id)
 self.resource_id_set(None)
 else:
@@ -108,8 +108,8 @@ class S3Bucket(resource.Resource):
 """Create a bucket."""
 container = self.physical_resource_name()
 headers = self.tags_to_headers()
-logger.debug(_('S3Bucket create container %(container)s with headers '
-'%(headers)s') % {
+logger.debug('S3Bucket create container %(container)s with headers '
+'%(headers)s' % {
 'container': container, 'headers': headers})
 if self.properties[self.WEBSITE_CONFIGURATION] is not None:
 sc = self.properties[self.WEBSITE_CONFIGURATION]
@@ -140,7 +140,7 @@ class S3Bucket(resource.Resource):

 def handle_delete(self):
 """Perform specified delete policy."""
-logger.debug(_('S3Bucket delete container %s') % self.resource_id)
+logger.debug('S3Bucket delete container %s' % self.resource_id)
 if self.resource_id is not None:
 try:
 self.swift().delete_container(self.resource_id)
@@ -968,7 +968,7 @@ class Server(stack_user.StackUser):
 raise exception.NotFound(_('Failed to find server %s') %
 self.resource_id)
 else:
-logger.debug(_('suspending server %s') % self.resource_id)
+logger.debug('suspending server %s' % self.resource_id)
 # We want the server.suspend to happen after the volume
 # detachement has finished, so pass both tasks and the server
 suspend_runner = scheduler.TaskRunner(server.suspend)
@@ -985,8 +985,8 @@ class Server(stack_user.StackUser):
 return True

 nova_utils.refresh_server(server)
-logger.debug(_('%(name)s check_suspend_complete status '
-'= %(status)s') % {
+logger.debug('%(name)s check_suspend_complete status '
+'= %(status)s' % {
 'name': self.name, 'status': server.status})
 if server.status in list(nova_utils.deferred_server_statuses +
 ['ACTIVE']):
@@ -1014,7 +1014,7 @@ class Server(stack_user.StackUser):
 raise exception.NotFound(_('Failed to find server %s') %
 self.resource_id)
 else:
-logger.debug(_('resuming server %s') % self.resource_id)
+logger.debug('resuming server %s' % self.resource_id)
 server.resume()
 return server

@@ -105,9 +105,9 @@ class SwiftContainer(resource.Resource):
 if self.properties.get(key) is not None:
 container_headers[key] = self.properties[key]

-logger.debug(_('SwiftContainer create container %(container)s with '
+logger.debug('SwiftContainer create container %(container)s with '
 'container headers %(container_headers)s and '
-'account headers %(account_headers)s') % {
+'account headers %(account_headers)s' % {
 'container': container,
 'account_headers': account_headers,
 'container_headers': container_headers})
@@ -121,7 +121,7 @@ class SwiftContainer(resource.Resource):

 def handle_delete(self):
 """Perform specified delete policy."""
-logger.debug(_('SwiftContainer delete container %s') %
+logger.debug('SwiftContainer delete container %s' %
 self.resource_id)
 if self.resource_id is not None:
 try:
@@ -217,7 +217,7 @@ class VolumeAttachTask(object):

 vol = self.clients.cinder().volumes.get(self.volume_id)
 while vol.status == 'available' or vol.status == 'attaching':
-logger.debug(_('%(name)s - volume status: %(status)s') % {
+logger.debug('%(name)s - volume status: %(status)s' % {
 'name': str(self), 'status': vol.status})
 yield
 vol.get()
@@ -280,7 +280,7 @@ class VolumeDetachTask(object):

 try:
 while vol.status in ('in-use', 'detaching'):
-logger.debug(_('%s - volume still in use') % str(self))
+logger.debug('%s - volume still in use' % str(self))
 yield
 vol.get()

@@ -278,7 +278,7 @@ class WaitCondition(resource.Resource):
 meta = handle.metadata_get(refresh=True)
 # Note, can't use a dict generator on python 2.6, hence:
 res = dict([(k, meta[k]['Data']) for k in meta])
-logger.debug(_('%(name)s.GetAtt(%(key)s) == %(res)s') %
+logger.debug('%(name)s.GetAtt(%(key)s) == %(res)s' %
 {'name': self.name,
 'key': key,
 'res': res})
@@ -128,7 +128,7 @@ class TaskRunner(object):
 def _sleep(self, wait_time):
 """Sleep for the specified number of seconds."""
 if ENABLE_SLEEP and wait_time is not None:
-logger.debug(_('%s sleeping') % str(self))
+logger.debug('%s sleeping' % str(self))
 eventlet.sleep(wait_time)

 def __call__(self, wait_time=1, timeout=None):
@@ -151,7 +151,7 @@ class TaskRunner(object):
 """
 assert self._runner is None, "Task already started"

-logger.debug(_('%s starting') % str(self))
+logger.debug('%s starting' % str(self))

 if timeout is not None:
 self._timeout = Timeout(self, timeout)
@@ -163,7 +163,7 @@ class TaskRunner(object):
 else:
 self._runner = False
 self._done = True
-logger.debug(_('%s done (not resumable)') % str(self))
+logger.debug('%s done (not resumable)' % str(self))

 def step(self):
 """
@@ -184,13 +184,13 @@ class TaskRunner(object):
 # Clean up in case task swallows exception without exiting
 self.cancel()
 else:
-logger.debug(_('%s running') % str(self))
+logger.debug('%s running' % str(self))

 try:
 next(self._runner)
 except StopIteration:
 self._done = True
-logger.debug(_('%s complete') % str(self))
+logger.debug('%s complete' % str(self))

 return self._done

@@ -207,7 +207,7 @@ class TaskRunner(object):
 def cancel(self):
 """Cancel the task and mark it as done."""
 if not self.done():
-logger.debug(_('%s cancelled') % str(self))
+logger.debug('%s cancelled' % str(self))
 try:
 if self.started():
 self._runner.close()
@@ -197,7 +197,7 @@ class StackWatch(object):
 # Retrieve the stored credentials & create context
 # Require tenant_safe=False to the stack_get to defeat tenant
 # scoping otherwise we fail to retrieve the stack
-logger.debug(_("Periodic watcher task for stack %s") % sid)
+logger.debug("Periodic watcher task for stack %s" % sid)
 admin_context = context.get_admin_context()
 stack = db_api.stack_get(admin_context, sid, tenant_safe=False,
 eager_load=True)
@@ -290,7 +290,7 @@ class EngineService(service.Service):
 self.stack_watch = StackWatch(self.thread_group_mgr)
 self.listener = EngineListener(host, self.engine_id,
 self.thread_group_mgr)
-logger.debug(_("Starting listener for engine %s") % self.engine_id)
+logger.debug("Starting listener for engine %s" % self.engine_id)
 self.listener.start()

 def start(self):
@@ -727,7 +727,7 @@ class EngineService(service.Service):
 elif stack_lock.StackLock.engine_alive(cnxt, acquire_result):
 stop_result = remote_stop(acquire_result)
 if stop_result is None:
-logger.debug(_("Successfully stopped remote task on engine %s")
+logger.debug("Successfully stopped remote task on engine %s"
 % acquire_result)
 else:
 raise exception.StopActionFailed(stack_name=stack.name,
@@ -972,7 +972,7 @@ class EngineService(service.Service):
 Handle request to perform suspend action on a stack
 '''
 def _stack_suspend(stack):
-logger.debug(_("suspending stack %s") % stack.name)
+logger.debug("suspending stack %s" % stack.name)
 stack.suspend()

 s = self._get_stack(cnxt, stack_identity)
@@ -987,7 +987,7 @@ class EngineService(service.Service):
 Handle request to perform a resume action on a stack
 '''
 def _stack_resume(stack):
-logger.debug(_("resuming stack %s") % stack.name)
+logger.debug("resuming stack %s" % stack.name)
 stack.resume()

 s = self._get_stack(cnxt, stack_identity)
@@ -66,16 +66,16 @@ class StackLock(object):
 lock_engine_id = db_api.stack_lock_create(self.stack.id,
 self.engine_id)
 if lock_engine_id is None:
-logger.debug(_("Engine %(engine)s acquired lock on stack "
-"%(stack)s") % {'engine': self.engine_id,
+logger.debug("Engine %(engine)s acquired lock on stack "
+"%(stack)s" % {'engine': self.engine_id,
 'stack': self.stack.id})
 return

 if lock_engine_id == self.engine_id or \
 self.engine_alive(self.context, lock_engine_id):
-logger.debug(_("Lock on stack %(stack)s is owned by engine "
-"%(engine)s") % {'stack': self.stack.id,
+logger.debug("Lock on stack %(stack)s is owned by engine "
+"%(engine)s" % {'stack': self.stack.id,
 'engine': lock_engine_id})
 raise exception.ActionInProgress(stack_name=self.stack.name,
 action=self.stack.action)
 else:
@@ -117,6 +117,6 @@ class StackLock(object):
 logger.warning(_("Lock was already released on stack %s!")
 % stack_id)
 else:
-logger.debug(_("Engine %(engine)s released lock on stack "
-"%(stack)s") % {'engine': self.engine_id,
+logger.debug("Engine %(engine)s released lock on stack "
+"%(stack)s" % {'engine': self.engine_id,
 'stack': stack_id})
@@ -76,7 +76,7 @@ class StackUpdate(object):
 def _remove_backup_resource(self, prev_res):
 if prev_res.state not in ((prev_res.INIT, prev_res.COMPLETE),
 (prev_res.DELETE, prev_res.COMPLETE)):
-logger.debug(_("Deleting backup resource %s") % prev_res.name)
+logger.debug("Deleting backup resource %s" % prev_res.name)
 yield prev_res.destroy()

 @staticmethod
@@ -100,18 +100,18 @@ class StackUpdate(object):
 # Swap in the backup resource if it is in a valid state,
 # instead of creating a new resource
 if prev_res.status == prev_res.COMPLETE:
-logger.debug(_("Swapping in backup Resource %s") %
+logger.debug("Swapping in backup Resource %s" %
 res_name)
 self._exchange_stacks(self.existing_stack[res_name],
 prev_res)
 return

-logger.debug(_("Deleting backup Resource %s") % res_name)
+logger.debug("Deleting backup Resource %s" % res_name)
 yield prev_res.destroy()

 # Back up existing resource
 if res_name in self.existing_stack:
-logger.debug(_("Backing up existing Resource %s") % res_name)
+logger.debug("Backing up existing Resource %s" % res_name)
 existing_res = self.existing_stack[res_name]
 self.previous_stack[res_name] = existing_res
 existing_res.state_set(existing_res.UPDATE, existing_res.COMPLETE)
@@ -210,7 +210,7 @@ class WatchRule(object):
 data = 0
 for d in self.watch_data:
 if d.created_at < self.now - self.timeperiod:
-logger.debug(_('ignoring %s') % str(d.data))
+logger.debug('ignoring %s' % str(d.data))
 continue
 data = data + float(d.data[self.rule['MetricName']]['Value'])

@@ -285,7 +285,7 @@ class WatchRule(object):
 dims = dims[0]
 sample['resource_metadata'] = dims
 sample['resource_id'] = dims.get('InstanceId')
-logger.debug(_('new sample:%(k)s data:%(sample)s') % {
+logger.debug('new sample:%(k)s data:%(sample)s' % {
 'k': k, 'sample': sample})
 clients.ceilometer().samples.create(**sample)

@@ -298,7 +298,7 @@ class WatchRule(object):
 return

 if self.state == self.SUSPENDED:
-logger.debug(_('Ignoring metric data for %s, SUSPENDED state')
+logger.debug('Ignoring metric data for %s, SUSPENDED state'
 % self.name)
 return []

@@ -308,8 +308,8 @@ class WatchRule(object):
 # options, e.g --haproxy try to push multiple metrics when we
 # actually only care about one (the one we're alarming on)
 # so just ignore any data which doesn't contain MetricName
-logger.debug(_('Ignoring metric data (only accept %(metric)s) '
-': %(data)s') % {
+logger.debug('Ignoring metric data (only accept %(metric)s) '
+': %(data)s' % {
 'metric': self.rule['MetricName'], 'data': data})
 return

@@ -318,7 +318,7 @@ class WatchRule(object):
 'watch_rule_id': self.id
 }
 wd = db_api.watch_data_create(None, watch_data)
-logger.debug(_('new watch:%(name)s data:%(data)s')
+logger.debug('new watch:%(name)s data:%(data)s'
 % {'name': self.name, 'data': str(wd.data)})

 def state_set(self, state):
@@ -344,8 +344,8 @@ class WatchRule(object):
 if state != self.state:
 actions = self.rule_actions(state)
 if actions:
-logger.debug(_("Overriding state %(self_state)s for watch "
-"%(name)s with %(state)s") % {
+logger.debug("Overriding state %(self_state)s for watch "
+"%(name)s with %(state)s" % {
 'self_state': self.state, 'name': self.name,
 'state': state})
 else: