Log translation hints for Heat.engine (part3)

Currently, Log translation is motivated by oslo's move to prioritized
translation of strings, as documented at
https://wiki.openstack.org/wiki/LoggingStandards#Log_Translation
- add log translation hints for warning, error and info levels
- move from LOG.warning to LOG.warn
- remove use of % as a string formatter, use the log functionality
  instead
Partial implements blueprint log-translation-hints

Change-Id: Ib91983e6e61b4d6ddbbed0f95dc98cbde1d4f673
This commit is contained in:
liu-sheng 2014-09-23 16:12:20 +08:00
parent d971917560
commit dc90767545
12 changed files with 135 additions and 113 deletions

View File

@ -13,6 +13,7 @@
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
@ -223,10 +224,10 @@ class AccessKey(resource.Resource):
'''
if self._secret is None:
if not self.resource_id:
LOG.info(_('could not get secret for %(username)s '
'Error:%(msg)s')
% {'username': self.properties[self.USER_NAME],
'msg': "resource_id not yet set"})
LOG.info(_LI('could not get secret for %(username)s '
'Error:%(msg)s'),
{'username': self.properties[self.USER_NAME],
'msg': "resource_id not yet set"})
else:
# First try to retrieve the secret from resource_data, but
# for backwards compatibility, fall back to requesting from
@ -243,8 +244,8 @@ class AccessKey(resource.Resource):
# And the ID of the v3 credential
self.data_set('credential_id', kp.id, redact=True)
except Exception as ex:
LOG.info(_('could not get secret for %(username)s '
'Error:%(msg)s') % {
LOG.info(_LI('could not get secret for %(username)s '
'Error:%(msg)s'), {
'username': self.properties[self.USER_NAME],
'msg': ex})

View File

@ -15,6 +15,7 @@ import json
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.engine import attributes
from heat.engine import constraints
from heat.engine import properties
@ -238,13 +239,13 @@ class VolumeExtendTask(object):
vol.get()
if vol.status != 'available':
LOG.info(_("Resize failed: Volume %(vol)s is in %(status)s state."
) % {'vol': vol.id, 'status': vol.status})
LOG.info(_LI("Resize failed: Volume %(vol)s is in %(status)s "
"state."), {'vol': vol.id, 'status': vol.status})
raise resource.ResourceUnknownStatus(
resource_status=vol.status,
result=_('Volume resize failed'))
LOG.info(_('%s - complete') % str(self))
LOG.info(_LI('%s - complete'), str(self))
class VolumeAttachTask(object):
@ -293,14 +294,15 @@ class VolumeAttachTask(object):
vol.get()
if vol.status != 'in-use':
LOG.info(_("Attachment failed - volume %(vol)s "
"is in %(status)s status") % {"vol": vol.id,
"status": vol.status})
LOG.info(_LI("Attachment failed - volume %(vol)s "
"is in %(status)s status"),
{"vol": vol.id,
"status": vol.status})
raise resource.ResourceUnknownStatus(
resource_status=vol.status,
result=_('Volume attachment failed'))
LOG.info(_('%s - complete') % str(self))
LOG.info(_LI('%s - complete'), str(self))
class VolumeDetachTask(object):
@ -363,13 +365,13 @@ class VolumeDetachTask(object):
yield
vol.get()
LOG.info(_('%(name)s - status: %(status)s')
% {'name': str(self), 'status': vol.status})
LOG.info(_LI('%(name)s - status: %(status)s'),
{'name': str(self), 'status': vol.status})
if vol.status != 'available':
LOG.info(_("Detachment failed - volume %(vol)s "
"is in %(status)s status") % {
"vol": vol.id,
"status": vol.status})
LOG.info(_LI("Detachment failed - volume %(vol)s "
"is in %(status)s status"),
{"vol": vol.id,
"status": vol.status})
raise resource.ResourceUnknownStatus(
resource_status=vol.status,
result=_('Volume detachment failed'))
@ -390,12 +392,12 @@ class VolumeDetachTask(object):
return True
while server_has_attachment(self.server_id, self.attachment_id):
LOG.info(_("Server %(srv)s still has attachment %(att)s.")
% {'att': self.attachment_id, 'srv': self.server_id})
LOG.info(_LI("Server %(srv)s still has attachment %(att)s."),
{'att': self.attachment_id, 'srv': self.server_id})
yield
LOG.info(_("Volume %(vol)s is detached from server %(srv)s")
% {'vol': vol.id, 'srv': self.server_id})
LOG.info(_LI("Volume %(vol)s is detached from server %(srv)s"),
{'vol': vol.id, 'srv': self.server_id})
class VolumeAttachment(resource.Resource):

View File

@ -16,6 +16,9 @@ import uuid
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LE
from heat.common.i18n import _LI
from heat.common.i18n import _LW
from heat.common import identifier
from heat.engine import attributes
from heat.engine import constraints
@ -63,8 +66,8 @@ class BaseWaitConditionHandle(signal_responder.SignalResponder):
if self._metadata_format_ok(metadata):
rsrc_metadata = self.metadata_get(refresh=True)
if metadata[self.UNIQUE_ID] in rsrc_metadata:
LOG.warning(_("Overwriting Metadata item for id %s!")
% metadata[self.UNIQUE_ID])
LOG.warn(_LW("Overwriting Metadata item for id %s!"),
metadata[self.UNIQUE_ID])
safe_metadata = {}
for k in self.METADATA_KEYS:
if k == self.UNIQUE_ID:
@ -76,7 +79,7 @@ class BaseWaitConditionHandle(signal_responder.SignalResponder):
(safe_metadata[self.STATUS],
safe_metadata[self.REASON]))
else:
LOG.error(_("Metadata failed validation for %s") % self.name)
LOG.error(_LE("Metadata failed validation for %s"), self.name)
raise ValueError(_("Metadata format invalid"))
return signal_reason
@ -353,20 +356,20 @@ class HeatWaitCondition(resource.Resource):
yield
except scheduler.Timeout:
timeout = WaitConditionTimeout(self, handle)
LOG.info(_('%(name)s Timed out (%(timeout)s)')
% {'name': str(self), 'timeout': str(timeout)})
LOG.info(_LI('%(name)s Timed out (%(timeout)s)'),
{'name': str(self), 'timeout': str(timeout)})
raise timeout
handle_status = handle.get_status()
if any(s != handle.STATUS_SUCCESS for s in handle_status):
failure = WaitConditionFailure(self, handle)
LOG.info(_('%(name)s Failed (%(failure)s)')
% {'name': str(self), 'failure': str(failure)})
LOG.info(_LI('%(name)s Failed (%(failure)s)'),
{'name': str(self), 'failure': str(failure)})
raise failure
if len(handle_status) >= self.properties[self.COUNT]:
LOG.info(_("%s Succeeded") % str(self))
LOG.info(_LI("%s Succeeded"), str(self))
return
def handle_create(self):

View File

@ -22,6 +22,7 @@ from oslo.utils import excutils
import six
from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.openstack.common import log as logging
LOG = logging.getLogger(__name__)
@ -202,7 +203,7 @@ class TaskRunner(object):
assert self._runner is not None, "Task not started"
if self._timeout is not None and self._timeout.expired():
LOG.info(_('%s timed out') % str(self))
LOG.info(_LI('%s timed out'), str(self))
self._done = True
self._timeout.trigger(self._runner)

View File

@ -30,6 +30,9 @@ import webob
from heat.common import context
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LE
from heat.common.i18n import _LI
from heat.common.i18n import _LW
from heat.common import identifier
from heat.common import messaging as rpc_messaging
from heat.db import api as db_api
@ -255,7 +258,8 @@ class StackWatch(object):
db_stack = db_api.stack_get(admin_context, sid, tenant_safe=False,
eager_load=True)
if not db_stack:
LOG.error(_("Unable to retrieve stack %s for periodic task") % sid)
LOG.error(_LE("Unable to retrieve stack %s for periodic task"),
sid)
return
stack = parser.Stack.load(admin_context, stack=db_stack,
use_stored_context=True)
@ -269,8 +273,8 @@ class StackWatch(object):
try:
wrs = db_api.watch_rule_get_all_by_stack(admin_context, sid)
except Exception as ex:
LOG.warn(_('periodic_task db error watch rule removed? %(ex)s')
% ex)
LOG.warn(_LW('periodic_task db error watch rule removed? %(ex)s'),
ex)
return
def run_alarm_action(stack, actions, details):
@ -401,7 +405,7 @@ class EngineService(service.Service):
def stop(self):
# Stop rpc connection at first for preventing new requests
LOG.info(_("Attempting to stop engine service..."))
LOG.info(_LI("Attempting to stop engine service..."))
try:
self.conn.close()
except Exception:
@ -412,14 +416,14 @@ class EngineService(service.Service):
# Ignore dummy service task
if stack_id == cfg.CONF.periodic_interval:
continue
LOG.info(_("Waiting stack %s processing to be finished")
% stack_id)
LOG.info(_LI("Waiting stack %s processing to be finished"),
stack_id)
# Stop threads gracefully
self.thread_group_mgr.stop(stack_id, True)
LOG.info(_("Stack %s processing was finished") % stack_id)
LOG.info(_LI("Stack %s processing was finished"), stack_id)
# Terminate the engine process
LOG.info(_("All threads were gone, terminating engine"))
LOG.info(_LI("All threads were gone, terminating engine"))
super(EngineService, self).stop()
@request_context
@ -598,7 +602,7 @@ class EngineService(service.Service):
:param args: Request parameters/args passed from API
"""
LOG.info(_('previewing stack %s') % stack_name)
LOG.info(_LI('previewing stack %s'), stack_name)
stack = self._parse_template_and_validate_stack(cnxt,
stack_name,
template,
@ -626,7 +630,7 @@ class EngineService(service.Service):
:param owner_id: parent stack ID for nested stacks, only expected when
called from another heat-engine (not a user option)
"""
LOG.info(_('Creating stack %s') % stack_name)
LOG.info(_LI('Creating stack %s'), stack_name)
def _stack_create(stack):
@ -648,7 +652,7 @@ class EngineService(service.Service):
# Schedule a periodic watcher task for this stack
self.stack_watch.start_watch_task(stack.id, cnxt)
else:
LOG.info(_("Stack create failed, status %s") % stack.status)
LOG.info(_LI("Stack create failed, status %s"), stack.status)
stack = self._parse_template_and_validate_stack(cnxt,
stack_name,
@ -683,7 +687,7 @@ class EngineService(service.Service):
"""
# Get the database representation of the existing stack
db_stack = self._get_stack(cnxt, stack_identity)
LOG.info(_('Updating stack %s') % db_stack.name)
LOG.info(_LI('Updating stack %s'), db_stack.name)
current_stack = parser.Stack.load(cnxt, stack=db_stack)
@ -745,7 +749,7 @@ class EngineService(service.Service):
msg = _("Cancelling update when stack is %s"
) % str(current_stack.state)
raise exception.NotSupported(feature=msg)
LOG.info(_('Starting cancel of updating stack %s') % db_stack.name)
LOG.info(_LI('Starting cancel of updating stack %s'), db_stack.name)
# stop the running update and take the lock
# as we cancel only running update, the acquire_result is
# always some engine_id, not None
@ -779,7 +783,7 @@ class EngineService(service.Service):
:param template: Template of stack you want to create.
:param params: Stack Input Params
"""
LOG.info(_('validate_template'))
LOG.info(_LI('validate_template'))
if template is None:
msg = _("No Template provided.")
return webob.exc.HTTPBadRequest(explanation=msg)
@ -874,7 +878,7 @@ class EngineService(service.Service):
"""
st = self._get_stack(cnxt, stack_identity)
LOG.info(_('Deleting stack %s') % st.name)
LOG.info(_LI('Deleting stack %s'), st.name)
stack = parser.Stack.load(cnxt, stack=st)
lock = stack_lock.StackLock(cnxt, stack, self.engine_id)
@ -927,7 +931,7 @@ class EngineService(service.Service):
raise exception.NotSupported(feature='Stack Abandon')
st = self._get_stack(cnxt, stack_identity)
LOG.info(_('abandoning stack %s') % st.name)
LOG.info(_LI('abandoning stack %s'), st.name)
stack = parser.Stack.load(cnxt, stack=st)
lock = stack_lock.StackLock(cnxt, stack, self.engine_id)
with lock.thread_lock(stack.id):
@ -1063,7 +1067,7 @@ class EngineService(service.Service):
if cfg.CONF.heat_stack_user_role in cnxt.roles:
if not self._authorize_stack_user(cnxt, stack, resource_name):
LOG.warning(_("Access denied to resource %s") % resource_name)
LOG.warn(_LW("Access denied to resource %s"), resource_name)
raise exception.Forbidden()
if resource_name not in stack:
@ -1228,7 +1232,7 @@ class EngineService(service.Service):
'''
s = self._get_stack(cnxt, stack_identity)
stack = parser.Stack.load(cnxt, stack=s)
LOG.info(_("Checking stack %s") % stack.name)
LOG.info(_LI("Checking stack %s"), stack.name)
self.thread_group_mgr.start_with_lock(cnxt, stack, self.engine_id,
stack.check)
@ -1318,7 +1322,7 @@ class EngineService(service.Service):
try:
wrn = [w.name for w in db_api.watch_rule_get_all(cnxt)]
except Exception as ex:
LOG.warn(_('show_watch (all) db error %s') % ex)
LOG.warn(_LW('show_watch (all) db error %s'), ex)
return
wrs = [watchrule.WatchRule.load(cnxt, w) for w in wrn]
@ -1341,13 +1345,13 @@ class EngineService(service.Service):
# namespace/metric, but we will want this at some point
# for now, the API can query all metric data and filter locally
if metric_namespace is not None or metric_name is not None:
LOG.error(_("Filtering by namespace/metric not yet supported"))
LOG.error(_LE("Filtering by namespace/metric not yet supported"))
return
try:
wds = db_api.watch_data_get_all(cnxt)
except Exception as ex:
LOG.warn(_('show_metric (all) db error %s') % ex)
LOG.warn(_LW('show_metric (all) db error %s'), ex)
return
result = [api.format_watch_data(w) for w in wds]

View File

@ -15,7 +15,7 @@ from keystoneclient.contrib.ec2 import utils as ec2_utils
from oslo.config import cfg
from six.moves.urllib import parse as urlparse
from heat.common.i18n import _
from heat.common.i18n import _LW
from heat.engine import stack_user
from heat.openstack.common import log as logging
@ -65,8 +65,8 @@ class SignalResponder(stack_user.StackUser):
secret_key = self.data().get('secret_key')
if not access_key or not secret_key:
LOG.warning(_('Cannot generate signed url, '
'no stored access/secret key'))
LOG.warn(_LW('Cannot generate signed url, '
'no stored access/secret key'))
return
waitcond_url = cfg.CONF.heat_waitcondition_server_url

View File

@ -26,6 +26,9 @@ from heat.common import context as common_context
from heat.common import exception
from heat.common.exception import StackValidationFailed
from heat.common.i18n import _
from heat.common.i18n import _LE
from heat.common.i18n import _LI
from heat.common.i18n import _LW
from heat.common import identifier
from heat.common import lifecycle_plugin_utils
from heat.db import api as db_api
@ -229,7 +232,7 @@ class Stack(collections.Mapping):
via the Parameters class as the StackId pseudo parameter
'''
if not self.parameters.set_stack_id(self.identifier()):
LOG.warning(_("Unable to set parameters StackId identifier"))
LOG.warn(_LW("Unable to set parameters StackId identifier"))
@staticmethod
def _get_dependencies(resources):
@ -503,11 +506,12 @@ class Stack(collections.Mapping):
stack.update_and_save({'action': action,
'status': status,
'status_reason': reason})
msg = _('Stack %(action)s %(status)s (%(name)s): %(reason)s')
LOG.info(msg % {'action': action,
'status': status,
'name': self.name,
'reason': reason})
LOG.info(_LI('Stack %(action)s %(status)s (%(name)s): '
'%(reason)s'),
{'action': action,
'status': status,
'name': self.name,
'reason': reason})
notification.send(self)
@property
@ -697,7 +701,7 @@ class Stack(collections.Mapping):
@scheduler.wrappertask
def update_task(self, newstack, action=UPDATE, event=None):
if action not in (self.UPDATE, self.ROLLBACK):
LOG.error(_("Unexpected action %s passed to update!") % action)
LOG.error(_LE("Unexpected action %s passed to update!"), action)
self.state_set(self.UPDATE, self.FAILED,
"Invalid action %s" % action)
return
@ -816,7 +820,7 @@ class Stack(collections.Mapping):
required for those resources, e.g the stack_user_project.
'''
if action not in (self.DELETE, self.ROLLBACK):
LOG.error(_("Unexpected action %s passed to delete!") % action)
LOG.error(_LE("Unexpected action %s passed to delete!"), action)
self.state_set(self.DELETE, self.FAILED,
"Invalid action %s" % action)
return
@ -933,16 +937,17 @@ class Stack(collections.Mapping):
try:
db_api.user_creds_delete(self.context, self.user_creds_id)
except exception.NotFound:
LOG.info(_("Tried to delete user_creds that do not exist "
"(stack=%(stack)s user_creds_id=%(uc)s)") %
LOG.info(_LI("Tried to delete user_creds that do not "
"exist (stack=%(stack)s user_creds_id="
"%(uc)s)"),
{'stack': self.id, 'uc': self.user_creds_id})
try:
self.user_creds_id = None
self.store()
except exception.NotFound:
LOG.info(_("Tried to store a stack that does not exist "
"%s ") % self.id)
LOG.info(_LI("Tried to store a stack that does not exist "
"%s "), self.id)
# If the stack has a domain project, delete it
if self.stack_user_project_id and not abandon:
@ -958,8 +963,8 @@ class Stack(collections.Mapping):
try:
self.state_set(action, stack_status, reason)
except exception.NotFound:
LOG.info(_("Tried to delete stack that does not exist "
"%s ") % self.id)
LOG.info(_LI("Tried to delete stack that does not exist "
"%s "), self.id)
if not backup:
lifecycle_plugin_utils.do_post_ops(self.context, self,
@ -970,8 +975,8 @@ class Stack(collections.Mapping):
try:
db_api.stack_delete(self.context, self.id)
except exception.NotFound:
LOG.info(_("Tried to delete stack that does not exist "
"%s ") % self.id)
LOG.info(_LI("Tried to delete stack that does not exist "
"%s "), self.id)
self.id = None
@profiler.trace('Stack.suspend', hide_args=False)
@ -986,7 +991,7 @@ class Stack(collections.Mapping):
'''
# No need to suspend if the stack has been suspended
if self.state == (self.SUSPEND, self.COMPLETE):
LOG.info(_('%s is already suspended') % str(self))
LOG.info(_LI('%s is already suspended'), str(self))
return
sus_task = scheduler.TaskRunner(self.stack_task,
@ -1006,7 +1011,7 @@ class Stack(collections.Mapping):
'''
# No need to resume if the stack has been resumed
if self.state == (self.RESUME, self.COMPLETE):
LOG.info(_('%s is already resumed') % str(self))
LOG.info(_LI('%s is already resumed'), str(self))
return
sus_task = scheduler.TaskRunner(self.stack_task,
@ -1054,7 +1059,7 @@ class Stack(collections.Mapping):
scheduler.TaskRunner(res.destroy)()
except exception.ResourceFailure as ex:
failed = True
LOG.error(_('Resource %(name)s delete failed: %(ex)s') %
LOG.error(_LE('Resource %(name)s delete failed: %(ex)s'),
{'name': res.name, 'ex': ex})
for res in deps:

View File

@ -19,7 +19,8 @@ from oslo import messaging
from oslo.utils import excutils
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.common.i18n import _LW
from heat.common import messaging as rpc_messaging
from heat.db import api as db_api
from heat.openstack.common import log as logging
@ -80,32 +81,32 @@ class StackLock(object):
raise exception.ActionInProgress(stack_name=self.stack.name,
action=self.stack.action)
else:
LOG.info(_("Stale lock detected on stack %(stack)s. Engine "
"%(engine)s will attempt to steal the lock")
% {'stack': self.stack.id, 'engine': self.engine_id})
LOG.info(_LI("Stale lock detected on stack %(stack)s. Engine "
"%(engine)s will attempt to steal the lock"),
{'stack': self.stack.id, 'engine': self.engine_id})
result = db_api.stack_lock_steal(self.stack.id, lock_engine_id,
self.engine_id)
if result is None:
LOG.info(_("Engine %(engine)s successfully stole the lock "
"on stack %(stack)s")
% {'engine': self.engine_id,
'stack': self.stack.id})
LOG.info(_LI("Engine %(engine)s successfully stole the lock "
"on stack %(stack)s"),
{'engine': self.engine_id,
'stack': self.stack.id})
return
elif result is True:
if retry:
LOG.info(_("The lock on stack %(stack)s was released "
"while engine %(engine)s was stealing it. "
"Trying again") % {'stack': self.stack.id,
'engine': self.engine_id})
LOG.info(_LI("The lock on stack %(stack)s was released "
"while engine %(engine)s was stealing it. "
"Trying again"), {'stack': self.stack.id,
'engine': self.engine_id})
return self.acquire(retry=False)
else:
new_lock_engine_id = result
LOG.info(_("Failed to steal lock on stack %(stack)s. "
"Engine %(engine)s stole the lock first")
% {'stack': self.stack.id,
'engine': new_lock_engine_id})
LOG.info(_LI("Failed to steal lock on stack %(stack)s. "
"Engine %(engine)s stole the lock first"),
{'stack': self.stack.id,
'engine': new_lock_engine_id})
raise exception.ActionInProgress(
stack_name=self.stack.name, action=self.stack.action)
@ -115,7 +116,7 @@ class StackLock(object):
# Only the engine that owns the lock will be releasing it.
result = db_api.stack_lock_release(stack_id, self.engine_id)
if result is True:
LOG.warning(_("Lock was already released on stack %s!") % stack_id)
LOG.warn(_LW("Lock was already released on stack %s!"), stack_id)
else:
LOG.debug("Engine %(engine)s released lock on stack "
"%(stack)s" % {'engine': self.engine_id,

View File

@ -16,6 +16,8 @@ from oslo.config import cfg
from heat.common import environment_format
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.common.i18n import _LW
from heat.engine import attributes
from heat.engine import environment
from heat.engine import resource
@ -104,8 +106,8 @@ class StackResource(resource.Resource):
child_template = self.child_template()
params = self.child_params()
except NotImplementedError:
not_implemented_msg = _("Preview of '%s' not yet implemented")
LOG.warning(not_implemented_msg % self.__class__.__name__)
LOG.warn(_LW("Preview of '%s' not yet implemented"),
self.__class__.__name__)
return self
name = "%s-%s" % (self.stack.name, self.name)
@ -253,7 +255,7 @@ class StackResource(resource.Resource):
try:
stack = self.nested()
except exception.NotFound:
LOG.info(_("Stack not found to delete"))
LOG.info(_LI("Stack not found to delete"))
else:
if stack is not None:
delete_task = scheduler.TaskRunner(stack.delete)

View File

@ -15,6 +15,7 @@ import keystoneclient.exceptions as kc_exception
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LW
from heat.engine import resource
from heat.openstack.common import log as logging
@ -95,7 +96,7 @@ class StackUser(resource.Resource):
# compatibility with resources created before the migration
# to stack_user.StackUser domain users. After an appropriate
# transitional period, this should be removed.
LOG.warning(_('Reverting to legacy user delete path'))
LOG.warn(_LW('Reverting to legacy user delete path'))
try:
self.keystone().delete_stack_user(user_id)
except kc_exception.NotFound:

View File

@ -13,7 +13,7 @@
import six
from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.db import api as db_api
from heat.engine import dependencies
from heat.engine import resource
@ -132,10 +132,10 @@ class StackUpdate(object):
except resource.UpdateReplace:
pass
else:
LOG.info(_("Resource %(res_name)s for stack %(stack_name)s "
"updated")
% {'res_name': res_name,
'stack_name': self.existing_stack.name})
LOG.info(_LI("Resource %(res_name)s for stack %(stack_name)s "
"updated"),
{'res_name': res_name,
'stack_name': self.existing_stack.name})
return
yield self._create_resource(new_res)

View File

@ -18,6 +18,8 @@ from oslo.utils import timeutils
from heat.common import exception
from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.common.i18n import _LW
from heat.db import api as db_api
from heat.engine import stack
from heat.engine import timestamp
@ -76,8 +78,8 @@ class WatchRule(object):
try:
watch = db_api.watch_rule_get_by_name(context, watch_name)
except Exception as ex:
LOG.warn(_('WatchRule.load (%(watch_name)s) db error '
'%(ex)s') % {'watch_name': watch_name, 'ex': ex})
LOG.warn(_LW('WatchRule.load (%(watch_name)s) db error '
'%(ex)s'), {'watch_name': watch_name, 'ex': ex})
if watch is None:
raise exception.WatchRuleNotFound(watch_name=watch_name)
else:
@ -247,13 +249,13 @@ class WatchRule(object):
return actions
def rule_actions(self, new_state):
LOG.info(_('WATCH: stack:%(stack)s, watch_name:%(watch_name)s, '
'new_state:%(new_state)s'), {'stack': self.stack_id,
'watch_name': self.name,
'new_state': new_state})
LOG.info(_LI('WATCH: stack:%(stack)s, watch_name:%(watch_name)s, '
'new_state:%(new_state)s'), {'stack': self.stack_id,
'watch_name': self.name,
'new_state': new_state})
actions = []
if self.ACTION_MAP[new_state] not in self.rule:
LOG.info(_('no action for new state %s'), new_state)
LOG.info(_LI('no action for new state %s'), new_state)
else:
s = db_api.stack_get(self.context, self.stack_id,
eager_load=True)
@ -263,8 +265,8 @@ class WatchRule(object):
for refid in self.rule[self.ACTION_MAP[new_state]]:
actions.append(stk.resource_by_refid(refid).signal)
else:
LOG.warning(_("Could not process watch state %s for stack")
% new_state)
LOG.warn(_LW("Could not process watch state %s for stack"),
new_state)
return actions
def _to_ceilometer(self, data):
@ -347,9 +349,9 @@ class WatchRule(object):
% {'self_state': self.state, 'name': self.name,
'state': state})
else:
LOG.warning(_("Unable to override state %(state)s for "
"watch %(name)s") % {'state': self.state,
'name': self.name})
LOG.warn(_LW("Unable to override state %(state)s for "
"watch %(name)s"), {'state': self.state,
'name': self.name})
return actions