Remove log translations

Log messages are no longer being translated. This removes all use of
the _LE, _LI, and _LW translation markers to simplify logging and to
avoid confusion with new contributions.

See:
http://lists.openstack.org/pipermail/openstack-i18n/2016-November/002574.html
http://lists.openstack.org/pipermail/openstack-dev/2017-March/113365.html

Change-Id: Ieec8028305099422e1b0f8fc84bc90c9ca6c694f
This commit is contained in:
liyi 2017-03-21 12:17:51 +08:00
parent 34df3cd915
commit 8f10215ffd
94 changed files with 559 additions and 705 deletions

View File

@@ -20,13 +20,12 @@ An OpenStack REST API to Heat.
from oslo_log import log as logging from oslo_log import log as logging
from heat.common.i18n import _LW
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
LOG.warning(_LW('DEPRECATED: `heat-api` script is deprecated. Please use the ' LOG.warning('DEPRECATED: `heat-api` script is deprecated. Please use the '
'system level heat binaries installed to start ' 'system level heat binaries installed to start '
'any of the heat services.')) 'any of the heat services.')
import os import os
import sys import sys

View File

@@ -22,13 +22,12 @@ AMQP RPC to implement them.
from oslo_log import log as logging from oslo_log import log as logging
from heat.common.i18n import _LW
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
LOG.warning(_LW('DEPRECATED: `heat-api-cfn` script is deprecated. Please use ' LOG.warning('DEPRECATED: `heat-api-cfn` script is deprecated. Please use '
'the system level heat binaries installed to start ' 'the system level heat binaries installed to start '
'any of the heat services.')) 'any of the heat services.')
import os import os
import sys import sys

View File

@@ -22,13 +22,12 @@ implement them.
from oslo_log import log as logging from oslo_log import log as logging
from heat.common.i18n import _LW
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
LOG.warning(_LW('DEPRECATED: `heat-api-cloudwatch` script is deprecated. ' LOG.warning('DEPRECATED: `heat-api-cloudwatch` script is deprecated. '
'Please use the system level heat binaries installed to ' 'Please use the system level heat binaries installed to '
'start any of the heat services.')) 'start any of the heat services.')
import os import os
import sys import sys

View File

@@ -22,13 +22,12 @@ engine.
from oslo_log import log as logging from oslo_log import log as logging
from heat.common.i18n import _LW
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
LOG.warning(_LW('DEPRECATED: `heat-engine` script is deprecated. ' LOG.warning('DEPRECATED: `heat-engine` script is deprecated. '
'Please use the system level heat binaries installed to ' 'Please use the system level heat binaries installed to '
'start any of the heat services.')) 'start any of the heat services.')
import os import os
import sys import sys

View File

@@ -15,12 +15,11 @@
from oslo_log import log as logging from oslo_log import log as logging
from heat.common.i18n import _LW
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
LOG.warning(_LW('DEPRECATED: `heat-manage` script is deprecated. Please use ' LOG.warning('DEPRECATED: `heat-manage` script is deprecated. Please use '
'the system level heat-manage binary.')) 'the system level heat-manage binary.')
import os import os
import sys import sys

View File

@@ -21,7 +21,6 @@ import six
from heat.common import exception from heat.common import exception
from heat.common.i18n import _ from heat.common.i18n import _
from heat.common.i18n import _LW
from heat.engine import attributes from heat.engine import attributes
from heat.engine import constraints from heat.engine import constraints
from heat.engine import properties from heat.engine import properties
@@ -554,8 +553,8 @@ def available_resource_mapping():
if DOCKER_INSTALLED: if DOCKER_INSTALLED:
return resource_mapping() return resource_mapping()
else: else:
LOG.warning(_LW("Docker plug-in loaded, but docker lib " LOG.warning("Docker plug-in loaded, but docker lib "
"not installed.")) "not installed.")
return {} return {}

View File

@@ -21,12 +21,9 @@ from oslo_log import log as logging
from oslo_utils import importutils from oslo_utils import importutils
from heat.common import exception from heat.common import exception
from heat.common.i18n import _LE
from heat.common.i18n import _LI
from heat.common.i18n import _LW
LOG = logging.getLogger('heat.common.keystoneclient') LOG = logging.getLogger('heat.common.keystoneclient')
LOG.info(_LI("Keystone V2 loaded")) LOG.info("Keystone V2 loaded")
class KeystoneClientV2(object): class KeystoneClientV2(object):
@@ -100,8 +97,8 @@ class KeystoneClientV2(object):
kwargs['tenant_name'] = self.context.project_name kwargs['tenant_name'] = self.context.project_name
kwargs['tenant_id'] = self.context.tenant_id kwargs['tenant_id'] = self.context.tenant_id
else: else:
LOG.error(_LE("Keystone v2 API connection failed, no password " LOG.error("Keystone v2 API connection failed, no password "
"or auth_token!")) "or auth_token!")
raise exception.AuthorizationFailure() raise exception.AuthorizationFailure()
kwargs['cacert'] = self._get_client_option('ca_file') kwargs['cacert'] = self._get_client_option('ca_file')
kwargs['insecure'] = self._get_client_option('insecure') kwargs['insecure'] = self._get_client_option('insecure')
@@ -115,7 +112,7 @@ class KeystoneClientV2(object):
if auth_kwargs: if auth_kwargs:
# Sanity check # Sanity check
if not client.auth_ref.trust_scoped: if not client.auth_ref.trust_scoped:
LOG.error(_LE("v2 trust token re-scoping failed!")) LOG.error("v2 trust token re-scoping failed!")
raise exception.AuthorizationFailure() raise exception.AuthorizationFailure()
# All OK so update the context with the token # All OK so update the context with the token
self.context.auth_token = client.auth_ref.auth_token self.context.auth_token = client.auth_ref.auth_token
@@ -123,8 +120,8 @@ class KeystoneClientV2(object):
# Ensure the v2 API we're using is not impacted by keystone # Ensure the v2 API we're using is not impacted by keystone
# bug #1239303, otherwise we can't trust the user_id # bug #1239303, otherwise we can't trust the user_id
if self.context.trustor_user_id != client.auth_ref.user_id: if self.context.trustor_user_id != client.auth_ref.user_id:
LOG.error(_LE("Trust impersonation failed, bug #1239303 " LOG.error("Trust impersonation failed, bug #1239303 "
"suspected, you may need a newer keystone")) "suspected, you may need a newer keystone")
raise exception.AuthorizationFailure() raise exception.AuthorizationFailure()
return client return client
@@ -164,8 +161,8 @@ class KeystoneClientV2(object):
Returns the keystone ID of the resulting user Returns the keystone ID of the resulting user
""" """
if len(username) > 64: if len(username) > 64:
LOG.warning(_LW("Truncating the username %s to the last 64 " LOG.warning("Truncating the username %s to the last 64 "
"characters."), username) "characters.", username)
# get the last 64 characters of the username # get the last 64 characters of the username
username = username[-64:] username = username[-64:]
user = self.client.users.create(username, user = self.client.users.create(username,
@@ -188,8 +185,8 @@ class KeystoneClientV2(object):
self.client.roles.add_user_role(user.id, role_id, self.client.roles.add_user_role(user.id, role_id,
self.context.tenant_id) self.context.tenant_id)
else: else:
LOG.error(_LE("Failed to add user %(user)s to role %(role)s, " LOG.error("Failed to add user %(user)s to role %(role)s, "
"check role exists!"), "check role exists!",
{'user': username, {'user': username,
'role': cfg.CONF.heat_stack_user_role}) 'role': cfg.CONF.heat_stack_user_role})

View File

@@ -25,8 +25,6 @@ from swiftclient import utils as swiftclient_utils
from troveclient import client as tc from troveclient import client as tc
from heat.common import exception from heat.common import exception
from heat.common.i18n import _LI
from heat.common.i18n import _LW
from heat.engine.clients import client_plugin from heat.engine.clients import client_plugin
from heat.engine.clients.os import cinder from heat.engine.clients.os import cinder
from heat.engine.clients.os import glance from heat.engine.clients.os import glance
@@ -57,7 +55,7 @@ class RackspaceClientPlugin(client_plugin.ClientPlugin):
"""Create an authenticated client context.""" """Create an authenticated client context."""
self.pyrax = pyrax.create_context("rackspace") self.pyrax = pyrax.create_context("rackspace")
self.pyrax.auth_endpoint = self.context.auth_url self.pyrax.auth_endpoint = self.context.auth_url
LOG.info(_LI("Authenticating username: %s"), LOG.info("Authenticating username: %s",
self.context.username) self.context.username)
tenant = self.context.tenant_id tenant = self.context.tenant_id
tenant_name = self.context.tenant tenant_name = self.context.tenant
@@ -65,9 +63,9 @@ class RackspaceClientPlugin(client_plugin.ClientPlugin):
tenant_id=tenant, tenant_id=tenant,
tenant_name=tenant_name) tenant_name=tenant_name)
if not self.pyrax.authenticated: if not self.pyrax.authenticated:
LOG.warning(_LW("Pyrax Authentication Failed.")) LOG.warning("Pyrax Authentication Failed.")
raise exception.AuthorizationFailure() raise exception.AuthorizationFailure()
LOG.info(_LI("User %s authenticated successfully."), LOG.info("User %s authenticated successfully.",
self.context.username) self.context.username)

View File

@@ -19,7 +19,6 @@ import six
from heat.common import exception from heat.common import exception
from heat.common.i18n import _ from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.engine import attributes from heat.engine import attributes
from heat.engine import constraints from heat.engine import constraints
from heat.engine import function from heat.engine import function
@@ -1184,7 +1183,7 @@ class CloudLoadBalancer(resource.Resource):
raise exception.InvalidTemplateAttribute(resource=self.name, raise exception.InvalidTemplateAttribute(resource=self.name,
key=key) key=key)
function = attribute_function[key] function = attribute_function[key]
LOG.info(_LI('%(name)s.GetAtt(%(key)s) == %(function)s'), LOG.info('%(name)s.GetAtt(%(key)s) == %(function)s',
{'name': self.name, 'key': key, 'function': function}) {'name': self.name, 'key': key, 'function': function})
return function return function

View File

@@ -17,7 +17,6 @@ from oslo_log import log as logging
from heat.common import exception from heat.common import exception
from heat.common.i18n import _ from heat.common.i18n import _
from heat.common.i18n import _LW
from heat.engine import constraints from heat.engine import constraints
from heat.engine import properties from heat.engine import properties
from heat.engine.resources.openstack.nova import server from heat.engine.resources.openstack.nova import server
@@ -187,7 +186,7 @@ class CloudServer(server.Server):
reason = server.metadata.get('rackconnect_unprocessable_reason', reason = server.metadata.get('rackconnect_unprocessable_reason',
None) None)
if reason is not None: if reason is not None:
LOG.warning(_LW("RackConnect unprocessable reason: %s"), LOG.warning("RackConnect unprocessable reason: %s",
reason) reason)
msg = _("RackConnect automation has completed") msg = _("RackConnect automation has completed")

View File

@@ -15,7 +15,6 @@ from oslo_log import log as logging
import six import six
from heat.common.i18n import _ from heat.common.i18n import _
from heat.common.i18n import _LW
from heat.engine import attributes from heat.engine import attributes
from heat.engine import constraints from heat.engine import constraints
from heat.engine import properties from heat.engine import properties
@@ -108,8 +107,8 @@ class CloudNetwork(resource.Resource):
try: try:
self._network = self.cloud_networks().get(self.resource_id) self._network = self.cloud_networks().get(self.resource_id)
except NotFound: except NotFound:
LOG.warning(_LW("Could not find network %s but resource id is" LOG.warning("Could not find network %s but resource id is"
" set."), self.resource_id) " set.", self.resource_id)
return self._network return self._network
def cloud_networks(self): def cloud_networks(self):
@@ -139,7 +138,7 @@ class CloudNetwork(resource.Resource):
try: try:
network.delete() network.delete()
except NetworkInUse: except NetworkInUse:
LOG.warning(_LW("Network '%s' still in use."), network.id) LOG.warning("Network '%s' still in use.", network.id)
else: else:
self._delete_issued = True self._delete_issued = True
return False return False

View File

@@ -22,8 +22,6 @@ import webob
from heat.api.aws import exception from heat.api.aws import exception
from heat.common import endpoint_utils from heat.common import endpoint_utils
from heat.common.i18n import _ from heat.common.i18n import _
from heat.common.i18n import _LE
from heat.common.i18n import _LI
from heat.common import wsgi from heat.common import wsgi
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
@@ -161,14 +159,14 @@ class EC2Token(wsgi.Middleware):
# here so that we can use both authentication methods. # here so that we can use both authentication methods.
# Returning here just means the user didn't supply AWS # Returning here just means the user didn't supply AWS
# authentication and we'll let the app try native keystone next. # authentication and we'll let the app try native keystone next.
LOG.info(_LI("Checking AWS credentials..")) LOG.info("Checking AWS credentials..")
signature = self._get_signature(req) signature = self._get_signature(req)
if not signature: if not signature:
if 'X-Auth-User' in req.headers: if 'X-Auth-User' in req.headers:
return self.application return self.application
else: else:
LOG.info(_LI("No AWS Signature found.")) LOG.info("No AWS Signature found.")
raise exception.HeatIncompleteSignatureError() raise exception.HeatIncompleteSignatureError()
access = self._get_access(req) access = self._get_access(req)
@@ -176,14 +174,14 @@ class EC2Token(wsgi.Middleware):
if 'X-Auth-User' in req.headers: if 'X-Auth-User' in req.headers:
return self.application return self.application
else: else:
LOG.info(_LI("No AWSAccessKeyId/Authorization Credential")) LOG.info("No AWSAccessKeyId/Authorization Credential")
raise exception.HeatMissingAuthenticationTokenError() raise exception.HeatMissingAuthenticationTokenError()
LOG.info(_LI("AWS credentials found, checking against keystone.")) LOG.info("AWS credentials found, checking against keystone.")
if not auth_uri: if not auth_uri:
LOG.error(_LE("Ec2Token authorization failed, no auth_uri " LOG.error("Ec2Token authorization failed, no auth_uri "
"specified in config file")) "specified in config file")
raise exception.HeatInternalFailureError(_('Service ' raise exception.HeatInternalFailureError(_('Service '
'misconfigured')) 'misconfigured'))
# Make a copy of args for authentication and signature verification. # Make a copy of args for authentication and signature verification.
@@ -207,7 +205,7 @@ class EC2Token(wsgi.Middleware):
headers = {'Content-Type': 'application/json'} headers = {'Content-Type': 'application/json'}
keystone_ec2_uri = self._conf_get_keystone_ec2_uri(auth_uri) keystone_ec2_uri = self._conf_get_keystone_ec2_uri(auth_uri)
LOG.info(_LI('Authenticating with %s'), keystone_ec2_uri) LOG.info('Authenticating with %s', keystone_ec2_uri)
response = requests.post(keystone_ec2_uri, data=creds_json, response = requests.post(keystone_ec2_uri, data=creds_json,
headers=headers, headers=headers,
verify=self.ssl_options['verify'], verify=self.ssl_options['verify'],
@@ -220,7 +218,7 @@ class EC2Token(wsgi.Middleware):
roles = [role['name'] roles = [role['name']
for role in result['token'].get('roles', [])] for role in result['token'].get('roles', [])]
except (AttributeError, KeyError): except (AttributeError, KeyError):
LOG.info(_LI("AWS authentication failure.")) LOG.info("AWS authentication failure.")
# Try to extract the reason for failure so we can return the # Try to extract the reason for failure so we can return the
# appropriate AWS error via raising an exception # appropriate AWS error via raising an exception
try: try:
@@ -235,7 +233,7 @@ class EC2Token(wsgi.Middleware):
else: else:
raise exception.HeatAccessDeniedError() raise exception.HeatAccessDeniedError()
else: else:
LOG.info(_LI("AWS authentication successful.")) LOG.info("AWS authentication successful.")
# Authenticated! # Authenticated!
ec2_creds = {'ec2Credentials': {'access': access, ec2_creds = {'ec2Credentials': {'access': access,

View File

@@ -19,7 +19,6 @@ import re
from oslo_log import log as logging from oslo_log import log as logging
from heat.api.aws import exception from heat.api.aws import exception
from heat.common.i18n import _LE
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
@@ -98,7 +97,7 @@ def get_param_value(params, key):
try: try:
return params[key] return params[key]
except KeyError: except KeyError:
LOG.error(_LE("Request does not contain %s parameter!"), key) LOG.error("Request does not contain %s parameter!", key)
raise exception.HeatMissingParameterError(key) raise exception.HeatMissingParameterError(key)

View File

@@ -22,7 +22,6 @@ from heat.api.aws import exception
from heat.api.aws import utils as api_utils from heat.api.aws import utils as api_utils
from heat.common import exception as heat_exception from heat.common import exception as heat_exception
from heat.common.i18n import _ from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.common import identifier from heat.common import identifier
from heat.common import policy from heat.common import policy
from heat.common import template_format from heat.common import template_format
@@ -426,7 +425,7 @@ class StackController(object):
msg = _("The Template must be a JSON or YAML document.") msg = _("The Template must be a JSON or YAML document.")
return exception.HeatInvalidParameterValueError(detail=msg) return exception.HeatInvalidParameterValueError(detail=msg)
LOG.info(_LI('validate_template')) LOG.info('validate_template')
def format_validate_parameter(key, value): def format_validate_parameter(key, value):
"""Reformat engine output into AWS "ValidateTemplate" format.""" """Reformat engine output into AWS "ValidateTemplate" format."""

View File

@@ -21,8 +21,6 @@ from heat.api.aws import exception
from heat.api.aws import utils as api_utils from heat.api.aws import utils as api_utils
from heat.common import exception as heat_exception from heat.common import exception as heat_exception
from heat.common.i18n import _ from heat.common.i18n import _
from heat.common.i18n import _LE
from heat.common.i18n import _LW
from heat.common import policy from heat.common import policy
from heat.common import wsgi from heat.common import wsgi
from heat.rpc import api as rpc_api from heat.rpc import api as rpc_api
@@ -199,7 +197,7 @@ class WatchController(object):
# Filter criteria not met, return None # Filter criteria not met, return None
return return
except KeyError: except KeyError:
LOG.warning(_LW("Invalid filter key %s, ignoring"), f) LOG.warning("Invalid filter key %s, ignoring", f)
return result return result
@@ -250,8 +248,8 @@ class WatchController(object):
# need to process (each dict) for dimensions # need to process (each dict) for dimensions
metric_data = api_utils.extract_param_list(parms, prefix='MetricData') metric_data = api_utils.extract_param_list(parms, prefix='MetricData')
if not len(metric_data): if not len(metric_data):
LOG.error(_LE("Request does not contain required MetricData")) LOG.error("Request does not contain required MetricData")
return exception.HeatMissingParameterError("MetricData list") return exception.HeatMissingParameterError(_("MetricData list"))
watch_name = None watch_name = None
dimensions = [] dimensions = []

View File

@@ -24,7 +24,6 @@ from heat.api.openstack.v1.views import stacks_view
from heat.common import context from heat.common import context
from heat.common import environment_format from heat.common import environment_format
from heat.common.i18n import _ from heat.common.i18n import _
from heat.common.i18n import _LW
from heat.common import identifier from heat.common import identifier
from heat.common import param_utils from heat.common import param_utils
from heat.common import serializers from heat.common import serializers
@@ -324,7 +323,7 @@ class StackController(object):
not_tags=not_tags, not_tags=not_tags,
not_tags_any=not_tags_any) not_tags_any=not_tags_any)
except AttributeError as ex: except AttributeError as ex:
LOG.warning(_LW("Old Engine Version: %s"), ex) LOG.warning("Old Engine Version: %s", ex)
return stacks_view.collection(req, stacks=stacks, return stacks_view.collection(req, stacks=stacks,
count=count, count=count,

View File

@@ -30,7 +30,6 @@ from oslo_service import systemd
import six import six
from heat.common import config from heat.common import config
from heat.common.i18n import _LI
from heat.common import messaging from heat.common import messaging
from heat.common import profiler from heat.common import profiler
from heat.common import wsgi from heat.common import wsgi
@@ -55,7 +54,7 @@ def launch_api(setup_logging=True):
port = cfg.CONF.heat_api.bind_port port = cfg.CONF.heat_api.bind_port
host = cfg.CONF.heat_api.bind_host host = cfg.CONF.heat_api.bind_host
LOG.info(_LI('Starting Heat REST API on %(host)s:%(port)s'), LOG.info('Starting Heat REST API on %(host)s:%(port)s',
{'host': host, 'port': port}) {'host': host, 'port': port})
profiler.setup('heat-api', host) profiler.setup('heat-api', host)
gmr.TextGuruMeditation.setup_autorun(version) gmr.TextGuruMeditation.setup_autorun(version)

View File

@@ -32,7 +32,6 @@ from oslo_service import systemd
import six import six
from heat.common import config from heat.common import config
from heat.common.i18n import _LI
from heat.common import messaging from heat.common import messaging
from heat.common import profiler from heat.common import profiler
from heat.common import wsgi from heat.common import wsgi
@@ -59,7 +58,7 @@ def launch_cfn_api(setup_logging=True):
port = cfg.CONF.heat_api_cfn.bind_port port = cfg.CONF.heat_api_cfn.bind_port
host = cfg.CONF.heat_api_cfn.bind_host host = cfg.CONF.heat_api_cfn.bind_host
LOG.info(_LI('Starting Heat API on %(host)s:%(port)s'), LOG.info('Starting Heat API on %(host)s:%(port)s',
{'host': host, 'port': port}) {'host': host, 'port': port})
profiler.setup('heat-api-cfn', host) profiler.setup('heat-api-cfn', host)
gmr.TextGuruMeditation.setup_autorun(version) gmr.TextGuruMeditation.setup_autorun(version)

View File

@@ -32,7 +32,6 @@ from oslo_service import systemd
import six import six
from heat.common import config from heat.common import config
from heat.common.i18n import _LI
from heat.common import messaging from heat.common import messaging
from heat.common import profiler from heat.common import profiler
from heat.common import wsgi from heat.common import wsgi
@@ -59,7 +58,7 @@ def launch_cloudwatch_api(setup_logging=True):
port = cfg.CONF.heat_api_cloudwatch.bind_port port = cfg.CONF.heat_api_cloudwatch.bind_port
host = cfg.CONF.heat_api_cloudwatch.bind_host host = cfg.CONF.heat_api_cloudwatch.bind_host
LOG.info(_LI('Starting Heat CloudWatch API on %(host)s:%(port)s'), LOG.info('Starting Heat CloudWatch API on %(host)s:%(port)s',
{'host': host, 'port': port}) {'host': host, 'port': port})
profiler.setup('heat-api-cloudwatch', host) profiler.setup('heat-api-cloudwatch', host)
gmr.TextGuruMeditation.setup_autorun(version) gmr.TextGuruMeditation.setup_autorun(version)

View File

@@ -32,7 +32,6 @@ from oslo_reports import guru_meditation_report as gmr
from oslo_service import service from oslo_service import service
from heat.common import config from heat.common import config
from heat.common.i18n import _LC
from heat.common import messaging from heat.common import messaging
from heat.common import profiler from heat.common import profiler
from heat.engine import template from heat.engine import template
@@ -60,7 +59,7 @@ def launch_engine(setup_logging=True):
try: try:
mgr = template._get_template_extension_manager() mgr = template._get_template_extension_manager()
except template.TemplatePluginNotRegistered as ex: except template.TemplatePluginNotRegistered as ex:
LOG.critical(_LC("%s"), ex) LOG.critical("%s", ex)
if not mgr or not mgr.names(): if not mgr or not mgr.names():
sys.exit("ERROR: No template format plugins registered") sys.exit("ERROR: No template format plugins registered")

View File

@@ -22,7 +22,6 @@ from osprofiler import opts as profiler
from heat.common import exception from heat.common import exception
from heat.common.i18n import _ from heat.common.i18n import _
from heat.common.i18n import _LW
from heat.common import wsgi from heat.common import wsgi
@@ -375,8 +374,8 @@ def startup_sanity_check():
not cfg.CONF.stack_user_domain_name): not cfg.CONF.stack_user_domain_name):
# FIXME(shardy): Legacy fallback for folks using old heat.conf # FIXME(shardy): Legacy fallback for folks using old heat.conf
# files which lack domain configuration # files which lack domain configuration
LOG.warning(_LW('stack_user_domain_id or stack_user_domain_name not ' LOG.warning('stack_user_domain_id or stack_user_domain_name not '
'set in heat.conf falling back to using default')) 'set in heat.conf falling back to using default')
else: else:
domain_admin_user = cfg.CONF.stack_domain_admin domain_admin_user = cfg.CONF.stack_domain_admin
domain_admin_password = cfg.CONF.stack_domain_admin_password domain_admin_password = cfg.CONF.stack_domain_admin_password
@@ -389,7 +388,7 @@ def startup_sanity_check():
auth_key_len = len(cfg.CONF.auth_encryption_key) auth_key_len = len(cfg.CONF.auth_encryption_key)
if auth_key_len in (16, 24): if auth_key_len in (16, 24):
LOG.warning( LOG.warning(
_LW('Please update auth_encryption_key to be 32 characters.')) 'Please update auth_encryption_key to be 32 characters.')
elif auth_key_len != 32: elif auth_key_len != 32:
raise exception.Error(_('heat.conf misconfigured, auth_encryption_key ' raise exception.Error(_('heat.conf misconfigured, auth_encryption_key '
'must be 32 characters')) 'must be 32 characters'))

View File

@@ -28,7 +28,6 @@ import six
from heat.common import config from heat.common import config
from heat.common import endpoint_utils from heat.common import endpoint_utils
from heat.common import exception from heat.common import exception
from heat.common.i18n import _LE
from heat.common import policy from heat.common import policy
from heat.common import wsgi from heat.common import wsgi
from heat.db.sqlalchemy import api as db_api from heat.db.sqlalchemy import api as db_api
@@ -235,9 +234,9 @@ class RequestContext(context.RequestContext):
if auth_uri: if auth_uri:
return auth_uri return auth_uri
else: else:
LOG.error(_LE('Keystone API endpoint not provided. Set ' LOG.error('Keystone API endpoint not provided. Set '
'auth_uri in section [clients_keystone] ' 'auth_uri in section [clients_keystone] '
'of the configuration file.')) 'of the configuration file.')
raise exception.AuthorizationFailure() raise exception.AuthorizationFailure()
@property @property
@@ -247,8 +246,8 @@ class RequestContext(context.RequestContext):
cfg.CONF, TRUSTEE_CONF_GROUP, trust_id=self.trust_id) cfg.CONF, TRUSTEE_CONF_GROUP, trust_id=self.trust_id)
if not self._trusts_auth_plugin: if not self._trusts_auth_plugin:
LOG.error(_LE('Please add the trustee credentials you need ' LOG.error('Please add the trustee credentials you need '
'to the %s section of your heat.conf file.'), 'to the %s section of your heat.conf file.',
TRUSTEE_CONF_GROUP) TRUSTEE_CONF_GROUP)
raise exception.AuthorizationFailure() raise exception.AuthorizationFailure()
@@ -276,8 +275,8 @@ class RequestContext(context.RequestContext):
user_domain_id=self.user_domain, user_domain_id=self.user_domain,
auth_url=self.keystone_v3_endpoint) auth_url=self.keystone_v3_endpoint)
LOG.error(_LE("Keystone API connection failed, no password " LOG.error("Keystone API connection failed, no password "
"trust or auth_token!")) "trust or auth_token!")
raise exception.AuthorizationFailure() raise exception.AuthorizationFailure()
def reload_auth_plugin(self): def reload_auth_plugin(self):

View File

@@ -24,7 +24,6 @@ from oslo_utils import excutils
import six import six
from heat.common.i18n import _ from heat.common.i18n import _
from heat.common.i18n import _LE
_FATAL_EXCEPTION_FORMAT_ERRORS = False _FATAL_EXCEPTION_FORMAT_ERRORS = False
@@ -69,9 +68,9 @@ class HeatException(Exception):
reraise=_FATAL_EXCEPTION_FORMAT_ERRORS): reraise=_FATAL_EXCEPTION_FORMAT_ERRORS):
# kwargs doesn't match a variable in the message # kwargs doesn't match a variable in the message
# log the issue and the kwargs # log the issue and the kwargs
LOG.exception(_LE('Exception in string format operation')) LOG.exception('Exception in string format operation')
for name, value in six.iteritems(kwargs): for name, value in six.iteritems(kwargs):
LOG.error(_LE("%(name)s: %(value)s"), LOG.error("%(name)s: %(value)s",
{'name': name, 'value': value}) # noqa {'name': name, 'value': value}) # noqa
if self.error_code: if self.error_code:

View File

@@ -27,16 +27,6 @@ _translators = i18n.TranslatorFactory(domain='heat')
# The primary translation function using the well-known name "_" # The primary translation function using the well-known name "_"
_ = _translators.primary _ = _translators.primary
# Translators for log levels.
#
# The abbreviated names are meant to reflect the usual use of a short
# name like '_'. The "L" is for "log" and the other letter comes from
# the level.
_LI = _translators.log_info
_LW = _translators.log_warning
_LE = _translators.log_error
_LC = _translators.log_critical
def repr_wrapper(klass): def repr_wrapper(klass):
"""A decorator that defines __repr__ method under Python 2. """A decorator that defines __repr__ method under Python 2.

View File

@@ -16,8 +16,6 @@
from oslo_log import log as logging from oslo_log import log as logging
from heat.common.i18n import _LE
from heat.common.i18n import _LI
from heat.engine import resources from heat.engine import resources
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
@@ -42,19 +40,19 @@ def get_plug_point_class_instances():
slps = resources.global_env().get_stack_lifecycle_plugins() slps = resources.global_env().get_stack_lifecycle_plugins()
pp_classes = [cls for name, cls in slps] pp_classes = [cls for name, cls in slps]
except Exception: except Exception:
LOG.exception(_LE("failed to get lifecycle plug point classes")) LOG.exception("failed to get lifecycle plug point classes")
for ppc in pp_classes: for ppc in pp_classes:
try: try:
pp_class_instances.append(ppc()) pp_class_instances.append(ppc())
except Exception: except Exception:
LOG.exception( LOG.exception(
_LE("failed to instantiate stack lifecycle class %s"), ppc) "failed to instantiate stack lifecycle class %s", ppc)
try: try:
pp_class_instances = sorted(pp_class_instances, pp_class_instances = sorted(pp_class_instances,
key=lambda ppci: ppci.get_ordinal()) key=lambda ppci: ppci.get_ordinal())
except Exception: except Exception:
LOG.exception(_LE("failed to sort lifecycle plug point classes")) LOG.exception("failed to sort lifecycle plug point classes")
return pp_class_instances return pp_class_instances
@@ -108,13 +106,13 @@ def _do_ops(cinstances, opname, cnxt, stack, current_stack=None, action=None,
op(cnxt, stack, current_stack, action) op(cnxt, stack, current_stack, action)
success_count += 1 success_count += 1
except Exception as ex: except Exception as ex:
LOG.exception(_LE( LOG.exception(
"%(opname)s %(ci)s failed for %(a)s on %(sid)s"), "%(opname)s %(ci)s failed for %(a)s on %(sid)s",
{'opname': opname, 'ci': type(ci), {'opname': opname, 'ci': type(ci),
'a': action, 'sid': stack.id}) 'a': action, 'sid': stack.id})
failure = True failure = True
failure_exception_message = ex.args[0] if ex.args else str(ex) failure_exception_message = ex.args[0] if ex.args else str(ex)
break break
LOG.info(_LI("done with class=%(c)s, stackid=%(sid)s, action=%(a)s"), LOG.info("done with class=%(c)s, stackid=%(sid)s, action=%(a)s",
{'c': type(ci), 'sid': stack.id, 'a': action}) {'c': type(ci), 'sid': stack.id, 'a': action})
return (failure, failure_exception_message, success_count) return (failure, failure_exception_message, success_count)

View File

@ -26,7 +26,6 @@ import types
from oslo_log import log as logging from oslo_log import log as logging
import six import six
from heat.common.i18n import _LE
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
@ -97,7 +96,7 @@ def load_modules(package, ignore_error=False):
try: try:
module = _import_module(importer, module_name, package) module = _import_module(importer, module_name, package)
except ImportError: except ImportError:
LOG.error(_LE('Failed to import module %s'), module_name) LOG.error('Failed to import module %s', module_name)
if not ignore_error: if not ignore_error:
raise raise
else: else:

View File

@ -13,14 +13,13 @@
from oslo_log import log as logging from oslo_log import log as logging
from heat.common.i18n import _LW
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
def log_fail_msg(manager, entrypoint, exception): def log_fail_msg(manager, entrypoint, exception):
LOG.warning(_LW('Encountered exception while loading %(module_name)s: ' LOG.warning('Encountered exception while loading %(module_name)s: '
'"%(message)s". Not using %(name)s.'), '"%(message)s". Not using %(name)s.',
{'module_name': entrypoint.module_name, {'module_name': entrypoint.module_name,
'message': exception.message, 'message': exception.message,
'name': entrypoint.name}) 'name': entrypoint.name})

View File

@ -16,7 +16,6 @@ from oslo_log import log as logging
import osprofiler.initializer import osprofiler.initializer
from heat.common import context from heat.common import context
from heat.common.i18n import _LW
cfg.CONF.import_opt('enabled', 'heat.common.config', group='profiler') cfg.CONF.import_opt('enabled', 'heat.common.config', group='profiler')
@ -31,14 +30,14 @@ def setup(binary, host):
project="heat", project="heat",
service=binary, service=binary,
host=host) host=host)
LOG.warning(_LW("OSProfiler is enabled.\nIt means that person who " LOG.warning("OSProfiler is enabled.\nIt means that person who "
"knows any of hmac_keys that are specified in " "knows any of hmac_keys that are specified in "
"/etc/heat/heat.conf can trace his requests. \n" "/etc/heat/heat.conf can trace his requests. \n"
"In real life only operator can read this file so " "In real life only operator can read this file so "
"there is no security issue. Note that even if person " "there is no security issue. Note that even if person "
"can trigger profiler, only admin user can retrieve " "can trigger profiler, only admin user can retrieve "
"trace information.\n" "trace information.\n"
"To disable OSprofiler set in heat.conf:\n" "To disable OSprofiler set in heat.conf:\n"
"[profiler]\nenabled=false")) "[profiler]\nenabled=false")
else: else:
osprofiler.web.disable() osprofiler.web.disable()

View File

@ -21,7 +21,6 @@ from six.moves import urllib
from heat.common import exception from heat.common import exception
from heat.common.i18n import _ from heat.common.i18n import _
from heat.common.i18n import _LI
cfg.CONF.import_opt('max_template_size', 'heat.common.config') cfg.CONF.import_opt('max_template_size', 'heat.common.config')
@ -40,7 +39,7 @@ def get(url, allowed_schemes=('http', 'https')):
the allowed_schemes argument. the allowed_schemes argument.
Raise an IOError if getting the data fails. Raise an IOError if getting the data fails.
""" """
LOG.info(_LI('Fetching data from %s'), url) LOG.info('Fetching data from %s', url)
components = urllib.parse.urlparse(url) components = urllib.parse.urlparse(url)
@ -70,10 +69,11 @@ def get(url, allowed_schemes=('http', 'https')):
for chunk in reader: for chunk in reader:
result += chunk result += chunk
if len(result) > cfg.CONF.max_template_size: if len(result) > cfg.CONF.max_template_size:
raise URLFetchError("Template exceeds maximum allowed size (%s" raise URLFetchError(_("Template exceeds maximum allowed size "
" bytes)" % cfg.CONF.max_template_size) "(%s bytes)") %
cfg.CONF.max_template_size)
return result return result
except exceptions.RequestException as ex: except exceptions.RequestException as ex:
LOG.info(_LI('Failed to retrieve template: %s') % ex) LOG.info('Failed to retrieve template: %s', ex)
raise URLFetchError(_('Failed to retrieve template from %s') % url) raise URLFetchError(_('Failed to retrieve template from %s') % url)

View File

@ -48,9 +48,6 @@ import webob.exc
from heat.api.aws import exception as aws_exception from heat.api.aws import exception as aws_exception
from heat.common import exception from heat.common import exception
from heat.common.i18n import _ from heat.common.i18n import _
from heat.common.i18n import _LE
from heat.common.i18n import _LI
from heat.common.i18n import _LW
from heat.common import serializers from heat.common import serializers
@ -275,7 +272,7 @@ class Server(object):
def kill_children(self, *args): def kill_children(self, *args):
"""Kills the entire process group.""" """Kills the entire process group."""
LOG.error(_LE('SIGTERM received')) LOG.error('SIGTERM received')
signal.signal(signal.SIGTERM, signal.SIG_IGN) signal.signal(signal.SIGTERM, signal.SIG_IGN)
signal.signal(signal.SIGINT, signal.SIG_IGN) signal.signal(signal.SIGINT, signal.SIG_IGN)
self.running = False self.running = False
@ -283,7 +280,7 @@ class Server(object):
def hup(self, *args): def hup(self, *args):
"""Reloads configuration files with zero down time.""" """Reloads configuration files with zero down time."""
LOG.error(_LE('SIGHUP received')) LOG.error('SIGHUP received')
signal.signal(signal.SIGHUP, signal.SIG_IGN) signal.signal(signal.SIGHUP, signal.SIG_IGN)
raise exception.SIGHUPInterrupt raise exception.SIGHUPInterrupt
@ -315,7 +312,7 @@ class Server(object):
else: else:
childs_num = workers childs_num = workers
LOG.info(_LI("Starting %d workers"), workers) LOG.info("Starting %d workers", workers)
signal.signal(signal.SIGTERM, self.kill_children) signal.signal(signal.SIGTERM, self.kill_children)
signal.signal(signal.SIGINT, self.kill_children) signal.signal(signal.SIGINT, self.kill_children)
signal.signal(signal.SIGHUP, self.hup) signal.signal(signal.SIGHUP, self.hup)
@ -333,7 +330,7 @@ class Server(object):
if err.errno not in (errno.EINTR, errno.ECHILD): if err.errno not in (errno.EINTR, errno.ECHILD):
raise raise
except KeyboardInterrupt: except KeyboardInterrupt:
LOG.info(_LI('Caught keyboard interrupt. Exiting.')) LOG.info('Caught keyboard interrupt. Exiting.')
os.killpg(0, signal.SIGTERM) os.killpg(0, signal.SIGTERM)
break break
except exception.SIGHUPInterrupt: except exception.SIGHUPInterrupt:
@ -417,22 +414,22 @@ class Server(object):
def _remove_children(self, pid): def _remove_children(self, pid):
if pid in self.children: if pid in self.children:
self.children.remove(pid) self.children.remove(pid)
LOG.info(_LI('Removed dead child %s'), pid) LOG.info('Removed dead child %s', pid)
elif pid in self.stale_children: elif pid in self.stale_children:
self.stale_children.remove(pid) self.stale_children.remove(pid)
LOG.info(_LI('Removed stale child %s'), pid) LOG.info('Removed stale child %s', pid)
else: else:
LOG.warning(_LW('Unrecognised child %s'), pid) LOG.warning('Unrecognised child %s', pid)
def _verify_and_respawn_children(self, pid, status): def _verify_and_respawn_children(self, pid, status):
if len(self.stale_children) == 0: if len(self.stale_children) == 0:
LOG.debug('No stale children') LOG.debug('No stale children')
if os.WIFEXITED(status) and os.WEXITSTATUS(status) != 0: if os.WIFEXITED(status) and os.WEXITSTATUS(status) != 0:
LOG.error(_LE('Not respawning child %d, cannot ' LOG.error('Not respawning child %d, cannot '
'recover from termination'), pid) 'recover from termination', pid)
if not self.children and not self.stale_children: if not self.children and not self.stale_children:
LOG.info( LOG.info(
_LI('All workers have terminated. Exiting')) 'All workers have terminated. Exiting')
self.running = False self.running = False
else: else:
if len(self.children) < self.conf.workers: if len(self.children) < self.conf.workers:
@ -509,12 +506,12 @@ class Server(object):
# exit on sighup # exit on sighup
self._sock = None self._sock = None
self.run_server() self.run_server()
LOG.info(_LI('Child %d exiting normally'), os.getpid()) LOG.info('Child %d exiting normally', os.getpid())
# self.pool.waitall() is now called in wsgi's server so # self.pool.waitall() is now called in wsgi's server so
# it's safe to exit here # it's safe to exit here
sys.exit(0) sys.exit(0)
else: else:
LOG.info(_LI('Started child %s'), pid) LOG.info('Started child %s', pid)
self.children.add(pid) self.children.add(pid)
def run_server(self): def run_server(self):
@ -541,7 +538,7 @@ class Server(object):
def _single_run(self, application, sock): def _single_run(self, application, sock):
"""Start a WSGI server in a new green thread.""" """Start a WSGI server in a new green thread."""
LOG.info(_LI("Starting single process server")) LOG.info("Starting single process server")
eventlet.wsgi.server(sock, application, eventlet.wsgi.server(sock, application,
custom_pool=self.pool, custom_pool=self.pool,
url_length_limit=URL_LENGTH_LIMIT, url_length_limit=URL_LENGTH_LIMIT,
@ -838,7 +835,7 @@ class Resource(object):
action_result = self.dispatch(self.controller, action, action_result = self.dispatch(self.controller, action,
request, **action_args) request, **action_args)
except TypeError as err: except TypeError as err:
LOG.error(_LE('Exception handling resource: %s'), err) LOG.error('Exception handling resource: %s', err)
msg = _('The server could not comply with the request since ' msg = _('The server could not comply with the request since '
'it is either malformed or otherwise incorrect.') 'it is either malformed or otherwise incorrect.')
err = webob.exc.HTTPBadRequest(msg) err = webob.exc.HTTPBadRequest(msg)
@ -860,7 +857,7 @@ class Resource(object):
raise raise
if isinstance(err, webob.exc.HTTPServerError): if isinstance(err, webob.exc.HTTPServerError):
LOG.error( LOG.error(
_LE("Returning %(code)s to user: %(explanation)s"), "Returning %(code)s to user: %(explanation)s",
{'code': err.code, 'explanation': err.explanation}) {'code': err.code, 'explanation': err.explanation})
http_exc = translate_exception(err, request.best_match_language()) http_exc = translate_exception(err, request.best_match_language())
raise exception.HTTPExceptionDisguise(http_exc) raise exception.HTTPExceptionDisguise(http_exc)
@ -899,8 +896,7 @@ class Resource(object):
err_body = action_result.get_unserialized_body() err_body = action_result.get_unserialized_body()
serializer.default(action_result, err_body) serializer.default(action_result, err_body)
except Exception: except Exception:
LOG.warning(_LW("Unable to serialize exception " LOG.warning("Unable to serialize exception response")
"response"))
return action_result return action_result
@ -934,7 +930,7 @@ class Resource(object):
def log_exception(err, exc_info): def log_exception(err, exc_info):
args = {'exc_info': exc_info} if cfg.CONF.debug else {} args = {'exc_info': exc_info} if cfg.CONF.debug else {}
LOG.error(_LE("Unexpected error occurred serving API: %s"), err, LOG.error("Unexpected error occurred serving API: %s", err,
**args) **args)

View File

@ -35,9 +35,6 @@ from sqlalchemy.orm import aliased as orm_aliased
from heat.common import crypt from heat.common import crypt
from heat.common import exception from heat.common import exception
from heat.common.i18n import _ from heat.common.i18n import _
from heat.common.i18n import _LE
from heat.common.i18n import _LI
from heat.common.i18n import _LW
from heat.db.sqlalchemy import filters as db_filters from heat.db.sqlalchemy import filters as db_filters
from heat.db.sqlalchemy import migration from heat.db.sqlalchemy import migration
from heat.db.sqlalchemy import models from heat.db.sqlalchemy import models
@ -1294,7 +1291,7 @@ def _purge_stacks(stack_infos, engine, meta):
syncpoint = sqlalchemy.Table('sync_point', meta, autoload=True) syncpoint = sqlalchemy.Table('sync_point', meta, autoload=True)
stack_info_str = ','.join([str(i) for i in stack_infos]) stack_info_str = ','.join([str(i) for i in stack_infos])
LOG.info("Purging stacks %s" % stack_info_str) LOG.info("Purging stacks %s", stack_info_str)
# TODO(cwolfe): find a way to make this re-entrant with # TODO(cwolfe): find a way to make this re-entrant with
# reasonably sized transactions (good luck), or add # reasonably sized transactions (good luck), or add
@ -1475,8 +1472,8 @@ def _db_encrypt_or_decrypt_template_params(
for raw_template in next_batch: for raw_template in next_batch:
try: try:
if verbose: if verbose:
LOG.info(_LI("Processing raw_template %(id)d..."), LOG.info("Processing raw_template %s...",
{'id': raw_template.id}) raw_template.id)
env = raw_template.environment env = raw_template.environment
needs_update = False needs_update = False
@ -1524,16 +1521,16 @@ def _db_encrypt_or_decrypt_template_params(
raw_template_update(ctxt, raw_template.id, raw_template_update(ctxt, raw_template.id,
{'environment': newenv}) {'environment': newenv})
except Exception as exc: except Exception as exc:
LOG.exception(_LE('Failed to %(crypt_action)s parameters ' LOG.exception('Failed to %(crypt_action)s parameters '
'of raw template %(id)d'), 'of raw template %(id)d',
{'id': raw_template.id, {'id': raw_template.id,
'crypt_action': _crypt_action(encrypt)}) 'crypt_action': _crypt_action(encrypt)})
excs.append(exc) excs.append(exc)
continue continue
finally: finally:
if verbose: if verbose:
LOG.info(_LI("Finished %(crypt_action)s processing of " LOG.info("Finished %(crypt_action)s processing of "
"raw_template %(id)d."), "raw_template %(id)d.",
{'id': raw_template.id, {'id': raw_template.id,
'crypt_action': _crypt_action(encrypt)}) 'crypt_action': _crypt_action(encrypt)})
next_batch = list(itertools.islice(template_batches, batch_size)) next_batch = list(itertools.islice(template_batches, batch_size))
@ -1560,8 +1557,8 @@ def _db_encrypt_or_decrypt_resource_prop_data_legacy(
continue continue
try: try:
if verbose: if verbose:
LOG.info(_LI("Processing resource %(id)d..."), LOG.info("Processing resource %s...",
{'id': resource.id}) resource.id)
if encrypt: if encrypt:
result = crypt.encrypted_dict(resource.properties_data, result = crypt.encrypted_dict(resource.properties_data,
encryption_key) encryption_key)
@ -1573,16 +1570,16 @@ def _db_encrypt_or_decrypt_resource_prop_data_legacy(
'properties_data_encrypted': encrypt}, 'properties_data_encrypted': encrypt},
resource.atomic_key) resource.atomic_key)
except Exception as exc: except Exception as exc:
LOG.exception(_LE('Failed to %(crypt_action)s ' LOG.exception('Failed to %(crypt_action)s '
'properties_data of resource %(id)d') % 'properties_data of resource %(id)d' %
{'id': resource.id, {'id': resource.id,
'crypt_action': _crypt_action(encrypt)}) 'crypt_action': _crypt_action(encrypt)})
excs.append(exc) excs.append(exc)
continue continue
finally: finally:
if verbose: if verbose:
LOG.info(_LI("Finished processing resource " LOG.info("Finished processing resource %s.",
"%(id)d."), {'id': resource.id}) resource.id)
next_batch = list(itertools.islice(resource_batches, batch_size)) next_batch = list(itertools.islice(resource_batches, batch_size))
return excs return excs
@ -1607,8 +1604,8 @@ def _db_encrypt_or_decrypt_resource_prop_data(
continue continue
try: try:
if verbose: if verbose:
LOG.info(_LI("Processing resource_properties_data " LOG.info("Processing resource_properties_data "
"%(id)d..."), {'id': rpd.id}) "%s...", rpd.id)
if encrypt: if encrypt:
result = crypt.encrypted_dict(rpd.data, result = crypt.encrypted_dict(rpd.data,
encryption_key) encryption_key)
@ -1619,8 +1616,8 @@ def _db_encrypt_or_decrypt_resource_prop_data(
'encrypted': encrypt}) 'encrypted': encrypt})
except Exception as exc: except Exception as exc:
LOG.exception( LOG.exception(
_LE("Failed to %(crypt_action)s " "Failed to %(crypt_action)s "
"data of resource_properties_data %(id)d") % "data of resource_properties_data %(id)d" %
{'id': rpd.id, {'id': rpd.id,
'crypt_action': _crypt_action(encrypt)}) 'crypt_action': _crypt_action(encrypt)})
excs.append(exc) excs.append(exc)
@ -1628,8 +1625,8 @@ def _db_encrypt_or_decrypt_resource_prop_data(
finally: finally:
if verbose: if verbose:
LOG.info( LOG.info(
_LI("Finished processing resource_properties_data" "Finished processing resource_properties_data"
" %(id)d."), {'id': rpd.id}) " %s.", rpd.id)
next_batch = list(itertools.islice(rpd_batches, batch_size)) next_batch = list(itertools.islice(rpd_batches, batch_size))
return excs return excs
@ -1706,10 +1703,10 @@ def db_properties_data_migrate(ctxt, batch_size=50):
encrypted = resource.properties_data_encrypted encrypted = resource.properties_data_encrypted
if encrypted is None: if encrypted is None:
LOG.warning( LOG.warning(
_LW('Unexpected: resource.encrypted is None for ' 'Unexpected: resource.encrypted is None for '
'resource id %(id)d for legacy ' 'resource id %s for legacy '
'resource.properties_data, assuming False.'), 'resource.properties_data, assuming False.',
{'id': resource.id}) resource.id)
encrypted = False encrypted = False
rsrc_prop_data = resource_prop_data_create( rsrc_prop_data = resource_prop_data_create(
ctxt, {'encrypted': encrypted, ctxt, {'encrypted': encrypted,
@ -1720,8 +1717,8 @@ def db_properties_data_migrate(ctxt, batch_size=50):
'rsrc_prop_data_id': rsrc_prop_data.id}, 'rsrc_prop_data_id': rsrc_prop_data.id},
resource.atomic_key) resource.atomic_key)
except Exception: except Exception:
LOG.exception(_LE('Failed to migrate properties_data for ' LOG.exception('Failed to migrate properties_data for '
'resource %(id)d'), {'id': resource.id}) 'resource %d', resource.id)
continue continue
next_batch = list(itertools.islice(resource_batches, batch_size)) next_batch = list(itertools.islice(resource_batches, batch_size))
@ -1743,8 +1740,8 @@ def db_properties_data_migrate(ctxt, batch_size=50):
event.update({'resource_properties': None, event.update({'resource_properties': None,
'rsrc_prop_data_id': rsrc_prop_data.id}) 'rsrc_prop_data_id': rsrc_prop_data.id})
except Exception: except Exception:
LOG.exception(_LE('Failed to migrate resource_properties ' LOG.exception('Failed to migrate resource_properties '
'for event %(id)d'), {'id': event.id}) 'for event %d', event.id)
continue continue
next_batch = list(itertools.islice(event_batches, batch_size)) next_batch = list(itertools.islice(event_batches, batch_size))

View File

@ -18,7 +18,6 @@ from oslo_utils import timeutils
import six import six
from heat.common.i18n import _ from heat.common.i18n import _
from heat.common.i18n import _LE
from heat.common import param_utils from heat.common import param_utils
from heat.common import template_format from heat.common import template_format
from heat.common import timeutils as heat_timeutils from heat.common import timeutils as heat_timeutils
@ -41,7 +40,7 @@ def extract_args(params):
try: try:
timeout = int(timeout_mins) timeout = int(timeout_mins)
except (ValueError, TypeError): except (ValueError, TypeError):
LOG.exception(_LE('Timeout conversion failed')) LOG.exception('Timeout conversion failed')
else: else:
if timeout > 0: if timeout > 0:
kwargs[rpc_api.PARAM_TIMEOUT] = timeout kwargs[rpc_api.PARAM_TIMEOUT] = timeout
@ -481,7 +480,7 @@ def format_watch_data(wd, rule_names):
if len(metric) == 1: if len(metric) == 1:
metric_name, metric_data = metric[0] metric_name, metric_data = metric[0]
else: else:
LOG.error(_LE("Unexpected number of keys in watch_data.data!")) LOG.error("Unexpected number of keys in watch_data.data!")
return return
result = { result = {

View File

@ -17,7 +17,6 @@ from oslo_utils import strutils
import six import six
from heat.common.i18n import _ from heat.common.i18n import _
from heat.common.i18n import _LW
from heat.common.i18n import repr_wrapper from heat.common.i18n import repr_wrapper
from heat.engine import constraints as constr from heat.engine import constraints as constr
from heat.engine import support from heat.engine import support
@ -182,35 +181,35 @@ class Attributes(collections.Mapping):
def _validate_type(self, attrib, value): def _validate_type(self, attrib, value):
if attrib.schema.type == attrib.schema.STRING: if attrib.schema.type == attrib.schema.STRING:
if not isinstance(value, six.string_types): if not isinstance(value, six.string_types):
LOG.warning(_LW("Attribute %(name)s is not of type " LOG.warning("Attribute %(name)s is not of type "
"%(att_type)s"), "%(att_type)s",
{'name': attrib.name, {'name': attrib.name,
'att_type': attrib.schema.STRING}) 'att_type': attrib.schema.STRING})
elif attrib.schema.type == attrib.schema.LIST: elif attrib.schema.type == attrib.schema.LIST:
if (not isinstance(value, collections.Sequence) if (not isinstance(value, collections.Sequence)
or isinstance(value, six.string_types)): or isinstance(value, six.string_types)):
LOG.warning(_LW("Attribute %(name)s is not of type " LOG.warning("Attribute %(name)s is not of type "
"%(att_type)s"), "%(att_type)s",
{'name': attrib.name, {'name': attrib.name,
'att_type': attrib.schema.LIST}) 'att_type': attrib.schema.LIST})
elif attrib.schema.type == attrib.schema.MAP: elif attrib.schema.type == attrib.schema.MAP:
if not isinstance(value, collections.Mapping): if not isinstance(value, collections.Mapping):
LOG.warning(_LW("Attribute %(name)s is not of type " LOG.warning("Attribute %(name)s is not of type "
"%(att_type)s"), "%(att_type)s",
{'name': attrib.name, {'name': attrib.name,
'att_type': attrib.schema.MAP}) 'att_type': attrib.schema.MAP})
elif attrib.schema.type == attrib.schema.INTEGER: elif attrib.schema.type == attrib.schema.INTEGER:
if not isinstance(value, int): if not isinstance(value, int):
LOG.warning(_LW("Attribute %(name)s is not of type " LOG.warning("Attribute %(name)s is not of type "
"%(att_type)s"), "%(att_type)s",
{'name': attrib.name, {'name': attrib.name,
'att_type': attrib.schema.INTEGER}) 'att_type': attrib.schema.INTEGER})
elif attrib.schema.type == attrib.schema.BOOLEAN: elif attrib.schema.type == attrib.schema.BOOLEAN:
try: try:
strutils.bool_from_string(value, strict=True) strutils.bool_from_string(value, strict=True)
except ValueError: except ValueError:
LOG.warning(_LW("Attribute %(name)s is not of type " LOG.warning("Attribute %(name)s is not of type "
"%(att_type)s"), "%(att_type)s",
{'name': attrib.name, {'name': attrib.name,
'att_type': attrib.schema.BOOLEAN}) 'att_type': attrib.schema.BOOLEAN})

View File

@ -21,8 +21,6 @@ import functools
from oslo_log import log as logging from oslo_log import log as logging
from heat.common import exception from heat.common import exception
from heat.common.i18n import _LE
from heat.common.i18n import _LI
from heat.engine import resource from heat.engine import resource
from heat.engine import scheduler from heat.engine import scheduler
from heat.engine import stack as parser from heat.engine import stack as parser
@ -69,7 +67,7 @@ class CheckResource(object):
return False return False
def _trigger_rollback(self, stack): def _trigger_rollback(self, stack):
LOG.info(_LI("Triggering rollback of %(stack_name)s %(action)s "), LOG.info("Triggering rollback of %(stack_name)s %(action)s ",
{'action': stack.action, 'stack_name': stack.name}) {'action': stack.action, 'stack_name': stack.name})
stack.rollback() stack.rollback()
@ -113,7 +111,7 @@ class CheckResource(object):
stack, self.msg_queue) stack, self.msg_queue)
except resource.UpdateReplace: except resource.UpdateReplace:
new_res_id = rsrc.make_replacement(tmpl.id) new_res_id = rsrc.make_replacement(tmpl.id)
LOG.info(_LI("Replacing resource with new id %s"), LOG.info("Replacing resource with new id %s",
new_res_id) new_res_id)
rpc_data = sync_point.serialize_input_data(self.input_data) rpc_data = sync_point.serialize_input_data(self.input_data)
self._rpc_client.check_resource(cnxt, self._rpc_client.check_resource(cnxt,
@ -177,7 +175,7 @@ class CheckResource(object):
if (resource_id, True) in graph: if (resource_id, True) in graph:
# not is_update evaluates to True below, which means update # not is_update evaluates to True below, which means update
key = (resource_id, not is_update) key = (resource_id, not is_update)
LOG.info(_LI('Re-trigger resource: (%(key1)s, %(key2)s)'), LOG.info('Re-trigger resource: (%(key1)s, %(key2)s)',
{'key1': key[0], 'key2': key[1]}) {'key1': key[0], 'key2': key[1]})
predecessors = set(graph[key]) predecessors = set(graph[key])
@ -340,7 +338,7 @@ def _check_for_message(msg_queue):
if message == rpc_api.THREAD_CANCEL: if message == rpc_api.THREAD_CANCEL:
raise CancelOperation raise CancelOperation
LOG.error(_LE('Unknown message "%s" received'), message) LOG.error('Unknown message "%s" received', message)
def check_resource_update(rsrc, template_id, resource_data, engine_id, def check_resource_update(rsrc, template_id, resource_data, engine_id,

View File

@ -21,7 +21,6 @@ from stevedore import enabled
from heat.common import exception from heat.common import exception
from heat.common.i18n import _ from heat.common.i18n import _
from heat.common.i18n import _LW
from heat.common import pluginutils from heat.common import pluginutils
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
@ -83,7 +82,7 @@ class OpenStackClients(object):
client = getattr(self, method_name)() client = getattr(self, method_name)()
self._clients[name] = client self._clients[name] = client
return client return client
LOG.warning(_LW('Requested client "%s" not found'), name) LOG.warning('Requested client "%s" not found', name)
class ClientBackend(object): class ClientBackend(object):

View File

@ -18,7 +18,6 @@ from oslo_log import log as logging
from heat.common import exception from heat.common import exception
from heat.common.i18n import _ from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.engine.clients import client_plugin from heat.engine.clients import client_plugin
from heat.engine.clients import os as os_client from heat.engine.clients import os as os_client
from heat.engine import constraints from heat.engine import constraints
@ -137,16 +136,16 @@ class CinderClientPlugin(client_plugin.ClientPlugin):
return True return True
if vol.status in ('in-use', 'detaching'): if vol.status in ('in-use', 'detaching'):
LOG.debug('%s - volume still in use' % vol_id) LOG.debug('%s - volume still in use', vol_id)
return False return False
LOG.debug('Volume %(id)s - status: %(status)s' % { LOG.debug('Volume %(id)s - status: %(status)s', {
'id': vol.id, 'status': vol.status}) 'id': vol.id, 'status': vol.status})
if vol.status not in ('available', 'deleting'): if vol.status not in ('available', 'deleting'):
LOG.debug("Detachment failed - volume %(vol)s " LOG.debug("Detachment failed - volume %(vol)s "
"is in %(status)s status" % {"vol": vol.id, "is in %(status)s status",
"status": vol.status}) {"vol": vol.id, "status": vol.status})
raise exception.ResourceUnknownStatus( raise exception.ResourceUnknownStatus(
resource_status=vol.status, resource_status=vol.status,
result=_('Volume detachment failed')) result=_('Volume detachment failed'))
@ -157,19 +156,19 @@ class CinderClientPlugin(client_plugin.ClientPlugin):
vol = self.client().volumes.get(vol_id) vol = self.client().volumes.get(vol_id)
if vol.status in ('available', 'attaching'): if vol.status in ('available', 'attaching'):
LOG.debug("Volume %(id)s is being attached - " LOG.debug("Volume %(id)s is being attached - "
"volume status: %(status)s" % {'id': vol_id, "volume status: %(status)s",
'status': vol.status}) {'id': vol_id, 'status': vol.status})
return False return False
if vol.status != 'in-use': if vol.status != 'in-use':
LOG.debug("Attachment failed - volume %(vol)s is " LOG.debug("Attachment failed - volume %(vol)s is "
"in %(status)s status" % {"vol": vol_id, "in %(status)s status",
"status": vol.status}) {"vol": vol_id, "status": vol.status})
raise exception.ResourceUnknownStatus( raise exception.ResourceUnknownStatus(
resource_status=vol.status, resource_status=vol.status,
result=_('Volume attachment failed')) result=_('Volume attachment failed'))
LOG.info(_LI('Attaching volume %(id)s complete'), {'id': vol_id}) LOG.info('Attaching volume %(id)s complete', {'id': vol_id})
return True return True

View File

@ -29,8 +29,6 @@ from oslo_utils import importutils
from heat.common import context from heat.common import context
from heat.common import exception from heat.common import exception
from heat.common.i18n import _ from heat.common.i18n import _
from heat.common.i18n import _LE
from heat.common.i18n import _LW
LOG = logging.getLogger('heat.engine.clients.keystoneclient') LOG = logging.getLogger('heat.engine.clients.keystoneclient')
@ -96,7 +94,7 @@ class KsClientWrapper(object):
self.domain_admin_user = cfg.CONF.stack_domain_admin self.domain_admin_user = cfg.CONF.stack_domain_admin
self.domain_admin_password = cfg.CONF.stack_domain_admin_password self.domain_admin_password = cfg.CONF.stack_domain_admin_password
LOG.debug('Using stack domain %s' % self.stack_domain) LOG.debug('Using stack domain %s', self.stack_domain)
@property @property
def context(self): def context(self):
@ -140,7 +138,7 @@ class KsClientWrapper(object):
try: try:
auth.get_token(self.session) auth.get_token(self.session)
except ks_exception.Unauthorized: except ks_exception.Unauthorized:
LOG.error(_LE("Domain admin client authentication failed")) LOG.error("Domain admin client authentication failed")
raise exception.AuthorizationFailure() raise exception.AuthorizationFailure()
self._domain_admin_auth = auth self._domain_admin_auth = auth
@ -167,17 +165,17 @@ class KsClientWrapper(object):
try: try:
auth_ref = self.context.auth_plugin.get_access(self.session) auth_ref = self.context.auth_plugin.get_access(self.session)
except ks_exception.Unauthorized: except ks_exception.Unauthorized:
LOG.error(_LE("Keystone client authentication failed")) LOG.error("Keystone client authentication failed")
raise exception.AuthorizationFailure() raise exception.AuthorizationFailure()
if self.context.trust_id: if self.context.trust_id:
# Sanity check # Sanity check
if not auth_ref.trust_scoped: if not auth_ref.trust_scoped:
LOG.error(_LE("trust token re-scoping failed!")) LOG.error("trust token re-scoping failed!")
raise exception.AuthorizationFailure() raise exception.AuthorizationFailure()
# Sanity check that impersonation is effective # Sanity check that impersonation is effective
if self.context.trustor_user_id != auth_ref.user_id: if self.context.trustor_user_id != auth_ref.user_id:
LOG.error(_LE("Trust impersonation failed")) LOG.error("Trust impersonation failed")
raise exception.AuthorizationFailure() raise exception.AuthorizationFailure()
return client return client
@ -202,7 +200,7 @@ class KsClientWrapper(object):
trustee_user_id = self.context.trusts_auth_plugin.get_user_id( trustee_user_id = self.context.trusts_auth_plugin.get_user_id(
self.session) self.session)
except ks_exception.Unauthorized: except ks_exception.Unauthorized:
LOG.error(_LE("Domain admin client authentication failed")) LOG.error("Domain admin client authentication failed")
raise exception.AuthorizationFailure() raise exception.AuthorizationFailure()
trustor_user_id = self.context.auth_plugin.get_user_id(self.session) trustor_user_id = self.context.auth_plugin.get_user_id(self.session)
@ -241,8 +239,8 @@ class KsClientWrapper(object):
def _get_username(self, username): def _get_username(self, username):
if(len(username) > 64): if(len(username) > 64):
LOG.warning(_LW("Truncating the username %s to the last 64 " LOG.warning("Truncating the username %s to the last 64 "
"characters."), username) "characters.", username)
# get the last 64 characters of the username # get the last 64 characters of the username
return username[-64:] return username[-64:]
@ -268,15 +266,15 @@ class KsClientWrapper(object):
name=self._get_username(username), password=password, name=self._get_username(username), password=password,
default_project=self.context.tenant_id) default_project=self.context.tenant_id)
# Add user to heat_stack_user_role # Add user to heat_stack_user_role
LOG.debug("Adding user %(user)s to role %(role)s" % { LOG.debug("Adding user %(user)s to role %(role)s",
'user': user.id, 'role': role_id}) {'user': user.id, 'role': role_id})
self.client.roles.grant(role=role_id, user=user.id, self.client.roles.grant(role=role_id, user=user.id,
project=self.context.tenant_id) project=self.context.tenant_id)
else: else:
LOG.error(_LE("Failed to add user %(user)s to role %(role)s, " LOG.error("Failed to add user %(user)s to role %(role)s, "
"check role exists!"), { "check role exists!",
'user': username, {'user': username,
'role': cfg.CONF.heat_stack_user_role}) 'role': cfg.CONF.heat_stack_user_role})
raise exception.Error(_("Can't find role %s") raise exception.Error(_("Can't find role %s")
% cfg.CONF.heat_stack_user_role) % cfg.CONF.heat_stack_user_role)
@ -331,13 +329,13 @@ class KsClientWrapper(object):
name=self._get_username(username), password=password, name=self._get_username(username), password=password,
default_project=project_id, domain=self.stack_domain_id) default_project=project_id, domain=self.stack_domain_id)
# Add to stack user role # Add to stack user role
LOG.debug("Adding user %(user)s to role %(role)s" % { LOG.debug("Adding user %(user)s to role %(role)s",
'user': user.id, 'role': role_id}) {'user': user.id, 'role': role_id})
self.domain_admin_client.roles.grant(role=role_id, user=user.id, self.domain_admin_client.roles.grant(role=role_id, user=user.id,
project=project_id) project=project_id)
else: else:
LOG.error(_LE("Failed to add user %(user)s to role %(role)s, " LOG.error("Failed to add user %(user)s to role %(role)s, "
"check role exists!"), "check role exists!",
{'user': username, {'user': username,
'role': cfg.CONF.heat_stack_user_role}) 'role': cfg.CONF.heat_stack_user_role})
raise exception.Error(_("Can't find role %s") raise exception.Error(_("Can't find role %s")
@ -351,7 +349,7 @@ class KsClientWrapper(object):
try: try:
access = self.domain_admin_auth.get_access(self.session) access = self.domain_admin_auth.get_access(self.session)
except ks_exception.Unauthorized: except ks_exception.Unauthorized:
LOG.error(_LE("Keystone client authentication failed")) LOG.error("Keystone client authentication failed")
raise exception.AuthorizationFailure() raise exception.AuthorizationFailure()
self._stack_domain_id = access.domain_id self._stack_domain_id = access.domain_id
@ -417,12 +415,12 @@ class KsClientWrapper(object):
except ks_exception.NotFound: except ks_exception.NotFound:
return return
except ks_exception.Forbidden: except ks_exception.Forbidden:
LOG.warning(_LW('Unable to get details for project %s, ' LOG.warning('Unable to get details for project %s, '
'not deleting'), project_id) 'not deleting', project_id)
return return
if project.domain_id != self.stack_domain_id: if project.domain_id != self.stack_domain_id:
LOG.warning(_LW('Not deleting non heat-domain project')) LOG.warning('Not deleting non heat-domain project')
return return
try: try:

View File

@ -31,8 +31,6 @@ import tenacity
from heat.common import exception from heat.common import exception
from heat.common.i18n import _ from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.common.i18n import _LW
from heat.engine.clients import client_plugin from heat.engine.clients import client_plugin
from heat.engine.clients import os as os_client from heat.engine.clients import os as os_client
from heat.engine import constraints from heat.engine import constraints
@ -144,15 +142,15 @@ class NovaClientPlugin(client_plugin.ClientPlugin):
try: try:
server = self.client().servers.get(server_id) server = self.client().servers.get(server_id)
except exceptions.OverLimit as exc: except exceptions.OverLimit as exc:
LOG.warning(_LW("Received an OverLimit response when " LOG.warning("Received an OverLimit response when "
"fetching server (%(id)s) : %(exception)s"), "fetching server (%(id)s) : %(exception)s",
{'id': server_id, {'id': server_id,
'exception': exc}) 'exception': exc})
except exceptions.ClientException as exc: except exceptions.ClientException as exc:
if ((getattr(exc, 'http_status', getattr(exc, 'code', None)) in if ((getattr(exc, 'http_status', getattr(exc, 'code', None)) in
(500, 503))): (500, 503))):
LOG.warning(_LW("Received the following exception when " LOG.warning("Received the following exception when "
"fetching server (%(id)s) : %(exception)s"), "fetching server (%(id)s) : %(exception)s",
{'id': server_id, {'id': server_id,
'exception': exc}) 'exception': exc})
else: else:
@ -167,17 +165,17 @@ class NovaClientPlugin(client_plugin.ClientPlugin):
try: try:
server.get() server.get()
except exceptions.OverLimit as exc: except exceptions.OverLimit as exc:
LOG.warning(_LW("Server %(name)s (%(id)s) received an OverLimit " LOG.warning("Server %(name)s (%(id)s) received an OverLimit "
"response during server.get(): %(exception)s"), "response during server.get(): %(exception)s",
{'name': server.name, {'name': server.name,
'id': server.id, 'id': server.id,
'exception': exc}) 'exception': exc})
except exceptions.ClientException as exc: except exceptions.ClientException as exc:
if ((getattr(exc, 'http_status', getattr(exc, 'code', None)) in if ((getattr(exc, 'http_status', getattr(exc, 'code', None)) in
(500, 503))): (500, 503))):
LOG.warning(_LW('Server "%(name)s" (%(id)s) received the ' LOG.warning('Server "%(name)s" (%(id)s) received the '
'following exception during server.get(): ' 'following exception during server.get(): '
'%(exception)s'), '%(exception)s',
{'name': server.name, {'name': server.name,
'id': server.id, 'id': server.id,
'exception': exc}) 'exception': exc})
@ -568,7 +566,7 @@ echo -e '%s\tALL=(ALL)\tNOPASSWD: ALL' >> /etc/sudoers
try: try:
server = self.client().servers.get(server) server = self.client().servers.get(server)
except exceptions.NotFound as ex: except exceptions.NotFound as ex:
LOG.warning(_LW('Instance (%(server)s) not found: %(ex)s'), LOG.warning('Instance (%(server)s) not found: %(ex)s',
{'server': server, 'ex': ex}) {'server': server, 'ex': ex})
else: else:
for n in sorted(server.networks, reverse=True): for n in sorted(server.networks, reverse=True):
@ -691,12 +689,12 @@ echo -e '%s\tALL=(ALL)\tNOPASSWD: ALL' >> /etc/sudoers
self.client().volumes.get_server_volume(server_id, attach_id) self.client().volumes.get_server_volume(server_id, attach_id)
except Exception as ex: except Exception as ex:
self.ignore_not_found(ex) self.ignore_not_found(ex)
LOG.info(_LI("Volume %(vol)s is detached from server %(srv)s"), LOG.info("Volume %(vol)s is detached from server %(srv)s",
{'vol': attach_id, 'srv': server_id}) {'vol': attach_id, 'srv': server_id})
return True return True
else: else:
LOG.debug("Server %(srv)s still has attachment %(att)s." % { LOG.debug("Server %(srv)s still has attachment %(att)s.",
'att': attach_id, 'srv': server_id}) {'att': attach_id, 'srv': server_id})
return False return False
def interface_detach(self, server_id, port_id): def interface_detach(self, server_id, port_id):

View File

@ -18,7 +18,6 @@ from zaqarclient.queues.v2 import client as zaqarclient
from zaqarclient.transport import errors as zaqar_errors from zaqarclient.transport import errors as zaqar_errors
from heat.common.i18n import _ from heat.common.i18n import _
from heat.common.i18n import _LE
from heat.engine.clients import client_plugin from heat.engine.clients import client_plugin
from heat.engine import constraints from heat.engine import constraints
@ -42,7 +41,7 @@ class ZaqarClientPlugin(client_plugin.ClientPlugin):
def create_for_tenant(self, tenant_id, token): def create_for_tenant(self, tenant_id, token):
con = self.context con = self.context
if token is None: if token is None:
LOG.error(_LE("Zaqar connection failed, no auth_token!")) LOG.error("Zaqar connection failed, no auth_token!")
return None return None
opts = { opts = {

View File

@ -25,7 +25,6 @@ import six
from heat.common import cache from heat.common import cache
from heat.common import exception from heat.common import exception
from heat.common.i18n import _ from heat.common.i18n import _
from heat.common.i18n import _LW
from heat.engine import resources from heat.engine import resources
# decorator that allows to cache the value # decorator that allows to cache the value
@ -101,8 +100,8 @@ class Schema(collections.Mapping):
message=_('Invalid type (%s)') % self.type) message=_('Invalid type (%s)') % self.type)
if required and default is not None: if required and default is not None:
LOG.warning(_LW("Option 'required=True' should not be used with " LOG.warning("Option 'required=True' should not be used with "
"any 'default' value (%s)") % default) "any 'default' value (%s)", default)
self.description = description self.description = description
self.required = required self.required = required

View File

@ -26,9 +26,6 @@ import six
from heat.common import environment_format as env_fmt from heat.common import environment_format as env_fmt
from heat.common import exception from heat.common import exception
from heat.common.i18n import _ from heat.common.i18n import _
from heat.common.i18n import _LE
from heat.common.i18n import _LI
from heat.common.i18n import _LW
from heat.common import policy from heat.common import policy
from heat.engine import support from heat.engine import support
@ -321,13 +318,13 @@ class ResourceRegistry(object):
for res_name, reg_info in list(registry.items()): for res_name, reg_info in list(registry.items()):
if (isinstance(reg_info, ResourceInfo) and if (isinstance(reg_info, ResourceInfo) and
res_name.startswith(name[:-1])): res_name.startswith(name[:-1])):
LOG.warning(_LW('Removing %(item)s from %(path)s'), { LOG.warning('Removing %(item)s from %(path)s', {
'item': res_name, 'item': res_name,
'path': descriptive_path}) 'path': descriptive_path})
del registry[res_name] del registry[res_name]
else: else:
# delete this entry. # delete this entry.
LOG.warning(_LW('Removing %(item)s from %(path)s'), { LOG.warning('Removing %(item)s from %(path)s', {
'item': name, 'item': name,
'path': descriptive_path}) 'path': descriptive_path})
registry.pop(name, None) registry.pop(name, None)
@ -340,8 +337,7 @@ class ResourceRegistry(object):
'path': descriptive_path, 'path': descriptive_path,
'was': str(registry[name].value), 'was': str(registry[name].value),
'now': str(info.value)} 'now': str(info.value)}
LOG.warning(_LW('Changing %(path)s from %(was)s to %(now)s'), LOG.warning('Changing %(path)s from %(was)s to %(now)s', details)
details)
if isinstance(info, ClassResourceInfo): if isinstance(info, ClassResourceInfo):
if info.value.support_status.status != support.SUPPORTED: if info.value.support_status.status != support.SUPPORTED:
@ -353,7 +349,7 @@ class ResourceRegistry(object):
'message': six.text_type( 'message': six.text_type(
info.value.support_status.message) info.value.support_status.message)
} }
LOG.warning(_LW('%(name)s is %(status)s. %(message)s'), LOG.warning('%(name)s is %(status)s. %(message)s',
details) details)
info.user_resource = (self.global_registry is not None) info.user_resource = (self.global_registry is not None)
@ -366,7 +362,7 @@ class ResourceRegistry(object):
if name == 'resources': if name == 'resources':
continue continue
if show_all or isinstance(registry[name], TemplateResourceInfo): if show_all or isinstance(registry[name], TemplateResourceInfo):
msg = (_LI('%(p)sRegistered: %(t)s') % msg = ('%(p)sRegistered: %(t)s' %
{'p': prefix, {'p': prefix,
't': six.text_type(registry[name])}) 't': six.text_type(registry[name])})
LOG.info(msg) LOG.info(msg)
@ -842,17 +838,17 @@ def read_global_environment(env, env_dir=None):
try: try:
env_files = glob.glob(os.path.join(env_dir, '*')) env_files = glob.glob(os.path.join(env_dir, '*'))
except OSError: except OSError:
LOG.exception(_LE('Failed to read %s'), env_dir) LOG.exception('Failed to read %s', env_dir)
return return
for file_path in env_files: for file_path in env_files:
try: try:
with open(file_path) as env_fd: with open(file_path) as env_fd:
LOG.info(_LI('Loading %s'), file_path) LOG.info('Loading %s', file_path)
env_body = env_fmt.parse(env_fd.read()) env_body = env_fmt.parse(env_fd.read())
env_fmt.default_for_missing(env_body) env_fmt.default_for_missing(env_body)
env.load(env_body) env.load(env_body)
except ValueError: except ValueError:
LOG.exception(_LE('Failed to parse %s'), file_path) LOG.exception('Failed to parse %s', file_path)
except IOError: except IOError:
LOG.exception(_LE('Failed to read %s'), file_path) LOG.exception('Failed to read %s', file_path)

View File

@ -19,7 +19,6 @@ from oslo_config import cfg
from oslo_log import log from oslo_log import log
import six import six
from heat.common.i18n import _LE
from heat.common import plugin_loader from heat.common import plugin_loader
LOG = log.getLogger(__name__) LOG = log.getLogger(__name__)
@ -92,15 +91,15 @@ class PluginMapping(object):
try: try:
mapping_dict = mapping_func(*self.args, **self.kwargs) mapping_dict = mapping_func(*self.args, **self.kwargs)
except Exception: except Exception:
LOG.error(_LE('Failed to load %(mapping_name)s ' LOG.error('Failed to load %(mapping_name)s '
'from %(module)s'), fmt_data) 'from %(module)s', fmt_data)
raise raise
else: else:
if isinstance(mapping_dict, collections.Mapping): if isinstance(mapping_dict, collections.Mapping):
return mapping_dict return mapping_dict
elif mapping_dict is not None: elif mapping_dict is not None:
LOG.error(_LE('Invalid type for %(mapping_name)s ' LOG.error('Invalid type for %(mapping_name)s '
'from %(module)s'), fmt_data) 'from %(module)s', fmt_data)
return {} return {}

View File

@ -24,9 +24,6 @@ import six
from heat.common import exception from heat.common import exception
from heat.common.i18n import _ from heat.common.i18n import _
from heat.common.i18n import _LE
from heat.common.i18n import _LI
from heat.common.i18n import _LW
from heat.common import identifier from heat.common import identifier
from heat.common import short_id from heat.common import short_id
from heat.common import timeutils from heat.common import timeutils
@ -188,7 +185,7 @@ class Resource(object):
try: try:
(svc_available, reason) = cls.is_service_available(context) (svc_available, reason) = cls.is_service_available(context)
except Exception as exc: except Exception as exc:
LOG.exception(_LE("Resource type %s unavailable"), LOG.exception("Resource type %s unavailable",
resource_type) resource_type)
ex = exception.ResourceTypeUnavailable( ex = exception.ResourceTypeUnavailable(
resource_type=resource_type, resource_type=resource_type,
@ -485,7 +482,7 @@ class Resource(object):
self._add_event(self.action, self.status, self._add_event(self.action, self.status,
_("%(a)s paused until Hook %(h)s is cleared") _("%(a)s paused until Hook %(h)s is cleared")
% {'a': action, 'h': hook}) % {'a': action, 'h': hook})
LOG.info(_LI('Reached hook on %s'), self) LOG.info('Reached hook on %s', self)
while self.has_hook(hook): while self.has_hook(hook):
try: try:
@ -584,8 +581,8 @@ class Resource(object):
# stacks (see bug 1543685). The error should be harmless # stacks (see bug 1543685). The error should be harmless
# because we're on the before properties, which have presumably # because we're on the before properties, which have presumably
# already been validated. # already been validated.
LOG.warning(_LW('Ignoring error in old property value ' LOG.warning('Ignoring error in old property value '
'%(prop_name)s: %(msg)s'), '%(prop_name)s: %(msg)s',
{'prop_name': key, 'msg': six.text_type(exc)}) {'prop_name': key, 'msg': six.text_type(exc)})
return True return True
@ -768,7 +765,7 @@ class Resource(object):
self.state_set(action, self.COMPLETE, six.text_type(ex)) self.state_set(action, self.COMPLETE, six.text_type(ex))
LOG.debug('%s', six.text_type(ex)) LOG.debug('%s', six.text_type(ex))
except Exception as ex: except Exception as ex:
LOG.info(_LI('%(action)s: %(info)s'), LOG.info('%(action)s: %(info)s',
{"action": action, {"action": action,
"info": six.text_type(self)}, "info": six.text_type(self)},
exc_info=True) exc_info=True)
@ -784,7 +781,7 @@ class Resource(object):
msg += ' (%s)' % reason msg += ' (%s)' % reason
self.state_set(action, self.FAILED, msg) self.state_set(action, self.FAILED, msg)
except Exception: except Exception:
LOG.exception(_LE('Error marking resource as failed')) LOG.exception('Error marking resource as failed')
else: else:
self.state_set(action, self.COMPLETE) self.state_set(action, self.COMPLETE)
@ -837,7 +834,7 @@ class Resource(object):
canceller(handler_data) canceller(handler_data)
except Exception: except Exception:
LOG.exception( LOG.exception(
_LE('Error cancelling resource %s'), 'Error cancelling resource %s',
action action
) )
@ -957,7 +954,7 @@ class Resource(object):
yield self._break_if_required( yield self._break_if_required(
self.CREATE, environment.HOOK_PRE_CREATE) self.CREATE, environment.HOOK_PRE_CREATE)
LOG.info(_LI('creating %s'), self) LOG.info('creating %s', self)
# Re-resolve the template, since if the resource Ref's # Re-resolve the template, since if the resource Ref's
# the StackId pseudo parameter, it will change after # the StackId pseudo parameter, it will change after
@ -1284,9 +1281,9 @@ class Resource(object):
except exception.EntityNotFound: except exception.EntityNotFound:
raise UpdateReplace(self) raise UpdateReplace(self)
except Exception as ex: except Exception as ex:
LOG.warning(_LW("Resource cannot be updated with it's " LOG.warning("Resource cannot be updated with it's "
"live state in case of next " "live state in case of next "
"error: %s"), ex) "error: %s", ex)
return after_props, before_props return after_props, before_props
def _prepare_update_replace(self, action): def _prepare_update_replace(self, action):
@ -1367,7 +1364,7 @@ class Resource(object):
exc = Exception(_('Resource update already requested')) exc = Exception(_('Resource update already requested'))
raise exception.ResourceFailure(exc, self, action) raise exception.ResourceFailure(exc, self, action)
LOG.info(_LI('updating %s'), self) LOG.info('updating %s', self)
self.updated_time = datetime.utcnow() self.updated_time = datetime.utcnow()
@ -1429,7 +1426,7 @@ class Resource(object):
original state with the added message that check was not performed. original state with the added message that check was not performed.
""" """
action = self.CHECK action = self.CHECK
LOG.info(_LI('Checking %s'), self) LOG.info('Checking %s', self)
if hasattr(self, 'handle_%s' % action.lower()): if hasattr(self, 'handle_%s' % action.lower()):
if self.state == (self.INIT, self.COMPLETE): if self.state == (self.INIT, self.COMPLETE):
@ -1477,7 +1474,7 @@ class Resource(object):
% six.text_type(self.state)) % six.text_type(self.state))
raise exception.ResourceFailure(exc, self, action) raise exception.ResourceFailure(exc, self, action)
LOG.info(_LI('suspending %s'), self) LOG.info('suspending %s', self)
return self._do_action(action) return self._do_action(action)
def resume(self): def resume(self):
@ -1496,12 +1493,12 @@ class Resource(object):
exc = exception.Error(_('State %s invalid for resume') exc = exception.Error(_('State %s invalid for resume')
% six.text_type(self.state)) % six.text_type(self.state))
raise exception.ResourceFailure(exc, self, action) raise exception.ResourceFailure(exc, self, action)
LOG.info(_LI('resuming %s'), self) LOG.info('resuming %s', self)
return self._do_action(action) return self._do_action(action)
def snapshot(self): def snapshot(self):
"""Snapshot the resource and return the created data, if any.""" """Snapshot the resource and return the created data, if any."""
LOG.info(_LI('snapshotting %s'), self) LOG.info('snapshotting %s', self)
return self._do_action(self.SNAPSHOT) return self._do_action(self.SNAPSHOT)
@scheduler.wrappertask @scheduler.wrappertask
@ -1551,7 +1548,7 @@ class Resource(object):
This may be overridden by resource plugins to add extra This may be overridden by resource plugins to add extra
validation logic specific to the resource implementation. validation logic specific to the resource implementation.
""" """
LOG.info(_LI('Validating %s'), self) LOG.info('Validating %s', self)
return self.validate_template() return self.validate_template()
def validate_template(self): def validate_template(self):
@ -1608,8 +1605,8 @@ class Resource(object):
db_res = resource_objects.Resource.get_obj( db_res = resource_objects.Resource.get_obj(
self.context, self.replaced_by) self.context, self.replaced_by)
except exception.NotFound: except exception.NotFound:
LOG.info(_LI("Could not find replacement of resource %(name)s " LOG.info("Could not find replacement of resource %(name)s "
"with id %(id)s while updating needed_by."), "with id %(id)s while updating needed_by.",
{'name': self.name, 'id': self.replaced_by}) {'name': self.name, 'id': self.replaced_by})
return return
@ -1698,7 +1695,7 @@ class Resource(object):
yield self._break_if_required( yield self._break_if_required(
self.DELETE, environment.HOOK_PRE_DELETE) self.DELETE, environment.HOOK_PRE_DELETE)
LOG.info(_LI('deleting %s'), self) LOG.info('deleting %s', self)
if self._stored_properties_data is not None: if self._stored_properties_data is not None:
# On delete we can't rely on re-resolving the properties # On delete we can't rely on re-resolving the properties
@ -1723,7 +1720,7 @@ class Resource(object):
while True: while True:
count += 1 count += 1
LOG.info(_LI('delete %(name)s attempt %(attempt)d') % LOG.info('delete %(name)s attempt %(attempt)d' %
{'name': six.text_type(self), 'attempt': count+1}) {'name': six.text_type(self), 'attempt': count+1})
if count: if count:
delay = timeutils.retry_backoff_delay(count, delay = timeutils.retry_backoff_delay(count,
@ -1765,7 +1762,7 @@ class Resource(object):
self.id, self.id,
{'physical_resource_id': self.resource_id}) {'physical_resource_id': self.resource_id})
except Exception as ex: except Exception as ex:
LOG.warning(_LW('db error %s'), ex) LOG.warning('db error %s', ex)
def store(self, set_metadata=False): def store(self, set_metadata=False):
"""Create the resource in the database. """Create the resource in the database.
@ -1836,11 +1833,11 @@ class Resource(object):
atomic_key=rs.atomic_key, atomic_key=rs.atomic_key,
expected_engine_id=None) expected_engine_id=None)
except Exception as ex: except Exception as ex:
LOG.error(_LE('DB error %s'), ex) LOG.error('DB error %s', ex)
raise raise
if not updated_ok: if not updated_ok:
LOG.info(_LI('Resource %s is locked for update; deferring'), LOG.info('Resource %s is locked for update; deferring',
six.text_type(self)) six.text_type(self))
LOG.debug(('Resource id:%(resource_id)s with ' LOG.debug(('Resource id:%(resource_id)s with '
'atomic_key:%(atomic_key)s, locked ' 'atomic_key:%(atomic_key)s, locked '
@ -1872,7 +1869,7 @@ class Resource(object):
atomic_key=atomic_key) atomic_key=atomic_key)
if not updated_ok: if not updated_ok:
LOG.warning(_LW('Failed to unlock resource %s'), self.name) LOG.warning('Failed to unlock resource %s', self.name)
def _resolve_all_attributes(self, attr): def _resolve_all_attributes(self, attr):
"""Method for resolving all attributes. """Method for resolving all attributes.
@ -1920,7 +1917,7 @@ class Resource(object):
else: else:
return resource.to_dict() return resource.to_dict()
except AttributeError as ex: except AttributeError as ex:
LOG.warning(_LW("Resolving 'show' attribute has failed : %s"), LOG.warning("Resolving 'show' attribute has failed : %s",
ex) ex)
return None return None
@ -2138,7 +2135,7 @@ class Resource(object):
# `handle_signal` callbacks: # `handle_signal` callbacks:
hook = details['unset_hook'] hook = details['unset_hook']
self.clear_hook(hook) self.clear_hook(hook)
LOG.info(_LI('Clearing %(hook)s hook on %(resource)s'), LOG.info('Clearing %(hook)s hook on %(resource)s',
{'hook': hook, 'resource': six.text_type(self)}) {'hook': hook, 'resource': six.text_type(self)})
self._add_event(self.action, self.status, self._add_event(self.action, self.status,
"Hook %s is cleared" % hook) "Hook %s is cleared" % hook)
@ -2175,7 +2172,7 @@ class Resource(object):
# Don't log an event as it just spams the user. # Don't log an event as it just spams the user.
pass pass
except Exception as ex: except Exception as ex:
LOG.info(_LI('signal %(name)s : %(msg)s'), LOG.info('signal %(name)s : %(msg)s',
{'name': six.text_type(self), {'name': six.text_type(self),
'msg': six.text_type(ex)}, 'msg': six.text_type(ex)},
exc_info=True) exc_info=True)
@ -2207,7 +2204,7 @@ class Resource(object):
def metadata_update(self, new_metadata=None): def metadata_update(self, new_metadata=None):
"""No-op for resources which don't explicitly override this method.""" """No-op for resources which don't explicitly override this method."""
if new_metadata: if new_metadata:
LOG.warning(_LW("Resource %s does not implement metadata update"), LOG.warning("Resource %s does not implement metadata update",
self.name) self.name)
@classmethod @classmethod

View File

@ -18,8 +18,6 @@ import six
from heat.common import exception from heat.common import exception
from heat.common import grouputils from heat.common import grouputils
from heat.common.i18n import _ from heat.common.i18n import _
from heat.common.i18n import _LE
from heat.common.i18n import _LI
from heat.engine import attributes from heat.engine import attributes
from heat.engine import constraints from heat.engine import constraints
from heat.engine import function from heat.engine import function
@ -282,8 +280,8 @@ class AutoScalingGroup(instgrp.InstanceGroup, cooldown.CooldownMixin):
min_adjustment_step=None): min_adjustment_step=None):
"""Adjust the size of the scaling group if the cooldown permits.""" """Adjust the size of the scaling group if the cooldown permits."""
if self.status != self.COMPLETE: if self.status != self.COMPLETE:
LOG.info(_LI("%s NOT performing scaling adjustment, " LOG.info("%s NOT performing scaling adjustment, "
"when status is not COMPLETE") % self.name) "when status is not COMPLETE", self.name)
raise resource.NoActionRequired raise resource.NoActionRequired
capacity = grouputils.get_size(self) capacity = grouputils.get_size(self)
@ -291,8 +289,8 @@ class AutoScalingGroup(instgrp.InstanceGroup, cooldown.CooldownMixin):
adjustment_type, adjustment_type,
min_adjustment_step) min_adjustment_step)
if new_capacity == capacity: if new_capacity == capacity:
LOG.info(_LI("%s NOT performing scaling adjustment, " LOG.info("%s NOT performing scaling adjustment, "
"as there is no change in capacity.") % self.name) "as there is no change in capacity.", self.name)
raise resource.NoActionRequired raise resource.NoActionRequired
self._check_scaling_allowed() self._check_scaling_allowed()
@ -322,7 +320,7 @@ class AutoScalingGroup(instgrp.InstanceGroup, cooldown.CooldownMixin):
}) })
notification.send(**notif) notification.send(**notif)
except Exception: except Exception:
LOG.exception(_LE('Failed sending error notification')) LOG.exception('Failed sending error notification')
else: else:
size_changed = True size_changed = True
notif.update({ notif.update({
@ -333,8 +331,8 @@ class AutoScalingGroup(instgrp.InstanceGroup, cooldown.CooldownMixin):
}) })
notification.send(**notif) notification.send(**notif)
except Exception: except Exception:
LOG.error(_LE("Error in performing scaling adjustment for " LOG.error("Error in performing scaling adjustment for "
"group %s.") % self.name) "group %s.", self.name)
raise raise
finally: finally:
self._finished_scaling("%s : %s" % (adjustment_type, adjustment), self._finished_scaling("%s : %s" % (adjustment_type, adjustment),

View File

@ -17,9 +17,6 @@ import six
from heat.common import exception from heat.common import exception
from heat.common.i18n import _ from heat.common.i18n import _
from heat.common.i18n import _LE
from heat.common.i18n import _LI
from heat.common.i18n import _LW
from heat.engine import attributes from heat.engine import attributes
from heat.engine import constraints from heat.engine import constraints
from heat.engine import properties from heat.engine import properties
@ -106,21 +103,20 @@ class ElasticIp(resource.Resource):
'floatingip': props})['floatingip'] 'floatingip': props})['floatingip']
self.ipaddress = ips['floating_ip_address'] self.ipaddress = ips['floating_ip_address']
self.resource_id_set(ips['id']) self.resource_id_set(ips['id'])
LOG.info(_LI('ElasticIp create %s'), str(ips)) LOG.info('ElasticIp create %s', str(ips))
else: else:
try: try:
ips = self.client().floating_ips.create() ips = self.client().floating_ips.create()
except Exception as e: except Exception as e:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
if self.client_plugin('nova').is_not_found(e): if self.client_plugin('nova').is_not_found(e):
LOG.error(_LE("No default floating IP pool configured." LOG.error("No default floating IP pool configured. "
" Set 'default_floating_pool' in " "Set 'default_floating_pool' in nova.conf.")
"nova.conf."))
if ips: if ips:
self.ipaddress = ips.ip self.ipaddress = ips.ip
self.resource_id_set(ips.id) self.resource_id_set(ips.id)
LOG.info(_LI('ElasticIp create %s'), str(ips)) LOG.info('ElasticIp create %s', str(ips))
instance_id = self.properties[self.INSTANCE_ID] instance_id = self.properties[self.INSTANCE_ID]
if instance_id: if instance_id:
@ -330,7 +326,7 @@ class ElasticIpAssociation(resource.Resource):
self._floatingIp_detach(nova_ignore_not_found=True) self._floatingIp_detach(nova_ignore_not_found=True)
port_id, port_rsrc = self._get_port_info(ni_id, instance_id) port_id, port_rsrc = self._get_port_info(ni_id, instance_id)
if not port_id or not port_rsrc: if not port_id or not port_rsrc:
LOG.error(_LE('Port not specified.')) LOG.error('Port not specified.')
raise exception.NotFound(_('Failed to update, can not found ' raise exception.NotFound(_('Failed to update, can not found '
'port info.')) 'port info.'))
@ -353,7 +349,7 @@ class ElasticIpAssociation(resource.Resource):
port_id, port_rsrc = self._get_port_info(ni_id_update, port_id, port_rsrc = self._get_port_info(ni_id_update,
instance_id_update) instance_id_update)
if not port_id or not port_rsrc: if not port_id or not port_rsrc:
LOG.error(_LE('Port not specified.')) LOG.error('Port not specified.')
raise exception.NotFound(_('Failed to update, can not found ' raise exception.NotFound(_('Failed to update, can not found '
'port info.')) 'port info.'))
@ -377,8 +373,7 @@ class ElasticIpAssociation(resource.Resource):
instance_id = self.properties[self.INSTANCE_ID] instance_id = self.properties[self.INSTANCE_ID]
port_id, port_rsrc = self._get_port_info(ni_id, instance_id) port_id, port_rsrc = self._get_port_info(ni_id, instance_id)
if not port_id or not port_rsrc: if not port_id or not port_rsrc:
LOG.warning(_LW('Skipping association, resource not ' LOG.warning('Skipping association, resource not specified')
'specified'))
return return
float_id = self.properties[self.ALLOCATION_ID] float_id = self.properties[self.ALLOCATION_ID]

View File

@ -21,8 +21,6 @@ cfg.CONF.import_opt('max_server_name_length', 'heat.common.config')
from heat.common import exception from heat.common import exception
from heat.common.i18n import _ from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.common.i18n import _LW
from heat.engine import attributes from heat.engine import attributes
from heat.engine.clients import progress from heat.engine.clients import progress
from heat.engine import constraints from heat.engine import constraints
@ -396,7 +394,7 @@ class Instance(resource.Resource, sh.SchedulerHintsMixin):
elif name in self.ATTRIBUTES[1:]: elif name in self.ATTRIBUTES[1:]:
res = self._ipaddress() res = self._ipaddress()
LOG.info(_LI('%(name)s._resolve_attribute(%(attname)s) == %(res)s'), LOG.info('%(name)s._resolve_attribute(%(attname)s) == %(res)s',
{'name': self.name, 'attname': name, 'res': res}) {'name': self.name, 'attname': name, 'res': res})
return six.text_type(res) if res else None return six.text_type(res) if res else None
@ -679,9 +677,9 @@ class Instance(resource.Resource, sh.SchedulerHintsMixin):
# keep the behavior as creation # keep the behavior as creation
elif (old_network_ifaces and elif (old_network_ifaces and
(self.NETWORK_INTERFACES not in prop_diff)): (self.NETWORK_INTERFACES not in prop_diff)):
LOG.warning(_LW('There is no change of "%(net_interfaces)s" ' LOG.warning('There is no change of "%(net_interfaces)s" '
'for instance %(server)s, do nothing ' 'for instance %(server)s, do nothing '
'when updating.'), 'when updating.',
{'net_interfaces': self.NETWORK_INTERFACES, {'net_interfaces': self.NETWORK_INTERFACES,
'server': self.resource_id}) 'server': self.resource_id})
# if the interfaces not come from property 'NetworkInterfaces', # if the interfaces not come from property 'NetworkInterfaces',
@ -806,10 +804,10 @@ class Instance(resource.Resource, sh.SchedulerHintsMixin):
if network_interfaces and subnet_id: if network_interfaces and subnet_id:
# consider the old templates, we only to log to warn user # consider the old templates, we only to log to warn user
# NetworkInterfaces has higher priority than SubnetId # NetworkInterfaces has higher priority than SubnetId
LOG.warning(_LW('"%(subnet)s" will be ignored if specified ' LOG.warning('"%(subnet)s" will be ignored if specified '
'"%(net_interfaces)s". So if you specified the ' '"%(net_interfaces)s". So if you specified the '
'"%(net_interfaces)s" property, ' '"%(net_interfaces)s" property, '
'do not specify "%(subnet)s" property.'), 'do not specify "%(subnet)s" property.',
{'subnet': self.SUBNET_ID, {'subnet': self.SUBNET_ID,
'net_interfaces': self.NETWORK_INTERFACES}) 'net_interfaces': self.NETWORK_INTERFACES})
@ -854,7 +852,7 @@ class Instance(resource.Resource, sh.SchedulerHintsMixin):
# if the instance has been suspended successful, # if the instance has been suspended successful,
# no need to suspend again # no need to suspend again
if self.client_plugin().get_status(server) != 'SUSPENDED': if self.client_plugin().get_status(server) != 'SUSPENDED':
LOG.debug("suspending instance %s" % self.resource_id) LOG.debug("suspending instance %s", self.resource_id)
server.suspend() server.suspend()
return server.id return server.id
@ -864,8 +862,8 @@ class Instance(resource.Resource, sh.SchedulerHintsMixin):
if not server: if not server:
return False return False
status = cp.get_status(server) status = cp.get_status(server)
LOG.debug('%(name)s check_suspend_complete status = %(status)s' LOG.debug('%(name)s check_suspend_complete status = %(status)s',
% {'name': self.name, 'status': status}) {'name': self.name, 'status': status})
if status in list(cp.deferred_server_statuses + ['ACTIVE']): if status in list(cp.deferred_server_statuses + ['ACTIVE']):
return status == 'SUSPENDED' return status == 'SUSPENDED'
else: else:
@ -897,7 +895,7 @@ class Instance(resource.Resource, sh.SchedulerHintsMixin):
# if the instance has been resumed successful, # if the instance has been resumed successful,
# no need to resume again # no need to resume again
if self.client_plugin().get_status(server) != 'ACTIVE': if self.client_plugin().get_status(server) != 'ACTIVE':
LOG.debug("resuming instance %s" % self.resource_id) LOG.debug("resuming instance %s", self.resource_id)
server.resume() server.resume()
return server.id return server.id

View File

@ -16,7 +16,6 @@ import six
from heat.common import exception from heat.common import exception
from heat.common.i18n import _ from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.engine import attributes from heat.engine import attributes
from heat.engine import constraints from heat.engine import constraints
from heat.engine import properties from heat.engine import properties
@ -83,20 +82,19 @@ class User(stack_user.StackUser):
# and we don't want to break templates which previously worked # and we don't want to break templates which previously worked
if not isinstance(policy, six.string_types): if not isinstance(policy, six.string_types):
LOG.debug("Ignoring policy %s, must be string " LOG.debug("Ignoring policy %s, must be string "
"resource name" % policy) "resource name", policy)
continue continue
try: try:
policy_rsrc = self.stack[policy] policy_rsrc = self.stack[policy]
except KeyError: except KeyError:
LOG.debug("Policy %(policy)s does not exist in stack " LOG.debug("Policy %(policy)s does not exist in stack "
"%(stack)s" "%(stack)s",
% {'policy': policy, 'stack': self.stack.name}) {'policy': policy, 'stack': self.stack.name})
return False return False
if not callable(getattr(policy_rsrc, 'access_allowed', None)): if not callable(getattr(policy_rsrc, 'access_allowed', None)):
LOG.debug("Policy %s is not an AccessPolicy resource" LOG.debug("Policy %s is not an AccessPolicy resource", policy)
% policy)
return False return False
return True return True
@ -122,7 +120,7 @@ class User(stack_user.StackUser):
for policy in policies: for policy in policies:
if not isinstance(policy, six.string_types): if not isinstance(policy, six.string_types):
LOG.debug("Ignoring policy %s, must be string " LOG.debug("Ignoring policy %s, must be string "
"resource name" % policy) "resource name", policy)
continue continue
policy_rsrc = self.stack[policy] policy_rsrc = self.stack[policy]
if not policy_rsrc.access_allowed(resource_name): if not policy_rsrc.access_allowed(resource_name):
@ -221,7 +219,7 @@ class AccessKey(resource.Resource):
user = self._get_user() user = self._get_user()
if user is None: if user is None:
LOG.debug('Error deleting %s - user not found' % str(self)) LOG.debug('Error deleting %s - user not found', str(self))
return return
user._delete_keypair() user._delete_keypair()
@ -232,8 +230,8 @@ class AccessKey(resource.Resource):
""" """
if self._secret is None: if self._secret is None:
if not self.resource_id: if not self.resource_id:
LOG.info(_LI('could not get secret for %(username)s ' LOG.info('could not get secret for %(username)s '
'Error:%(msg)s'), 'Error:%(msg)s',
{'username': self.properties[self.USER_NAME], {'username': self.properties[self.USER_NAME],
'msg': "resource_id not yet set"}) 'msg': "resource_id not yet set"})
else: else:
@ -252,10 +250,10 @@ class AccessKey(resource.Resource):
# And the ID of the v3 credential # And the ID of the v3 credential
self.data_set('credential_id', kp.id, redact=True) self.data_set('credential_id', kp.id, redact=True)
except Exception as ex: except Exception as ex:
LOG.info(_LI('could not get secret for %(username)s ' LOG.info('could not get secret for %(username)s '
'Error:%(msg)s'), { 'Error:%(msg)s',
'username': self.properties[self.USER_NAME], {'username': self.properties[self.USER_NAME],
'msg': ex}) 'msg': ex})
return self._secret or '000-000-000' return self._secret or '000-000-000'

View File

@ -18,7 +18,6 @@ import six
from heat.common import exception from heat.common import exception
from heat.common.i18n import _ from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.common import template_format from heat.common import template_format
from heat.engine import attributes from heat.engine import attributes
from heat.engine import constraints from heat.engine import constraints
@ -505,7 +504,7 @@ backend servers
nova_cp = self.client_plugin('nova') nova_cp = self.client_plugin('nova')
for i in instances or []: for i in instances or []:
ip = nova_cp.server_to_ipaddress(i) or '0.0.0.0' ip = nova_cp.server_to_ipaddress(i) or '0.0.0.0'
LOG.debug('haproxy server:%s' % ip) LOG.debug('haproxy server:%s', ip)
servers.append('%sserver server%d %s:%s%s' % (spaces, n, servers.append('%sserver server%d %s:%s%s' % (spaces, n,
ip, inst_port, ip, inst_port,
check)) check))
@ -526,7 +525,7 @@ backend servers
def get_parsed_template(self): def get_parsed_template(self):
if cfg.CONF.loadbalancer_template: if cfg.CONF.loadbalancer_template:
with open(cfg.CONF.loadbalancer_template) as templ_fd: with open(cfg.CONF.loadbalancer_template) as templ_fd:
LOG.info(_LI('Using custom loadbalancer template %s'), LOG.info('Using custom loadbalancer template %s',
cfg.CONF.loadbalancer_template) cfg.CONF.loadbalancer_template)
contents = templ_fd.read() contents = templ_fd.read()
else: else:

View File

@ -17,7 +17,6 @@ import six
from heat.common import exception from heat.common import exception
from heat.common.i18n import _ from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.engine import attributes from heat.engine import attributes
from heat.engine.clients import progress from heat.engine.clients import progress
from heat.engine import constraints from heat.engine import constraints
@ -345,18 +344,18 @@ class CinderVolume(vb.BaseVolume, sh.SchedulerHintsMixin):
def _check_extend_volume_complete(self): def _check_extend_volume_complete(self):
vol = self.client().volumes.get(self.resource_id) vol = self.client().volumes.get(self.resource_id)
if vol.status == 'extending': if vol.status == 'extending':
LOG.debug("Volume %s is being extended" % vol.id) LOG.debug("Volume %s is being extended", vol.id)
return False return False
if vol.status != 'available': if vol.status != 'available':
LOG.info(_LI("Resize failed: Volume %(vol)s " LOG.info("Resize failed: Volume %(vol)s "
"is in %(status)s state."), "is in %(status)s state.",
{'vol': vol.id, 'status': vol.status}) {'vol': vol.id, 'status': vol.status})
raise exception.ResourceUnknownStatus( raise exception.ResourceUnknownStatus(
resource_status=vol.status, resource_status=vol.status,
result=_('Volume resize failed')) result=_('Volume resize failed'))
LOG.info(_LI('Volume %(id)s resize complete'), {'id': vol.id}) LOG.info('Volume %(id)s resize complete', {'id': vol.id})
return True return True
def _backup_restore(self, vol_id, backup_id): def _backup_restore(self, vol_id, backup_id):
@ -376,17 +375,17 @@ class CinderVolume(vb.BaseVolume, sh.SchedulerHintsMixin):
def _check_backup_restore_complete(self): def _check_backup_restore_complete(self):
vol = self.client().volumes.get(self.resource_id) vol = self.client().volumes.get(self.resource_id)
if vol.status == 'restoring-backup': if vol.status == 'restoring-backup':
LOG.debug("Volume %s is being restoring from backup" % vol.id) LOG.debug("Volume %s is being restoring from backup", vol.id)
return False return False
if vol.status != 'available': if vol.status != 'available':
LOG.info(_LI("Restore failed: Volume %(vol)s is in %(status)s " LOG.info("Restore failed: Volume %(vol)s is in %(status)s "
"state."), {'vol': vol.id, 'status': vol.status}) "state.", {'vol': vol.id, 'status': vol.status})
raise exception.ResourceUnknownStatus( raise exception.ResourceUnknownStatus(
resource_status=vol.status, resource_status=vol.status,
result=_('Volume backup restore failed')) result=_('Volume backup restore failed'))
LOG.info(_LI('Volume %(id)s backup restore complete'), {'id': vol.id}) LOG.info('Volume %s backup restore complete', vol.id)
return True return True
def needs_replace_failed(self): def needs_replace_failed(self):

View File

@ -15,7 +15,6 @@ from oslo_log import log as logging
import six import six
from heat.common.i18n import _ from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.engine import attributes from heat.engine import attributes
from heat.engine import constraints from heat.engine import constraints
from heat.engine import properties from heat.engine import properties
@ -81,7 +80,7 @@ class Restarter(signal_responder.SignalResponder):
else: else:
alarm_state = details.get('state', 'alarm').lower() alarm_state = details.get('state', 'alarm').lower()
LOG.info(_LI('%(name)s Alarm, new state %(state)s'), LOG.info('%(name)s Alarm, new state %(state)s',
{'name': self.name, 'state': alarm_state}) {'name': self.name, 'state': alarm_state})
if alarm_state != 'alarm': if alarm_state != 'alarm':
@ -90,13 +89,13 @@ class Restarter(signal_responder.SignalResponder):
target_id = self.properties[self.INSTANCE_ID] target_id = self.properties[self.INSTANCE_ID]
victim = self.stack.resource_by_refid(target_id) victim = self.stack.resource_by_refid(target_id)
if victim is None: if victim is None:
LOG.info(_LI('%(name)s Alarm, can not find instance ' LOG.info('%(name)s Alarm, can not find instance '
'%(instance)s'), '%(instance)s',
{'name': self.name, {'name': self.name,
'instance': target_id}) 'instance': target_id})
return return
LOG.info(_LI('%(name)s Alarm, restarting resource: %(victim)s'), LOG.info('%(name)s Alarm, restarting resource: %(victim)s',
{'name': self.name, 'victim': victim.name}) {'name': self.name, 'victim': victim.name})
self.stack.restart_resource(victim.name) self.stack.restart_resource(victim.name)

View File

@ -16,8 +16,6 @@ import six
from heat.common import exception from heat.common import exception
from heat.common.i18n import _ from heat.common.i18n import _
from heat.common.i18n import _LE
from heat.common.i18n import _LI
from heat.engine import attributes from heat.engine import attributes
from heat.engine import constraints from heat.engine import constraints
from heat.engine import properties from heat.engine import properties
@ -162,7 +160,7 @@ class AutoScalingPolicy(signal_responder.SignalResponder,
alarm_state = details.get('current', alarm_state = details.get('current',
details.get('state', 'alarm')).lower() details.get('state', 'alarm')).lower()
LOG.info(_LI('Alarm %(name)s, new state %(state)s'), LOG.info('Alarm %(name)s, new state %(state)s',
{'name': self.name, 'state': alarm_state}) {'name': self.name, 'state': alarm_state})
asgn_id = self.properties[self.AUTO_SCALING_GROUP_NAME] asgn_id = self.properties[self.AUTO_SCALING_GROUP_NAME]
@ -176,11 +174,11 @@ class AutoScalingPolicy(signal_responder.SignalResponder,
self._check_scaling_allowed() self._check_scaling_allowed()
LOG.info(_LI('%(name)s alarm, adjusting group %(group)s with id ' LOG.info('%(name)s alarm, adjusting group %(group)s with id '
'%(asgn_id)s by %(filter)s') % { '%(asgn_id)s by %(filter)s',
'name': self.name, 'group': group.name, {'name': self.name, 'group': group.name,
'asgn_id': asgn_id, 'asgn_id': asgn_id,
'filter': self.properties[self.SCALING_ADJUSTMENT]}) 'filter': self.properties[self.SCALING_ADJUSTMENT]})
size_changed = False size_changed = False
try: try:
@ -192,10 +190,9 @@ class AutoScalingPolicy(signal_responder.SignalResponder,
except resource.NoActionRequired: except resource.NoActionRequired:
raise raise
except Exception: except Exception:
LOG.error(_LE("Error in performing scaling adjustment with " LOG.error("Error in performing scaling adjustment with "
"%(name)s alarm for group %(group)s.") % { "%(name)s alarm for group %(group)s.",
'name': self.name, {'name': self.name, 'group': group.name})
'group': group.name})
raise raise
finally: finally:
self._finished_scaling("%s : %s" % ( self._finished_scaling("%s : %s" % (

View File

@ -19,7 +19,6 @@ from six.moves.urllib import parse
from heat.common import exception from heat.common import exception
from heat.common.i18n import _ from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.engine import attributes from heat.engine import attributes
from heat.engine.clients.os import swift from heat.engine.clients.os import swift
from heat.engine import constraints from heat.engine import constraints
@ -245,12 +244,12 @@ class SwiftSignal(resource.Resource):
container = self.client().get_container(self.stack.id) container = self.client().get_container(self.stack.id)
except Exception as exc: except Exception as exc:
self.client_plugin().ignore_not_found(exc) self.client_plugin().ignore_not_found(exc)
LOG.debug("Swift container %s was not found" % self.stack.id) LOG.debug("Swift container %s was not found", self.stack.id)
return [] return []
index = container[1] index = container[1]
if not index: if not index:
LOG.debug("Swift objects in container %s were not found" % LOG.debug("Swift objects in container %s were not found",
self.stack.id) self.stack.id)
return [] return []
@ -332,14 +331,14 @@ class SwiftSignal(resource.Resource):
for status in statuses: for status in statuses:
if status == self.STATUS_FAILURE: if status == self.STATUS_FAILURE:
failure = SwiftSignalFailure(self) failure = SwiftSignalFailure(self)
LOG.info(_LI('%(name)s Failed (%(failure)s)'), LOG.info('%(name)s Failed (%(failure)s)',
{'name': str(self), 'failure': str(failure)}) {'name': str(self), 'failure': str(failure)})
raise failure raise failure
elif status != self.STATUS_SUCCESS: elif status != self.STATUS_SUCCESS:
raise exception.Error(_("Unknown status: %s") % status) raise exception.Error(_("Unknown status: %s") % status)
if len(statuses) >= self.properties[self.COUNT]: if len(statuses) >= self.properties[self.COUNT]:
LOG.info(_LI("%s Succeeded"), str(self)) LOG.info("%s Succeeded", str(self))
return True return True
return False return False

View File

@ -16,7 +16,7 @@ import eventlet
from oslo_utils import timeutils from oslo_utils import timeutils
import six import six
from heat.common.i18n import _, _LI from heat.common.i18n import _
from heat.engine import attributes from heat.engine import attributes
from heat.engine import constraints from heat.engine import constraints
from heat.engine import properties from heat.engine import properties
@ -161,7 +161,7 @@ class TestResource(resource.Resource):
secs = self.properties[self.ACTION_WAIT_SECS][self.action.lower()] secs = self.properties[self.ACTION_WAIT_SECS][self.action.lower()]
if secs is None: if secs is None:
secs = self.properties[self.WAIT_SECS] secs = self.properties[self.WAIT_SECS]
LOG.info(_LI('%(name)s wait_secs:%(wait)s, action:%(action)s'), LOG.info('%(name)s wait_secs:%(wait)s, action:%(action)s',
{'name': self.name, {'name': self.name,
'wait': secs, 'wait': secs,
'action': self.action.lower()}) 'action': self.action.lower()})
@ -228,7 +228,7 @@ class TestResource(resource.Resource):
started_at = timeutils.normalize_time(started_at) started_at = timeutils.normalize_time(started_at)
waited = timeutils.utcnow() - started_at waited = timeutils.utcnow() - started_at
LOG.info(_LI("Resource %(name)s waited %(waited)s/%(sec)s seconds"), LOG.info("Resource %(name)s waited %(waited)s/%(sec)s seconds",
{'name': self.name, {'name': self.name,
'waited': waited, 'waited': waited,
'sec': wait_secs}) 'sec': wait_secs})

View File

@ -17,7 +17,6 @@ from oslo_utils import timeutils
import six import six
from heat.common.i18n import _ from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.engine import attributes from heat.engine import attributes
from heat.engine import constraints from heat.engine import constraints
from heat.engine import properties from heat.engine import properties
@ -100,7 +99,7 @@ class HeatWaitCondition(resource.Resource):
def _wait(self, handle, started_at, timeout_in): def _wait(self, handle, started_at, timeout_in):
if timeutils.is_older_than(started_at, timeout_in): if timeutils.is_older_than(started_at, timeout_in):
exc = wc_base.WaitConditionTimeout(self, handle) exc = wc_base.WaitConditionTimeout(self, handle)
LOG.info(_LI('%(name)s Timed out (%(timeout)s)'), LOG.info('%(name)s Timed out (%(timeout)s)',
{'name': str(self), 'timeout': str(exc)}) {'name': str(self), 'timeout': str(exc)})
raise exc raise exc
@ -108,12 +107,12 @@ class HeatWaitCondition(resource.Resource):
if any(s != handle.STATUS_SUCCESS for s in handle_status): if any(s != handle.STATUS_SUCCESS for s in handle_status):
failure = wc_base.WaitConditionFailure(self, handle) failure = wc_base.WaitConditionFailure(self, handle)
LOG.info(_LI('%(name)s Failed (%(failure)s)'), LOG.info('%(name)s Failed (%(failure)s)',
{'name': str(self), 'failure': str(failure)}) {'name': str(self), 'failure': str(failure)})
raise failure raise failure
if len(handle_status) >= self.properties[self.COUNT]: if len(handle_status) >= self.properties[self.COUNT]:
LOG.info(_LI("%s Succeeded"), str(self)) LOG.info("%s Succeeded", str(self))
return True return True
return False return False

View File

@ -17,7 +17,6 @@ import six
from heat.common import exception from heat.common import exception
from heat.common.i18n import _ from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.engine import attributes from heat.engine import attributes
from heat.engine import constraints from heat.engine import constraints
from heat.engine import properties from heat.engine import properties
@ -234,7 +233,7 @@ class ManilaShare(resource.Resource):
if share_status == self.STATUS_CREATING: if share_status == self.STATUS_CREATING:
return False return False
elif share_status == self.STATUS_AVAILABLE: elif share_status == self.STATUS_AVAILABLE:
LOG.info(_LI('Applying access rules to created Share.')) LOG.info('Applying access rules to created Share.')
# apply access rules to created share. please note that it is not # apply access rules to created share. please note that it is not
# possible to define rules for share with share_status = creating # possible to define rules for share with share_status = creating
access_rules = self.properties.get(self.ACCESS_RULES) access_rules = self.properties.get(self.ACCESS_RULES)

View File

@ -15,7 +15,6 @@ from oslo_log import log as logging
from heat.common import exception from heat.common import exception
from heat.common.i18n import _ from heat.common.i18n import _
from heat.common.i18n import _LW
from heat.engine import attributes from heat.engine import attributes
from heat.engine import constraints from heat.engine import constraints
from heat.engine import properties from heat.engine import properties
@ -152,9 +151,9 @@ class CronTrigger(resource.Resource):
# changed after # changed after
# https://blueprints.launchpad.net/mistral/+spec/mistral-cron-trigger-life-cycle # https://blueprints.launchpad.net/mistral/+spec/mistral-cron-trigger-life-cycle
# will be merged. # will be merged.
LOG.warning(_LW("get_live_state isn't implemented for this type of " LOG.warning("get_live_state isn't implemented for this type of "
"resource due to specific behaviour of cron trigger " "resource due to specific behaviour of cron trigger "
"in mistral.")) "in mistral.")
return {} return {}

View File

@ -15,7 +15,6 @@ from oslo_log import log as logging
from heat.common import exception from heat.common import exception
from heat.common.i18n import _ from heat.common.i18n import _
from heat.common.i18n import _LW
from heat.engine import resource from heat.engine import resource
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
@ -113,8 +112,7 @@ class NeutronResource(resource.Resource):
key = self.res_info_key if self.res_info_key else self.entity key = self.res_info_key if self.res_info_key else self.entity
return res_info[key] return res_info[key]
except AttributeError as ex: except AttributeError as ex:
LOG.warning(_LW("Resolving 'show' attribute has failed : %s"), LOG.warning("Resolving 'show' attribute has failed : %s", ex)
ex)
def _resolve_attribute(self, name): def _resolve_attribute(self, name):
if self.resource_id is None: if self.resource_id is None:

View File

@ -16,7 +16,6 @@ from oslo_serialization import jsonutils
import six import six
from heat.common.i18n import _ from heat.common.i18n import _
from heat.common.i18n import _LW
from heat.engine import attributes from heat.engine import attributes
from heat.engine import constraints from heat.engine import constraints
from heat.engine import properties from heat.engine import properties
@ -503,7 +502,7 @@ class Port(neutron.NeutronResource):
subnets.append(self.client().show_subnet( subnets.append(self.client().show_subnet(
subnet_id)['subnet']) subnet_id)['subnet'])
except Exception as ex: except Exception as ex:
LOG.warning(_LW("Failed to fetch resource attributes: %s"), ex) LOG.warning("Failed to fetch resource attributes: %s", ex)
return return
return subnets return subnets
return super(Port, self)._resolve_attribute(name) return super(Port, self)._resolve_attribute(name)
@ -520,7 +519,7 @@ class Port(neutron.NeutronResource):
prop_diff['qos_policy_id'] = self.client_plugin( prop_diff['qos_policy_id'] = self.client_plugin(
).get_qos_policy_id(qos_policy) if qos_policy else None ).get_qos_policy_id(qos_policy) if qos_policy else None
self._prepare_port_properties(prop_diff, prepare_for_update=True) self._prepare_port_properties(prop_diff, prepare_for_update=True)
LOG.debug('updating port with %s' % prop_diff) LOG.debug('updating port with %s', prop_diff)
self.client().update_port(self.resource_id, {'port': prop_diff}) self.client().update_port(self.resource_id, {'port': prop_diff})
def check_update_complete(self, *args): def check_update_complete(self, *args):

View File

@ -14,7 +14,6 @@ from oslo_log import log as logging
from heat.common import exception from heat.common import exception
from heat.common.i18n import _ from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.engine import attributes from heat.engine import attributes
from heat.engine import constraints from heat.engine import constraints
from heat.engine import properties from heat.engine import properties
@ -170,8 +169,8 @@ class NovaFlavor(resource.Resource):
if not self.IS_PUBLIC: if not self.IS_PUBLIC:
if not tenants: if not tenants:
LOG.info(_LI('Tenant property is recommended if IS_PUBLIC' LOG.info('Tenant property is recommended if IS_PUBLIC '
'is false.')) 'is false.')
tenant = self.stack.context.tenant_id tenant = self.stack.context.tenant_id
self.client().flavor_access.add_tenant_access(flavor, tenant) self.client().flavor_access.add_tenant_access(flavor, tenant)
else: else:

View File

@ -16,7 +16,6 @@ from oslo_utils import excutils
import six import six
from heat.common.i18n import _ from heat.common.i18n import _
from heat.common.i18n import _LE
from heat.engine import attributes from heat.engine import attributes
from heat.engine import constraints from heat.engine import constraints
from heat.engine import properties from heat.engine import properties
@ -90,9 +89,9 @@ class NovaFloatingIp(resource.Resource):
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
if self.client_plugin().is_not_found(e): if self.client_plugin().is_not_found(e):
if pool is None: if pool is None:
LOG.error(_LE('Could not allocate floating IP. ' LOG.error('Could not allocate floating IP. '
'Probably there is no default floating' 'Probably there is no default floating '
' IP pool is configured.')) 'IP pool is configured.')
self.resource_id_set(floating_ip.id) self.resource_id_set(floating_ip.id)
self._floating_ip = floating_ip self._floating_ip = floating_ip

View File

@ -1509,7 +1509,7 @@ class Server(server_base.BaseServer, sh.SchedulerHintsMixin,
# if the server has been suspended successful, # if the server has been suspended successful,
# no need to suspend again # no need to suspend again
if self.client_plugin().get_status(server) != 'SUSPENDED': if self.client_plugin().get_status(server) != 'SUSPENDED':
LOG.debug('suspending server %s' % self.resource_id) LOG.debug('suspending server %s', self.resource_id)
server.suspend() server.suspend()
return server.id return server.id
@ -1519,8 +1519,8 @@ class Server(server_base.BaseServer, sh.SchedulerHintsMixin,
if not server: if not server:
return False return False
status = cp.get_status(server) status = cp.get_status(server)
LOG.debug('%(name)s check_suspend_complete status = %(status)s' LOG.debug('%(name)s check_suspend_complete status = %(status)s',
% {'name': self.name, 'status': status}) {'name': self.name, 'status': status})
if status in list(cp.deferred_server_statuses + ['ACTIVE']): if status in list(cp.deferred_server_statuses + ['ACTIVE']):
return status == 'SUSPENDED' return status == 'SUSPENDED'
else: else:
@ -1552,7 +1552,7 @@ class Server(server_base.BaseServer, sh.SchedulerHintsMixin,
# if the server has been resumed successful, # if the server has been resumed successful,
# no need to resume again # no need to resume again
if self.client_plugin().get_status(server) != 'ACTIVE': if self.client_plugin().get_status(server) != 'ACTIVE':
LOG.debug('resuming server %s' % self.resource_id) LOG.debug('resuming server %s', self.resource_id)
server.resume() server.resume()
return server.id return server.id

View File

@ -21,7 +21,6 @@ import tenacity
from heat.common import exception from heat.common import exception
from heat.common.i18n import _ from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.engine import resource from heat.engine import resource
from heat.engine.resources.openstack.neutron import port as neutron_port from heat.engine.resources.openstack.neutron import port as neutron_port
@ -434,10 +433,10 @@ class ServerNetworkMixin(object):
try: try:
if self.client_plugin().check_interface_detach( if self.client_plugin().check_interface_detach(
existing_server_id, port['id']): existing_server_id, port['id']):
LOG.info(_LI('Detach interface %(port)s successful from ' LOG.info('Detach interface %(port)s successful from '
'server %(server)s.') 'server %(server)s.',
% {'port': port['id'], {'port': port['id'],
'server': existing_server_id}) 'server': existing_server_id})
except tenacity.RetryError: except tenacity.RetryError:
raise exception.InterfaceDetachFailed( raise exception.InterfaceDetachFailed(
port=port['id'], server=existing_server_id) port=port['id'], server=existing_server_id)
@ -451,10 +450,10 @@ class ServerNetworkMixin(object):
try: try:
if self.client_plugin().check_interface_attach( if self.client_plugin().check_interface_attach(
prev_server_id, port['id']): prev_server_id, port['id']):
LOG.info(_LI('Attach interface %(port)s successful to ' LOG.info('Attach interface %(port)s successful to '
'server %(server)s') 'server %(server)s',
% {'port': port['id'], {'port': port['id'],
'server': prev_server_id}) 'server': prev_server_id})
except tenacity.RetryError: except tenacity.RetryError:
raise exception.InterfaceAttachFailed( raise exception.InterfaceAttachFailed(
port=port['id'], server=prev_server_id) port=port['id'], server=prev_server_id)

View File

@ -17,7 +17,6 @@ from oslo_log import log as logging
from heat.common import exception from heat.common import exception
from heat.common.i18n import _ from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.engine import attributes from heat.engine import attributes
from heat.engine import constraints from heat.engine import constraints
from heat.engine import properties from heat.engine import properties
@ -249,7 +248,7 @@ class SaharaCluster(resource.Resource):
net_id=net_id, net_id=net_id,
use_autoconfig=use_autoconfig, use_autoconfig=use_autoconfig,
shares=shares) shares=shares)
LOG.info(_LI('Cluster "%s" is being started.'), cluster.name) LOG.info('Cluster "%s" is being started.', cluster.name)
self.resource_id_set(cluster.id) self.resource_id_set(cluster.id)
return self.resource_id return self.resource_id
@ -261,7 +260,7 @@ class SaharaCluster(resource.Resource):
if cluster.status != self.CLUSTER_ACTIVE: if cluster.status != self.CLUSTER_ACTIVE:
return False return False
LOG.info(_LI("Cluster '%s' has been created"), cluster.name) LOG.info("Cluster '%s' has been created", cluster.name)
return True return True
def check_delete_complete(self, resource_id): def check_delete_complete(self, resource_id):
@ -272,7 +271,7 @@ class SaharaCluster(resource.Resource):
cluster = self.client().clusters.get(resource_id) cluster = self.client().clusters.get(resource_id)
except Exception as ex: except Exception as ex:
self.client_plugin().ignore_not_found(ex) self.client_plugin().ignore_not_found(ex)
LOG.info(_LI("Cluster '%s' has been deleted"), LOG.info("Cluster '%s' has been deleted",
self._cluster_name()) self._cluster_name())
return True return True
else: else:

View File

@ -21,7 +21,6 @@ from oslo_utils import encodeutils
from heat.common import exception from heat.common import exception
from heat.common.i18n import _ from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.engine import constraints from heat.engine import constraints
from heat.engine import properties from heat.engine import properties
from heat.engine import resource from heat.engine import resource
@ -286,7 +285,7 @@ class SaharaNodeGroupTemplate(resource.Resource):
props = dict((k, v) for k, v in six.iteritems(self.properties)) props = dict((k, v) for k, v in six.iteritems(self.properties))
args = self._prepare_properties(props) args = self._prepare_properties(props)
node_group_template = self.client().node_group_templates.create(**args) node_group_template = self.client().node_group_templates.create(**args)
LOG.info(_LI("Node Group Template '%s' has been created"), LOG.info("Node Group Template '%s' has been created",
node_group_template.name) node_group_template.name)
self.resource_id_set(node_group_template.id) self.resource_id_set(node_group_template.id)
return self.resource_id return self.resource_id
@ -573,7 +572,7 @@ class SaharaClusterTemplate(resource.Resource):
props = dict((k, v) for k, v in six.iteritems(self.properties)) props = dict((k, v) for k, v in six.iteritems(self.properties))
args = self._prepare_properties(props) args = self._prepare_properties(props)
cluster_template = self.client().cluster_templates.create(**args) cluster_template = self.client().cluster_templates.create(**args)
LOG.info(_LI("Cluster Template '%s' has been created"), LOG.info("Cluster Template '%s' has been created",
cluster_template.name) cluster_template.name)
self.resource_id_set(cluster_template.id) self.resource_id_set(cluster_template.id)
return self.resource_id return self.resource_id

View File

@ -14,7 +14,6 @@
from oslo_log import log as logging from oslo_log import log as logging
from heat.common.i18n import _LW
from heat.engine import resource from heat.engine import resource
from heat.engine import support from heat.engine import support
@ -35,7 +34,7 @@ class BaseSenlinResource(resource.Resource):
res_info = client_method(self.resource_id) res_info = client_method(self.resource_id)
return res_info.to_dict() return res_info.to_dict()
except AttributeError as ex: except AttributeError as ex:
LOG.warning(_LW("No method to get the resource: %s"), ex) LOG.warning("No method to get the resource: %s", ex)
def _resolve_attribute(self, name): def _resolve_attribute(self, name):
if self.resource_id is None: if self.resource_id is None:

View File

@ -17,7 +17,6 @@ from six.moves.urllib import parse as urlparse
from heat.common import exception from heat.common import exception
from heat.common.i18n import _ from heat.common.i18n import _
from heat.common.i18n import _LW
from heat.engine import attributes from heat.engine import attributes
from heat.engine import properties from heat.engine import properties
from heat.engine import resource from heat.engine import resource
@ -154,10 +153,10 @@ class SwiftContainer(resource.Resource):
LOG.debug('SwiftContainer create container %(container)s with ' LOG.debug('SwiftContainer create container %(container)s with '
'container headers %(container_headers)s and ' 'container headers %(container_headers)s and '
'account headers %(account_headers)s' 'account headers %(account_headers)s',
% {'container': container, {'container': container,
'account_headers': account_headers, 'account_headers': account_headers,
'container_headers': container_headers}) 'container_headers': container_headers})
self.client().put_container(container, container_headers) self.client().put_container(container, container_headers)
@ -242,7 +241,7 @@ class SwiftContainer(resource.Resource):
headers = self.client().head_container(self.resource_id) headers = self.client().head_container(self.resource_id)
except Exception as ex: except Exception as ex:
if self.client_plugin().is_client_exception(ex): if self.client_plugin().is_client_exception(ex):
LOG.warning(_LW("Head container failed: %s"), ex) LOG.warning("Head container failed: %s", ex)
return None return None
raise raise
else: else:

View File

@ -15,8 +15,6 @@ from oslo_log import log as logging
from heat.common import exception from heat.common import exception
from heat.common.i18n import _ from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.common.i18n import _LW
from heat.engine import attributes from heat.engine import attributes
from heat.engine import constraints from heat.engine import constraints
from heat.engine import properties from heat.engine import properties
@ -176,9 +174,9 @@ class TroveCluster(resource.Resource):
return cluster return cluster
except Exception as exc: except Exception as exc:
if self.client_plugin().is_over_limit(exc): if self.client_plugin().is_over_limit(exc):
LOG.warning(_LW("Stack %(name)s (%(id)s) received an " LOG.warning("Stack %(name)s (%(id)s) received an "
"OverLimit response during clusters.get():" "OverLimit response during clusters.get():"
" %(exception)s"), " %(exception)s",
{'name': self.stack.name, {'name': self.stack.name,
'id': self.stack.id, 'id': self.stack.id,
'exception': exc}) 'exception': exc})
@ -202,7 +200,7 @@ class TroveCluster(resource.Resource):
if instance['status'] != self.ACTIVE: if instance['status'] != self.ACTIVE:
return False return False
LOG.info(_LI("Cluster '%s' has been created"), cluster.name) LOG.info("Cluster '%s' has been created", cluster.name)
return True return True
def cluster_delete(self, cluster_id): def cluster_delete(self, cluster_id):

View File

@ -16,8 +16,6 @@ import six
from heat.common import exception from heat.common import exception
from heat.common.i18n import _ from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.common.i18n import _LW
from heat.engine import attributes from heat.engine import attributes
from heat.engine import constraints from heat.engine import constraints
from heat.engine import properties from heat.engine import properties
@ -382,9 +380,9 @@ class Instance(resource.Resource):
return instance return instance
except Exception as exc: except Exception as exc:
if self.client_plugin().is_over_limit(exc): if self.client_plugin().is_over_limit(exc):
LOG.warning(_LW("Stack %(name)s (%(id)s) received an " LOG.warning("Stack %(name)s (%(id)s) received an "
"OverLimit response during instance.get():" "OverLimit response during instance.get():"
" %(exception)s"), " %(exception)s",
{'name': self.stack.name, {'name': self.stack.name,
'id': self.stack.id, 'id': self.stack.id,
'exception': exc}) 'exception': exc})
@ -405,10 +403,10 @@ class Instance(resource.Resource):
if instance.status != self.ACTIVE: if instance.status != self.ACTIVE:
return False return False
LOG.info(_LI("Database instance %(database)s created " LOG.info("Database instance %(database)s created "
"(flavor:%(flavor)s, volume:%(volume)s, " "(flavor:%(flavor)s, volume:%(volume)s, "
"datastore:%(datastore_type)s, " "datastore:%(datastore_type)s, "
"datastore_version:%(datastore_version)s)"), "datastore_version:%(datastore_version)s)",
{'database': self._dbinstance_name(), {'database': self._dbinstance_name(),
'flavor': self.flavor, 'flavor': self.flavor,
'volume': self.volume, 'volume': self.volume,
@ -493,12 +491,12 @@ class Instance(resource.Resource):
# the instance could have updated between the time # the instance could have updated between the time
# we retrieve it and try to update it so check again # we retrieve it and try to update it so check again
if self.client_plugin().is_over_limit(exc): if self.client_plugin().is_over_limit(exc):
LOG.debug("API rate limit: %(ex)s. Retrying." % LOG.debug("API rate limit: %(ex)s. Retrying.",
{'ex': six.text_type(exc)}) {'ex': six.text_type(exc)})
return False return False
if "No change was requested" in six.text_type(exc): if "No change was requested" in six.text_type(exc):
LOG.warning(_LW("Unexpected instance state change " LOG.warning("Unexpected instance state change "
"during update. Retrying.")) "during update. Retrying.")
return False return False
raise raise
return True return True

View File

@ -18,7 +18,6 @@ from oslo_log import log as logging
from oslo_serialization import jsonutils from oslo_serialization import jsonutils
from heat.common import exception from heat.common import exception
from heat.common.i18n import _LE
from heat.engine.clients import progress from heat.engine.clients import progress
from heat.engine.resources import stack_user from heat.engine.resources import stack_user
@ -244,7 +243,7 @@ class BaseServer(stack_user.StackUser):
# transport will continue to work, and the new transport may work # transport will continue to work, and the new transport may work
# despite exceptions in the above block. # despite exceptions in the above block.
LOG.exception( LOG.exception(
_LE('Error while updating software config transport') 'Error while updating software config transport'
) )
def metadata_update(self, new_metadata=None): def metadata_update(self, new_metadata=None):

View File

@ -21,7 +21,6 @@ from six.moves.urllib import parse as urlparse
from heat.common import exception from heat.common import exception
from heat.common.i18n import _ from heat.common.i18n import _
from heat.common.i18n import _LW
from heat.engine.clients.os import swift from heat.engine.clients.os import swift
from heat.engine.resources import stack_user from heat.engine.resources import stack_user
@ -139,8 +138,8 @@ class SignalResponder(stack_user.StackUser):
secret_key = self.data().get('secret_key') secret_key = self.data().get('secret_key')
if not access_key or not secret_key: if not access_key or not secret_key:
LOG.warning(_LW('Cannot generate signed url, ' LOG.warning('Cannot generate signed url, '
'unable to create keypair')) 'unable to create keypair')
return return
config_url = cfg.CONF.heat_waitcondition_server_url config_url = cfg.CONF.heat_waitcondition_server_url
@ -342,12 +341,12 @@ class SignalResponder(stack_user.StackUser):
container = swift_client.get_container(self.stack.id) container = swift_client.get_container(self.stack.id)
except Exception as exc: except Exception as exc:
self.client_plugin('swift').ignore_not_found(exc) self.client_plugin('swift').ignore_not_found(exc)
LOG.debug("Swift container %s was not found" % self.stack.id) LOG.debug("Swift container %s was not found", self.stack.id)
return return
index = container[1] index = container[1]
if not index: # Swift objects were deleted by user if not index: # Swift objects were deleted by user
LOG.debug("Swift objects in container %s were not found" % LOG.debug("Swift objects in container %s were not found",
self.stack.id) self.stack.id)
return return

View File

@ -23,8 +23,6 @@ import six
from heat.common import exception from heat.common import exception
from heat.common.i18n import _ from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.common.i18n import _LW
from heat.common import identifier from heat.common import identifier
from heat.common import template_format from heat.common import template_format
from heat.engine import attributes from heat.engine import attributes
@ -169,7 +167,7 @@ class StackResource(resource.Resource):
params = self.child_params() params = self.child_params()
except NotImplementedError: except NotImplementedError:
class_name = reflection.get_class_name(self, fully_qualified=False) class_name = reflection.get_class_name(self, fully_qualified=False)
LOG.warning(_LW("Preview of '%s' not yet implemented"), class_name) LOG.warning("Preview of '%s' not yet implemented", class_name)
return self return self
name = "%s-%s" % (self.stack.name, self.name) name = "%s-%s" % (self.stack.name, self.name)
@ -446,7 +444,7 @@ class StackResource(resource.Resource):
if self.stack.action == self.stack.ROLLBACK: if self.stack.action == self.stack.ROLLBACK:
if self._try_rollback(): if self._try_rollback():
LOG.info(_LI('Triggered nested stack %s rollback'), LOG.info('Triggered nested stack %s rollback',
self.physical_resource_name()) self.physical_resource_name())
return {'target_action': self.stack.ROLLBACK} return {'target_action': self.stack.ROLLBACK}

View File

@ -16,7 +16,6 @@ from oslo_log import log as logging
from heat.common import exception from heat.common import exception
from heat.common.i18n import _ from heat.common.i18n import _
from heat.common.i18n import _LW
from heat.engine import resource from heat.engine import resource
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
@ -93,7 +92,7 @@ class StackUser(resource.Resource):
# compatibility with resources created before the migration # compatibility with resources created before the migration
# to stack_user.StackUser domain users. After an appropriate # to stack_user.StackUser domain users. After an appropriate
# transitional period, this should be removed. # transitional period, this should be removed.
LOG.warning(_LW('Reverting to legacy user delete path')) LOG.warning('Reverting to legacy user delete path')
try: try:
self.keystone().delete_stack_user(user_id) self.keystone().delete_stack_user(user_id)
except kc_exception.NotFound: except kc_exception.NotFound:

View File

@ -18,7 +18,6 @@ import six
from heat.common import exception from heat.common import exception
from heat.common.i18n import _ from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.engine.resources import signal_responder from heat.engine.resources import signal_responder
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
@ -67,7 +66,7 @@ class BaseWaitConditionHandle(signal_responder.SignalResponder):
latest_rsrc_metadata) latest_rsrc_metadata)
if not self._metadata_format_ok(signal_data): if not self._metadata_format_ok(signal_data):
LOG.info(_LI("Metadata failed validation for %s"), self.name) LOG.info("Metadata failed validation for %s", self.name)
raise ValueError(_("Metadata format invalid")) raise ValueError(_("Metadata format invalid"))
new_entry = signal_data.copy() new_entry = signal_data.copy()
@ -75,7 +74,7 @@ class BaseWaitConditionHandle(signal_responder.SignalResponder):
new_rsrc_metadata = latest_rsrc_metadata.copy() new_rsrc_metadata = latest_rsrc_metadata.copy()
if unique_id in new_rsrc_metadata: if unique_id in new_rsrc_metadata:
LOG.info(_LI("Overwriting Metadata item for id %s!"), LOG.info("Overwriting Metadata item for id %s!",
unique_id) unique_id)
new_rsrc_metadata.update({unique_id: new_entry}) new_rsrc_metadata.update({unique_id: new_entry})

View File

@ -22,7 +22,6 @@ from oslo_utils import excutils
import six import six
from heat.common.i18n import _ from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.common.i18n import repr_wrapper from heat.common.i18n import repr_wrapper
from heat.common import timeutils from heat.common import timeutils
@ -153,7 +152,7 @@ class TaskRunner(object):
def _sleep(self, wait_time): def _sleep(self, wait_time):
"""Sleep for the specified number of seconds.""" """Sleep for the specified number of seconds."""
if ENABLE_SLEEP and wait_time is not None: if ENABLE_SLEEP and wait_time is not None:
LOG.debug('%s sleeping' % six.text_type(self)) LOG.debug('%s sleeping', six.text_type(self))
eventlet.sleep(wait_time) eventlet.sleep(wait_time)
def __call__(self, wait_time=1, timeout=None, progress_callback=None): def __call__(self, wait_time=1, timeout=None, progress_callback=None):
@ -180,7 +179,7 @@ class TaskRunner(object):
assert self._runner is None, "Task already started" assert self._runner is None, "Task already started"
assert not self._done, "Task already cancelled" assert not self._done, "Task already cancelled"
LOG.debug('%s starting' % six.text_type(self)) LOG.debug('%s starting', six.text_type(self))
if timeout is not None: if timeout is not None:
self._timeout = Timeout(self, timeout) self._timeout = Timeout(self, timeout)
@ -192,7 +191,7 @@ class TaskRunner(object):
else: else:
self._runner = False self._runner = False
self._done = True self._done = True
LOG.debug('%s done (not resumable)' % six.text_type(self)) LOG.debug('%s done (not resumable)', six.text_type(self))
def step(self): def step(self):
"""Run another step of the task. """Run another step of the task.
@ -207,18 +206,18 @@ class TaskRunner(object):
return False return False
if self._timeout is not None and self._timeout.expired(): if self._timeout is not None and self._timeout.expired():
LOG.info(_LI('%s timed out'), self) LOG.info('%s timed out', self)
self._done = True self._done = True
self._timeout.trigger(self._runner) self._timeout.trigger(self._runner)
else: else:
LOG.debug('%s running' % six.text_type(self)) LOG.debug('%s running', six.text_type(self))
try: try:
poll_period = next(self._runner) poll_period = next(self._runner)
except StopIteration: except StopIteration:
self._done = True self._done = True
LOG.debug('%s complete' % six.text_type(self)) LOG.debug('%s complete', six.text_type(self))
else: else:
if isinstance(poll_period, six.integer_types): if isinstance(poll_period, six.integer_types):
self._poll_period = max(poll_period, 1) self._poll_period = max(poll_period, 1)
@ -276,7 +275,7 @@ class TaskRunner(object):
return return
if not self.started() or grace_period is None: if not self.started() or grace_period is None:
LOG.debug('%s cancelled' % six.text_type(self)) LOG.debug('%s cancelled', six.text_type(self))
self._done = True self._done = True
if self.started(): if self.started():
self._runner.close() self._runner.close()
@ -477,13 +476,13 @@ class DependencyTaskGroup(object):
try: try:
r.cancel(grace_period=gp) r.cancel(grace_period=gp)
except Exception as ex: except Exception as ex:
LOG.debug('Exception cancelling task: %s' % six.text_type(ex)) LOG.debug('Exception cancelling task: %s', six.text_type(ex))
def _cancel_recursively(self, key, runner): def _cancel_recursively(self, key, runner):
try: try:
runner.cancel() runner.cancel()
except Exception as ex: except Exception as ex:
LOG.debug('Exception cancelling task: %s' % six.text_type(ex)) LOG.debug('Exception cancelling task: %s', six.text_type(ex))
node = self._graph[key] node = self._graph[key]
for dependent_node in node.required_by(): for dependent_node in node.required_by():
node_runner = self._runners[dependent_node] node_runner = self._runners[dependent_node]

View File

@ -37,9 +37,6 @@ from heat.common import environment_format as env_fmt
from heat.common import environment_util as env_util from heat.common import environment_util as env_util
from heat.common import exception from heat.common import exception
from heat.common.i18n import _ from heat.common.i18n import _
from heat.common.i18n import _LE
from heat.common.i18n import _LI
from heat.common.i18n import _LW
from heat.common import identifier from heat.common import identifier
from heat.common import messaging as rpc_messaging from heat.common import messaging as rpc_messaging
from heat.common import policy from heat.common import policy
@ -133,7 +130,7 @@ class ThreadGroupManager(object):
try: try:
gt.wait() gt.wait()
except Exception: except Exception:
LOG.exception(_LE('Unhandled error in asynchronous task')) LOG.exception('Unhandled error in asynchronous task')
except BaseException: except BaseException:
pass pass
@ -278,9 +275,9 @@ class EngineListener(object):
try: try:
self._server.stop() self._server.stop()
self._server.wait() self._server.wait()
LOG.info(_LI("Engine listener is stopped successfully")) LOG.info("Engine listener is stopped successfully")
except Exception as e: except Exception as e:
LOG.error(_LE("Failed to stop engine listener, %s"), e) LOG.error("Failed to stop engine listener, %s", e)
def listening(self, ctxt): def listening(self, ctxt):
"""Respond to a watchdog request. """Respond to a watchdog request.
@ -336,14 +333,14 @@ class EngineService(service.ServiceBase):
self.resource_enforcer = policy.ResourceEnforcer() self.resource_enforcer = policy.ResourceEnforcer()
if cfg.CONF.trusts_delegated_roles: if cfg.CONF.trusts_delegated_roles:
LOG.warning(_LW('The default value of "trusts_delegated_roles" ' LOG.warning('The default value of "trusts_delegated_roles" '
'option in heat.conf is changed to [] in Kilo ' 'option in heat.conf is changed to [] in Kilo '
'and heat will delegate all roles of trustor. ' 'and heat will delegate all roles of trustor. '
'Please keep the same if you do not want to ' 'Please keep the same if you do not want to '
'delegate subset roles when upgrading.')) 'delegate subset roles when upgrading.')
def create_periodic_tasks(self): def create_periodic_tasks(self):
LOG.debug("Starting periodic watch tasks pid=%s" % os.getpid()) LOG.debug("Starting periodic watch tasks pid=%s", os.getpid())
# Note with multiple workers, the parent process hasn't called start() # Note with multiple workers, the parent process hasn't called start()
# so we need to create a ThreadGroupManager here for the periodic tasks # so we need to create a ThreadGroupManager here for the periodic tasks
if self.thread_group_mgr is None: if self.thread_group_mgr is None:
@ -361,10 +358,10 @@ class EngineService(service.ServiceBase):
show_hidden=True) show_hidden=True)
for s in stacks: for s in stacks:
self.stack_watch.start_watch_task(s.id, admin_context) self.stack_watch.start_watch_task(s.id, admin_context)
LOG.info(_LI("Watch tasks created")) LOG.info("Watch tasks created")
return return
except Exception as e: except Exception as e:
LOG.error(_LE("Watch task creation attempt failed, %s"), e) LOG.error("Watch task creation attempt failed, %s", e)
eventlet.sleep(5) eventlet.sleep(5)
if self.manage_thread_grp is None: if self.manage_thread_grp is None:
@ -377,7 +374,7 @@ class EngineService(service.ServiceBase):
self.thread_group_mgr = ThreadGroupManager() self.thread_group_mgr = ThreadGroupManager()
self.listener = EngineListener(self.host, self.engine_id, self.listener = EngineListener(self.host, self.engine_id,
self.thread_group_mgr) self.thread_group_mgr)
LOG.debug("Starting listener for engine %s" % self.engine_id) LOG.debug("Starting listener for engine %s", self.engine_id)
self.listener.start() self.listener.start()
if cfg.CONF.convergence_engine: if cfg.CONF.convergence_engine:
@ -427,9 +424,9 @@ class EngineService(service.ServiceBase):
try: try:
self._rpc_server.stop() self._rpc_server.stop()
self._rpc_server.wait() self._rpc_server.wait()
LOG.info(_LI("Engine service is stopped successfully")) LOG.info("Engine service is stopped successfully")
except Exception as e: except Exception as e:
LOG.error(_LE("Failed to stop engine service, %s"), e) LOG.error("Failed to stop engine service, %s", e)
def stop(self): def stop(self):
self._stop_rpc_server() self._stop_rpc_server()
@ -446,19 +443,19 @@ class EngineService(service.ServiceBase):
# Ignore dummy service task # Ignore dummy service task
if stack_id == cfg.CONF.periodic_interval: if stack_id == cfg.CONF.periodic_interval:
continue continue
LOG.info(_LI("Waiting stack %s processing to be finished"), LOG.info("Waiting stack %s processing to be finished",
stack_id) stack_id)
# Stop threads gracefully # Stop threads gracefully
self.thread_group_mgr.stop(stack_id, True) self.thread_group_mgr.stop(stack_id, True)
LOG.info(_LI("Stack %s processing was finished"), stack_id) LOG.info("Stack %s processing was finished", stack_id)
if self.manage_thread_grp: if self.manage_thread_grp:
self.manage_thread_grp.stop() self.manage_thread_grp.stop()
ctxt = context.get_admin_context() ctxt = context.get_admin_context()
service_objects.Service.delete(ctxt, self.service_id) service_objects.Service.delete(ctxt, self.service_id)
LOG.info(_LI('Service %s is deleted'), self.service_id) LOG.info('Service %s is deleted', self.service_id)
# Terminate the engine process # Terminate the engine process
LOG.info(_LI("All threads were gone, terminating engine")) LOG.info("All threads were gone, terminating engine")
def wait(self): def wait(self):
pass pass
@ -739,7 +736,7 @@ class EngineService(service.ServiceBase):
:type environment_files: list or None :type environment_files: list or None
""" """
LOG.info(_LI('previewing stack %s'), stack_name) LOG.info('previewing stack %s', stack_name)
conv_eng = cfg.CONF.convergence_engine conv_eng = cfg.CONF.convergence_engine
stack = self._parse_template_and_validate_stack(cnxt, stack = self._parse_template_and_validate_stack(cnxt,
@ -783,7 +780,7 @@ class EngineService(service.ServiceBase):
:param parent_resource_name: the parent resource name :param parent_resource_name: the parent resource name
:param template_id: the ID of a pre-stored template in the DB :param template_id: the ID of a pre-stored template in the DB
""" """
LOG.info(_LI('Creating stack %s'), stack_name) LOG.info('Creating stack %s', stack_name)
def _create_stack_user(stack): def _create_stack_user(stack):
if not stack.stack_user_project_id: if not stack.stack_user_project_id:
@ -806,7 +803,7 @@ class EngineService(service.ServiceBase):
# Schedule a periodic watcher task for this stack # Schedule a periodic watcher task for this stack
self.stack_watch.start_watch_task(stack.id, cnxt) self.stack_watch.start_watch_task(stack.id, cnxt)
else: else:
LOG.info(_LI("Stack create failed, status %s"), stack.status) LOG.info("Stack create failed, status %s", stack.status)
convergence = cfg.CONF.convergence_engine convergence = cfg.CONF.convergence_engine
@ -883,9 +880,9 @@ class EngineService(service.ServiceBase):
else: else:
# Nothing we can do, the failed update happened before # Nothing we can do, the failed update happened before
# we started storing prev_raw_template_id # we started storing prev_raw_template_id
LOG.error(_LE('PATCH update to FAILED stack only ' LOG.error('PATCH update to FAILED stack only '
'possible if convergence enabled or ' 'possible if convergence enabled or '
'previous template stored')) 'previous template stored')
msg = _('PATCH update to non-COMPLETE stack') msg = _('PATCH update to non-COMPLETE stack')
raise exception.NotSupported(feature=msg) raise exception.NotSupported(feature=msg)
@ -970,7 +967,7 @@ class EngineService(service.ServiceBase):
""" """
# Get the database representation of the existing stack # Get the database representation of the existing stack
db_stack = self._get_stack(cnxt, stack_identity) db_stack = self._get_stack(cnxt, stack_identity)
LOG.info(_LI('Updating stack %s'), db_stack.name) LOG.info('Updating stack %s', db_stack.name)
if cfg.CONF.reauthentication_auth_method == 'trusts': if cfg.CONF.reauthentication_auth_method == 'trusts':
current_stack = parser.Stack.load( current_stack = parser.Stack.load(
cnxt, stack=db_stack, use_stored_context=True) cnxt, stack=db_stack, use_stored_context=True)
@ -1025,7 +1022,7 @@ class EngineService(service.ServiceBase):
""" """
# Get the database representation of the existing stack # Get the database representation of the existing stack
db_stack = self._get_stack(cnxt, stack_identity) db_stack = self._get_stack(cnxt, stack_identity)
LOG.info(_LI('Previewing update of stack %s'), db_stack.name) LOG.info('Previewing update of stack %s', db_stack.name)
current_stack = parser.Stack.load(cnxt, stack=db_stack) current_stack = parser.Stack.load(cnxt, stack=db_stack)
@ -1137,7 +1134,7 @@ class EngineService(service.ServiceBase):
state = '_'.join(current_stack.state) state = '_'.join(current_stack.state)
msg = _("Cancelling update when stack is %s") % str(state) msg = _("Cancelling update when stack is %s") % str(state)
raise exception.NotSupported(feature=msg) raise exception.NotSupported(feature=msg)
LOG.info(_LI('Starting cancel of updating stack %s'), db_stack.name) LOG.info('Starting cancel of updating stack %s', db_stack.name)
if current_stack.convergence: if current_stack.convergence:
if cancel_with_rollback: if cancel_with_rollback:
@ -1206,7 +1203,7 @@ class EngineService(service.ServiceBase):
:param ignorable_errors: List of error_code to be ignored as part of :param ignorable_errors: List of error_code to be ignored as part of
validation validation
""" """
LOG.info(_LI('validate_template')) LOG.info('validate_template')
if template is None: if template is None:
msg = _("No Template provided.") msg = _("No Template provided.")
return webob.exc.HTTPBadRequest(explanation=msg) return webob.exc.HTTPBadRequest(explanation=msg)
@ -1383,7 +1380,7 @@ class EngineService(service.ServiceBase):
st.action == parser.Stack.DELETE): st.action == parser.Stack.DELETE):
raise exception.EntityNotFound(entity='Stack', name=st.name) raise exception.EntityNotFound(entity='Stack', name=st.name)
LOG.info(_LI('Deleting stack %s'), st.name) LOG.info('Deleting stack %s', st.name)
stack = parser.Stack.load(cnxt, stack=st) stack = parser.Stack.load(cnxt, stack=st)
self.resource_enforcer.enforce_stack(stack) self.resource_enforcer.enforce_stack(stack)
@ -1440,7 +1437,7 @@ class EngineService(service.ServiceBase):
watch.start() watch.start()
while not watch.expired(): while not watch.expired():
LOG.debug('Waiting for stack cancel to complete: %s' % LOG.debug('Waiting for stack cancel to complete: %s',
stack.name) stack.name)
with lock.try_thread_lock() as acquire_result: with lock.try_thread_lock() as acquire_result:
@ -1463,7 +1460,7 @@ class EngineService(service.ServiceBase):
stack_identity=stack_identity) stack_identity=stack_identity)
if stop_result is None: if stop_result is None:
LOG.debug("Successfully stopped remote task " LOG.debug("Successfully stopped remote task "
"on engine %s" % acquire_result) "on engine %s", acquire_result)
else: else:
raise exception.StopActionFailed( raise exception.StopActionFailed(
stack_name=stack.name, engine_id=acquire_result) stack_name=stack.name, engine_id=acquire_result)
@ -1507,13 +1504,13 @@ class EngineService(service.ServiceBase):
# Get stack details before deleting it. # Get stack details before deleting it.
stack_info = stack.prepare_abandon() stack_info = stack.prepare_abandon()
if abandon: if abandon:
LOG.info(_LI('abandoning stack %s'), st.name) LOG.info('abandoning stack %s', st.name)
self.thread_group_mgr.start_with_acquired_lock(stack, self.thread_group_mgr.start_with_acquired_lock(stack,
lock, lock,
stack.delete, stack.delete,
abandon=True) abandon=True)
else: else:
LOG.info(_LI('exporting stack %s'), st.name) LOG.info('exporting stack %s', st.name)
return stack_info return stack_info
def list_resource_types(self, def list_resource_types(self,
@ -1615,8 +1612,8 @@ class EngineService(service.ServiceBase):
try: try:
resource_class = resources.global_env().get_class(type_name) resource_class = resources.global_env().get_class(type_name)
except exception.NotFound: except exception.NotFound:
LOG.exception(_LE('Error loading resource type %s ' LOG.exception('Error loading resource type %s '
'from global environment.'), 'from global environment.',
type_name) type_name)
raise exception.InvalidGlobalResource(type_name=type_name) raise exception.InvalidGlobalResource(type_name=type_name)
@ -1676,8 +1673,8 @@ class EngineService(service.ServiceBase):
try: try:
resource_class = resources.global_env().get_class(type_name) resource_class = resources.global_env().get_class(type_name)
except exception.NotFound: except exception.NotFound:
LOG.exception(_LE('Error loading resource type %s ' LOG.exception('Error loading resource type %s '
'from global environment.'), 'from global environment.',
type_name) type_name)
raise exception.InvalidGlobalResource(type_name=type_name) raise exception.InvalidGlobalResource(type_name=type_name)
else: else:
@ -1809,7 +1806,7 @@ class EngineService(service.ServiceBase):
if cfg.CONF.heat_stack_user_role in cnxt.roles: if cfg.CONF.heat_stack_user_role in cnxt.roles:
if not self._authorize_stack_user(cnxt, stack, resource_name): if not self._authorize_stack_user(cnxt, stack, resource_name):
LOG.warning(_LW("Access denied to resource %s"), resource_name) LOG.warning("Access denied to resource %s", resource_name)
raise exception.Forbidden() raise exception.Forbidden()
resource = stack.resource_get(resource_name) resource = stack.resource_get(resource_name)
@ -2012,7 +2009,7 @@ class EngineService(service.ServiceBase):
def stack_suspend(self, cnxt, stack_identity): def stack_suspend(self, cnxt, stack_identity):
"""Handle request to perform suspend action on a stack.""" """Handle request to perform suspend action on a stack."""
def _stack_suspend(stack): def _stack_suspend(stack):
LOG.debug("suspending stack %s" % stack.name) LOG.debug("suspending stack %s", stack.name)
stack.suspend() stack.suspend()
s = self._get_stack(cnxt, stack_identity) s = self._get_stack(cnxt, stack_identity)
@ -2026,7 +2023,7 @@ class EngineService(service.ServiceBase):
def stack_resume(self, cnxt, stack_identity): def stack_resume(self, cnxt, stack_identity):
"""Handle request to perform a resume action on a stack.""" """Handle request to perform a resume action on a stack."""
def _stack_resume(stack): def _stack_resume(stack):
LOG.debug("resuming stack %s" % stack.name) LOG.debug("resuming stack %s", stack.name)
stack.resume() stack.resume()
s = self._get_stack(cnxt, stack_identity) s = self._get_stack(cnxt, stack_identity)
@ -2049,17 +2046,17 @@ class EngineService(service.ServiceBase):
{'data': data, 'status': status, {'data': data, 'status': status,
'status_reason': reason}) 'status_reason': reason})
LOG.debug("Snapshotting stack %s" % stack.name) LOG.debug("Snapshotting stack %s", stack.name)
stack.snapshot(save_snapshot_func=save_snapshot) stack.snapshot(save_snapshot_func=save_snapshot)
s = self._get_stack(cnxt, stack_identity) s = self._get_stack(cnxt, stack_identity)
stack = parser.Stack.load(cnxt, stack=s) stack = parser.Stack.load(cnxt, stack=s)
if stack.status == stack.IN_PROGRESS: if stack.status == stack.IN_PROGRESS:
LOG.info(_LI('%(stack)s is in state %(action)s_IN_PROGRESS, ' LOG.info('%(stack)s is in state %(action)s_IN_PROGRESS, '
'snapshot is not permitted.'), { 'snapshot is not permitted.', {
'stack': six.text_type(stack), 'stack': six.text_type(stack),
'action': stack.action}) 'action': stack.action})
raise exception.ActionInProgress(stack_name=stack.name, raise exception.ActionInProgress(stack_name=stack.name,
action=stack.action) action=stack.action)
@ -2104,7 +2101,7 @@ class EngineService(service.ServiceBase):
"""Handle request to perform a check action on a stack.""" """Handle request to perform a check action on a stack."""
s = self._get_stack(cnxt, stack_identity) s = self._get_stack(cnxt, stack_identity)
stack = parser.Stack.load(cnxt, stack=s) stack = parser.Stack.load(cnxt, stack=s)
LOG.info(_LI("Checking stack %s"), stack.name) LOG.info("Checking stack %s", stack.name)
self.thread_group_mgr.start_with_lock(cnxt, stack, self.engine_id, self.thread_group_mgr.start_with_lock(cnxt, stack, self.engine_id,
stack.check) stack.check)
@ -2112,7 +2109,7 @@ class EngineService(service.ServiceBase):
@context.request_context @context.request_context
def stack_restore(self, cnxt, stack_identity, snapshot_id): def stack_restore(self, cnxt, stack_identity, snapshot_id):
def _stack_restore(stack, snapshot): def _stack_restore(stack, snapshot):
LOG.debug("restoring stack %s" % stack.name) LOG.debug("restoring stack %s", stack.name)
stack.restore(snapshot) stack.restore(snapshot)
s = self._get_stack(cnxt, stack_identity) s = self._get_stack(cnxt, stack_identity)
@ -2174,7 +2171,7 @@ class EngineService(service.ServiceBase):
try: try:
wrn = [w.name for w in watch_rule.WatchRule.get_all(cnxt)] wrn = [w.name for w in watch_rule.WatchRule.get_all(cnxt)]
except Exception as ex: except Exception as ex:
LOG.warning(_LW('show_watch (all) db error %s'), ex) LOG.warning('show_watch (all) db error %s', ex)
return return
wrs = [watchrule.WatchRule.load(cnxt, w) for w in wrn] wrs = [watchrule.WatchRule.load(cnxt, w) for w in wrn]
@ -2196,7 +2193,7 @@ class EngineService(service.ServiceBase):
# namespace/metric, but we will want this at some point # namespace/metric, but we will want this at some point
# for now, the API can query all metric data and filter locally # for now, the API can query all metric data and filter locally
if metric_namespace is not None or metric_name is not None: if metric_namespace is not None or metric_name is not None:
LOG.error(_LE("Filtering by namespace/metric not yet supported")) LOG.error("Filtering by namespace/metric not yet supported")
return return
try: try:
@ -2205,7 +2202,7 @@ class EngineService(service.ServiceBase):
r.id: r.name for r in watch_rule.WatchRule.get_all(cnxt) r.id: r.name for r in watch_rule.WatchRule.get_all(cnxt)
} }
except Exception as ex: except Exception as ex:
LOG.warning(_LW('show_metric (all) db error %s'), ex) LOG.warning('show_metric (all) db error %s', ex)
return return
result = [api.format_watch_data(w, rule_names) for w in wds] result = [api.format_watch_data(w, rule_names) for w in wds]
@ -2339,7 +2336,7 @@ class EngineService(service.ServiceBase):
stack_id=stack_id, stack_id=stack_id,
show_deleted=False) show_deleted=False)
if parent_stack.convergence: if parent_stack.convergence:
LOG.info(_LI("Convergence was already enabled for stack %s"), LOG.info("Convergence was already enabled for stack %s",
stack_id) stack_id)
return return
db_stacks = stack_object.Stack.get_all_by_root_owner_id( db_stacks = stack_object.Stack.get_all_by_root_owner_id(
@ -2382,17 +2379,17 @@ class EngineService(service.ServiceBase):
report_interval=cfg.CONF.periodic_interval) report_interval=cfg.CONF.periodic_interval)
) )
self.service_id = service_ref['id'] self.service_id = service_ref['id']
LOG.debug('Service %s is started' % self.service_id) LOG.debug('Service %s is started', self.service_id)
try: try:
service_objects.Service.update_by_id( service_objects.Service.update_by_id(
cnxt, cnxt,
self.service_id, self.service_id,
dict(deleted_at=None)) dict(deleted_at=None))
LOG.debug('Service %s is updated' % self.service_id) LOG.debug('Service %s is updated', self.service_id)
except Exception as ex: except Exception as ex:
LOG.error(_LE('Service %(service_id)s update ' LOG.error('Service %(service_id)s update '
'failed: %(error)s'), 'failed: %(error)s',
{'service_id': self.service_id, 'error': ex}) {'service_id': self.service_id, 'error': ex})
def service_manage_cleanup(self): def service_manage_cleanup(self):
@ -2410,7 +2407,7 @@ class EngineService(service.ServiceBase):
continue continue
if service_ref['updated_at'] < time_line: if service_ref['updated_at'] < time_line:
# hasn't been updated, assuming it's died. # hasn't been updated, assuming it's died.
LOG.debug('Service %s was aborted' % service_ref['id']) LOG.debug('Service %s was aborted', service_ref['id'])
service_objects.Service.delete(cnxt, service_ref['id']) service_objects.Service.delete(cnxt, service_ref['id'])
def reset_stack_status(self): def reset_stack_status(self):
@ -2440,8 +2437,8 @@ class EngineService(service.ServiceBase):
stk = parser.Stack.load(cnxt, stack=s, stk = parser.Stack.load(cnxt, stack=s,
service_check_defer=True, service_check_defer=True,
resource_validate=False) resource_validate=False)
LOG.info(_LI('Engine %(engine)s went down when stack ' LOG.info('Engine %(engine)s went down when stack '
'%(stack_id)s was in action %(action)s'), '%(stack_id)s was in action %(action)s',
{'engine': engine_id, 'action': stk.action, {'engine': engine_id, 'action': stk.action,
'stack_id': stk.id}) 'stack_id': stk.id})
@ -2457,6 +2454,5 @@ class EngineService(service.ServiceBase):
except exception.ActionInProgress: except exception.ActionInProgress:
continue continue
except Exception: except Exception:
LOG.exception(_LE('Error while resetting stack: %s') LOG.exception('Error while resetting stack: %s', stack_id)
% stack_id)
continue continue

View File

@ -23,7 +23,6 @@ from six.moves.urllib import parse as urlparse
from heat.common import crypt from heat.common import crypt
from heat.common import exception from heat.common import exception
from heat.common.i18n import _ from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.db.sqlalchemy import api as db_api from heat.db.sqlalchemy import api as db_api
from heat.engine import api from heat.engine import api
from heat.engine import scheduler from heat.engine import scheduler
@ -138,7 +137,7 @@ class SoftwareConfigService(object):
except Exception as ex: except Exception as ex:
# ignore not-found, in case swift is not consistent yet # ignore not-found, in case swift is not consistent yet
if swift_plugin.is_not_found(ex): if swift_plugin.is_not_found(ex):
LOG.info(_LI('Signal object not found: %(c)s %(o)s'), { LOG.info('Signal object not found: %(c)s %(o)s', {
'c': container, 'o': object_name}) 'c': container, 'o': object_name})
return sd return sd
raise raise
@ -160,8 +159,8 @@ class SoftwareConfigService(object):
except Exception as ex: except Exception as ex:
# ignore not-found, in case swift is not consistent yet # ignore not-found, in case swift is not consistent yet
if swift_plugin.is_not_found(ex): if swift_plugin.is_not_found(ex):
LOG.info(_LI( LOG.info(
'Signal object not found: %(c)s %(o)s'), { 'Signal object not found: %(c)s %(o)s', {
'c': container, 'o': object_name}) 'c': container, 'o': object_name})
return sd return sd
raise raise

View File

@ -16,8 +16,6 @@ from oslo_utils import timeutils
import six import six
from heat.common import context from heat.common import context
from heat.common.i18n import _LE
from heat.common.i18n import _LW
from heat.engine import stack from heat.engine import stack
from heat.engine import watchrule from heat.engine import watchrule
from heat.objects import stack as stack_object from heat.objects import stack as stack_object
@ -64,13 +62,12 @@ class StackWatch(object):
def check_stack_watches(self, sid): def check_stack_watches(self, sid):
# Use admin_context for stack_get to defeat tenant # Use admin_context for stack_get to defeat tenant
# scoping otherwise we fail to retrieve the stack # scoping otherwise we fail to retrieve the stack
LOG.debug("Periodic watcher task for stack %s" % sid) LOG.debug("Periodic watcher task for stack %s", sid)
admin_context = context.get_admin_context() admin_context = context.get_admin_context()
db_stack = stack_object.Stack.get_by_id(admin_context, db_stack = stack_object.Stack.get_by_id(admin_context,
sid) sid)
if not db_stack: if not db_stack:
LOG.error(_LE("Unable to retrieve stack %s for periodic task"), LOG.error("Unable to retrieve stack %s for periodic task", sid)
sid)
return return
stk = stack.Stack.load(admin_context, stack=db_stack, stk = stack.Stack.load(admin_context, stack=db_stack,
use_stored_context=True) use_stored_context=True)
@ -85,8 +82,7 @@ class StackWatch(object):
wrs = watch_rule_object.WatchRule.get_all_by_stack(admin_context, wrs = watch_rule_object.WatchRule.get_all_by_stack(admin_context,
sid) sid)
except Exception as ex: except Exception as ex:
LOG.warning(_LW('periodic_task db error watch rule' LOG.warning('periodic_task db error watch rule removed? %(ex)s',
' removed? %(ex)s'),
ex) ex)
return return

View File

@ -33,9 +33,6 @@ from heat.common import context as common_context
from heat.common import environment_format as env_fmt from heat.common import environment_format as env_fmt
from heat.common import exception from heat.common import exception
from heat.common.i18n import _ from heat.common.i18n import _
from heat.common.i18n import _LE
from heat.common.i18n import _LI
from heat.common.i18n import _LW
from heat.common import identifier from heat.common import identifier
from heat.common import lifecycle_plugin_utils from heat.common import lifecycle_plugin_utils
from heat.common import timeutils from heat.common import timeutils
@ -81,13 +78,13 @@ def reset_state_on_error(func):
except Exception as exc: except Exception as exc:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
errmsg = six.text_type(exc) errmsg = six.text_type(exc)
LOG.error(_LE('Unexpected exception in %(func)s: %(msg)s'), LOG.error('Unexpected exception in %(func)s: %(msg)s',
{'func': func.__name__, 'msg': errmsg}) {'func': func.__name__, 'msg': errmsg})
except BaseException as exc: except BaseException as exc:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
exc_type = type(exc).__name__ exc_type = type(exc).__name__
errmsg = '%s(%s)' % (exc_type, six.text_type(exc)) errmsg = '%s(%s)' % (exc_type, six.text_type(exc))
LOG.info(_LI('Stopped due to %(msg)s in %(func)s'), LOG.info('Stopped due to %(msg)s in %(func)s',
{'func': func.__name__, 'msg': errmsg}) {'func': func.__name__, 'msg': errmsg})
finally: finally:
if stack.status == stack.IN_PROGRESS: if stack.status == stack.IN_PROGRESS:
@ -458,7 +455,7 @@ class Stack(collections.Mapping):
parameter. parameter.
""" """
if not self.parameters.set_stack_id(self.identifier()): if not self.parameters.set_stack_id(self.identifier()):
LOG.warning(_LW("Unable to set parameters StackId identifier")) LOG.warning("Unable to set parameters StackId identifier")
@staticmethod @staticmethod
def get_dep_attrs(resources, resource_name): def get_dep_attrs(resources, resource_name):
@ -483,8 +480,8 @@ class Stack(collections.Mapping):
if not ignore_errors: if not ignore_errors:
raise raise
else: else:
LOG.warning(_LW('Ignoring error adding implicit ' LOG.warning('Ignoring error adding implicit '
'dependencies for %(res)s: %(err)s') % 'dependencies for %(res)s: %(err)s',
{'res': six.text_type(res), {'res': six.text_type(res),
'err': six.text_type(exc)}) 'err': six.text_type(exc)})
@ -856,7 +853,7 @@ class Stack(collections.Mapping):
except AssertionError: except AssertionError:
raise raise
except Exception as ex: except Exception as ex:
LOG.info(_LI("Exception in stack validation"), LOG.info("Exception in stack validation",
exc_info=True) exc_info=True)
raise exception.StackValidationFailed( raise exception.StackValidationFailed(
message=encodeutils.safe_decode(six.text_type(ex))) message=encodeutils.safe_decode(six.text_type(ex)))
@ -897,7 +894,7 @@ class Stack(collections.Mapping):
for sink in sinks: for sink in sinks:
sink.consume(ctx, ev) sink.consume(ctx, ev)
except Exception as e: except Exception as e:
LOG.debug('Got error sending events %s' % e) LOG.debug('Got error sending events %s', e)
if self.thread_group_mgr is not None: if self.thread_group_mgr is not None:
self.thread_group_mgr.start(self.id, _dispatch, self.thread_group_mgr.start(self.id, _dispatch,
self.context, self.context,
@ -926,9 +923,9 @@ class Stack(collections.Mapping):
updated = self._persist_state() updated = self._persist_state()
if not updated: if not updated:
# Possibly failed concurrent update # Possibly failed concurrent update
LOG.warning(_LW("Failed to set state of stack %(name)s with" LOG.warning("Failed to set state of stack %(name)s with"
" traversal ID %(trvsl_id)s, to" " traversal ID %(trvsl_id)s, to"
" %(action)s_%(status)s"), " %(action)s_%(status)s",
{'name': self.name, {'name': self.name,
'trvsl_id': self.current_traversal, 'trvsl_id': self.current_traversal,
'action': action, 'status': status}) 'action': action, 'status': status})
@ -942,8 +939,7 @@ class Stack(collections.Mapping):
self._persist_state() self._persist_state()
def _log_status(self): def _log_status(self):
LOG.info(_LI('Stack %(action)s %(status)s (%(name)s): ' LOG.info('Stack %(action)s %(status)s (%(name)s): %(reason)s',
'%(reason)s'),
{'action': self.action, {'action': self.action,
'status': self.status, 'status': self.status,
'name': self.name, 'name': self.name,
@ -1254,8 +1250,8 @@ class Stack(collections.Mapping):
# we expect to update the stack having previous traversal ID # we expect to update the stack having previous traversal ID
stack_id = self.store(exp_trvsl=previous_traversal) stack_id = self.store(exp_trvsl=previous_traversal)
if stack_id is None: if stack_id is None:
LOG.warning(_LW("Failed to store stack %(name)s with traversal " LOG.warning("Failed to store stack %(name)s with traversal "
"ID %(trvsl_id)s, aborting stack %(action)s"), "ID %(trvsl_id)s, aborting stack %(action)s",
{'name': self.name, 'trvsl_id': previous_traversal, {'name': self.name, 'trvsl_id': previous_traversal,
'action': self.action}) 'action': self.action})
return return
@ -1280,13 +1276,13 @@ class Stack(collections.Mapping):
stack_id = self.store() stack_id = self.store()
if stack_id is None: if stack_id is None:
# Failed concurrent update # Failed concurrent update
LOG.warning(_LW("Failed to store stack %(name)s with traversal " LOG.warning("Failed to store stack %(name)s with traversal "
"ID %(trvsl_id)s, aborting stack %(action)s"), "ID %(trvsl_id)s, aborting stack %(action)s",
{'name': self.name, 'trvsl_id': self.current_traversal, {'name': self.name, 'trvsl_id': self.current_traversal,
'action': self.action}) 'action': self.action})
return return
LOG.info(_LI('convergence_dependencies: %s'), LOG.info('convergence_dependencies: %s',
self.convergence_dependencies) self.convergence_dependencies)
# Delete all the snapshots before starting delete operation # Delete all the snapshots before starting delete operation
@ -1312,9 +1308,9 @@ class Stack(collections.Mapping):
else: else:
for rsrc_id, is_update in self.convergence_dependencies.leaves(): for rsrc_id, is_update in self.convergence_dependencies.leaves():
if is_update: if is_update:
LOG.info(_LI("Triggering resource %s for update"), rsrc_id) LOG.info("Triggering resource %s for update", rsrc_id)
else: else:
LOG.info(_LI("Triggering resource %s for cleanup"), LOG.info("Triggering resource %s for cleanup",
rsrc_id) rsrc_id)
input_data = sync_point.serialize_input_data({}) input_data = sync_point.serialize_input_data({})
self.worker_client.check_resource(self.context, rsrc_id, self.worker_client.check_resource(self.context, rsrc_id,
@ -1335,8 +1331,8 @@ class Stack(collections.Mapping):
stack_id = self.store() stack_id = self.store()
if stack_id is None: if stack_id is None:
# Failed concurrent update # Failed concurrent update
LOG.warning(_LW("Failed to store stack %(name)s with traversal" LOG.warning("Failed to store stack %(name)s with traversal"
" ID %(trvsl_id)s, not triggering rollback."), " ID %(trvsl_id)s, not triggering rollback.",
{'name': self.name, {'name': self.name,
'trvsl_id': self.current_traversal}) 'trvsl_id': self.current_traversal})
return return
@ -1446,7 +1442,7 @@ class Stack(collections.Mapping):
@scheduler.wrappertask @scheduler.wrappertask
def update_task(self, newstack, action=UPDATE, msg_queue=None): def update_task(self, newstack, action=UPDATE, msg_queue=None):
if action not in (self.UPDATE, self.ROLLBACK, self.RESTORE): if action not in (self.UPDATE, self.ROLLBACK, self.RESTORE):
LOG.error(_LE("Unexpected action %s passed to update!"), action) LOG.error("Unexpected action %s passed to update!", action)
self.state_set(self.UPDATE, self.FAILED, self.state_set(self.UPDATE, self.FAILED,
"Invalid action %s" % action) "Invalid action %s" % action)
return return
@ -1460,7 +1456,7 @@ class Stack(collections.Mapping):
return return
if self.status == self.IN_PROGRESS: if self.status == self.IN_PROGRESS:
if action == self.ROLLBACK: if action == self.ROLLBACK:
LOG.debug("Starting update rollback for %s" % self.name) LOG.debug("Starting update rollback for %s", self.name)
else: else:
reason = _('Attempted to %s an IN_PROGRESS ' reason = _('Attempted to %s an IN_PROGRESS '
'stack') % action 'stack') % action
@ -1620,7 +1616,7 @@ class Stack(collections.Mapping):
elif message == rpc_api.THREAD_CANCEL_WITH_ROLLBACK: elif message == rpc_api.THREAD_CANCEL_WITH_ROLLBACK:
raise ForcedCancel(with_rollback=True) raise ForcedCancel(with_rollback=True)
LOG.error(_LE('Unknown message "%s" received'), message) LOG.error('Unknown message "%s" received', message)
def _delete_backup_stack(self, stack): def _delete_backup_stack(self, stack):
# Delete resources in the backup stack referred to by 'stack' # Delete resources in the backup stack referred to by 'stack'
@ -1673,7 +1669,7 @@ class Stack(collections.Mapping):
return ucreds_object.UserCreds.get_by_id(self.context, return ucreds_object.UserCreds.get_by_id(self.context,
self.user_creds_id) self.user_creds_id)
except exception.Error: except exception.Error:
LOG.exception(_LE("Failed to retrieve user_creds")) LOG.exception("Failed to retrieve user_creds")
return None return None
def _delete_credentials(self, stack_status, reason, abandon): def _delete_credentials(self, stack_status, reason, abandon):
@ -1704,7 +1700,7 @@ class Stack(collections.Mapping):
self.clients.client('keystone').delete_trust( self.clients.client('keystone').delete_trust(
trust_id) trust_id)
except Exception as ex: except Exception as ex:
LOG.exception(_LE("Error deleting trust")) LOG.exception("Error deleting trust")
stack_status = self.FAILED stack_status = self.FAILED
reason = ("Error deleting trust: %s" % reason = ("Error deleting trust: %s" %
six.text_type(ex)) six.text_type(ex))
@ -1714,15 +1710,15 @@ class Stack(collections.Mapping):
ucreds_object.UserCreds.delete(self.context, ucreds_object.UserCreds.delete(self.context,
self.user_creds_id) self.user_creds_id)
except exception.NotFound: except exception.NotFound:
LOG.info(_LI("Tried to delete user_creds that do not exist " LOG.info("Tried to delete user_creds that do not exist "
"(stack=%(stack)s user_creds_id=%(uc)s)"), "(stack=%(stack)s user_creds_id=%(uc)s)",
{'stack': self.id, 'uc': self.user_creds_id}) {'stack': self.id, 'uc': self.user_creds_id})
try: try:
self.user_creds_id = None self.user_creds_id = None
self.store() self.store()
except exception.NotFound: except exception.NotFound:
LOG.info(_LI("Tried to store a stack that does not exist %s"), LOG.info("Tried to store a stack that does not exist %s",
self.id) self.id)
# If the stack has a domain project, delete it # If the stack has a domain project, delete it
@ -1732,7 +1728,7 @@ class Stack(collections.Mapping):
keystone.delete_stack_domain_project( keystone.delete_stack_domain_project(
project_id=self.stack_user_project_id) project_id=self.stack_user_project_id)
except Exception as ex: except Exception as ex:
LOG.exception(_LE("Error deleting project")) LOG.exception("Error deleting project")
stack_status = self.FAILED stack_status = self.FAILED
reason = "Error deleting project: %s" % six.text_type(ex) reason = "Error deleting project: %s" % six.text_type(ex)
@ -1753,7 +1749,7 @@ class Stack(collections.Mapping):
required for those resources, e.g the stack_user_project. required for those resources, e.g the stack_user_project.
""" """
if action not in (self.DELETE, self.ROLLBACK): if action not in (self.DELETE, self.ROLLBACK):
LOG.error(_LE("Unexpected action %s passed to delete!"), action) LOG.error("Unexpected action %s passed to delete!", action)
self.state_set(self.DELETE, self.FAILED, self.state_set(self.DELETE, self.FAILED,
"Invalid action %s" % action) "Invalid action %s" % action)
return return
@ -1811,8 +1807,8 @@ class Stack(collections.Mapping):
try: try:
self.state_set(action, stack_status, reason) self.state_set(action, stack_status, reason)
except exception.NotFound: except exception.NotFound:
LOG.info(_LI("Tried to delete stack that does not exist " LOG.info("Tried to delete stack that does not exist "
"%s "), self.id) "%s ", self.id)
if not backup: if not backup:
lifecycle_plugin_utils.do_post_ops(self.context, self, lifecycle_plugin_utils.do_post_ops(self.context, self,
@ -1823,8 +1819,8 @@ class Stack(collections.Mapping):
try: try:
stack_object.Stack.delete(self.context, self.id) stack_object.Stack.delete(self.context, self.id)
except exception.NotFound: except exception.NotFound:
LOG.info(_LI("Tried to delete stack that does not exist " LOG.info("Tried to delete stack that does not exist "
"%s "), self.id) "%s ", self.id)
self.id = None self.id = None
@profiler.trace('Stack.suspend', hide_args=False) @profiler.trace('Stack.suspend', hide_args=False)
@ -1842,7 +1838,7 @@ class Stack(collections.Mapping):
""" """
# No need to suspend if the stack has been suspended # No need to suspend if the stack has been suspended
if self.state == (self.SUSPEND, self.COMPLETE): if self.state == (self.SUSPEND, self.COMPLETE):
LOG.info(_LI('%s is already suspended'), self) LOG.info('%s is already suspended', self)
return return
self.updated_time = oslo_timeutils.utcnow() self.updated_time = oslo_timeutils.utcnow()
@ -1867,7 +1863,7 @@ class Stack(collections.Mapping):
""" """
# No need to resume if the stack has been resumed # No need to resume if the stack has been resumed
if self.state == (self.RESUME, self.COMPLETE): if self.state == (self.RESUME, self.COMPLETE):
LOG.info(_LI('%s is already resumed'), self) LOG.info('%s is already resumed', self)
return return
self.updated_time = oslo_timeutils.utcnow() self.updated_time = oslo_timeutils.utcnow()
@ -1949,7 +1945,7 @@ class Stack(collections.Mapping):
scheduler.TaskRunner(res.destroy)() scheduler.TaskRunner(res.destroy)()
except exception.ResourceFailure as ex: except exception.ResourceFailure as ex:
failed = True failed = True
LOG.info(_LI('Resource %(name)s delete failed: %(ex)s'), LOG.info('Resource %(name)s delete failed: %(ex)s',
{'name': res.name, 'ex': ex}) {'name': res.name, 'ex': ex})
for res in deps: for res in deps:
@ -1959,8 +1955,8 @@ class Stack(collections.Mapping):
scheduler.TaskRunner(res.create)() scheduler.TaskRunner(res.create)()
except exception.ResourceFailure as ex: except exception.ResourceFailure as ex:
failed = True failed = True
LOG.info(_LI('Resource %(name)s create failed: ' LOG.info('Resource %(name)s create failed: '
'%(ex)s'), {'name': res.name, 'ex': ex}) '%(ex)s', {'name': res.name, 'ex': ex})
else: else:
res.state_set(res.CREATE, res.FAILED, res.state_set(res.CREATE, res.FAILED,
'Resource restart aborted') 'Resource restart aborted')
@ -2023,7 +2019,7 @@ class Stack(collections.Mapping):
service. service.
""" """
LOG.info(_LI('[%(name)s(%(id)s)] update traversal %(tid)s complete'), LOG.info('[%(name)s(%(id)s)] update traversal %(tid)s complete',
{'name': self.name, 'id': self.id, {'name': self.name, 'id': self.id,
'tid': self.current_traversal}) 'tid': self.current_traversal})
@ -2060,8 +2056,8 @@ class Stack(collections.Mapping):
stack_id = self.store(exp_trvsl=exp_trvsl) stack_id = self.store(exp_trvsl=exp_trvsl)
if stack_id is None: if stack_id is None:
# Failed concurrent update # Failed concurrent update
LOG.warning(_LW("Failed to store stack %(name)s with traversal ID " LOG.warning("Failed to store stack %(name)s with traversal ID "
"%(trvsl_id)s, aborting stack purge"), "%(trvsl_id)s, aborting stack purge",
{'name': self.name, {'name': self.name,
'trvsl_id': self.current_traversal}) 'trvsl_id': self.current_traversal})
return return

View File

@ -17,8 +17,6 @@ from oslo_log import log as logging
from oslo_utils import excutils from oslo_utils import excutils
from heat.common import exception from heat.common import exception
from heat.common.i18n import _LI
from heat.common.i18n import _LW
from heat.common import service_utils from heat.common import service_utils
from heat.objects import stack as stack_object from heat.objects import stack as stack_object
from heat.objects import stack_lock as stack_lock_object from heat.objects import stack_lock as stack_lock_object
@ -77,8 +75,8 @@ class StackLock(object):
raise exception.ActionInProgress(stack_name=stack.name, raise exception.ActionInProgress(stack_name=stack.name,
action=stack.action) action=stack.action)
else: else:
LOG.info(_LI("Stale lock detected on stack %(stack)s. Engine " LOG.info("Stale lock detected on stack %(stack)s. Engine "
"%(engine)s will attempt to steal the lock"), "%(engine)s will attempt to steal the lock",
{'stack': self.stack_id, 'engine': self.engine_id}) {'stack': self.stack_id, 'engine': self.engine_id})
result = stack_lock_object.StackLock.steal(self.context, result = stack_lock_object.StackLock.steal(self.context,
@ -87,22 +85,22 @@ class StackLock(object):
self.engine_id) self.engine_id)
if result is None: if result is None:
LOG.info(_LI("Engine %(engine)s successfully stole the lock " LOG.info("Engine %(engine)s successfully stole the lock "
"on stack %(stack)s"), "on stack %(stack)s",
{'engine': self.engine_id, {'engine': self.engine_id,
'stack': self.stack_id}) 'stack': self.stack_id})
return return
elif result is True: elif result is True:
if retry: if retry:
LOG.info(_LI("The lock on stack %(stack)s was released " LOG.info("The lock on stack %(stack)s was released "
"while engine %(engine)s was stealing it. " "while engine %(engine)s was stealing it. "
"Trying again"), {'stack': self.stack_id, "Trying again", {'stack': self.stack_id,
'engine': self.engine_id}) 'engine': self.engine_id})
return self.acquire(retry=False) return self.acquire(retry=False)
else: else:
new_lock_engine_id = result new_lock_engine_id = result
LOG.info(_LI("Failed to steal lock on stack %(stack)s. " LOG.info("Failed to steal lock on stack %(stack)s. "
"Engine %(engine)s stole the lock first"), "Engine %(engine)s stole the lock first",
{'stack': self.stack_id, {'stack': self.stack_id,
'engine': new_lock_engine_id}) 'engine': new_lock_engine_id})
@ -117,7 +115,7 @@ class StackLock(object):
self.stack_id, self.stack_id,
self.engine_id) self.engine_id)
if result is True: if result is True:
LOG.warning(_LW("Lock was already released on stack %s!"), LOG.warning("Lock was already released on stack %s!",
self.stack_id) self.stack_id)
else: else:
LOG.debug("Engine %(engine)s released lock on stack " LOG.debug("Engine %(engine)s released lock on stack "

View File

@ -15,7 +15,6 @@ from oslo_log import log as logging
import six import six
from heat.common import exception from heat.common import exception
from heat.common.i18n import _LI
from heat.common.i18n import repr_wrapper from heat.common.i18n import repr_wrapper
from heat.engine import dependencies from heat.engine import dependencies
from heat.engine import resource from heat.engine import resource
@ -82,7 +81,7 @@ class StackUpdate(object):
def _remove_backup_resource(self, prev_res): def _remove_backup_resource(self, prev_res):
if prev_res.state not in ((prev_res.INIT, prev_res.COMPLETE), if prev_res.state not in ((prev_res.INIT, prev_res.COMPLETE),
(prev_res.DELETE, prev_res.COMPLETE)): (prev_res.DELETE, prev_res.COMPLETE)):
LOG.debug("Deleting backup resource %s" % prev_res.name) LOG.debug("Deleting backup resource %s", prev_res.name)
yield prev_res.destroy() yield prev_res.destroy()
@staticmethod @staticmethod
@ -106,17 +105,17 @@ class StackUpdate(object):
# Swap in the backup resource if it is in a valid state, # Swap in the backup resource if it is in a valid state,
# instead of creating a new resource # instead of creating a new resource
if prev_res.status == prev_res.COMPLETE: if prev_res.status == prev_res.COMPLETE:
LOG.debug("Swapping in backup Resource %s" % res_name) LOG.debug("Swapping in backup Resource %s", res_name)
self._exchange_stacks(self.existing_stack[res_name], self._exchange_stacks(self.existing_stack[res_name],
prev_res) prev_res)
return return
LOG.debug("Deleting backup Resource %s" % res_name) LOG.debug("Deleting backup Resource %s", res_name)
yield prev_res.destroy() yield prev_res.destroy()
# Back up existing resource # Back up existing resource
if res_name in self.existing_stack: if res_name in self.existing_stack:
LOG.debug("Backing up existing Resource %s" % res_name) LOG.debug("Backing up existing Resource %s", res_name)
existing_res = self.existing_stack[res_name] existing_res = self.existing_stack[res_name]
self.previous_stack.add_resource(existing_res) self.previous_stack.add_resource(existing_res)
existing_res.state_set(existing_res.UPDATE, existing_res.COMPLETE) existing_res.state_set(existing_res.UPDATE, existing_res.COMPLETE)
@ -170,8 +169,8 @@ class StackUpdate(object):
self.previous_stack.t.add_resource(new_res.t) self.previous_stack.t.add_resource(new_res.t)
self.previous_stack.t.store(self.previous_stack.context) self.previous_stack.t.store(self.previous_stack.context)
LOG.info(_LI("Resource %(res_name)s for stack " LOG.info("Resource %(res_name)s for stack "
"%(stack_name)s updated"), "%(stack_name)s updated",
{'res_name': res_name, {'res_name': res_name,
'stack_name': self.existing_stack.name}) 'stack_name': self.existing_stack.name})
return return

View File

@ -19,8 +19,6 @@ from oslo_utils import timeutils
from heat.common import exception from heat.common import exception
from heat.common.i18n import _ from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.common.i18n import _LW
from heat.engine import stack from heat.engine import stack
from heat.engine import timestamp from heat.engine import timestamp
from heat.objects import stack as stack_object from heat.objects import stack as stack_object
@ -85,9 +83,8 @@ class WatchRule(object):
watch = watch_rule_objects.WatchRule.get_by_name(context, watch = watch_rule_objects.WatchRule.get_by_name(context,
watch_name) watch_name)
except Exception as ex: except Exception as ex:
LOG.warning(_LW('WatchRule.load (%(watch_name)s) db error ' LOG.warning('WatchRule.load (%(watch_name)s) db error %(ex)s',
'%(ex)s'), {'watch_name': watch_name, {'watch_name': watch_name, 'ex': ex})
'ex': ex})
if watch is None: if watch is None:
raise exception.EntityNotFound(entity='Watch Rule', raise exception.EntityNotFound(entity='Watch Rule',
name=watch_name) name=watch_name)
@ -218,7 +215,7 @@ class WatchRule(object):
data = 0 data = 0
for d in self.watch_data: for d in self.watch_data:
if d.created_at < self.now - self.timeperiod: if d.created_at < self.now - self.timeperiod:
LOG.debug('ignoring %s' % str(d.data)) LOG.debug('ignoring %s', str(d.data))
continue continue
data = data + float(d.data[self.rule['MetricName']]['Value']) data = data + float(d.data[self.rule['MetricName']]['Value'])
@ -255,13 +252,13 @@ class WatchRule(object):
return actions return actions
def rule_actions(self, new_state): def rule_actions(self, new_state):
LOG.info(_LI('WATCH: stack:%(stack)s, watch_name:%(watch_name)s, ' LOG.info('WATCH: stack:%(stack)s, watch_name:%(watch_name)s, '
'new_state:%(new_state)s'), {'stack': self.stack_id, 'new_state:%(new_state)s', {'stack': self.stack_id,
'watch_name': self.name, 'watch_name': self.name,
'new_state': new_state}) 'new_state': new_state})
actions = [] actions = []
if self.ACTION_MAP[new_state] not in self.rule: if self.ACTION_MAP[new_state] not in self.rule:
LOG.info(_LI('no action for new state %s'), new_state) LOG.info('no action for new state %s', new_state)
else: else:
s = stack_object.Stack.get_by_id( s = stack_object.Stack.get_by_id(
self.context, self.context,
@ -272,7 +269,7 @@ class WatchRule(object):
for refid in self.rule[self.ACTION_MAP[new_state]]: for refid in self.rule[self.ACTION_MAP[new_state]]:
actions.append(stk.resource_by_refid(refid).signal) actions.append(stk.resource_by_refid(refid).signal)
else: else:
LOG.warning(_LW("Could not process watch state %s for stack"), LOG.warning("Could not process watch state %s for stack",
new_state) new_state)
return actions return actions
@ -292,7 +289,7 @@ class WatchRule(object):
dims = dims[0] dims = dims[0]
sample['resource_metadata'] = dims sample['resource_metadata'] = dims
sample['resource_id'] = dims.get('InstanceId') sample['resource_id'] = dims.get('InstanceId')
LOG.debug('new sample:%(k)s data:%(sample)s' % { LOG.debug('new sample:%(k)s data:%(sample)s', {
'k': k, 'sample': sample}) 'k': k, 'sample': sample})
clients.client('ceilometer').samples.create(**sample) clients.client('ceilometer').samples.create(**sample)
@ -305,8 +302,8 @@ class WatchRule(object):
return return
if self.state == self.SUSPENDED: if self.state == self.SUSPENDED:
LOG.debug('Ignoring metric data for %s, SUSPENDED state' LOG.debug('Ignoring metric data for %s, SUSPENDED state',
% self.name) self.name)
return [] return []
if self.rule['MetricName'] not in data: if self.rule['MetricName'] not in data:
@ -355,9 +352,9 @@ class WatchRule(object):
% {'self_state': self.state, 'name': self.name, % {'self_state': self.state, 'name': self.name,
'state': state}) 'state': state})
else: else:
LOG.warning(_LW("Unable to override state %(state)s for " LOG.warning("Unable to override state %(state)s for "
"watch %(name)s"), {'state': self.state, "watch %(name)s", {'state': self.state,
'name': self.name}) 'name': self.name})
return actions return actions

View File

@ -21,9 +21,6 @@ from oslo_utils import uuidutils
from osprofiler import profiler from osprofiler import profiler
from heat.common import context from heat.common import context
from heat.common.i18n import _LE
from heat.common.i18n import _LI
from heat.common.i18n import _LW
from heat.common import messaging as rpc_messaging from heat.common import messaging as rpc_messaging
from heat.db.sqlalchemy import api as db_api from heat.db.sqlalchemy import api as db_api
from heat.engine import check_resource from heat.engine import check_resource
@ -72,7 +69,7 @@ class WorkerService(object):
server=self.engine_id, server=self.engine_id,
topic=self.topic) topic=self.topic)
self.target = target self.target = target
LOG.info(_LI("Starting %(topic)s (%(version)s) in engine %(engine)s."), LOG.info("Starting %(topic)s (%(version)s) in engine %(engine)s.",
{'topic': self.topic, {'topic': self.topic,
'version': self.RPC_API_VERSION, 'version': self.RPC_API_VERSION,
'engine': self.engine_id}) 'engine': self.engine_id})
@ -84,13 +81,13 @@ class WorkerService(object):
if self._rpc_server is None: if self._rpc_server is None:
return return
# Stop rpc connection at first for preventing new requests # Stop rpc connection at first for preventing new requests
LOG.info(_LI("Stopping %(topic)s in engine %(engine)s."), LOG.info("Stopping %(topic)s in engine %(engine)s.",
{'topic': self.topic, 'engine': self.engine_id}) {'topic': self.topic, 'engine': self.engine_id})
try: try:
self._rpc_server.stop() self._rpc_server.stop()
self._rpc_server.wait() self._rpc_server.wait()
except Exception as e: except Exception as e:
LOG.error(_LE("%(topic)s is failed to stop, %(exc)s"), LOG.error("%(topic)s is failed to stop, %(exc)s",
{'topic': self.topic, 'exc': e}) {'topic': self.topic, 'exc': e})
def stop_traversal(self, stack): def stop_traversal(self, stack):
@ -122,13 +119,12 @@ class WorkerService(object):
cancelled = _cancel_workers(stack, self.thread_group_mgr, cancelled = _cancel_workers(stack, self.thread_group_mgr,
self.engine_id, self._rpc_client) self.engine_id, self._rpc_client)
if not cancelled: if not cancelled:
LOG.error(_LE("Failed to stop all workers of stack %(name)s " LOG.error("Failed to stop all workers of stack %s, "
", stack cancel not complete"), "stack cancel not complete", stack.name)
{'name': stack.name})
return False return False
LOG.info(_LI('[%(name)s(%(id)s)] Stopped all active workers for stack ' LOG.info('[%(name)s(%(id)s)] Stopped all active workers for stack '
'%(action)s'), '%(action)s',
{'name': stack.name, 'id': stack.id, 'action': stack.action}) {'name': stack.name, 'id': stack.id, 'action': stack.action})
return True return True
@ -199,18 +195,17 @@ def _stop_traversal(stack):
old_trvsl = stack.current_traversal old_trvsl = stack.current_traversal
updated = _update_current_traversal(stack) updated = _update_current_traversal(stack)
if not updated: if not updated:
LOG.warning(_LW("Failed to update stack %(name)s with new " LOG.warning("Failed to update stack %(name)s with new "
"traversal, aborting stack cancel"), "traversal, aborting stack cancel", stack.name)
{'name': stack.name})
return return
reason = 'Stack %(action)s cancelled' % {'action': stack.action} reason = 'Stack %(action)s cancelled' % {'action': stack.action}
updated = stack.state_set(stack.action, stack.FAILED, reason) updated = stack.state_set(stack.action, stack.FAILED, reason)
if not updated: if not updated:
LOG.warning(_LW("Failed to update stack %(name)s status " LOG.warning("Failed to update stack %(name)s status "
"to %(action)s_%(state)s"), "to %(action)s_%(state)s",
{'name': stack.name, 'action': stack.action, {'name': stack.name, 'action': stack.action,
'state': stack.FAILED}) 'state': stack.FAILED})
return return
sync_point.delete_all(stack.context, stack.id, old_trvsl) sync_point.delete_all(stack.context, stack.id, old_trvsl)

View File

@ -23,7 +23,6 @@ import oslo_i18n as i18n
from oslo_log import log as logging from oslo_log import log as logging
from heat.common import config from heat.common import config
from heat.common.i18n import _LI
from heat.common import messaging from heat.common import messaging
from heat.common import profiler from heat.common import profiler
from heat import version as hversion from heat import version as hversion
@ -44,6 +43,6 @@ def init_application():
port = cfg.CONF.heat_api.bind_port port = cfg.CONF.heat_api.bind_port
host = cfg.CONF.heat_api.bind_host host = cfg.CONF.heat_api.bind_host
profiler.setup('heat-api', host) profiler.setup('heat-api', host)
LOG.info(_LI('Starting Heat REST API on %(host)s:%(port)s'), LOG.info('Starting Heat REST API on %(host)s:%(port)s',
{'host': host, 'port': port}) {'host': host, 'port': port})
return config.load_paste_app() return config.load_paste_app()

View File

@ -23,7 +23,6 @@ import oslo_i18n as i18n
from oslo_log import log as logging from oslo_log import log as logging
from heat.common import config from heat.common import config
from heat.common.i18n import _LI
from heat.common import messaging from heat.common import messaging
from heat.common import profiler from heat.common import profiler
from heat import version from heat import version
@ -45,7 +44,7 @@ def init_application():
port = cfg.CONF.heat_api_cfn.bind_port port = cfg.CONF.heat_api_cfn.bind_port
host = cfg.CONF.heat_api_cfn.bind_host host = cfg.CONF.heat_api_cfn.bind_host
LOG.info(_LI('Starting Heat API on %(host)s:%(port)s'), LOG.info('Starting Heat API on %(host)s:%(port)s',
{'host': host, 'port': port}) {'host': host, 'port': port})
profiler.setup('heat-api-cfn', host) profiler.setup('heat-api-cfn', host)

View File

@ -23,7 +23,6 @@ import oslo_i18n as i18n
from oslo_log import log as logging from oslo_log import log as logging
from heat.common import config from heat.common import config
from heat.common.i18n import _LI
from heat.common import messaging from heat.common import messaging
from heat.common import profiler from heat.common import profiler
from heat import version from heat import version
@ -45,7 +44,7 @@ def init_application():
port = cfg.CONF.heat_api_cloudwatch.bind_port port = cfg.CONF.heat_api_cloudwatch.bind_port
host = cfg.CONF.heat_api_cloudwatch.bind_host host = cfg.CONF.heat_api_cloudwatch.bind_host
LOG.info(_LI('Starting Heat CloudWatch API on %(host)s:%(port)s'), LOG.info('Starting Heat CloudWatch API on %(host)s:%(port)s',
{'host': host, 'port': port}) {'host': host, 'port': port})
profiler.setup('heat-api-cloudwatch', host) profiler.setup('heat-api-cloudwatch', host)

View File

@ -18,7 +18,6 @@ from oslo_log import log as logging
from oslo_versionedobjects import base from oslo_versionedobjects import base
from oslo_versionedobjects import fields from oslo_versionedobjects import fields
from heat.common.i18n import _LI
from heat.common import identifier from heat.common import identifier
from heat.db.sqlalchemy import api as db_api from heat.db.sqlalchemy import api as db_api
from heat.objects import base as heat_base from heat.objects import base as heat_base
@ -80,7 +79,7 @@ class Event(
@property @property
def resource_properties(self): def resource_properties(self):
if self._resource_properties is None: if self._resource_properties is None:
LOG.info(_LI('rsrp_prop_data lazy load')) LOG.info('rsrp_prop_data lazy load')
rpd_obj = rpd.ResourcePropertiesData.get_by_id( rpd_obj = rpd.ResourcePropertiesData.get_by_id(
self._context, self.rsrc_prop_data_id) self._context, self.rsrc_prop_data_id)
self._resource_properties = rpd_obj.data or {} self._resource_properties = rpd_obj.data or {}

View File

@ -24,7 +24,6 @@ from oslo_versionedobjects import fields
from heat.common import crypt from heat.common import crypt
from heat.common import environment_format as env_fmt from heat.common import environment_format as env_fmt
from heat.common.i18n import _LW
from heat.db.sqlalchemy import api as db_api from heat.db.sqlalchemy import api as db_api
from heat.objects import base as heat_base from heat.objects import base as heat_base
from heat.objects import fields as heat_fields from heat.objects import fields as heat_fields
@ -71,10 +70,10 @@ class RawTemplate(
value = crypt.decrypt(method, enc_value) value = crypt.decrypt(method, enc_value)
else: else:
value = parameters[param_name] value = parameters[param_name]
LOG.warning(_LW( LOG.warning(
'Encountered already-decrypted data while attempting ' 'Encountered already-decrypted data while attempting '
'to decrypt parameter %s. Please file a Heat bug so ' 'to decrypt parameter %s. Please file a Heat bug so '
'this can be fixed.'), param_name) 'this can be fixed.', param_name)
parameters[param_name] = value parameters[param_name] = value
tpl.environment[env_fmt.PARAMETERS] = parameters tpl.environment[env_fmt.PARAMETERS] = parameters

View File

@ -27,7 +27,6 @@ import tenacity
from heat.common import crypt from heat.common import crypt
from heat.common import exception from heat.common import exception
from heat.common.i18n import _ from heat.common.i18n import _
from heat.common.i18n import _LE
from heat.db.sqlalchemy import api as db_api from heat.db.sqlalchemy import api as db_api
from heat.objects import base as heat_base from heat.objects import base as heat_base
from heat.objects import fields as heat_fields from heat.objects import fields as heat_fields
@ -120,12 +119,12 @@ class Resource(
resource._properties_data = resource['rsrc_prop_data'].data resource._properties_data = resource['rsrc_prop_data'].data
if db_resource['properties_data']: if db_resource['properties_data']:
LOG.error( LOG.error(
_LE('Unexpected condition where resource.rsrc_prop_data ' 'Unexpected condition where resource.rsrc_prop_data '
'and resource.properties_data are both not null. ' 'and resource.properties_data are both not null. '
'rsrc_prop_data.id: %(rsrc_prop_data_id)s ,' 'rsrc_prop_data.id: %(rsrc_prop_data_id)s, '
'resource id: %(res_id)s') 'resource id: %(res_id)s',
% {'rsrc_prop_data_id': resource['rsrc_prop_data'].id, {'rsrc_prop_data_id': resource['rsrc_prop_data'].id,
'res_id': resource['id']}) 'res_id': resource['id']})
elif db_resource['properties_data']: # legacy field elif db_resource['properties_data']: # legacy field
if db_resource['properties_data_encrypted']: if db_resource['properties_data_encrypted']:
decrypted_data = crypt.decrypted_dict( decrypted_data = crypt.decrypted_dict(

View File

@ -15,7 +15,6 @@ from oslo_log import log as logging
from heat.common import exception from heat.common import exception
from heat.common.i18n import _ from heat.common.i18n import _
from heat.common.i18n import _LI
from heat.engine import resource from heat.engine import resource
from oslo_utils import timeutils from oslo_utils import timeutils
import six import six
@ -33,8 +32,8 @@ class CooldownMixin(object):
def _check_scaling_allowed(self): def _check_scaling_allowed(self):
metadata = self.metadata_get() metadata = self.metadata_get()
if metadata.get('scaling_in_progress'): if metadata.get('scaling_in_progress'):
LOG.info(_LI("Can not perform scaling action: resource %s " LOG.info("Can not perform scaling action: resource %s "
"is already in scaling.") % self.name) "is already in scaling.", self.name)
reason = _('due to scaling activity') reason = _('due to scaling activity')
raise resource.NoActionRequired(res_name=self.name, raise resource.NoActionRequired(res_name=self.name,
reason=reason) reason=reason)
@ -66,8 +65,8 @@ class CooldownMixin(object):
def _cooldown_check(self, cooldown, last_adjust): def _cooldown_check(self, cooldown, last_adjust):
if not timeutils.is_older_than(last_adjust, cooldown): if not timeutils.is_older_than(last_adjust, cooldown):
LOG.info(_LI("Can not perform scaling action: " LOG.info("Can not perform scaling action: "
"resource %(name)s is in cooldown (%(cooldown)s).") % "resource %(name)s is in cooldown (%(cooldown)s).",
{'name': self.name, {'name': self.name,
'cooldown': cooldown}) 'cooldown': cooldown})
reason = _('due to cooldown, ' reason = _('due to cooldown, '

View File

@ -60,7 +60,7 @@ class MessageProcessor(object):
def __call__(self): def __call__(self):
message = self.queue.get() message = self.queue.get()
if message is None: if message is None:
LOG.debug('[%s] No messages' % self.name) LOG.debug('[%s] No messages', self.name)
return False return False
try: try:

View File

@ -14,7 +14,6 @@
import os import os
from oslo_log import log as logging from oslo_log import log as logging
from heat.common.i18n import _LE
LOG = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
@ -22,7 +21,7 @@ LOG = logging.getLogger(__name__)
def list_all(): def list_all():
scenario_dir = os.path.join(os.path.dirname(__file__), '../scenarios') scenario_dir = os.path.join(os.path.dirname(__file__), '../scenarios')
if not os.path.isdir(scenario_dir): if not os.path.isdir(scenario_dir):
LOG.error(_LE('Scenario directory "%s" not found'), scenario_dir) LOG.error('Scenario directory "%s" not found', scenario_dir)
return return
for root, dirs, files in os.walk(scenario_dir): for root, dirs, files in os.walk(scenario_dir):

View File

@ -15,7 +15,6 @@ import collections
from oslo_log import log as logging from oslo_log import log as logging
import six import six
from heat.common.i18n import _LW
from heat.engine import attributes from heat.engine import attributes
from heat.engine import constraints from heat.engine import constraints
from heat.engine import properties from heat.engine import properties
@ -39,26 +38,26 @@ class GenericResource(resource.Resource):
return (True, None) return (True, None)
def handle_create(self): def handle_create(self):
LOG.warning(_LW('Creating generic resource (Type "%s")'), LOG.warning('Creating generic resource (Type "%s")',
self.type()) self.type())
def handle_update(self, json_snippet, tmpl_diff, prop_diff): def handle_update(self, json_snippet, tmpl_diff, prop_diff):
LOG.warning(_LW('Updating generic resource (Type "%s")'), LOG.warning('Updating generic resource (Type "%s")',
self.type()) self.type())
def handle_delete(self): def handle_delete(self):
LOG.warning(_LW('Deleting generic resource (Type "%s")'), LOG.warning('Deleting generic resource (Type "%s")',
self.type()) self.type())
def _resolve_attribute(self, name): def _resolve_attribute(self, name):
return self.name return self.name
def handle_suspend(self): def handle_suspend(self):
LOG.warning(_LW('Suspending generic resource (Type "%s")'), LOG.warning('Suspending generic resource (Type "%s")',
self.type()) self.type())
def handle_resume(self): def handle_resume(self):
LOG.warning(_LW('Resuming generic resource (Type "%s")'), LOG.warning(('Resuming generic resource (Type "%s")'),
self.type()) self.type())
@ -67,14 +66,14 @@ class CancellableResource(GenericResource):
return True return True
def handle_create_cancel(self, cookie): def handle_create_cancel(self, cookie):
LOG.warning(_LW('Cancelling create generic resource (Type "%s")'), LOG.warning('Cancelling create generic resource (Type "%s")',
self.type()) self.type())
def check_update_complete(self, cookie): def check_update_complete(self, cookie):
return True return True
def handle_update_cancel(self, cookie): def handle_update_cancel(self, cookie):
LOG.warning(_LW('Cancelling update generic resource (Type "%s")'), LOG.warning('Cancelling update generic resource (Type "%s")',
self.type()) self.type())
@ -257,7 +256,7 @@ class SignalResource(signal_responder.SignalResponder):
self.resource_id_set(self._get_user_id()) self.resource_id_set(self._get_user_id())
def handle_signal(self, details=None): def handle_signal(self, details=None):
LOG.warning(_LW('Signaled resource (Type "%(type)s") %(details)s'), LOG.warning('Signaled resource (Type "%(type)s") %(details)s',
{'type': self.type(), 'details': details}) {'type': self.type(), 'details': details})
def _resolve_attribute(self, name): def _resolve_attribute(self, name):

View File

@ -118,7 +118,7 @@ class HeatIntegrationTest(testscenarios.WithScenarios,
try: try:
linux_client.validate_authentication() linux_client.validate_authentication()
except exceptions.SSHTimeout: except exceptions.SSHTimeout:
LOG.exception('ssh connection to %s failed' % ip) LOG.exception('ssh connection to %s failed', ip)
raise raise
return linux_client return linux_client
@ -637,7 +637,7 @@ class HeatIntegrationTest(testscenarios.WithScenarios,
rsrc_events = self.client.events.list(stack_identifier, rsrc_events = self.client.events.list(stack_identifier,
resource_name=rsrc_name) resource_name=rsrc_name)
except heat_exceptions.HTTPNotFound: except heat_exceptions.HTTPNotFound:
LOG.debug("No events yet found for %s" % rsrc_name) LOG.debug("No events yet found for %s", rsrc_name)
else: else:
matched = [e for e in rsrc_events matched = [e for e in rsrc_events
if e.resource_status_reason == reason] if e.resource_status_reason == reason]

View File

@ -19,7 +19,6 @@ from oslo_log import log
import six import six
from heat.common.i18n import _ from heat.common.i18n import _
from heat.common.i18n import _LW
from heat.engine import constraints from heat.engine import constraints
from heat.engine import resources from heat.engine import resources
from heat.engine import support from heat.engine import support
@ -155,8 +154,8 @@ class HeatCustomGuidelines(object):
try: try:
cls_file = open(cls.__module__.replace('.', '/') + '.py') cls_file = open(cls.__module__.replace('.', '/') + '.py')
except IOError as ex: except IOError as ex:
LOG.warning(_LW('Cannot perform trailing spaces check on ' LOG.warning('Cannot perform trailing spaces check on '
'resource module: %s') % six.text_type(ex)) 'resource module: %s', six.text_type(ex))
continue continue
lines = [line.strip() for line in cls_file.readlines()] lines = [line.strip() for line in cls_file.readlines()]
idx = 0 idx = 0