Replace variable 'logger' with 'LOG'

Commit https://review.openstack.org/#/c/94255/1 introduces a check for
translating log messages. This check uses the name 'LOG' in its pattern,
so the heat code should also use this name instead of 'logger'. The same
name is used in other projects as well.

Change-Id: Iba24c7eb1c13e68a91d090d7bcdbcb40d6e16071
Partial-Bug: #1321283
This commit is contained in:
Sergey Kraynev 2014-05-20 08:43:24 -04:00
parent 2d37843d13
commit ecd8d3999d
60 changed files with 519 additions and 540 deletions

View File

@ -20,7 +20,7 @@ from heat.engine import resource
from heat.openstack.common.gettextutils import _ from heat.openstack.common.gettextutils import _
from heat.openstack.common import log as logging from heat.openstack.common import log as logging
logger = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
DOCKER_INSTALLED = False DOCKER_INSTALLED = False
# conditionally import so tests can work without having the dependency # conditionally import so tests can work without having the dependency
@ -329,5 +329,5 @@ def available_resource_mapping():
if DOCKER_INSTALLED: if DOCKER_INSTALLED:
return resource_mapping() return resource_mapping()
else: else:
logger.warn(_("Docker plug-in loaded, but docker lib not installed.")) LOG.warn(_("Docker plug-in loaded, but docker lib not installed."))
return {} return {}

View File

@ -23,8 +23,8 @@ from heat.openstack.common.gettextutils import _
from heat.openstack.common import importutils from heat.openstack.common import importutils
from heat.openstack.common import log as logging from heat.openstack.common import log as logging
logger = logging.getLogger('heat.common.keystoneclient') LOG = logging.getLogger('heat.common.keystoneclient')
logger.info(_("Keystone V2 loaded")) LOG.info(_("Keystone V2 loaded"))
class KeystoneClientV2(object): class KeystoneClientV2(object):
@ -86,7 +86,7 @@ class KeystoneClientV2(object):
kwargs['tenant_name'] = self.context.tenant kwargs['tenant_name'] = self.context.tenant
kwargs['tenant_id'] = self.context.tenant_id kwargs['tenant_id'] = self.context.tenant_id
else: else:
logger.error(_("Keystone v2 API connection failed, no password " LOG.error(_("Keystone v2 API connection failed, no password "
"or auth_token!")) "or auth_token!"))
raise exception.AuthorizationFailure() raise exception.AuthorizationFailure()
kwargs['cacert'] = self._get_client_option('ca_file') kwargs['cacert'] = self._get_client_option('ca_file')
@ -101,7 +101,7 @@ class KeystoneClientV2(object):
if auth_kwargs: if auth_kwargs:
# Sanity check # Sanity check
if not client.auth_ref.trust_scoped: if not client.auth_ref.trust_scoped:
logger.error(_("v2 trust token re-scoping failed!")) LOG.error(_("v2 trust token re-scoping failed!"))
raise exception.AuthorizationFailure() raise exception.AuthorizationFailure()
# All OK so update the context with the token # All OK so update the context with the token
self.context.auth_token = client.auth_ref.auth_token self.context.auth_token = client.auth_ref.auth_token
@ -109,7 +109,7 @@ class KeystoneClientV2(object):
# Ensure the v2 API we're using is not impacted by keystone # Ensure the v2 API we're using is not impacted by keystone
# bug #1239303, otherwise we can't trust the user_id # bug #1239303, otherwise we can't trust the user_id
if self.context.trustor_user_id != client.auth_ref.user_id: if self.context.trustor_user_id != client.auth_ref.user_id:
logger.error("Trust impersonation failed, bug #1239303 " LOG.error("Trust impersonation failed, bug #1239303 "
"suspected, you may need a newer keystone") "suspected, you may need a newer keystone")
raise exception.AuthorizationFailure() raise exception.AuthorizationFailure()
@ -146,7 +146,7 @@ class KeystoneClientV2(object):
Returns the keystone ID of the resulting user Returns the keystone ID of the resulting user
""" """
if(len(username) > 64): if(len(username) > 64):
logger.warning(_("Truncating the username %s to the last 64 " LOG.warning(_("Truncating the username %s to the last 64 "
"characters.") % username) "characters.") % username)
#get the last 64 characters of the username #get the last 64 characters of the username
username = username[-64:] username = username[-64:]
@ -165,13 +165,14 @@ class KeystoneClientV2(object):
if r.name == cfg.CONF.heat_stack_user_role] if r.name == cfg.CONF.heat_stack_user_role]
if len(stack_user_role) == 1: if len(stack_user_role) == 1:
role_id = stack_user_role[0] role_id = stack_user_role[0]
logger.debug("Adding user %(user)s to role %(role)s" % { LOG.debug("Adding user %(user)s to role %(role)s"
'user': user.id, 'role': role_id}) % {'user': user.id, 'role': role_id})
self.client.roles.add_user_role(user.id, role_id, self.client.roles.add_user_role(user.id, role_id,
self.context.tenant_id) self.context.tenant_id)
else: else:
logger.error(_("Failed to add user %(user)s to role %(role)s, " LOG.error(_("Failed to add user %(user)s to role %(role)s, "
"check role exists!") % {'user': username, "check role exists!")
% {'user': username,
'role': cfg.CONF.heat_stack_user_role}) 'role': cfg.CONF.heat_stack_user_role})
return user.id return user.id

View File

@ -16,13 +16,13 @@
from heat.engine import clients from heat.engine import clients
from heat.openstack.common import log as logging from heat.openstack.common import log as logging
logger = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
try: try:
from marconiclient.queues.v1 import client as marconiclient from marconiclient.queues.v1 import client as marconiclient
except ImportError: except ImportError:
marconiclient = None marconiclient = None
logger.info(_('marconiclient not available')) LOG.info(_('marconiclient not available'))
class Clients(clients.OpenStackClients): class Clients(clients.OpenStackClients):
@ -39,7 +39,7 @@ class Clients(clients.OpenStackClients):
con = self.context con = self.context
if self.auth_token is None: if self.auth_token is None:
logger.error(_("Marconi connection failed, no auth_token!")) LOG.error(_("Marconi connection failed, no auth_token!"))
return None return None
opts = { opts = {

View File

@ -18,7 +18,7 @@ from heat.engine import resource
from heat.openstack.common.gettextutils import _ from heat.openstack.common.gettextutils import _
from heat.openstack.common import log as logging from heat.openstack.common import log as logging
logger = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
class NovaFlavor(resource.Resource): class NovaFlavor(resource.Resource):
@ -102,7 +102,7 @@ class NovaFlavor(resource.Resource):
try: try:
self.nova().flavors.delete(self.resource_id) self.nova().flavors.delete(self.resource_id)
except nova_exceptions.NotFound: except nova_exceptions.NotFound:
logger.debug( LOG.debug(
_('Could not find flavor %s.') % self.resource_id) _('Could not find flavor %s.') % self.resource_id)
self.resource_id_set(None) self.resource_id_set(None)

View File

@ -20,23 +20,23 @@ from heat.engine import clients
from heat.openstack.common.gettextutils import _ from heat.openstack.common.gettextutils import _
from heat.openstack.common import log as logging from heat.openstack.common import log as logging
logger = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
try: try:
import pyrax import pyrax
except ImportError: except ImportError:
logger.info(_('pyrax not available')) LOG.info(_('pyrax not available'))
try: try:
from swiftclient import client as swiftclient from swiftclient import client as swiftclient
except ImportError: except ImportError:
swiftclient = None swiftclient = None
logger.info(_('swiftclient not available')) LOG.info(_('swiftclient not available'))
try: try:
from ceilometerclient import client as ceilometerclient from ceilometerclient import client as ceilometerclient
except ImportError: except ImportError:
ceilometerclient = None ceilometerclient = None
logger.info(_('ceilometerclient not available')) LOG.info(_('ceilometerclient not available'))
cloud_opts = [ cloud_opts = [
cfg.StrOpt('region_name', cfg.StrOpt('region_name',
@ -111,8 +111,7 @@ class Clients(clients.OpenStackClients):
def __authenticate(self): def __authenticate(self):
pyrax.set_setting("identity_type", "keystone") pyrax.set_setting("identity_type", "keystone")
pyrax.set_setting("auth_endpoint", self.context.auth_url) pyrax.set_setting("auth_endpoint", self.context.auth_url)
logger.info(_("Authenticating username:%s") % LOG.info(_("Authenticating username:%s") % self.context.username)
self.context.username)
self.pyrax = pyrax.auth_with_token(self.context.auth_token, self.pyrax = pyrax.auth_with_token(self.context.auth_token,
tenant_id=self.context.tenant_id, tenant_id=self.context.tenant_id,
tenant_name=self.context.tenant, tenant_name=self.context.tenant,
@ -120,5 +119,5 @@ class Clients(clients.OpenStackClients):
or None)) or None))
if not self.pyrax: if not self.pyrax:
raise exception.AuthorizationFailure("No services available.") raise exception.AuthorizationFailure("No services available.")
logger.info(_("User %s authenticated successfully.") LOG.info(_("User %s authenticated successfully.")
% self.context.username) % self.context.username)

View File

@ -30,7 +30,7 @@ except ImportError:
PYRAX_INSTALLED = False PYRAX_INSTALLED = False
logger = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
class CloudDns(resource.Resource): class CloudDns(resource.Resource):
@ -153,7 +153,7 @@ class CloudDns(resource.Resource):
"""Create a Rackspace CloudDns Instance.""" """Create a Rackspace CloudDns Instance."""
# There is no check_create_complete as the pyrax create for DNS is # There is no check_create_complete as the pyrax create for DNS is
# synchronous. # synchronous.
logger.debug("CloudDns handle_create called.") LOG.debug("CloudDns handle_create called.")
args = dict((k, v) for k, v in self.properties.items()) args = dict((k, v) for k, v in self.properties.items())
for rec in args[self.RECORDS] or {}: for rec in args[self.RECORDS] or {}:
# only pop the priority for the correct types # only pop the priority for the correct types
@ -165,7 +165,7 @@ class CloudDns(resource.Resource):
def handle_update(self, json_snippet, tmpl_diff, prop_diff): def handle_update(self, json_snippet, tmpl_diff, prop_diff):
"""Update a Rackspace CloudDns Instance.""" """Update a Rackspace CloudDns Instance."""
logger.debug("CloudDns handle_update called.") LOG.debug("CloudDns handle_update called.")
if not self.resource_id: if not self.resource_id:
raise exception.Error(_('Update called on a non-existent domain')) raise exception.Error(_('Update called on a non-existent domain'))
if prop_diff: if prop_diff:
@ -188,7 +188,7 @@ class CloudDns(resource.Resource):
def handle_delete(self): def handle_delete(self):
"""Delete a Rackspace CloudDns Instance.""" """Delete a Rackspace CloudDns Instance."""
logger.debug("CloudDns handle_delete called.") LOG.debug("CloudDns handle_delete called.")
if self.resource_id: if self.resource_id:
try: try:
dom = self.cloud_dns().get(self.resource_id) dom = self.cloud_dns().get(self.resource_id)

View File

@ -35,7 +35,7 @@ except ImportError:
PYRAX_INSTALLED = False PYRAX_INSTALLED = False
logger = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
class LoadbalancerBuildError(exception.HeatException): class LoadbalancerBuildError(exception.HeatException):
@ -493,7 +493,7 @@ class CloudLoadBalancer(resource.Resource):
lb_name = (self.properties.get(self.NAME) or lb_name = (self.properties.get(self.NAME) or
self.physical_resource_name()) self.physical_resource_name())
logger.debug("Creating loadbalancer: %s" % {lb_name: lb_body}) LOG.debug("Creating loadbalancer: %s" % {lb_name: lb_body})
loadbalancer = self.clb.create(lb_name, **lb_body) loadbalancer = self.clb.create(lb_name, **lb_body)
self.resource_id_set(str(loadbalancer.id)) self.resource_id_set(str(loadbalancer.id))
@ -621,7 +621,7 @@ class CloudLoadBalancer(resource.Resource):
raise exception.InvalidTemplateAttribute(resource=self.name, raise exception.InvalidTemplateAttribute(resource=self.name,
key=key) key=key)
function = attribute_function[key] function = attribute_function[key]
logger.info(_('%(name)s.GetAtt(%(key)s) == %(function)s'), LOG.info(_('%(name)s.GetAtt(%(key)s) == %(function)s'),
{'name': self.name, 'key': key, 'function': function}) {'name': self.name, 'key': key, 'function': function})
return unicode(function) return unicode(function)

View File

@ -28,7 +28,7 @@ try:
except ImportError: except ImportError:
PYRAX_INSTALLED = False PYRAX_INSTALLED = False
logger = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
class CloudServer(server.Server): class CloudServer(server.Server):
@ -131,12 +131,12 @@ class CloudServer(server.Server):
self._managed_cloud_started_event_sent = True self._managed_cloud_started_event_sent = True
if 'rax_service_level_automation' not in server.metadata: if 'rax_service_level_automation' not in server.metadata:
logger.debug("Managed Cloud server does not have the " LOG.debug("Managed Cloud server does not have the "
"rax_service_level_automation metadata tag yet") "rax_service_level_automation metadata tag yet")
return False return False
mc_status = server.metadata['rax_service_level_automation'] mc_status = server.metadata['rax_service_level_automation']
logger.debug("Managed Cloud automation status: %s" % mc_status) LOG.debug("Managed Cloud automation status: %s" % mc_status)
if mc_status == self.MC_STATUS_IN_PROGRESS: if mc_status == self.MC_STATUS_IN_PROGRESS:
return False return False
@ -160,12 +160,12 @@ class CloudServer(server.Server):
self._rack_connect_started_event_sent = True self._rack_connect_started_event_sent = True
if 'rackconnect_automation_status' not in server.metadata: if 'rackconnect_automation_status' not in server.metadata:
logger.debug("RackConnect server does not have the " LOG.debug("RackConnect server does not have the "
"rackconnect_automation_status metadata tag yet") "rackconnect_automation_status metadata tag yet")
return False return False
rc_status = server.metadata['rackconnect_automation_status'] rc_status = server.metadata['rackconnect_automation_status']
logger.debug("RackConnect automation status: %s" % rc_status) LOG.debug("RackConnect automation status: %s" % rc_status)
if rc_status == self.RC_STATUS_DEPLOYING: if rc_status == self.RC_STATUS_DEPLOYING:
return False return False
@ -182,8 +182,7 @@ class CloudServer(server.Server):
reason = server.metadata.get('rackconnect_unprocessable_reason', reason = server.metadata.get('rackconnect_unprocessable_reason',
None) None)
if reason is not None: if reason is not None:
logger.warning(_("RackConnect unprocessable reason: %s") % LOG.warning(_("RackConnect unprocessable reason: %s") % reason)
reason)
msg = _("RackConnect automation has completed") msg = _("RackConnect automation has completed")
self._add_event(self.action, self.status, msg) self._add_event(self.action, self.status, msg)

View File

@ -35,7 +35,7 @@ else:
def resource_mapping(): def resource_mapping():
return {'Rackspace::Cloud::Network': CloudNetwork} return {'Rackspace::Cloud::Network': CloudNetwork}
logger = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
class CloudNetwork(resource.Resource): class CloudNetwork(resource.Resource):
@ -93,8 +93,8 @@ class CloudNetwork(resource.Resource):
try: try:
self._network = self.cloud_networks().get(self.resource_id) self._network = self.cloud_networks().get(self.resource_id)
except NotFound: except NotFound:
logger.warn(_("Could not find network %s but resource id " LOG.warn(_("Could not find network %s but resource id is set.")
"is set.") % self.resource_id) % self.resource_id)
return self._network return self._network
def cloud_networks(self): def cloud_networks(self):

View File

@ -28,7 +28,7 @@ from heat.openstack.common import log as logging
gettextutils.install('heat') gettextutils.install('heat')
logger = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
opts = [ opts = [
@ -125,10 +125,10 @@ class EC2Token(wsgi.Middleware):
last_failure = None last_failure = None
for auth_uri in self._conf_get('allowed_auth_uris'): for auth_uri in self._conf_get('allowed_auth_uris'):
try: try:
logger.debug("Attempt authorize on %s" % auth_uri) LOG.debug("Attempt authorize on %s" % auth_uri)
return self._authorize(req, auth_uri) return self._authorize(req, auth_uri)
except HeatAPIException as e: except HeatAPIException as e:
logger.debug("Authorize failed: %s" % e.__class__) LOG.debug("Authorize failed: %s" % e.__class__)
last_failure = e last_failure = e
raise last_failure or exception.HeatAccessDeniedError() raise last_failure or exception.HeatAccessDeniedError()
@ -138,14 +138,14 @@ class EC2Token(wsgi.Middleware):
# here so that we can use both authentication methods. # here so that we can use both authentication methods.
# Returning here just means the user didn't supply AWS # Returning here just means the user didn't supply AWS
# authentication and we'll let the app try native keystone next. # authentication and we'll let the app try native keystone next.
logger.info(_("Checking AWS credentials..")) LOG.info(_("Checking AWS credentials.."))
signature = self._get_signature(req) signature = self._get_signature(req)
if not signature: if not signature:
if 'X-Auth-User' in req.headers: if 'X-Auth-User' in req.headers:
return self.application return self.application
else: else:
logger.info(_("No AWS Signature found.")) LOG.info(_("No AWS Signature found."))
raise exception.HeatIncompleteSignatureError() raise exception.HeatIncompleteSignatureError()
access = self._get_access(req) access = self._get_access(req)
@ -153,13 +153,13 @@ class EC2Token(wsgi.Middleware):
if 'X-Auth-User' in req.headers: if 'X-Auth-User' in req.headers:
return self.application return self.application
else: else:
logger.info(_("No AWSAccessKeyId/Authorization Credential")) LOG.info(_("No AWSAccessKeyId/Authorization Credential"))
raise exception.HeatMissingAuthenticationTokenError() raise exception.HeatMissingAuthenticationTokenError()
logger.info(_("AWS credentials found, checking against keystone.")) LOG.info(_("AWS credentials found, checking against keystone."))
if not auth_uri: if not auth_uri:
logger.error(_("Ec2Token authorization failed, no auth_uri " LOG.error(_("Ec2Token authorization failed, no auth_uri "
"specified in config file")) "specified in config file"))
raise exception.HeatInternalFailureError(_('Service ' raise exception.HeatInternalFailureError(_('Service '
'misconfigured')) 'misconfigured'))
@ -184,7 +184,7 @@ class EC2Token(wsgi.Middleware):
headers = {'Content-Type': 'application/json'} headers = {'Content-Type': 'application/json'}
keystone_ec2_uri = self._conf_get_keystone_ec2_uri(auth_uri) keystone_ec2_uri = self._conf_get_keystone_ec2_uri(auth_uri)
logger.info(_('Authenticating with %s') % keystone_ec2_uri) LOG.info(_('Authenticating with %s') % keystone_ec2_uri)
response = requests.post(keystone_ec2_uri, data=creds_json, response = requests.post(keystone_ec2_uri, data=creds_json,
headers=headers) headers=headers)
result = response.json() result = response.json()
@ -192,9 +192,9 @@ class EC2Token(wsgi.Middleware):
token_id = result['access']['token']['id'] token_id = result['access']['token']['id']
tenant = result['access']['token']['tenant']['name'] tenant = result['access']['token']['tenant']['name']
tenant_id = result['access']['token']['tenant']['id'] tenant_id = result['access']['token']['tenant']['id']
logger.info(_("AWS authentication successful.")) LOG.info(_("AWS authentication successful."))
except (AttributeError, KeyError): except (AttributeError, KeyError):
logger.info(_("AWS authentication failure.")) LOG.info(_("AWS authentication failure."))
# Try to extract the reason for failure so we can return the # Try to extract the reason for failure so we can return the
# appropriate AWS error via raising an exception # appropriate AWS error via raising an exception
try: try:

View File

@ -31,7 +31,7 @@ from heat.openstack.common import log as logging
from heat.rpc import api as engine_api from heat.rpc import api as engine_api
from heat.rpc import client as rpc_client from heat.rpc import client as rpc_client
logger = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
class StackController(object): class StackController(object):
@ -246,11 +246,11 @@ class StackController(object):
Get template file contents, either from local file or URL Get template file contents, either from local file or URL
""" """
if 'TemplateBody' in req.params: if 'TemplateBody' in req.params:
logger.debug('TemplateBody ...') LOG.debug('TemplateBody ...')
return req.params['TemplateBody'] return req.params['TemplateBody']
elif 'TemplateUrl' in req.params: elif 'TemplateUrl' in req.params:
url = req.params['TemplateUrl'] url = req.params['TemplateUrl']
logger.debug('TemplateUrl %s' % url) LOG.debug('TemplateUrl %s' % url)
try: try:
return urlfetch.get(url) return urlfetch.get(url)
except IOError as exc: except IOError as exc:
@ -421,7 +421,7 @@ class StackController(object):
msg = _("The Template must be a JSON or YAML document.") msg = _("The Template must be a JSON or YAML document.")
return exception.HeatInvalidParameterValueError(detail=msg) return exception.HeatInvalidParameterValueError(detail=msg)
logger.info('validate_template') LOG.info('validate_template')
def format_validate_parameter(key, value): def format_validate_parameter(key, value):
""" """

View File

@ -25,7 +25,7 @@ from heat.openstack.common.rpc import common as rpc_common
from heat.rpc import api as engine_api from heat.rpc import api as engine_api
from heat.rpc import client as rpc_client from heat.rpc import client as rpc_client
logger = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
class WatchController(object): class WatchController(object):
@ -211,7 +211,7 @@ class WatchController(object):
# Filter criteria not met, return None # Filter criteria not met, return None
return return
except KeyError: except KeyError:
logger.warning(_("Invalid filter key %s, ignoring") % f) LOG.warning(_("Invalid filter key %s, ignoring") % f)
return result return result
@ -220,7 +220,7 @@ class WatchController(object):
# FIXME : Don't yet handle filtering by Dimensions # FIXME : Don't yet handle filtering by Dimensions
filter_result = dict((k, v) for (k, v) in parms.iteritems() if k in filter_result = dict((k, v) for (k, v) in parms.iteritems() if k in
("MetricName", "Namespace")) ("MetricName", "Namespace"))
logger.debug("filter parameters : %s" % filter_result) LOG.debug("filter parameters : %s" % filter_result)
try: try:
# Engine does not currently support query by namespace/metric # Engine does not currently support query by namespace/metric
@ -266,7 +266,7 @@ class WatchController(object):
# need to process (each dict) for dimensions # need to process (each dict) for dimensions
metric_data = api_utils.extract_param_list(parms, prefix='MetricData') metric_data = api_utils.extract_param_list(parms, prefix='MetricData')
if not len(metric_data): if not len(metric_data):
logger.error(_("Request does not contain required MetricData")) LOG.error(_("Request does not contain required MetricData"))
return exception.HeatMissingParameterError("MetricData list") return exception.HeatMissingParameterError("MetricData list")
watch_name = None watch_name = None
@ -321,10 +321,10 @@ class WatchController(object):
'expecting one of %(expect)s') % { 'expecting one of %(expect)s') % {
'state': state, 'state': state,
'expect': state_map.keys()} 'expect': state_map.keys()}
logger.error(msg) LOG.error(msg)
return exception.HeatInvalidParameterValueError(msg) return exception.HeatInvalidParameterValueError(msg)
logger.debug("setting %(name)s to %(state)s" % { LOG.debug("setting %(name)s to %(state)s" % {
'name': name, 'state': state_map[state]}) 'name': name, 'state': state_map[state]})
try: try:
self.rpc_client.set_watch_state(con, watch_name=name, self.rpc_client.set_watch_state(con, watch_name=name,

View File

@ -25,7 +25,7 @@ from heat.common import wsgi
from heat.openstack.common import log as logging from heat.openstack.common import log as logging
logger = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
class VersionNegotiationFilter(wsgi.Middleware): class VersionNegotiationFilter(wsgi.Middleware):
@ -48,7 +48,7 @@ class VersionNegotiationFilter(wsgi.Middleware):
msg = ("Processing request: %(method)s %(path)s Accept: " msg = ("Processing request: %(method)s %(path)s Accept: "
"%(accept)s" % {'method': req.method, "%(accept)s" % {'method': req.method,
'path': req.path, 'accept': req.accept}) 'path': req.path, 'accept': req.accept})
logger.debug(msg) LOG.debug(msg)
# If the request is for /versions, just return the versions container # If the request is for /versions, just return the versions container
if req.path_info_peek() in ("versions", ""): if req.path_info_peek() in ("versions", ""):
@ -60,7 +60,7 @@ class VersionNegotiationFilter(wsgi.Middleware):
minor_version = req.environ['api.minor_version'] minor_version = req.environ['api.minor_version']
if (major_version == 1 and minor_version == 0): if (major_version == 1 and minor_version == 0):
logger.debug("Matched versioned URI. " LOG.debug("Matched versioned URI. "
"Version: %(major_version)d.%(minor_version)d" "Version: %(major_version)d.%(minor_version)d"
% {'major_version': major_version, % {'major_version': major_version,
'minor_version': minor_version}) 'minor_version': minor_version})
@ -68,7 +68,7 @@ class VersionNegotiationFilter(wsgi.Middleware):
req.path_info_pop() req.path_info_pop()
return None return None
else: else:
logger.debug("Unknown version in versioned URI: " LOG.debug("Unknown version in versioned URI: "
"%(major_version)d.%(minor_version)d. " "%(major_version)d.%(minor_version)d. "
"Returning version choices." "Returning version choices."
% {'major_version': major_version, % {'major_version': major_version,
@ -84,13 +84,13 @@ class VersionNegotiationFilter(wsgi.Middleware):
major_version = req.environ['api.major_version'] major_version = req.environ['api.major_version']
minor_version = req.environ['api.minor_version'] minor_version = req.environ['api.minor_version']
if (major_version == 1 and minor_version == 0): if (major_version == 1 and minor_version == 0):
logger.debug("Matched versioned media type. Version: " LOG.debug("Matched versioned media type. Version: "
"%(major_version)d.%(minor_version)d" "%(major_version)d.%(minor_version)d"
% {'major_version': major_version, % {'major_version': major_version,
'minor_version': minor_version}) 'minor_version': minor_version})
return None return None
else: else:
logger.debug("Unknown version in accept header: " LOG.debug("Unknown version in accept header: "
"%(major_version)d.%(minor_version)d..." "%(major_version)d.%(minor_version)d..."
"returning version choices." "returning version choices."
% {'major_version': major_version, % {'major_version': major_version,
@ -98,7 +98,7 @@ class VersionNegotiationFilter(wsgi.Middleware):
return self.versions_app return self.versions_app
else: else:
if req.accept not in ('*/*', ''): if req.accept not in ('*/*', ''):
logger.debug("Unknown accept header: %s..." LOG.debug("Unknown accept header: %s..."
"returning HTTP not found.", req.accept) "returning HTTP not found.", req.accept)
return webob.exc.HTTPNotFound() return webob.exc.HTTPNotFound()
return None return None

View File

@ -30,7 +30,7 @@ from heat.openstack.common import log as logging
from heat.rpc import api as engine_api from heat.rpc import api as engine_api
from heat.rpc import client as rpc_client from heat.rpc import client as rpc_client
logger = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
class InstantiationData(object): class InstantiationData(object):
@ -94,7 +94,7 @@ class InstantiationData(object):
return template_data return template_data
elif self.PARAM_TEMPLATE_URL in self.data: elif self.PARAM_TEMPLATE_URL in self.data:
url = self.data[self.PARAM_TEMPLATE_URL] url = self.data[self.PARAM_TEMPLATE_URL]
logger.debug('TemplateUrl %s' % url) LOG.debug('TemplateUrl %s' % url)
try: try:
template_data = urlfetch.get(url) template_data = urlfetch.get(url)
except IOError as ex: except IOError as ex:
@ -188,7 +188,7 @@ class StackController(object):
filters=filter_params, filters=filter_params,
tenant_safe=tenant_safe) tenant_safe=tenant_safe)
except AttributeError as exc: except AttributeError as exc:
logger.warning(_("Old Engine Version: %s") % exc) LOG.warning(_("Old Engine Version: %s") % exc)
return stacks_view.collection(req, stacks=stacks, count=count, return stacks_view.collection(req, stacks=stacks, count=count,
tenant_safe=tenant_safe) tenant_safe=tenant_safe)

View File

@ -25,7 +25,7 @@ from heat.common import wsgi
from heat.openstack.common import log as logging from heat.openstack.common import log as logging
from heat.openstack.common import rpc from heat.openstack.common import rpc
logger = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
paste_deploy_group = cfg.OptGroup('paste_deploy') paste_deploy_group = cfg.OptGroup('paste_deploy')
paste_deploy_opts = [ paste_deploy_opts = [
@ -224,7 +224,7 @@ cfg.CONF.set_default(name='allowed_rpc_exception_modules',
default=allowed_rpc_exception_modules) default=allowed_rpc_exception_modules)
if cfg.CONF.instance_user: if cfg.CONF.instance_user:
logger.warn(_('The "instance_user" option in heat.conf is deprecated and ' LOG.warn(_('The "instance_user" option in heat.conf is deprecated and '
'will be removed in the Juno release.')) 'will be removed in the Juno release.'))

View File

@ -29,7 +29,7 @@ from heat.openstack.common import log as logging
_FATAL_EXCEPTION_FORMAT_ERRORS = False _FATAL_EXCEPTION_FORMAT_ERRORS = False
logger = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
class RedirectException(Exception): class RedirectException(Exception):
@ -112,9 +112,9 @@ class HeatException(Exception):
exc_info = sys.exc_info() exc_info = sys.exc_info()
#kwargs doesn't match a variable in the message #kwargs doesn't match a variable in the message
#log the issue and the kwargs #log the issue and the kwargs
logger.exception(_('Exception in string format operation')) LOG.exception(_('Exception in string format operation'))
for name, value in kwargs.iteritems(): for name, value in kwargs.iteritems():
logger.error("%s: %s" % (name, value)) LOG.error("%s: %s" % (name, value))
if _FATAL_EXCEPTION_FORMAT_ERRORS: if _FATAL_EXCEPTION_FORMAT_ERRORS:
raise exc_info[0], exc_info[1], exc_info[2] raise exc_info[0], exc_info[1], exc_info[2]

View File

@ -27,7 +27,7 @@ from heat.openstack.common.gettextutils import _
from heat.openstack.common import importutils from heat.openstack.common import importutils
from heat.openstack.common import log as logging from heat.openstack.common import log as logging
logger = logging.getLogger('heat.common.keystoneclient') LOG = logging.getLogger('heat.common.keystoneclient')
AccessKey = namedtuple('AccessKey', ['id', 'access', 'secret']) AccessKey = namedtuple('AccessKey', ['id', 'access', 'secret'])
@ -98,9 +98,9 @@ class KeystoneClientV3(object):
' stack_domain_admin and' ' stack_domain_admin and'
' stack_domain_admin_password')) ' stack_domain_admin_password'))
else: else:
logger.warning(_('stack_user_domain ID not set in heat.conf ' LOG.warning(_('stack_user_domain ID not set in heat.conf '
'falling back to using default')) 'falling back to using default'))
logger.debug('Using stack domain %s' % self.stack_domain_id) LOG.debug('Using stack domain %s' % self.stack_domain_id)
@property @property
def client(self): def client(self):
@ -119,7 +119,7 @@ class KeystoneClientV3(object):
if c.authenticate(): if c.authenticate():
self._admin_client = c self._admin_client = c
else: else:
logger.error("Admin client authentication failed") LOG.error("Admin client authentication failed")
raise exception.AuthorizationFailure() raise exception.AuthorizationFailure()
return self._admin_client return self._admin_client
@ -135,7 +135,7 @@ class KeystoneClientV3(object):
if c.authenticate(domain_id=self.stack_domain_id): if c.authenticate(domain_id=self.stack_domain_id):
self._domain_admin_client = c self._domain_admin_client = c
else: else:
logger.error("Domain admin client authentication failed") LOG.error("Domain admin client authentication failed")
raise exception.AuthorizationFailure() raise exception.AuthorizationFailure()
return self._domain_admin_client return self._domain_admin_client
@ -160,7 +160,7 @@ class KeystoneClientV3(object):
kwargs['password'] = self.context.password kwargs['password'] = self.context.password
kwargs['project_id'] = self.context.tenant_id kwargs['project_id'] = self.context.tenant_id
else: else:
logger.error(_("Keystone v3 API connection failed, no password " LOG.error(_("Keystone v3 API connection failed, no password "
"trust or auth_token!")) "trust or auth_token!"))
raise exception.AuthorizationFailure() raise exception.AuthorizationFailure()
kwargs.update(self._ssl_options()) kwargs.update(self._ssl_options())
@ -171,14 +171,14 @@ class KeystoneClientV3(object):
if 'trust_id' in kwargs: if 'trust_id' in kwargs:
# Sanity check # Sanity check
if not client.auth_ref.trust_scoped: if not client.auth_ref.trust_scoped:
logger.error(_("trust token re-scoping failed!")) LOG.error(_("trust token re-scoping failed!"))
raise exception.AuthorizationFailure() raise exception.AuthorizationFailure()
# All OK so update the context with the token # All OK so update the context with the token
self.context.auth_token = client.auth_ref.auth_token self.context.auth_token = client.auth_ref.auth_token
self.context.auth_url = kwargs.get('auth_url') self.context.auth_url = kwargs.get('auth_url')
# Sanity check that impersonation is effective # Sanity check that impersonation is effective
if self.context.trustor_user_id != client.auth_ref.user_id: if self.context.trustor_user_id != client.auth_ref.user_id:
logger.error("Trust impersonation failed") LOG.error("Trust impersonation failed")
raise exception.AuthorizationFailure() raise exception.AuthorizationFailure()
return client return client
@ -260,7 +260,7 @@ class KeystoneClientV3(object):
def _get_username(self, username): def _get_username(self, username):
if(len(username) > 64): if(len(username) > 64):
logger.warning(_("Truncating the username %s to the last 64 " LOG.warning(_("Truncating the username %s to the last 64 "
"characters.") % username) "characters.") % username)
#get the last 64 characters of the username #get the last 64 characters of the username
return username[-64:] return username[-64:]
@ -296,12 +296,12 @@ class KeystoneClientV3(object):
name=self._get_username(username), password=password, name=self._get_username(username), password=password,
default_project=self.context.tenant_id) default_project=self.context.tenant_id)
# Add user to heat_stack_user_role # Add user to heat_stack_user_role
logger.debug("Adding user %(user)s to role %(role)s" % { LOG.debug("Adding user %(user)s to role %(role)s" % {
'user': user.id, 'role': role_id}) 'user': user.id, 'role': role_id})
self.client.roles.grant(role=role_id, user=user.id, self.client.roles.grant(role=role_id, user=user.id,
project=self.context.tenant_id) project=self.context.tenant_id)
else: else:
logger.error(_("Failed to add user %(user)s to role %(role)s, " LOG.error(_("Failed to add user %(user)s to role %(role)s, "
"check role exists!") % { "check role exists!") % {
'user': username, 'user': username,
'role': cfg.CONF.heat_stack_user_role}) 'role': cfg.CONF.heat_stack_user_role})
@ -323,7 +323,7 @@ class KeystoneClientV3(object):
if not self.stack_domain_id: if not self.stack_domain_id:
# FIXME(shardy): Legacy fallback for folks using old heat.conf # FIXME(shardy): Legacy fallback for folks using old heat.conf
# files which lack domain configuration # files which lack domain configuration
logger.warning(_('Falling back to legacy non-domain user create, ' LOG.warning(_('Falling back to legacy non-domain user create, '
'configure domain in heat.conf')) 'configure domain in heat.conf'))
return self.create_stack_user(username=username, password=password) return self.create_stack_user(username=username, password=password)
# We add the new user to a special keystone role # We add the new user to a special keystone role
@ -338,13 +338,14 @@ class KeystoneClientV3(object):
name=self._get_username(username), password=password, name=self._get_username(username), password=password,
default_project=project_id, domain=self.stack_domain_id) default_project=project_id, domain=self.stack_domain_id)
# Add to stack user role # Add to stack user role
logger.debug("Adding user %(user)s to role %(role)s" % { LOG.debug("Adding user %(user)s to role %(role)s" % {
'user': user.id, 'role': role_id}) 'user': user.id, 'role': role_id})
self.domain_admin_client.roles.grant(role=role_id, user=user.id, self.domain_admin_client.roles.grant(role=role_id, user=user.id,
project=project_id) project=project_id)
else: else:
logger.error(_("Failed to add user %(user)s to role %(role)s, " LOG.error(_("Failed to add user %(user)s to role %(role)s, "
"check role exists!") % {'user': username, "check role exists!")
% {'user': username,
'role': cfg.CONF.heat_stack_user_role}) 'role': cfg.CONF.heat_stack_user_role})
raise exception.Error(_("Can't find role %s") raise exception.Error(_("Can't find role %s")
% cfg.CONF.heat_stack_user_role) % cfg.CONF.heat_stack_user_role)
@ -363,7 +364,7 @@ class KeystoneClientV3(object):
if not self.stack_domain_id: if not self.stack_domain_id:
# FIXME(shardy): Legacy fallback for folks using old heat.conf # FIXME(shardy): Legacy fallback for folks using old heat.conf
# files which lack domain configuration # files which lack domain configuration
logger.warning(_('Falling back to legacy non-domain user delete, ' LOG.warning(_('Falling back to legacy non-domain user delete, '
'configure domain in heat.conf')) 'configure domain in heat.conf'))
return self.delete_stack_user(user_id) return self.delete_stack_user(user_id)
@ -384,7 +385,7 @@ class KeystoneClientV3(object):
if not self.stack_domain_id: if not self.stack_domain_id:
# FIXME(shardy): Legacy fallback for folks using old heat.conf # FIXME(shardy): Legacy fallback for folks using old heat.conf
# files which lack domain configuration # files which lack domain configuration
logger.warning(_('Falling back to legacy non-domain project, ' LOG.warning(_('Falling back to legacy non-domain project, '
'configure domain in heat.conf')) 'configure domain in heat.conf'))
return self.context.tenant_id return self.context.tenant_id
# Note we use the tenant ID not name to ensure uniqueness in a multi- # Note we use the tenant ID not name to ensure uniqueness in a multi-
@ -401,7 +402,7 @@ class KeystoneClientV3(object):
if not self.stack_domain_id: if not self.stack_domain_id:
# FIXME(shardy): Legacy fallback for folks using old heat.conf # FIXME(shardy): Legacy fallback for folks using old heat.conf
# files which lack domain configuration # files which lack domain configuration
logger.warning(_('Falling back to legacy non-domain project, ' LOG.warning(_('Falling back to legacy non-domain project, '
'configure domain in heat.conf')) 'configure domain in heat.conf'))
return return
try: try:
@ -475,7 +476,7 @@ class KeystoneClientV3(object):
if not self.stack_domain_id: if not self.stack_domain_id:
# FIXME(shardy): Legacy fallback for folks using old heat.conf # FIXME(shardy): Legacy fallback for folks using old heat.conf
# files which lack domain configuration # files which lack domain configuration
logger.warning(_('Falling back to legacy non-domain keypair, ' LOG.warning(_('Falling back to legacy non-domain keypair, '
'configure domain in heat.conf')) 'configure domain in heat.conf'))
return self.create_ec2_keypair(user_id) return self.create_ec2_keypair(user_id)
data_blob = {'access': uuid.uuid4().hex, data_blob = {'access': uuid.uuid4().hex,
@ -492,7 +493,7 @@ class KeystoneClientV3(object):
if not self.stack_domain_id: if not self.stack_domain_id:
# FIXME(shardy): Legacy fallback for folks using old heat.conf # FIXME(shardy): Legacy fallback for folks using old heat.conf
# files which lack domain configuration # files which lack domain configuration
logger.warning(_('Falling back to legacy non-domain keypair, ' LOG.warning(_('Falling back to legacy non-domain keypair, '
'configure domain in heat.conf')) 'configure domain in heat.conf'))
return self.delete_ec2_keypair(credential_id=credential_id) return self.delete_ec2_keypair(credential_id=credential_id)
self._check_stack_domain_user(user_id, project_id, 'delete_keypair') self._check_stack_domain_user(user_id, project_id, 'delete_keypair')
@ -511,7 +512,7 @@ class KeystoneClientV3(object):
if not self.stack_domain_id: if not self.stack_domain_id:
# FIXME(shardy): Legacy fallback for folks using old heat.conf # FIXME(shardy): Legacy fallback for folks using old heat.conf
# files which lack domain configuration # files which lack domain configuration
logger.warning(_('Falling back to legacy non-domain disable, ' LOG.warning(_('Falling back to legacy non-domain disable, '
'configure domain in heat.conf')) 'configure domain in heat.conf'))
return self.disable_stack_user(user_id) return self.disable_stack_user(user_id)
self._check_stack_domain_user(user_id, project_id, 'disable') self._check_stack_domain_user(user_id, project_id, 'disable')
@ -521,7 +522,7 @@ class KeystoneClientV3(object):
if not self.stack_domain_id: if not self.stack_domain_id:
# FIXME(shardy): Legacy fallback for folks using old heat.conf # FIXME(shardy): Legacy fallback for folks using old heat.conf
# files which lack domain configuration # files which lack domain configuration
logger.warning(_('Falling back to legacy non-domain enable, ' LOG.warning(_('Falling back to legacy non-domain enable, '
'configure domain in heat.conf')) 'configure domain in heat.conf'))
return self.enable_stack_user(user_id) return self.enable_stack_user(user_id)
self._check_stack_domain_user(user_id, project_id, 'enable') self._check_stack_domain_user(user_id, project_id, 'enable')

View File

@ -23,7 +23,7 @@ from heat.openstack.common import importutils
from heat.openstack.common import log as logging from heat.openstack.common import log as logging
from heat.openstack.common import processutils from heat.openstack.common import processutils
logger = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
def startup_notify(notify_param): def startup_notify(notify_param):
@ -35,6 +35,6 @@ def startup_notify(notify_param):
try: try:
processutils.execute(notify_param, shell=True) processutils.execute(notify_param, shell=True)
except Exception as e: except Exception as e:
logger.error(_('Failed to execute onready command: %s') % e) LOG.error(_('Failed to execute onready command: %s') % e)
else: else:
notifier.notify() notifier.notify()

View File

@ -26,7 +26,7 @@ import types
from heat.openstack.common.gettextutils import _ from heat.openstack.common.gettextutils import _
from heat.openstack.common import log as logging from heat.openstack.common import log as logging
logger = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
def _module_name(*components): def _module_name(*components):
@ -94,7 +94,7 @@ def load_modules(package, ignore_error=False):
try: try:
module = _import_module(importer, module_name, package) module = _import_module(importer, module_name, package)
except ImportError: except ImportError:
logger.error(_('Failed to import module %s') % module_name) LOG.error(_('Failed to import module %s') % module_name)
if not ignore_error: if not ignore_error:
raise raise
else: else:

View File

@ -28,7 +28,7 @@ import six
from heat.openstack.common import log as logging from heat.openstack.common import log as logging
logger = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
class JSONResponseSerializer(object): class JSONResponseSerializer(object):
@ -40,7 +40,7 @@ class JSONResponseSerializer(object):
return obj return obj
response = json.dumps(data, default=sanitizer) response = json.dumps(data, default=sanitizer)
logger.debug("JSON response : %s" % response) LOG.debug("JSON response : %s" % response)
return response return response
def default(self, response, result): def default(self, response, result):

View File

@ -23,7 +23,7 @@ import socket
from heat.openstack.common import log as logging from heat.openstack.common import log as logging
logger = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
def _sd_notify(msg): def _sd_notify(msg):
@ -37,7 +37,7 @@ def _sd_notify(msg):
sock.sendall(msg) sock.sendall(msg)
sock.close() sock.close()
else: else:
logger.warning(_('Unable to notify systemd of startup completion:' LOG.warning(_('Unable to notify systemd of startup completion:'
' NOTIFY_SOCKET not set')) ' NOTIFY_SOCKET not set'))

View File

@ -25,7 +25,7 @@ from heat.openstack.common import log as logging
cfg.CONF.import_opt('max_template_size', 'heat.common.config') cfg.CONF.import_opt('max_template_size', 'heat.common.config')
logger = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
class URLFetchError(exception.Error, IOError): class URLFetchError(exception.Error, IOError):
@ -40,7 +40,7 @@ def get(url, allowed_schemes=('http', 'https')):
the allowed_schemes argument. the allowed_schemes argument.
Raise an IOError if getting the data fails. Raise an IOError if getting the data fails.
""" """
logger.info(_('Fetching data from %s') % url) LOG.info(_('Fetching data from %s') % url)
components = urllib.parse.urlparse(url) components = urllib.parse.urlparse(url)

View File

@ -164,12 +164,12 @@ cfg.CONF.register_opt(json_size_opt)
class WritableLogger(object): class WritableLogger(object):
"""A thin wrapper that responds to `write` and logs.""" """A thin wrapper that responds to `write` and logs."""
def __init__(self, logger, level=logging.DEBUG): def __init__(self, LOG, level=logging.DEBUG):
self.logger = logger self.LOG = LOG
self.level = level self.level = level
def write(self, msg): def write(self, msg):
self.logger.log(self.level, msg.strip("\n")) self.LOG.log(self.level, msg.strip("\n"))
def get_bind_addr(conf, default_port=None): def get_bind_addr(conf, default_port=None):
@ -252,7 +252,7 @@ class Server(object):
""" """
def kill_children(*args): def kill_children(*args):
"""Kills the entire process group.""" """Kills the entire process group."""
self.logger.error(_('SIGTERM received')) self.LOG.error(_('SIGTERM received'))
signal.signal(signal.SIGTERM, signal.SIG_IGN) signal.signal(signal.SIGTERM, signal.SIG_IGN)
self.running = False self.running = False
os.killpg(0, signal.SIGTERM) os.killpg(0, signal.SIGTERM)
@ -261,7 +261,7 @@ class Server(object):
""" """
Shuts down the server, but allows running requests to complete Shuts down the server, but allows running requests to complete
""" """
self.logger.error(_('SIGHUP received')) self.LOG.error(_('SIGHUP received'))
signal.signal(signal.SIGHUP, signal.SIG_IGN) signal.signal(signal.SIGHUP, signal.SIG_IGN)
self.running = False self.running = False
@ -269,7 +269,7 @@ class Server(object):
self.application = application self.application = application
self.sock = get_socket(conf, default_port) self.sock = get_socket(conf, default_port)
self.logger = logging.getLogger('eventlet.wsgi.server') self.LOG = logging.getLogger('eventlet.wsgi.server')
if conf.workers == 0: if conf.workers == 0:
# Useful for profiling, test, debug etc. # Useful for profiling, test, debug etc.
@ -277,7 +277,7 @@ class Server(object):
self.pool.spawn_n(self._single_run, application, self.sock) self.pool.spawn_n(self._single_run, application, self.sock)
return return
self.logger.info(_("Starting %d workers") % conf.workers) self.LOG.info(_("Starting %d workers") % conf.workers)
signal.signal(signal.SIGTERM, kill_children) signal.signal(signal.SIGTERM, kill_children)
signal.signal(signal.SIGHUP, hup) signal.signal(signal.SIGHUP, hup)
while len(self.children) < conf.workers: while len(self.children) < conf.workers:
@ -288,18 +288,18 @@ class Server(object):
try: try:
pid, status = os.wait() pid, status = os.wait()
if os.WIFEXITED(status) or os.WIFSIGNALED(status): if os.WIFEXITED(status) or os.WIFSIGNALED(status):
self.logger.error(_('Removing dead child %s') % pid) self.LOG.error(_('Removing dead child %s') % pid)
self.children.remove(pid) self.children.remove(pid)
self.run_child() self.run_child()
except OSError as err: except OSError as err:
if err.errno not in (errno.EINTR, errno.ECHILD): if err.errno not in (errno.EINTR, errno.ECHILD):
raise raise
except KeyboardInterrupt: except KeyboardInterrupt:
self.logger.info(_('Caught keyboard interrupt. Exiting.')) self.LOG.info(_('Caught keyboard interrupt. Exiting.'))
break break
eventlet.greenio.shutdown_safe(self.sock) eventlet.greenio.shutdown_safe(self.sock)
self.sock.close() self.sock.close()
self.logger.debug('Exited') self.LOG.debug('Exited')
def wait(self): def wait(self):
"""Wait until all servers have completed running.""" """Wait until all servers have completed running."""
@ -317,10 +317,10 @@ class Server(object):
signal.signal(signal.SIGHUP, signal.SIG_DFL) signal.signal(signal.SIGHUP, signal.SIG_DFL)
signal.signal(signal.SIGTERM, signal.SIG_DFL) signal.signal(signal.SIGTERM, signal.SIG_DFL)
self.run_server() self.run_server()
self.logger.info(_('Child %d exiting normally') % os.getpid()) self.LOG.info(_('Child %d exiting normally') % os.getpid())
return return
else: else:
self.logger.info(_('Started child %s') % pid) self.LOG.info(_('Started child %s') % pid)
self.children.append(pid) self.children.append(pid)
def run_server(self): def run_server(self):
@ -334,7 +334,7 @@ class Server(object):
self.application, self.application,
custom_pool=self.pool, custom_pool=self.pool,
url_length_limit=URL_LENGTH_LIMIT, url_length_limit=URL_LENGTH_LIMIT,
log=WritableLogger(self.logger), log=WritableLogger(self.LOG),
debug=cfg.CONF.debug) debug=cfg.CONF.debug)
except socket.error as err: except socket.error as err:
if err[0] != errno.EINVAL: if err[0] != errno.EINVAL:
@ -343,11 +343,11 @@ class Server(object):
def _single_run(self, application, sock): def _single_run(self, application, sock):
"""Start a WSGI server in a new green thread.""" """Start a WSGI server in a new green thread."""
self.logger.info(_("Starting single process server")) self.LOG.info(_("Starting single process server"))
eventlet.wsgi.server(sock, application, eventlet.wsgi.server(sock, application,
custom_pool=self.pool, custom_pool=self.pool,
url_length_limit=URL_LENGTH_LIMIT, url_length_limit=URL_LENGTH_LIMIT,
log=WritableLogger(self.logger)) log=WritableLogger(self.LOG))
class Middleware(object): class Middleware(object):

View File

@ -19,7 +19,7 @@ from heat.openstack.common import log as logging
from heat.openstack.common import timeutils from heat.openstack.common import timeutils
from heat.rpc import api from heat.rpc import api
logger = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
def extract_args(params): def extract_args(params):
@ -34,7 +34,7 @@ def extract_args(params):
try: try:
timeout = int(timeout_mins) timeout = int(timeout_mins)
except (ValueError, TypeError): except (ValueError, TypeError):
logger.exception(_('Timeout conversion failed')) LOG.exception(_('Timeout conversion failed'))
else: else:
if timeout > 0: if timeout > 0:
kwargs[api.PARAM_TIMEOUT] = timeout kwargs[api.PARAM_TIMEOUT] = timeout
@ -234,7 +234,7 @@ def format_watch_data(wd):
if len(metric) == 1: if len(metric) == 1:
metric_name, metric_data = metric[0] metric_name, metric_data = metric[0]
else: else:
logger.error(_("Unexpected number of keys in watch_data.data!")) LOG.error(_("Unexpected number of keys in watch_data.data!"))
return return
result = { result = {

View File

@ -21,42 +21,42 @@ from heat.openstack.common.gettextutils import _
from heat.openstack.common import importutils from heat.openstack.common import importutils
from heat.openstack.common import log as logging from heat.openstack.common import log as logging
logger = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
try: try:
from swiftclient import client as swiftclient from swiftclient import client as swiftclient
except ImportError: except ImportError:
swiftclient = None swiftclient = None
logger.info(_('swiftclient not available')) LOG.info(_('swiftclient not available'))
try: try:
from neutronclient.v2_0 import client as neutronclient from neutronclient.v2_0 import client as neutronclient
except ImportError: except ImportError:
neutronclient = None neutronclient = None
logger.info(_('neutronclient not available')) LOG.info(_('neutronclient not available'))
try: try:
from cinderclient import client as cinderclient from cinderclient import client as cinderclient
except ImportError: except ImportError:
cinderclient = None cinderclient = None
logger.info(_('cinderclient not available')) LOG.info(_('cinderclient not available'))
try: try:
from troveclient import client as troveclient from troveclient import client as troveclient
except ImportError: except ImportError:
troveclient = None troveclient = None
logger.info(_('troveclient not available')) LOG.info(_('troveclient not available'))
try: try:
from ceilometerclient import client as ceilometerclient from ceilometerclient import client as ceilometerclient
except ImportError: except ImportError:
ceilometerclient = None ceilometerclient = None
logger.info(_('ceilometerclient not available')) LOG.info(_('ceilometerclient not available'))
try: try:
from glanceclient import client as glanceclient from glanceclient import client as glanceclient
except ImportError: except ImportError:
glanceclient = None glanceclient = None
logger.info(_('glanceclient not available')) LOG.info(_('glanceclient not available'))
_default_backend = "heat.engine.clients.OpenStackClients" _default_backend = "heat.engine.clients.OpenStackClients"
@ -188,7 +188,7 @@ class OpenStackClients(object):
con = self.context con = self.context
if self.auth_token is None: if self.auth_token is None:
logger.error(_("Neutron connection failed, no auth_token!")) LOG.error(_("Neutron connection failed, no auth_token!"))
return None return None
endpoint_type = self._get_client_option('neutron', 'endpoint_type') endpoint_type = self._get_client_option('neutron', 'endpoint_type')

View File

@ -19,7 +19,7 @@ from heat.db import api as db_api
from heat.openstack.common.gettextutils import _ from heat.openstack.common.gettextutils import _
from heat.openstack.common import log as logging from heat.openstack.common import log as logging
logger = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
class Event(object): class Event(object):
@ -88,7 +88,7 @@ class Event(object):
ev['created_at'] = self.timestamp ev['created_at'] = self.timestamp
if self.id is not None: if self.id is not None:
logger.warning(_('Duplicating event')) LOG.warning(_('Duplicating event'))
new_ev = db_api.event_create(self.context, ev) new_ev = db_api.event_create(self.context, ev)
self.id = new_ev.id self.id = new_ev.id

View File

@ -15,7 +15,7 @@ from heat.common.exception import StackValidationFailed
from heat.openstack.common.gettextutils import _ from heat.openstack.common.gettextutils import _
from heat.openstack.common import log as logging from heat.openstack.common import log as logging
logger = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
PARAMETER_GROUPS = 'parameter_groups' PARAMETER_GROUPS = 'parameter_groups'
PARAMETERS = 'parameters' PARAMETERS = 'parameters'
@ -28,8 +28,8 @@ class ParameterGroups(object):
def __init__(self, tmpl): def __init__(self, tmpl):
self.tmpl = tmpl self.tmpl = tmpl
self.parameters = tmpl.parameters(None, {}) self.parameters = tmpl.parameters(None, {})
logger.debug(self.tmpl) LOG.debug(self.tmpl)
logger.debug(self.parameters) LOG.debug(self.parameters)
self.parameter_names = [] self.parameter_names = []
if self.parameters: if self.parameters:
self.parameter_names = [param for param in self.parameters] self.parameter_names = [param for param in self.parameters]
@ -40,8 +40,8 @@ class ParameterGroups(object):
Validate that a parameter belongs to only one Parameter Group Validate that a parameter belongs to only one Parameter Group
and that each parameter name references a valid parameter. and that each parameter name references a valid parameter.
''' '''
logger.debug('Validating Parameter Groups.') LOG.debug('Validating Parameter Groups.')
logger.debug(self.parameter_names) LOG.debug(self.parameter_names)
if self.parameter_groups is not None: if self.parameter_groups is not None:
#Loop through groups and validate parameters #Loop through groups and validate parameters
grouped_parameters = [] grouped_parameters = []

View File

@ -39,7 +39,7 @@ from heat.openstack.common.gettextutils import _
from heat.openstack.common import log as logging from heat.openstack.common import log as logging
from heat.openstack.common import strutils from heat.openstack.common import strutils
logger = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
class Stack(collections.Mapping): class Stack(collections.Mapping):
@ -175,7 +175,7 @@ class Stack(collections.Mapping):
via the Parameters class as the StackId pseudo parameter via the Parameters class as the StackId pseudo parameter
''' '''
if not self.parameters.set_stack_id(self.identifier()): if not self.parameters.set_stack_id(self.identifier()):
logger.warning(_("Unable to set parameters StackId identifier")) LOG.warning(_("Unable to set parameters StackId identifier"))
@staticmethod @staticmethod
def _get_dependencies(resources): def _get_dependencies(resources):
@ -361,7 +361,7 @@ class Stack(collections.Mapping):
dup_names = set(self.parameters.keys()) & set(self.keys()) dup_names = set(self.parameters.keys()) & set(self.keys())
if dup_names: if dup_names:
logger.debug("Duplicate names %s" % dup_names) LOG.debug("Duplicate names %s" % dup_names)
raise StackValidationFailed(message=_("Duplicate names %s") % raise StackValidationFailed(message=_("Duplicate names %s") %
dup_names) dup_names)
@ -369,10 +369,10 @@ class Stack(collections.Mapping):
try: try:
result = res.validate() result = res.validate()
except exception.HeatException as ex: except exception.HeatException as ex:
logger.exception(ex) LOG.exception(ex)
raise ex raise ex
except Exception as ex: except Exception as ex:
logger.exception(ex) LOG.exception(ex)
raise StackValidationFailed(message=strutils.safe_decode( raise StackValidationFailed(message=strutils.safe_decode(
six.text_type(ex))) six.text_type(ex)))
if result: if result:
@ -415,7 +415,7 @@ class Stack(collections.Mapping):
'status': status, 'status': status,
'status_reason': reason}) 'status_reason': reason})
msg = _('Stack %(action)s %(status)s (%(name)s): %(reason)s') msg = _('Stack %(action)s %(status)s (%(name)s): %(reason)s')
logger.info(msg % {'action': action, LOG.info(msg % {'action': action,
'status': status, 'status': status,
'name': self.name, 'name': self.name,
'reason': reason}) 'reason': reason})
@ -514,7 +514,7 @@ class Stack(collections.Mapping):
self._backup_name(), self._backup_name(),
owner_id=self.id) owner_id=self.id)
if s is not None: if s is not None:
logger.debug('Loaded existing backup stack') LOG.debug('Loaded existing backup stack')
return self.load(self.context, stack=s) return self.load(self.context, stack=s)
elif create_if_missing: elif create_if_missing:
templ = Template.load(self.context, self.t.id) templ = Template.load(self.context, self.t.id)
@ -522,7 +522,7 @@ class Stack(collections.Mapping):
prev = type(self)(self.context, self.name, templ, self.env, prev = type(self)(self.context, self.name, templ, self.env,
owner_id=self.id) owner_id=self.id)
prev.store(backup=True) prev.store(backup=True)
logger.debug('Created new backup stack') LOG.debug('Created new backup stack')
return prev return prev
else: else:
return None return None
@ -562,8 +562,7 @@ class Stack(collections.Mapping):
@scheduler.wrappertask @scheduler.wrappertask
def update_task(self, newstack, action=UPDATE): def update_task(self, newstack, action=UPDATE):
if action not in (self.UPDATE, self.ROLLBACK): if action not in (self.UPDATE, self.ROLLBACK):
logger.error(_("Unexpected action %s passed to update!") % LOG.error(_("Unexpected action %s passed to update!") % action)
action)
self.state_set(self.UPDATE, self.FAILED, self.state_set(self.UPDATE, self.FAILED,
"Invalid action %s" % action) "Invalid action %s" % action)
return return
@ -571,7 +570,7 @@ class Stack(collections.Mapping):
if self.status != self.COMPLETE: if self.status != self.COMPLETE:
if (action == self.ROLLBACK and if (action == self.ROLLBACK and
self.state == (self.UPDATE, self.IN_PROGRESS)): self.state == (self.UPDATE, self.IN_PROGRESS)):
logger.debug("Starting update rollback for %s" % self.name) LOG.debug("Starting update rollback for %s" % self.name)
else: else:
self.state_set(action, self.FAILED, self.state_set(action, self.FAILED,
'State invalid for %s' % action) 'State invalid for %s' % action)
@ -622,7 +621,7 @@ class Stack(collections.Mapping):
yield self.update_task(oldstack, action=self.ROLLBACK) yield self.update_task(oldstack, action=self.ROLLBACK)
return return
else: else:
logger.debug('Deleting backup stack') LOG.debug('Deleting backup stack')
backup_stack.delete(backup=True) backup_stack.delete(backup=True)
# flip the template to the newstack values # flip the template to the newstack values
@ -651,7 +650,7 @@ class Stack(collections.Mapping):
differently. differently.
''' '''
if action not in (self.DELETE, self.ROLLBACK): if action not in (self.DELETE, self.ROLLBACK):
logger.error(_("Unexpected action %s passed to delete!") % action) LOG.error(_("Unexpected action %s passed to delete!") % action)
self.state_set(self.DELETE, self.FAILED, self.state_set(self.DELETE, self.FAILED,
"Invalid action %s" % action) "Invalid action %s" % action)
return return
@ -727,7 +726,7 @@ class Stack(collections.Mapping):
try: try:
self.clients.keystone().delete_trust(trust_id) self.clients.keystone().delete_trust(trust_id)
except Exception as ex: except Exception as ex:
logger.exception(ex) LOG.exception(ex)
stack_status = self.FAILED stack_status = self.FAILED
reason = "Error deleting trust: %s" % six.text_type(ex) reason = "Error deleting trust: %s" % six.text_type(ex)
@ -742,7 +741,7 @@ class Stack(collections.Mapping):
self.clients.keystone().delete_stack_domain_project( self.clients.keystone().delete_stack_domain_project(
project_id=self.stack_user_project_id) project_id=self.stack_user_project_id)
except Exception as ex: except Exception as ex:
logger.exception(ex) LOG.exception(ex)
stack_status = self.FAILED stack_status = self.FAILED
reason = "Error deleting project: %s" % six.text_type(ex) reason = "Error deleting project: %s" % six.text_type(ex)
@ -764,7 +763,7 @@ class Stack(collections.Mapping):
''' '''
# No need to suspend if the stack has been suspended # No need to suspend if the stack has been suspended
if self.state == (self.SUSPEND, self.COMPLETE): if self.state == (self.SUSPEND, self.COMPLETE):
logger.info(_('%s is already suspended') % str(self)) LOG.info(_('%s is already suspended') % str(self))
return return
sus_task = scheduler.TaskRunner(self.stack_task, sus_task = scheduler.TaskRunner(self.stack_task,
@ -783,7 +782,7 @@ class Stack(collections.Mapping):
''' '''
# No need to resume if the stack has been resumed # No need to resume if the stack has been resumed
if self.state == (self.RESUME, self.COMPLETE): if self.state == (self.RESUME, self.COMPLETE):
logger.info(_('%s is already resumed') % str(self)) LOG.info(_('%s is already resumed') % str(self))
return return
sus_task = scheduler.TaskRunner(self.stack_task, sus_task = scheduler.TaskRunner(self.stack_task,
@ -814,7 +813,7 @@ class Stack(collections.Mapping):
scheduler.TaskRunner(res.destroy)() scheduler.TaskRunner(res.destroy)()
except exception.ResourceFailure as ex: except exception.ResourceFailure as ex:
failed = True failed = True
logger.error(_('delete: %s') % ex) LOG.error(_('delete: %s') % ex)
for res in deps: for res in deps:
if not failed: if not failed:
@ -822,7 +821,7 @@ class Stack(collections.Mapping):
res.state_reset() res.state_reset()
scheduler.TaskRunner(res.create)() scheduler.TaskRunner(res.create)()
except exception.ResourceFailure as ex: except exception.ResourceFailure as ex:
logger.exception(_('create')) LOG.exception(_('create'))
failed = True failed = True
else: else:
res.state_set(res.CREATE, res.FAILED, res.state_set(res.CREATE, res.FAILED,

View File

@ -21,7 +21,7 @@ from oslo.config import cfg
from heat.common import plugin_loader from heat.common import plugin_loader
from heat.openstack.common import log from heat.openstack.common import log
logger = log.getLogger(__name__) LOG = log.getLogger(__name__)
class PluginManager(object): class PluginManager(object):
@ -93,14 +93,14 @@ class PluginMapping(object):
try: try:
mapping_dict = mapping_func(*self.args, **self.kwargs) mapping_dict = mapping_func(*self.args, **self.kwargs)
except Exception: except Exception:
logger.error(_('Failed to load %(mapping_name)s ' LOG.error(_('Failed to load %(mapping_name)s '
'from %(module)s') % fmt_data) 'from %(module)s') % fmt_data)
raise raise
else: else:
if isinstance(mapping_dict, collections.Mapping): if isinstance(mapping_dict, collections.Mapping):
return mapping_dict return mapping_dict
elif mapping_dict is not None: elif mapping_dict is not None:
logger.error(_('Invalid type for %(mapping_name)s ' LOG.error(_('Invalid type for %(mapping_name)s '
'from %(module)s') % fmt_data) 'from %(module)s') % fmt_data)
return {} return {}

View File

@ -32,7 +32,7 @@ from heat.openstack.common import excutils
from heat.openstack.common.gettextutils import _ from heat.openstack.common.gettextutils import _
from heat.openstack.common import log as logging from heat.openstack.common import log as logging
logger = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
DELETION_POLICY = (DELETE, RETAIN, SNAPSHOT) = ('Delete', 'Retain', 'Snapshot') DELETION_POLICY = (DELETE, RETAIN, SNAPSHOT) = ('Delete', 'Retain', 'Snapshot')
@ -386,7 +386,7 @@ class Resource(object):
while not check(handle_data): while not check(handle_data):
yield yield
except Exception as ex: except Exception as ex:
logger.exception('%s : %s' % (action, str(self))) LOG.exception('%s : %s' % (action, str(self)))
failure = exception.ResourceFailure(ex, self, action) failure = exception.ResourceFailure(ex, self, action)
self.state_set(action, self.FAILED, six.text_type(failure)) self.state_set(action, self.FAILED, six.text_type(failure))
raise failure raise failure
@ -396,7 +396,7 @@ class Resource(object):
self.state_set(action, self.FAILED, self.state_set(action, self.FAILED,
'%s aborted' % action) '%s aborted' % action)
except Exception: except Exception:
logger.exception(_('Error marking resource as failed')) LOG.exception(_('Error marking resource as failed'))
else: else:
self.state_set(action, self.COMPLETE) self.state_set(action, self.COMPLETE)
@ -420,7 +420,7 @@ class Resource(object):
% str(self.state)) % str(self.state))
raise exception.ResourceFailure(exc, self, action) raise exception.ResourceFailure(exc, self, action)
logger.info('creating %s' % str(self)) LOG.info('creating %s' % str(self))
# Re-resolve the template, since if the resource Ref's # Re-resolve the template, since if the resource Ref's
# the StackId pseudo parameter, it will change after # the StackId pseudo parameter, it will change after
@ -501,7 +501,7 @@ class Resource(object):
exc = Exception(_('Resource update already requested')) exc = Exception(_('Resource update already requested'))
raise exception.ResourceFailure(exc, self, action) raise exception.ResourceFailure(exc, self, action)
logger.info('updating %s' % str(self)) LOG.info('updating %s' % str(self))
try: try:
self.updated_time = datetime.utcnow() self.updated_time = datetime.utcnow()
@ -529,10 +529,10 @@ class Resource(object):
yield yield
except UpdateReplace: except UpdateReplace:
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
logger.debug("Resource %s update requires replacement" % LOG.debug("Resource %s update requires replacement" %
self.name) self.name)
except Exception as ex: except Exception as ex:
logger.exception(_('update %(resource)s : %(err)s') % LOG.exception(_('update %(resource)s : %(err)s') %
{'resource': str(self), 'err': ex}) {'resource': str(self), 'err': ex})
failure = exception.ResourceFailure(ex, self, action) failure = exception.ResourceFailure(ex, self, action)
self.state_set(action, self.FAILED, six.text_type(failure)) self.state_set(action, self.FAILED, six.text_type(failure))
@ -555,7 +555,7 @@ class Resource(object):
% str(self.state)) % str(self.state))
raise exception.ResourceFailure(exc, self, action) raise exception.ResourceFailure(exc, self, action)
logger.info(_('suspending %s') % str(self)) LOG.info(_('suspending %s') % str(self))
return self._do_action(action) return self._do_action(action)
def resume(self): def resume(self):
@ -571,7 +571,7 @@ class Resource(object):
% str(self.state)) % str(self.state))
raise exception.ResourceFailure(exc, self, action) raise exception.ResourceFailure(exc, self, action)
logger.info(_('resuming %s') % str(self)) LOG.info(_('resuming %s') % str(self))
return self._do_action(action) return self._do_action(action)
def physical_resource_name(self): def physical_resource_name(self):
@ -613,7 +613,7 @@ class Resource(object):
return name[0:2] + '-' + name[-postfix_length:] return name[0:2] + '-' + name[-postfix_length:]
def validate(self): def validate(self):
logger.info(_('Validating %s') % str(self)) LOG.info(_('Validating %s') % str(self))
function.validate(self.t) function.validate(self.t)
self.validate_deletion_policy(self.t) self.validate_deletion_policy(self.t)
@ -645,7 +645,7 @@ class Resource(object):
initial_state = self.state initial_state = self.state
logger.info(_('deleting %s') % str(self)) LOG.info(_('deleting %s') % str(self))
try: try:
self.state_set(action, self.IN_PROGRESS) self.state_set(action, self.IN_PROGRESS)
@ -670,7 +670,7 @@ class Resource(object):
yield yield
except Exception as ex: except Exception as ex:
logger.exception(_('Delete %s') % str(self)) LOG.exception(_('Delete %s') % str(self))
failure = exception.ResourceFailure(ex, self, self.action) failure = exception.ResourceFailure(ex, self, self.action)
self.state_set(action, self.FAILED, six.text_type(failure)) self.state_set(action, self.FAILED, six.text_type(failure))
raise failure raise failure
@ -680,8 +680,7 @@ class Resource(object):
self.state_set(action, self.FAILED, self.state_set(action, self.FAILED,
'Deletion aborted') 'Deletion aborted')
except Exception: except Exception:
logger.exception(_('Error marking resource deletion ' LOG.exception(_('Error marking resource deletion failed'))
'failed'))
else: else:
self.state_set(action, self.COMPLETE) self.state_set(action, self.COMPLETE)
@ -711,7 +710,7 @@ class Resource(object):
rs = db_api.resource_get(self.context, self.id) rs = db_api.resource_get(self.context, self.id)
rs.update_and_save({'nova_instance': self.resource_id}) rs.update_and_save({'nova_instance': self.resource_id})
except Exception as ex: except Exception as ex:
logger.warn(_('db error %s') % ex) LOG.warn(_('db error %s') % ex)
def _store(self): def _store(self):
'''Create the resource in the database.''' '''Create the resource in the database.'''
@ -731,7 +730,7 @@ class Resource(object):
self.created_time = new_rs.created_at self.created_time = new_rs.created_at
self._rsrc_metadata = metadata self._rsrc_metadata = metadata
except Exception as ex: except Exception as ex:
logger.error(_('DB error %s') % ex) LOG.error(_('DB error %s') % ex)
def _add_event(self, action, status, reason): def _add_event(self, action, status, reason):
'''Add a state change event to the database.''' '''Add a state change event to the database.'''
@ -756,7 +755,7 @@ class Resource(object):
'updated_at': self.updated_time, 'updated_at': self.updated_time,
'nova_instance': self.resource_id}) 'nova_instance': self.resource_id})
except Exception as ex: except Exception as ex:
logger.error(_('DB error %s') % ex) LOG.error(_('DB error %s') % ex)
# store resource in DB on transition to CREATE_IN_PROGRESS # store resource in DB on transition to CREATE_IN_PROGRESS
# all other transitions (other than to DELETE_COMPLETE) # all other transitions (other than to DELETE_COMPLETE)
@ -874,8 +873,8 @@ class Resource(object):
self._add_event('signal', self.status, get_string_details()) self._add_event('signal', self.status, get_string_details())
self.handle_signal(details) self.handle_signal(details)
except Exception as ex: except Exception as ex:
logger.exception(_('signal %(name)s : %(msg)s') % LOG.exception(_('signal %(name)s : %(msg)s') % {'name': str(self),
{'name': str(self), 'msg': ex}) 'msg': ex})
failure = exception.ResourceFailure(ex, self) failure = exception.ResourceFailure(ex, self)
raise failure raise failure
@ -888,7 +887,7 @@ class Resource(object):
No-op for resources which don't explicitly override this method No-op for resources which don't explicitly override this method
''' '''
if new_metadata: if new_metadata:
logger.warning(_("Resource %s does not implement metadata update") LOG.warning(_("Resource %s does not implement metadata update")
% self.name) % self.name)
@classmethod @classmethod

View File

@ -35,7 +35,7 @@ from heat.openstack.common import log as logging
from heat.openstack.common import timeutils from heat.openstack.common import timeutils
from heat.scaling import template from heat.scaling import template
logger = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
(SCALED_RESOURCE_TYPE,) = ('OS::Heat::ScaledResource',) (SCALED_RESOURCE_TYPE,) = ('OS::Heat::ScaledResource',)
@ -632,9 +632,9 @@ class AutoScalingGroup(InstanceGroup, CooldownMixin):
Adjust the size of the scaling group if the cooldown permits. Adjust the size of the scaling group if the cooldown permits.
""" """
if self._cooldown_inprogress(): if self._cooldown_inprogress():
logger.info(_("%(name)s NOT performing scaling adjustment, " LOG.info(_("%(name)s NOT performing scaling adjustment, "
"cooldown %(cooldown)s") % { "cooldown %(cooldown)s")
'name': self.name, % {'name': self.name,
'cooldown': self.properties[self.COOLDOWN]}) 'cooldown': self.properties[self.COOLDOWN]})
return return
@ -659,21 +659,21 @@ class AutoScalingGroup(InstanceGroup, CooldownMixin):
if new_capacity > upper: if new_capacity > upper:
if upper > capacity: if upper > capacity:
logger.info(_('truncating growth to %s') % upper) LOG.info(_('truncating growth to %s') % upper)
new_capacity = upper new_capacity = upper
else: else:
logger.warn(_('can not exceed %s') % upper) LOG.warn(_('can not exceed %s') % upper)
return return
if new_capacity < lower: if new_capacity < lower:
if lower < capacity: if lower < capacity:
logger.info(_('truncating shrinkage to %s') % lower) LOG.info(_('truncating shrinkage to %s') % lower)
new_capacity = lower new_capacity = lower
else: else:
logger.warn(_('can not be less than %s') % lower) LOG.warn(_('can not be less than %s') % lower)
return return
if new_capacity == capacity: if new_capacity == capacity:
logger.debug('no change in capacity %d' % capacity) LOG.debug('no change in capacity %d' % capacity)
return return
# send a notification before, on-error and on-success. # send a notification before, on-error and on-success.
@ -698,7 +698,7 @@ class AutoScalingGroup(InstanceGroup, CooldownMixin):
}) })
notification.send(**notif) notification.send(**notif)
except Exception: except Exception:
logger.exception(_('Failed sending error notification')) LOG.exception(_('Failed sending error notification'))
else: else:
notif.update({ notif.update({
'suffix': 'end', 'suffix': 'end',
@ -1060,15 +1060,15 @@ class ScalingPolicy(signal_responder.SignalResponder, CooldownMixin):
alarm_state = details.get('current', alarm_state = details.get('current',
details.get('state', 'alarm')).lower() details.get('state', 'alarm')).lower()
logger.info(_('%(name)s Alarm, new state %(state)s') % { LOG.info(_('%(name)s Alarm, new state %(state)s')
'name': self.name, 'state': alarm_state}) % {'name': self.name, 'state': alarm_state})
if alarm_state != 'alarm': if alarm_state != 'alarm':
return return
if self._cooldown_inprogress(): if self._cooldown_inprogress():
logger.info(_("%(name)s NOT performing scaling action, " LOG.info(_("%(name)s NOT performing scaling action, "
"cooldown %(cooldown)s") % { "cooldown %(cooldown)s")
'name': self.name, % {'name': self.name,
'cooldown': self.properties[self.COOLDOWN]}) 'cooldown': self.properties[self.COOLDOWN]})
return return
@ -1080,10 +1080,9 @@ class ScalingPolicy(signal_responder.SignalResponder, CooldownMixin):
'alarm': self.name, 'alarm': self.name,
'group': asgn_id}) 'group': asgn_id})
logger.info(_('%(name)s Alarm, adjusting Group %(group)s with id ' LOG.info(_('%(name)s Alarm, adjusting Group %(group)s with id '
'%(asgn_id)s by %(filter)s') % { '%(asgn_id)s by %(filter)s')
'name': self.name, 'group': group.name, % {'name': self.name, 'group': group.name, 'asgn_id': asgn_id,
'asgn_id': asgn_id,
'filter': self.properties[self.SCALING_ADJUSTMENT]}) 'filter': self.properties[self.SCALING_ADJUSTMENT]})
adjustment_type = self._get_adjustement_type() adjustment_type = self._get_adjustement_type()
group.adjust(self.properties[self.SCALING_ADJUSTMENT], adjustment_type) group.adjust(self.properties[self.SCALING_ADJUSTMENT], adjustment_type)

View File

@ -22,7 +22,7 @@ from heat.openstack.common import excutils
from heat.openstack.common.gettextutils import _ from heat.openstack.common.gettextutils import _
from heat.openstack.common import log as logging from heat.openstack.common import log as logging
logger = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
class ElasticIp(resource.Resource): class ElasticIp(resource.Resource):
@ -73,14 +73,14 @@ class ElasticIp(resource.Resource):
ips = self.neutron().show_floatingip(self.resource_id) ips = self.neutron().show_floatingip(self.resource_id)
except ne as e: except ne as e:
if e.status_code == 404: if e.status_code == 404:
logger.warn(_("Floating IPs not found: %s") % e) LOG.warn(_("Floating IPs not found: %s") % e)
else: else:
self.ipaddress = ips['floatingip']['floating_ip_address'] self.ipaddress = ips['floatingip']['floating_ip_address']
else: else:
try: try:
ips = self.nova().floating_ips.get(self.resource_id) ips = self.nova().floating_ips.get(self.resource_id)
except clients.novaclient.exceptions.NotFound as ex: except clients.novaclient.exceptions.NotFound as ex:
logger.warn(_("Floating IPs not found: %s") % ex) LOG.warn(_("Floating IPs not found: %s") % ex)
else: else:
self.ipaddress = ips.ip self.ipaddress = ips.ip
return self.ipaddress or '' return self.ipaddress or ''
@ -97,7 +97,7 @@ class ElasticIp(resource.Resource):
'floatingip': props})['floatingip'] 'floatingip': props})['floatingip']
self.ipaddress = ips['floating_ip_address'] self.ipaddress = ips['floating_ip_address']
self.resource_id_set(ips['id']) self.resource_id_set(ips['id'])
logger.info(_('ElasticIp create %s') % str(ips)) LOG.info(_('ElasticIp create %s') % str(ips))
else: else:
if self.properties[self.DOMAIN]: if self.properties[self.DOMAIN]:
raise exception.Error(_('Domain property can not be set on ' raise exception.Error(_('Domain property can not be set on '
@ -109,12 +109,12 @@ class ElasticIp(resource.Resource):
with excutils.save_and_reraise_exception(): with excutils.save_and_reraise_exception():
msg = _("No default floating IP pool configured. " msg = _("No default floating IP pool configured. "
"Set 'default_floating_pool' in nova.conf.") "Set 'default_floating_pool' in nova.conf.")
logger.error(msg) LOG.error(msg)
if ips: if ips:
self.ipaddress = ips.ip self.ipaddress = ips.ip
self.resource_id_set(ips.id) self.resource_id_set(ips.id)
logger.info(_('ElasticIp create %s') % str(ips)) LOG.info(_('ElasticIp create %s') % str(ips))
instance_id = self.properties[self.INSTANCE_ID] instance_id = self.properties[self.INSTANCE_ID]
if instance_id: if instance_id:
@ -193,13 +193,12 @@ class ElasticIpAssociation(resource.Resource):
if self.properties[self.EIP]: if self.properties[self.EIP]:
if not self.properties[self.INSTANCE_ID]: if not self.properties[self.INSTANCE_ID]:
logger.warn(_('Skipping association, InstanceId not ' LOG.warn(_('Skipping association, InstanceId not specified'))
'specified'))
return return
server = self.nova().servers.get(self.properties[self.INSTANCE_ID]) server = self.nova().servers.get(self.properties[self.INSTANCE_ID])
server.add_floating_ip(self.properties[self.EIP]) server.add_floating_ip(self.properties[self.EIP])
self.resource_id_set(self.properties[self.EIP]) self.resource_id_set(self.properties[self.EIP])
logger.debug('ElasticIpAssociation ' LOG.debug('ElasticIpAssociation '
'%(instance)s.add_floating_ip(%(eip)s)', '%(instance)s.add_floating_ip(%(eip)s)',
{'instance': self.properties[self.INSTANCE_ID], {'instance': self.properties[self.INSTANCE_ID],
'eip': self.properties[self.EIP]}) 'eip': self.properties[self.EIP]})
@ -216,7 +215,7 @@ class ElasticIpAssociation(resource.Resource):
port_rsrc = ports['ports'][0] port_rsrc = ports['ports'][0]
port_id = port_rsrc['id'] port_id = port_rsrc['id']
else: else:
logger.warn(_('Skipping association, resource not specified')) LOG.warn(_('Skipping association, resource not specified'))
return return
float_id = self.properties[self.ALLOCATION_ID] float_id = self.properties[self.ALLOCATION_ID]

View File

@ -34,7 +34,7 @@ from heat.openstack.common import log as logging
cfg.CONF.import_opt('instance_user', 'heat.common.config') cfg.CONF.import_opt('instance_user', 'heat.common.config')
logger = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
class Restarter(signal_responder.SignalResponder): class Restarter(signal_responder.SignalResponder):
@ -88,22 +88,21 @@ class Restarter(signal_responder.SignalResponder):
else: else:
alarm_state = details.get('state', 'alarm').lower() alarm_state = details.get('state', 'alarm').lower()
logger.info(_('%(name)s Alarm, new state %(state)s') % { LOG.info(_('%(name)s Alarm, new state %(state)s')
'name': self.name, 'state': alarm_state}) % {'name': self.name, 'state': alarm_state})
if alarm_state != 'alarm': if alarm_state != 'alarm':
return return
victim = self._find_resource(self.properties[self.INSTANCE_ID]) victim = self._find_resource(self.properties[self.INSTANCE_ID])
if victim is None: if victim is None:
logger.info(_('%(name)s Alarm, can not find instance ' LOG.info(_('%(name)s Alarm, can not find instance %(instance)s')
'%(instance)s') % { % {'name': self.name,
'name': self.name,
'instance': self.properties[self.INSTANCE_ID]}) 'instance': self.properties[self.INSTANCE_ID]})
return return
logger.info(_('%(name)s Alarm, restarting resource: %(victim)s') % { LOG.info(_('%(name)s Alarm, restarting resource: %(victim)s')
'name': self.name, 'victim': victim.name}) % {'name': self.name, 'victim': victim.name})
self.stack.restart_resource(victim.name) self.stack.restart_resource(victim.name)
def _resolve_attribute(self, name): def _resolve_attribute(self, name):
@ -367,10 +366,8 @@ class Instance(resource.Resource):
elif name in self.ATTRIBUTES[1:]: elif name in self.ATTRIBUTES[1:]:
res = self._ipaddress() res = self._ipaddress()
logger.info(_('%(name)s._resolve_attribute(%(attname)s) == %(res)s'), LOG.info(_('%(name)s._resolve_attribute(%(attname)s) == %(res)s'),
{'name': self.name, {'name': self.name, 'attname': name, 'res': res})
'attname': name,
'res': res})
return unicode(res) if res else None return unicode(res) if res else None
def _build_nics(self, network_interfaces, def _build_nics(self, network_interfaces,
@ -752,7 +749,7 @@ class Instance(resource.Resource):
raise exception.NotFound(_('Failed to find instance %s') % raise exception.NotFound(_('Failed to find instance %s') %
self.resource_id) self.resource_id)
else: else:
logger.debug("suspending instance %s" % self.resource_id) LOG.debug("suspending instance %s" % self.resource_id)
# We want the server.suspend to happen after the volume # We want the server.suspend to happen after the volume
# detachement has finished, so pass both tasks and the server # detachement has finished, so pass both tasks and the server
suspend_runner = scheduler.TaskRunner(server.suspend) suspend_runner = scheduler.TaskRunner(server.suspend)
@ -774,10 +771,9 @@ class Instance(resource.Resource):
return True return True
nova_utils.refresh_server(server) nova_utils.refresh_server(server)
logger.debug("%(name)s check_suspend_complete " LOG.debug("%(name)s check_suspend_complete "
"status = %(status)s", "status = %(status)s",
{'name': self.name, {'name': self.name, 'status': server.status})
'status': server.status})
if server.status in list(nova_utils.deferred_server_statuses + if server.status in list(nova_utils.deferred_server_statuses +
['ACTIVE']): ['ACTIVE']):
return server.status == 'SUSPENDED' return server.status == 'SUSPENDED'
@ -808,7 +804,7 @@ class Instance(resource.Resource):
raise exception.NotFound(_('Failed to find instance %s') % raise exception.NotFound(_('Failed to find instance %s') %
self.resource_id) self.resource_id)
else: else:
logger.debug("resuming instance %s" % self.resource_id) LOG.debug("resuming instance %s" % self.resource_id)
server.resume() server.resume()
return server, scheduler.TaskRunner(self._attach_volumes_task()) return server, scheduler.TaskRunner(self._attach_volumes_task())

View File

@ -24,7 +24,7 @@ from heat.engine import stack_resource
from heat.openstack.common.gettextutils import _ from heat.openstack.common.gettextutils import _
from heat.openstack.common import log as logging from heat.openstack.common import log as logging
logger = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
lb_template_default = r''' lb_template_default = r'''
{ {
@ -426,7 +426,7 @@ class LoadBalancer(stack_resource.StackResource):
client = self.nova() client = self.nova()
for i in instances: for i in instances:
ip = nova_utils.server_to_ipaddress(client, i) or '0.0.0.0' ip = nova_utils.server_to_ipaddress(client, i) or '0.0.0.0'
logger.debug('haproxy server:%s' % ip) LOG.debug('haproxy server:%s' % ip)
servers.append('%sserver server%d %s:%s %s' % (spaces, n, servers.append('%sserver server%d %s:%s %s' % (spaces, n,
ip, inst_port, ip, inst_port,
check)) check))
@ -437,7 +437,7 @@ class LoadBalancer(stack_resource.StackResource):
def get_parsed_template(self): def get_parsed_template(self):
if cfg.CONF.loadbalancer_template: if cfg.CONF.loadbalancer_template:
with open(cfg.CONF.loadbalancer_template) as templ_fd: with open(cfg.CONF.loadbalancer_template) as templ_fd:
logger.info(_('Using custom loadbalancer template %s') LOG.info(_('Using custom loadbalancer template %s')
% cfg.CONF.loadbalancer_template) % cfg.CONF.loadbalancer_template)
contents = templ_fd.read() contents = templ_fd.read()
else: else:

View File

@ -21,7 +21,7 @@ from heat.engine import scheduler
from heat.openstack.common import log as logging from heat.openstack.common import log as logging
from heat.openstack.common import uuidutils from heat.openstack.common import uuidutils
logger = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
class NeutronResource(resource.Resource): class NeutronResource(resource.Resource):
@ -136,7 +136,7 @@ class NeutronResource(resource.Resource):
try: try:
attributes = self._show_resource() attributes = self._show_resource()
except NeutronClientException as ex: except NeutronClientException as ex:
logger.warn(_("failed to fetch resource attributes: %s") % ex) LOG.warn(_("failed to fetch resource attributes: %s") % ex)
return None return None
return self.handle_get_attributes(self.name, name, attributes) return self.handle_get_attributes(self.name, name, attributes)

View File

@ -23,7 +23,7 @@ from heat.openstack.common import log as logging
if clients.neutronclient is not None: if clients.neutronclient is not None:
import neutronclient.common.exceptions as neutron_exp import neutronclient.common.exceptions as neutron_exp
logger = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
class Port(neutron.NeutronResource): class Port(neutron.NeutronResource):
@ -287,7 +287,7 @@ class Port(neutron.NeutronResource):
self._prepare_list_properties(props) self._prepare_list_properties(props)
logger.debug('updating port with %s' % props) LOG.debug('updating port with %s' % props)
self.neutron().update_port(self.resource_id, {'port': props}) self.neutron().update_port(self.resource_id, {'port': props})
def check_update_complete(self, *args): def check_update_complete(self, *args):

View File

@ -19,7 +19,7 @@ from heat.openstack.common import excutils
from heat.openstack.common.gettextutils import _ from heat.openstack.common.gettextutils import _
from heat.openstack.common import log as logging from heat.openstack.common import log as logging
logger = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
class NovaFloatingIp(resource.Resource): class NovaFloatingIp(resource.Resource):
@ -67,7 +67,7 @@ class NovaFloatingIp(resource.Resource):
if pool is None: if pool is None:
msg = _('Could not allocate floating IP. Probably there ' msg = _('Could not allocate floating IP. Probably there '
'is no default floating IP pool is configured.') 'is no default floating IP pool is configured.')
logger.error(msg) LOG.error(msg)
self.resource_id_set(floating_ip.id) self.resource_id_set(floating_ip.id)
self._floating_ip = floating_ip self._floating_ip = floating_ip

View File

@ -31,7 +31,7 @@ from heat.engine import scheduler
from heat.openstack.common.gettextutils import _ from heat.openstack.common.gettextutils import _
from heat.openstack.common import log as logging from heat.openstack.common import log as logging
logger = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
deferred_server_statuses = ['BUILD', deferred_server_statuses = ['BUILD',
@ -55,7 +55,7 @@ def refresh_server(server):
except clients.novaclient.exceptions.OverLimit as exc: except clients.novaclient.exceptions.OverLimit as exc:
msg = _("Server %(name)s (%(id)s) received an OverLimit " msg = _("Server %(name)s (%(id)s) received an OverLimit "
"response during server.get(): %(exception)s") "response during server.get(): %(exception)s")
logger.warning(msg % {'name': server.name, LOG.warning(msg % {'name': server.name,
'id': server.id, 'id': server.id,
'exception': exc}) 'exception': exc})
except clients.novaclient.exceptions.ClientException as exc: except clients.novaclient.exceptions.ClientException as exc:
@ -63,7 +63,7 @@ def refresh_server(server):
(500, 503))): (500, 503))):
msg = _('Server "%(name)s" (%(id)s) received the following ' msg = _('Server "%(name)s" (%(id)s) received the following '
'exception during server.get(): %(exception)s') 'exception during server.get(): %(exception)s')
logger.warning(msg % {'name': server.name, LOG.warning(msg % {'name': server.name,
'id': server.id, 'id': server.id,
'exception': exc}) 'exception': exc})
else: else:
@ -331,8 +331,8 @@ def server_to_ipaddress(client, server):
try: try:
server = client.servers.get(server) server = client.servers.get(server)
except clients.novaclient.exceptions.NotFound as ex: except clients.novaclient.exceptions.NotFound as ex:
logger.warn(_('Instance (%(server)s) not found: %(ex)s') % { LOG.warn(_('Instance (%(server)s) not found: %(ex)s')
'server': server, 'ex': ex}) % {'server': server, 'ex': ex})
else: else:
for n in server.networks: for n in server.networks:
if len(server.networks[n]) > 0: if len(server.networks[n]) > 0:

View File

@ -21,7 +21,7 @@ from heat.engine.resources import nova_utils
from heat.openstack.common.gettextutils import _ from heat.openstack.common.gettextutils import _
from heat.openstack.common import log as logging from heat.openstack.common import log as logging
logger = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
class OSDBInstance(resource.Resource): class OSDBInstance(resource.Resource):
@ -235,7 +235,7 @@ class OSDBInstance(resource.Resource):
except troveclient.exceptions.RequestEntityTooLarge as exc: except troveclient.exceptions.RequestEntityTooLarge as exc:
msg = _("Stack %(name)s (%(id)s) received an OverLimit " msg = _("Stack %(name)s (%(id)s) received an OverLimit "
"response during instance.get(): %(exception)s") "response during instance.get(): %(exception)s")
logger.warning(msg % {'name': self.stack.name, LOG.warning(msg % {'name': self.stack.name,
'id': self.stack.id, 'id': self.stack.id,
'exception': exc}) 'exception': exc})
@ -253,7 +253,7 @@ class OSDBInstance(resource.Resource):
msg = _("Database instance %(database)s created (flavor:%(flavor)s, " msg = _("Database instance %(database)s created (flavor:%(flavor)s, "
"volume:%(volume)s)") "volume:%(volume)s)")
logger.info(msg % ({'database': self.dbinstancename, LOG.info(msg % ({'database': self.dbinstancename,
'flavor': self.flavor, 'flavor': self.flavor,
'volume': self.volume})) 'volume': self.volume}))
return True return True
@ -269,8 +269,7 @@ class OSDBInstance(resource.Resource):
try: try:
instance = self.trove().instances.get(self.resource_id) instance = self.trove().instances.get(self.resource_id)
except troveclient.exceptions.NotFound: except troveclient.exceptions.NotFound:
logger.debug("Database instance %s not found." % LOG.debug("Database instance %s not found." % self.resource_id)
self.resource_id)
self.resource_id_set(None) self.resource_id_set(None)
else: else:
instance.delete() instance.delete()

View File

@ -20,7 +20,7 @@ from heat.engine import properties
from heat.engine import resource from heat.engine import resource
from heat.openstack.common import log as logging from heat.openstack.common import log as logging
logger = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
class S3Bucket(resource.Resource): class S3Bucket(resource.Resource):
@ -119,9 +119,8 @@ class S3Bucket(resource.Resource):
"""Create a bucket.""" """Create a bucket."""
container = self.physical_resource_name() container = self.physical_resource_name()
headers = self.tags_to_headers() headers = self.tags_to_headers()
logger.debug('S3Bucket create container %(container)s with headers ' LOG.debug('S3Bucket create container %(container)s with headers '
'%(headers)s' % { '%(headers)s' % {'container': container, 'headers': headers})
'container': container, 'headers': headers})
if self.properties[self.WEBSITE_CONFIGURATION] is not None: if self.properties[self.WEBSITE_CONFIGURATION] is not None:
sc = self.properties[self.WEBSITE_CONFIGURATION] sc = self.properties[self.WEBSITE_CONFIGURATION]
index_doc = sc[self.WEBSITE_CONFIGURATION_INDEX_DOCUMENT] index_doc = sc[self.WEBSITE_CONFIGURATION_INDEX_DOCUMENT]
@ -151,12 +150,12 @@ class S3Bucket(resource.Resource):
def handle_delete(self): def handle_delete(self):
"""Perform specified delete policy.""" """Perform specified delete policy."""
logger.debug('S3Bucket delete container %s' % self.resource_id) LOG.debug('S3Bucket delete container %s' % self.resource_id)
if self.resource_id is not None: if self.resource_id is not None:
try: try:
self.swift().delete_container(self.resource_id) self.swift().delete_container(self.resource_id)
except clients.swiftclient.ClientException as ex: except clients.swiftclient.ClientException as ex:
logger.warn(_("Delete container failed: %s") % ex) LOG.warn(_("Delete container failed: %s") % ex)
def FnGetRefId(self): def FnGetRefId(self):
return unicode(self.resource_id) return unicode(self.resource_id)

View File

@ -35,7 +35,7 @@ from heat.openstack.common import uuidutils
cfg.CONF.import_opt('instance_user', 'heat.common.config') cfg.CONF.import_opt('instance_user', 'heat.common.config')
logger = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
class Server(stack_user.StackUser): class Server(stack_user.StackUser):
@ -622,8 +622,8 @@ class Server(stack_user.StackUser):
try: try:
server = self.nova().servers.get(self.resource_id) server = self.nova().servers.get(self.resource_id)
except clients.novaclient.exceptions.NotFound as ex: except clients.novaclient.exceptions.NotFound as ex:
logger.warn(_('Instance (%(server)s) not found: %(ex)s') % { LOG.warn(_('Instance (%(server)s) not found: %(ex)s')
'server': self.resource_id, 'ex': ex}) % {'server': self.resource_id, 'ex': ex})
return '' return ''
if name == self.ADDRESSES: if name == self.ADDRESSES:
return self._add_port_for_address(server) return self._add_port_for_address(server)
@ -901,11 +901,11 @@ class Server(stack_user.StackUser):
server=self.name) server=self.name)
raise exception.StackValidationFailed(message=msg) raise exception.StackValidationFailed(message=msg)
elif network.get(self.NETWORK_UUID): elif network.get(self.NETWORK_UUID):
logger.info(_('For the server "%(server)s" the "%(uuid)s" ' LOG.info(_('For the server "%(server)s" the "%(uuid)s" '
'property is set to network "%(network)s". ' 'property is set to network "%(network)s". '
'"%(uuid)s" property is deprecated. Use ' '"%(uuid)s" property is deprecated. Use '
'"%(id)s" property instead.' '"%(id)s" property instead.')
'') % dict(uuid=self.NETWORK_UUID, % dict(uuid=self.NETWORK_UUID,
id=self.NETWORK_ID, id=self.NETWORK_ID,
network=network[self.NETWORK_ID], network=network[self.NETWORK_ID],
server=self.name)) server=self.name))
@ -986,7 +986,7 @@ class Server(stack_user.StackUser):
raise exception.NotFound(_('Failed to find server %s') % raise exception.NotFound(_('Failed to find server %s') %
self.resource_id) self.resource_id)
else: else:
logger.debug('suspending server %s' % self.resource_id) LOG.debug('suspending server %s' % self.resource_id)
# We want the server.suspend to happen after the volume # We want the server.suspend to happen after the volume
# detachement has finished, so pass both tasks and the server # detachement has finished, so pass both tasks and the server
suspend_runner = scheduler.TaskRunner(server.suspend) suspend_runner = scheduler.TaskRunner(server.suspend)
@ -1003,9 +1003,8 @@ class Server(stack_user.StackUser):
return True return True
nova_utils.refresh_server(server) nova_utils.refresh_server(server)
logger.debug('%(name)s check_suspend_complete status ' LOG.debug('%(name)s check_suspend_complete status = %(status)s'
'= %(status)s' % { % {'name': self.name, 'status': server.status})
'name': self.name, 'status': server.status})
if server.status in list(nova_utils.deferred_server_statuses + if server.status in list(nova_utils.deferred_server_statuses +
['ACTIVE']): ['ACTIVE']):
return server.status == 'SUSPENDED' return server.status == 'SUSPENDED'
@ -1032,7 +1031,7 @@ class Server(stack_user.StackUser):
raise exception.NotFound(_('Failed to find server %s') % raise exception.NotFound(_('Failed to find server %s') %
self.resource_id) self.resource_id)
else: else:
logger.debug('resuming server %s' % self.resource_id) LOG.debug('resuming server %s' % self.resource_id)
server.resume() server.resume()
return server return server

View File

@ -21,7 +21,7 @@ from heat.engine import resource
from heat.openstack.common.gettextutils import _ from heat.openstack.common.gettextutils import _
from heat.openstack.common import log as logging from heat.openstack.common import log as logging
logger = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
class SoftwareConfig(resource.Resource): class SoftwareConfig(resource.Resource):
@ -167,7 +167,7 @@ class SoftwareConfig(resource.Resource):
try: try:
self.heat().software_configs.delete(self.resource_id) self.heat().software_configs.delete(self.resource_id)
except heat_exp.HTTPNotFound: except heat_exp.HTTPNotFound:
logger.debug( LOG.debug(
_('Software config %s is not found.') % self.resource_id) _('Software config %s is not found.') % self.resource_id)
def _resolve_attribute(self, name): def _resolve_attribute(self, name):

View File

@ -28,7 +28,7 @@ from heat.engine.resources.software_config import software_config as sc
from heat.engine import signal_responder from heat.engine import signal_responder
from heat.openstack.common import log as logging from heat.openstack.common import log as logging
logger = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
class SoftwareDeployment(signal_responder.SignalResponder): class SoftwareDeployment(signal_responder.SignalResponder):
@ -240,7 +240,7 @@ class SoftwareDeployment(signal_responder.SignalResponder):
elif sd.status == SoftwareDeployment.FAILED: elif sd.status == SoftwareDeployment.FAILED:
message = _("Deployment to server " message = _("Deployment to server "
"failed: %s") % sd.status_reason "failed: %s") % sd.status_reason
logger.error(message) LOG.error(message)
exc = exception.Error(message) exc = exception.Error(message)
raise exc raise exc

View File

@ -20,7 +20,7 @@ from heat.engine import properties
from heat.engine import resource from heat.engine import resource
from heat.openstack.common import log as logging from heat.openstack.common import log as logging
logger = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
class SwiftContainer(resource.Resource): class SwiftContainer(resource.Resource):
@ -126,10 +126,10 @@ class SwiftContainer(resource.Resource):
if self.properties.get(key) is not None: if self.properties.get(key) is not None:
container_headers[key] = self.properties[key] container_headers[key] = self.properties[key]
logger.debug('SwiftContainer create container %(container)s with ' LOG.debug('SwiftContainer create container %(container)s with '
'container headers %(container_headers)s and ' 'container headers %(container_headers)s and '
'account headers %(account_headers)s' % { 'account headers %(account_headers)s'
'container': container, % {'container': container,
'account_headers': account_headers, 'account_headers': account_headers,
'container_headers': container_headers}) 'container_headers': container_headers})
@ -142,13 +142,12 @@ class SwiftContainer(resource.Resource):
def handle_delete(self): def handle_delete(self):
"""Perform specified delete policy.""" """Perform specified delete policy."""
logger.debug('SwiftContainer delete container %s' % LOG.debug('SwiftContainer delete container %s' % self.resource_id)
self.resource_id)
if self.resource_id is not None: if self.resource_id is not None:
try: try:
self.swift().delete_container(self.resource_id) self.swift().delete_container(self.resource_id)
except clients.swiftclient.ClientException as ex: except clients.swiftclient.ClientException as ex:
logger.warn(_("Delete container failed: %s") % ex) LOG.warn(_("Delete container failed: %s") % ex)
def FnGetRefId(self): def FnGetRefId(self):
return unicode(self.resource_id) return unicode(self.resource_id)
@ -167,7 +166,7 @@ class SwiftContainer(resource.Resource):
try: try:
headers = self.swift().head_container(self.resource_id) headers = self.swift().head_container(self.resource_id)
except clients.swiftclient.ClientException as ex: except clients.swiftclient.ClientException as ex:
logger.warn(_("Head container failed: %s") % ex) LOG.warn(_("Head container failed: %s") % ex)
return None return None
else: else:
if key == self.OBJECT_COUNT: if key == self.OBJECT_COUNT:

View File

@ -19,7 +19,7 @@ from heat.engine import resource
from heat.engine import stack_user from heat.engine import stack_user
from heat.openstack.common import log as logging from heat.openstack.common import log as logging
logger = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
# #
# We are ignoring Groups as keystone does not support them. # We are ignoring Groups as keystone does not support them.
@ -76,22 +76,21 @@ class User(stack_user.StackUser):
# ignore the policy (don't reject it because we previously ignored # ignore the policy (don't reject it because we previously ignored
# and we don't want to break templates which previously worked # and we don't want to break templates which previously worked
if not isinstance(policy, basestring): if not isinstance(policy, basestring):
logger.warning(_("Ignoring policy %s, must be string " LOG.warning(_("Ignoring policy %s, must be string "
"resource name") % policy) "resource name") % policy)
continue continue
try: try:
policy_rsrc = self.stack[policy] policy_rsrc = self.stack[policy]
except KeyError: except KeyError:
logger.error(_("Policy %(policy)s does not exist in stack " LOG.error(_("Policy %(policy)s does not exist in stack "
"%(stack)s") % { "%(stack)s")
'policy': policy, % {'policy': policy, 'stack': self.stack.name})
'stack': self.stack.name})
return False return False
if not callable(getattr(policy_rsrc, 'access_allowed', None)): if not callable(getattr(policy_rsrc, 'access_allowed', None)):
logger.error(_("Policy %s is not an AccessPolicy resource") % LOG.error(_("Policy %s is not an AccessPolicy resource")
policy) % policy)
return False return False
return True return True
@ -116,7 +115,7 @@ class User(stack_user.StackUser):
policies = (self.properties[self.POLICIES] or []) policies = (self.properties[self.POLICIES] or [])
for policy in policies: for policy in policies:
if not isinstance(policy, basestring): if not isinstance(policy, basestring):
logger.warning(_("Ignoring policy %s, must be string " LOG.warning(_("Ignoring policy %s, must be string "
"resource name") % policy) "resource name") % policy)
continue continue
policy_rsrc = self.stack[policy] policy_rsrc = self.stack[policy]
@ -211,7 +210,7 @@ class AccessKey(resource.Resource):
user = self._get_user() user = self._get_user()
if user is None: if user is None:
logger.warning(_('Error deleting %s - user not found') % str(self)) LOG.warning(_('Error deleting %s - user not found') % str(self))
return return
user._delete_keypair() user._delete_keypair()
@ -221,9 +220,9 @@ class AccessKey(resource.Resource):
''' '''
if self._secret is None: if self._secret is None:
if not self.resource_id: if not self.resource_id:
logger.warn(_('could not get secret for %(username)s ' LOG.warn(_('could not get secret for %(username)s '
'Error:%(msg)s') % { 'Error:%(msg)s')
'username': self.properties[self.USER_NAME], % {'username': self.properties[self.USER_NAME],
'msg': "resource_id not yet set"}) 'msg': "resource_id not yet set"})
else: else:
# First try to retrieve the secret from resource_data, but # First try to retrieve the secret from resource_data, but
@ -241,8 +240,7 @@ class AccessKey(resource.Resource):
# And the ID of the v3 credential # And the ID of the v3 credential
self.data_set('credential_id', kp.id, redact=True) self.data_set('credential_id', kp.id, redact=True)
except Exception as ex: except Exception as ex:
logger.warn( LOG.warn(_('could not get secret for %(username)s '
_('could not get secret for %(username)s '
'Error:%(msg)s') % { 'Error:%(msg)s') % {
'username': self.properties[self.USER_NAME], 'username': self.properties[self.USER_NAME],
'msg': ex}) 'msg': ex})
@ -291,7 +289,7 @@ class AccessPolicy(resource.Resource):
for resource in resources: for resource in resources:
if resource not in self.stack: if resource not in self.stack:
msg = _("AccessPolicy resource %s not in stack") % resource msg = _("AccessPolicy resource %s not in stack") % resource
logger.error(msg) LOG.error(msg)
raise exception.StackValidationFailed(message=msg) raise exception.StackValidationFailed(message=msg)
def access_allowed(self, resource_name): def access_allowed(self, resource_name):

View File

@ -27,7 +27,7 @@ from heat.openstack.common import log as logging
volume_backups = try_import('cinderclient.v1.volume_backups') volume_backups = try_import('cinderclient.v1.volume_backups')
logger = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
class Volume(resource.Resource): class Volume(resource.Resource):
@ -153,7 +153,7 @@ class Volume(resource.Resource):
vol.get() vol.get()
if vol.status == 'in-use': if vol.status == 'in-use':
logger.warn(_('can not delete volume when in-use')) LOG.warn(_('can not delete volume when in-use'))
raise exception.Error(_('Volume in use')) raise exception.Error(_('Volume in use'))
vol.delete() vol.delete()
@ -210,7 +210,7 @@ class VolumeAttachTask(object):
def __call__(self): def __call__(self):
"""Return a co-routine which runs the task.""" """Return a co-routine which runs the task."""
logger.debug(str(self)) LOG.debug(str(self))
va = self.clients.nova().volumes.create_server_volume( va = self.clients.nova().volumes.create_server_volume(
server_id=self.server_id, server_id=self.server_id,
@ -221,15 +221,15 @@ class VolumeAttachTask(object):
vol = self.clients.cinder().volumes.get(self.volume_id) vol = self.clients.cinder().volumes.get(self.volume_id)
while vol.status == 'available' or vol.status == 'attaching': while vol.status == 'available' or vol.status == 'attaching':
logger.debug('%(name)s - volume status: %(status)s' % { LOG.debug('%(name)s - volume status: %(status)s'
'name': str(self), 'status': vol.status}) % {'name': str(self), 'status': vol.status})
yield yield
vol.get() vol.get()
if vol.status != 'in-use': if vol.status != 'in-use':
raise exception.Error(vol.status) raise exception.Error(vol.status)
logger.info(_('%s - complete') % str(self)) LOG.info(_('%s - complete') % str(self))
class VolumeDetachTask(object): class VolumeDetachTask(object):
@ -257,7 +257,7 @@ class VolumeDetachTask(object):
def __call__(self): def __call__(self):
"""Return a co-routine which runs the task.""" """Return a co-routine which runs the task."""
logger.debug(str(self)) LOG.debug(str(self))
server_api = self.clients.nova().volumes server_api = self.clients.nova().volumes
@ -269,7 +269,7 @@ class VolumeDetachTask(object):
except (clients.cinderclient.exceptions.NotFound, except (clients.cinderclient.exceptions.NotFound,
clients.novaclient.exceptions.BadRequest, clients.novaclient.exceptions.BadRequest,
clients.novaclient.exceptions.NotFound): clients.novaclient.exceptions.NotFound):
logger.warning(_('%s - volume not found') % str(self)) LOG.warning(_('%s - volume not found') % str(self))
return return
# detach the volume using volume_attachment # detach the volume using volume_attachment
@ -277,24 +277,24 @@ class VolumeDetachTask(object):
server_api.delete_server_volume(self.server_id, self.attachment_id) server_api.delete_server_volume(self.server_id, self.attachment_id)
except (clients.novaclient.exceptions.BadRequest, except (clients.novaclient.exceptions.BadRequest,
clients.novaclient.exceptions.NotFound) as e: clients.novaclient.exceptions.NotFound) as e:
logger.warning(_('%(res)s - %(err)s') % {'res': str(self), LOG.warning(_('%(res)s - %(err)s') % {'res': str(self),
'err': e}) 'err': e})
yield yield
try: try:
while vol.status in ('in-use', 'detaching'): while vol.status in ('in-use', 'detaching'):
logger.debug('%s - volume still in use' % str(self)) LOG.debug('%s - volume still in use' % str(self))
yield yield
vol.get() vol.get()
logger.info(_('%(name)s - status: %(status)s') % { LOG.info(_('%(name)s - status: %(status)s')
'name': str(self), 'status': vol.status}) % {'name': str(self), 'status': vol.status})
if vol.status != 'available': if vol.status != 'available':
raise exception.Error(vol.status) raise exception.Error(vol.status)
except clients.cinderclient.exceptions.NotFound: except clients.cinderclient.exceptions.NotFound:
logger.warning(_('%s - volume not found') % str(self)) LOG.warning(_('%s - volume not found') % str(self))
# The next check is needed for immediate reattachment when updating: # The next check is needed for immediate reattachment when updating:
# there might be some time between cinder marking volume as 'available' # there might be some time between cinder marking volume as 'available'
@ -308,12 +308,12 @@ class VolumeDetachTask(object):
return True return True
while server_has_attachment(self.server_id, self.attachment_id): while server_has_attachment(self.server_id, self.attachment_id):
logger.info(_("Server %(srv)s still has attachment %(att)s.") % LOG.info(_("Server %(srv)s still has attachment %(att)s.")
{'att': self.attachment_id, 'srv': self.server_id}) % {'att': self.attachment_id, 'srv': self.server_id})
yield yield
logger.info(_("Volume %(vol)s is detached from server %(srv)s") % LOG.info(_("Volume %(vol)s is detached from server %(srv)s")
{'vol': vol.id, 'srv': self.server_id}) % {'vol': vol.id, 'srv': self.server_id})
class VolumeAttachment(resource.Resource): class VolumeAttachment(resource.Resource):

View File

@ -24,7 +24,7 @@ from heat.engine import scheduler
from heat.engine import signal_responder from heat.engine import signal_responder
from heat.openstack.common import log as logging from heat.openstack.common import log as logging
logger = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
class WaitConditionHandle(signal_responder.SignalResponder): class WaitConditionHandle(signal_responder.SignalResponder):
@ -76,7 +76,7 @@ class WaitConditionHandle(signal_responder.SignalResponder):
if self._metadata_format_ok(new_metadata): if self._metadata_format_ok(new_metadata):
rsrc_metadata = self.metadata_get(refresh=True) rsrc_metadata = self.metadata_get(refresh=True)
if new_metadata['UniqueId'] in rsrc_metadata: if new_metadata['UniqueId'] in rsrc_metadata:
logger.warning(_("Overwriting Metadata item for UniqueId %s!") LOG.warning(_("Overwriting Metadata item for UniqueId %s!")
% new_metadata['UniqueId']) % new_metadata['UniqueId'])
safe_metadata = {} safe_metadata = {}
for k in ('Data', 'Reason', 'Status'): for k in ('Data', 'Reason', 'Status'):
@ -84,7 +84,7 @@ class WaitConditionHandle(signal_responder.SignalResponder):
rsrc_metadata.update({new_metadata['UniqueId']: safe_metadata}) rsrc_metadata.update({new_metadata['UniqueId']: safe_metadata})
self.metadata_set(rsrc_metadata) self.metadata_set(rsrc_metadata)
else: else:
logger.error(_("Metadata failed validation for %s") % self.name) LOG.error(_("Metadata failed validation for %s") % self.name)
raise ValueError(_("Metadata format invalid")) raise ValueError(_("Metadata format invalid"))
def get_status(self): def get_status(self):
@ -225,20 +225,20 @@ class WaitCondition(resource.Resource):
yield yield
except scheduler.Timeout: except scheduler.Timeout:
timeout = WaitConditionTimeout(self, handle) timeout = WaitConditionTimeout(self, handle)
logger.info(_('%(name)s Timed out (%(timeout)s)') % { LOG.info(_('%(name)s Timed out (%(timeout)s)')
'name': str(self), 'timeout': str(timeout)}) % {'name': str(self), 'timeout': str(timeout)})
raise timeout raise timeout
handle_status = handle.get_status() handle_status = handle.get_status()
if any(s != STATUS_SUCCESS for s in handle_status): if any(s != STATUS_SUCCESS for s in handle_status):
failure = WaitConditionFailure(self, handle) failure = WaitConditionFailure(self, handle)
logger.info(_('%(name)s Failed (%(failure)s)') % { LOG.info(_('%(name)s Failed (%(failure)s)')
'name': str(self), 'failure': str(failure)}) % {'name': str(self), 'failure': str(failure)})
raise failure raise failure
if len(handle_status) >= self.properties[self.COUNT]: if len(handle_status) >= self.properties[self.COUNT]:
logger.info(_("%s Succeeded") % str(self)) LOG.info(_("%s Succeeded") % str(self))
return return
def handle_create(self): def handle_create(self):
@ -285,8 +285,8 @@ class WaitCondition(resource.Resource):
meta = handle.metadata_get(refresh=True) meta = handle.metadata_get(refresh=True)
# Note, can't use a dict generator on python 2.6, hence: # Note, can't use a dict generator on python 2.6, hence:
res = dict([(k, meta[k]['Data']) for k in meta]) res = dict([(k, meta[k]['Data']) for k in meta])
logger.debug('%(name)s.GetAtt(%(key)s) == %(res)s' % LOG.debug('%(name)s.GetAtt(%(key)s) == %(res)s'
{'name': self.name, % {'name': self.name,
'key': key, 'key': key,
'res': res}) 'res': res})

View File

@ -23,7 +23,7 @@ from heat.openstack.common import excutils
from heat.openstack.common.gettextutils import _ from heat.openstack.common.gettextutils import _
from heat.openstack.common import log as logging from heat.openstack.common import log as logging
logger = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
# Whether TaskRunner._sleep actually does an eventlet sleep when called. # Whether TaskRunner._sleep actually does an eventlet sleep when called.
@ -128,7 +128,7 @@ class TaskRunner(object):
def _sleep(self, wait_time): def _sleep(self, wait_time):
"""Sleep for the specified number of seconds.""" """Sleep for the specified number of seconds."""
if ENABLE_SLEEP and wait_time is not None: if ENABLE_SLEEP and wait_time is not None:
logger.debug('%s sleeping' % str(self)) LOG.debug('%s sleeping' % str(self))
eventlet.sleep(wait_time) eventlet.sleep(wait_time)
def __call__(self, wait_time=1, timeout=None): def __call__(self, wait_time=1, timeout=None):
@ -151,7 +151,7 @@ class TaskRunner(object):
""" """
assert self._runner is None, "Task already started" assert self._runner is None, "Task already started"
logger.debug('%s starting' % str(self)) LOG.debug('%s starting' % str(self))
if timeout is not None: if timeout is not None:
self._timeout = Timeout(self, timeout) self._timeout = Timeout(self, timeout)
@ -163,7 +163,7 @@ class TaskRunner(object):
else: else:
self._runner = False self._runner = False
self._done = True self._done = True
logger.debug('%s done (not resumable)' % str(self)) LOG.debug('%s done (not resumable)' % str(self))
def step(self): def step(self):
""" """
@ -174,7 +174,7 @@ class TaskRunner(object):
assert self._runner is not None, "Task not started" assert self._runner is not None, "Task not started"
if self._timeout is not None and self._timeout.expired(): if self._timeout is not None and self._timeout.expired():
logger.info(_('%s timed out') % str(self)) LOG.info(_('%s timed out') % str(self))
try: try:
self._runner.throw(self._timeout) self._runner.throw(self._timeout)
@ -184,13 +184,13 @@ class TaskRunner(object):
# Clean up in case task swallows exception without exiting # Clean up in case task swallows exception without exiting
self.cancel() self.cancel()
else: else:
logger.debug('%s running' % str(self)) LOG.debug('%s running' % str(self))
try: try:
next(self._runner) next(self._runner)
except StopIteration: except StopIteration:
self._done = True self._done = True
logger.debug('%s complete' % str(self)) LOG.debug('%s complete' % str(self))
return self._done return self._done
@ -207,7 +207,7 @@ class TaskRunner(object):
def cancel(self): def cancel(self):
"""Cancel the task and mark it as done.""" """Cancel the task and mark it as done."""
if not self.done(): if not self.done():
logger.debug('%s cancelled' % str(self)) LOG.debug('%s cancelled' % str(self))
try: try:
if self.started(): if self.started():
self._runner.close() self._runner.close()

View File

@ -50,7 +50,7 @@ cfg.CONF.import_opt('engine_life_check_timeout', 'heat.common.config')
cfg.CONF.import_opt('max_resources_per_stack', 'heat.common.config') cfg.CONF.import_opt('max_resources_per_stack', 'heat.common.config')
cfg.CONF.import_opt('max_stacks_per_tenant', 'heat.common.config') cfg.CONF.import_opt('max_stacks_per_tenant', 'heat.common.config')
logger = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
def request_context(func): def request_context(func):
@ -197,13 +197,12 @@ class StackWatch(object):
# Retrieve the stored credentials & create context # Retrieve the stored credentials & create context
# Require tenant_safe=False to the stack_get to defeat tenant # Require tenant_safe=False to the stack_get to defeat tenant
# scoping otherwise we fail to retrieve the stack # scoping otherwise we fail to retrieve the stack
logger.debug("Periodic watcher task for stack %s" % sid) LOG.debug("Periodic watcher task for stack %s" % sid)
admin_context = context.get_admin_context() admin_context = context.get_admin_context()
stack = db_api.stack_get(admin_context, sid, tenant_safe=False, stack = db_api.stack_get(admin_context, sid, tenant_safe=False,
eager_load=True) eager_load=True)
if not stack: if not stack:
logger.error(_("Unable to retrieve stack %s for periodic task") % LOG.error(_("Unable to retrieve stack %s for periodic task") % sid)
sid)
return return
stack_context = EngineService.load_user_creds(stack.user_creds_id) stack_context = EngineService.load_user_creds(stack.user_creds_id)
@ -216,7 +215,7 @@ class StackWatch(object):
try: try:
wrs = db_api.watch_rule_get_all_by_stack(stack_context, sid) wrs = db_api.watch_rule_get_all_by_stack(stack_context, sid)
except Exception as ex: except Exception as ex:
logger.warn(_('periodic_task db error watch rule removed? %(ex)s') LOG.warn(_('periodic_task db error watch rule removed? %(ex)s')
% ex) % ex)
return return
@ -290,7 +289,7 @@ class EngineService(service.Service):
self.stack_watch = StackWatch(self.thread_group_mgr) self.stack_watch = StackWatch(self.thread_group_mgr)
self.listener = EngineListener(host, self.engine_id, self.listener = EngineListener(host, self.engine_id,
self.thread_group_mgr) self.thread_group_mgr)
logger.debug("Starting listener for engine %s" % self.engine_id) LOG.debug("Starting listener for engine %s" % self.engine_id)
self.listener.start() self.listener.start()
def start(self): def start(self):
@ -304,7 +303,7 @@ class EngineService(service.Service):
def stop(self): def stop(self):
# Stop rpc connection at first for preventing new requests # Stop rpc connection at first for preventing new requests
logger.info(_("Attempting to stop engine service...")) LOG.info(_("Attempting to stop engine service..."))
try: try:
self.conn.close() self.conn.close()
except Exception: except Exception:
@ -315,14 +314,14 @@ class EngineService(service.Service):
# Ingore dummy service task # Ingore dummy service task
if stack_id == cfg.CONF.periodic_interval: if stack_id == cfg.CONF.periodic_interval:
continue continue
logger.info(_("Waiting stack %s processing to be finished") LOG.info(_("Waiting stack %s processing to be finished")
% stack_id) % stack_id)
# Stop threads gracefully # Stop threads gracefully
self.thread_group_mgr.stop(stack_id, True) self.thread_group_mgr.stop(stack_id, True)
logger.info(_("Stack %s processing was finished") % stack_id) LOG.info(_("Stack %s processing was finished") % stack_id)
# Terminate the engine process # Terminate the engine process
logger.info(_("All threads were gone, terminating engine")) LOG.info(_("All threads were gone, terminating engine"))
super(EngineService, self).stop() super(EngineService, self).stop()
@staticmethod @staticmethod
@ -491,7 +490,7 @@ class EngineService(service.Service):
:param args: Request parameters/args passed from API :param args: Request parameters/args passed from API
""" """
logger.info(_('previewing stack %s') % stack_name) LOG.info(_('previewing stack %s') % stack_name)
tmpl = parser.Template(template, files=files) tmpl = parser.Template(template, files=files)
self._validate_new_stack(cnxt, stack_name, tmpl) self._validate_new_stack(cnxt, stack_name, tmpl)
@ -519,7 +518,7 @@ class EngineService(service.Service):
:param files: Files referenced from the template :param files: Files referenced from the template
:param args: Request parameters/args passed from API :param args: Request parameters/args passed from API
""" """
logger.info(_('template is %s') % template) LOG.info(_('template is %s') % template)
def _stack_create(stack): def _stack_create(stack):
# Create/Adopt a stack, and create the periodic task if successful # Create/Adopt a stack, and create the periodic task if successful
@ -533,8 +532,7 @@ class EngineService(service.Service):
# Schedule a periodic watcher task for this stack # Schedule a periodic watcher task for this stack
self.stack_watch.start_watch_task(stack.id, cnxt) self.stack_watch.start_watch_task(stack.id, cnxt)
else: else:
logger.warning(_("Stack create failed, status %s") % LOG.warning(_("Stack create failed, status %s") % stack.status)
stack.status)
tmpl = parser.Template(template, files=files) tmpl = parser.Template(template, files=files)
self._validate_new_stack(cnxt, stack_name, tmpl) self._validate_new_stack(cnxt, stack_name, tmpl)
@ -570,7 +568,7 @@ class EngineService(service.Service):
:param files: Files referenced from the template :param files: Files referenced from the template
:param args: Request parameters/args passed from API :param args: Request parameters/args passed from API
""" """
logger.info(_('template is %s') % template) LOG.info(_('template is %s') % template)
# Get the database representation of the existing stack # Get the database representation of the existing stack
db_stack = self._get_stack(cnxt, stack_identity) db_stack = self._get_stack(cnxt, stack_identity)
@ -620,7 +618,7 @@ class EngineService(service.Service):
:param template: Template of stack you want to create. :param template: Template of stack you want to create.
:param params: Stack Input Params :param params: Stack Input Params
""" """
logger.info(_('validate_template')) LOG.info(_('validate_template'))
if template is None: if template is None:
msg = _("No Template provided.") msg = _("No Template provided.")
return webob.exc.HTTPBadRequest(explanation=msg) return webob.exc.HTTPBadRequest(explanation=msg)
@ -711,7 +709,7 @@ class EngineService(service.Service):
return False return False
st = self._get_stack(cnxt, stack_identity) st = self._get_stack(cnxt, stack_identity)
logger.info(_('Deleting stack %s') % st.name) LOG.info(_('Deleting stack %s') % st.name)
stack = parser.Stack.load(cnxt, stack=st) stack = parser.Stack.load(cnxt, stack=st)
lock = stack_lock.StackLock(cnxt, stack, self.engine_id) lock = stack_lock.StackLock(cnxt, stack, self.engine_id)
@ -732,7 +730,7 @@ class EngineService(service.Service):
elif stack_lock.StackLock.engine_alive(cnxt, acquire_result): elif stack_lock.StackLock.engine_alive(cnxt, acquire_result):
stop_result = remote_stop(acquire_result) stop_result = remote_stop(acquire_result)
if stop_result is None: if stop_result is None:
logger.debug("Successfully stopped remote task on engine %s" LOG.debug("Successfully stopped remote task on engine %s"
% acquire_result) % acquire_result)
else: else:
raise exception.StopActionFailed(stack_name=stack.name, raise exception.StopActionFailed(stack_name=stack.name,
@ -764,7 +762,7 @@ class EngineService(service.Service):
:param stack_identity: Name of the stack you want to abandon. :param stack_identity: Name of the stack you want to abandon.
""" """
st = self._get_stack(cnxt, stack_identity) st = self._get_stack(cnxt, stack_identity)
logger.info(_('abandoning stack %s') % st.name) LOG.info(_('abandoning stack %s') % st.name)
stack = parser.Stack.load(cnxt, stack=st) stack = parser.Stack.load(cnxt, stack=st)
lock = stack_lock.StackLock(cnxt, stack, self.engine_id) lock = stack_lock.StackLock(cnxt, stack, self.engine_id)
acquire_result = lock.try_acquire() acquire_result = lock.try_acquire()
@ -896,8 +894,7 @@ class EngineService(service.Service):
if cfg.CONF.heat_stack_user_role in cnxt.roles: if cfg.CONF.heat_stack_user_role in cnxt.roles:
if not self._authorize_stack_user(cnxt, stack, resource_name): if not self._authorize_stack_user(cnxt, stack, resource_name):
logger.warning(_("Access denied to resource %s") LOG.warning(_("Access denied to resource %s") % resource_name)
% resource_name)
raise exception.Forbidden() raise exception.Forbidden()
if resource_name not in stack: if resource_name not in stack:
@ -977,7 +974,7 @@ class EngineService(service.Service):
Handle request to perform suspend action on a stack Handle request to perform suspend action on a stack
''' '''
def _stack_suspend(stack): def _stack_suspend(stack):
logger.debug("suspending stack %s" % stack.name) LOG.debug("suspending stack %s" % stack.name)
stack.suspend() stack.suspend()
s = self._get_stack(cnxt, stack_identity) s = self._get_stack(cnxt, stack_identity)
@ -992,7 +989,7 @@ class EngineService(service.Service):
Handle request to perform a resume action on a stack Handle request to perform a resume action on a stack
''' '''
def _stack_resume(stack): def _stack_resume(stack):
logger.debug("resuming stack %s" % stack.name) LOG.debug("resuming stack %s" % stack.name)
stack.resume() stack.resume()
s = self._get_stack(cnxt, stack_identity) s = self._get_stack(cnxt, stack_identity)
@ -1075,7 +1072,7 @@ class EngineService(service.Service):
try: try:
wrn = [w.name for w in db_api.watch_rule_get_all(cnxt)] wrn = [w.name for w in db_api.watch_rule_get_all(cnxt)]
except Exception as ex: except Exception as ex:
logger.warn(_('show_watch (all) db error %s') % ex) LOG.warn(_('show_watch (all) db error %s') % ex)
return return
wrs = [watchrule.WatchRule.load(cnxt, w) for w in wrn] wrs = [watchrule.WatchRule.load(cnxt, w) for w in wrn]
@ -1098,13 +1095,13 @@ class EngineService(service.Service):
# namespace/metric, but we will want this at some point # namespace/metric, but we will want this at some point
# for now, the API can query all metric data and filter locally # for now, the API can query all metric data and filter locally
if metric_namespace is not None or metric_name is not None: if metric_namespace is not None or metric_name is not None:
logger.error(_("Filtering by namespace/metric not yet supported")) LOG.error(_("Filtering by namespace/metric not yet supported"))
return return
try: try:
wds = db_api.watch_data_get_all(cnxt) wds = db_api.watch_data_get_all(cnxt)
except Exception as ex: except Exception as ex:
logger.warn(_('show_metric (all) db error %s') % ex) LOG.warn(_('show_metric (all) db error %s') % ex)
return return
result = [api.format_watch_data(w) for w in wds] result = [api.format_watch_data(w) for w in wds]

View File

@ -20,7 +20,7 @@ from heat.openstack.common.gettextutils import _
from heat.openstack.common import log as logging from heat.openstack.common import log as logging
logger = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
SIGNAL_TYPES = ( SIGNAL_TYPES = (
WAITCONDITION, SIGNAL WAITCONDITION, SIGNAL
@ -65,7 +65,7 @@ class SignalResponder(stack_user.StackUser):
secret_key = self.data().get('secret_key') secret_key = self.data().get('secret_key')
if not access_key or not secret_key: if not access_key or not secret_key:
logger.warning(_('Cannot generate signed url, ' LOG.warning(_('Cannot generate signed url, '
'no stored access/secret key')) 'no stored access/secret key'))
return return

View File

@ -24,7 +24,7 @@ from heat.openstack.common.rpc import proxy
cfg.CONF.import_opt('engine_life_check_timeout', 'heat.common.config') cfg.CONF.import_opt('engine_life_check_timeout', 'heat.common.config')
logger = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
class StackLock(object): class StackLock(object):
@ -66,20 +66,20 @@ class StackLock(object):
lock_engine_id = db_api.stack_lock_create(self.stack.id, lock_engine_id = db_api.stack_lock_create(self.stack.id,
self.engine_id) self.engine_id)
if lock_engine_id is None: if lock_engine_id is None:
logger.debug("Engine %(engine)s acquired lock on stack " LOG.debug("Engine %(engine)s acquired lock on stack "
"%(stack)s" % {'engine': self.engine_id, "%(stack)s" % {'engine': self.engine_id,
'stack': self.stack.id}) 'stack': self.stack.id})
return return
if lock_engine_id == self.engine_id or \ if lock_engine_id == self.engine_id or \
self.engine_alive(self.context, lock_engine_id): self.engine_alive(self.context, lock_engine_id):
logger.debug("Lock on stack %(stack)s is owned by engine " LOG.debug("Lock on stack %(stack)s is owned by engine "
"%(engine)s" % {'stack': self.stack.id, "%(engine)s" % {'stack': self.stack.id,
'engine': lock_engine_id}) 'engine': lock_engine_id})
raise exception.ActionInProgress(stack_name=self.stack.name, raise exception.ActionInProgress(stack_name=self.stack.name,
action=self.stack.action) action=self.stack.action)
else: else:
logger.info(_("Stale lock detected on stack %(stack)s. Engine " LOG.info(_("Stale lock detected on stack %(stack)s. Engine "
"%(engine)s will attempt to steal the lock") "%(engine)s will attempt to steal the lock")
% {'stack': self.stack.id, 'engine': self.engine_id}) % {'stack': self.stack.id, 'engine': self.engine_id})
@ -87,21 +87,21 @@ class StackLock(object):
self.engine_id) self.engine_id)
if result is None: if result is None:
logger.info(_("Engine %(engine)s successfully stole the lock " LOG.info(_("Engine %(engine)s successfully stole the lock "
"on stack %(stack)s") "on stack %(stack)s")
% {'engine': self.engine_id, % {'engine': self.engine_id,
'stack': self.stack.id}) 'stack': self.stack.id})
return return
elif result is True: elif result is True:
if retry: if retry:
logger.info(_("The lock on stack %(stack)s was released " LOG.info(_("The lock on stack %(stack)s was released "
"while engine %(engine)s was stealing it. " "while engine %(engine)s was stealing it. "
"Trying again") % {'stack': self.stack.id, "Trying again") % {'stack': self.stack.id,
'engine': self.engine_id}) 'engine': self.engine_id})
return self.acquire(retry=False) return self.acquire(retry=False)
else: else:
new_lock_engine_id = result new_lock_engine_id = result
logger.info(_("Failed to steal lock on stack %(stack)s. " LOG.info(_("Failed to steal lock on stack %(stack)s. "
"Engine %(engine)s stole the lock first") "Engine %(engine)s stole the lock first")
% {'stack': self.stack.id, % {'stack': self.stack.id,
'engine': new_lock_engine_id}) 'engine': new_lock_engine_id})
@ -114,9 +114,8 @@ class StackLock(object):
# Only the engine that owns the lock will be releasing it. # Only the engine that owns the lock will be releasing it.
result = db_api.stack_lock_release(stack_id, self.engine_id) result = db_api.stack_lock_release(stack_id, self.engine_id)
if result is True: if result is True:
logger.warning(_("Lock was already released on stack %s!") LOG.warning(_("Lock was already released on stack %s!") % stack_id)
% stack_id)
else: else:
logger.debug("Engine %(engine)s released lock on stack " LOG.debug("Engine %(engine)s released lock on stack "
"%(stack)s" % {'engine': self.engine_id, "%(stack)s" % {'engine': self.engine_id,
'stack': stack_id}) 'stack': stack_id})

View File

@ -22,7 +22,7 @@ from heat.engine import scheduler
from heat.openstack.common.gettextutils import _ from heat.openstack.common.gettextutils import _
from heat.openstack.common import log as logging from heat.openstack.common import log as logging
logger = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
class StackResource(resource.Resource): class StackResource(resource.Resource):
@ -103,7 +103,7 @@ class StackResource(resource.Resource):
params = self.child_params() params = self.child_params()
except NotImplementedError: except NotImplementedError:
not_implemented_msg = _("Preview of '%s' not yet implemented") not_implemented_msg = _("Preview of '%s' not yet implemented")
logger.warning(not_implemented_msg % self.__class__.__name__) LOG.warning(not_implemented_msg % self.__class__.__name__)
return self return self
self._validate_nested_resources(template) self._validate_nested_resources(template)
@ -240,7 +240,7 @@ class StackResource(resource.Resource):
try: try:
stack = self.nested() stack = self.nested()
except exception.NotFound: except exception.NotFound:
logger.info(_("Stack not found to delete")) LOG.info(_("Stack not found to delete"))
else: else:
if stack is not None: if stack is not None:
delete_task = scheduler.TaskRunner(stack.delete) delete_task = scheduler.TaskRunner(stack.delete)

View File

@ -19,7 +19,7 @@ from heat.openstack.common.gettextutils import _
from heat.openstack.common import log as logging from heat.openstack.common import log as logging
logger = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
class StackUser(resource.Resource): class StackUser(resource.Resource):
@ -80,7 +80,7 @@ class StackUser(resource.Resource):
# compatibility with resources created before the migration # compatibility with resources created before the migration
# to stack_user.StackUser domain users. After an appropriate # to stack_user.StackUser domain users. After an appropriate
# transitional period, this should be removed. # transitional period, this should be removed.
logger.warning(_('Reverting to legacy user delete path')) LOG.warning(_('Reverting to legacy user delete path'))
try: try:
self.keystone().delete_stack_user(user_id) self.keystone().delete_stack_user(user_id)
except kc_exception.NotFound: except kc_exception.NotFound:

View File

@ -20,7 +20,7 @@ from heat.db import api as db_api
from heat.engine import plugin_manager from heat.engine import plugin_manager
from heat.openstack.common import log as logging from heat.openstack.common import log as logging
logger = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
__all__ = ['Template'] __all__ = ['Template']
@ -191,7 +191,7 @@ class Template(collections.Mapping):
# check resources # check resources
tmpl_resources = self[self.RESOURCES] tmpl_resources = self[self.RESOURCES]
if not tmpl_resources: if not tmpl_resources:
logger.warn(_('Template does not contain any resources, so ' LOG.warn(_('Template does not contain any resources, so '
'the template would not really do anything when ' 'the template would not really do anything when '
'being instantiated.')) 'being instantiated.'))

View File

@ -20,7 +20,7 @@ from heat.engine import scheduler
from heat.openstack.common.gettextutils import _ from heat.openstack.common.gettextutils import _
from heat.openstack.common import log as logging from heat.openstack.common import log as logging
logger = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
class StackUpdate(object): class StackUpdate(object):
@ -76,7 +76,7 @@ class StackUpdate(object):
def _remove_backup_resource(self, prev_res): def _remove_backup_resource(self, prev_res):
if prev_res.state not in ((prev_res.INIT, prev_res.COMPLETE), if prev_res.state not in ((prev_res.INIT, prev_res.COMPLETE),
(prev_res.DELETE, prev_res.COMPLETE)): (prev_res.DELETE, prev_res.COMPLETE)):
logger.debug("Deleting backup resource %s" % prev_res.name) LOG.debug("Deleting backup resource %s" % prev_res.name)
yield prev_res.destroy() yield prev_res.destroy()
@staticmethod @staticmethod
@ -100,18 +100,17 @@ class StackUpdate(object):
# Swap in the backup resource if it is in a valid state, # Swap in the backup resource if it is in a valid state,
# instead of creating a new resource # instead of creating a new resource
if prev_res.status == prev_res.COMPLETE: if prev_res.status == prev_res.COMPLETE:
logger.debug("Swapping in backup Resource %s" % LOG.debug("Swapping in backup Resource %s" % res_name)
res_name)
self._exchange_stacks(self.existing_stack[res_name], self._exchange_stacks(self.existing_stack[res_name],
prev_res) prev_res)
return return
logger.debug("Deleting backup Resource %s" % res_name) LOG.debug("Deleting backup Resource %s" % res_name)
yield prev_res.destroy() yield prev_res.destroy()
# Back up existing resource # Back up existing resource
if res_name in self.existing_stack: if res_name in self.existing_stack:
logger.debug("Backing up existing Resource %s" % res_name) LOG.debug("Backing up existing Resource %s" % res_name)
existing_res = self.existing_stack[res_name] existing_res = self.existing_stack[res_name]
self.previous_stack[res_name] = existing_res self.previous_stack[res_name] = existing_res
existing_res.state_set(existing_res.UPDATE, existing_res.COMPLETE) existing_res.state_set(existing_res.UPDATE, existing_res.COMPLETE)
@ -131,9 +130,9 @@ class StackUpdate(object):
except resource.UpdateReplace: except resource.UpdateReplace:
pass pass
else: else:
logger.info(_("Resource %(res_name)s for stack %(stack_name)s" LOG.info(_("Resource %(res_name)s for stack %(stack_name)s "
" updated") % { "updated")
'res_name': res_name, % {'res_name': res_name,
'stack_name': self.existing_stack.name}) 'stack_name': self.existing_stack.name})
return return

View File

@ -23,7 +23,7 @@ from heat.openstack.common import log as logging
from heat.openstack.common import timeutils from heat.openstack.common import timeutils
from heat.rpc import api as rpc_api from heat.rpc import api as rpc_api
logger = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
class WatchRule(object): class WatchRule(object):
@ -75,9 +75,8 @@ class WatchRule(object):
try: try:
watch = db_api.watch_rule_get_by_name(context, watch_name) watch = db_api.watch_rule_get_by_name(context, watch_name)
except Exception as ex: except Exception as ex:
logger.warn(_('WatchRule.load (%(watch_name)s) db error ' LOG.warn(_('WatchRule.load (%(watch_name)s) db error '
'%(ex)s') % { '%(ex)s') % {'watch_name': watch_name, 'ex': ex})
'watch_name': watch_name, 'ex': ex})
if watch is None: if watch is None:
raise exception.WatchRuleNotFound(watch_name=watch_name) raise exception.WatchRuleNotFound(watch_name=watch_name)
else: else:
@ -210,7 +209,7 @@ class WatchRule(object):
data = 0 data = 0
for d in self.watch_data: for d in self.watch_data:
if d.created_at < self.now - self.timeperiod: if d.created_at < self.now - self.timeperiod:
logger.debug('ignoring %s' % str(d.data)) LOG.debug('ignoring %s' % str(d.data))
continue continue
data = data + float(d.data[self.rule['MetricName']]['Value']) data = data + float(d.data[self.rule['MetricName']]['Value'])
@ -247,14 +246,13 @@ class WatchRule(object):
return actions return actions
def rule_actions(self, new_state): def rule_actions(self, new_state):
logger.info(_('WATCH: stack:%(stack)s, watch_name:%(watch_name)s, ' LOG.info(_('WATCH: stack:%(stack)s, watch_name:%(watch_name)s, '
'new_state:%(new_state)s'), {'stack': self.stack_id, 'new_state:%(new_state)s'), {'stack': self.stack_id,
'watch_name': self.name, 'watch_name': self.name,
'new_state': new_state}) 'new_state': new_state})
actions = [] actions = []
if self.ACTION_MAP[new_state] not in self.rule: if self.ACTION_MAP[new_state] not in self.rule:
logger.info(_('no action for new state %s'), LOG.info(_('no action for new state %s'), new_state)
new_state)
else: else:
s = db_api.stack_get(self.context, self.stack_id, s = db_api.stack_get(self.context, self.stack_id,
eager_load=True) eager_load=True)
@ -264,7 +262,7 @@ class WatchRule(object):
for refid in self.rule[self.ACTION_MAP[new_state]]: for refid in self.rule[self.ACTION_MAP[new_state]]:
actions.append(stack.resource_by_refid(refid).signal) actions.append(stack.resource_by_refid(refid).signal)
else: else:
logger.warning(_("Could not process watch state %s for stack") LOG.warning(_("Could not process watch state %s for stack")
% new_state) % new_state)
return actions return actions
@ -285,7 +283,7 @@ class WatchRule(object):
dims = dims[0] dims = dims[0]
sample['resource_metadata'] = dims sample['resource_metadata'] = dims
sample['resource_id'] = dims.get('InstanceId') sample['resource_id'] = dims.get('InstanceId')
logger.debug('new sample:%(k)s data:%(sample)s' % { LOG.debug('new sample:%(k)s data:%(sample)s' % {
'k': k, 'sample': sample}) 'k': k, 'sample': sample})
clients.ceilometer().samples.create(**sample) clients.ceilometer().samples.create(**sample)
@ -298,7 +296,7 @@ class WatchRule(object):
return return
if self.state == self.SUSPENDED: if self.state == self.SUSPENDED:
logger.debug('Ignoring metric data for %s, SUSPENDED state' LOG.debug('Ignoring metric data for %s, SUSPENDED state'
% self.name) % self.name)
return [] return []
@ -308,9 +306,9 @@ class WatchRule(object):
# options, e.g --haproxy try to push multiple metrics when we # options, e.g --haproxy try to push multiple metrics when we
# actually only care about one (the one we're alarming on) # actually only care about one (the one we're alarming on)
# so just ignore any data which doesn't contain MetricName # so just ignore any data which doesn't contain MetricName
logger.debug('Ignoring metric data (only accept %(metric)s) ' LOG.debug('Ignoring metric data (only accept %(metric)s) '
': %(data)s' % { ': %(data)s' % {'metric': self.rule['MetricName'],
'metric': self.rule['MetricName'], 'data': data}) 'data': data})
return return
watch_data = { watch_data = {
@ -318,7 +316,7 @@ class WatchRule(object):
'watch_rule_id': self.id 'watch_rule_id': self.id
} }
wd = db_api.watch_data_create(None, watch_data) wd = db_api.watch_data_create(None, watch_data)
logger.debug('new watch:%(name)s data:%(data)s' LOG.debug('new watch:%(name)s data:%(data)s'
% {'name': self.name, 'data': str(wd.data)}) % {'name': self.name, 'data': str(wd.data)})
def state_set(self, state): def state_set(self, state):
@ -344,14 +342,14 @@ class WatchRule(object):
if state != self.state: if state != self.state:
actions = self.rule_actions(state) actions = self.rule_actions(state)
if actions: if actions:
logger.debug("Overriding state %(self_state)s for watch " LOG.debug("Overriding state %(self_state)s for watch "
"%(name)s with %(state)s" % { "%(name)s with %(state)s"
'self_state': self.state, 'name': self.name, % {'self_state': self.state, 'name': self.name,
'state': state}) 'state': state})
else: else:
logger.warning(_("Unable to override state %(state)s for " LOG.warning(_("Unable to override state %(state)s for "
"watch %(name)s") % { "watch %(name)s") % {'state': self.state,
'state': self.state, 'name': self.name}) 'name': self.name})
return actions return actions

View File

@ -35,9 +35,9 @@ TEST_DEFAULT_LOGLEVELS = {'migrate': logging.WARN}
class FakeLogMixin: class FakeLogMixin:
def setup_logging(self): def setup_logging(self):
# Assign default logs to self.logger so we can still # Assign default logs to self.LOG so we can still
# assert on heat logs. # assert on heat logs.
self.logger = self.useFixture( self.LOG = self.useFixture(
fixtures.FakeLogger(level=logging.DEBUG)) fixtures.FakeLogger(level=logging.DEBUG))
base_list = set([nlog.split('.')[0] base_list = set([nlog.split('.')[0]
for nlog in logging.Logger.manager.loggerDict]) for nlog in logging.Logger.manager.loggerDict])

View File

@ -17,7 +17,7 @@ from heat.engine import stack_user
from heat.openstack.common.gettextutils import _ from heat.openstack.common.gettextutils import _
from heat.openstack.common import log as logging from heat.openstack.common import log as logging
logger = logging.getLogger(__name__) LOG = logging.getLogger(__name__)
class GenericResource(resource.Resource): class GenericResource(resource.Resource):
@ -29,26 +29,26 @@ class GenericResource(resource.Resource):
'Foo': 'Another generic attribute'} 'Foo': 'Another generic attribute'}
def handle_create(self): def handle_create(self):
logger.warning(_('Creating generic resource (Type "%s")') % LOG.warning(_('Creating generic resource (Type "%s")') %
self.type()) self.type())
def handle_update(self, json_snippet, tmpl_diff, prop_diff): def handle_update(self, json_snippet, tmpl_diff, prop_diff):
logger.warning(_('Updating generic resource (Type "%s")') % LOG.warning(_('Updating generic resource (Type "%s")') %
self.type()) self.type())
def handle_delete(self): def handle_delete(self):
logger.warning(_('Deleting generic resource (Type "%s")') % LOG.warning(_('Deleting generic resource (Type "%s")') %
self.type()) self.type())
def _resolve_attribute(self, name): def _resolve_attribute(self, name):
return self.name return self.name
def handle_suspend(self): def handle_suspend(self):
logger.warning(_('Suspending generic resource (Type "%s")') % LOG.warning(_('Suspending generic resource (Type "%s")') %
self.type()) self.type())
def handle_resume(self): def handle_resume(self):
logger.warning(_('Resuming generic resource (Type "%s")') % LOG.warning(_('Resuming generic resource (Type "%s")') %
self.type()) self.type())
@ -129,7 +129,7 @@ class SignalResource(signal_responder.SignalResponder):
msg = _('Cannot signal resource during %s') % self.action msg = _('Cannot signal resource during %s') % self.action
raise Exception(msg) raise Exception(msg)
logger.warning(_('Signaled resource (Type "%(type)s") %(details)s') LOG.warning(_('Signaled resource (Type "%(type)s") %(details)s')
% {'type': self.type(), 'details': details}) % {'type': self.type(), 'details': details})
def _resolve_attribute(self, name): def _resolve_attribute(self, name):

View File

@ -177,7 +177,7 @@ class EnvironmentDuplicateTest(common.HeatTestCase):
replace_log = 'Changing %s from %s to %s' % ('OS::Test::Dummy', replace_log = 'Changing %s from %s to %s' % ('OS::Test::Dummy',
'test.yaml', 'test.yaml',
self.resource_type) self.resource_type)
self.assertNotIn(replace_log, self.logger.output) self.assertNotIn(replace_log, self.LOG.output)
env_test = {u'resource_registry': { env_test = {u'resource_registry': {
u'OS::Test::Dummy': self.resource_type}} u'OS::Test::Dummy': self.resource_type}}
env.load(env_test) env.load(env_test)
@ -186,9 +186,9 @@ class EnvironmentDuplicateTest(common.HeatTestCase):
# should return exactly the same object. # should return exactly the same object.
self.assertIs(info, env.get_resource_info('OS::Test::Dummy', self.assertIs(info, env.get_resource_info('OS::Test::Dummy',
'my_fip')) 'my_fip'))
self.assertNotIn(replace_log, self.logger.output) self.assertNotIn(replace_log, self.LOG.output)
else: else:
self.assertIn(replace_log, self.logger.output) self.assertIn(replace_log, self.LOG.output)
self.assertNotEqual(info, self.assertNotEqual(info,
env.get_resource_info('OS::Test::Dummy', env.get_resource_info('OS::Test::Dummy',
'my_fip')) 'my_fip'))

View File

@ -259,7 +259,7 @@ class ResourceExceptionHandlingTest(HeatTestCase):
None) None)
e = self.assertRaises(self.exception_catch, resource, request) e = self.assertRaises(self.exception_catch, resource, request)
e = e.exc if hasattr(e, 'exc') else e e = e.exc if hasattr(e, 'exc') else e
self.assertNotIn(str(e), self.logger.output) self.assertNotIn(str(e), self.LOG.output)
class JSONRequestDeserializerTest(HeatTestCase): class JSONRequestDeserializerTest(HeatTestCase):