Replace variable 'logger' with 'LOG'
Commit https://review.openstack.org/#/c/94255/1 introduces a check for translating log messages. This check uses the name 'LOG' in its pattern, so the Heat code should also use this name instead of 'logger'. The same name is also used in other projects. Change-Id: Iba24c7eb1c13e68a91d090d7bcdbcb40d6e16071 Partial-Bug: #1321283
This commit is contained in:
parent
2d37843d13
commit
ecd8d3999d
@ -20,7 +20,7 @@ from heat.engine import resource
|
||||
from heat.openstack.common.gettextutils import _
|
||||
from heat.openstack.common import log as logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
DOCKER_INSTALLED = False
|
||||
# conditionally import so tests can work without having the dependency
|
||||
@ -329,5 +329,5 @@ def available_resource_mapping():
|
||||
if DOCKER_INSTALLED:
|
||||
return resource_mapping()
|
||||
else:
|
||||
logger.warn(_("Docker plug-in loaded, but docker lib not installed."))
|
||||
LOG.warn(_("Docker plug-in loaded, but docker lib not installed."))
|
||||
return {}
|
||||
|
@ -23,8 +23,8 @@ from heat.openstack.common.gettextutils import _
|
||||
from heat.openstack.common import importutils
|
||||
from heat.openstack.common import log as logging
|
||||
|
||||
logger = logging.getLogger('heat.common.keystoneclient')
|
||||
logger.info(_("Keystone V2 loaded"))
|
||||
LOG = logging.getLogger('heat.common.keystoneclient')
|
||||
LOG.info(_("Keystone V2 loaded"))
|
||||
|
||||
|
||||
class KeystoneClientV2(object):
|
||||
@ -86,7 +86,7 @@ class KeystoneClientV2(object):
|
||||
kwargs['tenant_name'] = self.context.tenant
|
||||
kwargs['tenant_id'] = self.context.tenant_id
|
||||
else:
|
||||
logger.error(_("Keystone v2 API connection failed, no password "
|
||||
LOG.error(_("Keystone v2 API connection failed, no password "
|
||||
"or auth_token!"))
|
||||
raise exception.AuthorizationFailure()
|
||||
kwargs['cacert'] = self._get_client_option('ca_file')
|
||||
@ -101,7 +101,7 @@ class KeystoneClientV2(object):
|
||||
if auth_kwargs:
|
||||
# Sanity check
|
||||
if not client.auth_ref.trust_scoped:
|
||||
logger.error(_("v2 trust token re-scoping failed!"))
|
||||
LOG.error(_("v2 trust token re-scoping failed!"))
|
||||
raise exception.AuthorizationFailure()
|
||||
# All OK so update the context with the token
|
||||
self.context.auth_token = client.auth_ref.auth_token
|
||||
@ -109,7 +109,7 @@ class KeystoneClientV2(object):
|
||||
# Ensure the v2 API we're using is not impacted by keystone
|
||||
# bug #1239303, otherwise we can't trust the user_id
|
||||
if self.context.trustor_user_id != client.auth_ref.user_id:
|
||||
logger.error("Trust impersonation failed, bug #1239303 "
|
||||
LOG.error("Trust impersonation failed, bug #1239303 "
|
||||
"suspected, you may need a newer keystone")
|
||||
raise exception.AuthorizationFailure()
|
||||
|
||||
@ -146,7 +146,7 @@ class KeystoneClientV2(object):
|
||||
Returns the keystone ID of the resulting user
|
||||
"""
|
||||
if(len(username) > 64):
|
||||
logger.warning(_("Truncating the username %s to the last 64 "
|
||||
LOG.warning(_("Truncating the username %s to the last 64 "
|
||||
"characters.") % username)
|
||||
#get the last 64 characters of the username
|
||||
username = username[-64:]
|
||||
@ -165,13 +165,14 @@ class KeystoneClientV2(object):
|
||||
if r.name == cfg.CONF.heat_stack_user_role]
|
||||
if len(stack_user_role) == 1:
|
||||
role_id = stack_user_role[0]
|
||||
logger.debug("Adding user %(user)s to role %(role)s" % {
|
||||
'user': user.id, 'role': role_id})
|
||||
LOG.debug("Adding user %(user)s to role %(role)s"
|
||||
% {'user': user.id, 'role': role_id})
|
||||
self.client.roles.add_user_role(user.id, role_id,
|
||||
self.context.tenant_id)
|
||||
else:
|
||||
logger.error(_("Failed to add user %(user)s to role %(role)s, "
|
||||
"check role exists!") % {'user': username,
|
||||
LOG.error(_("Failed to add user %(user)s to role %(role)s, "
|
||||
"check role exists!")
|
||||
% {'user': username,
|
||||
'role': cfg.CONF.heat_stack_user_role})
|
||||
|
||||
return user.id
|
||||
|
@ -16,13 +16,13 @@
|
||||
from heat.engine import clients
|
||||
from heat.openstack.common import log as logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
try:
|
||||
from marconiclient.queues.v1 import client as marconiclient
|
||||
except ImportError:
|
||||
marconiclient = None
|
||||
logger.info(_('marconiclient not available'))
|
||||
LOG.info(_('marconiclient not available'))
|
||||
|
||||
|
||||
class Clients(clients.OpenStackClients):
|
||||
@ -39,7 +39,7 @@ class Clients(clients.OpenStackClients):
|
||||
|
||||
con = self.context
|
||||
if self.auth_token is None:
|
||||
logger.error(_("Marconi connection failed, no auth_token!"))
|
||||
LOG.error(_("Marconi connection failed, no auth_token!"))
|
||||
return None
|
||||
|
||||
opts = {
|
||||
|
@ -18,7 +18,7 @@ from heat.engine import resource
|
||||
from heat.openstack.common.gettextutils import _
|
||||
from heat.openstack.common import log as logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class NovaFlavor(resource.Resource):
|
||||
@ -102,7 +102,7 @@ class NovaFlavor(resource.Resource):
|
||||
try:
|
||||
self.nova().flavors.delete(self.resource_id)
|
||||
except nova_exceptions.NotFound:
|
||||
logger.debug(
|
||||
LOG.debug(
|
||||
_('Could not find flavor %s.') % self.resource_id)
|
||||
|
||||
self.resource_id_set(None)
|
||||
|
@ -20,23 +20,23 @@ from heat.engine import clients
|
||||
from heat.openstack.common.gettextutils import _
|
||||
from heat.openstack.common import log as logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
try:
|
||||
import pyrax
|
||||
except ImportError:
|
||||
logger.info(_('pyrax not available'))
|
||||
LOG.info(_('pyrax not available'))
|
||||
|
||||
try:
|
||||
from swiftclient import client as swiftclient
|
||||
except ImportError:
|
||||
swiftclient = None
|
||||
logger.info(_('swiftclient not available'))
|
||||
LOG.info(_('swiftclient not available'))
|
||||
try:
|
||||
from ceilometerclient import client as ceilometerclient
|
||||
except ImportError:
|
||||
ceilometerclient = None
|
||||
logger.info(_('ceilometerclient not available'))
|
||||
LOG.info(_('ceilometerclient not available'))
|
||||
|
||||
cloud_opts = [
|
||||
cfg.StrOpt('region_name',
|
||||
@ -111,8 +111,7 @@ class Clients(clients.OpenStackClients):
|
||||
def __authenticate(self):
|
||||
pyrax.set_setting("identity_type", "keystone")
|
||||
pyrax.set_setting("auth_endpoint", self.context.auth_url)
|
||||
logger.info(_("Authenticating username:%s") %
|
||||
self.context.username)
|
||||
LOG.info(_("Authenticating username:%s") % self.context.username)
|
||||
self.pyrax = pyrax.auth_with_token(self.context.auth_token,
|
||||
tenant_id=self.context.tenant_id,
|
||||
tenant_name=self.context.tenant,
|
||||
@ -120,5 +119,5 @@ class Clients(clients.OpenStackClients):
|
||||
or None))
|
||||
if not self.pyrax:
|
||||
raise exception.AuthorizationFailure("No services available.")
|
||||
logger.info(_("User %s authenticated successfully.")
|
||||
LOG.info(_("User %s authenticated successfully.")
|
||||
% self.context.username)
|
||||
|
@ -30,7 +30,7 @@ except ImportError:
|
||||
|
||||
PYRAX_INSTALLED = False
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class CloudDns(resource.Resource):
|
||||
@ -153,7 +153,7 @@ class CloudDns(resource.Resource):
|
||||
"""Create a Rackspace CloudDns Instance."""
|
||||
# There is no check_create_complete as the pyrax create for DNS is
|
||||
# synchronous.
|
||||
logger.debug("CloudDns handle_create called.")
|
||||
LOG.debug("CloudDns handle_create called.")
|
||||
args = dict((k, v) for k, v in self.properties.items())
|
||||
for rec in args[self.RECORDS] or {}:
|
||||
# only pop the priority for the correct types
|
||||
@ -165,7 +165,7 @@ class CloudDns(resource.Resource):
|
||||
|
||||
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
|
||||
"""Update a Rackspace CloudDns Instance."""
|
||||
logger.debug("CloudDns handle_update called.")
|
||||
LOG.debug("CloudDns handle_update called.")
|
||||
if not self.resource_id:
|
||||
raise exception.Error(_('Update called on a non-existent domain'))
|
||||
if prop_diff:
|
||||
@ -188,7 +188,7 @@ class CloudDns(resource.Resource):
|
||||
|
||||
def handle_delete(self):
|
||||
"""Delete a Rackspace CloudDns Instance."""
|
||||
logger.debug("CloudDns handle_delete called.")
|
||||
LOG.debug("CloudDns handle_delete called.")
|
||||
if self.resource_id:
|
||||
try:
|
||||
dom = self.cloud_dns().get(self.resource_id)
|
||||
|
@ -35,7 +35,7 @@ except ImportError:
|
||||
|
||||
PYRAX_INSTALLED = False
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class LoadbalancerBuildError(exception.HeatException):
|
||||
@ -493,7 +493,7 @@ class CloudLoadBalancer(resource.Resource):
|
||||
|
||||
lb_name = (self.properties.get(self.NAME) or
|
||||
self.physical_resource_name())
|
||||
logger.debug("Creating loadbalancer: %s" % {lb_name: lb_body})
|
||||
LOG.debug("Creating loadbalancer: %s" % {lb_name: lb_body})
|
||||
loadbalancer = self.clb.create(lb_name, **lb_body)
|
||||
self.resource_id_set(str(loadbalancer.id))
|
||||
|
||||
@ -621,7 +621,7 @@ class CloudLoadBalancer(resource.Resource):
|
||||
raise exception.InvalidTemplateAttribute(resource=self.name,
|
||||
key=key)
|
||||
function = attribute_function[key]
|
||||
logger.info(_('%(name)s.GetAtt(%(key)s) == %(function)s'),
|
||||
LOG.info(_('%(name)s.GetAtt(%(key)s) == %(function)s'),
|
||||
{'name': self.name, 'key': key, 'function': function})
|
||||
return unicode(function)
|
||||
|
||||
|
@ -28,7 +28,7 @@ try:
|
||||
except ImportError:
|
||||
PYRAX_INSTALLED = False
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class CloudServer(server.Server):
|
||||
@ -131,12 +131,12 @@ class CloudServer(server.Server):
|
||||
self._managed_cloud_started_event_sent = True
|
||||
|
||||
if 'rax_service_level_automation' not in server.metadata:
|
||||
logger.debug("Managed Cloud server does not have the "
|
||||
LOG.debug("Managed Cloud server does not have the "
|
||||
"rax_service_level_automation metadata tag yet")
|
||||
return False
|
||||
|
||||
mc_status = server.metadata['rax_service_level_automation']
|
||||
logger.debug("Managed Cloud automation status: %s" % mc_status)
|
||||
LOG.debug("Managed Cloud automation status: %s" % mc_status)
|
||||
|
||||
if mc_status == self.MC_STATUS_IN_PROGRESS:
|
||||
return False
|
||||
@ -160,12 +160,12 @@ class CloudServer(server.Server):
|
||||
self._rack_connect_started_event_sent = True
|
||||
|
||||
if 'rackconnect_automation_status' not in server.metadata:
|
||||
logger.debug("RackConnect server does not have the "
|
||||
LOG.debug("RackConnect server does not have the "
|
||||
"rackconnect_automation_status metadata tag yet")
|
||||
return False
|
||||
|
||||
rc_status = server.metadata['rackconnect_automation_status']
|
||||
logger.debug("RackConnect automation status: %s" % rc_status)
|
||||
LOG.debug("RackConnect automation status: %s" % rc_status)
|
||||
|
||||
if rc_status == self.RC_STATUS_DEPLOYING:
|
||||
return False
|
||||
@ -182,8 +182,7 @@ class CloudServer(server.Server):
|
||||
reason = server.metadata.get('rackconnect_unprocessable_reason',
|
||||
None)
|
||||
if reason is not None:
|
||||
logger.warning(_("RackConnect unprocessable reason: %s") %
|
||||
reason)
|
||||
LOG.warning(_("RackConnect unprocessable reason: %s") % reason)
|
||||
|
||||
msg = _("RackConnect automation has completed")
|
||||
self._add_event(self.action, self.status, msg)
|
||||
|
@ -35,7 +35,7 @@ else:
|
||||
def resource_mapping():
|
||||
return {'Rackspace::Cloud::Network': CloudNetwork}
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class CloudNetwork(resource.Resource):
|
||||
@ -93,8 +93,8 @@ class CloudNetwork(resource.Resource):
|
||||
try:
|
||||
self._network = self.cloud_networks().get(self.resource_id)
|
||||
except NotFound:
|
||||
logger.warn(_("Could not find network %s but resource id "
|
||||
"is set.") % self.resource_id)
|
||||
LOG.warn(_("Could not find network %s but resource id is set.")
|
||||
% self.resource_id)
|
||||
return self._network
|
||||
|
||||
def cloud_networks(self):
|
||||
|
@ -28,7 +28,7 @@ from heat.openstack.common import log as logging
|
||||
|
||||
gettextutils.install('heat')
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
opts = [
|
||||
@ -125,10 +125,10 @@ class EC2Token(wsgi.Middleware):
|
||||
last_failure = None
|
||||
for auth_uri in self._conf_get('allowed_auth_uris'):
|
||||
try:
|
||||
logger.debug("Attempt authorize on %s" % auth_uri)
|
||||
LOG.debug("Attempt authorize on %s" % auth_uri)
|
||||
return self._authorize(req, auth_uri)
|
||||
except HeatAPIException as e:
|
||||
logger.debug("Authorize failed: %s" % e.__class__)
|
||||
LOG.debug("Authorize failed: %s" % e.__class__)
|
||||
last_failure = e
|
||||
raise last_failure or exception.HeatAccessDeniedError()
|
||||
|
||||
@ -138,14 +138,14 @@ class EC2Token(wsgi.Middleware):
|
||||
# here so that we can use both authentication methods.
|
||||
# Returning here just means the user didn't supply AWS
|
||||
# authentication and we'll let the app try native keystone next.
|
||||
logger.info(_("Checking AWS credentials.."))
|
||||
LOG.info(_("Checking AWS credentials.."))
|
||||
|
||||
signature = self._get_signature(req)
|
||||
if not signature:
|
||||
if 'X-Auth-User' in req.headers:
|
||||
return self.application
|
||||
else:
|
||||
logger.info(_("No AWS Signature found."))
|
||||
LOG.info(_("No AWS Signature found."))
|
||||
raise exception.HeatIncompleteSignatureError()
|
||||
|
||||
access = self._get_access(req)
|
||||
@ -153,13 +153,13 @@ class EC2Token(wsgi.Middleware):
|
||||
if 'X-Auth-User' in req.headers:
|
||||
return self.application
|
||||
else:
|
||||
logger.info(_("No AWSAccessKeyId/Authorization Credential"))
|
||||
LOG.info(_("No AWSAccessKeyId/Authorization Credential"))
|
||||
raise exception.HeatMissingAuthenticationTokenError()
|
||||
|
||||
logger.info(_("AWS credentials found, checking against keystone."))
|
||||
LOG.info(_("AWS credentials found, checking against keystone."))
|
||||
|
||||
if not auth_uri:
|
||||
logger.error(_("Ec2Token authorization failed, no auth_uri "
|
||||
LOG.error(_("Ec2Token authorization failed, no auth_uri "
|
||||
"specified in config file"))
|
||||
raise exception.HeatInternalFailureError(_('Service '
|
||||
'misconfigured'))
|
||||
@ -184,7 +184,7 @@ class EC2Token(wsgi.Middleware):
|
||||
headers = {'Content-Type': 'application/json'}
|
||||
|
||||
keystone_ec2_uri = self._conf_get_keystone_ec2_uri(auth_uri)
|
||||
logger.info(_('Authenticating with %s') % keystone_ec2_uri)
|
||||
LOG.info(_('Authenticating with %s') % keystone_ec2_uri)
|
||||
response = requests.post(keystone_ec2_uri, data=creds_json,
|
||||
headers=headers)
|
||||
result = response.json()
|
||||
@ -192,9 +192,9 @@ class EC2Token(wsgi.Middleware):
|
||||
token_id = result['access']['token']['id']
|
||||
tenant = result['access']['token']['tenant']['name']
|
||||
tenant_id = result['access']['token']['tenant']['id']
|
||||
logger.info(_("AWS authentication successful."))
|
||||
LOG.info(_("AWS authentication successful."))
|
||||
except (AttributeError, KeyError):
|
||||
logger.info(_("AWS authentication failure."))
|
||||
LOG.info(_("AWS authentication failure."))
|
||||
# Try to extract the reason for failure so we can return the
|
||||
# appropriate AWS error via raising an exception
|
||||
try:
|
||||
|
@ -31,7 +31,7 @@ from heat.openstack.common import log as logging
|
||||
from heat.rpc import api as engine_api
|
||||
from heat.rpc import client as rpc_client
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class StackController(object):
|
||||
@ -246,11 +246,11 @@ class StackController(object):
|
||||
Get template file contents, either from local file or URL
|
||||
"""
|
||||
if 'TemplateBody' in req.params:
|
||||
logger.debug('TemplateBody ...')
|
||||
LOG.debug('TemplateBody ...')
|
||||
return req.params['TemplateBody']
|
||||
elif 'TemplateUrl' in req.params:
|
||||
url = req.params['TemplateUrl']
|
||||
logger.debug('TemplateUrl %s' % url)
|
||||
LOG.debug('TemplateUrl %s' % url)
|
||||
try:
|
||||
return urlfetch.get(url)
|
||||
except IOError as exc:
|
||||
@ -421,7 +421,7 @@ class StackController(object):
|
||||
msg = _("The Template must be a JSON or YAML document.")
|
||||
return exception.HeatInvalidParameterValueError(detail=msg)
|
||||
|
||||
logger.info('validate_template')
|
||||
LOG.info('validate_template')
|
||||
|
||||
def format_validate_parameter(key, value):
|
||||
"""
|
||||
|
@ -25,7 +25,7 @@ from heat.openstack.common.rpc import common as rpc_common
|
||||
from heat.rpc import api as engine_api
|
||||
from heat.rpc import client as rpc_client
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class WatchController(object):
|
||||
@ -211,7 +211,7 @@ class WatchController(object):
|
||||
# Filter criteria not met, return None
|
||||
return
|
||||
except KeyError:
|
||||
logger.warning(_("Invalid filter key %s, ignoring") % f)
|
||||
LOG.warning(_("Invalid filter key %s, ignoring") % f)
|
||||
|
||||
return result
|
||||
|
||||
@ -220,7 +220,7 @@ class WatchController(object):
|
||||
# FIXME : Don't yet handle filtering by Dimensions
|
||||
filter_result = dict((k, v) for (k, v) in parms.iteritems() if k in
|
||||
("MetricName", "Namespace"))
|
||||
logger.debug("filter parameters : %s" % filter_result)
|
||||
LOG.debug("filter parameters : %s" % filter_result)
|
||||
|
||||
try:
|
||||
# Engine does not currently support query by namespace/metric
|
||||
@ -266,7 +266,7 @@ class WatchController(object):
|
||||
# need to process (each dict) for dimensions
|
||||
metric_data = api_utils.extract_param_list(parms, prefix='MetricData')
|
||||
if not len(metric_data):
|
||||
logger.error(_("Request does not contain required MetricData"))
|
||||
LOG.error(_("Request does not contain required MetricData"))
|
||||
return exception.HeatMissingParameterError("MetricData list")
|
||||
|
||||
watch_name = None
|
||||
@ -321,10 +321,10 @@ class WatchController(object):
|
||||
'expecting one of %(expect)s') % {
|
||||
'state': state,
|
||||
'expect': state_map.keys()}
|
||||
logger.error(msg)
|
||||
LOG.error(msg)
|
||||
return exception.HeatInvalidParameterValueError(msg)
|
||||
|
||||
logger.debug("setting %(name)s to %(state)s" % {
|
||||
LOG.debug("setting %(name)s to %(state)s" % {
|
||||
'name': name, 'state': state_map[state]})
|
||||
try:
|
||||
self.rpc_client.set_watch_state(con, watch_name=name,
|
||||
|
@ -25,7 +25,7 @@ from heat.common import wsgi
|
||||
from heat.openstack.common import log as logging
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class VersionNegotiationFilter(wsgi.Middleware):
|
||||
@ -48,7 +48,7 @@ class VersionNegotiationFilter(wsgi.Middleware):
|
||||
msg = ("Processing request: %(method)s %(path)s Accept: "
|
||||
"%(accept)s" % {'method': req.method,
|
||||
'path': req.path, 'accept': req.accept})
|
||||
logger.debug(msg)
|
||||
LOG.debug(msg)
|
||||
|
||||
# If the request is for /versions, just return the versions container
|
||||
if req.path_info_peek() in ("versions", ""):
|
||||
@ -60,7 +60,7 @@ class VersionNegotiationFilter(wsgi.Middleware):
|
||||
minor_version = req.environ['api.minor_version']
|
||||
|
||||
if (major_version == 1 and minor_version == 0):
|
||||
logger.debug("Matched versioned URI. "
|
||||
LOG.debug("Matched versioned URI. "
|
||||
"Version: %(major_version)d.%(minor_version)d"
|
||||
% {'major_version': major_version,
|
||||
'minor_version': minor_version})
|
||||
@ -68,7 +68,7 @@ class VersionNegotiationFilter(wsgi.Middleware):
|
||||
req.path_info_pop()
|
||||
return None
|
||||
else:
|
||||
logger.debug("Unknown version in versioned URI: "
|
||||
LOG.debug("Unknown version in versioned URI: "
|
||||
"%(major_version)d.%(minor_version)d. "
|
||||
"Returning version choices."
|
||||
% {'major_version': major_version,
|
||||
@ -84,13 +84,13 @@ class VersionNegotiationFilter(wsgi.Middleware):
|
||||
major_version = req.environ['api.major_version']
|
||||
minor_version = req.environ['api.minor_version']
|
||||
if (major_version == 1 and minor_version == 0):
|
||||
logger.debug("Matched versioned media type. Version: "
|
||||
LOG.debug("Matched versioned media type. Version: "
|
||||
"%(major_version)d.%(minor_version)d"
|
||||
% {'major_version': major_version,
|
||||
'minor_version': minor_version})
|
||||
return None
|
||||
else:
|
||||
logger.debug("Unknown version in accept header: "
|
||||
LOG.debug("Unknown version in accept header: "
|
||||
"%(major_version)d.%(minor_version)d..."
|
||||
"returning version choices."
|
||||
% {'major_version': major_version,
|
||||
@ -98,7 +98,7 @@ class VersionNegotiationFilter(wsgi.Middleware):
|
||||
return self.versions_app
|
||||
else:
|
||||
if req.accept not in ('*/*', ''):
|
||||
logger.debug("Unknown accept header: %s..."
|
||||
LOG.debug("Unknown accept header: %s..."
|
||||
"returning HTTP not found.", req.accept)
|
||||
return webob.exc.HTTPNotFound()
|
||||
return None
|
||||
|
@ -30,7 +30,7 @@ from heat.openstack.common import log as logging
|
||||
from heat.rpc import api as engine_api
|
||||
from heat.rpc import client as rpc_client
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class InstantiationData(object):
|
||||
@ -94,7 +94,7 @@ class InstantiationData(object):
|
||||
return template_data
|
||||
elif self.PARAM_TEMPLATE_URL in self.data:
|
||||
url = self.data[self.PARAM_TEMPLATE_URL]
|
||||
logger.debug('TemplateUrl %s' % url)
|
||||
LOG.debug('TemplateUrl %s' % url)
|
||||
try:
|
||||
template_data = urlfetch.get(url)
|
||||
except IOError as ex:
|
||||
@ -188,7 +188,7 @@ class StackController(object):
|
||||
filters=filter_params,
|
||||
tenant_safe=tenant_safe)
|
||||
except AttributeError as exc:
|
||||
logger.warning(_("Old Engine Version: %s") % exc)
|
||||
LOG.warning(_("Old Engine Version: %s") % exc)
|
||||
|
||||
return stacks_view.collection(req, stacks=stacks, count=count,
|
||||
tenant_safe=tenant_safe)
|
||||
|
@ -25,7 +25,7 @@ from heat.common import wsgi
|
||||
from heat.openstack.common import log as logging
|
||||
from heat.openstack.common import rpc
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
paste_deploy_group = cfg.OptGroup('paste_deploy')
|
||||
paste_deploy_opts = [
|
||||
@ -224,7 +224,7 @@ cfg.CONF.set_default(name='allowed_rpc_exception_modules',
|
||||
default=allowed_rpc_exception_modules)
|
||||
|
||||
if cfg.CONF.instance_user:
|
||||
logger.warn(_('The "instance_user" option in heat.conf is deprecated and '
|
||||
LOG.warn(_('The "instance_user" option in heat.conf is deprecated and '
|
||||
'will be removed in the Juno release.'))
|
||||
|
||||
|
||||
|
@ -29,7 +29,7 @@ from heat.openstack.common import log as logging
|
||||
_FATAL_EXCEPTION_FORMAT_ERRORS = False
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class RedirectException(Exception):
|
||||
@ -112,9 +112,9 @@ class HeatException(Exception):
|
||||
exc_info = sys.exc_info()
|
||||
#kwargs doesn't match a variable in the message
|
||||
#log the issue and the kwargs
|
||||
logger.exception(_('Exception in string format operation'))
|
||||
LOG.exception(_('Exception in string format operation'))
|
||||
for name, value in kwargs.iteritems():
|
||||
logger.error("%s: %s" % (name, value))
|
||||
LOG.error("%s: %s" % (name, value))
|
||||
|
||||
if _FATAL_EXCEPTION_FORMAT_ERRORS:
|
||||
raise exc_info[0], exc_info[1], exc_info[2]
|
||||
|
@ -27,7 +27,7 @@ from heat.openstack.common.gettextutils import _
|
||||
from heat.openstack.common import importutils
|
||||
from heat.openstack.common import log as logging
|
||||
|
||||
logger = logging.getLogger('heat.common.keystoneclient')
|
||||
LOG = logging.getLogger('heat.common.keystoneclient')
|
||||
|
||||
AccessKey = namedtuple('AccessKey', ['id', 'access', 'secret'])
|
||||
|
||||
@ -98,9 +98,9 @@ class KeystoneClientV3(object):
|
||||
' stack_domain_admin and'
|
||||
' stack_domain_admin_password'))
|
||||
else:
|
||||
logger.warning(_('stack_user_domain ID not set in heat.conf '
|
||||
LOG.warning(_('stack_user_domain ID not set in heat.conf '
|
||||
'falling back to using default'))
|
||||
logger.debug('Using stack domain %s' % self.stack_domain_id)
|
||||
LOG.debug('Using stack domain %s' % self.stack_domain_id)
|
||||
|
||||
@property
|
||||
def client(self):
|
||||
@ -119,7 +119,7 @@ class KeystoneClientV3(object):
|
||||
if c.authenticate():
|
||||
self._admin_client = c
|
||||
else:
|
||||
logger.error("Admin client authentication failed")
|
||||
LOG.error("Admin client authentication failed")
|
||||
raise exception.AuthorizationFailure()
|
||||
return self._admin_client
|
||||
|
||||
@ -135,7 +135,7 @@ class KeystoneClientV3(object):
|
||||
if c.authenticate(domain_id=self.stack_domain_id):
|
||||
self._domain_admin_client = c
|
||||
else:
|
||||
logger.error("Domain admin client authentication failed")
|
||||
LOG.error("Domain admin client authentication failed")
|
||||
raise exception.AuthorizationFailure()
|
||||
return self._domain_admin_client
|
||||
|
||||
@ -160,7 +160,7 @@ class KeystoneClientV3(object):
|
||||
kwargs['password'] = self.context.password
|
||||
kwargs['project_id'] = self.context.tenant_id
|
||||
else:
|
||||
logger.error(_("Keystone v3 API connection failed, no password "
|
||||
LOG.error(_("Keystone v3 API connection failed, no password "
|
||||
"trust or auth_token!"))
|
||||
raise exception.AuthorizationFailure()
|
||||
kwargs.update(self._ssl_options())
|
||||
@ -171,14 +171,14 @@ class KeystoneClientV3(object):
|
||||
if 'trust_id' in kwargs:
|
||||
# Sanity check
|
||||
if not client.auth_ref.trust_scoped:
|
||||
logger.error(_("trust token re-scoping failed!"))
|
||||
LOG.error(_("trust token re-scoping failed!"))
|
||||
raise exception.AuthorizationFailure()
|
||||
# All OK so update the context with the token
|
||||
self.context.auth_token = client.auth_ref.auth_token
|
||||
self.context.auth_url = kwargs.get('auth_url')
|
||||
# Sanity check that impersonation is effective
|
||||
if self.context.trustor_user_id != client.auth_ref.user_id:
|
||||
logger.error("Trust impersonation failed")
|
||||
LOG.error("Trust impersonation failed")
|
||||
raise exception.AuthorizationFailure()
|
||||
|
||||
return client
|
||||
@ -260,7 +260,7 @@ class KeystoneClientV3(object):
|
||||
|
||||
def _get_username(self, username):
|
||||
if(len(username) > 64):
|
||||
logger.warning(_("Truncating the username %s to the last 64 "
|
||||
LOG.warning(_("Truncating the username %s to the last 64 "
|
||||
"characters.") % username)
|
||||
#get the last 64 characters of the username
|
||||
return username[-64:]
|
||||
@ -296,12 +296,12 @@ class KeystoneClientV3(object):
|
||||
name=self._get_username(username), password=password,
|
||||
default_project=self.context.tenant_id)
|
||||
# Add user to heat_stack_user_role
|
||||
logger.debug("Adding user %(user)s to role %(role)s" % {
|
||||
LOG.debug("Adding user %(user)s to role %(role)s" % {
|
||||
'user': user.id, 'role': role_id})
|
||||
self.client.roles.grant(role=role_id, user=user.id,
|
||||
project=self.context.tenant_id)
|
||||
else:
|
||||
logger.error(_("Failed to add user %(user)s to role %(role)s, "
|
||||
LOG.error(_("Failed to add user %(user)s to role %(role)s, "
|
||||
"check role exists!") % {
|
||||
'user': username,
|
||||
'role': cfg.CONF.heat_stack_user_role})
|
||||
@ -323,7 +323,7 @@ class KeystoneClientV3(object):
|
||||
if not self.stack_domain_id:
|
||||
# FIXME(shardy): Legacy fallback for folks using old heat.conf
|
||||
# files which lack domain configuration
|
||||
logger.warning(_('Falling back to legacy non-domain user create, '
|
||||
LOG.warning(_('Falling back to legacy non-domain user create, '
|
||||
'configure domain in heat.conf'))
|
||||
return self.create_stack_user(username=username, password=password)
|
||||
# We add the new user to a special keystone role
|
||||
@ -338,13 +338,14 @@ class KeystoneClientV3(object):
|
||||
name=self._get_username(username), password=password,
|
||||
default_project=project_id, domain=self.stack_domain_id)
|
||||
# Add to stack user role
|
||||
logger.debug("Adding user %(user)s to role %(role)s" % {
|
||||
LOG.debug("Adding user %(user)s to role %(role)s" % {
|
||||
'user': user.id, 'role': role_id})
|
||||
self.domain_admin_client.roles.grant(role=role_id, user=user.id,
|
||||
project=project_id)
|
||||
else:
|
||||
logger.error(_("Failed to add user %(user)s to role %(role)s, "
|
||||
"check role exists!") % {'user': username,
|
||||
LOG.error(_("Failed to add user %(user)s to role %(role)s, "
|
||||
"check role exists!")
|
||||
% {'user': username,
|
||||
'role': cfg.CONF.heat_stack_user_role})
|
||||
raise exception.Error(_("Can't find role %s")
|
||||
% cfg.CONF.heat_stack_user_role)
|
||||
@ -363,7 +364,7 @@ class KeystoneClientV3(object):
|
||||
if not self.stack_domain_id:
|
||||
# FIXME(shardy): Legacy fallback for folks using old heat.conf
|
||||
# files which lack domain configuration
|
||||
logger.warning(_('Falling back to legacy non-domain user delete, '
|
||||
LOG.warning(_('Falling back to legacy non-domain user delete, '
|
||||
'configure domain in heat.conf'))
|
||||
return self.delete_stack_user(user_id)
|
||||
|
||||
@ -384,7 +385,7 @@ class KeystoneClientV3(object):
|
||||
if not self.stack_domain_id:
|
||||
# FIXME(shardy): Legacy fallback for folks using old heat.conf
|
||||
# files which lack domain configuration
|
||||
logger.warning(_('Falling back to legacy non-domain project, '
|
||||
LOG.warning(_('Falling back to legacy non-domain project, '
|
||||
'configure domain in heat.conf'))
|
||||
return self.context.tenant_id
|
||||
# Note we use the tenant ID not name to ensure uniqueness in a multi-
|
||||
@ -401,7 +402,7 @@ class KeystoneClientV3(object):
|
||||
if not self.stack_domain_id:
|
||||
# FIXME(shardy): Legacy fallback for folks using old heat.conf
|
||||
# files which lack domain configuration
|
||||
logger.warning(_('Falling back to legacy non-domain project, '
|
||||
LOG.warning(_('Falling back to legacy non-domain project, '
|
||||
'configure domain in heat.conf'))
|
||||
return
|
||||
try:
|
||||
@ -475,7 +476,7 @@ class KeystoneClientV3(object):
|
||||
if not self.stack_domain_id:
|
||||
# FIXME(shardy): Legacy fallback for folks using old heat.conf
|
||||
# files which lack domain configuration
|
||||
logger.warning(_('Falling back to legacy non-domain keypair, '
|
||||
LOG.warning(_('Falling back to legacy non-domain keypair, '
|
||||
'configure domain in heat.conf'))
|
||||
return self.create_ec2_keypair(user_id)
|
||||
data_blob = {'access': uuid.uuid4().hex,
|
||||
@ -492,7 +493,7 @@ class KeystoneClientV3(object):
|
||||
if not self.stack_domain_id:
|
||||
# FIXME(shardy): Legacy fallback for folks using old heat.conf
|
||||
# files which lack domain configuration
|
||||
logger.warning(_('Falling back to legacy non-domain keypair, '
|
||||
LOG.warning(_('Falling back to legacy non-domain keypair, '
|
||||
'configure domain in heat.conf'))
|
||||
return self.delete_ec2_keypair(credential_id=credential_id)
|
||||
self._check_stack_domain_user(user_id, project_id, 'delete_keypair')
|
||||
@ -511,7 +512,7 @@ class KeystoneClientV3(object):
|
||||
if not self.stack_domain_id:
|
||||
# FIXME(shardy): Legacy fallback for folks using old heat.conf
|
||||
# files which lack domain configuration
|
||||
logger.warning(_('Falling back to legacy non-domain disable, '
|
||||
LOG.warning(_('Falling back to legacy non-domain disable, '
|
||||
'configure domain in heat.conf'))
|
||||
return self.disable_stack_user(user_id)
|
||||
self._check_stack_domain_user(user_id, project_id, 'disable')
|
||||
@ -521,7 +522,7 @@ class KeystoneClientV3(object):
|
||||
if not self.stack_domain_id:
|
||||
# FIXME(shardy): Legacy fallback for folks using old heat.conf
|
||||
# files which lack domain configuration
|
||||
logger.warning(_('Falling back to legacy non-domain enable, '
|
||||
LOG.warning(_('Falling back to legacy non-domain enable, '
|
||||
'configure domain in heat.conf'))
|
||||
return self.enable_stack_user(user_id)
|
||||
self._check_stack_domain_user(user_id, project_id, 'enable')
|
||||
|
@ -23,7 +23,7 @@ from heat.openstack.common import importutils
|
||||
from heat.openstack.common import log as logging
|
||||
from heat.openstack.common import processutils
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def startup_notify(notify_param):
|
||||
@ -35,6 +35,6 @@ def startup_notify(notify_param):
|
||||
try:
|
||||
processutils.execute(notify_param, shell=True)
|
||||
except Exception as e:
|
||||
logger.error(_('Failed to execute onready command: %s') % e)
|
||||
LOG.error(_('Failed to execute onready command: %s') % e)
|
||||
else:
|
||||
notifier.notify()
|
||||
|
@ -26,7 +26,7 @@ import types
|
||||
from heat.openstack.common.gettextutils import _
|
||||
from heat.openstack.common import log as logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _module_name(*components):
|
||||
@ -94,7 +94,7 @@ def load_modules(package, ignore_error=False):
|
||||
try:
|
||||
module = _import_module(importer, module_name, package)
|
||||
except ImportError:
|
||||
logger.error(_('Failed to import module %s') % module_name)
|
||||
LOG.error(_('Failed to import module %s') % module_name)
|
||||
if not ignore_error:
|
||||
raise
|
||||
else:
|
||||
|
@ -28,7 +28,7 @@ import six
|
||||
|
||||
from heat.openstack.common import log as logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class JSONResponseSerializer(object):
|
||||
@ -40,7 +40,7 @@ class JSONResponseSerializer(object):
|
||||
return obj
|
||||
|
||||
response = json.dumps(data, default=sanitizer)
|
||||
logger.debug("JSON response : %s" % response)
|
||||
LOG.debug("JSON response : %s" % response)
|
||||
return response
|
||||
|
||||
def default(self, response, result):
|
||||
|
@ -23,7 +23,7 @@ import socket
|
||||
|
||||
from heat.openstack.common import log as logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def _sd_notify(msg):
|
||||
@ -37,7 +37,7 @@ def _sd_notify(msg):
|
||||
sock.sendall(msg)
|
||||
sock.close()
|
||||
else:
|
||||
logger.warning(_('Unable to notify systemd of startup completion:'
|
||||
LOG.warning(_('Unable to notify systemd of startup completion:'
|
||||
' NOTIFY_SOCKET not set'))
|
||||
|
||||
|
||||
|
@ -25,7 +25,7 @@ from heat.openstack.common import log as logging
|
||||
|
||||
cfg.CONF.import_opt('max_template_size', 'heat.common.config')
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class URLFetchError(exception.Error, IOError):
|
||||
@ -40,7 +40,7 @@ def get(url, allowed_schemes=('http', 'https')):
|
||||
the allowed_schemes argument.
|
||||
Raise an IOError if getting the data fails.
|
||||
"""
|
||||
logger.info(_('Fetching data from %s') % url)
|
||||
LOG.info(_('Fetching data from %s') % url)
|
||||
|
||||
components = urllib.parse.urlparse(url)
|
||||
|
||||
|
@ -164,12 +164,12 @@ cfg.CONF.register_opt(json_size_opt)
|
||||
class WritableLogger(object):
|
||||
"""A thin wrapper that responds to `write` and logs."""
|
||||
|
||||
def __init__(self, logger, level=logging.DEBUG):
|
||||
self.logger = logger
|
||||
def __init__(self, LOG, level=logging.DEBUG):
|
||||
self.LOG = LOG
|
||||
self.level = level
|
||||
|
||||
def write(self, msg):
|
||||
self.logger.log(self.level, msg.strip("\n"))
|
||||
self.LOG.log(self.level, msg.strip("\n"))
|
||||
|
||||
|
||||
def get_bind_addr(conf, default_port=None):
|
||||
@ -252,7 +252,7 @@ class Server(object):
|
||||
"""
|
||||
def kill_children(*args):
|
||||
"""Kills the entire process group."""
|
||||
self.logger.error(_('SIGTERM received'))
|
||||
self.LOG.error(_('SIGTERM received'))
|
||||
signal.signal(signal.SIGTERM, signal.SIG_IGN)
|
||||
self.running = False
|
||||
os.killpg(0, signal.SIGTERM)
|
||||
@ -261,7 +261,7 @@ class Server(object):
|
||||
"""
|
||||
Shuts down the server, but allows running requests to complete
|
||||
"""
|
||||
self.logger.error(_('SIGHUP received'))
|
||||
self.LOG.error(_('SIGHUP received'))
|
||||
signal.signal(signal.SIGHUP, signal.SIG_IGN)
|
||||
self.running = False
|
||||
|
||||
@ -269,7 +269,7 @@ class Server(object):
|
||||
self.application = application
|
||||
self.sock = get_socket(conf, default_port)
|
||||
|
||||
self.logger = logging.getLogger('eventlet.wsgi.server')
|
||||
self.LOG = logging.getLogger('eventlet.wsgi.server')
|
||||
|
||||
if conf.workers == 0:
|
||||
# Useful for profiling, test, debug etc.
|
||||
@ -277,7 +277,7 @@ class Server(object):
|
||||
self.pool.spawn_n(self._single_run, application, self.sock)
|
||||
return
|
||||
|
||||
self.logger.info(_("Starting %d workers") % conf.workers)
|
||||
self.LOG.info(_("Starting %d workers") % conf.workers)
|
||||
signal.signal(signal.SIGTERM, kill_children)
|
||||
signal.signal(signal.SIGHUP, hup)
|
||||
while len(self.children) < conf.workers:
|
||||
@ -288,18 +288,18 @@ class Server(object):
|
||||
try:
|
||||
pid, status = os.wait()
|
||||
if os.WIFEXITED(status) or os.WIFSIGNALED(status):
|
||||
self.logger.error(_('Removing dead child %s') % pid)
|
||||
self.LOG.error(_('Removing dead child %s') % pid)
|
||||
self.children.remove(pid)
|
||||
self.run_child()
|
||||
except OSError as err:
|
||||
if err.errno not in (errno.EINTR, errno.ECHILD):
|
||||
raise
|
||||
except KeyboardInterrupt:
|
||||
self.logger.info(_('Caught keyboard interrupt. Exiting.'))
|
||||
self.LOG.info(_('Caught keyboard interrupt. Exiting.'))
|
||||
break
|
||||
eventlet.greenio.shutdown_safe(self.sock)
|
||||
self.sock.close()
|
||||
self.logger.debug('Exited')
|
||||
self.LOG.debug('Exited')
|
||||
|
||||
def wait(self):
|
||||
"""Wait until all servers have completed running."""
|
||||
@ -317,10 +317,10 @@ class Server(object):
|
||||
signal.signal(signal.SIGHUP, signal.SIG_DFL)
|
||||
signal.signal(signal.SIGTERM, signal.SIG_DFL)
|
||||
self.run_server()
|
||||
self.logger.info(_('Child %d exiting normally') % os.getpid())
|
||||
self.LOG.info(_('Child %d exiting normally') % os.getpid())
|
||||
return
|
||||
else:
|
||||
self.logger.info(_('Started child %s') % pid)
|
||||
self.LOG.info(_('Started child %s') % pid)
|
||||
self.children.append(pid)
|
||||
|
||||
def run_server(self):
|
||||
@ -334,7 +334,7 @@ class Server(object):
|
||||
self.application,
|
||||
custom_pool=self.pool,
|
||||
url_length_limit=URL_LENGTH_LIMIT,
|
||||
log=WritableLogger(self.logger),
|
||||
log=WritableLogger(self.LOG),
|
||||
debug=cfg.CONF.debug)
|
||||
except socket.error as err:
|
||||
if err[0] != errno.EINVAL:
|
||||
@ -343,11 +343,11 @@ class Server(object):
|
||||
|
||||
def _single_run(self, application, sock):
|
||||
"""Start a WSGI server in a new green thread."""
|
||||
self.logger.info(_("Starting single process server"))
|
||||
self.LOG.info(_("Starting single process server"))
|
||||
eventlet.wsgi.server(sock, application,
|
||||
custom_pool=self.pool,
|
||||
url_length_limit=URL_LENGTH_LIMIT,
|
||||
log=WritableLogger(self.logger))
|
||||
log=WritableLogger(self.LOG))
|
||||
|
||||
|
||||
class Middleware(object):
|
||||
|
@ -19,7 +19,7 @@ from heat.openstack.common import log as logging
|
||||
from heat.openstack.common import timeutils
|
||||
from heat.rpc import api
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def extract_args(params):
|
||||
@ -34,7 +34,7 @@ def extract_args(params):
|
||||
try:
|
||||
timeout = int(timeout_mins)
|
||||
except (ValueError, TypeError):
|
||||
logger.exception(_('Timeout conversion failed'))
|
||||
LOG.exception(_('Timeout conversion failed'))
|
||||
else:
|
||||
if timeout > 0:
|
||||
kwargs[api.PARAM_TIMEOUT] = timeout
|
||||
@ -234,7 +234,7 @@ def format_watch_data(wd):
|
||||
if len(metric) == 1:
|
||||
metric_name, metric_data = metric[0]
|
||||
else:
|
||||
logger.error(_("Unexpected number of keys in watch_data.data!"))
|
||||
LOG.error(_("Unexpected number of keys in watch_data.data!"))
|
||||
return
|
||||
|
||||
result = {
|
||||
|
@ -21,42 +21,42 @@ from heat.openstack.common.gettextutils import _
|
||||
from heat.openstack.common import importutils
|
||||
from heat.openstack.common import log as logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
try:
|
||||
from swiftclient import client as swiftclient
|
||||
except ImportError:
|
||||
swiftclient = None
|
||||
logger.info(_('swiftclient not available'))
|
||||
LOG.info(_('swiftclient not available'))
|
||||
try:
|
||||
from neutronclient.v2_0 import client as neutronclient
|
||||
except ImportError:
|
||||
neutronclient = None
|
||||
logger.info(_('neutronclient not available'))
|
||||
LOG.info(_('neutronclient not available'))
|
||||
try:
|
||||
from cinderclient import client as cinderclient
|
||||
except ImportError:
|
||||
cinderclient = None
|
||||
logger.info(_('cinderclient not available'))
|
||||
LOG.info(_('cinderclient not available'))
|
||||
|
||||
try:
|
||||
from troveclient import client as troveclient
|
||||
except ImportError:
|
||||
troveclient = None
|
||||
logger.info(_('troveclient not available'))
|
||||
LOG.info(_('troveclient not available'))
|
||||
|
||||
try:
|
||||
from ceilometerclient import client as ceilometerclient
|
||||
except ImportError:
|
||||
ceilometerclient = None
|
||||
logger.info(_('ceilometerclient not available'))
|
||||
LOG.info(_('ceilometerclient not available'))
|
||||
|
||||
try:
|
||||
from glanceclient import client as glanceclient
|
||||
except ImportError:
|
||||
glanceclient = None
|
||||
logger.info(_('glanceclient not available'))
|
||||
LOG.info(_('glanceclient not available'))
|
||||
|
||||
_default_backend = "heat.engine.clients.OpenStackClients"
|
||||
|
||||
@ -188,7 +188,7 @@ class OpenStackClients(object):
|
||||
|
||||
con = self.context
|
||||
if self.auth_token is None:
|
||||
logger.error(_("Neutron connection failed, no auth_token!"))
|
||||
LOG.error(_("Neutron connection failed, no auth_token!"))
|
||||
return None
|
||||
|
||||
endpoint_type = self._get_client_option('neutron', 'endpoint_type')
|
||||
|
@ -19,7 +19,7 @@ from heat.db import api as db_api
|
||||
from heat.openstack.common.gettextutils import _
|
||||
from heat.openstack.common import log as logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Event(object):
|
||||
@ -88,7 +88,7 @@ class Event(object):
|
||||
ev['created_at'] = self.timestamp
|
||||
|
||||
if self.id is not None:
|
||||
logger.warning(_('Duplicating event'))
|
||||
LOG.warning(_('Duplicating event'))
|
||||
|
||||
new_ev = db_api.event_create(self.context, ev)
|
||||
self.id = new_ev.id
|
||||
|
@ -15,7 +15,7 @@ from heat.common.exception import StackValidationFailed
|
||||
from heat.openstack.common.gettextutils import _
|
||||
from heat.openstack.common import log as logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
PARAMETER_GROUPS = 'parameter_groups'
|
||||
PARAMETERS = 'parameters'
|
||||
@ -28,8 +28,8 @@ class ParameterGroups(object):
|
||||
def __init__(self, tmpl):
|
||||
self.tmpl = tmpl
|
||||
self.parameters = tmpl.parameters(None, {})
|
||||
logger.debug(self.tmpl)
|
||||
logger.debug(self.parameters)
|
||||
LOG.debug(self.tmpl)
|
||||
LOG.debug(self.parameters)
|
||||
self.parameter_names = []
|
||||
if self.parameters:
|
||||
self.parameter_names = [param for param in self.parameters]
|
||||
@ -40,8 +40,8 @@ class ParameterGroups(object):
|
||||
Validate that a parameter belongs to only one Parameter Group
|
||||
and that each parameter name references a valid parameter.
|
||||
'''
|
||||
logger.debug('Validating Parameter Groups.')
|
||||
logger.debug(self.parameter_names)
|
||||
LOG.debug('Validating Parameter Groups.')
|
||||
LOG.debug(self.parameter_names)
|
||||
if self.parameter_groups is not None:
|
||||
#Loop through groups and validate parameters
|
||||
grouped_parameters = []
|
||||
|
@ -39,7 +39,7 @@ from heat.openstack.common.gettextutils import _
|
||||
from heat.openstack.common import log as logging
|
||||
from heat.openstack.common import strutils
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Stack(collections.Mapping):
|
||||
@ -175,7 +175,7 @@ class Stack(collections.Mapping):
|
||||
via the Parameters class as the StackId pseudo parameter
|
||||
'''
|
||||
if not self.parameters.set_stack_id(self.identifier()):
|
||||
logger.warning(_("Unable to set parameters StackId identifier"))
|
||||
LOG.warning(_("Unable to set parameters StackId identifier"))
|
||||
|
||||
@staticmethod
|
||||
def _get_dependencies(resources):
|
||||
@ -361,7 +361,7 @@ class Stack(collections.Mapping):
|
||||
dup_names = set(self.parameters.keys()) & set(self.keys())
|
||||
|
||||
if dup_names:
|
||||
logger.debug("Duplicate names %s" % dup_names)
|
||||
LOG.debug("Duplicate names %s" % dup_names)
|
||||
raise StackValidationFailed(message=_("Duplicate names %s") %
|
||||
dup_names)
|
||||
|
||||
@ -369,10 +369,10 @@ class Stack(collections.Mapping):
|
||||
try:
|
||||
result = res.validate()
|
||||
except exception.HeatException as ex:
|
||||
logger.exception(ex)
|
||||
LOG.exception(ex)
|
||||
raise ex
|
||||
except Exception as ex:
|
||||
logger.exception(ex)
|
||||
LOG.exception(ex)
|
||||
raise StackValidationFailed(message=strutils.safe_decode(
|
||||
six.text_type(ex)))
|
||||
if result:
|
||||
@ -415,7 +415,7 @@ class Stack(collections.Mapping):
|
||||
'status': status,
|
||||
'status_reason': reason})
|
||||
msg = _('Stack %(action)s %(status)s (%(name)s): %(reason)s')
|
||||
logger.info(msg % {'action': action,
|
||||
LOG.info(msg % {'action': action,
|
||||
'status': status,
|
||||
'name': self.name,
|
||||
'reason': reason})
|
||||
@ -514,7 +514,7 @@ class Stack(collections.Mapping):
|
||||
self._backup_name(),
|
||||
owner_id=self.id)
|
||||
if s is not None:
|
||||
logger.debug('Loaded existing backup stack')
|
||||
LOG.debug('Loaded existing backup stack')
|
||||
return self.load(self.context, stack=s)
|
||||
elif create_if_missing:
|
||||
templ = Template.load(self.context, self.t.id)
|
||||
@ -522,7 +522,7 @@ class Stack(collections.Mapping):
|
||||
prev = type(self)(self.context, self.name, templ, self.env,
|
||||
owner_id=self.id)
|
||||
prev.store(backup=True)
|
||||
logger.debug('Created new backup stack')
|
||||
LOG.debug('Created new backup stack')
|
||||
return prev
|
||||
else:
|
||||
return None
|
||||
@ -562,8 +562,7 @@ class Stack(collections.Mapping):
|
||||
@scheduler.wrappertask
|
||||
def update_task(self, newstack, action=UPDATE):
|
||||
if action not in (self.UPDATE, self.ROLLBACK):
|
||||
logger.error(_("Unexpected action %s passed to update!") %
|
||||
action)
|
||||
LOG.error(_("Unexpected action %s passed to update!") % action)
|
||||
self.state_set(self.UPDATE, self.FAILED,
|
||||
"Invalid action %s" % action)
|
||||
return
|
||||
@ -571,7 +570,7 @@ class Stack(collections.Mapping):
|
||||
if self.status != self.COMPLETE:
|
||||
if (action == self.ROLLBACK and
|
||||
self.state == (self.UPDATE, self.IN_PROGRESS)):
|
||||
logger.debug("Starting update rollback for %s" % self.name)
|
||||
LOG.debug("Starting update rollback for %s" % self.name)
|
||||
else:
|
||||
self.state_set(action, self.FAILED,
|
||||
'State invalid for %s' % action)
|
||||
@ -622,7 +621,7 @@ class Stack(collections.Mapping):
|
||||
yield self.update_task(oldstack, action=self.ROLLBACK)
|
||||
return
|
||||
else:
|
||||
logger.debug('Deleting backup stack')
|
||||
LOG.debug('Deleting backup stack')
|
||||
backup_stack.delete(backup=True)
|
||||
|
||||
# flip the template to the newstack values
|
||||
@ -651,7 +650,7 @@ class Stack(collections.Mapping):
|
||||
differently.
|
||||
'''
|
||||
if action not in (self.DELETE, self.ROLLBACK):
|
||||
logger.error(_("Unexpected action %s passed to delete!") % action)
|
||||
LOG.error(_("Unexpected action %s passed to delete!") % action)
|
||||
self.state_set(self.DELETE, self.FAILED,
|
||||
"Invalid action %s" % action)
|
||||
return
|
||||
@ -727,7 +726,7 @@ class Stack(collections.Mapping):
|
||||
try:
|
||||
self.clients.keystone().delete_trust(trust_id)
|
||||
except Exception as ex:
|
||||
logger.exception(ex)
|
||||
LOG.exception(ex)
|
||||
stack_status = self.FAILED
|
||||
reason = "Error deleting trust: %s" % six.text_type(ex)
|
||||
|
||||
@ -742,7 +741,7 @@ class Stack(collections.Mapping):
|
||||
self.clients.keystone().delete_stack_domain_project(
|
||||
project_id=self.stack_user_project_id)
|
||||
except Exception as ex:
|
||||
logger.exception(ex)
|
||||
LOG.exception(ex)
|
||||
stack_status = self.FAILED
|
||||
reason = "Error deleting project: %s" % six.text_type(ex)
|
||||
|
||||
@ -764,7 +763,7 @@ class Stack(collections.Mapping):
|
||||
'''
|
||||
# No need to suspend if the stack has been suspended
|
||||
if self.state == (self.SUSPEND, self.COMPLETE):
|
||||
logger.info(_('%s is already suspended') % str(self))
|
||||
LOG.info(_('%s is already suspended') % str(self))
|
||||
return
|
||||
|
||||
sus_task = scheduler.TaskRunner(self.stack_task,
|
||||
@ -783,7 +782,7 @@ class Stack(collections.Mapping):
|
||||
'''
|
||||
# No need to resume if the stack has been resumed
|
||||
if self.state == (self.RESUME, self.COMPLETE):
|
||||
logger.info(_('%s is already resumed') % str(self))
|
||||
LOG.info(_('%s is already resumed') % str(self))
|
||||
return
|
||||
|
||||
sus_task = scheduler.TaskRunner(self.stack_task,
|
||||
@ -814,7 +813,7 @@ class Stack(collections.Mapping):
|
||||
scheduler.TaskRunner(res.destroy)()
|
||||
except exception.ResourceFailure as ex:
|
||||
failed = True
|
||||
logger.error(_('delete: %s') % ex)
|
||||
LOG.error(_('delete: %s') % ex)
|
||||
|
||||
for res in deps:
|
||||
if not failed:
|
||||
@ -822,7 +821,7 @@ class Stack(collections.Mapping):
|
||||
res.state_reset()
|
||||
scheduler.TaskRunner(res.create)()
|
||||
except exception.ResourceFailure as ex:
|
||||
logger.exception(_('create'))
|
||||
LOG.exception(_('create'))
|
||||
failed = True
|
||||
else:
|
||||
res.state_set(res.CREATE, res.FAILED,
|
||||
|
@ -21,7 +21,7 @@ from oslo.config import cfg
|
||||
from heat.common import plugin_loader
|
||||
from heat.openstack.common import log
|
||||
|
||||
logger = log.getLogger(__name__)
|
||||
LOG = log.getLogger(__name__)
|
||||
|
||||
|
||||
class PluginManager(object):
|
||||
@ -93,14 +93,14 @@ class PluginMapping(object):
|
||||
try:
|
||||
mapping_dict = mapping_func(*self.args, **self.kwargs)
|
||||
except Exception:
|
||||
logger.error(_('Failed to load %(mapping_name)s '
|
||||
LOG.error(_('Failed to load %(mapping_name)s '
|
||||
'from %(module)s') % fmt_data)
|
||||
raise
|
||||
else:
|
||||
if isinstance(mapping_dict, collections.Mapping):
|
||||
return mapping_dict
|
||||
elif mapping_dict is not None:
|
||||
logger.error(_('Invalid type for %(mapping_name)s '
|
||||
LOG.error(_('Invalid type for %(mapping_name)s '
|
||||
'from %(module)s') % fmt_data)
|
||||
|
||||
return {}
|
||||
|
@ -32,7 +32,7 @@ from heat.openstack.common import excutils
|
||||
from heat.openstack.common.gettextutils import _
|
||||
from heat.openstack.common import log as logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
DELETION_POLICY = (DELETE, RETAIN, SNAPSHOT) = ('Delete', 'Retain', 'Snapshot')
|
||||
|
||||
@ -386,7 +386,7 @@ class Resource(object):
|
||||
while not check(handle_data):
|
||||
yield
|
||||
except Exception as ex:
|
||||
logger.exception('%s : %s' % (action, str(self)))
|
||||
LOG.exception('%s : %s' % (action, str(self)))
|
||||
failure = exception.ResourceFailure(ex, self, action)
|
||||
self.state_set(action, self.FAILED, six.text_type(failure))
|
||||
raise failure
|
||||
@ -396,7 +396,7 @@ class Resource(object):
|
||||
self.state_set(action, self.FAILED,
|
||||
'%s aborted' % action)
|
||||
except Exception:
|
||||
logger.exception(_('Error marking resource as failed'))
|
||||
LOG.exception(_('Error marking resource as failed'))
|
||||
else:
|
||||
self.state_set(action, self.COMPLETE)
|
||||
|
||||
@ -420,7 +420,7 @@ class Resource(object):
|
||||
% str(self.state))
|
||||
raise exception.ResourceFailure(exc, self, action)
|
||||
|
||||
logger.info('creating %s' % str(self))
|
||||
LOG.info('creating %s' % str(self))
|
||||
|
||||
# Re-resolve the template, since if the resource Ref's
|
||||
# the StackId pseudo parameter, it will change after
|
||||
@ -501,7 +501,7 @@ class Resource(object):
|
||||
exc = Exception(_('Resource update already requested'))
|
||||
raise exception.ResourceFailure(exc, self, action)
|
||||
|
||||
logger.info('updating %s' % str(self))
|
||||
LOG.info('updating %s' % str(self))
|
||||
|
||||
try:
|
||||
self.updated_time = datetime.utcnow()
|
||||
@ -529,10 +529,10 @@ class Resource(object):
|
||||
yield
|
||||
except UpdateReplace:
|
||||
with excutils.save_and_reraise_exception():
|
||||
logger.debug("Resource %s update requires replacement" %
|
||||
LOG.debug("Resource %s update requires replacement" %
|
||||
self.name)
|
||||
except Exception as ex:
|
||||
logger.exception(_('update %(resource)s : %(err)s') %
|
||||
LOG.exception(_('update %(resource)s : %(err)s') %
|
||||
{'resource': str(self), 'err': ex})
|
||||
failure = exception.ResourceFailure(ex, self, action)
|
||||
self.state_set(action, self.FAILED, six.text_type(failure))
|
||||
@ -555,7 +555,7 @@ class Resource(object):
|
||||
% str(self.state))
|
||||
raise exception.ResourceFailure(exc, self, action)
|
||||
|
||||
logger.info(_('suspending %s') % str(self))
|
||||
LOG.info(_('suspending %s') % str(self))
|
||||
return self._do_action(action)
|
||||
|
||||
def resume(self):
|
||||
@ -571,7 +571,7 @@ class Resource(object):
|
||||
% str(self.state))
|
||||
raise exception.ResourceFailure(exc, self, action)
|
||||
|
||||
logger.info(_('resuming %s') % str(self))
|
||||
LOG.info(_('resuming %s') % str(self))
|
||||
return self._do_action(action)
|
||||
|
||||
def physical_resource_name(self):
|
||||
@ -613,7 +613,7 @@ class Resource(object):
|
||||
return name[0:2] + '-' + name[-postfix_length:]
|
||||
|
||||
def validate(self):
|
||||
logger.info(_('Validating %s') % str(self))
|
||||
LOG.info(_('Validating %s') % str(self))
|
||||
|
||||
function.validate(self.t)
|
||||
self.validate_deletion_policy(self.t)
|
||||
@ -645,7 +645,7 @@ class Resource(object):
|
||||
|
||||
initial_state = self.state
|
||||
|
||||
logger.info(_('deleting %s') % str(self))
|
||||
LOG.info(_('deleting %s') % str(self))
|
||||
|
||||
try:
|
||||
self.state_set(action, self.IN_PROGRESS)
|
||||
@ -670,7 +670,7 @@ class Resource(object):
|
||||
yield
|
||||
|
||||
except Exception as ex:
|
||||
logger.exception(_('Delete %s') % str(self))
|
||||
LOG.exception(_('Delete %s') % str(self))
|
||||
failure = exception.ResourceFailure(ex, self, self.action)
|
||||
self.state_set(action, self.FAILED, six.text_type(failure))
|
||||
raise failure
|
||||
@ -680,8 +680,7 @@ class Resource(object):
|
||||
self.state_set(action, self.FAILED,
|
||||
'Deletion aborted')
|
||||
except Exception:
|
||||
logger.exception(_('Error marking resource deletion '
|
||||
'failed'))
|
||||
LOG.exception(_('Error marking resource deletion failed'))
|
||||
else:
|
||||
self.state_set(action, self.COMPLETE)
|
||||
|
||||
@ -711,7 +710,7 @@ class Resource(object):
|
||||
rs = db_api.resource_get(self.context, self.id)
|
||||
rs.update_and_save({'nova_instance': self.resource_id})
|
||||
except Exception as ex:
|
||||
logger.warn(_('db error %s') % ex)
|
||||
LOG.warn(_('db error %s') % ex)
|
||||
|
||||
def _store(self):
|
||||
'''Create the resource in the database.'''
|
||||
@ -731,7 +730,7 @@ class Resource(object):
|
||||
self.created_time = new_rs.created_at
|
||||
self._rsrc_metadata = metadata
|
||||
except Exception as ex:
|
||||
logger.error(_('DB error %s') % ex)
|
||||
LOG.error(_('DB error %s') % ex)
|
||||
|
||||
def _add_event(self, action, status, reason):
|
||||
'''Add a state change event to the database.'''
|
||||
@ -756,7 +755,7 @@ class Resource(object):
|
||||
'updated_at': self.updated_time,
|
||||
'nova_instance': self.resource_id})
|
||||
except Exception as ex:
|
||||
logger.error(_('DB error %s') % ex)
|
||||
LOG.error(_('DB error %s') % ex)
|
||||
|
||||
# store resource in DB on transition to CREATE_IN_PROGRESS
|
||||
# all other transitions (other than to DELETE_COMPLETE)
|
||||
@ -874,8 +873,8 @@ class Resource(object):
|
||||
self._add_event('signal', self.status, get_string_details())
|
||||
self.handle_signal(details)
|
||||
except Exception as ex:
|
||||
logger.exception(_('signal %(name)s : %(msg)s') %
|
||||
{'name': str(self), 'msg': ex})
|
||||
LOG.exception(_('signal %(name)s : %(msg)s') % {'name': str(self),
|
||||
'msg': ex})
|
||||
failure = exception.ResourceFailure(ex, self)
|
||||
raise failure
|
||||
|
||||
@ -888,7 +887,7 @@ class Resource(object):
|
||||
No-op for resources which don't explicitly override this method
|
||||
'''
|
||||
if new_metadata:
|
||||
logger.warning(_("Resource %s does not implement metadata update")
|
||||
LOG.warning(_("Resource %s does not implement metadata update")
|
||||
% self.name)
|
||||
|
||||
@classmethod
|
||||
|
@ -35,7 +35,7 @@ from heat.openstack.common import log as logging
|
||||
from heat.openstack.common import timeutils
|
||||
from heat.scaling import template
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
(SCALED_RESOURCE_TYPE,) = ('OS::Heat::ScaledResource',)
|
||||
@ -632,9 +632,9 @@ class AutoScalingGroup(InstanceGroup, CooldownMixin):
|
||||
Adjust the size of the scaling group if the cooldown permits.
|
||||
"""
|
||||
if self._cooldown_inprogress():
|
||||
logger.info(_("%(name)s NOT performing scaling adjustment, "
|
||||
"cooldown %(cooldown)s") % {
|
||||
'name': self.name,
|
||||
LOG.info(_("%(name)s NOT performing scaling adjustment, "
|
||||
"cooldown %(cooldown)s")
|
||||
% {'name': self.name,
|
||||
'cooldown': self.properties[self.COOLDOWN]})
|
||||
return
|
||||
|
||||
@ -659,21 +659,21 @@ class AutoScalingGroup(InstanceGroup, CooldownMixin):
|
||||
|
||||
if new_capacity > upper:
|
||||
if upper > capacity:
|
||||
logger.info(_('truncating growth to %s') % upper)
|
||||
LOG.info(_('truncating growth to %s') % upper)
|
||||
new_capacity = upper
|
||||
else:
|
||||
logger.warn(_('can not exceed %s') % upper)
|
||||
LOG.warn(_('can not exceed %s') % upper)
|
||||
return
|
||||
if new_capacity < lower:
|
||||
if lower < capacity:
|
||||
logger.info(_('truncating shrinkage to %s') % lower)
|
||||
LOG.info(_('truncating shrinkage to %s') % lower)
|
||||
new_capacity = lower
|
||||
else:
|
||||
logger.warn(_('can not be less than %s') % lower)
|
||||
LOG.warn(_('can not be less than %s') % lower)
|
||||
return
|
||||
|
||||
if new_capacity == capacity:
|
||||
logger.debug('no change in capacity %d' % capacity)
|
||||
LOG.debug('no change in capacity %d' % capacity)
|
||||
return
|
||||
|
||||
# send a notification before, on-error and on-success.
|
||||
@ -698,7 +698,7 @@ class AutoScalingGroup(InstanceGroup, CooldownMixin):
|
||||
})
|
||||
notification.send(**notif)
|
||||
except Exception:
|
||||
logger.exception(_('Failed sending error notification'))
|
||||
LOG.exception(_('Failed sending error notification'))
|
||||
else:
|
||||
notif.update({
|
||||
'suffix': 'end',
|
||||
@ -1060,15 +1060,15 @@ class ScalingPolicy(signal_responder.SignalResponder, CooldownMixin):
|
||||
alarm_state = details.get('current',
|
||||
details.get('state', 'alarm')).lower()
|
||||
|
||||
logger.info(_('%(name)s Alarm, new state %(state)s') % {
|
||||
'name': self.name, 'state': alarm_state})
|
||||
LOG.info(_('%(name)s Alarm, new state %(state)s')
|
||||
% {'name': self.name, 'state': alarm_state})
|
||||
|
||||
if alarm_state != 'alarm':
|
||||
return
|
||||
if self._cooldown_inprogress():
|
||||
logger.info(_("%(name)s NOT performing scaling action, "
|
||||
"cooldown %(cooldown)s") % {
|
||||
'name': self.name,
|
||||
LOG.info(_("%(name)s NOT performing scaling action, "
|
||||
"cooldown %(cooldown)s")
|
||||
% {'name': self.name,
|
||||
'cooldown': self.properties[self.COOLDOWN]})
|
||||
return
|
||||
|
||||
@ -1080,10 +1080,9 @@ class ScalingPolicy(signal_responder.SignalResponder, CooldownMixin):
|
||||
'alarm': self.name,
|
||||
'group': asgn_id})
|
||||
|
||||
logger.info(_('%(name)s Alarm, adjusting Group %(group)s with id '
|
||||
'%(asgn_id)s by %(filter)s') % {
|
||||
'name': self.name, 'group': group.name,
|
||||
'asgn_id': asgn_id,
|
||||
LOG.info(_('%(name)s Alarm, adjusting Group %(group)s with id '
|
||||
'%(asgn_id)s by %(filter)s')
|
||||
% {'name': self.name, 'group': group.name, 'asgn_id': asgn_id,
|
||||
'filter': self.properties[self.SCALING_ADJUSTMENT]})
|
||||
adjustment_type = self._get_adjustement_type()
|
||||
group.adjust(self.properties[self.SCALING_ADJUSTMENT], adjustment_type)
|
||||
|
@ -22,7 +22,7 @@ from heat.openstack.common import excutils
|
||||
from heat.openstack.common.gettextutils import _
|
||||
from heat.openstack.common import log as logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class ElasticIp(resource.Resource):
|
||||
@ -73,14 +73,14 @@ class ElasticIp(resource.Resource):
|
||||
ips = self.neutron().show_floatingip(self.resource_id)
|
||||
except ne as e:
|
||||
if e.status_code == 404:
|
||||
logger.warn(_("Floating IPs not found: %s") % e)
|
||||
LOG.warn(_("Floating IPs not found: %s") % e)
|
||||
else:
|
||||
self.ipaddress = ips['floatingip']['floating_ip_address']
|
||||
else:
|
||||
try:
|
||||
ips = self.nova().floating_ips.get(self.resource_id)
|
||||
except clients.novaclient.exceptions.NotFound as ex:
|
||||
logger.warn(_("Floating IPs not found: %s") % ex)
|
||||
LOG.warn(_("Floating IPs not found: %s") % ex)
|
||||
else:
|
||||
self.ipaddress = ips.ip
|
||||
return self.ipaddress or ''
|
||||
@ -97,7 +97,7 @@ class ElasticIp(resource.Resource):
|
||||
'floatingip': props})['floatingip']
|
||||
self.ipaddress = ips['floating_ip_address']
|
||||
self.resource_id_set(ips['id'])
|
||||
logger.info(_('ElasticIp create %s') % str(ips))
|
||||
LOG.info(_('ElasticIp create %s') % str(ips))
|
||||
else:
|
||||
if self.properties[self.DOMAIN]:
|
||||
raise exception.Error(_('Domain property can not be set on '
|
||||
@ -109,12 +109,12 @@ class ElasticIp(resource.Resource):
|
||||
with excutils.save_and_reraise_exception():
|
||||
msg = _("No default floating IP pool configured. "
|
||||
"Set 'default_floating_pool' in nova.conf.")
|
||||
logger.error(msg)
|
||||
LOG.error(msg)
|
||||
|
||||
if ips:
|
||||
self.ipaddress = ips.ip
|
||||
self.resource_id_set(ips.id)
|
||||
logger.info(_('ElasticIp create %s') % str(ips))
|
||||
LOG.info(_('ElasticIp create %s') % str(ips))
|
||||
|
||||
instance_id = self.properties[self.INSTANCE_ID]
|
||||
if instance_id:
|
||||
@ -193,13 +193,12 @@ class ElasticIpAssociation(resource.Resource):
|
||||
|
||||
if self.properties[self.EIP]:
|
||||
if not self.properties[self.INSTANCE_ID]:
|
||||
logger.warn(_('Skipping association, InstanceId not '
|
||||
'specified'))
|
||||
LOG.warn(_('Skipping association, InstanceId not specified'))
|
||||
return
|
||||
server = self.nova().servers.get(self.properties[self.INSTANCE_ID])
|
||||
server.add_floating_ip(self.properties[self.EIP])
|
||||
self.resource_id_set(self.properties[self.EIP])
|
||||
logger.debug('ElasticIpAssociation '
|
||||
LOG.debug('ElasticIpAssociation '
|
||||
'%(instance)s.add_floating_ip(%(eip)s)',
|
||||
{'instance': self.properties[self.INSTANCE_ID],
|
||||
'eip': self.properties[self.EIP]})
|
||||
@ -216,7 +215,7 @@ class ElasticIpAssociation(resource.Resource):
|
||||
port_rsrc = ports['ports'][0]
|
||||
port_id = port_rsrc['id']
|
||||
else:
|
||||
logger.warn(_('Skipping association, resource not specified'))
|
||||
LOG.warn(_('Skipping association, resource not specified'))
|
||||
return
|
||||
|
||||
float_id = self.properties[self.ALLOCATION_ID]
|
||||
|
@ -34,7 +34,7 @@ from heat.openstack.common import log as logging
|
||||
|
||||
cfg.CONF.import_opt('instance_user', 'heat.common.config')
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Restarter(signal_responder.SignalResponder):
|
||||
@ -88,22 +88,21 @@ class Restarter(signal_responder.SignalResponder):
|
||||
else:
|
||||
alarm_state = details.get('state', 'alarm').lower()
|
||||
|
||||
logger.info(_('%(name)s Alarm, new state %(state)s') % {
|
||||
'name': self.name, 'state': alarm_state})
|
||||
LOG.info(_('%(name)s Alarm, new state %(state)s')
|
||||
% {'name': self.name, 'state': alarm_state})
|
||||
|
||||
if alarm_state != 'alarm':
|
||||
return
|
||||
|
||||
victim = self._find_resource(self.properties[self.INSTANCE_ID])
|
||||
if victim is None:
|
||||
logger.info(_('%(name)s Alarm, can not find instance '
|
||||
'%(instance)s') % {
|
||||
'name': self.name,
|
||||
LOG.info(_('%(name)s Alarm, can not find instance %(instance)s')
|
||||
% {'name': self.name,
|
||||
'instance': self.properties[self.INSTANCE_ID]})
|
||||
return
|
||||
|
||||
logger.info(_('%(name)s Alarm, restarting resource: %(victim)s') % {
|
||||
'name': self.name, 'victim': victim.name})
|
||||
LOG.info(_('%(name)s Alarm, restarting resource: %(victim)s')
|
||||
% {'name': self.name, 'victim': victim.name})
|
||||
self.stack.restart_resource(victim.name)
|
||||
|
||||
def _resolve_attribute(self, name):
|
||||
@ -367,10 +366,8 @@ class Instance(resource.Resource):
|
||||
elif name in self.ATTRIBUTES[1:]:
|
||||
res = self._ipaddress()
|
||||
|
||||
logger.info(_('%(name)s._resolve_attribute(%(attname)s) == %(res)s'),
|
||||
{'name': self.name,
|
||||
'attname': name,
|
||||
'res': res})
|
||||
LOG.info(_('%(name)s._resolve_attribute(%(attname)s) == %(res)s'),
|
||||
{'name': self.name, 'attname': name, 'res': res})
|
||||
return unicode(res) if res else None
|
||||
|
||||
def _build_nics(self, network_interfaces,
|
||||
@ -752,7 +749,7 @@ class Instance(resource.Resource):
|
||||
raise exception.NotFound(_('Failed to find instance %s') %
|
||||
self.resource_id)
|
||||
else:
|
||||
logger.debug("suspending instance %s" % self.resource_id)
|
||||
LOG.debug("suspending instance %s" % self.resource_id)
|
||||
# We want the server.suspend to happen after the volume
|
||||
# detachement has finished, so pass both tasks and the server
|
||||
suspend_runner = scheduler.TaskRunner(server.suspend)
|
||||
@ -774,10 +771,9 @@ class Instance(resource.Resource):
|
||||
return True
|
||||
|
||||
nova_utils.refresh_server(server)
|
||||
logger.debug("%(name)s check_suspend_complete "
|
||||
LOG.debug("%(name)s check_suspend_complete "
|
||||
"status = %(status)s",
|
||||
{'name': self.name,
|
||||
'status': server.status})
|
||||
{'name': self.name, 'status': server.status})
|
||||
if server.status in list(nova_utils.deferred_server_statuses +
|
||||
['ACTIVE']):
|
||||
return server.status == 'SUSPENDED'
|
||||
@ -808,7 +804,7 @@ class Instance(resource.Resource):
|
||||
raise exception.NotFound(_('Failed to find instance %s') %
|
||||
self.resource_id)
|
||||
else:
|
||||
logger.debug("resuming instance %s" % self.resource_id)
|
||||
LOG.debug("resuming instance %s" % self.resource_id)
|
||||
server.resume()
|
||||
return server, scheduler.TaskRunner(self._attach_volumes_task())
|
||||
|
||||
|
@ -24,7 +24,7 @@ from heat.engine import stack_resource
|
||||
from heat.openstack.common.gettextutils import _
|
||||
from heat.openstack.common import log as logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
lb_template_default = r'''
|
||||
{
|
||||
@ -426,7 +426,7 @@ class LoadBalancer(stack_resource.StackResource):
|
||||
client = self.nova()
|
||||
for i in instances:
|
||||
ip = nova_utils.server_to_ipaddress(client, i) or '0.0.0.0'
|
||||
logger.debug('haproxy server:%s' % ip)
|
||||
LOG.debug('haproxy server:%s' % ip)
|
||||
servers.append('%sserver server%d %s:%s %s' % (spaces, n,
|
||||
ip, inst_port,
|
||||
check))
|
||||
@ -437,7 +437,7 @@ class LoadBalancer(stack_resource.StackResource):
|
||||
def get_parsed_template(self):
|
||||
if cfg.CONF.loadbalancer_template:
|
||||
with open(cfg.CONF.loadbalancer_template) as templ_fd:
|
||||
logger.info(_('Using custom loadbalancer template %s')
|
||||
LOG.info(_('Using custom loadbalancer template %s')
|
||||
% cfg.CONF.loadbalancer_template)
|
||||
contents = templ_fd.read()
|
||||
else:
|
||||
|
@ -21,7 +21,7 @@ from heat.engine import scheduler
|
||||
from heat.openstack.common import log as logging
|
||||
from heat.openstack.common import uuidutils
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class NeutronResource(resource.Resource):
|
||||
@ -136,7 +136,7 @@ class NeutronResource(resource.Resource):
|
||||
try:
|
||||
attributes = self._show_resource()
|
||||
except NeutronClientException as ex:
|
||||
logger.warn(_("failed to fetch resource attributes: %s") % ex)
|
||||
LOG.warn(_("failed to fetch resource attributes: %s") % ex)
|
||||
return None
|
||||
return self.handle_get_attributes(self.name, name, attributes)
|
||||
|
||||
|
@ -23,7 +23,7 @@ from heat.openstack.common import log as logging
|
||||
if clients.neutronclient is not None:
|
||||
import neutronclient.common.exceptions as neutron_exp
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Port(neutron.NeutronResource):
|
||||
@ -287,7 +287,7 @@ class Port(neutron.NeutronResource):
|
||||
|
||||
self._prepare_list_properties(props)
|
||||
|
||||
logger.debug('updating port with %s' % props)
|
||||
LOG.debug('updating port with %s' % props)
|
||||
self.neutron().update_port(self.resource_id, {'port': props})
|
||||
|
||||
def check_update_complete(self, *args):
|
||||
|
@ -19,7 +19,7 @@ from heat.openstack.common import excutils
|
||||
from heat.openstack.common.gettextutils import _
|
||||
from heat.openstack.common import log as logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class NovaFloatingIp(resource.Resource):
|
||||
@ -67,7 +67,7 @@ class NovaFloatingIp(resource.Resource):
|
||||
if pool is None:
|
||||
msg = _('Could not allocate floating IP. Probably there '
|
||||
'is no default floating IP pool is configured.')
|
||||
logger.error(msg)
|
||||
LOG.error(msg)
|
||||
|
||||
self.resource_id_set(floating_ip.id)
|
||||
self._floating_ip = floating_ip
|
||||
|
@ -31,7 +31,7 @@ from heat.engine import scheduler
|
||||
from heat.openstack.common.gettextutils import _
|
||||
from heat.openstack.common import log as logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
deferred_server_statuses = ['BUILD',
|
||||
@ -55,7 +55,7 @@ def refresh_server(server):
|
||||
except clients.novaclient.exceptions.OverLimit as exc:
|
||||
msg = _("Server %(name)s (%(id)s) received an OverLimit "
|
||||
"response during server.get(): %(exception)s")
|
||||
logger.warning(msg % {'name': server.name,
|
||||
LOG.warning(msg % {'name': server.name,
|
||||
'id': server.id,
|
||||
'exception': exc})
|
||||
except clients.novaclient.exceptions.ClientException as exc:
|
||||
@ -63,7 +63,7 @@ def refresh_server(server):
|
||||
(500, 503))):
|
||||
msg = _('Server "%(name)s" (%(id)s) received the following '
|
||||
'exception during server.get(): %(exception)s')
|
||||
logger.warning(msg % {'name': server.name,
|
||||
LOG.warning(msg % {'name': server.name,
|
||||
'id': server.id,
|
||||
'exception': exc})
|
||||
else:
|
||||
@ -331,8 +331,8 @@ def server_to_ipaddress(client, server):
|
||||
try:
|
||||
server = client.servers.get(server)
|
||||
except clients.novaclient.exceptions.NotFound as ex:
|
||||
logger.warn(_('Instance (%(server)s) not found: %(ex)s') % {
|
||||
'server': server, 'ex': ex})
|
||||
LOG.warn(_('Instance (%(server)s) not found: %(ex)s')
|
||||
% {'server': server, 'ex': ex})
|
||||
else:
|
||||
for n in server.networks:
|
||||
if len(server.networks[n]) > 0:
|
||||
|
@ -21,7 +21,7 @@ from heat.engine.resources import nova_utils
|
||||
from heat.openstack.common.gettextutils import _
|
||||
from heat.openstack.common import log as logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class OSDBInstance(resource.Resource):
|
||||
@ -235,7 +235,7 @@ class OSDBInstance(resource.Resource):
|
||||
except troveclient.exceptions.RequestEntityTooLarge as exc:
|
||||
msg = _("Stack %(name)s (%(id)s) received an OverLimit "
|
||||
"response during instance.get(): %(exception)s")
|
||||
logger.warning(msg % {'name': self.stack.name,
|
||||
LOG.warning(msg % {'name': self.stack.name,
|
||||
'id': self.stack.id,
|
||||
'exception': exc})
|
||||
|
||||
@ -253,7 +253,7 @@ class OSDBInstance(resource.Resource):
|
||||
|
||||
msg = _("Database instance %(database)s created (flavor:%(flavor)s, "
|
||||
"volume:%(volume)s)")
|
||||
logger.info(msg % ({'database': self.dbinstancename,
|
||||
LOG.info(msg % ({'database': self.dbinstancename,
|
||||
'flavor': self.flavor,
|
||||
'volume': self.volume}))
|
||||
return True
|
||||
@ -269,8 +269,7 @@ class OSDBInstance(resource.Resource):
|
||||
try:
|
||||
instance = self.trove().instances.get(self.resource_id)
|
||||
except troveclient.exceptions.NotFound:
|
||||
logger.debug("Database instance %s not found." %
|
||||
self.resource_id)
|
||||
LOG.debug("Database instance %s not found." % self.resource_id)
|
||||
self.resource_id_set(None)
|
||||
else:
|
||||
instance.delete()
|
||||
|
@ -20,7 +20,7 @@ from heat.engine import properties
|
||||
from heat.engine import resource
|
||||
from heat.openstack.common import log as logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class S3Bucket(resource.Resource):
|
||||
@ -119,9 +119,8 @@ class S3Bucket(resource.Resource):
|
||||
"""Create a bucket."""
|
||||
container = self.physical_resource_name()
|
||||
headers = self.tags_to_headers()
|
||||
logger.debug('S3Bucket create container %(container)s with headers '
|
||||
'%(headers)s' % {
|
||||
'container': container, 'headers': headers})
|
||||
LOG.debug('S3Bucket create container %(container)s with headers '
|
||||
'%(headers)s' % {'container': container, 'headers': headers})
|
||||
if self.properties[self.WEBSITE_CONFIGURATION] is not None:
|
||||
sc = self.properties[self.WEBSITE_CONFIGURATION]
|
||||
index_doc = sc[self.WEBSITE_CONFIGURATION_INDEX_DOCUMENT]
|
||||
@ -151,12 +150,12 @@ class S3Bucket(resource.Resource):
|
||||
|
||||
def handle_delete(self):
|
||||
"""Perform specified delete policy."""
|
||||
logger.debug('S3Bucket delete container %s' % self.resource_id)
|
||||
LOG.debug('S3Bucket delete container %s' % self.resource_id)
|
||||
if self.resource_id is not None:
|
||||
try:
|
||||
self.swift().delete_container(self.resource_id)
|
||||
except clients.swiftclient.ClientException as ex:
|
||||
logger.warn(_("Delete container failed: %s") % ex)
|
||||
LOG.warn(_("Delete container failed: %s") % ex)
|
||||
|
||||
def FnGetRefId(self):
|
||||
return unicode(self.resource_id)
|
||||
|
@ -35,7 +35,7 @@ from heat.openstack.common import uuidutils
|
||||
|
||||
cfg.CONF.import_opt('instance_user', 'heat.common.config')
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Server(stack_user.StackUser):
|
||||
@ -622,8 +622,8 @@ class Server(stack_user.StackUser):
|
||||
try:
|
||||
server = self.nova().servers.get(self.resource_id)
|
||||
except clients.novaclient.exceptions.NotFound as ex:
|
||||
logger.warn(_('Instance (%(server)s) not found: %(ex)s') % {
|
||||
'server': self.resource_id, 'ex': ex})
|
||||
LOG.warn(_('Instance (%(server)s) not found: %(ex)s')
|
||||
% {'server': self.resource_id, 'ex': ex})
|
||||
return ''
|
||||
if name == self.ADDRESSES:
|
||||
return self._add_port_for_address(server)
|
||||
@ -901,11 +901,11 @@ class Server(stack_user.StackUser):
|
||||
server=self.name)
|
||||
raise exception.StackValidationFailed(message=msg)
|
||||
elif network.get(self.NETWORK_UUID):
|
||||
logger.info(_('For the server "%(server)s" the "%(uuid)s" '
|
||||
LOG.info(_('For the server "%(server)s" the "%(uuid)s" '
|
||||
'property is set to network "%(network)s". '
|
||||
'"%(uuid)s" property is deprecated. Use '
|
||||
'"%(id)s" property instead.'
|
||||
'') % dict(uuid=self.NETWORK_UUID,
|
||||
'"%(id)s" property instead.')
|
||||
% dict(uuid=self.NETWORK_UUID,
|
||||
id=self.NETWORK_ID,
|
||||
network=network[self.NETWORK_ID],
|
||||
server=self.name))
|
||||
@ -986,7 +986,7 @@ class Server(stack_user.StackUser):
|
||||
raise exception.NotFound(_('Failed to find server %s') %
|
||||
self.resource_id)
|
||||
else:
|
||||
logger.debug('suspending server %s' % self.resource_id)
|
||||
LOG.debug('suspending server %s' % self.resource_id)
|
||||
# We want the server.suspend to happen after the volume
|
||||
# detachement has finished, so pass both tasks and the server
|
||||
suspend_runner = scheduler.TaskRunner(server.suspend)
|
||||
@ -1003,9 +1003,8 @@ class Server(stack_user.StackUser):
|
||||
return True
|
||||
|
||||
nova_utils.refresh_server(server)
|
||||
logger.debug('%(name)s check_suspend_complete status '
|
||||
'= %(status)s' % {
|
||||
'name': self.name, 'status': server.status})
|
||||
LOG.debug('%(name)s check_suspend_complete status = %(status)s'
|
||||
% {'name': self.name, 'status': server.status})
|
||||
if server.status in list(nova_utils.deferred_server_statuses +
|
||||
['ACTIVE']):
|
||||
return server.status == 'SUSPENDED'
|
||||
@ -1032,7 +1031,7 @@ class Server(stack_user.StackUser):
|
||||
raise exception.NotFound(_('Failed to find server %s') %
|
||||
self.resource_id)
|
||||
else:
|
||||
logger.debug('resuming server %s' % self.resource_id)
|
||||
LOG.debug('resuming server %s' % self.resource_id)
|
||||
server.resume()
|
||||
return server
|
||||
|
||||
|
@ -21,7 +21,7 @@ from heat.engine import resource
|
||||
from heat.openstack.common.gettextutils import _
|
||||
from heat.openstack.common import log as logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class SoftwareConfig(resource.Resource):
|
||||
@ -167,7 +167,7 @@ class SoftwareConfig(resource.Resource):
|
||||
try:
|
||||
self.heat().software_configs.delete(self.resource_id)
|
||||
except heat_exp.HTTPNotFound:
|
||||
logger.debug(
|
||||
LOG.debug(
|
||||
_('Software config %s is not found.') % self.resource_id)
|
||||
|
||||
def _resolve_attribute(self, name):
|
||||
|
@ -28,7 +28,7 @@ from heat.engine.resources.software_config import software_config as sc
|
||||
from heat.engine import signal_responder
|
||||
from heat.openstack.common import log as logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class SoftwareDeployment(signal_responder.SignalResponder):
|
||||
@ -240,7 +240,7 @@ class SoftwareDeployment(signal_responder.SignalResponder):
|
||||
elif sd.status == SoftwareDeployment.FAILED:
|
||||
message = _("Deployment to server "
|
||||
"failed: %s") % sd.status_reason
|
||||
logger.error(message)
|
||||
LOG.error(message)
|
||||
exc = exception.Error(message)
|
||||
raise exc
|
||||
|
||||
|
@ -20,7 +20,7 @@ from heat.engine import properties
|
||||
from heat.engine import resource
|
||||
from heat.openstack.common import log as logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class SwiftContainer(resource.Resource):
|
||||
@ -126,10 +126,10 @@ class SwiftContainer(resource.Resource):
|
||||
if self.properties.get(key) is not None:
|
||||
container_headers[key] = self.properties[key]
|
||||
|
||||
logger.debug('SwiftContainer create container %(container)s with '
|
||||
LOG.debug('SwiftContainer create container %(container)s with '
|
||||
'container headers %(container_headers)s and '
|
||||
'account headers %(account_headers)s' % {
|
||||
'container': container,
|
||||
'account headers %(account_headers)s'
|
||||
% {'container': container,
|
||||
'account_headers': account_headers,
|
||||
'container_headers': container_headers})
|
||||
|
||||
@ -142,13 +142,12 @@ class SwiftContainer(resource.Resource):
|
||||
|
||||
def handle_delete(self):
|
||||
"""Perform specified delete policy."""
|
||||
logger.debug('SwiftContainer delete container %s' %
|
||||
self.resource_id)
|
||||
LOG.debug('SwiftContainer delete container %s' % self.resource_id)
|
||||
if self.resource_id is not None:
|
||||
try:
|
||||
self.swift().delete_container(self.resource_id)
|
||||
except clients.swiftclient.ClientException as ex:
|
||||
logger.warn(_("Delete container failed: %s") % ex)
|
||||
LOG.warn(_("Delete container failed: %s") % ex)
|
||||
|
||||
def FnGetRefId(self):
|
||||
return unicode(self.resource_id)
|
||||
@ -167,7 +166,7 @@ class SwiftContainer(resource.Resource):
|
||||
try:
|
||||
headers = self.swift().head_container(self.resource_id)
|
||||
except clients.swiftclient.ClientException as ex:
|
||||
logger.warn(_("Head container failed: %s") % ex)
|
||||
LOG.warn(_("Head container failed: %s") % ex)
|
||||
return None
|
||||
else:
|
||||
if key == self.OBJECT_COUNT:
|
||||
|
@ -19,7 +19,7 @@ from heat.engine import resource
|
||||
from heat.engine import stack_user
|
||||
from heat.openstack.common import log as logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
#
|
||||
# We are ignoring Groups as keystone does not support them.
|
||||
@ -76,22 +76,21 @@ class User(stack_user.StackUser):
|
||||
# ignore the policy (don't reject it because we previously ignored
|
||||
# and we don't want to break templates which previously worked
|
||||
if not isinstance(policy, basestring):
|
||||
logger.warning(_("Ignoring policy %s, must be string "
|
||||
LOG.warning(_("Ignoring policy %s, must be string "
|
||||
"resource name") % policy)
|
||||
continue
|
||||
|
||||
try:
|
||||
policy_rsrc = self.stack[policy]
|
||||
except KeyError:
|
||||
logger.error(_("Policy %(policy)s does not exist in stack "
|
||||
"%(stack)s") % {
|
||||
'policy': policy,
|
||||
'stack': self.stack.name})
|
||||
LOG.error(_("Policy %(policy)s does not exist in stack "
|
||||
"%(stack)s")
|
||||
% {'policy': policy, 'stack': self.stack.name})
|
||||
return False
|
||||
|
||||
if not callable(getattr(policy_rsrc, 'access_allowed', None)):
|
||||
logger.error(_("Policy %s is not an AccessPolicy resource") %
|
||||
policy)
|
||||
LOG.error(_("Policy %s is not an AccessPolicy resource")
|
||||
% policy)
|
||||
return False
|
||||
|
||||
return True
|
||||
@ -116,7 +115,7 @@ class User(stack_user.StackUser):
|
||||
policies = (self.properties[self.POLICIES] or [])
|
||||
for policy in policies:
|
||||
if not isinstance(policy, basestring):
|
||||
logger.warning(_("Ignoring policy %s, must be string "
|
||||
LOG.warning(_("Ignoring policy %s, must be string "
|
||||
"resource name") % policy)
|
||||
continue
|
||||
policy_rsrc = self.stack[policy]
|
||||
@ -211,7 +210,7 @@ class AccessKey(resource.Resource):
|
||||
|
||||
user = self._get_user()
|
||||
if user is None:
|
||||
logger.warning(_('Error deleting %s - user not found') % str(self))
|
||||
LOG.warning(_('Error deleting %s - user not found') % str(self))
|
||||
return
|
||||
user._delete_keypair()
|
||||
|
||||
@ -221,9 +220,9 @@ class AccessKey(resource.Resource):
|
||||
'''
|
||||
if self._secret is None:
|
||||
if not self.resource_id:
|
||||
logger.warn(_('could not get secret for %(username)s '
|
||||
'Error:%(msg)s') % {
|
||||
'username': self.properties[self.USER_NAME],
|
||||
LOG.warn(_('could not get secret for %(username)s '
|
||||
'Error:%(msg)s')
|
||||
% {'username': self.properties[self.USER_NAME],
|
||||
'msg': "resource_id not yet set"})
|
||||
else:
|
||||
# First try to retrieve the secret from resource_data, but
|
||||
@ -241,8 +240,7 @@ class AccessKey(resource.Resource):
|
||||
# And the ID of the v3 credential
|
||||
self.data_set('credential_id', kp.id, redact=True)
|
||||
except Exception as ex:
|
||||
logger.warn(
|
||||
_('could not get secret for %(username)s '
|
||||
LOG.warn(_('could not get secret for %(username)s '
|
||||
'Error:%(msg)s') % {
|
||||
'username': self.properties[self.USER_NAME],
|
||||
'msg': ex})
|
||||
@ -291,7 +289,7 @@ class AccessPolicy(resource.Resource):
|
||||
for resource in resources:
|
||||
if resource not in self.stack:
|
||||
msg = _("AccessPolicy resource %s not in stack") % resource
|
||||
logger.error(msg)
|
||||
LOG.error(msg)
|
||||
raise exception.StackValidationFailed(message=msg)
|
||||
|
||||
def access_allowed(self, resource_name):
|
||||
|
@ -27,7 +27,7 @@ from heat.openstack.common import log as logging
|
||||
|
||||
volume_backups = try_import('cinderclient.v1.volume_backups')
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class Volume(resource.Resource):
|
||||
@ -153,7 +153,7 @@ class Volume(resource.Resource):
|
||||
vol.get()
|
||||
|
||||
if vol.status == 'in-use':
|
||||
logger.warn(_('can not delete volume when in-use'))
|
||||
LOG.warn(_('can not delete volume when in-use'))
|
||||
raise exception.Error(_('Volume in use'))
|
||||
|
||||
vol.delete()
|
||||
@ -210,7 +210,7 @@ class VolumeAttachTask(object):
|
||||
|
||||
def __call__(self):
|
||||
"""Return a co-routine which runs the task."""
|
||||
logger.debug(str(self))
|
||||
LOG.debug(str(self))
|
||||
|
||||
va = self.clients.nova().volumes.create_server_volume(
|
||||
server_id=self.server_id,
|
||||
@ -221,15 +221,15 @@ class VolumeAttachTask(object):
|
||||
|
||||
vol = self.clients.cinder().volumes.get(self.volume_id)
|
||||
while vol.status == 'available' or vol.status == 'attaching':
|
||||
logger.debug('%(name)s - volume status: %(status)s' % {
|
||||
'name': str(self), 'status': vol.status})
|
||||
LOG.debug('%(name)s - volume status: %(status)s'
|
||||
% {'name': str(self), 'status': vol.status})
|
||||
yield
|
||||
vol.get()
|
||||
|
||||
if vol.status != 'in-use':
|
||||
raise exception.Error(vol.status)
|
||||
|
||||
logger.info(_('%s - complete') % str(self))
|
||||
LOG.info(_('%s - complete') % str(self))
|
||||
|
||||
|
||||
class VolumeDetachTask(object):
|
||||
@ -257,7 +257,7 @@ class VolumeDetachTask(object):
|
||||
|
||||
def __call__(self):
|
||||
"""Return a co-routine which runs the task."""
|
||||
logger.debug(str(self))
|
||||
LOG.debug(str(self))
|
||||
|
||||
server_api = self.clients.nova().volumes
|
||||
|
||||
@ -269,7 +269,7 @@ class VolumeDetachTask(object):
|
||||
except (clients.cinderclient.exceptions.NotFound,
|
||||
clients.novaclient.exceptions.BadRequest,
|
||||
clients.novaclient.exceptions.NotFound):
|
||||
logger.warning(_('%s - volume not found') % str(self))
|
||||
LOG.warning(_('%s - volume not found') % str(self))
|
||||
return
|
||||
|
||||
# detach the volume using volume_attachment
|
||||
@ -277,24 +277,24 @@ class VolumeDetachTask(object):
|
||||
server_api.delete_server_volume(self.server_id, self.attachment_id)
|
||||
except (clients.novaclient.exceptions.BadRequest,
|
||||
clients.novaclient.exceptions.NotFound) as e:
|
||||
logger.warning(_('%(res)s - %(err)s') % {'res': str(self),
|
||||
LOG.warning(_('%(res)s - %(err)s') % {'res': str(self),
|
||||
'err': e})
|
||||
|
||||
yield
|
||||
|
||||
try:
|
||||
while vol.status in ('in-use', 'detaching'):
|
||||
logger.debug('%s - volume still in use' % str(self))
|
||||
LOG.debug('%s - volume still in use' % str(self))
|
||||
yield
|
||||
vol.get()
|
||||
|
||||
logger.info(_('%(name)s - status: %(status)s') % {
|
||||
'name': str(self), 'status': vol.status})
|
||||
LOG.info(_('%(name)s - status: %(status)s')
|
||||
% {'name': str(self), 'status': vol.status})
|
||||
if vol.status != 'available':
|
||||
raise exception.Error(vol.status)
|
||||
|
||||
except clients.cinderclient.exceptions.NotFound:
|
||||
logger.warning(_('%s - volume not found') % str(self))
|
||||
LOG.warning(_('%s - volume not found') % str(self))
|
||||
|
||||
# The next check is needed for immediate reattachment when updating:
|
||||
# there might be some time between cinder marking volume as 'available'
|
||||
@ -308,12 +308,12 @@ class VolumeDetachTask(object):
|
||||
return True
|
||||
|
||||
while server_has_attachment(self.server_id, self.attachment_id):
|
||||
logger.info(_("Server %(srv)s still has attachment %(att)s.") %
|
||||
{'att': self.attachment_id, 'srv': self.server_id})
|
||||
LOG.info(_("Server %(srv)s still has attachment %(att)s.")
|
||||
% {'att': self.attachment_id, 'srv': self.server_id})
|
||||
yield
|
||||
|
||||
logger.info(_("Volume %(vol)s is detached from server %(srv)s") %
|
||||
{'vol': vol.id, 'srv': self.server_id})
|
||||
LOG.info(_("Volume %(vol)s is detached from server %(srv)s")
|
||||
% {'vol': vol.id, 'srv': self.server_id})
|
||||
|
||||
|
||||
class VolumeAttachment(resource.Resource):
|
||||
|
@ -24,7 +24,7 @@ from heat.engine import scheduler
|
||||
from heat.engine import signal_responder
|
||||
from heat.openstack.common import log as logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class WaitConditionHandle(signal_responder.SignalResponder):
|
||||
@ -76,7 +76,7 @@ class WaitConditionHandle(signal_responder.SignalResponder):
|
||||
if self._metadata_format_ok(new_metadata):
|
||||
rsrc_metadata = self.metadata_get(refresh=True)
|
||||
if new_metadata['UniqueId'] in rsrc_metadata:
|
||||
logger.warning(_("Overwriting Metadata item for UniqueId %s!")
|
||||
LOG.warning(_("Overwriting Metadata item for UniqueId %s!")
|
||||
% new_metadata['UniqueId'])
|
||||
safe_metadata = {}
|
||||
for k in ('Data', 'Reason', 'Status'):
|
||||
@ -84,7 +84,7 @@ class WaitConditionHandle(signal_responder.SignalResponder):
|
||||
rsrc_metadata.update({new_metadata['UniqueId']: safe_metadata})
|
||||
self.metadata_set(rsrc_metadata)
|
||||
else:
|
||||
logger.error(_("Metadata failed validation for %s") % self.name)
|
||||
LOG.error(_("Metadata failed validation for %s") % self.name)
|
||||
raise ValueError(_("Metadata format invalid"))
|
||||
|
||||
def get_status(self):
|
||||
@ -225,20 +225,20 @@ class WaitCondition(resource.Resource):
|
||||
yield
|
||||
except scheduler.Timeout:
|
||||
timeout = WaitConditionTimeout(self, handle)
|
||||
logger.info(_('%(name)s Timed out (%(timeout)s)') % {
|
||||
'name': str(self), 'timeout': str(timeout)})
|
||||
LOG.info(_('%(name)s Timed out (%(timeout)s)')
|
||||
% {'name': str(self), 'timeout': str(timeout)})
|
||||
raise timeout
|
||||
|
||||
handle_status = handle.get_status()
|
||||
|
||||
if any(s != STATUS_SUCCESS for s in handle_status):
|
||||
failure = WaitConditionFailure(self, handle)
|
||||
logger.info(_('%(name)s Failed (%(failure)s)') % {
|
||||
'name': str(self), 'failure': str(failure)})
|
||||
LOG.info(_('%(name)s Failed (%(failure)s)')
|
||||
% {'name': str(self), 'failure': str(failure)})
|
||||
raise failure
|
||||
|
||||
if len(handle_status) >= self.properties[self.COUNT]:
|
||||
logger.info(_("%s Succeeded") % str(self))
|
||||
LOG.info(_("%s Succeeded") % str(self))
|
||||
return
|
||||
|
||||
def handle_create(self):
|
||||
@ -285,8 +285,8 @@ class WaitCondition(resource.Resource):
|
||||
meta = handle.metadata_get(refresh=True)
|
||||
# Note, can't use a dict generator on python 2.6, hence:
|
||||
res = dict([(k, meta[k]['Data']) for k in meta])
|
||||
logger.debug('%(name)s.GetAtt(%(key)s) == %(res)s' %
|
||||
{'name': self.name,
|
||||
LOG.debug('%(name)s.GetAtt(%(key)s) == %(res)s'
|
||||
% {'name': self.name,
|
||||
'key': key,
|
||||
'res': res})
|
||||
|
||||
|
@ -23,7 +23,7 @@ from heat.openstack.common import excutils
|
||||
from heat.openstack.common.gettextutils import _
|
||||
from heat.openstack.common import log as logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
# Whether TaskRunner._sleep actually does an eventlet sleep when called.
|
||||
@ -128,7 +128,7 @@ class TaskRunner(object):
|
||||
def _sleep(self, wait_time):
|
||||
"""Sleep for the specified number of seconds."""
|
||||
if ENABLE_SLEEP and wait_time is not None:
|
||||
logger.debug('%s sleeping' % str(self))
|
||||
LOG.debug('%s sleeping' % str(self))
|
||||
eventlet.sleep(wait_time)
|
||||
|
||||
def __call__(self, wait_time=1, timeout=None):
|
||||
@ -151,7 +151,7 @@ class TaskRunner(object):
|
||||
"""
|
||||
assert self._runner is None, "Task already started"
|
||||
|
||||
logger.debug('%s starting' % str(self))
|
||||
LOG.debug('%s starting' % str(self))
|
||||
|
||||
if timeout is not None:
|
||||
self._timeout = Timeout(self, timeout)
|
||||
@ -163,7 +163,7 @@ class TaskRunner(object):
|
||||
else:
|
||||
self._runner = False
|
||||
self._done = True
|
||||
logger.debug('%s done (not resumable)' % str(self))
|
||||
LOG.debug('%s done (not resumable)' % str(self))
|
||||
|
||||
def step(self):
|
||||
"""
|
||||
@ -174,7 +174,7 @@ class TaskRunner(object):
|
||||
assert self._runner is not None, "Task not started"
|
||||
|
||||
if self._timeout is not None and self._timeout.expired():
|
||||
logger.info(_('%s timed out') % str(self))
|
||||
LOG.info(_('%s timed out') % str(self))
|
||||
|
||||
try:
|
||||
self._runner.throw(self._timeout)
|
||||
@ -184,13 +184,13 @@ class TaskRunner(object):
|
||||
# Clean up in case task swallows exception without exiting
|
||||
self.cancel()
|
||||
else:
|
||||
logger.debug('%s running' % str(self))
|
||||
LOG.debug('%s running' % str(self))
|
||||
|
||||
try:
|
||||
next(self._runner)
|
||||
except StopIteration:
|
||||
self._done = True
|
||||
logger.debug('%s complete' % str(self))
|
||||
LOG.debug('%s complete' % str(self))
|
||||
|
||||
return self._done
|
||||
|
||||
@ -207,7 +207,7 @@ class TaskRunner(object):
|
||||
def cancel(self):
|
||||
"""Cancel the task and mark it as done."""
|
||||
if not self.done():
|
||||
logger.debug('%s cancelled' % str(self))
|
||||
LOG.debug('%s cancelled' % str(self))
|
||||
try:
|
||||
if self.started():
|
||||
self._runner.close()
|
||||
|
@ -50,7 +50,7 @@ cfg.CONF.import_opt('engine_life_check_timeout', 'heat.common.config')
|
||||
cfg.CONF.import_opt('max_resources_per_stack', 'heat.common.config')
|
||||
cfg.CONF.import_opt('max_stacks_per_tenant', 'heat.common.config')
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
def request_context(func):
|
||||
@ -197,13 +197,12 @@ class StackWatch(object):
|
||||
# Retrieve the stored credentials & create context
|
||||
# Require tenant_safe=False to the stack_get to defeat tenant
|
||||
# scoping otherwise we fail to retrieve the stack
|
||||
logger.debug("Periodic watcher task for stack %s" % sid)
|
||||
LOG.debug("Periodic watcher task for stack %s" % sid)
|
||||
admin_context = context.get_admin_context()
|
||||
stack = db_api.stack_get(admin_context, sid, tenant_safe=False,
|
||||
eager_load=True)
|
||||
if not stack:
|
||||
logger.error(_("Unable to retrieve stack %s for periodic task") %
|
||||
sid)
|
||||
LOG.error(_("Unable to retrieve stack %s for periodic task") % sid)
|
||||
return
|
||||
stack_context = EngineService.load_user_creds(stack.user_creds_id)
|
||||
|
||||
@ -216,7 +215,7 @@ class StackWatch(object):
|
||||
try:
|
||||
wrs = db_api.watch_rule_get_all_by_stack(stack_context, sid)
|
||||
except Exception as ex:
|
||||
logger.warn(_('periodic_task db error watch rule removed? %(ex)s')
|
||||
LOG.warn(_('periodic_task db error watch rule removed? %(ex)s')
|
||||
% ex)
|
||||
return
|
||||
|
||||
@ -290,7 +289,7 @@ class EngineService(service.Service):
|
||||
self.stack_watch = StackWatch(self.thread_group_mgr)
|
||||
self.listener = EngineListener(host, self.engine_id,
|
||||
self.thread_group_mgr)
|
||||
logger.debug("Starting listener for engine %s" % self.engine_id)
|
||||
LOG.debug("Starting listener for engine %s" % self.engine_id)
|
||||
self.listener.start()
|
||||
|
||||
def start(self):
|
||||
@ -304,7 +303,7 @@ class EngineService(service.Service):
|
||||
|
||||
def stop(self):
|
||||
# Stop rpc connection at first for preventing new requests
|
||||
logger.info(_("Attempting to stop engine service..."))
|
||||
LOG.info(_("Attempting to stop engine service..."))
|
||||
try:
|
||||
self.conn.close()
|
||||
except Exception:
|
||||
@ -315,14 +314,14 @@ class EngineService(service.Service):
|
||||
# Ingore dummy service task
|
||||
if stack_id == cfg.CONF.periodic_interval:
|
||||
continue
|
||||
logger.info(_("Waiting stack %s processing to be finished")
|
||||
LOG.info(_("Waiting stack %s processing to be finished")
|
||||
% stack_id)
|
||||
# Stop threads gracefully
|
||||
self.thread_group_mgr.stop(stack_id, True)
|
||||
logger.info(_("Stack %s processing was finished") % stack_id)
|
||||
LOG.info(_("Stack %s processing was finished") % stack_id)
|
||||
|
||||
# Terminate the engine process
|
||||
logger.info(_("All threads were gone, terminating engine"))
|
||||
LOG.info(_("All threads were gone, terminating engine"))
|
||||
super(EngineService, self).stop()
|
||||
|
||||
@staticmethod
|
||||
@ -491,7 +490,7 @@ class EngineService(service.Service):
|
||||
:param args: Request parameters/args passed from API
|
||||
"""
|
||||
|
||||
logger.info(_('previewing stack %s') % stack_name)
|
||||
LOG.info(_('previewing stack %s') % stack_name)
|
||||
tmpl = parser.Template(template, files=files)
|
||||
self._validate_new_stack(cnxt, stack_name, tmpl)
|
||||
|
||||
@ -519,7 +518,7 @@ class EngineService(service.Service):
|
||||
:param files: Files referenced from the template
|
||||
:param args: Request parameters/args passed from API
|
||||
"""
|
||||
logger.info(_('template is %s') % template)
|
||||
LOG.info(_('template is %s') % template)
|
||||
|
||||
def _stack_create(stack):
|
||||
# Create/Adopt a stack, and create the periodic task if successful
|
||||
@ -533,8 +532,7 @@ class EngineService(service.Service):
|
||||
# Schedule a periodic watcher task for this stack
|
||||
self.stack_watch.start_watch_task(stack.id, cnxt)
|
||||
else:
|
||||
logger.warning(_("Stack create failed, status %s") %
|
||||
stack.status)
|
||||
LOG.warning(_("Stack create failed, status %s") % stack.status)
|
||||
|
||||
tmpl = parser.Template(template, files=files)
|
||||
self._validate_new_stack(cnxt, stack_name, tmpl)
|
||||
@ -570,7 +568,7 @@ class EngineService(service.Service):
|
||||
:param files: Files referenced from the template
|
||||
:param args: Request parameters/args passed from API
|
||||
"""
|
||||
logger.info(_('template is %s') % template)
|
||||
LOG.info(_('template is %s') % template)
|
||||
|
||||
# Get the database representation of the existing stack
|
||||
db_stack = self._get_stack(cnxt, stack_identity)
|
||||
@ -620,7 +618,7 @@ class EngineService(service.Service):
|
||||
:param template: Template of stack you want to create.
|
||||
:param params: Stack Input Params
|
||||
"""
|
||||
logger.info(_('validate_template'))
|
||||
LOG.info(_('validate_template'))
|
||||
if template is None:
|
||||
msg = _("No Template provided.")
|
||||
return webob.exc.HTTPBadRequest(explanation=msg)
|
||||
@ -711,7 +709,7 @@ class EngineService(service.Service):
|
||||
return False
|
||||
|
||||
st = self._get_stack(cnxt, stack_identity)
|
||||
logger.info(_('Deleting stack %s') % st.name)
|
||||
LOG.info(_('Deleting stack %s') % st.name)
|
||||
stack = parser.Stack.load(cnxt, stack=st)
|
||||
|
||||
lock = stack_lock.StackLock(cnxt, stack, self.engine_id)
|
||||
@ -732,7 +730,7 @@ class EngineService(service.Service):
|
||||
elif stack_lock.StackLock.engine_alive(cnxt, acquire_result):
|
||||
stop_result = remote_stop(acquire_result)
|
||||
if stop_result is None:
|
||||
logger.debug("Successfully stopped remote task on engine %s"
|
||||
LOG.debug("Successfully stopped remote task on engine %s"
|
||||
% acquire_result)
|
||||
else:
|
||||
raise exception.StopActionFailed(stack_name=stack.name,
|
||||
@ -764,7 +762,7 @@ class EngineService(service.Service):
|
||||
:param stack_identity: Name of the stack you want to abandon.
|
||||
"""
|
||||
st = self._get_stack(cnxt, stack_identity)
|
||||
logger.info(_('abandoning stack %s') % st.name)
|
||||
LOG.info(_('abandoning stack %s') % st.name)
|
||||
stack = parser.Stack.load(cnxt, stack=st)
|
||||
lock = stack_lock.StackLock(cnxt, stack, self.engine_id)
|
||||
acquire_result = lock.try_acquire()
|
||||
@ -896,8 +894,7 @@ class EngineService(service.Service):
|
||||
|
||||
if cfg.CONF.heat_stack_user_role in cnxt.roles:
|
||||
if not self._authorize_stack_user(cnxt, stack, resource_name):
|
||||
logger.warning(_("Access denied to resource %s")
|
||||
% resource_name)
|
||||
LOG.warning(_("Access denied to resource %s") % resource_name)
|
||||
raise exception.Forbidden()
|
||||
|
||||
if resource_name not in stack:
|
||||
@ -977,7 +974,7 @@ class EngineService(service.Service):
|
||||
Handle request to perform suspend action on a stack
|
||||
'''
|
||||
def _stack_suspend(stack):
|
||||
logger.debug("suspending stack %s" % stack.name)
|
||||
LOG.debug("suspending stack %s" % stack.name)
|
||||
stack.suspend()
|
||||
|
||||
s = self._get_stack(cnxt, stack_identity)
|
||||
@ -992,7 +989,7 @@ class EngineService(service.Service):
|
||||
Handle request to perform a resume action on a stack
|
||||
'''
|
||||
def _stack_resume(stack):
|
||||
logger.debug("resuming stack %s" % stack.name)
|
||||
LOG.debug("resuming stack %s" % stack.name)
|
||||
stack.resume()
|
||||
|
||||
s = self._get_stack(cnxt, stack_identity)
|
||||
@ -1075,7 +1072,7 @@ class EngineService(service.Service):
|
||||
try:
|
||||
wrn = [w.name for w in db_api.watch_rule_get_all(cnxt)]
|
||||
except Exception as ex:
|
||||
logger.warn(_('show_watch (all) db error %s') % ex)
|
||||
LOG.warn(_('show_watch (all) db error %s') % ex)
|
||||
return
|
||||
|
||||
wrs = [watchrule.WatchRule.load(cnxt, w) for w in wrn]
|
||||
@ -1098,13 +1095,13 @@ class EngineService(service.Service):
|
||||
# namespace/metric, but we will want this at some point
|
||||
# for now, the API can query all metric data and filter locally
|
||||
if metric_namespace is not None or metric_name is not None:
|
||||
logger.error(_("Filtering by namespace/metric not yet supported"))
|
||||
LOG.error(_("Filtering by namespace/metric not yet supported"))
|
||||
return
|
||||
|
||||
try:
|
||||
wds = db_api.watch_data_get_all(cnxt)
|
||||
except Exception as ex:
|
||||
logger.warn(_('show_metric (all) db error %s') % ex)
|
||||
LOG.warn(_('show_metric (all) db error %s') % ex)
|
||||
return
|
||||
|
||||
result = [api.format_watch_data(w) for w in wds]
|
||||
|
@ -20,7 +20,7 @@ from heat.openstack.common.gettextutils import _
|
||||
from heat.openstack.common import log as logging
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
SIGNAL_TYPES = (
|
||||
WAITCONDITION, SIGNAL
|
||||
@ -65,7 +65,7 @@ class SignalResponder(stack_user.StackUser):
|
||||
secret_key = self.data().get('secret_key')
|
||||
|
||||
if not access_key or not secret_key:
|
||||
logger.warning(_('Cannot generate signed url, '
|
||||
LOG.warning(_('Cannot generate signed url, '
|
||||
'no stored access/secret key'))
|
||||
return
|
||||
|
||||
|
@ -24,7 +24,7 @@ from heat.openstack.common.rpc import proxy
|
||||
|
||||
cfg.CONF.import_opt('engine_life_check_timeout', 'heat.common.config')
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class StackLock(object):
|
||||
@ -66,20 +66,20 @@ class StackLock(object):
|
||||
lock_engine_id = db_api.stack_lock_create(self.stack.id,
|
||||
self.engine_id)
|
||||
if lock_engine_id is None:
|
||||
logger.debug("Engine %(engine)s acquired lock on stack "
|
||||
LOG.debug("Engine %(engine)s acquired lock on stack "
|
||||
"%(stack)s" % {'engine': self.engine_id,
|
||||
'stack': self.stack.id})
|
||||
return
|
||||
|
||||
if lock_engine_id == self.engine_id or \
|
||||
self.engine_alive(self.context, lock_engine_id):
|
||||
logger.debug("Lock on stack %(stack)s is owned by engine "
|
||||
LOG.debug("Lock on stack %(stack)s is owned by engine "
|
||||
"%(engine)s" % {'stack': self.stack.id,
|
||||
'engine': lock_engine_id})
|
||||
raise exception.ActionInProgress(stack_name=self.stack.name,
|
||||
action=self.stack.action)
|
||||
else:
|
||||
logger.info(_("Stale lock detected on stack %(stack)s. Engine "
|
||||
LOG.info(_("Stale lock detected on stack %(stack)s. Engine "
|
||||
"%(engine)s will attempt to steal the lock")
|
||||
% {'stack': self.stack.id, 'engine': self.engine_id})
|
||||
|
||||
@ -87,21 +87,21 @@ class StackLock(object):
|
||||
self.engine_id)
|
||||
|
||||
if result is None:
|
||||
logger.info(_("Engine %(engine)s successfully stole the lock "
|
||||
LOG.info(_("Engine %(engine)s successfully stole the lock "
|
||||
"on stack %(stack)s")
|
||||
% {'engine': self.engine_id,
|
||||
'stack': self.stack.id})
|
||||
return
|
||||
elif result is True:
|
||||
if retry:
|
||||
logger.info(_("The lock on stack %(stack)s was released "
|
||||
LOG.info(_("The lock on stack %(stack)s was released "
|
||||
"while engine %(engine)s was stealing it. "
|
||||
"Trying again") % {'stack': self.stack.id,
|
||||
'engine': self.engine_id})
|
||||
return self.acquire(retry=False)
|
||||
else:
|
||||
new_lock_engine_id = result
|
||||
logger.info(_("Failed to steal lock on stack %(stack)s. "
|
||||
LOG.info(_("Failed to steal lock on stack %(stack)s. "
|
||||
"Engine %(engine)s stole the lock first")
|
||||
% {'stack': self.stack.id,
|
||||
'engine': new_lock_engine_id})
|
||||
@ -114,9 +114,8 @@ class StackLock(object):
|
||||
# Only the engine that owns the lock will be releasing it.
|
||||
result = db_api.stack_lock_release(stack_id, self.engine_id)
|
||||
if result is True:
|
||||
logger.warning(_("Lock was already released on stack %s!")
|
||||
% stack_id)
|
||||
LOG.warning(_("Lock was already released on stack %s!") % stack_id)
|
||||
else:
|
||||
logger.debug("Engine %(engine)s released lock on stack "
|
||||
LOG.debug("Engine %(engine)s released lock on stack "
|
||||
"%(stack)s" % {'engine': self.engine_id,
|
||||
'stack': stack_id})
|
||||
|
@ -22,7 +22,7 @@ from heat.engine import scheduler
|
||||
from heat.openstack.common.gettextutils import _
|
||||
from heat.openstack.common import log as logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class StackResource(resource.Resource):
|
||||
@ -103,7 +103,7 @@ class StackResource(resource.Resource):
|
||||
params = self.child_params()
|
||||
except NotImplementedError:
|
||||
not_implemented_msg = _("Preview of '%s' not yet implemented")
|
||||
logger.warning(not_implemented_msg % self.__class__.__name__)
|
||||
LOG.warning(not_implemented_msg % self.__class__.__name__)
|
||||
return self
|
||||
|
||||
self._validate_nested_resources(template)
|
||||
@ -240,7 +240,7 @@ class StackResource(resource.Resource):
|
||||
try:
|
||||
stack = self.nested()
|
||||
except exception.NotFound:
|
||||
logger.info(_("Stack not found to delete"))
|
||||
LOG.info(_("Stack not found to delete"))
|
||||
else:
|
||||
if stack is not None:
|
||||
delete_task = scheduler.TaskRunner(stack.delete)
|
||||
|
@ -19,7 +19,7 @@ from heat.openstack.common.gettextutils import _
|
||||
from heat.openstack.common import log as logging
|
||||
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class StackUser(resource.Resource):
|
||||
@ -80,7 +80,7 @@ class StackUser(resource.Resource):
|
||||
# compatibility with resources created before the migration
|
||||
# to stack_user.StackUser domain users. After an appropriate
|
||||
# transitional period, this should be removed.
|
||||
logger.warning(_('Reverting to legacy user delete path'))
|
||||
LOG.warning(_('Reverting to legacy user delete path'))
|
||||
try:
|
||||
self.keystone().delete_stack_user(user_id)
|
||||
except kc_exception.NotFound:
|
||||
|
@ -20,7 +20,7 @@ from heat.db import api as db_api
|
||||
from heat.engine import plugin_manager
|
||||
from heat.openstack.common import log as logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
__all__ = ['Template']
|
||||
|
||||
@ -191,7 +191,7 @@ class Template(collections.Mapping):
|
||||
# check resources
|
||||
tmpl_resources = self[self.RESOURCES]
|
||||
if not tmpl_resources:
|
||||
logger.warn(_('Template does not contain any resources, so '
|
||||
LOG.warn(_('Template does not contain any resources, so '
|
||||
'the template would not really do anything when '
|
||||
'being instantiated.'))
|
||||
|
||||
|
@ -20,7 +20,7 @@ from heat.engine import scheduler
|
||||
from heat.openstack.common.gettextutils import _
|
||||
from heat.openstack.common import log as logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class StackUpdate(object):
|
||||
@ -76,7 +76,7 @@ class StackUpdate(object):
|
||||
def _remove_backup_resource(self, prev_res):
|
||||
if prev_res.state not in ((prev_res.INIT, prev_res.COMPLETE),
|
||||
(prev_res.DELETE, prev_res.COMPLETE)):
|
||||
logger.debug("Deleting backup resource %s" % prev_res.name)
|
||||
LOG.debug("Deleting backup resource %s" % prev_res.name)
|
||||
yield prev_res.destroy()
|
||||
|
||||
@staticmethod
|
||||
@ -100,18 +100,17 @@ class StackUpdate(object):
|
||||
# Swap in the backup resource if it is in a valid state,
|
||||
# instead of creating a new resource
|
||||
if prev_res.status == prev_res.COMPLETE:
|
||||
logger.debug("Swapping in backup Resource %s" %
|
||||
res_name)
|
||||
LOG.debug("Swapping in backup Resource %s" % res_name)
|
||||
self._exchange_stacks(self.existing_stack[res_name],
|
||||
prev_res)
|
||||
return
|
||||
|
||||
logger.debug("Deleting backup Resource %s" % res_name)
|
||||
LOG.debug("Deleting backup Resource %s" % res_name)
|
||||
yield prev_res.destroy()
|
||||
|
||||
# Back up existing resource
|
||||
if res_name in self.existing_stack:
|
||||
logger.debug("Backing up existing Resource %s" % res_name)
|
||||
LOG.debug("Backing up existing Resource %s" % res_name)
|
||||
existing_res = self.existing_stack[res_name]
|
||||
self.previous_stack[res_name] = existing_res
|
||||
existing_res.state_set(existing_res.UPDATE, existing_res.COMPLETE)
|
||||
@ -131,9 +130,9 @@ class StackUpdate(object):
|
||||
except resource.UpdateReplace:
|
||||
pass
|
||||
else:
|
||||
logger.info(_("Resource %(res_name)s for stack %(stack_name)s"
|
||||
" updated") % {
|
||||
'res_name': res_name,
|
||||
LOG.info(_("Resource %(res_name)s for stack %(stack_name)s "
|
||||
"updated")
|
||||
% {'res_name': res_name,
|
||||
'stack_name': self.existing_stack.name})
|
||||
return
|
||||
|
||||
|
@ -23,7 +23,7 @@ from heat.openstack.common import log as logging
|
||||
from heat.openstack.common import timeutils
|
||||
from heat.rpc import api as rpc_api
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class WatchRule(object):
|
||||
@ -75,9 +75,8 @@ class WatchRule(object):
|
||||
try:
|
||||
watch = db_api.watch_rule_get_by_name(context, watch_name)
|
||||
except Exception as ex:
|
||||
logger.warn(_('WatchRule.load (%(watch_name)s) db error '
|
||||
'%(ex)s') % {
|
||||
'watch_name': watch_name, 'ex': ex})
|
||||
LOG.warn(_('WatchRule.load (%(watch_name)s) db error '
|
||||
'%(ex)s') % {'watch_name': watch_name, 'ex': ex})
|
||||
if watch is None:
|
||||
raise exception.WatchRuleNotFound(watch_name=watch_name)
|
||||
else:
|
||||
@ -210,7 +209,7 @@ class WatchRule(object):
|
||||
data = 0
|
||||
for d in self.watch_data:
|
||||
if d.created_at < self.now - self.timeperiod:
|
||||
logger.debug('ignoring %s' % str(d.data))
|
||||
LOG.debug('ignoring %s' % str(d.data))
|
||||
continue
|
||||
data = data + float(d.data[self.rule['MetricName']]['Value'])
|
||||
|
||||
@ -247,14 +246,13 @@ class WatchRule(object):
|
||||
return actions
|
||||
|
||||
def rule_actions(self, new_state):
|
||||
logger.info(_('WATCH: stack:%(stack)s, watch_name:%(watch_name)s, '
|
||||
LOG.info(_('WATCH: stack:%(stack)s, watch_name:%(watch_name)s, '
|
||||
'new_state:%(new_state)s'), {'stack': self.stack_id,
|
||||
'watch_name': self.name,
|
||||
'new_state': new_state})
|
||||
actions = []
|
||||
if self.ACTION_MAP[new_state] not in self.rule:
|
||||
logger.info(_('no action for new state %s'),
|
||||
new_state)
|
||||
LOG.info(_('no action for new state %s'), new_state)
|
||||
else:
|
||||
s = db_api.stack_get(self.context, self.stack_id,
|
||||
eager_load=True)
|
||||
@ -264,7 +262,7 @@ class WatchRule(object):
|
||||
for refid in self.rule[self.ACTION_MAP[new_state]]:
|
||||
actions.append(stack.resource_by_refid(refid).signal)
|
||||
else:
|
||||
logger.warning(_("Could not process watch state %s for stack")
|
||||
LOG.warning(_("Could not process watch state %s for stack")
|
||||
% new_state)
|
||||
return actions
|
||||
|
||||
@ -285,7 +283,7 @@ class WatchRule(object):
|
||||
dims = dims[0]
|
||||
sample['resource_metadata'] = dims
|
||||
sample['resource_id'] = dims.get('InstanceId')
|
||||
logger.debug('new sample:%(k)s data:%(sample)s' % {
|
||||
LOG.debug('new sample:%(k)s data:%(sample)s' % {
|
||||
'k': k, 'sample': sample})
|
||||
clients.ceilometer().samples.create(**sample)
|
||||
|
||||
@ -298,7 +296,7 @@ class WatchRule(object):
|
||||
return
|
||||
|
||||
if self.state == self.SUSPENDED:
|
||||
logger.debug('Ignoring metric data for %s, SUSPENDED state'
|
||||
LOG.debug('Ignoring metric data for %s, SUSPENDED state'
|
||||
% self.name)
|
||||
return []
|
||||
|
||||
@ -308,9 +306,9 @@ class WatchRule(object):
|
||||
# options, e.g --haproxy try to push multiple metrics when we
|
||||
# actually only care about one (the one we're alarming on)
|
||||
# so just ignore any data which doesn't contain MetricName
|
||||
logger.debug('Ignoring metric data (only accept %(metric)s) '
|
||||
': %(data)s' % {
|
||||
'metric': self.rule['MetricName'], 'data': data})
|
||||
LOG.debug('Ignoring metric data (only accept %(metric)s) '
|
||||
': %(data)s' % {'metric': self.rule['MetricName'],
|
||||
'data': data})
|
||||
return
|
||||
|
||||
watch_data = {
|
||||
@ -318,7 +316,7 @@ class WatchRule(object):
|
||||
'watch_rule_id': self.id
|
||||
}
|
||||
wd = db_api.watch_data_create(None, watch_data)
|
||||
logger.debug('new watch:%(name)s data:%(data)s'
|
||||
LOG.debug('new watch:%(name)s data:%(data)s'
|
||||
% {'name': self.name, 'data': str(wd.data)})
|
||||
|
||||
def state_set(self, state):
|
||||
@ -344,14 +342,14 @@ class WatchRule(object):
|
||||
if state != self.state:
|
||||
actions = self.rule_actions(state)
|
||||
if actions:
|
||||
logger.debug("Overriding state %(self_state)s for watch "
|
||||
"%(name)s with %(state)s" % {
|
||||
'self_state': self.state, 'name': self.name,
|
||||
LOG.debug("Overriding state %(self_state)s for watch "
|
||||
"%(name)s with %(state)s"
|
||||
% {'self_state': self.state, 'name': self.name,
|
||||
'state': state})
|
||||
else:
|
||||
logger.warning(_("Unable to override state %(state)s for "
|
||||
"watch %(name)s") % {
|
||||
'state': self.state, 'name': self.name})
|
||||
LOG.warning(_("Unable to override state %(state)s for "
|
||||
"watch %(name)s") % {'state': self.state,
|
||||
'name': self.name})
|
||||
return actions
|
||||
|
||||
|
||||
|
@ -35,9 +35,9 @@ TEST_DEFAULT_LOGLEVELS = {'migrate': logging.WARN}
|
||||
|
||||
class FakeLogMixin:
|
||||
def setup_logging(self):
|
||||
# Assign default logs to self.logger so we can still
|
||||
# Assign default logs to self.LOG so we can still
|
||||
# assert on heat logs.
|
||||
self.logger = self.useFixture(
|
||||
self.LOG = self.useFixture(
|
||||
fixtures.FakeLogger(level=logging.DEBUG))
|
||||
base_list = set([nlog.split('.')[0]
|
||||
for nlog in logging.Logger.manager.loggerDict])
|
||||
|
@ -17,7 +17,7 @@ from heat.engine import stack_user
|
||||
from heat.openstack.common.gettextutils import _
|
||||
from heat.openstack.common import log as logging
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
LOG = logging.getLogger(__name__)
|
||||
|
||||
|
||||
class GenericResource(resource.Resource):
|
||||
@ -29,26 +29,26 @@ class GenericResource(resource.Resource):
|
||||
'Foo': 'Another generic attribute'}
|
||||
|
||||
def handle_create(self):
|
||||
logger.warning(_('Creating generic resource (Type "%s")') %
|
||||
LOG.warning(_('Creating generic resource (Type "%s")') %
|
||||
self.type())
|
||||
|
||||
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
|
||||
logger.warning(_('Updating generic resource (Type "%s")') %
|
||||
LOG.warning(_('Updating generic resource (Type "%s")') %
|
||||
self.type())
|
||||
|
||||
def handle_delete(self):
|
||||
logger.warning(_('Deleting generic resource (Type "%s")') %
|
||||
LOG.warning(_('Deleting generic resource (Type "%s")') %
|
||||
self.type())
|
||||
|
||||
def _resolve_attribute(self, name):
|
||||
return self.name
|
||||
|
||||
def handle_suspend(self):
|
||||
logger.warning(_('Suspending generic resource (Type "%s")') %
|
||||
LOG.warning(_('Suspending generic resource (Type "%s")') %
|
||||
self.type())
|
||||
|
||||
def handle_resume(self):
|
||||
logger.warning(_('Resuming generic resource (Type "%s")') %
|
||||
LOG.warning(_('Resuming generic resource (Type "%s")') %
|
||||
self.type())
|
||||
|
||||
|
||||
@ -129,7 +129,7 @@ class SignalResource(signal_responder.SignalResponder):
|
||||
msg = _('Cannot signal resource during %s') % self.action
|
||||
raise Exception(msg)
|
||||
|
||||
logger.warning(_('Signaled resource (Type "%(type)s") %(details)s')
|
||||
LOG.warning(_('Signaled resource (Type "%(type)s") %(details)s')
|
||||
% {'type': self.type(), 'details': details})
|
||||
|
||||
def _resolve_attribute(self, name):
|
||||
|
@ -177,7 +177,7 @@ class EnvironmentDuplicateTest(common.HeatTestCase):
|
||||
replace_log = 'Changing %s from %s to %s' % ('OS::Test::Dummy',
|
||||
'test.yaml',
|
||||
self.resource_type)
|
||||
self.assertNotIn(replace_log, self.logger.output)
|
||||
self.assertNotIn(replace_log, self.LOG.output)
|
||||
env_test = {u'resource_registry': {
|
||||
u'OS::Test::Dummy': self.resource_type}}
|
||||
env.load(env_test)
|
||||
@ -186,9 +186,9 @@ class EnvironmentDuplicateTest(common.HeatTestCase):
|
||||
# should return exactly the same object.
|
||||
self.assertIs(info, env.get_resource_info('OS::Test::Dummy',
|
||||
'my_fip'))
|
||||
self.assertNotIn(replace_log, self.logger.output)
|
||||
self.assertNotIn(replace_log, self.LOG.output)
|
||||
else:
|
||||
self.assertIn(replace_log, self.logger.output)
|
||||
self.assertIn(replace_log, self.LOG.output)
|
||||
self.assertNotEqual(info,
|
||||
env.get_resource_info('OS::Test::Dummy',
|
||||
'my_fip'))
|
||||
|
@ -259,7 +259,7 @@ class ResourceExceptionHandlingTest(HeatTestCase):
|
||||
None)
|
||||
e = self.assertRaises(self.exception_catch, resource, request)
|
||||
e = e.exc if hasattr(e, 'exc') else e
|
||||
self.assertNotIn(str(e), self.logger.output)
|
||||
self.assertNotIn(str(e), self.LOG.output)
|
||||
|
||||
|
||||
class JSONRequestDeserializerTest(HeatTestCase):
|
||||
|
Loading…
Reference in New Issue
Block a user