Merge "Using LOG.warning replace LOG.warn"

Jenkins, 2015-12-30 08:27:04 +00:00, committed by Gerrit Code Review
commit 809cd7b668
35 changed files with 166 additions and 155 deletions
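In Python's standard logging module, Logger.warn is only a deprecated alias for Logger.warning, so this change swaps every call site to the preferred spelling without altering behaviour. Below is a minimal sketch of the pattern applied throughout, assuming heat's usual oslo_log and _LW translation-marker imports; the function and message are illustrative and not taken from this diff.

# Illustrative example of the LOG.warn -> LOG.warning sweep; the function
# name and message are hypothetical, the imports mirror heat's conventions.
from oslo_log import log as logging

from heat.common.i18n import _LW

LOG = logging.getLogger(__name__)


def check_quota(used, limit):
    if used > limit:
        # Old spelling, removed by this commit across the tree:
        #     LOG.warn(_LW('Quota %(used)s exceeds limit %(limit)s'),
        #              {'used': used, 'limit': limit})
        # Preferred spelling; warn() is just an alias for warning():
        LOG.warning(_LW('Quota %(used)s exceeds limit %(limit)s'),
                    {'used': used, 'limit': limit})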


@@ -550,7 +550,8 @@ def available_resource_mapping():
     if DOCKER_INSTALLED:
         return resource_mapping()
     else:
-        LOG.warn(_LW("Docker plug-in loaded, but docker lib not installed."))
+        LOG.warning(_LW("Docker plug-in loaded, but docker lib "
+                        "not installed."))
         return {}


@@ -156,8 +156,8 @@ class KeystoneClientV2(object):
         Returns the keystone ID of the resulting user
         """
         if len(username) > 64:
-            LOG.warn(_LW("Truncating the username %s to the last 64 "
-                         "characters."), username)
+            LOG.warning(_LW("Truncating the username %s to the last 64 "
+                            "characters."), username)
             # get the last 64 characters of the username
             username = username[-64:]
         user = self.client.users.create(username,


@@ -65,7 +65,7 @@ class RackspaceClientPlugin(client_plugin.ClientPlugin):
             tenant_id=tenant,
             tenant_name=tenant_name)
         if not self.pyrax.authenticated:
-            LOG.warn(_LW("Pyrax Authentication Failed."))
+            LOG.warning(_LW("Pyrax Authentication Failed."))
             raise exception.AuthorizationFailure()
         LOG.info(_LI("User %s authenticated successfully."),
                  self.context.username)


@@ -147,7 +147,8 @@ class CloudServer(server.Server):
         reason = server.metadata.get('rackconnect_unprocessable_reason',
                                      None)
         if reason is not None:
-            LOG.warn(_LW("RackConnect unprocessable reason: %s"), reason)
+            LOG.warning(_LW("RackConnect unprocessable reason: %s"),
+                        reason)

         msg = _("RackConnect automation has completed")
         self._add_event(self.action, self.status, msg)


@@ -108,8 +108,8 @@ class CloudNetwork(resource.Resource):
             try:
                 self._network = self.cloud_networks().get(self.resource_id)
             except NotFound:
-                LOG.warn(_LW("Could not find network %s but resource id is"
-                             " set."), self.resource_id)
+                LOG.warning(_LW("Could not find network %s but resource id is"
+                                " set."), self.resource_id)
         return self._network

     def cloud_networks(self):
@@ -139,7 +139,7 @@ class CloudNetwork(resource.Resource):
         try:
             network.delete()
         except NetworkInUse:
-            LOG.warn(_LW("Network '%s' still in use."), network.id)
+            LOG.warning(_LW("Network '%s' still in use."), network.id)
         else:
             self._delete_issued = True
         return False


@@ -199,7 +199,7 @@ class WatchController(object):
                     # Filter criteria not met, return None
                     return
             except KeyError:
-                LOG.warn(_LW("Invalid filter key %s, ignoring"), f)
+                LOG.warning(_LW("Invalid filter key %s, ignoring"), f)

         return result


@@ -309,7 +309,7 @@ class StackController(object):
                                                      not_tags=not_tags,
                                                      not_tags_any=not_tags_any)
         except AttributeError as ex:
-            LOG.warn(_LW("Old Engine Version: %s"), ex)
+            LOG.warning(_LW("Old Engine Version: %s"), ex)

         return stacks_view.collection(req, stacks=stacks, count=count,
                                       tenant_safe=tenant_safe)


@@ -336,8 +336,8 @@ def startup_sanity_check():
             not cfg.CONF.stack_user_domain_name):
         # FIXME(shardy): Legacy fallback for folks using old heat.conf
         # files which lack domain configuration
-        LOG.warn(_LW('stack_user_domain_id or stack_user_domain_name not '
-                     'set in heat.conf falling back to using default'))
+        LOG.warning(_LW('stack_user_domain_id or stack_user_domain_name not '
+                        'set in heat.conf falling back to using default'))
     else:
         domain_admin_user = cfg.CONF.stack_domain_admin
         domain_admin_password = cfg.CONF.stack_domain_admin_password
@@ -349,7 +349,7 @@ def startup_sanity_check():
                               '"stack_domain_admin_password"'))
     auth_key_len = len(cfg.CONF.auth_encryption_key)
     if auth_key_len in (16, 24):
-        LOG.warn(
+        LOG.warning(
             _LW('Please update auth_encryption_key to be 32 characters.'))
     elif auth_key_len != 32:
         raise exception.Error(_('heat.conf misconfigured, auth_encryption_key '


@@ -172,10 +172,10 @@ class RequestContext(context.RequestContext):
             LOG.warn(_LW('SHDEBUG NOT Using the keystone_authtoken'))
             return self._trusts_auth_plugin

-        LOG.warn(_LW('Using the keystone_authtoken user as the heat '
-                     'trustee user directly is deprecated. Please add the '
-                     'trustee credentials you need to the %s section of '
-                     'your heat.conf file.') % TRUSTEE_CONF_GROUP)
+        LOG.warning(_LW('Using the keystone_authtoken user as the heat '
+                        'trustee user directly is deprecated. Please add the '
+                        'trustee credentials you need to the %s section of '
+                        'your heat.conf file.') % TRUSTEE_CONF_GROUP)

         cfg.CONF.import_group('keystone_authtoken',
                               'keystonemiddleware.auth_token')


@@ -246,8 +246,8 @@ class KeystoneClientV3(object):
     def _get_username(self, username):
         if(len(username) > 64):
-            LOG.warn(_LW("Truncating the username %s to the last 64 "
-                         "characters."), username)
+            LOG.warning(_LW("Truncating the username %s to the last 64 "
+                            "characters."), username)
             # get the last 64 characters of the username
             return username[-64:]


@@ -424,7 +424,7 @@ class Server(object):
                 self.stale_children.remove(pid)
                 LOG.info(_LI('Removed stale child %s'), pid)
             else:
-                LOG.warn(_LW('Unrecognised child %s'), pid)
+                LOG.warning(_LW('Unrecognised child %s'), pid)

     def _verify_and_respawn_children(self, pid, status):
         if len(self.stale_children) == 0:


@@ -165,32 +165,37 @@ class Attributes(collections.Mapping):
     def _validate_type(self, attrib, value):
         if attrib.schema.type == attrib.schema.STRING:
             if not isinstance(value, six.string_types):
-                LOG.warn(_LW("Attribute %(name)s is not of type %(att_type)s"),
-                         {'name': attrib.name,
-                          'att_type': attrib.schema.STRING})
+                LOG.warning(_LW("Attribute %(name)s is not of type "
+                                "%(att_type)s"),
+                            {'name': attrib.name,
+                             'att_type': attrib.schema.STRING})
         elif attrib.schema.type == attrib.schema.LIST:
             if (not isinstance(value, collections.Sequence)
                     or isinstance(value, six.string_types)):
-                LOG.warn(_LW("Attribute %(name)s is not of type %(att_type)s"),
-                         {'name': attrib.name,
-                          'att_type': attrib.schema.LIST})
+                LOG.warning(_LW("Attribute %(name)s is not of type "
+                                "%(att_type)s"),
+                            {'name': attrib.name,
+                             'att_type': attrib.schema.LIST})
         elif attrib.schema.type == attrib.schema.MAP:
             if not isinstance(value, collections.Mapping):
-                LOG.warn(_LW("Attribute %(name)s is not of type %(att_type)s"),
-                         {'name': attrib.name,
-                          'att_type': attrib.schema.MAP})
+                LOG.warning(_LW("Attribute %(name)s is not of type "
+                                "%(att_type)s"),
+                            {'name': attrib.name,
+                             'att_type': attrib.schema.MAP})
         elif attrib.schema.type == attrib.schema.INTEGER:
             if not isinstance(value, int):
-                LOG.warn(_LW("Attribute %(name)s is not of type %(att_type)s"),
-                         {'name': attrib.name,
-                          'att_type': attrib.schema.INTEGER})
+                LOG.warning(_LW("Attribute %(name)s is not of type "
+                                "%(att_type)s"),
+                            {'name': attrib.name,
+                             'att_type': attrib.schema.INTEGER})
         elif attrib.schema.type == attrib.schema.BOOLEAN:
             try:
                 strutils.bool_from_string(value, strict=True)
             except ValueError:
-                LOG.warn(_LW("Attribute %(name)s is not of type %(att_type)s"),
-                         {'name': attrib.name,
-                          'att_type': attrib.schema.BOOLEAN})
+                LOG.warning(_LW("Attribute %(name)s is not of type "
+                                "%(att_type)s"),
+                            {'name': attrib.name,
+                             'att_type': attrib.schema.BOOLEAN})

     def __getitem__(self, key):
         if key not in self:


@@ -78,7 +78,7 @@ class OpenStackClients(object):
             client = getattr(self, method_name)()
             self._clients[name] = client
             return client
-        LOG.warn(_LW('Requested client "%s" not found'), name)
+        LOG.warning(_LW('Requested client "%s" not found'), name)

     @property
     def auth_token(self):


@@ -127,17 +127,17 @@ class NovaClientPlugin(client_plugin.ClientPlugin):
         try:
             server = self.client().servers.get(server_id)
         except exceptions.OverLimit as exc:
-            LOG.warn(_LW("Received an OverLimit response when "
-                         "fetching server (%(id)s) : %(exception)s"),
-                     {'id': server_id,
-                      'exception': exc})
+            LOG.warning(_LW("Received an OverLimit response when "
+                            "fetching server (%(id)s) : %(exception)s"),
+                        {'id': server_id,
+                         'exception': exc})
         except exceptions.ClientException as exc:
             if ((getattr(exc, 'http_status', getattr(exc, 'code', None)) in
                     (500, 503))):
-                LOG.warn(_LW("Received the following exception when "
-                             "fetching server (%(id)s) : %(exception)s"),
-                         {'id': server_id,
-                          'exception': exc})
+                LOG.warning(_LW("Received the following exception when "
+                                "fetching server (%(id)s) : %(exception)s"),
+                            {'id': server_id,
+                             'exception': exc})
             else:
                 raise
         return server
@@ -150,20 +150,20 @@ class NovaClientPlugin(client_plugin.ClientPlugin):
         try:
             server.get()
         except exceptions.OverLimit as exc:
-            LOG.warn(_LW("Server %(name)s (%(id)s) received an OverLimit "
-                         "response during server.get(): %(exception)s"),
-                     {'name': server.name,
-                      'id': server.id,
-                      'exception': exc})
+            LOG.warning(_LW("Server %(name)s (%(id)s) received an OverLimit "
+                            "response during server.get(): %(exception)s"),
+                        {'name': server.name,
+                         'id': server.id,
+                         'exception': exc})
         except exceptions.ClientException as exc:
             if ((getattr(exc, 'http_status', getattr(exc, 'code', None)) in
                     (500, 503))):
-                LOG.warn(_LW('Server "%(name)s" (%(id)s) received the '
-                             'following exception during server.get(): '
-                             '%(exception)s'),
-                         {'name': server.name,
-                          'id': server.id,
-                          'exception': exc})
+                LOG.warning(_LW('Server "%(name)s" (%(id)s) received the '
+                                'following exception during server.get(): '
+                                '%(exception)s'),
+                            {'name': server.name,
+                             'id': server.id,
+                             'exception': exc})
             else:
                 raise
@@ -523,8 +523,8 @@ echo -e '%s\tALL=(ALL)\tNOPASSWD: ALL' >> /etc/sudoers
         try:
             server = self.client().servers.get(server)
         except exceptions.NotFound as ex:
-            LOG.warn(_LW('Instance (%(server)s) not found: %(ex)s'),
-                     {'server': server, 'ex': ex})
+            LOG.warning(_LW('Instance (%(server)s) not found: %(ex)s'),
+                        {'server': server, 'ex': ex})
         else:
             for n in sorted(server.networks, reverse=True):
                 if len(server.networks[n]) > 0:


@@ -268,13 +268,13 @@ class ResourceRegistry(object):
             for res_name in list(six.iterkeys(registry)):
                 if (isinstance(registry[res_name], ResourceInfo) and
                         res_name.startswith(name[:-1])):
-                    LOG.warn(_LW('Removing %(item)s from %(path)s'), {
-                        'item': res_name,
-                        'path': descriptive_path})
+                    LOG.warning(_LW('Removing %(item)s from %(path)s'), {
+                        'item': res_name,
+                        'path': descriptive_path})
                     del registry[res_name]
         else:
             # delete this entry.
-            LOG.warn(_LW('Removing %(item)s from %(path)s'), {
-                'item': name,
-                'path': descriptive_path})
+            LOG.warning(_LW('Removing %(item)s from %(path)s'), {
+                'item': name,
+                'path': descriptive_path})
             registry.pop(name, None)
@@ -287,8 +287,8 @@ class ResourceRegistry(object):
                        'path': descriptive_path,
                        'was': str(registry[name].value),
                        'now': str(info.value)}
-            LOG.warn(_LW('Changing %(path)s from %(was)s to %(now)s'),
-                     details)
+            LOG.warning(_LW('Changing %(path)s from %(was)s to %(now)s'),
+                        details)
             if isinstance(info, ClassResourceInfo):
                 if info.value.support_status.status != support.SUPPORTED:


@@ -1295,7 +1295,7 @@ class Resource(object):
                 rs = resource_objects.Resource.get_obj(self.context, self.id)
                 rs.update_and_save({'nova_instance': self.resource_id})
             except Exception as ex:
-                LOG.warn(_LW('db error %s'), ex)
+                LOG.warning(_LW('db error %s'), ex)

     def _store(self, metadata=None):
         """Create the resource in the database."""
@@ -1432,7 +1432,7 @@ class Resource(object):
                                                         atomic_key=atomic_key)

         if not updated_ok:
-            LOG.warn(_LW('Failed to unlock resource %s'), self.name)
+            LOG.warning(_LW('Failed to unlock resource %s'), self.name)

     def _resolve_all_attributes(self, attr):
         """Method for resolving all attributes.
@@ -1473,7 +1473,8 @@ class Resource(object):
             resource = obj.get(self.resource_id)
             return resource.to_dict()
         except AttributeError as ex:
-            LOG.warn(_LW("Resolving 'show' attribute has failed : %s"), ex)
+            LOG.warning(_LW("Resolving 'show' attribute has failed : %s"),
+                        ex)
             return None

     def _resolve_attribute(self, name):
@@ -1680,8 +1681,8 @@ class Resource(object):
     def metadata_update(self, new_metadata=None):
         """No-op for resources which don't explicitly override this method."""
         if new_metadata:
-            LOG.warn(_LW("Resource %s does not implement metadata update"),
-                     self.name)
+            LOG.warning(_LW("Resource %s does not implement metadata update"),
+                        self.name)

     @classmethod
     def resource_to_template(cls, resource_type, template_type='cfn'):


@@ -377,7 +377,8 @@ class ElasticIpAssociation(resource.Resource):
             instance_id = self.properties[self.INSTANCE_ID]
             port_id, port_rsrc = self._get_port_info(ni_id, instance_id)
             if not port_id or not port_rsrc:
-                LOG.warn(_LW('Skipping association, resource not specified'))
+                LOG.warning(_LW('Skipping association, resource not '
+                                'specified'))
                 return

             float_id = self.properties[self.ALLOCATION_ID]


@@ -678,11 +678,11 @@ class Instance(resource.Resource, sh.SchedulerHintsMixin):
             # keep the behavior as creation
             elif (old_network_ifaces and
                   (self.NETWORK_INTERFACES not in prop_diff)):
-                LOG.warn(_LW('There is no change of "%(net_interfaces)s" '
-                             'for instance %(server)s, do nothing '
-                             'when updating.'),
-                         {'net_interfaces': self.NETWORK_INTERFACES,
-                          'server': self.resource_id})
+                LOG.warning(_LW('There is no change of "%(net_interfaces)s" '
+                                'for instance %(server)s, do nothing '
+                                'when updating.'),
+                            {'net_interfaces': self.NETWORK_INTERFACES,
+                             'server': self.resource_id})
             # if the interfaces not come from property 'NetworkInterfaces',
             # the situation is somewhat complex, so to detach the old ifaces,
             # and then attach the new ones.
@@ -805,12 +805,12 @@ class Instance(resource.Resource, sh.SchedulerHintsMixin):
         if network_interfaces and subnet_id:
             # consider the old templates, we only to log to warn user
             # NetworkInterfaces has higher priority than SubnetId
-            LOG.warn(_LW('"%(subnet)s" will be ignored if specified '
-                         '"%(net_interfaces)s". So if you specified the '
-                         '"%(net_interfaces)s" property, '
-                         'do not specify "%(subnet)s" property.'),
-                     {'subnet': self.SUBNET_ID,
-                      'net_interfaces': self.NETWORK_INTERFACES})
+            LOG.warning(_LW('"%(subnet)s" will be ignored if specified '
+                            '"%(net_interfaces)s". So if you specified the '
+                            '"%(net_interfaces)s" property, '
+                            'do not specify "%(subnet)s" property.'),
+                        {'subnet': self.SUBNET_ID,
+                         'net_interfaces': self.NETWORK_INTERFACES})

     def handle_delete(self):
         # make sure to delete the port which implicit-created by heat


@@ -439,7 +439,7 @@ class Port(neutron.NeutronResource):
                     subnets.append(self.client().show_subnet(
                         subnet_id)['subnet'])
             except Exception as ex:
-                LOG.warn(_LW("Failed to fetch resource attributes: %s"), ex)
+                LOG.warning(_LW("Failed to fetch resource attributes: %s"), ex)
                 return
             return subnets
         return super(Port, self)._resolve_attribute(name)


@@ -237,7 +237,7 @@ class SwiftContainer(resource.Resource):
             headers = self.client().head_container(self.resource_id)
         except Exception as ex:
             if self.client_plugin().is_client_exception(ex):
-                LOG.warn(_LW("Head container failed: %s"), ex)
+                LOG.warning(_LW("Head container failed: %s"), ex)
                 return None
             raise
         else:


@@ -166,12 +166,12 @@ class TroveCluster(resource.Resource):
             return cluster
         except Exception as exc:
             if self.client_plugin().is_over_limit(exc):
-                LOG.warn(_LW("Stack %(name)s (%(id)s) received an "
-                             "OverLimit response during clusters.get():"
-                             " %(exception)s"),
-                         {'name': self.stack.name,
-                          'id': self.stack.id,
-                          'exception': exc})
+                LOG.warning(_LW("Stack %(name)s (%(id)s) received an "
+                                "OverLimit response during clusters.get():"
+                                " %(exception)s"),
+                            {'name': self.stack.name,
+                             'id': self.stack.id,
+                             'exception': exc})
                 return None
             else:
                 raise


@@ -366,12 +366,12 @@ class OSDBInstance(resource.Resource):
             return instance
         except Exception as exc:
             if self.client_plugin().is_over_limit(exc):
-                LOG.warn(_LW("Stack %(name)s (%(id)s) received an "
-                             "OverLimit response during instance.get():"
-                             " %(exception)s"),
-                         {'name': self.stack.name,
-                          'id': self.stack.id,
-                          'exception': exc})
+                LOG.warning(_LW("Stack %(name)s (%(id)s) received an "
+                                "OverLimit response during instance.get():"
+                                " %(exception)s"),
+                            {'name': self.stack.name,
+                             'id': self.stack.id,
+                             'exception': exc})
                 return None
             else:
                 raise


@@ -139,8 +139,8 @@ class SignalResponder(stack_user.StackUser):
         secret_key = self.data().get('secret_key')

         if not access_key or not secret_key:
-            LOG.warn(_LW('Cannot generate signed url, '
-                         'unable to create keypair'))
+            LOG.warning(_LW('Cannot generate signed url, '
+                            'unable to create keypair'))
             return

         config_url = cfg.CONF.heat_waitcondition_server_url


@@ -178,8 +178,8 @@ class StackResource(resource.Resource):
             child_template = self.child_template()
             params = self.child_params()
         except NotImplementedError:
-            LOG.warn(_LW("Preview of '%s' not yet implemented"),
-                     self.__class__.__name__)
+            LOG.warning(_LW("Preview of '%s' not yet implemented"),
+                        self.__class__.__name__)
             return self

         name = "%s-%s" % (self.stack.name, self.name)


@@ -105,7 +105,7 @@ class StackUser(resource.Resource):
             # compatibility with resources created before the migration
             # to stack_user.StackUser domain users. After an appropriate
             # transitional period, this should be removed.
-            LOG.warn(_LW('Reverting to legacy user delete path'))
+            LOG.warning(_LW('Reverting to legacy user delete path'))
             try:
                 self.keystone().delete_stack_user(user_id)
             except kc_exception.NotFound:


@@ -57,8 +57,8 @@ class BaseWaitConditionHandle(signal_responder.SignalResponder):
         if self._metadata_format_ok(metadata):
             rsrc_metadata = self.metadata_get(refresh=True)
             if metadata[self.UNIQUE_ID] in rsrc_metadata:
-                LOG.warn(_LW("Overwriting Metadata item for id %s!"),
-                         metadata[self.UNIQUE_ID])
+                LOG.warning(_LW("Overwriting Metadata item for id %s!"),
+                            metadata[self.UNIQUE_ID])
             safe_metadata = {}
             for k in self.METADATA_KEYS:
                 if k == self.UNIQUE_ID:


@@ -1424,7 +1424,7 @@ class EngineService(service.Service):
         if cfg.CONF.heat_stack_user_role in cnxt.roles:
             if not self._authorize_stack_user(cnxt, stack, resource_name):
-                LOG.warn(_LW("Access denied to resource %s"), resource_name)
+                LOG.warning(_LW("Access denied to resource %s"), resource_name)
                 raise exception.Forbidden()

         if resource_name not in stack:
@@ -1686,7 +1686,7 @@ class EngineService(service.Service):
             try:
                 wrn = [w.name for w in watch_rule.WatchRule.get_all(cnxt)]
             except Exception as ex:
-                LOG.warn(_LW('show_watch (all) db error %s'), ex)
+                LOG.warning(_LW('show_watch (all) db error %s'), ex)
                 return

         wrs = [watchrule.WatchRule.load(cnxt, w) for w in wrn]
@@ -1714,7 +1714,7 @@ class EngineService(service.Service):
         try:
             wds = watch_data.WatchData.get_all(cnxt)
         except Exception as ex:
-            LOG.warn(_LW('show_metric (all) db error %s'), ex)
+            LOG.warning(_LW('show_metric (all) db error %s'), ex)
             return

         result = [api.format_watch_data(w) for w in wds]


@@ -88,8 +88,9 @@ class StackWatch(object):
             wrs = watch_rule_object.WatchRule.get_all_by_stack(admin_context,
                                                                sid)
         except Exception as ex:
-            LOG.warn(_LW('periodic_task db error watch rule removed? %(ex)s'),
-                     ex)
+            LOG.warning(_LW('periodic_task db error watch rule'
+                            ' removed? %(ex)s'),
+                        ex)
             return

         def run_alarm_action(stk, actions, details):


@@ -358,7 +358,7 @@ class Stack(collections.Mapping):
         parameter.
         """
         if not self.parameters.set_stack_id(self.identifier()):
-            LOG.warn(_LW("Unable to set parameters StackId identifier"))
+            LOG.warning(_LW("Unable to set parameters StackId identifier"))

     @staticmethod
     def get_dep_attrs(resources, outputs, resource_name):
@@ -771,12 +771,12 @@ class Stack(collections.Mapping):
             updated = self._persist_state()
             if not updated:
                 # Possibly failed concurrent update
-                LOG.warn(_LW("Failed to set state of stack %(name)s with"
-                             " traversal ID %(trvsl_id)s, to"
-                             " %(action)s_%(status)s"),
-                         {'name': self.name,
-                          'trvsl_id': self.current_traversal,
-                          'action': action, 'status': status})
+                LOG.warning(_LW("Failed to set state of stack %(name)s with"
+                                " traversal ID %(trvsl_id)s, to"
+                                " %(action)s_%(status)s"),
+                            {'name': self.name,
+                             'trvsl_id': self.current_traversal,
+                             'action': action, 'status': status})
             return updated

         # Persist state to db only if status == IN_PROGRESS
@@ -1086,10 +1086,10 @@ class Stack(collections.Mapping):
         # we expect to update the stack having previous traversal ID
         stack_id = self.store(exp_trvsl=previous_traversal)
         if stack_id is None:
-            LOG.warn(_LW("Failed to store stack %(name)s with traversal ID"
-                         " %(trvsl_id)s, aborting stack %(action)s"),
-                     {'name': self.name, 'trvsl_id': previous_traversal,
-                      'action': self.action})
+            LOG.warning(_LW("Failed to store stack %(name)s with traversal "
+                            "ID %(trvsl_id)s, aborting stack %(action)s"),
+                        {'name': self.name, 'trvsl_id': previous_traversal,
+                         'action': self.action})
             return

         self._send_notification_and_add_event()
@@ -1112,10 +1112,10 @@ class Stack(collections.Mapping):
         stack_id = self.store()
         if stack_id is None:
             # Failed concurrent update
-            LOG.warn(_LW("Failed to store stack %(name)s with traversal ID"
-                         " %(trvsl_id)s, aborting stack %(action)s"),
-                     {'name': self.name, 'trvsl_id': self.current_traversal,
-                      'action': self.action})
+            LOG.warning(_LW("Failed to store stack %(name)s with traversal "
+                            "ID %(trvsl_id)s, aborting stack %(action)s"),
+                        {'name': self.name, 'trvsl_id': self.current_traversal,
+                         'action': self.action})
             return

         LOG.info(_LI('convergence_dependencies: %s'),
@@ -1157,10 +1157,10 @@ class Stack(collections.Mapping):
         stack_id = self.store()
         if stack_id is None:
             # Failed concurrent update
-            LOG.warn(_LW("Failed to store stack %(name)s with traversal ID"
-                         " %(trvsl_id)s, not trigerring rollback."),
-                     {'name': self.name,
-                      'trvsl_id': self.current_traversal})
+            LOG.warning(_LW("Failed to store stack %(name)s with traversal"
+                            " ID %(trvsl_id)s, not trigerring rollback."),
+                        {'name': self.name,
+                         'trvsl_id': self.current_traversal})
             return

         self.converge_stack(rollback_tmpl, action=self.ROLLBACK)
@@ -1850,10 +1850,10 @@ class Stack(collections.Mapping):
         stack_id = self.store()
         if stack_id is None:
             # Failed concurrent update
-            LOG.warn(_LW("Failed to store stack %(name)s with traversal ID"
-                         " %(trvsl_id)s, aborting stack purge"),
-                     {'name': self.name,
-                      'trvsl_id': self.current_traversal})
+            LOG.warning(_LW("Failed to store stack %(name)s with traversal"
+                            " ID %(trvsl_id)s, aborting stack purge"),
+                        {'name': self.name,
+                         'trvsl_id': self.current_traversal})
             return

         raw_template_object.RawTemplate.delete(self.context, prev_tmpl_id)


@@ -117,8 +117,8 @@ class StackLock(object):
         result = stack_lock_object.StackLock.release(self.stack_id,
                                                      self.engine_id)
         if result is True:
-            LOG.warn(_LW("Lock was already released on stack %s!"),
-                     self.stack_id)
+            LOG.warning(_LW("Lock was already released on stack %s!"),
+                        self.stack_id)
         else:
             LOG.debug("Engine %(engine)s released lock on stack "
                       "%(stack)s" % {'engine': self.engine_id,


@@ -85,8 +85,9 @@ class WatchRule(object):
                 watch = watch_rule_objects.WatchRule.get_by_name(context,
                                                                  watch_name)
             except Exception as ex:
-                LOG.warn(_LW('WatchRule.load (%(watch_name)s) db error '
-                             '%(ex)s'), {'watch_name': watch_name, 'ex': ex})
+                LOG.warning(_LW('WatchRule.load (%(watch_name)s) db error '
+                                '%(ex)s'), {'watch_name': watch_name,
+                                            'ex': ex})
         if watch is None:
             raise exception.EntityNotFound(entity='Watch Rule',
                                            name=watch_name)
@@ -272,8 +273,8 @@ class WatchRule(object):
                 for refid in self.rule[self.ACTION_MAP[new_state]]:
                     actions.append(stk.resource_by_refid(refid).signal)
             else:
-                LOG.warn(_LW("Could not process watch state %s for stack"),
-                         new_state)
+                LOG.warning(_LW("Could not process watch state %s for stack"),
+                            new_state)
         return actions

     def _to_ceilometer(self, data):
@@ -355,9 +356,9 @@ class WatchRule(object):
                       % {'self_state': self.state, 'name': self.name,
                          'state': state})
         else:
-            LOG.warn(_LW("Unable to override state %(state)s for "
-                         "watch %(name)s"), {'state': self.state,
-                                             'name': self.name})
+            LOG.warning(_LW("Unable to override state %(state)s for "
+                            "watch %(name)s"), {'state': self.state,
+                                                'name': self.name})
         return actions


@@ -39,27 +39,27 @@ class GenericResource(resource.Resource):
         return True

     def handle_create(self):
-        LOG.warn(_LW('Creating generic resource (Type "%s")'),
-                 self.type())
+        LOG.warning(_LW('Creating generic resource (Type "%s")'),
+                    self.type())

     def handle_update(self, json_snippet, tmpl_diff, prop_diff):
-        LOG.warn(_LW('Updating generic resource (Type "%s")'),
-                 self.type())
+        LOG.warning(_LW('Updating generic resource (Type "%s")'),
+                    self.type())

     def handle_delete(self):
-        LOG.warn(_LW('Deleting generic resource (Type "%s")'),
-                 self.type())
+        LOG.warning(_LW('Deleting generic resource (Type "%s")'),
+                    self.type())

     def _resolve_attribute(self, name):
         return self.name

     def handle_suspend(self):
-        LOG.warn(_LW('Suspending generic resource (Type "%s")'),
-                 self.type())
+        LOG.warning(_LW('Suspending generic resource (Type "%s")'),
+                    self.type())

     def handle_resume(self):
-        LOG.warn(_LW('Resuming generic resource (Type "%s")'),
-                 self.type())
+        LOG.warning(_LW('Resuming generic resource (Type "%s")'),
+                    self.type())


 class ResWithShowAttr(GenericResource):
@@ -189,8 +189,8 @@ class SignalResource(signal_responder.SignalResponder):
         self.resource_id_set(self._get_user_id())

     def handle_signal(self, details=None):
-        LOG.warn(_LW('Signaled resource (Type "%(type)s") %(details)s'),
-                 {'type': self.type(), 'details': details})
+        LOG.warning(_LW('Signaled resource (Type "%(type)s") %(details)s'),
+                    {'type': self.type(), 'details': details})

     def _resolve_attribute(self, name):
         if self.resource_id is not None:


@@ -631,16 +631,16 @@ outputs:
         md = self.client.resources.metadata(stack_identifier, 'custom_lb')
         actual_md = len(md['IPs'].split(','))
         if actual_md != expected:
-            LOG.warn('check_instance_count exp:%d, meta:%s' % (expected,
-                                                               md['IPs']))
+            LOG.warning('check_instance_count exp:%d, meta:%s' % (expected,
+                                                                  md['IPs']))
             return False

         stack = self.client.stacks.get(stack_identifier)
         inst_list = self._stack_output(stack, 'InstanceList')
         actual = len(inst_list.split(','))
         if actual != expected:
-            LOG.warn('check_instance_count exp:%d, act:%s' % (expected,
-                                                              inst_list))
+            LOG.warning('check_instance_count exp:%d, act:%s' % (expected,
+                                                                 inst_list))
         return actual == expected

     def test_scaling_meta_update(self):


@@ -95,7 +95,7 @@ Outputs:
         if key_header not in oc.head_account():
             swift_key = hashlib.sha224(
                 str(random.getrandbits(256))).hexdigest()[:32]
-            LOG.warn('setting swift key to %s' % swift_key)
+            LOG.warning('setting swift key to %s' % swift_key)
             oc.post_account({key_header: swift_key})
         key = oc.head_account()[key_header]

         path = '/v1/AUTH_%s/%s/%s' % (self.project_id,


@@ -30,8 +30,8 @@ class CeilometerAlarmTest(scenario_base.ScenarioTestsBase):
         stack = self.client.stacks.get(stack_identifier)
         actual = self._stack_output(stack, 'asg_size')
         if actual != expected:
-            LOG.warn('check_instance_count exp:%d, act:%s' % (expected,
-                                                              actual))
+            LOG.warning('check_instance_count exp:%d, act:%s' % (expected,
+                                                                 actual))
         return actual == expected

     def test_alarm(self):
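After a sweep like this, it is easy to check whether any LOG.warn call sites remain. The helper below is a hypothetical, self-contained script written for this write-up, not part of the commit; it simply scans the tree for the old spelling. Running it from the repository root lists anything the sweep missed; for example, the LOG.warn debug line left untouched in the RequestContext hunk above would still be reported.

# find_log_warn.py - hypothetical verification helper, not part of this commit.
# Walks a source tree and reports any remaining LOG.warn( call sites;
# LOG.warning( does not match because the pattern requires "(" directly
# after "warn".
import os
import re
import sys

PATTERN = re.compile(r'\bLOG\.warn\(')


def find_log_warn(root='.'):
    hits = []
    for dirpath, _dirnames, filenames in os.walk(root):
        for name in filenames:
            if not name.endswith('.py'):
                continue
            path = os.path.join(dirpath, name)
            with open(path) as src:
                for lineno, line in enumerate(src, 1):
                    if PATTERN.search(line):
                        hits.append('%s:%d: %s' % (path, lineno, line.strip()))
    return hits


if __name__ == '__main__':
    matches = find_log_warn(sys.argv[1] if len(sys.argv) > 1 else '.')
    for match in matches:
        print(match)
    sys.exit(1 if matches else 0)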