Merge "Using LOG.warning replace LOG.warn"

Authored by Jenkins on 2015-12-30 08:27:04 +00:00; committed by Gerrit Code Review
commit 809cd7b668
35 changed files with 166 additions and 155 deletions
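
For context on the change itself: in the standard-library logging module, Logger.warn() is only a deprecated alias for Logger.warning(), and oslo.log keeps the same interface, so every call site in Heat is switched to the non-deprecated name. Below is a minimal sketch of the pattern this commit applies throughout the tree; the helper function and message are illustrative (not taken from Heat), while the imports follow the usual Heat/oslo.log convention.

from oslo_log import log as logging

from heat.common.i18n import _LW

LOG = logging.getLogger(__name__)


def check_backend(lib_installed):
    """Hypothetical helper showing the spelling this commit moves to."""
    if not lib_installed:
        # Deprecated spelling removed by this commit:
        #     LOG.warn(_LW("Backend library not installed."))
        # Preferred spelling introduced by this commit:
        LOG.warning(_LW("Backend library not installed."))
        return {}
    return {'backend': 'ready'}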


@@ -550,7 +550,8 @@ def available_resource_mapping():
if DOCKER_INSTALLED:
return resource_mapping()
else:
LOG.warn(_LW("Docker plug-in loaded, but docker lib not installed."))
LOG.warning(_LW("Docker plug-in loaded, but docker lib "
"not installed."))
return {}
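
Note that many hunks in this commit also re-wrap the translated message: LOG.warning( is three characters longer than LOG.warn(, so continuation strings are re-indented or split, presumably to stay within the line-length limit enforced by the project's style checks. The first hunk above shows the effect:

# Before: the call fits on one line
LOG.warn(_LW("Docker plug-in loaded, but docker lib not installed."))

# After: the longer method name forces the string to wrap
LOG.warning(_LW("Docker plug-in loaded, but docker lib "
                "not installed."))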


@@ -156,8 +156,8 @@ class KeystoneClientV2(object):
Returns the keystone ID of the resulting user
"""
if len(username) > 64:
LOG.warn(_LW("Truncating the username %s to the last 64 "
"characters."), username)
LOG.warning(_LW("Truncating the username %s to the last 64 "
"characters."), username)
# get the last 64 characters of the username
username = username[-64:]
user = self.client.users.create(username,


@@ -65,7 +65,7 @@ class RackspaceClientPlugin(client_plugin.ClientPlugin):
tenant_id=tenant,
tenant_name=tenant_name)
if not self.pyrax.authenticated:
LOG.warn(_LW("Pyrax Authentication Failed."))
LOG.warning(_LW("Pyrax Authentication Failed."))
raise exception.AuthorizationFailure()
LOG.info(_LI("User %s authenticated successfully."),
self.context.username)


@@ -147,7 +147,8 @@ class CloudServer(server.Server):
reason = server.metadata.get('rackconnect_unprocessable_reason',
None)
if reason is not None:
LOG.warn(_LW("RackConnect unprocessable reason: %s"), reason)
LOG.warning(_LW("RackConnect unprocessable reason: %s"),
reason)
msg = _("RackConnect automation has completed")
self._add_event(self.action, self.status, msg)


@@ -108,8 +108,8 @@ class CloudNetwork(resource.Resource):
try:
self._network = self.cloud_networks().get(self.resource_id)
except NotFound:
LOG.warn(_LW("Could not find network %s but resource id is"
" set."), self.resource_id)
LOG.warning(_LW("Could not find network %s but resource id is"
" set."), self.resource_id)
return self._network
def cloud_networks(self):
@@ -139,7 +139,7 @@ class CloudNetwork(resource.Resource):
try:
network.delete()
except NetworkInUse:
LOG.warn(_LW("Network '%s' still in use."), network.id)
LOG.warning(_LW("Network '%s' still in use."), network.id)
else:
self._delete_issued = True
return False


@@ -199,7 +199,7 @@ class WatchController(object):
# Filter criteria not met, return None
return
except KeyError:
LOG.warn(_LW("Invalid filter key %s, ignoring"), f)
LOG.warning(_LW("Invalid filter key %s, ignoring"), f)
return result


@@ -309,7 +309,7 @@ class StackController(object):
not_tags=not_tags,
not_tags_any=not_tags_any)
except AttributeError as ex:
LOG.warn(_LW("Old Engine Version: %s"), ex)
LOG.warning(_LW("Old Engine Version: %s"), ex)
return stacks_view.collection(req, stacks=stacks, count=count,
tenant_safe=tenant_safe)


@@ -336,8 +336,8 @@ def startup_sanity_check():
not cfg.CONF.stack_user_domain_name):
# FIXME(shardy): Legacy fallback for folks using old heat.conf
# files which lack domain configuration
LOG.warn(_LW('stack_user_domain_id or stack_user_domain_name not '
'set in heat.conf falling back to using default'))
LOG.warning(_LW('stack_user_domain_id or stack_user_domain_name not '
'set in heat.conf falling back to using default'))
else:
domain_admin_user = cfg.CONF.stack_domain_admin
domain_admin_password = cfg.CONF.stack_domain_admin_password
@@ -349,7 +349,7 @@ def startup_sanity_check():
'"stack_domain_admin_password"'))
auth_key_len = len(cfg.CONF.auth_encryption_key)
if auth_key_len in (16, 24):
LOG.warn(
LOG.warning(
_LW('Please update auth_encryption_key to be 32 characters.'))
elif auth_key_len != 32:
raise exception.Error(_('heat.conf misconfigured, auth_encryption_key '


@@ -172,10 +172,10 @@ class RequestContext(context.RequestContext):
LOG.warn(_LW('SHDEBUG NOT Using the keystone_authtoken'))
return self._trusts_auth_plugin
LOG.warn(_LW('Using the keystone_authtoken user as the heat '
'trustee user directly is deprecated. Please add the '
'trustee credentials you need to the %s section of '
'your heat.conf file.') % TRUSTEE_CONF_GROUP)
LOG.warning(_LW('Using the keystone_authtoken user as the heat '
'trustee user directly is deprecated. Please add the '
'trustee credentials you need to the %s section of '
'your heat.conf file.') % TRUSTEE_CONF_GROUP)
cfg.CONF.import_group('keystone_authtoken',
'keystonemiddleware.auth_token')


@@ -246,8 +246,8 @@ class KeystoneClientV3(object):
def _get_username(self, username):
if(len(username) > 64):
LOG.warn(_LW("Truncating the username %s to the last 64 "
"characters."), username)
LOG.warning(_LW("Truncating the username %s to the last 64 "
"characters."), username)
# get the last 64 characters of the username
return username[-64:]


@@ -424,7 +424,7 @@ class Server(object):
self.stale_children.remove(pid)
LOG.info(_LI('Removed stale child %s'), pid)
else:
LOG.warn(_LW('Unrecognised child %s'), pid)
LOG.warning(_LW('Unrecognised child %s'), pid)
def _verify_and_respawn_children(self, pid, status):
if len(self.stale_children) == 0:


@@ -165,32 +165,37 @@ class Attributes(collections.Mapping):
def _validate_type(self, attrib, value):
if attrib.schema.type == attrib.schema.STRING:
if not isinstance(value, six.string_types):
LOG.warn(_LW("Attribute %(name)s is not of type %(att_type)s"),
{'name': attrib.name,
'att_type': attrib.schema.STRING})
LOG.warning(_LW("Attribute %(name)s is not of type "
"%(att_type)s"),
{'name': attrib.name,
'att_type': attrib.schema.STRING})
elif attrib.schema.type == attrib.schema.LIST:
if (not isinstance(value, collections.Sequence)
or isinstance(value, six.string_types)):
LOG.warn(_LW("Attribute %(name)s is not of type %(att_type)s"),
{'name': attrib.name,
'att_type': attrib.schema.LIST})
LOG.warning(_LW("Attribute %(name)s is not of type "
"%(att_type)s"),
{'name': attrib.name,
'att_type': attrib.schema.LIST})
elif attrib.schema.type == attrib.schema.MAP:
if not isinstance(value, collections.Mapping):
LOG.warn(_LW("Attribute %(name)s is not of type %(att_type)s"),
{'name': attrib.name,
'att_type': attrib.schema.MAP})
LOG.warning(_LW("Attribute %(name)s is not of type "
"%(att_type)s"),
{'name': attrib.name,
'att_type': attrib.schema.MAP})
elif attrib.schema.type == attrib.schema.INTEGER:
if not isinstance(value, int):
LOG.warn(_LW("Attribute %(name)s is not of type %(att_type)s"),
{'name': attrib.name,
'att_type': attrib.schema.INTEGER})
LOG.warning(_LW("Attribute %(name)s is not of type "
"%(att_type)s"),
{'name': attrib.name,
'att_type': attrib.schema.INTEGER})
elif attrib.schema.type == attrib.schema.BOOLEAN:
try:
strutils.bool_from_string(value, strict=True)
except ValueError:
LOG.warn(_LW("Attribute %(name)s is not of type %(att_type)s"),
{'name': attrib.name,
'att_type': attrib.schema.BOOLEAN})
LOG.warning(_LW("Attribute %(name)s is not of type "
"%(att_type)s"),
{'name': attrib.name,
'att_type': attrib.schema.BOOLEAN})
def __getitem__(self, key):
if key not in self:


@@ -78,7 +78,7 @@ class OpenStackClients(object):
client = getattr(self, method_name)()
self._clients[name] = client
return client
LOG.warn(_LW('Requested client "%s" not found'), name)
LOG.warning(_LW('Requested client "%s" not found'), name)
@property
def auth_token(self):


@@ -127,17 +127,17 @@ class NovaClientPlugin(client_plugin.ClientPlugin):
try:
server = self.client().servers.get(server_id)
except exceptions.OverLimit as exc:
LOG.warn(_LW("Received an OverLimit response when "
"fetching server (%(id)s) : %(exception)s"),
{'id': server_id,
'exception': exc})
LOG.warning(_LW("Received an OverLimit response when "
"fetching server (%(id)s) : %(exception)s"),
{'id': server_id,
'exception': exc})
except exceptions.ClientException as exc:
if ((getattr(exc, 'http_status', getattr(exc, 'code', None)) in
(500, 503))):
LOG.warn(_LW("Received the following exception when "
"fetching server (%(id)s) : %(exception)s"),
{'id': server_id,
'exception': exc})
LOG.warning(_LW("Received the following exception when "
"fetching server (%(id)s) : %(exception)s"),
{'id': server_id,
'exception': exc})
else:
raise
return server
@@ -150,20 +150,20 @@ class NovaClientPlugin(client_plugin.ClientPlugin):
try:
server.get()
except exceptions.OverLimit as exc:
LOG.warn(_LW("Server %(name)s (%(id)s) received an OverLimit "
"response during server.get(): %(exception)s"),
{'name': server.name,
'id': server.id,
'exception': exc})
LOG.warning(_LW("Server %(name)s (%(id)s) received an OverLimit "
"response during server.get(): %(exception)s"),
{'name': server.name,
'id': server.id,
'exception': exc})
except exceptions.ClientException as exc:
if ((getattr(exc, 'http_status', getattr(exc, 'code', None)) in
(500, 503))):
LOG.warn(_LW('Server "%(name)s" (%(id)s) received the '
'following exception during server.get(): '
'%(exception)s'),
{'name': server.name,
'id': server.id,
'exception': exc})
LOG.warning(_LW('Server "%(name)s" (%(id)s) received the '
'following exception during server.get(): '
'%(exception)s'),
{'name': server.name,
'id': server.id,
'exception': exc})
else:
raise
@@ -523,8 +523,8 @@ echo -e '%s\tALL=(ALL)\tNOPASSWD: ALL' >> /etc/sudoers
try:
server = self.client().servers.get(server)
except exceptions.NotFound as ex:
LOG.warn(_LW('Instance (%(server)s) not found: %(ex)s'),
{'server': server, 'ex': ex})
LOG.warning(_LW('Instance (%(server)s) not found: %(ex)s'),
{'server': server, 'ex': ex})
else:
for n in sorted(server.networks, reverse=True):
if len(server.networks[n]) > 0:


@@ -268,13 +268,13 @@ class ResourceRegistry(object):
for res_name in list(six.iterkeys(registry)):
if (isinstance(registry[res_name], ResourceInfo) and
res_name.startswith(name[:-1])):
LOG.warn(_LW('Removing %(item)s from %(path)s'), {
LOG.warning(_LW('Removing %(item)s from %(path)s'), {
'item': res_name,
'path': descriptive_path})
del registry[res_name]
else:
# delete this entry.
LOG.warn(_LW('Removing %(item)s from %(path)s'), {
LOG.warning(_LW('Removing %(item)s from %(path)s'), {
'item': name,
'path': descriptive_path})
registry.pop(name, None)
@@ -287,8 +287,8 @@ class ResourceRegistry(object):
'path': descriptive_path,
'was': str(registry[name].value),
'now': str(info.value)}
LOG.warn(_LW('Changing %(path)s from %(was)s to %(now)s'),
details)
LOG.warning(_LW('Changing %(path)s from %(was)s to %(now)s'),
details)
if isinstance(info, ClassResourceInfo):
if info.value.support_status.status != support.SUPPORTED:


@@ -1295,7 +1295,7 @@ class Resource(object):
rs = resource_objects.Resource.get_obj(self.context, self.id)
rs.update_and_save({'nova_instance': self.resource_id})
except Exception as ex:
LOG.warn(_LW('db error %s'), ex)
LOG.warning(_LW('db error %s'), ex)
def _store(self, metadata=None):
"""Create the resource in the database."""
@@ -1432,7 +1432,7 @@ class Resource(object):
atomic_key=atomic_key)
if not updated_ok:
LOG.warn(_LW('Failed to unlock resource %s'), self.name)
LOG.warning(_LW('Failed to unlock resource %s'), self.name)
def _resolve_all_attributes(self, attr):
"""Method for resolving all attributes.
@@ -1473,7 +1473,8 @@ class Resource(object):
resource = obj.get(self.resource_id)
return resource.to_dict()
except AttributeError as ex:
LOG.warn(_LW("Resolving 'show' attribute has failed : %s"), ex)
LOG.warning(_LW("Resolving 'show' attribute has failed : %s"),
ex)
return None
def _resolve_attribute(self, name):
@@ -1680,8 +1681,8 @@ class Resource(object):
def metadata_update(self, new_metadata=None):
"""No-op for resources which don't explicitly override this method."""
if new_metadata:
LOG.warn(_LW("Resource %s does not implement metadata update"),
self.name)
LOG.warning(_LW("Resource %s does not implement metadata update"),
self.name)
@classmethod
def resource_to_template(cls, resource_type, template_type='cfn'):


@@ -377,7 +377,8 @@ class ElasticIpAssociation(resource.Resource):
instance_id = self.properties[self.INSTANCE_ID]
port_id, port_rsrc = self._get_port_info(ni_id, instance_id)
if not port_id or not port_rsrc:
LOG.warn(_LW('Skipping association, resource not specified'))
LOG.warning(_LW('Skipping association, resource not '
'specified'))
return
float_id = self.properties[self.ALLOCATION_ID]


@@ -678,11 +678,11 @@ class Instance(resource.Resource, sh.SchedulerHintsMixin):
# keep the behavior as creation
elif (old_network_ifaces and
(self.NETWORK_INTERFACES not in prop_diff)):
LOG.warn(_LW('There is no change of "%(net_interfaces)s" '
'for instance %(server)s, do nothing '
'when updating.'),
{'net_interfaces': self.NETWORK_INTERFACES,
'server': self.resource_id})
LOG.warning(_LW('There is no change of "%(net_interfaces)s" '
'for instance %(server)s, do nothing '
'when updating.'),
{'net_interfaces': self.NETWORK_INTERFACES,
'server': self.resource_id})
# if the interfaces not come from property 'NetworkInterfaces',
# the situation is somewhat complex, so to detach the old ifaces,
# and then attach the new ones.
@@ -805,12 +805,12 @@ class Instance(resource.Resource, sh.SchedulerHintsMixin):
if network_interfaces and subnet_id:
# consider the old templates, we only to log to warn user
# NetworkInterfaces has higher priority than SubnetId
LOG.warn(_LW('"%(subnet)s" will be ignored if specified '
'"%(net_interfaces)s". So if you specified the '
'"%(net_interfaces)s" property, '
'do not specify "%(subnet)s" property.'),
{'subnet': self.SUBNET_ID,
'net_interfaces': self.NETWORK_INTERFACES})
LOG.warning(_LW('"%(subnet)s" will be ignored if specified '
'"%(net_interfaces)s". So if you specified the '
'"%(net_interfaces)s" property, '
'do not specify "%(subnet)s" property.'),
{'subnet': self.SUBNET_ID,
'net_interfaces': self.NETWORK_INTERFACES})
def handle_delete(self):
# make sure to delete the port which implicit-created by heat


@@ -439,7 +439,7 @@ class Port(neutron.NeutronResource):
subnets.append(self.client().show_subnet(
subnet_id)['subnet'])
except Exception as ex:
LOG.warn(_LW("Failed to fetch resource attributes: %s"), ex)
LOG.warning(_LW("Failed to fetch resource attributes: %s"), ex)
return
return subnets
return super(Port, self)._resolve_attribute(name)


@@ -237,7 +237,7 @@ class SwiftContainer(resource.Resource):
headers = self.client().head_container(self.resource_id)
except Exception as ex:
if self.client_plugin().is_client_exception(ex):
LOG.warn(_LW("Head container failed: %s"), ex)
LOG.warning(_LW("Head container failed: %s"), ex)
return None
raise
else:


@@ -166,12 +166,12 @@ class TroveCluster(resource.Resource):
return cluster
except Exception as exc:
if self.client_plugin().is_over_limit(exc):
LOG.warn(_LW("Stack %(name)s (%(id)s) received an "
"OverLimit response during clusters.get():"
" %(exception)s"),
{'name': self.stack.name,
'id': self.stack.id,
'exception': exc})
LOG.warning(_LW("Stack %(name)s (%(id)s) received an "
"OverLimit response during clusters.get():"
" %(exception)s"),
{'name': self.stack.name,
'id': self.stack.id,
'exception': exc})
return None
else:
raise


@@ -366,12 +366,12 @@ class OSDBInstance(resource.Resource):
return instance
except Exception as exc:
if self.client_plugin().is_over_limit(exc):
LOG.warn(_LW("Stack %(name)s (%(id)s) received an "
"OverLimit response during instance.get():"
" %(exception)s"),
{'name': self.stack.name,
'id': self.stack.id,
'exception': exc})
LOG.warning(_LW("Stack %(name)s (%(id)s) received an "
"OverLimit response during instance.get():"
" %(exception)s"),
{'name': self.stack.name,
'id': self.stack.id,
'exception': exc})
return None
else:
raise


@@ -139,8 +139,8 @@ class SignalResponder(stack_user.StackUser):
secret_key = self.data().get('secret_key')
if not access_key or not secret_key:
LOG.warn(_LW('Cannot generate signed url, '
'unable to create keypair'))
LOG.warning(_LW('Cannot generate signed url, '
'unable to create keypair'))
return
config_url = cfg.CONF.heat_waitcondition_server_url


@@ -178,8 +178,8 @@ class StackResource(resource.Resource):
child_template = self.child_template()
params = self.child_params()
except NotImplementedError:
LOG.warn(_LW("Preview of '%s' not yet implemented"),
self.__class__.__name__)
LOG.warning(_LW("Preview of '%s' not yet implemented"),
self.__class__.__name__)
return self
name = "%s-%s" % (self.stack.name, self.name)


@@ -105,7 +105,7 @@ class StackUser(resource.Resource):
# compatibility with resources created before the migration
# to stack_user.StackUser domain users. After an appropriate
# transitional period, this should be removed.
LOG.warn(_LW('Reverting to legacy user delete path'))
LOG.warning(_LW('Reverting to legacy user delete path'))
try:
self.keystone().delete_stack_user(user_id)
except kc_exception.NotFound:


@@ -57,8 +57,8 @@ class BaseWaitConditionHandle(signal_responder.SignalResponder):
if self._metadata_format_ok(metadata):
rsrc_metadata = self.metadata_get(refresh=True)
if metadata[self.UNIQUE_ID] in rsrc_metadata:
LOG.warn(_LW("Overwriting Metadata item for id %s!"),
metadata[self.UNIQUE_ID])
LOG.warning(_LW("Overwriting Metadata item for id %s!"),
metadata[self.UNIQUE_ID])
safe_metadata = {}
for k in self.METADATA_KEYS:
if k == self.UNIQUE_ID:


@@ -1424,7 +1424,7 @@ class EngineService(service.Service):
if cfg.CONF.heat_stack_user_role in cnxt.roles:
if not self._authorize_stack_user(cnxt, stack, resource_name):
LOG.warn(_LW("Access denied to resource %s"), resource_name)
LOG.warning(_LW("Access denied to resource %s"), resource_name)
raise exception.Forbidden()
if resource_name not in stack:
@@ -1686,7 +1686,7 @@ class EngineService(service.Service):
try:
wrn = [w.name for w in watch_rule.WatchRule.get_all(cnxt)]
except Exception as ex:
LOG.warn(_LW('show_watch (all) db error %s'), ex)
LOG.warning(_LW('show_watch (all) db error %s'), ex)
return
wrs = [watchrule.WatchRule.load(cnxt, w) for w in wrn]
@@ -1714,7 +1714,7 @@ class EngineService(service.Service):
try:
wds = watch_data.WatchData.get_all(cnxt)
except Exception as ex:
LOG.warn(_LW('show_metric (all) db error %s'), ex)
LOG.warning(_LW('show_metric (all) db error %s'), ex)
return
result = [api.format_watch_data(w) for w in wds]


@@ -88,8 +88,9 @@ class StackWatch(object):
wrs = watch_rule_object.WatchRule.get_all_by_stack(admin_context,
sid)
except Exception as ex:
LOG.warn(_LW('periodic_task db error watch rule removed? %(ex)s'),
ex)
LOG.warning(_LW('periodic_task db error watch rule'
' removed? %(ex)s'),
ex)
return
def run_alarm_action(stk, actions, details):

View File

@@ -358,7 +358,7 @@ class Stack(collections.Mapping):
parameter.
"""
if not self.parameters.set_stack_id(self.identifier()):
LOG.warn(_LW("Unable to set parameters StackId identifier"))
LOG.warning(_LW("Unable to set parameters StackId identifier"))
@staticmethod
def get_dep_attrs(resources, outputs, resource_name):
@@ -771,12 +771,12 @@ class Stack(collections.Mapping):
updated = self._persist_state()
if not updated:
# Possibly failed concurrent update
LOG.warn(_LW("Failed to set state of stack %(name)s with"
" traversal ID %(trvsl_id)s, to"
" %(action)s_%(status)s"),
{'name': self.name,
'trvsl_id': self.current_traversal,
'action': action, 'status': status})
LOG.warning(_LW("Failed to set state of stack %(name)s with"
" traversal ID %(trvsl_id)s, to"
" %(action)s_%(status)s"),
{'name': self.name,
'trvsl_id': self.current_traversal,
'action': action, 'status': status})
return updated
# Persist state to db only if status == IN_PROGRESS
@@ -1086,10 +1086,10 @@ class Stack(collections.Mapping):
# we expect to update the stack having previous traversal ID
stack_id = self.store(exp_trvsl=previous_traversal)
if stack_id is None:
LOG.warn(_LW("Failed to store stack %(name)s with traversal ID"
" %(trvsl_id)s, aborting stack %(action)s"),
{'name': self.name, 'trvsl_id': previous_traversal,
'action': self.action})
LOG.warning(_LW("Failed to store stack %(name)s with traversal "
"ID %(trvsl_id)s, aborting stack %(action)s"),
{'name': self.name, 'trvsl_id': previous_traversal,
'action': self.action})
return
self._send_notification_and_add_event()
@@ -1112,10 +1112,10 @@ class Stack(collections.Mapping):
stack_id = self.store()
if stack_id is None:
# Failed concurrent update
LOG.warn(_LW("Failed to store stack %(name)s with traversal ID"
" %(trvsl_id)s, aborting stack %(action)s"),
{'name': self.name, 'trvsl_id': self.current_traversal,
'action': self.action})
LOG.warning(_LW("Failed to store stack %(name)s with traversal "
"ID %(trvsl_id)s, aborting stack %(action)s"),
{'name': self.name, 'trvsl_id': self.current_traversal,
'action': self.action})
return
LOG.info(_LI('convergence_dependencies: %s'),
@@ -1157,10 +1157,10 @@ class Stack(collections.Mapping):
stack_id = self.store()
if stack_id is None:
# Failed concurrent update
LOG.warn(_LW("Failed to store stack %(name)s with traversal ID"
" %(trvsl_id)s, not trigerring rollback."),
{'name': self.name,
'trvsl_id': self.current_traversal})
LOG.warning(_LW("Failed to store stack %(name)s with traversal"
" ID %(trvsl_id)s, not trigerring rollback."),
{'name': self.name,
'trvsl_id': self.current_traversal})
return
self.converge_stack(rollback_tmpl, action=self.ROLLBACK)
@@ -1850,10 +1850,10 @@ class Stack(collections.Mapping):
stack_id = self.store()
if stack_id is None:
# Failed concurrent update
LOG.warn(_LW("Failed to store stack %(name)s with traversal ID"
" %(trvsl_id)s, aborting stack purge"),
{'name': self.name,
'trvsl_id': self.current_traversal})
LOG.warning(_LW("Failed to store stack %(name)s with traversal"
" ID %(trvsl_id)s, aborting stack purge"),
{'name': self.name,
'trvsl_id': self.current_traversal})
return
raw_template_object.RawTemplate.delete(self.context, prev_tmpl_id)


@@ -117,8 +117,8 @@ class StackLock(object):
result = stack_lock_object.StackLock.release(self.stack_id,
self.engine_id)
if result is True:
LOG.warn(_LW("Lock was already released on stack %s!"),
self.stack_id)
LOG.warning(_LW("Lock was already released on stack %s!"),
self.stack_id)
else:
LOG.debug("Engine %(engine)s released lock on stack "
"%(stack)s" % {'engine': self.engine_id,


@@ -85,8 +85,9 @@ class WatchRule(object):
watch = watch_rule_objects.WatchRule.get_by_name(context,
watch_name)
except Exception as ex:
LOG.warn(_LW('WatchRule.load (%(watch_name)s) db error '
'%(ex)s'), {'watch_name': watch_name, 'ex': ex})
LOG.warning(_LW('WatchRule.load (%(watch_name)s) db error '
'%(ex)s'), {'watch_name': watch_name,
'ex': ex})
if watch is None:
raise exception.EntityNotFound(entity='Watch Rule',
name=watch_name)
@@ -272,8 +273,8 @@ class WatchRule(object):
for refid in self.rule[self.ACTION_MAP[new_state]]:
actions.append(stk.resource_by_refid(refid).signal)
else:
LOG.warn(_LW("Could not process watch state %s for stack"),
new_state)
LOG.warning(_LW("Could not process watch state %s for stack"),
new_state)
return actions
def _to_ceilometer(self, data):
@@ -355,9 +356,9 @@ class WatchRule(object):
% {'self_state': self.state, 'name': self.name,
'state': state})
else:
LOG.warn(_LW("Unable to override state %(state)s for "
"watch %(name)s"), {'state': self.state,
'name': self.name})
LOG.warning(_LW("Unable to override state %(state)s for "
"watch %(name)s"), {'state': self.state,
'name': self.name})
return actions


@@ -39,27 +39,27 @@ class GenericResource(resource.Resource):
return True
def handle_create(self):
LOG.warn(_LW('Creating generic resource (Type "%s")'),
self.type())
LOG.warning(_LW('Creating generic resource (Type "%s")'),
self.type())
def handle_update(self, json_snippet, tmpl_diff, prop_diff):
LOG.warn(_LW('Updating generic resource (Type "%s")'),
self.type())
LOG.warning(_LW('Updating generic resource (Type "%s")'),
self.type())
def handle_delete(self):
LOG.warn(_LW('Deleting generic resource (Type "%s")'),
self.type())
LOG.warning(_LW('Deleting generic resource (Type "%s")'),
self.type())
def _resolve_attribute(self, name):
return self.name
def handle_suspend(self):
LOG.warn(_LW('Suspending generic resource (Type "%s")'),
self.type())
LOG.warning(_LW('Suspending generic resource (Type "%s")'),
self.type())
def handle_resume(self):
LOG.warn(_LW('Resuming generic resource (Type "%s")'),
self.type())
LOG.warning(_LW('Resuming generic resource (Type "%s")'),
self.type())
class ResWithShowAttr(GenericResource):
@@ -189,8 +189,8 @@ class SignalResource(signal_responder.SignalResponder):
self.resource_id_set(self._get_user_id())
def handle_signal(self, details=None):
LOG.warn(_LW('Signaled resource (Type "%(type)s") %(details)s'),
{'type': self.type(), 'details': details})
LOG.warning(_LW('Signaled resource (Type "%(type)s") %(details)s'),
{'type': self.type(), 'details': details})
def _resolve_attribute(self, name):
if self.resource_id is not None:


@@ -631,16 +631,16 @@ outputs:
md = self.client.resources.metadata(stack_identifier, 'custom_lb')
actual_md = len(md['IPs'].split(','))
if actual_md != expected:
LOG.warn('check_instance_count exp:%d, meta:%s' % (expected,
md['IPs']))
LOG.warning('check_instance_count exp:%d, meta:%s' % (expected,
md['IPs']))
return False
stack = self.client.stacks.get(stack_identifier)
inst_list = self._stack_output(stack, 'InstanceList')
actual = len(inst_list.split(','))
if actual != expected:
LOG.warn('check_instance_count exp:%d, act:%s' % (expected,
inst_list))
LOG.warning('check_instance_count exp:%d, act:%s' % (expected,
inst_list))
return actual == expected
def test_scaling_meta_update(self):


@@ -95,7 +95,7 @@ Outputs:
if key_header not in oc.head_account():
swift_key = hashlib.sha224(
str(random.getrandbits(256))).hexdigest()[:32]
LOG.warn('setting swift key to %s' % swift_key)
LOG.warning('setting swift key to %s' % swift_key)
oc.post_account({key_header: swift_key})
key = oc.head_account()[key_header]
path = '/v1/AUTH_%s/%s/%s' % (self.project_id,


@@ -30,8 +30,8 @@ class CeilometerAlarmTest(scenario_base.ScenarioTestsBase):
stack = self.client.stacks.get(stack_identifier)
actual = self._stack_output(stack, 'asg_size')
if actual != expected:
LOG.warn('check_instance_count exp:%d, act:%s' % (expected,
actual))
LOG.warning('check_instance_count exp:%d, act:%s' % (expected,
actual))
return actual == expected
def test_alarm(self):