Merge "Replace LOG.warn with LOG.warning"
commit 7c6df48455
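Background for reviewers: `Logger.warn` has been documented as a deprecated alias of `Logger.warning` in Python's standard logging package since 3.3 (oslo.log mirrors the stdlib here), so this rename is behavior-preserving today and simply moves off the deprecated spelling. A minimal standalone sketch, independent of Ceilometer:

import logging
import warnings

# DeprecationWarnings are hidden by default outside __main__; surface them.
warnings.simplefilter("always", DeprecationWarning)
logging.basicConfig()
LOG = logging.getLogger(__name__)

LOG.warning("canonical spelling")  # the supported API
LOG.warn("deprecated spelling")    # emits a DeprecationWarning on Python 3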
@@ -116,7 +116,7 @@ class CollectorService(os_service.Service):
             try:
                 sample = msgpack.loads(data, encoding='utf-8')
             except Exception:
-                LOG.warn(_("UDP: Cannot decode data sent by %s"), source)
+                LOG.warning(_("UDP: Cannot decode data sent by %s"), source)
             else:
                 try:
                     LOG.debug("UDP: Storing %s", sample)
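The hunk above guards msgpack decoding of incoming UDP datagrams. A small sketch of that round trip; note that the `encoding='utf-8'` keyword seen here was removed in msgpack-python 1.0, where `raw=False` is the equivalent:

import msgpack

# Pack a sample the way a UDP publisher would, then decode it the way the
# collector does; a decode failure is logged as a warning and dropped.
data = msgpack.dumps({'counter_name': 'cpu', 'counter_volume': 1.0})
try:
    sample = msgpack.loads(data, raw=False)  # msgpack>=1.0 spelling
except Exception:
    sample = None
assert sample == {'counter_name': 'cpu', 'counter_volume': 1.0}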
@@ -213,7 +213,7 @@ class GnocchiDispatcher(dispatcher.MeterDispatcherBase):
        except ImportError:
            pass
        except oslo_cache.exception.ConfigurationError as exc:
-            LOG.warn(_LW('unable to configure oslo_cache: %s') % exc)
+            LOG.warning(_LW('unable to configure oslo_cache: %s') % exc)

        self._gnocchi_project_id = None
        self._gnocchi_project_id_lock = threading.Lock()
@@ -312,8 +312,8 @@ class GnocchiDispatcher(dispatcher.MeterDispatcherBase):
            samples = list(samples)
            rd = self._get_resource_definition(metric_name)
            if rd is None:
-                LOG.warn("metric %s is not handled by gnocchi" %
-                         metric_name)
+                LOG.warning("metric %s is not handled by gnocchi" %
+                            metric_name)
                continue
            if rd.cfg.get("ignore"):
                continue
@@ -151,7 +151,7 @@ class SensorNotification(plugin_base.NotificationBase):
                project_id=info['project_id'])

        except InvalidSensorData as exc:
-            LOG.warn(
+            LOG.warning(
                'invalid sensor data for %(resource)s: %(error)s' %
                dict(resource=resource_id, error=exc)
            )
@@ -116,10 +116,10 @@ def register_keystoneauth_opts(conf):

 def setup_keystoneauth(conf):
     if conf[CFG_GROUP].auth_type == "password-ceilometer-legacy":
-        LOG.warn("Value 'password-ceilometer-legacy' for '[%s]/auth_type' "
-                 "is deprecated. And will be removed in Ceilometer 7.0. "
-                 "Use 'password' instead.",
-                 CFG_GROUP)
+        LOG.warning("Value 'password-ceilometer-legacy' for '[%s]/auth_type' "
+                    "is deprecated. And will be removed in Ceilometer 7.0. "
+                    "Use 'password' instead.",
+                    CFG_GROUP)

     ka_loading.load_auth_from_conf_options(conf, CFG_GROUP)

@@ -45,9 +45,9 @@ class FirewallPollster(base.BaseServicesPollster):
            status = self.get_status_id(fw['status'])
            if status == -1:
                # unknown status, skip this sample
-                LOG.warn(_("Unknown status %(stat)s received on fw %(id)s,"
-                           "skipping sample") % {'stat': fw['status'],
-                                                 'id': fw['id']})
+                LOG.warning(_("Unknown status %(stat)s received on fw %(id)s,"
+                              "skipping sample") % {'stat': fw['status'],
+                                                    'id': fw['id']})
                continue

            yield sample.Sample(
@@ -60,9 +60,9 @@ class LBPoolPollster(base.BaseServicesPollster):
            status = self.get_status_id(pool['status'])
            if status == -1:
                # unknown status, skip this sample
-                LOG.warn(_("Unknown status %(stat)s received on pool %(id)s, "
-                           "skipping sample") % {'stat': pool['status'],
-                                                 'id': pool['id']})
+                LOG.warning(_("Unknown status %(stat)s received on pool "
+                              "%(id)s, skipping sample")
+                            % {'stat': pool['status'], 'id': pool['id']})
                continue

            yield sample.Sample(
@@ -108,9 +108,9 @@ class LBVipPollster(base.BaseServicesPollster):
            status = self.get_status_id(vip['status'])
            if status == -1:
                # unknown status, skip this sample
-                LOG.warn(_("Unknown status %(stat)s received on vip %(id)s, "
-                           "skipping sample") % {'stat': vip['status'],
-                                                 'id': vip['id']})
+                LOG.warning(_("Unknown status %(stat)s received on vip "
+                              "%(id)s, skipping sample")
+                            % {'stat': vip['status'], 'id': vip['id']})
                continue

            yield sample.Sample(
@@ -149,9 +149,9 @@ class LBMemberPollster(base.BaseServicesPollster):
            LOG.debug("Load Balancer Member : %s" % member)
            status = self.get_status_id(member['status'])
            if status == -1:
-                LOG.warn(_("Unknown status %(stat)s received on member %(id)s,"
-                           "skipping sample") % {'stat': member['status'],
-                                                 'id': member['id']})
+                LOG.warning(_("Unknown status %(stat)s received on member "
+                              "%(id)s, skipping sample")
+                            % {'stat': member['status'], 'id': member['id']})
                continue
            yield sample.Sample(
                name='network.services.lb.member',
@@ -46,9 +46,9 @@ class VPNServicesPollster(base.BaseServicesPollster):
            status = self.get_status_id(vpn['status'])
            if status == -1:
                # unknown status, skip this sample
-                LOG.warn(_("Unknown status %(stat)s received on vpn %(id)s,"
-                           "skipping sample") % {'stat': vpn['status'],
-                                                 'id': vpn['id']})
+                LOG.warning(_("Unknown status %(stat)s received on vpn "
+                              "%(id)s, skipping sample")
+                            % {'stat': vpn['status'], 'id': vpn['id']})
                continue

            yield sample.Sample(
@@ -41,9 +41,9 @@ def logged(func):
            return func(*args, **kwargs)
        except exceptions.NeutronClientException as e:
            if e.status_code == 404:
-                LOG.warn("The resource could not be found.")
+                LOG.warning("The resource could not be found.")
            else:
-                LOG.warn(e)
+                LOG.warning(e)
            return []
        except Exception as e:
            LOG.exception(e)
@@ -92,8 +92,8 @@ class MessagingPublisher(publisher.PublisherBase):
        if self.policy in ['default', 'queue', 'drop']:
            LOG.info(_LI('Publishing policy set to %s') % self.policy)
        else:
-            LOG.warn(_('Publishing policy is unknown (%s) force to default')
-                     % self.policy)
+            LOG.warning(_('Publishing policy is unknown (%s) force to '
+                          'default') % self.policy)
            self.policy = 'default'

        self.retry = 1 if self.policy in ['queue', 'drop'] else None
@@ -144,8 +144,8 @@ class MessagingPublisher(publisher.PublisherBase):
        if queue_length > self.max_queue_length > 0:
            count = queue_length - self.max_queue_length
            self.local_queue = self.local_queue[count:]
-            LOG.warn(_("Publisher max local_queue length is exceeded, "
-                       "dropping %d oldest samples") % count)
+            LOG.warning(_("Publisher max local_queue length is exceeded, "
+                          "dropping %d oldest samples") % count)

    def _process_queue(self, queue, policy):
        current_retry = 0
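The truncation in this hunk keeps the newest samples and discards from the front of the queue. A standalone sketch of the same slicing, with local stand-in names rather than Ceilometer's attributes:

max_queue_length = 3
local_queue = ['s1', 's2', 's3', 's4', 's5']  # oldest samples first

queue_length = len(local_queue)
if queue_length > max_queue_length > 0:       # same chained comparison
    count = queue_length - max_queue_length
    local_queue = local_queue[count:]         # drop the `count` oldest
    print('Publisher max local_queue length is exceeded, '
          'dropping %d oldest samples' % count)

assert local_queue == ['s3', 's4', 's5']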
@@ -156,12 +156,12 @@ class MessagingPublisher(publisher.PublisherBase):
            except DeliveryFailure:
                data = sum([len(m) for __, __, m in queue])
                if policy == 'queue':
-                    LOG.warn(_("Failed to publish %d datapoints, queue them"),
-                             data)
+                    LOG.warning(_("Failed to publish %d datapoints, queue "
+                                  "them"), data)
                    return queue
                elif policy == 'drop':
-                    LOG.warn(_("Failed to publish %d datapoints, "
-                               "dropping them"), data)
+                    LOG.warning(_("Failed to publish %d datapoints, "
+                                  "dropping them"), data)
                    return []
                current_retry += 1
                if current_retry >= self.max_retry:
@@ -64,7 +64,7 @@ class UDPPublisher(publisher.PublisherBase):
                self.socket.sendto(msgpack.dumps(msg),
                                   (self.host, self.port))
            except Exception as e:
-                LOG.warn(_("Unable to send sample over UDP"))
+                LOG.warning(_("Unable to send sample over UDP"))
                LOG.exception(e)

    def publish_events(self, context, events):
@@ -471,9 +471,9 @@ def create_tables(conn, tables, column_families):
                    separator=conn.table_prefix_separator,
                    table_name=table))

-            LOG.warn(_("Cannot create table %(table_name)s "
-                       "it already exists. Ignoring error")
-                     % {'table_name': table})
+            LOG.warning(_("Cannot create table %(table_name)s "
+                          "it already exists. Ignoring error")
+                        % {'table_name': table})


 def quote(s, *args):
@@ -307,8 +307,9 @@ class Connection(base.Connection):
                            {'id': internal_id, 'meta_key': key,
                             'value': v})
                    except KeyError:
-                        LOG.warn(_("Unknown metadata type. Key (%s) "
-                                   "will not be queryable."), key)
+                        LOG.warning(_("Unknown metadata type. Key "
+                                      "(%s) will not be queryable."),
+                                    key)
                for _model in meta_map.keys():
                    conn.execute(_model.__table__.insert(),
                                 meta_map[_model])
@@ -266,8 +266,8 @@ class ConnectionPool(object):
            try:
                return MongoProxy(pymongo.MongoClient(url))
            except pymongo.errors.ConnectionFailure as e:
-                LOG.warn(_('Unable to connect to the database server: '
-                           '%(errmsg)s.') % {'errmsg': e})
+                LOG.warning(_('Unable to connect to the database server: '
+                              '%(errmsg)s.') % {'errmsg': e})
                raise

@@ -412,10 +412,10 @@ def safe_mongo_call(call):
                              'after %(retries)d retries. Giving up.') %
                            {'retries': max_retries})
                    raise
-                LOG.warn(_('Unable to reconnect to the primary mongodb: '
-                           '%(errmsg)s. Trying again in %(retry_interval)d '
-                           'seconds.') %
-                         {'errmsg': err, 'retry_interval': retry_interval})
+                LOG.warning(_('Unable to reconnect to the primary '
+                              'mongodb: %(errmsg)s. Trying again in '
+                              '%(retry_interval)d seconds.') %
+                            {'errmsg': err, 'retry_interval': retry_interval})
                attempts += 1
                time.sleep(retry_interval)
    return closure
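Only part of the retry loop is visible in this hunk; the real decorator wraps pymongo calls and catches its reconnect errors. A simplified sketch of that retry shape, with a stand-in exception type and hypothetical defaults:

import time

def safe_call(call, max_retries=3, retry_interval=2):
    """Hedged sketch of the retry loop around a flaky call."""
    attempts = 0
    while True:
        try:
            return call()
        except ConnectionError as err:  # stand-in for pymongo's AutoReconnect
            if 0 <= max_retries <= attempts:
                # out of retries: give up and re-raise, as the hunk does
                raise
            print('Unable to reconnect: %s. Trying again in %d seconds.'
                  % (err, retry_interval))
            attempts += 1
            time.sleep(retry_interval)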
@@ -1157,7 +1157,7 @@ class BasePipelineTestCase(base.BaseTestCase):

        cpu_util_sample = publisher.samples[0]
        self.assertEqual(12.5, cpu_util_sample.volume)
-        the_log.warn.assert_called_with(
+        the_log.warning.assert_called_with(
            'dropping out of time order sample: %s',
            (counters[1],)
        )
@@ -1523,7 +1523,7 @@ class BasePipelineTestCase(base.BaseTestCase):
            'target': {'name': 'aggregated-bytes'}
        }, expected_length=1)
        s = samples[0]
-        self.assertTrue(mylog.warn.called)
+        self.assertTrue(mylog.warning.called)
        self.assertEqual('aggregated-bytes', s.name)
        self.assertEqual(154, s.volume)
        self.assertEqual('test_user_bis', s.user_id)
@@ -150,7 +150,7 @@ class TestNotifications(base.BaseTestCase):
        processor = ipmi.TemperatureSensorNotification(None)

        messages = []
-        mylog.warn = lambda *args: messages.extend(args)
+        mylog.warning = lambda *args: messages.extend(args)

        list(processor.process_notification(ipmi_test_data.MISSING_SENSOR))

@@ -166,7 +166,7 @@ class TestNotifications(base.BaseTestCase):
        processor = ipmi.TemperatureSensorNotification(None)

        messages = []
-        mylog.warn = lambda *args: messages.extend(args)
+        mylog.warning = lambda *args: messages.extend(args)

        list(processor.process_notification(ipmi_test_data.BAD_SENSOR))

@@ -187,7 +187,7 @@ class TestNotifications(base.BaseTestCase):
        processor = ipmi.TemperatureSensorNotification(None)

        messages = []
-        mylog.warn = lambda *args: messages.extend(args)
+        mylog.warning = lambda *args: messages.extend(args)

        list(processor.process_notification(ipmi_test_data.NO_NODE_ID))

@@ -203,7 +203,7 @@ class TestNotifications(base.BaseTestCase):
        processor = ipmi.TemperatureSensorNotification(None)

        messages = []
-        mylog.warn = lambda *args: messages.extend(args)
+        mylog.warning = lambda *args: messages.extend(args)

        list(processor.process_notification(ipmi_test_data.NO_SENSOR_ID))

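These four test hunks all use the same capture trick: swap the logger method for a lambda that records its arguments, then assert on the collected messages. A condensed standalone sketch of the pattern (in the real tests `mylog` comes from mocking the module's LOG):

import logging

mylog = logging.getLogger('test')
messages = []
mylog.warning = lambda *args: messages.extend(args)  # monkeypatch capture

mylog.warning('invalid sensor data for %s', 'temp0')
assert messages == ['invalid sensor data for %s', 'temp0']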
@@ -185,7 +185,7 @@ class TestPublisherPolicy(TestPublisher):
                          msg_publisher.DeliveryFailure,
                          getattr(publisher, self.pub_func),
                          mock.MagicMock(), self.test_data)
-        self.assertTrue(mylog.warn.called)
+        self.assertTrue(mylog.warning.called)
        self.assertEqual('default', publisher.policy)
        self.assertEqual(0, len(publisher.local_queue))
        fake_send.assert_called_once_with(
@@ -54,8 +54,8 @@ class ArithmeticTransformer(transformer.TransformerBase):
            self.cache = collections.defaultdict(dict)
            self.latest_timestamp = None
        else:
-            LOG.warn(_('Arithmetic transformer must use at least one'
-                       ' meter in expression \'%s\''), self.expr)
+            LOG.warning(_('Arithmetic transformer must use at least one'
+                          ' meter in expression \'%s\''), self.expr)

    def _update_cache(self, _sample):
        """Update the cache with the latest sample."""
@@ -92,8 +92,8 @@ class ArithmeticTransformer(transformer.TransformerBase):
                    resource_metadata=reference_sample.resource_metadata
                )
            except Exception as e:
-                LOG.warn(_('Unable to evaluate expression %(expr)s: %(exc)s'),
-                         {'expr': self.expr, 'exc': e})
+                LOG.warning(_('Unable to evaluate expression %(expr)s: %(exc)s'),
+                            {'expr': self.expr, 'exc': e})

    def handle_sample(self, context, _sample):
        self._update_cache(_sample)
@@ -195,7 +195,7 @@ class RateOfChangeTransformer(ScalingTransformer):
            time_delta = timeutils.delta_seconds(prev_timestamp, timestamp)
            # disallow violations of the arrow of time
            if time_delta < 0:
-                LOG.warn(_('dropping out of time order sample: %s'), (s,))
+                LOG.warning(_('dropping out of time order sample: %s'), (s,))
                # Reset the cache to the newer sample.
                self.cache[key] = prev
                return None
@@ -213,8 +213,8 @@ class RateOfChangeTransformer(ScalingTransformer):
                s = self._convert(s, rate_of_change)
                LOG.debug('converted to: %s', s)
            else:
-                LOG.warn(_('dropping sample with no predecessor: %s'),
-                         (s,))
+                LOG.warning(_('dropping sample with no predecessor: %s'),
+                            (s,))
                s = None
        return s

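The two RateOfChange hunks above drop out-of-order samples and samples with no predecessor. A simplified sketch of that handling; the function and pair layout are illustrative, not the transformer's real signature:

def rate_of_change(prev, cur):
    """prev/cur are (volume, unix_timestamp) pairs; returns a rate or None."""
    time_delta = cur[1] - prev[1]
    if time_delta < 0:      # arrow-of-time violation: warn and drop
        return None
    volume_delta = cur[0] - prev[0]
    return volume_delta / time_delta if time_delta else 0.0

assert rate_of_change((10.0, 100), (30.0, 110)) == 2.0
assert rate_of_change((10.0, 100), (5.0, 90)) is None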
@@ -262,7 +262,7 @@ class AggregatorTransformer(ScalingTransformer):
        drop = ['drop'] if is_droppable else []
        if value or mandatory:
            if value not in ['last', 'first'] + drop:
-                LOG.warn('%s is unknown (%s), using last' % (name, value))
+                LOG.warning('%s is unknown (%s), using last' % (name, value))
                value = 'last'
            self.merged_attribute_policy[name] = value
        else: