Change-Id: I181516dae9243c41849a4b5399e9ef1e4a2fbaa9
Signed-off-by: Stephen Finucane <sfinucan@redhat.com>
Stephen Finucane committed 2025-09-30 17:19:54 +01:00
parent df331c63b7
commit a1279f7b98
32 changed files with 1362 additions and 966 deletions


@@ -12,18 +12,14 @@ repos:
- id: debug-statements
- id: check-yaml
files: .*\.(yaml|yml)$
- repo: https://github.com/astral-sh/ruff-pre-commit
rev: v0.14.7
hooks:
- id: ruff-check
args: ['--fix', '--unsafe-fixes']
- id: ruff-format
- repo: https://opendev.org/openstack/hacking
rev: 7.0.0
rev: 8.0.0
hooks:
- id: hacking
additional_dependencies: []
- repo: https://github.com/PyCQA/bandit
rev: 1.8.6
hooks:
- id: bandit
args: ['-c', 'pyproject.toml']
- repo: https://github.com/asottile/pyupgrade
rev: v3.20.0
hooks:
- id: pyupgrade
args: [--py310-plus]


@@ -14,9 +14,9 @@
# under the License.
"""A demonstration of oslo.i18n integration module that is used
in projects wanting to implement Oslo i18n translation.
in projects wanting to implement Oslo i18n translation.
See https://docs.openstack.org/oslo.i18n/latest/user/index.html
See https://docs.openstack.org/oslo.i18n/latest/user/index.html
"""
import oslo_i18n


@@ -59,14 +59,12 @@ def prepare():
# oslo_log._options.log_opts[0].default
#
extra_log_level_defaults = [
'dogpile=INFO',
'routes=INFO'
]
extra_log_level_defaults = ['dogpile=INFO', 'routes=INFO']
logging.set_defaults(
default_log_levels=logging.get_default_log_levels() +
extra_log_level_defaults)
default_log_levels=logging.get_default_log_levels()
+ extra_log_level_defaults
)
# Required setup based on configuration and domain
logging.setup(CONF, DOMAIN)


@@ -61,14 +61,12 @@ def prepare():
# oslo_log._options.log_opts[0].default
#
extra_log_level_defaults = [
'dogpile=INFO',
'routes=INFO'
]
extra_log_level_defaults = ['dogpile=INFO', 'routes=INFO']
logging.set_defaults(
default_log_levels=logging.get_default_log_levels() +
extra_log_level_defaults)
default_log_levels=logging.get_default_log_levels()
+ extra_log_level_defaults
)
# Required setup based on configuration and domain
logging.setup(CONF, DOMAIN)
@@ -79,7 +77,7 @@ if __name__ == '__main__':
LOG.info("Welcome to Oslo Logging")
LOG.info("Without context")
context.RequestContext(user='6ce90b4d',
project='d6134462',
domain='a6b9360e')
context.RequestContext(
user='6ce90b4d', project='d6134462', domain='a6b9360e'
)
LOG.info("With context")


@@ -70,8 +70,8 @@ def prepare():
custom_log_level_defaults = logging.get_default_log_levels() + [
'dogpile=INFO',
'routes=INFO'
]
'routes=INFO',
]
logging.set_defaults(default_log_levels=custom_log_level_defaults)


@@ -50,7 +50,7 @@ change default logging levels if necessary.
.. literalinclude:: examples/usage.py
:linenos:
:lines: 51-53,61-69
:lines: 51-53,61-67
:emphasize-lines: 10
Call :func:`~oslo_log.log.setup` with the oslo.config CONF object used
@@ -59,7 +59,7 @@ to configure logging for the application.
.. literalinclude:: examples/usage.py
:linenos:
:lines: 34,36-37,70-72
:lines: 33-34,36-37,70-72
:emphasize-lines: 6
Source: :ref:`examples/usage.py <example_usage.py>`
@@ -72,7 +72,7 @@ log levels.
.. literalinclude:: examples/usage.py
:linenos:
:lines: 77-83
:lines: 77-84
**Example Logging Output:**
@@ -103,7 +103,7 @@ additional contextual information applicable for your application.
.. literalinclude:: examples/usage_context.py
:linenos:
:lines: 80-85
:lines: 78-82
:emphasize-lines: 3-5
**Example Logging Output:**


@@ -16,189 +16,242 @@ from oslo_log import versionutils
_DEFAULT_LOG_DATE_FORMAT = "%Y-%m-%d %H:%M:%S"
DEFAULT_LOG_LEVELS = ['amqp=WARN', 'boto=WARN', 'sqlalchemy=WARN', 'suds=INFO',
'oslo.messaging=INFO', 'oslo_messaging=INFO',
'iso8601=WARN',
'requests.packages.urllib3.connectionpool=WARN',
'urllib3.connectionpool=WARN', 'websocket=WARN',
'requests.packages.urllib3.util.retry=WARN',
'urllib3.util.retry=WARN',
'keystonemiddleware=WARN', 'routes.middleware=WARN',
'stevedore=WARN', 'taskflow=WARN',
'keystoneauth=WARN', 'oslo.cache=INFO',
'oslo_policy=INFO',
'dogpile.core.dogpile=INFO']
DEFAULT_LOG_LEVELS = [
'amqp=WARN',
'boto=WARN',
'sqlalchemy=WARN',
'suds=INFO',
'oslo.messaging=INFO',
'oslo_messaging=INFO',
'iso8601=WARN',
'requests.packages.urllib3.connectionpool=WARN',
'urllib3.connectionpool=WARN',
'websocket=WARN',
'requests.packages.urllib3.util.retry=WARN',
'urllib3.util.retry=WARN',
'keystonemiddleware=WARN',
'routes.middleware=WARN',
'stevedore=WARN',
'taskflow=WARN',
'keystoneauth=WARN',
'oslo.cache=INFO',
'oslo_policy=INFO',
'dogpile.core.dogpile=INFO',
]
_IGNORE_MESSAGE = "This option is ignored if log_config_append is set."
common_cli_opts = [
cfg.BoolOpt('debug',
short='d',
default=False,
mutable=True,
help='If set to true, the logging level will be set to '
'DEBUG instead of the default INFO level.'),
cfg.BoolOpt(
'debug',
short='d',
default=False,
mutable=True,
help='If set to true, the logging level will be set to '
'DEBUG instead of the default INFO level.',
),
]
logging_cli_opts = [
cfg.StrOpt('log-config-append',
metavar='PATH',
deprecated_name='log-config',
mutable=True,
help='The name of a logging configuration file. This file '
'is appended to any existing logging configuration '
'files. For details about logging configuration files, '
'see the Python logging module documentation. Note that '
'when logging configuration files are used then all '
'logging configuration is set in the configuration file '
'and other logging configuration options are ignored '
'(for example, log-date-format).'),
cfg.StrOpt('log-date-format',
default=_DEFAULT_LOG_DATE_FORMAT,
metavar='DATE_FORMAT',
help='Defines the format string for %%(asctime)s in log '
'records. Default: %(default)s . '
+ _IGNORE_MESSAGE),
cfg.StrOpt('log-file',
metavar='PATH',
deprecated_name='logfile',
help='(Optional) Name of log file to send logging output to. '
'If no default is set, logging will go to stderr as '
'defined by use_stderr. '
+ _IGNORE_MESSAGE),
cfg.StrOpt('log-dir',
deprecated_name='logdir',
help='(Optional) The base directory used for relative log_file '
' paths. '
+ _IGNORE_MESSAGE),
cfg.BoolOpt('use-syslog',
default=False,
help='Use syslog for logging. '
'Existing syslog format is DEPRECATED '
'and will be changed later to honor RFC5424. '
+ _IGNORE_MESSAGE),
cfg.BoolOpt('use-journal',
default=False,
help='Enable journald for logging. '
'If running in a systemd environment you may wish '
'to enable journal support. Doing so will use the '
'journal native protocol which includes structured '
'metadata in addition to log messages.'
+ _IGNORE_MESSAGE),
cfg.StrOpt('syslog-log-facility',
default='LOG_USER',
help='Syslog facility to receive log lines. '
+ _IGNORE_MESSAGE),
cfg.BoolOpt('use-json',
default=False,
help='Use JSON formatting for logging. '
+ _IGNORE_MESSAGE),
cfg.StrOpt(
'log-config-append',
metavar='PATH',
deprecated_name='log-config',
mutable=True,
help='The name of a logging configuration file. This file '
'is appended to any existing logging configuration '
'files. For details about logging configuration files, '
'see the Python logging module documentation. Note that '
'when logging configuration files are used then all '
'logging configuration is set in the configuration file '
'and other logging configuration options are ignored '
'(for example, log-date-format).',
),
cfg.StrOpt(
'log-date-format',
default=_DEFAULT_LOG_DATE_FORMAT,
metavar='DATE_FORMAT',
help='Defines the format string for %%(asctime)s in log '
'records. Default: %(default)s . ' + _IGNORE_MESSAGE,
),
cfg.StrOpt(
'log-file',
metavar='PATH',
deprecated_name='logfile',
help='(Optional) Name of log file to send logging output to. '
'If no default is set, logging will go to stderr as '
'defined by use_stderr. ' + _IGNORE_MESSAGE,
),
cfg.StrOpt(
'log-dir',
deprecated_name='logdir',
help='(Optional) The base directory used for relative log_file '
' paths. ' + _IGNORE_MESSAGE,
),
cfg.BoolOpt(
'use-syslog',
default=False,
help='Use syslog for logging. '
'Existing syslog format is DEPRECATED '
'and will be changed later to honor RFC5424. ' + _IGNORE_MESSAGE,
),
cfg.BoolOpt(
'use-journal',
default=False,
help='Enable journald for logging. '
'If running in a systemd environment you may wish '
'to enable journal support. Doing so will use the '
'journal native protocol which includes structured '
'metadata in addition to log messages.' + _IGNORE_MESSAGE,
),
cfg.StrOpt(
'syslog-log-facility',
default='LOG_USER',
help='Syslog facility to receive log lines. ' + _IGNORE_MESSAGE,
),
cfg.BoolOpt(
'use-json',
default=False,
help='Use JSON formatting for logging. ' + _IGNORE_MESSAGE,
),
]
generic_log_opts = [
cfg.BoolOpt('use_stderr',
default=False,
help='Log output to standard error. '
+ _IGNORE_MESSAGE),
cfg.BoolOpt('log_color',
default=False,
help='(Optional) Set the \'color\' key according to log '
'levels. This option takes effect only when logging '
'to stderr or stdout is used. ' + _IGNORE_MESSAGE),
cfg.IntOpt('log_rotate_interval',
default=1,
help='The amount of time before the log files are rotated. '
'This option is ignored unless log_rotation_type is set '
'to "interval".'),
cfg.StrOpt('log_rotate_interval_type',
choices=['Seconds', 'Minutes', 'Hours', 'Days', 'Weekday',
'Midnight'],
ignore_case=True,
default='days',
help='Rotation interval type. The time of the last file '
'change (or the time when the service was started) is '
'used when scheduling the next rotation.'),
cfg.IntOpt('max_logfile_count',
default=30,
help='Maximum number of rotated log files.'),
cfg.IntOpt('max_logfile_size_mb',
default=200,
help='Log file maximum size in MB. This option is ignored if '
'"log_rotation_type" is not set to "size".'),
cfg.StrOpt('log_rotation_type',
default='none',
choices=[('interval',
'Rotate logs at predefined time intervals.'),
('size',
'Rotate logs once they reach a predefined size.'),
('none', 'Do not rotate log files.')],
ignore_case=True,
help='Log rotation type.')
cfg.BoolOpt(
'use_stderr',
default=False,
help='Log output to standard error. ' + _IGNORE_MESSAGE,
),
cfg.BoolOpt(
'log_color',
default=False,
help='(Optional) Set the \'color\' key according to log '
'levels. This option takes effect only when logging '
'to stderr or stdout is used. ' + _IGNORE_MESSAGE,
),
cfg.IntOpt(
'log_rotate_interval',
default=1,
help='The amount of time before the log files are rotated. '
'This option is ignored unless log_rotation_type is set '
'to "interval".',
),
cfg.StrOpt(
'log_rotate_interval_type',
choices=['Seconds', 'Minutes', 'Hours', 'Days', 'Weekday', 'Midnight'],
ignore_case=True,
default='days',
help='Rotation interval type. The time of the last file '
'change (or the time when the service was started) is '
'used when scheduling the next rotation.',
),
cfg.IntOpt(
'max_logfile_count',
default=30,
help='Maximum number of rotated log files.',
),
cfg.IntOpt(
'max_logfile_size_mb',
default=200,
help='Log file maximum size in MB. This option is ignored if '
'"log_rotation_type" is not set to "size".',
),
cfg.StrOpt(
'log_rotation_type',
default='none',
choices=[
('interval', 'Rotate logs at predefined time intervals.'),
('size', 'Rotate logs once they reach a predefined size.'),
('none', 'Do not rotate log files.'),
],
ignore_case=True,
help='Log rotation type.',
),
]
log_opts = [
cfg.StrOpt('logging_context_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [%(global_request_id)s %(request_id)s '
'%(user_identity)s] %(instance)s%(message)s',
help='Format string to use for log messages with context. '
'Used by oslo_log.formatters.ContextFormatter'),
cfg.StrOpt('logging_default_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [-] %(instance)s%(message)s',
help='Format string to use for log messages when context is '
'undefined. '
'Used by oslo_log.formatters.ContextFormatter'),
cfg.StrOpt('logging_debug_format_suffix',
default='%(funcName)s %(pathname)s:%(lineno)d',
help='Additional data to append to log message when logging '
'level for the message is DEBUG. '
'Used by oslo_log.formatters.ContextFormatter'),
cfg.StrOpt('logging_exception_prefix',
default='%(asctime)s.%(msecs)03d %(process)d ERROR %(name)s '
'%(instance)s',
help='Prefix each line of exception output with this format. '
'Used by oslo_log.formatters.ContextFormatter'),
cfg.StrOpt('logging_user_identity_format',
default='%(user)s %(project)s %(domain)s %(system_scope)s '
'%(user_domain)s %(project_domain)s',
help='Defines the format string for %(user_identity)s that '
'is used in logging_context_format_string. '
'Used by oslo_log.formatters.ContextFormatter'),
cfg.ListOpt('default_log_levels',
default=DEFAULT_LOG_LEVELS,
help='List of package logging levels in logger=LEVEL pairs. '
+ _IGNORE_MESSAGE),
cfg.BoolOpt('publish_errors',
default=False,
help='Enables or disables publication of error events.'),
cfg.StrOpt(
'logging_context_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [%(global_request_id)s %(request_id)s '
'%(user_identity)s] %(instance)s%(message)s',
help='Format string to use for log messages with context. '
'Used by oslo_log.formatters.ContextFormatter',
),
cfg.StrOpt(
'logging_default_format_string',
default='%(asctime)s.%(msecs)03d %(process)d %(levelname)s '
'%(name)s [-] %(instance)s%(message)s',
help='Format string to use for log messages when context is '
'undefined. '
'Used by oslo_log.formatters.ContextFormatter',
),
cfg.StrOpt(
'logging_debug_format_suffix',
default='%(funcName)s %(pathname)s:%(lineno)d',
help='Additional data to append to log message when logging '
'level for the message is DEBUG. '
'Used by oslo_log.formatters.ContextFormatter',
),
cfg.StrOpt(
'logging_exception_prefix',
default='%(asctime)s.%(msecs)03d %(process)d ERROR %(name)s '
'%(instance)s',
help='Prefix each line of exception output with this format. '
'Used by oslo_log.formatters.ContextFormatter',
),
cfg.StrOpt(
'logging_user_identity_format',
default='%(user)s %(project)s %(domain)s %(system_scope)s '
'%(user_domain)s %(project_domain)s',
help='Defines the format string for %(user_identity)s that '
'is used in logging_context_format_string. '
'Used by oslo_log.formatters.ContextFormatter',
),
cfg.ListOpt(
'default_log_levels',
default=DEFAULT_LOG_LEVELS,
help='List of package logging levels in logger=LEVEL pairs. '
+ _IGNORE_MESSAGE,
),
cfg.BoolOpt(
'publish_errors',
default=False,
help='Enables or disables publication of error events.',
),
# NOTE(mikal): there are two options here because sometimes we are handed
# a full instance (and could include more information), and other times we
# are just handed a UUID for the instance.
cfg.StrOpt('instance_format',
default='[instance: %(uuid)s] ',
help='The format for an instance that is passed with the log '
'message.'),
cfg.StrOpt('instance_uuid_format',
default='[instance: %(uuid)s] ',
help='The format for an instance UUID that is passed with the '
'log message.'),
cfg.IntOpt('rate_limit_interval',
default=0,
help='Interval, number of seconds, of log rate limiting.'),
cfg.IntOpt('rate_limit_burst',
default=0,
help='Maximum number of logged messages per '
'rate_limit_interval.'),
cfg.StrOpt('rate_limit_except_level',
default='CRITICAL',
choices=['CRITICAL', 'ERROR', 'INFO', 'WARNING', 'DEBUG', ''],
help='Log level name used by rate limiting. Logs with level '
'greater or equal to rate_limit_except_level are not '
'filtered. An empty string means that all levels are '
'filtered.'),
cfg.StrOpt(
'instance_format',
default='[instance: %(uuid)s] ',
help='The format for an instance that is passed with the log message.',
),
cfg.StrOpt(
'instance_uuid_format',
default='[instance: %(uuid)s] ',
help='The format for an instance UUID that is passed with the '
'log message.',
),
cfg.IntOpt(
'rate_limit_interval',
default=0,
help='Interval, number of seconds, of log rate limiting.',
),
cfg.IntOpt(
'rate_limit_burst',
default=0,
help='Maximum number of logged messages per rate_limit_interval.',
),
cfg.StrOpt(
'rate_limit_except_level',
default='CRITICAL',
choices=['CRITICAL', 'ERROR', 'INFO', 'WARNING', 'DEBUG', ''],
help='Log level name used by rate limiting. Logs with level '
'greater or equal to rate_limit_except_level are not '
'filtered. An empty string means that all levels are '
'filtered.',
),
]
@@ -219,6 +272,15 @@ def list_opts():
:returns: a list of (group_name, opts) tuples
"""
return [(None, (common_cli_opts + logging_cli_opts +
generic_log_opts + log_opts +
versionutils.deprecated_opts))]
return [
(
None,
(
common_cli_opts
+ logging_cli_opts
+ generic_log_opts
+ log_opts
+ versionutils.deprecated_opts
),
)
]
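
list_opts() still returns a single flat list of (group_name, opts) tuples after the reformat; a purely illustrative way to walk it, assuming the private _options module remains importable:

# Illustrative only: enumerate the options oslo.log exposes for sampling.
from oslo_log import _options

for group_name, opts in _options.list_opts():
    for opt in opts:
        print(group_name, opt.name, opt.default)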


@@ -42,7 +42,7 @@ def main():
levels=args.levels,
level_key=args.levelkey,
traceback_key=args.tbkey,
)
)
if args.lines:
# Read backward until we find all of our newline characters
# or reach the beginning of the file
@@ -63,47 +63,74 @@ def main():
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("file",
nargs='?', default=sys.stdin,
type=argparse.FileType(),
help="JSON log file to read from (if not provided"
" standard input is used instead)")
parser.add_argument("--prefix",
default='%(asctime)s.%(msecs)03d'
' %(process)s %(levelname)s %(name)s',
help="Message prefixes")
parser.add_argument("--locator",
default='[%(funcname)s %(pathname)s:%(lineno)s]',
help="Locator to append to DEBUG records")
parser.add_argument("--levelkey",
default=DEFAULT_LEVEL_KEY,
help="Key in the JSON record where the level is held")
parser.add_argument("--tbkey",
default=DEFAULT_TRACEBACK_KEY,
help="Key in the JSON record where the"
" traceback/exception is held")
parser.add_argument("-c", "--color",
action='store_true', default=False,
help="Color log levels (requires `termcolor`)")
parser.add_argument("-f", "--follow",
action='store_true', default=False,
help="Continue parsing new data until"
" KeyboardInterrupt")
parser.add_argument("-n", "--lines",
required=False, type=int,
help="Last N number of records to view."
" (May show less than N records when used"
" in conjunction with --loggers or --levels)")
parser.add_argument("--loggers",
nargs='*', default=[],
help="only return results matching given logger(s)")
parser.add_argument("--levels",
nargs='*', default=[],
help="Only return lines matching given log level(s)")
parser.add_argument(
"file",
nargs='?',
default=sys.stdin,
type=argparse.FileType(),
help="JSON log file to read from (if not provided"
" standard input is used instead)",
)
parser.add_argument(
"--prefix",
default='%(asctime)s.%(msecs)03d %(process)s %(levelname)s %(name)s',
help="Message prefixes",
)
parser.add_argument(
"--locator",
default='[%(funcname)s %(pathname)s:%(lineno)s]',
help="Locator to append to DEBUG records",
)
parser.add_argument(
"--levelkey",
default=DEFAULT_LEVEL_KEY,
help="Key in the JSON record where the level is held",
)
parser.add_argument(
"--tbkey",
default=DEFAULT_TRACEBACK_KEY,
help="Key in the JSON record where the traceback/exception is held",
)
parser.add_argument(
"-c",
"--color",
action='store_true',
default=False,
help="Color log levels (requires `termcolor`)",
)
parser.add_argument(
"-f",
"--follow",
action='store_true',
default=False,
help="Continue parsing new data until KeyboardInterrupt",
)
parser.add_argument(
"-n",
"--lines",
required=False,
type=int,
help="Last N number of records to view."
" (May show less than N records when used"
" in conjunction with --loggers or --levels)",
)
parser.add_argument(
"--loggers",
nargs='*',
default=[],
help="only return results matching given logger(s)",
)
parser.add_argument(
"--levels",
nargs='*',
default=[],
help="Only return lines matching given log level(s)",
)
args = parser.parse_args()
if args.color and not termcolor:
raise ImportError("Coloring requested but `termcolor` is not"
" importable")
raise ImportError(
"Coloring requested but `termcolor` is not importable"
)
return args
@@ -151,9 +178,15 @@ def reformat_json(fh, formatter, follow=False):
yield from formatter(record)
def console_format(prefix, locator, record, loggers=[], levels=[],
level_key=DEFAULT_LEVEL_KEY,
traceback_key=DEFAULT_TRACEBACK_KEY):
def console_format(
prefix,
locator,
record,
loggers=[],
levels=[],
level_key=DEFAULT_LEVEL_KEY,
traceback_key=DEFAULT_TRACEBACK_KEY,
):
# Provide an empty string to format-specifiers the record is
# missing, instead of failing. Doesn't work for non-string
# specifiers.
@@ -175,13 +208,17 @@ def console_format(prefix, locator, record, loggers=[], levels=[],
except TypeError:
# Thrown when a non-string format-specifier can't be filled in.
# Dict comprehension cleans up the output
yield warn('Missing non-string placeholder in record',
{str(k): str(v) if isinstance(v, str) else v
for k, v in record.items()})
yield warn(
'Missing non-string placeholder in record',
{
str(k): str(v) if isinstance(v, str) else v
for k, v in record.items()
},
)
return
locator = ''
if (record.get('levelno', 100) <= log.DEBUG or levelname == 'DEBUG'):
if record.get('levelno', 100) <= log.DEBUG or levelname == 'DEBUG':
locator = locator % record
yield ' '.join(x for x in [prefix, record['message'], locator] if x)


@@ -14,3 +14,8 @@
from .logging_error import get_logging_handle_error_fixture
from .setlevel import SetLogLevel
__all__ = [
'get_logging_handle_error_fixture',
'SetLogLevel',
]


@@ -24,8 +24,7 @@ def get_logging_handle_error_fixture():
self.useFixture(log_fixture.get_logging_handle_error_fixture())
"""
return fixtures.MonkeyPatch('logging.Handler.handleError',
_handleError)
return fixtures.MonkeyPatch('logging.Handler.handleError', _handleError)
def _handleError(self, record):
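
As the docstring above shows, the fixture is enabled with useFixture() inside a test case; a short sketch under oslotest (the class and test names are made up for illustration):

# Sketch: make argument-formatting errors in log calls fail the test.
import logging

from oslo_log import fixture as log_fixture
from oslotest import base


class ExampleTestCase(base.BaseTestCase):
    def setUp(self):
        super().setUp()
        self.useFixture(log_fixture.get_logging_handle_error_fixture())

    def test_bad_format(self):
        # With the fixture active, a mismatched format string raises.
        log = logging.getLogger(__name__)
        self.assertRaises(TypeError, log.error, 'pid is %(foo)s', 1)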


@@ -56,10 +56,7 @@ def _update_record_with_context(record):
The request context, if there is one, will either be passed with the
incoming record or in the global thread-local store.
"""
context = record.__dict__.get(
'context',
context_utils.get_current()
)
context = record.__dict__.get('context', context_utils.get_current())
if context:
d = _dictify_context(context)
# Copy the context values directly onto the record so they can be
@@ -71,16 +68,14 @@ def _update_record_with_context(record):
def _ensure_unicode(msg):
"""Do our best to turn the input argument into a unicode object.
"""
"""Do our best to turn the input argument into a unicode object."""
if isinstance(msg, str):
return msg
if not isinstance(msg, bytes):
return str(msg)
return encodeutils.safe_decode(
msg,
incoming='utf-8',
errors='xmlcharrefreplace')
msg, incoming='utf-8', errors='xmlcharrefreplace'
)
def _get_error_summary(record):
@@ -113,8 +108,13 @@ def _get_error_summary(record):
# that uses the value simpler.
if not exc_info[0]:
exc_info = None
elif exc_info[0] in (TypeError, ValueError,
KeyError, AttributeError, ImportError):
elif exc_info[0] in (
TypeError,
ValueError,
KeyError,
AttributeError,
ImportError,
):
# NOTE(dhellmann): Do not include information about
# common built-in exceptions used to detect cases of
# bad or missing data. We don't use isinstance() here
@@ -145,7 +145,7 @@ def _get_error_summary(record):
error_summary = error_summary.split('\n', 1)[0]
except TypeError as type_err:
# Work around https://bugs.python.org/issue28603
error_summary = "<exception with %s>" % str(type_err)
error_summary = f"<exception with {str(type_err)}>"
finally:
# Remove the local reference to the exception and
# traceback to avoid a memory leak through the frame
@@ -190,11 +190,12 @@ class JSONFormatter(logging.Formatter):
except TypeError as type_error:
# Work around https://bugs.python.org/issue28603
msg = str(type_error)
lines = ['<Unprintable exception due to %s>\n' % msg]
lines = [f'<Unprintable exception due to {msg}>\n']
if strip_newlines:
lines = [filter(
lambda x: x,
line.rstrip().splitlines()) for line in lines]
lines = [
filter(lambda x: x, line.rstrip().splitlines())
for line in lines
]
lines = list(itertools.chain(*lines))
return lines
@@ -210,28 +211,30 @@ class JSONFormatter(logging.Formatter):
# the value to be formatted. Don't filter anything.
if msg_keys:
args = {k: v for k, v in args.items() if k in msg_keys}
message = {'message': record.getMessage(),
'asctime': self.formatTime(record, self.datefmt),
'name': record.name,
'msg': record.msg,
'args': args,
'levelname': record.levelname,
'levelno': record.levelno,
'pathname': record.pathname,
'filename': record.filename,
'module': record.module,
'lineno': record.lineno,
'funcname': record.funcName,
'created': record.created,
'msecs': record.msecs,
'relative_created': record.relativeCreated,
'thread': record.thread,
'thread_name': record.threadName,
'process_name': record.processName,
'process': record.process,
'traceback': None,
'hostname': self.hostname,
'error_summary': _get_error_summary(record)}
message = {
'message': record.getMessage(),
'asctime': self.formatTime(record, self.datefmt),
'name': record.name,
'msg': record.msg,
'args': args,
'levelname': record.levelname,
'levelno': record.levelno,
'pathname': record.pathname,
'filename': record.filename,
'module': record.module,
'lineno': record.lineno,
'funcname': record.funcName,
'created': record.created,
'msecs': record.msecs,
'relative_created': record.relativeCreated,
'thread': record.thread,
'thread_name': record.threadName,
'process_name': record.processName,
'process': record.process,
'traceback': None,
'hostname': self.hostname,
'error_summary': _get_error_summary(record),
}
# Build the extra values that were given to us, including
# the context.
@@ -285,6 +288,7 @@ class FluentFormatter(logging.Formatter):
try:
# check if running under uwsgi
import uwsgi
svc_name = uwsgi.opt.get("name")
self.uwsgi_name = svc_name
except Exception:
@@ -296,27 +300,29 @@ class FluentFormatter(logging.Formatter):
except TypeError as type_error:
# Work around https://bugs.python.org/issue28603
msg = str(type_error)
lines = ['<Unprintable exception due to %s>\n' % msg]
lines = [f'<Unprintable exception due to {msg}>\n']
if strip_newlines:
lines = functools.reduce(lambda a,
line: a + line.rstrip().splitlines(),
lines, [])
lines = functools.reduce(
lambda a, line: a + line.rstrip().splitlines(), lines, []
)
return lines
def format(self, record):
message = {'message': record.getMessage(),
'time': self.formatTime(record, self.datefmt),
'name': record.name,
'level': record.levelname,
'filename': record.filename,
'lineno': record.lineno,
'module': record.module,
'funcname': record.funcName,
'process_name': record.processName,
'cmdline': self.cmdline,
'hostname': self.hostname,
'traceback': None,
'error_summary': _get_error_summary(record)}
message = {
'message': record.getMessage(),
'time': self.formatTime(record, self.datefmt),
'name': record.name,
'level': record.levelname,
'filename': record.filename,
'lineno': record.lineno,
'module': record.module,
'funcname': record.funcName,
'process_name': record.processName,
'cmdline': self.cmdline,
'hostname': self.hostname,
'traceback': None,
'error_summary': _get_error_summary(record),
}
# Build the extra values that were given to us, including
# the context.
@@ -410,13 +416,13 @@ class ContextFormatter(logging.Formatter):
context = _update_record_with_context(record)
if instance:
try:
instance_extra = (self.conf.instance_format
% instance)
instance_extra = self.conf.instance_format % instance
except TypeError:
instance_extra = instance
elif instance_uuid:
instance_extra = (self.conf.instance_uuid_format
% {'uuid': instance_uuid})
instance_extra = self.conf.instance_uuid_format % {
'uuid': instance_uuid
}
elif context:
# FIXME(dhellmann): We should replace these nova-isms with
# more generic handling in the Context class. See the
@@ -429,22 +435,30 @@ class ContextFormatter(logging.Formatter):
resource_uuid = getattr(context, 'resource_uuid', None)
if instance:
instance_extra = (self.conf.instance_format
% {'uuid': instance})
instance_extra = self.conf.instance_format % {'uuid': instance}
elif instance_uuid:
instance_extra = (self.conf.instance_uuid_format
% {'uuid': instance_uuid})
instance_extra = self.conf.instance_uuid_format % {
'uuid': instance_uuid
}
elif resource_uuid:
instance_extra = (self.conf.instance_uuid_format
% {'uuid': resource_uuid})
instance_extra = self.conf.instance_uuid_format % {
'uuid': resource_uuid
}
record.instance = instance_extra
# NOTE(sdague): default the fancier formatting params
# to an empty string so we don't throw an exception if
# they get used
for key in ('instance', 'color', 'user_identity', 'resource',
'user_name', 'project_name', 'global_request_id'):
for key in (
'instance',
'color',
'user_identity',
'resource',
'user_name',
'project_name',
'global_request_id',
):
if key not in record.__dict__:
record.__dict__[key] = ''
@@ -453,8 +467,8 @@ class ContextFormatter(logging.Formatter):
# get_logging_values of oslo.context.
if context:
record.user_identity = (
self.conf.logging_user_identity_format %
_ReplaceFalseValue(_dictify_context(context))
self.conf.logging_user_identity_format
% _ReplaceFalseValue(_dictify_context(context))
)
if record.__dict__.get('request_id'):
@@ -479,8 +493,10 @@ class ContextFormatter(logging.Formatter):
# string includes the bits we need to include it.
fmt += ': %(error_summary)s'
if (record.levelno == logging.DEBUG and
self.conf.logging_debug_format_suffix):
if (
record.levelno == logging.DEBUG
and self.conf.logging_debug_format_suffix
):
fmt += " " + self.conf.logging_debug_format_suffix
self._compute_iso_time(record)
@@ -493,8 +509,9 @@ class ContextFormatter(logging.Formatter):
except TypeError as err:
# Something went wrong, report that instead so we at least
# get the error message.
record.msg = 'Error formatting log line msg={!r} err={!r}'.format(
record.msg, err).replace('%', '*')
record.msg = (
f'Error formatting log line msg={record.msg!r} err={err!r}'
).replace('%', '*')
return logging.Formatter.format(self, record)
def formatException(self, exc_info, record=None):
@@ -505,16 +522,17 @@ class ContextFormatter(logging.Formatter):
except TypeError as type_error:
# Work around https://bugs.python.org/issue28603
msg = str(type_error)
return '<Unprintable exception due to %s>\n' % msg
return f'<Unprintable exception due to {msg}>\n'
stringbuffer = io.StringIO()
try:
traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
None, stringbuffer)
traceback.print_exception(
exc_info[0], exc_info[1], exc_info[2], None, stringbuffer
)
except TypeError as type_error:
# Work around https://bugs.python.org/issue28603
msg = str(type_error)
stringbuffer.write('<Unprintable exception due to %s>\n' % msg)
stringbuffer.write(f'<Unprintable exception due to {msg}>\n')
lines = stringbuffer.getvalue().split('\n')
stringbuffer.close()
@@ -534,8 +552,11 @@ class ContextFormatter(logging.Formatter):
def _compute_iso_time(self, record):
# set iso8601 timestamp
localtz = tz.tzlocal()
record.isotime = datetime.datetime.fromtimestamp(
record.created).replace(tzinfo=localtz).isoformat()
record.isotime = (
datetime.datetime.fromtimestamp(record.created)
.replace(tzinfo=localtz)
.isoformat()
)
if record.created == int(record.created):
# NOTE(stpierre): when the timestamp includes no
# microseconds -- e.g., 1450274066.000000 -- then the
@@ -543,5 +564,6 @@ class ContextFormatter(logging.Formatter):
# a result, in literally one in a million cases
# isoformat() looks different. This adds microseconds when
# that happens.
record.isotime = "{}.000000{}".format(record.isotime[:-6],
record.isotime[-6:])
record.isotime = (
f"{record.isotime[:-6]}.000000{record.isotime[-6:]}"
)


@@ -72,7 +72,6 @@ class OSSysLogHandler(logging.Handler):
class OSJournalHandler(logging.Handler):
custom_fields = (
'project_name',
'project_id',
@@ -118,7 +117,8 @@ class OSJournalHandler(logging.Handler):
# (it's constant anyway)
if not record.exc_text:
record.exc_text = self.formatter.formatException(
record.exc_info)
record.exc_info
)
if record.exc_text:
extras['EXCEPTION_INFO'] = record.exc_text
# Leave EXCEPTION_TEXT for backward compatibility
@@ -139,6 +139,7 @@ class ColorHandler(logging.StreamHandler):
There is also a '%(reset_color)s' key that can be used to manually reset
the color within a log line.
"""
LEVEL_COLORS = {
_TRACE: '\033[00;35m', # MAGENTA
logging.DEBUG: '\033[00;32m', # GREEN


@@ -20,8 +20,9 @@ import logging
def _get_full_class_name(cls):
return '{}.{}'.format(cls.__module__,
getattr(cls, '__qualname__', cls.__name__))
return '{}.{}'.format(
cls.__module__, getattr(cls, '__qualname__', cls.__name__)
)
def _is_method(obj, method):
@@ -51,18 +52,28 @@ def log_method_call(method):
if args:
first_arg = args[0]
if _is_method(first_arg, method):
cls = (first_arg if isinstance(first_arg, type)
else first_arg.__class__)
cls = (
first_arg
if isinstance(first_arg, type)
else first_arg.__class__
)
caller = _get_full_class_name(cls)
args_start_pos = 1
else:
caller = 'static'
else:
caller = 'static'
data = {'caller': caller,
'method_name': method.__name__,
'args': args[args_start_pos:], 'kwargs': kwargs}
log.debug('%(caller)s method %(method_name)s '
'called with arguments %(args)s %(kwargs)s', data)
data = {
'caller': caller,
'method_name': method.__name__,
'args': args[args_start_pos:],
'kwargs': kwargs,
}
log.debug(
'%(caller)s method %(method_name)s '
'called with arguments %(args)s %(kwargs)s',
data,
)
return method(*args, **kwargs)
return wrapper


@@ -33,6 +33,7 @@ import logging.config
import logging.handlers
import os
import sys
try:
import syslog
except ImportError:
@@ -67,7 +68,7 @@ LOG_ROTATE_INTERVAL_MAPPING = {
'hours': 'h',
'days': 'd',
'weekday': 'w',
'midnight': 'midnight'
'midnight': 'midnight',
}
_EVENTLET_FIX_APPLIED = False
@@ -107,7 +108,6 @@ def _iter_loggers():
class BaseLoggerAdapter(logging.LoggerAdapter):
warn = logging.LoggerAdapter.warning
@property
@@ -172,12 +172,10 @@ class KeywordArgumentAdapter(BaseLoggerAdapter):
# only works for contexts that have the resource id set.
resource = kwargs['extra'].get('resource', None)
if resource:
# Many OpenStack resources have a name entry in their db ref
# of the form <resource_type>-<uuid>, let's just use that if
# it's passed in
if not resource.get('name', None):
# For resources that don't have the name of the format we wish
# to use (or places where the LOG call may not have the full
# object ref, allow them to pass in a dict:
@@ -187,14 +185,16 @@ class KeywordArgumentAdapter(BaseLoggerAdapter):
resource_id = resource.get('id', None)
if resource_type and resource_id:
kwargs['extra']['resource'] = ('[' + resource_type +
'-' + resource_id + '] ')
kwargs['extra']['resource'] = (
'[' + resource_type + '-' + resource_id + '] '
)
else:
# FIXME(jdg): Since the name format can be specified via conf
# entry, we may want to consider allowing this to be configured
# here as well
kwargs['extra']['resource'] = ('[' + resource.get('name', '')
+ '] ')
kwargs['extra']['resource'] = (
'[' + resource.get('name', '') + '] '
)
return msg, kwargs
@@ -203,11 +203,11 @@ def _create_logging_excepthook(product_name):
def logging_excepthook(exc_type, value, tb):
extra = {'exc_info': (exc_type, value, tb)}
getLogger(product_name).critical('Unhandled error', **extra)
return logging_excepthook
class LogConfigError(Exception):
message = _('Error loading logging config %(log_config)s: %(err_msg)s')
def __init__(self, log_config, err_msg):
@@ -215,8 +215,9 @@ class LogConfigError(Exception):
self.err_msg = err_msg
def __str__(self):
return self.message % dict(log_config=self.log_config,
err_msg=self.err_msg)
return self.message % dict(
log_config=self.log_config, err_msg=self.err_msg
)
def _load_log_config(log_config_append):
@@ -231,8 +232,9 @@ def _load_log_config(log_config_append):
logger.setLevel(logging.NOTSET)
logger.handlers = []
logger.propagate = 1
logging.config.fileConfig(log_config_append,
disable_existing_loggers=False)
logging.config.fileConfig(
log_config_append, disable_existing_loggers=False
)
_load_log_config.old_time = new_time
except (configparser.Error, KeyError, OSError, RuntimeError) as exc:
raise LogConfigError(log_config_append, str(exc))
@@ -278,12 +280,14 @@ def _fix_eventlet_logging():
if eventletutils.is_monkey_patched('thread'):
debtcollector.deprecate(
"Eventlet support is deprecated and will be removed")
"Eventlet support is deprecated and will be removed"
)
# If eventlet was not loaded before call to setup assume it's not used.
if not _EVENTLET_FIX_APPLIED:
import eventlet.green.threading
from oslo_log import pipe_mutex
logging.threading = eventlet.green.threading
logging._lock = logging.threading.RLock()
logging.Handler.createLock = pipe_mutex.pipe_createLock
@@ -301,8 +305,7 @@ def setup(conf, product_name, version='unknown', *, fix_eventlet=True):
sys.excepthook = _create_logging_excepthook(product_name)
def set_defaults(logging_context_format_string=None,
default_log_levels=None):
def set_defaults(logging_context_format_string=None, default_log_levels=None):
"""Set default values for the configuration options used by oslo.log."""
# Just in case the caller is not setting the
# default_log_level. This is insurance because
@@ -310,12 +313,13 @@ def set_defaults(logging_context_format_string=None,
# later in a backwards in-compatible change
if default_log_levels is not None:
cfg.set_defaults(
_options.log_opts,
default_log_levels=default_log_levels)
_options.log_opts, default_log_levels=default_log_levels
)
if logging_context_format_string is not None:
cfg.set_defaults(
_options.log_opts,
logging_context_format_string=logging_context_format_string)
logging_context_format_string=logging_context_format_string,
)
def tempest_set_log_file(filename):
@@ -335,15 +339,32 @@ def tempest_set_log_file(filename):
def _find_facility(facility):
# NOTE(jd): Check the validity of facilities at run time as they differ
# depending on the OS and Python version being used.
valid_facilities = [f for f in
["LOG_KERN", "LOG_USER", "LOG_MAIL",
"LOG_DAEMON", "LOG_AUTH", "LOG_SYSLOG",
"LOG_LPR", "LOG_NEWS", "LOG_UUCP",
"LOG_CRON", "LOG_AUTHPRIV", "LOG_FTP",
"LOG_LOCAL0", "LOG_LOCAL1", "LOG_LOCAL2",
"LOG_LOCAL3", "LOG_LOCAL4", "LOG_LOCAL5",
"LOG_LOCAL6", "LOG_LOCAL7"]
if getattr(syslog, f, None)]
valid_facilities = [
f
for f in [
"LOG_KERN",
"LOG_USER",
"LOG_MAIL",
"LOG_DAEMON",
"LOG_AUTH",
"LOG_SYSLOG",
"LOG_LPR",
"LOG_NEWS",
"LOG_UUCP",
"LOG_CRON",
"LOG_AUTHPRIV",
"LOG_FTP",
"LOG_LOCAL0",
"LOG_LOCAL1",
"LOG_LOCAL2",
"LOG_LOCAL3",
"LOG_LOCAL4",
"LOG_LOCAL5",
"LOG_LOCAL6",
"LOG_LOCAL7",
]
if getattr(syslog, f, None)
]
facility = facility.upper()
@@ -351,9 +372,10 @@ def _find_facility(facility):
facility = "LOG_" + facility
if facility not in valid_facilities:
raise TypeError(_('syslog facility must be one of: %s') %
', '.join("'%s'" % fac
for fac in valid_facilities))
raise TypeError(
_('syslog facility must be one of: %s')
% ', '.join(f"'{fac}'" for fac in valid_facilities)
)
return getattr(syslog, facility)
@@ -389,16 +411,18 @@ def _setup_logging_from_conf(conf, project, version):
# 'w0'-'w6' (w0 for Monday, w1 for Tuesday, and so on)'
if interval_type == 'w':
interval_type = interval_type + str(conf.log_rotate_interval)
filelog = file_handler(logpath,
when=interval_type,
interval=conf.log_rotate_interval,
backupCount=conf.max_logfile_count)
filelog = file_handler(
logpath,
when=interval_type,
interval=conf.log_rotate_interval,
backupCount=conf.max_logfile_count,
)
elif conf.log_rotation_type.lower() == "size":
file_handler = logging.handlers.RotatingFileHandler
maxBytes = conf.max_logfile_size_mb * units.Mi
filelog = file_handler(logpath,
maxBytes=maxBytes,
backupCount=conf.max_logfile_count)
filelog = file_handler(
logpath, maxBytes=maxBytes, backupCount=conf.max_logfile_count
)
else:
file_handler = logging.handlers.WatchedFileHandler
filelog = file_handler(logpath)
@@ -430,7 +454,8 @@ def _setup_logging_from_conf(conf, project, version):
if conf.publish_errors:
handler = importutils.import_object(
"oslo_messaging.notify.log_handler.PublishErrorsHandler",
logging.ERROR)
logging.ERROR,
)
log_root.addHandler(handler)
if conf.use_syslog:
@@ -443,10 +468,14 @@ def _setup_logging_from_conf(conf, project, version):
datefmt = conf.log_date_format
if not conf.use_json:
for handler in log_root.handlers:
handler.setFormatter(formatters.ContextFormatter(project=project,
version=version,
datefmt=datefmt,
config=conf))
handler.setFormatter(
formatters.ContextFormatter(
project=project,
version=version,
datefmt=datefmt,
config=conf,
)
)
else:
for handler in log_root.handlers:
handler.setFormatter(formatters.JSONFormatter(datefmt=datefmt))
@@ -471,9 +500,12 @@ def _setup_logging_from_conf(conf, project, version):
if conf.rate_limit_burst >= 1 and conf.rate_limit_interval >= 1:
from oslo_log import rate_limit
rate_limit.install_filter(conf.rate_limit_burst,
conf.rate_limit_interval,
conf.rate_limit_except_level)
rate_limit.install_filter(
conf.rate_limit_burst,
conf.rate_limit_interval,
conf.rate_limit_except_level,
)
_loggers = {}
@@ -505,9 +537,9 @@ def getLogger(name=None, project='unknown', version='unknown'):
if name and name.startswith('oslo_'):
name = 'oslo.' + name[5:]
if name not in _loggers:
_loggers[name] = KeywordArgumentAdapter(logging.getLogger(name),
{'project': project,
'version': version})
_loggers[name] = KeywordArgumentAdapter(
logging.getLogger(name), {'project': project, 'version': version}
)
return _loggers[name]
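
getLogger() caches one KeywordArgumentAdapter per name, so repeated calls with the same name return the same adapter; typical use, following the signature above (the project and version strings here are placeholders):

# Typical call; the adapter is created once per logger name and cached.
from oslo_log import log

LOG = log.getLogger(__name__, project='example', version='1.0')
LOG.info('service started')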


@@ -90,6 +90,7 @@ class _ReallyPipeMutex(_BaseMutex):
Class code copied from Swift's swift/common/utils.py
Related eventlet bug: https://github.com/eventlet/eventlet/issues/432
"""
def __init__(self):
super().__init__()
@@ -188,6 +189,7 @@ class _AsyncioMutex(_BaseMutex):
2. An ``asyncio.Lock`` for locking across greenlets within a single OS
thread (each OS thread running greenlets has its own asyncio loop)
"""
def __init__(self):
super().__init__()
self._asyncio_lock = asyncio.Lock()
@@ -253,8 +255,7 @@ class _AsyncioMutex(_BaseMutex):
_HUB = eventlet.hubs.get_hub()
if isinstance(_HUB, eventlet.hubs.asyncio.Hub):
major, minor, patch = map(
int,
importlib.metadata.version("eventlet").split(".")[:3]
int, importlib.metadata.version("eventlet").split(".")[:3]
)
if (major, minor, patch) < (0, 38, 2):
raise RuntimeError(


@@ -34,8 +34,10 @@ class _LogRateLimit(logging.Filter):
self.emit_warn = False
def filter(self, record):
if (self.except_level is not None
and record.levelno >= self.except_level):
if (
self.except_level is not None
and record.levelno >= self.except_level
):
# don't limit levels >= except_level
return True
@@ -55,9 +57,11 @@ class _LogRateLimit(logging.Filter):
if self.counter == self.burst + 1:
self.emit_warn = True
self.logger.error("Logging rate limit: "
"drop after %s records/%s sec",
self.burst, self.interval)
self.logger.error(
"Logging rate limit: drop after %s records/%s sec",
self.burst,
self.interval,
)
self.emit_warn = False
# Drop the log
@@ -108,7 +112,7 @@ def install_filter(burst, interval, except_level='CRITICAL'):
try:
except_levelno = _LOG_LEVELS[except_level]
except KeyError:
raise ValueError("invalid log level name: %r" % except_level)
raise ValueError(f"invalid log level name: {except_level!r}")
log_filter = _LogRateLimit(burst, interval, except_levelno)
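
install_filter() takes the burst size, the interval in seconds, and an optional level name that is exempt from limiting, as the signature in the hunk header shows; a hedged usage sketch:

# Allow at most 100 records per 1-second interval; CRITICAL always passes.
from oslo_log import rate_limit

rate_limit.install_filter(100, 1, except_level='CRITICAL')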


@@ -2,4 +2,5 @@ import os
if os.environ.get("OSLO_LOG_TEST_EVENTLET") == "1":
import eventlet
eventlet.monkey_patch()


@@ -17,7 +17,6 @@ from oslotest import base as test_base
class TestLoggingFixture(test_base.BaseTestCase):
def setUp(self):
super().setUp()
self.log = logging.getLogger(__name__)
@@ -25,7 +24,6 @@ class TestLoggingFixture(test_base.BaseTestCase):
def test_logging_handle_error(self):
self.log.error('pid of first child is %(foo)s', 1)
self.useFixture(fixture.get_logging_handle_error_fixture())
self.assertRaises(TypeError,
self.log.error,
'pid of first child is %(foo)s',
1)
self.assertRaises(
TypeError, self.log.error, 'pid of first child is %(foo)s', 1
)


@@ -17,7 +17,6 @@ from oslotest import base as test_base
class TestSetLevelFixture(test_base.BaseTestCase):
def test_unset_before(self):
logger = logging.getLogger('no-such-logger-unset')
self.assertEqual(logging.NOTSET, logger.level)


@@ -60,7 +60,8 @@ class ConvertJsonTestCase(test_base.BaseTestCase):
text = jsonutils.dumps(TRIVIAL_RECORD)
self.assertEqual(
[TRIVIAL_RECORD, TRIVIAL_RECORD],
self._reformat("\n".join([text, text])))
self._reformat("\n".join([text, text])),
)
def _lines(self, record, pre='pre', loc='loc', **args):
return list(convert_json.console_format(pre, loc, record, **args))


@@ -14,7 +14,6 @@
"""Unit Tests for oslo.log with custom log handler"""
import logging
from oslo_log import log
@@ -36,11 +35,13 @@ class CustomLogHandler(logging.StreamHandler):
class CustomLogHandlerTestCase(LogTestBase):
def setUp(self):
super().setUp()
self.config(logging_context_format_string="HAS CONTEXT "
"[%(request_id)s]: "
"%(message)s",
logging_default_format_string="NOCTXT: %(message)s",
logging_debug_format_suffix="--DBG")
self.config(
logging_context_format_string="HAS CONTEXT "
"[%(request_id)s]: "
"%(message)s",
logging_default_format_string="NOCTXT: %(message)s",
logging_debug_format_suffix="--DBG",
)
self.log = log.getLogger('') # obtain root logger instead of 'unknown'
self._add_handler_with_cleanup(self.log, handler=CustomLogHandler)
self._set_log_level_with_cleanup(self.log, logging.DEBUG)
@@ -48,4 +49,4 @@ class CustomLogHandlerTestCase(LogTestBase):
def test_log(self):
message = 'foo'
self.log.info(message)
self.assertEqual("NOCTXT: %s\n" % message, self.stream.getvalue())
self.assertEqual(f"NOCTXT: {message}\n", self.stream.getvalue())


@@ -27,17 +27,18 @@ from oslo_log import log
def _fake_context():
ctxt = context.RequestContext(user_id="user",
project_id="tenant",
project_domain_id="pdomain",
user_domain_id="udomain",
overwrite=True)
ctxt = context.RequestContext(
user_id="user",
project_id="tenant",
project_domain_id="pdomain",
user_domain_id="udomain",
overwrite=True,
)
return ctxt
class FormatterTest(test_base.BaseTestCase):
def setUp(self):
super().setUp()
@@ -60,8 +61,9 @@ class FormatterTest(test_base.BaseTestCase):
def test_dictify_context_with_context(self):
ctxt = _fake_context()
self.assertEqual(ctxt.get_logging_values(),
formatters._dictify_context(ctxt))
self.assertEqual(
ctxt.get_logging_values(), formatters._dictify_context(ctxt)
)
# Test for https://bugs.python.org/issue28603
@@ -69,7 +71,8 @@ class FormatUnhashableExceptionTest(test_base.BaseTestCase):
def setUp(self):
super().setUp()
self.config_fixture = self.useFixture(
config_fixture.Config(cfg.ConfigOpts()))
config_fixture.Config(cfg.ConfigOpts())
)
self.conf = self.config_fixture.conf
log.register_options(self.conf)
@@ -84,8 +87,9 @@ class FormatUnhashableExceptionTest(test_base.BaseTestCase):
def test_error_summary(self):
exc_info = self._unhashable_exception_info()
record = logging.LogRecord('test', logging.ERROR, 'test', 0,
'test message', [], exc_info)
record = logging.LogRecord(
'test', logging.ERROR, 'test', 0, 'test message', [], exc_info
)
err_summary = formatters._get_error_summary(record)
self.assertTrue(err_summary)
@@ -110,7 +114,8 @@ class FormatUnhashableExceptionTest(test_base.BaseTestCase):
def test_context_format_exception(self):
exc_info = self._unhashable_exception_info()
formatter = formatters.ContextFormatter(config=self.conf)
record = logging.LogRecord('test', logging.ERROR, 'test', 0,
'test message', [], exc_info)
record = logging.LogRecord(
'test', logging.ERROR, 'test', 0, 'test message', [], exc_info
)
tb = formatter.format(record)
self.assertTrue(tb)


@@ -18,7 +18,6 @@ from oslo_log import helpers
class LogHelpersTestCase(test_base.BaseTestCase):
def test_log_decorator(self):
'''Test that LOG.debug is called with proper arguments.'''
@@ -37,10 +36,12 @@ class LogHelpersTestCase(test_base.BaseTestCase):
obj = test_class()
for method_name in ('test_method', 'test_classmethod'):
data = {'caller': helpers._get_full_class_name(test_class),
'method_name': method_name,
'args': args,
'kwargs': kwargs}
data = {
'caller': helpers._get_full_class_name(test_class),
'method_name': method_name,
'args': args,
'kwargs': kwargs,
}
method = getattr(obj, method_name)
with mock.patch('logging.Logger.debug') as debug:
@@ -60,20 +61,24 @@ class LogHelpersTestCase(test_base.BaseTestCase):
def test_staticmethod(arg1, arg2, arg3, *args, **kwargs):
pass
data = {'caller': 'static',
'method_name': '_static_method',
'args': (),
'kwargs': {}}
data = {
'caller': 'static',
'method_name': '_static_method',
'args': (),
'kwargs': {},
}
with mock.patch('logging.Logger.debug') as debug:
_static_method()
debug.assert_called_with(mock.ANY, data)
args = tuple(range(6))
kwargs = {'kwarg1': 6, 'kwarg2': 7}
data = {'caller': 'static',
'method_name': 'test_staticmethod',
'args': args,
'kwargs': kwargs}
data = {
'caller': 'static',
'method_name': 'test_staticmethod',
'args': args,
'kwargs': kwargs,
}
with mock.patch('logging.Logger.debug') as debug:
test_class.test_staticmethod(*args, **kwargs)
debug.assert_called_with(mock.ANY, data)

File diff suppressed because it is too large


@@ -36,6 +36,7 @@ def quiet_eventlet_exceptions():
class TestPipeMutex(unittest.TestCase):
"""From Swift's test/unit/common/test_utils.py"""
def setUp(self):
self.mutex = pipe_mutex.PipeMutex()
@@ -86,8 +87,9 @@ class TestPipeMutex(unittest.TestCase):
def test_wrong_releaser(self):
self.mutex.acquire()
with quiet_eventlet_exceptions():
self.assertRaises(RuntimeError,
eventlet.spawn(self.mutex.release).wait)
self.assertRaises(
RuntimeError, eventlet.spawn(self.mutex.release).wait
)
def test_blocking(self):
evt = eventlet.event.Event()
@@ -116,11 +118,15 @@ class TestPipeMutex(unittest.TestCase):
c1.wait()
c2.wait()
self.assertEqual(sequence, [
'coro1 acquire',
'coro1 release',
'coro2 acquire',
'coro2 release'])
self.assertEqual(
sequence,
[
'coro1 acquire',
'coro1 release',
'coro2 acquire',
'coro2 release',
],
)
def test_blocking_tpool(self):
# Note: this test's success isn't a guarantee that the mutex is
@@ -142,11 +148,13 @@ class TestPipeMutex(unittest.TestCase):
greenthread2 = eventlet.spawn(do_stuff)
real_thread1 = eventlet.patcher.original('threading').Thread(
target=do_stuff)
target=do_stuff
)
real_thread1.start()
real_thread2 = eventlet.patcher.original('threading').Thread(
target=do_stuff)
target=do_stuff
)
real_thread2.start()
greenthread1.wait()
@@ -191,11 +199,13 @@ class TestPipeMutex(unittest.TestCase):
self.mutex.release()
real_thread1 = eventlet.patcher.original('threading').Thread(
target=pthread1)
target=pthread1
)
real_thread1.start()
real_thread2 = eventlet.patcher.original('threading').Thread(
target=pthread2)
target=pthread2
)
real_thread2.start()
real_thread1.join()


@@ -56,10 +56,12 @@ class LogRateLimitTestCase(test_base.BaseTestCase):
logger.error("message 1")
logger.error("message 2")
logger.error("message 3")
self.assertEqual(stream.getvalue(),
'message 1\n'
'message 2\n'
'Logging rate limit: drop after 2 records/1 sec\n')
self.assertEqual(
stream.getvalue(),
'message 1\n'
'message 2\n'
'Logging rate limit: drop after 2 records/1 sec\n',
)
# second burst (clock changed)
stream.seek(0)
@@ -69,10 +71,12 @@ class LogRateLimitTestCase(test_base.BaseTestCase):
logger.error("message 4")
logger.error("message 5")
logger.error("message 6")
self.assertEqual(stream.getvalue(),
'message 4\n'
'message 5\n'
'Logging rate limit: drop after 2 records/1 sec\n')
self.assertEqual(
stream.getvalue(),
'message 4\n'
'message 5\n'
'Logging rate limit: drop after 2 records/1 sec\n',
)
@mock.patch('oslo_log.rate_limit.monotonic_clock')
def test_rate_limit_except_level(self, mock_clock):
@@ -84,11 +88,13 @@ class LogRateLimitTestCase(test_base.BaseTestCase):
logger.error("error 2")
logger.critical("critical 3")
logger.critical("critical 4")
self.assertEqual(stream.getvalue(),
'error 1\n'
'Logging rate limit: drop after 1 records/1 sec\n'
'critical 3\n'
'critical 4\n')
self.assertEqual(
stream.getvalue(),
'error 1\n'
'Logging rate limit: drop after 1 records/1 sec\n'
'critical 3\n'
'critical 4\n',
)
def test_install_twice(self):
rate_limit.install_filter(100, 1)
@@ -104,7 +110,6 @@ class LogRateLimitTestCase(test_base.BaseTestCase):
logger.error("message 1")
logger.error("message 2")
logger.error("message 3")
self.assertEqual(stream.getvalue(),
'message 1\n'
'message 2\n'
'message 3\n')
self.assertEqual(
stream.getvalue(), 'message 1\nmessage 2\nmessage 3\n'
)


@@ -22,31 +22,32 @@ from oslo_log import versionutils
class DeprecatedTestCase(test_base.BaseTestCase):
def assert_deprecated(self, mock_reporter, no_removal=False,
**expected_details):
def assert_deprecated(
self, mock_reporter, no_removal=False, **expected_details
):
if 'in_favor_of' in expected_details:
if no_removal is False:
expected_msg = versionutils._deprecated_msg_with_alternative
else:
expected_msg = getattr(
versionutils,
'_deprecated_msg_with_alternative_no_removal')
versionutils, '_deprecated_msg_with_alternative_no_removal'
)
else:
if no_removal is False:
expected_msg = versionutils._deprecated_msg_no_alternative
else:
expected_msg = getattr(
versionutils,
'_deprecated_msg_with_no_alternative_no_removal')
'_deprecated_msg_with_no_alternative_no_removal',
)
# The first argument is the logger, and we don't care about
# that, so ignore it with ANY.
mock_reporter.assert_called_with(mock.ANY,
expected_msg,
expected_details)
mock_reporter.assert_called_with(
mock.ANY, expected_msg, expected_details
)
@mock.patch('oslo_log.versionutils.report_deprecated_feature')
def test_deprecating_a_function_returns_correct_value(self, mock_reporter):
@versionutils.deprecated(as_of=versionutils.deprecated.ICEHOUSE)
def do_outdated_stuff(data):
return data
@@ -58,7 +59,6 @@ class DeprecatedTestCase(test_base.BaseTestCase):
@mock.patch('oslo_log.versionutils.report_deprecated_feature')
def test_deprecating_a_method_returns_correct_value(self, mock_reporter):
class C:
@versionutils.deprecated(as_of=versionutils.deprecated.ICEHOUSE)
def outdated_method(self, *args):
@@ -70,155 +70,182 @@ class DeprecatedTestCase(test_base.BaseTestCase):
@mock.patch('oslo_log.versionutils.report_deprecated_feature')
def test_deprecated_with_unknown_future_release(self, mock_reporter):
@versionutils.deprecated(as_of=versionutils.deprecated.BEXAR,
in_favor_of='different_stuff()')
@versionutils.deprecated(
as_of=versionutils.deprecated.BEXAR,
in_favor_of='different_stuff()',
)
def do_outdated_stuff():
return
do_outdated_stuff()
self.assert_deprecated(mock_reporter,
what='do_outdated_stuff()',
in_favor_of='different_stuff()',
as_of='Bexar',
remove_in='D')
self.assert_deprecated(
mock_reporter,
what='do_outdated_stuff()',
in_favor_of='different_stuff()',
as_of='Bexar',
remove_in='D',
)
@mock.patch('oslo_log.versionutils.report_deprecated_feature')
def test_deprecated_with_known_future_release(self, mock_reporter):
@versionutils.deprecated(as_of=versionutils.deprecated.GRIZZLY,
in_favor_of='different_stuff()')
@versionutils.deprecated(
as_of=versionutils.deprecated.GRIZZLY,
in_favor_of='different_stuff()',
)
def do_outdated_stuff():
return
do_outdated_stuff()
self.assert_deprecated(mock_reporter,
what='do_outdated_stuff()',
in_favor_of='different_stuff()',
as_of='Grizzly',
remove_in='Icehouse')
self.assert_deprecated(
mock_reporter,
what='do_outdated_stuff()',
in_favor_of='different_stuff()',
as_of='Grizzly',
remove_in='Icehouse',
)
@mock.patch('oslo_log.versionutils.report_deprecated_feature')
def test_deprecated_without_replacement(self, mock_reporter):
@versionutils.deprecated(as_of=versionutils.deprecated.GRIZZLY)
def do_outdated_stuff():
return
do_outdated_stuff()
self.assert_deprecated(mock_reporter,
what='do_outdated_stuff()',
as_of='Grizzly',
remove_in='Icehouse')
self.assert_deprecated(
mock_reporter,
what='do_outdated_stuff()',
as_of='Grizzly',
remove_in='Icehouse',
)
@mock.patch('oslo_log.versionutils.report_deprecated_feature')
def test_deprecated_with_custom_what(self, mock_reporter):
@versionutils.deprecated(as_of=versionutils.deprecated.GRIZZLY,
what='v2.0 API',
in_favor_of='v3 API')
@versionutils.deprecated(
as_of=versionutils.deprecated.GRIZZLY,
what='v2.0 API',
in_favor_of='v3 API',
)
def do_outdated_stuff():
return
do_outdated_stuff()
self.assert_deprecated(mock_reporter,
what='v2.0 API',
in_favor_of='v3 API',
as_of='Grizzly',
remove_in='Icehouse')
self.assert_deprecated(
mock_reporter,
what='v2.0 API',
in_favor_of='v3 API',
as_of='Grizzly',
remove_in='Icehouse',
)
@mock.patch('oslo_log.versionutils.report_deprecated_feature')
def test_deprecated_with_removed_next_release(self, mock_reporter):
@versionutils.deprecated(as_of=versionutils.deprecated.GRIZZLY,
remove_in=1)
@versionutils.deprecated(
as_of=versionutils.deprecated.GRIZZLY, remove_in=1
)
def do_outdated_stuff():
return
do_outdated_stuff()
self.assert_deprecated(mock_reporter,
what='do_outdated_stuff()',
as_of='Grizzly',
remove_in='Havana')
self.assert_deprecated(
mock_reporter,
what='do_outdated_stuff()',
as_of='Grizzly',
remove_in='Havana',
)
@mock.patch('oslo_log.versionutils.report_deprecated_feature')
def test_deprecated_with_removed_plus_3(self, mock_reporter):
@versionutils.deprecated(as_of=versionutils.deprecated.GRIZZLY,
remove_in=+3)
@versionutils.deprecated(
as_of=versionutils.deprecated.GRIZZLY, remove_in=+3
)
def do_outdated_stuff():
return
do_outdated_stuff()
self.assert_deprecated(mock_reporter,
what='do_outdated_stuff()',
as_of='Grizzly',
remove_in='Juno')
self.assert_deprecated(
mock_reporter,
what='do_outdated_stuff()',
as_of='Grizzly',
remove_in='Juno',
)
@mock.patch('oslo_log.versionutils.report_deprecated_feature')
def test_deprecated_with_removed_zero(self, mock_reporter):
@versionutils.deprecated(as_of=versionutils.deprecated.GRIZZLY,
remove_in=0)
@versionutils.deprecated(
as_of=versionutils.deprecated.GRIZZLY, remove_in=0
)
def do_outdated_stuff():
return
do_outdated_stuff()
self.assert_deprecated(mock_reporter,
no_removal=True,
what='do_outdated_stuff()',
as_of='Grizzly',
remove_in='Grizzly')
self.assert_deprecated(
mock_reporter,
no_removal=True,
what='do_outdated_stuff()',
as_of='Grizzly',
remove_in='Grizzly',
)
@mock.patch('oslo_log.versionutils.report_deprecated_feature')
def test_deprecated_with_removed_none(self, mock_reporter):
@versionutils.deprecated(as_of=versionutils.deprecated.GRIZZLY,
remove_in=None)
@versionutils.deprecated(
as_of=versionutils.deprecated.GRIZZLY, remove_in=None
)
def do_outdated_stuff():
return
do_outdated_stuff()
self.assert_deprecated(mock_reporter,
no_removal=True,
what='do_outdated_stuff()',
as_of='Grizzly',
remove_in='Grizzly')
self.assert_deprecated(
mock_reporter,
no_removal=True,
what='do_outdated_stuff()',
as_of='Grizzly',
remove_in='Grizzly',
)
@mock.patch('oslo_log.versionutils.report_deprecated_feature')
def test_deprecated_with_removed_zero_and_alternative(self, mock_reporter):
@versionutils.deprecated(as_of=versionutils.deprecated.GRIZZLY,
in_favor_of='different_stuff()',
remove_in=0)
@versionutils.deprecated(
as_of=versionutils.deprecated.GRIZZLY,
in_favor_of='different_stuff()',
remove_in=0,
)
def do_outdated_stuff():
return
do_outdated_stuff()
self.assert_deprecated(mock_reporter,
no_removal=True,
what='do_outdated_stuff()',
as_of='Grizzly',
in_favor_of='different_stuff()',
remove_in='Grizzly')
self.assert_deprecated(
mock_reporter,
no_removal=True,
what='do_outdated_stuff()',
as_of='Grizzly',
in_favor_of='different_stuff()',
remove_in='Grizzly',
)
@mock.patch('oslo_log.versionutils.report_deprecated_feature')
def test_deprecated_class_without_init(self, mock_reporter):
@versionutils.deprecated(as_of=versionutils.deprecated.JUNO,
remove_in=+1)
@versionutils.deprecated(
as_of=versionutils.deprecated.JUNO, remove_in=+1
)
class OutdatedClass:
pass
obj = OutdatedClass()
self.assertIsInstance(obj, OutdatedClass)
self.assert_deprecated(mock_reporter,
what='OutdatedClass()',
as_of='Juno',
remove_in='Kilo')
self.assert_deprecated(
mock_reporter,
what='OutdatedClass()',
as_of='Juno',
remove_in='Kilo',
)
@mock.patch('oslo_log.versionutils.report_deprecated_feature')
def test_deprecated_class_with_init(self, mock_reporter):
@@ -226,14 +253,16 @@ class DeprecatedTestCase(test_base.BaseTestCase):
args = (1, 5, 7)
kwargs = {'first': 10, 'second': 20}
@versionutils.deprecated(as_of=versionutils.deprecated.JUNO,
remove_in=+1)
@versionutils.deprecated(
as_of=versionutils.deprecated.JUNO, remove_in=+1
)
class OutdatedClass:
def __init__(self, *args, **kwargs):
"""It is __init__ method."""
mock_arguments.args = args
mock_arguments.kwargs = kwargs
super().__init__()
obj = OutdatedClass(*args, **kwargs)
self.assertIsInstance(obj, OutdatedClass)
@@ -241,15 +270,18 @@ class DeprecatedTestCase(test_base.BaseTestCase):
self.assertEqual('It is __init__ method.', obj.__init__.__doc__)
self.assertEqual(args, mock_arguments.args)
self.assertEqual(kwargs, mock_arguments.kwargs)
self.assert_deprecated(mock_reporter,
what='OutdatedClass()',
as_of='Juno',
remove_in='Kilo')
self.assert_deprecated(
mock_reporter,
what='OutdatedClass()',
as_of='Juno',
remove_in='Kilo',
)
@mock.patch('oslo_log.versionutils.report_deprecated_feature')
def test_deprecated_exception_old(self, mock_log):
@versionutils.deprecated(as_of=versionutils.deprecated.ICEHOUSE,
remove_in=+1)
@versionutils.deprecated(
as_of=versionutils.deprecated.ICEHOUSE, remove_in=+1
)
class OldException(Exception):
pass
@@ -258,13 +290,15 @@ class DeprecatedTestCase(test_base.BaseTestCase):
except OldException:
pass
self.assert_deprecated(mock_log, what='OldException()',
as_of='Icehouse', remove_in='Juno')
self.assert_deprecated(
mock_log, what='OldException()', as_of='Icehouse', remove_in='Juno'
)
@mock.patch('oslo_log.versionutils.report_deprecated_feature')
def test_deprecated_exception_new(self, mock_log):
@versionutils.deprecated(as_of=versionutils.deprecated.ICEHOUSE,
remove_in=+1)
@versionutils.deprecated(
as_of=versionutils.deprecated.ICEHOUSE, remove_in=+1
)
class OldException(Exception):
pass
@@ -280,8 +314,9 @@ class DeprecatedTestCase(test_base.BaseTestCase):
@mock.patch('oslo_log.versionutils.report_deprecated_feature')
def test_deprecated_exception_unrelated(self, mock_log):
@versionutils.deprecated(as_of=versionutils.deprecated.ICEHOUSE,
remove_in=+1)
@versionutils.deprecated(
as_of=versionutils.deprecated.ICEHOUSE, remove_in=+1
)
class OldException(Exception):
pass
@@ -302,63 +337,76 @@ class DeprecatedTestCase(test_base.BaseTestCase):
versionutils.register_options()
mock_register_opts.assert_called_once_with(
versionutils.deprecated_opts)
versionutils.deprecated_opts
)
@mock.patch('oslo_log.versionutils.report_deprecated_feature')
def test_deprecated_mitaka_plus_two(self, mock_reporter):
@versionutils.deprecated(as_of=versionutils.deprecated.MITAKA,
remove_in=+2)
@versionutils.deprecated(
as_of=versionutils.deprecated.MITAKA, remove_in=+2
)
class OutdatedClass:
pass
obj = OutdatedClass()
self.assertIsInstance(obj, OutdatedClass)
self.assert_deprecated(mock_reporter,
what='OutdatedClass()',
as_of='Mitaka',
remove_in='Ocata')
self.assert_deprecated(
mock_reporter,
what='OutdatedClass()',
as_of='Mitaka',
remove_in='Ocata',
)
@mock.patch('oslo_log.versionutils.report_deprecated_feature')
def test_deprecated_newton_plus_two(self, mock_reporter):
@versionutils.deprecated(as_of=versionutils.deprecated.NEWTON,
remove_in=+2)
@versionutils.deprecated(
as_of=versionutils.deprecated.NEWTON, remove_in=+2
)
class OutdatedClass:
pass
obj = OutdatedClass()
self.assertIsInstance(obj, OutdatedClass)
self.assert_deprecated(mock_reporter,
what='OutdatedClass()',
as_of='Newton',
remove_in='Pike')
self.assert_deprecated(
mock_reporter,
what='OutdatedClass()',
as_of='Newton',
remove_in='Pike',
)
@mock.patch('oslo_log.versionutils.report_deprecated_feature')
def test_deprecated_ocata_plus_two(self, mock_reporter):
@versionutils.deprecated(as_of=versionutils.deprecated.OCATA,
remove_in=+2)
@versionutils.deprecated(
as_of=versionutils.deprecated.OCATA, remove_in=+2
)
class OutdatedClass:
pass
obj = OutdatedClass()
self.assertIsInstance(obj, OutdatedClass)
self.assert_deprecated(mock_reporter,
what='OutdatedClass()',
as_of='Ocata',
remove_in='Queens')
self.assert_deprecated(
mock_reporter,
what='OutdatedClass()',
as_of='Ocata',
remove_in='Queens',
)
@mock.patch('oslo_log.versionutils.report_deprecated_feature')
def test_deprecated_message(self, mock_reporter):
versionutils.deprecation_warning(
'outdated_stuff',
as_of=versionutils.deprecated.KILO,
in_favor_of='different_stuff',
remove_in=+2,
)
versionutils.deprecation_warning('outdated_stuff',
as_of=versionutils.deprecated.KILO,
in_favor_of='different_stuff',
remove_in=+2)
self.assert_deprecated(mock_reporter,
what='outdated_stuff',
in_favor_of='different_stuff',
as_of='Kilo',
remove_in='Mitaka')
self.assert_deprecated(
mock_reporter,
what='outdated_stuff',
in_favor_of='different_stuff',
as_of='Kilo',
remove_in='Mitaka',
)

View File

@@ -32,25 +32,31 @@ _DEPRECATED_EXCEPTIONS = set()
deprecated_opts = [
cfg.BoolOpt('fatal_deprecations',
default=False,
help='Enables or disables fatal status of deprecations.'),
cfg.BoolOpt(
'fatal_deprecations',
default=False,
help='Enables or disables fatal status of deprecations.',
),
]
_deprecated_msg_with_alternative = _(
'%(what)s is deprecated as of %(as_of)s in favor of '
'%(in_favor_of)s and may be removed in %(remove_in)s.')
'%(in_favor_of)s and may be removed in %(remove_in)s.'
)
_deprecated_msg_no_alternative = _(
'%(what)s is deprecated as of %(as_of)s and may be '
'removed in %(remove_in)s. It will not be superseded.')
'removed in %(remove_in)s. It will not be superseded.'
)
_deprecated_msg_with_alternative_no_removal = _(
'%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s.')
'%(what)s is deprecated as of %(as_of)s in favor of %(in_favor_of)s.'
)
_deprecated_msg_with_no_alternative_no_removal = _(
'%(what)s is deprecated as of %(as_of)s. It will not be superseded.')
'%(what)s is deprecated as of %(as_of)s. It will not be superseded.'
)
_RELEASES = {
@@ -106,28 +112,34 @@ class deprecated:
1. Specifying the required deprecated release
>>> @deprecated(as_of=deprecated.ICEHOUSE)
... def a(): pass
... def a():
... pass
2. Specifying a replacement:
>>> @deprecated(as_of=deprecated.ICEHOUSE, in_favor_of='f()')
... def b(): pass
... def b():
... pass
3. Specifying the release where the functionality may be removed:
>>> @deprecated(as_of=deprecated.ICEHOUSE, remove_in=+1)
... def c(): pass
... def c():
... pass
4. Specifying the deprecated functionality will not be removed:
>>> @deprecated(as_of=deprecated.ICEHOUSE, remove_in=None)
... def d(): pass
... def d():
... pass
5. Specifying a replacement, deprecated functionality will not be removed:
>>> @deprecated(as_of=deprecated.ICEHOUSE, in_favor_of='f()',
... remove_in=None)
... def e(): pass
>>> @deprecated(
... as_of=deprecated.ICEHOUSE, in_favor_of='f()', remove_in=None
... )
... def e():
... pass
.. warning::
@@ -185,7 +197,8 @@ class deprecated:
what=self.what or func_or_cls.__name__ + '()',
as_of=self.as_of,
in_favor_of=self.in_favor_of,
remove_in=self.remove_in)
remove_in=self.remove_in,
)
if inspect.isfunction(func_or_cls):
@@ -193,6 +206,7 @@ class deprecated:
def wrapped(*args, **kwargs):
report_deprecated()
return func_or_cls(*args, **kwargs)
return wrapped
elif inspect.isclass(func_or_cls):
orig_init = func_or_cls.__init__
@@ -202,6 +216,7 @@ class deprecated:
if self.__class__ in _DEPRECATED_EXCEPTIONS:
report_deprecated()
orig_init(self, *args, **kwargs)
func_or_cls.__init__ = new_init
_DEPRECATED_EXCEPTIONS.add(func_or_cls)
@@ -221,13 +236,15 @@ class deprecated:
if self in _DEPRECATED_EXCEPTIONS:
report_deprecated()
return super().__subclasscheck__(subclass)
func_or_cls.__meta__ = ExceptionMeta
_DEPRECATED_EXCEPTIONS.add(func_or_cls)
return func_or_cls
else:
raise TypeError('deprecated can be used only with functions or '
'classes')
raise TypeError(
'deprecated can be used only with functions or classes'
)
def _get_safe_to_remove_release(release, remove_in):
@@ -243,8 +260,9 @@ def _get_safe_to_remove_release(release, remove_in):
return new_release
def deprecation_warning(what, as_of, in_favor_of=None,
remove_in=2, logger=LOG):
def deprecation_warning(
what, as_of, in_favor_of=None, remove_in=2, logger=LOG
):
"""Warn about the deprecation of a feature.
:param what: name of the thing being deprecated.
@@ -254,9 +272,11 @@ def deprecation_warning(what, as_of, in_favor_of=None,
before removing (default: 2)
:param logger: the logging object to use for reporting (optional).
"""
details = dict(what=what,
as_of=_RELEASES[as_of],
remove_in=_get_safe_to_remove_release(as_of, remove_in))
details = dict(
what=what,
as_of=_RELEASES[as_of],
remove_in=_get_safe_to_remove_release(as_of, remove_in),
)
if in_favor_of:
details['in_favor_of'] = in_favor_of
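Not part of the change itself: a minimal usage sketch of the reworked deprecation_warning() call, mirroring the arguments exercised by test_deprecated_message above. The 'v2.0 API'/'v3 API' strings are illustrative, and the quoted output assumes the message templates earlier in this file.
from oslo_log import versionutils

# With the _deprecated_msg_with_alternative template above, this should
# report roughly: "v2.0 API is deprecated as of Kilo in favor of v3 API
# and may be removed in Mitaka."
versionutils.deprecation_warning(
    'v2.0 API',
    as_of=versionutils.deprecated.KILO,
    in_favor_of='v3 API',
    remove_in=+2,
)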

View File

@@ -48,5 +48,19 @@ convert-json = "oslo_log.cmds.convert_json:main"
[tool.setuptools]
packages = ["oslo_log"]
[tool.bandit]
exclude_dirs = ["tests"]
[tool.ruff]
line-length = 79
[tool.ruff.format]
quote-style = "preserve"
docstring-code-format = true
[tool.ruff.lint]
select = ["E4", "E5", "E7", "E9", "F", "S", "UP"]
ignore = [
# we want to test printf-style formatting
"UP031"
]
[tool.ruff.lint.per-file-ignores]
"oslo_log/tests/*" = ["S"]

View File

@@ -23,7 +23,7 @@
# ones.
extensions = [
'reno.sphinxext',
'openstackdocstheme'
'openstackdocstheme',
]
# The master toctree document.

View File

@@ -17,4 +17,5 @@ import setuptools
setuptools.setup(
setup_requires=['pbr>=2.0.0'],
pbr=True)
pbr=True,
)

tox.ini
View File

@@ -57,11 +57,13 @@ commands =
coverage report --show-missing
[flake8]
# E123, E125 skipped as they are invalid PEP-8.
# W503, W504 skipped: https://www.python.org/dev/peps/pep-0008/#should-a-line-break-before-or-after-a-binary-operator
show-source = True
ignore = E123,E125,H405,W503,W504
exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build,__init__.py
exclude = .venv,.git,.tox,dist,doc,*lib/python*,*egg,build,__init__.py
# We only enable the hacking (H) checks
select = H
# H301 ruff will put commas after imports that can't fit on one line
# H405 multiline docstrings are fine
ignore = H301,H405
[hacking]
import_exceptions = oslo_log._i18n
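As a hedged illustration of the H301 note above: ruff's formatter wraps long imports with a trailing comma, roughly as sketched below, and per the comment that is the style H301 would otherwise object to (the oslo_log.formatters names are real classes but chosen only for illustration).
# Illustrative only: the wrapped, trailing-comma import style produced by
# ruff-format that the H301 ignore accommodates.
from oslo_log.formatters import (
    ContextFormatter,
    JSONFormatter,
)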