Add and use a new simple helper logging module

Add a new BLATHER logging level so that it can easily be used
for messages that sit below the normal DEBUG level, such as
compilation information and scope lookup details, which can make
logs very verbose if they are always enabled.

Change-Id: I828211403bd02bfd6777b10cdcfe58fb0637a52c
Joshua Harlow 2014-09-21 10:37:35 -07:00
parent bdb2a3a89b
commit 14431bc076
33 changed files with 143 additions and 70 deletions
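
For reference, the pattern the diffs below apply throughout the tree, shown as a minimal hedged sketch (the module and messages are illustrative, not taken from the commit): import the new taskflow logging helper instead of the stdlib module and use its extra blather() method for output that is too noisy even for DEBUG.

    # Hypothetical module; names and messages are illustrative only.
    from taskflow import logging  # the new helper module, not the stdlib one

    LOG = logging.getLogger(__name__)


    def announce(thing):
        # DEBUG for the normal amount of detail...
        LOG.debug("Processing '%s'", thing)
        # ...and BLATHER (level 5, below DEBUG at 10) for the really chatty part.
        LOG.blather("Full representation of '%s': %r", thing, thing)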

@ -15,16 +15,12 @@
# License for the specific language governing permissions and limitations
# under the License.
import logging
import six
from taskflow import exceptions
from taskflow.utils import misc
from taskflow.utils import reflection
LOG = logging.getLogger(__name__)
def _save_as_to_mapping(save_as):
"""Convert save_as to mapping name => index.

@ -12,13 +12,12 @@
# License for the specific language governing permissions and limitations
# under the License.
import logging
import six
from taskflow.conductors import base
from taskflow import exceptions as excp
from taskflow.listeners import logging as logging_listener
from taskflow import logging
from taskflow.types import timing as tt
from taskflow.utils import async_utils
from taskflow.utils import lock_utils

@ -14,9 +14,8 @@
# License for the specific language governing permissions and limitations
# under the License.
import logging
from taskflow.engines.action_engine import executor as ex
from taskflow import logging
from taskflow import retry as retry_atom
from taskflow import states
from taskflow.types import failure

@ -14,8 +14,7 @@
# License for the specific language governing permissions and limitations
# under the License.
import logging
from taskflow import logging
from taskflow import states
from taskflow import task as task_atom
from taskflow.types import failure

@ -14,11 +14,11 @@
# License for the specific language governing permissions and limitations
# under the License.
import logging
import threading
from taskflow import exceptions as exc
from taskflow import flow
from taskflow import logging
from taskflow import retry
from taskflow import task
from taskflow.types import graph as gr
@ -190,18 +190,18 @@ class PatternCompiler(object):
                             % (self._root, type(self._root)))
         self._history.clear()
         # NOTE(harlowja): this one can be expensive to calculate (especially
-        # the cycle detection), so only do it if we know debugging is enabled
+        # the cycle detection), so only do it if we know BLATHER is enabled
         # and not under all cases.
-        if LOG.isEnabledFor(logging.DEBUG):
-            LOG.debug("Translated '%s'", self._root)
-            LOG.debug("Graph:")
+        if LOG.isEnabledFor(logging.BLATHER):
+            LOG.blather("Translated '%s'", self._root)
+            LOG.blather("Graph:")
             for line in graph.pformat().splitlines():
                 # Indent it so that it's slightly offset from the above line.
-                LOG.debug(" %s", line)
-            LOG.debug("Hierarchy:")
+                LOG.blather(" %s", line)
+            LOG.blather("Hierarchy:")
             for line in node.pformat().splitlines():
                 # Indent it so that it's slightly offset from the above line.
-                LOG.debug(" %s", line)
+                LOG.blather(" %s", line)
 
     @lock_utils.locked
     def compile(self):
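
The isEnabledFor() guard in the hunk above is the important part of this change: the graph and hierarchy pformat() calls (and the cycle detection behind them) are skipped entirely unless BLATHER output was actually requested. A minimal sketch of the same idea, assuming a hypothetical, expensive build_report() callable:

    from taskflow import logging

    LOG = logging.getLogger(__name__)


    def log_report(build_report):
        # build_report is a caller-supplied, potentially expensive callable.
        # Passing arguments to LOG.blather() only defers their %-formatting;
        # when building the message itself is costly, guard the whole block.
        if LOG.isEnabledFor(logging.BLATHER):
            for line in build_report().splitlines():
                LOG.blather("  %s", line)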

@ -14,8 +14,7 @@
# License for the specific language governing permissions and limitations
# under the License.
import logging
from taskflow import logging
from taskflow import states as st
from taskflow.types import failure
from taskflow.types import fsm

@ -14,10 +14,9 @@
# License for the specific language governing permissions and limitations
# under the License.
import logging
from taskflow import atom as atom_type
from taskflow import flow as flow_type
from taskflow import logging
LOG = logging.getLogger(__name__)
@ -102,18 +101,13 @@ class ScopeWalker(object):
                     visible.append(a)
                 else:
                     visible.append(a.name)
-            if LOG.isEnabledFor(logging.DEBUG):
+            if LOG.isEnabledFor(logging.BLATHER):
                 if not self._names_only:
                     visible_names = [a.name for a in visible]
                 else:
                     visible_names = visible
-                # TODO(harlowja): we should likely use a created TRACE level
-                # for this kind of *very* verbose information; otherwise the
-                # cinder and other folks are going to complain that there
-                # debug logs are full of not so useful information (it is
-                # useful to taskflow debugging...).
-                LOG.debug("Scope visible to '%s' (limited by parent '%s' index"
-                          " < %s) is: %s", self._atom, parent.item.name,
-                          last_idx, visible_names)
+                LOG.blather("Scope visible to '%s' (limited by parent '%s'"
+                            " index < %s) is: %s", self._atom,
+                            parent.item.name, last_idx, visible_names)
             yield visible
             last = parent

@ -23,12 +23,14 @@ import six
import stevedore.driver
from taskflow import exceptions as exc
from taskflow import logging
from taskflow.persistence import backends as p_backends
from taskflow.utils import deprecation
from taskflow.utils import misc
from taskflow.utils import persistence_utils as p_utils
from taskflow.utils import reflection
LOG = logging.getLogger(__name__)
# NOTE(imelnikov): this is the entrypoint namespace, not the module namespace.
ENGINES_NAMESPACE = 'taskflow.engines'
@ -170,6 +172,7 @@ def load(flow, store=None, flow_detail=None, book=None,
flow_detail = p_utils.create_flow_detail(flow, book=book,
backend=backend)
LOG.debug('Looking for %r engine driver in %r', kind, namespace)
try:
mgr = stevedore.driver.DriverManager(
namespace, kind,

@ -14,12 +14,11 @@
# License for the specific language governing permissions and limitations
# under the License.
import logging
from kombu import exceptions as kombu_exc
import six
from taskflow import exceptions as excp
from taskflow import logging
LOG = logging.getLogger(__name__)

@ -15,7 +15,6 @@
# under the License.
import functools
import logging
import threading
from oslo.utils import timeutils
@ -25,6 +24,7 @@ from taskflow.engines.worker_based import cache
from taskflow.engines.worker_based import protocol as pr
from taskflow.engines.worker_based import proxy
from taskflow import exceptions as exc
from taskflow import logging
from taskflow.types import timing as tt
from taskflow.utils import async_utils
from taskflow.utils import misc

@ -15,7 +15,6 @@
# under the License.
import abc
import logging
import threading
from concurrent import futures
@ -26,6 +25,7 @@ import six
from taskflow.engines.action_engine import executor
from taskflow import exceptions as excp
from taskflow import logging
from taskflow.types import failure as ft
from taskflow.types import timing as tt
from taskflow.utils import lock_utils

@ -15,13 +15,13 @@
# under the License.
import collections
import logging
import socket
import kombu
import six
from taskflow.engines.worker_based import dispatcher
from taskflow import logging
from taskflow.utils import threading_utils
LOG = logging.getLogger(__name__)

@ -15,12 +15,12 @@
# under the License.
import functools
import logging
import six
from taskflow.engines.worker_based import protocol as pr
from taskflow.engines.worker_based import proxy
from taskflow import logging
from taskflow.types import failure as ft
from taskflow.utils import misc

@ -14,7 +14,6 @@
# License for the specific language governing permissions and limitations
# under the License.
import logging
import os
import platform
import socket
@ -25,6 +24,7 @@ from concurrent import futures
from taskflow.engines.worker_based import endpoint
from taskflow.engines.worker_based import server
from taskflow import logging
from taskflow import task as t_task
from taskflow.utils import reflection
from taskflow.utils import threading_utils as tu

@ -15,12 +15,12 @@
# under the License.
import contextlib
import logging
import six
from stevedore import driver
from taskflow import exceptions as exc
from taskflow import logging
from taskflow.utils import misc

@ -17,7 +17,6 @@
import collections
import contextlib
import functools
import logging
import threading
from concurrent import futures
@ -31,6 +30,7 @@ import six
from taskflow import exceptions as excp
from taskflow.jobs import job as base_job
from taskflow.jobs import jobboard
from taskflow import logging
from taskflow.openstack.common import uuidutils
from taskflow import states
from taskflow.types import timing as tt

@ -17,11 +17,11 @@
from __future__ import absolute_import
import abc
import logging
from oslo.utils import excutils
import six
from taskflow import logging
from taskflow import states
from taskflow.types import failure
from taskflow.types import notifier

@ -16,10 +16,10 @@
from __future__ import absolute_import
import logging
import sys
from taskflow.listeners import base
from taskflow import logging
from taskflow import states
from taskflow.types import failure

@ -17,10 +17,10 @@
from __future__ import absolute_import
import itertools
import logging
from taskflow import exceptions as exc
from taskflow.listeners import base
from taskflow import logging
from taskflow import states
from taskflow.types import timing as tt

taskflow/logging.py (new file, 92 lines)

@ -0,0 +1,92 @@
# -*- coding: utf-8 -*-

# Copyright (C) 2014 Yahoo! Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from __future__ import absolute_import

import logging
import sys

_BASE = __name__.split(".", 1)[0]

# Add a BLATHER level, this matches the multiprocessing utils.py module (and
# kazoo and others) that declares a similar level, this level is for
# information that is even lower level than regular DEBUG and gives out so
# much runtime information that it is only useful by low-level/certain users...
BLATHER = 5

# Copy over *select* attributes to make it easy to use this module.
CRITICAL = logging.CRITICAL
DEBUG = logging.DEBUG
ERROR = logging.ERROR
FATAL = logging.FATAL
NOTSET = logging.NOTSET
WARN = logging.WARN
WARNING = logging.WARNING


class _BlatherLoggerAdapter(logging.LoggerAdapter):

    def blather(self, msg, *args, **kwargs):
        """Delegate a blather call to the underlying logger."""
        self.logger.log(BLATHER, msg, *args, **kwargs)

    def warn(self, msg, *args, **kwargs):
        """Delegate a warning call to the underlying logger."""
        self.warning(msg, *args, **kwargs)


# TODO(harlowja): we should remove when we no longer have to support 2.6...
if sys.version_info[0:2] == (2, 6):

    class _FixedBlatherLoggerAdapter(_BlatherLoggerAdapter):
        """Ensures isEnabledFor() exists on adapters that are created."""

        def isEnabledFor(self, level):
            return self.logger.isEnabledFor(level)

    _BlatherLoggerAdapter = _FixedBlatherLoggerAdapter

    # Taken from python2.7 (same in python3.4)...
    class _NullHandler(logging.Handler):
        """This handler does nothing.

        It's intended to be used to avoid the
        "No handlers could be found for logger XXX" one-off warning. This is
        important for library code, which may contain code to log events. If a
        user of the library does not configure logging, the one-off warning
        might be produced; to avoid this, the library developer simply needs
        to instantiate a _NullHandler and add it to the top-level logger of the
        library module or package.
        """

        def handle(self, record):
            """Stub."""

        def emit(self, record):
            """Stub."""

        def createLock(self):
            self.lock = None
else:
    _NullHandler = logging.NullHandler


def getLogger(name=_BASE, extra=None):
    logger = logging.getLogger(name)
    if not logger.handlers:
        logger.addHandler(_NullHandler())
    return _BlatherLoggerAdapter(logger, extra=extra)
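
A quick way to actually see BLATHER output from an application that embeds taskflow; a hedged sketch using only ordinary stdlib configuration, since BLATHER is just the integer 5 and needs no special handler support:

    import logging as std_logging

    from taskflow import logging

    # DEBUG (10) alone will not show blather() output; the threshold has to
    # be lowered to BLATHER (5) or below.
    std_logging.basicConfig(level=logging.BLATHER)

    LOG = logging.getLogger("taskflow")
    LOG.blather("very verbose detail shows up now")
    LOG.debug("and normal debug detail still does too")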

@ -15,11 +15,11 @@
# under the License.
import contextlib
import logging
from stevedore import driver
from taskflow import exceptions as exc
from taskflow import logging
from taskflow.utils import misc

@ -16,7 +16,6 @@
# under the License.
import errno
import logging
import os
import shutil
@ -24,6 +23,7 @@ from oslo.serialization import jsonutils
import six
from taskflow import exceptions as exc
from taskflow import logging
from taskflow.persistence.backends import base
from taskflow.persistence import logbook
from taskflow.utils import lock_utils

@ -15,11 +15,10 @@
# License for the specific language governing permissions and limitations
# under the License.
import logging
import six
from taskflow import exceptions as exc
from taskflow import logging
from taskflow.persistence.backends import base
from taskflow.persistence import logbook

@ -22,7 +22,6 @@ from __future__ import absolute_import
import contextlib
import copy
import functools
import logging
import time
from oslo.utils import strutils
@ -33,6 +32,7 @@ from sqlalchemy import orm as sa_orm
from sqlalchemy import pool as sa_pool
from taskflow import exceptions as exc
from taskflow import logging
from taskflow.persistence.backends import base
from taskflow.persistence.backends.sqlalchemy import migration
from taskflow.persistence.backends.sqlalchemy import models

@ -15,13 +15,13 @@
# under the License.
import contextlib
import logging
from kazoo import exceptions as k_exc
from kazoo.protocol import paths
from oslo.serialization import jsonutils
from taskflow import exceptions as exc
from taskflow import logging
from taskflow.persistence.backends import base
from taskflow.persistence import logbook
from taskflow.utils import kazoo_utils as k_utils

@ -17,12 +17,12 @@
import abc
import copy
import logging
from oslo.utils import timeutils
import six
from taskflow import exceptions as exc
from taskflow import logging
from taskflow.openstack.common import uuidutils
from taskflow import states
from taskflow.types import failure as ft

@ -16,7 +16,6 @@
# under the License.
import abc
import logging
import six
@ -24,8 +23,6 @@ from taskflow import atom
from taskflow import exceptions as exc
from taskflow.utils import misc
LOG = logging.getLogger(__name__)
# Decision results.
REVERT = "REVERT"
REVERT_ALL = "REVERT_ALL"

@ -16,11 +16,11 @@
import abc
import contextlib
import logging
import six
from taskflow import exceptions
from taskflow import logging
from taskflow.openstack.common import uuidutils
from taskflow.persistence import logbook
from taskflow import retry
@ -696,20 +696,17 @@ class Storage(object):
         injected_args = {}
         mapped_args = {}
         for (bound_name, name) in six.iteritems(args_mapping):
-            # TODO(harlowja): This logging information may be to verbose
-            # even for DEBUG mode, let's see if we can maybe in the future
-            # add a TRACE mode or something else if people complain...
-            if LOG.isEnabledFor(logging.DEBUG):
+            if LOG.isEnabledFor(logging.BLATHER):
                 if atom_name:
-                    LOG.debug("Looking for %r <= %r for atom named: %s",
-                              bound_name, name, atom_name)
+                    LOG.blather("Looking for %r <= %r for atom named: %s",
+                                bound_name, name, atom_name)
                 else:
-                    LOG.debug("Looking for %r <= %r", bound_name, name)
+                    LOG.blather("Looking for %r <= %r", bound_name, name)
             if name in injected_args:
                 value = injected_args[name]
                 mapped_args[bound_name] = value
-                LOG.debug("Matched %r <= %r to %r (from injected values)",
-                          bound_name, name, value)
+                LOG.blather("Matched %r <= %r to %r (from injected"
+                            " values)", bound_name, name, value)
             else:
                 try:
                     possible_providers = self._reverse_mapping[name]
@ -727,8 +724,8 @@ class Storage(object):
                               % (bound_name, name, len(possible_providers)))
                 provider, value = _item_from_first_of(providers, name)
                 mapped_args[bound_name] = value
-                LOG.debug("Matched %r <= %r to %r (from %s)",
-                          bound_name, name, value, provider)
+                LOG.blather("Matched %r <= %r to %r (from %s)",
+                            bound_name, name, value, provider)
         return mapped_args
 
     def set_flow_state(self, state):

@ -19,11 +19,11 @@ import abc
import collections
import contextlib
import copy
import logging
import six
from taskflow import atom
from taskflow import logging
from taskflow.utils import reflection
LOG = logging.getLogger(__name__)

@ -14,6 +14,8 @@
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import absolute_import
import collections
import logging

@ -22,13 +22,13 @@
import collections
import contextlib
import errno
import logging
import os
import threading
import time
import six
from taskflow import logging
from taskflow.utils import misc
from taskflow.utils import threading_utils as tu

@ -19,7 +19,6 @@ import contextlib
import datetime
import errno
import inspect
import logging
import os
import re
import sys
@ -38,7 +37,6 @@ from taskflow.utils import deprecation
from taskflow.utils import reflection
LOG = logging.getLogger(__name__)
NUMERIC_TYPES = six.integer_types + (float,)
# NOTE(imelnikov): regular expression to get scheme from URI,

@ -15,10 +15,10 @@
# under the License.
import contextlib
import logging
from oslo.utils import timeutils
from taskflow import logging
from taskflow.openstack.common import uuidutils
from taskflow.persistence import logbook
from taskflow.utils import misc