Final few log tweaks: i18n, log levels, including contexts, etc.

Todd Willey 2011-01-04 00:26:41 -05:00
parent c7305af780
commit b9576a9f73
19 changed files with 79 additions and 85 deletions
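
Taken together, the changes below converge on a few conventions: user-visible log messages are wrapped in gettext's _(), interpolation arguments are handed to the logger instead of being applied with %, logger names become dotted module-style paths, and calls carry the request context. A condensed sketch of the pattern; the context= keyword belongs to nova's own log module, so the stdlib analogue extra= is used here:

    import gettext
    import logging

    _ = gettext.gettext                       # i18n: wrap the format string
    LOG = logging.getLogger('nova.network.manager')  # dotted, package-shaped name

    def lease_fixed_ip(context, address):
        # Arguments go to the logger, so interpolation is deferred and the
        # translation catalog keys on the full template.
        LOG.debug(_("Leasing IP %s"), address,
                  extra={'context': context})  # nova.log takes context= directly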

View File

@ -49,16 +49,13 @@ flags.DECLARE('network_size', 'nova.network.manager')
flags.DECLARE('num_networks', 'nova.network.manager')
flags.DECLARE('update_dhcp_on_disassociate', 'nova.network.manager')
LOG = logging.getLogger('nova-dhcpbridge')
if FLAGS.verbose:
LOG.setLevel(logging.DEBUG)
LOG = logging.getLogger('nova.dhcpbridge')
def add_lease(mac, ip_address, _hostname, _interface):
"""Set the IP that was assigned by the DHCP server."""
if FLAGS.fake_rabbit:
LOG.debug("leasing ip")
LOG.debug(_("leasing ip"))
network_manager = utils.import_object(FLAGS.network_manager)
network_manager.lease_fixed_ip(context.get_admin_context(),
mac,
@ -73,14 +70,14 @@ def add_lease(mac, ip_address, _hostname, _interface):
def old_lease(mac, ip_address, hostname, interface):
"""Update just as add lease."""
LOG.debug("Adopted old lease or got a change of mac/hostname")
LOG.debug(_("Adopted old lease or got a change of mac/hostname"))
add_lease(mac, ip_address, hostname, interface)
def del_lease(mac, ip_address, _hostname, _interface):
"""Called when a lease expires."""
if FLAGS.fake_rabbit:
LOG.debug("releasing ip")
LOG.debug(_("releasing ip"))
network_manager = utils.import_object(FLAGS.network_manager)
network_manager.release_fixed_ip(context.get_admin_context(),
mac,
@ -123,8 +120,8 @@ def main():
mac = argv[2]
ip = argv[3]
hostname = argv[4]
LOG.debug("Called %s for mac %s with ip %s and "
"hostname %s on interface %s",
LOG.debug(_("Called %s for mac %s with ip %s and "
"hostname %s on interface %s"),
action, mac, ip, hostname, interface)
globals()[action + '_lease'](mac, ip, hostname, interface)
else:
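
One detail in the dhcpbridge changes is easy to miss: _() wraps the literal format string, never a pre-formatted result, because catalog extraction tools such as xgettext collect the literal arguments of _(). A small illustration, assuming a plain gettext setup:

    import gettext
    import logging

    _ = gettext.gettext
    LOG = logging.getLogger('nova.dhcpbridge')

    ip, mac = '10.0.0.5', '02:16:3e:00:00:01'

    # Wrong: xgettext sees only a runtime expression, and every ip/mac
    # combination would need its own catalog entry.
    # LOG.debug(_("leasing ip %s to %s" % (ip, mac)))

    # Right: the template itself is the translatable unit.
    LOG.debug(_("leasing ip %s to %s"), ip, mac)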

View File

@ -44,7 +44,7 @@ from nova.compute import monitor
# TODO(todd): shouldn't this be done with flags? And what about verbose?
logging.getLogger('boto').setLevel(logging.WARN)
LOG = logging.getLogger('nova-instancemonitor')
LOG = logging.getLogger('nova.instancemonitor')
if __name__ == '__main__':

View File

@ -77,7 +77,6 @@ from nova import crypto
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import quota
from nova import utils
from nova.auth import manager
@ -557,7 +556,6 @@ def main():
utils.default_flagfile()
argv = FLAGS(sys.argv)
logging._set_log_levels()
script_name = argv.pop(0)
if len(argv) < 1:
print script_name + " category action [<args>]"
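
nova-manage likewise drops its explicit logging._set_log_levels() call and the now-unused import, matching the removal of the per-script "if FLAGS.verbose: LOG.setLevel(...)" stanzas above; level policy presumably now lives in one place inside the log module. A hypothetical sketch of centralized, flag-driven setup (the function and its arguments are illustrative, not nova's API):

    import logging

    def set_log_levels(verbose=False, overrides=None):
        """Apply one global default plus per-library overrides."""
        logging.getLogger('nova').setLevel(
            logging.DEBUG if verbose else logging.INFO)
        for name, level in (overrides or {}).items():
            logging.getLogger(name).setLevel(level)

    set_log_levels(verbose=True,
                   overrides={'amqplib': logging.WARN, 'boto': logging.WARN})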

View File

@ -130,7 +130,7 @@ class Lockout(wsgi.Middleware):
failures_key = "authfailures-%s" % access_key
failures = int(self.mc.get(failures_key) or 0)
if failures >= FLAGS.lockout_attempts:
detail = "Too many failed authentications."
detail = _("Too many failed authentications.")
raise webob.exc.HTTPForbidden(detail=detail)
res = req.get_response(self.application)
if res.status_int == 403:
@ -139,13 +139,14 @@ class Lockout(wsgi.Middleware):
# NOTE(vish): To use incr, failures has to be a string.
self.mc.set(failures_key, '1', time=FLAGS.lockout_window * 60)
elif failures >= FLAGS.lockout_attempts:
LOG.warn('Access key %s has had %d failed authentications'
' and will be locked out for %d minutes.',
LOG.warn(_('Access key %s has had %d failed authentications'
' and will be locked out for %d minutes.'),
access_key, failures, FLAGS.lockout_minutes)
self.mc.set(failures_key, str(failures),
time=FLAGS.lockout_minutes * 60)
return res
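
The lockout bookkeeping is easiest to read in isolation. A self-contained sketch with an in-memory stand-in for the memcache client; like real memcached, incr only works on string-encoded integers, which is why the middleware stores '1' and str(failures):

    class FakeMemcache(object):
        """Stand-in for the subset of python-memcached used by Lockout."""
        def __init__(self):
            self._data = {}
        def get(self, key):
            return self._data.get(key)
        def set(self, key, value, time=0):
            self._data[key] = value          # expiry ignored in this stand-in
        def incr(self, key):
            self._data[key] = str(int(self._data[key]) + 1)
            return int(self._data[key])

    mc = FakeMemcache()
    key = 'authfailures-AKIAEXAMPLE'         # hypothetical access key
    if mc.get(key) is None:
        mc.set(key, '1', time=15 * 60)       # first failure opens the window
    else:
        mc.incr(key)                         # value must stay a string
    failures = int(mc.get(key) or 0)
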
class Authenticate(wsgi.Middleware):
"""Authenticate an EC2 request and add 'ec2.context' to WSGI environ."""
@ -297,8 +298,9 @@ class Authorizer(wsgi.Middleware):
if self._matches_any_role(context, allowed_roles):
return self.application
else:
LOG.audit("Unauthorized request for controller=%s and action=%s",
controller_name, action, context=context)
LOG.audit(_("Unauthorized request for controller=%s "
"and action=%s"), controller_name, action,
context=context)
raise webob.exc.HTTPUnauthorized()
def _matches_any_role(self, context, roles):
@ -337,7 +339,7 @@ class Executor(wsgi.Application):
LOG.info(_('NotFound raised: %s'), str(ex), context=context)
return self._error(req, context, type(ex).__name__, str(ex))
except exception.ApiError as ex:
LOG.exception('ApiError raised', context=context)
LOG.exception(_('ApiError raised: %s'), str(ex), context=context)
if ex.code:
return self._error(req, context, ex.code, str(ex))
else:
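
The ApiError change is more than i18n: exception() logs at ERROR severity and appends the traceback active in the enclosing except block, so the stack is preserved even though the handler swallows the error and returns a clean EC2 response. A minimal stdlib illustration:

    import logging

    logging.basicConfig()
    LOG = logging.getLogger('nova.api')

    try:
        raise ValueError('bad volume id')    # stands in for exception.ApiError
    except Exception as ex:
        # Equivalent to LOG.error(...) plus the current traceback; only
        # meaningful inside an except block.
        LOG.exception('ApiError raised: %s', str(ex))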

View File

@ -609,7 +609,7 @@ class CloudController(object):
'volumeId': volume_ref['id']}
def detach_volume(self, context, volume_id, **kwargs):
LOG.audit("Detach volume %s", volume_id, context=context)
LOG.audit(_("Detach volume %s"), volume_id, context=context)
volume_ref = db.volume_get_by_ec2_id(context, volume_id)
instance_ref = db.volume_get_instance(context.elevated(),
volume_ref['id'])
@ -893,7 +893,7 @@ class CloudController(object):
return {'imagesSet': images}
def deregister_image(self, context, image_id, **kwargs):
LOG.audit("De-registering image %s", image_id, context=context)
LOG.audit(_("De-registering image %s"), image_id, context=context)
self.image_service.deregister(context, image_id)
return {'imageId': image_id}
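
LOG.audit is not a stdlib method: nova's log module defines an extra AUDIT severity for operator-relevant events such as volume detach and image deregistration. One plausible way to wire such a level into Python logging; the numeric ranking and class name here are assumptions, not nova's actual code:

    import logging

    AUDIT = logging.INFO + 1                 # assumed: between INFO and WARNING
    logging.addLevelName(AUDIT, 'AUDIT')

    class AuditableLogger(logging.Logger):
        def audit(self, msg, *args, **kwargs):
            if self.isEnabledFor(AUDIT):
                self._log(AUDIT, msg, args, **kwargs)

    logging.setLoggerClass(AuditableLogger)
    logging.basicConfig(level=AUDIT)
    LOG = logging.getLogger('nova.api.ec2.cloud')
    LOG.audit('Detach volume %s', 'vol-00000001')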

View File

@ -83,7 +83,7 @@ class APIRouter(wsgi.Router):
server_members = {'action': 'POST'}
if FLAGS.allow_admin_api:
LOG.debug("Including admin operations in API.")
LOG.debug(_("Including admin operations in API."))
server_members['pause'] = 'POST'
server_members['unpause'] = 'POST'
server_members["diagnostics"] = "GET"

View File

@ -65,7 +65,6 @@ flags.DEFINE_string('ldap_netadmin',
flags.DEFINE_string('ldap_developer',
'cn=developers,ou=Groups,dc=example,dc=com', 'cn for Developers')
LOG = logging.getLogger("nova.ldapdriver")
@ -506,7 +505,7 @@ class LdapDriver(object):
self.conn.modify_s(group_dn, attr)
except self.ldap.OBJECT_CLASS_VIOLATION:
LOG.debug(_("Attempted to remove the last member of a group. "
"Deleting the group at %s instead."), group_dn)
"Deleting the group at %s instead."), group_dn)
self.__delete_group(group_dn)
def __remove_from_all(self, uid):

View File

@ -70,8 +70,7 @@ flags.DEFINE_string('credential_rc_file', '%src',
flags.DEFINE_string('auth_driver', 'nova.auth.dbdriver.DbDriver',
'Driver that auth manager uses')
LOG = logging.getLogger('nova.authmanager')
LOG = logging.getLogger('nova.auth.manager')
class AuthBase(object):
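
Several getLogger names are corrected from ad-hoc spellings ('nova.authmanager', 'nova.computemanager') to the module's real dotted path. That is what makes the logging hierarchy useful: configuring a parent logger reaches every child, and the name can simply be the module's own:

    import logging

    LOG = logging.getLogger(__name__)        # e.g. 'nova.auth.manager'

    # One setting now covers nova.auth.manager, nova.auth.ldapdriver, ...
    logging.getLogger('nova.auth').setLevel(logging.DEBUG)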

View File

@ -52,8 +52,7 @@ flags.DEFINE_string('compute_driver', 'nova.virt.connection.get_connection',
flags.DEFINE_string('stub_network', False,
'Stub network related code')
LOG = logging.getLogger('nova.computemanager')
LOG = logging.getLogger('nova.compute.manager')
class ComputeManager(manager.Manager):
@ -114,7 +113,8 @@ class ComputeManager(manager.Manager):
instance_ref = self.db.instance_get(context, instance_id)
if instance_ref['name'] in self.driver.list_instances():
raise exception.Error(_("Instance has already been created"))
LOG.debug(_("instance %s: starting..."), instance_id)
LOG.audit(_("instance %s//%s: starting..."),
instance_ref['internal_id'], instance_id, context=context)
self.db.instance_update(context,
instance_id,
{'host': self.host})
@ -152,8 +152,9 @@ class ComputeManager(manager.Manager):
instance_id,
{'launched_at': now})
except Exception: # pylint: disable-msg=W0702
LOG.exception(_("instance %s: Failed to spawn"),
instance_ref['name'])
LOG.exception(_("instance %s//%s: Failed to spawn"),
instance_ref['internal_id'], instance_id,
context=context)
self.db.instance_set_state(context,
instance_id,
power_state.SHUTDOWN)
@ -193,8 +194,6 @@ class ComputeManager(manager.Manager):
self.network_manager.deallocate_fixed_ip(context.elevated(),
address)
LOG.debug(_("instance %s: terminating"), instance_id, context=context)
volumes = instance_ref.get('volumes', []) or []
for volume in volumes:
self.detach_volume(context, instance_id, volume['id'])
@ -217,14 +216,13 @@ class ComputeManager(manager.Manager):
instance_id, context=context)
if instance_ref['state'] != power_state.RUNNING:
LOG.warn(_('trying to reboot a non-running '
'instance: %s (state: %s excepted: %s)'),
instance_ref['internal_id'],
instance_ref['state'],
power_state.RUNNING,
context=context)
LOG.debug(_('instance %s: rebooting'), instance_ref['name'])
self.db.instance_set_state(context,
instance_id,
power_state.NOSTATE,
@ -244,13 +242,14 @@ class ComputeManager(manager.Manager):
# potentially?
self._update_state(context, instance_id)
logging.debug(_('instance %s: snapshotting'), instance_ref['name'])
LOG.audit(_('instance %s//%s: snapshotting'),
instance_ref['internal_id'], instance_id, context=context)
if instance_ref['state'] != power_state.RUNNING:
logging.warn(_('trying to snapshot a non-running '
'instance: %s (state: %s excepted: %s)'),
instance_ref['internal_id'],
instance_ref['state'],
power_state.RUNNING)
LOG.warn(_('trying to snapshot a non-running '
'instance: %s//%s (state: %s excepted: %s)'),
instance_ref['internal_id'], instance_id,
instance_ref['state'],
power_state.RUNNING)
self.driver.snapshot(instance_ref, name)
@ -259,9 +258,8 @@ class ComputeManager(manager.Manager):
"""Rescue an instance on this server."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
LOG.audit(_('instance %s: rescuing'), instance_ref['internal_id'],
context=context)
LOG.audit(_('instance %s//%s: rescuing'), instance_ref['internal_id'],
instance_id, context=context)
self.db.instance_set_state(context,
instance_id,
power_state.NOSTATE,
@ -276,8 +274,8 @@ class ComputeManager(manager.Manager):
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
LOG.audit(_('instance %s: unrescuing'), instance_ref['internal_id'],
context=context)
LOG.audit(_('instance %s//%s: unrescuing'),
instance_ref['internal_id'], instance_id, context=context)
self.db.instance_set_state(context,
instance_id,
power_state.NOSTATE,
@ -295,9 +293,8 @@ class ComputeManager(manager.Manager):
"""Pause an instance on this server."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
LOG.debug(_('instance %s: pausing'), instance_ref['internal_id'],
context=context)
LOG.audit(_('instance %s//%s: pausing'), instance_ref['internal_id'],
instance_id, context=context)
self.db.instance_set_state(context,
instance_id,
power_state.NOSTATE,
@ -313,9 +310,8 @@ class ComputeManager(manager.Manager):
"""Unpause a paused instance on this server."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
LOG.debug(_('instance %s: unpausing'), instance_ref['internal_id'],
context=context)
LOG.audit(_('instance %s//%s: unpausing'),
instance_ref['internal_id'], instance_id, context=context)
self.db.instance_set_state(context,
instance_id,
power_state.NOSTATE,
@ -332,8 +328,9 @@ class ComputeManager(manager.Manager):
instance_ref = self.db.instance_get(context, instance_id)
if instance_ref["state"] == power_state.RUNNING:
logging.debug(_("instance %s: retrieving diagnostics"),
instance_ref["internal_id"])
LOG.audit(_("instance %s//%s: retrieving diagnostics"),
instance_ref["internal_id"], instance_id,
context=context)
return self.driver.get_diagnostics(instance_ref)
@exception.wrap_exception
@ -342,8 +339,8 @@ class ComputeManager(manager.Manager):
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
logging.debug(_('instance %s: suspending'),
instance_ref['internal_id'])
LOG.audit(_('instance %s//%s: suspending'),
instance_ref['internal_id'], instance_id, context=context)
self.db.instance_set_state(context, instance_id,
power_state.NOSTATE,
'suspending')
@ -359,7 +356,8 @@ class ComputeManager(manager.Manager):
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
logging.debug(_('instance %s: resuming'), instance_ref['internal_id'])
LOG.audit(_('instance %s//%s: resuming'), instance_ref['internal_id'],
instance_id, context=context)
self.db.instance_set_state(context, instance_id,
power_state.NOSTATE,
'resuming')
@ -374,18 +372,18 @@ class ComputeManager(manager.Manager):
"""Send the console output for an instance."""
context = context.elevated()
instance_ref = self.db.instance_get(context, instance_id)
LOG.audit(_("Get console output instance %s//%s"),
LOG.audit(_("Get console output for instance %s//%s"),
instance_ref['internal_id'], instance_id, context=context)
return self.driver.get_console_output(instance_ref)
@exception.wrap_exception
def attach_volume(self, context, instance_id, volume_id, mountpoint):
"""Attach a volume to an instance."""
context = context.elevated()
LOG.audit(_("instance %s: attaching volume %s to %s"), instance_id,
volume_id, mountpoint, context=context)
instance_ref = self.db.instance_get(context, instance_id)
LOG.audit(_("instance %s//%s: attaching volume %s to %s"),
instance_ref['internal_id'], instance_id,
volume_id, mountpoint, context=context)
dev_path = self.volume_manager.setup_compute_volume(context,
volume_id)
try:
@ -400,8 +398,9 @@ class ComputeManager(manager.Manager):
# NOTE(vish): The inline callback eats the exception info so we
# log the traceback here and reraise the same
# exception below.
LOG.exception(_("instance %s: attach failed %s, removing"),
instance_id, mountpoint, context=context)
LOG.exception(_("instance %s//%s: attach failed %s, removing"),
instance_ref['internal_id'], instance_id,
mountpoint, context=context)
self.volume_manager.remove_compute_volume(context,
volume_id)
raise exc
@ -418,8 +417,8 @@ class ComputeManager(manager.Manager):
volume_id, volume_ref['mountpoint'],
instance_ref['internal_id'], instance_id, context=context)
if instance_ref['name'] not in self.driver.list_instances():
LOG.warn(_("Detaching volume from unknown instance %s"),
instance_ref['name'], context=context)
LOG.warn(_("Detaching volume from unknown instance %s//%s"),
instance_ref['internal_id'], instance_id, context=context)
else:
self.driver.detach_volume(instance_ref['name'],
volume_ref['mountpoint'])
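
Throughout the compute manager the messages switch to a '%s//%s' pair: the user-visible internal_id followed by the database row id, with the request context attached. A hypothetical helper, not part of the commit, shows the shape of the convention:

    def instance_label(instance_ref, instance_id):
        """Render the internal_id//db_id pair used in the messages above."""
        return '%s//%s' % (instance_ref['internal_id'], instance_id)

    print(instance_label({'internal_id': 'i-3fa1'}, 42))   # i-3fa1//42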

View File

@ -89,7 +89,7 @@ RRD_VALUES = {
utcnow = datetime.datetime.utcnow
LOG = logging.getLogger('nova.instancemonitor')
LOG = logging.getLogger('nova.compute.monitor')
def update_rrd(instance, name, data):

View File

@ -24,6 +24,7 @@ Nova-type exceptions. SHOULD include dedicated exception logging.
from nova import log as logging
LOG = logging.getLogger('nova.exception')
class ProcessExecutionError(IOError):
def __init__(self, stdout=None, stderr=None, exit_code=None, cmd=None,

View File

@ -60,7 +60,7 @@ from nova import utils
from nova import rpc
LOG = logging.getLogger("nova.networkmanager")
LOG = logging.getLogger("nova.network.manager")
FLAGS = flags.FLAGS
flags.DEFINE_string('flat_network_bridge', 'br100',
'Bridge for simple network instances')
@ -132,7 +132,7 @@ class NetworkManager(manager.Manager):
def set_network_host(self, context, network_id):
"""Safely sets the host of the network."""
LOG.debug(_("setting network host"))
LOG.debug(_("setting network host"), context=context)
host = self.db.network_set_host(context,
network_id,
self.host)
@ -187,7 +187,7 @@ class NetworkManager(manager.Manager):
def lease_fixed_ip(self, context, mac, address):
"""Called by dhcp-bridge when ip is leased."""
LOG.debug(_("Leasing IP %s"), address)
LOG.debug(_("Leasing IP %s"), address, context=context)
fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address)
instance_ref = fixed_ip_ref['instance']
if not instance_ref:
@ -202,11 +202,12 @@ class NetworkManager(manager.Manager):
{'leased': True,
'updated_at': now})
if not fixed_ip_ref['allocated']:
LOG.warn(_("IP %s leased that was already deallocated"), address)
LOG.warn(_("IP %s leased that was already deallocated"), address,
context=context)
def release_fixed_ip(self, context, mac, address):
"""Called by dhcp-bridge when ip is released."""
LOG.debug("Releasing IP %s", address)
LOG.debug("Releasing IP %s", address, context=context)
fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address)
instance_ref = fixed_ip_ref['instance']
if not instance_ref:
@ -216,7 +217,8 @@ class NetworkManager(manager.Manager):
raise exception.Error(_("IP %s released from bad mac %s vs %s") %
(address, instance_ref['mac_address'], mac))
if not fixed_ip_ref['leased']:
LOG.warn(_("IP %s released that was not leased"), address)
LOG.warn(_("IP %s released that was not leased"), address,
context=context)
self.db.fixed_ip_update(context,
fixed_ip_ref['address'],
{'leased': False})
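
The network manager changes mostly thread context= through existing calls so that records identify the request and user. nova.log implements the keyword itself; with stdlib pieces alone, a LoggerAdapter gives a rough equivalent:

    import logging

    logging.basicConfig(format='%(request_id)s %(message)s')

    class ContextAdapter(logging.LoggerAdapter):
        """Rough stdlib approximation of nova.log's context= keyword."""
        def process(self, msg, kwargs):
            context = kwargs.pop('context', None)
            kwargs.setdefault('extra', {})['request_id'] = getattr(
                context, 'request_id', '-')
            return msg, kwargs

    LOG = ContextAdapter(logging.getLogger('nova.network.manager'), {})

    class FakeContext(object):
        request_id = 'req-1234'

    LOG.warning('IP %s released that was not leased', '10.0.0.5',
                context=FakeContext())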

View File

@ -134,7 +134,7 @@ def get_context(request):
headers=request.getAllHeaders(),
check_type='s3')
rv = context.RequestContext(user, project)
LOG.audit("Authenticated request", context=rv)
LOG.audit(_("Authenticated request"), context=rv)
return rv
except exception.Error as ex:
LOG.debug(_("Authentication Failure: %s"), ex)

View File

@ -96,8 +96,9 @@ class Consumer(messaging.Consumer):
FLAGS.rabbit_retry_interval))
self.failed_connection = True
if self.failed_connection:
LOG.exception(_("Unable to connect to AMQP server"
" after %d tries. Shutting down.") % FLAGS.rabbit_max_retries)
LOG.exception(_("Unable to connect to AMQP server "
"after %d tries. Shutting down."),
FLAGS.rabbit_max_retries)
sys.exit(1)
def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False):
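
For context: the consumer gives up after rabbit_max_retries failed connection attempts, sleeping rabbit_retry_interval between tries. The control flow, reduced to a sketch with assumed names:

    import time

    def connect_with_retries(connect, max_retries, retry_interval):
        """Try connect() up to max_retries times, then let the error escape."""
        for attempt in range(1, max_retries + 1):
            try:
                return connect()
            except IOError:
                if attempt == max_retries:
                    raise            # caller logs via LOG.exception and exits
                time.sleep(retry_interval)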

View File

@ -209,9 +209,6 @@ def serve(*services):
FLAGS(sys.argv)
logging.basicConfig()
# TODO(todd): make this piggyback the flag-based level override method
logging.getLogger('amqplib').setLevel(logging.WARN)
if not services:
services = [Service.create()]

View File

@ -62,9 +62,7 @@ libvirt = None
libxml2 = None
Template = None
LOG = logging.getLogger('nova.virt.libvirt_conn')
FLAGS = flags.FLAGS
# TODO(vish): These flags should probably go into a shared location
@ -380,7 +378,7 @@ class LibvirtConnection(object):
return timer.start(interval=0.5, now=True)
def _flush_xen_console(self, virsh_output):
LOG.info('virsh said: %r', virsh_output)
LOG.info(_('virsh said: %r'), virsh_output)
virsh_output = virsh_output[0].strip()
if virsh_output.startswith('/dev/'):
@ -400,7 +398,7 @@ class LibvirtConnection(object):
def _dump_file(self, fpath):
fp = open(fpath, 'r+')
contents = fp.read()
LOG.info('Contents: %r', contents)
LOG.info(_('Contents of file %s: %r'), fpath, contents)
return contents
@exception.wrap_exception
@ -529,7 +527,7 @@ class LibvirtConnection(object):
def to_xml(self, instance, rescue=False):
# TODO(termie): cache?
LOG.debug('instance %s: starting toXML method', instance['name'])
LOG.debug(_('instance %s: starting toXML method'), instance['name'])
network = db.project_get_network(context.get_admin_context(),
instance['project_id'])
LOG.debug(_('instance %s: starting toXML method'), instance['name'])

View File

@ -244,7 +244,7 @@ class SessionBase(object):
full_params = (self._session,) + params
meth = getattr(self, methodname, None)
if meth is None:
LOG.debug('Raising NotImplemented')
LOG.debug(_('Raising NotImplemented'))
raise NotImplementedError(
_('xenapi.fake does not have an implementation for %s') %
methodname)
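
Note the asymmetry with the log calls elsewhere in this commit: the NotImplementedError message is %-formatted eagerly, because an exception must carry its final text at the moment it is raised; only logger calls get deferred interpolation. In miniature:

    import gettext

    _ = gettext.gettext

    def unimplemented(methodname):
        # Eager '%' is correct here; there is no logger to defer to.
        raise NotImplementedError(
            _('xenapi.fake does not have an implementation for %s') % methodname)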

View File

@ -33,6 +33,7 @@ from nova.virt.xenapi.vm_utils import ImageType
XenAPI = None
LOG = logging.getLogger("nova.virt.xenapi.vmops")
class VMOps(object):
"""
Management class for VM-related tasks

View File

@ -36,11 +36,11 @@ import webob.exc
from nova import log as logging
# TODO(todd): should this just piggyback the handler for root logger
# since we usually log to syslog, but changes if not daemonized?
logging.getLogger("routes.middleware").addHandler(logging.StreamHandler())
class Server(object):
"""Server class to manage multiple WSGI sockets and applications."""