Apply logging changes as a giant patch to work around the cloudpipe delete + add issue in the original patch.
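As an illustrative sketch (not part of the diff), the pattern this patch applies in each module looks roughly like the following, assuming the nova.log module added later in this change; the logger name 'nova.example' and lease_ip() are hypothetical:

    import gettext

    gettext.install('nova', unicode=1)       # makes _() available, as the bin scripts do

    from nova import log as logging          # replaces "import logging"

    LOG = logging.getLogger('nova.example')  # hypothetical per-module named logger


    def lease_ip(address):
        # Old code called logging.debug() on the root logger; the patch switches
        # to a named LOG and wraps user-facing strings in _() for translation.
        LOG.debug(_("leasing ip %s"), address)
        LOG.audit(_("leased ip %s"), address)  # AUDIT level is defined in nova/log.py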
@@ -22,7 +22,6 @@ Handle lease database updates from DHCP servers.
"""

import gettext
import logging
import os
import sys

@@ -39,6 +38,7 @@ gettext.install('nova', unicode=1)
from nova import context
from nova import db
from nova import flags
from nova import log as logging
from nova import rpc
from nova import utils
from nova.network import linux_net
@@ -50,10 +50,15 @@ flags.DECLARE('num_networks', 'nova.network.manager')
flags.DECLARE('update_dhcp_on_disassociate', 'nova.network.manager')


LOG = logging.getLogger('nova-dhcpbridge')
if FLAGS.verbose:
    LOG.setLevel(logging.DEBUG)


def add_lease(mac, ip_address, _hostname, _interface):
    """Set the IP that was assigned by the DHCP server."""
    if FLAGS.fake_rabbit:
        logging.debug("leasing ip")
        LOG.debug("leasing ip")
        network_manager = utils.import_object(FLAGS.network_manager)
        network_manager.lease_fixed_ip(context.get_admin_context(),
                                       mac,
@@ -68,14 +73,14 @@ def add_lease(mac, ip_address, _hostname, _interface):

def old_lease(mac, ip_address, hostname, interface):
    """Update just as add lease."""
    logging.debug("Adopted old lease or got a change of mac/hostname")
    LOG.debug("Adopted old lease or got a change of mac/hostname")
    add_lease(mac, ip_address, hostname, interface)


def del_lease(mac, ip_address, _hostname, _interface):
    """Called when a lease expires."""
    if FLAGS.fake_rabbit:
        logging.debug("releasing ip")
        LOG.debug("releasing ip")
        network_manager = utils.import_object(FLAGS.network_manager)
        network_manager.release_fixed_ip(context.get_admin_context(),
                                         mac,
@@ -100,6 +105,7 @@ def main():
    flagfile = os.environ.get('FLAGFILE', FLAGS.dhcpbridge_flagfile)
    utils.default_flagfile(flagfile)
    argv = FLAGS(sys.argv)
    logging.basicConfig()
    interface = os.environ.get('DNSMASQ_INTERFACE', 'br0')
    if int(os.environ.get('TESTING', '0')):
        FLAGS.fake_rabbit = True
@@ -117,9 +123,9 @@ def main():
        mac = argv[2]
        ip = argv[3]
        hostname = argv[4]
        logging.debug("Called %s for mac %s with ip %s and "
                      "hostname %s on interface %s",
                      action, mac, ip, hostname, interface)
        LOG.debug("Called %s for mac %s with ip %s and "
                  "hostname %s on interface %s",
                  action, mac, ip, hostname, interface)
        globals()[action + '_lease'](mac, ip, hostname, interface)
    else:
        print init_leases(interface)
@@ -23,7 +23,6 @@

import gettext
import os
import logging
import sys
from twisted.application import service

@@ -37,19 +36,23 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):

gettext.install('nova', unicode=1)

from nova import log as logging
from nova import utils
from nova import twistd
from nova.compute import monitor

# TODO(todd): shouldn't this be done with flags? And what about verbose?
logging.getLogger('boto').setLevel(logging.WARN)

LOG = logging.getLogger('nova-instancemonitor')


if __name__ == '__main__':
    utils.default_flagfile()
    twistd.serve(__file__)

if __name__ == '__builtin__':
    logging.warn('Starting instance monitor')
    LOG.warn(_('Starting instance monitor'))
    # pylint: disable-msg=C0103
    monitor = monitor.InstanceMonitor()
156  bin/nova-logspool  Normal file
@@ -0,0 +1,156 @@
|
||||
#!/usr/bin/env python
|
||||
|
||||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
"""
|
||||
Tools for working with logs generated by nova components
|
||||
"""
|
||||
|
||||
|
||||
import json
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
|
||||
|
||||
class Request(object):
|
||||
|
||||
def __init__(self):
|
||||
self.time = ""
|
||||
self.host = ""
|
||||
self.logger = ""
|
||||
self.message = ""
|
||||
self.trace = ""
|
||||
self.env = ""
|
||||
self.request_id = ""
|
||||
|
||||
def add_error_line(self, error_line):
|
||||
self.time = " ".join(error_line.split(" ")[:3])
|
||||
self.host = error_line.split(" ")[3]
|
||||
self.logger = error_line.split("(")[1].split(" ")[0]
|
||||
self.request_id = error_line.split("[")[1].split(" ")[0]
|
||||
error_lines = error_line.split("#012")
|
||||
self.message = self.clean_log_line(error_lines.pop(0))
|
||||
self.trace = "\n".join([self.clean_trace(l) for l in error_lines])
|
||||
|
||||
def add_environment_line(self, env_line):
|
||||
self.env = self.clean_env_line(env_line)
|
||||
|
||||
def clean_log_line(self, line):
|
||||
"""Remove log format for time, level, etc: split after context"""
|
||||
return line.split('] ')[-1]
|
||||
|
||||
def clean_env_line(self, line):
|
||||
"""Also has an 'Environment: ' string in the message"""
|
||||
return re.sub(r'^Environment: ', '', self.clean_log_line(line))
|
||||
|
||||
def clean_trace(self, line):
|
||||
"""trace has a different format, so split on TRACE:"""
|
||||
return line.split('TRACE: ')[-1]
|
||||
|
||||
def to_dict(self):
|
||||
return {'traceback': self.trace, 'message': self.message,
|
||||
'host': self.host, 'env': self.env, 'logger': self.logger,
|
||||
'request_id': self.request_id}
|
||||
|
||||
class LogReader(object):
|
||||
def __init__(self, filename):
|
||||
self.filename = filename
|
||||
self._errors = {}
|
||||
|
||||
def process(self, spooldir):
|
||||
with open(self.filename) as f:
|
||||
line = f.readline()
|
||||
while len(line) > 0:
|
||||
parts = line.split(" ")
|
||||
level = (len(parts) < 6) or parts[5]
|
||||
if level == 'ERROR':
|
||||
self.handle_logged_error(line)
|
||||
elif level == '[-]' and self.last_error:
|
||||
# twisted stack trace line
|
||||
clean_line = " ".join(line.split(" ")[6:])
|
||||
self.last_error.trace = self.last_error.trace + clean_line
|
||||
else:
|
||||
self.last_error = None
|
||||
line = f.readline()
|
||||
self.update_spool(spooldir)
|
||||
|
||||
def handle_logged_error(self, line):
|
||||
request_id = re.search(r' \[([A-Z0-9\-/]+)', line)
|
||||
if not request_id:
|
||||
raise Exception("Unable to parse request id from %s" % line)
|
||||
request_id = request_id.group(1)
|
||||
data = self._errors.get(request_id, Request())
|
||||
if self.is_env_line(line):
|
||||
data.add_environment_line(line)
|
||||
elif self.is_error_line(line):
|
||||
data.add_error_line(line)
|
||||
else:
|
||||
# possibly error from twisted
|
||||
data.add_error_line(line)
|
||||
self.last_error = data
|
||||
self._errors[request_id] = data
|
||||
|
||||
def is_env_line(self, line):
|
||||
return re.search('Environment: ', line)
|
||||
|
||||
def is_error_line(self, line):
|
||||
return re.search('raised', line)
|
||||
|
||||
def update_spool(self, directory):
|
||||
processed_dir = "%s/processed" % directory
|
||||
self._ensure_dir_exists(processed_dir)
|
||||
for rid, value in self._errors.iteritems():
|
||||
if not self.has_been_processed(processed_dir, rid):
|
||||
with open("%s/%s" % (directory, rid), "w") as spool:
|
||||
spool.write(json.dumps(value.to_dict()))
|
||||
self.flush_old_processed_spool(processed_dir)
|
||||
|
||||
def _ensure_dir_exists(self, d):
|
||||
mkdir = False
|
||||
try:
|
||||
os.stat(d)
|
||||
except:
|
||||
mkdir = True
|
||||
if mkdir:
|
||||
os.mkdir(d)
|
||||
|
||||
def has_been_processed(self, processed_dir, rid):
|
||||
rv = False
|
||||
try:
|
||||
os.stat("%s/%s" % (processed_dir, rid))
|
||||
rv = True
|
||||
except:
|
||||
pass
|
||||
return rv
|
||||
|
||||
def flush_old_processed_spool(self, processed_dir):
|
||||
keys = self._errors.keys()
|
||||
procs = os.listdir(processed_dir)
|
||||
for p in procs:
|
||||
if p not in keys:
|
||||
# log has rotated and the old error won't be seen again
|
||||
os.unlink("%s/%s" % (processed_dir, p))
|
||||
|
||||
if __name__ == '__main__':
|
||||
filename = '/var/log/nova.log'
|
||||
spooldir = '/var/spool/nova'
|
||||
if len(sys.argv) > 1:
|
||||
filename = sys.argv[1]
|
||||
if len(sys.argv) > 2:
|
||||
spooldir = sys.argv[2]
|
||||
LogReader(filename).process(spooldir)
|
@@ -55,8 +55,8 @@
|
||||
|
||||
import datetime
|
||||
import gettext
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import time
|
||||
|
||||
@@ -77,6 +77,7 @@ from nova import crypto
|
||||
from nova import db
|
||||
from nova import exception
|
||||
from nova import flags
|
||||
from nova import log as logging
|
||||
from nova import quota
|
||||
from nova import utils
|
||||
from nova.auth import manager
|
||||
@@ -499,6 +500,15 @@ class ServiceCommands(object):
|
||||
db.service_update(ctxt, svc['id'], {'disabled': True})
|
||||
|
||||
|
||||
class LogCommands(object):
|
||||
def request(self, request_id, logfile='/var/log/nova.log'):
|
||||
"""Show all fields in the log for the given request. Assumes you
|
||||
haven't changed the log format too much.
|
||||
ARGS: request_id [logfile]"""
|
||||
lines = utils.execute("cat %s | grep '\[%s '" % (logfile, request_id))
|
||||
print re.sub('#012', "\n", "\n".join(lines))
|
||||
|
||||
|
||||
CATEGORIES = [
|
||||
('user', UserCommands),
|
||||
('project', ProjectCommands),
|
||||
@@ -507,7 +517,8 @@ CATEGORIES = [
|
||||
('vpn', VpnCommands),
|
||||
('floating', FloatingIpCommands),
|
||||
('network', NetworkCommands),
|
||||
('service', ServiceCommands)]
|
||||
('service', ServiceCommands),
|
||||
('log', LogCommands)]
|
||||
|
||||
|
||||
def lazy_match(name, key_value_tuples):
|
||||
@@ -546,9 +557,7 @@ def main():
|
||||
utils.default_flagfile()
|
||||
argv = FLAGS(sys.argv)
|
||||
|
||||
if FLAGS.verbose:
|
||||
logging.getLogger().setLevel(logging.DEBUG)
|
||||
|
||||
logging._set_log_levels()
|
||||
script_name = argv.pop(0)
|
||||
if len(argv) < 1:
|
||||
print script_name + " category action [<args>]"
|
||||
|
97  bin/nova-spoolsentry  Normal file
@@ -0,0 +1,97 @@
|
||||
#!/usr/bin/env python
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
|
||||
import base64
|
||||
import json
|
||||
import logging
|
||||
import os
|
||||
import shutil
|
||||
import sys
|
||||
import urllib
|
||||
import urllib2
|
||||
try:
|
||||
import cPickle as pickle
|
||||
except:
|
||||
import pickle
|
||||
|
||||
|
||||
class SpoolSentry(object):
|
||||
def __init__(self, spool_dir, sentry_url, key=None):
|
||||
self.spool_dir = spool_dir
|
||||
self.sentry_url = sentry_url
|
||||
self.key = key
|
||||
|
||||
def process(self):
|
||||
for fname in os.listdir(self.spool_dir):
|
||||
if fname == "processed":
|
||||
continue
|
||||
try:
|
||||
sourcefile = "%s/%s" % (self.spool_dir, fname)
|
||||
with open(sourcefile) as f:
|
||||
fdata = f.read()
|
||||
data_from_json = json.loads(fdata)
|
||||
data = self.build_data(data_from_json)
|
||||
self.send_data(data)
|
||||
destfile = "%s/processed/%s" % (self.spool_dir, fname)
|
||||
shutil.move(sourcefile, destfile)
|
||||
except:
|
||||
logging.exception("Unable to upload record %s", fname)
|
||||
raise
|
||||
|
||||
def build_data(self, filejson):
|
||||
env = {'SERVER_NAME': 'unknown', 'SERVER_PORT': '0000',
|
||||
'SCRIPT_NAME': '/unknown/', 'PATH_INFO': 'unknown'}
|
||||
if filejson['env']:
|
||||
env = json.loads(filejson['env'])
|
||||
url = "http://%s:%s%s%s" % (env['SERVER_NAME'], env['SERVER_PORT'],
|
||||
env['SCRIPT_NAME'], env['PATH_INFO'])
|
||||
rv = {'logger': filejson['logger'], 'level': logging.ERROR,
|
||||
'server_name': filejson['host'], 'url': url,
|
||||
'message': filejson['message'],
|
||||
'traceback': filejson['traceback']}
|
||||
rv['data'] = {}
|
||||
if filejson['env']:
|
||||
rv['data']['META'] = env
|
||||
if filejson['request_id']:
|
||||
rv['data']['request_id'] = filejson['request_id']
|
||||
return rv
|
||||
|
||||
def send_data(self, data):
|
||||
data = {
|
||||
'data': base64.b64encode(pickle.dumps(data).encode('zlib')),
|
||||
'key': self.key
|
||||
}
|
||||
req = urllib2.Request(self.sentry_url)
|
||||
res = urllib2.urlopen(req, urllib.urlencode(data))
|
||||
if res.getcode() != 200:
|
||||
raise Exception("Bad HTTP code: %s" % res.getcode())
|
||||
txt = res.read()
|
||||
|
||||
if __name__ == '__main__':
|
||||
sentryurl = 'http://127.0.0.1/sentry/store/'
|
||||
key = ''
|
||||
spooldir = '/var/spool/nova'
|
||||
if len(sys.argv) > 1:
|
||||
sentryurl = sys.argv[1]
|
||||
if len(sys.argv) > 2:
|
||||
key = sys.argv[2]
|
||||
if len(sys.argv) > 3:
|
||||
spooldir = sys.argv[3]
|
||||
SpoolSentry(spooldir, sentryurl, key).process()
|
@@ -24,11 +24,11 @@ other backends by creating another class that exposes the same
|
||||
public methods.
|
||||
"""
|
||||
|
||||
import logging
|
||||
import sys
|
||||
|
||||
from nova import exception
|
||||
from nova import flags
|
||||
from nova import log as logging
|
||||
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
@@ -66,6 +66,9 @@ flags.DEFINE_string('ldap_developer',
|
||||
'cn=developers,ou=Groups,dc=example,dc=com', 'cn for Developers')
|
||||
|
||||
|
||||
LOG = logging.getLogger("nova.ldapdriver")
|
||||
|
||||
|
||||
# TODO(vish): make an abstract base class with the same public methods
|
||||
# to define a set interface for AuthDrivers. I'm delaying
|
||||
# creating this now because I'm expecting an auth refactor
|
||||
@@ -502,7 +505,7 @@ class LdapDriver(object):
|
||||
try:
|
||||
self.conn.modify_s(group_dn, attr)
|
||||
except self.ldap.OBJECT_CLASS_VIOLATION:
|
||||
logging.debug(_("Attempted to remove the last member of a group. "
|
||||
LOG.debug(_("Attempted to remove the last member of a group. "
|
||||
"Deleting the group at %s instead."), group_dn)
|
||||
self.__delete_group(group_dn)
|
||||
|
||||
|
@@ -20,7 +20,6 @@
|
||||
Nova authentication management
|
||||
"""
|
||||
|
||||
import logging
|
||||
import os
|
||||
import shutil
|
||||
import string # pylint: disable-msg=W0402
|
||||
@@ -33,6 +32,7 @@ from nova import crypto
|
||||
from nova import db
|
||||
from nova import exception
|
||||
from nova import flags
|
||||
from nova import log as logging
|
||||
from nova import utils
|
||||
from nova.auth import signer
|
||||
|
||||
@@ -71,6 +71,9 @@ flags.DEFINE_string('auth_driver', 'nova.auth.dbdriver.DbDriver',
|
||||
'Driver that auth manager uses')
|
||||
|
||||
|
||||
LOG = logging.getLogger('nova.authmanager')
|
||||
|
||||
|
||||
class AuthBase(object):
|
||||
"""Base class for objects relating to auth
|
||||
|
||||
@@ -254,43 +257,51 @@ class AuthManager(object):
|
||||
# TODO(vish): check for valid timestamp
|
||||
(access_key, _sep, project_id) = access.partition(':')
|
||||
|
||||
logging.info(_('Looking up user: %r'), access_key)
|
||||
LOG.debug(_('Looking up user: %r'), access_key)
|
||||
user = self.get_user_from_access_key(access_key)
|
||||
logging.info('user: %r', user)
|
||||
LOG.debug('user: %r', user)
|
||||
if user == None:
|
||||
LOG.audit(_("Failed authorization for access key %s"), access_key)
|
||||
raise exception.NotFound(_('No user found for access key %s')
|
||||
% access_key)
|
||||
|
||||
# NOTE(vish): if we stop using project name as id we need better
|
||||
# logic to find a default project for user
|
||||
if project_id == '':
|
||||
LOG.debug(_("Using project name = user name (%s)"), user.name)
|
||||
project_id = user.name
|
||||
|
||||
project = self.get_project(project_id)
|
||||
if project == None:
|
||||
LOG.audit(_("failed authorization: no project named %s (user=%s)"),
|
||||
project_id, user.name)
|
||||
raise exception.NotFound(_('No project called %s could be found')
|
||||
% project_id)
|
||||
if not self.is_admin(user) and not self.is_project_member(user,
|
||||
project):
|
||||
LOG.audit(_("Failed authorization: user %s not admin and not "
|
||||
"member of project %s"), user.name, project.name)
|
||||
raise exception.NotFound(_('User %s is not a member of project %s')
|
||||
% (user.id, project.id))
|
||||
if check_type == 's3':
|
||||
sign = signer.Signer(user.secret.encode())
|
||||
expected_signature = sign.s3_authorization(headers, verb, path)
|
||||
logging.debug('user.secret: %s', user.secret)
|
||||
logging.debug('expected_signature: %s', expected_signature)
|
||||
logging.debug('signature: %s', signature)
|
||||
LOG.debug('user.secret: %s', user.secret)
|
||||
LOG.debug('expected_signature: %s', expected_signature)
|
||||
LOG.debug('signature: %s', signature)
|
||||
if signature != expected_signature:
|
||||
LOG.audit(_("Invalid signature for user %s"), user.name)
|
||||
raise exception.NotAuthorized(_('Signature does not match'))
|
||||
elif check_type == 'ec2':
|
||||
# NOTE(vish): hmac can't handle unicode, so encode ensures that
|
||||
# secret isn't unicode
|
||||
expected_signature = signer.Signer(user.secret.encode()).generate(
|
||||
params, verb, server_string, path)
|
||||
logging.debug('user.secret: %s', user.secret)
|
||||
logging.debug('expected_signature: %s', expected_signature)
|
||||
logging.debug('signature: %s', signature)
|
||||
LOG.debug('user.secret: %s', user.secret)
|
||||
LOG.debug('expected_signature: %s', expected_signature)
|
||||
LOG.debug('signature: %s', signature)
|
||||
if signature != expected_signature:
|
||||
LOG.audit(_("Invalid signature for user %s"), user.name)
|
||||
raise exception.NotAuthorized(_('Signature does not match'))
|
||||
return (user, project)
|
||||
|
||||
@@ -398,6 +409,12 @@ class AuthManager(object):
|
||||
raise exception.NotFound(_("The %s role can not be found") % role)
|
||||
if project is not None and role in FLAGS.global_roles:
|
||||
raise exception.NotFound(_("The %s role is global only") % role)
|
||||
if project:
|
||||
LOG.audit(_("Adding role %s to user %s in project %s"), role,
|
||||
User.safe_id(user), Project.safe_id(project))
|
||||
else:
|
||||
LOG.audit(_("Adding sitewide role %s to user %s"), role,
|
||||
User.safe_id(user))
|
||||
with self.driver() as drv:
|
||||
drv.add_role(User.safe_id(user), role, Project.safe_id(project))
|
||||
|
||||
@@ -418,6 +435,12 @@ class AuthManager(object):
|
||||
@type project: Project or project_id
|
||||
@param project: Project in which to remove local role.
|
||||
"""
|
||||
if project:
|
||||
LOG.audit(_("Removing role %s from user %s on project %s"),
|
||||
role, User.safe_id(user), Project.safe_id(project))
|
||||
else:
|
||||
LOG.audit(_("Removing sitewide role %s from user %s"), role,
|
||||
User.safe_id(user))
|
||||
with self.driver() as drv:
|
||||
drv.remove_role(User.safe_id(user), role, Project.safe_id(project))
|
||||
|
||||
@@ -480,6 +503,8 @@ class AuthManager(object):
|
||||
description,
|
||||
member_users)
|
||||
if project_dict:
|
||||
LOG.audit(_("Created project %s with manager %s"), name,
|
||||
manager_user)
|
||||
project = Project(**project_dict)
|
||||
return project
|
||||
|
||||
@@ -496,6 +521,7 @@ class AuthManager(object):
|
||||
@param project: This will be the new description of the project.
|
||||
|
||||
"""
|
||||
LOG.audit(_("modifying project %s"), Project.safe_id(project))
|
||||
if manager_user:
|
||||
manager_user = User.safe_id(manager_user)
|
||||
with self.driver() as drv:
|
||||
@@ -505,6 +531,8 @@ class AuthManager(object):
|
||||
|
||||
def add_to_project(self, user, project):
|
||||
"""Add user to project"""
|
||||
LOG.audit(_("Adding user %s to project %s"), User.safe_id(user),
|
||||
Project.safe_id(project))
|
||||
with self.driver() as drv:
|
||||
return drv.add_to_project(User.safe_id(user),
|
||||
Project.safe_id(project))
|
||||
@@ -523,6 +551,8 @@ class AuthManager(object):
|
||||
|
||||
def remove_from_project(self, user, project):
|
||||
"""Removes a user from a project"""
|
||||
LOG.audit(_("Remove user %s from project %s"), User.safe_id(user),
|
||||
Project.safe_id(project))
|
||||
with self.driver() as drv:
|
||||
return drv.remove_from_project(User.safe_id(user),
|
||||
Project.safe_id(project))
|
||||
@@ -549,6 +579,7 @@ class AuthManager(object):
|
||||
|
||||
def delete_project(self, project):
|
||||
"""Deletes a project"""
|
||||
LOG.audit(_("Deleting project %s"), Project.safe_id(project))
|
||||
with self.driver() as drv:
|
||||
drv.delete_project(Project.safe_id(project))
|
||||
|
||||
@@ -603,13 +634,16 @@ class AuthManager(object):
|
||||
with self.driver() as drv:
|
||||
user_dict = drv.create_user(name, access, secret, admin)
|
||||
if user_dict:
|
||||
return User(**user_dict)
|
||||
rv = User(**user_dict)
|
||||
LOG.audit(_("Created user %s (admin: %r)"), rv.name, rv.admin)
|
||||
return rv
|
||||
|
||||
def delete_user(self, user):
|
||||
"""Deletes a user
|
||||
|
||||
Additionally deletes all users key_pairs"""
|
||||
uid = User.safe_id(user)
|
||||
LOG.audit(_("Deleting user %s"), uid)
|
||||
db.key_pair_destroy_all_by_user(context.get_admin_context(),
|
||||
uid)
|
||||
with self.driver() as drv:
|
||||
@@ -618,6 +652,12 @@ class AuthManager(object):
|
||||
def modify_user(self, user, access_key=None, secret_key=None, admin=None):
|
||||
"""Modify credentials for a user"""
|
||||
uid = User.safe_id(user)
|
||||
if access_key:
|
||||
LOG.audit(_("Access Key change for user %s"), uid)
|
||||
if secret_key:
|
||||
LOG.audit(_("Secret Key change for user %s"), uid)
|
||||
if admin is not None:
|
||||
LOG.audit(_("Admin status set to %r for user %s"), admin, uid)
|
||||
with self.driver() as drv:
|
||||
drv.modify_user(uid, access_key, secret_key, admin)
|
||||
|
||||
@@ -666,7 +706,7 @@ class AuthManager(object):
|
||||
port=vpn_port)
|
||||
zippy.writestr(FLAGS.credential_vpn_file, config)
|
||||
else:
|
||||
logging.warn(_("No vpn data for project %s"), pid)
|
||||
LOG.warn(_("No vpn data for project %s"), pid)
|
||||
|
||||
zippy.writestr(FLAGS.ca_file, crypto.fetch_ca(pid))
|
||||
zippy.close()
|
||||
|
@@ -46,7 +46,6 @@ Utility class for parsing signed AMI manifests.
|
||||
import base64
|
||||
import hashlib
|
||||
import hmac
|
||||
import logging
|
||||
import urllib
|
||||
|
||||
# NOTE(vish): for new boto
|
||||
@@ -54,9 +53,13 @@ import boto
|
||||
# NOTE(vish): for old boto
|
||||
import boto.utils
|
||||
|
||||
from nova import log as logging
|
||||
from nova.exception import Error
|
||||
|
||||
|
||||
LOG = logging.getLogger('nova.signer')
|
||||
|
||||
|
||||
class Signer(object):
|
||||
"""Hacked up code from boto/connection.py"""
|
||||
|
||||
@@ -120,7 +123,7 @@ class Signer(object):
|
||||
|
||||
def _calc_signature_2(self, params, verb, server_string, path):
|
||||
"""Generate AWS signature version 2 string."""
|
||||
logging.debug('using _calc_signature_2')
|
||||
LOG.debug('using _calc_signature_2')
|
||||
string_to_sign = '%s\n%s\n%s\n' % (verb, server_string, path)
|
||||
if self.hmac_256:
|
||||
current_hmac = self.hmac_256
|
||||
@@ -136,13 +139,13 @@ class Signer(object):
|
||||
val = urllib.quote(val, safe='-_~')
|
||||
pairs.append(urllib.quote(key, safe='') + '=' + val)
|
||||
qs = '&'.join(pairs)
|
||||
logging.debug('query string: %s', qs)
|
||||
LOG.debug('query string: %s', qs)
|
||||
string_to_sign += qs
|
||||
logging.debug('string_to_sign: %s', string_to_sign)
|
||||
LOG.debug('string_to_sign: %s', string_to_sign)
|
||||
current_hmac.update(string_to_sign)
|
||||
b64 = base64.b64encode(current_hmac.digest())
|
||||
logging.debug('len(b64)=%d', len(b64))
|
||||
logging.debug('base64 encoded digest: %s', b64)
|
||||
LOG.debug('len(b64)=%d', len(b64))
|
||||
LOG.debug('base64 encoded digest: %s', b64)
|
||||
return b64
|
||||
|
||||
|
||||
|
@@ -18,12 +18,16 @@
|
||||
|
||||
"""Based a bit on the carrot.backeds.queue backend... but a lot better."""
|
||||
|
||||
import logging
|
||||
import Queue as queue
|
||||
|
||||
from carrot.backends import base
|
||||
from eventlet import greenthread
|
||||
|
||||
from nova import log as logging
|
||||
|
||||
|
||||
LOG = logging.getLogger("nova.fakerabbit")
|
||||
|
||||
|
||||
EXCHANGES = {}
|
||||
QUEUES = {}
|
||||
@@ -41,12 +45,12 @@ class Exchange(object):
|
||||
self._routes = {}
|
||||
|
||||
def publish(self, message, routing_key=None):
|
||||
logging.debug(_('(%s) publish (key: %s) %s'),
|
||||
self.name, routing_key, message)
|
||||
LOG.debug(_('(%s) publish (key: %s) %s'),
|
||||
self.name, routing_key, message)
|
||||
routing_key = routing_key.split('.')[0]
|
||||
if routing_key in self._routes:
|
||||
for f in self._routes[routing_key]:
|
||||
logging.debug(_('Publishing to route %s'), f)
|
||||
LOG.debug(_('Publishing to route %s'), f)
|
||||
f(message, routing_key=routing_key)
|
||||
|
||||
def bind(self, callback, routing_key):
|
||||
@@ -76,19 +80,19 @@ class Backend(base.BaseBackend):
|
||||
def queue_declare(self, queue, **kwargs):
|
||||
global QUEUES
|
||||
if queue not in QUEUES:
|
||||
logging.debug(_('Declaring queue %s'), queue)
|
||||
LOG.debug(_('Declaring queue %s'), queue)
|
||||
QUEUES[queue] = Queue(queue)
|
||||
|
||||
def exchange_declare(self, exchange, type, *args, **kwargs):
|
||||
global EXCHANGES
|
||||
if exchange not in EXCHANGES:
|
||||
logging.debug(_('Declaring exchange %s'), exchange)
|
||||
LOG.debug(_('Declaring exchange %s'), exchange)
|
||||
EXCHANGES[exchange] = Exchange(exchange, type)
|
||||
|
||||
def queue_bind(self, queue, exchange, routing_key, **kwargs):
|
||||
global EXCHANGES
|
||||
global QUEUES
|
||||
logging.debug(_('Binding %s to %s with key %s'),
|
||||
LOG.debug(_('Binding %s to %s with key %s'),
|
||||
queue, exchange, routing_key)
|
||||
EXCHANGES[exchange].bind(QUEUES[queue].push, routing_key)
|
||||
|
||||
@@ -113,7 +117,7 @@ class Backend(base.BaseBackend):
|
||||
content_type=content_type,
|
||||
content_encoding=content_encoding)
|
||||
message.result = True
|
||||
logging.debug(_('Getting from %s: %s'), queue, message)
|
||||
LOG.debug(_('Getting from %s: %s'), queue, message)
|
||||
return message
|
||||
|
||||
def prepare_message(self, message_data, delivery_mode,
|
||||
|
@@ -29,8 +29,6 @@ import sys

import gflags

from nova import utils


class FlagValues(gflags.FlagValues):
    """Extension of gflags.FlagValues that allows undefined and runtime flags.
@@ -213,10 +211,10 @@ DEFINE_string('connection_type', 'libvirt', 'libvirt, xenapi or fake')
DEFINE_string('aws_access_key_id', 'admin', 'AWS Access ID')
DEFINE_string('aws_secret_access_key', 'admin', 'AWS Access Key')
DEFINE_integer('glance_port', 9292, 'glance port')
DEFINE_string('glance_host', utils.get_my_ip(), 'glance host')
DEFINE_string('glance_host', '127.0.0.1', 'glance host')
DEFINE_integer('s3_port', 3333, 's3 port')
DEFINE_string('s3_host', utils.get_my_ip(), 's3 host (for infrastructure)')
DEFINE_string('s3_dmz', utils.get_my_ip(), 's3 dmz ip (for instances)')
DEFINE_string('s3_host', '127.0.0.1', 's3 host (for infrastructure)')
DEFINE_string('s3_dmz', '127.0.0.1', 's3 dmz ip (for instances)')
DEFINE_string('compute_topic', 'compute', 'the topic compute nodes listen on')
DEFINE_string('scheduler_topic', 'scheduler',
              'the topic scheduler nodes listen on')
@@ -236,8 +234,8 @@ DEFINE_integer('rabbit_retry_interval', 10, 'rabbit connection retry interval')
DEFINE_integer('rabbit_max_retries', 12, 'rabbit connection attempts')
DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to')
DEFINE_string('ec2_prefix', 'http', 'prefix for ec2')
DEFINE_string('cc_host', utils.get_my_ip(), 'ip of api server')
DEFINE_string('cc_dmz', utils.get_my_ip(), 'internal ip of api server')
DEFINE_string('cc_host', '127.0.0.1', 'ip of api server')
DEFINE_string('cc_dmz', '127.0.0.1', 'internal ip of api server')
DEFINE_integer('cc_port', 8773, 'cloud controller port')
DEFINE_string('ec2_suffix', '/services/Cloud', 'suffix for ec2')
261  nova/log.py  Normal file
@@ -0,0 +1,261 @@
|
||||
# vim: tabstop=4 shiftwidth=4 softtabstop=4
|
||||
|
||||
# Copyright 2010 United States Government as represented by the
|
||||
# Administrator of the National Aeronautics and Space Administration.
|
||||
# All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
||||
# not use this file except in compliance with the License. You may obtain
|
||||
# a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
||||
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
"""
|
||||
Nova logging handler.
|
||||
|
||||
This module adds to logging functionality by adding the option to specify
|
||||
a context object when calling the various log methods. If the context object
|
||||
is not specified, default formatting is used.
|
||||
|
||||
It also allows setting of formatting information through flags.
|
||||
"""
|
||||
|
||||
|
||||
import cStringIO
|
||||
import json
|
||||
import logging
|
||||
import logging.handlers
|
||||
import traceback
|
||||
|
||||
from nova import flags
|
||||
# TODO(todd): fix after version.py merge
|
||||
# from nova import version
|
||||
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
|
||||
# TODO(todd): fix after version.py merge
|
||||
# '(%(name)s %(nova_version)s): %(levelname)s '
|
||||
flags.DEFINE_string('logging_context_format_string',
|
||||
'(%(name)s): %(levelname)s '
|
||||
'[%(request_id)s %(user)s '
|
||||
'%(project)s] %(message)s',
|
||||
'format string to use for log messages')
|
||||
|
||||
# TODO(todd): fix after version.py merge
|
||||
# '(%(name)s %(nova_version)s): %(levelname)s [N/A] '
|
||||
flags.DEFINE_string('logging_default_format_string',
|
||||
'(%(name)s): %(levelname)s [N/A] '
|
||||
'%(message)s',
|
||||
'format string to use for log messages')
|
||||
|
||||
flags.DEFINE_string('logging_debug_format_suffix',
|
||||
'from %(processName)s (pid=%(process)d) %(funcName)s'
|
||||
' %(pathname)s:%(lineno)d',
|
||||
'data to append to log format when level is DEBUG')
|
||||
|
||||
flags.DEFINE_string('logging_exception_prefix',
|
||||
'(%(name)s): TRACE: ',
|
||||
'prefix each line of exception output with this format')
|
||||
|
||||
flags.DEFINE_list('default_log_levels',
|
||||
['amqplib=WARN',
|
||||
'sqlalchemy=WARN',
|
||||
'audit=INFO'],
|
||||
'list of logger=LEVEL pairs')
|
||||
|
||||
flags.DEFINE_bool('use_syslog', False, 'output to syslog')
|
||||
flags.DEFINE_string('logfile', None, 'output to named file')
|
||||
|
||||
|
||||
|
||||
# A list of things we want to replicate from logging.
|
||||
# levels
|
||||
CRITICAL = logging.CRITICAL
|
||||
FATAL = logging.FATAL
|
||||
ERROR = logging.ERROR
|
||||
WARNING = logging.WARNING
|
||||
WARN = logging.WARN
|
||||
INFO = logging.INFO
|
||||
DEBUG = logging.DEBUG
|
||||
NOTSET = logging.NOTSET
|
||||
# methods
|
||||
getLogger = logging.getLogger
|
||||
debug = logging.debug
|
||||
info = logging.info
|
||||
warning = logging.warning
|
||||
warn = logging.warn
|
||||
error = logging.error
|
||||
exception = logging.exception
|
||||
critical = logging.critical
|
||||
log = logging.log
|
||||
# handlers
|
||||
StreamHandler = logging.StreamHandler
|
||||
FileHandler = logging.FileHandler
|
||||
# logging.SysLogHandler is nicer than logging.logging.handler.SysLogHandler.
|
||||
SysLogHandler = logging.handlers.SysLogHandler
|
||||
|
||||
|
||||
# our new audit level
|
||||
AUDIT = logging.INFO + 1
|
||||
logging.addLevelName(AUDIT, 'AUDIT')
|
||||
|
||||
|
||||
def _dictify_context(context):
|
||||
if context == None:
|
||||
return None
|
||||
if not isinstance(context, dict) \
|
||||
and getattr(context, 'to_dict', None):
|
||||
context = context.to_dict()
|
||||
return context
|
||||
|
||||
|
||||
def basicConfig():
|
||||
logging.basicConfig()
|
||||
for handler in logging.root.handlers:
|
||||
handler.setFormatter(_formatter)
|
||||
if FLAGS.verbose:
|
||||
logging.root.setLevel(logging.DEBUG)
|
||||
if FLAGS.use_syslog:
|
||||
syslog = SysLogHandler(address='/dev/log')
|
||||
syslog.setFormatter(_formatter)
|
||||
logging.root.addHandler(syslog)
|
||||
if FLAGS.logfile:
|
||||
logfile = FileHandler(FLAGS.logfile)
|
||||
logfile.setFormatter(_formatter)
|
||||
logging.root.addHandler(logfile)
|
||||
|
||||
|
||||
class NovaLogger(logging.Logger):
|
||||
"""
|
||||
NovaLogger manages request context and formatting.
|
||||
|
||||
This becomes the class that is instantiated by logging.getLogger.
|
||||
"""
|
||||
def __init__(self, name, level=NOTSET):
|
||||
level_name = self._get_level_from_flags(name, FLAGS)
|
||||
level = globals()[level_name]
|
||||
logging.Logger.__init__(self, name, level)
|
||||
|
||||
def _get_level_from_flags(self, name, FLAGS):
|
||||
# if exactly "nova", or a child logger, honor the verbose flag
|
||||
if (name == "nova" or name.startswith("nova.")) and FLAGS.verbose:
|
||||
return 'DEBUG'
|
||||
for pair in FLAGS.default_log_levels:
|
||||
logger, _sep, level = pair.partition('=')
|
||||
# NOTE(todd): if we set a.b, we want a.b.c to have the same level
|
||||
# (but not a.bc, so we check the dot)
|
||||
if name == logger:
|
||||
return level
|
||||
if name.startswith(logger) and name[len(logger)] == '.':
|
||||
return level
|
||||
return 'INFO'
|
||||
|
||||
def _log(self, level, msg, args, exc_info=None, extra=None, context=None):
|
||||
"""Extract context from any log call"""
|
||||
if not extra:
|
||||
extra = {}
|
||||
if context:
|
||||
extra.update(_dictify_context(context))
|
||||
# TODO(todd): fix after version.py merge
|
||||
#extra.update({"nova_version": version.string_with_vcs()})
|
||||
logging.Logger._log(self, level, msg, args, exc_info, extra)
|
||||
|
||||
def addHandler(self, handler):
|
||||
"""Each handler gets our custom formatter"""
|
||||
handler.setFormatter(_formatter)
|
||||
logging.Logger.addHandler(self, handler)
|
||||
|
||||
def audit(self, msg, *args, **kwargs):
|
||||
"""Shortcut for our AUDIT level"""
|
||||
if self.isEnabledFor(AUDIT):
|
||||
self._log(AUDIT, msg, args, **kwargs)
|
||||
|
||||
def exception(self, msg, *args, **kwargs):
|
||||
"""Logging.exception doesn't handle kwargs, so breaks context"""
|
||||
if not kwargs.get('exc_info'):
|
||||
kwargs['exc_info'] = 1
|
||||
self.error(msg, *args, **kwargs)
|
||||
# NOTE(todd): does this really go here, or in _log ?
|
||||
extra = kwargs.get('extra')
|
||||
if not extra:
|
||||
return
|
||||
env = extra.get('environment')
|
||||
if env:
|
||||
env = env.copy()
|
||||
for k in env.keys():
|
||||
if not isinstance(env[k], str):
|
||||
env.pop(k)
|
||||
message = "Environment: %s" % json.dumps(env)
|
||||
kwargs.pop('exc_info')
|
||||
self.error(message, **kwargs)
|
||||
|
||||
logging.setLoggerClass(NovaLogger)
|
||||
|
||||
|
||||
class NovaRootLogger(NovaLogger):
|
||||
pass
|
||||
|
||||
if not isinstance(logging.root, NovaRootLogger):
|
||||
logging.root = NovaRootLogger("nova.root", WARNING)
|
||||
NovaLogger.root = logging.root
|
||||
NovaLogger.manager.root = logging.root
|
||||
|
||||
|
||||
class NovaFormatter(logging.Formatter):
|
||||
"""
|
||||
A nova.context.RequestContext aware formatter configured through flags.
|
||||
|
||||
The flags used to set format strings are: logging_context_format_string
|
||||
and logging_default_format_string. You can also specify
|
||||
logging_debug_format_suffix to append extra formatting if the log level is
|
||||
debug.
|
||||
|
||||
For information about what variables are available for the formatter see:
|
||||
http://docs.python.org/library/logging.html#formatter
|
||||
"""
|
||||
|
||||
def format(self, record):
|
||||
"""Uses contextstring if request_id is set, otherwise default"""
|
||||
if record.__dict__.get('request_id', None):
|
||||
self._fmt = FLAGS.logging_context_format_string
|
||||
else:
|
||||
self._fmt = FLAGS.logging_default_format_string
|
||||
if record.levelno == logging.DEBUG \
|
||||
and FLAGS.logging_debug_format_suffix:
|
||||
self._fmt += " " + FLAGS.logging_debug_format_suffix
|
||||
# Cache this on the record, Logger will respect our formatted copy
|
||||
if record.exc_info:
|
||||
record.exc_text = self.formatException(record.exc_info, record)
|
||||
return logging.Formatter.format(self, record)
|
||||
|
||||
def formatException(self, exc_info, record=None):
|
||||
"""Format exception output with FLAGS.logging_exception_prefix"""
|
||||
if not record:
|
||||
return logging.Formatter.formatException(self, exc_info)
|
||||
stringbuffer = cStringIO.StringIO()
|
||||
traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
|
||||
None, stringbuffer)
|
||||
lines = stringbuffer.getvalue().split("\n")
|
||||
stringbuffer.close()
|
||||
formatted_lines = []
|
||||
for line in lines:
|
||||
pl = FLAGS.logging_exception_prefix % record.__dict__
|
||||
fl = "%s%s" % (pl, line)
|
||||
formatted_lines.append(fl)
|
||||
return "\n".join(formatted_lines)
|
||||
|
||||
_formatter = NovaFormatter()
|
||||
|
||||
|
||||
def audit(msg, *args, **kwargs):
|
||||
"""Shortcut for logging to root log with sevrity 'AUDIT'."""
|
||||
if len(logging.root.handlers) == 0:
|
||||
basicConfig()
|
||||
logging.root.log(AUDIT, msg, *args, **kwargs)
|
26  nova/rpc.py
@@ -22,7 +22,6 @@ No fan-out support yet.
|
||||
"""
|
||||
|
||||
import json
|
||||
import logging
|
||||
import sys
|
||||
import time
|
||||
import traceback
|
||||
@@ -36,13 +35,12 @@ from nova import context
|
||||
from nova import exception
|
||||
from nova import fakerabbit
|
||||
from nova import flags
|
||||
from nova import log as logging
|
||||
from nova import utils
|
||||
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
|
||||
LOG = logging.getLogger('amqplib')
|
||||
LOG.setLevel(logging.DEBUG)
|
||||
LOG = logging.getLogger('nova.rpc')
|
||||
|
||||
|
||||
class Connection(carrot_connection.BrokerConnection):
|
||||
@@ -91,14 +89,14 @@ class Consumer(messaging.Consumer):
|
||||
self.failed_connection = False
|
||||
break
|
||||
except: # Catching all because carrot sucks
|
||||
logging.exception(_("AMQP server on %s:%d is unreachable."
|
||||
" Trying again in %d seconds.") % (
|
||||
FLAGS.rabbit_host,
|
||||
FLAGS.rabbit_port,
|
||||
FLAGS.rabbit_retry_interval))
|
||||
LOG.exception(_("AMQP server on %s:%d is unreachable."
|
||||
" Trying again in %d seconds.") % (
|
||||
FLAGS.rabbit_host,
|
||||
FLAGS.rabbit_port,
|
||||
FLAGS.rabbit_retry_interval))
|
||||
self.failed_connection = True
|
||||
if self.failed_connection:
|
||||
logging.exception(_("Unable to connect to AMQP server"
|
||||
LOG.exception(_("Unable to connect to AMQP server"
|
||||
" after %d tries. Shutting down.") % FLAGS.rabbit_max_retries)
|
||||
sys.exit(1)
|
||||
|
||||
@@ -116,14 +114,14 @@ class Consumer(messaging.Consumer):
|
||||
self.declare()
|
||||
super(Consumer, self).fetch(no_ack, auto_ack, enable_callbacks)
|
||||
if self.failed_connection:
|
||||
logging.error(_("Reconnected to queue"))
|
||||
LOG.error(_("Reconnected to queue"))
|
||||
self.failed_connection = False
|
||||
# NOTE(vish): This is catching all errors because we really don't
|
||||
# want exceptions to be logged 10 times a second if some
|
||||
# persistent failure occurs.
|
||||
except Exception: # pylint: disable-msg=W0703
|
||||
if not self.failed_connection:
|
||||
logging.exception(_("Failed to fetch message from queue"))
|
||||
LOG.exception(_("Failed to fetch message from queue"))
|
||||
self.failed_connection = True
|
||||
|
||||
def attach_to_eventlet(self):
|
||||
@@ -242,8 +240,8 @@ def msg_reply(msg_id, reply=None, failure=None):
|
||||
if failure:
|
||||
message = str(failure[1])
|
||||
tb = traceback.format_exception(*failure)
|
||||
logging.error(_("Returning exception %s to caller"), message)
|
||||
logging.error(tb)
|
||||
LOG.error(_("Returning exception %s to caller"), message)
|
||||
LOG.error(tb)
|
||||
failure = (failure[0].__name__, str(failure[1]), tb)
|
||||
conn = Connection.instance(True)
|
||||
publisher = DirectPublisher(connection=conn, msg_id=msg_id)
|
||||
|
@@ -23,7 +23,6 @@ Unittests for S3 objectstore clone.
import boto
import glob
import hashlib
import logging
import os
import shutil
import tempfile
@@ -63,7 +62,6 @@ class ObjectStoreTestCase(test.TestCase):
        self.flags(buckets_path=os.path.join(OSS_TEMPDIR, 'buckets'),
                   images_path=os.path.join(OSS_TEMPDIR, 'images'),
                   ca_path=os.path.join(os.path.dirname(__file__), 'CA'))
        logging.getLogger().setLevel(logging.DEBUG)

        self.auth_manager = manager.AuthManager()
        self.auth_manager.create_user('user1')
@@ -17,7 +17,6 @@
# under the License.

import unittest
import logging
import webob

from nova import context
@@ -16,17 +16,18 @@
|
||||
# License for the specific language governing permissions and limitations
|
||||
# under the License.
|
||||
|
||||
import logging
|
||||
from M2Crypto import X509
|
||||
import unittest
|
||||
|
||||
from nova import crypto
|
||||
from nova import flags
|
||||
from nova import log as logging
|
||||
from nova import test
|
||||
from nova.auth import manager
|
||||
from nova.api.ec2 import cloud
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
LOG = logging.getLogger('nova.tests.auth_unittest')
|
||||
|
||||
|
||||
class user_generator(object):
|
||||
@@ -211,12 +212,12 @@ class AuthManagerTestCase(object):
|
||||
# NOTE(vish): Setup runs genroot.sh if it hasn't been run
|
||||
cloud.CloudController().setup()
|
||||
_key, cert_str = crypto.generate_x509_cert(user.id, project.id)
|
||||
logging.debug(cert_str)
|
||||
LOG.debug(cert_str)
|
||||
|
||||
full_chain = crypto.fetch_ca(project_id=project.id, chain=True)
|
||||
int_cert = crypto.fetch_ca(project_id=project.id, chain=False)
|
||||
cloud_cert = crypto.fetch_ca()
|
||||
logging.debug("CA chain:\n\n =====\n%s\n\n=====" % full_chain)
|
||||
LOG.debug("CA chain:\n\n =====\n%s\n\n=====", full_chain)
|
||||
signed_cert = X509.load_cert_string(cert_str)
|
||||
chain_cert = X509.load_cert_string(full_chain)
|
||||
int_cert = X509.load_cert_string(int_cert)
|
||||
@@ -331,7 +332,7 @@ class AuthManagerLdapTestCase(AuthManagerTestCase, test.TestCase):
|
||||
test.TestCase.__init__(self, *args, **kwargs)
|
||||
import nova.auth.fakeldap as fakeldap
|
||||
if FLAGS.flush_db:
|
||||
logging.info("Flushing datastore")
|
||||
LOG.info("Flushing datastore")
|
||||
r = fakeldap.Store.instance()
|
||||
r.flushdb()
|
||||
|
||||
|
@@ -18,7 +18,6 @@
|
||||
|
||||
from base64 import b64decode
|
||||
import json
|
||||
import logging
|
||||
from M2Crypto import BIO
|
||||
from M2Crypto import RSA
|
||||
import os
|
||||
@@ -31,6 +30,7 @@ from nova import context
|
||||
from nova import crypto
|
||||
from nova import db
|
||||
from nova import flags
|
||||
from nova import log as logging
|
||||
from nova import rpc
|
||||
from nova import service
|
||||
from nova import test
|
||||
@@ -41,6 +41,7 @@ from nova.objectstore import image
|
||||
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
LOG = logging.getLogger('nova.tests.cloud')
|
||||
|
||||
# Temp dirs for working with image attributes through the cloud controller
|
||||
# (stole this from objectstore_unittest.py)
|
||||
@@ -56,7 +57,6 @@ class CloudTestCase(test.TestCase):
|
||||
images_path=IMAGES_PATH)
|
||||
|
||||
self.conn = rpc.Connection.instance()
|
||||
logging.getLogger().setLevel(logging.DEBUG)
|
||||
|
||||
# set up our cloud
|
||||
self.cloud = cloud.CloudController()
|
||||
@@ -178,7 +178,7 @@ class CloudTestCase(test.TestCase):
|
||||
|
||||
def test_run_instances(self):
|
||||
if FLAGS.connection_type == 'fake':
|
||||
logging.debug("Can't test instances without a real virtual env.")
|
||||
LOG.debug(_("Can't test instances without a real virtual env."))
|
||||
return
|
||||
image_id = FLAGS.default_image
|
||||
instance_type = FLAGS.default_instance_type
|
||||
@@ -190,25 +190,25 @@ class CloudTestCase(test.TestCase):
|
||||
# TODO: check for proper response
|
||||
instance_id = rv['reservationSet'][0].keys()[0]
|
||||
instance = rv['reservationSet'][0][instance_id][0]
|
||||
logging.debug("Need to watch instance %s until it's running..." %
|
||||
instance['instance_id'])
|
||||
LOG.debug(_("Need to watch instance %s until it's running..."),
|
||||
instance['instance_id'])
|
||||
while True:
|
||||
greenthread.sleep(1)
|
||||
info = self.cloud._get_instance(instance['instance_id'])
|
||||
logging.debug(info['state'])
|
||||
LOG.debug(info['state'])
|
||||
if info['state'] == power_state.RUNNING:
|
||||
break
|
||||
self.assert_(rv)
|
||||
|
||||
if connection_type != 'fake':
|
||||
if FLAGS.connection_type != 'fake':
|
||||
time.sleep(45) # Should use boto for polling here
|
||||
for reservations in rv['reservationSet']:
|
||||
# for res_id in reservations.keys():
|
||||
# logging.debug(reservations[res_id])
|
||||
# LOG.debug(reservations[res_id])
|
||||
# for instance in reservations[res_id]:
|
||||
for instance in reservations[reservations.keys()[0]]:
|
||||
instance_id = instance['instance_id']
|
||||
logging.debug("Terminating instance %s" % instance_id)
|
||||
LOG.debug(_("Terminating instance %s"), instance_id)
|
||||
rv = yield self.compute.terminate_instance(instance_id)
|
||||
|
||||
def test_instance_update_state(self):
|
||||
|
@@ -20,12 +20,12 @@ Tests For Compute
|
||||
"""
|
||||
|
||||
import datetime
|
||||
import logging
|
||||
|
||||
from nova import context
|
||||
from nova import db
|
||||
from nova import exception
|
||||
from nova import flags
|
||||
from nova import log as logging
|
||||
from nova import test
|
||||
from nova import utils
|
||||
from nova.auth import manager
|
||||
@@ -33,12 +33,12 @@ from nova.compute import api as compute_api
|
||||
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
LOG = logging.getLogger('nova.tests.compute')
|
||||
|
||||
|
||||
class ComputeTestCase(test.TestCase):
|
||||
"""Test case for compute"""
|
||||
def setUp(self):
|
||||
logging.getLogger().setLevel(logging.DEBUG)
|
||||
super(ComputeTestCase, self).setUp()
|
||||
self.flags(connection_type='fake',
|
||||
stub_network=True,
|
||||
@@ -101,13 +101,13 @@ class ComputeTestCase(test.TestCase):
|
||||
self.compute.run_instance(self.context, instance_id)
|
||||
|
||||
instances = db.instance_get_all(context.get_admin_context())
|
||||
logging.info(_("Running instances: %s"), instances)
|
||||
LOG.info(_("Running instances: %s"), instances)
|
||||
self.assertEqual(len(instances), 1)
|
||||
|
||||
self.compute.terminate_instance(self.context, instance_id)
|
||||
|
||||
instances = db.instance_get_all(context.get_admin_context())
|
||||
logging.info(_("After terminating instances: %s"), instances)
|
||||
LOG.info(_("After terminating instances: %s"), instances)
|
||||
self.assertEqual(len(instances), 0)
|
||||
|
||||
def test_run_terminate_timestamps(self):
|
||||
|
107  nova/tests/test_log.py  Normal file
@@ -0,0 +1,107 @@
|
||||
import cStringIO
|
||||
|
||||
from nova import context
|
||||
from nova import log
|
||||
from nova import test
|
||||
|
||||
|
||||
def _fake_context():
|
||||
return context.RequestContext(1, 1)
|
||||
|
||||
|
||||
class RootLoggerTestCase(test.TrialTestCase):
|
||||
def setUp(self):
|
||||
super(RootLoggerTestCase, self).setUp()
|
||||
self.log = log.logging.root
|
||||
|
||||
def tearDown(self):
|
||||
super(RootLoggerTestCase, self).tearDown()
|
||||
log.NovaLogger.manager.loggerDict = {}
|
||||
|
||||
def test_is_nova_instance(self):
|
||||
self.assert_(isinstance(self.log, log.NovaLogger))
|
||||
|
||||
def test_name_is_nova_root(self):
|
||||
self.assertEqual("nova.root", self.log.name)
|
||||
|
||||
def test_handlers_have_nova_formatter(self):
|
||||
formatters = []
|
||||
for h in self.log.handlers:
|
||||
f = h.formatter
|
||||
if isinstance(f, log.NovaFormatter):
|
||||
formatters.append(f)
|
||||
self.assert_(formatters)
|
||||
self.assertEqual(len(formatters), len(self.log.handlers))
|
||||
|
||||
def test_handles_context_kwarg(self):
|
||||
self.log.info("foo", context=_fake_context())
|
||||
self.assert_(True) # didn't raise exception
|
||||
|
||||
def test_module_level_methods_handle_context_arg(self):
|
||||
log.info("foo", context=_fake_context())
|
||||
self.assert_(True) # didn't raise exception
|
||||
|
||||
def test_module_level_audit_handles_context_arg(self):
|
||||
log.audit("foo", context=_fake_context())
|
||||
self.assert_(True) # didn't raise exception
|
||||
|
||||
class NovaFormatterTestCase(test.TrialTestCase):
|
||||
def setUp(self):
|
||||
super(NovaFormatterTestCase, self).setUp()
|
||||
self.flags(logging_context_format_string="HAS CONTEXT "\
|
||||
"[%(request_id)s]: %(message)s",
|
||||
logging_default_format_string="NOCTXT: %(message)s",
|
||||
logging_debug_format_suffix="--DBG")
|
||||
self.log = log.logging.root
|
||||
self.stream = cStringIO.StringIO()
|
||||
handler = log.StreamHandler(self.stream)
|
||||
self.log.addHandler(handler)
|
||||
self.log.setLevel(log.DEBUG)
|
||||
|
||||
def tearDown(self):
|
||||
super(NovaFormatterTestCase, self).tearDown()
|
||||
log.NovaLogger.manager.loggerDict = {}
|
||||
|
||||
def test_uncontextualized_log(self):
|
||||
self.log.info("foo")
|
||||
self.assertEqual("NOCTXT: foo\n", self.stream.getvalue())
|
||||
|
||||
def test_contextualized_log(self):
|
||||
ctxt = _fake_context()
|
||||
self.log.info("bar", context=ctxt)
|
||||
expected = "HAS CONTEXT [%s]: bar\n" % ctxt.request_id
|
||||
self.assertEqual(expected, self.stream.getvalue())
|
||||
|
||||
def test_debugging_log(self):
|
||||
self.log.debug("baz")
|
||||
self.assertEqual("NOCTXT: baz --DBG\n", self.stream.getvalue())
|
||||
|
||||
class NovaLoggerTestCase(test.TrialTestCase):
|
||||
def setUp(self):
|
||||
super(NovaLoggerTestCase, self).setUp()
|
||||
self.flags(default_log_levels=["nova-test=AUDIT"], verbose=False)
|
||||
self.log = log.getLogger('nova-test')
|
||||
|
||||
def tearDown(self):
|
||||
super(NovaLoggerTestCase, self).tearDown()
|
||||
log.NovaLogger.manager.loggerDict = {}
|
||||
|
||||
def test_has_level_from_flags(self):
|
||||
self.assertEqual(log.AUDIT, self.log.level)
|
||||
|
||||
def test_child_log_has_level_of_parent_flag(self):
|
||||
l = log.getLogger('nova-test.foo')
|
||||
self.assertEqual(log.AUDIT, l.level)
|
||||
|
||||
class VerboseLoggerTestCase(test.TrialTestCase):
|
||||
def setUp(self):
|
||||
super(VerboseLoggerTestCase, self).setUp()
|
||||
self.flags(default_log_levels=["nova.test=AUDIT"], verbose=True)
|
||||
self.log = log.getLogger('nova.test')
|
||||
|
||||
def tearDown(self):
|
||||
super(VerboseLoggerTestCase, self).tearDown()
|
||||
log.NovaLogger.manager.loggerDict = {}
|
||||
|
||||
def test_will_be_verbose_if_named_nova_and_verbose_flag_set(self):
|
||||
self.assertEqual(log.DEBUG, self.log.level)
|
@@ -20,18 +20,18 @@ Unit Tests for network code
|
||||
"""
|
||||
import IPy
|
||||
import os
|
||||
import logging
|
||||
|
||||
from nova import context
|
||||
from nova import db
|
||||
from nova import exception
|
||||
from nova import flags
|
||||
from nova import service
|
||||
from nova import log as logging
|
||||
from nova import test
|
||||
from nova import utils
|
||||
from nova.auth import manager
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
LOG = logging.getLogger('nova.tests.network')
|
||||
|
||||
|
||||
class NetworkTestCase(test.TestCase):
|
||||
@@ -45,7 +45,6 @@ class NetworkTestCase(test.TestCase):
|
||||
fake_network=True,
|
||||
network_size=16,
|
||||
num_networks=5)
|
||||
logging.getLogger().setLevel(logging.DEBUG)
|
||||
self.manager = manager.AuthManager()
|
||||
self.user = self.manager.create_user('netuser', 'netuser', 'netuser')
|
||||
self.projects = []
|
||||
@@ -328,7 +327,7 @@ def lease_ip(private_ip):
|
||||
'TESTING': '1',
|
||||
'FLAGFILE': FLAGS.dhcpbridge_flagfile}
|
||||
(out, err) = utils.execute(cmd, addl_env=env)
|
||||
logging.debug("ISSUE_IP: %s, %s ", out, err)
|
||||
LOG.debug("ISSUE_IP: %s, %s ", out, err)
|
||||
|
||||
|
||||
def release_ip(private_ip):
|
||||
@@ -344,4 +343,4 @@ def release_ip(private_ip):
|
||||
'TESTING': '1',
|
||||
'FLAGFILE': FLAGS.dhcpbridge_flagfile}
|
||||
(out, err) = utils.execute(cmd, addl_env=env)
|
||||
logging.debug("RELEASE_IP: %s, %s ", out, err)
|
||||
LOG.debug("RELEASE_IP: %s, %s ", out, err)
|
||||
|
@@ -18,15 +18,16 @@
|
||||
"""
|
||||
Unit Tests for remote procedure calls using queue
|
||||
"""
|
||||
import logging
|
||||
|
||||
from nova import context
|
||||
from nova import flags
|
||||
from nova import log as logging
|
||||
from nova import rpc
|
||||
from nova import test
|
||||
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
LOG = logging.getLogger('nova.tests.rpc')
|
||||
|
||||
|
||||
class RpcTestCase(test.TestCase):
|
||||
@@ -115,13 +116,13 @@ class TestReceiver(object):
|
||||
@staticmethod
|
||||
def echo(context, value):
|
||||
"""Simply returns whatever value is sent in"""
|
||||
logging.debug("Received %s", value)
|
||||
LOG.debug(_("Received %s"), value)
|
||||
return value
|
||||
|
||||
@staticmethod
|
||||
def context(context, value):
|
||||
"""Returns dictionary version of context"""
|
||||
logging.debug("Received %s", context)
|
||||
LOG.debug(_("Received %s"), context)
|
||||
return context.to_dict()
|
||||
|
||||
@staticmethod
|
||||
|
@@ -19,23 +19,23 @@
|
||||
Tests for Volume Code.
|
||||
|
||||
"""
|
||||
import logging
|
||||
|
||||
from nova import context
|
||||
from nova import exception
|
||||
from nova import db
|
||||
from nova import flags
|
||||
from nova import log as logging
|
||||
from nova import test
|
||||
from nova import utils
|
||||
|
||||
FLAGS = flags.FLAGS
|
||||
LOG = logging.getLogger('nova.tests.volume')
|
||||
|
||||
|
||||
class VolumeTestCase(test.TestCase):
|
||||
"""Test Case for volumes."""
|
||||
|
||||
def setUp(self):
|
||||
logging.getLogger().setLevel(logging.DEBUG)
|
||||
super(VolumeTestCase, self).setUp()
|
||||
self.compute = utils.import_object(FLAGS.compute_manager)
|
||||
self.flags(connection_type='fake')
|
||||
@@ -159,7 +159,7 @@ class VolumeTestCase(test.TestCase):
|
||||
volume_id)
|
||||
self.assert_(iscsi_target not in targets)
|
||||
targets.append(iscsi_target)
|
||||
logging.debug("Target %s allocated", iscsi_target)
|
||||
LOG.debug(_("Target %s allocated"), iscsi_target)
|
||||
total_slots = FLAGS.iscsi_num_targets
|
||||
for _index in xrange(total_slots):
|
||||
volume_id = self._create_volume()
|
||||
|
@@ -22,7 +22,6 @@ manage pid files and support syslogging.
|
||||
"""
|
||||
|
||||
import gflags
|
||||
import logging
|
||||
import os
|
||||
import signal
|
||||
import sys
|
||||
@@ -34,6 +33,7 @@ from twisted.python import runtime
|
||||
from twisted.python import usage
|
||||
|
||||
from nova import flags
|
||||
from nova import log as logging
|
||||
|
||||
|
||||
if runtime.platformType == "win32":
|
||||
@@ -234,22 +234,12 @@ def serve(filename):
|
||||
OptionsClass = WrapTwistedOptions(TwistdServerOptions)
|
||||
options = OptionsClass()
|
||||
argv = options.parseOptions()
|
||||
logging.getLogger('amqplib').setLevel(logging.WARN)
|
||||
FLAGS.python = filename
|
||||
FLAGS.no_save = True
|
||||
if not FLAGS.pidfile:
|
||||
FLAGS.pidfile = '%s.pid' % name
|
||||
elif FLAGS.pidfile.endswith('twistd.pid'):
|
||||
FLAGS.pidfile = FLAGS.pidfile.replace('twistd.pid', '%s.pid' % name)
|
||||
# NOTE(vish): if we're running nodaemon, redirect the log to stdout
|
||||
if FLAGS.nodaemon and not FLAGS.logfile:
|
||||
FLAGS.logfile = "-"
|
||||
if not FLAGS.logfile:
|
||||
FLAGS.logfile = '%s.log' % name
|
||||
elif FLAGS.logfile.endswith('twistd.log'):
|
||||
FLAGS.logfile = FLAGS.logfile.replace('twistd.log', '%s.log' % name)
|
||||
if FLAGS.logdir:
|
||||
FLAGS.logfile = os.path.join(FLAGS.logdir, FLAGS.logfile)
|
||||
if not FLAGS.prefix:
|
||||
FLAGS.prefix = name
|
||||
elif FLAGS.prefix.endswith('twisted'):
|
||||
@@ -270,19 +260,10 @@ def serve(filename):
|
||||
print 'usage: %s [options] [start|stop|restart]' % argv[0]
|
||||
sys.exit(1)
|
||||
|
||||
formatter = logging.Formatter(
|
||||
'(%(name)s): %(levelname)s %(message)s')
|
||||
handler = logging.StreamHandler(log.StdioOnnaStick())
|
||||
handler.setFormatter(formatter)
|
||||
logging.getLogger().addHandler(handler)
|
||||
|
||||
if FLAGS.verbose:
|
||||
logging.getLogger().setLevel(logging.DEBUG)
|
||||
else:
|
||||
logging.getLogger().setLevel(logging.WARNING)
|
||||
|
||||
logging.basicConfig()
|
||||
logging.debug(_("Full set of FLAGS:"))
|
||||
for flag in FLAGS:
|
||||
logging.debug("%s : %s" % (flag, FLAGS.get(flag, None)))
|
||||
|
||||
logging.audit(_("Starting %s"), name)
|
||||
twistd.runApp(options)
|
||||
|