merge trunk

Anthony Young
2011-01-10 13:06:36 -08:00
34 changed files with 3417 additions and 161 deletions

View File

@@ -12,3 +12,4 @@ CA/openssl.cnf
CA/serial*
CA/newcerts/*.pem
CA/private/cakey.pem
nova/vcsversion.py

View File

@@ -30,3 +30,4 @@
<rconradharris@gmail.com> <rick.harris@rackspace.com>
<corywright@gmail.com> <cory.wright@rackspace.com>
<ant@openstack.org> <amesserl@rackspace.com>
<chiradeep@cloud.com> <chiradeep@chiradeep-lt2>

View File

@@ -3,6 +3,7 @@ Anne Gentle <anne@openstack.org>
Anthony Young <sleepsonthefloor@gmail.com>
Antony Messerli <ant@openstack.org>
Armando Migliaccio <Armando.Migliaccio@eu.citrix.com>
Chiradeep Vittal <chiradeep@cloud.com>
Chris Behrens <cbehrens@codestud.com>
Chmouel Boudjnah <chmouel@chmouel.com>
Cory Wright <corywright@gmail.com>
@@ -22,6 +23,7 @@ Jonathan Bryce <jbryce@jbryce.com>
Josh Kearney <josh.kearney@rackspace.com>
Joshua McKenty <jmckenty@gmail.com>
Justin Santa Barbara <justin@fathomdb.com>
Ken Pepple <ken.pepple@gmail.com>
Matt Dietz <matt.dietz@rackspace.com>
Michael Gundlach <michael.gundlach@rackspace.com>
Monty Taylor <mordred@inaugust.com>
@@ -39,4 +41,3 @@ Trey Morris <trey.morris@rackspace.com>
Vishvananda Ishaya <vishvananda@gmail.com>
Youcef Laribi <Youcef.Laribi@eu.citrix.com>
Zhixue Wu <Zhixue.Wu@citrix.com>

bin/nova-api-paste (new executable file, 109 lines)

@@ -0,0 +1,109 @@
#!/usr/bin/env python
# pylint: disable-msg=C0103
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Starter script for Nova API."""
import gettext
import os
import sys
from paste import deploy
# If ../nova/__init__.py exists, add ../ to Python search path, so that
# it will override what happens to be installed in /usr/(local/)lib/python...
possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]),
os.pardir,
os.pardir))
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
sys.path.insert(0, possible_topdir)
gettext.install('nova', unicode=1)
from nova import flags
from nova import log as logging
from nova import wsgi
LOG = logging.getLogger('nova.api')
LOG.setLevel(logging.DEBUG)
LOG.addHandler(logging.StreamHandler())
FLAGS = flags.FLAGS
API_ENDPOINTS = ['ec2', 'openstack']
def load_configuration(paste_config):
"""Load the paste configuration from the config file and return it."""
config = None
# Try each known name to get the global DEFAULTS, which will give ports
for name in API_ENDPOINTS:
try:
config = deploy.appconfig("config:%s" % paste_config, name=name)
except LookupError:
pass
if config:
verbose = config.get('verbose', None)
if verbose:
FLAGS.verbose = int(verbose) == 1
if FLAGS.verbose:
logging.getLogger().setLevel(logging.DEBUG)
return config
LOG.debug(_("Paste config at %s has no secion for known apis"),
paste_config)
print _("Paste config at %s has no secion for any known apis") % \
paste_config
sys.exit(1)
def launch_api(paste_config_file, section, server, port, host):
"""Launch an api server from the specified port and IP."""
LOG.debug(_("Launching %s api on %s:%s"), section, host, port)
app = deploy.loadapp('config:%s' % paste_config_file, name=section)
server.start(app, int(port), host)
def run_app(paste_config_file):
LOG.debug(_("Using paste.deploy config at: %s"), configfile)
config = load_configuration(paste_config_file)
LOG.debug(_("Configuration: %r"), config)
server = wsgi.Server()
ip = config.get('host', '0.0.0.0')
for api in API_ENDPOINTS:
port = config.get("%s_port" % api, None)
if not port:
continue
host = config.get("%s_host" % api, ip)
launch_api(paste_config_file, api, server, port, host)
LOG.debug(_("All api servers launched, now waiting"))
server.wait()
if __name__ == '__main__':
FLAGS(sys.argv)
configfiles = ['/etc/nova/nova-api.conf']
if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
configfiles.insert(0,
os.path.join(possible_topdir, 'etc', 'nova-api.conf'))
for configfile in configfiles:
if os.path.exists(configfile):
run_app(configfile)
break
else:
LOG.debug(_("Skipping missing configuration: %s"), configfile)

View File

@@ -22,7 +22,6 @@ Handle lease database updates from DHCP servers.
"""
import gettext
import logging
import os
import sys
@@ -39,6 +38,7 @@ gettext.install('nova', unicode=1)
from nova import context
from nova import db
from nova import flags
from nova import log as logging
from nova import rpc
from nova import utils
from nova.network import linux_net
@@ -49,11 +49,13 @@ flags.DECLARE('network_size', 'nova.network.manager')
flags.DECLARE('num_networks', 'nova.network.manager')
flags.DECLARE('update_dhcp_on_disassociate', 'nova.network.manager')
LOG = logging.getLogger('nova.dhcpbridge')
def add_lease(mac, ip_address, _hostname, _interface):
"""Set the IP that was assigned by the DHCP server."""
if FLAGS.fake_rabbit:
logging.debug("leasing ip")
LOG.debug(_("leasing ip"))
network_manager = utils.import_object(FLAGS.network_manager)
network_manager.lease_fixed_ip(context.get_admin_context(),
mac,
@@ -68,14 +70,14 @@ def add_lease(mac, ip_address, _hostname, _interface):
def old_lease(mac, ip_address, hostname, interface):
"""Update just as add lease."""
logging.debug("Adopted old lease or got a change of mac/hostname")
LOG.debug(_("Adopted old lease or got a change of mac/hostname"))
add_lease(mac, ip_address, hostname, interface)
def del_lease(mac, ip_address, _hostname, _interface):
"""Called when a lease expires."""
if FLAGS.fake_rabbit:
logging.debug("releasing ip")
LOG.debug(_("releasing ip"))
network_manager = utils.import_object(FLAGS.network_manager)
network_manager.release_fixed_ip(context.get_admin_context(),
mac,
@@ -100,6 +102,7 @@ def main():
flagfile = os.environ.get('FLAGFILE', FLAGS.dhcpbridge_flagfile)
utils.default_flagfile(flagfile)
argv = FLAGS(sys.argv)
logging.basicConfig()
interface = os.environ.get('DNSMASQ_INTERFACE', 'br0')
if int(os.environ.get('TESTING', '0')):
FLAGS.fake_rabbit = True
@@ -117,8 +120,8 @@ def main():
mac = argv[2]
ip = argv[3]
hostname = argv[4]
logging.debug("Called %s for mac %s with ip %s and "
"hostname %s on interface %s",
LOG.debug(_("Called %s for mac %s with ip %s and "
"hostname %s on interface %s"),
action, mac, ip, hostname, interface)
globals()[action + '_lease'](mac, ip, hostname, interface)
else:

View File

@@ -23,7 +23,6 @@
import gettext
import os
import logging
import sys
from twisted.application import service
@@ -37,19 +36,23 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')):
gettext.install('nova', unicode=1)
from nova import log as logging
from nova import utils
from nova import twistd
from nova.compute import monitor
# TODO(todd): shouldn't this be done with flags? And what about verbose?
logging.getLogger('boto').setLevel(logging.WARN)
LOG = logging.getLogger('nova.instancemonitor')
if __name__ == '__main__':
utils.default_flagfile()
twistd.serve(__file__)
if __name__ == '__builtin__':
logging.warn('Starting instance monitor')
LOG.warn(_('Starting instance monitor'))
# pylint: disable-msg=C0103
monitor = monitor.InstanceMonitor()

bin/nova-logspool (new file, 156 lines)

@@ -0,0 +1,156 @@
#!/usr/bin/env python
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Tools for working with logs generated by nova components
"""
import json
import os
import re
import sys
class Request(object):
def __init__(self):
self.time = ""
self.host = ""
self.logger = ""
self.message = ""
self.trace = ""
self.env = ""
self.request_id = ""
def add_error_line(self, error_line):
self.time = " ".join(error_line.split(" ")[:3])
self.host = error_line.split(" ")[3]
self.logger = error_line.split("(")[1].split(" ")[0]
self.request_id = error_line.split("[")[1].split(" ")[0]
error_lines = error_line.split("#012")
self.message = self.clean_log_line(error_lines.pop(0))
self.trace = "\n".join([self.clean_trace(l) for l in error_lines])
def add_environment_line(self, env_line):
self.env = self.clean_env_line(env_line)
def clean_log_line(self, line):
"""Remove log format for time, level, etc: split after context"""
return line.split('] ')[-1]
def clean_env_line(self, line):
"""Also has an 'Environment: ' string in the message"""
return re.sub(r'^Environment: ', '', self.clean_log_line(line))
def clean_trace(self, line):
"""trace has a different format, so split on TRACE:"""
return line.split('TRACE: ')[-1]
def to_dict(self):
return {'traceback': self.trace, 'message': self.message,
'host': self.host, 'env': self.env, 'logger': self.logger,
'request_id': self.request_id}
class LogReader(object):
def __init__(self, filename):
self.filename = filename
self._errors = {}
def process(self, spooldir):
with open(self.filename) as f:
line = f.readline()
while len(line) > 0:
parts = line.split(" ")
level = (len(parts) < 6) or parts[5]
if level == 'ERROR':
self.handle_logged_error(line)
elif level == '[-]' and self.last_error:
# twisted stack trace line
clean_line = " ".join(line.split(" ")[6:])
self.last_error.trace = self.last_error.trace + clean_line
else:
self.last_error = None
line = f.readline()
self.update_spool(spooldir)
def handle_logged_error(self, line):
request_id = re.search(r' \[([A-Z0-9\-/]+)', line)
if not request_id:
raise Exception("Unable to parse request id from %s" % line)
request_id = request_id.group(1)
data = self._errors.get(request_id, Request())
if self.is_env_line(line):
data.add_environment_line(line)
elif self.is_error_line(line):
data.add_error_line(line)
else:
# possibly an error from twisted
data.add_error_line(line)
self.last_error = data
self._errors[request_id] = data
def is_env_line(self, line):
return re.search('Environment: ', line)
def is_error_line(self, line):
return re.search('raised', line)
def update_spool(self, directory):
processed_dir = "%s/processed" % directory
self._ensure_dir_exists(processed_dir)
for rid, value in self._errors.iteritems():
if not self.has_been_processed(processed_dir, rid):
with open("%s/%s" % (directory, rid), "w") as spool:
spool.write(json.dumps(value.to_dict()))
self.flush_old_processed_spool(processed_dir)
def _ensure_dir_exists(self, d):
mkdir = False
try:
os.stat(d)
except:
mkdir = True
if mkdir:
os.mkdir(d)
def has_been_processed(self, processed_dir, rid):
rv = False
try:
os.stat("%s/%s" % (processed_dir, rid))
rv = True
except:
pass
return rv
def flush_old_processed_spool(self, processed_dir):
keys = self._errors.keys()
procs = os.listdir(processed_dir)
for p in procs:
if p not in keys:
# log has rotated and the old error won't be seen again
os.unlink("%s/%s" % (processed_dir, p))
if __name__ == '__main__':
filename = '/var/log/nova.log'
spooldir = '/var/spool/nova'
if len(sys.argv) > 1:
filename = sys.argv[1]
if len(sys.argv) > 2:
spooldir = sys.argv[2]
LogReader(filename).process(spooldir)
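
The spool records written by LogReader.update_spool() are JSON dumps of Request.to_dict(); a placeholder example of one record (values are illustrative only):

# Placeholder values; real entries are parsed from ERROR lines in nova.log
{"request_id": "XQ4EBEN-0F1DAVMW",
 "logger": "nova.api",
 "host": "node1",
 "message": "InstanceNotFound raised ...",
 "traceback": "Traceback (most recent call last): ...",
 "env": "{\"SERVER_NAME\": \"node1\", ...}"}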

View File

@@ -53,9 +53,10 @@
CLI interface for nova management.
"""
import datetime
import gettext
import logging
import os
import re
import sys
import time
@@ -452,6 +453,61 @@ class NetworkCommands(object):
int(network_size), int(vlan_start),
int(vpn_start))
class ServiceCommands(object):
"""Enable and disable running services"""
def list(self, host=None, service=None):
"""Show a list of all running services. Filter by host & service name.
args: [host] [service]"""
ctxt = context.get_admin_context()
now = datetime.datetime.utcnow()
services = db.service_get_all(ctxt)
if host:
services = [s for s in services if s['host'] == host]
if service:
services = [s for s in services if s['binary'] == service]
for svc in services:
delta = now - (svc['updated_at'] or svc['created_at'])
alive = (delta.seconds <= 15)
art = (alive and ":-)") or "XXX"
active = 'enabled'
if svc['disabled']:
active = 'disabled'
print "%-10s %-10s %-8s %s %s" % (svc['host'], svc['binary'],
active, art,
svc['updated_at'])
def enable(self, host, service):
"""Enable scheduling for a service
args: host service"""
ctxt = context.get_admin_context()
svc = db.service_get_by_args(ctxt, host, service)
if not svc:
print "Unable to find service"
return
db.service_update(ctxt, svc['id'], {'disabled': False})
def disable(self, host, service):
"""Disable scheduling for a service
args: host service"""
ctxt = context.get_admin_context()
svc = db.service_get_by_args(ctxt, host, service)
if not svc:
print "Unable to find service"
return
db.service_update(ctxt, svc['id'], {'disabled': True})
class LogCommands(object):
def request(self, request_id, logfile='/var/log/nova.log'):
"""Show all fields in the log for the given request. Assumes you
haven't changed the log format too much.
ARGS: request_id [logfile]"""
lines = utils.execute("cat %s | grep '\[%s '" % (logfile, request_id))
print re.sub('#012', "\n", "\n".join(lines))
CATEGORIES = [
('user', UserCommands),
('project', ProjectCommands),
@@ -459,7 +515,9 @@ CATEGORIES = [
('shell', ShellCommands),
('vpn', VpnCommands),
('floating', FloatingIpCommands),
('network', NetworkCommands)]
('network', NetworkCommands),
('service', ServiceCommands),
('log', LogCommands)]
def lazy_match(name, key_value_tuples):
@@ -498,9 +556,6 @@ def main():
utils.default_flagfile()
argv = FLAGS(sys.argv)
if FLAGS.verbose:
logging.getLogger().setLevel(logging.DEBUG)
script_name = argv.pop(0)
if len(argv) < 1:
print script_name + " category action [<args>]"
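
With the two new categories registered above, invocation follows the existing "category action [<args>]" dispatch, for example: nova-manage service list [host] [service], nova-manage service disable <host> <service>, and nova-manage log request <request_id> [logfile] (arguments as given in the docstrings).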

bin/nova-spoolsentry (new file, 97 lines)

@@ -0,0 +1,97 @@
#!/usr/bin/env python
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import json
import logging
import os
import shutil
import sys
import urllib
import urllib2
try:
import cPickle as pickle
except:
import pickle
class SpoolSentry(object):
def __init__(self, spool_dir, sentry_url, key=None):
self.spool_dir = spool_dir
self.sentry_url = sentry_url
self.key = key
def process(self):
for fname in os.listdir(self.spool_dir):
if fname == "processed":
continue
try:
sourcefile = "%s/%s" % (self.spool_dir, fname)
with open(sourcefile) as f:
fdata = f.read()
data_from_json = json.loads(fdata)
data = self.build_data(data_from_json)
self.send_data(data)
destfile = "%s/processed/%s" % (self.spool_dir, fname)
shutil.move(sourcefile, destfile)
except:
logging.exception("Unable to upload record %s", fname)
raise
def build_data(self, filejson):
env = {'SERVER_NAME': 'unknown', 'SERVER_PORT': '0000',
'SCRIPT_NAME': '/unknown/', 'PATH_INFO': 'unknown'}
if filejson['env']:
env = json.loads(filejson['env'])
url = "http://%s:%s%s%s" % (env['SERVER_NAME'], env['SERVER_PORT'],
env['SCRIPT_NAME'], env['PATH_INFO'])
rv = {'logger': filejson['logger'], 'level': logging.ERROR,
'server_name': filejson['host'], 'url': url,
'message': filejson['message'],
'traceback': filejson['traceback']}
rv['data'] = {}
if filejson['env']:
rv['data']['META'] = env
if filejson['request_id']:
rv['data']['request_id'] = filejson['request_id']
return rv
def send_data(self, data):
data = {
'data': base64.b64encode(pickle.dumps(data).encode('zlib')),
'key': self.key
}
req = urllib2.Request(self.sentry_url)
res = urllib2.urlopen(req, urllib.urlencode(data))
if res.getcode() != 200:
raise Exception("Bad HTTP code: %s" % res.getcode())
txt = res.read()
if __name__ == '__main__':
sentryurl = 'http://127.0.0.1/sentry/store/'
key = ''
spooldir = '/var/spool/nova'
if len(sys.argv) > 1:
sentryurl = sys.argv[1]
if len(sys.argv) > 2:
key = sys.argv[2]
if len(sys.argv) > 3:
spooldir = sys.argv[3]
SpoolSentry(spooldir, sentryurl, key).process()
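
As a sanity check on the wire format used by send_data() above, the receiving end would reverse the encoding roughly as follows (sketch only; the function name is hypothetical):

# Reverses the base64 -> zlib -> pickle encoding applied in SpoolSentry.send_data()
import base64
try:
    import cPickle as pickle
except ImportError:
    import pickle

def decode_sentry_payload(blob):
    """Return the original record dict from the POSTed 'data' field."""
    return pickle.loads(base64.b64decode(blob).decode('zlib'))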

locale/nova.pot (new file, 2130 lines; diff suppressed because it is too large)

View File

@@ -23,10 +23,8 @@ import base64
import boto
import httplib
from nova import flags
from boto.ec2.regioninfo import RegionInfo
FLAGS = flags.FLAGS
DEFAULT_CLC_URL = 'http://127.0.0.1:8773'
DEFAULT_REGION = 'nova'
@@ -198,8 +196,8 @@ class NovaAdminClient(object):
self,
clc_url=DEFAULT_CLC_URL,
region=DEFAULT_REGION,
access_key=FLAGS.aws_access_key_id,
secret_key=FLAGS.aws_secret_access_key,
access_key=None,
secret_key=None,
**kwargs):
parts = self.split_clc_url(clc_url)

View File

@@ -20,7 +20,6 @@
Auth driver using the DB as its backend.
"""
import logging
import sys
from nova import context

View File

@@ -24,11 +24,11 @@ other backends by creating another class that exposes the same
public methods.
"""
import logging
import sys
from nova import exception
from nova import flags
from nova import log as logging
FLAGS = flags.FLAGS
@@ -65,6 +65,8 @@ flags.DEFINE_string('ldap_netadmin',
flags.DEFINE_string('ldap_developer',
'cn=developers,ou=Groups,dc=example,dc=com', 'cn for Developers')
LOG = logging.getLogger("nova.ldapdriver")
# TODO(vish): make an abstract base class with the same public methods
# to define a set interface for AuthDrivers. I'm delaying
@@ -502,7 +504,7 @@ class LdapDriver(object):
try:
self.conn.modify_s(group_dn, attr)
except self.ldap.OBJECT_CLASS_VIOLATION:
logging.debug(_("Attempted to remove the last member of a group. "
LOG.debug(_("Attempted to remove the last member of a group. "
"Deleting the group at %s instead."), group_dn)
self.__delete_group(group_dn)

View File

@@ -20,7 +20,6 @@
Nova authentication management
"""
import logging
import os
import shutil
import string # pylint: disable-msg=W0402
@@ -33,6 +32,7 @@ from nova import crypto
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import utils
from nova.auth import signer
@@ -70,6 +70,8 @@ flags.DEFINE_string('credential_rc_file', '%src',
flags.DEFINE_string('auth_driver', 'nova.auth.dbdriver.DbDriver',
'Driver that auth manager uses')
LOG = logging.getLogger('nova.auth.manager')
class AuthBase(object):
"""Base class for objects relating to auth
@@ -254,43 +256,51 @@ class AuthManager(object):
# TODO(vish): check for valid timestamp
(access_key, _sep, project_id) = access.partition(':')
logging.info(_('Looking up user: %r'), access_key)
LOG.debug(_('Looking up user: %r'), access_key)
user = self.get_user_from_access_key(access_key)
logging.info('user: %r', user)
LOG.debug('user: %r', user)
if user == None:
LOG.audit(_("Failed authorization for access key %s"), access_key)
raise exception.NotFound(_('No user found for access key %s')
% access_key)
# NOTE(vish): if we stop using project name as id we need better
# logic to find a default project for user
if project_id == '':
LOG.debug(_("Using project name = user name (%s)"), user.name)
project_id = user.name
project = self.get_project(project_id)
if project == None:
LOG.audit(_("failed authorization: no project named %s (user=%s)"),
project_id, user.name)
raise exception.NotFound(_('No project called %s could be found')
% project_id)
if not self.is_admin(user) and not self.is_project_member(user,
project):
LOG.audit(_("Failed authorization: user %s not admin and not "
"member of project %s"), user.name, project.name)
raise exception.NotFound(_('User %s is not a member of project %s')
% (user.id, project.id))
if check_type == 's3':
sign = signer.Signer(user.secret.encode())
expected_signature = sign.s3_authorization(headers, verb, path)
logging.debug('user.secret: %s', user.secret)
logging.debug('expected_signature: %s', expected_signature)
logging.debug('signature: %s', signature)
LOG.debug('user.secret: %s', user.secret)
LOG.debug('expected_signature: %s', expected_signature)
LOG.debug('signature: %s', signature)
if signature != expected_signature:
LOG.audit(_("Invalid signature for user %s"), user.name)
raise exception.NotAuthorized(_('Signature does not match'))
elif check_type == 'ec2':
# NOTE(vish): hmac can't handle unicode, so encode ensures that
# secret isn't unicode
expected_signature = signer.Signer(user.secret.encode()).generate(
params, verb, server_string, path)
logging.debug('user.secret: %s', user.secret)
logging.debug('expected_signature: %s', expected_signature)
logging.debug('signature: %s', signature)
LOG.debug('user.secret: %s', user.secret)
LOG.debug('expected_signature: %s', expected_signature)
LOG.debug('signature: %s', signature)
if signature != expected_signature:
LOG.audit(_("Invalid signature for user %s"), user.name)
raise exception.NotAuthorized(_('Signature does not match'))
return (user, project)
@@ -398,6 +408,12 @@ class AuthManager(object):
raise exception.NotFound(_("The %s role can not be found") % role)
if project is not None and role in FLAGS.global_roles:
raise exception.NotFound(_("The %s role is global only") % role)
if project:
LOG.audit(_("Adding role %s to user %s in project %s"), role,
User.safe_id(user), Project.safe_id(project))
else:
LOG.audit(_("Adding sitewide role %s to user %s"), role,
User.safe_id(user))
with self.driver() as drv:
drv.add_role(User.safe_id(user), role, Project.safe_id(project))
@@ -418,6 +434,12 @@ class AuthManager(object):
@type project: Project or project_id
@param project: Project in which to remove local role.
"""
if project:
LOG.audit(_("Removing role %s from user %s on project %s"),
role, User.safe_id(user), Project.safe_id(project))
else:
LOG.audit(_("Removing sitewide role %s from user %s"), role,
User.safe_id(user))
with self.driver() as drv:
drv.remove_role(User.safe_id(user), role, Project.safe_id(project))
@@ -480,6 +502,8 @@ class AuthManager(object):
description,
member_users)
if project_dict:
LOG.audit(_("Created project %s with manager %s"), name,
manager_user)
project = Project(**project_dict)
return project
@@ -496,6 +520,7 @@ class AuthManager(object):
@param project: This will be the new description of the project.
"""
LOG.audit(_("modifying project %s"), Project.safe_id(project))
if manager_user:
manager_user = User.safe_id(manager_user)
with self.driver() as drv:
@@ -505,6 +530,8 @@ class AuthManager(object):
def add_to_project(self, user, project):
"""Add user to project"""
LOG.audit(_("Adding user %s to project %s"), User.safe_id(user),
Project.safe_id(project))
with self.driver() as drv:
return drv.add_to_project(User.safe_id(user),
Project.safe_id(project))
@@ -523,6 +550,8 @@ class AuthManager(object):
def remove_from_project(self, user, project):
"""Removes a user from a project"""
LOG.audit(_("Remove user %s from project %s"), User.safe_id(user),
Project.safe_id(project))
with self.driver() as drv:
return drv.remove_from_project(User.safe_id(user),
Project.safe_id(project))
@@ -549,6 +578,7 @@ class AuthManager(object):
def delete_project(self, project):
"""Deletes a project"""
LOG.audit(_("Deleting project %s"), Project.safe_id(project))
with self.driver() as drv:
drv.delete_project(Project.safe_id(project))
@@ -603,13 +633,16 @@ class AuthManager(object):
with self.driver() as drv:
user_dict = drv.create_user(name, access, secret, admin)
if user_dict:
return User(**user_dict)
rv = User(**user_dict)
LOG.audit(_("Created user %s (admin: %r)"), rv.name, rv.admin)
return rv
def delete_user(self, user):
"""Deletes a user
Additionally deletes all users key_pairs"""
uid = User.safe_id(user)
LOG.audit(_("Deleting user %s"), uid)
db.key_pair_destroy_all_by_user(context.get_admin_context(),
uid)
with self.driver() as drv:
@@ -618,6 +651,12 @@ class AuthManager(object):
def modify_user(self, user, access_key=None, secret_key=None, admin=None):
"""Modify credentials for a user"""
uid = User.safe_id(user)
if access_key:
LOG.audit(_("Access Key change for user %s"), uid)
if secret_key:
LOG.audit(_("Secret Key change for user %s"), uid)
if admin is not None:
LOG.audit(_("Admin status set to %r for user %s"), admin, uid)
with self.driver() as drv:
drv.modify_user(uid, access_key, secret_key, admin)
@@ -666,7 +705,7 @@ class AuthManager(object):
port=vpn_port)
zippy.writestr(FLAGS.credential_vpn_file, config)
else:
logging.warn(_("No vpn data for project %s"), pid)
LOG.warn(_("No vpn data for project %s"), pid)
zippy.writestr(FLAGS.ca_file, crypto.fetch_ca(pid))
zippy.close()

View File

@@ -46,7 +46,6 @@ Utility class for parsing signed AMI manifests.
import base64
import hashlib
import hmac
import logging
import urllib
# NOTE(vish): for new boto
@@ -54,9 +53,13 @@ import boto
# NOTE(vish): for old boto
import boto.utils
from nova import log as logging
from nova.exception import Error
LOG = logging.getLogger('nova.signer')
class Signer(object):
"""Hacked up code from boto/connection.py"""
@@ -120,7 +123,7 @@ class Signer(object):
def _calc_signature_2(self, params, verb, server_string, path):
"""Generate AWS signature version 2 string."""
logging.debug('using _calc_signature_2')
LOG.debug('using _calc_signature_2')
string_to_sign = '%s\n%s\n%s\n' % (verb, server_string, path)
if self.hmac_256:
current_hmac = self.hmac_256
@@ -136,13 +139,13 @@ class Signer(object):
val = urllib.quote(val, safe='-_~')
pairs.append(urllib.quote(key, safe='') + '=' + val)
qs = '&'.join(pairs)
logging.debug('query string: %s', qs)
LOG.debug('query string: %s', qs)
string_to_sign += qs
logging.debug('string_to_sign: %s', string_to_sign)
LOG.debug('string_to_sign: %s', string_to_sign)
current_hmac.update(string_to_sign)
b64 = base64.b64encode(current_hmac.digest())
logging.debug('len(b64)=%d', len(b64))
logging.debug('base64 encoded digest: %s', b64)
LOG.debug('len(b64)=%d', len(b64))
LOG.debug('base64 encoded digest: %s', b64)
return b64

View File

@@ -18,12 +18,16 @@
"""Based a bit on the carrot.backeds.queue backend... but a lot better."""
import logging
import Queue as queue
from carrot.backends import base
from eventlet import greenthread
from nova import log as logging
LOG = logging.getLogger("nova.fakerabbit")
EXCHANGES = {}
QUEUES = {}
@@ -41,12 +45,12 @@ class Exchange(object):
self._routes = {}
def publish(self, message, routing_key=None):
logging.debug(_('(%s) publish (key: %s) %s'),
LOG.debug(_('(%s) publish (key: %s) %s'),
self.name, routing_key, message)
routing_key = routing_key.split('.')[0]
if routing_key in self._routes:
for f in self._routes[routing_key]:
logging.debug(_('Publishing to route %s'), f)
LOG.debug(_('Publishing to route %s'), f)
f(message, routing_key=routing_key)
def bind(self, callback, routing_key):
@@ -76,19 +80,19 @@ class Backend(base.BaseBackend):
def queue_declare(self, queue, **kwargs):
global QUEUES
if queue not in QUEUES:
logging.debug(_('Declaring queue %s'), queue)
LOG.debug(_('Declaring queue %s'), queue)
QUEUES[queue] = Queue(queue)
def exchange_declare(self, exchange, type, *args, **kwargs):
global EXCHANGES
if exchange not in EXCHANGES:
logging.debug(_('Declaring exchange %s'), exchange)
LOG.debug(_('Declaring exchange %s'), exchange)
EXCHANGES[exchange] = Exchange(exchange, type)
def queue_bind(self, queue, exchange, routing_key, **kwargs):
global EXCHANGES
global QUEUES
logging.debug(_('Binding %s to %s with key %s'),
LOG.debug(_('Binding %s to %s with key %s'),
queue, exchange, routing_key)
EXCHANGES[exchange].bind(QUEUES[queue].push, routing_key)
@@ -113,7 +117,7 @@ class Backend(base.BaseBackend):
content_type=content_type,
content_encoding=content_encoding)
message.result = True
logging.debug(_('Getting from %s: %s'), queue, message)
LOG.debug(_('Getting from %s: %s'), queue, message)
return message
def prepare_message(self, message_data, delivery_mode,

View File

@@ -29,8 +29,6 @@ import sys
import gflags
from nova import utils
class FlagValues(gflags.FlagValues):
"""Extension of gflags.FlagValues that allows undefined and runtime flags.
@@ -213,10 +211,10 @@ DEFINE_string('connection_type', 'libvirt', 'libvirt, xenapi or fake')
DEFINE_string('aws_access_key_id', 'admin', 'AWS Access ID')
DEFINE_string('aws_secret_access_key', 'admin', 'AWS Access Key')
DEFINE_integer('glance_port', 9292, 'glance port')
DEFINE_string('glance_host', utils.get_my_ip(), 'glance host')
DEFINE_string('glance_host', '127.0.0.1', 'glance host')
DEFINE_integer('s3_port', 3333, 's3 port')
DEFINE_string('s3_host', utils.get_my_ip(), 's3 host (for infrastructure)')
DEFINE_string('s3_dmz', utils.get_my_ip(), 's3 dmz ip (for instances)')
DEFINE_string('s3_host', '127.0.0.1', 's3 host (for infrastructure)')
DEFINE_string('s3_dmz', '127.0.0.1', 's3 dmz ip (for instances)')
DEFINE_string('compute_topic', 'compute', 'the topic compute nodes listen on')
DEFINE_string('scheduler_topic', 'scheduler',
'the topic scheduler nodes listen on')
@@ -243,8 +241,8 @@ DEFINE_integer('rabbit_retry_interval', 10, 'rabbit connection retry interval')
DEFINE_integer('rabbit_max_retries', 12, 'rabbit connection attempts')
DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to')
DEFINE_string('ec2_prefix', 'http', 'prefix for ec2')
DEFINE_string('cc_host', utils.get_my_ip(), 'ip of api server')
DEFINE_string('cc_dmz', utils.get_my_ip(), 'internal ip of api server')
DEFINE_string('cc_host', '127.0.0.1', 'ip of api server')
DEFINE_string('cc_dmz', '127.0.0.1', 'internal ip of api server')
DEFINE_integer('cc_port', 8773, 'cloud controller port')
DEFINE_string('ec2_suffix', '/services/Cloud', 'suffix for ec2')
@@ -270,6 +268,11 @@ DEFINE_string('state_path', os.path.join(os.path.dirname(__file__), '../'),
DEFINE_string('sql_connection',
'sqlite:///$state_path/nova.sqlite',
'connection string for sql database')
DEFINE_string('sql_idle_timeout',
'3600',
'timeout for idle sql database connections')
DEFINE_integer('sql_max_retries', 12, 'sql connection attempts')
DEFINE_integer('sql_retry_interval', 10, 'sql connection retry interval')
DEFINE_string('compute_manager', 'nova.compute.manager.ComputeManager',
'Manager for compute')

nova/log.py (new file, 254 lines)

@@ -0,0 +1,254 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Nova logging handler.
This module extends the standard logging module with the option to specify
a context object when calling the various log methods. If the context object
is not specified, default formatting is used.
It also allows setting of formatting information through flags.
"""
import cStringIO
import json
import logging
import logging.handlers
import traceback
from nova import flags
from nova import version
FLAGS = flags.FLAGS
flags.DEFINE_string('logging_context_format_string',
'(%(name)s %(nova_version)s): %(levelname)s '
'[%(request_id)s %(user)s '
'%(project)s] %(message)s',
'format string to use for log messages')
flags.DEFINE_string('logging_default_format_string',
'(%(name)s %(nova_version)s): %(levelname)s [N/A] '
'%(message)s',
'format string to use for log messages')
flags.DEFINE_string('logging_debug_format_suffix',
'from %(processName)s (pid=%(process)d) %(funcName)s'
' %(pathname)s:%(lineno)d',
'data to append to log format when level is DEBUG')
flags.DEFINE_string('logging_exception_prefix',
'(%(name)s): TRACE: ',
'prefix each line of exception output with this format')
flags.DEFINE_list('default_log_levels',
['amqplib=WARN',
'sqlalchemy=WARN',
'eventlet.wsgi.server=WARN'],
'list of logger=LEVEL pairs')
flags.DEFINE_bool('use_syslog', False, 'output to syslog')
flags.DEFINE_string('logfile', None, 'output to named file')
# A list of things we want to replicate from logging.
# levels
CRITICAL = logging.CRITICAL
FATAL = logging.FATAL
ERROR = logging.ERROR
WARNING = logging.WARNING
WARN = logging.WARN
INFO = logging.INFO
DEBUG = logging.DEBUG
NOTSET = logging.NOTSET
# methods
getLogger = logging.getLogger
debug = logging.debug
info = logging.info
warning = logging.warning
warn = logging.warn
error = logging.error
exception = logging.exception
critical = logging.critical
log = logging.log
# handlers
StreamHandler = logging.StreamHandler
FileHandler = logging.FileHandler
# log.SysLogHandler is nicer to use than logging.handlers.SysLogHandler.
SysLogHandler = logging.handlers.SysLogHandler
# our new audit level
AUDIT = logging.INFO + 1
logging.addLevelName(AUDIT, 'AUDIT')
def _dictify_context(context):
if context == None:
return None
if not isinstance(context, dict) \
and getattr(context, 'to_dict', None):
context = context.to_dict()
return context
def basicConfig():
logging.basicConfig()
for handler in logging.root.handlers:
handler.setFormatter(_formatter)
if FLAGS.verbose:
logging.root.setLevel(logging.DEBUG)
if FLAGS.use_syslog:
syslog = SysLogHandler(address='/dev/log')
syslog.setFormatter(_formatter)
logging.root.addHandler(syslog)
if FLAGS.logfile:
logfile = FileHandler(FLAGS.logfile)
logfile.setFormatter(_formatter)
logging.root.addHandler(logfile)
class NovaLogger(logging.Logger):
"""
NovaLogger manages request context and formatting.
This becomes the class that is instantiated by logging.getLogger.
"""
def __init__(self, name, level=NOTSET):
level_name = self._get_level_from_flags(name, FLAGS)
level = globals()[level_name]
logging.Logger.__init__(self, name, level)
def _get_level_from_flags(self, name, FLAGS):
# if exactly "nova", or a child logger, honor the verbose flag
if (name == "nova" or name.startswith("nova.")) and FLAGS.verbose:
return 'DEBUG'
for pair in FLAGS.default_log_levels:
logger, _sep, level = pair.partition('=')
# NOTE(todd): if we set a.b, we want a.b.c to have the same level
# (but not a.bc, so we check the dot)
if name == logger:
return level
if name.startswith(logger) and name[len(logger)] == '.':
return level
return 'INFO'
def _log(self, level, msg, args, exc_info=None, extra=None, context=None):
"""Extract context from any log call"""
if not extra:
extra = {}
if context:
extra.update(_dictify_context(context))
extra.update({"nova_version": version.version_string_with_vcs()})
logging.Logger._log(self, level, msg, args, exc_info, extra)
def addHandler(self, handler):
"""Each handler gets our custom formatter"""
handler.setFormatter(_formatter)
logging.Logger.addHandler(self, handler)
def audit(self, msg, *args, **kwargs):
"""Shortcut for our AUDIT level"""
if self.isEnabledFor(AUDIT):
self._log(AUDIT, msg, args, **kwargs)
def exception(self, msg, *args, **kwargs):
"""Logging.exception doesn't handle kwargs, so breaks context"""
if not kwargs.get('exc_info'):
kwargs['exc_info'] = 1
self.error(msg, *args, **kwargs)
# NOTE(todd): does this really go here, or in _log ?
extra = kwargs.get('extra')
if not extra:
return
env = extra.get('environment')
if env:
env = env.copy()
for k in env.keys():
if not isinstance(env[k], str):
env.pop(k)
message = "Environment: %s" % json.dumps(env)
kwargs.pop('exc_info')
self.error(message, **kwargs)
logging.setLoggerClass(NovaLogger)
class NovaRootLogger(NovaLogger):
pass
if not isinstance(logging.root, NovaRootLogger):
logging.root = NovaRootLogger("nova.root", WARNING)
NovaLogger.root = logging.root
NovaLogger.manager.root = logging.root
class NovaFormatter(logging.Formatter):
"""
A nova.context.RequestContext aware formatter configured through flags.
The flags used to set format strings are: logging_context_format_string
and logging_default_format_string. You can also specify
logging_debug_format_suffix to append extra formatting if the log level is
debug.
For information about what variables are available for the formatter see:
http://docs.python.org/library/logging.html#formatter
"""
def format(self, record):
"""Uses contextstring if request_id is set, otherwise default"""
if record.__dict__.get('request_id', None):
self._fmt = FLAGS.logging_context_format_string
else:
self._fmt = FLAGS.logging_default_format_string
if record.levelno == logging.DEBUG \
and FLAGS.logging_debug_format_suffix:
self._fmt += " " + FLAGS.logging_debug_format_suffix
# Cache this on the record; Logger will respect our formatted copy
if record.exc_info:
record.exc_text = self.formatException(record.exc_info, record)
return logging.Formatter.format(self, record)
def formatException(self, exc_info, record=None):
"""Format exception output with FLAGS.logging_exception_prefix"""
if not record:
return logging.Formatter.formatException(self, exc_info)
stringbuffer = cStringIO.StringIO()
traceback.print_exception(exc_info[0], exc_info[1], exc_info[2],
None, stringbuffer)
lines = stringbuffer.getvalue().split("\n")
stringbuffer.close()
formatted_lines = []
for line in lines:
pl = FLAGS.logging_exception_prefix % record.__dict__
fl = "%s%s" % (pl, line)
formatted_lines.append(fl)
return "\n".join(formatted_lines)
_formatter = NovaFormatter()
def audit(msg, *args, **kwargs):
"""Shortcut for logging to root log with sevrity 'AUDIT'."""
if len(logging.root.handlers) == 0:
basicConfig()
logging.root.log(AUDIT, msg, *args, **kwargs)
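
A minimal usage sketch of the new context-aware logger (the pattern mirrored by nova/tests/test_log.py below); the logger name and RequestContext arguments are illustrative only.

from nova import context
from nova import log as logging

LOG = logging.getLogger('nova.example')             # returns a NovaLogger
ctxt = context.RequestContext('user1', 'project1')  # hypothetical user/project

# With context=, records use logging_context_format_string; without it,
# logging_default_format_string (plus the debug suffix at DEBUG level).
LOG.audit("instance started", context=ctxt)
LOG.debug("no context here, default format")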

View File

@@ -22,7 +22,6 @@ No fan-out support yet.
"""
import json
import logging
import sys
import time
import traceback
@@ -36,13 +35,12 @@ from nova import context
from nova import exception
from nova import fakerabbit
from nova import flags
from nova import log as logging
from nova import utils
FLAGS = flags.FLAGS
LOG = logging.getLogger('amqplib')
LOG.setLevel(logging.DEBUG)
LOG = logging.getLogger('nova.rpc')
class Connection(carrot_connection.BrokerConnection):
@@ -91,15 +89,16 @@ class Consumer(messaging.Consumer):
self.failed_connection = False
break
except: # Catching all because carrot sucks
logging.exception(_("AMQP server on %s:%d is unreachable."
LOG.exception(_("AMQP server on %s:%d is unreachable."
" Trying again in %d seconds.") % (
FLAGS.rabbit_host,
FLAGS.rabbit_port,
FLAGS.rabbit_retry_interval))
self.failed_connection = True
if self.failed_connection:
logging.exception(_("Unable to connect to AMQP server"
" after %d tries. Shutting down.") % FLAGS.rabbit_max_retries)
LOG.exception(_("Unable to connect to AMQP server "
"after %d tries. Shutting down."),
FLAGS.rabbit_max_retries)
sys.exit(1)
def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False):
@@ -116,14 +115,14 @@ class Consumer(messaging.Consumer):
self.declare()
super(Consumer, self).fetch(no_ack, auto_ack, enable_callbacks)
if self.failed_connection:
logging.error(_("Reconnected to queue"))
LOG.error(_("Reconnected to queue"))
self.failed_connection = False
# NOTE(vish): This is catching all errors because we really don't want
# exceptions to be logged 10 times a second if some
# persistent failure occurs.
except Exception: # pylint: disable-msg=W0703
if not self.failed_connection:
logging.exception(_("Failed to fetch message from queue"))
LOG.exception(_("Failed to fetch message from queue"))
self.failed_connection = True
def attach_to_eventlet(self):
@@ -193,6 +192,7 @@ class AdapterConsumer(TopicConsumer):
if msg_id:
msg_reply(msg_id, rval, None)
except Exception as e:
logging.exception("Exception during message handling")
if msg_id:
msg_reply(msg_id, None, sys.exc_info())
return
@@ -242,8 +242,8 @@ def msg_reply(msg_id, reply=None, failure=None):
if failure:
message = str(failure[1])
tb = traceback.format_exception(*failure)
logging.error(_("Returning exception %s to caller"), message)
logging.error(tb)
LOG.error(_("Returning exception %s to caller"), message)
LOG.error(tb)
failure = (failure[0].__name__, str(failure[1]), tb)
conn = Connection.instance(True)
publisher = DirectPublisher(connection=conn, msg_id=msg_id)

View File

@@ -43,6 +43,19 @@ class SimpleScheduler(chance.ChanceScheduler):
def schedule_run_instance(self, context, instance_id, *_args, **_kwargs):
"""Picks a host that is up and has the fewest running instances."""
instance_ref = db.instance_get(context, instance_id)
if instance_ref['availability_zone'] and context.is_admin:
zone, _x, host = instance_ref['availability_zone'].partition(':')
service = db.service_get_by_args(context.elevated(), host,
'nova-compute')
if not self.service_is_up(service):
raise driver.WillNotSchedule("Host %s is not alive" % host)
# TODO(vish): this probably belongs in the manager, if we
# can generalize this somehow
now = datetime.datetime.utcnow()
db.instance_update(context, instance_id, {'host': host,
'scheduled_at': now})
return host
results = db.service_get_all_compute_sorted(context)
for result in results:
(service, instance_cores) = result
@@ -62,6 +75,19 @@ class SimpleScheduler(chance.ChanceScheduler):
def schedule_create_volume(self, context, volume_id, *_args, **_kwargs):
"""Picks a host that is up and has the fewest volumes."""
volume_ref = db.volume_get(context, volume_id)
if (':' in volume_ref['availability_zone']) and context.is_admin:
zone, _x, host = volume_ref['availability_zone'].partition(':')
service = db.service_get_by_args(context.elevated(), host,
'nova-volume')
if not self.service_is_up(service):
raise driver.WillNotSchedule("Host %s not available" % host)
# TODO(vish): this probably belongs in the manager, if we
# can generalize this somehow
now = datetime.datetime.utcnow()
db.volume_update(context, volume_id, {'host': host,
'scheduled_at': now})
return host
results = db.service_get_all_volume_sorted(context)
for result in results:
(service, volume_gigabytes) = result
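
The forced-host branches added above key off the "zone:host" form of availability_zone; a quick sketch of the partition they rely on (example value is hypothetical):

# str.partition splits on the first ':' only
zone, _sep, host = 'nova:compute-01'.partition(':')
assert zone == 'nova' and host == 'compute-01'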

View File

@@ -0,0 +1,71 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright 2010 Cloud.com, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For Hyper-V driver
"""
import random
from nova import context
from nova import db
from nova import flags
from nova import test
from nova.auth import manager
from nova.virt import hyperv
FLAGS = flags.FLAGS
FLAGS.connection_type = 'hyperv'
class HyperVTestCase(test.TestCase):
"""Test cases for the Hyper-V driver"""
def setUp(self):
super(HyperVTestCase, self).setUp()
self.manager = manager.AuthManager()
self.user = self.manager.create_user('fake', 'fake', 'fake',
admin=True)
self.project = self.manager.create_project('fake', 'fake', 'fake')
self.context = context.RequestContext(self.user, self.project)
def test_create_destroy(self):
"""Create a VM and destroy it"""
instance = {'internal_id': random.randint(1, 1000000),
'memory_mb': '1024',
'mac_address': '02:12:34:46:56:67',
'vcpus': 2,
'project_id': 'fake',
'instance_type': 'm1.small'}
instance_ref = db.instance_create(self.context, instance)
conn = hyperv.get_connection(False)
conn._create_vm(instance_ref) # pylint: disable-msg=W0212
found = [n for n in conn.list_instances()
if n == instance_ref['name']]
self.assertTrue(len(found) == 1)
info = conn.get_info(instance_ref['name'])
# Unfortunately, since the VM is not running at this point,
# we cannot obtain memory information from get_info.
self.assertEquals(info['num_cpu'], instance_ref['vcpus'])
conn.destroy(instance_ref)
found = [n for n in conn.list_instances()
if n == instance_ref['name']]
self.assertTrue(len(found) == 0)
def tearDown(self):
super(HyperVTestCase, self).tearDown()
self.manager.delete_project(self.project)
self.manager.delete_user(self.user)

View File

@@ -23,7 +23,6 @@ Unit tests for S3 objectstore clone.
import boto
import glob
import hashlib
import logging
import os
import shutil
import tempfile
@@ -63,7 +62,6 @@ class ObjectStoreTestCase(test.TestCase):
self.flags(buckets_path=os.path.join(OSS_TEMPDIR, 'buckets'),
images_path=os.path.join(OSS_TEMPDIR, 'images'),
ca_path=os.path.join(os.path.dirname(__file__), 'CA'))
logging.getLogger().setLevel(logging.DEBUG)
self.auth_manager = manager.AuthManager()
self.auth_manager.create_user('user1')

View File

@@ -17,7 +17,6 @@
# under the License.
import unittest
import logging
import webob
from nova import context

View File

@@ -16,17 +16,18 @@
# License for the specific language governing permissions and limitations
# under the License.
import logging
from M2Crypto import X509
import unittest
from nova import crypto
from nova import flags
from nova import log as logging
from nova import test
from nova.auth import manager
from nova.api.ec2 import cloud
FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.tests.auth_unittest')
class user_generator(object):
@@ -211,12 +212,12 @@ class AuthManagerTestCase(object):
# NOTE(vish): Setup runs genroot.sh if it hasn't been run
cloud.CloudController().setup()
_key, cert_str = crypto.generate_x509_cert(user.id, project.id)
logging.debug(cert_str)
LOG.debug(cert_str)
full_chain = crypto.fetch_ca(project_id=project.id, chain=True)
int_cert = crypto.fetch_ca(project_id=project.id, chain=False)
cloud_cert = crypto.fetch_ca()
logging.debug("CA chain:\n\n =====\n%s\n\n=====" % full_chain)
LOG.debug("CA chain:\n\n =====\n%s\n\n=====", full_chain)
signed_cert = X509.load_cert_string(cert_str)
chain_cert = X509.load_cert_string(full_chain)
int_cert = X509.load_cert_string(int_cert)
@@ -331,7 +332,7 @@ class AuthManagerLdapTestCase(AuthManagerTestCase, test.TestCase):
test.TestCase.__init__(self, *args, **kwargs)
import nova.auth.fakeldap as fakeldap
if FLAGS.flush_db:
logging.info("Flushing datastore")
LOG.info("Flushing datastore")
r = fakeldap.Store.instance()
r.flushdb()

View File

@@ -18,7 +18,6 @@
from base64 import b64decode
import json
import logging
from M2Crypto import BIO
from M2Crypto import RSA
import os
@@ -31,6 +30,7 @@ from nova import context
from nova import crypto
from nova import db
from nova import flags
from nova import log as logging
from nova import rpc
from nova import service
from nova import test
@@ -41,6 +41,7 @@ from nova.objectstore import image
FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.tests.cloud')
# Temp dirs for working with image attributes through the cloud controller
# (stole this from objectstore_unittest.py)
@@ -56,7 +57,6 @@ class CloudTestCase(test.TestCase):
images_path=IMAGES_PATH)
self.conn = rpc.Connection.instance()
logging.getLogger().setLevel(logging.DEBUG)
# set up our cloud
self.cloud = cloud.CloudController()
@@ -106,7 +106,7 @@ class CloudTestCase(test.TestCase):
self.cloud.allocate_address(self.context)
inst = db.instance_create(self.context, {'host': FLAGS.host})
fixed = self.network.allocate_fixed_ip(self.context, inst['id'])
ec2_id = cloud.internal_id_to_ec2_id(inst['internal_id'])
ec2_id = cloud.id_to_ec2_id(inst['id'])
self.cloud.associate_address(self.context,
instance_id=ec2_id,
public_ip=address)
@@ -127,12 +127,29 @@ class CloudTestCase(test.TestCase):
result = self.cloud.describe_volumes(self.context)
self.assertEqual(len(result['volumeSet']), 2)
result = self.cloud.describe_volumes(self.context,
volume_id=[vol2['ec2_id']])
volume_id=[vol2['id']])
self.assertEqual(len(result['volumeSet']), 1)
self.assertEqual(result['volumeSet'][0]['volumeId'], vol2['ec2_id'])
self.assertEqual(result['volumeSet'][0]['volumeId'], vol2['id'])
db.volume_destroy(self.context, vol1['id'])
db.volume_destroy(self.context, vol2['id'])
def test_describe_instances(self):
"""Makes sure describe_instances works and filters results."""
inst1 = db.instance_create(self.context, {'reservation_id': 'a'})
inst2 = db.instance_create(self.context, {'reservation_id': 'a'})
result = self.cloud.describe_instances(self.context)
result = result['reservationSet'][0]
self.assertEqual(len(result['instancesSet']), 2)
instance_id = cloud.id_to_ec2_id(inst2['id'])
result = self.cloud.describe_instances(self.context,
instance_id=[instance_id])
result = result['reservationSet'][0]
self.assertEqual(len(result['instancesSet']), 1)
self.assertEqual(result['instancesSet'][0]['instanceId'],
instance_id)
db.instance_destroy(self.context, inst1['id'])
db.instance_destroy(self.context, inst2['id'])
def test_console_output(self):
image_id = FLAGS.default_image
instance_type = FLAGS.default_instance_type
@@ -140,15 +157,15 @@ class CloudTestCase(test.TestCase):
kwargs = {'image_id': image_id,
'instance_type': instance_type,
'max_count': max_count}
rv = yield self.cloud.run_instances(self.context, **kwargs)
rv = self.cloud.run_instances(self.context, **kwargs)
instance_id = rv['instancesSet'][0]['instanceId']
output = yield self.cloud.get_console_output(context=self.context,
output = self.cloud.get_console_output(context=self.context,
instance_id=[instance_id])
self.assertEquals(b64decode(output['output']), 'FAKE CONSOLE OUTPUT')
# TODO(soren): We need this until we can stop polling in the rpc code
# for unit tests.
greenthread.sleep(0.3)
rv = yield self.cloud.terminate_instances(self.context, [instance_id])
rv = self.cloud.terminate_instances(self.context, [instance_id])
def test_ajax_console(self):
kwargs = {'image_id': image_id }
@@ -191,7 +208,7 @@ class CloudTestCase(test.TestCase):
def test_run_instances(self):
if FLAGS.connection_type == 'fake':
logging.debug("Can't test instances without a real virtual env.")
LOG.debug(_("Can't test instances without a real virtual env."))
return
image_id = FLAGS.default_image
instance_type = FLAGS.default_instance_type
@@ -199,30 +216,30 @@ class CloudTestCase(test.TestCase):
kwargs = {'image_id': image_id,
'instance_type': instance_type,
'max_count': max_count}
rv = yield self.cloud.run_instances(self.context, **kwargs)
rv = self.cloud.run_instances(self.context, **kwargs)
# TODO: check for proper response
instance_id = rv['reservationSet'][0].keys()[0]
instance = rv['reservationSet'][0][instance_id][0]
logging.debug("Need to watch instance %s until it's running..." %
LOG.debug(_("Need to watch instance %s until it's running..."),
instance['instance_id'])
while True:
greenthread.sleep(1)
info = self.cloud._get_instance(instance['instance_id'])
logging.debug(info['state'])
LOG.debug(info['state'])
if info['state'] == power_state.RUNNING:
break
self.assert_(rv)
if connection_type != 'fake':
if FLAGS.connection_type != 'fake':
time.sleep(45) # Should use boto for polling here
for reservations in rv['reservationSet']:
# for res_id in reservations.keys():
# logging.debug(reservations[res_id])
# LOG.debug(reservations[res_id])
# for instance in reservations[res_id]:
for instance in reservations[reservations.keys()[0]]:
instance_id = instance['instance_id']
logging.debug("Terminating instance %s" % instance_id)
rv = yield self.compute.terminate_instance(instance_id)
LOG.debug(_("Terminating instance %s"), instance_id)
rv = self.compute.terminate_instance(instance_id)
def test_instance_update_state(self):
def instance(num):
@@ -309,7 +326,7 @@ class CloudTestCase(test.TestCase):
def test_update_of_instance_display_fields(self):
inst = db.instance_create(self.context, {})
ec2_id = cloud.internal_id_to_ec2_id(inst['internal_id'])
ec2_id = cloud.id_to_ec2_id(inst['id'])
self.cloud.update_instance(self.context, ec2_id,
display_name='c00l 1m4g3')
inst = db.instance_get(self.context, inst['id'])

View File

@@ -20,31 +20,31 @@ Tests For Compute
"""
import datetime
import logging
from nova import compute
from nova import context
from nova import db
from nova import exception
from nova import flags
from nova import log as logging
from nova import test
from nova import utils
from nova.auth import manager
from nova.compute import api as compute_api
FLAGS = flags.FLAGS
LOG = logging.getLogger('nova.tests.compute')
class ComputeTestCase(test.TestCase):
"""Test case for compute"""
def setUp(self):
logging.getLogger().setLevel(logging.DEBUG)
super(ComputeTestCase, self).setUp()
self.flags(connection_type='fake',
stub_network=True,
network_manager='nova.network.manager.FlatManager')
self.compute = utils.import_object(FLAGS.compute_manager)
self.compute_api = compute_api.ComputeAPI()
self.compute_api = compute.API()
self.manager = manager.AuthManager()
self.user = self.manager.create_user('fake', 'fake', 'fake')
self.project = self.manager.create_project('fake', 'fake', 'fake')
@@ -72,7 +72,7 @@ class ComputeTestCase(test.TestCase):
"""Verify that an instance cannot be created without a display_name."""
cases = [dict(), dict(display_name=None)]
for instance in cases:
ref = self.compute_api.create_instances(self.context,
ref = self.compute_api.create(self.context,
FLAGS.default_instance_type, None, **instance)
try:
self.assertNotEqual(ref[0].display_name, None)
@@ -80,13 +80,13 @@ class ComputeTestCase(test.TestCase):
db.instance_destroy(self.context, ref[0]['id'])
def test_create_instance_associates_security_groups(self):
"""Make sure create_instances associates security groups"""
"""Make sure create associates security groups"""
values = {'name': 'default',
'description': 'default',
'user_id': self.user.id,
'project_id': self.project.id}
group = db.security_group_create(self.context, values)
ref = self.compute_api.create_instances(self.context,
ref = self.compute_api.create(self.context,
FLAGS.default_instance_type, None, security_group=['default'])
try:
self.assertEqual(len(ref[0]['security_groups']), 1)
@@ -101,13 +101,13 @@ class ComputeTestCase(test.TestCase):
self.compute.run_instance(self.context, instance_id)
instances = db.instance_get_all(context.get_admin_context())
logging.info(_("Running instances: %s"), instances)
LOG.info(_("Running instances: %s"), instances)
self.assertEqual(len(instances), 1)
self.compute.terminate_instance(self.context, instance_id)
instances = db.instance_get_all(context.get_admin_context())
logging.info(_("After terminating instances: %s"), instances)
LOG.info(_("After terminating instances: %s"), instances)
self.assertEqual(len(instances), 0)
def test_run_terminate_timestamps(self):
@@ -188,3 +188,22 @@ class ComputeTestCase(test.TestCase):
self.context,
instance_id)
self.compute.terminate_instance(self.context, instance_id)
def test_lock(self):
"""ensure locked instance cannot be changed"""
instance_id = self._create_instance()
self.compute.run_instance(self.context, instance_id)
non_admin_context = context.RequestContext(None, None, False, False)
# decorator should return False (fail) with locked nonadmin context
self.compute.lock_instance(self.context, instance_id)
ret_val = self.compute.reboot_instance(non_admin_context, instance_id)
self.assertEqual(ret_val, False)
# decorator should return None (success) with unlocked nonadmin context
self.compute.unlock_instance(self.context, instance_id)
ret_val = self.compute.reboot_instance(non_admin_context, instance_id)
self.assertEqual(ret_val, None)
self.compute.terminate_instance(self.context, instance_id)

nova/tests/test_log.py (new file, 110 lines)

@@ -0,0 +1,110 @@
import cStringIO
from nova import context
from nova import log
from nova import test
def _fake_context():
return context.RequestContext(1, 1)
class RootLoggerTestCase(test.TrialTestCase):
def setUp(self):
super(RootLoggerTestCase, self).setUp()
self.log = log.logging.root
def tearDown(self):
super(RootLoggerTestCase, self).tearDown()
log.NovaLogger.manager.loggerDict = {}
def test_is_nova_instance(self):
self.assert_(isinstance(self.log, log.NovaLogger))
def test_name_is_nova_root(self):
self.assertEqual("nova.root", self.log.name)
def test_handlers_have_nova_formatter(self):
formatters = []
for h in self.log.handlers:
f = h.formatter
if isinstance(f, log.NovaFormatter):
formatters.append(f)
self.assert_(formatters)
self.assertEqual(len(formatters), len(self.log.handlers))
def test_handles_context_kwarg(self):
self.log.info("foo", context=_fake_context())
self.assert_(True) # didn't raise exception
def test_module_level_methods_handle_context_arg(self):
log.info("foo", context=_fake_context())
self.assert_(True) # didn't raise exception
def test_module_level_audit_handles_context_arg(self):
log.audit("foo", context=_fake_context())
self.assert_(True) # didn't raise exception
class NovaFormatterTestCase(test.TrialTestCase):
def setUp(self):
super(NovaFormatterTestCase, self).setUp()
self.flags(logging_context_format_string="HAS CONTEXT "\
"[%(request_id)s]: %(message)s",
logging_default_format_string="NOCTXT: %(message)s",
logging_debug_format_suffix="--DBG")
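# How these flags appear to interact, judging from the assertions below:
# logging_context_format_string is used when a request context accompanies the
# record, logging_default_format_string otherwise, and
# logging_debug_format_suffix is appended to DEBUG-level messages.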
self.log = log.logging.root
self.stream = cStringIO.StringIO()
handler = log.StreamHandler(self.stream)
self.log.addHandler(handler)
self.log.setLevel(log.DEBUG)
def tearDown(self):
super(NovaFormatterTestCase, self).tearDown()
log.NovaLogger.manager.loggerDict = {}
def test_uncontextualized_log(self):
self.log.info("foo")
self.assertEqual("NOCTXT: foo\n", self.stream.getvalue())
def test_contextualized_log(self):
ctxt = _fake_context()
self.log.info("bar", context=ctxt)
expected = "HAS CONTEXT [%s]: bar\n" % ctxt.request_id
self.assertEqual(expected, self.stream.getvalue())
def test_debugging_log(self):
self.log.debug("baz")
self.assertEqual("NOCTXT: baz --DBG\n", self.stream.getvalue())
class NovaLoggerTestCase(test.TrialTestCase):
def setUp(self):
super(NovaLoggerTestCase, self).setUp()
self.flags(default_log_levels=["nova-test=AUDIT"], verbose=False)
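# default_log_levels entries appear to take the form "<logger name>=<LEVEL>";
# here the 'nova-test' logger (and, per the next test, its children) should
# come up at AUDIT.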
self.log = log.getLogger('nova-test')
def tearDown(self):
super(NovaLoggerTestCase, self).tearDown()
log.NovaLogger.manager.loggerDict = {}
def test_has_level_from_flags(self):
self.assertEqual(log.AUDIT, self.log.level)
def test_child_log_has_level_of_parent_flag(self):
l = log.getLogger('nova-test.foo')
self.assertEqual(log.AUDIT, l.level)
class VerboseLoggerTestCase(test.TrialTestCase):
def setUp(self):
super(VerboseLoggerTestCase, self).setUp()
self.flags(default_log_levels=["nova.test=AUDIT"], verbose=True)
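# With verbose=True the explicit AUDIT entry should lose out: nova.* loggers
# are expected to drop to DEBUG, which is what the test below asserts.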
self.log = log.getLogger('nova.test')
def tearDown(self):
super(VerboseLoggerTestCase, self).tearDown()
log.NovaLogger.manager.loggerDict = {}
def test_will_be_verbose_if_named_nova_and_verbose_flag_set(self):
self.assertEqual(log.DEBUG, self.log.level)

@@ -20,18 +20,18 @@ Unit Tests for network code
"""
import IPy
import os
-import logging
from nova import context
from nova import db
from nova import exception
from nova import flags
from nova import service
+from nova import log as logging
from nova import test
from nova import utils
from nova.auth import manager
FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.network')
class NetworkTestCase(test.TestCase):
@@ -45,7 +45,6 @@ class NetworkTestCase(test.TestCase):
fake_network=True,
network_size=16,
num_networks=5)
-logging.getLogger().setLevel(logging.DEBUG)
self.manager = manager.AuthManager()
self.user = self.manager.create_user('netuser', 'netuser', 'netuser')
self.projects = []
@@ -328,7 +327,7 @@ def lease_ip(private_ip):
'TESTING': '1',
'FLAGFILE': FLAGS.dhcpbridge_flagfile}
(out, err) = utils.execute(cmd, addl_env=env)
logging.debug("ISSUE_IP: %s, %s ", out, err)
LOG.debug("ISSUE_IP: %s, %s ", out, err)
def release_ip(private_ip):
@@ -344,4 +343,4 @@ def release_ip(private_ip):
'TESTING': '1',
'FLAGFILE': FLAGS.dhcpbridge_flagfile}
(out, err) = utils.execute(cmd, addl_env=env)
logging.debug("RELEASE_IP: %s, %s ", out, err)
LOG.debug("RELEASE_IP: %s, %s ", out, err)

@@ -18,15 +18,16 @@
"""
Unit Tests for remote procedure calls using queue
"""
-import logging
from nova import context
from nova import flags
+from nova import log as logging
from nova import rpc
from nova import test
FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.rpc')
class RpcTestCase(test.TestCase):
@@ -85,12 +86,12 @@ class RpcTestCase(test.TestCase):
@staticmethod
def echo(context, queue, value):
"""Calls echo in the passed queue"""
logging.debug("Nested received %s, %s", queue, value)
LOG.debug(_("Nested received %s, %s"), queue, value)
ret = rpc.call(context,
queue,
{"method": "echo",
"args": {"value": value}})
logging.debug("Nested return %s", ret)
LOG.debug(_("Nested return %s"), ret)
return value
nested = Nested()
@@ -115,13 +116,13 @@ class TestReceiver(object):
@staticmethod
def echo(context, value):
"""Simply returns whatever value is sent in"""
logging.debug("Received %s", value)
LOG.debug(_("Received %s"), value)
return value
@staticmethod
def context(context, value):
"""Returns dictionary version of context"""
logging.debug("Received %s", context)
LOG.debug(_("Received %s"), context)
return context.to_dict()
@staticmethod

@@ -19,6 +19,8 @@
Tests For Scheduler
"""
import datetime
from nova import context
from nova import db
from nova import flags
@@ -95,7 +97,7 @@ class SimpleDriverTestCase(test.TestCase):
self.manager.delete_user(self.user)
self.manager.delete_project(self.project)
-def _create_instance(self):
+def _create_instance(self, **kwargs):
"""Create a test instance"""
inst = {}
inst['image_id'] = 'ami-test'
@@ -106,6 +108,7 @@ class SimpleDriverTestCase(test.TestCase):
inst['mac_address'] = utils.generate_mac()
inst['ami_launch_index'] = 0
inst['vcpus'] = 1
+inst['availability_zone'] = kwargs.get('availability_zone', None)
return db.instance_create(self.context, inst)['id']
def _create_volume(self):
@@ -114,9 +117,33 @@ class SimpleDriverTestCase(test.TestCase):
vol['image_id'] = 'ami-test'
vol['reservation_id'] = 'r-fakeres'
vol['size'] = 1
vol['availability_zone'] = 'test'
return db.volume_create(self.context, vol)['id']
def test_hosts_are_up(self):
def test_doesnt_report_disabled_hosts_as_up(self):
"""Ensures driver doesn't find hosts before they are enabled"""
# NOTE(vish): constructing service without create method
# because we are going to use it without queue
compute1 = service.Service('host1',
'nova-compute',
'compute',
FLAGS.compute_manager)
compute1.start()
compute2 = service.Service('host2',
'nova-compute',
'compute',
FLAGS.compute_manager)
compute2.start()
s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
s2 = db.service_get_by_args(self.context, 'host2', 'nova-compute')
db.service_update(self.context, s1['id'], {'disabled': True})
db.service_update(self.context, s2['id'], {'disabled': True})
hosts = self.scheduler.driver.hosts_up(self.context, 'compute')
self.assertEqual(0, len(hosts))
compute1.kill()
compute2.kill()
def test_reports_enabled_hosts_as_up(self):
"""Ensures driver can find the hosts that are up"""
# NOTE(vish): constructing service without create method
# because we are going to use it without queue
@@ -131,7 +158,7 @@ class SimpleDriverTestCase(test.TestCase):
FLAGS.compute_manager)
compute2.start()
hosts = self.scheduler.driver.hosts_up(self.context, 'compute')
-self.assertEqual(len(hosts), 2)
+self.assertEqual(2, len(hosts))
compute1.kill()
compute2.kill()
@@ -158,6 +185,63 @@ class SimpleDriverTestCase(test.TestCase):
compute1.kill()
compute2.kill()
def test_specific_host_gets_instance(self):
"""Ensures if you set availability_zone it launches on that zone"""
compute1 = service.Service('host1',
'nova-compute',
'compute',
FLAGS.compute_manager)
compute1.start()
compute2 = service.Service('host2',
'nova-compute',
'compute',
FLAGS.compute_manager)
compute2.start()
instance_id1 = self._create_instance()
compute1.run_instance(self.context, instance_id1)
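# An availability_zone of the form '<zone>:<host>' pins the instance to that
# host, so host1 should be chosen even though it already runs an instance.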
instance_id2 = self._create_instance(availability_zone='nova:host1')
host = self.scheduler.driver.schedule_run_instance(self.context,
instance_id2)
self.assertEqual('host1', host)
compute1.terminate_instance(self.context, instance_id1)
db.instance_destroy(self.context, instance_id2)
compute1.kill()
compute2.kill()
def test_wont_schedule_if_specified_host_is_down(self):
compute1 = service.Service('host1',
'nova-compute',
'compute',
FLAGS.compute_manager)
compute1.start()
s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
now = datetime.datetime.utcnow()
delta = datetime.timedelta(seconds=FLAGS.service_down_time * 2)
past = now - delta
db.service_update(self.context, s1['id'], {'updated_at': past})
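# Backdating updated_at beyond FLAGS.service_down_time makes host1 look dead,
# so explicitly targeting it should raise WillNotSchedule rather than schedule.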
instance_id2 = self._create_instance(availability_zone='nova:host1')
self.assertRaises(driver.WillNotSchedule,
self.scheduler.driver.schedule_run_instance,
self.context,
instance_id2)
db.instance_destroy(self.context, instance_id2)
compute1.kill()
def test_will_schedule_on_disabled_host_if_specified(self):
compute1 = service.Service('host1',
'nova-compute',
'compute',
FLAGS.compute_manager)
compute1.start()
s1 = db.service_get_by_args(self.context, 'host1', 'nova-compute')
db.service_update(self.context, s1['id'], {'disabled': True})
instance_id2 = self._create_instance(availability_zone='nova:host1')
host = self.scheduler.driver.schedule_run_instance(self.context,
instance_id2)
self.assertEqual('host1', host)
db.instance_destroy(self.context, instance_id2)
compute1.kill()
def test_too_many_cores(self):
"""Ensures we don't go over max cores"""
compute1 = service.Service('host1',

@@ -208,8 +208,99 @@ class LibvirtConnTestCase(test.TestCase):
self.manager.delete_user(self.user)
class NWFilterTestCase(test.TestCase):
class IptablesFirewallTestCase(test.TestCase):
def setUp(self):
super(IptablesFirewallTestCase, self).setUp()
self.manager = manager.AuthManager()
self.user = self.manager.create_user('fake', 'fake', 'fake',
admin=True)
self.project = self.manager.create_project('fake', 'fake', 'fake')
self.context = context.RequestContext('fake', 'fake')
self.network = utils.import_object(FLAGS.network_manager)
self.fw = libvirt_conn.IptablesFirewallDriver()
def tearDown(self):
self.manager.delete_project(self.project)
self.manager.delete_user(self.user)
super(IptablesFirewallTestCase, self).tearDown()
def _p(self, *args, **kwargs):
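# Stand-in for the driver's execute() (see test_static_filters): prints the
# iptables-restore payload instead of running it and swallows iptables-save.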
if 'iptables-restore' in args:
print ' '.join(args), kwargs['stdin']
if 'iptables-save' in args:
return
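# Canned iptables-save output (the stock libvirt virbr0 NAT rules) that serves
# as the pre-existing ruleset modify_rules() must carry over untouched.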
in_rules = [
'# Generated by iptables-save v1.4.4 on Mon Dec 6 11:54:13 2010',
'*filter',
':INPUT ACCEPT [969615:281627771]',
':FORWARD ACCEPT [0:0]',
':OUTPUT ACCEPT [915599:63811649]',
':nova-block-ipv4 - [0:0]',
'-A INPUT -i virbr0 -p udp -m udp --dport 53 -j ACCEPT ',
'-A INPUT -i virbr0 -p tcp -m tcp --dport 53 -j ACCEPT ',
'-A INPUT -i virbr0 -p udp -m udp --dport 67 -j ACCEPT ',
'-A INPUT -i virbr0 -p tcp -m tcp --dport 67 -j ACCEPT ',
'-A FORWARD -d 192.168.122.0/24 -o virbr0 -m state --state RELATED'
',ESTABLISHED -j ACCEPT ',
'-A FORWARD -s 192.168.122.0/24 -i virbr0 -j ACCEPT ',
'-A FORWARD -i virbr0 -o virbr0 -j ACCEPT ',
'-A FORWARD -o virbr0 -j REJECT --reject-with icmp-port-unreachable ',
'-A FORWARD -i virbr0 -j REJECT --reject-with icmp-port-unreachable ',
'COMMIT',
'# Completed on Mon Dec 6 11:54:13 2010'
]
def test_static_filters(self):
self.fw.execute = self._p
instance_ref = db.instance_create(self.context,
{'user_id': 'fake',
'project_id': 'fake'})
ip = '10.11.12.13'
network_ref = db.project_get_network(self.context,
'fake')
fixed_ip = {'address': ip,
'network_id': network_ref['id']}
admin_ctxt = context.get_admin_context()
db.fixed_ip_create(admin_ctxt, fixed_ip)
db.fixed_ip_update(admin_ctxt, ip, {'allocated': True,
'instance_id': instance_ref['id']})
secgroup = db.security_group_create(admin_ctxt,
{'user_id': 'fake',
'project_id': 'fake',
'name': 'testgroup',
'description': 'test group'})
db.security_group_rule_create(admin_ctxt,
{'parent_group_id': secgroup['id'],
'protocol': 'tcp',
'from_port': 80,
'to_port': 81,
'cidr': '192.168.10.0/24'})
db.instance_add_security_group(admin_ctxt, instance_ref['id'],
secgroup['id'])
instance_ref = db.instance_get(admin_ctxt, instance_ref['id'])
self.fw.add_instance(instance_ref)
out_rules = self.fw.modify_rules(self.in_rules)
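# Every pre-existing rule that nova did not generate must survive the rewrite.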
in_rules = filter(lambda l: not l.startswith('#'), self.in_rules)
for rule in in_rules:
if not 'nova' in rule:
self.assertTrue(rule in out_rules,
'Rule went missing: %s' % rule)
print '\n'.join(out_rules)
class NWFilterTestCase(test.TestCase):
def setUp(self):
super(NWFilterTestCase, self).setUp()
@@ -224,7 +315,8 @@ class NWFilterTestCase(test.TestCase):
self.fake_libvirt_connection = Mock()
-self.fw = libvirt_conn.NWFilterFirewall(self.fake_libvirt_connection)
+self.fw = libvirt_conn.NWFilterFirewall(
+lambda: self.fake_libvirt_connection)
def tearDown(self):
self.manager.delete_project(self.project)
@@ -337,7 +429,7 @@ class NWFilterTestCase(test.TestCase):
self.security_group.id)
instance = db.instance_get(self.context, inst_id)
-self.fw.setup_base_nwfilters()
-self.fw.setup_nwfilters_for_instance(instance)
+self.fw.setup_basic_filtering(instance)
+self.fw.prepare_instance_filter(instance)
_ensure_all_called()
self.teardown_security_group()

@@ -19,23 +19,23 @@
Tests for Volume Code.
"""
-import logging
from nova import context
from nova import exception
from nova import db
from nova import flags
+from nova import log as logging
from nova import test
from nova import utils
FLAGS = flags.FLAGS
+LOG = logging.getLogger('nova.tests.volume')
class VolumeTestCase(test.TestCase):
"""Test Case for volumes."""
def setUp(self):
-logging.getLogger().setLevel(logging.DEBUG)
super(VolumeTestCase, self).setUp()
self.compute = utils.import_object(FLAGS.compute_manager)
self.flags(connection_type='fake')
@@ -159,7 +159,7 @@ class VolumeTestCase(test.TestCase):
volume_id)
self.assert_(iscsi_target not in targets)
targets.append(iscsi_target)
logging.debug("Target %s allocated", iscsi_target)
LOG.debug(_("Target %s allocated"), iscsi_target)
total_slots = FLAGS.iscsi_num_targets
for _index in xrange(total_slots):
volume_id = self._create_volume()

@@ -79,8 +79,8 @@ class XenAPIVolumeTestCase(test.TestCase):
helper = volume_utils.VolumeHelper
helper.XenAPI = session.get_imported_xenapi()
vol = self._create_volume()
-info = helper.parse_volume_info(vol['ec2_id'], '/dev/sdc')
-label = 'SR-%s' % vol['ec2_id']
+info = helper.parse_volume_info(vol['id'], '/dev/sdc')
+label = 'SR-%s' % vol['id']
description = 'Test-SR'
sr_ref = helper.create_iscsi_storage(session, info, label, description)
srs = xenapi_fake.get_all('SR')
@@ -97,7 +97,7 @@ class XenAPIVolumeTestCase(test.TestCase):
# oops, wrong mount point!
self.assertRaises(volume_utils.StorageError,
helper.parse_volume_info,
-vol['ec2_id'],
+vol['id'],
'/dev/sd')
db.volume_destroy(context.get_admin_context(), vol['id'])
@@ -108,8 +108,7 @@ class XenAPIVolumeTestCase(test.TestCase):
volume = self._create_volume()
instance = db.instance_create(self.values)
xenapi_fake.create_vm(instance.name, 'Running')
-result = conn.attach_volume(instance.name, volume['ec2_id'],
-'/dev/sdc')
+result = conn.attach_volume(instance.name, volume['id'], '/dev/sdc')
def check():
# check that the VM has a VBD attached to it
@@ -134,7 +133,7 @@ class XenAPIVolumeTestCase(test.TestCase):
self.assertRaises(Exception,
conn.attach_volume,
instance.name,
-volume['ec2_id'],
+volume['id'],
'/dev/sdc')
def tearDown(self):
@@ -250,15 +249,16 @@ class XenAPIVMTestCase(test.TestCase):
def _create_instance(self):
"""Creates and spawns a test instance"""
-values = {'name': 1, 'id': 1,
+values = {
+'name': 1,
+'id': 1,
'project_id': self.project.id,
'user_id': self.user.id,
'image_id': 1,
'kernel_id': 2,
'ramdisk_id': 3,
'instance_type': 'm1.large',
-'mac_address': 'aa:bb:cc:dd:ee:ff'
-}
+'mac_address': 'aa:bb:cc:dd:ee:ff'}
instance = db.instance_create(values)
self.conn.spawn(instance)
return instance

@@ -22,7 +22,6 @@ manage pid files and support syslogging.
"""
import gflags
-import logging
import os
import signal
import sys
@@ -34,6 +33,7 @@ from twisted.python import runtime
from twisted.python import usage
from nova import flags
+from nova import log as logging
if runtime.platformType == "win32":
@@ -234,22 +234,12 @@ def serve(filename):
OptionsClass = WrapTwistedOptions(TwistdServerOptions)
options = OptionsClass()
argv = options.parseOptions()
-logging.getLogger('amqplib').setLevel(logging.WARN)
FLAGS.python = filename
FLAGS.no_save = True
if not FLAGS.pidfile:
FLAGS.pidfile = '%s.pid' % name
elif FLAGS.pidfile.endswith('twistd.pid'):
FLAGS.pidfile = FLAGS.pidfile.replace('twistd.pid', '%s.pid' % name)
-# NOTE(vish): if we're running nodaemon, redirect the log to stdout
-if FLAGS.nodaemon and not FLAGS.logfile:
-FLAGS.logfile = "-"
-if not FLAGS.logfile:
-FLAGS.logfile = '%s.log' % name
-elif FLAGS.logfile.endswith('twistd.log'):
-FLAGS.logfile = FLAGS.logfile.replace('twistd.log', '%s.log' % name)
-if FLAGS.logdir:
-FLAGS.logfile = os.path.join(FLAGS.logdir, FLAGS.logfile)
if not FLAGS.prefix:
FLAGS.prefix = name
elif FLAGS.prefix.endswith('twisted'):
@@ -270,19 +260,10 @@ def serve(filename):
print 'usage: %s [options] [start|stop|restart]' % argv[0]
sys.exit(1)
-formatter = logging.Formatter(
-'(%(name)s): %(levelname)s %(message)s')
-handler = logging.StreamHandler(log.StdioOnnaStick())
-handler.setFormatter(formatter)
-logging.getLogger().addHandler(handler)
-if FLAGS.verbose:
-logging.getLogger().setLevel(logging.DEBUG)
-else:
-logging.getLogger().setLevel(logging.WARNING)
+logging.basicConfig()
logging.debug(_("Full set of FLAGS:"))
for flag in FLAGS:
logging.debug("%s : %s" % (flag, FLAGS.get(flag, None)))
logging.audit(_("Starting %s"), name)
twistd.runApp(options)