merged trunk
@@ -52,6 +52,7 @@
CLI interface for nova management.
"""

import logging
import os
import sys
import time
@@ -89,11 +90,16 @@ class VpnCommands(object):
def list(self):
"""Print a listing of the VPNs for all projects."""
print "%-12s\t" % 'project',
print "%-12s\t" % 'ip:port',
print "%-20s\t" % 'ip:port',
print "%s" % 'state'
for project in self.manager.get_projects():
print "%-12s\t" % project.name,
print "%s:%s\t" % (project.vpn_ip, project.vpn_port),

try:
s = "%s:%s" % (project.vpn_ip, project.vpn_port)
except exception.NotFound:
s = "None"
print "%-20s\t" % s,

vpn = self._vpn_for(project.id)
if vpn:
@@ -115,7 +121,7 @@ class VpnCommands(object):

def _vpn_for(self, project_id):
"""Get the VPN instance for a project ID."""
for instance in db.instance_get_all():
for instance in db.instance_get_all(None):
if (instance['image_id'] == FLAGS.vpn_image_id
and not instance['state_description'] in
['shutting_down', 'shutdown']
@@ -442,6 +448,10 @@ def main():
"""Parse options and call the appropriate class/method."""
utils.default_flagfile('/etc/nova/nova-manage.conf')
argv = FLAGS(sys.argv)

if FLAGS.verbose:
logging.getLogger().setLevel(logging.DEBUG)

script_name = argv.pop(0)
if len(argv) < 1:
print script_name + " category action [<args>]"
236 nova/auth/dbdriver.py Normal file
@@ -0,0 +1,236 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

"""
Auth driver using the DB as its backend.
"""

import logging
import sys

from nova import exception
from nova import db

class DbDriver(object):
"""DB Auth driver

Defines enter and exit and therefore supports the with/as syntax.
"""

def __init__(self):
"""Imports the LDAP module"""
pass
db

def __enter__(self):
return self

def __exit__(self, exc_type, exc_value, traceback):
pass

def get_user(self, uid):
"""Retrieve user by id"""
return self._db_user_to_auth_user(db.user_get({}, uid))

def get_user_from_access_key(self, access):
"""Retrieve user by access key"""
return self._db_user_to_auth_user(db.user_get_by_access_key({}, access))

def get_project(self, pid):
"""Retrieve project by id"""
return self._db_project_to_auth_projectuser(db.project_get({}, pid))

def get_users(self):
"""Retrieve list of users"""
return [self._db_user_to_auth_user(user) for user in db.user_get_all({})]

def get_projects(self, uid=None):
"""Retrieve list of projects"""
if uid:
result = db.project_get_by_user({}, uid)
else:
result = db.project_get_all({})
return [self._db_project_to_auth_projectuser(proj) for proj in result]

def create_user(self, name, access_key, secret_key, is_admin):
"""Create a user"""
values = { 'id' : name,
'access_key' : access_key,
'secret_key' : secret_key,
'is_admin' : is_admin
}
try:
user_ref = db.user_create({}, values)
return self._db_user_to_auth_user(user_ref)
except exception.Duplicate, e:
raise exception.Duplicate('User %s already exists' % name)

def _db_user_to_auth_user(self, user_ref):
return { 'id' : user_ref['id'],
'name' : user_ref['id'],
'access' : user_ref['access_key'],
'secret' : user_ref['secret_key'],
'admin' : user_ref['is_admin'] }

def _db_project_to_auth_projectuser(self, project_ref):
return { 'id' : project_ref['id'],
'name' : project_ref['name'],
'project_manager_id' : project_ref['project_manager'],
'description' : project_ref['description'],
'member_ids' : [member['id'] for member in project_ref['members']] }

def create_project(self, name, manager_uid,
description=None, member_uids=None):
"""Create a project"""
manager = db.user_get({}, manager_uid)
if not manager:
raise exception.NotFound("Project can't be created because "
"manager %s doesn't exist" % manager_uid)

# description is a required attribute
if description is None:
description = name

# First, we ensure that all the given users exist before we go
# on to create the project. This way we won't have to destroy
# the project again because a user turns out to be invalid.
members = set([manager])
if member_uids != None:
for member_uid in member_uids:
member = db.user_get({}, member_uid)
if not member:
raise exception.NotFound("Project can't be created "
"because user %s doesn't exist"
% member_uid)
members.add(member)

values = { 'id' : name,
'name' : name,
'project_manager' : manager['id'],
'description': description }

try:
project = db.project_create({}, values)
except exception.Duplicate:
raise exception.Duplicate("Project can't be created because "
"project %s already exists" % name)

for member in members:
db.project_add_member({}, project['id'], member['id'])

# This looks silly, but ensures that the members element has been
# correctly populated
project_ref = db.project_get({}, project['id'])
return self._db_project_to_auth_projectuser(project_ref)

def modify_project(self, project_id, manager_uid=None, description=None):
"""Modify an existing project"""
if not manager_uid and not description:
return
values = {}
if manager_uid:
manager = db.user_get({}, manager_uid)
if not manager:
raise exception.NotFound("Project can't be modified because "
"manager %s doesn't exist" %
manager_uid)
values['project_manager'] = manager['id']
if description:
values['description'] = description

db.project_update({}, project_id, values)

def add_to_project(self, uid, project_id):
"""Add user to project"""
user, project = self._validate_user_and_project(uid, project_id)
db.project_add_member({}, project['id'], user['id'])

def remove_from_project(self, uid, project_id):
"""Remove user from project"""
user, project = self._validate_user_and_project(uid, project_id)
db.project_remove_member({}, project['id'], user['id'])

def is_in_project(self, uid, project_id):
"""Check if user is in project"""
user, project = self._validate_user_and_project(uid, project_id)
return user in project.members

def has_role(self, uid, role, project_id=None):
"""Check if user has role

If project is specified, it checks for local role, otherwise it
checks for global role
"""

return role in self.get_user_roles(uid, project_id)

def add_role(self, uid, role, project_id=None):
"""Add role for user (or user and project)"""
if not project_id:
db.user_add_role({}, uid, role)
return
db.user_add_project_role({}, uid, project_id, role)

def remove_role(self, uid, role, project_id=None):
"""Remove role for user (or user and project)"""
if not project_id:
db.user_remove_role({}, uid, role)
return
db.user_remove_project_role({}, uid, project_id, role)

def get_user_roles(self, uid, project_id=None):
"""Retrieve list of roles for user (or user and project)"""
if project_id is None:
roles = db.user_get_roles({}, uid)
return roles
else:
roles = db.user_get_roles_for_project({}, uid, project_id)
return roles

def delete_user(self, id):
"""Delete a user"""
user = db.user_get({}, id)
db.user_delete({}, user['id'])

def delete_project(self, project_id):
"""Delete a project"""
db.project_delete({}, project_id)

def modify_user(self, uid, access_key=None, secret_key=None, admin=None):
"""Modify an existing user"""
if not access_key and not secret_key and admin is None:
return
values = {}
if access_key:
values['access_key'] = access_key
if secret_key:
values['secret_key'] = secret_key
if admin is not None:
values['is_admin'] = admin
db.user_update({}, uid, values)

def _validate_user_and_project(self, user_id, project_id):
user = db.user_get({}, user_id)
if not user:
raise exception.NotFound('User "%s" not found' % user_id)
project = db.project_get({}, project_id)
if not project:
raise exception.NotFound('Project "%s" not found' % project_id)
return user, project
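
The new DbDriver exposes the same interface as the LDAP driver but stores users and projects through nova's db layer. A minimal usage sketch follows; it is illustrative only and not part of this commit, it assumes a configured nova database, and the user/project names are hypothetical:

# Illustrative only -- not part of the diff above.
from nova.auth.dbdriver import DbDriver

with DbDriver() as driver:
    # create_user(name, access_key, secret_key, is_admin) returns an auth-user dict
    user = driver.create_user('alice', 'access', 'secret', False)
    project = driver.create_project('proj1', 'alice', 'demo project')
    assert driver.get_user('alice')['access'] == 'access'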
@@ -69,7 +69,7 @@ flags.DEFINE_string('credential_cert_subject',
'/C=US/ST=California/L=MountainView/O=AnsoLabs/'
'OU=NovaDev/CN=%s-%s',
'Subject for certificate for users')
flags.DEFINE_string('auth_driver', 'nova.auth.ldapdriver.FakeLdapDriver',
flags.DEFINE_string('auth_driver', 'nova.auth.dbdriver.DbDriver',
'Driver that auth manager uses')
@@ -640,7 +640,10 @@ class AuthManager(object):
zippy.writestr(FLAGS.credential_key_file, private_key)
zippy.writestr(FLAGS.credential_cert_file, signed_cert)

(vpn_ip, vpn_port) = self.get_project_vpn_data(project)
try:
(vpn_ip, vpn_port) = self.get_project_vpn_data(project)
except exception.NotFound:
vpn_ip = None
if vpn_ip:
configfile = open(FLAGS.vpn_client_template, "r")
s = string.Template(configfile.read())
@@ -69,6 +69,9 @@ class NotEmpty(Error):
class Invalid(Error):
pass

class InvalidInputException(Error):
pass

def wrap_exception(f):
def _wrap(*args, **kw):
@@ -22,6 +22,7 @@ import logging
import Queue as queue

from carrot.backends import base
from eventlet import greenthread

class Message(base.BaseMessage):
@@ -38,6 +39,7 @@ class Exchange(object):
def publish(self, message, routing_key=None):
logging.debug('(%s) publish (key: %s) %s',
self.name, routing_key, message)
routing_key = routing_key.split('.')[0]
if routing_key in self._routes:
for f in self._routes[routing_key]:
logging.debug('Publishing to route %s', f)
@@ -94,6 +96,18 @@ class Backend(object):
self._exchanges[exchange].bind(self._queues[queue].push,
routing_key)

def declare_consumer(self, queue, callback, *args, **kwargs):
self.current_queue = queue
self.current_callback = callback

def consume(self, *args, **kwargs):
while True:
item = self.get(self.current_queue)
if item:
self.current_callback(item)
raise StopIteration()
greenthread.sleep(0)

def get(self, queue, no_ack=False):
if not queue in self._queues or not self._queues[queue].size():
return None
@@ -222,6 +222,10 @@ DEFINE_string('volume_manager', 'nova.volume.manager.AOEManager',
DEFINE_string('scheduler_manager', 'nova.scheduler.manager.SchedulerManager',
'Manager for scheduler')

# The service to use for image search and retrieval
DEFINE_string('image_service', 'nova.image.service.LocalImageService',
'The service to use for retrieving and searching for images.')

DEFINE_string('host', socket.gethostname(),
'name of this node')
@@ -113,7 +113,7 @@ class BackRelayWithInput(protocol.ProcessProtocol):
if self.started_deferred:
self.started_deferred.callback(self)
if self.process_input:
self.transport.write(self.process_input)
self.transport.write(str(self.process_input))
self.transport.closeStdin()

def get_process_output(executable, args=None, env=None, path=None,
@@ -28,6 +28,7 @@ import uuid

from carrot import connection as carrot_connection
from carrot import messaging
from eventlet import greenthread
from twisted.internet import defer
from twisted.internet import task

@@ -107,6 +108,14 @@ class Consumer(messaging.Consumer):
logging.exception("Failed to fetch message from queue")
self.failed_connection = True

def attach_to_eventlet(self):
"""Only needed for unit tests!"""
def fetch_repeatedly():
while True:
self.fetch(enable_callbacks=True)
greenthread.sleep(0.1)
greenthread.spawn(fetch_repeatedly)

def attach_to_twisted(self):
"""Attach a callback to twisted that fires 10 times a second"""
loop = task.LoopingCall(self.fetch, enable_callbacks=True)
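
attach_to_eventlet is the greenthread counterpart to attach_to_twisted and is only meant for unit tests. A rough sketch of how a test wires it up, matching the pattern used later in this commit (illustrative; the connection and proxy objects are assumed, not part of the diff):

# Illustrative only -- not part of the diff above.
consumer = rpc.AdapterConsumer(connection=conn,
                               topic=FLAGS.compute_topic,
                               proxy=compute_manager)
consumer.attach_to_eventlet()  # spawns a greenthread that calls fetch() every 0.1s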
@@ -106,6 +106,7 @@ def serve(name, main):
def daemonize(args, name, main):
"""Does the work of daemonizing the process"""
logging.getLogger('amqplib').setLevel(logging.WARN)
files_to_keep = []
if FLAGS.daemonize:
logger = logging.getLogger()
formatter = logging.Formatter(
@@ -114,12 +115,14 @@ def daemonize(args, name, main):
syslog = logging.handlers.SysLogHandler(address='/dev/log')
syslog.setFormatter(formatter)
logger.addHandler(syslog)
files_to_keep.append(syslog.socket)
else:
if not FLAGS.logfile:
FLAGS.logfile = '%s.log' % name
logfile = logging.FileHandler(FLAGS.logfile)
logfile.setFormatter(formatter)
logger.addHandler(logfile)
files_to_keep.append(logfile.stream)
stdin, stdout, stderr = None, None, None
else:
stdin, stdout, stderr = sys.stdin, sys.stdout, sys.stderr
@@ -139,6 +142,7 @@ def daemonize(args, name, main):
stdout=stdout,
stderr=stderr,
uid=FLAGS.uid,
gid=FLAGS.gid
gid=FLAGS.gid,
files_preserve=files_to_keep
):
main(args)
@@ -52,11 +52,17 @@ class Service(object, service.Service):
self.host = host
self.binary = binary
self.topic = topic
manager_class = utils.import_class(manager)
self.manager = manager_class(host=host, *args, **kwargs)
self.manager_class_name = manager
super(Service, self).__init__(*args, **kwargs)
self.saved_args, self.saved_kwargs = args, kwargs

def startService(self): # pylint: disable-msg C0103
manager_class = utils.import_class(self.manager_class_name)
self.manager = manager_class(host=self.host, *self.saved_args,
**self.saved_kwargs)
self.manager.init_host()
self.model_disconnected = False
super(Service, self).__init__(*args, **kwargs)
try:
service_ref = db.service_get_by_args(None,
self.host,
@@ -94,6 +94,7 @@ class TrialTestCase(unittest.TestCase):

if FLAGS.fake_rabbit:
fakerabbit.reset_all()
db.security_group_destroy_all(None)

super(TrialTestCase, self).tearDown()
@@ -91,6 +91,9 @@ class ApiEc2TestCase(test.BaseTestCase):
self.host = '127.0.0.1'

self.app = api.API()

def expect_http(self, host=None, is_secure=False):
"""Returns a new EC2 connection"""
self.ec2 = boto.connect_ec2(
aws_access_key_id='fake',
aws_secret_access_key='fake',
@@ -100,9 +103,6 @@ class ApiEc2TestCase(test.BaseTestCase):
path='/services/Cloud')

self.mox.StubOutWithMock(self.ec2, 'new_http_connection')

def expect_http(self, host=None, is_secure=False):
"""Returns a new EC2 connection"""
http = FakeHttplibConnection(
self.app, '%s:8773' % (self.host), False)
# pylint: disable-msg=E1103
@@ -138,3 +138,185 @@ class ApiEc2TestCase(test.BaseTestCase):
self.assertEquals(len(results), 1)
self.manager.delete_project(project)
self.manager.delete_user(user)

def test_get_all_security_groups(self):
"""Test that we can retrieve security groups"""
self.expect_http()
self.mox.ReplayAll()
user = self.manager.create_user('fake', 'fake', 'fake', admin=True)
project = self.manager.create_project('fake', 'fake', 'fake')

rv = self.ec2.get_all_security_groups()

self.assertEquals(len(rv), 1)
self.assertEquals(rv[0].name, 'default')

self.manager.delete_project(project)
self.manager.delete_user(user)

def test_create_delete_security_group(self):
"""Test that we can create a security group"""
self.expect_http()
self.mox.ReplayAll()
user = self.manager.create_user('fake', 'fake', 'fake', admin=True)
project = self.manager.create_project('fake', 'fake', 'fake')

# At the moment, you need both of these to actually be netadmin
self.manager.add_role('fake', 'netadmin')
project.add_role('fake', 'netadmin')

security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd") \
for x in range(random.randint(4, 8)))

self.ec2.create_security_group(security_group_name, 'test group')

self.expect_http()
self.mox.ReplayAll()

rv = self.ec2.get_all_security_groups()
self.assertEquals(len(rv), 2)
self.assertTrue(security_group_name in [group.name for group in rv])

self.expect_http()
self.mox.ReplayAll()

self.ec2.delete_security_group(security_group_name)

self.manager.delete_project(project)
self.manager.delete_user(user)

def test_authorize_revoke_security_group_cidr(self):
"""
Test that we can add and remove CIDR based rules
to a security group
"""
self.expect_http()
self.mox.ReplayAll()
user = self.manager.create_user('fake', 'fake', 'fake')
project = self.manager.create_project('fake', 'fake', 'fake')

# At the moment, you need both of these to actually be netadmin
self.manager.add_role('fake', 'netadmin')
project.add_role('fake', 'netadmin')

security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd") \
for x in range(random.randint(4, 8)))

group = self.ec2.create_security_group(security_group_name, 'test group')

self.expect_http()
self.mox.ReplayAll()
group.connection = self.ec2

group.authorize('tcp', 80, 81, '0.0.0.0/0')

self.expect_http()
self.mox.ReplayAll()

rv = self.ec2.get_all_security_groups()
# I don't bother checkng that we actually find it here,
# because the create/delete unit test further up should
# be good enough for that.
for group in rv:
if group.name == security_group_name:
self.assertEquals(len(group.rules), 1)
self.assertEquals(int(group.rules[0].from_port), 80)
self.assertEquals(int(group.rules[0].to_port), 81)
self.assertEquals(len(group.rules[0].grants), 1)
self.assertEquals(str(group.rules[0].grants[0]), '0.0.0.0/0')

self.expect_http()
self.mox.ReplayAll()
group.connection = self.ec2

group.revoke('tcp', 80, 81, '0.0.0.0/0')

self.expect_http()
self.mox.ReplayAll()

self.ec2.delete_security_group(security_group_name)

self.expect_http()
self.mox.ReplayAll()
group.connection = self.ec2

rv = self.ec2.get_all_security_groups()

self.assertEqual(len(rv), 1)
self.assertEqual(rv[0].name, 'default')

self.manager.delete_project(project)
self.manager.delete_user(user)

return

def test_authorize_revoke_security_group_foreign_group(self):
"""
Test that we can grant and revoke another security group access
to a security group
"""
self.expect_http()
self.mox.ReplayAll()
user = self.manager.create_user('fake', 'fake', 'fake', admin=True)
project = self.manager.create_project('fake', 'fake', 'fake')

# At the moment, you need both of these to actually be netadmin
self.manager.add_role('fake', 'netadmin')
project.add_role('fake', 'netadmin')

security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd") \
for x in range(random.randint(4, 8)))
other_security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd") \
for x in range(random.randint(4, 8)))

group = self.ec2.create_security_group(security_group_name, 'test group')

self.expect_http()
self.mox.ReplayAll()

other_group = self.ec2.create_security_group(other_security_group_name,
'some other group')

self.expect_http()
self.mox.ReplayAll()
group.connection = self.ec2

group.authorize(src_group=other_group)

self.expect_http()
self.mox.ReplayAll()

rv = self.ec2.get_all_security_groups()

# I don't bother checkng that we actually find it here,
# because the create/delete unit test further up should
# be good enough for that.
for group in rv:
if group.name == security_group_name:
self.assertEquals(len(group.rules), 1)
self.assertEquals(len(group.rules[0].grants), 1)
self.assertEquals(str(group.rules[0].grants[0]),
'%s-%s' % (other_security_group_name, 'fake'))

self.expect_http()
self.mox.ReplayAll()

rv = self.ec2.get_all_security_groups()

for group in rv:
if group.name == security_group_name:
self.expect_http()
self.mox.ReplayAll()
group.connection = self.ec2
group.revoke(src_group=other_group)

self.expect_http()
self.mox.ReplayAll()

self.ec2.delete_security_group(security_group_name)

self.manager.delete_project(project)
self.manager.delete_user(user)

return
@@ -75,8 +75,9 @@ class user_and_project_generator(object):
self.manager.delete_user(self.user)
self.manager.delete_project(self.project)

class AuthManagerTestCase(test.TrialTestCase):
class AuthManagerTestCase(object):
def setUp(self):
FLAGS.auth_driver = self.auth_driver
super(AuthManagerTestCase, self).setUp()
self.flags(connection_type='fake')
self.manager = manager.AuthManager()
@@ -320,6 +321,12 @@ class AuthManagerTestCase(test.TrialTestCase):
self.assertEqual('secret', user.secret)
self.assertTrue(user.is_admin())

class AuthManagerLdapTestCase(AuthManagerTestCase, test.TrialTestCase):
auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver'

class AuthManagerDbTestCase(AuthManagerTestCase, test.TrialTestCase):
auth_driver = 'nova.auth.dbdriver.DbDriver'

if __name__ == "__main__":
# TODO: Implement use_fake as an option
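
The test case becomes a driver-agnostic mixin: it no longer subclasses test.TrialTestCase itself, and each concrete subclass pins FLAGS.auth_driver before the shared setUp runs, so the same assertions exercise both the LDAP and DB drivers. A condensed sketch of the pattern (illustrative, restating what the hunk above does):

# Illustrative only -- condensed from the diff above.
class AuthManagerTestCase(object):          # mixin: no TestCase base class
    auth_driver = None                      # set by each concrete subclass

    def setUp(self):
        FLAGS.auth_driver = self.auth_driver
        super(AuthManagerTestCase, self).setUp()

class AuthManagerDbTestCase(AuthManagerTestCase, test.TrialTestCase):
    auth_driver = 'nova.auth.dbdriver.DbDriver'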
@@ -16,6 +16,7 @@
# License for the specific language governing permissions and limitations
# under the License.

from base64 import b64decode
import json
import logging
from M2Crypto import BIO
@@ -63,11 +64,17 @@ class CloudTestCase(test.TrialTestCase):
self.cloud = cloud.CloudController()

# set up a service
self.compute = utils.import_class(FLAGS.compute_manager)
self.compute = utils.import_class(FLAGS.compute_manager)()
self.compute_consumer = rpc.AdapterConsumer(connection=self.conn,
topic=FLAGS.compute_topic,
proxy=self.compute)
self.compute_consumer.attach_to_twisted()
self.compute_consumer.attach_to_eventlet()
self.network = utils.import_class(FLAGS.network_manager)()
self.network_consumer = rpc.AdapterConsumer(connection=self.conn,
topic=FLAGS.network_topic,
proxy=self.network)
self.network_consumer.attach_to_eventlet()

self.manager = manager.AuthManager()
self.user = self.manager.create_user('admin', 'admin', 'admin', True)
@@ -85,15 +92,17 @@ class CloudTestCase(test.TrialTestCase):
return cloud._gen_key(self.context, self.context.user.id, name)

def test_console_output(self):
if FLAGS.connection_type == 'fake':
logging.debug("Can't test instances without a real virtual env.")
return
instance_id = 'foo'
inst = yield self.compute.run_instance(instance_id)
output = yield self.cloud.get_console_output(self.context, [instance_id])
logging.debug(output)
self.assert_(output)
rv = yield self.compute.terminate_instance(instance_id)
image_id = FLAGS.default_image
instance_type = FLAGS.default_instance_type
max_count = 1
kwargs = {'image_id': image_id,
'instance_type': instance_type,
'max_count': max_count }
rv = yield self.cloud.run_instances(self.context, **kwargs)
instance_id = rv['instancesSet'][0]['instanceId']
output = yield self.cloud.get_console_output(context=self.context, instance_id=[instance_id])
self.assertEquals(b64decode(output['output']), 'FAKE CONSOLE OUTPUT')
rv = yield self.cloud.terminate_instances(self.context, [instance_id])

def test_key_generation(self):
@@ -236,7 +245,8 @@ class CloudTestCase(test.TrialTestCase):

def test_update_of_instance_display_fields(self):
inst = db.instance_create({}, {})
self.cloud.update_instance(self.context, inst['ec2_id'],
ec2_id = cloud.internal_id_to_ec2_id(inst['internal_id'])
self.cloud.update_instance(self.context, ec2_id,
display_name='c00l 1m4g3')
inst = db.instance_get({}, inst['id'])
self.assertEqual('c00l 1m4g3', inst['display_name'])
@@ -24,7 +24,7 @@ flags.DECLARE('volume_driver', 'nova.volume.manager')
FLAGS.volume_driver = 'nova.volume.driver.FakeAOEDriver'
FLAGS.connection_type = 'fake'
FLAGS.fake_rabbit = True
FLAGS.auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver'
FLAGS.auth_driver = 'nova.auth.dbdriver.DbDriver'
flags.DECLARE('network_size', 'nova.network.manager')
flags.DECLARE('num_networks', 'nova.network.manager')
flags.DECLARE('fake_network', 'nova.network.manager')
@@ -133,13 +133,22 @@ class ObjectStoreTestCase(test.TrialTestCase):
self.assertRaises(NotFound, objectstore.bucket.Bucket, 'new_bucket')

def test_images(self):
self.do_test_images('1mb.manifest.xml', True,
'image_bucket1', 'i-testing1')

def test_images_no_kernel_or_ramdisk(self):
self.do_test_images('1mb.no_kernel_or_ramdisk.manifest.xml',
False, 'image_bucket2', 'i-testing2')

def do_test_images(self, manifest_file, expect_kernel_and_ramdisk,
image_bucket, image_name):
"Test the image API."
self.context.user = self.auth_manager.get_user('user1')
self.context.project = self.auth_manager.get_project('proj1')

# create a bucket for our bundle
objectstore.bucket.Bucket.create('image_bucket', self.context)
bucket = objectstore.bucket.Bucket('image_bucket')
objectstore.bucket.Bucket.create(image_bucket, self.context)
bucket = objectstore.bucket.Bucket(image_bucket)

# upload an image manifest/parts
bundle_path = os.path.join(os.path.dirname(__file__), 'bundle')
@@ -147,18 +156,28 @@ class ObjectStoreTestCase(test.TrialTestCase):
bucket[os.path.basename(path)] = open(path, 'rb').read()

# register an image
image.Image.register_aws_image('i-testing',
'image_bucket/1mb.manifest.xml',
image.Image.register_aws_image(image_name,
'%s/%s' % (image_bucket, manifest_file),
self.context)

# verify image
my_img = image.Image('i-testing')
my_img = image.Image(image_name)
result_image_file = os.path.join(my_img.path, 'image')
self.assertEqual(os.stat(result_image_file).st_size, 1048576)

sha = hashlib.sha1(open(result_image_file).read()).hexdigest()
self.assertEqual(sha, '3b71f43ff30f4b15b5cd85dd9e95ebc7e84eb5a3')

if expect_kernel_and_ramdisk:
# Verify the default kernel and ramdisk are set
self.assertEqual(my_img.metadata['kernelId'], 'aki-test')
self.assertEqual(my_img.metadata['ramdiskId'], 'ari-test')
else:
# Verify that the default kernel and ramdisk (the one from FLAGS)
# doesn't get embedded in the metadata
self.assertFalse('kernelId' in my_img.metadata)
self.assertFalse('ramdiskId' in my_img.metadata)

# verify image permissions
self.context.user = self.auth_manager.get_user('user2')
self.context.project = self.auth_manager.get_project('proj2')
@@ -118,10 +118,12 @@ class SimpleDriverTestCase(test.TrialTestCase):
'nova-compute',
'compute',
FLAGS.compute_manager)
compute1.startService()
compute2 = service.Service('host2',
'nova-compute',
'compute',
FLAGS.compute_manager)
compute2.startService()
hosts = self.scheduler.driver.hosts_up(self.context, 'compute')
self.assertEqual(len(hosts), 2)
compute1.kill()
@@ -133,10 +135,12 @@ class SimpleDriverTestCase(test.TrialTestCase):
'nova-compute',
'compute',
FLAGS.compute_manager)
compute1.startService()
compute2 = service.Service('host2',
'nova-compute',
'compute',
FLAGS.compute_manager)
compute2.startService()
instance_id1 = self._create_instance()
compute1.run_instance(self.context, instance_id1)
instance_id2 = self._create_instance()
@@ -154,10 +158,12 @@ class SimpleDriverTestCase(test.TrialTestCase):
'nova-compute',
'compute',
FLAGS.compute_manager)
compute1.startService()
compute2 = service.Service('host2',
'nova-compute',
'compute',
FLAGS.compute_manager)
compute2.startService()
instance_ids1 = []
instance_ids2 = []
for index in xrange(FLAGS.max_cores):
@@ -185,10 +191,12 @@ class SimpleDriverTestCase(test.TrialTestCase):
'nova-volume',
'volume',
FLAGS.volume_manager)
volume1.startService()
volume2 = service.Service('host2',
'nova-volume',
'volume',
FLAGS.volume_manager)
volume2.startService()
volume_id1 = self._create_volume()
volume1.create_volume(self.context, volume_id1)
volume_id2 = self._create_volume()
@@ -206,10 +214,12 @@ class SimpleDriverTestCase(test.TrialTestCase):
'nova-volume',
'volume',
FLAGS.volume_manager)
volume1.startService()
volume2 = service.Service('host2',
'nova-volume',
'volume',
FLAGS.volume_manager)
volume2.startService()
volume_ids1 = []
volume_ids2 = []
for index in xrange(FLAGS.max_gigabytes):
@@ -22,6 +22,8 @@ Unit Tests for remote procedure calls using queue

import mox

from twisted.application.app import startApplication

from nova import exception
from nova import flags
from nova import rpc
@@ -96,6 +98,7 @@ class ServiceTestCase(test.BaseTestCase):
self.mox.ReplayAll()

app = service.Service.create(host=host, binary=binary)
startApplication(app, False)
self.assert_(app)

# We're testing sort of weird behavior in how report_state decides
@@ -14,36 +14,72 @@
# License for the specific language governing permissions and limitations
# under the License.

from xml.etree.ElementTree import fromstring as xml_to_tree
from xml.dom.minidom import parseString as xml_to_dom

from nova import db
from nova import flags
from nova import test
from nova.api import context
from nova.api.ec2 import cloud
from nova.auth import manager

# Needed to get FLAGS.instances_path defined:
from nova.compute import manager as compute_manager
from nova.virt import libvirt_conn

FLAGS = flags.FLAGS

class LibvirtConnTestCase(test.TrialTestCase):
def setUp(self):
self.manager = manager.AuthManager()
self.user = self.manager.create_user('fake', 'fake', 'fake', admin=True)
self.project = self.manager.create_project('fake', 'fake', 'fake')
FLAGS.instances_path = ''

def test_get_uri_and_template(self):
class MockDataModel(object):
def __init__(self):
self.datamodel = { 'name' : 'i-cafebabe',
'memory_kb' : '1024000',
'basepath' : '/some/path',
'bridge_name' : 'br100',
'mac_address' : '02:12:34:46:56:67',
'vcpus' : 2 }
ip = '10.11.12.13'

instance = { 'internal_id' : 1,
'memory_kb' : '1024000',
'basepath' : '/some/path',
'bridge_name' : 'br100',
'mac_address' : '02:12:34:46:56:67',
'vcpus' : 2,
'project_id' : 'fake',
'bridge' : 'br101',
'instance_type' : 'm1.small'}

instance_ref = db.instance_create(None, instance)
network_ref = db.project_get_network(None, self.project.id)

fixed_ip = { 'address' : ip,
'network_id' : network_ref['id'] }

fixed_ip_ref = db.fixed_ip_create(None, fixed_ip)
db.fixed_ip_update(None, ip, { 'allocated' : True,
'instance_id' : instance_ref['id'] })

type_uri_map = { 'qemu' : ('qemu:///system',
[lambda s: '<domain type=\'qemu\'>' in s,
lambda s: 'type>hvm</type' in s,
lambda s: 'emulator>/usr/bin/kvm' not in s]),
[(lambda t: t.find('.').get('type'), 'qemu'),
(lambda t: t.find('./os/type').text, 'hvm'),
(lambda t: t.find('./devices/emulator'), None)]),
'kvm' : ('qemu:///system',
[lambda s: '<domain type=\'kvm\'>' in s,
lambda s: 'type>hvm</type' in s,
lambda s: 'emulator>/usr/bin/qemu<' not in s]),
[(lambda t: t.find('.').get('type'), 'kvm'),
(lambda t: t.find('./os/type').text, 'hvm'),
(lambda t: t.find('./devices/emulator'), None)]),
'uml' : ('uml:///system',
[lambda s: '<domain type=\'uml\'>' in s,
lambda s: 'type>uml</type' in s]),
}
[(lambda t: t.find('.').get('type'), 'uml'),
(lambda t: t.find('./os/type').text, 'uml')]),
}

common_checks = [(lambda t: t.find('.').tag, 'domain'),
(lambda t: \
t.find('./devices/interface/filterref/parameter') \
.get('name'), 'IP'),
(lambda t: \
t.find('./devices/interface/filterref/parameter') \
.get('value'), '10.11.12.13')]

for (libvirt_type,(expected_uri, checks)) in type_uri_map.iteritems():
FLAGS.libvirt_type = libvirt_type
@@ -52,9 +88,17 @@ class LibvirtConnTestCase(test.TrialTestCase):
uri, template = conn.get_uri_and_template()
self.assertEquals(uri, expected_uri)

for i, check in enumerate(checks):
xml = conn.toXml(MockDataModel())
self.assertTrue(check(xml), '%s failed check %d' % (xml, i))
xml = conn.to_xml(instance_ref)
tree = xml_to_tree(xml)
for i, (check, expected_result) in enumerate(checks):
self.assertEqual(check(tree),
expected_result,
'%s failed check %d' % (xml, i))

for i, (check, expected_result) in enumerate(common_checks):
self.assertEqual(check(tree),
expected_result,
'%s failed common check %d' % (xml, i))

# Deliberately not just assigning this string to FLAGS.libvirt_uri and
# checking against that later on. This way we make sure the
@@ -67,3 +111,142 @@ class LibvirtConnTestCase(test.TrialTestCase):
uri, template = conn.get_uri_and_template()
self.assertEquals(uri, testuri)

def tearDown(self):
self.manager.delete_project(self.project)
self.manager.delete_user(self.user)

class NWFilterTestCase(test.TrialTestCase):
def setUp(self):
super(NWFilterTestCase, self).setUp()

class Mock(object):
pass

self.manager = manager.AuthManager()
self.user = self.manager.create_user('fake', 'fake', 'fake', admin=True)
self.project = self.manager.create_project('fake', 'fake', 'fake')
self.context = context.APIRequestContext(self.user, self.project)

self.fake_libvirt_connection = Mock()

self.fw = libvirt_conn.NWFilterFirewall(self.fake_libvirt_connection)

def tearDown(self):
self.manager.delete_project(self.project)
self.manager.delete_user(self.user)

def test_cidr_rule_nwfilter_xml(self):
cloud_controller = cloud.CloudController()
cloud_controller.create_security_group(self.context,
'testgroup',
'test group description')
cloud_controller.authorize_security_group_ingress(self.context,
'testgroup',
from_port='80',
to_port='81',
ip_protocol='tcp',
cidr_ip='0.0.0.0/0')

security_group = db.security_group_get_by_name(self.context,
'fake',
'testgroup')

xml = self.fw.security_group_to_nwfilter_xml(security_group.id)

dom = xml_to_dom(xml)
self.assertEqual(dom.firstChild.tagName, 'filter')

rules = dom.getElementsByTagName('rule')
self.assertEqual(len(rules), 1)

# It's supposed to allow inbound traffic.
self.assertEqual(rules[0].getAttribute('action'), 'accept')
self.assertEqual(rules[0].getAttribute('direction'), 'in')

# Must be lower priority than the base filter (which blocks everything)
self.assertTrue(int(rules[0].getAttribute('priority')) < 1000)

ip_conditions = rules[0].getElementsByTagName('tcp')
self.assertEqual(len(ip_conditions), 1)
self.assertEqual(ip_conditions[0].getAttribute('srcipaddr'), '0.0.0.0')
self.assertEqual(ip_conditions[0].getAttribute('srcipmask'), '0.0.0.0')
self.assertEqual(ip_conditions[0].getAttribute('dstportstart'), '80')
self.assertEqual(ip_conditions[0].getAttribute('dstportend'), '81')

self.teardown_security_group()

def teardown_security_group(self):
cloud_controller = cloud.CloudController()
cloud_controller.delete_security_group(self.context, 'testgroup')

def setup_and_return_security_group(self):
cloud_controller = cloud.CloudController()
cloud_controller.create_security_group(self.context,
'testgroup',
'test group description')
cloud_controller.authorize_security_group_ingress(self.context,
'testgroup',
from_port='80',
to_port='81',
ip_protocol='tcp',
cidr_ip='0.0.0.0/0')

return db.security_group_get_by_name(self.context, 'fake', 'testgroup')

def test_creates_base_rule_first(self):
# These come pre-defined by libvirt
self.defined_filters = ['no-mac-spoofing',
'no-ip-spoofing',
'no-arp-spoofing',
'allow-dhcp-server']

self.recursive_depends = {}
for f in self.defined_filters:
self.recursive_depends[f] = []

def _filterDefineXMLMock(xml):
dom = xml_to_dom(xml)
name = dom.firstChild.getAttribute('name')
self.recursive_depends[name] = []
for f in dom.getElementsByTagName('filterref'):
ref = f.getAttribute('filter')
self.assertTrue(ref in self.defined_filters,
('%s referenced filter that does ' +
'not yet exist: %s') % (name, ref))
dependencies = [ref] + self.recursive_depends[ref]
self.recursive_depends[name] += dependencies

self.defined_filters.append(name)
return True

self.fake_libvirt_connection.nwfilterDefineXML = _filterDefineXMLMock

instance_ref = db.instance_create(self.context,
{'user_id': 'fake',
'project_id': 'fake'})
inst_id = instance_ref['id']

def _ensure_all_called(_):
instance_filter = 'nova-instance-%s' % instance_ref['name']
secgroup_filter = 'nova-secgroup-%s' % self.security_group['id']
for required in [secgroup_filter, 'allow-dhcp-server',
'no-arp-spoofing', 'no-ip-spoofing',
'no-mac-spoofing']:
self.assertTrue(required in self.recursive_depends[instance_filter],
"Instance's filter does not include %s" % required)

self.security_group = self.setup_and_return_security_group()

db.instance_add_security_group(self.context, inst_id, self.security_group.id)
instance = db.instance_get(self.context, inst_id)

d = self.fw.setup_nwfilters_for_instance(instance)
d.addCallback(_ensure_all_called)
d.addCallback(lambda _:self.teardown_security_group())

return d
@@ -126,7 +126,13 @@ def runthis(prompt, cmd, check_exit_code = True):

def generate_uid(topic, size=8):
return '%s-%s' % (topic, ''.join([random.choice('01234567890abcdefghijklmnopqrstuvwxyz') for x in xrange(size)]))
if topic == "i":
# Instances have integer internal ids.
return random.randint(0, 2**32-1)
else:
characters = '01234567890abcdefghijklmnopqrstuvwxyz'
choices = [random.choice(characters) for x in xrange(size)]
return '%s-%s' % (topic, ''.join(choices))

def generate_mac():
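
With this change the generated uid depends on the topic: instances get a bare random integer (used as their internal id), while everything else keeps the old 'topic-xxxxxxxx' form. A quick illustration (example values only, not part of the diff):

# Illustrative only -- example return values, not part of the diff above.
generate_uid('i')        # e.g. 1830492817, an integer internal id for instances
generate_uid('vol')      # e.g. 'vol-f3k29ax1'
generate_uid('vol', 12)  # e.g. 'vol-8snm28xjq01k'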
@@ -63,7 +63,9 @@ from nova.tests.rpc_unittest import *
from nova.tests.scheduler_unittest import *
from nova.tests.service_unittest import *
from nova.tests.validator_unittest import *
from nova.tests.virt_unittest import *
from nova.tests.volume_unittest import *
from nova.tests.virt_unittest import *

FLAGS = flags.FLAGS
25 run_tests.sh
@@ -6,6 +6,7 @@ function usage {
echo ""
echo " -V, --virtual-env Always use virtualenv. Install automatically if not present"
echo " -N, --no-virtual-env Don't use virtualenv. Run tests in local environment"
echo " -f, --force Force a clean re-build of the virtual environment. Useful when dependencies have been added."
echo " -h, --help Print this usage message"
echo ""
echo "Note: with no options specified, the script will try to run the tests in a virtual environment,"
@@ -14,20 +15,12 @@ function usage {
exit
}

function process_options {
array=$1
elements=${#array[@]}
for (( x=0;x<$elements;x++)); do
process_option ${array[${x}]}
done
}

function process_option {
option=$1
case $option in
case "$1" in
-h|--help) usage;;
-V|--virtual-env) let always_venv=1; let never_venv=0;;
-N|--no-virtual-env) let always_venv=0; let never_venv=1;;
-f|--force) let force=1;;
esac
}

@@ -35,9 +28,11 @@ venv=.nova-venv
with_venv=tools/with_venv.sh
always_venv=0
never_venv=0
options=("$@")
force=0

process_options $options
for arg in "$@"; do
process_option $arg
done

if [ $never_venv -eq 1 ]; then
# Just run the test suites in current environment
@@ -45,6 +40,12 @@ if [ $never_venv -eq 1 ]; then
exit
fi

# Remove the virtual environment if --force used
if [ $force -eq 1 ]; then
echo "Cleaning virtualenv..."
rm -rf ${venv}
fi

if [ -e ${venv} ]; then
${with_venv} python run_tests.py $@
else