tests pass

@@ -34,23 +34,23 @@ from nova import db
 from nova import flags
 from nova import rpc
 from nova import utils
 from nova.network import linux_net
-from nova.network import service
-from nova import datastore # for redis_db flag
-from nova.auth import manager # for auth flags
+from nova.network import manager # for network flags

 FLAGS = flags.FLAGS


 def add_lease(_mac, ip_address, _hostname, _interface):
     """Set the IP that was assigned by the DHCP server."""
     if FLAGS.fake_rabbit:
         logging.debug("leasing ip")
-        service.VlanNetworkService().lease_fixed_ip(ip_address)
+        network_manager = utils.import_object(FLAGS.network_manager)
+        network_manager.lease_fixed_ip(None, ip_address)
     else:
         rpc.cast("%s.%s" % (FLAGS.network_topic, FLAGS.node_name),
                  {"method": "lease_fixed_ip",
-                  "args": {"address": ip_address}})
+                  "args": {"context": None,
+                           "address": ip_address}})


 def old_lease(_mac, _ip_address, _hostname, _interface):
@@ -62,20 +62,24 @@ def del_lease(_mac, ip_address, _hostname, _interface):
     """Called when a lease expires."""
     if FLAGS.fake_rabbit:
         logging.debug("releasing ip")
-        service.VlanNetworkService().release_fixed_ip(ip_address)
+        network_manager = utils.import_object(FLAGS.network_manager)
+        network_manager.release_fixed_ip(None, ip_address)
     else:
         rpc.cast("%s.%s" % (FLAGS.network_topic, FLAGS.node_name),
                  {"method": "release_fixed_ip",
-                  "args": {"address": ip_address}})
+                  "args": {"context": None,
+                           "address": ip_address}})


 def init_leases(interface):
     """Get the list of hosts for an interface."""
     network_ref = db.network_get_by_bridge(None, interface)
-    return linux_net.get_dhcp_hosts(None, network_ref['id'])
+    network_manager = utils.import_object(FLAGS.network_manager)
+    return network_manager.driver.get_dhcp_hosts(None, network_ref['id'])


 def main():
+    global network_manager
     """Parse environment and arguments and call the approproate action."""
     flagfile = os.environ.get('FLAGFILE', FLAGS.dhcpbridge_flagfile)
     utils.default_flagfile(flagfile)
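The casts above address the network service on a single host by appending the node name to the topic. A minimal sketch of how the receiving consumer is assumed to dispatch such a message (the message shape follows the diff; the dispatch code is illustrative, not the verbatim nova.rpc source):

    def process_message(proxy, message):
        # message looks like the casts above, e.g.
        # {"method": "lease_fixed_ip",
        #  "args": {"context": None, "address": ip_address}}
        method = message.get('method')
        args = message.get('args', {})
        # apply the args dict as keyword arguments to the proxy method
        getattr(proxy, method)(**args)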
@@ -93,7 +97,6 @@ def main():
                                            '..',
                                            '_trial_temp',
                                            'nova.sqlite'))
-        print path
         FLAGS.sql_connection = 'sqlite:///%s' % path
         #FLAGS.sql_connection = 'mysql://root@localhost/test'
         action = argv[1]
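nova-dhcpbridge is the helper that dnsmasq runs through its --dhcp-script option. dnsmasq invokes the script as `<script> add|old|del <mac> <ip> [hostname]`, which is why the handlers are named add_lease, old_lease, and del_lease. A hedged sketch of the dispatch main() is assumed to perform once flags are parsed (argument handling in the real script may differ):

    def dispatch(argv):
        action = argv[1]
        if action in ('add', 'old', 'del'):
            # dnsmasq passes: action, mac, ip and, when known, hostname
            mac, ip = argv[2], argv[3]
            hostname = argv[4] if len(argv) > 4 else None
            interface = os.environ.get('DNSMASQ_INTERFACE')
            globals()[action + '_lease'](mac, ip, hostname, interface)
        else:
            # called without a lease action: emit the current host list
            print init_leases(os.environ.get('DNSMASQ_INTERFACE'))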
@@ -252,6 +252,7 @@ class AuthManager(object):
         __init__ is run every time AuthManager() is called, so we only
         reset the driver if it is not set or a new driver is specified.
         """
+        self.network_manager = utils.import_object(FLAGS.network_manager)
         if driver or not getattr(self, 'driver', None):
             self.driver = utils.import_class(driver or FLAGS.auth_driver)

@@ -525,7 +526,8 @@ class AuthManager(object):
         if project_dict:
             project = Project(**project_dict)
             try:
-                db.network_allocate(context, project.id)
+                self.network_manager.allocate_network(context,
+                                                      project.id)
             except:
                 drv.delete_project(project.id)
                 raise
@@ -60,7 +60,7 @@ class CloudController(object):
     sent to the other nodes.
     """
     def __init__(self):
-        self.network_manager = utils.load_object(FLAGS.network_manager)
+        self.network_manager = utils.import_object(FLAGS.network_manager)
         self.setup()

     def __str__(self):
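The one-character fix above matters: utils has import_object, not load_object, and it is the helper that lets a flag such as FLAGS.network_manager name the class to instantiate. A hedged sketch of the assumed semantics (not the verbatim nova.utils source):

    import importlib

    def import_class(import_str):
        """Return the class named by a dotted path, e.g. a *_manager flag value."""
        module_name, cls_name = import_str.rsplit('.', 1)
        return getattr(importlib.import_module(module_name), cls_name)

    def import_object(import_str):
        """Return an instance of the named class, falling back to the module itself."""
        try:
            return import_class(import_str)()
        except (ImportError, ValueError, AttributeError):
            return importlib.import_module(import_str)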
@@ -27,8 +27,8 @@ from xml.etree import ElementTree
 from nova import flags
 from nova import rpc
 from nova import test
+from nova import utils
 from nova.auth import manager
-from nova.compute import service
 from nova.endpoint import api
 from nova.endpoint import cloud

@@ -53,7 +53,7 @@ class CloudTestCase(test.BaseTestCase):
         self.injected.append(self.cloud_consumer.attach_to_tornado(self.ioloop))

         # set up a service
-        self.compute = service.ComputeService()
+        self.compute = utils.import_class(FLAGS.compute_manager)
         self.compute_consumer = rpc.AdapterConsumer(connection=self.conn,
                                                     topic=FLAGS.compute_topic,
                                                     proxy=self.compute)
@@ -27,7 +27,6 @@ from nova import flags
 from nova import test
 from nova import utils
 from nova.auth import manager
-from nova.compute import service


 FLAGS = flags.FLAGS
@@ -60,7 +59,7 @@ class ComputeConnectionTestCase(test.TrialTestCase):
         super(ComputeConnectionTestCase, self).setUp()
         self.flags(connection_type='fake',
                    fake_storage=True)
-        self.compute = service.ComputeService()
+        self.compute = utils.import_object(FLAGS.compute_manager)
         self.manager = manager.AuthManager()
         user = self.manager.create_user('fake', 'fake', 'fake')
         project = self.manager.create_project('fake', 'fake', 'fake')
@@ -20,13 +20,19 @@ from nova import flags

 FLAGS = flags.FLAGS

-FLAGS.connection_type = 'fake'
+flags.DECLARE('fake_storage', 'nova.volume.manager')
 FLAGS.fake_storage = True
+FLAGS.connection_type = 'fake'
 FLAGS.fake_rabbit = True
-FLAGS.fake_network = True
 FLAGS.auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver'
+flags.DECLARE('network_size', 'nova.network.manager')
+flags.DECLARE('num_networks', 'nova.network.manager')
+flags.DECLARE('fake_network', 'nova.network.manager')
 FLAGS.network_size = 16
 FLAGS.num_networks = 5
+FLAGS.fake_network = True
+flags.DECLARE('num_shelves', 'nova.volume.manager')
+flags.DECLARE('blades_per_shelf', 'nova.volume.manager')
 FLAGS.num_shelves = 2
 FLAGS.blades_per_shelf = 4
 FLAGS.verbose = True
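flags.DECLARE exists because these defaults run at import time: overriding FLAGS.fake_storage before nova.volume.manager has been imported would fail, since the flag is only registered when that module's DEFINE_* calls execute. DECLARE forces the defining module to load first. A hedged sketch of the assumed mechanics:

    def DECLARE(name, module_string):
        """Import the module whose DEFINE_* call registers `name`."""
        __import__(module_string)
        if name not in FLAGS:
            # the real implementation presumably raises a flags-specific error
            raise Exception('%s not defined by %s' % (name, module_string))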
@@ -49,14 +49,15 @@ class NetworkTestCase(test.TrialTestCase):
         self.manager = manager.AuthManager()
         self.user = self.manager.create_user('netuser', 'netuser', 'netuser')
         self.projects = []
-        self.service = service.VlanNetworkService()
+        self.network = utils.import_object(FLAGS.network_manager)
+        self.context = None
         for i in range(5):
             name = 'project%s' % i
             self.projects.append(self.manager.create_project(name,
                                                              'netuser',
                                                              name))
             # create the necessary network data for the project
-            self.service.set_network_host(self.projects[i].id)
+            self.network.set_network_host(self.context, self.projects[i].id)
         instance_id = db.instance_create(None,
                                          {'mac_address': utils.generate_mac()})
         self.instance_id = instance_id
@@ -92,16 +93,17 @@ class NetworkTestCase(test.TrialTestCase):
             db.floating_ip_get_by_address(None, ip_str)
         except exception.NotFound:
             db.floating_ip_create(None, ip_str, FLAGS.node_name)
-        float_addr = self.service.allocate_floating_ip(self.projects[0].id)
+        float_addr = self.network.allocate_floating_ip(self.context,
+                                                       self.projects[0].id)
         fix_addr = self._create_address(0)
         self.assertEqual(float_addr, str(pubnet[0]))
-        self.service.associate_floating_ip(float_addr, fix_addr)
+        self.network.associate_floating_ip(self.context, float_addr, fix_addr)
         address = db.instance_get_floating_address(None, self.instance_id)
         self.assertEqual(address, float_addr)
-        self.service.disassociate_floating_ip(float_addr)
+        self.network.disassociate_floating_ip(self.context, float_addr)
         address = db.instance_get_floating_address(None, self.instance_id)
         self.assertEqual(address, None)
-        self.service.deallocate_floating_ip(float_addr)
+        self.network.deallocate_floating_ip(self.context, float_addr)
         db.fixed_ip_deallocate(None, fix_addr)

     def test_allocate_deallocate_fixed_ip(self):
@@ -30,10 +30,16 @@ from nova import flags
 from nova import rpc
 from nova import test
 from nova import service

+from nova import manager

 FLAGS = flags.FLAGS

+flags.DEFINE_string("fake_manager", "nova.tests.service_unittest.FakeManager",
+                    "Manager for testing")
+
+class FakeManager(manager.Manager):
+    """Fake manager for tests"""
+    pass

 class ServiceTestCase(test.BaseTestCase):
     """Test cases for rpc"""
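FakeManager gives the service tests a trivial subclass of the new nova.manager.Manager base that FLAGS.fake_manager can point at. A hedged sketch of what that base class is assumed to provide (the real nova/manager.py may differ):

    from nova import db

    class Manager(object):
        """Assumed base for the *_manager classes named by flags in this commit."""
        def __init__(self, host=None, db_driver=None):
            self.host = host                 # node this manager runs on
            self.db = db_driver or db        # the nova.db abstraction seen above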
@@ -46,12 +52,12 @@ class ServiceTestCase(test.BaseTestCase):
         self.mox.StubOutWithMock(
             service.task, 'LoopingCall', use_mock_anything=True)
         rpc.AdapterConsumer(connection=mox.IgnoreArg(),
-                            topic='run_tests.py',
+                            topic='fake',
                             proxy=mox.IsA(service.Service)
                             ).AndReturn(rpc.AdapterConsumer)

         rpc.AdapterConsumer(connection=mox.IgnoreArg(),
-                            topic='run_tests.py.%s' % FLAGS.node_name,
+                            topic='fake.%s' % FLAGS.node_name,
                             proxy=mox.IsA(service.Service)
                             ).AndReturn(rpc.AdapterConsumer)

@@ -67,7 +73,7 @@ class ServiceTestCase(test.BaseTestCase):
         rpc.AdapterConsumer.attach_to_twisted()
         self.mox.ReplayAll()

-        app = service.Service.create()
+        app = service.Service.create(bin_name='nova-fake')
         self.assert_(app)

     # We're testing sort of weird behavior in how report_state decides
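The expected topics change from 'run_tests.py' to 'fake' because Service.create() now receives an explicit bin_name instead of inferring one from sys.argv[0], which is 'run_tests.py' under the test runner. The topic is presumably derived by stripping the 'nova-' prefix, with the per-host consumer appending the node name:

    # Hedged sketch of the assumed topic derivation.
    def topics_for(bin_name, node_name):
        topic = bin_name.rpartition('nova-')[2]      # 'nova-fake' -> 'fake'
        return topic, '%s.%s' % (topic, node_name)   # e.g. ('fake', 'fake.host1')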
@@ -82,7 +88,7 @@ class ServiceTestCase(test.BaseTestCase):
                       'binary': binary,
                       'report_count': 0,
                       'id': 1}
-
+        service.db.__getattr__('report_state')
         service.db.daemon_get_by_args(None,
                                       node_name,
                                       binary).AndReturn(daemon_ref)
@@ -105,6 +111,7 @@ class ServiceTestCase(test.BaseTestCase):
                       'report_count': 0,
                       'id': 1}

+        service.db.__getattr__('report_state')
         service.db.daemon_get_by_args(None,
                                       node_name,
                                       binary).AndRaise(exception.NotFound())
@@ -126,6 +133,7 @@ class ServiceTestCase(test.BaseTestCase):
                       'report_count': 0,
                       'id': 1}

+        service.db.__getattr__('report_state')
         service.db.daemon_get_by_args(None,
                                       node_name,
                                       binary).AndRaise(Exception())
@@ -145,6 +153,7 @@ class ServiceTestCase(test.BaseTestCase):
                       'report_count': 0,
                       'id': 1}

+        service.db.__getattr__('report_state')
         service.db.daemon_get_by_args(None,
                                       node_name,
                                       binary).AndReturn(daemon_ref)

@@ -24,8 +24,7 @@ from nova import exception
 from nova import db
 from nova import flags
 from nova import test
-from nova.compute import service as compute_service
-from nova.volume import service as volume_service
+from nova import utils


 FLAGS = flags.FLAGS
@@ -35,10 +34,11 @@ class VolumeTestCase(test.TrialTestCase):
     def setUp(self):
         logging.getLogger().setLevel(logging.DEBUG)
         super(VolumeTestCase, self).setUp()
-        self.compute = compute_service.ComputeService()
+        self.compute = utils.import_object(FLAGS.compute_manager)
         self.flags(connection_type='fake',
                    fake_storage=True)
-        self.volume = volume_service.VolumeService()
+        self.volume = utils.import_object(FLAGS.volume_manager)
+        self.context = None


     def _create_volume(self, size='0'):
@@ -49,15 +49,15 @@ class VolumeTestCase(test.TrialTestCase):
         vol['availability_zone'] = FLAGS.storage_availability_zone
         vol['status'] = "creating"
         vol['attach_status'] = "detached"
-        return db.volume_create(None, vol)
+        return db.volume_create(None, vol)['id']

     @defer.inlineCallbacks
     def test_run_create_volume(self):
         volume_id = self._create_volume()
-        yield self.volume.create_volume(volume_id)
+        yield self.volume.create_volume(self.context, volume_id)
         self.assertEqual(volume_id, db.volume_get(None, volume_id).id)

-        yield self.volume.delete_volume(volume_id)
+        yield self.volume.delete_volume(self.context, volume_id)
         self.assertRaises(exception.NotFound,
                           db.volume_get,
                           None,
@@ -70,7 +70,7 @@ class VolumeTestCase(test.TrialTestCase):
             defer.returnValue(True)
         try:
             volume_id = self._create_volume('1001')
-            yield self.volume.create_volume(volume_id)
+            yield self.volume.create_volume(self.context, volume_id)
             self.fail("Should have thrown TypeError")
         except TypeError:
             pass
@@ -81,14 +81,15 @@ class VolumeTestCase(test.TrialTestCase):
         total_slots = FLAGS.num_shelves * FLAGS.blades_per_shelf
         for i in xrange(total_slots):
             volume_id = self._create_volume()
-            yield self.volume.create_volume(volume_id)
+            yield self.volume.create_volume(self.context, volume_id)
             vols.append(volume_id)
         volume_id = self._create_volume()
-        self.assertFailure(self.volume.create_volume(volume_id),
+        self.assertFailure(self.volume.create_volume(self.context,
+                                                     volume_id),
                            db.NoMoreBlades)
         db.volume_destroy(None, volume_id)
-        for id in vols:
-            yield self.volume.delete_volume(id)
+        for volume_id in vols:
+            yield self.volume.delete_volume(self.context, volume_id)

     @defer.inlineCallbacks
     def test_run_attach_detach_volume(self):
@@ -96,7 +97,7 @@ class VolumeTestCase(test.TrialTestCase):
         instance_id = "storage-test"
         mountpoint = "/dev/sdf"
         volume_id = self._create_volume()
-        yield self.volume.create_volume(volume_id)
+        yield self.volume.create_volume(self.context, volume_id)
         if FLAGS.fake_tests:
             db.volume_attached(None, volume_id, instance_id, mountpoint)
         else:
@@ -109,15 +110,16 @@ class VolumeTestCase(test.TrialTestCase):
         self.assertEqual(vol['instance_id'], instance_id)
         self.assertEqual(vol['mountpoint'], mountpoint)

-        self.assertFailure(self.volume.delete_volume(volume_id), exception.Error)
+        self.assertFailure(self.volume.delete_volume(self.context, volume_id),
+                           exception.Error)
         if FLAGS.fake_tests:
             db.volume_detached(None, volume_id)
         else:
-            rv = yield self.volume.detach_volume(instance_id,
+            rv = yield self.compute.detach_volume(instance_id,
                                                  volume_id)
         self.assertEqual(vol['status'], "available")

-        rv = self.volume.delete_volume(volume_id)
+        rv = self.volume.delete_volume(self.context, volume_id)
         self.assertRaises(exception.Error,
                           db.volume_get,
                           None,
@@ -142,14 +144,13 @@ class VolumeTestCase(test.TrialTestCase):
         total_slots = FLAGS.num_shelves * FLAGS.blades_per_shelf
         for i in range(total_slots):
             volume_id = self._create_volume()
-            d = self.volume.create_volume(volume_id)
+            d = self.volume.create_volume(self.context, volume_id)
             d.addCallback(_check)
             d.addErrback(self.fail)
             deferreds.append(d)
         yield defer.DeferredList(deferreds)
         for volume_id in volume_ids:
-            vol = db.volume_get(None, volume_id)
-            vol.delete()
+            self.volume.delete_volume(self.context, volume_id)

     def test_multi_node(self):
         # TODO(termie): Figure out how to test with two nodes,