merged trunk

Vishvananda Ishaya
2010-11-03 14:38:14 -07:00
5 changed files with 43 additions and 20 deletions

View File

@@ -138,6 +138,8 @@ class FlagValues(gflags.FlagValues):
FLAGS = FlagValues()
gflags.FLAGS = FLAGS
gflags.DEFINE_flag(gflags.HelpFlag(), FLAGS)
def _wrapper(func):
@@ -224,7 +226,7 @@ DEFINE_string('compute_manager', 'nova.compute.manager.ComputeManager',
'Manager for compute')
DEFINE_string('network_manager', 'nova.network.manager.VlanManager',
'Manager for network')
-DEFINE_string('volume_manager', 'nova.volume.manager.AOEManager',
+DEFINE_string('volume_manager', 'nova.volume.manager.VolumeManager',
'Manager for volume')
DEFINE_string('scheduler_manager', 'nova.scheduler.manager.SchedulerManager',
'Manager for scheduler')
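
The change above swaps the default volume_manager from the AOE-specific manager class to the transport-neutral VolumeManager. For context, this flag layer wraps python-gflags: DEFINE_string registers a string flag on the shared FLAGS object, and the service layer later imports whatever class the flag names. A minimal sketch of that pattern, assuming a plain python-gflags install (load_manager is illustrative, not Nova's actual helper):

    import gflags

    FLAGS = gflags.FlagValues()
    gflags.DEFINE_string('volume_manager',
                         'nova.volume.manager.VolumeManager',
                         'Manager for volume',
                         flag_values=FLAGS)

    def load_manager(dotted_path):
        """Import the class named by a dotted path like pkg.module.Class."""
        module_name, _, class_name = dotted_path.rpartition('.')
        module = __import__(module_name, fromlist=[class_name])
        return getattr(module, class_name)

    # After parsing argv with FLAGS(sys.argv), a service would do roughly:
    #   manager_class = load_manager(FLAGS.volume_manager)
    #   manager = manager_class()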

View File

@@ -66,6 +66,27 @@ class ComputeTestCase(test.TrialTestCase):
inst['ami_launch_index'] = 0
return db.instance_create(self.context, inst)['id']
+def test_create_instance_associates_security_groups(self):
+"""Make sure create_instance associates security groups"""
+inst = {}
+inst['user_id'] = self.user.id
+inst['project_id'] = self.project.id
+values = {'name': 'default',
+'description': 'default',
+'user_id': self.user.id,
+'project_id': self.project.id}
+group = db.security_group_create(self.context, values)
+ref = self.compute.create_instance(self.context,
+security_groups=[group['id']],
+**inst)
+# reload to get groups
+instance_ref = db.instance_get(self.context, ref['id'])
+try:
+self.assertEqual(len(instance_ref['security_groups']), 1)
+finally:
+db.security_group_destroy(self.context, group['id'])
+db.instance_destroy(self.context, instance_ref['id'])
@defer.inlineCallbacks
def test_run_terminate(self):
"""Make sure it is possible to run and terminate instance"""

View File

@@ -21,7 +21,7 @@ from nova import flags
FLAGS = flags.FLAGS
flags.DECLARE('volume_driver', 'nova.volume.manager')
-FLAGS.volume_driver = 'nova.volume.driver.FakeAOEDriver'
+FLAGS.volume_driver = 'nova.volume.driver.FakeISCSIDriver'
FLAGS.connection_type = 'fake'
FLAGS.fake_rabbit = True
flags.DECLARE('auth_driver', 'nova.auth.manager')
@@ -32,9 +32,11 @@ flags.DECLARE('fake_network', 'nova.network.manager')
FLAGS.network_size = 16
FLAGS.num_networks = 5
FLAGS.fake_network = True
-flags.DECLARE('num_shelves', 'nova.volume.manager')
-flags.DECLARE('blades_per_shelf', 'nova.volume.manager')
+flags.DECLARE('num_shelves', 'nova.volume.driver')
+flags.DECLARE('blades_per_shelf', 'nova.volume.driver')
+flags.DECLARE('iscsi_num_targets', 'nova.volume.driver')
FLAGS.num_shelves = 2
FLAGS.blades_per_shelf = 4
+FLAGS.iscsi_num_targets = 8
FLAGS.verbose = True
FLAGS.sql_connection = 'sqlite:///nova.sqlite'
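
The DECLARE calls move because the shelf/blade counts (and the new iscsi_num_targets flag) are now defined in nova.volume.driver rather than nova.volume.manager. flags.DECLARE has to import the defining module so the flag is registered before the test overrides it; a simplified sketch of that behavior (not Nova's exact implementation):

    def DECLARE(name, module_string):
        """Import module_string so its DEFINE_* calls run, then verify
        that the flag was actually registered on the global FLAGS."""
        __import__(module_string)
        if name not in FLAGS:
            raise RuntimeError('%s not defined by %s' % (name, module_string))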

View File

@@ -81,7 +81,7 @@ class SimpleDriverTestCase(test.TrialTestCase):
max_cores=4,
max_gigabytes=4,
network_manager='nova.network.manager.FlatManager',
-volume_driver='nova.volume.driver.FakeAOEDriver',
+volume_driver='nova.volume.driver.FakeISCSIDriver',
scheduler_driver='nova.scheduler.simple.SimpleScheduler')
self.scheduler = manager.SchedulerManager()
self.manager = auth_manager.AuthManager()

View File

@@ -85,9 +85,9 @@ class VolumeTestCase(test.TrialTestCase):
@defer.inlineCallbacks
def test_too_many_volumes(self):
"""Ensure that NoMoreBlades is raised when we run out of volumes."""
"""Ensure that NoMoreTargets is raised when we run out of volumes."""
vols = []
-total_slots = FLAGS.num_shelves * FLAGS.blades_per_shelf
+total_slots = FLAGS.iscsi_num_targets
for _index in xrange(total_slots):
volume_id = self._create_volume()
yield self.volume.create_volume(self.context, volume_id)
@@ -95,7 +95,7 @@ class VolumeTestCase(test.TrialTestCase):
volume_id = self._create_volume()
self.assertFailure(self.volume.create_volume(self.context,
volume_id),
-db.NoMoreBlades)
+db.NoMoreTargets)
db.volume_destroy(context.get_admin_context(), volume_id)
for volume_id in vols:
yield self.volume.delete_volume(self.context, volume_id)
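
The capacity arithmetic changes with the transport: AOE addressed an export by (shelf, blade) pair, so the pool held num_shelves * blades_per_shelf slots, while iSCSI simply pre-allocates iscsi_num_targets target numbers and hands one to each volume. A standalone sketch of an allocator with the behavior the test expects (illustrative only; Nova keeps this state in the database behind the nova.db API):

    class NoMoreTargets(Exception):
        """Raised when every iSCSI target number is already claimed."""

    class TargetPool(object):
        def __init__(self, num_targets):
            # Target numbers 1..num_targets, none assigned yet.
            self._free = set(range(1, num_targets + 1))
            self._assigned = {}

        def allocate(self, volume_id):
            if not self._free:
                raise NoMoreTargets()
            target = self._free.pop()
            self._assigned[volume_id] = target
            return target

        def release(self, volume_id):
            self._free.add(self._assigned.pop(volume_id))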
@@ -150,24 +150,22 @@ class VolumeTestCase(test.TrialTestCase):
db.instance_destroy(self.context, instance_id)
@defer.inlineCallbacks
-def test_concurrent_volumes_get_different_blades(self):
-"""Ensure multiple concurrent volumes get different blades."""
+def test_concurrent_volumes_get_different_targets(self):
+"""Ensure multiple concurrent volumes get different targets."""
volume_ids = []
-shelf_blades = []
+targets = []
def _check(volume_id):
"""Make sure blades aren't duplicated."""
"""Make sure targets aren't duplicated."""
volume_ids.append(volume_id)
admin_context = context.get_admin_context()
-(shelf_id, blade_id) = db.volume_get_shelf_and_blade(admin_context,
-volume_id)
-shelf_blade = '%s.%s' % (shelf_id, blade_id)
-self.assert_(shelf_blade not in shelf_blades)
-shelf_blades.append(shelf_blade)
-logging.debug("Blade %s allocated", shelf_blade)
+iscsi_target = db.volume_get_iscsi_target_num(admin_context,
+volume_id)
+self.assert_(iscsi_target not in targets)
+targets.append(iscsi_target)
+logging.debug("Target %s allocated", iscsi_target)
deferreds = []
-total_slots = FLAGS.num_shelves * FLAGS.blades_per_shelf
+total_slots = FLAGS.iscsi_num_targets
for _index in xrange(total_slots):
volume_id = self._create_volume()
d = self.volume.create_volume(self.context, volume_id)
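
Each create_volume call returns a Twisted deferred, and _check asserts that db.volume_get_iscsi_target_num never reports the same target twice. The gather-and-verify step the test builds toward looks roughly like this (an illustrative standalone helper, not the remainder of the test):

    from twisted.internet import defer

    from nova import db

    @defer.inlineCallbacks
    def run_and_check(deferreds, volume_ids, admin_context):
        """Wait for all concurrent create_volume calls, then verify
        that no iSCSI target number was handed out twice."""
        yield defer.DeferredList(deferreds, fireOnOneErrback=True)
        seen = set()
        for volume_id in volume_ids:
            target = db.volume_get_iscsi_target_num(admin_context, volume_id)
            assert target not in seen, 'target %s allocated twice' % target
            seen.add(target)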