diff --git a/nova/flags.py b/nova/flags.py
index f3b0384a..380382a7 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -224,7 +224,7 @@ DEFINE_string('compute_manager', 'nova.compute.manager.ComputeManager',
               'Manager for compute')
 DEFINE_string('network_manager', 'nova.network.manager.VlanManager',
               'Manager for network')
-DEFINE_string('volume_manager', 'nova.volume.manager.AOEManager',
+DEFINE_string('volume_manager', 'nova.volume.manager.VolumeManager',
               'Manager for volume')
 DEFINE_string('scheduler_manager', 'nova.scheduler.manager.SchedulerManager',
               'Manager for scheduler')
diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py
index 4bbef883..d695d68a 100644
--- a/nova/tests/fake_flags.py
+++ b/nova/tests/fake_flags.py
@@ -21,7 +21,7 @@ from nova import flags
 FLAGS = flags.FLAGS
 
 flags.DECLARE('volume_driver', 'nova.volume.manager')
-FLAGS.volume_driver = 'nova.volume.driver.FakeAOEDriver'
+FLAGS.volume_driver = 'nova.volume.driver.FakeISCSIDriver'
 FLAGS.connection_type = 'fake'
 FLAGS.fake_rabbit = True
 FLAGS.auth_driver = 'nova.auth.dbdriver.DbDriver'
@@ -31,9 +31,11 @@ flags.DECLARE('fake_network', 'nova.network.manager')
 FLAGS.network_size = 16
 FLAGS.num_networks = 5
 FLAGS.fake_network = True
-flags.DECLARE('num_shelves', 'nova.volume.manager')
-flags.DECLARE('blades_per_shelf', 'nova.volume.manager')
+flags.DECLARE('num_shelves', 'nova.volume.driver')
+flags.DECLARE('blades_per_shelf', 'nova.volume.driver')
+flags.DECLARE('iscsi_target_ids', 'nova.volume.driver')
 FLAGS.num_shelves = 2
 FLAGS.blades_per_shelf = 4
+FLAGS.iscsi_target_ids = 8
 FLAGS.verbose = True
 FLAGS.sql_connection = 'sqlite:///nova.sqlite'
diff --git a/nova/tests/volume_unittest.py b/nova/tests/volume_unittest.py
index fdee30b4..34e04c8b 100644
--- a/nova/tests/volume_unittest.py
+++ b/nova/tests/volume_unittest.py
@@ -83,9 +83,9 @@ class VolumeTestCase(test.TrialTestCase):
 
     @defer.inlineCallbacks
     def test_too_many_volumes(self):
-        """Ensure that NoMoreBlades is raised when we run out of volumes"""
+        """Ensure that NoMoreTargets is raised when we run out of volumes"""
         vols = []
-        total_slots = FLAGS.num_shelves * FLAGS.blades_per_shelf
+        total_slots = FLAGS.iscsi_target_ids
         for _index in xrange(total_slots):
             volume_id = self._create_volume()
             yield self.volume.create_volume(self.context, volume_id)
@@ -93,7 +93,7 @@
         volume_id = self._create_volume()
         self.assertFailure(self.volume.create_volume(self.context,
                                                      volume_id),
-                           db.NoMoreBlades)
+                           db.NoMoreTargets)
         db.volume_destroy(context.get_admin_context(), volume_id)
         for volume_id in vols:
             yield self.volume.delete_volume(self.context, volume_id)
@@ -148,23 +148,21 @@ class VolumeTestCase(test.TrialTestCase):
         db.instance_destroy(self.context, instance_id)
 
     @defer.inlineCallbacks
-    def test_concurrent_volumes_get_different_blades(self):
-        """Ensure multiple concurrent volumes get different blades"""
+    def test_concurrent_volumes_get_different_targets(self):
+        """Ensure multiple concurrent volumes get different targets"""
         volume_ids = []
-        shelf_blades = []
+        targets = []
 
         def _check(volume_id):
-            """Make sure blades aren't duplicated"""
+            """Make sure targets aren't duplicated"""
             volume_ids.append(volume_id)
             admin_context = context.get_admin_context()
-            (shelf_id, blade_id) = db.volume_get_shelf_and_blade(admin_context,
-                                                                 volume_id)
-            shelf_blade = '%s.%s' % (shelf_id, blade_id)
-            self.assert_(shelf_blade not in shelf_blades)
-            shelf_blades.append(shelf_blade)
-            logging.debug("Blade %s allocated", shelf_blade)
+            target_id = db.volume_get_target_id(admin_context, volume_id)
+            self.assert_(target_id not in targets)
+            targets.append(target_id)
+            logging.debug("Target %s allocated", target_id)
         deferreds = []
-        total_slots = FLAGS.num_shelves * FLAGS.blades_per_shelf
+        total_slots = FLAGS.iscsi_target_ids
         for _index in xrange(total_slots):
             volume_id = self._create_volume()
             d = self.volume.create_volume(self.context, volume_id)
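For context on what this patch swaps: the old AOE code addressed volumes by (shelf, blade) pairs, so the tests sized their loops as num_shelves * blades_per_shelf; the iSCSI code instead draws one unique target id per volume from a single flat pool sized by iscsi_target_ids, and allocation fails with db.NoMoreTargets once the pool is exhausted. Below is a minimal standalone Python sketch of that allocation contract as the updated tests exercise it; TargetAllocator and the local NoMoreTargets class are illustrative stand-ins, not Nova's actual driver or db API.

    # Hypothetical, simplified sketch of the behaviour the updated tests
    # check; this is not Nova's implementation. NoMoreTargets and the pool
    # size stand in for db.NoMoreTargets and FLAGS.iscsi_target_ids above.

    class NoMoreTargets(Exception):
        """Raised when every iSCSI target id in the pool is taken."""


    class TargetAllocator(object):
        """Flat pool of target ids replacing the AOE shelf/blade pairs."""

        def __init__(self, num_targets):
            self.num_targets = num_targets  # role of FLAGS.iscsi_target_ids
            self.allocated = {}             # volume_id -> target_id

        def allocate(self, volume_id):
            """Return the lowest free target id, or raise NoMoreTargets."""
            in_use = set(self.allocated.values())
            for target_id in range(self.num_targets):
                if target_id not in in_use:
                    self.allocated[volume_id] = target_id
                    return target_id
            raise NoMoreTargets()


    if __name__ == '__main__':
        allocator = TargetAllocator(num_targets=8)  # iscsi_target_ids = 8
        for vol in range(8):
            print('volume %d -> target %d' % (vol, allocator.allocate(vol)))
        try:
            allocator.allocate('one-too-many')  # ninth volume: pool is empty
        except NoMoreTargets:
            print('NoMoreTargets raised, as test_too_many_volumes expects')

Running the sketch hands out targets 0 through 7 for eight volumes, with no duplicates, and raises on the ninth, mirroring what test_concurrent_volumes_get_different_targets and test_too_many_volumes assert above.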