tests pass
Commit by Vishvananda Ishaya
@@ -34,23 +34,23 @@ from nova import db
 from nova import flags
 from nova import rpc
 from nova import utils
-from nova.network import linux_net
-from nova.network import service
 from nova import datastore # for redis_db flag
 from nova.auth import manager # for auth flags
+from nova.network import manager # for network flags

 FLAGS = flags.FLAGS


 def add_lease(_mac, ip_address, _hostname, _interface):
     """Set the IP that was assigned by the DHCP server."""
     if FLAGS.fake_rabbit:
         logging.debug("leasing ip")
-        service.VlanNetworkService().lease_fixed_ip(ip_address)
+        network_manager = utils.import_object(FLAGS.network_manager)
+        network_manager.lease_fixed_ip(None, ip_address)
     else:
         rpc.cast("%s.%s" % (FLAGS.network_topic, FLAGS.node_name),
                  {"method": "lease_fixed_ip",
-                  "args": {"address": ip_address}})
+                  "args": {"context": None,
+                           "address": ip_address}})


 def old_lease(_mac, _ip_address, _hostname, _interface):
@@ -62,20 +62,24 @@ def del_lease(_mac, ip_address, _hostname, _interface):
     """Called when a lease expires."""
     if FLAGS.fake_rabbit:
         logging.debug("releasing ip")
-        service.VlanNetworkService().release_fixed_ip(ip_address)
+        network_manager = utils.import_object(FLAGS.network_manager)
+        network_manager.release_fixed_ip(None, ip_address)
     else:
         rpc.cast("%s.%s" % (FLAGS.network_topic, FLAGS.node_name),
                  {"method": "release_fixed_ip",
-                  "args": {"address": ip_address}})
+                  "args": {"context": None,
+                           "address": ip_address}})


 def init_leases(interface):
     """Get the list of hosts for an interface."""
     network_ref = db.network_get_by_bridge(None, interface)
-    return linux_net.get_dhcp_hosts(None, network_ref['id'])
+    network_manager = utils.import_object(FLAGS.network_manager)
+    return network_manager.driver.get_dhcp_hosts(None, network_ref['id'])


 def main():
+    global network_manager
     """Parse environment and arguments and call the approproate action."""
     flagfile = os.environ.get('FLAGFILE', FLAGS.dhcpbridge_flagfile)
     utils.default_flagfile(flagfile)
@@ -93,7 +97,6 @@ def main():
                                             '..',
                                             '_trial_temp',
                                             'nova.sqlite'))
-        print path
         FLAGS.sql_connection = 'sqlite:///%s' % path
         #FLAGS.sql_connection = 'mysql://root@localhost/test'
     action = argv[1]

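The recurring change in this commit is that code stops instantiating a concrete service class (for example service.VlanNetworkService()) and instead loads whatever class the network_manager, compute_manager, or volume_manager flag names at runtime through utils.import_object. A rough sketch of how such a dotted-path loader could work is below; the helper bodies are illustrative assumptions, not necessarily nova's actual utils implementation.

    import importlib

    def import_class(import_str):
        # Split 'package.module.ClassName' into a module path and a class
        # name, import the module, and return the class object.
        module_name, _, class_name = import_str.rpartition('.')
        module = importlib.import_module(module_name)
        return getattr(module, class_name)

    def import_object(import_str):
        # Return an instance of the named class, e.g.
        # import_object('nova.network.manager.VlanManager').
        return import_class(import_str)()

With this indirection, swapping the backend only requires changing the flag value; call sites such as the dhcpbridge helpers above no longer import the concrete manager module.
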
@@ -252,6 +252,7 @@ class AuthManager(object):
         __init__ is run every time AuthManager() is called, so we only
         reset the driver if it is not set or a new driver is specified.
         """
+        self.network_manager = utils.import_object(FLAGS.network_manager)
         if driver or not getattr(self, 'driver', None):
             self.driver = utils.import_class(driver or FLAGS.auth_driver)

@@ -525,7 +526,8 @@ class AuthManager(object):
             if project_dict:
                 project = Project(**project_dict)
                 try:
-                    db.network_allocate(context, project.id)
+                    self.network_manager.allocate_network(context,
+                                                          project.id)
                 except:
                     drv.delete_project(project.id)
                     raise

@@ -60,7 +60,7 @@ class CloudController(object):
  sent to the other nodes.
 """
     def __init__(self):
-        self.network_manager = utils.load_object(FLAGS.network_manager)
+        self.network_manager = utils.import_object(FLAGS.network_manager)
         self.setup()

     def __str__(self):

@@ -27,8 +27,8 @@ from xml.etree import ElementTree
 from nova import flags
 from nova import rpc
 from nova import test
+from nova import utils
 from nova.auth import manager
-from nova.compute import service
 from nova.endpoint import api
 from nova.endpoint import cloud

@@ -53,7 +53,7 @@ class CloudTestCase(test.BaseTestCase):
         self.injected.append(self.cloud_consumer.attach_to_tornado(self.ioloop))

         # set up a service
-        self.compute = service.ComputeService()
+        self.compute = utils.import_class(FLAGS.compute_manager)
         self.compute_consumer = rpc.AdapterConsumer(connection=self.conn,
                                                      topic=FLAGS.compute_topic,
                                                      proxy=self.compute)

@@ -27,7 +27,6 @@ from nova import flags
 from nova import test
 from nova import utils
 from nova.auth import manager
-from nova.compute import service


 FLAGS = flags.FLAGS
@@ -60,7 +59,7 @@ class ComputeConnectionTestCase(test.TrialTestCase):
         super(ComputeConnectionTestCase, self).setUp()
         self.flags(connection_type='fake',
                    fake_storage=True)
-        self.compute = service.ComputeService()
+        self.compute = utils.import_object(FLAGS.compute_manager)
         self.manager = manager.AuthManager()
         user = self.manager.create_user('fake', 'fake', 'fake')
         project = self.manager.create_project('fake', 'fake', 'fake')

@@ -20,13 +20,19 @@ from nova import flags

 FLAGS = flags.FLAGS

-FLAGS.connection_type = 'fake'
+flags.DECLARE('fake_storage', 'nova.volume.manager')
 FLAGS.fake_storage = True
+FLAGS.connection_type = 'fake'
 FLAGS.fake_rabbit = True
-FLAGS.fake_network = True
 FLAGS.auth_driver = 'nova.auth.ldapdriver.FakeLdapDriver'
+flags.DECLARE('network_size', 'nova.network.manager')
+flags.DECLARE('num_networks', 'nova.network.manager')
+flags.DECLARE('fake_network', 'nova.network.manager')
 FLAGS.network_size = 16
 FLAGS.num_networks = 5
+FLAGS.fake_network = True
+flags.DECLARE('num_shelves', 'nova.volume.manager')
+flags.DECLARE('blades_per_shelf', 'nova.volume.manager')
 FLAGS.num_shelves = 2
 FLAGS.blades_per_shelf = 4
 FLAGS.verbose = True

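The test flags above now pair each overridden flag with a flags.DECLARE(...) call naming the module that defines it, so that module is imported and the flag definition registered before a value is assigned. A hypothetical sketch of what a DECLARE-style helper might do follows; the names and the registry argument are assumptions for illustration, not nova's actual flags code.

    import importlib

    def declare(flag_name, module_name, flag_registry):
        # Import the defining module so its flag definitions run, then check
        # that the expected flag is now present in the registry.
        importlib.import_module(module_name)
        if flag_name not in flag_registry:
            raise KeyError('%s is not defined by %s' % (flag_name, module_name))

Without the DECLARE calls, assignments like FLAGS.num_shelves = 2 could fail whenever nova.volume.manager had not been imported yet, because the flag would not exist.
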
@@ -49,14 +49,15 @@ class NetworkTestCase(test.TrialTestCase):
         self.manager = manager.AuthManager()
         self.user = self.manager.create_user('netuser', 'netuser', 'netuser')
         self.projects = []
-        self.service = service.VlanNetworkService()
+        self.network = utils.import_object(FLAGS.network_manager)
+        self.context = None
         for i in range(5):
             name = 'project%s' % i
             self.projects.append(self.manager.create_project(name,
                                                              'netuser',
                                                              name))
             # create the necessary network data for the project
-            self.service.set_network_host(self.projects[i].id)
+            self.network.set_network_host(self.context, self.projects[i].id)
         instance_id = db.instance_create(None,
                                          {'mac_address': utils.generate_mac()})
         self.instance_id = instance_id
@@ -92,16 +93,17 @@ class NetworkTestCase(test.TrialTestCase):
             db.floating_ip_get_by_address(None, ip_str)
         except exception.NotFound:
             db.floating_ip_create(None, ip_str, FLAGS.node_name)
-        float_addr = self.service.allocate_floating_ip(self.projects[0].id)
+        float_addr = self.network.allocate_floating_ip(self.context,
+                                                       self.projects[0].id)
         fix_addr = self._create_address(0)
         self.assertEqual(float_addr, str(pubnet[0]))
-        self.service.associate_floating_ip(float_addr, fix_addr)
+        self.network.associate_floating_ip(self.context, float_addr, fix_addr)
         address = db.instance_get_floating_address(None, self.instance_id)
         self.assertEqual(address, float_addr)
-        self.service.disassociate_floating_ip(float_addr)
+        self.network.disassociate_floating_ip(self.context, float_addr)
         address = db.instance_get_floating_address(None, self.instance_id)
         self.assertEqual(address, None)
-        self.service.deallocate_floating_ip(float_addr)
+        self.network.deallocate_floating_ip(self.context, float_addr)
         db.fixed_ip_deallocate(None, fix_addr)

     def test_allocate_deallocate_fixed_ip(self):

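The other convention these tests adopt is that every manager method now takes a request context as its first argument, with the tests passing None (kept in self.context) as a stand-in. A minimal illustration with a made-up manager class for the example only:

    class ExampleManager(object):
        # Illustrative only: manager methods accept a context before their
        # other arguments; a real context would carry the user and project.
        def allocate_floating_ip(self, context, project_id):
            return '10.0.0.1'  # placeholder result for the example

    network = ExampleManager()
    float_addr = network.allocate_floating_ip(None, 'project0')

This mirrors calls in the tests such as self.network.allocate_floating_ip(self.context, self.projects[0].id).
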
@@ -30,10 +30,16 @@ from nova import flags
 from nova import rpc
 from nova import test
 from nova import service
+from nova import manager

 FLAGS = flags.FLAGS

+flags.DEFINE_string("fake_manager", "nova.tests.service_unittest.FakeManager",
+                    "Manager for testing")

+class FakeManager(manager.Manager):
+    """Fake manager for tests"""
+    pass

 class ServiceTestCase(test.BaseTestCase):
     """Test cases for rpc"""
@@ -46,12 +52,12 @@ class ServiceTestCase(test.BaseTestCase):
         self.mox.StubOutWithMock(
                 service.task, 'LoopingCall', use_mock_anything=True)
         rpc.AdapterConsumer(connection=mox.IgnoreArg(),
-                            topic='run_tests.py',
+                            topic='fake',
                             proxy=mox.IsA(service.Service)
                             ).AndReturn(rpc.AdapterConsumer)

         rpc.AdapterConsumer(connection=mox.IgnoreArg(),
-                            topic='run_tests.py.%s' % FLAGS.node_name,
+                            topic='fake.%s' % FLAGS.node_name,
                             proxy=mox.IsA(service.Service)
                             ).AndReturn(rpc.AdapterConsumer)

@@ -67,7 +73,7 @@ class ServiceTestCase(test.BaseTestCase):
         rpc.AdapterConsumer.attach_to_twisted()
         self.mox.ReplayAll()

-        app = service.Service.create()
+        app = service.Service.create(bin_name='nova-fake')
         self.assert_(app)

     # We're testing sort of weird behavior in how report_state decides
@@ -82,7 +88,7 @@ class ServiceTestCase(test.BaseTestCase):
                       'binary': binary,
                       'report_count': 0,
                       'id': 1}
+        service.db.__getattr__('report_state')
         service.db.daemon_get_by_args(None,
                                       node_name,
                                       binary).AndReturn(daemon_ref)
@@ -105,6 +111,7 @@ class ServiceTestCase(test.BaseTestCase):
                       'report_count': 0,
                       'id': 1}

+        service.db.__getattr__('report_state')
         service.db.daemon_get_by_args(None,
                                       node_name,
                                       binary).AndRaise(exception.NotFound())
@@ -126,6 +133,7 @@ class ServiceTestCase(test.BaseTestCase):
                       'report_count': 0,
                       'id': 1}

+        service.db.__getattr__('report_state')
         service.db.daemon_get_by_args(None,
                                       node_name,
                                       binary).AndRaise(Exception())
@@ -145,6 +153,7 @@ class ServiceTestCase(test.BaseTestCase):
                       'report_count': 0,
                       'id': 1}

+        service.db.__getattr__('report_state')
         service.db.daemon_get_by_args(None,
                                       node_name,
                                       binary).AndReturn(daemon_ref)

@@ -24,8 +24,7 @@ from nova import exception
 from nova import db
 from nova import flags
 from nova import test
-from nova.compute import service as compute_service
-from nova.volume import service as volume_service
+from nova import utils


 FLAGS = flags.FLAGS
@@ -35,10 +34,11 @@ class VolumeTestCase(test.TrialTestCase):
     def setUp(self):
         logging.getLogger().setLevel(logging.DEBUG)
         super(VolumeTestCase, self).setUp()
-        self.compute = compute_service.ComputeService()
+        self.compute = utils.import_object(FLAGS.compute_manager)
         self.flags(connection_type='fake',
                    fake_storage=True)
-        self.volume = volume_service.VolumeService()
+        self.volume = utils.import_object(FLAGS.volume_manager)
+        self.context = None


     def _create_volume(self, size='0'):
@@ -49,15 +49,15 @@ class VolumeTestCase(test.TrialTestCase):
         vol['availability_zone'] = FLAGS.storage_availability_zone
         vol['status'] = "creating"
         vol['attach_status'] = "detached"
-        return db.volume_create(None, vol)
+        return db.volume_create(None, vol)['id']

     @defer.inlineCallbacks
     def test_run_create_volume(self):
         volume_id = self._create_volume()
-        yield self.volume.create_volume(volume_id)
+        yield self.volume.create_volume(self.context, volume_id)
         self.assertEqual(volume_id, db.volume_get(None, volume_id).id)

-        yield self.volume.delete_volume(volume_id)
+        yield self.volume.delete_volume(self.context, volume_id)
         self.assertRaises(exception.NotFound,
                           db.volume_get,
                           None,
@@ -70,7 +70,7 @@ class VolumeTestCase(test.TrialTestCase):
         defer.returnValue(True)
         try:
             volume_id = self._create_volume('1001')
-            yield self.volume.create_volume(volume_id)
+            yield self.volume.create_volume(self.context, volume_id)
             self.fail("Should have thrown TypeError")
         except TypeError:
             pass
@@ -81,14 +81,15 @@ class VolumeTestCase(test.TrialTestCase):
         total_slots = FLAGS.num_shelves * FLAGS.blades_per_shelf
         for i in xrange(total_slots):
             volume_id = self._create_volume()
-            yield self.volume.create_volume(volume_id)
+            yield self.volume.create_volume(self.context, volume_id)
             vols.append(volume_id)
         volume_id = self._create_volume()
-        self.assertFailure(self.volume.create_volume(volume_id),
+        self.assertFailure(self.volume.create_volume(self.context,
+                                                     volume_id),
                            db.NoMoreBlades)
         db.volume_destroy(None, volume_id)
-        for id in vols:
-            yield self.volume.delete_volume(id)
+        for volume_id in vols:
+            yield self.volume.delete_volume(self.context, volume_id)

     @defer.inlineCallbacks
     def test_run_attach_detach_volume(self):
@@ -96,7 +97,7 @@ class VolumeTestCase(test.TrialTestCase):
         instance_id = "storage-test"
         mountpoint = "/dev/sdf"
         volume_id = self._create_volume()
-        yield self.volume.create_volume(volume_id)
+        yield self.volume.create_volume(self.context, volume_id)
         if FLAGS.fake_tests:
             db.volume_attached(None, volume_id, instance_id, mountpoint)
         else:
@@ -109,15 +110,16 @@ class VolumeTestCase(test.TrialTestCase):
         self.assertEqual(vol['instance_id'], instance_id)
         self.assertEqual(vol['mountpoint'], mountpoint)

-        self.assertFailure(self.volume.delete_volume(volume_id), exception.Error)
+        self.assertFailure(self.volume.delete_volume(self.context, volume_id),
+                           exception.Error)
         if FLAGS.fake_tests:
             db.volume_detached(None, volume_id)
         else:
-            rv = yield self.volume.detach_volume(instance_id,
+            rv = yield self.compute.detach_volume(instance_id,
                                                  volume_id)
         self.assertEqual(vol['status'], "available")

-        rv = self.volume.delete_volume(volume_id)
+        rv = self.volume.delete_volume(self.context, volume_id)
         self.assertRaises(exception.Error,
                           db.volume_get,
                           None,
@@ -142,14 +144,13 @@ class VolumeTestCase(test.TrialTestCase):
         total_slots = FLAGS.num_shelves * FLAGS.blades_per_shelf
         for i in range(total_slots):
             volume_id = self._create_volume()
-            d = self.volume.create_volume(volume_id)
+            d = self.volume.create_volume(self.context, volume_id)
             d.addCallback(_check)
             d.addErrback(self.fail)
             deferreds.append(d)
         yield defer.DeferredList(deferreds)
         for volume_id in volume_ids:
-            vol = db.volume_get(None, volume_id)
-            vol.delete()
+            self.volume.delete_volume(self.context, volume_id)

     def test_multi_node(self):
         # TODO(termie): Figure out how to test with two nodes,