From deb26b79aa0a68911d1b5092cca44df77286339e Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Fri, 27 May 2011 11:10:24 +0900 Subject: [PATCH 01/30] compute: implement ec2 stop/start instances This patch implements ec2 stop/start instances with block device mapping support. --- nova/scheduler/simple.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/nova/scheduler/simple.py b/nova/scheduler/simple.py index dd568d2c6..ccbc79a36 100644 --- a/nova/scheduler/simple.py +++ b/nova/scheduler/simple.py @@ -40,7 +40,7 @@ flags.DEFINE_integer("max_networks", 1000, class SimpleScheduler(chance.ChanceScheduler): """Implements Naive Scheduler that tries to find least loaded host.""" - def schedule_run_instance(self, context, instance_id, *_args, **_kwargs): + def _schedule_instance(self, context, instance_id, *_args, **_kwargs): """Picks a host that is up and has the fewest running instances.""" instance_ref = db.instance_get(context, instance_id) if (instance_ref['availability_zone'] @@ -76,6 +76,12 @@ class SimpleScheduler(chance.ChanceScheduler): " for this request. Is the appropriate" " service running?")) + def schedule_run_instance(self, context, instance_id, *_args, **_kwargs): + return self._schedule_instance(context, instance_id, *_args, **_kwargs) + + def schedule_start_instance(self, context, instance_id, *_args, **_kwargs): + return self._schedule_instance(context, instance_id, *_args, **_kwargs) + def schedule_create_volume(self, context, volume_id, *_args, **_kwargs): """Picks a host that is up and has the fewest volumes.""" volume_ref = db.volume_get(context, volume_id) From 99b4ea07e3f4695e8989f8ba6cfb2fdb8d02d969 Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Fri, 27 May 2011 11:11:06 +0900 Subject: [PATCH 02/30] unittest: tests for boot from volume and stop/start instances --- nova/tests/test_cloud.py | 322 +++++++++++++++++++++++++++++++++++-- nova/tests/test_compute.py | 15 ++ 2 files changed, 327 insertions(+), 10 deletions(-) diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 8c7520fe8..331fadaa0 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -63,6 +63,7 @@ class CloudTestCase(test.TestCase): self.compute = self.start_service('compute') self.scheduter = self.start_service('scheduler') self.network = self.start_service('network') + self.volume = self.start_service('volume') self.image_service = utils.import_object(FLAGS.image_service) self.manager = manager.AuthManager() @@ -85,6 +86,7 @@ class CloudTestCase(test.TestCase): db.network_disassociate(self.context, network_ref['id']) self.manager.delete_project(self.project) self.manager.delete_user(self.user) + self.volume.kill() self.compute.kill() self.network.kill() super(CloudTestCase, self).tearDown() @@ -364,15 +366,22 @@ class CloudTestCase(test.TestCase): self.assertRaises(exception.ImageNotFound, deregister_image, self.context, 'ami-bad001') - def test_console_output(self): - instance_type = FLAGS.default_instance_type - max_count = 1 - kwargs = {'image_id': 'ami-1', - 'instance_type': instance_type, - 'max_count': max_count} + def _run_instance(self, **kwargs): rv = self.cloud.run_instances(self.context, **kwargs) greenthread.sleep(0.3) instance_id = rv['instancesSet'][0]['instanceId'] + return instance_id + + def _run_instance_wait(self, **kwargs): + ec2_instance_id = self._run_instance(**kwargs) + self._wait_for_running(ec2_instance_id) + return ec2_instance_id + + def test_console_output(self): + instance_id = self._run_instance( + 
image_id='ami-1', + instance_type=FLAGS.default_instance_type, + max_count=1) output = self.cloud.get_console_output(context=self.context, instance_id=[instance_id]) self.assertEquals(b64decode(output['output']), 'FAKE CONSOLE?OUTPUT') @@ -383,10 +392,7 @@ class CloudTestCase(test.TestCase): greenthread.sleep(0.3) def test_ajax_console(self): - kwargs = {'image_id': 'ami-1'} - rv = self.cloud.run_instances(self.context, **kwargs) - instance_id = rv['instancesSet'][0]['instanceId'] - greenthread.sleep(0.3) + instance_id = self._run_instance(image_id='ami-1') output = self.cloud.get_ajax_console(context=self.context, instance_id=[instance_id]) self.assertEquals(output['url'], @@ -470,3 +476,299 @@ class CloudTestCase(test.TestCase): vol = db.volume_get(self.context, vol['id']) self.assertEqual(None, vol['mountpoint']) db.volume_destroy(self.context, vol['id']) + + def _restart_compute_service(self, periodic_interval=None): + """restart compute service. NOTE: fake driver forgets all instances.""" + self.compute.kill() + if periodic_interval: + self.compute = self.start_service( + 'compute', periodic_interval=periodic_interval) + else: + self.compute = self.start_service('compute') + + def _wait_for_state(self, ctxt, instance_id, predicate): + """Wait for an stopping instance to be a given state""" + id = ec2utils.ec2_id_to_id(instance_id) + while True: + info = self.cloud.compute_api.get(context=ctxt, instance_id=id) + LOG.debug(info) + if predicate(info): + break + greenthread.sleep(1) + + def _wait_for_running(self, instance_id): + def is_running(info): + return info['state_description'] == 'running' + self._wait_for_state(self.context, instance_id, is_running) + + def _wait_for_stopped(self, instance_id): + def is_stopped(info): + return info['state_description'] == 'stopped' + self._wait_for_state(self.context, instance_id, is_stopped) + + def _wait_for_terminate(self, instance_id): + def is_deleted(info): + return info['deleted'] + elevated = self.context.elevated(read_deleted=True) + self._wait_for_state(elevated, instance_id, is_deleted) + + def test_stop_start_instance(self): + """Makes sure stop/start instnace works""" + # enforce periodic tasks run in short time to avoid wait for 60s. + self._restart_compute_service(periodic_interval=0.3) + + kwargs = {'image_id': 'ami-1', + 'instance_type': FLAGS.default_instance_type, + 'max_count': 1,} + instance_id = self._run_instance_wait(**kwargs) + + # a running instance can't be started. It is just ignored. 
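+        # (start_instances still returns a truthy result in that case)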
+ result = self.cloud.start_instances(self.context, [instance_id]) + greenthread.sleep(0.3) + self.assertTrue(result) + + result = self.cloud.stop_instances(self.context, [instance_id]) + greenthread.sleep(0.3) + self.assertTrue(result) + self._wait_for_stopped(instance_id) + + result = self.cloud.start_instances(self.context, [instance_id]) + greenthread.sleep(0.3) + self.assertTrue(result) + self._wait_for_running(instance_id) + + result = self.cloud.stop_instances(self.context, [instance_id]) + greenthread.sleep(0.3) + self.assertTrue(result) + self._wait_for_stopped(instance_id) + + result = self.cloud.terminate_instances(self.context, [instance_id]) + greenthread.sleep(0.3) + self.assertTrue(result) + + self._restart_compute_service() + + def _volume_create(self): + kwargs = {'status': 'available', + 'host': self.volume.host, + 'size': 1, + 'attach_status': 'detached',} + return db.volume_create(self.context, kwargs) + + def _assert_volume_attached(self, vol, instance_id, mountpoint): + self.assertEqual(vol['instance_id'], instance_id) + self.assertEqual(vol['mountpoint'], mountpoint) + self.assertEqual(vol['status'], "in-use") + self.assertEqual(vol['attach_status'], "attached") + + def _assert_volume_detached(self, vol): + self.assertEqual(vol['instance_id'], None) + self.assertEqual(vol['mountpoint'], None) + self.assertEqual(vol['status'], "available") + self.assertEqual(vol['attach_status'], "detached") + + def test_stop_start_with_volume(self): + """Make sure run instance with block device mapping works""" + + # enforce periodic tasks run in short time to avoid wait for 60s. + self._restart_compute_service(periodic_interval=0.3) + + vol1 = self._volume_create() + vol2 = self._volume_create() + kwargs = {'image_id': 'ami-1', + 'instance_type': FLAGS.default_instance_type, + 'max_count': 1, + 'block_device_mapping': [{'device_name': '/dev/vdb', + 'volume_id': vol1['id'], + 'delete_on_termination': False,}, + {'device_name': '/dev/vdc', + 'volume_id': vol2['id'], + 'delete_on_termination': True,}, + ]} + ec2_instance_id = self._run_instance_wait(**kwargs) + instance_id = ec2utils.ec2_id_to_id(ec2_instance_id) + + vols = db.volume_get_all_by_instance(self.context, instance_id) + self.assertEqual(len(vols), 2) + for vol in vols: + self.assertTrue(vol['id'] == vol1['id'] or vol['id'] == vol2['id']) + + vol = db.volume_get(self.context, vol1['id']) + self._assert_volume_attached(vol, instance_id, '/dev/vdb') + + vol = db.volume_get(self.context, vol2['id']) + self._assert_volume_attached(vol, instance_id, '/dev/vdc') + + result = self.cloud.stop_instances(self.context, [ec2_instance_id]) + self.assertTrue(result) + self._wait_for_stopped(ec2_instance_id) + + vol = db.volume_get(self.context, vol1['id']) + self._assert_volume_detached(vol) + vol = db.volume_get(self.context, vol2['id']) + self._assert_volume_detached(vol) + + self.cloud.start_instances(self.context, [ec2_instance_id]) + self._wait_for_running(ec2_instance_id) + vols = db.volume_get_all_by_instance(self.context, instance_id) + self.assertEqual(len(vols), 2) + for vol in vols: + self.assertTrue(vol['id'] == vol1['id'] or vol['id'] == vol2['id']) + self.assertTrue(vol['mountpoint'] == '/dev/vdb' or + vol['mountpoint'] == '/dev/vdc') + self.assertEqual(vol['instance_id'], instance_id) + self.assertEqual(vol['status'], "in-use") + self.assertEqual(vol['attach_status'], "attached") + + self.cloud.terminate_instances(self.context, [ec2_instance_id]) + greenthread.sleep(0.3) + + admin_ctxt = 
context.get_admin_context(read_deleted=False) + vol = db.volume_get(admin_ctxt, vol1['id']) + self.assertFalse(vol['deleted']) + db.volume_destroy(self.context, vol1['id']) + + greenthread.sleep(0.3) + admin_ctxt = context.get_admin_context(read_deleted=True) + vol = db.volume_get(admin_ctxt, vol2['id']) + self.assertTrue(vol['deleted']) + + self._restart_compute_service() + + def test_stop_with_attached_volume(self): + """Make sure attach info is reflected to block device mapping""" + # enforce periodic tasks run in short time to avoid wait for 60s. + self._restart_compute_service(periodic_interval=0.3) + + vol1 = self._volume_create() + vol2 = self._volume_create() + kwargs = {'image_id': 'ami-1', + 'instance_type': FLAGS.default_instance_type, + 'max_count': 1, + 'block_device_mapping': [{'device_name': '/dev/vdb', + 'volume_id': vol1['id'], + 'delete_on_termination': True,},]} + ec2_instance_id = self._run_instance_wait(**kwargs) + instance_id = ec2utils.ec2_id_to_id(ec2_instance_id) + + vols = db.volume_get_all_by_instance(self.context, instance_id) + self.assertEqual(len(vols), 1) + for vol in vols: + self.assertEqual(vol['id'], vol1['id']) + self._assert_volume_attached(vol, instance_id, '/dev/vdb') + + vol = db.volume_get(self.context, vol2['id']) + self._assert_volume_detached(vol) + + self.cloud.compute_api.attach_volume(self.context, + instance_id=instance_id, + volume_id=vol2['id'], + device='/dev/vdc') + greenthread.sleep(0.3) + vol = db.volume_get(self.context, vol2['id']) + self._assert_volume_attached(vol, instance_id, '/dev/vdc') + + self.cloud.compute_api.detach_volume(self.context, + volume_id=vol1['id']) + greenthread.sleep(0.3) + vol = db.volume_get(self.context, vol1['id']) + self._assert_volume_detached(vol) + + result = self.cloud.stop_instances(self.context, [ec2_instance_id]) + self.assertTrue(result) + self._wait_for_stopped(ec2_instance_id) + + for vol_id in (vol1['id'], vol2['id']): + vol = db.volume_get(self.context, vol_id) + self._assert_volume_detached(vol) + + self.cloud.start_instances(self.context, [ec2_instance_id]) + self._wait_for_running(ec2_instance_id) + vols = db.volume_get_all_by_instance(self.context, instance_id) + self.assertEqual(len(vols), 1) + for vol in vols: + self.assertEqual(vol['id'], vol2['id']) + self._assert_volume_attached(vol, instance_id, '/dev/vdc') + + vol = db.volume_get(self.context, vol1['id']) + self._assert_volume_detached(vol) + + self.cloud.terminate_instances(self.context, [ec2_instance_id]) + greenthread.sleep(0.3) + + for vol_id in (vol1['id'], vol2['id']): + vol = db.volume_get(self.context, vol_id) + self.assertEqual(vol['id'], vol_id) + self._assert_volume_detached(vol) + db.volume_destroy(self.context, vol_id) + + self._restart_compute_service() + + def _create_snapshot(self, ec2_volume_id): + result = self.cloud.create_snapshot(self.context, + volume_id=ec2_volume_id) + greenthread.sleep(0.3) + return result['snapshotId'] + + def test_run_with_snapshot(self): + """Makes sure run/stop/start instance with snapshot works.""" + vol = self._volume_create() + ec2_volume_id = ec2utils.id_to_ec2_id(vol['id'], 'vol-%08x') + + ec2_snapshot1_id = self._create_snapshot(ec2_volume_id) + snapshot1_id = ec2utils.ec2_id_to_id(ec2_snapshot1_id) + ec2_snapshot2_id = self._create_snapshot(ec2_volume_id) + snapshot2_id = ec2utils.ec2_id_to_id(ec2_snapshot2_id) + + kwargs = {'image_id': 'ami-1', + 'instance_type': FLAGS.default_instance_type, + 'max_count': 1, + 'block_device_mapping': [{'device_name': '/dev/vdb', + 'snapshot_id': 
snapshot1_id, + 'delete_on_termination': False,}, + {'device_name': '/dev/vdc', + 'snapshot_id': snapshot2_id, + 'delete_on_termination': True,},],} + ec2_instance_id = self._run_instance_wait(**kwargs) + instance_id = ec2utils.ec2_id_to_id(ec2_instance_id) + + vols = db.volume_get_all_by_instance(self.context, instance_id) + self.assertEqual(len(vols), 2) + vol1_id = None + vol2_id = None + for vol in vols: + snapshot_id = vol['snapshot_id'] + if snapshot_id == snapshot1_id: + vol1_id = vol['id'] + mountpoint = '/dev/vdb' + elif snapshot_id == snapshot2_id: + vol2_id = vol['id'] + mountpoint = '/dev/vdc' + else: + self.fail() + + self._assert_volume_attached(vol, instance_id, mountpoint) + + self.assertTrue(vol1_id) + self.assertTrue(vol2_id) + + self.cloud.terminate_instances(self.context, [ec2_instance_id]) + greenthread.sleep(0.3) + self._wait_for_terminate(ec2_instance_id) + + greenthread.sleep(0.3) + admin_ctxt = context.get_admin_context(read_deleted=False) + vol = db.volume_get(admin_ctxt, vol1_id) + self._assert_volume_detached(vol) + self.assertFalse(vol['deleted']) + db.volume_destroy(self.context, vol1_id) + + greenthread.sleep(0.3) + admin_ctxt = context.get_admin_context(read_deleted=True) + vol = db.volume_get(admin_ctxt, vol2_id) + self.assertTrue(vol['deleted']) + + for snapshot_id in (ec2_snapshot1_id, ec2_snapshot2_id): + self.cloud.delete_snapshot(self.context, snapshot_id) + greenthread.sleep(0.3) + db.volume_destroy(self.context, vol['id']) diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 9170837b6..f35f9ce73 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -229,6 +229,21 @@ class ComputeTestCase(test.TestCase): self.assert_(instance_ref['launched_at'] < terminate) self.assert_(instance_ref['deleted_at'] > terminate) + def test_stop(self): + """Ensure instance can be stopped""" + instance_id = self._create_instance() + self.compute.run_instance(self.context, instance_id) + self.compute.stop_instance(self.context, instance_id) + self.compute.terminate_instance(self.context, instance_id) + + def test_start(self): + """Ensure instance can be started""" + instance_id = self._create_instance() + self.compute.run_instance(self.context, instance_id) + self.compute.stop_instance(self.context, instance_id) + self.compute.start_instance(self.context, instance_id) + self.compute.terminate_instance(self.context, instance_id) + def test_pause(self): """Ensure instance can be paused""" instance_id = self._create_instance() From f96c6cd2cea6d27d6c06f664cb9d8dc74a65ccf8 Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Tue, 7 Jun 2011 13:32:06 -0400 Subject: [PATCH 03/30] removing local image service --- nova/tests/fake_flags.py | 2 +- nova/tests/test_cloud.py | 30 +++++++++++++++--------------- nova/tests/test_compute.py | 14 +++++++------- 3 files changed, 23 insertions(+), 23 deletions(-) diff --git a/nova/tests/fake_flags.py b/nova/tests/fake_flags.py index ecefc464a..2297d2f0e 100644 --- a/nova/tests/fake_flags.py +++ b/nova/tests/fake_flags.py @@ -32,7 +32,7 @@ flags.DECLARE('fake_network', 'nova.network.manager') FLAGS['network_size'].SetDefault(8) FLAGS['num_networks'].SetDefault(2) FLAGS['fake_network'].SetDefault(True) -FLAGS['image_service'].SetDefault('nova.image.local.LocalImageService') +FLAGS['image_service'].SetDefault('nova.image.fake.FakeImageService') flags.DECLARE('num_shelves', 'nova.volume.driver') flags.DECLARE('blades_per_shelf', 'nova.volume.driver') flags.DECLARE('iscsi_num_targets', 'nova.volume.driver') 
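The test_cloud.py and test_compute.py hunks that follow all make the same substitution: every LocalImageService stub becomes a stub on nova.image.fake._FakeImageService. As a rough, self-contained sketch of that stubbing pattern (assuming the 2011-era stubout library and the _FakeImageService class referenced in these diffs), the shape each test relies on is:

    import stubout

    from nova.image import fake


    def fake_show(meh, context, id):
        # Just enough image metadata for the EC2 code paths under test.
        return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1,
                'type': 'machine', 'image_state': 'available'}}

    stubs = stubout.StubOutForTesting()
    # Patch the class itself so every _FakeImageService instance created
    # by the code under test picks up the stubbed methods.
    stubs.Set(fake._FakeImageService, 'show', fake_show)
    stubs.Set(fake._FakeImageService, 'show_by_name', fake_show)
    # ... exercise the API ...
    stubs.UnsetAll()

The same three steps (define a fake, Set it on the class, UnsetAll in teardown) repeat in each hunk below.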
diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index a58e8bc39..d1f02d695 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -35,7 +35,7 @@ from nova import utils from nova.auth import manager from nova.api.ec2 import cloud from nova.api.ec2 import ec2utils -from nova.image import local +from nova.image import fake FLAGS = flags.FLAGS @@ -69,8 +69,8 @@ class CloudTestCase(test.TestCase): return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1, 'type': 'machine', 'image_state': 'available'}} - self.stubs.Set(local.LocalImageService, 'show', fake_show) - self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show) + self.stubs.Set(fake._FakeImageService, 'show', fake_show) + self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show) # NOTE(vish): set up a manual wait so rpc.cast has a chance to finish rpc_cast = rpc.cast @@ -291,7 +291,7 @@ class CloudTestCase(test.TestCase): def fake_show_none(meh, context, id): raise exception.ImageNotFound(image_id='bad_image_id') - self.stubs.Set(local.LocalImageService, 'detail', fake_detail) + self.stubs.Set(fake._FakeImageService, 'detail', fake_detail) # list all result1 = describe_images(self.context) result1 = result1['imagesSet'][0] @@ -305,8 +305,8 @@ class CloudTestCase(test.TestCase): self.assertEqual(2, len(result3['imagesSet'])) # provide an non-existing image_id self.stubs.UnsetAll() - self.stubs.Set(local.LocalImageService, 'show', fake_show_none) - self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show_none) + self.stubs.Set(fake._FakeImageService, 'show', fake_show_none) + self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show_none) self.assertRaises(exception.ImageNotFound, describe_images, self.context, ['ami-fake']) @@ -317,8 +317,8 @@ class CloudTestCase(test.TestCase): return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1, 'type': 'machine'}, 'is_public': True} - self.stubs.Set(local.LocalImageService, 'show', fake_show) - self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show) + self.stubs.Set(fake._FakeImageService, 'show', fake_show) + self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show) result = describe_image_attribute(self.context, 'ami-00000001', 'launchPermission') self.assertEqual([{'group': 'all'}], result['launchPermission']) @@ -333,9 +333,9 @@ class CloudTestCase(test.TestCase): def fake_update(meh, context, image_id, metadata, data=None): return metadata - self.stubs.Set(local.LocalImageService, 'show', fake_show) - self.stubs.Set(local.LocalImageService, 'show_by_name', fake_show) - self.stubs.Set(local.LocalImageService, 'update', fake_update) + self.stubs.Set(fake._FakeImageService, 'show', fake_show) + self.stubs.Set(fake._FakeImageService, 'show_by_name', fake_show) + self.stubs.Set(fake._FakeImageService, 'update', fake_update) result = modify_image_attribute(self.context, 'ami-00000001', 'launchPermission', 'add', user_group=['all']) @@ -347,7 +347,7 @@ class CloudTestCase(test.TestCase): def fake_delete(self, context, id): return None - self.stubs.Set(local.LocalImageService, 'delete', fake_delete) + self.stubs.Set(fake._FakeImageService, 'delete', fake_delete) # valid image result = deregister_image(self.context, 'ami-00000001') self.assertEqual(result['imageId'], 'ami-00000001') @@ -357,7 +357,7 @@ class CloudTestCase(test.TestCase): def fake_detail_empty(self, context): return [] - self.stubs.Set(local.LocalImageService, 'detail', fake_detail_empty) + self.stubs.Set(fake._FakeImageService, 
'detail', fake_detail_empty) self.assertRaises(exception.ImageNotFound, deregister_image, self.context, 'ami-bad001') @@ -468,7 +468,7 @@ class CloudTestCase(test.TestCase): 'type': 'machine'}} self.stubs.UnsetAll() - self.stubs.Set(local.LocalImageService, 'show', fake_show_no_state) + self.stubs.Set(fake._FakeImageService, 'show', fake_show_no_state) self.assertRaises(exception.ApiError, run_instances, self.context, **kwargs) @@ -483,7 +483,7 @@ class CloudTestCase(test.TestCase): 'type': 'machine', 'image_state': 'decrypting'}} self.stubs.UnsetAll() - self.stubs.Set(local.LocalImageService, 'show', fake_show_decrypt) + self.stubs.Set(fake._FakeImageService, 'show', fake_show_decrypt) self.assertRaises(exception.ApiError, run_instances, self.context, **kwargs) diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index b4ac2dbc4..2a68df2fc 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -22,21 +22,21 @@ Tests For Compute import mox import stubout +from nova.auth import manager from nova import compute +from nova.compute import instance_types +from nova.compute import manager as compute_manager +from nova.compute import power_state from nova import context from nova import db +from nova.db.sqlalchemy import models from nova import exception from nova import flags +import nova.image.fake from nova import log as logging from nova import rpc from nova import test from nova import utils -from nova.auth import manager -from nova.compute import instance_types -from nova.compute import manager as compute_manager -from nova.compute import power_state -from nova.db.sqlalchemy import models -from nova.image import local LOG = logging.getLogger('nova.tests.compute') FLAGS = flags.FLAGS @@ -73,7 +73,7 @@ class ComputeTestCase(test.TestCase): def fake_show(meh, context, id): return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1}} - self.stubs.Set(local.LocalImageService, 'show', fake_show) + self.stubs.Set(nova.image.fake._FakeImageService, 'show', fake_show) def tearDown(self): self.manager.delete_user(self.user) From c9fd0e7ac019974631340e5659ca6235cf8ed9ac Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Thu, 9 Jun 2011 15:31:10 -0400 Subject: [PATCH 04/30] further changes --- nova/flags.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/flags.py b/nova/flags.py index d5090edba..ebdfeb7ae 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -362,7 +362,7 @@ DEFINE_string('scheduler_manager', 'nova.scheduler.manager.SchedulerManager', 'Manager for scheduler') # The service to use for image search and retrieval -DEFINE_string('image_service', 'nova.image.local.LocalImageService', +DEFINE_string('image_service', 'nova.image.glance.GlanceImageService', 'The service to use for retrieving and searching for images.') DEFINE_string('host', socket.gethostname(), From bb7028158025c5f0081affdfeaaa1c290cafc90d Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Fri, 10 Jun 2011 10:12:57 -0400 Subject: [PATCH 05/30] removing LocalImageService from nova-manage --- bin/nova-manage | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index b0cd343f5..0bfc63bd9 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -96,7 +96,6 @@ flags.DECLARE('network_size', 'nova.network.manager') flags.DECLARE('vlan_start', 'nova.network.manager') flags.DECLARE('vpn_start', 'nova.network.manager') flags.DECLARE('fixed_range_v6', 'nova.network.manager') -flags.DECLARE('images_path', 'nova.image.local') 
flags.DECLARE('libvirt_type', 'nova.virt.libvirt.connection') flags.DEFINE_flag(flags.HelpFlag()) flags.DEFINE_flag(flags.HelpshortFlag()) @@ -1055,16 +1054,6 @@ class ImageCommands(object): machine_images = {} other_images = {} directory = os.path.abspath(directory) - # NOTE(vish): If we're importing from the images path dir, attempt - # to move the files out of the way before importing - # so we aren't writing to the same directory. This - # may fail if the dir was a mointpoint. - if (FLAGS.image_service == 'nova.image.local.LocalImageService' - and directory == os.path.abspath(FLAGS.images_path)): - new_dir = "%s_bak" % directory - os.rename(directory, new_dir) - os.mkdir(directory) - directory = new_dir for fn in glob.glob("%s/*/info.json" % directory): try: image_path = os.path.join(fn.rpartition('/')[0], 'image') From 45295b0c048f5e5906f1fd497db2a8463aab2668 Mon Sep 17 00:00:00 2001 From: Lorin Hochstein Date: Mon, 13 Jun 2011 16:41:31 -0400 Subject: [PATCH 06/30] Test now passes even if the rpc call does not complete on time --- nova/tests/test_cloud.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 13046f861..b491448eb 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -466,7 +466,8 @@ class CloudTestCase(test.TestCase): self.assertEqual(instance['imageId'], 'ami-00000001') self.assertEqual(instance['displayName'], 'Server 1') self.assertEqual(instance['instanceId'], 'i-00000001') - self.assertEqual(instance['instanceState']['name'], 'networking') + self.assertTrue(instance['instanceState']['name'] in + ['networking', 'scheduling']) self.assertEqual(instance['instanceType'], 'm1.small') def test_run_instances_image_state_none(self): From e977000f62a80843d2284192cd11d7abf87d862a Mon Sep 17 00:00:00 2001 From: Lorin Hochstein Date: Tue, 14 Jun 2011 13:14:00 -0400 Subject: [PATCH 07/30] Stub out the rpc call in a unit test to avoid a race condition --- nova/tests/test_cloud.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index b491448eb..afc661635 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -457,6 +457,12 @@ class CloudTestCase(test.TestCase): self.cloud.delete_key_pair(self.context, 'test') def test_run_instances(self): + # stub out the rpc call + def stub_cast(*args, **kwargs): + pass + + self.stubs.Set(rpc, 'cast', stub_cast) + kwargs = {'image_id': FLAGS.default_image, 'instance_type': FLAGS.default_instance_type, 'max_count': 1} @@ -466,8 +472,7 @@ class CloudTestCase(test.TestCase): self.assertEqual(instance['imageId'], 'ami-00000001') self.assertEqual(instance['displayName'], 'Server 1') self.assertEqual(instance['instanceId'], 'i-00000001') - self.assertTrue(instance['instanceState']['name'] in - ['networking', 'scheduling']) + self.assertEqual(instance['instanceState']['name'], 'scheduling') self.assertEqual(instance['instanceType'], 'm1.small') def test_run_instances_image_state_none(self): From c84a71603be03f7ebf822fbc69d7f40fab4e62b7 Mon Sep 17 00:00:00 2001 From: Lorin Hochstein Date: Tue, 14 Jun 2011 13:17:13 -0400 Subject: [PATCH 08/30] pep8 fix --- nova/tests/test_cloud.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index afc661635..d2ff14f27 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -462,7 +462,7 @@ class CloudTestCase(test.TestCase): pass self.stubs.Set(rpc, 
'cast', stub_cast) - + kwargs = {'image_id': FLAGS.default_image, 'instance_type': FLAGS.default_instance_type, 'max_count': 1} From c5a8dcf25f95ef9f511c40227c319fb7bf8382a3 Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Wed, 15 Jun 2011 14:41:29 +0900 Subject: [PATCH 09/30] api/ec2: make the parameter parser an independent method Following the review, make the parser of argument items an independent method for readability. --- nova/tests/test_api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/tests/test_api.py b/nova/tests/test_api.py index 7c0331eff..20b20fcbf 100644 --- a/nova/tests/test_api.py +++ b/nova/tests/test_api.py @@ -89,7 +89,7 @@ class FakeHttplibConnection(object): class XmlConversionTestCase(test.TestCase): """Unit test api xml conversion""" def test_number_conversion(self): - conv = apirequest._try_convert + conv = ec2utils._try_convert self.assertEqual(conv('None'), None) self.assertEqual(conv('True'), True) self.assertEqual(conv('False'), False) From 94a8a77ca7d9f4e10b360fee2d2b1a08ca65dfb1 Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Wed, 15 Jun 2011 06:40:42 -0700 Subject: [PATCH 10/30] None project_id now default --- nova/scheduler/api.py | 5 +++-- nova/scheduler/zone_aware_scheduler.py | 3 ++- nova/scheduler/zone_manager.py | 3 ++- 3 files changed, 7 insertions(+), 4 deletions(-) diff --git a/nova/scheduler/api.py b/nova/scheduler/api.py index ffe59d2c1..3b3195c2e 100644 --- a/nova/scheduler/api.py +++ b/nova/scheduler/api.py @@ -106,7 +106,8 @@ def _wrap_method(function, self): def _process(func, zone): """Worker stub for green thread pool. Give the worker an authenticated nova client and zone info.""" - nova = novaclient.OpenStack(zone.username, zone.password, zone.api_url) + nova = novaclient.OpenStack(zone.username, zone.password, None, + zone.api_url) nova.authenticate() return func(nova, zone) @@ -122,7 +123,7 @@ def call_zone_method(context, method_name, errors_to_ignore=None, results = [] for zone in db.zone_get_all(context): try: - nova = novaclient.OpenStack(zone.username, zone.password, + nova = novaclient.OpenStack(zone.username, zone.password, None, zone.api_url) nova.authenticate() except novaclient.exceptions.BadRequest, e: diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py index f04defa64..69d4c6034 100644 --- a/nova/scheduler/zone_aware_scheduler.py +++ b/nova/scheduler/zone_aware_scheduler.py @@ -105,7 +105,8 @@ class ZoneAwareScheduler(driver.Scheduler): % locals()) nova = None try: - nova = novaclient.OpenStack(zone.username, zone.password, url) + nova = novaclient.OpenStack(zone.username, zone.password, None, + url) nova.authenticate() except novaclient.exceptions.BadRequest, e: raise exception.NotAuthorized(_("Bad credentials attempting " diff --git a/nova/scheduler/zone_manager.py b/nova/scheduler/zone_manager.py index 3f483adff..ba7403c15 100644 --- a/nova/scheduler/zone_manager.py +++ b/nova/scheduler/zone_manager.py @@ -89,7 +89,8 @@ class ZoneState(object): def _call_novaclient(zone): """Call novaclient. 
Broken out for testing purposes.""" - client = novaclient.OpenStack(zone.username, zone.password, zone.api_url) + client = novaclient.OpenStack(zone.username, zone.password, None, + zone.api_url) return client.zones.info()._info From 817bc926b24eaf7228e428f1ba994722ba836380 Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Wed, 15 Jun 2011 22:58:22 +0900 Subject: [PATCH 11/30] typo --- nova/tests/test_cloud.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index c9b75f966..1e8b8e846 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -535,7 +535,7 @@ class CloudTestCase(test.TestCase): self._wait_for_state(elevated, instance_id, is_deleted) def test_stop_start_instance(self): - """Makes sure stop/start instnace works""" + """Makes sure stop/start instance works""" # enforce periodic tasks run in short time to avoid wait for 60s. self._restart_compute_service(periodic_interval=0.3) From 496365f12a72764d6a9a301aae445257f124f0ea Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Wed, 15 Jun 2011 23:11:03 +0900 Subject: [PATCH 12/30] pep8 --- nova/tests/test_cloud.py | 40 ++++++++++++++++++++-------------------- 1 file changed, 20 insertions(+), 20 deletions(-) diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 1e8b8e846..3d91eb2b8 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -541,7 +541,7 @@ class CloudTestCase(test.TestCase): kwargs = {'image_id': 'ami-1', 'instance_type': FLAGS.default_instance_type, - 'max_count': 1,} + 'max_count': 1, } instance_id = self._run_instance_wait(**kwargs) # a running instance can't be started. It is just ignored. @@ -553,7 +553,7 @@ class CloudTestCase(test.TestCase): greenthread.sleep(0.3) self.assertTrue(result) self._wait_for_stopped(instance_id) - + result = self.cloud.start_instances(self.context, [instance_id]) greenthread.sleep(0.3) self.assertTrue(result) @@ -563,18 +563,18 @@ class CloudTestCase(test.TestCase): greenthread.sleep(0.3) self.assertTrue(result) self._wait_for_stopped(instance_id) - + result = self.cloud.terminate_instances(self.context, [instance_id]) greenthread.sleep(0.3) self.assertTrue(result) - + self._restart_compute_service() def _volume_create(self): kwargs = {'status': 'available', 'host': self.volume.host, 'size': 1, - 'attach_status': 'detached',} + 'attach_status': 'detached', } return db.volume_create(self.context, kwargs) def _assert_volume_attached(self, vol, instance_id, mountpoint): @@ -582,7 +582,7 @@ class CloudTestCase(test.TestCase): self.assertEqual(vol['mountpoint'], mountpoint) self.assertEqual(vol['status'], "in-use") self.assertEqual(vol['attach_status'], "attached") - + def _assert_volume_detached(self, vol): self.assertEqual(vol['instance_id'], None) self.assertEqual(vol['mountpoint'], None) @@ -604,8 +604,8 @@ class CloudTestCase(test.TestCase): 'volume_id': vol1['id'], 'delete_on_termination': False,}, {'device_name': '/dev/vdc', - 'volume_id': vol2['id'], - 'delete_on_termination': True,}, + 'volume_id': vol2['id'], + 'delete_on_termination': True, }, ]} ec2_instance_id = self._run_instance_wait(**kwargs) instance_id = ec2utils.ec2_id_to_id(ec2_instance_id) @@ -629,7 +629,7 @@ class CloudTestCase(test.TestCase): self._assert_volume_detached(vol) vol = db.volume_get(self.context, vol2['id']) self._assert_volume_detached(vol) - + self.cloud.start_instances(self.context, [ec2_instance_id]) self._wait_for_running(ec2_instance_id) vols = 
db.volume_get_all_by_instance(self.context, instance_id) @@ -654,7 +654,7 @@ class CloudTestCase(test.TestCase): admin_ctxt = context.get_admin_context(read_deleted=True) vol = db.volume_get(admin_ctxt, vol2['id']) self.assertTrue(vol['deleted']) - + self._restart_compute_service() def test_stop_with_attached_volume(self): @@ -669,7 +669,7 @@ class CloudTestCase(test.TestCase): 'max_count': 1, 'block_device_mapping': [{'device_name': '/dev/vdb', 'volume_id': vol1['id'], - 'delete_on_termination': True,},]} + 'delete_on_termination': True}]} ec2_instance_id = self._run_instance_wait(**kwargs) instance_id = ec2utils.ec2_id_to_id(ec2_instance_id) @@ -695,7 +695,7 @@ class CloudTestCase(test.TestCase): greenthread.sleep(0.3) vol = db.volume_get(self.context, vol1['id']) self._assert_volume_detached(vol) - + result = self.cloud.stop_instances(self.context, [ec2_instance_id]) self.assertTrue(result) self._wait_for_stopped(ec2_instance_id) @@ -703,7 +703,7 @@ class CloudTestCase(test.TestCase): for vol_id in (vol1['id'], vol2['id']): vol = db.volume_get(self.context, vol_id) self._assert_volume_detached(vol) - + self.cloud.start_instances(self.context, [ec2_instance_id]) self._wait_for_running(ec2_instance_id) vols = db.volume_get_all_by_instance(self.context, instance_id) @@ -723,15 +723,15 @@ class CloudTestCase(test.TestCase): self.assertEqual(vol['id'], vol_id) self._assert_volume_detached(vol) db.volume_destroy(self.context, vol_id) - + self._restart_compute_service() - + def _create_snapshot(self, ec2_volume_id): result = self.cloud.create_snapshot(self.context, volume_id=ec2_volume_id) greenthread.sleep(0.3) return result['snapshotId'] - + def test_run_with_snapshot(self): """Makes sure run/stop/start instance with snapshot works.""" vol = self._volume_create() @@ -747,10 +747,10 @@ class CloudTestCase(test.TestCase): 'max_count': 1, 'block_device_mapping': [{'device_name': '/dev/vdb', 'snapshot_id': snapshot1_id, - 'delete_on_termination': False,}, + 'delete_on_termination': False, }, {'device_name': '/dev/vdc', 'snapshot_id': snapshot2_id, - 'delete_on_termination': True,},],} + 'delete_on_termination': True}]} ec2_instance_id = self._run_instance_wait(**kwargs) instance_id = ec2utils.ec2_id_to_id(ec2_instance_id) @@ -778,14 +778,14 @@ class CloudTestCase(test.TestCase): greenthread.sleep(0.3) self._wait_for_terminate(ec2_instance_id) - greenthread.sleep(0.3) + greenthread.sleep(0.3) admin_ctxt = context.get_admin_context(read_deleted=False) vol = db.volume_get(admin_ctxt, vol1_id) self._assert_volume_detached(vol) self.assertFalse(vol['deleted']) db.volume_destroy(self.context, vol1_id) - greenthread.sleep(0.3) + greenthread.sleep(0.3) admin_ctxt = context.get_admin_context(read_deleted=True) vol = db.volume_get(admin_ctxt, vol2_id) self.assertTrue(vol['deleted']) From 7ddcfbdca53894fcf88d89b3055946cb41860df8 Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Wed, 15 Jun 2011 09:45:22 -0700 Subject: [PATCH 13/30] fixed up some little project_id things with new novaclient --- nova/scheduler/zone_aware_scheduler.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py index 69d4c6034..0ec83ec2e 100644 --- a/nova/scheduler/zone_aware_scheduler.py +++ b/nova/scheduler/zone_aware_scheduler.py @@ -88,7 +88,7 @@ class ZoneAwareScheduler(driver.Scheduler): instance_properties = request_spec['instance_properties'] name = instance_properties['display_name'] - image_id = instance_properties['image_id'] + 
image_ref = instance_properties['image_ref'] meta = instance_properties['metadata'] flavor_id = instance_type['flavorid'] reservation_id = instance_properties['reservation_id'] @@ -112,7 +112,7 @@ class ZoneAwareScheduler(driver.Scheduler): raise exception.NotAuthorized(_("Bad credentials attempting " "to talk to zone at %(url)s.") % locals()) - nova.servers.create(name, image_id, flavor_id, ipgroup, meta, files, + nova.servers.create(name, image_ref, flavor_id, ipgroup, meta, files, child_blob, reservation_id=reservation_id) def _provision_resource_from_blob(self, context, item, instance_id, From 6f17343d9ab8af8e0d2618bb59e42d535a965b6d Mon Sep 17 00:00:00 2001 From: Sandy Walsh Date: Wed, 15 Jun 2011 10:21:41 -0700 Subject: [PATCH 14/30] don't provision to all child zones --- nova/scheduler/zone_aware_scheduler.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/nova/scheduler/zone_aware_scheduler.py b/nova/scheduler/zone_aware_scheduler.py index 0ec83ec2e..e7bff2faa 100644 --- a/nova/scheduler/zone_aware_scheduler.py +++ b/nova/scheduler/zone_aware_scheduler.py @@ -185,7 +185,11 @@ class ZoneAwareScheduler(driver.Scheduler): if not build_plan: raise driver.NoValidHost(_('No hosts were available')) - for item in build_plan: + for num in xrange(request_spec['num_instances']): + if not build_plan: + break + + item = build_plan.pop(0) self._provision_resource(context, item, instance_id, request_spec, kwargs) From 83d46bfa13d29b27420400a5443c4c60d7b285d4 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Wed, 15 Jun 2011 20:11:34 +0000 Subject: [PATCH 15/30] Adding uuid test --- nova/utils.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/nova/utils.py b/nova/utils.py index 691134ada..8ad09bc75 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -35,6 +35,7 @@ import struct import sys import time import types +import uuid from xml.sax import saxutils from eventlet import event @@ -726,3 +727,7 @@ def parse_server_string(server_str): except: LOG.debug(_('Invalid server_string: %s' % server_str)) return ('', '') + + +def gen_uuid(): + return uuid.uuid4() From a83afc66b77f14a5613bcdc5a0bb006c535bcaab Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Wed, 15 Jun 2011 21:12:37 +0000 Subject: [PATCH 16/30] Prep-work to begin on reroute_compute --- nova/scheduler/api.py | 24 +++++++++++++++++++++--- nova/utils.py | 8 ++++++++ 2 files changed, 29 insertions(+), 3 deletions(-) diff --git a/nova/scheduler/api.py b/nova/scheduler/api.py index ffe59d2c1..28410f538 100644 --- a/nova/scheduler/api.py +++ b/nova/scheduler/api.py @@ -200,9 +200,27 @@ class RedirectResult(exception.Error): class reroute_compute(object): - """Decorator used to indicate that the method should - delegate the call the child zones if the db query - can't find anything.""" + """ + reroute_compute is responsible for trying to lookup a resource in the + current zone and if it's not found there, delegating the call to the + child zones. + + Since reroute_compute will be making 'cross-zone' calls, the ID for the + object must come in as a UUID-- if we receive an integer ID, we bail. + + The steps involved are: + + 1. Validate that item_id is UUID like + + 2. Lookup item by UUID in the zone local database + + 3. If the item was found, then extract integer ID, and pass that to + the wrapped method. (This ensures that zone-local code can + continue to use integer IDs). + + 4. If the item was not found, we delgate the call to a child zone + using the UUID. 
+ """ def __init__(self, method_name): self.method_name = method_name diff --git a/nova/utils.py b/nova/utils.py index 8ad09bc75..c2fdebfdf 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -731,3 +731,11 @@ def parse_server_string(server_str): def gen_uuid(): return uuid.uuid4() + + +def is_uuid_like(val): + try: + int(val) + return False + except ValueError: + return True From a8ab2b84a0cece80617490c9b8fdb1bc80d704f7 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Wed, 15 Jun 2011 21:50:36 +0000 Subject: [PATCH 17/30] First attempt to rewrite reroute_compute --- nova/scheduler/api.py | 80 +++++++++++++++++++++++++++++++------------ 1 file changed, 59 insertions(+), 21 deletions(-) diff --git a/nova/scheduler/api.py b/nova/scheduler/api.py index 28410f538..0cc8a1132 100644 --- a/nova/scheduler/api.py +++ b/nova/scheduler/api.py @@ -24,6 +24,7 @@ from nova import exception from nova import flags from nova import log as logging from nova import rpc +from nova import utils from eventlet import greenpool @@ -224,32 +225,57 @@ class reroute_compute(object): def __init__(self, method_name): self.method_name = method_name + + def _route_local(): + pass + + def _route_to_child_zones(context, collection, item_uuid): + if not FLAGS.enable_zone_routing: + raise InstanceNotFound(instance_id=item_uuid) + + zones = db.zone_get_all(context) + if not zones: + raise InstanceNotFound(instance_id=item_uuid) + + # Ask the children to provide an answer ... + LOG.debug(_("Asking child zones ...")) + result = self._call_child_zones(zones, + wrap_novaclient_function(_issue_novaclient_command, + collection, self.method_name, item_uuid)) + # Scrub the results and raise another exception + # so the API layers can bail out gracefully ... + raise RedirectResult(self.unmarshall_result(result)) + def __call__(self, f): def wrapped_f(*args, **kwargs): - collection, context, item_id = \ + collection, context, item_id_or_uuid = \ self.get_collection_context_and_id(args, kwargs) - try: - # Call the original function ... + + attempt_reroute = False + if utils.is_uuid_like(item_id_or_uuid): + item_uuid = item_id_or_uuid + try: + instance = self.db.instance_get_by_uuid( + context, item_uuid) + except exception.InstanceNotFound, e: + # NOTE(sirp): since a UUID was passed in, we can attempt + # to reroute to a child zone + attempt_reroute = True + LOG.debug(_("Instance %(item_uuid)s not found " + "locally: '%(e)s'" % locals())) + else: + # NOTE(sirp): since we're not re-routing in this case, and we + # we were passed a UUID, we need to replace that UUID with an + # integer ID in the argument list so that the zone-local code + # can continue to use integer IDs. + item_id = instance['id'] + self.replace_uuid_with_id(args, kwargs, replacement_id) + + if attempt_reroute: + self._route_to_child_zones(context, collection, item_uuid) + else: return f(*args, **kwargs) - except exception.InstanceNotFound, e: - LOG.debug(_("Instance %(item_id)s not found " - "locally: '%(e)s'" % locals())) - if not FLAGS.enable_zone_routing: - raise - - zones = db.zone_get_all(context) - if not zones: - raise - - # Ask the children to provide an answer ... - LOG.debug(_("Asking child zones ...")) - result = self._call_child_zones(zones, - wrap_novaclient_function(_issue_novaclient_command, - collection, self.method_name, item_id)) - # Scrub the results and raise another exception - # so the API layers can bail out gracefully ... 
- raise RedirectResult(self.unmarshall_result(result)) return wrapped_f def _call_child_zones(self, zones, function): @@ -268,6 +294,18 @@ class reroute_compute(object): instance_id = args[2] return ("servers", context, instance_id) + @staticmethod + def replace_uuid_with_id(args, kwargs, replacement_id): + """ + Extracts the UUID parameter from the arg or kwarg list and replaces + it with an integer ID. + """ + if 'instance_id' in kwargs: + kwargs['instance_id'] = replacement_id + elif len(args) > 1: + args.pop(2) + args.insert(2, replacement_id) + def unmarshall_result(self, zone_responses): """Result is a list of responses from each child zone. Each decorator derivation is responsible to turning this From 283ac275abb8c25abb9374cb7ac6442e30eb60cb Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Thu, 16 Jun 2011 15:02:18 +0000 Subject: [PATCH 18/30] Small tweaks --- nova/scheduler/api.py | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/nova/scheduler/api.py b/nova/scheduler/api.py index 0cc8a1132..8bf6a1592 100644 --- a/nova/scheduler/api.py +++ b/nova/scheduler/api.py @@ -225,10 +225,6 @@ class reroute_compute(object): def __init__(self, method_name): self.method_name = method_name - - def _route_local(): - pass - def _route_to_child_zones(context, collection, item_uuid): if not FLAGS.enable_zone_routing: raise InstanceNotFound(instance_id=item_uuid) @@ -255,8 +251,7 @@ class reroute_compute(object): if utils.is_uuid_like(item_id_or_uuid): item_uuid = item_id_or_uuid try: - instance = self.db.instance_get_by_uuid( - context, item_uuid) + instance = db.instance_get_by_uuid(context, item_uuid) except exception.InstanceNotFound, e: # NOTE(sirp): since a UUID was passed in, we can attempt # to reroute to a child zone @@ -269,10 +264,11 @@ class reroute_compute(object): # integer ID in the argument list so that the zone-local code # can continue to use integer IDs. item_id = instance['id'] - self.replace_uuid_with_id(args, kwargs, replacement_id) + args = list(args) # needs to be mutable to replace + self.replace_uuid_with_id(args, kwargs, item_id) if attempt_reroute: - self._route_to_child_zones(context, collection, item_uuid) + return self._route_to_child_zones(context, collection, item_uuid) else: return f(*args, **kwargs) @@ -303,6 +299,8 @@ class reroute_compute(object): if 'instance_id' in kwargs: kwargs['instance_id'] = replacement_id elif len(args) > 1: + # NOTE(sirp): args comes in as a tuple, so we need to convert it + # to a list to mutate it, and then convert it back to a tuple args.pop(2) args.insert(2, replacement_id) From d2742ecdf353fde53b65e3e43923e108b10b21cf Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Thu, 16 Jun 2011 17:27:36 +0000 Subject: [PATCH 19/30] Fixing test_servers_by_uuid --- nova/scheduler/api.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/nova/scheduler/api.py b/nova/scheduler/api.py index 8bf6a1592..a363a3119 100644 --- a/nova/scheduler/api.py +++ b/nova/scheduler/api.py @@ -218,20 +218,20 @@ class reroute_compute(object): 3. If the item was found, then extract integer ID, and pass that to the wrapped method. (This ensures that zone-local code can continue to use integer IDs). - + 4. If the item was not found, we delgate the call to a child zone using the UUID. 
""" def __init__(self, method_name): self.method_name = method_name - def _route_to_child_zones(context, collection, item_uuid): + def _route_to_child_zones(self, context, collection, item_uuid): if not FLAGS.enable_zone_routing: - raise InstanceNotFound(instance_id=item_uuid) + raise exception.InstanceNotFound(instance_id=item_uuid) zones = db.zone_get_all(context) if not zones: - raise InstanceNotFound(instance_id=item_uuid) + raise exception.InstanceNotFound(instance_id=item_uuid) # Ask the children to provide an answer ... LOG.debug(_("Asking child zones ...")) @@ -247,7 +247,7 @@ class reroute_compute(object): collection, context, item_id_or_uuid = \ self.get_collection_context_and_id(args, kwargs) - attempt_reroute = False + attempt_reroute = False if utils.is_uuid_like(item_id_or_uuid): item_uuid = item_id_or_uuid try: From d2df8ec07217806802281293aad8350ba80e068e Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Thu, 16 Jun 2011 17:46:13 +0000 Subject: [PATCH 20/30] Fixing another test --- nova/tests/scheduler/test_scheduler.py | 32 ++++++++++++++++++++++++-- 1 file changed, 30 insertions(+), 2 deletions(-) diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py index 0d7929996..cddbc7e55 100644 --- a/nova/tests/scheduler/test_scheduler.py +++ b/nova/tests/scheduler/test_scheduler.py @@ -48,6 +48,10 @@ flags.DECLARE('stub_network', 'nova.compute.manager') flags.DECLARE('instances_path', 'nova.compute.manager') +FAKE_UUID_NOT_FOUND = 'ffff-ffff-ffff-ffff' +FAKE_UUID = 'abcd-abcd-abcd-abcd' + + class TestDriver(driver.Scheduler): """Scheduler Driver for Tests""" def schedule(context, topic, *args, **kwargs): @@ -926,12 +930,23 @@ def zone_get_all(context): ] +def fake_instance_get_by_uuid(context, uuid): + if FAKE_UUID_NOT_FOUND: + raise exception.InstanceNotFound(instance_id=uuid) + else: + return {'id': 1} + + class FakeRerouteCompute(api.reroute_compute): + def __init__(self, method_name, id_to_return=1): + super(FakeRerouteCompute, self).__init__(method_name) + self.id_to_return = id_to_return + def _call_child_zones(self, zones, function): return [] def get_collection_context_and_id(self, args, kwargs): - return ("servers", None, 1) + return ("servers", None, self.id_to_return) def unmarshall_result(self, zone_responses): return dict(magic="found me") @@ -960,6 +975,8 @@ class ZoneRedirectTest(test.TestCase): self.stubs = stubout.StubOutForTesting() self.stubs.Set(db, 'zone_get_all', zone_get_all) + self.stubs.Set(db, 'instance_get_by_uuid', + fake_instance_get_by_uuid) self.enable_zone_routing = FLAGS.enable_zone_routing FLAGS.enable_zone_routing = True @@ -976,8 +993,19 @@ class ZoneRedirectTest(test.TestCase): except api.RedirectResult, e: self.fail(_("Successful database hit should succeed")) - def test_trap_not_found_locally(self): + def test_trap_not_found_locally_id_passed(self): + """When an integer ID is not found locally, we cannot reroute to + another zone, so just return InstanceNotFound exception + """ decorator = FakeRerouteCompute("foo") + self.assertRaises(exception.InstanceNotFound, + decorator(go_boom), None, None, 1) + + def test_trap_not_found_locally_uuid_passed(self): + """When a UUID is found, if the item isn't found locally, we should + try to reroute to a child zone to see if they have it + """ + decorator = FakeRerouteCompute("foo", id_to_return=FAKE_UUID_NOT_FOUND) try: result = decorator(go_boom)(None, None, 1) self.assertFail(_("Should have rerouted.")) From 4224ef55e90b69ade6b860ae95a7c4a80edf2521 Mon Sep 17 
00:00:00 2001 From: Rick Harris Date: Thu, 16 Jun 2011 17:52:34 +0000 Subject: [PATCH 21/30] PEP8 cleanup. --- nova/scheduler/api.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/nova/scheduler/api.py b/nova/scheduler/api.py index a363a3119..6e5471d76 100644 --- a/nova/scheduler/api.py +++ b/nova/scheduler/api.py @@ -259,16 +259,17 @@ class reroute_compute(object): LOG.debug(_("Instance %(item_uuid)s not found " "locally: '%(e)s'" % locals())) else: - # NOTE(sirp): since we're not re-routing in this case, and we - # we were passed a UUID, we need to replace that UUID with an - # integer ID in the argument list so that the zone-local code - # can continue to use integer IDs. + # NOTE(sirp): since we're not re-routing in this case, and + # we we were passed a UUID, we need to replace that UUID + # with an integer ID in the argument list so that the + # zone-local code can continue to use integer IDs. item_id = instance['id'] args = list(args) # needs to be mutable to replace self.replace_uuid_with_id(args, kwargs, item_id) if attempt_reroute: - return self._route_to_child_zones(context, collection, item_uuid) + return self._route_to_child_zones(context, collection, + item_uuid) else: return f(*args, **kwargs) From 2dd0ee7379e76d679d7e10aa32c4b05450de67ac Mon Sep 17 00:00:00 2001 From: "matt.dietz@rackspace.com" <> Date: Thu, 16 Jun 2011 13:56:52 -0500 Subject: [PATCH 22/30] Added a new test for confirming failure when no primary VDI is present --- nova/tests/test_xenapi.py | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index d1c88287a..c0213250a 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -33,6 +33,7 @@ from nova import utils from nova.auth import manager from nova.compute import instance_types from nova.compute import power_state +from nova import exception from nova.virt import xenapi_conn from nova.virt.xenapi import fake as xenapi_fake from nova.virt.xenapi import volume_utils @@ -228,6 +229,23 @@ class XenAPIVMTestCase(test.TestCase): instance = self._create_instance() self.conn.get_diagnostics(instance) + def test_instance_snapshot_fails_with_no_primary_vdi(self): + def create_bad_vbd(vm_ref, vdi_ref): + vbd_rec = {'VM': vm_ref, + 'VDI': vdi_ref, + 'userdevice': 'fake', + 'currently_attached': False} + vbd_ref = xenapi_fake._create_object('VBD', vbd_rec) + xenapi_fake.after_VBD_create(vbd_ref, vbd_rec) + return vbd_ref + + self.stubs.Set(xenapi_fake, 'create_vbd', create_bad_vbd) + stubs.stubout_instance_snapshot(self.stubs) + instance = self._create_instance() + + name = "MySnapshot" + self.assertRaises(exception.Error, self.conn.snapshot, instance, name) + def test_instance_snapshot(self): stubs.stubout_instance_snapshot(self.stubs) instance = self._create_instance() From 779993d0f525f721b1e981ad531eea189154f33c Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Thu, 16 Jun 2011 14:42:50 -0500 Subject: [PATCH 23/30] Renaming to _build_instance_get --- nova/scheduler/api.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/nova/scheduler/api.py b/nova/scheduler/api.py index 6e5471d76..8ef893beb 100644 --- a/nova/scheduler/api.py +++ b/nova/scheduler/api.py @@ -300,8 +300,6 @@ class reroute_compute(object): if 'instance_id' in kwargs: kwargs['instance_id'] = replacement_id elif len(args) > 1: - # NOTE(sirp): args comes in as a tuple, so we need to convert it - # to a list to mutate it, and then convert it back to a tuple args.pop(2) args.insert(2, 
replacement_id)

From 3ff6f6699183439059c6261deea6d4d71cb07642 Mon Sep 17 00:00:00 2001
From: Rick Harris
Date: Thu, 16 Jun 2011 21:21:01 +0000
Subject: [PATCH 24/30] Glance host defaults to my_ip rather than localhost

---
 nova/flags.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/nova/flags.py b/nova/flags.py
index acfcf8d68..f6f12e3b2 100644
--- a/nova/flags.py
+++ b/nova/flags.py
@@ -272,7 +272,7 @@ DEFINE_string('aws_access_key_id', 'admin', 'AWS Access ID')
 DEFINE_string('aws_secret_access_key', 'admin', 'AWS Access Key')
 # NOTE(sirp): my_ip interpolation doesn't work within nested structures
 DEFINE_list('glance_api_servers',
-            ['127.0.0.1:9292'],
+            ['%s:9292' % _get_my_ip()],
             'list of glance api servers available to nova (host:port)')
 DEFINE_integer('s3_port', 3333, 's3 port')
 DEFINE_string('s3_host', '$my_ip', 's3 host (for infrastructure)')

From 238423cf80f588c3de7601f83abf7c3c40c89d97 Mon Sep 17 00:00:00 2001
From: Thierry Carrez
Date: Fri, 17 Jun 2011 14:20:18 +0200
Subject: [PATCH 25/30] Fix unittest so that it actually fails without the fix

---
 nova/tests/test_cloud.py | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py
index ba133c860..c61968d2c 100644
--- a/nova/tests/test_cloud.py
+++ b/nova/tests/test_cloud.py
@@ -526,7 +526,9 @@ class CloudTestCase(test.TestCase):
 
     def test_update_of_instance_wont_update_private_fields(self):
         inst = db.instance_create(self.context, {})
-        self.cloud.update_instance(self.context, inst['id'],
+        ec2_id = ec2utils.id_to_ec2_id(inst['id'])
+        self.cloud.update_instance(self.context, ec2_id,
+                                   display_name='c00l 1m4g3',
                                    mac_address='DE:AD:BE:EF')
         inst = db.instance_get(self.context, inst['id'])
         self.assertEqual(None, inst['mac_address'])

From abb699f2076e012a6d90835d6676f620fcddcac4 Mon Sep 17 00:00:00 2001
From: Lorin Hochstein
Date: Fri, 17 Jun 2011 10:17:07 -0400
Subject: [PATCH 26/30] Fix a problem where run_tests.sh would output a
 seemingly unrelated error message when there was a sqlalchemy-migrate
 version number conflict

---
 run_tests.py | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/run_tests.py b/run_tests.py
index 0944bb585..bb33f9139 100644
--- a/run_tests.py
+++ b/run_tests.py
@@ -211,6 +211,12 @@ class NovaTestResult(result.TextTestResult):
                     break
             sys.stdout = stdout
 
+        # NOTE(lorinh): Initialize start_time in case a sqlalchemy-migrate
+        # error results in it failing to be initialized later. Otherwise,
+        # _handleElapsedTime will fail, causing the wrong error message to
+        # be outputted.
+ self.start_time = time.time() + def getDescription(self, test): return str(test) From ddc29d6f0b27e10df09ad2da7d7dc734762483d9 Mon Sep 17 00:00:00 2001 From: Brian Waldon Date: Fri, 17 Jun 2011 13:15:49 -0400 Subject: [PATCH 27/30] fixing test case --- nova/tests/test_cloud.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 094fd394e..8ba2164e7 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -515,7 +515,7 @@ class CloudTestCase(test.TestCase): return {'id': 1, 'properties': {'kernel_id': 1, 'ramdisk_id': 1, 'type': 'machine'}, 'status': 'active'} - self.stubs.Set(local.LocalImageService, 'show', fake_show_stat_active) + self.stubs.Set(fake._FakeImageService, 'show', fake_show_stat_active) result = run_instances(self.context, **kwargs) self.assertEqual(len(result['instancesSet']), 1) From 558d531810631e26e7f898e561c65daf073ec234 Mon Sep 17 00:00:00 2001 From: Isaku Yamahata Date: Sat, 18 Jun 2011 08:34:20 +0900 Subject: [PATCH 28/30] pep8: white space/blank lines --- nova/tests/test_cloud.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index 6a6256c20..09e26df4c 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -671,7 +671,7 @@ class CloudTestCase(test.TestCase): 'max_count': 1, 'block_device_mapping': [{'device_name': '/dev/vdb', 'volume_id': vol1['id'], - 'delete_on_termination': False,}, + 'delete_on_termination': False, }, {'device_name': '/dev/vdc', 'volume_id': vol2['id'], 'delete_on_termination': True, }, From 9e5d57eed76c9e51aa5f77f88651f62f6b3e7873 Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Fri, 17 Jun 2011 23:53:30 +0000 Subject: [PATCH 29/30] Using proper UUID format for uuids --- nova/tests/scheduler/test_scheduler.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/tests/scheduler/test_scheduler.py b/nova/tests/scheduler/test_scheduler.py index cddbc7e55..4be59d411 100644 --- a/nova/tests/scheduler/test_scheduler.py +++ b/nova/tests/scheduler/test_scheduler.py @@ -48,8 +48,8 @@ flags.DECLARE('stub_network', 'nova.compute.manager') flags.DECLARE('instances_path', 'nova.compute.manager') -FAKE_UUID_NOT_FOUND = 'ffff-ffff-ffff-ffff' -FAKE_UUID = 'abcd-abcd-abcd-abcd' +FAKE_UUID_NOT_FOUND = 'ffffffff-ffff-ffff-ffff-ffffffffffff' +FAKE_UUID = 'aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa' class TestDriver(driver.Scheduler): From 20bf62ce86b01a43627eea87e37bdac2d5296daf Mon Sep 17 00:00:00 2001 From: Rick Harris Date: Sat, 18 Jun 2011 00:12:44 +0000 Subject: [PATCH 30/30] Adding tests for is_uuid_like --- nova/utils.py | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/nova/utils.py b/nova/utils.py index c2fdebfdf..e2ac16f31 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -734,8 +734,10 @@ def gen_uuid(): def is_uuid_like(val): - try: - int(val) + """For our purposes, a UUID is a string in canoical form: + + aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa + """ + if not isinstance(val, basestring): return False - except ValueError: - return True + return (len(val) == 36) and (val.count('-') == 4)
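To make the semantics of that final hunk concrete, here is a short, self-contained sketch; the is_uuid_like body is adapted from the patch above, while the surrounding assertions are illustrative only (note the Python 2 basestring check, matching the era of this codebase):

    import uuid


    def is_uuid_like(val):
        """For our purposes, a UUID is a string in canonical form:

            aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa
        """
        if not isinstance(val, basestring):
            return False
        return (len(val) == 36) and (val.count('-') == 4)

    assert is_uuid_like(str(uuid.uuid4()))
    assert is_uuid_like('aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa')
    # Integer IDs are not UUID-like, so reroute_compute handles them
    # zone-locally instead of delegating to child zones.
    assert not is_uuid_like(1)
    # The old test constant from patch 20 is too short to be canonical,
    # which is why patch 29 switched to full 36-character UUIDs.
    assert not is_uuid_like('ffff-ffff-ffff-ffff')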