Allowing resizes to the same machine

This commit is contained in:
Rick Harris 2011-09-16 00:22:31 +00:00
parent e0cf82323a
commit 6f3ae6e1e5
11 changed files with 139 additions and 77 deletions

View File

@ -844,7 +844,8 @@ class ComputeManager(manager.SchedulerDependentManager):
migration_ref.instance_uuid)
network_info = self._get_instance_nw_info(context, instance_ref)
self.driver.destroy(instance_ref, network_info)
self.driver.confirm_migration(migration_ref, instance_ref, network_info)
usage_info = utils.usage_from_instance(instance_ref)
notifier.notify('compute.%s' % self.host,
'compute.instance.resize.confirm',
@ -899,7 +900,7 @@ class ComputeManager(manager.SchedulerDependentManager):
local_gb=instance_type['local_gb'],
instance_type_id=instance_type['id'])
self.driver.revert_migration(instance_ref)
self.driver.finish_revert_migration(instance_ref)
self.db.migration_update(context, migration_id,
{'status': 'reverted'})
usage_info = utils.usage_from_instance(instance_ref)
@ -923,7 +924,8 @@ class ComputeManager(manager.SchedulerDependentManager):
# of the instance down
instance_ref = self.db.instance_get_by_uuid(context, instance_id)
if instance_ref['host'] == FLAGS.host:
same_host = instance_ref['host'] == FLAGS.host
if same_host and not FLAGS.allow_resize_to_same_host:
self._instance_update(context,
instance_id,
vm_state=vm_states.ERROR)
@ -1019,8 +1021,8 @@ class ComputeManager(manager.SchedulerDependentManager):
instance_ref.uuid)
network_info = self._get_instance_nw_info(context, instance_ref)
self.driver.finish_migration(context, instance_ref, disk_info,
network_info, resize_instance)
self.driver.finish_migration(context, migration_ref, instance_ref,
disk_info, network_info, resize_instance)
self._instance_update(context,
instance_id,

View File

@ -429,3 +429,7 @@ DEFINE_list('monkey_patch_modules',
'nova.compute.api:nova.notifier.api.notify_decorator'],
'Module list representing monkey '
'patched module and decorator')
DEFINE_bool('allow_resize_to_same_host', False,
'Allow destination machine to match source for resize. Useful'
' when testing in environments with only one host machine.')

View File

@ -568,7 +568,7 @@ class ComputeTestCase(test.TestCase):
pass
self.stubs.Set(self.compute.driver, 'finish_migration', fake)
self.stubs.Set(self.compute.driver, 'revert_migration', fake)
self.stubs.Set(self.compute.driver, 'finish_revert_migration', fake)
self.stubs.Set(self.compute.network_api, 'get_instance_nw_info', fake)
self.compute.run_instance(self.context, instance_id)

View File

@ -76,7 +76,7 @@ class XenAPIVolumeTestCase(test.TestCase):
db_fakes.stub_out_db_instance_api(self.stubs)
stubs.stub_out_get_target(self.stubs)
xenapi_fake.reset()
self.values = {'id': 1,
self.instance_values = {'id': 1,
'project_id': self.user_id,
'user_id': 'fake',
'image_ref': 1,
@ -132,7 +132,7 @@ class XenAPIVolumeTestCase(test.TestCase):
stubs.stubout_session(self.stubs, stubs.FakeSessionForVolumeTests)
conn = xenapi_conn.get_connection(False)
volume = self._create_volume()
instance = db.instance_create(self.context, self.values)
instance = db.instance_create(self.context, self.instance_values)
vm = xenapi_fake.create_vm(instance.name, 'Running')
result = conn.attach_volume(instance.name, volume['id'], '/dev/sdc')
@ -152,7 +152,7 @@ class XenAPIVolumeTestCase(test.TestCase):
stubs.FakeSessionForVolumeFailedTests)
conn = xenapi_conn.get_connection(False)
volume = self._create_volume()
instance = db.instance_create(self.context, self.values)
instance = db.instance_create(self.context, self.instance_values)
xenapi_fake.create_vm(instance.name, 'Running')
self.assertRaises(Exception,
conn.attach_volume,
@ -369,7 +369,7 @@ class XenAPIVMTestCase(test.TestCase):
create_record=True, empty_dns=False):
stubs.stubout_loopingcall_start(self.stubs)
if create_record:
values = {'id': instance_id,
instance_values = {'id': instance_id,
'project_id': self.project_id,
'user_id': self.user_id,
'image_ref': image_ref,
@ -378,7 +378,7 @@ class XenAPIVMTestCase(test.TestCase):
'instance_type_id': instance_type_id,
'os_type': os_type,
'architecture': architecture}
instance = db.instance_create(self.context, values)
instance = db.instance_create(self.context, instance_values)
else:
instance = db.instance_get(self.context, instance_id)
network_info = [({'bridge': 'fa0', 'id': 0, 'injected': True},
@ -622,28 +622,28 @@ class XenAPIVMTestCase(test.TestCase):
# Ensure that it will not unrescue a non-rescued instance.
self.assertRaises(Exception, conn.unrescue, instance, None)
def test_revert_migration(self):
def test_finish_revert_migration(self):
instance = self._create_instance()
class VMOpsMock():
def __init__(self):
self.revert_migration_called = False
self.finish_revert_migration_called = False
def revert_migration(self, instance):
self.revert_migration_called = True
def finish_revert_migration(self, instance):
self.finish_revert_migration_called = True
stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)
conn = xenapi_conn.get_connection(False)
conn._vmops = VMOpsMock()
conn.revert_migration(instance)
self.assertTrue(conn._vmops.revert_migration_called)
conn.finish_revert_migration(instance)
self.assertTrue(conn._vmops.finish_revert_migration_called)
def _create_instance(self, instance_id=1, spawn=True):
"""Creates and spawns a test instance."""
stubs.stubout_loopingcall_start(self.stubs)
values = {
instance_values = {
'id': instance_id,
'project_id': self.project_id,
'user_id': self.user_id,
@ -653,7 +653,7 @@ class XenAPIVMTestCase(test.TestCase):
'instance_type_id': '3', # m1.large
'os_type': 'linux',
'architecture': 'x86-64'}
instance = db.instance_create(self.context, values)
instance = db.instance_create(self.context, instance_values)
network_info = [({'bridge': 'fa0', 'id': 0, 'injected': False},
{'broadcast': '192.168.0.255',
'dns': ['192.168.0.1'],
@ -731,7 +731,7 @@ class XenAPIMigrateInstance(test.TestCase):
self.user_id = 'fake'
self.project_id = 'fake'
self.context = context.RequestContext(self.user_id, self.project_id)
self.values = {'id': 1,
self.instance_values = {'id': 1,
'project_id': self.project_id,
'user_id': self.user_id,
'image_ref': 1,
@ -742,22 +742,34 @@ class XenAPIMigrateInstance(test.TestCase):
'os_type': 'linux',
'architecture': 'x86-64'}
migration_values = {
'source_compute': 'nova-compute',
'dest_compute': 'nova-compute',
'dest_host': '10.127.5.114',
'status': 'post-migrating',
'instance_uuid': '15f23e6a-cc6e-4d22-b651-d9bdaac316f7',
'old_instance_type_id': 5,
'new_instance_type_id': 1
}
self.migration = db.migration_create(
context.get_admin_context(), migration_values)
fake_utils.stub_out_utils_execute(self.stubs)
stubs.stub_out_migration_methods(self.stubs)
stubs.stubout_get_this_vm_uuid(self.stubs)
glance_stubs.stubout_glance_client(self.stubs)
def test_migrate_disk_and_power_off(self):
instance = db.instance_create(self.context, self.values)
instance = db.instance_create(self.context, self.instance_values)
stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)
conn = xenapi_conn.get_connection(False)
conn.migrate_disk_and_power_off(instance, '127.0.0.1')
def test_revert_migrate(self):
instance = db.instance_create(self.context, self.values)
instance = db.instance_create(self.context, self.instance_values)
self.called = False
self.fake_vm_start_called = False
self.fake_revert_migration_called = False
self.fake_finish_revert_migration_called = False
def fake_vm_start(*args, **kwargs):
self.fake_vm_start_called = True
@ -765,13 +777,13 @@ class XenAPIMigrateInstance(test.TestCase):
def fake_vdi_resize(*args, **kwargs):
self.called = True
def fake_revert_migration(*args, **kwargs):
self.fake_revert_migration_called = True
def fake_finish_revert_migration(*args, **kwargs):
self.fake_finish_revert_migration_called = True
self.stubs.Set(stubs.FakeSessionForMigrationTests,
"VDI_resize_online", fake_vdi_resize)
self.stubs.Set(vmops.VMOps, '_start', fake_vm_start)
self.stubs.Set(vmops.VMOps, 'revert_migration', fake_revert_migration)
self.stubs.Set(vmops.VMOps, 'finish_revert_migration', fake_finish_revert_migration)
stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)
stubs.stubout_loopingcall_start(self.stubs)
@ -790,17 +802,17 @@ class XenAPIMigrateInstance(test.TestCase):
'label': 'fake',
'mac': 'DE:AD:BE:EF:00:00',
'rxtx_cap': 3})]
conn.finish_migration(self.context, instance,
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy='hurr', cow='durr'),
network_info, resize_instance=True)
self.assertEqual(self.called, True)
self.assertEqual(self.fake_vm_start_called, True)
conn.revert_migration(instance)
self.assertEqual(self.fake_revert_migration_called, True)
conn.finish_revert_migration(instance)
self.assertEqual(self.fake_finish_revert_migration_called, True)
def test_finish_migrate(self):
instance = db.instance_create(self.context, self.values)
instance = db.instance_create(self.context, self.instance_values)
self.called = False
self.fake_vm_start_called = False
@ -831,7 +843,7 @@ class XenAPIMigrateInstance(test.TestCase):
'label': 'fake',
'mac': 'DE:AD:BE:EF:00:00',
'rxtx_cap': 3})]
conn.finish_migration(self.context, instance,
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy='hurr', cow='durr'),
network_info, resize_instance=True)
self.assertEqual(self.called, True)
@ -840,8 +852,8 @@ class XenAPIMigrateInstance(test.TestCase):
def test_finish_migrate_no_local_storage(self):
tiny_type_id = \
instance_types.get_instance_type_by_name('m1.tiny')['id']
self.values.update({'instance_type_id': tiny_type_id, 'local_gb': 0})
instance = db.instance_create(self.context, self.values)
self.instance_values.update({'instance_type_id': tiny_type_id, 'local_gb': 0})
instance = db.instance_create(self.context, self.instance_values)
def fake_vdi_resize(*args, **kwargs):
raise Exception("This shouldn't be called")
@ -865,12 +877,12 @@ class XenAPIMigrateInstance(test.TestCase):
'label': 'fake',
'mac': 'DE:AD:BE:EF:00:00',
'rxtx_cap': 3})]
conn.finish_migration(self.context, instance,
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy='hurr', cow='durr'),
network_info, resize_instance=True)
def test_finish_migrate_no_resize_vdi(self):
instance = db.instance_create(self.context, self.values)
instance = db.instance_create(self.context, self.instance_values)
def fake_vdi_resize(*args, **kwargs):
raise Exception("This shouldn't be called")
@ -896,7 +908,7 @@ class XenAPIMigrateInstance(test.TestCase):
'rxtx_cap': 3})]
# Resize instance would be determined by the compute call
conn.finish_migration(self.context, instance,
conn.finish_migration(self.context, self.migration, instance,
dict(base_copy='hurr', cow='durr'),
network_info, resize_instance=False)

View File

@ -297,7 +297,7 @@ class FakeSessionForMigrationTests(fake.SessionBase):
def stub_out_migration_methods(stubs):
def fake_get_snapshot(self, instance):
def fake_create_snapshot(self, instance):
return 'vm_ref', dict(image='foo', snap='bar')
@classmethod
@ -327,7 +327,7 @@ def stub_out_migration_methods(stubs):
stubs.Set(vmops.VMOps, '_destroy', fake_destroy)
stubs.Set(vm_utils.VMHelper, 'scan_default_sr', fake_sr)
stubs.Set(vm_utils.VMHelper, 'scan_sr', fake_sr)
stubs.Set(vmops.VMOps, '_get_snapshot', fake_get_snapshot)
stubs.Set(vmops.VMOps, '_create_snapshot', fake_create_snapshot)
stubs.Set(vm_utils.VMHelper, 'get_vdi_for_vm_safely', fake_get_vdi)
stubs.Set(xenapi_conn.XenAPISession, 'wait_for_task', lambda x, y, z: None)
stubs.Set(vm_utils.VMHelper, 'get_sr_path', fake_get_sr_path)

View File

@ -244,8 +244,8 @@ class ComputeDriver(object):
"""
raise NotImplementedError()
def finish_migration(self, context, instance, disk_info, network_info,
resize_instance):
def finish_migration(self, context, migration, instance, disk_info,
network_info, resize_instance):
"""Completes a resize, turning on the migrated instance
:param network_info:
@ -253,8 +253,13 @@ class ComputeDriver(object):
"""
raise NotImplementedError()
def revert_migration(self, instance):
"""Reverts a resize, powering back on the instance"""
def confirm_migration(self, migration, instance, network_info):
"""Confirms a resize, destroying the source VM"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()
def finish_revert_migration(self, instance):
"""Finish reverting a resize, powering back on the instance"""
# TODO(Vek): Need to pass context in for access to auth_token
raise NotImplementedError()

View File

@ -694,6 +694,10 @@ class VMHelper(HelperBase):
return is_pv
@classmethod
def set_vm_name_label(cls, session, vm_ref, name_label):
session.get_xenapi().VM.set_name_label(vm_ref, name_label)
@classmethod
def lookup(cls, session, name_label):
"""Look the instance i up, and returns it if available"""

View File

@ -109,14 +109,29 @@ class VMOps(object):
instance_infos.append(instance_info)
return instance_infos
def revert_migration(self, instance):
vm_ref = VMHelper.lookup(self._session, instance.name)
def confirm_migration(self, migration, instance, network_info):
name_label = self._get_orig_vm_name_label(instance)
vm_ref = VMHelper.lookup(self._session, name_label)
return self._destroy(instance, vm_ref, network_info, shutdown=False)
def finish_revert_migration(self, instance):
# NOTE(sirp): the original vm was suffixed with '-orig'; find it using
# the old suffix, remove the suffix, then power it back on.
name_label = self._get_orig_vm_name_label(instance)
vm_ref = VMHelper.lookup(self._session, name_label)
# Remove the '-orig' suffix (which was added in case the resized VM
# ends up on the source host, common during testing)
name_label = instance.name
VMHelper.set_vm_name_label(self._session, vm_ref, name_label)
self._start(instance, vm_ref)
def finish_migration(self, context, instance, disk_info, network_info,
resize_instance):
def finish_migration(self, context, migration, instance, disk_info,
network_info, resize_instance):
vdi_uuid = self.link_disks(instance, disk_info['base_copy'],
disk_info['cow'])
vm_ref = self._create_vm(context, instance,
[dict(vdi_type='os', vdi_uuid=vdi_uuid)],
network_info)
@ -549,7 +564,8 @@ class VMOps(object):
"""
template_vm_ref = None
try:
template_vm_ref, template_vdi_uuids = self._get_snapshot(instance)
template_vm_ref, template_vdi_uuids =\
self._create_snapshot(instance)
# call plugin to ship snapshot off to glance
VMHelper.upload_image(context,
self._session, instance, template_vdi_uuids, image_id)
@ -560,7 +576,7 @@ class VMOps(object):
logging.debug(_("Finished snapshot and upload for VM %s"), instance)
def _get_snapshot(self, instance):
def _create_snapshot(self, instance):
#TODO(sirp): Add quiesce and VSS locking support when Windows support
# is added
@ -577,6 +593,20 @@ class VMOps(object):
% locals())
return
def _migrate_vhd(self, instance, vdi_uuid, dest, sr_path):
instance_id = instance.id
params = {'host': dest,
'vdi_uuid': vdi_uuid,
'instance_id': instance_id,
'sr_path': sr_path}
task = self._session.async_call_plugin('migration', 'transfer_vhd',
{'params': pickle.dumps(params)})
self._session.wait_for_task(task, instance_id)
def _get_orig_vm_name_label(self, instance):
return instance.name + '-orig'
def migrate_disk_and_power_off(self, instance, dest):
"""Copies a VHD from one host machine to another.
@ -594,34 +624,27 @@ class VMOps(object):
base_copy_uuid = cow_uuid = None
template_vdi_uuids = template_vm_ref = None
try:
# transfer the base copy
template_vm_ref, template_vdi_uuids = self._get_snapshot(instance)
template_vm_ref, template_vdi_uuids =\
self._create_snapshot(instance)
base_copy_uuid = template_vdi_uuids['image']
vdi_ref, vm_vdi_rec = \
VMHelper.get_vdi_for_vm_safely(self._session, vm_ref)
cow_uuid = vm_vdi_rec['uuid']
params = {'host': dest,
'vdi_uuid': base_copy_uuid,
'instance_id': instance.id,
'sr_path': VMHelper.get_sr_path(self._session)}
sr_path = VMHelper.get_sr_path(self._session)
task = self._session.async_call_plugin('migration', 'transfer_vhd',
{'params': pickle.dumps(params)})
self._session.wait_for_task(task, instance.id)
# transfer the base copy
self._migrate_vhd(instance, base_copy_uuid, dest, sr_path)
# Now power down the instance and transfer the COW VHD
self._shutdown(instance, vm_ref, hard=False)
self._migrate_vhd(instance, cow_uuid, dest, sr_path)
params = {'host': dest,
'vdi_uuid': cow_uuid,
'instance_id': instance.id,
'sr_path': VMHelper.get_sr_path(self._session), }
task = self._session.async_call_plugin('migration', 'transfer_vhd',
{'params': pickle.dumps(params)})
self._session.wait_for_task(task, instance.id)
# NOTE(sirp): in case we're resizing to the same host (for dev
# purposes), apply a suffix to name-label so the two VM records
# that exist until confirm_resize completes don't collide.
name_label = self._get_orig_vm_name_label(instance)
VMHelper.set_vm_name_label(self._session, vm_ref, name_label)
finally:
if template_vm_ref:
self._destroy(instance, template_vm_ref,

View File

@ -189,14 +189,19 @@ class XenAPIConnection(driver.ComputeDriver):
"""Create VM instance"""
self._vmops.spawn(context, instance, network_info)
def revert_migration(self, instance):
"""Reverts a resize, powering back on the instance"""
self._vmops.revert_migration(instance)
def confirm_migration(self, migration, instance, network_info):
"""Confirms a resize, destroying the source VM"""
# TODO(Vek): Need to pass context in for access to auth_token
self._vmops.confirm_migration(migration, instance, network_info)
def finish_migration(self, context, instance, disk_info, network_info,
resize_instance=False):
def finish_revert_migration(self, instance):
"""Finish reverting a resize, powering back on the instance"""
self._vmops.finish_revert_migration(instance)
def finish_migration(self, context, migration, instance, disk_info,
network_info, resize_instance=False):
"""Completes a resize, turning on the migrated instance"""
self._vmops.finish_migration(context, instance, disk_info,
self._vmops.finish_migration(context, migration, instance, disk_info,
network_info, resize_instance)
def snapshot(self, context, instance, image_id):

View File

@ -211,7 +211,11 @@ def _import_vhds(sr_path, staging_path, uuid_stack):
snap_info = prepare_if_exists(staging_path, 'snap.vhd',
image_info[0])
if snap_info:
paths_to_move.append(snap_info[0])
# NOTE(sirp): this is an insert rather than an append since the
# 'snapshot' vhd needs to be copied into the SR before the base copy.
# If it isn't, then there is a possibility that snapwatchd will
# delete the base_copy since it is an unreferenced parent.
paths_to_move.insert(0, snap_info[0])
# We return this snap as the VDI instead of image.vhd
vdi_return_list.append(dict(vdi_type="os", vdi_uuid=snap_info[1]))
else:

View File

@ -48,7 +48,7 @@ def move_vhds_into_sr(session, args):
# Discover the copied VHDs locally, and then set up paths to copy
# them to under the SR
source_image_path = "%s/instance%d" % ('/images/', instance_id)
source_image_path = "/images/instance%d" % instance_id
source_base_copy_path = "%s/%s.vhd" % (source_image_path,
old_base_copy_uuid)
source_cow_path = "%s/%s.vhd" % (source_image_path, old_cow_uuid)
@ -74,9 +74,12 @@ def move_vhds_into_sr(session, args):
(new_cow_path, new_base_copy_path))
subprocess.call(shlex.split('/usr/sbin/vhd-util modify -n %s -p %s' %
(new_cow_path, new_base_copy_path)))
logging.debug('Moving VHDs into SR %s' % sr_path)
shutil.move("%s/%s.vhd" % (temp_vhd_path, new_base_copy_uuid), sr_path)
shutil.move("%s/%s.vhd" % (temp_vhd_path, new_cow_uuid), sr_path)
# NOTE(sirp): COW should be copied before base_copy to avoid snapwatchd
# GC'ing an unreferenced base copy VDI
shutil.move(new_cow_path, sr_path)
shutil.move(new_base_copy_path, sr_path)
logging.debug('Cleaning up temporary SR path %s' % temp_vhd_path)
os.rmdir(temp_vhd_path)
@ -93,7 +96,7 @@ def transfer_vhd(session, args):
vhd_path = "%s.vhd" % vdi_uuid
source_path = "%s/%s" % (sr_path, vhd_path)
dest_path = '%s:%sinstance%d/' % (host, '/images/', instance_id)
dest_path = '%s:/images/instance%d/' % (host, instance_id)
logging.debug("Preparing to transmit %s to %s" % (source_path,
dest_path))