xenapi: Refactor snapshots during resize
Currently we use VM.snapshot for resize, which fails if the VM has an
attached volume that does not support snapshots. This change uses
VDI.snapshot instead, for every VDI that was not attached as a
nova/cinder volume. It also detaches and reattaches volumes during
migrations and migration reverts in the xenapi driver.

Fixes Bug #1028092

Change-Id: I3e2973747135a9c33de194e38537620c397bb87e
committed by Clay Gerrard
parent 1d4506c16a
commit 3595275016
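In a nutshell, the change replaces the single VM.snapshot call with one VDI.snapshot per disk, skipping any VBD that nova/cinder attached (marked with 'osvol' in its other_config). The sketch below illustrates that loop; it is a minimal illustration, not the committed code. `session` is assumed to be the driver's XenAPI session wrapper exposing call_xenapi(), and the helper name snapshot_non_volume_vdis is made up here, while the patch itself adds _vdi_snapshot_vm_base in vm_utils.py.

    # Illustrative sketch only -- the committed helper is _vdi_snapshot_vm_base.
    # Assumes `session` wraps a XenAPI session and proxies calls via call_xenapi().
    def snapshot_non_volume_vdis(session, vm_ref):
        """Snapshot each disk that is not a nova/cinder volume; return the records."""
        snapshots = []
        for vbd_ref in session.call_xenapi("VM.get_VBDs", vm_ref):
            other_config = session.call_xenapi("VBD.get_other_config", vbd_ref)
            if 'osvol' in other_config:
                # Attached by nova/cinder; it may not support snapshots, so skip it.
                continue
            vdi_ref = session.call_xenapi("VBD.get_VDI", vbd_ref)
            snapshot_ref = session.call_xenapi("VDI.snapshot", vdi_ref, {})
            snapshots.append(session.call_xenapi("VDI.get_record", snapshot_ref))
        return snapshots

The cleanup path then destroys these per-VDI snapshots individually (see _destroy_snapshots in the diff) instead of destroying a template VM.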
@@ -787,8 +787,10 @@ class ComputeManager(manager.SchedulerDependentManager):
             if str(bdm['volume_id']) == str(volume_id):
                 return bdm
 
-    def _get_instance_volume_block_device_info(self, context, instance_uuid):
-        bdms = self._get_instance_volume_bdms(context, instance_uuid)
+    def _get_instance_volume_block_device_info(self, context, instance_uuid,
+                                               bdms=None):
+        if bdms is None:
+            bdms = self._get_instance_volume_bdms(context, instance_uuid)
         block_device_mapping = []
         for bdm in bdms:
             try:
@@ -847,7 +849,7 @@ class ComputeManager(manager.SchedulerDependentManager):
         # NOTE(vish) get bdms before destroying the instance
         bdms = self._get_instance_volume_bdms(context, instance['uuid'])
         block_device_info = self._get_instance_volume_block_device_info(
-            context, instance['uuid'])
+            context, instance['uuid'], bdms=bdms)
         self.driver.destroy(instance, self._legacy_nw_info(network_info),
                             block_device_info)
         for bdm in bdms:
@@ -1439,6 +1441,14 @@ class ComputeManager(manager.SchedulerDependentManager):
 
             self.driver.destroy(instance, self._legacy_nw_info(network_info),
                                 block_device_info)
+            # Terminate volume connections.
+            bdms = self._get_instance_volume_bdms(context, instance['uuid'])
+            if bdms:
+                connector = self.driver.get_volume_connector(instance)
+                for bdm in bdms:
+                    volume = self.volume_api.get(context, bdm['volume_id'])
+                    self.volume_api.terminate_connection(context, volume,
+                                                         connector)
             self.compute_rpcapi.finish_revert_resize(context, instance,
                     migration_ref['id'], migration_ref['source_compute'],
                     reservations)
@@ -1466,8 +1476,15 @@ class ComputeManager(manager.SchedulerDependentManager):
             old_instance_type = migration_ref['old_instance_type_id']
             instance_type = instance_types.get_instance_type(old_instance_type)
 
+            bdms = self._get_instance_volume_bdms(context, instance['uuid'])
             block_device_info = self._get_instance_volume_block_device_info(
-                                context, instance['uuid'])
+                context, instance['uuid'])
+            if bdms:
+                connector = self.driver.get_volume_connector(instance)
+                for bdm in bdms:
+                    volume = self.volume_api.get(context, bdm['volume_id'])
+                    self.volume_api.initialize_connection(context, volume,
+                                                          connector)
 
             self.driver.finish_revert_migration(instance,
                                                 self._legacy_nw_info(network_info),
@@ -1592,6 +1609,15 @@ class ComputeManager(manager.SchedulerDependentManager):
                     instance_type_ref, self._legacy_nw_info(network_info),
                     block_device_info)
 
+            # Terminate volume connections.
+            bdms = self._get_instance_volume_bdms(context, instance['uuid'])
+            if bdms:
+                connector = self.driver.get_volume_connector(instance)
+                for bdm in bdms:
+                    volume = self.volume_api.get(context, bdm['volume_id'])
+                    self.volume_api.terminate_connection(context, volume,
+                                                         connector)
+
             self.db.migration_update(context,
                                      migration_id,
                                      {'status': 'post-migrating'})
@@ -1639,8 +1665,16 @@ class ComputeManager(manager.SchedulerDependentManager):
             context, instance, "finish_resize.start",
             network_info=network_info)
 
+        bdms = self._get_instance_volume_bdms(context, instance['uuid'])
         block_device_info = self._get_instance_volume_block_device_info(
-                            context, instance['uuid'])
+                            context, instance['uuid'], bdms=bdms)
+
+        if bdms:
+            connector = self.driver.get_volume_connector(instance)
+            for bdm in bdms:
+                volume = self.volume_api.get(context, bdm['volume_id'])
+                self.volume_api.initialize_connection(context, volume,
+                                                      connector)
 
         self.driver.finish_migration(context, migration_ref, instance,
                                      disk_info,
@@ -358,7 +358,8 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
         self.assertDictMatch(fake_diagnostics, expected)
 
     def test_instance_snapshot_fails_with_no_primary_vdi(self):
-        def create_bad_vbd(vm_ref, vdi_ref):
+        def create_bad_vbd(session, vm_ref, vdi_ref, userdevice,
+                           vbd_type='disk', read_only=False, bootable=False):
             vbd_rec = {'VM': vm_ref,
                        'VDI': vdi_ref,
                        'userdevice': 'fake',
@@ -367,7 +368,7 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
             xenapi_fake.after_VBD_create(vbd_ref, vbd_rec)
             return vbd_ref
 
-        self.stubs.Set(xenapi_fake, 'create_vbd', create_bad_vbd)
+        self.stubs.Set(vm_utils, 'create_vbd', create_bad_vbd)
         stubs.stubout_instance_snapshot(self.stubs)
         # Stubbing out firewall driver as previous stub sets alters
         # xml rpc result parsing
@@ -188,6 +188,13 @@ class XenAPIDriver(driver.ComputeDriver):
         """Finish reverting a resize, powering back on the instance"""
         # NOTE(vish): Xen currently does not use network info.
         self._vmops.finish_revert_migration(instance)
+        block_device_mapping = driver.block_device_info_get_mapping(
+                block_device_info)
+        for vol in block_device_mapping:
+            connection_info = vol['connection_info']
+            mount_device = vol['mount_device'].rpartition("/")[2]
+            self._volumeops.attach_volume(connection_info,
+                                          instance['name'], mount_device)
 
     def finish_migration(self, context, migration, instance, disk_info,
                          network_info, image_meta, resize_instance=False,
@@ -195,6 +202,13 @@ class XenAPIDriver(driver.ComputeDriver):
         """Completes a resize, turning on the migrated instance"""
         self._vmops.finish_migration(context, migration, instance, disk_info,
                                      network_info, image_meta, resize_instance)
+        block_device_mapping = driver.block_device_info_get_mapping(
+                block_device_info)
+        for vol in block_device_mapping:
+            connection_info = vol['connection_info']
+            mount_device = vol['mount_device'].rpartition("/")[2]
+            self._volumeops.attach_volume(connection_info,
+                                          instance['name'], mount_device)
 
     def snapshot(self, context, instance, image_id):
         """ Create snapshot from a running VM instance """
@@ -237,8 +251,17 @@ class XenAPIDriver(driver.ComputeDriver):
         """Transfers the VHD of a running instance to another host, then shuts
         off the instance copies over the COW disk"""
         # NOTE(vish): Xen currently does not use network info.
-        return self._vmops.migrate_disk_and_power_off(context, instance,
-                                                      dest, instance_type)
+        rv = self._vmops.migrate_disk_and_power_off(context, instance,
+                                                    dest, instance_type)
+        block_device_mapping = driver.block_device_info_get_mapping(
+                block_device_info)
+        name_label = self._vmops._get_orig_vm_name_label(instance)
+        for vol in block_device_mapping:
+            connection_info = vol['connection_info']
+            mount_device = vol['mount_device'].rpartition("/")[2]
+            self._volumeops.detach_volume(connection_info,
+                                          name_label, mount_device)
+        return rv
 
     def suspend(self, instance):
         """suspend the specified instance"""
@@ -524,39 +524,51 @@ def snapshot_attached_here(session, instance, vm_ref, label):
     vm_vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref)
     original_parent_uuid = _get_vhd_parent_uuid(session, vm_vdi_ref)
 
-    template_vm_ref, template_vdi_uuid = _create_snapshot(
-        session, instance, vm_ref, label)
-
     try:
+        vdi_snapshot_recs = _vdi_snapshot_vm_base(session, instance, vm_ref)
         sr_ref = vm_vdi_rec["SR"]
         parent_uuid, base_uuid = _wait_for_vhd_coalesce(
             session, instance, sr_ref, vm_vdi_ref, original_parent_uuid)
 
-        vdi_uuids = [vdi_rec['uuid'] for vdi_rec in
-                     _walk_vdi_chain(session, template_vdi_uuid)]
+        vdi_uuids = []
+        for snapshot in vdi_snapshot_recs:
+            vdi_uuids += [vdi_rec['uuid'] for vdi_rec in
+                          _walk_vdi_chain(session, snapshot['uuid'])]
 
         yield vdi_uuids
     finally:
-        _destroy_snapshot(session, instance, template_vm_ref)
+        _destroy_snapshots(session, instance, vdi_snapshot_recs)
 
 
-def _create_snapshot(session, instance, vm_ref, label):
-    template_vm_ref = session.call_xenapi('VM.snapshot', vm_ref, label)
-    template_vdi_rec = get_vdi_for_vm_safely(session, template_vm_ref)[1]
-    template_vdi_uuid = template_vdi_rec["uuid"]
-
-    LOG.debug(_("Created snapshot %(template_vdi_uuid)s with label"
-                " '%(label)s'"), locals(), instance=instance)
-
-    return template_vm_ref, template_vdi_uuid
+def _vdi_snapshot_vm_base(session, instance, vm_ref):
+    """Make a snapshot of every non-cinder VDI and return a list
+    of the new vdi records.
+    """
+    new_vdis = []
+    try:
+        vbd_refs = session.call_xenapi("VM.get_VBDs", vm_ref)
+        for vbd_ref in vbd_refs:
+            oc = session.call_xenapi("VBD.get_other_config", vbd_ref)
+            if 'osvol' not in oc:
+                # This volume is not a nova/cinder volume
+                vdi_ref = session.call_xenapi("VBD.get_VDI", vbd_ref)
+                snapshot_ref = session.call_xenapi("VDI.snapshot", vdi_ref,
+                                                   {})
+                new_vdis.append(session.call_xenapi("VDI.get_record",
+                                                    snapshot_ref))
+    except session.XenAPI.Failure:
+        LOG.exception(_("Failed to snapshot VDI"), instance=instance)
+        raise
+    finally:
+        return new_vdis
 
 
-def _destroy_snapshot(session, instance, vm_ref):
-    vdi_refs = lookup_vm_vdis(session, vm_ref)
+def _destroy_snapshots(session, instance, vdi_snapshot_recs):
+    vdi_refs = [session.call_xenapi("VDI.get_by_uuid", vdi_rec['uuid'])
+                for vdi_rec in vdi_snapshot_recs]
     safe_destroy_vdis(session, vdi_refs)
-
-    destroy_vm(session, instance, vm_ref)
 
 
 def get_sr_path(session):
     """Return the path to our storage repository
@@ -192,10 +192,26 @@ class VMOps(object):
         if resize_instance:
             self._resize_instance(instance, root_vdi)
 
+        # Check if kernel and ramdisk are external
+        kernel_file = None
+        ramdisk_file = None
+
+        name_label = instance['name']
+        if instance['kernel_id']:
+            vdis = vm_utils.create_kernel_image(context, self._session,
+                        instance, name_label, instance['kernel_id'],
+                        vm_utils.ImageType.KERNEL)
+            kernel_file = vdis['kernel'].get('file')
+        if instance['ramdisk_id']:
+            vdis = vm_utils.create_kernel_image(context, self._session,
+                        instance, name_label, instance['ramdisk_id'],
+                        vm_utils.ImageType.RAMDISK)
+            ramdisk_file = vdis['ramdisk'].get('file')
+
         vm_ref = self._create_vm(context, instance, instance['name'],
                                  {'root': root_vdi},
-                                 network_info, image_meta)
+                                 network_info, image_meta, kernel_file,
+                                 ramdisk_file)
         # 5. Start VM
         self._start(instance, vm_ref=vm_ref)
         self._update_instance_progress(context, instance,
@@ -215,8 +215,10 @@ class VolumeOps(object):
             raise Exception(_('Unable to locate volume %s') % mountpoint)
 
         try:
+            vm_rec = self._session.call_xenapi("VM.get_record", vm_ref)
             sr_ref = volume_utils.find_sr_from_vbd(self._session, vbd_ref)
-            vm_utils.unplug_vbd(self._session, vbd_ref)
+            if vm_rec['power_state'] != 'Halted':
+                vm_utils.unplug_vbd(self._session, vbd_ref)
         except volume_utils.StorageError, exc:
             LOG.exception(exc)
             raise Exception(_('Unable to detach volume %s') % mountpoint)