Merge "xenapi: Avoid hotplugging volumes on resize."

Authored by Jenkins on 2013-01-04 01:07:15 +00:00; committed by Gerrit Code Review
commit 3ff2b563c0
6 changed files with 119 additions and 28 deletions

View File

@@ -911,7 +911,7 @@ class XenAPIVMTestCase(stubs.XenAPITestBase):
def __init__(self):
self.finish_revert_migration_called = False
-def finish_revert_migration(self, instance):
+def finish_revert_migration(self, instance, block_info):
self.finish_revert_migration_called = True
conn = xenapi_conn.XenAPIDriver(fake.FakeVirtAPI(), False)

View File

@@ -15,11 +15,12 @@
# under the License.
from nova import test
+from nova.tests.xenapi import stubs
from nova.virt.xenapi import volumeops
class VolumeAttachTestCase(test.TestCase):
-def test_connect_volume_call(self):
+def test_attach_volume_call(self):
ops = volumeops.VolumeOps('session')
self.mox.StubOutWithMock(ops, 'connect_volume')
self.mox.StubOutWithMock(volumeops.vm_utils, 'vm_ref_or_raise')
@@ -31,9 +32,79 @@ class VolumeAttachTestCase(test.TestCase):
volumeops.volume_utils.get_device_number('mountpoint').AndReturn(
'devnumber')
-ops.connect_volume('conn_data', 'devnumber', 'instance_1', 'vmref')
+ops.connect_volume(
+'conn_data', 'devnumber', 'instance_1', 'vmref', hotplug=True)
self.mox.ReplayAll()
ops.attach_volume(
dict(driver_volume_type='iscsi', data='conn_data'),
'instance_1', 'mountpoint')
+def test_attach_volume_no_hotplug(self):
+ops = volumeops.VolumeOps('session')
+self.mox.StubOutWithMock(ops, 'connect_volume')
+self.mox.StubOutWithMock(volumeops.vm_utils, 'vm_ref_or_raise')
+self.mox.StubOutWithMock(volumeops.volume_utils, 'get_device_number')
+volumeops.vm_utils.vm_ref_or_raise('session', 'instance_1').AndReturn(
+'vmref')
+volumeops.volume_utils.get_device_number('mountpoint').AndReturn(
+'devnumber')
+ops.connect_volume(
+'conn_data', 'devnumber', 'instance_1', 'vmref', hotplug=False)
+self.mox.ReplayAll()
+ops.attach_volume(
+dict(driver_volume_type='iscsi', data='conn_data'),
+'instance_1', 'mountpoint', hotplug=False)
+def test_connect_volume_no_hotplug(self):
+session = stubs.FakeSessionForVolumeTests('fake_uri')
+ops = volumeops.VolumeOps(session)
+instance_name = 'instance_1'
+sr_uuid = '1'
+sr_label = 'Disk-for:%s' % instance_name
+sr_params = ''
+sr_ref = 'sr_ref'
+vdi_uuid = '2'
+vdi_ref = 'vdi_ref'
+vbd_ref = 'vbd_ref'
+connection_data = {'vdi_uuid': vdi_uuid}
+vm_ref = 'vm_ref'
+dev_number = 1
+called = {'xenapi': False}
+def fake_call_xenapi(self, *args, **kwargs):
+# Only used for VBD.plug in this code path.
+called['xenapi'] = True
+raise Exception()
+self.stubs.Set(ops._session, 'call_xenapi', fake_call_xenapi)
+self.mox.StubOutWithMock(volumeops.volume_utils, 'parse_sr_info')
+self.mox.StubOutWithMock(ops, 'introduce_sr')
+self.mox.StubOutWithMock(volumeops.volume_utils, 'introduce_vdi')
+self.mox.StubOutWithMock(volumeops.vm_utils, 'create_vbd')
+volumeops.volume_utils.parse_sr_info(
+connection_data, sr_label).AndReturn(
+tuple([sr_uuid, sr_label, sr_params]))
+ops.introduce_sr(sr_uuid, sr_label, sr_params).AndReturn(sr_ref)
+volumeops.volume_utils.introduce_vdi(
+session, sr_ref, vdi_uuid, None).AndReturn(vdi_ref)
+volumeops.vm_utils.create_vbd(
+session, vm_ref, vdi_ref, dev_number,
+bootable=False, osvol=True).AndReturn(vbd_ref)
+self.mox.ReplayAll()
+ops.connect_volume(connection_data, dev_number, instance_name,
+vm_ref, hotplug=False)
+self.assertEquals(False, called['xenapi'])
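
The two tests added above pin down the new contract: attach_volume forwards its hotplug flag to connect_volume, and with hotplug=False no XenAPI call (in particular no VBD.plug) is issued. The following self-contained sketch restates that contract outside of Nova; FakeSession and connect_volume_sketch are illustrative names, not part of nova.virt.xenapi.

    # Illustrative sketch only: a stand-in session records XenAPI calls,
    # mirroring what test_connect_volume_no_hotplug asserts.
    class FakeSession(object):
        def __init__(self):
            self.calls = []

        def call_xenapi(self, method, *args):
            self.calls.append(method)

    def connect_volume_sketch(session, vbd_ref, hotplug=True):
        # The VBD is always created; it is only plugged when hotplugging.
        if hotplug:
            session.call_xenapi("VBD.plug", vbd_ref)

    session = FakeSession()
    connect_volume_sketch(session, 'vbd_ref', hotplug=False)
    assert 'VBD.plug' not in session.calls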

View File

@@ -178,25 +178,15 @@ class XenAPIDriver(driver.ComputeDriver):
block_device_info=None):
"""Finish reverting a resize, powering back on the instance"""
# NOTE(vish): Xen currently does not use network info.
-self._vmops.finish_revert_migration(instance)
-self._attach_mapped_block_devices(instance, block_device_info)
+self._vmops.finish_revert_migration(instance, block_device_info)
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance=False,
block_device_info=None):
"""Completes a resize, turning on the migrated instance"""
self._vmops.finish_migration(context, migration, instance, disk_info,
-network_info, image_meta, resize_instance)
-self._attach_mapped_block_devices(instance, block_device_info)
-def _attach_mapped_block_devices(self, instance, block_device_info):
-block_device_mapping = driver.block_device_info_get_mapping(
-block_device_info)
-for vol in block_device_mapping:
-connection_info = vol['connection_info']
-mount_device = vol['mount_device'].rpartition("/")[2]
-self.attach_volume(connection_info,
-instance['name'], mount_device)
+network_info, image_meta, resize_instance,
+block_device_info)
def snapshot(self, context, instance, image_id):
""" Create snapshot from a running VM instance """

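The driver-level change above removes _attach_mapped_block_devices from XenAPIDriver and instead passes block_device_info down to the vmops layer, which decides when the volumes are attached. A rough sketch of that delegation, using simplified stand-in classes rather than the real XenAPIDriver/VMOps signatures:

    # Simplified delegation sketch; not the actual nova.virt.xenapi classes.
    class VMOpsStub(object):
        def __init__(self):
            self.seen = None

        def finish_revert_migration(self, instance, block_device_info=None):
            # The real vmops attaches the mapped volumes with hotplug=False
            # and only then starts the VM.
            self.seen = (instance, block_device_info)

    class DriverSketch(object):
        def __init__(self, vmops):
            self._vmops = vmops

        def finish_revert_migration(self, instance, block_device_info=None):
            # No volume handling here any more; just hand the info down.
            self._vmops.finish_revert_migration(instance, block_device_info)

    vmops = VMOpsStub()
    DriverSketch(vmops).finish_revert_migration('instance_1',
                                                {'block_device_mapping': []})
    assert vmops.seen is not None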
View File

@@ -727,6 +727,8 @@ class SessionBase(object):
return lambda *params: self._create(name, params)
elif self._is_destroy(name):
return lambda *params: self._destroy(name, params)
+elif name == 'XenAPI':
+return FakeXenAPI()
else:
return None
@@ -890,6 +892,11 @@ class SessionBase(object):
return result
+class FakeXenAPI(object):
+def __init__(self):
+self.Failure = Failure
# Based upon _Method from xmlrpclib.
class _Dispatcher:
def __init__(self, send, name):
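
The fake session now resolves the XenAPI attribute to a FakeXenAPI object exposing Failure, because connect_volume's error handling references self._session.XenAPI.Failure; the fake needs that attribute to resolve to an exception class for the except clause to work in tests. A minimal stand-alone illustration of the shape (these classes are placeholders, not the ones in Nova's fake module):

    # Placeholder classes showing why the fake session exposes .XenAPI.Failure.
    class Failure(Exception):
        pass

    class FakeXenAPI(object):
        def __init__(self):
            self.Failure = Failure

    class FakeSession(object):
        def __init__(self):
            self.XenAPI = FakeXenAPI()

        def call_xenapi(self, method, *args):
            raise self.XenAPI.Failure('%s failed' % method)

    session = FakeSession()
    try:
        session.call_xenapi('VBD.plug', 'vbd_ref')
    except session.XenAPI.Failure:
        pass  # connect_volume's error path relies on this attribute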

View File

@@ -39,11 +39,13 @@ from nova.openstack.common import jsonutils
from nova.openstack.common import log as logging
from nova.openstack.common import timeutils
from nova import utils
+from nova.virt import driver as virt_driver
from nova.virt import firewall
from nova.virt.xenapi import agent as xapi_agent
from nova.virt.xenapi import pool_states
from nova.virt.xenapi import vm_utils
from nova.virt.xenapi import volume_utils
+from nova.virt.xenapi import volumeops
LOG = logging.getLogger(__name__)
@@ -147,6 +149,7 @@ class VMOps(object):
self.compute_api = compute.API()
self._session = session
self._virtapi = virtapi
+self._volumeops = volumeops.VolumeOps(self._session)
self.firewall_driver = firewall.load_driver(
DEFAULT_FIREWALL_DRIVER,
self._virtapi,
@@ -179,7 +182,20 @@
vm_ref = vm_utils.lookup(self._session, name_label)
return self._destroy(instance, vm_ref, network_info)
-def finish_revert_migration(self, instance):
+def _attach_mapped_block_devices(self, instance, block_device_info):
+# We are attaching these volumes before start (no hotplugging)
+# because some guests (windows) don't load PV drivers quickly
+block_device_mapping = virt_driver.block_device_info_get_mapping(
+block_device_info)
+for vol in block_device_mapping:
+connection_info = vol['connection_info']
+mount_device = vol['mount_device'].rpartition("/")[2]
+self._volumeops.attach_volume(connection_info,
+instance['name'],
+mount_device,
+hotplug=False)
+def finish_revert_migration(self, instance, block_device_info=None):
# NOTE(sirp): the original vm was suffixed with '-orig'; find it using
# the old suffix, remove the suffix, then power it back on.
name_label = self._get_orig_vm_name_label(instance)
@@ -190,6 +206,8 @@
name_label = instance['name']
vm_utils.set_vm_name_label(self._session, vm_ref, name_label)
+self._attach_mapped_block_devices(instance, block_device_info)
self._start(instance, vm_ref)
def finish_migration(self, context, migration, instance, disk_info,
@@ -221,6 +239,9 @@
{'root': root_vdi},
disk_image_type, network_info, kernel_file,
ramdisk_file)
+self._attach_mapped_block_devices(instance, block_device_info)
# 5. Start VM
self._start(instance, vm_ref=vm_ref)
self._update_instance_progress(context, instance,
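
_attach_mapped_block_devices walks the block_device_mapping entries inside block_device_info and attaches each volume with hotplug=False, so the VBDs already exist when _start boots the VM and no post-boot hotplug (and hence no loaded PV driver) is needed. A sketch of the data shape and the mount-device parsing it relies on; the payload below is illustrative, not a real Nova structure:

    # Illustrative block_device_info payload and the per-volume handling
    # performed by _attach_mapped_block_devices (simplified).
    block_device_info = {
        'block_device_mapping': [
            {'connection_info': {'driver_volume_type': 'iscsi',
                                 'data': {'vdi_uuid': '2'}},
             'mount_device': '/dev/xvdb'},
        ],
    }

    for vol in block_device_info['block_device_mapping']:
        connection_info = vol['connection_info']
        # '/dev/xvdb' -> 'xvdb': only the device name is passed on.
        mount_device = vol['mount_device'].rpartition('/')[2]
        assert mount_device == 'xvdb'
        # The real code then calls:
        # self._volumeops.attach_volume(connection_info, instance['name'],
        #                               mount_device, hotplug=False)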

View File

@@ -105,7 +105,8 @@ class VolumeOps(object):
LOG.exception(exc)
raise exception.NovaException(_('Could not forget SR'))
-def attach_volume(self, connection_info, instance_name, mountpoint):
+def attach_volume(self, connection_info, instance_name, mountpoint,
+hotplug=True):
"""Attach volume storage to VM instance"""
vm_ref = vm_utils.vm_ref_or_raise(self._session, instance_name)
@@ -121,14 +122,14 @@
connection_data = connection_info['data']
dev_number = volume_utils.get_device_number(mountpoint)
-self.connect_volume(
-connection_data, dev_number, instance_name, vm_ref)
+self.connect_volume(connection_data, dev_number, instance_name,
+vm_ref, hotplug=hotplug)
LOG.info(_('Mountpoint %(mountpoint)s attached to'
' instance %(instance_name)s') % locals())
def connect_volume(self, connection_data, dev_number, instance_name,
-vm_ref):
+vm_ref, hotplug=True):
description = 'Disk-for:%s' % instance_name
uuid, label, sr_params = volume_utils.parse_sr_info(connection_data,
@@ -172,13 +173,14 @@
raise Exception(_('Unable to use SR %(sr_ref)s for'
' instance %(instance_name)s') % locals())
-try:
-self._session.call_xenapi("VBD.plug", vbd_ref)
-except self._session.XenAPI.Failure, exc:
-LOG.exception(exc)
-self.forget_sr(uuid)
-raise Exception(_('Unable to attach volume to instance %s')
-% instance_name)
+if hotplug:
+try:
+self._session.call_xenapi("VBD.plug", vbd_ref)
+except self._session.XenAPI.Failure, exc:
+LOG.exception(exc)
+self.forget_sr(uuid)
+raise Exception(_('Unable to attach volume to instance %s')
+% instance_name)
def detach_volume(self, connection_info, instance_name, mountpoint):
"""Detach volume storage to VM instance"""
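
With hotplug=False, connect_volume still introduces the SR and the VDI and creates the VBD, but skips VBD.plug; the disk only becomes visible once the VM is started with the VBD already defined. A condensed control-flow sketch of that guard, using placeholder callables rather than the real vm_utils/volume_utils helpers:

    # Condensed sketch of connect_volume's hotplug guard (placeholders only).
    def connect_volume_sketch(session, create_vbd, plug_vbd, hotplug=True):
        vbd_ref = create_vbd()             # the VBD is always created
        if hotplug:
            plug_vbd(session, vbd_ref)     # VBD.plug only for a running VM
        return vbd_ref

    # Resize/migration path: create the VBD now, let VM start pick it up.
    connect_volume_sketch(session=None,
                          create_vbd=lambda: 'vbd_ref',
                          plug_vbd=lambda s, v: None,
                          hotplug=False)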