Implement resize down for XenAPI
This patch implements resizing an instance to a smaller disk. It does this by copying the VDI and shrinking the partition and filesystem (resize2fs) on the copy before transferring the disk to the new host.

Change-Id: Ic901a59cb6cdb79605c70528cf85064d8335ee2f
parent ab215c42a2
commit c25f7e7e83
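In rough outline, the flow this change introduces for a resize-down (when the instance is marked auto_disk_config) is: power down the instance, copy the root VDI, shrink the filesystem and partition on the copy, dd the shrunken partition into a freshly created smaller VDI, and rsync that VHD to the destination host. The shrink step can be sketched as follows. This is an illustrative, standalone approximation of the commands the new _resize_part_and_fs() helper issues, not the patched Nova code itself; the run() wrapper and the argument values are hypothetical stand-ins for nova's utils.execute(..., run_as_root=True).

    import subprocess

    def run(*cmd):
        # Hypothetical stand-in for nova's utils.execute(..., run_as_root=True)
        subprocess.check_call(cmd)

    def shrink_root_partition(dev_path, partition_path, start, new_sectors):
        """Shrink an ext3/ext4 root filesystem, then recreate its partition.

        When shrinking, the filesystem is resized before the partition is
        recreated at the smaller size; when growing, the order is reversed.
        """
        size = new_sectors - start    # new partition size in 512-byte sectors
        end = new_sectors - 1         # last sector of the new partition

        run('tune2fs', '-O', '^has_journal', partition_path)   # ext3 -> ext2
        run('e2fsck', '-f', '-p', partition_path)              # fs must be clean
        run('resize2fs', partition_path, '%ds' % size)         # shrink the fs

        run('parted', '--script', dev_path, 'rm', '1')
        run('parted', '--script', dev_path, 'mkpart', 'primary',
            '%ds' % start, '%ds' % end)

        run('tune2fs', '-j', partition_path)                   # re-add journal

The diff below wires this sequence into VMHelper.resize_disk() and the XenAPI migration plugin.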
@@ -679,9 +679,6 @@ class Controller(wsgi.Controller):
         except exception.CannotResizeToSameSize:
             msg = _("Resize requires a change in size.")
             raise exc.HTTPBadRequest(explanation=msg)
-        except exception.CannotResizeToSmallerSize:
-            msg = _("Resizing to a smaller size is not supported.")
-            raise exc.HTTPBadRequest(explanation=msg)

         return webob.Response(status_int=202)

@@ -1275,8 +1275,6 @@ class API(base.Base):

         current_memory_mb = current_instance_type['memory_mb']
         new_memory_mb = new_instance_type['memory_mb']
-        if current_memory_mb > new_memory_mb:
-            raise exception.CannotResizeToSmallerSize()

         if (current_memory_mb == new_memory_mb) and flavor_id:
             raise exception.CannotResizeToSameSize()
@@ -1069,6 +1069,8 @@ class ComputeManager(manager.SchedulerDependentManager):
         migration_ref = self.db.migration_get(context, migration_id)
         instance_ref = self.db.instance_get_by_uuid(context,
                 migration_ref.instance_uuid)
+        instance_type_ref = self.db.instance_type_get(context,
+                migration_ref.new_instance_type_id)

         self.db.migration_update(context,
                                  migration_id,
@@ -1076,7 +1078,8 @@ class ComputeManager(manager.SchedulerDependentManager):

         try:
             disk_info = self.driver.migrate_disk_and_power_off(
-                    context, instance_ref, migration_ref['dest_host'])
+                    context, instance_ref, migration_ref['dest_host'],
+                    instance_type_ref)
         except exception.MigrationError, error:
             LOG.error(_('%s. Setting instance vm_state to ERROR') % (error,))
             self._instance_update(context,
@@ -830,10 +830,6 @@ class CannotResizeToSameSize(NovaException):
     message = _("When resizing, instances must change size!")


-class CannotResizeToSmallerSize(NovaException):
-    message = _("Resizing to a smaller size is not supported.")
-
-
 class ImageTooLarge(NovaException):
     message = _("Image is larger than instance type allows")

@@ -1568,23 +1568,6 @@ class ComputeAPITestCase(BaseTestCase):

         self.compute.terminate_instance(context, instance['uuid'])

-    def test_resize_down_fails(self):
-        """Ensure resizing down raises and fails"""
-        instance = self._create_fake_instance()
-        context = self.context.elevated()
-        instance = db.instance_get_by_uuid(context, instance['uuid'])
-        self.compute.run_instance(self.context, instance['uuid'])
-
-        inst_type = instance_types.get_instance_type_by_name('m1.xlarge')
-        db.instance_update(self.context, instance['uuid'],
-                {'instance_type_id': inst_type['id']})
-
-        instance = db.instance_get_by_uuid(context, instance['uuid'])
-        self.assertRaises(exception.CannotResizeToSmallerSize,
-                          self.compute_api.resize, context, instance, 1)
-
-        self.compute.terminate_instance(context, instance['uuid'])
-
     def test_resize_same_size_fails(self):
         """Ensure invalid flavors raise"""
         context = self.context.elevated()
@@ -173,8 +173,9 @@ class _VirtDriverTestCase(test.TestCase):
     @catch_notimplementederror
     def test_migrate_disk_and_power_off(self):
         instance_ref, network_info = self._get_running_instance()
+        instance_type_ref = test_utils.get_test_instance_type()
         self.connection.migrate_disk_and_power_off(
-            self.ctxt, instance_ref, 'dest_host')
+            self.ctxt, instance_ref, 'dest_host', instance_type_ref)

     @catch_notimplementederror
     def test_pause(self):
@@ -803,17 +803,20 @@ class XenAPIMigrateInstance(test.TestCase):
                               product_version=(6, 0, 0))
         stubs.stubout_loopingcall_start(self.stubs)
         conn = xenapi_conn.get_connection(False)
-        conn._vmops.resize_instance(instance, '')
+        conn._vmops._resize_instance(instance, '')
         self.assertEqual(called['resize'], True)

     def test_migrate_disk_and_power_off(self):
         instance = db.instance_create(self.context, self.instance_values)
+        instance_type = db.instance_type_get_by_name(self.context, 'm1.large')
         stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)
         conn = xenapi_conn.get_connection(False)
-        conn.migrate_disk_and_power_off(self.context, instance, '127.0.0.1')
+        conn.migrate_disk_and_power_off(self.context, instance,
+                                        '127.0.0.1', instance_type)

     def test_migrate_disk_and_power_off_passes_exceptions(self):
         instance = db.instance_create(self.context, self.instance_values)
+        instance_type = db.instance_type_get_by_name(self.context, 'm1.large')
         stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)

         def fake_raise(*args, **kwargs):
@@ -823,7 +826,7 @@ class XenAPIMigrateInstance(test.TestCase):
         conn = xenapi_conn.get_connection(False)
         self.assertRaises(exception.MigrationError,
                           conn.migrate_disk_and_power_off,
-                          self.context, instance, '127.0.0.1')
+                          self.context, instance, '127.0.0.1', instance_type)

     def test_revert_migrate(self):
         instance = db.instance_create(self.context, self.instance_values)
@@ -1163,12 +1166,10 @@ class XenAPIAutoDiskConfigTestCase(test.TestCase):
     def assertIsPartitionCalled(self, called):
         marker = {"partition_called": False}

-        @classmethod
-        def fake_resize_partition_fs(cls, dev_path, partition_path):
+        def fake_resize_part_and_fs(dev, start, old, new):
             marker["partition_called"] = True
-
-        self.stubs.Set(vm_utils.VMHelper, "_resize_partition_and_fs",
-                       fake_resize_partition_fs)
+        self.stubs.Set(vm_utils, "_resize_part_and_fs",
+                       fake_resize_part_and_fs)

         instance = db.instance_create(self.context, self.instance_values)
         disk_image_type = vm_utils.ImageType.DISK_VHD
@@ -1193,12 +1194,10 @@ class XenAPIAutoDiskConfigTestCase(test.TestCase):
         """Should not partition unless fail safes pass"""
         self.instance_values['auto_disk_config'] = True

-        @classmethod
-        def fake_resize_partition_allowed(cls, dev_path, partition_path):
-            return False
-
-        self.stubs.Set(vm_utils.VMHelper, "_resize_partition_allowed",
-                       fake_resize_partition_allowed)
+        def fake_get_partitions(dev):
+            return [(1, 0, 100, 'ext4'), (2, 100, 200, 'ext4')]
+        self.stubs.Set(vm_utils, "_get_partitions",
+                       fake_get_partitions)

         self.assertIsPartitionCalled(False)

@@ -1209,10 +1208,9 @@ class XenAPIAutoDiskConfigTestCase(test.TestCase):
         """
         self.instance_values['auto_disk_config'] = True

-        @classmethod
-        def fake_resize_partition_allowed(cls, dev_path, partition_path):
-            return True
-        self.stubs.Set(vm_utils.VMHelper, "_resize_partition_allowed",
-                       fake_resize_partition_allowed)
+        def fake_get_partitions(dev):
+            return [(1, 0, 100, 'ext4')]
+        self.stubs.Set(vm_utils, "_get_partitions",
+                       fake_get_partitions)

         self.assertIsPartitionCalled(True)
@@ -34,6 +34,21 @@ def get_test_image_info(context, instance_ref):
     return image_service.show(context, image_id)


+def get_test_instance_type(context=None):
+    if not context:
+        context = get_test_admin_context()
+
+    test_instance_type = {'name': 'kinda.big',
+                          'memory_mb': 2048,
+                          'vcpus': 4,
+                          'local_gb': 40,
+                          'swap': 1024}
+
+    instance_type_ref = nova.db.instance_type_create(context,
+                                                     test_instance_type)
+    return instance_type_ref
+
+
 def get_test_instance(context=None):
     if not context:
         context = get_test_admin_context()
@@ -236,7 +236,8 @@ class ComputeDriver(object):
         """
         raise NotImplementedError()

-    def migrate_disk_and_power_off(self, context, instance, dest):
+    def migrate_disk_and_power_off(self, context, instance, dest,
+                                   instance_type):
         """
         Transfers the disk of a running instance in multiple phases, turning
         off the instance before the end.
@@ -138,7 +138,8 @@ class FakeConnection(driver.ComputeDriver):
     def poll_rescued_instances(self, timeout):
         pass

-    def migrate_disk_and_power_off(self, context, instance, dest):
+    def migrate_disk_and_power_off(self, context, instance, dest,
+                                   instance_type):
         pass

     def poll_unconfirmed_resizes(self, resize_confirm_window):
@@ -467,6 +467,9 @@ class SessionBase(object):
         raise Exception('No simulation in host_call_plugin for %s,%s' %
                         (plugin, method))

+    def VDI_get_virtual_size(self, *args):
+        return 1 * 1024 * 1024 * 1024
+
     def VDI_resize_online(self, *args):
         return 'derp'

@@ -387,19 +387,36 @@ class VMHelper(HelperBase):
             session.wait_for_task(task, instance.id)

     @classmethod
-    def auto_configure_disk(cls, session, vdi_ref):
-        """Partition and resize FS to match the size specified by
-        instance_types.local_gb.
-        """
-        with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
-            dev_path = utils.make_dev_path(dev)
-            partition_path = utils.make_dev_path(dev, partition=1)
-            if cls._resize_partition_allowed(dev_path, partition_path):
-                cls._resize_partition_and_fs(dev_path, partition_path)
+    def resize_disk(cls, session, vdi_ref, instance_type):
+        # Copy VDI over to something we can resize
+        # NOTE(jerdfelt): Would be nice to just set vdi_ref to read/write
+        sr_ref = safe_find_sr(session)
+        copy_ref = session.call_xenapi('VDI.copy', vdi_ref, sr_ref)
+        copy_uuid = session.call_xenapi('VDI.get_uuid', copy_ref)
+
+        try:
+            # Resize partition and filesystem down
+            cls.auto_configure_disk(session=session,
+                                    vdi_ref=copy_ref,
+                                    new_gb=instance_type['local_gb'])
+
+            # Create new VDI
+            new_ref = cls.fetch_blank_disk(session,
+                                           instance_type['id'])
+            new_uuid = session.call_xenapi('VDI.get_uuid', new_ref)
+
+            # Manually copy contents over
+            virtual_size = instance_type['local_gb'] * 1024 * 1024 * 1024
+            _copy_partition(session, copy_ref, new_ref, 1, virtual_size)
+
+            return new_ref, new_uuid
+        finally:
+            cls.destroy_vdi(session, copy_ref)

     @classmethod
-    def _resize_partition_allowed(cls, dev_path, partition_path):
-        """Determine whether we should resize the partition and the fs.
+    def auto_configure_disk(cls, session, vdi_ref, new_gb):
+        """Partition and resize FS to match the size specified by
+        instance_types.local_gb.

         This is a fail-safe to prevent accidentally destroying data on a disk
         erroneously marked as auto_disk_config=True.
@@ -413,52 +430,16 @@ class VMHelper(HelperBase):

         3. The file-system on the one partition must be ext3 or ext4.
         """
-        out, err = utils.execute("parted", "--script", "--machine",
-                                 partition_path, "print", run_as_root=True)
-        lines = [line for line in out.split('\n') if line]
-        partitions = lines[2:]
+        with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
+            partitions = _get_partitions(dev)

-        num_partitions = len(partitions)
-        fs_type = partitions[0].split(':')[4]
-        LOG.debug(_("Found %(num_partitions)s partitions, the first with"
-                    " fs_type '%(fs_type)s'") % locals())
+            if len(partitions) != 1:
+                return

-        allowed_fs = fs_type in ('ext3', 'ext4')
-        return num_partitions == 1 and allowed_fs
-
-    @classmethod
-    def _resize_partition_and_fs(cls, dev_path, partition_path):
-        """Resize partition and fileystem.
-
-        This assumes we are dealing with a single primary partition and using
-        ext3 or ext4.
-        """
-        # 1. Delete and recreate partition to end of disk
-        root_helper = FLAGS.root_helper
-        cmd = """echo "d
-n
-p
-1
-
-
-w
-" | %(root_helper)s fdisk %(dev_path)s""" % locals()
-        utils.execute(cmd, run_as_root=False, shell=True)
-
-        # 2. Remove ext3 journal (making it ext2)
-        utils.execute("tune2fs", "-O ^has_journal", partition_path,
-                      run_as_root=True)
-
-        # 3. fsck the disk
-        # NOTE(sirp): using -p here to automatically repair filesystem, is
-        # this okay?
-        utils.execute("e2fsck", "-f", "-p", partition_path, run_as_root=True)
-
-        # 4. Resize the disk
-        utils.execute("resize2fs", partition_path, run_as_root=True)
-
-        # 5. Add back journal
-        utils.execute("tune2fs", "-j", partition_path, run_as_root=True)
+            num, start, old_sectors, ptype = partitions[0]
+            if ptype in ('ext3', 'ext4'):
+                new_sectors = new_gb * 1024 * 1024 * 1024 / SECTOR_SIZE
+                _resize_part_and_fs(dev, start, old_sectors, new_sectors)

     @classmethod
     def generate_swap(cls, session, instance, vm_ref, userdevice, swap_mb):
@@ -1317,6 +1298,27 @@ def _is_vdi_pv(dev):
     return False


+def _get_partitions(dev):
+    """Return partition information (num, size, type) for a device."""
+    dev_path = utils.make_dev_path(dev)
+    out, err = utils.execute('parted', '--script', '--machine',
+                             dev_path, 'unit s', 'print',
+                             run_as_root=True)
+    lines = [line for line in out.split('\n') if line]
+    partitions = []
+
+    LOG.debug(_("Partitions:"))
+    for line in lines[2:]:
+        num, start, end, size, ptype = line.split(':')[:5]
+        start = int(start.rstrip('s'))
+        end = int(end.rstrip('s'))
+        size = int(size.rstrip('s'))
+        LOG.debug(_(" %(num)s: %(ptype)s %(size)d sectors") % locals())
+        partitions.append((num, start, size, ptype))
+
+    return partitions
+
+
 def _stream_disk(dev, image_type, virtual_size, image_file):
     offset = 0
     if image_type == ImageType.DISK:
@@ -1353,6 +1355,68 @@ def _write_partition(virtual_size, dev):
     LOG.debug(_('Writing partition table %s done.'), dev_path)


+def _resize_part_and_fs(dev, start, old_sectors, new_sectors):
+    """Resize partition and fileystem.
+
+    This assumes we are dealing with a single primary partition and using
+    ext3 or ext4.
+    """
+    size = new_sectors - start
+    end = new_sectors - 1
+
+    dev_path = utils.make_dev_path(dev)
+    partition_path = utils.make_dev_path(dev, partition=1)
+
+    # Remove ext3 journal (making it ext2)
+    utils.execute('tune2fs', '-O ^has_journal', partition_path,
+                  run_as_root=True)
+
+    # fsck the disk
+    # NOTE(sirp): using -p here to automatically repair filesystem, is
+    # this okay?
+    utils.execute('e2fsck', '-f', '-p', partition_path, run_as_root=True)
+
+    if new_sectors < old_sectors:
+        # Resizing down, resize filesystem before partition resize
+        utils.execute('resize2fs', partition_path, '%ds' % size,
+                      run_as_root=True)
+
+    utils.execute('parted', '--script', dev_path, 'rm', '1',
+                  run_as_root=True)
+    utils.execute('parted', '--script', dev_path, 'mkpart',
+                  'primary',
+                  '%ds' % start,
+                  '%ds' % end,
+                  run_as_root=True)
+
+    if new_sectors > old_sectors:
+        # Resizing up, resize filesystem after partition resize
+        utils.execute('resize2fs', partition_path, run_as_root=True)
+
+    # Add back journal
+    utils.execute('tune2fs', '-j', partition_path, run_as_root=True)
+
+
+def _copy_partition(session, src_ref, dst_ref, partition, virtual_size):
+    # Part of disk taken up by MBR
+    virtual_size -= MBR_SIZE_BYTES
+
+    with vdi_attached_here(session, src_ref, read_only=True) as src:
+        src_path = utils.make_dev_path(src, partition=partition)
+
+        with vdi_attached_here(session, dst_ref, read_only=False) as dst:
+            dst_path = utils.make_dev_path(dst, partition=partition)
+
+            _write_partition(virtual_size, dst)
+
+            num_blocks = virtual_size / SECTOR_SIZE
+            utils.execute('dd',
+                          'if=%s' % src_path,
+                          'of=%s' % dst_path,
+                          'count=%d' % num_blocks,
+                          run_as_root=True)
+
+
 def _mount_filesystem(dev_path, dir):
     """mounts the device specified by dev_path in dir"""
     try:
@@ -21,7 +21,6 @@ Management class for VM-related functions (spawn, reboot, etc).

 import base64
 import json
-import M2Crypto
 import os
 import pickle
 import random
@@ -30,6 +29,10 @@ import sys
 import time
 import uuid

+import M2Crypto
+
+from nova.compute import api as compute
+from nova.compute import power_state
 from nova import context as nova_context
 from nova import db
 from nova import exception
@@ -37,15 +40,14 @@ from nova import flags
 from nova import ipv6
 from nova import log as logging
 from nova import utils
-
-from nova.compute import api as compute
-from nova.compute import power_state
 from nova.virt import driver
-from nova.virt.xenapi.volume_utils import VolumeHelper
-from nova.virt.xenapi.network_utils import NetworkHelper
-from nova.virt.xenapi.vm_utils import VMHelper
-from nova.virt.xenapi.vm_utils import ImageType
+from nova.virt.xenapi import volume_utils
+from nova.virt.xenapi import network_utils
+from nova.virt.xenapi import vm_utils

+VolumeHelper = volume_utils.VolumeHelper
+NetworkHelper = network_utils.NetworkHelper
+VMHelper = vm_utils.VMHelper
 XenAPI = None
 LOG = logging.getLogger("nova.virt.xenapi.vmops")

@@ -141,14 +143,14 @@ class VMOps(object):

     def finish_migration(self, context, migration, instance, disk_info,
                          network_info, image_meta, resize_instance):
-        vdi_uuid = self.link_disks(instance, disk_info['base_copy'],
-                disk_info['cow'])
+        vdi_uuid = self._move_disks(instance, disk_info)
+
+        if resize_instance:
+            self._resize_instance(instance, vdi_uuid)

         vm_ref = self._create_vm(context, instance,
                                  [dict(vdi_type='os', vdi_uuid=vdi_uuid)],
                                  network_info, image_meta)
-        if resize_instance:
-            self.resize_instance(instance, vdi_uuid)

         # 5. Start VM
         self._start(instance, vm_ref=vm_ref)
@@ -175,7 +177,7 @@ class VMOps(object):

         for vdi in vdis:
             if vdi["vdi_type"] == "os":
-                self.resize_instance(instance, vdi["vdi_uuid"])
+                self._resize_instance(instance, vdi["vdi_uuid"])

         return vdis

@@ -242,11 +244,11 @@ class VMOps(object):
         if instance.kernel_id:
             kernel = VMHelper.fetch_image(context, self._session,
                     instance, instance.kernel_id, instance.user_id,
-                    instance.project_id, ImageType.KERNEL)[0]
+                    instance.project_id, vm_utils.ImageType.KERNEL)[0]
         if instance.ramdisk_id:
             ramdisk = VMHelper.fetch_image(context, self._session,
                     instance, instance.ramdisk_id, instance.user_id,
-                    instance.project_id, ImageType.RAMDISK)[0]
+                    instance.project_id, vm_utils.ImageType.RAMDISK)[0]

         # NOTE(jk0): Since vdi_type may contain either 'os' or 'swap', we
         # need to ensure that the 'swap' VDI is not chosen as the mount
@@ -318,12 +320,15 @@ class VMOps(object):

     def _attach_disks(self, instance, disk_image_type, vm_ref, first_vdi_ref,
                       vdis):
+        ctx = nova_context.get_admin_context()
+
         instance_uuid = instance['uuid']

         # device 0 reserved for RW disk
         userdevice = 0

         # DISK_ISO needs two VBDs: the ISO disk and a blank RW disk
-        if disk_image_type == ImageType.DISK_ISO:
+        if disk_image_type == vm_utils.ImageType.DISK_ISO:
             LOG.debug("detected ISO image type, going to create blank VM for "
                       "install")

@@ -346,8 +351,11 @@ class VMOps(object):
             LOG.debug(_("Auto configuring disk for instance"
                         " %(instance_uuid)s, attempting to"
                         " resize partition...") % locals())
+            instance_type = db.instance_type_get(ctx,
+                    instance.instance_type_id)
             VMHelper.auto_configure_disk(session=self._session,
-                                         vdi_ref=first_vdi_ref)
+                                         vdi_ref=first_vdi_ref,
+                                         new_gb=instance_type['local_gb'])

         VolumeHelper.create_vbd(session=self._session, vm_ref=vm_ref,
                                 vdi_ref=first_vdi_ref,
@@ -357,7 +365,6 @@ class VMOps(object):
         # userdevice 1 is reserved for rescue and we've used '0'
         userdevice = 2

-        ctx = nova_context.get_admin_context()
         instance_type = db.instance_type_get(ctx, instance.instance_type_id)
         swap_mb = instance_type['swap']
         generate_swap = swap_mb and FLAGS.xenapi_generate_swap
@@ -509,9 +516,9 @@ class VMOps(object):
                 LOG.debug(_("Skipping VDI destroy for %s"), vdi_to_remove)
             if item['file']:
                 # There is also a file to remove.
-                if vdi_type == ImageType.KERNEL_STR:
+                if vdi_type == vm_utils.ImageType.KERNEL_STR:
                     kernel_file = item['file']
-                elif vdi_type == ImageType.RAMDISK_STR:
+                elif vdi_type == vm_utils.ImageType.RAMDISK_STR:
                     ramdisk_file = item['file']

         if kernel_file or ramdisk_file:
@@ -656,8 +663,10 @@ class VMOps(object):
                     " %(progress)d") % locals())
         db.instance_update(context, instance_uuid, {'progress': progress})

-    def migrate_disk_and_power_off(self, context, instance, dest):
-        """Copies a VHD from one host machine to another.
+    def migrate_disk_and_power_off(self, context, instance, dest,
+                                   instance_type):
+        """Copies a VHD from one host machine to another, possibly
+        resizing filesystem before hand.

         :param instance: the instance that owns the VHD in question.
         :param dest: the destination host machine.
@@ -692,23 +701,69 @@ class VMOps(object):

         sr_path = VMHelper.get_sr_path(self._session)

-        # 2. Transfer the base copy
-        self._migrate_vhd(instance, base_copy_uuid, dest, sr_path)
-        self._update_instance_progress(context, instance,
-                                       step=2,
-                                       total_steps=RESIZE_TOTAL_STEPS)
-
-        # 3. Now power down the instance
-        self._shutdown(instance, vm_ref, hard=False)
-        self._update_instance_progress(context, instance,
-                                       step=3,
-                                       total_steps=RESIZE_TOTAL_STEPS)
-
-        # 4. Transfer the COW VHD
-        self._migrate_vhd(instance, cow_uuid, dest, sr_path)
-        self._update_instance_progress(context, instance,
-                                       step=4,
-                                       total_steps=RESIZE_TOTAL_STEPS)
-
+        if instance['auto_disk_config'] and \
+           instance['local_gb'] > instance_type['local_gb']:
+            # Resizing disk storage down
+            old_gb = instance['local_gb']
+            new_gb = instance_type['local_gb']
+
+            LOG.debug(_("Resizing down VDI %(cow_uuid)s from "
+                        "%(old_gb)dGB to %(new_gb)dGB") % locals())
+
+            # 2. Power down the instance before resizing
+            self._shutdown(instance, vm_ref, hard=False)
+            self._update_instance_progress(context, instance,
+                                           step=2,
+                                           total_steps=RESIZE_TOTAL_STEPS)
+
+            # 3. Copy VDI, resize partition and filesystem, forget VDI,
+            # truncate VHD
+            new_ref, new_uuid = VMHelper.resize_disk(self._session,
+                                                     vdi_ref,
+                                                     instance_type)
+            self._update_instance_progress(context, instance,
+                                           step=3,
+                                           total_steps=RESIZE_TOTAL_STEPS)
+
+            # 4. Transfer the new VHD
+            self._migrate_vhd(instance, new_uuid, dest, sr_path)
+            self._update_instance_progress(context, instance,
+                                           step=4,
+                                           total_steps=RESIZE_TOTAL_STEPS)
+
+            # Clean up VDI now that it's been copied
+            VMHelper.destroy_vdi(self._session, new_ref)
+
+            vdis = {'base_copy': new_uuid}
+        else:
+            # Resizing disk storage up, will be handled on destination
+
+            # As an optimization, we transfer the base VDI first,
+            # then shut down the VM, followed by transfering the COW
+            # VDI.
+
+            # 2. Transfer the base copy
+            self._migrate_vhd(instance, base_copy_uuid, dest, sr_path)
+            self._update_instance_progress(context, instance,
+                                           step=2,
+                                           total_steps=RESIZE_TOTAL_STEPS)
+
+            # 3. Now power down the instance
+            self._shutdown(instance, vm_ref, hard=False)
+            self._update_instance_progress(context, instance,
+                                           step=3,
+                                           total_steps=RESIZE_TOTAL_STEPS)
+
+            # 4. Transfer the COW VHD
+            self._migrate_vhd(instance, cow_uuid, dest, sr_path)
+            self._update_instance_progress(context, instance,
+                                           step=4,
+                                           total_steps=RESIZE_TOTAL_STEPS)
+
+            # TODO(mdietz): we could also consider renaming these to
+            # something sensible so we don't need to blindly pass
+            # around dictionaries
+            vdis = {'base_copy': base_copy_uuid, 'cow': cow_uuid}
+
         # NOTE(sirp): in case we're resizing to the same host (for dev
         # purposes), apply a suffix to name-label so the two VM records
@@ -720,20 +775,27 @@ class VMOps(object):
             self._destroy(instance, template_vm_ref,
                     shutdown=False, destroy_kernel_ramdisk=False)

-        # TODO(mdietz): we could also consider renaming these to something
-        # sensible so we don't need to blindly pass around dictionaries
-        return {'base_copy': base_copy_uuid, 'cow': cow_uuid}
+        return vdis

-    def link_disks(self, instance, base_copy_uuid, cow_uuid):
-        """Links the base copy VHD to the COW via the XAPI plugin."""
+    def _move_disks(self, instance, disk_info):
+        """Move and possibly link VHDs via the XAPI plugin."""
+        base_copy_uuid = disk_info['base_copy']
         new_base_copy_uuid = str(uuid.uuid4())
-        new_cow_uuid = str(uuid.uuid4())
+
         params = {'instance_uuid': instance['uuid'],
+                  'sr_path': VMHelper.get_sr_path(self._session),
                   'old_base_copy_uuid': base_copy_uuid,
-                  'old_cow_uuid': cow_uuid,
-                  'new_base_copy_uuid': new_base_copy_uuid,
-                  'new_cow_uuid': new_cow_uuid,
-                  'sr_path': VMHelper.get_sr_path(self._session), }
+                  'new_base_copy_uuid': new_base_copy_uuid}
+
+        if 'cow' in disk_info:
+            cow_uuid = disk_info['cow']
+            new_cow_uuid = str(uuid.uuid4())
+            params['old_cow_uuid'] = cow_uuid
+            params['new_cow_uuid'] = new_cow_uuid
+
+            new_uuid = new_cow_uuid
+        else:
+            new_uuid = new_base_copy_uuid

         task = self._session.async_call_plugin('migration',
                 'move_vhds_into_sr', {'params': pickle.dumps(params)})
@@ -744,25 +806,33 @@ class VMOps(object):

         # Set name-label so we can find if we need to clean up a failed
         # migration
-        VMHelper.set_vdi_name_label(self._session, new_cow_uuid,
+        VMHelper.set_vdi_name_label(self._session, new_uuid,
                                     instance.name)

-        return new_cow_uuid
+        return new_uuid

-    def resize_instance(self, instance, vdi_uuid):
-        """Resize a running instance by changing its RAM and disk size."""
+    def _resize_instance(self, instance, vdi_uuid):
+        """Resize a running instance by changing its disk size."""
         #TODO(mdietz): this will need to be adjusted for swap later
         #The new disk size must be in bytes

         new_disk_size = instance.local_gb * 1024 * 1024 * 1024
-        if new_disk_size > 0:
-            instance_name = instance.name
-            instance_local_gb = instance.local_gb
-            LOG.debug(_("Resizing VDI %(vdi_uuid)s for instance"
-                        "%(instance_name)s. Expanding to %(instance_local_gb)d"
-                        " GB") % locals())
-            vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
+        # for an instance with no local storage
+        if not new_disk_size:
+            return
+
+        # Get current size of VDI
+        vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
+        virtual_size = self._session.call_xenapi('VDI.get_virtual_size',
+                                                 vdi_ref)
+        virtual_size = int(virtual_size)
+
+        instance_name = instance.name
+        old_gb = virtual_size / (1024 * 1024 * 1024)
+        new_gb = instance.local_gb
+
+        if virtual_size < new_disk_size:
+            # Resize up. Simple VDI resize will do the trick
+            LOG.debug(_("Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to "
+                        "%(new_gb)dGB") % locals())
             if self._product_version[0] > 5:
                 resize_func_name = 'VDI.resize'
             else:
@@ -245,10 +245,12 @@ class XenAPIConnection(driver.ComputeDriver):
         """Unpause paused VM instance"""
         self._vmops.unpause(instance)

-    def migrate_disk_and_power_off(self, context, instance, dest):
+    def migrate_disk_and_power_off(self, context, instance, dest,
+                                   instance_type):
         """Transfers the VHD of a running instance to another host, then shuts
         off the instance copies over the COW disk"""
-        return self._vmops.migrate_disk_and_power_off(context, instance, dest)
+        return self._vmops.migrate_disk_and_power_off(context, instance,
+                                                      dest, instance_type)

     def suspend(self, instance):
         """suspend the specified instance"""
@@ -37,50 +37,55 @@ def move_vhds_into_sr(session, args):
     params = pickle.loads(exists(args, 'params'))
     instance_uuid = params['instance_uuid']

-    old_base_copy_uuid = params['old_base_copy_uuid']
-    old_cow_uuid = params['old_cow_uuid']
-
-    new_base_copy_uuid = params['new_base_copy_uuid']
-    new_cow_uuid = params['new_cow_uuid']
-
     sr_path = params['sr_path']
-    sr_temp_path = "%s/tmp/" % sr_path
-
-    # Discover the copied VHDs locally, and then set up paths to copy
-    # them to under the SR
-    source_image_path = "/images/instance%s" % instance_uuid
-    source_base_copy_path = "%s/%s.vhd" % (source_image_path,
-            old_base_copy_uuid)
-    source_cow_path = "%s/%s.vhd" % (source_image_path, old_cow_uuid)
-
-    temp_vhd_path = "%s/instance%s/" % (sr_temp_path, instance_uuid)
-    new_base_copy_path = "%s/%s.vhd" % (temp_vhd_path, new_base_copy_uuid)
-    new_cow_path = "%s/%s.vhd" % (temp_vhd_path, new_cow_uuid)
+    sr_temp_path = "%s/tmp" % sr_path
+    temp_vhd_path = "%s/instance%s" % (sr_temp_path, instance_uuid)

     logging.debug('Creating temporary SR path %s' % temp_vhd_path)
     os.makedirs(temp_vhd_path)

-    logging.debug('Moving %s into %s' % (source_base_copy_path, temp_vhd_path))
+    # Discover the copied VHDs locally, and then set up paths to copy
+    # them to under the SR
+    source_image_path = "/images/instance%s" % instance_uuid
+
+    old_base_copy_uuid = params['old_base_copy_uuid']
+    new_base_copy_uuid = params['new_base_copy_uuid']
+    source_base_copy_path = "%s/%s.vhd" % (source_image_path,
+            old_base_copy_uuid)
+    new_base_copy_path = "%s/%s.vhd" % (temp_vhd_path, new_base_copy_uuid)
+
+    logging.debug('Moving base %s into %s' % (source_base_copy_path,
+            temp_vhd_path))
     shutil.move(source_base_copy_path, new_base_copy_path)

-    logging.debug('Moving %s into %s' % (source_cow_path, temp_vhd_path))
-    shutil.move(source_cow_path, new_cow_path)
+    if 'old_cow_uuid' in params:
+        old_cow_uuid = params['old_cow_uuid']
+        new_cow_uuid = params['new_cow_uuid']

-    logging.debug('Cleaning up %s' % source_image_path)
-    os.rmdir(source_image_path)
+        source_cow_path = "%s/%s.vhd" % (source_image_path, old_cow_uuid)
+        new_cow_path = "%s/%s.vhd" % (temp_vhd_path, new_cow_uuid)

-    # Link the COW to the base copy
-    logging.debug('Attaching COW to the base copy %s -> %s' %
-            (new_cow_path, new_base_copy_path))
-    subprocess.call(shlex.split('/usr/sbin/vhd-util modify -n %s -p %s' %
-            (new_cow_path, new_base_copy_path)))
+        logging.debug('Moving COW %s into %s' % (source_cow_path,
+                temp_vhd_path))
+        shutil.move(source_cow_path, new_cow_path)

-    logging.debug('Moving VHDs into SR %s' % sr_path)
-    # NOTE(sirp): COW should be copied before base_copy to avoid snapwatchd
-    # GC'ing an unreferenced base copy VDI
-    shutil.move(new_cow_path, sr_path)
+        # Link the COW to the base copy
+        logging.debug('Attaching COW to the base %s -> %s' %
+                (new_cow_path, new_base_copy_path))
+        subprocess.call(['/usr/sbin/vhd-util', 'modify',
+                '-n', new_cow_path, '-p', new_base_copy_path])
+
+        # NOTE(sirp): COW should be copied before base_copy to avoid
+        # snapwatchd GC'ing an unreferenced base copy VDI
+        logging.debug('Moving COW %s to %s' % (new_cow_path, sr_path))
+        shutil.move(new_cow_path, sr_path)
+
+    logging.debug('Moving base %s to %s' % (new_base_copy_path, sr_path))
     shutil.move(new_base_copy_path, sr_path)

+    logging.debug('Cleaning up source path %s' % source_image_path)
+    os.rmdir(source_image_path)
+
     logging.debug('Cleaning up temporary SR path %s' % temp_vhd_path)
     os.rmdir(temp_vhd_path)
     return ""
@@ -103,7 +108,7 @@ def transfer_vhd(session, args):

     ssh_cmd = '\"ssh -o StrictHostKeyChecking=no\"'

-    rsync_args = shlex.split('nohup /usr/bin/rsync -av --progress -e %s %s %s'
+    rsync_args = shlex.split('nohup /usr/bin/rsync -av -e %s %s %s'
                              % (ssh_cmd, source_path, dest_path))

     logging.debug('rsync %s' % (' '.join(rsync_args, )))