Implement resize down for XenAPI
This patch implements resizing an instance to a smaller disk. It does this by copying the VDI and shrinking its partition and filesystem with resize2fs before transferring the disk to the new host.

Change-Id: Ic901a59cb6cdb79605c70528cf85064d8335ee2f
parent ab215c42a2
commit c25f7e7e83
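For orientation, the resize-down sequence this commit adds runs roughly as follows. This is an editor's sketch with injected, hypothetical stand-in callables, not code from the patch; the real work happens in VMHelper.resize_disk(), _resize_part_and_fs() and the migration plugin in the hunks below.

def resize_down(copy_vdi, shrink_fs_and_partition, copy_into_new_vdi,
                transfer_vhd, destroy_vdi):
    """Sketch of the resize-down sequence (all helpers are stand-ins)."""
    work_vdi = copy_vdi()                      # never touch the original disk
    try:
        shrink_fs_and_partition(work_vdi)      # resize2fs, then repartition
        new_vdi = copy_into_new_vdi(work_vdi)  # dd into a smaller blank VDI
        transfer_vhd(new_vdi)                  # rsync the VHD to the new host
        return new_vdi
    finally:
        destroy_vdi(work_vdi)                  # always drop the scratch copy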
@@ -679,9 +679,6 @@ class Controller(wsgi.Controller):
         except exception.CannotResizeToSameSize:
             msg = _("Resize requires a change in size.")
             raise exc.HTTPBadRequest(explanation=msg)
-        except exception.CannotResizeToSmallerSize:
-            msg = _("Resizing to a smaller size is not supported.")
-            raise exc.HTTPBadRequest(explanation=msg)
 
         return webob.Response(status_int=202)
 
@@ -1275,8 +1275,6 @@ class API(base.Base):
 
         current_memory_mb = current_instance_type['memory_mb']
         new_memory_mb = new_instance_type['memory_mb']
-        if current_memory_mb > new_memory_mb:
-            raise exception.CannotResizeToSmallerSize()
 
         if (current_memory_mb == new_memory_mb) and flavor_id:
             raise exception.CannotResizeToSameSize()
@@ -1069,6 +1069,8 @@ class ComputeManager(manager.SchedulerDependentManager):
         migration_ref = self.db.migration_get(context, migration_id)
         instance_ref = self.db.instance_get_by_uuid(context,
                 migration_ref.instance_uuid)
+        instance_type_ref = self.db.instance_type_get(context,
+                migration_ref.new_instance_type_id)
 
         self.db.migration_update(context,
                                  migration_id,
@@ -1076,7 +1078,8 @@ class ComputeManager(manager.SchedulerDependentManager):
 
         try:
             disk_info = self.driver.migrate_disk_and_power_off(
-                    context, instance_ref, migration_ref['dest_host'])
+                    context, instance_ref, migration_ref['dest_host'],
+                    instance_type_ref)
         except exception.MigrationError, error:
             LOG.error(_('%s. Setting instance vm_state to ERROR') % (error,))
             self._instance_update(context,
@@ -830,10 +830,6 @@ class CannotResizeToSameSize(NovaException):
     message = _("When resizing, instances must change size!")
 
 
-class CannotResizeToSmallerSize(NovaException):
-    message = _("Resizing to a smaller size is not supported.")
-
-
 class ImageTooLarge(NovaException):
     message = _("Image is larger than instance type allows")
 
@@ -1568,23 +1568,6 @@ class ComputeAPITestCase(BaseTestCase):
 
         self.compute.terminate_instance(context, instance['uuid'])
 
-    def test_resize_down_fails(self):
-        """Ensure resizing down raises and fails"""
-        instance = self._create_fake_instance()
-        context = self.context.elevated()
-        instance = db.instance_get_by_uuid(context, instance['uuid'])
-        self.compute.run_instance(self.context, instance['uuid'])
-
-        inst_type = instance_types.get_instance_type_by_name('m1.xlarge')
-        db.instance_update(self.context, instance['uuid'],
-                {'instance_type_id': inst_type['id']})
-
-        instance = db.instance_get_by_uuid(context, instance['uuid'])
-        self.assertRaises(exception.CannotResizeToSmallerSize,
-                          self.compute_api.resize, context, instance, 1)
-
-        self.compute.terminate_instance(context, instance['uuid'])
-
     def test_resize_same_size_fails(self):
         """Ensure invalid flavors raise"""
         context = self.context.elevated()
@@ -173,8 +173,9 @@ class _VirtDriverTestCase(test.TestCase):
     @catch_notimplementederror
     def test_migrate_disk_and_power_off(self):
         instance_ref, network_info = self._get_running_instance()
+        instance_type_ref = test_utils.get_test_instance_type()
         self.connection.migrate_disk_and_power_off(
-            self.ctxt, instance_ref, 'dest_host')
+            self.ctxt, instance_ref, 'dest_host', instance_type_ref)
 
     @catch_notimplementederror
     def test_pause(self):
@@ -803,17 +803,20 @@ class XenAPIMigrateInstance(test.TestCase):
                 product_version=(6, 0, 0))
         stubs.stubout_loopingcall_start(self.stubs)
         conn = xenapi_conn.get_connection(False)
-        conn._vmops.resize_instance(instance, '')
+        conn._vmops._resize_instance(instance, '')
         self.assertEqual(called['resize'], True)
 
     def test_migrate_disk_and_power_off(self):
         instance = db.instance_create(self.context, self.instance_values)
+        instance_type = db.instance_type_get_by_name(self.context, 'm1.large')
         stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)
         conn = xenapi_conn.get_connection(False)
-        conn.migrate_disk_and_power_off(self.context, instance, '127.0.0.1')
+        conn.migrate_disk_and_power_off(self.context, instance,
+                                        '127.0.0.1', instance_type)
 
     def test_migrate_disk_and_power_off_passes_exceptions(self):
         instance = db.instance_create(self.context, self.instance_values)
+        instance_type = db.instance_type_get_by_name(self.context, 'm1.large')
         stubs.stubout_session(self.stubs, stubs.FakeSessionForMigrationTests)
 
         def fake_raise(*args, **kwargs):
@@ -823,7 +826,7 @@ class XenAPIMigrateInstance(test.TestCase):
         conn = xenapi_conn.get_connection(False)
         self.assertRaises(exception.MigrationError,
                           conn.migrate_disk_and_power_off,
-                          self.context, instance, '127.0.0.1')
+                          self.context, instance, '127.0.0.1', instance_type)
 
     def test_revert_migrate(self):
         instance = db.instance_create(self.context, self.instance_values)
@@ -1163,12 +1166,10 @@ class XenAPIAutoDiskConfigTestCase(test.TestCase):
     def assertIsPartitionCalled(self, called):
         marker = {"partition_called": False}
 
-        @classmethod
-        def fake_resize_partition_fs(cls, dev_path, partition_path):
+        def fake_resize_part_and_fs(dev, start, old, new):
             marker["partition_called"] = True
-        self.stubs.Set(vm_utils.VMHelper, "_resize_partition_and_fs",
-                       fake_resize_partition_fs)
+        self.stubs.Set(vm_utils, "_resize_part_and_fs",
+                       fake_resize_part_and_fs)
 
         instance = db.instance_create(self.context, self.instance_values)
         disk_image_type = vm_utils.ImageType.DISK_VHD
@@ -1193,12 +1194,10 @@ class XenAPIAutoDiskConfigTestCase(test.TestCase):
         """Should not partition unless fail safes pass"""
         self.instance_values['auto_disk_config'] = True
 
-        @classmethod
-        def fake_resize_partition_allowed(cls, dev_path, partition_path):
-            return False
-        self.stubs.Set(vm_utils.VMHelper, "_resize_partition_allowed",
-                       fake_resize_partition_allowed)
+        def fake_get_partitions(dev):
+            return [(1, 0, 100, 'ext4'), (2, 100, 200, 'ext4')]
+        self.stubs.Set(vm_utils, "_get_partitions",
+                       fake_get_partitions)
 
         self.assertIsPartitionCalled(False)
 
@@ -1209,10 +1208,9 @@ class XenAPIAutoDiskConfigTestCase(test.TestCase):
         """
         self.instance_values['auto_disk_config'] = True
 
-        @classmethod
-        def fake_resize_partition_allowed(cls, dev_path, partition_path):
-            return True
-        self.stubs.Set(vm_utils.VMHelper, "_resize_partition_allowed",
-                       fake_resize_partition_allowed)
+        def fake_get_partitions(dev):
+            return [(1, 0, 100, 'ext4')]
+        self.stubs.Set(vm_utils, "_get_partitions",
+                       fake_get_partitions)
 
         self.assertIsPartitionCalled(True)
@@ -34,6 +34,21 @@ def get_test_image_info(context, instance_ref):
     return image_service.show(context, image_id)
 
 
+def get_test_instance_type(context=None):
+    if not context:
+        context = get_test_admin_context()
+
+    test_instance_type = {'name': 'kinda.big',
+                          'memory_mb': 2048,
+                          'vcpus': 4,
+                          'local_gb': 40,
+                          'swap': 1024}
+
+    instance_type_ref = nova.db.instance_type_create(context,
+                                                     test_instance_type)
+    return instance_type_ref
+
+
 def get_test_instance(context=None):
     if not context:
         context = get_test_admin_context()
@@ -236,7 +236,8 @@ class ComputeDriver(object):
         """
         raise NotImplementedError()
 
-    def migrate_disk_and_power_off(self, context, instance, dest):
+    def migrate_disk_and_power_off(self, context, instance, dest,
+                                   instance_type):
         """
         Transfers the disk of a running instance in multiple phases, turning
         off the instance before the end.
@@ -138,7 +138,8 @@ class FakeConnection(driver.ComputeDriver):
     def poll_rescued_instances(self, timeout):
         pass
 
-    def migrate_disk_and_power_off(self, context, instance, dest):
+    def migrate_disk_and_power_off(self, context, instance, dest,
+                                   instance_type):
         pass
 
     def poll_unconfirmed_resizes(self, resize_confirm_window):
@@ -467,6 +467,9 @@ class SessionBase(object):
         raise Exception('No simulation in host_call_plugin for %s,%s' %
                         (plugin, method))
 
+    def VDI_get_virtual_size(self, *args):
+        return 1 * 1024 * 1024 * 1024
+
     def VDI_resize_online(self, *args):
         return 'derp'
 
@@ -387,19 +387,36 @@ class VMHelper(HelperBase):
         session.wait_for_task(task, instance.id)
 
     @classmethod
-    def auto_configure_disk(cls, session, vdi_ref):
-        """Partition and resize FS to match the size specified by
-        instance_types.local_gb.
-        """
-        with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
-            dev_path = utils.make_dev_path(dev)
-            partition_path = utils.make_dev_path(dev, partition=1)
-            if cls._resize_partition_allowed(dev_path, partition_path):
-                cls._resize_partition_and_fs(dev_path, partition_path)
+    def resize_disk(cls, session, vdi_ref, instance_type):
+        # Copy VDI over to something we can resize
+        # NOTE(jerdfelt): Would be nice to just set vdi_ref to read/write
+        sr_ref = safe_find_sr(session)
+        copy_ref = session.call_xenapi('VDI.copy', vdi_ref, sr_ref)
+        copy_uuid = session.call_xenapi('VDI.get_uuid', copy_ref)
+
+        try:
+            # Resize partition and filesystem down
+            cls.auto_configure_disk(session=session,
+                                    vdi_ref=copy_ref,
+                                    new_gb=instance_type['local_gb'])
+
+            # Create new VDI
+            new_ref = cls.fetch_blank_disk(session,
+                                           instance_type['id'])
+            new_uuid = session.call_xenapi('VDI.get_uuid', new_ref)
+
+            # Manually copy contents over
+            virtual_size = instance_type['local_gb'] * 1024 * 1024 * 1024
+            _copy_partition(session, copy_ref, new_ref, 1, virtual_size)
+
+            return new_ref, new_uuid
+        finally:
+            cls.destroy_vdi(session, copy_ref)
 
     @classmethod
-    def _resize_partition_allowed(cls, dev_path, partition_path):
-        """Determine whether we should resize the partition and the fs.
+    def auto_configure_disk(cls, session, vdi_ref, new_gb):
+        """Partition and resize FS to match the size specified by
+        instance_types.local_gb.
 
         This is a fail-safe to prevent accidentally destroying data on a disk
         erroneously marked as auto_disk_config=True.
@@ -413,52 +430,16 @@ class VMHelper(HelperBase):
 
         3. The file-system on the one partition must be ext3 or ext4.
         """
-        out, err = utils.execute("parted", "--script", "--machine",
-                                 partition_path, "print", run_as_root=True)
-        lines = [line for line in out.split('\n') if line]
-        partitions = lines[2:]
-
-        num_partitions = len(partitions)
-        fs_type = partitions[0].split(':')[4]
-        LOG.debug(_("Found %(num_partitions)s partitions, the first with"
-                    " fs_type '%(fs_type)s'") % locals())
-
-        allowed_fs = fs_type in ('ext3', 'ext4')
-        return num_partitions == 1 and allowed_fs
-
-    @classmethod
-    def _resize_partition_and_fs(cls, dev_path, partition_path):
-        """Resize partition and fileystem.
-
-        This assumes we are dealing with a single primary partition and using
-        ext3 or ext4.
-        """
-        # 1. Delete and recreate partition to end of disk
-        root_helper = FLAGS.root_helper
-        cmd = """echo "d
-n
-p
-1
-
-
-w
-" | %(root_helper)s fdisk %(dev_path)s""" % locals()
-        utils.execute(cmd, run_as_root=False, shell=True)
-
-        # 2. Remove ext3 journal (making it ext2)
-        utils.execute("tune2fs", "-O ^has_journal", partition_path,
-                      run_as_root=True)
-
-        # 3. fsck the disk
-        # NOTE(sirp): using -p here to automatically repair filesystem, is
-        # this okay?
-        utils.execute("e2fsck", "-f", "-p", partition_path, run_as_root=True)
-
-        # 4. Resize the disk
-        utils.execute("resize2fs", partition_path, run_as_root=True)
-
-        # 5. Add back journal
-        utils.execute("tune2fs", "-j", partition_path, run_as_root=True)
+        with vdi_attached_here(session, vdi_ref, read_only=False) as dev:
+            partitions = _get_partitions(dev)
+
+            if len(partitions) != 1:
+                return
+
+            num, start, old_sectors, ptype = partitions[0]
+            if ptype in ('ext3', 'ext4'):
+                new_sectors = new_gb * 1024 * 1024 * 1024 / SECTOR_SIZE
+                _resize_part_and_fs(dev, start, old_sectors, new_sectors)
 
     @classmethod
     def generate_swap(cls, session, instance, vm_ref, userdevice, swap_mb):
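The new_gb-to-sectors conversion above assumes 512-byte sectors (the SECTOR_SIZE constant used in vm_utils). A standalone check of the arithmetic, for a 40 GB flavor:

SECTOR_SIZE = 512  # assumed, matching the constant vm_utils uses

def gb_to_sectors(new_gb):
    """Convert a flavor's local_gb into a sector count for parted."""
    return new_gb * 1024 * 1024 * 1024 // SECTOR_SIZE

assert gb_to_sectors(40) == 83886080  # 40 GiB of 512-byte sectors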
@@ -1317,6 +1298,27 @@ def _is_vdi_pv(dev):
     return False
 
 
+def _get_partitions(dev):
+    """Return partition information (num, size, type) for a device."""
+    dev_path = utils.make_dev_path(dev)
+    out, err = utils.execute('parted', '--script', '--machine',
+                             dev_path, 'unit s', 'print',
+                             run_as_root=True)
+    lines = [line for line in out.split('\n') if line]
+    partitions = []
+
+    LOG.debug(_("Partitions:"))
+    for line in lines[2:]:
+        num, start, end, size, ptype = line.split(':')[:5]
+        start = int(start.rstrip('s'))
+        end = int(end.rstrip('s'))
+        size = int(size.rstrip('s'))
+        LOG.debug(_("  %(num)s: %(ptype)s %(size)d sectors") % locals())
+        partitions.append((num, start, size, ptype))
+
+    return partitions
+
+
 def _stream_disk(dev, image_type, virtual_size, image_file):
     offset = 0
     if image_type == ImageType.DISK:
|
|||||||
LOG.debug(_('Writing partition table %s done.'), dev_path)
|
LOG.debug(_('Writing partition table %s done.'), dev_path)
|
||||||
|
|
||||||
|
|
||||||
|
def _resize_part_and_fs(dev, start, old_sectors, new_sectors):
|
||||||
|
"""Resize partition and fileystem.
|
||||||
|
|
||||||
|
This assumes we are dealing with a single primary partition and using
|
||||||
|
ext3 or ext4.
|
||||||
|
"""
|
||||||
|
size = new_sectors - start
|
||||||
|
end = new_sectors - 1
|
||||||
|
|
||||||
|
dev_path = utils.make_dev_path(dev)
|
||||||
|
partition_path = utils.make_dev_path(dev, partition=1)
|
||||||
|
|
||||||
|
# Remove ext3 journal (making it ext2)
|
||||||
|
utils.execute('tune2fs', '-O ^has_journal', partition_path,
|
||||||
|
run_as_root=True)
|
||||||
|
|
||||||
|
# fsck the disk
|
||||||
|
# NOTE(sirp): using -p here to automatically repair filesystem, is
|
||||||
|
# this okay?
|
||||||
|
utils.execute('e2fsck', '-f', '-p', partition_path, run_as_root=True)
|
||||||
|
|
||||||
|
if new_sectors < old_sectors:
|
||||||
|
# Resizing down, resize filesystem before partition resize
|
||||||
|
utils.execute('resize2fs', partition_path, '%ds' % size,
|
||||||
|
run_as_root=True)
|
||||||
|
|
||||||
|
utils.execute('parted', '--script', dev_path, 'rm', '1',
|
||||||
|
run_as_root=True)
|
||||||
|
utils.execute('parted', '--script', dev_path, 'mkpart',
|
||||||
|
'primary',
|
||||||
|
'%ds' % start,
|
||||||
|
'%ds' % end,
|
||||||
|
run_as_root=True)
|
||||||
|
|
||||||
|
if new_sectors > old_sectors:
|
||||||
|
# Resizing up, resize filesystem after partition resize
|
||||||
|
utils.execute('resize2fs', partition_path, run_as_root=True)
|
||||||
|
|
||||||
|
# Add back journal
|
||||||
|
utils.execute('tune2fs', '-j', partition_path, run_as_root=True)
|
||||||
|
|
||||||
|
|
||||||
|
def _copy_partition(session, src_ref, dst_ref, partition, virtual_size):
|
||||||
|
# Part of disk taken up by MBR
|
||||||
|
virtual_size -= MBR_SIZE_BYTES
|
||||||
|
|
||||||
|
with vdi_attached_here(session, src_ref, read_only=True) as src:
|
||||||
|
src_path = utils.make_dev_path(src, partition=partition)
|
||||||
|
|
||||||
|
with vdi_attached_here(session, dst_ref, read_only=False) as dst:
|
||||||
|
dst_path = utils.make_dev_path(dst, partition=partition)
|
||||||
|
|
||||||
|
_write_partition(virtual_size, dst)
|
||||||
|
|
||||||
|
num_blocks = virtual_size / SECTOR_SIZE
|
||||||
|
utils.execute('dd',
|
||||||
|
'if=%s' % src_path,
|
||||||
|
'of=%s' % dst_path,
|
||||||
|
'count=%d' % num_blocks,
|
||||||
|
run_as_root=True)
|
||||||
|
|
||||||
|
|
||||||
def _mount_filesystem(dev_path, dir):
|
def _mount_filesystem(dev_path, dir):
|
||||||
"""mounts the device specified by dev_path in dir"""
|
"""mounts the device specified by dev_path in dir"""
|
||||||
try:
|
try:
|
||||||
|
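Note the ordering _resize_part_and_fs() enforces: when shrinking, resize2fs must run before the partition is recreated smaller, or the filesystem's tail blocks would fall outside the new partition; when growing, the partition is enlarged first so resize2fs can expand into it. A standalone restatement of just that ordering:

def resize_plan(old_sectors, new_sectors):
    """Order of operations for a shrink vs. a grow (sketch)."""
    plan = ['tune2fs -O ^has_journal', 'e2fsck -f -p']
    if new_sectors < old_sectors:
        plan.append('resize2fs to new size')     # shrink fs first
    plan.append('parted rm 1; parted mkpart')    # recreate the partition
    if new_sectors > old_sectors:
        plan.append('resize2fs')                 # then grow fs to fill it
    plan.append('tune2fs -j')
    return plan

print(resize_plan(old_sectors=200, new_sectors=100))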
@@ -21,7 +21,6 @@ Management class for VM-related functions (spawn, reboot, etc).
 
 import base64
 import json
-import M2Crypto
 import os
 import pickle
 import random
@@ -30,6 +29,10 @@ import sys
 import time
 import uuid
 
+import M2Crypto
+
+from nova.compute import api as compute
+from nova.compute import power_state
 from nova import context as nova_context
 from nova import db
 from nova import exception
@@ -37,15 +40,14 @@ from nova import flags
 from nova import ipv6
 from nova import log as logging
 from nova import utils
 
-from nova.compute import api as compute
-from nova.compute import power_state
 from nova.virt import driver
-from nova.virt.xenapi.volume_utils import VolumeHelper
-from nova.virt.xenapi.network_utils import NetworkHelper
-from nova.virt.xenapi.vm_utils import VMHelper
-from nova.virt.xenapi.vm_utils import ImageType
+from nova.virt.xenapi import volume_utils
+from nova.virt.xenapi import network_utils
+from nova.virt.xenapi import vm_utils
 
+VolumeHelper = volume_utils.VolumeHelper
+NetworkHelper = network_utils.NetworkHelper
+VMHelper = vm_utils.VMHelper
 XenAPI = None
 LOG = logging.getLogger("nova.virt.xenapi.vmops")
 
@@ -141,14 +143,14 @@ class VMOps(object):
 
     def finish_migration(self, context, migration, instance, disk_info,
                          network_info, image_meta, resize_instance):
-        vdi_uuid = self.link_disks(instance, disk_info['base_copy'],
-                                   disk_info['cow'])
+        vdi_uuid = self._move_disks(instance, disk_info)
+
+        if resize_instance:
+            self._resize_instance(instance, vdi_uuid)
+
         vm_ref = self._create_vm(context, instance,
                                  [dict(vdi_type='os', vdi_uuid=vdi_uuid)],
                                  network_info, image_meta)
-        if resize_instance:
-            self.resize_instance(instance, vdi_uuid)
 
         # 5. Start VM
         self._start(instance, vm_ref=vm_ref)
@@ -175,7 +177,7 @@ class VMOps(object):
 
         for vdi in vdis:
             if vdi["vdi_type"] == "os":
-                self.resize_instance(instance, vdi["vdi_uuid"])
+                self._resize_instance(instance, vdi["vdi_uuid"])
 
         return vdis
 
@@ -242,11 +244,11 @@ class VMOps(object):
         if instance.kernel_id:
             kernel = VMHelper.fetch_image(context, self._session,
                     instance, instance.kernel_id, instance.user_id,
-                    instance.project_id, ImageType.KERNEL)[0]
+                    instance.project_id, vm_utils.ImageType.KERNEL)[0]
         if instance.ramdisk_id:
             ramdisk = VMHelper.fetch_image(context, self._session,
                     instance, instance.ramdisk_id, instance.user_id,
-                    instance.project_id, ImageType.RAMDISK)[0]
+                    instance.project_id, vm_utils.ImageType.RAMDISK)[0]
 
         # NOTE(jk0): Since vdi_type may contain either 'os' or 'swap', we
         # need to ensure that the 'swap' VDI is not chosen as the mount
@@ -318,12 +320,15 @@ class VMOps(object):
 
     def _attach_disks(self, instance, disk_image_type, vm_ref, first_vdi_ref,
                       vdis):
+        ctx = nova_context.get_admin_context()
+
         instance_uuid = instance['uuid']
 
         # device 0 reserved for RW disk
         userdevice = 0
 
         # DISK_ISO needs two VBDs: the ISO disk and a blank RW disk
-        if disk_image_type == ImageType.DISK_ISO:
+        if disk_image_type == vm_utils.ImageType.DISK_ISO:
             LOG.debug("detected ISO image type, going to create blank VM for "
                       "install")
 
@@ -346,8 +351,11 @@ class VMOps(object):
             LOG.debug(_("Auto configuring disk for instance"
                         " %(instance_uuid)s, attempting to"
                         " resize partition...") % locals())
+            instance_type = db.instance_type_get(ctx,
+                                                 instance.instance_type_id)
             VMHelper.auto_configure_disk(session=self._session,
-                                         vdi_ref=first_vdi_ref)
+                                         vdi_ref=first_vdi_ref,
+                                         new_gb=instance_type['local_gb'])
 
         VolumeHelper.create_vbd(session=self._session, vm_ref=vm_ref,
                                 vdi_ref=first_vdi_ref,
@@ -357,7 +365,6 @@ class VMOps(object):
         # userdevice 1 is reserved for rescue and we've used '0'
         userdevice = 2
 
-        ctx = nova_context.get_admin_context()
         instance_type = db.instance_type_get(ctx, instance.instance_type_id)
         swap_mb = instance_type['swap']
         generate_swap = swap_mb and FLAGS.xenapi_generate_swap
@@ -509,9 +516,9 @@ class VMOps(object):
                 LOG.debug(_("Skipping VDI destroy for %s"), vdi_to_remove)
             if item['file']:
                 # There is also a file to remove.
-                if vdi_type == ImageType.KERNEL_STR:
+                if vdi_type == vm_utils.ImageType.KERNEL_STR:
                     kernel_file = item['file']
-                elif vdi_type == ImageType.RAMDISK_STR:
+                elif vdi_type == vm_utils.ImageType.RAMDISK_STR:
                     ramdisk_file = item['file']
 
         if kernel_file or ramdisk_file:
@@ -656,8 +663,10 @@ class VMOps(object):
                     " %(progress)d") % locals())
         db.instance_update(context, instance_uuid, {'progress': progress})
 
-    def migrate_disk_and_power_off(self, context, instance, dest):
-        """Copies a VHD from one host machine to another.
+    def migrate_disk_and_power_off(self, context, instance, dest,
+                                   instance_type):
+        """Copies a VHD from one host machine to another, possibly
+        resizing filesystem before hand.
 
         :param instance: the instance that owns the VHD in question.
         :param dest: the destination host machine.
@@ -692,23 +701,69 @@ class VMOps(object):
 
         sr_path = VMHelper.get_sr_path(self._session)
 
-        # 2. Transfer the base copy
-        self._migrate_vhd(instance, base_copy_uuid, dest, sr_path)
-        self._update_instance_progress(context, instance,
-                                       step=2,
-                                       total_steps=RESIZE_TOTAL_STEPS)
-
-        # 3. Now power down the instance
-        self._shutdown(instance, vm_ref, hard=False)
-        self._update_instance_progress(context, instance,
-                                       step=3,
-                                       total_steps=RESIZE_TOTAL_STEPS)
-
-        # 4. Transfer the COW VHD
-        self._migrate_vhd(instance, cow_uuid, dest, sr_path)
-        self._update_instance_progress(context, instance,
-                                       step=4,
-                                       total_steps=RESIZE_TOTAL_STEPS)
+        if instance['auto_disk_config'] and \
+           instance['local_gb'] > instance_type['local_gb']:
+            # Resizing disk storage down
+            old_gb = instance['local_gb']
+            new_gb = instance_type['local_gb']
+
+            LOG.debug(_("Resizing down VDI %(cow_uuid)s from "
+                        "%(old_gb)dGB to %(new_gb)dGB") % locals())
+
+            # 2. Power down the instance before resizing
+            self._shutdown(instance, vm_ref, hard=False)
+            self._update_instance_progress(context, instance,
+                                           step=2,
+                                           total_steps=RESIZE_TOTAL_STEPS)
+
+            # 3. Copy VDI, resize partition and filesystem, forget VDI,
+            # truncate VHD
+            new_ref, new_uuid = VMHelper.resize_disk(self._session,
+                                                     vdi_ref,
+                                                     instance_type)
+            self._update_instance_progress(context, instance,
+                                           step=3,
+                                           total_steps=RESIZE_TOTAL_STEPS)
+
+            # 4. Transfer the new VHD
+            self._migrate_vhd(instance, new_uuid, dest, sr_path)
+            self._update_instance_progress(context, instance,
+                                           step=4,
+                                           total_steps=RESIZE_TOTAL_STEPS)
+
+            # Clean up VDI now that it's been copied
+            VMHelper.destroy_vdi(self._session, new_ref)
+
+            vdis = {'base_copy': new_uuid}
+        else:
+            # Resizing disk storage up, will be handled on destination
+
+            # As an optimization, we transfer the base VDI first,
+            # then shut down the VM, followed by transfering the COW
+            # VDI.
+
+            # 2. Transfer the base copy
+            self._migrate_vhd(instance, base_copy_uuid, dest, sr_path)
+            self._update_instance_progress(context, instance,
+                                           step=2,
+                                           total_steps=RESIZE_TOTAL_STEPS)
+
+            # 3. Now power down the instance
+            self._shutdown(instance, vm_ref, hard=False)
+            self._update_instance_progress(context, instance,
+                                           step=3,
+                                           total_steps=RESIZE_TOTAL_STEPS)
+
+            # 4. Transfer the COW VHD
+            self._migrate_vhd(instance, cow_uuid, dest, sr_path)
+            self._update_instance_progress(context, instance,
+                                           step=4,
+                                           total_steps=RESIZE_TOTAL_STEPS)
+
+            # TODO(mdietz): we could also consider renaming these to
+            # something sensible so we don't need to blindly pass
+            # around dictionaries
+            vdis = {'base_copy': base_copy_uuid, 'cow': cow_uuid}
 
         # NOTE(sirp): in case we're resizing to the same host (for dev
         # purposes), apply a suffix to name-label so the two VM records
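The branch above only takes the resize-down path when the instance opted into auto_disk_config and the flavor's disk is actually smaller; everything else keeps the existing base-copy/COW transfer. A standalone restatement of that predicate:

def resizing_down(instance, instance_type):
    """True when migrate_disk_and_power_off should shrink the disk."""
    return (instance['auto_disk_config'] and
            instance['local_gb'] > instance_type['local_gb'])

assert resizing_down({'auto_disk_config': True, 'local_gb': 80},
                     {'local_gb': 40})
assert not resizing_down({'auto_disk_config': False, 'local_gb': 80},
                         {'local_gb': 40})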
@@ -720,20 +775,27 @@ class VMOps(object):
             self._destroy(instance, template_vm_ref,
                     shutdown=False, destroy_kernel_ramdisk=False)
 
-        # TODO(mdietz): we could also consider renaming these to something
-        # sensible so we don't need to blindly pass around dictionaries
-        return {'base_copy': base_copy_uuid, 'cow': cow_uuid}
+        return vdis
 
-    def link_disks(self, instance, base_copy_uuid, cow_uuid):
-        """Links the base copy VHD to the COW via the XAPI plugin."""
+    def _move_disks(self, instance, disk_info):
+        """Move and possibly link VHDs via the XAPI plugin."""
+        base_copy_uuid = disk_info['base_copy']
         new_base_copy_uuid = str(uuid.uuid4())
-        new_cow_uuid = str(uuid.uuid4())
+
         params = {'instance_uuid': instance['uuid'],
+                  'sr_path': VMHelper.get_sr_path(self._session),
                   'old_base_copy_uuid': base_copy_uuid,
-                  'old_cow_uuid': cow_uuid,
-                  'new_base_copy_uuid': new_base_copy_uuid,
-                  'new_cow_uuid': new_cow_uuid,
-                  'sr_path': VMHelper.get_sr_path(self._session), }
+                  'new_base_copy_uuid': new_base_copy_uuid}
+
+        if 'cow' in disk_info:
+            cow_uuid = disk_info['cow']
+            new_cow_uuid = str(uuid.uuid4())
+            params['old_cow_uuid'] = cow_uuid
+            params['new_cow_uuid'] = new_cow_uuid
+
+            new_uuid = new_cow_uuid
+        else:
+            new_uuid = new_base_copy_uuid
 
         task = self._session.async_call_plugin('migration',
                 'move_vhds_into_sr', {'params': pickle.dumps(params)})
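_move_disks() now builds the plugin parameters with the COW entries optional, since a resized-down instance ships a single consolidated VHD instead of a base-copy/COW pair. A standalone sketch of the dict construction (paths and uuids are stand-ins):

import uuid

def build_move_params(instance_uuid, disk_info, sr_path):
    """Sketch: plugin params with the COW entries made optional."""
    params = {'instance_uuid': instance_uuid,
              'sr_path': sr_path,
              'old_base_copy_uuid': disk_info['base_copy'],
              'new_base_copy_uuid': str(uuid.uuid4())}
    if 'cow' in disk_info:
        params['old_cow_uuid'] = disk_info['cow']
        params['new_cow_uuid'] = str(uuid.uuid4())
    return params

print(build_move_params('abc', {'base_copy': 'deadbeef'}, '/var/run/sr'))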
@@ -744,25 +806,33 @@ class VMOps(object):
 
         # Set name-label so we can find if we need to clean up a failed
         # migration
-        VMHelper.set_vdi_name_label(self._session, new_cow_uuid,
+        VMHelper.set_vdi_name_label(self._session, new_uuid,
                                     instance.name)
 
-        return new_cow_uuid
+        return new_uuid
 
-    def resize_instance(self, instance, vdi_uuid):
-        """Resize a running instance by changing its RAM and disk size."""
+    def _resize_instance(self, instance, vdi_uuid):
+        """Resize a running instance by changing its disk size."""
         #TODO(mdietz): this will need to be adjusted for swap later
-        #The new disk size must be in bytes
+
         new_disk_size = instance.local_gb * 1024 * 1024 * 1024
-        if new_disk_size > 0:
-            instance_name = instance.name
-            instance_local_gb = instance.local_gb
-            LOG.debug(_("Resizing VDI %(vdi_uuid)s for instance"
-                        "%(instance_name)s. Expanding to %(instance_local_gb)d"
-                        " GB") % locals())
-            vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
-            # for an instance with no local storage
+        if not new_disk_size:
+            return
+
+        # Get current size of VDI
+        vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
+        virtual_size = self._session.call_xenapi('VDI.get_virtual_size',
+                                                 vdi_ref)
+        virtual_size = int(virtual_size)
+
+        instance_name = instance.name
+        old_gb = virtual_size / (1024 * 1024 * 1024)
+        new_gb = instance.local_gb
+
+        if virtual_size < new_disk_size:
+            # Resize up. Simple VDI resize will do the trick
+            LOG.debug(_("Resizing up VDI %(vdi_uuid)s from %(old_gb)dGB to "
+                        "%(new_gb)dGB") % locals())
             if self._product_version[0] > 5:
                 resize_func_name = 'VDI.resize'
             else:
@@ -245,10 +245,12 @@ class XenAPIConnection(driver.ComputeDriver):
         """Unpause paused VM instance"""
         self._vmops.unpause(instance)
 
-    def migrate_disk_and_power_off(self, context, instance, dest):
+    def migrate_disk_and_power_off(self, context, instance, dest,
+                                   instance_type):
         """Transfers the VHD of a running instance to another host, then shuts
         off the instance copies over the COW disk"""
-        return self._vmops.migrate_disk_and_power_off(context, instance, dest)
+        return self._vmops.migrate_disk_and_power_off(context, instance,
+                                                      dest, instance_type)
 
     def suspend(self, instance):
         """suspend the specified instance"""
@@ -37,50 +37,55 @@ def move_vhds_into_sr(session, args):
     params = pickle.loads(exists(args, 'params'))
     instance_uuid = params['instance_uuid']
-
-    old_base_copy_uuid = params['old_base_copy_uuid']
-    old_cow_uuid = params['old_cow_uuid']
-
-    new_base_copy_uuid = params['new_base_copy_uuid']
-    new_cow_uuid = params['new_cow_uuid']
-
     sr_path = params['sr_path']
-    sr_temp_path = "%s/tmp/" % sr_path
-
-    # Discover the copied VHDs locally, and then set up paths to copy
-    # them to under the SR
-    source_image_path = "/images/instance%s" % instance_uuid
-    source_base_copy_path = "%s/%s.vhd" % (source_image_path,
-            old_base_copy_uuid)
-    source_cow_path = "%s/%s.vhd" % (source_image_path, old_cow_uuid)
-
-    temp_vhd_path = "%s/instance%s/" % (sr_temp_path, instance_uuid)
-    new_base_copy_path = "%s/%s.vhd" % (temp_vhd_path, new_base_copy_uuid)
-    new_cow_path = "%s/%s.vhd" % (temp_vhd_path, new_cow_uuid)
+    sr_temp_path = "%s/tmp" % sr_path
+    temp_vhd_path = "%s/instance%s" % (sr_temp_path, instance_uuid)
 
     logging.debug('Creating temporary SR path %s' % temp_vhd_path)
     os.makedirs(temp_vhd_path)
 
-    logging.debug('Moving %s into %s' % (source_base_copy_path, temp_vhd_path))
+    # Discover the copied VHDs locally, and then set up paths to copy
+    # them to under the SR
+    source_image_path = "/images/instance%s" % instance_uuid
+
+    old_base_copy_uuid = params['old_base_copy_uuid']
+    new_base_copy_uuid = params['new_base_copy_uuid']
+    source_base_copy_path = "%s/%s.vhd" % (source_image_path,
+            old_base_copy_uuid)
+    new_base_copy_path = "%s/%s.vhd" % (temp_vhd_path, new_base_copy_uuid)
+
+    logging.debug('Moving base %s into %s' % (source_base_copy_path,
+                                              temp_vhd_path))
     shutil.move(source_base_copy_path, new_base_copy_path)
 
-    logging.debug('Moving %s into %s' % (source_cow_path, temp_vhd_path))
-    shutil.move(source_cow_path, new_cow_path)
-
-    logging.debug('Cleaning up %s' % source_image_path)
-    os.rmdir(source_image_path)
-
-    # Link the COW to the base copy
-    logging.debug('Attaching COW to the base copy %s -> %s' %
-            (new_cow_path, new_base_copy_path))
-    subprocess.call(shlex.split('/usr/sbin/vhd-util modify -n %s -p %s' %
-            (new_cow_path, new_base_copy_path)))
-
-    logging.debug('Moving VHDs into SR %s' % sr_path)
-    # NOTE(sirp): COW should be copied before base_copy to avoid snapwatchd
-    # GC'ing an unreferenced base copy VDI
-    shutil.move(new_cow_path, sr_path)
+    if 'old_cow_uuid' in params:
+        old_cow_uuid = params['old_cow_uuid']
+        new_cow_uuid = params['new_cow_uuid']
+
+        source_cow_path = "%s/%s.vhd" % (source_image_path, old_cow_uuid)
+        new_cow_path = "%s/%s.vhd" % (temp_vhd_path, new_cow_uuid)
+
+        logging.debug('Moving COW %s into %s' % (source_cow_path,
+                                                 temp_vhd_path))
+        shutil.move(source_cow_path, new_cow_path)
+
+        # Link the COW to the base copy
+        logging.debug('Attaching COW to the base %s -> %s' %
+                      (new_cow_path, new_base_copy_path))
+        subprocess.call(['/usr/sbin/vhd-util', 'modify',
+                         '-n', new_cow_path, '-p', new_base_copy_path])
+
+        # NOTE(sirp): COW should be copied before base_copy to avoid
+        # snapwatchd GC'ing an unreferenced base copy VDI
+        logging.debug('Moving COW %s to %s' % (new_cow_path, sr_path))
+        shutil.move(new_cow_path, sr_path)
+
+    logging.debug('Moving base %s to %s' % (new_base_copy_path, sr_path))
     shutil.move(new_base_copy_path, sr_path)
 
+    logging.debug('Cleaning up source path %s' % source_image_path)
+    os.rmdir(source_image_path)
+
     logging.debug('Cleaning up temporary SR path %s' % temp_vhd_path)
     os.rmdir(temp_vhd_path)
     return ""
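One detail worth noting in the plugin change: the vhd-util invocation moved from shlex.split() on an interpolated string to an explicit argument list, which keeps paths containing unexpected characters from being re-tokenized. For simple paths the two forms produce the same argv, as this standalone check shows (the paths are illustrative):

import shlex

new_cow_path = '/sr/tmp/new.vhd'
new_base_copy_path = '/sr/tmp/base.vhd'

old_style = shlex.split('/usr/sbin/vhd-util modify -n %s -p %s'
                        % (new_cow_path, new_base_copy_path))
new_style = ['/usr/sbin/vhd-util', 'modify',
             '-n', new_cow_path, '-p', new_base_copy_path]
assert old_style == new_style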
@@ -103,7 +108,7 @@ def transfer_vhd(session, args):
 
     ssh_cmd = '\"ssh -o StrictHostKeyChecking=no\"'
 
-    rsync_args = shlex.split('nohup /usr/bin/rsync -av --progress -e %s %s %s'
+    rsync_args = shlex.split('nohup /usr/bin/rsync -av -e %s %s %s'
                              % (ssh_cmd, source_path, dest_path))
 
     logging.debug('rsync %s' % (' '.join(rsync_args, )))