Support resize and cold migration

This change set adds support for cold migration and resize
to a different host.

Add methods to the disk drivers to retrieve and validate disk
information so the source and destination hosts can determine whether
their disk drivers are compatible.  These methods are only called when
the driver supports shared storage; currently only the SSP driver does.
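
A minimal sketch (using a hypothetical stand-in class, not the real
DiskAdapter/SSPDiskAdapter) of how the two hosts use these methods: the
source host's driver produces a dict via get_info(), that dict travels
with the migration, and the destination host's driver returns None from
validate() when compatible or a reason string when not.

    class FakeSSPAdapter(object):
        """Hypothetical stand-in for the SSP disk driver."""
        capabilities = {'shared_storage': True}

        def __init__(self, cluster_name, ssp_name, ssp_uuid):
            self.cluster_name = cluster_name
            self.ssp_name = ssp_name
            self.ssp_uuid = ssp_uuid

        def get_info(self):
            # Runs on the source host; the result is carried to the
            # destination as part of the migration's disk info.
            return {'cluster_name': self.cluster_name,
                    'ssp_name': self.ssp_name,
                    'ssp_uuid': self.ssp_uuid}

        def validate(self, disk_info):
            # Runs on the destination host; None means compatible.
            if disk_info.get('ssp_uuid') != self.ssp_uuid:
                return ('Not a member of the same SSP cluster as the '
                        'source host (%s / %s).' %
                        (disk_info.get('cluster_name'),
                         disk_info.get('ssp_name')))
            return None

    source = FakeSSPAdapter('clustA', 'sspA', 'uuid-1')
    dest = FakeSSPAdapter('clustA', 'sspA', 'uuid-1')
    other = FakeSSPAdapter('clustB', 'sspB', 'uuid-2')
    assert dest.validate(source.get_info()) is None
    assert other.validate(source.get_info()) is not None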

Add a task flow implementation for migrate_disk_and_power_off.

There is a peculiar case when resizing a VM on the same host.
PowerVM doesn't allow two VMs to be defined with the same UUID, so
only one VM exists on the host, and care must be taken during a
resize revert not to delete the one and only VM we have.  To make
that case easier to detect, and to make it obvious when a VM is
being resized or migrated, the source VM is renamed during the
resize/migration process.  The renamed VM is either deleted or
renamed back when the operation is confirmed or reverted.
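
A minimal sketch of that naming scheme (a standalone illustration of
the _gen_resize_name helper added in the driver, not the method
itself); the 31-character truncation matches the slice used in this
change, and the prefix makes an in-flight resize or migration obvious
from tools such as pvmctl.

    def gen_resize_name(instance_name, same_host=False):
        # 'resize_' marks a same-host resize so the revert path knows not
        # to delete the one and only VM; 'migrate_' marks a source VM that
        # can safely be deleted once the migration is confirmed.
        prefix = 'resize_' if same_host else 'migrate_'
        return (prefix + instance_name)[:31]

    gen_resize_name('instance-00000001', same_host=True)
    # -> 'resize_instance-00000001'
    gen_resize_name('instance-00000001', same_host=False)
    # -> 'migrate_instance-00000001'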

Change-Id: Ie3afe7de9b6daca2396dbc9e2d1c1537cb85e8d9
Author: Kyle L. Henderson
Date:   2015-11-20 15:57:30 -06:00
Parent: a46d18e134
Commit: 1b5c427a73

14 changed files with 976 additions and 237 deletions

View File

@@ -36,7 +36,25 @@ TEST_INSTANCE = {
'root_gb': 10,
'ephemeral_gb': 0,
'instance_type_id': '5',
'flavor': TEST_FLAVOR
'flavor': TEST_FLAVOR,
}
TEST_MIGRATION = {
'id': 1,
'source_compute': 'host1',
'dest_compute': 'host2',
'migration_type': 'resize',
'old_instance_type_id': 1,
'new_instance_type_id': 2,
}
TEST_MIGRATION_SAME_HOST = {
'id': 1,
'source_compute': 'host1',
'dest_compute': 'host1',
'migration_type': 'resize',
'old_instance_type_id': 1,
'new_instance_type_id': 2,
}
# NOTE(mikal): All of this is because if dnspython is present in your

View File

@@ -43,3 +43,11 @@ class TestDiskAdapter(test.TestCase):
img_meta = {'id': 'test_id'}
temp = self.st_adpt._get_image_upload(mock.Mock(), img_meta)
self.assertIsInstance(temp, disk_dvr.IterableToFileAdapter)
def test_get_info(self):
# Ensure the base method returns empty dict
self.assertEqual({}, self.st_adpt.get_info())
def test_validate(self):
# Ensure the base method returns error message
self.assertIsNotNone(self.st_adpt.validate(None))

View File

@@ -29,6 +29,7 @@ from pypowervm.wrappers import storage as pvm_stg
from pypowervm.wrappers import virtual_io_server as pvm_vios
from nova_powervm.tests.virt.powervm import fixtures as fx
from nova_powervm.virt.powervm.disk import driver as disk_dvr
from nova_powervm.virt.powervm.disk import ssp
from nova_powervm.virt.powervm import exception as npvmex
@@ -138,6 +139,30 @@ class TestSSPDiskAdapter(test.TestCase):
resp.entry = entry_or_list
return resp
def test_capabilities(self):
ssp_stor = self._get_ssp_stor()
# Ensure the capabilities report shared storage
self.assertTrue(ssp_stor.capabilities.get('shared_storage'))
def test_get_info(self):
ssp_stor = self._get_ssp_stor()
expected = {'cluster_name': 'neoclust1',
'ssp_name': 'neossp1',
'ssp_uuid': 'e357a79a-7a3d-35b6-8405-55ab6a2d0de7'}
# Ensure the SSP driver returns its cluster and SSP information
self.assertEqual(expected, ssp_stor.get_info())
def test_validate(self):
ssp_stor = self._get_ssp_stor()
fake_data = {}
# Ensure returns error message when no data
self.assertIsNotNone(ssp_stor.validate(fake_data))
# Get our own data and it should always match!
fake_data = ssp_stor.get_info()
# Ensure returns no error on good data
self.assertIsNone(ssp_stor.validate(fake_data))
def test_init_green_with_config(self):
"""Bootstrap SSPStorage, testing call to _fetch_cluster.
@@ -331,6 +356,30 @@ class TestSSPDiskAdapter(test.TestCase):
lu = ssp_stor.create_disk_from_image(None, Instance(), img, 1)
self.assertEqual('new_lu', lu)
def test_find_lu(self):
# Bad path, lu not found, None returned
ssp = self._get_ssp_stor()
lu = ssp._find_lu('not_found_name', pvm_stg.LUType.DISK)
self.assertIsNone(lu)
# Good path, found correct name and type
lu_name = 'neolu1'
lu = ssp._find_lu(lu_name, pvm_stg.LUType.DISK)
self.assertIsNotNone(lu)
self.assertEqual(lu_name, lu.name)
self.assertEqual(pvm_stg.LUType.DISK, lu.lu_type)
def test_get_disk_ref(self):
ssp = self._get_ssp_stor()
with mock.patch.object(ssp, '_find_lu', return_value='foundit'):
lu = ssp.get_disk_ref(self.instance, disk_dvr.DiskType.BOOT)
self.assertEqual('foundit', lu)
# Assert handles not finding it.
with mock.patch.object(ssp, '_find_lu', return_value=None):
lu = ssp.get_disk_ref(self.instance, disk_dvr.DiskType.BOOT)
self.assertIsNone(lu)
@mock.patch('nova_powervm.virt.powervm.disk.ssp.SSPDiskAdapter.'
'vios_uuids')
@mock.patch('pypowervm.tasks.scsi_mapper.build_vscsi_mapping')

View File

@@ -112,3 +112,64 @@ class PowerVMComputeDriver(fixtures.Fixture):
disk_adpt = self.useFixture(DiskAdapter())
self.drv.disk_dvr = disk_adpt.std_disk_adpt
class TaskFlow(fixtures.Fixture):
"""Construct a fake TaskFlow.
This fixture makes it easy to check if tasks were added to a task flow
without having to mock each task.
"""
def __init__(self, linear_flow='taskflow.patterns.linear_flow',
engines='taskflow.engines'):
"""Create the fixture.
:param linear_flow: The import path to patch for the linear flow.
:param engines: The import path to patch for the engines.
"""
super(TaskFlow, self).__init__()
self.linear_flow_import = linear_flow
self.engines_import = engines
def setUp(self):
super(TaskFlow, self).setUp()
self._linear_flow = mock.patch(self.linear_flow_import)
self.linear_flow = self._linear_flow.start()
self.tasks_added = []
self.linear_flow.Flow.return_value.add.side_effect = self._record_tasks
self.addCleanup(self._linear_flow.stop)
self._engine = mock.patch(self.engines_import)
self.engine = self._engine.start()
self.addCleanup(self._engine.stop)
def _record_tasks(self, *args, **kwargs):
self.tasks_added.append(args[0])
def assert_tasks_added(self, testcase, expected_tasks):
# Ensure the lists are the same size.
testcase.assertEqual(len(expected_tasks), len(self.tasks_added),
'Expected tasks not added: %s, %s' %
(expected_tasks,
[t.name for t in self.tasks_added]))
def compare_tasks(expected, observed):
if expected.endswith('*'):
cmplen = len(expected[:-1])
testcase.assertEqual(expected[:cmplen], observed.name[:cmplen])
else:
testcase.assertEqual(expected, observed.name)
# Compare each expected name against the corresponding added task.
for expected, observed in zip(expected_tasks, self.tasks_added):
compare_tasks(expected, observed)
class DriverTaskFlow(TaskFlow):
"""Specific TaskFlow fixture for the main compute driver."""
def __init__(self):
super(DriverTaskFlow, self).__init__(
linear_flow='nova_powervm.virt.powervm.driver.tf_lf',
engines='nova_powervm.virt.powervm.driver.tf_eng')

View File

@@ -130,3 +130,33 @@ class TestStorage(test.TestCase):
disk_dvr.disconnect_disk_from_mgmt.assert_called_with('vios_uuid',
'stg_name')
mock_rm.assert_called_with('/dev/disk')
def test_finddisk(self):
disk_dvr = mock.Mock()
disk_dvr.get_disk_ref.return_value = 'disk_ref'
instance = mock.Mock()
context = 'context'
disk_type = 'disk_type'
task = tf_stg.FindDisk(disk_dvr, context, instance, disk_type)
ret_disk = task.execute()
disk_dvr.get_disk_ref.assert_called_once_with(instance, disk_type)
self.assertEqual('disk_ref', ret_disk)
# Bad path for no disk found
disk_dvr.reset_mock()
disk_dvr.get_disk_ref.return_value = None
ret_disk = task.execute()
disk_dvr.get_disk_ref.assert_called_once_with(instance, disk_type)
self.assertIsNone(ret_disk)
def test_extenddisk(self):
disk_dvr = mock.Mock()
instance = mock.Mock()
context = 'context'
disk_info = {'type': 'disk_type'}
task = tf_stg.ExtendDisk(disk_dvr, context, instance, disk_info, 1024)
task.execute()
disk_dvr.extend_disk.assert_called_once_with(context, instance,
disk_info, 1024)

View File

@@ -0,0 +1,49 @@
# Copyright 2015 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import test
from nova_powervm.virt.powervm.tasks import vm as tf_vm
class TestVMTasks(test.TestCase):
def setUp(self):
super(TestVMTasks, self).setUp()
self.apt = mock.Mock()
self.instance = mock.Mock()
@mock.patch('nova_powervm.virt.powervm.vm.update')
def test_resize(self, mock_vm_update):
resize = tf_vm.Resize(self.apt, 'host_wrapper', self.instance,
'flavor', name='new_name')
mock_vm_update.return_value = 'resized_entry'
resized_entry = resize.execute()
mock_vm_update.assert_called_once_with(self.apt, 'host_wrapper',
self.instance, 'flavor',
entry=None, name='new_name')
self.assertEqual('resized_entry', resized_entry)
@mock.patch('nova_powervm.virt.powervm.vm.rename')
def test_rename(self, mock_vm_rename):
mock_vm_rename.return_value = 'new_entry'
rename = tf_vm.Rename(self.apt, 'host_uuid', self.instance, 'new_name')
new_entry = rename.execute()
mock_vm_rename.assert_called_once_with(self.apt, 'host_uuid',
self.instance, 'new_name')
self.assertEqual('new_entry', new_entry)

View File

@@ -22,6 +22,7 @@ from oslo_config import cfg
from oslo_serialization import jsonutils
from nova import block_device as nova_block_device
from nova.compute import task_states
from nova import exception as exc
from nova import objects
from nova.objects import base as obj_base
@@ -662,6 +663,10 @@ class TestPowerVMDriver(test.TestCase):
self.assertFalse(ret)
self.assertEqual(1, mock_get_mapping.call_count)
# Test if block_device_info is None
ret = self.drv._is_booted_from_volume(None)
self.assertFalse(ret)
def test_get_inst_xag(self):
# No volumes - should be just the SCSI mapping
xag = self.drv._get_inst_xag(mock.Mock(), None)
@@ -679,6 +684,24 @@ class TestPowerVMDriver(test.TestCase):
pvm_vios.VIOS.xags.SCSI_MAPPING,
pvm_vios.VIOS.xags.FC_MAPPING]), set(xag))
def test_add_vol_conn_task(self):
bdm, vol_drv = mock.MagicMock(), mock.MagicMock()
flow = mock.Mock()
vals = [(bdm, vol_drv), (bdm, vol_drv)]
with mock.patch.object(self.drv, '_vol_drv_iter', return_value=vals):
self.drv._add_volume_connection_tasks(
'context', 'instance', 'bdms', flow, 'stg_ftsk')
self.assertEqual(4, flow.add.call_count)
def test_add_vol_disconn_task(self):
bdm, vol_drv = mock.MagicMock(), mock.MagicMock()
flow = mock.Mock()
vals = [(bdm, vol_drv), (bdm, vol_drv)]
with mock.patch.object(self.drv, '_vol_drv_iter', return_value=vals):
self.drv._add_volume_disconnection_tasks(
'context', 'instance', 'bdms', flow, 'stg_ftsk')
self.assertEqual(2, flow.add.call_count)
@mock.patch('nova_powervm.virt.powervm.driver.PowerVMDriver.'
'_is_booted_from_volume')
@mock.patch('nova_powervm.virt.powervm.vm.dlt_lpar')
@@ -689,7 +712,7 @@ class TestPowerVMDriver(test.TestCase):
'_validate_vopt_vg')
@mock.patch('nova_powervm.virt.powervm.vm.get_pvm_uuid')
@mock.patch('nova.objects.flavor.Flavor.get_by_id')
def test_destroy(
def test_destroy_internal(
self, mock_get_flv, mock_pvmuuid, mock_val_vopt, mock_dlt_vopt,
mock_pwroff, mock_dlt, mock_boot_from_vol):
"""Validates the basic PowerVM destroy."""
@@ -792,6 +815,46 @@ class TestPowerVMDriver(test.TestCase):
mock.Mock(), block_device_info=mock_bdms)
assert_not_called()
@mock.patch('nova_powervm.virt.powervm.vm.get_pvm_uuid')
@mock.patch('nova_powervm.virt.powervm.vm.get_vm_qp')
def test_destroy(self, mock_getqp, mock_getuuid):
"""Validates the basic PowerVM destroy."""
# Set up the mocks to the tasks.
inst = objects.Instance(**powervm.TEST_INSTANCE)
inst.task_state = None
# BDMs
mock_bdms = self._fake_bdms()
with mock.patch.object(self.drv, '_destroy') as mock_dst_int:
# Invoke the method.
self.drv.destroy('context', inst, mock.Mock(),
block_device_info=mock_bdms)
mock_dst_int.assert_called_with(
'context', inst, block_device_info=mock_bdms, destroy_disks=True,
shutdown=True)
# Test delete during migrate / resize
inst.task_state = task_states.RESIZE_REVERTING
mock_getqp.return_value = ('resize_' + inst.name)[:31]
with mock.patch.object(self.drv, '_destroy') as mock_dst_int:
# Invoke the method.
self.drv.destroy('context', inst, mock.Mock(),
block_device_info=mock_bdms)
# We shouldn't delete our resize_ instances
mock_dst_int.assert_not_called()
# Now test migrating...
mock_getqp.return_value = ('migrate_' + inst.name)[:31]
with mock.patch.object(self.drv, '_destroy') as mock_dst_int:
# Invoke the method.
self.drv.destroy('context', inst, mock.Mock(),
block_device_info=mock_bdms)
# If it is a migrated instance, it should be deleted.
mock_dst_int.assert_called_with(
'context', inst, block_device_info=mock_bdms, destroy_disks=True,
shutdown=True)
def test_attach_volume(self):
"""Validates the basic PowerVM attach volume."""
# Set up the mocks to the tasks.
@@ -891,11 +954,9 @@ class TestPowerVMDriver(test.TestCase):
# Validate the rollbacks were called.
self.assertEqual(2, self.vol_drv.connect_volume.call_count)
@mock.patch('nova_powervm.virt.powervm.vm.power_off')
@mock.patch('nova_powervm.virt.powervm.vm.update')
def test_resize(self, mock_update, mock_pwr_off):
"""Validates the PowerVM driver resize operation."""
# Set up the mocks to the resize operation.
def test_migrate_disk_and_power_off(self):
"""Validates the PowerVM driver migrate / resize operation."""
# Set up the mocks to the migrate / resize operation.
inst = objects.Instance(**powervm.TEST_INSTANCE)
host = self.drv.get_host_ip_addr()
resp = pvm_adp.Response('method', 'path', 'status', 'reason', {})
@@ -911,27 +972,90 @@ class TestPowerVMDriver(test.TestCase):
exc.InstanceFaultRollback, self.drv.migrate_disk_and_power_off,
'context', inst, 'dest', small_root, 'network_info', mock_bdms)
new_flav = objects.Flavor(vcpus=1, memory_mb=2048, root_gb=10)
# We don't support resize to different host.
self.assertRaises(
NotImplementedError, self.drv.migrate_disk_and_power_off,
'context', inst, 'bogus host', new_flav, 'network_info', mock_bdms)
self.drv.migrate_disk_and_power_off(
'context', inst, host, new_flav, 'network_info', mock_bdms)
mock_pwr_off.assert_called_with(
self.drv.adapter, inst, self.drv.host_uuid, entry=mock.ANY)
mock_update.assert_called_with(
self.drv.adapter, self.drv.host_wrapper, inst, new_flav,
entry=mock.ANY)
# Boot disk resize
boot_flav = objects.Flavor(vcpus=1, memory_mb=2048, root_gb=12)
self.drv.migrate_disk_and_power_off(
'context', inst, host, boot_flav, 'network_info', mock_bdms)
self.drv.disk_dvr.extend_disk.assert_called_with(
'context', inst, dict(type='boot'), 12)
# Tasks expected to be added for resize to the same host
expected = [
'pwr_off_lpar',
'extend_disk_boot',
'disconnect_vol_*',
'disconnect_vol_*',
'fake',
'rename_lpar_resize_instance-00000001',
]
with fx.DriverTaskFlow() as taskflow_fix:
self.drv.migrate_disk_and_power_off(
'context', inst, host, boot_flav, 'network_info', mock_bdms)
taskflow_fix.assert_tasks_added(self, expected)
# Check the size set in the resize task
extend_task = taskflow_fix.tasks_added[1]
self.assertEqual(extend_task.size, 12)
@mock.patch('nova.objects.flavor.Flavor.get_by_id')
def test_finish_migration(self, mock_get_flv):
inst = objects.Instance(**powervm.TEST_INSTANCE)
mock_bdms = self._fake_bdms()
mig = objects.Migration(**powervm.TEST_MIGRATION)
mig_same_host = objects.Migration(**powervm.TEST_MIGRATION_SAME_HOST)
disk_info = {}
# The first test is different hosts but local storage, should fail
self.assertRaises(exc.InstanceFaultRollback,
self.drv.finish_migration,
'context', mig, inst, disk_info, 'network_info',
'image_meta', 'resize_instance', mock_bdms)
# The rest of the test need to pass the shared disk test
self.disk_dvr.validate.return_value = None
# Tasks expected to be added for migration to different host
expected = [
'crt_lpar',
'plug_vifs',
'plug_mgmt_vif',
'find_disk',
'connect_disk',
'connect_vol_*',
'save_bdm_fake_vol1',
'connect_vol_*',
'save_bdm_fake_vol2',
'fake',
'get_lpar',
'pwr_lpar',
]
with fx.DriverTaskFlow() as taskflow_fix:
self.drv.finish_migration(
'context', mig, inst, disk_info, 'network_info', 'image_meta',
'resize_instance', block_device_info=mock_bdms)
taskflow_fix.assert_tasks_added(self, expected)
# Tasks expected to be added for resize to the same host
expected = [
'resize_lpar',
'connect_vol_*',
'save_bdm_fake_vol1',
'connect_vol_*',
'save_bdm_fake_vol2',
'fake',
'get_lpar',
'pwr_lpar',
]
with fx.DriverTaskFlow() as taskflow_fix:
self.drv.finish_migration(
'context', mig_same_host, inst, disk_info, 'network_info',
'image_meta', 'resize_instance', block_device_info=mock_bdms)
taskflow_fix.assert_tasks_added(self, expected)
# Tasks expected to be added for resize to the same host, no BDMS,
# and no power_on
expected = [
'resize_lpar',
]
with fx.DriverTaskFlow() as taskflow_fix:
self.drv.finish_migration(
'context', mig_same_host, inst, disk_info, 'network_info',
'image_meta', 'resize_instance', power_on=False)
taskflow_fix.assert_tasks_added(self, expected)
@mock.patch('nova_powervm.virt.powervm.driver.vm')
@mock.patch('nova_powervm.virt.powervm.tasks.vm.vm')
@@ -1356,3 +1480,46 @@ class TestPowerVMDriver(test.TestCase):
inst = objects.Instance(**powervm.TEST_INSTANCE)
overhead = self.drv.estimate_instance_overhead(inst_info)
self.assertEqual({'memory_mb': '2048'}, overhead)
def test_vol_drv_iter(self):
inst = objects.Instance(**powervm.TEST_INSTANCE)
block_device_info = self._fake_bdms()
vol_adpt = mock.Mock()
def _get_results(block_device_info=None, bdms=None):
# Patch so we get the same mock back each time.
with mock.patch.object(self.drv, '_get_inst_vol_adpt',
return_value=vol_adpt):
return [
(bdm, vol_drv) for bdm, vol_drv in self.drv._vol_drv_iter(
'context', inst, block_device_info=block_device_info,
bdms=bdms)]
def validate(results):
# For each good call, we should get back two bdms / vol_adpt
self.assertEqual(
'fake_vol1',
results[0][0]['connection_info']['data']['volume_id'])
self.assertEqual(vol_adpt, results[0][1])
self.assertEqual(
'fake_vol2',
results[1][0]['connection_info']['data']['volume_id'])
self.assertEqual(vol_adpt, results[1][1])
# Send block device info
results = _get_results(block_device_info=block_device_info)
validate(results)
# Same results with bdms
results = _get_results(bdms=self.drv._extract_bdm(block_device_info))
validate(results)
# Empty bdms
self.assertEqual([], _get_results(bdms=[]))
def test_build_vol_drivers(self):
# This utility just returns a list of drivers from the _vol_drv_iter()
# iterator so mock it and ensure the drivers are returned.
vals = [('bdm0', 'drv0'), ('bdm1', 'drv1')]
with mock.patch.object(self.drv, '_vol_drv_iter', return_value=vals):
drivers = self.drv._build_vol_drivers('context', 'instance')
self.assertEqual(['drv0', 'drv1'], drivers)

View File

@@ -297,6 +297,45 @@ class TestVM(test.TestCase):
self.assertRaises(exception.InvalidAttribute, vm.crt_lpar,
self.apt, host_wrapper, instance, flavor)
@mock.patch('nova_powervm.virt.powervm.vm.get_instance_wrapper')
@mock.patch('nova_powervm.virt.powervm.vm.VMBuilder')
def test_update(self, mock_vmb, mock_get_inst):
instance = objects.Instance(**powervm.TEST_INSTANCE)
flavor, entry = mock.Mock(), mock.Mock()
name = "new_name"
entry.update.return_value = 'NewEntry'
bldr = mock_vmb.return_value
lpar_bldr = bldr.lpar_builder.return_value
new_entry = vm.update(self.apt, 'mock_host_wrap', instance, flavor,
entry=entry, name=name)
# Ensure the lpar was rebuilt
lpar_bldr.rebuild.assert_called_once_with(entry)
entry.update.assert_called_once_with()
self.assertEqual(name, entry.name)
self.assertEqual('NewEntry', new_entry)
@mock.patch('nova_powervm.virt.powervm.vm.get_instance_wrapper')
def test_rename(self, mock_get_inst):
instance = objects.Instance(**powervm.TEST_INSTANCE)
entry = mock.Mock()
entry.update.return_value = 'NewEntry'
new_entry = vm.rename(self.apt, 'mock_host_uuid', instance, 'new_name',
entry=entry)
self.assertEqual('new_name', entry.name)
entry.update.assert_called_once_with()
self.assertEqual('NewEntry', new_entry)
# Test optional entry parameter
entry.reset_mock()
mock_get_inst.return_value = entry
new_entry = vm.rename(self.apt, 'mock_host_uuid', instance, 'new_name')
mock_get_inst.assert_called_once_with(self.apt, instance,
'mock_host_uuid')
self.assertEqual('new_name', entry.name)
entry.update.assert_called_once_with()
self.assertEqual('NewEntry', new_entry)
def test_add_IBMi_attrs(self):
inst = mock.Mock()
# Non-ibmi distro

View File

@@ -28,6 +28,7 @@ import pypowervm.util as pvm_util
import pypowervm.wrappers.virtual_io_server as pvm_vios
from nova_powervm.virt.powervm import exception as npvmex
from nova_powervm.virt.powervm.i18n import _
from nova_powervm.virt.powervm.i18n import _LW
from nova_powervm.virt.powervm import vm
@@ -68,6 +69,10 @@ class IterableToFileAdapter(object):
@six.add_metaclass(abc.ABCMeta)
class DiskAdapter(object):
capabilities = {
'shared_storage': False,
}
def __init__(self, connection):
"""Initialize the DiskAdapter
@@ -84,6 +89,33 @@ class DiskAdapter(object):
"""List the UUIDs of the Virtual I/O Servers hosting the storage."""
raise NotImplementedError()
def get_info(self):
"""Return disk information for the driver.
This method is used on cold migration to pass disk information from
the source to the destination.  The data to retrieve and validate (see
the validate method below) is determined by the disk driver
implementation.
Currently this and the validate method will only be called for the SSP
driver because it's the only one that supports shared storage.
:return: returns a dict of disk information
"""
return {}
def validate(self, disk_info):
"""Validate the disk information is compatible with this driver.
This method is called during cold migration to ensure the disk
driver on the destination host is compatible with the source host.
:param disk_info: disk information dictionary
:returns: None if compatible, otherwise a reason for incompatibility
"""
return _('The configured disk driver does not support migration '
'or resize.')
def disk_match_func(self, disk_type, instance):
"""Return a matching function to locate the disk for an instance.

View File

@@ -21,6 +21,7 @@ import oslo_log.log as logging
from nova_powervm.virt.powervm.disk import driver as disk_drv
from nova_powervm.virt.powervm import exception as npvmex
from nova_powervm.virt.powervm.i18n import _
from nova_powervm.virt.powervm.i18n import _LE
from nova_powervm.virt.powervm.i18n import _LI
from nova_powervm.virt.powervm import vios
@@ -60,6 +61,10 @@ class SSPDiskAdapter(disk_drv.DiskAdapter):
exist in the future.
"""
capabilities = {
'shared_storage': True,
}
def __init__(self, connection):
"""Initialize the SSPDiskAdapter.
@@ -88,6 +93,35 @@ class SSPDiskAdapter(disk_drv.DiskAdapter):
ssp = self._ssp
return float(ssp.capacity) - float(ssp.free_space)
def get_info(self):
"""Return disk information for the driver.
This method is used on cold migration to pass disk information from
the source to the destination.
:return: returns a dict of disk information
"""
return {'cluster_name': self.clust_name,
'ssp_name': self.ssp_name,
'ssp_uuid': self._ssp.uuid}
def validate(self, disk_info):
"""Validate the disk information is compatible with this driver.
This method is called during cold migration to ensure the disk
driver on the destination host is compatible with the source host.
:param disk_info: disk information dictionary
:returns: None if compatible, otherwise a reason for incompatibility
"""
if disk_info.get('ssp_uuid') != self._ssp.uuid:
return (_('The host is not a member of the same SSP cluster. '
'The source host cluster: %(source_clust_name)s. '
'The source host SSP: %(source_ssp_name)s.') %
{'source_clust_name': disk_info.get('cluster_name'),
'source_ssp_name': disk_info.get('ssp_name')}
)
def disconnect_image_disk(self, context, instance, stg_ftsk=None,
disk_type=None):
"""Disconnects the storage adapters from the image disk.
@@ -216,6 +250,12 @@ class SSPDiskAdapter(disk_drv.DiskAdapter):
return boot_lu
def _find_lu(self, lu_name, lu_type):
"""Find a specified lu by name and type."""
for lu in self._ssp.logical_units:
if lu.lu_type == lu_type and lu.name == lu_name:
return lu
def _get_or_upload_image_lu(self, context, img_meta):
"""Ensures our SSP has an LU containing the specified image.
@@ -231,19 +271,24 @@ class SSPDiskAdapter(disk_drv.DiskAdapter):
"""
# Key off of the name to see whether we already have the image
luname = self._get_image_name(img_meta)
ssp = self._ssp
for lu in ssp.logical_units:
if lu.lu_type == pvm_stg.LUType.IMAGE and lu.name == luname:
LOG.info(_LI('SSP: Using already-uploaded image LU %s.'),
luname)
return lu
lu = self._find_lu(luname, pvm_stg.LUType.IMAGE)
if lu:
LOG.info(_LI('SSP: Using already-uploaded image LU %s.'), luname)
return lu
# We don't have it yet. Create it and upload the glance image to it.
# Make the image LU only as big as the image.
stream = self._get_image_upload(context, img_meta)
LOG.info(_LI('SSP: Uploading new image LU %s.'), luname)
lu, f_wrap = tsk_stg.upload_new_lu(self._any_vios_uuid(), ssp, stream,
luname, img_meta['size'])
lu, f_wrap = tsk_stg.upload_new_lu(
self._any_vios_uuid(), self._ssp, stream, luname, img_meta['size'])
return lu
def get_disk_ref(self, instance, disk_type):
"""Returns a reference to the disk for the instance."""
lu_name = self._get_disk_name(disk_type, instance)
lu = self._find_lu(lu_name, pvm_stg.LUType.DISK)
return lu
def connect_disk(self, context, instance, disk_info, stg_ftsk=None):

View File

@@ -27,7 +27,6 @@ from nova import utils as n_utils
from nova.virt import configdrive
from nova.virt import driver
import re
import time
from oslo_config import cfg
from oslo_log import log as logging
@@ -43,7 +42,6 @@ from pypowervm.helpers import vios_busy as vio_hlp
from pypowervm.tasks import memory as pvm_mem
from pypowervm.tasks import power as pvm_pwr
from pypowervm.tasks import vterm as pvm_vterm
from pypowervm.utils import retry as pvm_retry
from pypowervm.wrappers import base_partition as pvm_bp
from pypowervm.wrappers import managed_system as pvm_ms
from pypowervm.wrappers import virtual_io_server as pvm_vios
@@ -330,11 +328,9 @@ class PowerVMDriver(driver.ComputeDriver):
:param flow: the flow to add the tasks to.
:param stg_ftsk: the storage task flow.
"""
for bdm in bdms or []:
conn_info = bdm.get('connection_info')
vol_drv = self._get_inst_vol_adpt(
context, instance, conn_info=conn_info, stg_ftsk=stg_ftsk)
for bdm, vol_drv in self._vol_drv_iter(context, instance, bdms=bdms,
stg_ftsk=stg_ftsk):
# First connect the volume. This will update the
# connection_info.
flow.add(tf_stg.ConnectVolume(vol_drv))
@@ -356,11 +352,8 @@ class PowerVMDriver(driver.ComputeDriver):
:param flow: the flow to add the tasks to.
:param stg_ftsk: the storage task flow.
"""
for bdm in bdms or []:
conn_info = bdm.get('connection_info')
vol_drv = self._get_inst_vol_adpt(
context, instance, conn_info=conn_info,
stg_ftsk=stg_ftsk)
for bdm, vol_drv in self._vol_drv_iter(context, instance, bdms=bdms,
stg_ftsk=stg_ftsk):
flow.add(tf_stg.DisconnectVolume(vol_drv))
def _is_booted_from_volume(self, block_device_info):
@@ -373,6 +366,9 @@ class PowerVMDriver(driver.ComputeDriver):
:return: True if the root device is in block_device_info and False if
it is not.
"""
if block_device_info is None:
return False
root_bdm = block_device.get_root_bdm(
driver.block_device_info_get_mapping(block_device_info))
return (root_bdm is not None)
@@ -381,6 +377,101 @@ class PowerVMDriver(driver.ComputeDriver):
def need_legacy_block_device_info(self):
return False
def _destroy(self, context, instance, block_device_info=None,
destroy_disks=True, shutdown=True):
"""Internal destroy method used by multiple operations.
:param context: security context
:param instance: Instance object as returned by DB layer.
:param block_device_info: Information about block devices that should
be detached from the instance.
This can be None when destroying the original
VM during confirm resize/migration. In that
case, the storage mappings have already been
removed from the original VM, so no work to
do.
:param destroy_disks: Indicates if disks should be destroyed
:param shutdown: Indicate whether to shutdown the VM first
"""
def _setup_flow_and_run():
# Extract the block devices.
bdms = self._extract_bdm(block_device_info)
# Define the flow
flow = tf_lf.Flow("destroy")
if shutdown:
# Power Off the LPAR
flow.add(tf_vm.PowerOff(self.adapter, self.host_uuid,
pvm_inst_uuid, instance))
# Create the transaction manager (FeedTask) for Storage I/O.
xag = self._get_inst_xag(instance, bdms)
stg_ftsk = vios.build_tx_feed_task(self.adapter, self.host_uuid,
xag=xag)
# Add the disconnect/deletion of the vOpt to the transaction
# manager.
flow.add(tf_stg.DeleteVOpt(self.adapter, self.host_uuid, instance,
pvm_inst_uuid, stg_ftsk=stg_ftsk))
# Determine if there are volumes to disconnect. If so, remove each
# volume (within the transaction manager)
self._add_volume_disconnection_tasks(context, instance, bdms, flow,
stg_ftsk)
# Only detach the disk adapters if this is not a boot from volume
# since volumes are handled above. This is only for disks.
destroy_disk_task = None
if not self._is_booted_from_volume(block_device_info):
# Detach the disk storage adapters (when the stg_ftsk runs)
flow.add(tf_stg.DetachDisk(
self.disk_dvr, context, instance, stg_ftsk))
# Delete the storage disks
if destroy_disks:
destroy_disk_task = tf_stg.DeleteDisk(
self.disk_dvr, context, instance)
# Add the transaction manager flow to the end of the 'storage
# connection' tasks. This will run all the disconnection ops
# in parallel
flow.add(stg_ftsk)
# The disks shouldn't be destroyed until the unmappings are done.
if destroy_disk_task:
flow.add(destroy_disk_task)
# Last step is to delete the LPAR from the system.
# Note: If moving to a Graph Flow, will need to change to depend on
# the prior step.
flow.add(tf_vm.Delete(self.adapter, pvm_inst_uuid, instance))
# Build the engine & run!
tf_eng.run(flow)
try:
pvm_inst_uuid = vm.get_pvm_uuid(instance)
_setup_flow_and_run()
except exception.InstanceNotFound:
LOG.warning(_LW('VM was not found during destroy operation.'),
instance=instance)
return
except pvm_exc.HttpError as e:
# See if we were operating on the LPAR that we're deleting
# and it wasn't found
resp = e.response
exp = '/ManagedSystem/.*/LogicalPartition/.*-.*-.*-.*-.*'
if resp.status == 404 and re.search(exp, resp.reqpath):
# It's the LPAR, so just return.
LOG.warning(_LW('VM was not found during destroy operation.'),
instance=instance)
return
else:
raise
def destroy(self, context, instance, network_info, block_device_info=None,
destroy_disks=True, migrate_data=None):
"""Destroy (shutdown and delete) the specified instance.
@@ -398,88 +489,28 @@ class PowerVMDriver(driver.ComputeDriver):
:param destroy_disks: Indicates if disks should be destroyed
:param migrate_data: implementation specific params
"""
def _run_flow():
# Extract the block devices.
bdms = self._extract_bdm(block_device_info)
# Define the flow
flow = tf_lf.Flow("destroy")
# Power Off the LPAR
flow.add(tf_vm.PowerOff(self.adapter, self.host_uuid,
pvm_inst_uuid, instance))
# Create the transaction manager (FeedTask) for Storage I/O.
xag = self._get_inst_xag(instance, bdms)
stg_ftsk = vios.build_tx_feed_task(self.adapter, self.host_uuid,
xag=xag)
# Add the disconnect/deletion of the vOpt to the transaction
# manager.
flow.add(tf_stg.DeleteVOpt(self.adapter, self.host_uuid, instance,
pvm_inst_uuid, stg_ftsk=stg_ftsk))
# Determine if there are volumes to disconnect. If so, remove each
# volume (within the transaction manager)
self._add_volume_disconnection_tasks(context, instance, bdms, flow,
stg_ftsk)
# Only attach the disk adapters if this is not a boot from volume.
destroy_disk_task = None
if not self._is_booted_from_volume(block_device_info):
# Detach the disk storage adapters (when the stg_ftsk runs)
flow.add(tf_stg.DetachDisk(
self.disk_dvr, context, instance, stg_ftsk))
# Delete the storage disks
if destroy_disks:
destroy_disk_task = tf_stg.DeleteDisk(
self.disk_dvr, context, instance)
# Add the transaction manager flow to the end of the 'storage
# connection' tasks. This will run all the connections in parallel
flow.add(stg_ftsk)
# The disks shouldn't be destroyed until the unmappings are done.
if destroy_disk_task:
flow.add(destroy_disk_task)
# Last step is to delete the LPAR from the system.
# Note: If moving to a Graph Flow, will need to change to depend on
# the prior step.
flow.add(tf_vm.Delete(self.adapter, pvm_inst_uuid, instance))
# Build the engine & run!
engine = tf_eng.load(flow)
engine.run()
self._log_operation('destroy', instance)
if instance.task_state == task_states.RESIZE_REVERTING:
# This destroy is part of resize, just skip destroying
# TODO(IBM): What to do longer term
LOG.info(_LI('Ignoring destroy call during resize revert.'))
return
LOG.info(_LI('Destroy called for migrated instance.'),
instance=instance)
# This destroy is part of resize or migrate. It's called to
# revert the resize/migration on the destination host.
try:
# Get the VM and see if we've renamed it to the resize name,
# if not delete as usual because then we know it's not the
# original VM.
pvm_inst_uuid = vm.get_pvm_uuid(instance)
_run_flow()
except exception.InstanceNotFound:
LOG.warning(_LW('VM was not found during destroy operation.'),
instance=instance)
return
except pvm_exc.HttpError as e:
# See if we were operating on the LPAR that we're deleting
# and it wasn't found
resp = e.response
exp = '/ManagedSystem/.*/LogicalPartition/.*-.*-.*-.*-.*'
if (resp.status == 404 and re.search(exp, resp.reqpath)):
# It's the LPAR, so just return.
LOG.warning(_LW('VM was not found during destroy operation.'),
instance=instance)
vm_name = vm.get_vm_qp(self.adapter, pvm_inst_uuid,
qprop='PartitionName', log_errors=False)
if vm_name == self._gen_resize_name(instance, same_host=True):
# Since it matches it must have been a resize, don't delete it!
LOG.info(_LI('Ignoring destroy call during resize revert.'),
instance=instance)
return
else:
raise
# Run the destroy
self._log_operation('destroy', instance)
self._destroy(context, instance, block_device_info=block_device_info,
destroy_disks=destroy_disks, shutdown=True)
def attach_volume(self, context, connection_info, instance, mountpoint,
disk_bus=None, device_type=None, encryption=None):
@@ -869,43 +900,6 @@ class PowerVMDriver(driver.ComputeDriver):
connector["wwpns"] = wwpn_list
return connector
def _remove_volume_connections(self, context, instance, block_device_info):
"""Removes the volume connections for the instance.
During resize disconnect if there are any volumes connected
to an instance.
:param context: security context
:param instance: Instance object
:param block_device_info: Information about block devices that should
be detached from the instance.
"""
# Extract the block devices.
bdms = self._extract_bdm(block_device_info)
# Nothing needed if there isn't a bdms.
if not bdms:
return
# Define the flow
flow = tf_lf.Flow("resize_vm")
# Create the transaction manager (FeedTask) for Storage I/O.
xag = self._get_inst_xag(instance, bdms)
stg_ftsk = vios.build_tx_feed_task(self.adapter, self.host_uuid,
xag=xag)
# Determine if there are volumes to disconnect. If so, remove each
# volume (within the transaction manager)
self._add_volume_disconnection_tasks(context, instance, bdms, flow,
stg_ftsk)
if len(flow):
# Add the transaction manager flow to the end of the 'storage
# disconnection' tasks. This will run all the connections in
# parallel
flow.add(stg_ftsk)
# Build the engine & run
tf_eng.run(flow)
def migrate_disk_and_power_off(self, context, instance, dest,
flavor, network_info,
block_device_info=None,
@@ -924,62 +918,78 @@ class PowerVMDriver(driver.ComputeDriver):
raise exception.InstanceFaultRollback(
exception.ResizeError(reason=_('Cannot reduce disk size.')))
if dest == self.get_host_ip_addr():
same_host = dest == self.get_host_ip_addr()
if same_host:
self._log_operation('resize', instance)
# This is a local resize
# Check for disk resizes before VM resources
if flav_obj.root_gb > instance.root_gb:
vm.power_off(self.adapter, instance, self.host_uuid)
# Resize the root disk
self.disk_dvr.extend_disk(context, instance, dict(type='boot'),
flav_obj.root_gb)
# Do any VM resource changes
self._resize_vm(context, instance, flav_obj, retry_interval)
# If everything has gone well up to this point, the compute
# manager is going to terminate the volume connections for the
# instance, so we need to remove our mappings.
# Remove the volume connections for the BDMs
self._remove_volume_connections(context, instance,
block_device_info)
else:
self._log_operation('migration', instance)
raise NotImplementedError()
# TODO(IBM): The caller is expecting disk info returned
# Can't migrate the disks if they are not on shared storage
if not self._is_booted_from_volume(block_device_info):
if not self.disk_dvr.capabilities['shared_storage']:
raise exception.InstanceFaultRollback(
exception.ResizeError(
reason=_('Cannot migrate local disks.')))
# Get disk info from disk driver.
disk_info = dict(disk_info, **self.disk_dvr.get_info())
pvm_inst_uuid = vm.get_pvm_uuid(instance)
# Define the migrate flow
flow = tf_lf.Flow("migrate_vm")
# Power off the VM
flow.add(tf_vm.PowerOff(self.adapter, self.host_uuid,
pvm_inst_uuid, instance))
if flav_obj.root_gb > instance.root_gb:
# Resize the root disk
flow.add(tf_stg.ExtendDisk(self.disk_dvr, context, instance,
dict(type='boot'), flav_obj.root_gb))
# Disconnect any volumes that are attached. They are reattached
# on the new VM (or existing VM if this is just a resize.)
# Extract the block devices.
bdms = self._extract_bdm(block_device_info)
if bdms:
# Create the transaction manager (FeedTask) for Storage I/O.
xag = self._get_inst_xag(instance, bdms)
stg_ftsk = vios.build_tx_feed_task(self.adapter, self.host_uuid,
xag=xag)
# Determine if there are volumes to disconnect. If so, remove each
# volume (within the transaction manager)
self._add_volume_disconnection_tasks(context, instance, bdms, flow,
stg_ftsk)
# Add the transaction manager flow to the end of the 'storage
# disconnection' tasks. This will run all the disconnections in
# parallel
flow.add(stg_ftsk)
# Rename the VM so it is easy to tell (e.g. via pvmctl) that a resize
# or migration is in flight.  The resize name also keeps a same-host
# revert from destroying the one and only VM.
new_name = self._gen_resize_name(instance, same_host=same_host)
flow.add(tf_vm.Rename(self.adapter, self.host_uuid, instance,
new_name))
try:
tf_eng.run(flow)
except Exception as e:
raise exception.InstanceFaultRollback(e)
return disk_info
def _resize_vm(self, context, instance, flav_obj, retry_interval=0):
def _delay(attempt, max_attempts, *args, **kwds):
LOG.info(_LI('Retrying to update VM.'), instance=instance)
time.sleep(retry_interval)
@pvm_retry.retry(delay_func=_delay)
def _update_vm():
LOG.debug('Resizing instance %s.', instance.name,
instance=instance)
entry = vm.get_instance_wrapper(self.adapter, instance,
self.host_uuid)
pwrd = vm.power_off(self.adapter, instance,
self.host_uuid, entry=entry)
# If it was powered off then the etag changed, fetch it again
if pwrd:
entry = vm.get_instance_wrapper(self.adapter, instance,
self.host_uuid)
vm.update(self.adapter, self.host_wrapper,
instance, flav_obj, entry=entry)
# Update the VM
_update_vm()
def _gen_resize_name(self, instance, same_host=False):
prefix = 'resize_' if same_host else 'migrate_'
return (prefix + instance.name)[:31]
def finish_migration(self, context, migration, instance, disk_info,
network_info, image_meta, resize_instance,
block_device_info=None, power_on=True):
"""Completes a resize.
"""Completes a resize or cold migration.
:param context: the context for the migration/resize
:param migration: the migrate/resize information
@@ -997,36 +1007,93 @@ class PowerVMDriver(driver.ComputeDriver):
otherwise
"""
# See if this was to the same host
same_host = migration.source_compute == migration.dest_compute
if same_host:
self._log_operation('finish resize', instance)
else:
self._log_operation('finish migration', instance)
# Ensure the disk drivers are compatible.
if (not same_host and
not self._is_booted_from_volume(block_device_info)):
# Can't migrate the disks if they are not on shared storage
if not self.disk_dvr.capabilities['shared_storage']:
raise exception.InstanceFaultRollback(
exception.ResizeError(
reason=_('Cannot migrate local disks.')))
# Call the disk driver to evaluate the disk info
reason = self.disk_dvr.validate(disk_info)
if reason:
raise exception.InstanceFaultRollback(
exception.ResizeError(reason=reason))
# Get the new flavor
flav_obj = flavor_obj.Flavor.get_by_id(
context, migration.new_instance_type_id)
# Extract the block devices.
bdms = self._extract_bdm(block_device_info)
# Nothing to do if there are no bdms and power-on is not required.
if not bdms and not power_on:
return
# Define the flow
flow = tf_lf.Flow("finish_migration")
if bdms:
# If attaching disks or volumes
if bdms or not same_host:
# Create the transaction manager (FeedTask) for Storage I/O.
xag = self._get_inst_xag(instance, bdms)
stg_ftsk = vios.build_tx_feed_task(self.adapter, self.host_uuid,
xag=xag)
else:
stg_ftsk = None
if same_host:
# This is just a resize.
new_name = self._gen_resize_name(instance, same_host=True)
flow.add(tf_vm.Resize(self.adapter, self.host_wrapper, instance,
flav_obj, name=new_name))
else:
# This is a migration over to another host. We have a lot of work.
# Create the LPAR
flow.add(tf_vm.Create(self.adapter, self.host_wrapper, instance,
flav_obj, stg_ftsk))
# Create a flow for the network IO
flow.add(tf_net.PlugVifs(self.virtapi, self.adapter, instance,
network_info, self.host_uuid))
flow.add(tf_net.PlugMgmtVif(self.adapter, instance,
self.host_uuid))
# Need to attach the boot disk, if present.
if not self._is_booted_from_volume(block_device_info):
flow.add(tf_stg.FindDisk(self.disk_dvr, context, instance,
disk_dvr.DiskType.BOOT))
# Connects up the disk to the LPAR
flow.add(tf_stg.ConnectDisk(self.disk_dvr, context, instance,
stg_ftsk=stg_ftsk))
if bdms:
# Determine if there are volumes to connect. If so, add a
# connection for each type.
self._add_volume_connection_tasks(context, instance, bdms,
flow, stg_ftsk)
if len(flow):
# Add the transaction manager flow to the end of the 'storage
# connection' tasks to run all the connections in parallel
flow.add(stg_ftsk)
if stg_ftsk:
# Add the transaction manager flow to the end of the 'storage
# connection' tasks to run all the connections in parallel
flow.add(stg_ftsk)
if power_on:
# Get the lpar wrapper (required by power-on), then power-on
flow.add(tf_vm.Get(self.adapter, self.host_uuid, instance))
flow.add(tf_vm.PowerOn(self.adapter, self.host_uuid, instance))
if len(flow):
try:
tf_eng.run(flow)
except Exception as e:
raise exception.InstanceFaultRollback(e)
def confirm_migration(self, migration, instance, network_info):
"""Confirms a resize, destroying the source VM.
@@ -1036,12 +1103,26 @@ class PowerVMDriver(driver.ComputeDriver):
:param network_info:
:py:meth:`~nova.network.manager.NetworkManager.get_instance_nw_info`
"""
# TODO(IBM): Anything to do here?
pass
# See if this was to the same host
same_host = migration.source_compute == migration.dest_compute
if same_host:
# This was a local resize, don't delete our only VM!
self._log_operation('confirm resize', instance)
vm.rename(self.adapter, self.host_uuid, instance, instance.name)
return
# Confirming the migrate means we need to delete source VM.
self._log_operation('confirm migration', instance)
# Destroy the old VM.
destroy_disks = not self.disk_dvr.capabilities['shared_storage']
context = ctx.get_admin_context()
self._destroy(context, instance, block_device_info=None,
destroy_disks=destroy_disks, shutdown=False)
def finish_revert_migration(self, context, instance, network_info,
block_device_info=None, power_on=True):
"""Finish reverting a resize.
"""Finish reverting a resize on the source host.
:param context: the context for the finish_revert_migration
:param instance: nova.objects.instance.Instance being migrated/resized
@@ -1051,16 +1132,17 @@ class PowerVMDriver(driver.ComputeDriver):
:param power_on: True if the instance should be powered on, False
otherwise
"""
self._log_operation('revert resize', instance)
# TODO(IBM): What to do here? Do we want to recreate the LPAR
# Or just change the settings back to the flavor?
self._log_operation('revert resize/migration', instance)
# This method is always run on the source host, so we just need to
# revert the VM back to its old sizing, if it was even changed
# at all. If it was a migration, then it wasn't changed but it
# shouldn't hurt to "update" it with the prescribed flavor. This
# makes it easy to handle both resize and migrate.
# Get the flavor from the instance, so we can revert it
admin_ctx = ctx.get_admin_context(read_deleted='yes')
flav_obj = (
flavor_obj.Flavor.get_by_id(admin_ctx,
instance.instance_type_id))
# TODO(IBM) Get the entry once for both power_off and update
flav_obj = flavor_obj.Flavor.get_by_id(
admin_ctx, instance.instance_type_id)
vm.power_off(self.adapter, instance, self.host_uuid)
vm.update(self.adapter, self.host_wrapper, instance, flav_obj)
@@ -1357,16 +1439,30 @@ class PowerVMDriver(driver.ComputeDriver):
mig.post_live_migration_at_destination(network_info, vol_drvs)
del self.live_migrations[instance.uuid]
def _build_vol_drivers(self, context, instance, block_device_info):
def _vol_drv_iter(self, context, instance, block_device_info=None,
bdms=None, stg_ftsk=None):
"""Yields a bdm and volume driver."""
# Get a volume driver for each volume
if not bdms:
bdms = self._extract_bdm(block_device_info)
for bdm in bdms or []:
conn_info = bdm.get('connection_info')
# if it doesn't have connection_info, it's not a volume
if not conn_info:
continue
vol_drv = self._get_inst_vol_adpt(context, instance,
conn_info=conn_info,
stg_ftsk=stg_ftsk)
yield bdm, vol_drv
def _build_vol_drivers(self, context, instance, block_device_info=None,
bdms=None, stg_ftsk=None):
"""Builds the volume connector drivers for a block device info."""
# Get a volume driver for each volume
vol_drvs = []
bdms = self._extract_bdm(block_device_info)
for bdm in bdms or []:
vol_drvs.append(
self._get_inst_vol_adpt(
context, instance, conn_info=bdm.get('connection_info')))
return vol_drvs
return [vol_drv for bdm, vol_drv in self._vol_drv_iter(
context, instance, block_device_info=block_device_info, bdms=bdms,
stg_ftsk=stg_ftsk)]
def unfilter_instance(self, instance, network_info):
"""Stop filtering instance."""

View File

@@ -506,3 +506,62 @@ class SaveBDM(task.Task):
'on instance %(inst)s.'),
{'vol_id': self.bdm.volume_id, 'inst': self.instance.name})
self.bdm.save()
class FindDisk(task.Task):
"""The Task to find a disk and provide information to downstream tasks."""
def __init__(self, disk_dvr, context, instance, disk_type):
"""Create the Task.
Provides the 'disk_dev_info' for other tasks.  The disk is located via
the disk driver's get_disk_ref method.
:param disk_dvr: The storage driver.
:param context: The context passed into the driver method.
:param instance: The nova instance.
:param disk_type: One of the DiskType enum values.
"""
super(FindDisk, self).__init__(name='find_disk',
provides='disk_dev_info')
self.disk_dvr = disk_dvr
self.context = context
self.instance = instance
self.disk_type = disk_type
def execute(self):
LOG.info(_LI('Finding disk for instance: %s'), self.instance.name)
disk = self.disk_dvr.get_disk_ref(self.instance, self.disk_type)
if not disk:
LOG.warn(_LW('Disk not found: %(disk_name)s'),
{'disk_name': self.disk_dvr._get_disk_name(self.disk_type,
self.instance)
}, instance=self.instance)
return disk
class ExtendDisk(task.Task):
"""Task to extend a disk."""
def __init__(self, disk_dvr, context, instance, disk_info, size):
"""Creates the Task to extend a disk.
:param disk_dvr: The storage driver.
:param context: nova context for operation.
:param instance: instance to extend the disk for.
:param disk_info: dictionary with disk info.
:param size: the new size in GB.
"""
self.disk_dvr = disk_dvr
self.context = context
self.instance = instance
self.disk_info = disk_info
self.size = size
super(ExtendDisk, self).__init__(name='extend_disk_%s' %
disk_info['type'])
def execute(self):
LOG.info(_LI('Extending disk size of disk: %(disk)s size: %(size)s.'),
{'disk': self.disk_info['type'], 'size': self.size})
self.disk_dvr.extend_disk(self.context, self.instance, self.disk_info,
self.size)

View File

@@ -89,6 +89,66 @@ class Create(task.Task):
return wrap
class Resize(task.Task):
"""The task for resizing an existing VM."""
def __init__(self, adapter, host_wrapper, instance, flavor, name=None):
"""Creates the Task to resize a VM.
Provides the 'lpar_wrap' for other tasks.
:param adapter: The adapter for the pypowervm API
:param host_wrapper: The managed system wrapper
:param instance: The nova instance.
:param flavor: The nova flavor.
:param name: VM name to use for the update. Used on resize when we
want to rename it but not use the instance name.
"""
super(Resize, self).__init__(name='resize_lpar',
provides='lpar_wrap')
self.adapter = adapter
self.host_wrapper = host_wrapper
self.instance = instance
self.flavor = flavor
self.vm_name = name
def execute(self):
LOG.info(_LI('Resizing instance: %s'), self.instance.name,
instance=self.instance)
wrap = vm.update(self.adapter, self.host_wrapper,
self.instance, self.flavor, entry=None,
name=self.vm_name)
return wrap
class Rename(task.Task):
"""The task for renaming an existing VM."""
def __init__(self, adapter, host_uuid, instance, name):
"""Creates the Task to rename a VM.
Provides the 'lpar_wrap' for other tasks.
:param adapter: The adapter for the pypowervm API
:param host_uuid: The managed system uuid
:param instance: The nova instance.
:param name: The new VM name.
"""
super(Rename, self).__init__(name='rename_lpar_%s' % name,
provides='lpar_wrap')
self.adapter = adapter
self.host_uuid = host_uuid
self.instance = instance
self.vm_name = name
def execute(self):
LOG.info(_LI('Renaming instance to name: %s'), self.vm_name,
instance=self.instance)
wrap = vm.rename(self.adapter, self.host_uuid, self.instance,
self.vm_name)
return wrap
class PowerOn(task.Task):
"""The task to power on the instance."""
@@ -171,7 +231,8 @@ class Delete(task.Task):
self.instance = instance
def execute(self):
LOG.info(_LI('Deleting instance %s from system.'), self.instance.name)
LOG.info(_LI('Deleting instance %s from system.'), self.instance.name,
instance=self.instance)
vm.dlt_lpar(self.adapter, self.lpar_uuid)

View File

@@ -490,7 +490,7 @@ def crt_lpar(adapter, host_wrapper, instance, flavor):
return lpar_w
def update(adapter, host_wrapper, instance, flavor, entry=None):
def update(adapter, host_wrapper, instance, flavor, entry=None, name=None):
"""Update an LPAR based on the host based on the instance
:param adapter: The adapter for the pypowervm API
@@ -499,6 +499,9 @@ def update(adapter, host_wrapper, instance, flavor, entry=None):
:param flavor: The nova flavor.
:param entry: The instance pvm entry, if available, otherwise it will
be fetched.
:param name: VM name to use for the update. Used on resize when we want
to rename it but not use the instance name.
:returns: The updated LPAR wrapper.
"""
if not entry:
@@ -507,8 +510,30 @@ def update(adapter, host_wrapper, instance, flavor, entry=None):
lpar_b = VMBuilder(host_wrapper, adapter).lpar_builder(instance, flavor)
lpar_b.rebuild(entry)
# Write out the new specs
entry.update()
# Set the new name if the instance name is not desired.
if name:
entry.name = name
# Write out the new specs, return the updated version
return entry.update()
def rename(adapter, host_uuid, instance, name, entry=None):
"""Rename a VM.
:param adapter: The adapter for the pypowervm API
:param host_uuid: The host UUID.
:param instance: The nova instance.
:param name: The new name.
:param entry: The instance pvm entry, if available, otherwise it will
be fetched.
:returns: The updated LPAR wrapper.
"""
if not entry:
entry = get_instance_wrapper(adapter, instance, host_uuid)
entry.name = name
return entry.update()
def dlt_lpar(adapter, lpar_uuid):