PowerVM Driver: Snapshot

Add instance snapshot support for the PowerVM virt driver.

Blueprint: powervm-snapshot

Change-Id: I2691b09d95691915dc1065284d25ad22db41d32b
Author: esberglu
Date: 2018-02-09 13:59:08 -06:00
Parent: 2c5da2212c
Commit: 3bb59e393f
16 changed files with 1348 additions and 8 deletions

View File

@@ -97,7 +97,7 @@ xenserver=complete
vmware=unknown
hyperv=unknown
ironic=unknown
powervm=missing
powervm=complete
[operation.power-ops]
title=Server power ops

View File

@@ -673,7 +673,7 @@ driver-impl-hyperv=complete
driver-impl-ironic=missing
driver-impl-libvirt-vz-vm=complete
driver-impl-libvirt-vz-ct=complete
driver-impl-powervm=missing
driver-impl-powervm=complete
[operation.suspend]
title=Suspend instance

View File

@@ -2247,3 +2247,31 @@ class CannotMigrateToSameHost(NovaException):
class VirtDriverNotReady(NovaException):
msg_fmt = _("Virt driver is not ready.")
class InstanceDiskMappingFailed(NovaException):
msg_fmt = _("Failed to map boot disk of instance %(instance_name)s to "
"the management partition from any Virtual I/O Server.")
class NewMgmtMappingNotFoundException(NovaException):
msg_fmt = _("Failed to find newly-created mapping of storage element "
"%(stg_name)s from Virtual I/O Server %(vios_name)s to the "
"management partition.")
class NoDiskDiscoveryException(NovaException):
msg_fmt = _("Having scanned SCSI bus %(bus)x on the management partition, "
"disk with UDID %(udid)s failed to appear after %(polls)d "
"polls over %(timeout)d seconds.")
class UniqueDiskDiscoveryException(NovaException):
msg_fmt = _("Expected to find exactly one disk on the management "
"partition at %(path_pattern)s; found %(count)d.")
class DeviceDeletionException(NovaException):
msg_fmt = _("Device %(devpath)s is still present on the management "
"partition after attempting to delete it. Polled %(polls)d "
"times over %(timeout)d seconds.")

View File

@@ -1,4 +1,4 @@
# Copyright 2015, 2017 IBM Corp.
# Copyright 2015, 2018 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -89,6 +89,11 @@ class TestSSPDiskAdapter(test.NoDBTestCase):
self.pvm_uuid = self.useFixture(fixtures.MockPatch(
'nova.virt.powervm.vm.get_pvm_uuid')).mock
# Return the mgmt uuid
self.mgmt_uuid = self.useFixture(fixtures.MockPatch(
'nova.virt.powervm.mgmt.mgmt_uuid')).mock
self.mgmt_uuid.return_value = 'mp_uuid'
# The SSP disk adapter
self.ssp_drv = ssp_dvr.SSPDiskAdapter(self.apt, self.host_uuid)
@@ -117,6 +122,7 @@ class TestSSPDiskAdapter(test.NoDBTestCase):
def test_capabilities(self):
self.assertTrue(self.ssp_drv.capabilities.get('shared_storage'))
self.assertTrue(self.ssp_drv.capabilities.get('snapshot'))
@mock.patch('pypowervm.util.get_req_path_uuid', autospec=True)
def test_vios_uuids(self, mock_rpu):
@@ -158,7 +164,8 @@ class TestSSPDiskAdapter(test.NoDBTestCase):
@mock.patch('pypowervm.util.sanitize_file_name_for_api', autospec=True)
@mock.patch('pypowervm.tasks.storage.crt_lu', autospec=True)
@mock.patch('nova.image.api.API.download')
@mock.patch('nova.virt.powervm.disk.ssp.IterableToFileAdapter')
@mock.patch('nova.virt.powervm.disk.ssp.IterableToFileAdapter',
autospec=True)
def test_create_disk_from_image(self, mock_it2f, mock_dl, mock_crt_lu,
mock_san, mock_vuuid, mock_goru):
img = powervm.TEST_IMAGE1
@@ -253,3 +260,186 @@ class TestSSPDiskAdapter(test.NoDBTestCase):
client_lpar_id=self.pvm_uuid.return_value,
match_func=mock_gmf.return_value)
self.mock_ftsk.execute.assert_called_once_with()
@mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.get')
@mock.patch('pypowervm.tasks.scsi_mapper.find_maps', autospec=True)
@mock.patch('nova.virt.powervm.disk.ssp.SSPDiskAdapter._disk_match_func')
def test_get_bootdisk_path(self, mock_match_fn, mock_findmaps,
mock_vios):
mock_vios.return_value = self.vio_wrap
# No maps found
mock_findmaps.return_value = None
devname = self.ssp_drv.get_bootdisk_path('inst', 'vios_uuid')
mock_vios.assert_called_once_with(
self.apt, uuid='vios_uuid', xag=[pvm_const.XAG.VIO_SMAP])
mock_findmaps.assert_called_once_with(
self.vio_wrap.scsi_mappings,
client_lpar_id=self.pvm_uuid.return_value,
match_func=mock_match_fn.return_value)
self.assertIsNone(devname)
# Good map
mock_lu = mock.Mock()
mock_lu.server_adapter.backing_dev_name = 'devname'
mock_findmaps.return_value = [mock_lu]
devname = self.ssp_drv.get_bootdisk_path('inst', 'vios_uuid')
self.assertEqual('devname', devname)
@mock.patch('nova.virt.powervm.disk.ssp.SSPDiskAdapter.'
'_vios_uuids', new_callable=mock.PropertyMock)
@mock.patch('nova.virt.powervm.vm.get_instance_wrapper', autospec=True)
@mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.get')
@mock.patch('pypowervm.tasks.scsi_mapper.add_vscsi_mapping', autospec=True)
def test_connect_instance_disk_to_mgmt(self, mock_add, mock_vio_get,
mock_lw, mock_vio_uuids):
inst, lpar_wrap, vio1, vio2, vio3 = self._bld_mocks_for_instance_disk()
mock_lw.return_value = lpar_wrap
mock_vio_uuids.return_value = [1, 2]
# Test with two VIOSes, both of which contain the mapping
mock_vio_get.side_effect = [vio1, vio2]
lu, vios = self.ssp_drv.connect_instance_disk_to_mgmt(inst)
self.assertEqual('lu_udid', lu.udid)
# Should hit on the first VIOS
self.assertIs(vio1, vios)
mock_add.assert_called_once_with(self.host_uuid, vio1, 'mp_uuid', lu)
# Now the first VIOS doesn't have the mapping, but the second does
mock_add.reset_mock()
mock_vio_get.side_effect = [vio3, vio2]
lu, vios = self.ssp_drv.connect_instance_disk_to_mgmt(inst)
self.assertEqual('lu_udid', lu.udid)
# Should hit on the second VIOS
self.assertIs(vio2, vios)
self.assertEqual(1, mock_add.call_count)
mock_add.assert_called_once_with(self.host_uuid, vio2, 'mp_uuid', lu)
# No hits
mock_add.reset_mock()
mock_vio_get.side_effect = [vio3, vio3]
self.assertRaises(exception.InstanceDiskMappingFailed,
self.ssp_drv.connect_instance_disk_to_mgmt, inst)
self.assertEqual(0, mock_add.call_count)
# First add_vscsi_mapping call raises; the driver should swallow the
# pvm_exc.Error (the type it catches) and fall through to the next VIOS.
# (Assumes "from pypowervm import exceptions as pvm_exc" at module top.)
mock_add.reset_mock()
mock_vio_get.side_effect = [vio1, vio2]
mock_add.side_effect = [pvm_exc.Error("mapping failed"), None]
lu, vios = self.ssp_drv.connect_instance_disk_to_mgmt(inst)
# Should hit on the second VIOS
self.assertIs(vio2, vios)
self.assertEqual(2, mock_add.call_count)
@mock.patch('pypowervm.tasks.scsi_mapper.remove_lu_mapping', autospec=True)
def test_disconnect_disk_from_mgmt(self, mock_rm_lu_map):
self.ssp_drv.disconnect_disk_from_mgmt('vios_uuid', 'disk_name')
mock_rm_lu_map.assert_called_with(self.apt, 'vios_uuid',
'mp_uuid', disk_names=['disk_name'])
@mock.patch('pypowervm.tasks.scsi_mapper.gen_match_func', autospec=True)
@mock.patch('nova.virt.powervm.disk.ssp.SSPDiskAdapter._get_disk_name')
def test_disk_match_func(self, mock_disk_name, mock_gen_match):
mock_disk_name.return_value = 'disk_name'
self.ssp_drv._disk_match_func('disk_type', 'instance')
mock_disk_name.assert_called_once_with('disk_type', 'instance')
mock_gen_match.assert_called_with(pvm_stg.LU, names=['disk_name'])
@mock.patch("pypowervm.util.sanitize_file_name_for_api", autospec=True)
def test_get_disk_name(self, mock_san):
inst = mock.Mock()
inst.configure_mock(name='a_name_that_is_longer_than_eight',
uuid='01234567-abcd-abcd-abcd-123412341234')
# Long
self.assertEqual(mock_san.return_value,
self.ssp_drv._get_disk_name('type', inst))
mock_san.assert_called_with(inst.name, prefix='type_',
max_len=pvm_const.MaxLen.FILENAME_DEFAULT)
mock_san.reset_mock()
# Short
self.assertEqual(mock_san.return_value,
self.ssp_drv._get_disk_name('type', inst, short=True))
mock_san.assert_called_with('a_name_t_0123', prefix='t_',
max_len=pvm_const.MaxLen.VDISK_NAME)
@mock.patch('nova.virt.powervm.disk.ssp.SSPDiskAdapter.'
'_vios_uuids', new_callable=mock.PropertyMock)
@mock.patch('nova.virt.powervm.vm.get_instance_wrapper', autospec=True)
@mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.get')
def test_get_bootdisk_iter(self, mock_vio_get, mock_lw, mock_vio_uuids):
inst, lpar_wrap, vio1, vio2, vio3 = self._bld_mocks_for_instance_disk()
mock_lw.return_value = lpar_wrap
mock_vio_uuids.return_value = [1, 2]
# Test with two VIOSes, both of which contain the mapping. Force the
# method to get the lpar_wrap.
mock_vio_get.side_effect = [vio1, vio2]
idi = self.ssp_drv._get_bootdisk_iter(inst)
lu, vios = next(idi)
self.assertEqual('lu_udid', lu.udid)
self.assertEqual('vios1', vios.name)
mock_vio_get.assert_called_once_with(self.apt, uuid=1,
xag=[pvm_const.XAG.VIO_SMAP])
lu, vios = next(idi)
self.assertEqual('lu_udid', lu.udid)
self.assertEqual('vios2', vios.name)
mock_vio_get.assert_called_with(self.apt, uuid=2,
xag=[pvm_const.XAG.VIO_SMAP])
self.assertRaises(StopIteration, next, idi)
self.assertEqual(2, mock_vio_get.call_count)
mock_lw.assert_called_once_with(self.apt, inst)
# Same, but prove that breaking out of the loop early avoids the second
# get call. Supply lpar_wrap from here on, and prove no calls to
# get_instance_wrapper
mock_vio_get.reset_mock()
mock_lw.reset_mock()
mock_vio_get.side_effect = [vio1, vio2]
for lu, vios in self.ssp_drv._get_bootdisk_iter(inst):
self.assertEqual('lu_udid', lu.udid)
self.assertEqual('vios1', vios.name)
break
mock_vio_get.assert_called_once_with(self.apt, uuid=1,
xag=[pvm_const.XAG.VIO_SMAP])
# Now the first VIOS doesn't have the mapping, but the second does
mock_vio_get.reset_mock()
mock_vio_get.side_effect = [vio3, vio2]
idi = self.ssp_drv._get_bootdisk_iter(inst)
lu, vios = next(idi)
self.assertEqual('lu_udid', lu.udid)
self.assertEqual('vios2', vios.name)
mock_vio_get.assert_has_calls(
[mock.call(self.apt, uuid=uuid, xag=[pvm_const.XAG.VIO_SMAP])
for uuid in (1, 2)])
self.assertRaises(StopIteration, next, idi)
self.assertEqual(2, mock_vio_get.call_count)
# No hits
mock_vio_get.reset_mock()
mock_vio_get.side_effect = [vio3, vio3]
self.assertEqual([], list(self.ssp_drv._get_bootdisk_iter(inst)))
self.assertEqual(2, mock_vio_get.call_count)
def _bld_mocks_for_instance_disk(self):
inst = mock.Mock()
inst.name = 'my-instance-name'
lpar_wrap = mock.Mock()
lpar_wrap.id = 4
lu_wrap = mock.Mock(spec=pvm_stg.LU)
lu_wrap.configure_mock(name='boot_my_instance_name', udid='lu_udid')
smap = mock.Mock(backing_storage=lu_wrap,
server_adapter=mock.Mock(lpar_id=4))
# Build mock VIOS Wrappers as the returns from VIOS.wrap.
# vios1 and vios2 will both have the mapping for client ID 4 and LU
# named boot_my_instance_name.
smaps = [mock.Mock(), mock.Mock(), mock.Mock(), smap]
vios1 = mock.Mock(spec=pvm_vios.VIOS)
vios1.configure_mock(name='vios1', uuid='uuid1', scsi_mappings=smaps)
vios2 = mock.Mock(spec=pvm_vios.VIOS)
vios2.configure_mock(name='vios2', uuid='uuid2', scsi_mappings=smaps)
# vios3 will not have the mapping
vios3 = mock.Mock(spec=pvm_vios.VIOS)
vios3.configure_mock(name='vios3', uuid='uuid3',
scsi_mappings=[mock.Mock(), mock.Mock()])
return inst, lpar_wrap, vios1, vios2, vios3

View File

@@ -0,0 +1,68 @@
# Copyright 2015, 2018 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from nova import test
from nova.virt.powervm.tasks import image as tsk_img
class TestImage(test.TestCase):
def test_update_task_state(self):
def func(task_state, expected_state='delirious'):
self.assertEqual('task_state', task_state)
self.assertEqual('delirious', expected_state)
tf = tsk_img.UpdateTaskState(func, 'task_state')
self.assertEqual('update_task_state_task_state', tf.name)
tf.execute()
def func2(task_state, expected_state=None):
self.assertEqual('task_state', task_state)
self.assertEqual('expected_state', expected_state)
tf = tsk_img.UpdateTaskState(func2, 'task_state',
expected_state='expected_state')
tf.execute()
# Validate args on taskflow.task.Task instantiation
with mock.patch('taskflow.task.Task.__init__') as tf:
tsk_img.UpdateTaskState(func, 'task_state')
tf.assert_called_once_with(
name='update_task_state_task_state')
@mock.patch('nova.virt.powervm.image.stream_blockdev_to_glance',
autospec=True)
@mock.patch('nova.virt.powervm.image.generate_snapshot_metadata',
autospec=True)
def test_stream_to_glance(self, mock_metadata, mock_stream):
mock_metadata.return_value = 'metadata'
mock_inst = mock.Mock()
mock_inst.name = 'instance_name'
tf = tsk_img.StreamToGlance('context', 'image_api', 'image_id',
mock_inst)
self.assertEqual('stream_to_glance', tf.name)
tf.execute('disk_path')
mock_metadata.assert_called_with('context', 'image_api', 'image_id',
mock_inst)
mock_stream.assert_called_with('context', 'image_api', 'image_id',
'metadata', 'disk_path')
# Validate args on taskflow.task.Task instantiation
with mock.patch('taskflow.task.Task.__init__') as tf:
tsk_img.StreamToGlance(
'context', 'image_api', 'image_id', mock_inst)
tf.assert_called_once_with(
name='stream_to_glance', requires='disk_path')

View File

@@ -16,6 +16,7 @@ import fixtures
import mock
from pypowervm import exceptions as pvm_exc
from nova import exception
from nova import test
from nova.virt.powervm.tasks import storage as tf_stg
@@ -173,3 +174,149 @@ class TestStorage(test.NoDBTestCase):
self.disk_dvr, self.context, self.instance, image_meta)
tf.assert_called_once_with(
name='create_disk_from_img', provides='disk_dev_info')
@mock.patch('pypowervm.tasks.scsi_mapper.find_maps', autospec=True)
@mock.patch('nova.virt.powervm.mgmt.discover_vscsi_disk', autospec=True)
@mock.patch('nova.virt.powervm.mgmt.remove_block_dev', autospec=True)
def test_instance_disk_to_mgmt(self, mock_rm, mock_discover, mock_find):
mock_discover.return_value = '/dev/disk'
mock_instance = mock.Mock()
mock_instance.name = 'instance_name'
mock_stg = mock.Mock()
mock_stg.name = 'stg_name'
mock_vwrap = mock.Mock()
mock_vwrap.name = 'vios_name'
mock_vwrap.uuid = 'vios_uuid'
mock_vwrap.scsi_mappings = ['mapping1']
disk_dvr = mock.MagicMock()
disk_dvr.mp_uuid = 'mp_uuid'
disk_dvr.connect_instance_disk_to_mgmt.return_value = (mock_stg,
mock_vwrap)
def reset_mocks():
mock_find.reset_mock()
mock_discover.reset_mock()
mock_rm.reset_mock()
disk_dvr.reset_mock()
# Good path - find_maps returns one result
mock_find.return_value = ['one_mapping']
tf = tf_stg.InstanceDiskToMgmt(disk_dvr, mock_instance)
self.assertEqual('instance_disk_to_mgmt', tf.name)
self.assertEqual((mock_stg, mock_vwrap, '/dev/disk'), tf.execute())
disk_dvr.connect_instance_disk_to_mgmt.assert_called_with(
mock_instance)
mock_find.assert_called_with(['mapping1'], client_lpar_id='mp_uuid',
stg_elem=mock_stg)
mock_discover.assert_called_with('one_mapping')
tf.revert('result', 'failures')
disk_dvr.disconnect_disk_from_mgmt.assert_called_with('vios_uuid',
'stg_name')
mock_rm.assert_called_with('/dev/disk')
# Good path - find_maps returns >1 result
reset_mocks()
mock_find.return_value = ['first_mapping', 'second_mapping']
tf = tf_stg.InstanceDiskToMgmt(disk_dvr, mock_instance)
self.assertEqual((mock_stg, mock_vwrap, '/dev/disk'), tf.execute())
disk_dvr.connect_instance_disk_to_mgmt.assert_called_with(
mock_instance)
mock_find.assert_called_with(['mapping1'], client_lpar_id='mp_uuid',
stg_elem=mock_stg)
mock_discover.assert_called_with('first_mapping')
tf.revert('result', 'failures')
disk_dvr.disconnect_disk_from_mgmt.assert_called_with('vios_uuid',
'stg_name')
mock_rm.assert_called_with('/dev/disk')
# Management Partition is VIOS and NovaLink hosted storage
reset_mocks()
disk_dvr._vios_uuids = ['mp_uuid']
dev_name = '/dev/vg/fake_name'
disk_dvr.get_bootdisk_path.return_value = dev_name
tf = tf_stg.InstanceDiskToMgmt(disk_dvr, mock_instance)
self.assertEqual((None, None, dev_name), tf.execute())
# Management Partition is VIOS and not NovaLink hosted storage
reset_mocks()
disk_dvr._vios_uuids = ['mp_uuid']
disk_dvr.get_bootdisk_path.return_value = None
tf = tf_stg.InstanceDiskToMgmt(disk_dvr, mock_instance)
tf.execute()
disk_dvr.connect_instance_disk_to_mgmt.assert_called_with(
mock_instance)
# Bad path - find_maps returns no results
reset_mocks()
mock_find.return_value = []
tf = tf_stg.InstanceDiskToMgmt(disk_dvr, mock_instance)
self.assertRaises(exception.NewMgmtMappingNotFoundException,
tf.execute)
disk_dvr.connect_instance_disk_to_mgmt.assert_called_with(
mock_instance)
# find_maps was still called
mock_find.assert_called_with(['mapping1'], client_lpar_id='mp_uuid',
stg_elem=mock_stg)
# discover_vscsi_disk didn't get called
self.assertEqual(0, mock_discover.call_count)
tf.revert('result', 'failures')
# disconnect_disk_from_mgmt got called
disk_dvr.disconnect_disk_from_mgmt.assert_called_with('vios_uuid',
'stg_name')
# ...but remove_block_dev did not.
self.assertEqual(0, mock_rm.call_count)
# Bad path - connect raises
reset_mocks()
disk_dvr.connect_instance_disk_to_mgmt.side_effect = (
exception.InstanceDiskMappingFailed(instance_name='inst_name'))
tf = tf_stg.InstanceDiskToMgmt(disk_dvr, mock_instance)
self.assertRaises(exception.InstanceDiskMappingFailed, tf.execute)
disk_dvr.connect_instance_disk_to_mgmt.assert_called_with(
mock_instance)
self.assertEqual(0, mock_find.call_count)
self.assertEqual(0, mock_discover.call_count)
# revert shouldn't call disconnect or remove
tf.revert('result', 'failures')
self.assertEqual(0, disk_dvr.disconnect_disk_from_mgmt.call_count)
self.assertEqual(0, mock_rm.call_count)
# Validate args on taskflow.task.Task instantiation
with mock.patch('taskflow.task.Task.__init__') as tf:
tf_stg.InstanceDiskToMgmt(disk_dvr, mock_instance)
tf.assert_called_once_with(
name='instance_disk_to_mgmt',
provides=['stg_elem', 'vios_wrap', 'disk_path'])
@mock.patch('nova.virt.powervm.mgmt.remove_block_dev', autospec=True)
def test_remove_instance_disk_from_mgmt(self, mock_rm):
disk_dvr = mock.MagicMock()
mock_instance = mock.Mock()
mock_instance.name = 'instance_name'
mock_stg = mock.Mock()
mock_stg.name = 'stg_name'
mock_vwrap = mock.Mock()
mock_vwrap.name = 'vios_name'
mock_vwrap.uuid = 'vios_uuid'
tf = tf_stg.RemoveInstanceDiskFromMgmt(disk_dvr, mock_instance)
self.assertEqual('remove_inst_disk_from_mgmt', tf.name)
# Boot disk not mapped to mgmt partition
tf.execute(None, mock_vwrap, '/dev/disk')
self.assertEqual(disk_dvr.disconnect_disk_from_mgmt.call_count, 0)
self.assertEqual(mock_rm.call_count, 0)
# Boot disk mapped to mgmt partition
tf.execute(mock_stg, mock_vwrap, '/dev/disk')
disk_dvr.disconnect_disk_from_mgmt.assert_called_with('vios_uuid',
'stg_name')
mock_rm.assert_called_with('/dev/disk')
# Validate args on taskflow.task.Task instantiation
with mock.patch('taskflow.task.Task.__init__') as tf:
tf_stg.RemoveInstanceDiskFromMgmt(disk_dvr, mock_instance)
tf.assert_called_once_with(
name='remove_inst_disk_from_mgmt',
requires=['stg_elem', 'vios_wrap', 'disk_path'])

View File

@@ -290,6 +290,25 @@ class TestPowerVMDriver(test.NoDBTestCase):
self.drv.destroy, 'context', self.inst, [],
block_device_info={})
@mock.patch('nova.virt.powervm.tasks.image.UpdateTaskState.'
'execute', autospec=True)
@mock.patch('nova.virt.powervm.tasks.storage.InstanceDiskToMgmt.'
'execute', autospec=True)
@mock.patch('nova.virt.powervm.tasks.image.StreamToGlance.execute')
@mock.patch('nova.virt.powervm.tasks.storage.RemoveInstanceDiskFromMgmt.'
'execute')
def test_snapshot(self, mock_rm, mock_stream, mock_conn, mock_update):
self.drv.disk_dvr = mock.Mock()
self.drv.image_api = mock.Mock()
mock_conn.return_value = 'stg_elem', 'vios_wrap', 'disk_path'
self.drv.snapshot('context', self.inst, 'image_id',
'update_task_state')
self.assertEqual(2, mock_update.call_count)
self.assertEqual(1, mock_conn.call_count)
mock_stream.assert_called_once_with(disk_path='disk_path')
mock_rm.assert_called_once_with(
stg_elem='stg_elem', vios_wrap='vios_wrap', disk_path='disk_path')
def test_power_on(self):
self.drv.power_on('context', self.inst, 'network_info')
self.pwron.assert_called_once_with(self.adp, self.inst)

View File

@@ -0,0 +1,63 @@
# Copyright 2015, 2018 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import six
from nova import test
from nova.virt.powervm import image
if six.PY2:
_BUILTIN = '__builtin__'
else:
_BUILTIN = 'builtins'
class TestImage(test.TestCase):
@mock.patch('nova.utils.temporary_chown', autospec=True)
@mock.patch(_BUILTIN + '.open', autospec=True)
@mock.patch('nova.image.api.API', autospec=True)
def test_stream_blockdev_to_glance(self, mock_api, mock_open, mock_chown):
mock_open.return_value.__enter__.return_value = 'mock_stream'
image.stream_blockdev_to_glance('context', mock_api, 'image_id',
'metadata', '/dev/disk')
mock_chown.assert_called_with('/dev/disk')
mock_open.assert_called_with('/dev/disk', 'rb')
mock_api.update.assert_called_with('context', 'image_id', 'metadata',
'mock_stream')
@mock.patch('nova.image.api.API', autospec=True)
def test_generate_snapshot_metadata(self, mock_api):
mock_api.get.return_value = {'name': 'image_name'}
mock_instance = mock.Mock()
mock_instance.project_id = 'project_id'
ret = image.generate_snapshot_metadata('context', mock_api, 'image_id',
mock_instance)
mock_api.get.assert_called_with('context', 'image_id')
self.assertEqual({
'name': 'image_name',
'is_public': False,
'status': 'active',
'disk_format': 'raw',
'container_format': 'bare',
'properties': {
'image_location': 'snapshot',
'image_state': 'available',
'owner_id': 'project_id',
}
}, ret)

View File

@@ -0,0 +1,193 @@
# Copyright 2015, 2018 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import retrying
from nova import exception
from nova import test
from pypowervm.tests import test_fixtures as pvm_fx
from pypowervm.tests.test_utils import pvmhttp
from nova.virt.powervm import mgmt
LPAR_HTTPRESP_FILE = "lpar.txt"
class TestMgmt(test.TestCase):
def setUp(self):
super(TestMgmt, self).setUp()
self.apt = self.useFixture(pvm_fx.AdapterFx()).adpt
lpar_http = pvmhttp.load_pvm_resp(LPAR_HTTPRESP_FILE, adapter=self.apt)
self.assertIsNotNone(
lpar_http, "Could not load %s " % LPAR_HTTPRESP_FILE)
self.resp = lpar_http.response
@mock.patch('pypowervm.tasks.partition.get_this_partition', autospec=True)
def test_mgmt_uuid(self, mock_get_partition):
mock_get_partition.return_value = mock.Mock(uuid='mock_mgmt')
adpt = mock.Mock()
# First run should call the partition only once
self.assertEqual('mock_mgmt', mgmt.mgmt_uuid(adpt))
mock_get_partition.assert_called_once_with(adpt)
# But a subsequent call should effectively no-op
mock_get_partition.reset_mock()
self.assertEqual('mock_mgmt', mgmt.mgmt_uuid(adpt))
self.assertEqual(mock_get_partition.call_count, 0)
@mock.patch('glob.glob', autospec=True)
@mock.patch('nova.privsep.path.writefile', autospec=True)
@mock.patch('os.path.realpath', autospec=True)
def test_discover_vscsi_disk(self, mock_realpath, mock_writefile,
mock_glob):
scanpath = '/sys/bus/vio/devices/30000005/host*/scsi_host/host*/scan'
udid = ('275b5d5f88fa5611e48be9000098be9400'
'13fb2aa55a2d7b8d150cb1b7b6bc04d6')
devlink = ('/dev/disk/by-id/scsi-SIBM_3303_NVDISK' + udid)
mapping = mock.Mock()
mapping.client_adapter.lpar_slot_num = 5
mapping.backing_storage.udid = udid
# Realistically, first glob would return e.g. .../host0/.../host0/...
# but it doesn't matter for test purposes.
mock_glob.side_effect = [[scanpath], [devlink]]
mgmt.discover_vscsi_disk(mapping)
mock_glob.assert_has_calls(
[mock.call(scanpath), mock.call('/dev/disk/by-id/*' + udid[-32:])])
mock_writefile.assert_called_once_with(scanpath, 'a', '- - -')
mock_realpath.assert_called_with(devlink)
@mock.patch('retrying.retry', autospec=True)
@mock.patch('glob.glob', autospec=True)
@mock.patch('nova.privsep.path.writefile', autospec=True)
def test_discover_vscsi_disk_not_one_result(self, mock_writefile,
mock_glob, mock_retry):
"""Zero or more than one disk is found by discover_vscsi_disk."""
def validate_retry(kwargs):
self.assertIn('retry_on_result', kwargs)
self.assertEqual(250, kwargs['wait_fixed'])
self.assertEqual(300000, kwargs['stop_max_delay'])
def raiser(unused):
raise retrying.RetryError(mock.Mock(attempt_number=123))
def retry_passthrough(**kwargs):
validate_retry(kwargs)
def wrapped(_poll_for_dev):
return _poll_for_dev
return wrapped
def retry_timeout(**kwargs):
validate_retry(kwargs)
def wrapped(_poll_for_dev):
return raiser
return wrapped
udid = ('275b5d5f88fa5611e48be9000098be9400'
'13fb2aa55a2d7b8d150cb1b7b6bc04d6')
mapping = mock.Mock()
mapping.client_adapter.lpar_slot_num = 5
mapping.backing_storage.udid = udid
# No disks found
mock_retry.side_effect = retry_timeout
mock_glob.side_effect = lambda path: []
self.assertRaises(exception.NoDiskDiscoveryException,
mgmt.discover_vscsi_disk, mapping)
# Multiple disks found
mock_retry.side_effect = retry_passthrough
mock_glob.side_effect = [['path'], ['/dev/sde', '/dev/sdf']]
self.assertRaises(exception.UniqueDiskDiscoveryException,
mgmt.discover_vscsi_disk, mapping)
@mock.patch('time.sleep', autospec=True)
@mock.patch('os.path.realpath', autospec=True)
@mock.patch('os.stat', autospec=True)
@mock.patch('nova.privsep.path.writefile', autospec=True)
def test_remove_block_dev(self, mock_writefile, mock_stat, mock_realpath,
mock_sleep):
link = '/dev/link/foo'
realpath = '/dev/sde'
delpath = '/sys/block/sde/device/delete'
mock_realpath.return_value = realpath
# Good path
mock_stat.side_effect = (None, None, OSError())
mgmt.remove_block_dev(link)
mock_realpath.assert_called_with(link)
mock_stat.assert_has_calls([mock.call(realpath), mock.call(delpath),
mock.call(realpath)])
mock_writefile.assert_called_once_with(delpath, 'a', '1')
self.assertEqual(0, mock_sleep.call_count)
# Device param not found
mock_writefile.reset_mock()
mock_stat.reset_mock()
mock_stat.side_effect = (OSError(), None, None)
self.assertRaises(exception.InvalidDevicePath, mgmt.remove_block_dev,
link)
# stat was called once; exec was not called
self.assertEqual(1, mock_stat.call_count)
self.assertEqual(0, mock_writefile.call_count)
# Delete special file not found
mock_writefile.reset_mock()
mock_stat.reset_mock()
mock_stat.side_effect = (None, OSError(), None)
self.assertRaises(exception.InvalidDevicePath, mgmt.remove_block_dev,
link)
# stat was called twice; exec was not called
self.assertEqual(2, mock_stat.call_count)
self.assertEqual(0, mock_writefile.call_count)
@mock.patch('retrying.retry')
@mock.patch('os.path.realpath')
@mock.patch('os.stat')
@mock.patch('nova.privsep.path.writefile')
def test_remove_block_dev_timeout(self, mock_dacw, mock_stat,
mock_realpath, mock_retry):
def validate_retry(kwargs):
self.assertIn('retry_on_result', kwargs)
self.assertEqual(250, kwargs['wait_fixed'])
self.assertEqual(10000, kwargs['stop_max_delay'])
def raiser(unused):
raise retrying.RetryError(mock.Mock(attempt_number=123))
def retry_timeout(**kwargs):
validate_retry(kwargs)
def wrapped(_poll_for_del):
return raiser
return wrapped
# Deletion was attempted, but device is still there
link = '/dev/link/foo'
delpath = '/sys/block/sde/device/delete'
realpath = '/dev/sde'
mock_realpath.return_value = realpath
mock_stat.side_effect = lambda path: 1
mock_retry.side_effect = retry_timeout
self.assertRaises(
exception.DeviceDeletionException, mgmt.remove_block_dev, link)
mock_realpath.assert_called_once_with(link)
mock_dacw.assert_called_with(delpath, 'a', '1')
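The tests above stub out retrying.retry; as a standalone sketch, this is the polling contract mgmt.py builds with it (the _results list is a stand-in for successive glob() results):

# Poll while retry_on_result returns True, waiting wait_fixed ms between
# attempts; retrying.RetryError is raised once stop_max_delay ms elapse.
import retrying

_results = [[], [], ['/dev/sde']]  # stand-in for successive glob() results

@retrying.retry(retry_on_result=lambda result: not result,
                wait_fixed=250, stop_max_delay=10000)
def _poll():
    return _results.pop(0)

print(_poll())  # ['/dev/sde'] -- returned on the third attempt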

View File

@@ -1,4 +1,4 @@
# Copyright 2015, 2017 IBM Corp.
# Copyright 2015, 2018 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
@@ -24,9 +24,11 @@ from pypowervm.tasks import storage as tsk_stg
import pypowervm.util as pvm_u
import pypowervm.wrappers.cluster as pvm_clust
import pypowervm.wrappers.storage as pvm_stg
import pypowervm.wrappers.virtual_io_server as pvm_vios
from nova import exception
from nova import image
from nova.virt.powervm import mgmt
from nova.virt.powervm import vm
@@ -77,6 +79,7 @@ class SSPDiskAdapter(object):
capabilities = {
'shared_storage': True,
'snapshot': True,
}
def __init__(self, adapter, host_uuid):
@@ -87,6 +90,7 @@ class SSPDiskAdapter(object):
"""
self._adapter = adapter
self._host_uuid = host_uuid
self.mp_uuid = mgmt.mgmt_uuid(self._adapter)
try:
self._clust = pvm_clust.Cluster.get(self._adapter)[0]
self._ssp = pvm_stg.SSP.get_by_href(
@@ -135,7 +139,7 @@ class SSPDiskAdapter(object):
match_func=match_func)
# Remove the mapping from *each* VIOS on the LPAR's host.
# The LPAR's host has to be self.host_uuid, else the PowerVM API will
# The LPAR's host has to be self._host_uuid, else the PowerVM API will
# fail.
#
# Note - this may not be all the VIOSes on the system...just the ones
@@ -225,7 +229,7 @@ class SSPDiskAdapter(object):
return tsk_map.add_map(vios_w, mapping)
# Add the mapping to *each* VIOS on the LPAR's host.
# The LPAR's host has to be self.host_uuid, else the PowerVM API will
# The LPAR's host has to be self._host_uuid, else the PowerVM API will
# fail.
#
# Note: this may not be all the VIOSes on the system - just the ones
@@ -245,10 +249,128 @@
"""
ret = []
for n in self._clust.nodes:
# Skip any nodes that we don't have the vios uuid or uri
# Skip any nodes that we don't have the VIOS uuid or uri
if not (n.vios_uuid and n.vios_uri):
continue
if self._host_uuid == pvm_u.get_req_path_uuid(
n.vios_uri, preserve_case=True, root=True):
ret.append(n.vios_uuid)
return ret
def get_bootdisk_path(self, instance, vios_uuid):
"""Get the local path for an instance's boot disk.
:param instance: nova.objects.instance.Instance object owning the
requested disk.
:param vios_uuid: PowerVM UUID of the VIOS to search for mappings.
:return: Local path for instance's boot disk.
"""
vm_uuid = vm.get_pvm_uuid(instance)
match_func = self._disk_match_func(DiskType.BOOT, instance)
vios_wrap = pvm_vios.VIOS.get(self._adapter, uuid=vios_uuid,
xag=[pvm_const.XAG.VIO_SMAP])
maps = tsk_map.find_maps(vios_wrap.scsi_mappings,
client_lpar_id=vm_uuid, match_func=match_func)
if maps:
return maps[0].server_adapter.backing_dev_name
return None
def connect_instance_disk_to_mgmt(self, instance):
"""Connect an instance's boot disk to the management partition.
:param instance: The instance whose boot disk is to be mapped.
:return stg_elem: The storage element (LU, VDisk, etc.) that was mapped
:return vios: The EntryWrapper of the VIOS from which the mapping was
made.
:raise InstanceDiskMappingFailed: If the mapping could not be done.
"""
for stg_elem, vios in self._get_bootdisk_iter(instance):
msg_args = {'disk_name': stg_elem.name, 'vios_name': vios.name}
# Create a new mapping. NOTE: If there's an existing mapping on
# the other VIOS but not this one, we'll create a second mapping
# here. It would take an extreme sequence of events to get to that
# point, and the second mapping would be harmless anyway. The
# alternative would be always checking all VIOSes for existing
# mappings, which increases the response time of the common case by
# an entire GET of VIOS+VIO_SMAP.
LOG.debug("Mapping boot disk %(disk_name)s to the management "
"partition from Virtual I/O Server %(vios_name)s.",
msg_args, instance=instance)
try:
tsk_map.add_vscsi_mapping(self._host_uuid, vios, self.mp_uuid,
stg_elem)
# If that worked, we're done. add_vscsi_mapping logged.
return stg_elem, vios
except pvm_exc.Error:
LOG.exception("Failed to map boot disk %(disk_name)s to the "
"management partition from Virtual I/O Server "
"%(vios_name)s.", msg_args, instance=instance)
# Try the next hit, if available.
# We either didn't find the boot dev, or failed all attempts to map it.
raise exception.InstanceDiskMappingFailed(instance_name=instance.name)
def disconnect_disk_from_mgmt(self, vios_uuid, disk_name):
"""Disconnect a disk from the management partition.
:param vios_uuid: The UUID of the Virtual I/O Server serving the
mapping.
:param disk_name: The name of the disk to unmap.
"""
tsk_map.remove_lu_mapping(self._adapter, vios_uuid, self.mp_uuid,
disk_names=[disk_name])
LOG.info("Unmapped boot disk %(disk_name)s from the management "
"partition from Virtual I/O Server %(vios_uuid)s.",
{'disk_name': disk_name, 'mp_uuid': self.mp_uuid,
'vios_uuid': vios_uuid})
@staticmethod
def _disk_match_func(disk_type, instance):
"""Return a matching function to locate the disk for an instance.
:param disk_type: One of the DiskType enum values.
:param instance: The instance whose disk is to be found.
:return: Callable suitable for the match_func parameter of the
pypowervm.tasks.scsi_mapper.find_maps method.
"""
disk_name = SSPDiskAdapter._get_disk_name(disk_type, instance)
return tsk_map.gen_match_func(pvm_stg.LU, names=[disk_name])
@staticmethod
def _get_disk_name(disk_type, instance, short=False):
"""Generate a name for a virtual disk associated with an instance.
:param disk_type: One of the DiskType enum values.
:param instance: The instance for which the disk is to be created.
:param short: If True, the generated name will be limited to 15
characters (the limit for virtual disk). If False, it
will be limited by the API (79 characters currently).
:return: The sanitized file name for the disk.
"""
prefix = '%s_' % (disk_type[0] if short else disk_type)
base = ('%s_%s' % (instance.name[:8], instance.uuid[:4]) if short
else instance.name)
return pvm_u.sanitize_file_name_for_api(
base, prefix=prefix, max_len=pvm_const.MaxLen.VDISK_NAME if short
else pvm_const.MaxLen.FILENAME_DEFAULT)
def _get_bootdisk_iter(self, instance):
"""Return an iterator of (storage_elem, VIOS) tuples for the instance.
storage_elem is a pypowervm storage element wrapper associated with
the instance boot disk and VIOS is the wrapper of the Virtual I/O
server owning that storage element.
:param instance: nova.objects.instance.Instance object owning the
requested disk.
:return: Iterator of tuples of (storage_elem, VIOS).
"""
lpar_wrap = vm.get_instance_wrapper(self._adapter, instance)
match_func = self._disk_match_func(DiskType.BOOT, instance)
for vios_uuid in self._vios_uuids:
vios_wrap = pvm_vios.VIOS.get(
self._adapter, uuid=vios_uuid, xag=[pvm_const.XAG.VIO_SMAP])
for scsi_map in tsk_map.find_maps(
vios_wrap.scsi_mappings, client_lpar_id=lpar_wrap.id,
match_func=match_func):
yield scsi_map.backing_storage, vios_wrap
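A worked example of the _get_disk_name scheme, using the instance name and UUID from the unit test earlier in this change:

# Short names must fit the 15-character VDisk limit: 8 chars of instance
# name plus 4 of UUID, then the one-letter type prefix is applied by
# sanitize_file_name_for_api.
name = 'a_name_that_is_longer_than_eight'
uuid = '01234567-abcd-abcd-abcd-123412341234'
base = '%s_%s' % (name[:8], uuid[:4])
print(base)              # a_name_t_0123
print(len('t_' + base))  # 15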

View File

@@ -27,6 +27,7 @@ from pypowervm.wrappers import managed_system as pvm_ms
import six
from taskflow.patterns import linear_flow as tf_lf
from nova.compute import task_states
from nova import conf as cfg
from nova.console import type as console_type
from nova import exception as exc
@@ -37,6 +38,7 @@ from nova.virt import driver
from nova.virt.powervm.disk import ssp
from nova.virt.powervm import host as pvm_host
from nova.virt.powervm.tasks import base as tf_base
from nova.virt.powervm.tasks import image as tf_img
from nova.virt.powervm.tasks import network as tf_net
from nova.virt.powervm.tasks import storage as tf_stg
from nova.virt.powervm.tasks import vm as tf_vm
@@ -296,6 +298,51 @@ class PowerVMDriver(driver.ComputeDriver):
# Convert to a Nova exception
raise exc.InstanceTerminationFailure(reason=six.text_type(e))
def snapshot(self, context, instance, image_id, update_task_state):
"""Snapshots the specified instance.
:param context: security context
:param instance: nova.objects.instance.Instance
:param image_id: Reference to a pre-created image that will hold the
snapshot.
:param update_task_state: Callback function to update the task_state
on the instance while the snapshot operation progresses. The
function takes a task_state argument and an optional
expected_task_state kwarg which defaults to
nova.compute.task_states.IMAGE_SNAPSHOT. See
nova.objects.instance.Instance.save for expected_task_state usage.
"""
# TODO(esberglu) Add check for disk driver snapshot capability when
# additional disk drivers are implemented.
self._log_operation('snapshot', instance)
# Define the flow.
flow = tf_lf.Flow("snapshot")
# Notify that we're starting the process.
flow.add(tf_img.UpdateTaskState(update_task_state,
task_states.IMAGE_PENDING_UPLOAD))
# Connect the instance's boot disk to the management partition, and
# scan the scsi bus and bring the device into the management partition.
flow.add(tf_stg.InstanceDiskToMgmt(self.disk_dvr, instance))
# Notify that the upload is in progress.
flow.add(tf_img.UpdateTaskState(
update_task_state, task_states.IMAGE_UPLOADING,
expected_state=task_states.IMAGE_PENDING_UPLOAD))
# Stream the disk to glance.
flow.add(tf_img.StreamToGlance(context, self.image_api, image_id,
instance))
# Disconnect the boot disk from the management partition and delete the
# device.
flow.add(tf_stg.RemoveInstanceDiskFromMgmt(self.disk_dvr, instance))
# Run the flow.
tf_base.run(flow, instance=instance)
def power_off(self, instance, timeout=0, retry_interval=0):
"""Power off the specified instance.

View File

@@ -0,0 +1,61 @@
# Copyright 2015, 2018 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities related to glance image management for the PowerVM driver."""
from nova import utils
def stream_blockdev_to_glance(context, image_api, image_id, metadata, devpath):
"""Stream the entire contents of a block device to a glance image.
:param context: Nova security context.
:param image_api: Handle to the glance image API.
:param image_id: UUID of the prepared glance image.
:param metadata: Dictionary of metadata for the image.
:param devpath: String path to device file of block device to be uploaded,
e.g. "/dev/sde".
"""
# Make the device file owned by the current user for the duration of the
# operation.
with utils.temporary_chown(devpath), open(devpath, 'rb') as stream:
# Stream it. This is synchronous.
image_api.update(context, image_id, metadata, stream)
def generate_snapshot_metadata(context, image_api, image_id, instance):
"""Generate a metadata dictionary for an instance snapshot.
:param context: Nova security context.
:param image_api: Handle to the glance image API.
:param image_id: UUID of the prepared glance image.
:param instance: The Nova instance whose disk is to be snapshotted.
:return: A dict of metadata suitable for image_api.upload.
"""
image = image_api.get(context, image_id)
metadata = {
'name': image['name'],
'is_public': False,
'status': 'active',
'disk_format': 'raw',
'container_format': 'bare',
'properties': {
'image_location': 'snapshot',
'image_state': 'available',
'owner_id': instance.project_id,
}
}
return metadata
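A runnable usage sketch of the two helpers above, with mock stand-ins for the context, glance API handle, and instance:

import mock
from nova.virt.powervm import image

api = mock.Mock()
api.get.return_value = {'name': 'my-snapshot'}  # fake glance record
inst = mock.Mock(project_id='proj-1')

meta = image.generate_snapshot_metadata('ctx', api, 'img-uuid', inst)
print(meta['name'], meta['properties']['owner_id'])  # my-snapshot proj-1
# stream_blockdev_to_glance('ctx', api, 'img-uuid', meta, '/dev/sde')
# would then upload the device contents under that metadata.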

nova/virt/powervm/mgmt.py (new file, 175 lines)
View File

@@ -0,0 +1,175 @@
# Copyright 2015, 2018 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Utilities related to the PowerVM management partition.
The management partition is a special LPAR that runs the PowerVM REST API
service. It itself appears through the REST API as a LogicalPartition of type
aixlinux, but with the is_mgmt_partition property set to True.
The PowerVM Nova Compute service runs on the management partition.
"""
import glob
import os
from os import path
from oslo_concurrency import lockutils
from oslo_log import log as logging
from pypowervm.tasks import partition as pvm_par
import retrying
from nova import exception
from nova.privsep import path as priv_path
LOG = logging.getLogger(__name__)
_MP_UUID = None
@lockutils.synchronized("mgmt_lpar_uuid")
def mgmt_uuid(adapter):
"""Returns the management partitions UUID."""
global _MP_UUID
if not _MP_UUID:
_MP_UUID = pvm_par.get_this_partition(adapter).uuid
return _MP_UUID
def discover_vscsi_disk(mapping, scan_timeout=300):
"""Bring a mapped device into the management partition and find its name.
Based on a VSCSIMapping, scan the appropriate virtual SCSI host bus,
causing the operating system to discover the mapped device. Find and
return the path of the newly-discovered device based on its UDID in the
mapping.
Note: scanning the bus will cause the operating system to discover *all*
devices on that bus. However, this method will only return the path for
the specific device from the input mapping, based on its UDID.
:param mapping: The pypowervm.wrappers.virtual_io_server.VSCSIMapping
representing the mapping of the desired disk to the
management partition.
:param scan_timeout: The maximum number of seconds after scanning to wait
for the specified device to appear.
:return: The udev-generated ("/dev/sdX") name of the discovered disk.
:raise NoDiskDiscoveryException: If the disk did not appear after the
specified timeout.
:raise UniqueDiskDiscoveryException: If more than one disk appears with the
expected UDID.
"""
# Calculate the Linux slot number from the client adapter slot number.
lslot = 0x30000000 | mapping.client_adapter.lpar_slot_num
# We'll match the device ID based on the UDID, which is actually the last
# 32 chars of the field we get from PowerVM.
udid = mapping.backing_storage.udid[-32:]
LOG.debug("Trying to discover VSCSI disk with UDID %(udid)s on slot "
"%(slot)x.", {'udid': udid, 'slot': lslot})
# Find the special file to scan the bus, and scan it.
# This glob should yield exactly one result, but use the loop just in case.
for scanpath in glob.glob(
'/sys/bus/vio/devices/%x/host*/scsi_host/host*/scan' % lslot):
# Writing '- - -' to this sysfs file triggers bus rescan
priv_path.writefile(scanpath, 'a', '- - -')
# Now see if our device showed up. If so, we can reliably match it based
# on its Linux ID, which ends with the disk's UDID.
dpathpat = '/dev/disk/by-id/*%s' % udid
# The bus scan is asynchronous. Need to poll, waiting for the device to
# spring into existence. Stop when glob finds at least one device, or
# after the specified timeout. Sleep 1/4 second between polls.
@retrying.retry(retry_on_result=lambda result: not result, wait_fixed=250,
stop_max_delay=scan_timeout * 1000)
def _poll_for_dev(globpat):
return glob.glob(globpat)
try:
disks = _poll_for_dev(dpathpat)
except retrying.RetryError as re:
raise exception.NoDiskDiscoveryException(
bus=lslot, udid=udid, polls=re.last_attempt.attempt_number,
timeout=scan_timeout)
# If we get here, _poll_for_dev returned a nonempty list. If not exactly
# one entry, this is an error.
if len(disks) != 1:
raise exception.UniqueDiskDiscoveryException(path_pattern=dpathpat,
count=len(disks))
# The by-id path is a symlink. Resolve to the /dev/sdX path
dpath = path.realpath(disks[0])
LOG.debug("Discovered VSCSI disk with UDID %(udid)s on slot %(slot)x at "
"path %(devname)s.",
{'udid': udid, 'slot': lslot, 'devname': dpath})
return dpath
def remove_block_dev(devpath, scan_timeout=10):
"""Remove a block device from the management partition.
This method causes the operating system of the management partition to
delete the device special files associated with the specified block device.
:param devpath: Any path to the block special file associated with the
device to be removed.
:param scan_timeout: The maximum number of seconds after scanning to wait
for the specified device to disappear.
:raise InvalidDevicePath: If the specified device or its 'delete' special
file cannot be found.
:raise DeviceDeletionException: If the deletion was attempted, but the
device special file is still present
afterward.
"""
# Resolve symlinks, if any, to get to the /dev/sdX path
devpath = path.realpath(devpath)
try:
os.stat(devpath)
except OSError:
raise exception.InvalidDevicePath(path=devpath)
devname = devpath.rsplit('/', 1)[-1]
delpath = '/sys/block/%s/device/delete' % devname
try:
os.stat(delpath)
except OSError:
raise exception.InvalidDevicePath(path=delpath)
LOG.debug("Deleting block device %(devpath)s from the management "
"partition via special file %(delpath)s.",
{'devpath': devpath, 'delpath': delpath})
# Writing '1' to this sysfs file deletes the block device and rescans.
priv_path.writefile(delpath, 'a', '1')
# The bus scan is asynchronous. Need to poll, waiting for the device to
# disappear. Stop when stat raises OSError (dev file not found) - which is
# success - or after the specified timeout (which is failure). Sleep 1/4
# second between polls.
@retrying.retry(retry_on_result=lambda result: result, wait_fixed=250,
stop_max_delay=scan_timeout * 1000)
def _poll_for_del(statpath):
try:
os.stat(statpath)
return True
except OSError:
# Device special file is absent, as expected
return False
try:
_poll_for_del(devpath)
except retrying.RetryError as re:
# stat just kept returning (dev file continued to exist).
raise exception.DeviceDeletionException(
devpath=devpath, polls=re.last_attempt.attempt_number,
timeout=scan_timeout)
# Else stat raised - the device disappeared - all done.
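A worked example of the slot arithmetic in discover_vscsi_disk, matching the path asserted in the unit test earlier in this change:

# Client adapter slot 5 ORed into 0x30000000 yields the Linux vio bus
# address 0x30000005, which locates the sysfs scan file.
lslot = 0x30000000 | 5
print('%x' % lslot)  # 30000005
print('/sys/bus/vio/devices/%x/host*/scsi_host/host*/scan' % lslot)
# /sys/bus/vio/devices/30000005/host*/scsi_host/host*/scan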

View File

@@ -0,0 +1,81 @@
# Copyright 2015, 2018 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
from taskflow import task
from nova.virt.powervm import image
LOG = logging.getLogger(__name__)
class UpdateTaskState(task.Task):
def __init__(self, update_task_state, task_state, expected_state=None):
"""Invoke the update_task_state callback with the desired arguments.
:param update_task_state: update_task_state callable passed into
snapshot.
:param task_state: The new task state (from nova.compute.task_states)
to set.
:param expected_state: Optional. The expected state of the task prior
to this request.
"""
self.update_task_state = update_task_state
self.task_state = task_state
self.kwargs = {}
if expected_state is not None:
# We only want to pass expected state if it's not None! That's so
# we take the update_task_state method's default.
self.kwargs['expected_state'] = expected_state
super(UpdateTaskState, self).__init__(
name='update_task_state_%s' % task_state)
def execute(self):
self.update_task_state(self.task_state, **self.kwargs)
class StreamToGlance(task.Task):
"""Task around streaming a block device to glance."""
def __init__(self, context, image_api, image_id, instance):
"""Initialize the flow for streaming a block device to glance.
Requires: disk_path: Path to the block device file for the instance's
boot disk.
:param context: Nova security context.
:param image_api: Handle to the glance API.
:param image_id: UUID of the prepared glance image.
:param instance: Instance whose backing device is being captured.
"""
self.context = context
self.image_api = image_api
self.image_id = image_id
self.instance = instance
super(StreamToGlance, self).__init__(name='stream_to_glance',
requires='disk_path')
def execute(self, disk_path):
metadata = image.generate_snapshot_metadata(
self.context, self.image_api, self.image_id, self.instance)
LOG.info("Starting stream of boot device (local blockdev %(devpath)s) "
"to glance image %(img_id)s.",
{'devpath': disk_path, 'img_id': self.image_id},
instance=self.instance)
image.stream_blockdev_to_glance(self.context, self.image_api,
self.image_id, metadata, disk_path)
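A small sketch of how UpdateTaskState drives the callback; the callback below is illustrative, not nova's compute-manager plumbing:

from nova.virt.powervm.tasks import image as tsk_img

def update_task_state(task_state, expected_state=None):
    print('%s (expected: %s)' % (task_state, expected_state))

# expected_state is forwarded only when explicitly given, so the
# callback's own default applies otherwise.
tsk_img.UpdateTaskState(update_task_state, 'image_uploading').execute()
# image_uploading (expected: None)
tsk_img.UpdateTaskState(update_task_state, 'image_uploading',
                        expected_state='image_pending_upload').execute()
# image_uploading (expected: image_pending_upload)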

View File

@@ -14,10 +14,13 @@
from oslo_log import log as logging
from pypowervm import exceptions as pvm_exc
from pypowervm.tasks import scsi_mapper as pvm_smap
from taskflow import task
from taskflow.types import failure as task_fail
from nova import exception
from nova.virt.powervm import media
from nova.virt.powervm import mgmt
LOG = logging.getLogger(__name__)
@@ -205,3 +208,142 @@ class DeleteVOpt(task.Task):
def execute(self):
media_builder = media.ConfigDrivePowerVM(self.adapter)
media_builder.dlt_vopt(self.instance, stg_ftsk=self.stg_ftsk)
class InstanceDiskToMgmt(task.Task):
"""The task to connect an instance's disk to the management partition."
This task will connect the instance's disk to the management partition and
discover it. We do these two pieces together because their reversion
happens in the same order.
"""
def __init__(self, disk_dvr, instance):
"""Create the Task for connecting boot disk to mgmt partition.
Provides:
stg_elem: The storage element wrapper (pypowervm LU, PV, etc.) that was
connected.
vios_wrap: The Virtual I/O Server wrapper from which the storage
element was mapped.
disk_path: The local path to the mapped-and-discovered device, e.g.
'/dev/sde'.
:param disk_dvr: The disk driver.
:param instance: The nova instance whose boot disk is to be connected.
"""
super(InstanceDiskToMgmt, self).__init__(
name='instance_disk_to_mgmt',
provides=['stg_elem', 'vios_wrap', 'disk_path'])
self.disk_dvr = disk_dvr
self.instance = instance
self.stg_elem = None
self.vios_wrap = None
self.disk_path = None
def execute(self):
"""Map the instance's boot disk and discover it."""
# Search for boot disk on the NovaLink partition.
if self.disk_dvr.mp_uuid in self.disk_dvr._vios_uuids:
dev_name = self.disk_dvr.get_bootdisk_path(
self.instance, self.disk_dvr.mp_uuid)
if dev_name is not None:
return None, None, dev_name
self.stg_elem, self.vios_wrap = (
self.disk_dvr.connect_instance_disk_to_mgmt(self.instance))
new_maps = pvm_smap.find_maps(
self.vios_wrap.scsi_mappings, client_lpar_id=self.disk_dvr.mp_uuid,
stg_elem=self.stg_elem)
if not new_maps:
raise exception.NewMgmtMappingNotFoundException(
stg_name=self.stg_elem.name, vios_name=self.vios_wrap.name)
# new_maps should be length 1, but even if it's not - i.e. we somehow
# matched more than one mapping of the same dev to the management
# partition from the same VIOS - it is safe to use the first one.
mapping = new_maps[0]
# Scan the SCSI bus, discover the disk, find its canonical path.
LOG.info("Discovering device and path for mapping of %(dev_name)s "
"on the management partition.",
{'dev_name': self.stg_elem.name}, instance=self.instance)
self.disk_path = mgmt.discover_vscsi_disk(mapping)
return self.stg_elem, self.vios_wrap, self.disk_path
def revert(self, result, flow_failures):
"""Unmap the disk and then remove it from the management partition.
We use this order to avoid rediscovering the device in case some other
thread scans the SCSI bus between when we remove and when we unmap.
"""
if self.vios_wrap is None or self.stg_elem is None:
# We never even got connected - nothing to do.
return
LOG.warning("Unmapping boot disk %(disk_name)s from the management "
"partition via Virtual I/O Server %(vioname)s.",
{'disk_name': self.stg_elem.name,
'vioname': self.vios_wrap.name}, instance=self.instance)
self.disk_dvr.disconnect_disk_from_mgmt(self.vios_wrap.uuid,
self.stg_elem.name)
if self.disk_path is None:
# We did not discover the disk - nothing else to do.
return
LOG.warning("Removing disk %(dpath)s from the management partition.",
{'dpath': self.disk_path}, instance=self.instance)
try:
mgmt.remove_block_dev(self.disk_path)
except pvm_exc.Error:
# Don't allow revert exceptions to interrupt the revert flow.
LOG.exception("Remove disk failed during revert. Ignoring.",
instance=self.instance)
class RemoveInstanceDiskFromMgmt(task.Task):
"""Unmap and remove an instance's boot disk from the mgmt partition."""
def __init__(self, disk_dvr, instance):
"""Create task to unmap and remove an instance's boot disk from mgmt.
Requires (from InstanceDiskToMgmt):
stg_elem: The storage element wrapper (pypowervm LU, PV, etc.) that was
connected.
vios_wrap: The Virtual I/O Server wrapper.
(pypowervm.wrappers.virtual_io_server.VIOS) from which the
storage element was mapped.
disk_path: The local path to the mapped-and-discovered device, e.g.
'/dev/sde'.
:param disk_dvr: The disk driver.
:param instance: The nova instance whose boot disk is to be connected.
"""
self.disk_dvr = disk_dvr
self.instance = instance
super(RemoveInstanceDiskFromMgmt, self).__init__(
name='remove_inst_disk_from_mgmt',
requires=['stg_elem', 'vios_wrap', 'disk_path'])
def execute(self, stg_elem, vios_wrap, disk_path):
"""Unmap and remove an instance's boot disk from the mgmt partition.
Input parameters ('requires') provided by InstanceDiskToMgmt task.
:param stg_elem: The storage element wrapper (pypowervm LU, PV, etc.)
to be disconnected.
:param vios_wrap: The Virtual I/O Server wrapper from which the
mapping is to be removed.
:param disk_path: The local path to the disk device to be removed, e.g.
'/dev/sde'
"""
# stg_elem is None if boot disk was not mapped to management partition.
if stg_elem is None:
return
LOG.info("Unmapping boot disk %(disk_name)s from the management "
"partition via Virtual I/O Server %(vios_name)s.",
{'disk_name': stg_elem.name, 'vios_name': vios_wrap.name},
instance=self.instance)
self.disk_dvr.disconnect_disk_from_mgmt(vios_wrap.uuid, stg_elem.name)
LOG.info("Removing disk %(disk_path)s from the management partition.",
{'disk_path': disk_path}, instance=self.instance)
mgmt.remove_block_dev(disk_path)
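For context, taskflow invokes revert() on every completed task when a later task in the flow fails; a standalone sketch of that behavior (the classes here are invented for illustration):

import taskflow.engines
from taskflow import task
from taskflow.patterns import linear_flow as tf_lf

class MapDisk(task.Task):
    def execute(self):
        print('mapped')
    def revert(self, *args, **kwargs):
        # Runs when any later task fails, as InstanceDiskToMgmt.revert does.
        print('unmapped and removed')

class Upload(task.Task):
    def execute(self):
        raise RuntimeError('upload failed')

try:
    taskflow.engines.run(tf_lf.Flow('f').add(MapDisk(), Upload()))
except RuntimeError:
    pass  # output: mapped, then unmapped and removed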

View File

@@ -0,0 +1,4 @@
---
features:
- |
The PowerVM virt driver now supports instance snapshot.