Snapshot In-tree Backports
This contains backports for code changes made during in-tree driver
instance snapshot development [1].

- Renames boot_disk_path_for_instance() to get_bootdisk_path() for clarity
- Makes disk_match_func() private and static
- Renames instance_disk_iter() to _get_bootdisk_iter() and removes
  unnecessary parameters
- Other minor cleanup

[1] https://review.openstack.org/#/c/543023/

Change-Id: I8f2eef1063b7e40c0cd71fc4adb04cbf6d35ece1
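For quick orientation, a minimal sketch of the renamed call sites. The disk
driver and instance below are unittest mocks standing in for the real
LocalStorage/SSPDiskAdapter and Instance objects, and the device name is made
up for illustration:

    # Illustrative only: mocks stand in for the real nova-powervm disk driver.
    from unittest import mock

    disk_dvr = mock.Mock()
    disk_dvr.get_bootdisk_path.return_value = '/dev/hdisk3'
    disk_dvr._get_bootdisk_iter.return_value = iter([('vdisk_wrap', 'vios_wrap')])

    inst = mock.Mock()

    # Formerly boot_disk_path_for_instance(inst, vios_uuid).
    dev_name = disk_dvr.get_bootdisk_path(inst, 'vios-uuid')

    # Formerly instance_disk_iter(inst, disk_type=..., lpar_wrap=...); the
    # renamed private iterator takes only the instance.
    for stg_elem, vios in disk_dvr._get_bootdisk_iter(inst):
        print(stg_elem, vios)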
@@ -1,4 +1,4 @@
-# Copyright 2015, 2017 IBM Corp.
+# Copyright IBM Corp. and contributors
 #
 # All Rights Reserved.
 #
@@ -362,7 +362,7 @@ class TestLocalDisk(test.NoDBTestCase):
 vios1.scsi_mappings[0].backing_storage.name = 'b_Name_Of__d506'
 return inst, lpar_wrap, vios1
 
-def test_boot_disk_path_for_instance(self):
+def test_get_bootdisk_path(self):
 local = self.get_ls(self.apt)
 inst = mock.Mock()
 inst.name = 'Name Of Instance'
@@ -371,17 +371,20 @@ class TestLocalDisk(test.NoDBTestCase):
 vios1.scsi_mappings[0].server_adapter.backing_dev_name = 'boot_7f81628'
 vios1.scsi_mappings[0].backing_storage.name = 'b_Name_Of__f921'
 self.mock_vios_get.return_value = vios1
-dev_name = local.boot_disk_path_for_instance(inst, vios1.uuid)
+dev_name = local.get_bootdisk_path(inst, vios1.uuid)
 self.assertEqual('boot_7f81628', dev_name)
 
+@mock.patch('nova_powervm.virt.powervm.vm.get_instance_wrapper',
+autospec=True)
 @mock.patch('pypowervm.wrappers.storage.VG.get', new=mock.Mock())
-def test_instance_disk_iter(self):
+def test_get_bootdisk_iter(self, mock_lpar_wrap):
 local = self.get_ls(self.apt)
 inst, lpar_wrap, vios1 = self._bld_mocks_for_instance_disk()
+mock_lpar_wrap.return_value = lpar_wrap
 
 # Good path
 self.mock_vios_get.return_value = vios1
-for vdisk, vios in local.instance_disk_iter(inst, lpar_wrap=lpar_wrap):
+for vdisk, vios in local._get_bootdisk_iter(inst):
 self.assertEqual(vios1.scsi_mappings[0].backing_storage, vdisk)
 self.assertEqual(vios1.uuid, vios.uuid)
 self.mock_vios_get.assert_called_once_with(
@@ -390,7 +393,7 @@ class TestLocalDisk(test.NoDBTestCase):
 # Not found because no storage of that name
 self.mock_vios_get.reset_mock()
 self.mock_find_maps.return_value = []
-for vdisk, vios in local.instance_disk_iter(inst, lpar_wrap=lpar_wrap):
+for vdisk, vios in local._get_bootdisk_iter(inst):
 self.fail()
 self.mock_vios_get.assert_called_once_with(
 self.apt, uuid='vios-uuid', xag=[pvm_const.XAG.VIO_SMAP])
@@ -1,4 +1,4 @@
-# Copyright 2015, 2017 IBM Corp.
+# Copyright IBM Corp. and contributors
 #
 # All Rights Reserved.
 #
@@ -519,7 +519,7 @@ class TestSSPDiskAdapter(test.NoDBTestCase):
 'vios_uuids', new_callable=mock.PropertyMock)
 @mock.patch('nova_powervm.virt.powervm.vm.get_instance_wrapper')
 @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.get')
-def test_instance_disk_iter(self, mock_vio_get, mock_lw, mock_vio_uuids):
+def test_get_bootdisk_iter(self, mock_vio_get, mock_lw, mock_vio_uuids):
 inst, lpar_wrap, vio1, vio2, vio3 = self._bld_mocks_for_instance_disk()
 mock_lw.return_value = lpar_wrap
 mock_vio_uuids.return_value = [1, 2]
@@ -528,7 +528,7 @@ class TestSSPDiskAdapter(test.NoDBTestCase):
 # Test with two VIOSes, both of which contain the mapping. Force the
 # method to get the lpar_wrap.
 mock_vio_get.side_effect = [vio1, vio2]
-idi = ssp_stor.instance_disk_iter(inst)
+idi = ssp_stor._get_bootdisk_iter(inst)
 lu, vios = next(idi)
 self.assertEqual('lu_udid', lu.udid)
 self.assertEqual('vios1', vios.name)
@@ -544,12 +544,11 @@ class TestSSPDiskAdapter(test.NoDBTestCase):
 mock_lw.assert_called_once_with(self.apt, inst)
 
 # Same, but prove that breaking out of the loop early avoids the second
-# get call. Supply lpar_wrap from here on, and prove no calls to
-# get_instance_wrapper
+# get call.
 mock_vio_get.reset_mock()
 mock_lw.reset_mock()
 mock_vio_get.side_effect = [vio1, vio2]
-for lu, vios in ssp_stor.instance_disk_iter(inst, lpar_wrap=lpar_wrap):
+for lu, vios in ssp_stor._get_bootdisk_iter(inst):
 self.assertEqual('lu_udid', lu.udid)
 self.assertEqual('vios1', vios.name)
 break
@@ -559,7 +558,7 @@ class TestSSPDiskAdapter(test.NoDBTestCase):
 # Now the first VIOS doesn't have the mapping, but the second does
 mock_vio_get.reset_mock()
 mock_vio_get.side_effect = [vio3, vio2]
-idi = ssp_stor.instance_disk_iter(inst, lpar_wrap=lpar_wrap)
+idi = ssp_stor._get_bootdisk_iter(inst)
 lu, vios = next(idi)
 self.assertEqual('lu_udid', lu.udid)
 self.assertEqual('vios2', vios.name)
@@ -572,12 +571,9 @@ class TestSSPDiskAdapter(test.NoDBTestCase):
 # No hits
 mock_vio_get.reset_mock()
 mock_vio_get.side_effect = [vio3, vio3]
-self.assertEqual([], list(ssp_stor.instance_disk_iter(
-inst, lpar_wrap=lpar_wrap)))
+self.assertEqual([], list(ssp_stor._get_bootdisk_iter(inst)))
 self.assertEqual(2, mock_vio_get.call_count)
 
-mock_lw.assert_not_called()
-
 @mock.patch('nova_powervm.virt.powervm.disk.ssp.SSPDiskAdapter.'
 'vios_uuids', new_callable=mock.PropertyMock)
 @mock.patch('nova_powervm.virt.powervm.vm.get_instance_wrapper')
@@ -1,4 +1,4 @@
-# Copyright 2015, 2018 IBM Corp.
+# Copyright IBM Corp. and contributors
 #
 # All Rights Reserved.
 #
@@ -44,7 +44,7 @@ class TestImage(test.NoDBTestCase):
 
 @mock.patch('nova_powervm.virt.powervm.image.stream_blockdev_to_glance',
 autospec=True)
-@mock.patch('nova_powervm.virt.powervm.image.snapshot_metadata',
+@mock.patch('nova_powervm.virt.powervm.image.generate_snapshot_metadata',
 autospec=True)
 def test_stream_to_glance(self, mock_metadata, mock_stream):
 mock_metadata.return_value = 'metadata'
@@ -1,4 +1,4 @@
-# Copyright 2015, 2018 IBM Corp.
+# Copyright IBM Corp. and contributors
 #
 # All Rights Reserved.
 #
@@ -214,7 +214,7 @@ class TestStorage(test.NoDBTestCase):
 # Good path - find_maps returns one result
 mock_find.return_value = ['one_mapping']
 tf = tf_stg.InstanceDiskToMgmt(disk_dvr, mock_instance)
-self.assertEqual('connect_and_discover_instance_disk_to_mgmt', tf.name)
+self.assertEqual('instance_disk_to_mgmt', tf.name)
 self.assertEqual((mock_stg, mock_vwrap, '/dev/disk'), tf.execute())
 disk_dvr.connect_instance_disk_to_mgmt.assert_called_with(
 mock_instance)
@@ -241,18 +241,18 @@ class TestStorage(test.NoDBTestCase):
 'stg_name')
 mock_rm.assert_called_with('/dev/disk')
 
-# Management Partition is VIOS and Novalink hosted storage
+# Management Partition is VIOS and NovaLink hosted storage
 reset_mocks()
 disk_dvr.vios_uuids = ['mp_uuid']
 dev_name = '/dev/vg/fake_name'
-disk_dvr.boot_disk_path_for_instance.return_value = dev_name
+disk_dvr.get_bootdisk_path.return_value = dev_name
 tf = tf_stg.InstanceDiskToMgmt(disk_dvr, mock_instance)
 self.assertEqual((None, None, dev_name), tf.execute())
 
-# Management Partition is VIOS and not Novalink hosted storage
+# Management Partition is VIOS and not NovaLink hosted storage
 reset_mocks()
 disk_dvr.vios_uuids = ['mp_uuid']
-disk_dvr.boot_disk_path_for_instance.return_value = None
+disk_dvr.get_bootdisk_path.return_value = None
 tf = tf_stg.InstanceDiskToMgmt(disk_dvr, mock_instance)
 tf.execute()
 disk_dvr.connect_instance_disk_to_mgmt.assert_called_with(
@@ -296,7 +296,7 @@ class TestStorage(test.NoDBTestCase):
 with mock.patch('taskflow.task.Task.__init__') as tf:
 tf_stg.InstanceDiskToMgmt(disk_dvr, mock_instance)
 tf.assert_called_once_with(
-name='connect_and_discover_instance_disk_to_mgmt',
+name='instance_disk_to_mgmt',
 provides=['stg_elem', 'vios_wrap', 'disk_path'])
 
 @mock.patch('nova_powervm.virt.powervm.mgmt.remove_block_dev',
@@ -1,4 +1,4 @@
-# Copyright 2015, 2017 IBM Corp.
+# Copyright IBM Corp. and contributors
 #
 # All Rights Reserved.
 #
@@ -42,12 +42,12 @@ class TestImage(test.NoDBTestCase):
 'mock_stream')
 
 @mock.patch('nova.image.api.API', autospec=True)
-def test_snapshot_metadata(self, mock_api):
+def test_generate_snapshot_metadata(self, mock_api):
 mock_api.get.return_value = {'name': 'image_name'}
 mock_instance = mock.Mock()
 mock_instance.project_id = 'project_id'
-ret = image.snapshot_metadata('context', mock_api, 'image_id',
+ret = image.generate_snapshot_metadata('context', mock_api, 'image_id',
 mock_instance)
 mock_api.get.assert_called_with('context', 'image_id')
 self.assertEqual({
 'name': 'image_name',
@@ -1,5 +1,5 @@
 # Copyright 2013 OpenStack Foundation
-# Copyright 2015, 2017 IBM Corp.
+# Copyright IBM Corp. and contributors
 #
 # All Rights Reserved.
 #
@@ -129,36 +129,26 @@ class DiskAdapter(object):
 return _('The configured disk driver does not support migration '
 'or resize.')
 
-def disk_match_func(self, disk_type, instance):
+def _disk_match_func(self, disk_type, instance):
 """Return a matching function to locate the disk for an instance.
 
 :param disk_type: One of the DiskType enum values.
 :param instance: The instance whose disk is to be found.
 :return: Callable suitable for the match_func parameter of the
-pypowervm.tasks.scsi_mapper.find_maps method, with the
-following specification:
-def match_func(storage_elem)
-param storage_elem: A backing storage element wrapper (VOpt,
-VDisk, PV, or LU) to be analyzed.
-return: True if the storage_elem's mapping should be included;
-False otherwise.
+pypowervm.tasks.scsi_mapper.find_maps method.
 """
 raise NotImplementedError()
 
-def boot_disk_path_for_instance(self, instance, vios_uuid):
-"""Find scsi mappings on given VIOS for the instance.
+def get_bootdisk_path(self, instance, vios_uuid):
+"""Find the local path for the instance's boot disk.
 
-This method finds all scsi mappings on a given vios that are associated
-with the instance and disk_type.
-
 :param instance: nova.objects.instance.Instance object owning the
 requested disk.
 :param vios_uuid: PowerVM UUID of the VIOS to search for mappings.
-:return: Iterator of scsi mappings that are associated with the
-instance and disk_type.
+:return: Local path for instance's boot disk.
 """
 vm_uuid = vm.get_pvm_uuid(instance)
-match_func = self.disk_match_func(DiskType.BOOT, instance)
+match_func = self._disk_match_func(DiskType.BOOT, instance)
 vios_wrap = pvm_vios.VIOS.get(self.adapter, uuid=vios_uuid,
 xag=[pvm_const.XAG.VIO_SMAP])
 maps = tsk_map.find_maps(vios_wrap.scsi_mappings,
@@ -167,27 +157,20 @@ class DiskAdapter(object):
 return maps[0].server_adapter.backing_dev_name
 return None
 
-def instance_disk_iter(self, instance, disk_type=DiskType.BOOT,
-lpar_wrap=None):
-"""Return the instance's storage element wrapper of the specified type.
+def _get_bootdisk_iter(self, instance):
+"""Return an iterator of (storage_elem, VIOS) tuples for the instance.
 
+This method returns an iterator of (storage_elem, VIOS) tuples, where
+storage_elem is a pypowervm storage element wrapper associated with
+the instance boot disk and VIOS is the wrapper of the Virtual I/O
+server owning that storage element.
+
 :param instance: nova.objects.instance.Instance object owning the
 requested disk.
-:param disk_type: The type of disk to find, one of the DiskType enum
-values.
-:param lpar_wrap: pypowervm.wrappers.logical_partition.LPAR
-corresponding to the instance. If not specified, it
-will be retrieved; i.e. specify this parameter to
-save on REST calls.
-:return: Iterator of tuples of (storage_elem, VIOS), where storage_elem
-is a storage element wrapper (pypowervm.wrappers.storage.VOpt,
-VDisk, PV, or LU) associated with the instance; and VIOS is
-the wrapper of the Virtual I/O Server owning that storage
-element.
+:return: Iterator of tuples of (storage_elem, VIOS).
 """
-if lpar_wrap is None:
-lpar_wrap = vm.get_instance_wrapper(self.adapter, instance)
-match_func = self.disk_match_func(disk_type, instance)
+lpar_wrap = vm.get_instance_wrapper(self.adapter, instance)
+match_func = self._disk_match_func(DiskType.BOOT, instance)
 for vios_uuid in self.vios_uuids:
 vios_wrap = pvm_vios.VIOS.get(
 self.adapter, uuid=vios_uuid, xag=[pvm_const.XAG.VIO_SMAP])
@@ -205,9 +188,7 @@ class DiskAdapter(object):
 made.
 :raise InstanceDiskMappingFailed: If the mapping could not be done.
 """
-lpar_wrap = vm.get_instance_wrapper(self.adapter, instance)
-for stg_elem, vios in self.instance_disk_iter(instance,
-lpar_wrap=lpar_wrap):
+for stg_elem, vios in self._get_bootdisk_iter(instance):
 msg_args = {'disk_name': stg_elem.name, 'vios_name': vios.name}
 
 # Create a new mapping. NOTE: If there's an existing mapping on
@@ -1,5 +1,5 @@
 # Copyright 2013 OpenStack Foundation
-# Copyright 2015, 2017 IBM Corp.
+# Copyright IBM Corp. and contributors
 #
 # All Rights Reserved.
 #
@@ -83,21 +83,17 @@ class LocalStorage(disk_dvr.DiskAdapter):
 """
 return [self._vios_uuid]
 
-def disk_match_func(self, disk_type, instance):
+@staticmethod
+def _disk_match_func(disk_type, instance):
 """Return a matching function to locate the disk for an instance.
 
 :param disk_type: One of the DiskType enum values.
 :param instance: The instance whose disk is to be found.
 :return: Callable suitable for the match_func parameter of the
-pypowervm.tasks.scsi_mapper.find_maps method, with the
-following specification:
-def match_func(storage_elem)
-param storage_elem: A backing storage element wrapper (VOpt,
-VDisk, PV, or LU) to be analyzed.
-return: True if the storage_elem's mapping should be included;
-False otherwise.
+pypowervm.tasks.scsi_mapper.find_maps method.
 """
-disk_name = self._get_disk_name(disk_type, instance, short=True)
+disk_name = LocalStorage._get_disk_name(disk_type, instance,
+short=True)
 return tsk_map.gen_match_func(pvm_stg.VDisk, names=[disk_name])
 
 @property
@@ -1,4 +1,4 @@
-# Copyright 2015, 2017 IBM Corp.
+# Copyright IBM Corp. and contributors
 #
 # All Rights Reserved.
 #
@@ -448,19 +448,14 @@ class SSPDiskAdapter(disk_drv.DiskAdapter):
 """
 return random.choice(self.vios_uuids)
 
-def disk_match_func(self, disk_type, instance):
+@staticmethod
+def _disk_match_func(disk_type, instance):
 """Return a matching function to locate the disk for an instance.
 
 :param disk_type: One of the DiskType enum values.
 :param instance: The instance whose disk is to be found.
 :return: Callable suitable for the match_func parameter of the
-pypowervm.tasks.scsi_mapper.find_maps method, with the
-following specification:
-def match_func(storage_elem)
-param storage_elem: A backing storage element wrapper (VOpt,
-VDisk, PV, or LU) to be analyzed.
-return: True if the storage_elem's mapping should be included;
-False otherwise.
+pypowervm.tasks.scsi_mapper.find_maps method.
 """
-disk_name = self._get_disk_name(disk_type, instance)
+disk_name = SSPDiskAdapter._get_disk_name(disk_type, instance)
 return tsk_map.gen_match_func(pvm_stg.LU, names=[disk_name])
@@ -1,4 +1,4 @@
-# Copyright 2014, 2018 IBM Corp.
+# Copyright IBM Corp. and contributors
 #
 # All Rights Reserved.
 #
@@ -800,20 +800,15 @@ class PowerVMDriver(driver.ComputeDriver):
 """Snapshots the specified instance.
 
 :param context: security context
-:param instance: Instance object as returned by DB layer.
-:param image_id: Reference to a pre-created image that will
-hold the snapshot.
-:param update_task_state: Callable to update the state of the snapshot
-task with one of the IMAGE_* consts from
-nova.compute.task_states. Call spec
-(inferred from compute driver source):
-update_task_state(task_state, expected_task_state=None)
-param task_state: The nova.compute.task_states.IMAGE_* state to
-set.
-param expected_state: The nova.compute.task_state.IMAGE_* state
-which should be in place before this
-update. The driver will raise if this
-doesn't match.
+:param instance: nova.objects.instance.Instance
+:param image_id: Reference to a pre-created image that will hold the
+snapshot.
+:param update_task_state: Callback function to update the task_state
+on the instance while the snapshot operation progresses. The
+function takes a task_state argument and an optional
+expected_task_state kwarg which defaults to
+nova.compute.task_states.IMAGE_SNAPSHOT. See
+nova.objects.instance.Instance.save for expected_task_state usage.
 """
 if not self.disk_dvr.capabilities.get('snapshot'):
 raise exception.NotSupportedWithOption(
@@ -1,4 +1,4 @@
-# Copyright 2015 IBM Corp.
+# Copyright IBM Corp. and contributors
 #
 # All Rights Reserved.
 #
@@ -47,14 +47,14 @@ def stream_blockdev_to_glance(context, image_api, image_id, metadata, devpath):
 image_api.update(context, image_id, metadata, stream)
 
 
-def snapshot_metadata(context, image_api, image_id, instance):
+def generate_snapshot_metadata(context, image_api, image_id, instance):
 """Generate a metadata dictionary for an instance snapshot.
 
 :param context: Nova security context
 :param image_api: Handle to the glance image API.
 :param image_id: UUID of the prepared glance image.
 :param instance: The Nova instance whose disk is to be snapshotted.
-:return: A dict of metadata suitable for image_api.upload.
+:return: A dict of metadata suitable for image_api.update.
 """
 image = image_api.get(context, image_id)
 metadata = {
@@ -1,4 +1,4 @@
-# Copyright 2015, 2017 IBM Corp.
+# Copyright IBM Corp. and contributors
 #
 # All Rights Reserved.
 #
@@ -23,9 +23,10 @@ aixlinux, but with the is_mgmt_partition property set to True.
 The PowerVM Nova Compute service runs on the management partition.
 """
 import glob
+import os
+
 from nova import exception
 from nova.privsep import path as priv_path
-import os
 from oslo_concurrency import lockutils
 from oslo_log import log as logging
 from pypowervm.tasks import partition as pvm_par
@@ -71,8 +72,6 @@ def discover_vscsi_disk(mapping, scan_timeout=300):
 :raise UniqueDiskDiscoveryException: If more than one disk appears with the
 expected UDID.
 """
-# TODO(IBM): Support for other host platforms.
-
 # Calculate the Linux slot number from the client adapter slot number.
 lslot = 0x30000000 | mapping.client_adapter.lpar_slot_num
 # We'll match the device ID based on the UDID, which is actually the last
@@ -1,4 +1,4 @@
-# Copyright 2015, 2018 IBM Corp.
+# Copyright IBM Corp. and contributors
 #
 # All Rights Reserved.
 #
@@ -72,8 +72,8 @@ class StreamToGlance(task.Task):
 requires='disk_path')
 
 def execute(self, disk_path):
-metadata = image.snapshot_metadata(self.context, self.image_api,
-self.image_id, self.instance)
+metadata = image.generate_snapshot_metadata(
+self.context, self.image_api, self.image_id, self.instance)
 LOG.info("Starting stream of boot device (local blockdev %(devpath)s) "
 "to glance image %(img_id)s.",
 {'devpath': disk_path, 'img_id': self.image_id},
@@ -1,4 +1,4 @@
-# Copyright 2015, 2018 IBM Corp.
+# Copyright IBM Corp. and contributors
 #
 # All Rights Reserved.
 #
@@ -14,9 +14,9 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-from pypowervm.tasks import scsi_mapper as pvm_smap
-
 from oslo_log import log as logging
+from pypowervm import exceptions as pvm_exc
+from pypowervm.tasks import scsi_mapper as pvm_smap
 from taskflow import task
 from taskflow.types import failure as task_fail
 
@@ -201,10 +201,11 @@ class ConnectDisk(task.Task):
 
 class InstanceDiskToMgmt(task.Task):
 
-"""Connect an instance's disk to the management partition, discover it.
+"""The Task to connect an instance's disk to the management partition.
 
-We do these two pieces together because their reversion doesn't happen in
-the opposite order.
+This task will connect the instance's disk to the management partition and
+discover it. We do these two pieces together because their reversion
+happens in the same order.
 """
 
 def __init__(self, disk_dvr, instance):
@@ -223,7 +224,7 @@ class InstanceDiskToMgmt(task.Task):
 :param instance: The nova instance whose boot disk is to be connected.
 """
 super(InstanceDiskToMgmt, self).__init__(
-name='connect_and_discover_instance_disk_to_mgmt',
+name='instance_disk_to_mgmt',
 provides=['stg_elem', 'vios_wrap', 'disk_path'])
 self.disk_dvr = disk_dvr
 self.instance = instance
@@ -234,9 +235,9 @@ class InstanceDiskToMgmt(task.Task):
 def execute(self):
 """Map the instance's boot disk and discover it."""
 
-# Search for boot disk on the Novalink partition
+# Search for boot disk on the NovaLink partition
 if self.disk_dvr.mp_uuid in self.disk_dvr.vios_uuids:
-dev_name = self.disk_dvr.boot_disk_path_for_instance(
+dev_name = self.disk_dvr.get_bootdisk_path(
 self.instance, self.disk_dvr.mp_uuid)
 if dev_name is not None:
 return None, None, dev_name
@@ -282,7 +283,12 @@ class InstanceDiskToMgmt(task.Task):
 return
 LOG.warning("Removing disk %(dpath)s from the management partition.",
 {'dpath': self.disk_path}, instance=self.instance)
-mgmt.remove_block_dev(self.disk_path)
+try:
+mgmt.remove_block_dev(self.disk_path)
+except pvm_exc.Error:
+# Don't allow revert exceptions to interrupt the revert flow.
+LOG.exception("Remove disk failed during revert. Ignoring.",
+instance=self.instance)
 
 
 class RemoveInstanceDiskFromMgmt(task.Task):
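Note: the shortened _disk_match_func docstrings above no longer spell out the
match_func contract they return. As a reference, a hedged standalone sketch of
a predicate with the shape pypowervm's find_maps expects; the drivers build
the real one with tsk_map.gen_match_func rather than a hand-rolled helper like
this:

    # Illustrative only: a hand-rolled predicate of the shape find_maps() accepts.
    def make_boot_disk_match_func(disk_name):
        def match_func(storage_elem):
            # True if this backing storage element's mapping should be included.
            return getattr(storage_elem, 'name', None) == disk_name
        return match_func


    class FakeVDisk(object):
        def __init__(self, name):
            self.name = name


    match = make_boot_disk_match_func('b_Name_Of__d506')
    print(match(FakeVDisk('b_Name_Of__d506')))  # True
    print(match(FakeVDisk('other_disk')))       # False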