diff --git a/nova_powervm/objects/__init__.py b/nova_powervm/objects/__init__.py new file mode 100644 index 00000000..23621b3e --- /dev/null +++ b/nova_powervm/objects/__init__.py @@ -0,0 +1,19 @@ +# Copyright 2016 IBM Corp. +# +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +def register_all(): + __import__('nova_powervm.objects.migrate_data') diff --git a/nova_powervm/objects/migrate_data.py b/nova_powervm/objects/migrate_data.py new file mode 100644 index 00000000..e00de247 --- /dev/null +++ b/nova_powervm/objects/migrate_data.py @@ -0,0 +1,36 @@ +# Copyright 2016 IBM Corp. +# +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from nova.objects import base as obj_base +from nova.objects import fields +from nova.objects import migrate_data + + +@obj_base.NovaObjectRegistry.register +class PowerVMLiveMigrateData(migrate_data.LiveMigrateData): + # Version 1.0: Initial version + VERSION = '1.0' + + fields = { + 'host_mig_data': fields.DictOfNullableStringsField(), + 'dest_ip': fields.StringField(), + 'dest_user_id': fields.StringField(), + 'dest_sys_name': fields.StringField(), + 'public_key': fields.StringField(), + 'dest_proc_compat': fields.StringField(), + 'vol_data': fields.DictOfNullableStringsField(), + } diff --git a/nova_powervm/tests/objects/__init__.py b/nova_powervm/tests/objects/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/nova_powervm/tests/objects/test_migrate_data.py b/nova_powervm/tests/objects/test_migrate_data.py new file mode 100644 index 00000000..7f9aa7b4 --- /dev/null +++ b/nova_powervm/tests/objects/test_migrate_data.py @@ -0,0 +1,28 @@ +# Copyright 2016 IBM Corp. +# +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
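
The PowerVMLiveMigrateData object introduced above is a versioned NovaObject whose DictOfNullableStringsField entries may only hold string (or None) values, which is why list-valued data is JSON-encoded elsewhere in this change. The following is a minimal sketch, not part of the patch, of how such an object would be populated and round-tripped through its RPC primitive form; the field values are hypothetical, and obj_to_primitive/obj_from_primitive are the serialization hooks inherited from the base NovaObject:

    from oslo_serialization import jsonutils

    from nova_powervm import objects
    from nova_powervm.objects import migrate_data as mig_obj

    # Importing/registering the object makes it known to the Nova registry.
    objects.register_all()

    data = mig_obj.PowerVMLiveMigrateData()
    data.dest_ip = '9.1.2.3'                 # hypothetical values
    data.dest_proc_compat = 'POWER7,POWER8'
    # DictOfNullableStringsField values must be strings (or None), so
    # structured data such as slot lists is JSON-encoded before storage.
    data.vol_data = {'src_npiv_fabric_slots_A': jsonutils.dumps([1, 2])}

    # The primitive form is what travels between hosts over RPC.
    primitive = data.obj_to_primitive()
    restored = mig_obj.PowerVMLiveMigrateData.obj_from_primitive(primitive)
    assert restored.dest_ip == '9.1.2.3'
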
+ +from nova import test + +from nova_powervm.objects import migrate_data + + +class TestMigrateData(test.TestCase): + + def test_migrate_data(self): + + data = migrate_data.PowerVMLiveMigrateData() + data.public_key = 'key' + self.assertEqual(data.public_key, 'key') diff --git a/nova_powervm/tests/virt/powervm/test_live_migration.py b/nova_powervm/tests/virt/powervm/test_live_migration.py index 1b82a1da..3e6cd794 100644 --- a/nova_powervm/tests/virt/powervm/test_live_migration.py +++ b/nova_powervm/tests/virt/powervm/test_live_migration.py @@ -15,12 +15,16 @@ # under the License. # +from __future__ import absolute_import + +import fixtures import mock from nova import exception from nova import objects from nova import test +from nova_powervm.objects import migrate_data as mig_obj from nova_powervm.tests.virt import powervm from nova_powervm.tests.virt.powervm import fixtures as fx from nova_powervm.virt.powervm import live_migration as lpm @@ -36,9 +40,25 @@ class TestLPM(test.TestCase): self.apt = self.drv.adapter self.inst = objects.Instance(**powervm.TEST_INSTANCE) - self.lpmsrc = lpm.LiveMigrationSrc(self.drv, self.inst, {}) + + self.mig_data = mig_obj.PowerVMLiveMigrateData() + self.mig_data.host_mig_data = {} + self.mig_data.dest_ip = '1' + self.mig_data.dest_user_id = 'neo' + self.mig_data.dest_sys_name = 'a' + self.mig_data.public_key = 'PublicKey' + self.mig_data.dest_proc_compat = 'a,b,c' + self.mig_data.vol_data = {} + + self.lpmsrc = lpm.LiveMigrationSrc(self.drv, self.inst, self.mig_data) self.lpmdst = lpm.LiveMigrationDest(self.drv, self.inst) + self.add_key = self.useFixture(fixtures.MockPatch( + 'pypowervm.tasks.management_console.add_authorized_key')).mock + self.get_key = self.useFixture(fixtures.MockPatch( + 'pypowervm.tasks.management_console.get_public_key')).mock + self.get_key.return_value = 'PublicKey' + @mock.patch('pypowervm.tasks.storage.ScrubOrphanStorageForLpar') @mock.patch('nova_powervm.virt.powervm.media.ConfigDrivePowerVM') @mock.patch('nova_powervm.virt.powervm.vm.get_instance_wrapper') @@ -55,7 +75,6 @@ class TestLPM(test.TestCase): self.lpmsrc, '_check_migration_ready', return_value=None): # Test the bad path first, then patch in values to make succeed - self.lpmsrc.dest_data = {'dest_proc_compat': 'a,b,c'} mock_wrap = mock.Mock(id=123) mock_get_wrap.return_value = mock_wrap @@ -89,7 +108,7 @@ class TestLPM(test.TestCase): # And ensure the scrubber was executed mock_scrub.return_value.execute.assert_called_once_with() mock_vol_drv.pre_live_migration_on_source.assert_called_once_with( - {'public_key': None}) + {}) # Ensure migration counts are validated migr_data['active_migrations_in_progress'] = 4 @@ -133,40 +152,34 @@ class TestLPM(test.TestCase): @mock.patch('pypowervm.tasks.storage.ComprehensiveScrub') def test_pre_live_mig(self, mock_scrub): - mock_vol_drv = mock.MagicMock() + vol_drv = mock.MagicMock() resp = self.lpmdst.pre_live_migration( 'context', 'block_device_info', 'network_info', 'disk_info', - {}, [mock_vol_drv]) + self.mig_data, [vol_drv]) # Make sure we get something back, and that the volume driver was # invoked. self.assertIsNotNone(resp) - mock_vol_drv.pre_live_migration_on_destination.assert_called_once_with( - {}, {}) + vol_drv.pre_live_migration_on_destination.assert_called_once_with( + self.mig_data.vol_data) self.assertEqual(1, mock_scrub.call_count) - # Ensure we save the data for later use. 
- self.assertIsNotNone(getattr(self.lpmdst, 'pre_live_data', None)) + self.add_key.assert_called_once_with(self.apt, 'PublicKey') - @mock.patch('pypowervm.tasks.management_console.add_authorized_key') - def test_pre_live_mig2(self, mock_add_key): - vol_drv = mock.Mock() + vol_drv.reset_mock() raising_vol_drv = mock.Mock() raising_vol_drv.pre_live_migration_on_destination.side_effect = ( Exception('foo')) self.assertRaises( exception.MigrationPreCheckError, self.lpmdst.pre_live_migration, 'context', 'block_device_info', 'network_info', 'disk_info', - {'migrate_data': {'public_key': 'abc123'}}, - [vol_drv, raising_vol_drv]) - mock_add_key.assert_called_once_with(self.apt, 'abc123') - vol_drv.pre_live_migration_on_destination.assert_called_once_with( - {'public_key': 'abc123'}, {}) + self.mig_data, [vol_drv, raising_vol_drv]) + vol_drv.pre_live_migration_on_destination.assert_called_once_with({}) (raising_vol_drv.pre_live_migration_on_destination. - assert_called_once_with({'public_key': 'abc123'}, {})) + assert_called_once_with({})) def test_src_cleanup(self): vol_drv = mock.Mock() - self.lpmdst.pre_live_data = {} + self.lpmdst.pre_live_vol_data = {} self.lpmdst.cleanup_volume(vol_drv) # Ensure the volume driver was called to clean up the volume. vol_drv.cleanup_volume_at_destination.assert_called_once_with({}) @@ -175,15 +188,13 @@ class TestLPM(test.TestCase): def test_live_migration(self, mock_migr): self.lpmsrc.lpar_w = mock.Mock() - self.lpmsrc.dest_data = dict( - dest_sys_name='a', dest_ip='1', dest_user_id='neo') - self.lpmsrc.live_migration('context', {}) + self.lpmsrc.live_migration('context', self.mig_data) mock_migr.called_once_with('context') # Test that we raise errors received during migration mock_migr.side_effect = ValueError() self.assertRaises(ValueError, self.lpmsrc.live_migration, 'context', - {}) + self.mig_data) mock_migr.called_once_with('context') def test_post_live_mig_src(self): @@ -221,7 +232,7 @@ class TestLPM(test.TestCase): lpar_w, host_w = mock.Mock(), mock.Mock() lpar_w.can_lpm.return_value = (True, None) self.lpmsrc._check_migration_ready(lpar_w, host_w) - lpar_w.can_lpm.assert_called_once_with(host_w, migr_data=None) + lpar_w.can_lpm.assert_called_once_with(host_w, migr_data={}) lpar_w.can_lpm.return_value = (False, 'This is the reason message.') self.assertRaises(exception.MigrationPreCheckError, diff --git a/nova_powervm/tests/virt/powervm/volume/test_npiv.py b/nova_powervm/tests/virt/powervm/volume/test_npiv.py index 852182c3..e83f4372 100644 --- a/nova_powervm/tests/virt/powervm/volume/test_npiv.py +++ b/nova_powervm/tests/virt/powervm/volume/test_npiv.py @@ -17,6 +17,7 @@ import mock from nova.compute import task_states +from oslo_serialization import jsonutils from pypowervm import const as pvm_const from pypowervm.tests import test_fixtures as pvm_fx from pypowervm.tests.test_utils import pvmhttp @@ -460,8 +461,11 @@ class TestNPIVAdapter(test_vol.TestVolumeAdapter): mig_data = {} self.vol_drv.pre_live_migration_on_source(mig_data) - self.assertEqual([1, 2], mig_data.get('npiv_fabric_slots_A')) - self.assertEqual([3], mig_data.get('npiv_fabric_slots_B')) + self.assertEqual('[1, 2]', mig_data.get('src_npiv_fabric_slots_A')) + self.assertEqual('[3]', mig_data.get('src_npiv_fabric_slots_B')) + # Ensure only string data is placed in the dict. + for key in mig_data: + self.assertEqual(str, type(mig_data[key])) @mock.patch('pypowervm.tasks.vfc_mapper.' 
'build_migration_mappings_for_fabric') @@ -471,23 +475,25 @@ class TestNPIVAdapter(test_vol.TestVolumeAdapter): self, mock_fabric_names, mock_build_mig_map): mock_fabric_names.return_value = ['A', 'B'] - src_mig_data = {'npiv_fabric_slots_A': [1, 2], - 'npiv_fabric_slots_B': [3]} - dest_mig_data = {} + mig_data = {'src_npiv_fabric_slots_A': jsonutils.dumps([1, 2]), + 'src_npiv_fabric_slots_B': jsonutils.dumps([3])} mock_build_mig_map.side_effect = [['a'], ['b']] self.vol_drv.stg_ftsk = mock.MagicMock() # Execute the test - self.vol_drv.pre_live_migration_on_destination( - src_mig_data, dest_mig_data) + self.vol_drv.pre_live_migration_on_destination(mig_data) - self.assertEqual(['a'], dest_mig_data.get('npiv_fabric_mapping_A')) - self.assertEqual(['b'], dest_mig_data.get('npiv_fabric_mapping_B')) + self.assertEqual('["a"]', mig_data.get('dest_npiv_fabric_mapping_A')) + self.assertEqual('["b"]', mig_data.get('dest_npiv_fabric_mapping_B')) + # Ensure only string data is placed in the dict. + for key in mig_data: + self.assertEqual(str, type(mig_data[key])) # Order of the mappings is not important. - self.assertEqual({'b', 'a'}, - set(dest_mig_data.get('vfc_lpm_mappings'))) + self.assertEqual( + {'b', 'a'}, + set(jsonutils.loads(mig_data.get('vfc_lpm_mappings')))) # Verify that on migration, the WWPNs are reversed. self.assertEqual(2, self.vol_drv.stg_ftsk.feed.reverse.call_count) diff --git a/nova_powervm/tests/virt/powervm/volume/test_vscsi.py b/nova_powervm/tests/virt/powervm/volume/test_vscsi.py index 499221d3..ac76c3fa 100644 --- a/nova_powervm/tests/virt/powervm/volume/test_vscsi.py +++ b/nova_powervm/tests/virt/powervm/volume/test_vscsi.py @@ -105,7 +105,7 @@ class TestVSCSIAdapter(BaseVSCSITest): hdisk.LUAStatus.DEVICE_AVAILABLE, 'devname', 'udid') # Run the method - self.vol_drv.pre_live_migration_on_destination({}, {}) + self.vol_drv.pre_live_migration_on_destination({}) # Test exception path mock_discover.return_value = ( @@ -113,8 +113,7 @@ class TestVSCSIAdapter(BaseVSCSITest): # Run the method self.assertRaises(p_exc.VolumePreMigrationFailed, - self.vol_drv.pre_live_migration_on_destination, {}, - {}) + self.vol_drv.pre_live_migration_on_destination, {}) @mock.patch('pypowervm.tasks.hdisk.remove_hdisk') @mock.patch('pypowervm.wrappers.virtual_io_server.VIOS.hdisk_from_uuid') @@ -136,9 +135,9 @@ class TestVSCSIAdapter(BaseVSCSITest): def test_post_live_migr_source(self): # Bad path. volume id not found - bad_data = {'pre_live_migration_result': {'vscsi-BAD': 'udid1'}} + bad_data = {'vscsi-BAD': 'udid1'} # good path. - good_data = {'pre_live_migration_result': {'vscsi-id': 'udid1'}} + good_data = {'vscsi-id': 'udid1'} with mock.patch.object(self.vol_drv, '_cleanup_volume') as mock_cln: self.vol_drv.post_live_migration_at_source(bad_data) diff --git a/nova_powervm/virt/powervm/driver.py b/nova_powervm/virt/powervm/driver.py index bc2a243f..bffd9b61 100644 --- a/nova_powervm/virt/powervm/driver.py +++ b/nova_powervm/virt/powervm/driver.py @@ -621,7 +621,7 @@ class PowerVMDriver(driver.ComputeDriver): :param block_device_info: Information about block devices that should be detached from the instance. 
:param destroy_disks: Indicates if disks should be destroyed - :param migrate_data: implementation specific params + :param migrate_data: a LiveMigrateData object """ if instance.task_state == task_states.RESIZE_REVERTING: LOG.info(_LI('Destroy called for migrated/resized instance.'), @@ -1395,7 +1395,7 @@ class PowerVMDriver(driver.ComputeDriver): :param block_device_info: instance block device information :param network_info: instance network information :param disk_info: instance disk information - :param migrate_data: implementation specific data dict. + :param migrate_data: a LiveMigrateData object """ LOG.info(_LI("Pre live migration processing."), instance=instance) @@ -1426,7 +1426,7 @@ class PowerVMDriver(driver.ComputeDriver): recovery method when any exception occurs. expected nova.compute.manager._rollback_live_migration. :param block_migration: if true, migrate VM disk. - :param migrate_data: implementation specific params. + :param migrate_data: a LiveMigrateData object """ self._log_operation('live_migration', instance) @@ -1471,7 +1471,7 @@ class PowerVMDriver(driver.ComputeDriver): recovery method when any exception occurs. expected nova.compute.manager._rollback_live_migration. :param block_migration: if true, migrate VM disk. - :param migrate_data: implementation specific params. + :param migrate_data: a LiveMigrateData object :param mig: live_migration object :param ex: exception reason @@ -1500,7 +1500,7 @@ class PowerVMDriver(driver.ComputeDriver): :param block_device_info: instance block device information :param destroy_disks: if true, destroy disks at destination during cleanup - :param migrate_data: implementation specific params + :param migrate_data: a LiveMigrateData object """ del self.live_migrations[instance.uuid] @@ -1545,7 +1545,7 @@ class PowerVMDriver(driver.ComputeDriver): :param context: security context :instance: instance object that was migrated :block_device_info: instance block device information - :param migrate_data: if not None, it is a dict which has data + :param migrate_data: a LiveMigrateData object """ # Build the volume drivers vol_drvs = self._build_vol_drivers(context, instance, diff --git a/nova_powervm/virt/powervm/live_migration.py b/nova_powervm/virt/powervm/live_migration.py index 58a3e4bf..a61e8760 100644 --- a/nova_powervm/virt/powervm/live_migration.py +++ b/nova_powervm/virt/powervm/live_migration.py @@ -17,15 +17,16 @@ import abc from nova import exception +from oslo_log import log as logging +from oslo_serialization import jsonutils from pypowervm.tasks import management_console as mgmt_task from pypowervm.tasks import migration as mig from pypowervm.tasks import storage as stor_task from pypowervm.tasks import vterm - -from oslo_log import log as logging import six from nova_powervm import conf as cfg +from nova_powervm.objects import migrate_data as mig_obj from nova_powervm.virt.powervm.i18n import _ from nova_powervm.virt.powervm.i18n import _LE from nova_powervm.virt.powervm.i18n import _LI @@ -64,17 +65,17 @@ def _verify_migration_capacity(host_w, instance): @six.add_metaclass(abc.ABCMeta) class LiveMigration(object): - def __init__(self, drvr, instance, src_data, dest_data): + def __init__(self, drvr, instance, mig_data): self.drvr = drvr self.instance = instance - self.src_data = src_data # migration data from src host - self.dest_data = dest_data # migration data from dest host + self.mig_data = mig_data class LiveMigrationDest(LiveMigration): def __init__(self, drvr, instance): - super(LiveMigrationDest, 
self).__init__(drvr, instance, {}, {}) + super(LiveMigrationDest, self).__init__( + drvr, instance, mig_obj.PowerVMLiveMigrateData()) @staticmethod def _get_dest_user_id(): @@ -91,7 +92,7 @@ class LiveMigrationDest(LiveMigration): :param context: security context :param src_compute_info: Info about the sending machine :param dst_compute_info: Info about the receiving machine - :returns: a dict containing migration info + :returns: a PowerVMLiveMigrateData object """ # Refresh the host wrapper since we're pulling values that may change @@ -114,19 +115,18 @@ class LiveMigrationDest(LiveMigration): _verify_migration_capacity(self.drvr.host_wrapper, self.instance) - self.dest_data['dest_host_migr_data'] = (self.drvr.host_wrapper. - migration_data) - self.dest_data['dest_ip'] = CONF.my_ip - self.dest_data['dest_user_id'] = self._get_dest_user_id() - self.dest_data['dest_sys_name'] = self.drvr.host_wrapper.system_name - self.dest_data['dest_proc_compat'] = ( + self.mig_data.host_mig_data = self.drvr.host_wrapper.migration_data + self.mig_data.dest_ip = CONF.my_ip + self.mig_data.dest_user_id = self._get_dest_user_id() + self.mig_data.dest_sys_name = self.drvr.host_wrapper.system_name + self.mig_data.dest_proc_compat = ( ','.join(self.drvr.host_wrapper.proc_compat_modes)) LOG.debug('src_compute_info: %s' % src_compute_info) LOG.debug('dst_compute_info: %s' % dst_compute_info) - LOG.debug('Migration data: %s' % self.dest_data) + LOG.debug('Migration data: %s' % self.mig_data) - return self.dest_data + return self.mig_data def pre_live_migration(self, context, block_device_info, network_info, disk_info, migrate_data, vol_drvs): @@ -137,27 +137,24 @@ class LiveMigrationDest(LiveMigration): :param block_device_info: instance block device information :param network_info: instance network information :param disk_info: instance disk information - :param migrate_data: implementation specific data dict + :param migrate_data: a PowerVMLiveMigrateData object :param vol_drvs: volume drivers for the attached volumes """ LOG.debug('Running pre live migration on destination.', instance=self.instance) LOG.debug('Migration data: %s' % migrate_data) - # Set the ssh auth key if needed. - src_mig_data = migrate_data.get('migrate_data', {}) - pub_key = src_mig_data.get('public_key') - if pub_key is not None: - mgmt_task.add_authorized_key(self.drvr.adapter, pub_key) + # Set the ssh auth key. + mgmt_task.add_authorized_key(self.drvr.adapter, + migrate_data.public_key) # For each volume, make sure it's ready to migrate - dest_mig_data = {} for vol_drv in vol_drvs: LOG.info(_LI('Performing pre migration for volume %(volume)s'), dict(volume=vol_drv.volume_id)) try: - vol_drv.pre_live_migration_on_destination(src_mig_data, - dest_mig_data) + vol_drv.pre_live_migration_on_destination( + migrate_data.vol_data) except Exception as e: LOG.exception(e) # It failed. @@ -171,8 +168,8 @@ class LiveMigrationDest(LiveMigration): stor_task.ComprehensiveScrub(self.drvr.adapter).execute() # Save the migration data, we'll use it if the LPM fails - self.pre_live_data = dest_mig_data - return dest_mig_data + self.pre_live_vol_data = migrate_data.vol_data + return migrate_data def post_live_migration_at_destination(self, network_info, vol_drvs): """Do post migration cleanup on destination host. 
@@ -209,7 +206,7 @@ class LiveMigrationDest(LiveMigration): LOG.info(_LI('Performing detach for volume %(volume)s'), dict(volume=vol_drv.volume_id)) try: - vol_drv.cleanup_volume_at_destination(self.pre_live_data) + vol_drv.cleanup_volume_at_destination(self.pre_live_vol_data) except Exception as e: LOG.exception(e) # Log the exception but no need to raise one because @@ -218,9 +215,6 @@ class LiveMigrationDest(LiveMigration): class LiveMigrationSrc(LiveMigration): - def __init__(self, drvr, instance, dest_data): - super(LiveMigrationSrc, self).__init__(drvr, instance, {}, dest_data) - def check_source(self, context, block_device_info, vol_drvs): """Check the source host @@ -234,23 +228,18 @@ class LiveMigrationSrc(LiveMigration): :param context: security context :param block_device_info: result of _get_instance_block_device_info :param vol_drvs: volume drivers for the attached volumes - :returns: a dict containing migration info + :returns: a PowerVMLiveMigrateData object """ lpar_w = vm.get_instance_wrapper( self.drvr.adapter, self.instance, self.drvr.host_uuid) self.lpar_w = lpar_w - LOG.debug('Dest Migration data: %s' % self.dest_data) - - # Only 'migrate_data' is sent to the destination on prelive call. - mig_data = {'public_key': mgmt_task.get_public_key(self.drvr.adapter)} - self.src_data['migrate_data'] = mig_data - LOG.debug('Src Migration data: %s' % self.src_data) + LOG.debug('Dest Migration data: %s' % self.mig_data) # Check proc compatibility modes if (lpar_w.proc_compat_mode and lpar_w.proc_compat_mode not in - self.dest_data['dest_proc_compat'].split(',')): + self.mig_data.dest_proc_compat.split(',')): msg = (_("Cannot migrate %(name)s because its " "processor compatibility mode %(mode)s " "is not in the list of modes \"%(modes)s\" " @@ -258,7 +247,7 @@ class LiveMigrationSrc(LiveMigration): dict(name=self.instance.name, mode=lpar_w.proc_compat_mode, modes=', '.join( - self.dest_data['dest_proc_compat'].split(',')))) + self.mig_data.dest_proc_compat.split(',')))) raise exception.MigrationPreCheckError(reason=msg) @@ -275,10 +264,15 @@ class LiveMigrationSrc(LiveMigration): # Check the number of migrations for capacity _verify_migration_capacity(self.drvr.host_wrapper, self.instance) - # Get the 'source' pre-migration data for the volume drivers. Should - # automatically update the mig_data dictionary as needed. + self.mig_data.public_key = mgmt_task.get_public_key(self.drvr.adapter) + + # Get the 'source' pre-migration data for the volume drivers. + vol_data = {} for vol_drv in vol_drvs: - vol_drv.pre_live_migration_on_source(mig_data) + vol_drv.pre_live_migration_on_source(vol_data) + self.mig_data.vol_data = vol_data + + LOG.debug('Src Migration data: %s' % self.mig_data) # Create a FeedTask to scrub any orphaned mappings/storage associated # with this LPAR. (Don't run it yet - we want to do the VOpt removal @@ -295,28 +289,34 @@ class LiveMigrationSrc(LiveMigration): # Ensure the vterm is non-active vterm.close_vterm(self.drvr.adapter, lpar_w.uuid) - return self.src_data + return self.mig_data def live_migration(self, context, migrate_data): """Start the live migration. :param context: security context - :param migrate_data: migration data from src and dest host. 
+ :param migrate_data: a PowerVMLiveMigrateData object """ LOG.debug("Starting migration.", instance=self.instance) LOG.debug("Migrate data: %s" % migrate_data) + # The passed in mig data has more info (dest data added), so replace + self.mig_data = migrate_data # Get the vFC and vSCSI live migration mappings - dest_pre_lm_data = migrate_data.get('pre_live_migration_result', {}) - vfc_mappings = dest_pre_lm_data.get('vfc_lpm_mappings') - vscsi_mappings = dest_pre_lm_data.get('vscsi_lpm_mappings') + vol_data = migrate_data.vol_data + vfc_mappings = vol_data.get('vfc_lpm_mappings') + if vfc_mappings is not None: + vfc_mappings = jsonutils.loads(vfc_mappings) + vscsi_mappings = vol_data.get('vscsi_lpm_mappings') + if vscsi_mappings is not None: + vscsi_mappings = jsonutils.loads(vscsi_mappings) try: # Migrate the LPAR! - mig.migrate_lpar(self.lpar_w, self.dest_data['dest_sys_name'], + mig.migrate_lpar(self.lpar_w, self.mig_data.dest_sys_name, validate_only=False, - tgt_mgmt_svr=self.dest_data['dest_ip'], - tgt_mgmt_usr=self.dest_data.get('dest_user_id'), + tgt_mgmt_svr=self.mig_data.dest_ip, + tgt_mgmt_usr=self.mig_data.dest_user_id, virtual_fc_mappings=vfc_mappings, virtual_scsi_mappings=vscsi_mappings) @@ -332,14 +332,14 @@ class LiveMigrationSrc(LiveMigration): This method is focused on storage. :param vol_drvs: volume drivers for the attached volume - :param migrate_data: migration data + :param migrate_data: a PowerVMLiveMigrateData object """ # For each volume, make sure the source is cleaned for vol_drv in vol_drvs: LOG.info(_LI('Performing post migration for volume %(volume)s'), dict(volume=vol_drv.volume_id)) try: - vol_drv.post_live_migration_at_source(migrate_data) + vol_drv.post_live_migration_at_source(migrate_data.vol_data) except Exception as e: LOG.exception(e) # Log the exception but no need to raise one because @@ -382,8 +382,8 @@ class LiveMigrationSrc(LiveMigration): :param lpar_w: LogicalPartition wrapper :param host_w: ManagedSystem wrapper """ - ready, msg = lpar_w.can_lpm(host_w, migr_data=( - self.dest_data.get('dest_host_migr_data'))) + ready, msg = lpar_w.can_lpm(host_w, + migr_data=self.mig_data.host_mig_data) if not ready: msg = (_("Live migration of instance '%(name)s' failed because it " "is not ready. Reason: %(reason)s") % diff --git a/nova_powervm/virt/powervm/volume/driver.py b/nova_powervm/virt/powervm/volume/driver.py index 96c94967..c2096cff 100644 --- a/nova_powervm/virt/powervm/volume/driver.py +++ b/nova_powervm/virt/powervm/volume/driver.py @@ -108,7 +108,7 @@ class PowerVMVolumeAdapter(object): """List of pypowervm XAGs needed to support this adapter.""" raise NotImplementedError() - def pre_live_migration_on_destination(self, src_mig_data, dest_mig_data): + def pre_live_migration_on_destination(self, mig_data): """Perform pre live migration steps for the volume on the target host. This method performs any pre live migration that is needed. @@ -121,11 +121,10 @@ class PowerVMVolumeAdapter(object): method. The data from the pre_live call will be passed in via the mig_data. This method should put its output into the dest_mig_data. - :param src_mig_data: The migration data from the source server. - :param dest_mig_data: The migration data for the destination server. - If the volume connector needs to provide - information to the live_migration command, it - should be added to this dictionary. + :param mig_data: Dict of migration data for the destination server. 
+ If the volume connector needs to provide + information to the live_migration command, it + should be added to this dictionary. """ raise NotImplementedError() @@ -156,7 +155,7 @@ class PowerVMVolumeAdapter(object): This method can be used to handle any steps that need to taken on the source host after the VM is on the destination. - :param migrate_data: migration data + :param migrate_data: volume migration data """ pass diff --git a/nova_powervm/virt/powervm/volume/npiv.py b/nova_powervm/virt/powervm/volume/npiv.py index 7bcaf7ba..7d6ded13 100644 --- a/nova_powervm/virt/powervm/volume/npiv.py +++ b/nova_powervm/virt/powervm/volume/npiv.py @@ -19,6 +19,7 @@ from oslo_log import log as logging from taskflow import task from nova.compute import task_states +from oslo_serialization import jsonutils from pypowervm import const as pvm_const from pypowervm.tasks import vfc_mapper as pvm_vfcm @@ -116,10 +117,11 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter): client_slots.append(vfc_map.client_adapter.lpar_slot_num) # Set the client slots into the fabric data to pass to the - # destination. - mig_data['npiv_fabric_slots_%s' % fabric] = client_slots + # destination. Only strings can be stored. + mig_data['src_npiv_fabric_slots_%s' % fabric] = ( + jsonutils.dumps(client_slots)) - def pre_live_migration_on_destination(self, src_mig_data, dest_mig_data): + def pre_live_migration_on_destination(self, mig_data): """Perform pre live migration steps for the volume on the target host. This method performs any pre live migration that is needed. @@ -132,11 +134,10 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter): method. The data from the pre_live call will be passed in via the mig_data. This method should put its output into the dest_mig_data. - :param src_mig_data: The migration data from the source server. - :param dest_mig_data: The migration data for the destination server. - If the volume connector needs to provide - information to the live_migration command, it - should be added to this dictionary. + :param mig_data: Dict of migration data for the destination server. + If the volume connector needs to provide + information to the live_migration command, it + should be added to this dictionary. """ vios_wraps = self.stg_ftsk.feed @@ -145,20 +146,22 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter): # the source system what 'vfc mappings' to pass in on the live # migration command. for fabric in self._fabric_names(): - slots = src_mig_data['npiv_fabric_slots_%s' % fabric] + slots = jsonutils.loads( + mig_data['src_npiv_fabric_slots_%s' % fabric]) fabric_mapping = pvm_vfcm.build_migration_mappings_for_fabric( vios_wraps, self._fabric_ports(fabric), slots) - dest_mig_data['npiv_fabric_mapping_%s' % fabric] = fabric_mapping + mig_data['dest_npiv_fabric_mapping_%s' % fabric] = ( + jsonutils.dumps(fabric_mapping)) # Reverse the vios wrapper so that the other fabric will get the # on the second vios. vios_wraps.reverse() # Collate all of the individual fabric mappings into a single element. 
full_map = [] - for key, value in dest_mig_data.items(): - if key.startswith('npiv_fabric_mapping_'): - full_map.extend(value) - dest_mig_data['vfc_lpm_mappings'] = full_map + for key, value in mig_data.items(): + if key.startswith('dest_npiv_fabric_mapping_'): + full_map.extend(jsonutils.loads(value)) + mig_data['vfc_lpm_mappings'] = jsonutils.dumps(full_map) def post_live_migration_at_destination(self, mig_vol_stor): """Perform post live migration steps for the volume on the target host. @@ -178,6 +181,8 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter): # manager. Given that, we need to update the order of the WWPNs. # The first WWPN is the one that is logged into the fabric and this # will now indicate that our WWPN is logged in. + LOG.debug('Post live migrate volume store: %s' % mig_vol_stor, + instance=self.instance) for fabric in self._fabric_names(): # We check the mig_vol_stor to see if this fabric has already been # flipped. If so, we can continue. @@ -192,7 +197,8 @@ class NPIVVolumeAdapter(v_driver.FibreChannelVolumeAdapter): # Flip the WPWNs c_wwpns = port_map[1].split() c_wwpns.reverse() - + LOG.debug('Flipping WWPNs, ports: %s wwpns: %s' % + (port_map, c_wwpns), instance=self.instance) # Get the new physical WWPN. vfc_map = pvm_vfcm.find_vios_for_vfc_wwpns(vios_wraps, c_wwpns)[1] diff --git a/nova_powervm/virt/powervm/volume/vscsi.py b/nova_powervm/virt/powervm/volume/vscsi.py index 9f6b8b92..b43e3cb4 100644 --- a/nova_powervm/virt/powervm/volume/vscsi.py +++ b/nova_powervm/virt/powervm/volume/vscsi.py @@ -79,7 +79,7 @@ class VscsiVolumeAdapter(v_driver.FibreChannelVolumeAdapter): # SCSI mapping is for the connections between VIOS and client VM return [pvm_const.XAG.VIO_SMAP] - def pre_live_migration_on_destination(self, src_mig_data, dest_mig_data): + def pre_live_migration_on_destination(self, mig_data): """Perform pre live migration steps for the volume on the target host. This method performs any pre live migration that is needed. @@ -92,11 +92,10 @@ class VscsiVolumeAdapter(v_driver.FibreChannelVolumeAdapter): method. The data from the pre_live call will be passed in via the mig_data. This method should put its output into the dest_mig_data. - :param src_mig_data: The migration data from the source server. - :param dest_mig_data: The migration data for the destination server. - If the volume connector needs to provide - information to the live_migration command, it - should be added to this dictionary. + :param mig_data: Dict of migration data for the destination server. + If the volume connector needs to provide + information to the live_migration command, it + should be added to this dictionary. """ volume_id = self.volume_id found = False @@ -119,7 +118,7 @@ class VscsiVolumeAdapter(v_driver.FibreChannelVolumeAdapter): instance_name=self.instance.name) raise p_exc.VolumePreMigrationFailed(**ex_args) - dest_mig_data['vscsi-' + volume_id] = udid + mig_data['vscsi-' + volume_id] = udid def _cleanup_volume(self, udid): """Cleanup the hdisk associated with this udid.""" @@ -155,14 +154,13 @@ class VscsiVolumeAdapter(v_driver.FibreChannelVolumeAdapter): This method can be used to handle any steps that need to taken on the source host after the VM is on the destination. - :param migrate_data: migration data + :param migrate_data: volume migration data """ # Get the udid of the volume to remove the hdisk for. 
We can't # use the connection information because LPM 'refreshes' it, which # wipes out our data, so we use the data from the destination host # to avoid having to discover the hdisk to get the udid. - udid = migrate_data['pre_live_migration_result'].get( - 'vscsi-' + self.volume_id) + udid = migrate_data.get('vscsi-' + self.volume_id) self._cleanup_volume(udid) def cleanup_volume_at_destination(self, migrate_data):
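
Because vol_data is a DictOfNullableStringsField, the volume drivers in this change write only strings into it: the NPIV driver JSON-encodes its fabric mappings and the combined vfc_lpm_mappings on the destination and decodes them again on the source, while the vSCSI driver stores the hdisk udid under a 'vscsi-<volume_id>' key. A small sketch of that round trip, not part of the patch, with hypothetical values:

    from oslo_serialization import jsonutils

    vol_data = {}

    # Destination host (pre_live_migration_on_destination) stores only strings.
    vol_data['dest_npiv_fabric_mapping_A'] = jsonutils.dumps(['mapping-a'])
    vol_data['vfc_lpm_mappings'] = jsonutils.dumps(['mapping-a', 'mapping-b'])
    vol_data['vscsi-' + 'vol-1234'] = 'udid1'   # hypothetical volume id/udid

    # Source host: live_migration() decodes the vFC mappings for migrate_lpar,
    # and post_live_migration_at_source() looks up the udid for cleanup.
    vfc_mappings = jsonutils.loads(vol_data['vfc_lpm_mappings'])
    udid = vol_data.get('vscsi-' + 'vol-1234')
    assert vfc_mappings == ['mapping-a', 'mapping-b'] and udid == 'udid1'
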