diff --git a/nova_powervm/tests/virt/powervm/__init__.py b/nova_powervm/tests/virt/powervm/__init__.py index 359a33ee..d6b3267d 100644 --- a/nova_powervm/tests/virt/powervm/__init__.py +++ b/nova_powervm/tests/virt/powervm/__init__.py @@ -15,6 +15,7 @@ from nova.compute import power_state from nova.compute import task_states +from nova.compute import vm_states from nova.objects import flavor from nova.objects import image_meta from nova.objects import instance @@ -44,6 +45,7 @@ TEST_INSTANCE = { 'host': 'host1', 'flavor': TEST_FLAVOR, 'task_state': None, + 'vm_state': vm_states.ACTIVE, 'power_state': power_state.SHUTDOWN, } diff --git a/nova_powervm/tests/virt/powervm/tasks/test_vm.py b/nova_powervm/tests/virt/powervm/tasks/test_vm.py index 0f7eda9d..748d801a 100644 --- a/nova_powervm/tests/virt/powervm/tasks/test_vm.py +++ b/nova_powervm/tests/virt/powervm/tasks/test_vm.py @@ -27,6 +27,32 @@ class TestVMTasks(test.TestCase): self.apt = mock.Mock() self.instance = mock.Mock() + @mock.patch('pypowervm.tasks.storage.add_lpar_storage_scrub_tasks') + @mock.patch('nova_powervm.virt.powervm.vm.crt_lpar') + def test_create(self, mock_vm_crt, mock_stg): + nvram_mgr = mock.Mock() + nvram_mgr.fetch.return_value = 'data' + lpar_entry = mock.Mock() + + crt = tf_vm.Create(self.apt, 'host_wrapper', self.instance, + 'flavor', 'stg_ftsk', nvram_mgr=nvram_mgr) + mock_vm_crt.return_value = lpar_entry + crt_entry = crt.execute() + + mock_vm_crt.assert_called_once_with(self.apt, 'host_wrapper', + self.instance, 'flavor', + nvram='data') + self.assertEqual(lpar_entry, crt_entry) + nvram_mgr.fetch.assert_called_once_with(self.instance) + + # No exception is raised if the nvram could not be fetched + mock_vm_crt.reset_mock() + nvram_mgr.fetch.side_effect = ValueError('Not Available') + crt.execute() + mock_vm_crt.assert_called_once_with(self.apt, 'host_wrapper', + self.instance, 'flavor', + nvram=None) + @mock.patch('nova_powervm.virt.powervm.vm.update') def test_resize(self, 
mock_vm_update): @@ -55,3 +81,22 @@ class TestVMTasks(test.TestCase): store_nvram.execute() nvram_mgr.store.assert_called_once_with(self.instance, immediate=True) + + # No exception is raised if the NVRAM could not be stored. + nvram_mgr.reset_mock() + nvram_mgr.store.side_effect = ValueError('Not Available') + store_nvram.execute() + nvram_mgr.store.assert_called_once_with(self.instance, + immediate=True) + + def test_delete_nvram(self): + nvram_mgr = mock.Mock() + delete_nvram = tf_vm.DeleteNvram(nvram_mgr, self.instance) + delete_nvram.execute() + nvram_mgr.remove.assert_called_once_with(self.instance) + + # No exception is raised if the NVRAM could not be removed. + nvram_mgr.reset_mock() + nvram_mgr.remove.side_effect = ValueError('Not Available') + delete_nvram.execute() + nvram_mgr.remove.assert_called_once_with(self.instance) diff --git a/nova_powervm/tests/virt/powervm/test_driver.py b/nova_powervm/tests/virt/powervm/test_driver.py index 26d5acc4..94f59ffa 100644 --- a/nova_powervm/tests/virt/powervm/test_driver.py +++ b/nova_powervm/tests/virt/powervm/test_driver.py @@ -253,7 +253,8 @@ class TestPowerVMDriver(test.TestCase): self.assertTrue(mock_plug_mgmt_vif.called) self.assertTrue(mock_crt_disk_img.called) self.crt_lpar.assert_called_with( - self.apt, self.drv.host_wrapper, self.inst, self.inst.get_flavor()) + self.apt, self.drv.host_wrapper, self.inst, self.inst.get_flavor(), + nvram=None) self.assertTrue(mock_pwron.called) self.assertFalse(mock_pwron.call_args[1]['synchronous']) # Assert that tasks that are not supposed to be called are not called @@ -285,7 +286,8 @@ class TestPowerVMDriver(test.TestCase): # Create LPAR was called self.crt_lpar.assert_called_with(self.apt, self.drv.host_wrapper, - self.inst, self.inst.get_flavor()) + self.inst, self.inst.get_flavor(), + nvram=None) # Config drive was called self.assertTrue(mock_val_vopt.called) self.assertTrue(mock_cfg_vopt.called) @@ -336,7 +338,8 @@ class TestPowerVMDriver(test.TestCase): # Create 
LPAR was called self.crt_lpar.assert_called_with(self.apt, self.drv.host_wrapper, - self.inst, self.inst.get_flavor()) + self.inst, self.inst.get_flavor(), + nvram=None) # Power on was called self.assertTrue(mock_pwron.called) self.assertFalse(mock_pwron.call_args[1]['synchronous']) @@ -397,7 +400,8 @@ class TestPowerVMDriver(test.TestCase): # Create LPAR was called self.crt_lpar.assert_called_with(self.apt, self.drv.host_wrapper, - self.inst, self.inst.get_flavor()) + self.inst, self.inst.get_flavor(), + nvram=None) # Power on was called self.assertTrue(mock_pwron.called) self.assertFalse(mock_pwron.call_args[1]['synchronous']) @@ -445,7 +449,8 @@ class TestPowerVMDriver(test.TestCase): # Create LPAR was called self.crt_lpar.assert_called_with(self.apt, self.drv.host_wrapper, - self.inst, self.inst.get_flavor()) + self.inst, self.inst.get_flavor(), + nvram=None) # Power on was called self.assertTrue(mock_pwron.called) self.assertFalse(mock_pwron.call_args[1]['synchronous']) @@ -481,6 +486,8 @@ class TestPowerVMDriver(test.TestCase): Uses a basic disk image, attaching networks and powering on. """ # Set up the mocks to the tasks. 
+ self.drv.nvram_mgr = mock.Mock() + self.drv.nvram_mgr.fetch.return_value = 'nvram data' mock_get_flv.return_value = self.inst.get_flavor() mock_cfg_drv.return_value = False mock_boot_from_vol.return_value = False @@ -494,7 +501,8 @@ class TestPowerVMDriver(test.TestCase): self.assertTrue(mock_plug_mgmt_vif.called) self.assertTrue(mock_find_disk.called) self.crt_lpar.assert_called_with( - self.apt, self.drv.host_wrapper, self.inst, self.inst.get_flavor()) + self.apt, self.drv.host_wrapper, self.inst, self.inst.get_flavor(), + nvram='nvram data') self.assertTrue(mock_pwron.called) self.assertFalse(mock_pwron.call_args[1]['synchronous']) # Assert that tasks that are not supposed to be called are not called @@ -530,7 +538,8 @@ class TestPowerVMDriver(test.TestCase): # Create LPAR was called self.crt_lpar.assert_called_with(self.apt, self.drv.host_wrapper, - self.inst, self.inst.get_flavor()) + self.inst, self.inst.get_flavor(), + nvram=None) self.assertEqual(2, self.vol_drv.connect_volume.call_count) # Power on was called @@ -580,7 +589,8 @@ class TestPowerVMDriver(test.TestCase): # Create LPAR was called self.crt_lpar.assert_called_with(self.apt, self.drv.host_wrapper, self.inst_ibmi, - self.inst_ibmi.get_flavor()) + self.inst_ibmi.get_flavor(), + nvram=None) self.assertTrue(mock_boot_conn_type.called) self.assertTrue(mock_update_lod_src.called) @@ -637,7 +647,7 @@ class TestPowerVMDriver(test.TestCase): self.assertTrue(mock_crt_disk_img.called) self.crt_lpar.assert_called_with( self.apt, self.drv.host_wrapper, self.inst_ibmi, - self.inst_ibmi.get_flavor()) + self.inst_ibmi.get_flavor(), nvram=None) self.assertTrue(mock_update_lod_src.called) self.assertTrue(mock_pwron.called) self.assertFalse(mock_pwron.call_args[1]['synchronous']) @@ -672,7 +682,8 @@ class TestPowerVMDriver(test.TestCase): # Create LPAR was called self.crt_lpar.assert_called_with(self.apt, self.drv.host_wrapper, - self.inst, self.inst.get_flavor()) + self.inst, self.inst.get_flavor(), + 
nvram=None) # Since the create disks method failed, the delete disks should not # have been called @@ -708,7 +719,8 @@ class TestPowerVMDriver(test.TestCase): # Create LPAR was called self.crt_lpar.assert_called_with(self.apt, self.drv.host_wrapper, - self.inst, self.inst.get_flavor()) + self.inst, self.inst.get_flavor(), + nvram=None) self.assertEqual(1, self.vol_drv.connect_volume.call_count) # Power on should not be called. Shouldn't get that far in flow. @@ -793,6 +805,8 @@ class TestPowerVMDriver(test.TestCase): self, mock_get_flv, mock_pvmuuid, mock_val_vopt, mock_dlt_vopt, mock_pwroff, mock_dlt, mock_boot_from_vol, mock_unplug_vifs): """Validates the basic PowerVM destroy.""" + # NVRAM Manager + self.drv.nvram_mgr = mock.Mock() # BDMs mock_bdms = self._fake_bdms() mock_boot_from_vol.return_value = False @@ -823,6 +837,9 @@ class TestPowerVMDriver(test.TestCase): self.assertTrue(self.drv.disk_dvr.delete_disks.called) self.assertTrue(self.drv.disk_dvr.disconnect_image_disk.called) + # NVRAM was deleted + self.drv.nvram_mgr.remove.assert_called_once_with(self.inst) + def reset_mocks(): # Reset the mocks for mk in [mock_pwroff, mock_dlt, mock_dlt_vopt, diff --git a/nova_powervm/tests/virt/powervm/test_vm.py b/nova_powervm/tests/virt/powervm/test_vm.py index 34380d12..13722172 100644 --- a/nova_powervm/tests/virt/powervm/test_vm.py +++ b/nova_powervm/tests/virt/powervm/test_vm.py @@ -344,8 +344,11 @@ class TestVM(test.TestCase): lparw = pvm_lpar.LPAR.wrap(self.resp.feed.entries[0]) mock_bld.return_value = lparw self.apt.create.return_value = lparw.entry - vm.crt_lpar(self.apt, host_wrapper, instance, flavor) - self.assertTrue(self.apt.create.called) + vm.crt_lpar(self.apt, host_wrapper, instance, flavor, nvram='data') + self.apt.create.assert_called_once_with( + lparw, 'ManagedSystem', child_type='LogicalPartition', + root_id=host_wrapper.uuid, service='uom', timeout=-1) + self.assertEqual(lparw.nvram, 'data') self.assertTrue(mock_vld_all.called) # Test to verify 
the LPAR Creation with invalid name specification diff --git a/nova_powervm/virt/powervm/driver.py b/nova_powervm/virt/powervm/driver.py index bffd9b61..4e1e3908 100644 --- a/nova_powervm/virt/powervm/driver.py +++ b/nova_powervm/virt/powervm/driver.py @@ -17,6 +17,7 @@ from nova import block_device from nova.compute import task_states from nova.compute import utils as compute_utils +from nova.compute import vm_states from nova.console import type as console_type from nova import context as ctx from nova import exception @@ -88,6 +89,9 @@ NVRAM_APIS = { 'swift': 'swift.SwiftNvramStore', } +KEEP_NVRAM_STATES = {vm_states.SHELVED, } +FETCH_NVRAM_STATES = {vm_states.SHELVED, vm_states.SHELVED_OFFLOADED} + class PowerVMDriver(driver.ComputeDriver): @@ -368,9 +372,15 @@ class PowerVMDriver(driver.ComputeDriver): stg_ftsk = vios.build_tx_feed_task(self.adapter, self.host_uuid, xag=xag) - # Create the LPAR + recreate = (instance.task_state == task_states.REBUILD_SPAWNING and + 'id' not in image_meta) + + # Create the LPAR, check if NVRAM restore is needed. + nvram_mgr = (self.nvram_mgr if self.nvram_mgr and + (recreate or instance.vm_state in FETCH_NVRAM_STATES) + else None) flow_spawn.add(tf_vm.Create(self.adapter, self.host_wrapper, instance, - flavor, stg_ftsk)) + flavor, stg_ftsk, nvram_mgr=nvram_mgr)) # Create a flow for the IO flow_spawn.add(tf_net.PlugVifs(self.virtapi, self.adapter, instance, @@ -381,9 +391,8 @@ class PowerVMDriver(driver.ComputeDriver): # Only add the image disk if this is from Glance. if not self._is_booted_from_volume(block_device_info): - # If a rebuild, just hookup the existing disk on shared storage. - if (instance.task_state == task_states.REBUILD_SPAWNING and - 'id' not in image_meta): + # If a recreate, just hookup the existing disk on shared storage. 
+ if recreate: flow_spawn.add(tf_stg.FindDisk( self.disk_dvr, context, instance, disk_dvr.DiskType.BOOT)) else: @@ -571,11 +580,18 @@ class PowerVMDriver(driver.ComputeDriver): if destroy_disk_task: flow.add(destroy_disk_task) - # Last step is to delete the LPAR from the system. + # Last step is to delete the LPAR from the system and delete + # the NVRAM from the store. # Note: If moving to a Graph Flow, will need to change to depend on # the prior step. flow.add(tf_vm.Delete(self.adapter, pvm_inst_uuid, instance)) + if destroy_disks and instance.vm_state not in KEEP_NVRAM_STATES: + # If the disks are being destroyed and not one of the + # operations that we should keep the NVRAM around for, then + # it's probably safe to delete the NVRAM from the store. + flow.add(tf_vm.DeleteNvram(self.nvram_mgr, instance)) + # Build the engine & run! tf_eng.run(flow) @@ -1209,7 +1225,8 @@ class PowerVMDriver(driver.ComputeDriver): # Create the LPAR flow.add(tf_vm.Create(self.adapter, self.host_wrapper, instance, - flav_obj, stg_ftsk)) + flav_obj, stg_ftsk, + nvram_mgr=self.nvram_mgr)) # Create a flow for the network IO flow.add(tf_net.PlugVifs(self.virtapi, self.adapter, instance, diff --git a/nova_powervm/virt/powervm/nvram/manager.py b/nova_powervm/virt/powervm/nvram/manager.py index e315cc53..bacca1de 100644 --- a/nova_powervm/virt/powervm/nvram/manager.py +++ b/nova_powervm/virt/powervm/nvram/manager.py @@ -86,7 +86,7 @@ class NvramManager(object): update. """ if immediate: - self._update_instance(instance=instance) + self._update_nvram(instance=instance) else: # Add it to the list to update self._add_to_list(instance) @@ -148,7 +148,7 @@ class NvramManager(object): self._update_list.clear() @lockutils.synchronized(LOCK_NVRAM_STORE) - def _update_instance(self, instance=None): + def _update_nvram(self, instance=None): """Perform an update of NVRAM for instance. 
:param instance: The instance to update or if not specified pull the @@ -183,7 +183,7 @@ class NvramManager(object): # Get the data from the adapter. entry = vm.get_instance_wrapper(self._adapter, instance, self._host_uuid, - xag=pvm_const.XAG.NVRAM) + xag=[pvm_const.XAG.NVRAM]) data = entry.nvram LOG.debug('NVRAM for instance: %s', data, instance=instance) except pvm_exc.HttpError as e: @@ -204,5 +204,5 @@ class NvramManager(object): LOG.debug('NVRAM store manager update thread is ending.') return - self._update_instance() + self._update_nvram() time.sleep(0) diff --git a/nova_powervm/virt/powervm/tasks/vm.py b/nova_powervm/virt/powervm/tasks/vm.py index 8fdbb977..49018e1f 100644 --- a/nova_powervm/virt/powervm/tasks/vm.py +++ b/nova_powervm/virt/powervm/tasks/vm.py @@ -14,13 +14,14 @@ # License for the specific language governing permissions and limitations # under the License. +from oslo_log import log as logging from pypowervm.tasks import power from pypowervm.tasks import storage as pvm_stg - -from oslo_log import log as logging +import six from taskflow import task from taskflow.types import failure as task_fail +from nova_powervm.virt.powervm.i18n import _LE from nova_powervm.virt.powervm.i18n import _LI from nova_powervm.virt.powervm import vm @@ -54,7 +55,8 @@ class Create(task.Task): """The task for creating a VM.""" - def __init__(self, adapter, host_wrapper, instance, flavor, stg_ftsk): + def __init__(self, adapter, host_wrapper, instance, flavor, stg_ftsk, + nvram_mgr=None): """Creates the Task for creating a VM. The revert method is not implemented because the compute manager @@ -73,6 +75,8 @@ class Create(task.Task): :param instance: The nova instance. :param flavor: The nova flavor. :param stg_ftsk: A FeedTask managing storage I/O operations. + :param nvram_mgr: The NVRAM manager to fetch the NVRAM from. If None, + the NVRAM will not be fetched. 
""" super(Create, self).__init__(name='crt_lpar', provides='lpar_wrap') @@ -81,11 +85,28 @@ class Create(task.Task): self.instance = instance self.flavor = flavor self.stg_ftsk = stg_ftsk + self.nvram_mgr = nvram_mgr def execute(self): LOG.info(_LI('Creating instance: %s'), self.instance.name) + + data = None + if self.nvram_mgr is not None: + LOG.info(_LI('Fetching NVRAM for instance %s.'), + self.instance.name, instance=self.instance) + try: + data = self.nvram_mgr.fetch(self.instance) + LOG.debug('NVRAM data is: %s', data, instance=self.instance) + except Exception as e: + # Fetching NVRAM exception should not cause a failure + LOG.exception(_LE('Unable to fetch NVRAM for instance ' + '%(name)s. Exception: %(reason)s'), + {'name': self.instance.name, + 'reason': six.text_type(e)}, + instance=self.instance) + wrap = vm.crt_lpar(self.adapter, self.host_wrapper, self.instance, - self.flavor) + self.flavor, nvram=data) pvm_stg.add_lpar_storage_scrub_tasks([wrap.id], self.stg_ftsk, lpars_exist=True) return wrap @@ -241,8 +262,47 @@ class StoreNvram(task.Task): self.immediate = immediate def execute(self): - if self.nvram_mgr is not None: + if self.nvram_mgr is None: + return + + try: self.nvram_mgr.store(self.instance, immediate=self.immediate) + except Exception as e: + LOG.exception(_LE('Unable to store NVRAM for instance ' + '%(name)s. Exception: %(reason)s'), + {'name': self.instance.name, + 'reason': six.text_type(e)}, + instance=self.instance) + + +class DeleteNvram(task.Task): + + """Delete the NVRAM for an instance from the store.""" + + def __init__(self, nvram_mgr, instance): + """Creates a task to delete the NVRAM of an instance. + + :param nvram_mgr: The NVRAM manager. + :param instance: The nova instance. 
+ """ + super(DeleteNvram, self).__init__(name='delete_nvram') + self.nvram_mgr = nvram_mgr + self.instance = instance + + def execute(self): + if self.nvram_mgr is None: + return + + LOG.info(_LI('Deleting NVRAM for instance: %s'), + self.instance.name, instance=self.instance) + try: + self.nvram_mgr.remove(self.instance) + except Exception as e: + LOG.exception(_LE('Unable to delete NVRAM for instance ' + '%(name)s. Exception: %(reason)s'), + {'name': self.instance.name, + 'reason': six.text_type(e)}, + instance=self.instance) class Delete(task.Task): diff --git a/nova_powervm/virt/powervm/vm.py b/nova_powervm/virt/powervm/vm.py index 6df63e90..bc43a5e5 100644 --- a/nova_powervm/virt/powervm/vm.py +++ b/nova_powervm/virt/powervm/vm.py @@ -519,13 +519,14 @@ def get_vm_qp(adapter, lpar_uuid, qprop=None, log_errors=True): return jsonutils.loads(resp.body) -def crt_lpar(adapter, host_wrapper, instance, flavor): +def crt_lpar(adapter, host_wrapper, instance, flavor, nvram=None): """Create an LPAR based on the host based on the instance :param adapter: The adapter for the pypowervm API :param host_wrapper: The host wrapper :param instance: The nova instance. :param flavor: The nova flavor. + :param nvram: The NVRAM to set on the LPAR. :return: The LPAR response from the API. """ try: @@ -533,6 +534,8 @@ def crt_lpar(adapter, host_wrapper, instance, flavor): flavor) pending_lpar_w = lpar_b.build() vldn.LPARWrapperValidator(pending_lpar_w, host_wrapper).validate_all() + if nvram is not None: + pending_lpar_w.nvram = nvram lpar_w = pending_lpar_w.create(parent_type=pvm_ms.System, parent_uuid=host_wrapper.uuid) return lpar_w