Merge "Support the nova evacuate spawn semantic"

This commit is contained in:
Jenkins 2016-01-30 16:20:04 +00:00 committed by Gerrit Code Review
commit aa077c2ea8
4 changed files with 233 additions and 4 deletions

View File

@ -37,6 +37,7 @@ def cna(mac):
class TestNetwork(test.TestCase):
def setUp(self):
super(TestNetwork, self).setUp()
self.flags(host='host1')
self.apt = self.useFixture(pvm_fx.AdapterFx()).adpt
self.mock_lpar_wrap = mock.MagicMock()
@ -181,6 +182,66 @@ class TestNetwork(test.TestCase):
# The create should have only been called once.
self.assertEqual(1, mock_vm_crt.call_count)
@mock.patch('nova_powervm.virt.powervm.vm.crt_vif')
@mock.patch('nova_powervm.virt.powervm.vm.get_cnas')
def test_plug_vifs_diff_host(self, mock_vm_get, mock_vm_crt):
    """Tests that crt vif handles bad inst.host value."""
    instance = powervm.TEST_INST1

    # Make the configured compute host differ from instance.host.
    self.flags(host='host2')

    # A single CNA keeps the fixture simple.
    mock_vm_get.return_value = [cna('AABBCCDDEE11')]

    # One VIF's worth of network info.
    network_info = [{'address': 'aa:bb:cc:dd:ee:ff'}]

    # Build the task and execute it while watching instance.save().
    plug_task = tf_net.PlugVifs(mock.MagicMock(), self.apt, instance,
                                network_info, 'host_uuid')
    with mock.patch.object(instance, 'save') as mock_inst_save:
        plug_task.execute(self.mock_lpar_wrap)

    # Exactly one VIF create should have happened.
    self.assertEqual(1, mock_vm_crt.call_count)
    # Two saves: one for the temporary host change, one for the restore.
    self.assertEqual(2, mock_inst_save.call_count)
    # The original host value must have been put back.
    self.assertEqual('host1', instance.host)
@mock.patch('nova_powervm.virt.powervm.vm.crt_vif')
@mock.patch('nova_powervm.virt.powervm.vm.get_cnas')
def test_plug_vifs_diff_host_except(self, mock_vm_get, mock_vm_crt):
    """Tests that crt vif handles bad inst.host value.

    This test ensures that if we get a timeout exception we still reset
    the inst.host value back to the original value
    """
    instance = powervm.TEST_INST1

    # Make the configured compute host differ from instance.host.
    self.flags(host='host2')

    # A single CNA keeps the fixture simple.
    mock_vm_get.return_value = [cna('AABBCCDDEE11')]

    # One VIF's worth of network info.
    network_info = [{'address': 'aa:bb:cc:dd:ee:ff'}]

    # Force the VIF create to time out.
    mock_vm_crt.side_effect = eventlet.timeout.Timeout()

    # Build the task; execution should surface the create exception.
    plug_task = tf_net.PlugVifs(mock.MagicMock(), self.apt, instance,
                                network_info, 'host_uuid')
    with mock.patch.object(instance, 'save') as mock_inst_save:
        self.assertRaises(exception.VirtualInterfaceCreateException,
                          plug_task.execute, self.mock_lpar_wrap)

    # Exactly one VIF create should have been attempted.
    self.assertEqual(1, mock_vm_crt.call_count)
    # Two saves: one for the temporary host change, one for the restore.
    self.assertEqual(2, mock_inst_save.call_count)
    # Even on failure, the original host value must have been put back.
    self.assertEqual('host1', instance.host)
@mock.patch('nova_powervm.virt.powervm.vm.crt_secure_rmc_vif')
@mock.patch('nova_powervm.virt.powervm.vm.get_secure_rmc_vswitch')
@mock.patch('nova_powervm.virt.powervm.vm.crt_vif')

View File

@ -28,6 +28,7 @@ from nova.objects import block_device as bdmobj
from nova import test
from nova.tests.unit import fake_instance
from nova.virt import block_device as nova_virt_bdm
from nova.virt import driver as virt_driver
from nova.virt import fake
import pypowervm.adapter as pvm_adp
import pypowervm.exceptions as pvm_exc
@ -170,6 +171,39 @@ class TestPowerVMDriver(test.TestCase):
self.assertTrue(self.drv.instance_exists(mock.Mock()))
self.assertFalse(self.drv.instance_exists(mock.Mock()))
def test_instance_on_disk(self):
    """Validates the instance_on_disk method.

    Covers: boot-from-volume, shared storage with a findable disk,
    shared storage without a findable disk, a disk-driver exception,
    and non-shared storage.
    """
    # The patches target attributes created in setUp (self.drv and
    # self.disk_dvr), so they decorate a nested helper rather than the
    # test method itself.
    @mock.patch.object(self.drv, '_is_booted_from_volume')
    @mock.patch.object(self.drv, '_get_block_device_info')
    @mock.patch.object(self.disk_dvr, 'capabilities')
    @mock.patch.object(self.disk_dvr, 'get_disk_ref')
    def inst_on_disk(mock_disk_ref, mock_capb, mock_block, mock_boot):
        # Test boot from volume.
        mock_boot.return_value = True
        self.assertTrue(self.drv.instance_on_disk(self.inst))
        mock_boot.return_value = False

        # Disk driver is shared storage and can find the disk.
        # NOTE: item assignment on a MagicMock (mock_capb[...] = x)
        # only records a __setitem__ call; it does NOT change what
        # __getitem__ returns, which would otherwise stay a truthy
        # MagicMock.  Configure __getitem__ explicitly so the
        # capabilities['shared_storage'] lookup sees the intended value.
        mock_capb.__getitem__.return_value = True
        mock_disk_ref.return_value = 'disk_reference'
        self.assertTrue(self.drv.instance_on_disk(self.inst))

        # Disk driver can't find it
        mock_disk_ref.return_value = None
        self.assertFalse(self.drv.instance_on_disk(self.inst))

        # Disk driver exception
        mock_disk_ref.side_effect = ValueError('Bad disk')
        self.assertFalse(self.drv.instance_on_disk(self.inst))
        mock_disk_ref.side_effect = None

        # Not on shared storage.  Restore a findable disk so this case
        # only passes if the shared-storage branch is truly skipped.
        mock_disk_ref.return_value = 'disk_reference'
        mock_capb.__getitem__.return_value = False
        self.assertFalse(self.drv.instance_on_disk(self.inst))
    inst_on_disk()
@mock.patch('nova_powervm.virt.powervm.tasks.storage.'
'CreateAndConnectCfgDrive.execute')
@mock.patch('nova_powervm.virt.powervm.tasks.storage.ConnectVolume'
@ -405,6 +439,49 @@ class TestPowerVMDriver(test.TestCase):
self.scrub_stg.assert_called_with([9], self.stg_ftsk, lpars_exist=True)
# NOTE: decorator order is significant -- mock.patch decorators inject
# their mocks bottom-up, matching the parameter order of the test method.
@mock.patch('nova_powervm.virt.powervm.tasks.storage.'
'CreateAndConnectCfgDrive.execute')
@mock.patch('nova_powervm.virt.powervm.tasks.storage.ConnectVolume'
'.execute')
@mock.patch('nova_powervm.virt.powervm.tasks.storage.FindDisk'
'.execute')
@mock.patch('nova_powervm.virt.powervm.driver.PowerVMDriver.'
'_is_booted_from_volume')
@mock.patch('nova_powervm.virt.powervm.tasks.network.PlugMgmtVif.execute')
@mock.patch('nova_powervm.virt.powervm.tasks.network.PlugVifs.execute')
@mock.patch('nova.virt.configdrive.required_by')
@mock.patch('nova.objects.flavor.Flavor.get_by_id')
@mock.patch('pypowervm.tasks.power.power_on')
def test_spawn_recreate(
self, mock_pwron, mock_get_flv, mock_cfg_drv, mock_plug_vifs,
mock_plug_mgmt_vif, mock_boot_from_vol, mock_find_disk,
mock_conn_vol, mock_crt_cfg_drv):
"""Validates the 'recreate' spawn flow.
Uses a basic disk image, attaching networks and powering on.
"""
# Set up the mocks to the tasks.
mock_get_flv.return_value = self.inst.get_flavor()
mock_cfg_drv.return_value = False
mock_boot_from_vol.return_value = False
# REBUILD_SPAWNING marks this spawn as an evacuate/rebuild rather than
# a first-time deploy.
self.inst.task_state = task_states.REBUILD_SPAWNING
# Invoke the method.
# EMPTY_IMAGE mimics evacuation on shared storage, where no Glance
# image metadata is supplied.
self.drv.spawn('context', self.inst, powervm.EMPTY_IMAGE,
'injected_files', 'admin_password')
# Assert the correct tasks were called
self.assertTrue(mock_plug_vifs.called)
self.assertTrue(mock_plug_mgmt_vif.called)
# A rebuild reuses the existing disk (FindDisk) instead of creating one.
self.assertTrue(mock_find_disk.called)
self.crt_lpar.assert_called_with(
self.apt, self.drv.host_wrapper, self.inst, self.inst.get_flavor())
self.assertTrue(mock_pwron.called)
self.assertFalse(mock_pwron.call_args[1]['synchronous'])
# Assert that tasks that are not supposed to be called are not called
self.assertFalse(mock_conn_vol.called)
self.assertFalse(mock_crt_cfg_drv.called)
self.scrub_stg.assert_called_with([9], self.stg_ftsk, lpars_exist=True)
@mock.patch('nova.virt.block_device.DriverVolumeBlockDevice.save')
@mock.patch('nova_powervm.virt.powervm.tasks.network.PlugMgmtVif.execute')
@mock.patch('nova_powervm.virt.powervm.tasks.network.PlugVifs.execute')
@ -1486,3 +1563,10 @@ class TestPowerVMDriver(test.TestCase):
drivers = self.drv._build_vol_drivers('context', 'instance')
self.assertEqual(['drv0', 'drv1'], drivers)
@mock.patch.object(objects.BlockDeviceMappingList, 'get_by_instance_uuid')
@mock.patch.object(virt_driver, 'get_block_device_info')
def test_get_block_device_info(self, mock_bk_dev, mock_bdml):
    """Validates _get_block_device_info delegates to the nova helpers.

    Beyond the returned value, verify the BDM list is fetched for the
    right instance and fed through to the virt driver helper.
    """
    mock_bk_dev.return_value = 'info'
    self.assertEqual('info',
                     self.drv._get_block_device_info('ctx', self.inst))
    # The BDMs must be looked up by the instance's uuid...
    mock_bdml.assert_called_once_with('ctx', self.inst.uuid)
    # ...and the resulting list handed to the nova virt helper.
    mock_bk_dev.assert_called_once_with(self.inst, mock_bdml.return_value)

View File

@ -85,6 +85,12 @@ class PowerVMDriver(driver.ComputeDriver):
"""PowerVM Implementation of Compute Driver."""
capabilities = {
"has_imagecache": False,
"supports_recreate": True,
"supports_migrate_to_same_host": False
}
def __init__(self, virtapi):
super(PowerVMDriver, self).__init__(virtapi)
@ -220,6 +226,44 @@ class PowerVMDriver(driver.ComputeDriver):
"""Return the current CPU state of the host."""
return self.host_cpu_stats.get_host_cpu_stats()
def instance_on_disk(self, instance):
    """Checks access of instance files on the host.

    :param instance: nova.objects.instance.Instance to lookup

    Returns True if files of an instance with the supplied ID are
    accessible on the host, False otherwise.

    .. note::
        Used in rebuild for HA implementation and required for validation
        of access to instance shared disk files
    """
    admin_ctx = ctx.get_admin_context()
    bdi = self._get_block_device_info(admin_ctx, instance)

    # A volume-backed instance has no local image disk, so host-local
    # disk access is irrelevant.
    if self._is_booted_from_volume(bdi):
        LOG.debug('Instance booted from volume.', instance=instance)
        return True

    # On shared storage, a successful boot-disk lookup proves access.
    if self.disk_dvr.capabilities['shared_storage']:
        LOG.debug('Looking for instance disks on shared storage.',
                  instance=instance)
        try:
            boot_disk = self.disk_dvr.get_disk_ref(instance,
                                                   disk_dvr.DiskType.BOOT)
        except Exception as exc:
            # Best effort: a failed lookup just means "not accessible".
            LOG.exception(exc)
        else:
            if boot_disk:
                LOG.debug('Disks found on shared storage.',
                          instance=instance)
                return True

    LOG.debug('Instance disks not found on this host.', instance=instance)
    return False
def spawn(self, context, instance, image_meta, injected_files,
admin_password, network_info=None, block_device_info=None,
flavor=None):
@ -232,6 +276,11 @@ class PowerVMDriver(driver.ComputeDriver):
cleaned up, and the virtualization platform should be in the state
that it was before this call began.
Spawn can be called while deploying an instance for the first time or
it can be called to recreate an instance that was shelved or during
evacuation. We have to be careful to handle all these cases. During
evacuation, when on shared storage, the image_meta will be empty.
:param context: security context
:param instance: Instance object as returned by DB layer.
This function should use the data there to guide
@ -276,11 +325,17 @@ class PowerVMDriver(driver.ComputeDriver):
# Only add the image disk if this is from Glance.
if not self._is_booted_from_volume(block_device_info):
# Creates the boot image.
flow_spawn.add(tf_stg.CreateDiskForImg(
self.disk_dvr, context, instance, image_meta,
disk_size=flavor.root_gb))
# If a rebuild, just hookup the existing disk on shared storage.
if (instance.task_state == task_states.REBUILD_SPAWNING and
'id' not in image_meta):
flow_spawn.add(tf_stg.FindDisk(
self.disk_dvr, context, instance, disk_dvr.DiskType.BOOT))
else:
# Creates the boot image.
flow_spawn.add(tf_stg.CreateDiskForImg(
self.disk_dvr, context, instance, image_meta,
disk_size=flavor.root_gb))
# Connects up the disk to the LPAR
flow_spawn.add(tf_stg.ConnectDisk(self.disk_dvr, context, instance,
stg_ftsk=stg_ftsk))
@ -356,6 +411,13 @@ class PowerVMDriver(driver.ComputeDriver):
stg_ftsk=stg_ftsk):
flow.add(tf_stg.DisconnectVolume(vol_drv))
def _get_block_device_info(self, context, instance):
    """Retrieves the instance's block_device_info.

    :param context: security context for the DB lookup.
    :param instance: nova.objects.instance.Instance to look up.
    :return: the block_device_info for the instance's mappings.
    """
    mappings = objects.BlockDeviceMappingList.get_by_instance_uuid(
        context, instance.uuid)
    return driver.get_block_device_info(instance, mappings)
def _is_booted_from_volume(self, block_device_info):
"""Determine whether the root device is listed in block_device_info.

View File

@ -166,6 +166,22 @@ class PlugVifs(task.Task):
instance=self.instance)
raise exception.VirtualInterfaceCreateException()
# TODO(KYLEH): We're setting up to wait for an instance event. The
# event needs to come back to our compute manager so we need to ensure
# the instance.host is set to our host. We shouldn't need to do this
# but in the evacuate/recreate case it may reflect the old host.
# See: https://bugs.launchpad.net/nova/+bug/1535918
undo_host_change = False
if self.instance.host != CONF.host:
LOG.warning(_LW('Instance was not assigned to this host. '
'It was assigned to: %s'), self.instance.host,
instance=self.instance)
# Update the instance...
old_host = self.instance.host
self.instance.host = CONF.host
self.instance.save()
undo_host_change = True
# For the VIFs, run the creates (and wait for the events back)
try:
with self.virt_api.wait_for_instance_event(
@ -185,6 +201,12 @@ class PlugVifs(task.Task):
'%(sys)s'), {'sys': self.instance.name},
instance=self.instance)
raise exception.VirtualInterfaceCreateException()
finally:
if undo_host_change:
LOG.info(_LI('Undoing temporary host assignment to instance.'),
instance=self.instance)
self.instance.host = old_host
self.instance.save()
# Return the list of created VIFs.
return cna_w_list