hyper-v: wait for neutron vif plug events

During spawn, the Hyper-V driver should wait for the Neutron
VIF plug events before starting the instance, in order to ensure
that the VIFs are already bound and the guest will be able to
send DHCP requests.

The configuration option "vif_plugging_timeout" will determine
the maximum amount of time to wait for the Neutron events.

Setting the configuration option "vif_plugging_is_fatal" to True
will cause an exception to be raised during spawn if the Hyper-V
driver did not receive the required Neutron port-bound events.

Closes-Bug: #1631872

Change-Id: Ie8b4cc64f580aca1115d88258728ab90241d112b
This commit is contained in:
Claudiu Belu 2016-11-17 13:10:06 +02:00
parent f8e35e5bf9
commit 00fdf28408
3 changed files with 92 additions and 5 deletions

View File

@ -62,7 +62,7 @@ class VMOpsTestCase(test_base.HyperVBaseTestCase):
super(VMOpsTestCase, self).setUp()
self.context = 'fake-context'
self._vmops = vmops.VMOps()
self._vmops = vmops.VMOps(virtapi=mock.MagicMock())
self._vmops._vmutils = mock.MagicMock()
self._vmops._metricsutils = mock.MagicMock()
self._vmops._vhdutils = mock.MagicMock()
@ -476,6 +476,53 @@ class VMOpsTestCase(test_base.HyperVBaseTestCase):
[mock.sentinel.FILE], mock.sentinel.PASSWORD,
mock.sentinel.INFO, mock.sentinel.DEV_INFO)
@mock.patch.object(vmops.VMOps, '_get_neutron_events')
def test_wait_vif_plug_events(self, mock_get_events):
    """A virtapi event-wait timeout must surface as
    VirtualInterfaceCreateException when vif_plugging_is_fatal is set.
    """
    self.flags(vif_plugging_timeout=1)
    self.flags(vif_plugging_is_fatal=True)
    self._vmops._virtapi.wait_for_instance_event.side_effect = (
        etimeout.Timeout)

    def _enter_and_exit():
        # Entering the context manager triggers the (mocked) event wait.
        with self._vmops.wait_vif_plug_events(mock.sentinel.instance,
                                              mock.sentinel.network_info):
            pass

    self.assertRaises(exception.VirtualInterfaceCreateException,
                      _enter_and_exit)

    mock_get_events.assert_called_once_with(mock.sentinel.network_info)
    self._vmops._virtapi.wait_for_instance_event.assert_called_once_with(
        mock.sentinel.instance, mock_get_events.return_value,
        deadline=CONF.vif_plugging_timeout,
        error_callback=self._vmops._neutron_failed_callback)
def test_neutron_failed_callback(self):
    """A Neutron-reported failure event is fatal when configured so."""
    self.flags(vif_plugging_is_fatal=True)
    self.assertRaises(
        exception.VirtualInterfaceCreateException,
        lambda: self._vmops._neutron_failed_callback(
            mock.sentinel.event_name, mock.sentinel.instance))
@mock.patch.object(vmops.utils, 'is_neutron')
def test_get_neutron_events(self, mock_is_neutron):
    """Only VIFs explicitly marked inactive yield plug events."""
    vifs = [{'id': mock.sentinel.vif_id1, 'active': True},
            {'id': mock.sentinel.vif_id2, 'active': False},
            {'id': mock.sentinel.vif_id3}]

    result = self._vmops._get_neutron_events(vifs)

    expected = [('network-vif-plugged', mock.sentinel.vif_id2)]
    self.assertEqual(expected, result)
    mock_is_neutron.assert_called_once_with()
@mock.patch.object(vmops.utils, 'is_neutron')
def test_get_neutron_events_no_timeout(self, mock_is_neutron):
    """With vif_plugging_timeout=0 no events should be collected."""
    self.flags(vif_plugging_timeout=0)
    vifs = [{'id': mock.sentinel.vif_id1, 'active': True}]

    self.assertEqual([], self._vmops._get_neutron_events(vifs))
    mock_is_neutron.assert_called_once_with()
@mock.patch.object(vmops.VMOps, '_requires_secure_boot')
@mock.patch.object(vmops.VMOps, '_requires_certificate')
@mock.patch('nova.virt.hyperv.volumeops.VolumeOps'

View File

@ -110,7 +110,7 @@ class HyperVDriver(driver.ComputeDriver):
self._hostops = hostops.HostOps()
self._volumeops = volumeops.VolumeOps()
self._vmops = vmops.VMOps()
self._vmops = vmops.VMOps(virtapi)
self._snapshotops = snapshotops.SnapshotOps()
self._livemigrationops = livemigrationops.LiveMigrationOps()
self._migrationops = migrationops.MigrationOps()

View File

@ -17,6 +17,7 @@
"""
Management class for basic VM operations.
"""
import contextlib
import functools
import os
import time
@ -98,7 +99,8 @@ class VMOps(object):
_MAX_CONSOLE_LOG_FILE_SIZE = units.Mi / 2
_ROOT_DISK_CTRL_ADDR = 0
def __init__(self):
def __init__(self, virtapi=None):
self._virtapi = virtapi
self._vmutils = utilsfactory.get_vmutils()
self._metricsutils = utilsfactory.get_metricsutils()
self._vhdutils = utilsfactory.get_vhdutils()
@ -291,8 +293,11 @@ class VMOps(object):
self._create_ephemerals(instance, block_device_info['ephemerals'])
try:
self.create_instance(instance, network_info, root_device,
block_device_info, vm_gen, image_meta)
with self.wait_vif_plug_events(instance, network_info):
# waiting will occur after the instance is created.
self.create_instance(instance, network_info, root_device,
block_device_info, vm_gen, image_meta)
self._save_device_metadata(context, instance, block_device_info)
if configdrive.required_by(instance):
@ -309,6 +314,41 @@ class VMOps(object):
with excutils.save_and_reraise_exception():
self.destroy(instance)
@contextlib.contextmanager
def wait_vif_plug_events(self, instance, network_info):
    """Context manager gating the wrapped work on Neutron VIF plug events.

    Registers for the expected network-vif-plugged events before the
    wrapped block runs, then blocks on them afterwards (the virtapi
    context manager waits on exit). On timeout, logs a warning and —
    only if vif_plugging_is_fatal is set — raises
    VirtualInterfaceCreateException.
    """
    events = self._get_neutron_events(network_info)
    try:
        deadline = CONF.vif_plugging_timeout
        with self._virtapi.wait_for_instance_event(
                instance, events, deadline=deadline,
                error_callback=self._neutron_failed_callback):
            yield
    except etimeout.Timeout:
        # Neutron never sent the expected port-bound notifications.
        LOG.warning(_LW('Timeout waiting for vif plugging callback for '
                        'instance.'), instance=instance)
        if CONF.vif_plugging_is_fatal:
            raise exception.VirtualInterfaceCreateException()
def _neutron_failed_callback(self, event_name, instance):
    """Error callback handed to the virtapi event wait.

    Always logs the failed Neutron event; escalates to
    VirtualInterfaceCreateException only when vif_plugging_is_fatal.
    """
    LOG.error(_LE('Neutron Reported failure on event %s'),
              event_name, instance=instance)
    if not CONF.vif_plugging_is_fatal:
        return
    raise exception.VirtualInterfaceCreateException()
def _get_neutron_events(self, network_info):
    """Return the (event_name, vif_id) pairs spawn should wait for.

    Yields nothing unless Neutron is in use and a non-zero
    vif_plugging_timeout is configured.
    """
    # NOTE(danms): Collect only VIFs currently reported as down, since
    # those are the ones expected to emit a down->up transition event.
    # Anything already up won't transition, and anything with a stale
    # (missing) 'active' flag is assumed up so we don't block on it.
    if not (utils.is_neutron() and CONF.vif_plugging_timeout):
        return []
    return [('network-vif-plugged', vif['id'])
            for vif in network_info
            if vif.get('active') is False]
def create_instance(self, instance, network_info, root_device,
block_device_info, vm_gen, image_meta):
instance_name = instance.name