HyperVClusterDriver: reclaim failovered instances on start
The driver will now detect and reclaim any failovered instances that appeared on the host while the nova-compute service was down. Closes-Bug: #1646809 Change-Id: Id7b053f9debcb869efb4440cfaf6a3b66b067b8c
This commit is contained in:
parent
eeb97176c4
commit
a32c6b404a
@ -99,6 +99,23 @@ class ClusterOps(object):
|
||||
self._daemon.start(
|
||||
interval=CONF.hyperv.cluster_event_check_interval)
|
||||
|
||||
def reclaim_failovered_instances(self):
    """Take over instances that failed over to this host while the
    nova-compute service was down.

    Lists the instance uuids currently present on this host, fetches the
    matching Nova instances, and failover-migrates every instance that
    Nova still registers to a different node.
    """
    # NOTE(claudiub): some instances might have failovered while the
    # nova-compute service was down. Those instances will have to be
    # reclaimed by this node.
    attrs = ['id', 'uuid', 'name', 'host']
    local_uuids = self._vmops.list_instance_uuids()
    instances = self._get_nova_instances(attrs, local_uuids)

    for instance in instances:
        # Instances Nova already knows to be on this host need no action.
        if self._this_node.upper() == instance.host.upper():
            continue
        self._failover_migrate(instance.name, instance.host,
                               self._this_node)
|
||||
|
||||
def _failover_migrate(self, instance_name, old_host, new_host):
|
||||
"""This method will check if the generated event is a legitimate
|
||||
failover to this node. If it is, it will proceed to prepare the
|
||||
@ -195,13 +212,20 @@ class ClusterOps(object):
|
||||
return objects.Instance.get_by_uuid(self._context, vm_uuid)
|
||||
|
||||
def _update_instance_map(self):
|
||||
expected_attrs = ['id', 'uuid', 'name']
|
||||
|
||||
for server in objects.InstanceList.get_by_filters(
|
||||
self._context, {'deleted': False},
|
||||
expected_attrs=expected_attrs):
|
||||
for server in self._get_nova_instances():
|
||||
self._instance_map[server.name] = server.uuid
|
||||
|
||||
def _get_nova_instances(self, expected_attrs=None, instance_uuids=None):
    """Fetch the non-deleted Nova instances, optionally filtered by uuid.

    :param expected_attrs: extra instance attributes to load; defaults
        to ['id', 'uuid', 'name'] when not given.
    :param instance_uuids: optional list of instance uuids to filter on.
    :return: the InstanceList returned by the DB query.
    """
    attrs = expected_attrs or ['id', 'uuid', 'name']

    filters = {'deleted': False}
    if instance_uuids:
        filters['uuid'] = instance_uuids

    return objects.InstanceList.get_by_filters(
        self._context, filters, expected_attrs=attrs)
|
||||
|
||||
def _get_instance_block_device_mappings(self, instance):
|
||||
"""Transform block devices to the driver block_device format."""
|
||||
bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
|
||||
|
@ -27,6 +27,7 @@ class HyperVClusterDriver(driver.HyperVDriver):
|
||||
self._livemigrationops = livemigrationops.ClusterLiveMigrationOps()
|
||||
|
||||
self._clops.start_failover_listener_daemon()
|
||||
self._clops.reclaim_failovered_instances()
|
||||
|
||||
def spawn(self, context, instance, image_meta, injected_files,
|
||||
admin_password, network_info=None, block_device_info=None):
|
||||
|
@ -109,6 +109,25 @@ class ClusterOpsTestCase(test_base.HyperVBaseTestCase):
|
||||
self.clusterops._clustutils.monitor_vm_failover.assert_called_with(
|
||||
self.clusterops._failover_migrate)
|
||||
|
||||
@mock.patch.object(clusterops.ClusterOps, '_failover_migrate')
@mock.patch.object(clusterops.ClusterOps, '_get_nova_instances')
def test_reclaim_failovered_instances(self, mock_get_instances,
                                      mock_failover_migrate):
    # One instance registered to another host, one already local.
    self.clusterops._this_node = 'fake_node'
    foreign_instance = mock.MagicMock(host='other_host')
    local_instance = mock.MagicMock(host=self.clusterops._this_node)
    mock_get_instances.return_value = [foreign_instance, local_instance]

    self.clusterops.reclaim_failovered_instances()

    self.clusterops._vmops.list_instance_uuids.assert_called_once_with()
    mock_get_instances.assert_called_once_with(
        ['id', 'uuid', 'name', 'host'],
        self.clusterops._vmops.list_instance_uuids.return_value)
    # Only the instance registered to another host gets migrated back.
    mock_failover_migrate.assert_called_once_with(
        foreign_instance.name, foreign_instance.host,
        self.clusterops._this_node)
|
||||
|
||||
@mock.patch.object(clusterops, 'LOG')
|
||||
@mock.patch.object(clusterops.ClusterOps, '_get_instance_by_name')
|
||||
def test_failover_migrate_no_instance(self, mock_get_instance_by_name,
|
||||
@ -262,21 +281,29 @@ class ClusterOpsTestCase(test_base.HyperVBaseTestCase):
|
||||
self.assertEqual(ret, mock_instance)
|
||||
self.clusterops._update_instance_map.assert_called_with()
|
||||
|
||||
@mock.patch.object(clusterops.ClusterOps, '_get_nova_instances')
def test_update_instance_map(self, mock_get_instances):
    # NOTE(review): the extracted span interleaved pre- and post-change
    # diff lines; this is the post-change version, mocking
    # _get_nova_instances instead of InstanceList.get_by_filters.
    mock_instance = mock.MagicMock(uuid=mock.sentinel.uuid)
    # 'name' cannot be set through the MagicMock constructor.
    mock_instance.configure_mock(name=mock.sentinel.name)
    mock_get_instances.return_value = [mock_instance]

    self.clusterops._update_instance_map()

    self.assertEqual(mock.sentinel.uuid,
                     self.clusterops._instance_map[mock.sentinel.name])
|
||||
|
||||
@mock.patch.object(clusterops.objects.InstanceList, 'get_by_filters')
def test_get_nova_instances(self, mock_get_by_filters):
    ret = self.clusterops._get_nova_instances(
        instance_uuids=mock.sentinel.uuids)

    self.assertEqual(mock_get_by_filters.return_value, ret)
    # The default expected_attrs are applied, and the uuid filter is
    # added on top of the base 'not deleted' filter.
    mock_get_by_filters.assert_called_once_with(
        self.clusterops._context,
        {'deleted': False, 'uuid': mock.sentinel.uuids},
        expected_attrs=['id', 'uuid', 'name'])
|
||||
|
||||
@mock.patch.object(clusterops.block_device, 'DriverVolumeBlockDevice')
|
||||
@mock.patch.object(clusterops.objects.BlockDeviceMappingList,
|
||||
'get_by_instance_uuid')
|
||||
|
Loading…
Reference in New Issue
Block a user