Make resource tracker use UUIDs instead of names

This makes the resource tracker look up and create ComputeNode objects
by uuid instead of nodename. For drivers like ironic that already
provide 'uuid' in the resources dict, we can use that. For those that
do not, we force the uuid to be the locally-persisted node uuid, and
use that to find/create the ComputeNode object.

A (happy) side-effect of this is that if we find a deleted compute node
object that matches that of our hypervisor, we undelete it instead of
re-creating one with a new uuid, which may clash with our old one. This
means we remove some of the special-casing of ironic rebalance,
although the tests for that still largely stay the same.

Change-Id: I6a582a38c302fd1554a49abc38cfeda7c324d911
parent 0efdbb3660
commit 23c5f3d585
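In essence, the tracker's database key changes from the (host, hypervisor
hostname) pair to the node's stable uuid. A minimal before/after sketch
using the two ComputeNode lookups that appear in the diff below (the
surrounding tracker code is elided):

    # Before: rows were keyed by host + hypervisor hostname.
    cn = objects.ComputeNode.get_by_host_and_nodename(
        context, self.host, nodename)

    # After: rows are keyed by a stable uuid, provided either by the
    # driver (ironic) or by the locally-persisted compute_id.
    cn = objects.ComputeNode.get_by_uuid(context, node_uuid)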
@@ -97,6 +97,7 @@ from nova.virt import configdrive
 from nova.virt import driver
 from nova.virt import event as virtevent
 from nova.virt import hardware
+import nova.virt.node
 from nova.virt import storage_users
 from nova.virt import virtapi
 from nova.volume import cinder
@@ -1471,27 +1472,27 @@ class ComputeManager(manager.Manager):
         :return: a dict of ComputeNode objects keyed by the UUID of the given
             node.
         """
-        nodes_by_uuid = {}
         try:
-            node_names = self.driver.get_available_nodes()
+            node_uuids = self.driver.get_available_node_uuids()
         except exception.VirtDriverNotReady:
             LOG.warning(
                 "Virt driver is not ready. If this is the first time this "
-                "service is starting on this host, then you can ignore this "
-                "warning.")
+                "service is starting on this host, then you can ignore "
+                "this warning.")
             return {}

-        for node_name in node_names:
-            try:
-                node = objects.ComputeNode.get_by_host_and_nodename(
-                    context, self.host, node_name)
-                nodes_by_uuid[node.uuid] = node
-            except exception.ComputeHostNotFound:
-                LOG.warning(
-                    "Compute node %s not found in the database. If this is "
-                    "the first time this service is starting on this host, "
-                    "then you can ignore this warning.", node_name)
-        return nodes_by_uuid
+        nodes = objects.ComputeNodeList.get_all_by_uuids(context, node_uuids)
+        if not nodes:
+            # NOTE(danms): This should only happen if the compute_id is
+            # pre-provisioned on a host that has never started.
+            LOG.warning('Compute nodes %s for host %s were not found in the '
+                        'database. If this is the first time this service is '
+                        'starting on this host, then you can ignore this '
+                        'warning.',
+                        node_uuids, self.host)
+            return {}
+
+        return {n.uuid: n for n in nodes}

     def _ensure_existing_node_identity(self, service_ref):
         """If we are upgrading from an older service version, we need
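Besides the key change, _get_nodes() now issues a single batched query
instead of one query per node. A rough usage sketch of the new shape,
mirroring how the unit tests further down call it (a request context and
an initialized ComputeManager are assumed):

    # Returns {node_uuid: ComputeNode}; an empty dict means either the
    # virt driver was not ready or no matching rows exist yet.
    nodes = self.compute._get_nodes(self.context)
    for node_uuid, cn in nodes.items():
        LOG.debug('Tracking node %s (%s)', cn.hypervisor_hostname, node_uuid)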
@@ -49,6 +49,7 @@ from nova import rpc
 from nova.scheduler.client import report
 from nova import utils
 from nova.virt import hardware
+from nova.virt import node


 CONF = nova.conf.CONF
@@ -668,50 +669,6 @@ class ResourceTracker(object):
         return (nodename not in self.compute_nodes or
                 not self.driver.node_is_available(nodename))

-    def _check_for_nodes_rebalance(self, context, resources, nodename):
-        """Check if nodes rebalance has happened.
-
-        The ironic driver maintains a hash ring mapping bare metal nodes
-        to compute nodes. If a compute dies, the hash ring is rebuilt, and
-        some of its bare metal nodes (more precisely, those not in ACTIVE
-        state) are assigned to other computes.
-
-        This method checks for this condition and adjusts the database
-        accordingly.
-
-        :param context: security context
-        :param resources: initial values
-        :param nodename: node name
-        :returns: True if a suitable compute node record was found, else False
-        """
-        if not self.driver.rebalances_nodes:
-            return False
-
-        # Its possible ironic just did a node re-balance, so let's
-        # check if there is a compute node that already has the correct
-        # hypervisor_hostname. We can re-use that rather than create a
-        # new one and have to move existing placement allocations
-        cn_candidates = objects.ComputeNodeList.get_by_hypervisor(
-            context, nodename)
-
-        if len(cn_candidates) == 1:
-            cn = cn_candidates[0]
-            LOG.info("ComputeNode %(name)s moving from %(old)s to %(new)s",
-                     {"name": nodename, "old": cn.host, "new": self.host})
-            cn.host = self.host
-            self.compute_nodes[nodename] = cn
-            self._copy_resources(cn, resources)
-            self._setup_pci_tracker(context, cn, resources)
-            self._update(context, cn)
-            return True
-        elif len(cn_candidates) > 1:
-            LOG.error(
-                "Found more than one ComputeNode for nodename %s. "
-                "Please clean up the orphaned ComputeNode records in your DB.",
-                nodename)
-
-        return False
-
     def _init_compute_node(self, context, resources):
         """Initialize the compute node if it does not already exist.

@@ -729,6 +686,7 @@ class ResourceTracker(object):
         False otherwise
         """
         nodename = resources['hypervisor_hostname']
+        node_uuid = resources['uuid']

         # if there is already a compute node just use resources
         # to initialize
@@ -740,16 +698,30 @@ class ResourceTracker(object):

         # now try to get the compute node record from the
         # database. If we get one we use resources to initialize
-        cn = self._get_compute_node(context, nodename)
+
+        # We use read_deleted=True so that we will find and recover a deleted
+        # node object, if necessary.
+        with utils.temporary_mutation(context, read_deleted='yes'):
+            cn = self._get_compute_node(context, node_uuid)
+            if cn and cn.deleted:
+                # Undelete and save this right now so that everything below
+                # can continue without read_deleted=yes
+                LOG.info('Undeleting compute node %s', cn.uuid)
+                cn.deleted = False
+                cn.deleted_at = None
+                cn.save()
         if cn:
             if cn.host != self.host:
                 LOG.info("ComputeNode %(name)s moving from %(old)s to %(new)s",
                          {"name": nodename, "old": cn.host, "new": self.host})
                 cn.host = self.host
                 self._update(context, cn)

             self.compute_nodes[nodename] = cn
             self._copy_resources(cn, resources)
             self._setup_pci_tracker(context, cn, resources)
             return False

-        if self._check_for_nodes_rebalance(context, resources, nodename):
-            return False
-
         # there was no local copy and none in the database
         # so we need to create a new compute node. This needs
         # to be initialized with resource values.
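The undelete path works by temporarily widening the context so queries
also see soft-deleted rows. The same pattern in isolation, using only the
calls that appear in the hunk above:

    with utils.temporary_mutation(context, read_deleted='yes'):
        # Inside the block this context also matches soft-deleted rows;
        # the original read_deleted setting is restored on exit.
        cn = self._get_compute_node(context, node_uuid)
        if cn and cn.deleted:
            cn.deleted = False
            cn.deleted_at = None
            cn.save()  # persist the undelete before normal reads resume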
@@ -889,6 +861,14 @@ class ResourceTracker(object):
         # contains a non-None value, even for non-Ironic nova-compute hosts. It
         # is this value that will be populated in the compute_nodes table.
         resources['host_ip'] = CONF.my_ip
+        if 'uuid' not in resources:
+            # NOTE(danms): Any driver that does not provide a uuid per
+            # node gets the locally-persistent compute_id. Only ironic
+            # should be setting the per-node uuid (and returning
+            # multiple nodes in general). If this is the first time we
+            # are creating a compute node on this host, we will
+            # generate and persist this uuid for the future.
+            resources['uuid'] = node.get_local_node_uuid()

         # We want the 'cpu_info' to be None from the POV of the
         # virt driver, but the DB requires it to be non-null so
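get_local_node_uuid() is what keeps the uuid stable across restarts for
drivers that do not report one. Its implementation is not part of this
diff; purely as an illustration of the read-or-create behavior the NOTE
describes (the function body, file name, and default location here are
assumptions, not nova's actual code):

    import os
    import uuid

    def get_local_node_uuid(state_path='/var/lib/nova'):
        # Illustrative sketch: return the node uuid persisted on local
        # disk, generating and saving one on first use.
        path = os.path.join(state_path, 'compute_id')
        try:
            with open(path) as f:
                return f.read().strip()
        except FileNotFoundError:
            node_uuid = str(uuid.uuid4())
            with open(path, 'w') as f:
                f.write(node_uuid)
            return node_uuid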
@@ -1014,14 +994,13 @@ class ResourceTracker(object):
         if startup:
             self._check_resources(context)

-    def _get_compute_node(self, context, nodename):
+    def _get_compute_node(self, context, node_uuid):
         """Returns compute node for the host and nodename."""
         try:
-            return objects.ComputeNode.get_by_host_and_nodename(
-                context, self.host, nodename)
+            return objects.ComputeNode.get_by_uuid(context, node_uuid)
         except exception.NotFound:
             LOG.warning("No compute node record for %(host)s:%(node)s",
-                        {'host': self.host, 'node': nodename})
+                        {'host': self.host, 'node': node_uuid})

     def _report_hypervisor_resource_view(self, resources):
         """Log the hypervisor's view of free resources.
@@ -388,8 +388,11 @@ class ComputeNode(base.NovaPersistentObject, base.NovaObject):
             # The uuid field is read-only so it should only be set when
             # creating the compute node record for the first time. Ignore
             # it otherwise.
-            if key == 'uuid' and 'uuid' in self:
-                continue
+            if (key == 'uuid' and 'uuid' in self and
+                    resources[key] != self.uuid):
+                raise exception.InvalidNodeConfiguration(
+                    reason='Attempt to overwrite node %s with %s!' % (
+                        self.uuid, resources[key]))
             setattr(self, key, resources[key])

         # supported_instances has a different name in compute_node
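With this change a uuid mismatch is a hard error rather than a silent
ignore. A minimal sketch of what a caller now sees (names taken from the
hunk above; the except branch is illustrative only):

    try:
        compute.update_from_virt_driver(resources)
    except exception.InvalidNodeConfiguration:
        # The virt driver reported a uuid that conflicts with the one
        # already recorded for this ComputeNode.
        raise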
@@ -181,6 +181,9 @@ class ServersTestBase(integrated_helpers._IntegratedTestBase):
         with mock.patch('nova.virt.node.get_local_node_uuid') as m:
             m.return_value = str(getattr(uuids, 'node_%s' % hostname))
             self.computes[hostname] = _start_compute(hostname, host_info)
+            # We need to trigger libvirt.Host() to capture the node-local
+            # uuid while we have it mocked out.
+            self.computes[hostname].driver._host.get_node_uuid()

         self.compute_rp_uuids[hostname] = self.placement.get(
             '/resource_providers?name=%s' % hostname).body[
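The fixture pins the node-local uuid per fake host before anything caches
it. The same mock pattern in isolation (uuids is oslo's uuidsentinel
fixture, as used throughout these tests):

    from unittest import mock
    from oslo_utils.fixture import uuidsentinel as uuids

    with mock.patch('nova.virt.node.get_local_node_uuid') as m:
        m.return_value = str(uuids.node_host1)
        # Anything started inside this block observes the pinned uuid;
        # trigger any code that caches it before the patch is undone.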
@@ -1160,26 +1160,25 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
             self.context, {active_instance.uuid, evacuating_instance.uuid},
             mock_get_nodes.return_value.keys())

-    @mock.patch.object(objects.ComputeNode, 'get_by_host_and_nodename')
+    @mock.patch.object(objects.ComputeNodeList, 'get_all_by_uuids')
     @mock.patch.object(fake_driver.FakeDriver, 'get_available_nodes')
-    def test_get_nodes(self, mock_driver_get_nodes, mock_get_by_host_and_node):
+    def test_get_nodes(self, mock_driver_get_nodes, mock_get_by_uuid):
         mock_driver_get_nodes.return_value = ['fake_node1', 'fake_node2']
+        # NOTE(danms): The fake driver, by default, uses
+        # uuidsentinel.$node_name, so we can predict the uuids it will
+        # return here.
         cn1 = objects.ComputeNode(uuid=uuids.fake_node1)
         cn2 = objects.ComputeNode(uuid=uuids.fake_node2)
-        mock_get_by_host_and_node.side_effect = [cn1, cn2]
+        mock_get_by_uuid.return_value = [cn1, cn2]

         nodes = self.compute._get_nodes(self.context)

         self.assertEqual({uuids.fake_node1: cn1, uuids.fake_node2: cn2}, nodes)

         mock_driver_get_nodes.assert_called_once_with()
-        mock_get_by_host_and_node.assert_has_calls([
-            mock.call(self.context, self.compute.host, 'fake_node1'),
-            mock.call(self.context, self.compute.host, 'fake_node2')
-        ])
+        mock_get_by_uuid.assert_called_once_with(self.context,
+                                                 [uuids.fake_node1,
+                                                  uuids.fake_node2])

     @mock.patch.object(manager.LOG, 'warning')
     @mock.patch.object(
@@ -1201,29 +1200,25 @@ class ComputeManagerUnitTestCase(test.NoDBTestCase,
             "is starting on this host, then you can ignore this warning.")

     @mock.patch.object(manager.LOG, 'warning')
-    @mock.patch.object(objects.ComputeNode, 'get_by_host_and_nodename')
-    @mock.patch.object(fake_driver.FakeDriver, 'get_available_nodes')
+    @mock.patch.object(objects.ComputeNodeList, 'get_all_by_uuids')
+    @mock.patch.object(fake_driver.FakeDriver, 'get_available_node_uuids')
     def test_get_nodes_node_not_found(
-            self, mock_driver_get_nodes, mock_get_by_host_and_node,
+            self, mock_driver_get_nodes, mock_get_all_by_uuids,
             mock_log_warning):
-        mock_driver_get_nodes.return_value = ['fake-node1', 'fake-node2']
-        cn2 = objects.ComputeNode(uuid=uuids.cn2)
-        mock_get_by_host_and_node.side_effect = [
-            exception.ComputeHostNotFound(host='fake-node1'), cn2]
+        mock_driver_get_nodes.return_value = ['fake-node1']
+        mock_get_all_by_uuids.return_value = []

         nodes = self.compute._get_nodes(self.context)

-        self.assertEqual({uuids.cn2: cn2}, nodes)
+        self.assertEqual({}, nodes)

         mock_driver_get_nodes.assert_called_once_with()
-        mock_get_by_host_and_node.assert_has_calls([
-            mock.call(self.context, self.compute.host, 'fake-node1'),
-            mock.call(self.context, self.compute.host, 'fake-node2')
-        ])
+        mock_get_all_by_uuids.assert_called_once_with(self.context,
+                                                      ['fake-node1'])
         mock_log_warning.assert_called_once_with(
-            "Compute node %s not found in the database. If this is the first "
-            "time this service is starting on this host, then you can ignore "
-            "this warning.", 'fake-node1')
+            "Compute nodes %s for host %s were not found in the database. "
+            "If this is the first time this service is starting on this "
+            "host, then you can ignore this warning.", ['fake-node1'],
+            'fake-mini')

     def test_init_host_disk_devices_configuration_failure(self):
         self.flags(max_disk_devices_to_attach=0, group='compute')
@@ -64,11 +64,13 @@ _VIRT_DRIVER_AVAIL_RESOURCES = {
     'hypervisor_hostname': _NODENAME,
     'cpu_info': '',
     'numa_topology': None,
+    'uuid': uuids.cn1,
 }

 _COMPUTE_NODE_FIXTURES = [
     objects.ComputeNode(
         id=1,
+        deleted=False,
         uuid=uuids.cn1,
         host=_HOSTNAME,
         vcpus=_VIRT_DRIVER_AVAIL_RESOURCES['vcpus'],
@@ -586,7 +588,7 @@ class TestUpdateAvailableResources(BaseTestCase):
                 return_value=objects.InstancePCIRequests(requests=[]))
     @mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
                 return_value=objects.PciDeviceList())
-    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+    @mock.patch('nova.objects.ComputeNode.get_by_uuid')
     @mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
     @mock.patch('nova.objects.InstanceList.get_by_host_and_node')
     def test_disabled(self, get_mock, migr_mock, get_cn_mock, pci_mock,

@@ -619,7 +621,7 @@ class TestUpdateAvailableResources(BaseTestCase):
                 return_value=objects.InstancePCIRequests(requests=[]))
     @mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
                 return_value=objects.PciDeviceList())
-    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+    @mock.patch('nova.objects.ComputeNode.get_by_uuid')
     @mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
     @mock.patch('nova.objects.InstanceList.get_by_host_and_node')
     def test_no_instances_no_migrations_no_reserved(self, get_mock, migr_mock,

@@ -643,8 +645,7 @@ class TestUpdateAvailableResources(BaseTestCase):
                 'flavor',
                 'migration_context',
                 'resources'])
-        get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME,
-                                            _NODENAME)
+        get_cn_mock.assert_called_once_with(mock.ANY, uuids.cn1)
         migr_mock.assert_called_once_with(mock.ANY, _HOSTNAME,
                                           _NODENAME)

@@ -671,7 +672,7 @@ class TestUpdateAvailableResources(BaseTestCase):
                 return_value=objects.InstancePCIRequests(requests=[]))
     @mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
                 return_value=objects.PciDeviceList())
-    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+    @mock.patch('nova.objects.ComputeNode.get_by_uuid')
     @mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
     @mock.patch('nova.objects.InstanceList.get_by_host_and_node')
     @mock.patch('nova.compute.resource_tracker.ResourceTracker.'

@@ -730,7 +731,7 @@ class TestUpdateAvailableResources(BaseTestCase):
                 return_value=objects.InstancePCIRequests(requests=[]))
     @mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
                 return_value=objects.PciDeviceList())
-    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+    @mock.patch('nova.objects.ComputeNode.get_by_uuid')
     @mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
     @mock.patch('nova.objects.InstanceList.get_by_host_and_node')
     def test_no_instances_no_migrations_reserved_disk_ram_and_cpu(

@@ -747,7 +748,7 @@ class TestUpdateAvailableResources(BaseTestCase):

         update_mock = self._update_available_resources()

-        get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME, _NODENAME)
+        get_cn_mock.assert_called_once_with(mock.ANY, uuids.cn1)
         expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
         vals = {
             'free_disk_gb': 5,  # 6GB avail - 1 GB reserved

@@ -772,7 +773,7 @@ class TestUpdateAvailableResources(BaseTestCase):
                 return_value=objects.InstancePCIRequests(requests=[]))
     @mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
                 return_value=objects.PciDeviceList())
-    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+    @mock.patch('nova.objects.ComputeNode.get_by_uuid')
     @mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
     @mock.patch('nova.objects.InstanceList.get_by_host_and_node')
     def test_some_instances_no_migrations(self, get_mock, migr_mock,

@@ -797,7 +798,7 @@ class TestUpdateAvailableResources(BaseTestCase):

         update_mock = self._update_available_resources()

-        get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME, _NODENAME)
+        get_cn_mock.assert_called_once_with(mock.ANY, uuids.cn1)
         expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
         vals = {
             'free_disk_gb': 5,  # 6 - 1 used

@@ -823,7 +824,7 @@ class TestUpdateAvailableResources(BaseTestCase):
                 return_value=objects.InstancePCIRequests(requests=[]))
     @mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
                 return_value=objects.PciDeviceList())
-    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+    @mock.patch('nova.objects.ComputeNode.get_by_uuid')
     @mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
     @mock.patch('nova.objects.Instance.get_by_uuid')
     @mock.patch('nova.objects.InstanceList.get_by_host_and_node')

@@ -863,7 +864,7 @@ class TestUpdateAvailableResources(BaseTestCase):

         update_mock = self._update_available_resources()

-        get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME, _NODENAME)
+        get_cn_mock.assert_called_once_with(mock.ANY, uuids.cn1)
         expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
         vals = {
             'free_disk_gb': 5,

@@ -889,7 +890,7 @@ class TestUpdateAvailableResources(BaseTestCase):
                 return_value=objects.InstancePCIRequests(requests=[]))
     @mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
                 return_value=objects.PciDeviceList())
-    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+    @mock.patch('nova.objects.ComputeNode.get_by_uuid')
     @mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
     @mock.patch('nova.objects.Instance.get_by_uuid')
     @mock.patch('nova.objects.InstanceList.get_by_host_and_node')

@@ -926,7 +927,7 @@ class TestUpdateAvailableResources(BaseTestCase):

         update_mock = self._update_available_resources()

-        get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME, _NODENAME)
+        get_cn_mock.assert_called_once_with(mock.ANY, uuids.cn1)
         expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
         vals = {
             'free_disk_gb': 1,

@@ -952,7 +953,7 @@ class TestUpdateAvailableResources(BaseTestCase):
                 return_value=objects.InstancePCIRequests(requests=[]))
     @mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
                 return_value=objects.PciDeviceList())
-    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+    @mock.patch('nova.objects.ComputeNode.get_by_uuid')
     @mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
     @mock.patch('nova.objects.Instance.get_by_uuid')
     @mock.patch('nova.objects.InstanceList.get_by_host_and_node')

@@ -987,7 +988,7 @@ class TestUpdateAvailableResources(BaseTestCase):

         update_mock = self._update_available_resources()

-        get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME, _NODENAME)
+        get_cn_mock.assert_called_once_with(mock.ANY, uuids.cn1)
         expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
         vals = {
             'free_disk_gb': 1,

@@ -1013,7 +1014,7 @@ class TestUpdateAvailableResources(BaseTestCase):
                 return_value=objects.PciDeviceList())
     @mock.patch('nova.objects.MigrationContext.get_by_instance_uuid',
                 return_value=None)
-    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+    @mock.patch('nova.objects.ComputeNode.get_by_uuid')
     @mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
     @mock.patch('nova.objects.Instance.get_by_uuid')
     @mock.patch('nova.objects.InstanceList.get_by_host_and_node')

@@ -1056,7 +1057,7 @@ class TestUpdateAvailableResources(BaseTestCase):

         update_mock = self._update_available_resources()

-        get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME, _NODENAME)
+        get_cn_mock.assert_called_once_with(mock.ANY, uuids.cn1)
         expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
         vals = {
             # 6 total - 1G existing - 5G new flav - 1G old flav

@@ -1084,7 +1085,7 @@ class TestUpdateAvailableResources(BaseTestCase):
                 return_value=objects.InstancePCIRequests(requests=[]))
     @mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
                 return_value=objects.PciDeviceList())
-    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+    @mock.patch('nova.objects.ComputeNode.get_by_uuid')
     @mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
     @mock.patch('nova.objects.Instance.get_by_uuid')
     @mock.patch('nova.objects.InstanceList.get_by_host_and_node')

@@ -1121,7 +1122,7 @@ class TestUpdateAvailableResources(BaseTestCase):

         update_mock = self._update_available_resources()

-        get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME, _NODENAME)
+        get_cn_mock.assert_called_once_with(mock.ANY, uuids.cn1)
         expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
         vals = {
             'free_disk_gb': 1,

@@ -1147,7 +1148,7 @@ class TestUpdateAvailableResources(BaseTestCase):
                 new=mock.Mock(return_value=objects.PciDeviceList()))
     @mock.patch('nova.objects.MigrationContext.get_by_instance_uuid',
                 new=mock.Mock(return_value=None))
-    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+    @mock.patch('nova.objects.ComputeNode.get_by_uuid')
     @mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
     @mock.patch('nova.objects.Instance.get_by_uuid')
     @mock.patch('nova.objects.InstanceList.get_by_host_and_node')

@@ -1199,7 +1200,7 @@ class TestUpdateAvailableResources(BaseTestCase):
                 new=mock.Mock(return_value=objects.PciDeviceList()))
     @mock.patch('nova.objects.MigrationContext.get_by_instance_uuid',
                 new=mock.Mock(return_value=None))
-    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+    @mock.patch('nova.objects.ComputeNode.get_by_uuid')
     @mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
     @mock.patch('nova.objects.Instance.get_by_uuid')
     @mock.patch('nova.objects.InstanceList.get_by_host_and_node')

@@ -1240,7 +1241,7 @@ class TestUpdateAvailableResources(BaseTestCase):
                 new=mock.Mock(return_value=False))
     @mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
                 new=mock.Mock(return_value=objects.PciDeviceList()))
-    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+    @mock.patch('nova.objects.ComputeNode.get_by_uuid')
     @mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
     @mock.patch('nova.objects.InstanceList.get_by_host_and_node')
     def test_check_resources_startup_fail(self, mock_get_instances,
@@ -1273,7 +1274,7 @@ class TestInitComputeNode(BaseTestCase):
                 return_value=objects.PciDeviceList())
     @mock.patch('nova.objects.ComputeNode.create')
     @mock.patch('nova.objects.Service.get_by_compute_host')
-    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+    @mock.patch('nova.objects.ComputeNode.get_by_uuid')
     @mock.patch('nova.compute.resource_tracker.ResourceTracker.'
                 '_update')
     def test_no_op_init_compute_node(self, update_mock, get_mock, service_mock,

@@ -1296,14 +1297,14 @@ class TestInitComputeNode(BaseTestCase):
     @mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
                 return_value=objects.PciDeviceList())
     @mock.patch('nova.objects.ComputeNode.create')
-    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+    @mock.patch('nova.objects.ComputeNode.get_by_uuid')
     @mock.patch('nova.compute.resource_tracker.ResourceTracker.'
                 '_update')
     def test_compute_node_loaded(self, update_mock, get_mock, create_mock,
                                  pci_mock):
         self._setup_rt()

-        def fake_get_node(_ctx, host, node):
+        def fake_get_node(_ctx, uuid):
             res = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
             return res
@@ -1313,85 +1314,67 @@ class TestInitComputeNode(BaseTestCase):
         self.assertFalse(
             self.rt._init_compute_node(mock.sentinel.ctx, resources))

-        get_mock.assert_called_once_with(mock.sentinel.ctx, _HOSTNAME,
-                                         _NODENAME)
+        get_mock.assert_called_once_with(mock.sentinel.ctx,
+                                         uuids.cn1)
         self.assertFalse(create_mock.called)
         self.assertFalse(update_mock.called)

-    @mock.patch('nova.objects.ComputeNodeList.get_by_hypervisor')
     @mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
                 return_value=objects.PciDeviceList())
     @mock.patch('nova.objects.ComputeNode.create')
-    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+    @mock.patch('nova.objects.ComputeNode.get_by_uuid')
     @mock.patch('nova.compute.resource_tracker.ResourceTracker.'
                 '_update')
     def test_compute_node_rebalanced(self, update_mock, get_mock, create_mock,
-                                     pci_mock, get_by_hypervisor_mock):
+                                     pci_mock):
         self._setup_rt()
         self.driver_mock.rebalances_nodes = True
         cn = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
         cn.host = "old-host"

-        def fake_get_all(_ctx, nodename):
-            return [cn]
+        def fake_get_node(_ctx, uuid):
+            return cn

-        get_mock.side_effect = exc.NotFound
-        get_by_hypervisor_mock.side_effect = fake_get_all
+        get_mock.side_effect = fake_get_node
         resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES)

         self.assertFalse(
             self.rt._init_compute_node(mock.sentinel.ctx, resources))

-        get_mock.assert_called_once_with(mock.sentinel.ctx, _HOSTNAME,
-                                         _NODENAME)
-        get_by_hypervisor_mock.assert_called_once_with(mock.sentinel.ctx,
-                                                       _NODENAME)
+        get_mock.assert_called_once_with(mock.sentinel.ctx, uuids.cn1)
        create_mock.assert_not_called()
         update_mock.assert_called_once_with(mock.sentinel.ctx, cn)

         self.assertEqual(_HOSTNAME, self.rt.compute_nodes[_NODENAME].host)

-    @mock.patch('nova.objects.ComputeNodeList.get_by_hypervisor')
     @mock.patch('nova.objects.ComputeNode.create')
-    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+    @mock.patch('nova.objects.ComputeNode.get_by_uuid')
     @mock.patch('nova.compute.resource_tracker.ResourceTracker.'
                 '_update')
     def test_compute_node_created_on_empty(self, update_mock, get_mock,
-                                           create_mock,
-                                           get_by_hypervisor_mock):
-        get_by_hypervisor_mock.return_value = []
-        self._test_compute_node_created(update_mock, get_mock, create_mock,
-                                        get_by_hypervisor_mock)
+                                           create_mock):
+        self._test_compute_node_created(update_mock, get_mock, create_mock)

-    @mock.patch('nova.objects.ComputeNodeList.get_by_hypervisor')
     @mock.patch('nova.objects.ComputeNode.create')
-    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+    @mock.patch('nova.objects.ComputeNode.get_by_uuid')
     @mock.patch('nova.compute.resource_tracker.ResourceTracker.'
                 '_update')
     def test_compute_node_created_on_empty_rebalance(self, update_mock,
                                                      get_mock,
-                                                     create_mock,
-                                                     get_by_hypervisor_mock):
-        get_by_hypervisor_mock.return_value = []
+                                                     create_mock):
         self._test_compute_node_created(update_mock, get_mock, create_mock,
-                                        get_by_hypervisor_mock,
                                         rebalances_nodes=True)

-    @mock.patch('nova.objects.ComputeNodeList.get_by_hypervisor')
     @mock.patch('nova.objects.ComputeNode.create')
-    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+    @mock.patch('nova.objects.ComputeNode.get_by_uuid')
     @mock.patch('nova.compute.resource_tracker.ResourceTracker.'
                 '_update')
     def test_compute_node_created_too_many(self, update_mock, get_mock,
-                                           create_mock,
-                                           get_by_hypervisor_mock):
-        get_by_hypervisor_mock.return_value = ["fake_node_1", "fake_node_2"]
+                                           create_mock):
         self._test_compute_node_created(update_mock, get_mock, create_mock,
-                                        get_by_hypervisor_mock,
                                         rebalances_nodes=True)

     def _test_compute_node_created(self, update_mock, get_mock, create_mock,
-                                   get_by_hypervisor_mock,
                                    rebalances_nodes=False):
         self.flags(cpu_allocation_ratio=1.0, ram_allocation_ratio=1.0,
                    disk_allocation_ratio=1.0)
@@ -1452,13 +1435,9 @@ class TestInitComputeNode(BaseTestCase):
             self.rt._init_compute_node(mock.sentinel.ctx, resources))

         cn = self.rt.compute_nodes[_NODENAME]
-        get_mock.assert_called_once_with(mock.sentinel.ctx, _HOSTNAME,
-                                         _NODENAME)
-        if rebalances_nodes:
-            get_by_hypervisor_mock.assert_called_once_with(
-                mock.sentinel.ctx, _NODENAME)
-        else:
-            get_by_hypervisor_mock.assert_not_called()
+        get_mock.assert_called_once_with(mock.sentinel.ctx,
+                                         uuids.compute_node_uuid)

         create_mock.assert_called_once_with()
         self.assertTrue(obj_base.obj_equal_prims(expected_compute, cn))
         setup_pci.assert_called_once_with(mock.sentinel.ctx, cn, resources)

@@ -1466,7 +1445,7 @@ class TestInitComputeNode(BaseTestCase):

     @mock.patch('nova.compute.resource_tracker.ResourceTracker.'
                 '_setup_pci_tracker')
-    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename',
+    @mock.patch('nova.objects.ComputeNode.get_by_uuid',
                 side_effect=exc.ComputeHostNotFound(host=_HOSTNAME))
     @mock.patch('nova.objects.ComputeNode.create',
                 side_effect=(test.TestingException, None))

@@ -1489,14 +1468,14 @@ class TestInitComputeNode(BaseTestCase):
         self.assertTrue(self.rt._init_compute_node(ctxt, resources))
         self.assertIn(_NODENAME, self.rt.compute_nodes)
         mock_get.assert_has_calls([mock.call(
-            ctxt, _HOSTNAME, _NODENAME)] * 2)
+            ctxt, uuids.cn_uuid)] * 2)
         self.assertEqual(2, mock_create.call_count)
         mock_setup_pci.assert_called_once_with(
             ctxt, test.MatchType(objects.ComputeNode), resources)

-    @mock.patch('nova.objects.ComputeNodeList.get_by_hypervisor')
     @mock.patch('nova.objects.ComputeNode.create')
-    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+    @mock.patch('nova.objects.ComputeNode.get_by_uuid')
     @mock.patch('nova.compute.resource_tracker.ResourceTracker.'
                 '_update')
     def test_node_removed(self, update_mock, get_mock,
|
||||
self.assertNotIn(_NODENAME, self.rt.stats)
|
||||
self.assertNotIn(_NODENAME, self.rt.old_resources)
|
||||
|
||||
@mock.patch.object(resource_tracker.ResourceTracker,
|
||||
'_update')
|
||||
@mock.patch.object(resource_tracker.ResourceTracker,
|
||||
'_setup_pci_tracker')
|
||||
def test_undelete_node(self, mock_pci, mock_update):
|
||||
self._setup_rt()
|
||||
node = mock.MagicMock()
|
||||
node.deleted = True
|
||||
node.uuid = str(uuids.host1)
|
||||
node.host = 'fake-host'
|
||||
context_mock = mock.MagicMock()
|
||||
resources = {'hypervisor_hostname': 'fake-host',
|
||||
'uuid': str(uuids.host1)}
|
||||
with mock.patch.object(self.rt, '_get_compute_node') as getcn:
|
||||
getcn.return_value = node
|
||||
|
||||
# _init_compute_node() should return False to indicate that
|
||||
# it found an existing node
|
||||
self.assertFalse(
|
||||
self.rt._init_compute_node(context_mock, resources))
|
||||
|
||||
# Node should be undeleted and saved
|
||||
self.assertFalse(node.deleted)
|
||||
self.assertIsNone(node.deleted_at)
|
||||
node.save.assert_called_once_with()
|
||||
|
||||
# Host is the same, no _update()
|
||||
self.assertEqual('fake-host', node.host)
|
||||
mock_update.assert_not_called()
|
||||
|
||||
@mock.patch.object(resource_tracker.ResourceTracker,
|
||||
'_update')
|
||||
@mock.patch.object(resource_tracker.ResourceTracker,
|
||||
'_setup_pci_tracker')
|
||||
def test_undelete_node_move_host(self, mock_pci, mock_update):
|
||||
self._setup_rt()
|
||||
node = mock.MagicMock()
|
||||
node.deleted = True
|
||||
node.uuid = str(uuids.host1)
|
||||
node.host = 'old-host'
|
||||
context_mock = mock.MagicMock()
|
||||
resources = {'hypervisor_hostname': 'fake-host',
|
||||
'uuid': str(uuids.host1)}
|
||||
with mock.patch.object(self.rt, '_get_compute_node') as getcn:
|
||||
getcn.return_value = node
|
||||
|
||||
# _init_compute_node() should return False to indicate that
|
||||
# it found an existing node
|
||||
self.assertFalse(
|
||||
self.rt._init_compute_node(context_mock, resources))
|
||||
|
||||
# Node should be undeleted and saved
|
||||
self.assertFalse(node.deleted)
|
||||
self.assertIsNone(node.deleted_at)
|
||||
node.save.assert_called_once_with()
|
||||
|
||||
# Our host changed, so we should have the updated value and have
|
||||
# called _update()
|
||||
self.assertEqual('fake-host', node.host)
|
||||
mock_update.assert_called()
|
||||
|
||||
|
||||
@ddt.ddt
|
||||
class TestUpdateComputeNode(BaseTestCase):
|
||||
@@ -2635,7 +2675,7 @@ class TestResize(BaseTestCase):
                 return_value=objects.InstancePCIRequests(requests=[]))
     @mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
                 return_value=objects.PciDeviceList())
-    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+    @mock.patch('nova.objects.ComputeNode.get_by_uuid')
     @mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
     @mock.patch('nova.objects.InstanceList.get_by_host_and_node')
     @mock.patch('nova.objects.ComputeNode.save')

@@ -2739,7 +2779,7 @@ class TestResize(BaseTestCase):
                 return_value=objects.InstancePCIRequests(requests=[]))
     @mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
                 return_value=objects.PciDeviceList())
-    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename',
+    @mock.patch('nova.objects.ComputeNode.get_by_uuid',
                 return_value=_COMPUTE_NODE_FIXTURES[0])
     @mock.patch('nova.objects.MigrationList.get_in_progress_and_error',
                 return_value=[])

@@ -2911,7 +2951,7 @@ class TestResize(BaseTestCase):
     @mock.patch('nova.pci.manager.PciDevTracker.claim_instance')
     @mock.patch('nova.pci.request.get_pci_requests_from_flavor')
     @mock.patch('nova.objects.PciDeviceList.get_by_compute_node')
-    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+    @mock.patch('nova.objects.ComputeNode.get_by_uuid')
     @mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
     @mock.patch('nova.objects.InstanceList.get_by_host_and_node')
     @mock.patch('nova.objects.ComputeNode.save')

@@ -3081,7 +3121,7 @@ class TestResize(BaseTestCase):
                 return_value=objects.InstancePCIRequests(requests=[]))
     @mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
                 return_value=objects.PciDeviceList())
-    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+    @mock.patch('nova.objects.ComputeNode.get_by_uuid')
     @mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
     @mock.patch('nova.objects.InstanceList.get_by_host_and_node')
     @mock.patch('nova.objects.ComputeNode.save')

@@ -3213,7 +3253,7 @@ class TestRebuild(BaseTestCase):
                 return_value=objects.InstancePCIRequests(requests=[]))
     @mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
                 return_value=objects.PciDeviceList())
-    @mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
+    @mock.patch('nova.objects.ComputeNode.get_by_uuid')
     @mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
     @mock.patch('nova.objects.InstanceList.get_by_host_and_node')
     @mock.patch('nova.objects.ComputeNode.save')
@@ -553,17 +553,15 @@ class _TestComputeNodeObject(object):

     def test_update_from_virt_driver_uuid_already_set(self):
         """Tests update_from_virt_driver where the compute node object already
-        has a uuid value so the uuid from the virt driver is ignored.
+        has a uuid value so an error is raised.
         """
         # copy in case the update has a side effect
         resources = copy.deepcopy(fake_resources)
         # Emulate the ironic driver which adds a uuid field.
         resources['uuid'] = uuidsentinel.node_uuid
         compute = compute_node.ComputeNode(uuid=uuidsentinel.something_else)
-        compute.update_from_virt_driver(resources)
-        expected = fake_compute_with_resources.obj_clone()
-        expected.uuid = uuidsentinel.something_else
-        self.assertTrue(base.obj_equal_prims(expected, compute))
+        self.assertRaises(exception.InvalidNodeConfiguration,
+                          compute.update_from_virt_driver, resources)

     def test_update_from_virt_driver_missing_field(self):
         # NOTE(pmurray): update_from_virt_driver does not require
@@ -505,6 +505,12 @@ class FakeDriver(driver.ComputeDriver):
         host_status['host_hostname'] = nodename
         host_status['host_name_label'] = nodename
         host_status['cpu_info'] = jsonutils.dumps(cpu_info)
+        # NOTE(danms): Because the fake driver runs on the same host
+        # in tests, potentially with multiple nodes, we need to
+        # control our node uuids. Make sure we return a unique and
+        # consistent uuid for each node we are responsible for to
+        # avoid the persistent local node identity from taking over.
+        host_status['uuid'] = str(getattr(uuids, nodename))
         return host_status

     def update_provider_tree(self, provider_tree, nodename, allocations=None):
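This works because uuidsentinel returns the same uuid every time the same
attribute name is requested, so per-node uuids are both unique and
repeatable across a test run:

    from oslo_utils.fixture import uuidsentinel as uuids

    assert getattr(uuids, 'node1') == getattr(uuids, 'node1')  # stable
    assert uuids.node1 != uuids.node2  # distinct per node name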
@@ -9515,6 +9515,7 @@ class LibvirtDriver(driver.ComputeDriver):
         data["hypervisor_type"] = self._host.get_driver_type()
         data["hypervisor_version"] = self._host.get_version()
         data["hypervisor_hostname"] = self._host.get_hostname()
+        data["uuid"] = self._host.get_node_uuid()
         # TODO(berrange): why do we bother converting the
         # libvirt capabilities XML into a special JSON format ?
         # The data format is different across all the drivers
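For the libvirt driver the uuid ultimately comes from the host, via the
Host wrapper's get_node_uuid(). As a hedged illustration only (this is
not nova's code), an equivalent value can be read straight from libvirt's
capabilities XML, which carries a <host><uuid> element:

    import libvirt
    import xml.etree.ElementTree as ET

    conn = libvirt.openReadOnly('qemu:///system')
    caps = ET.fromstring(conn.getCapabilities())
    print(caps.findtext('./host/uuid'))  # the hypervisor host uuid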