# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import datetime
from keystoneauth1 import exceptions as ks_exc
import mock
import os_resource_classes as orc
import os_traits
from oslo_config import cfg
from oslo_utils.fixture import uuidsentinel as uuids
from oslo_utils import timeutils
from oslo_utils import units
from nova.compute import claims
from nova.compute.monitors import base as monitor_base
from nova.compute import power_state
from nova.compute import provider_tree
from nova.compute import resource_tracker
from nova.compute import task_states
from nova.compute import utils as compute_utils
from nova.compute import vm_states
from nova import context
from nova import exception as exc
from nova import objects
from nova.objects import base as obj_base
from nova.objects import fields as obj_fields
from nova.objects import pci_device
from nova.pci import manager as pci_manager
from nova.scheduler.client import report
from nova import test
from nova.tests import fixtures
from nova.tests.unit import fake_instance
from nova.tests.unit.objects import test_pci_device as fake_pci_device
from nova.tests.unit import utils
from nova import utils as nova_utils
from nova.virt import driver
_HOSTNAME = 'fake-host'
_NODENAME = 'fake-node'
CONF = cfg.CONF
_VIRT_DRIVER_AVAIL_RESOURCES = {
'vcpus': 4,
'memory_mb': 512,
'local_gb': 6,
'vcpus_used': 0,
'memory_mb_used': 0,
'local_gb_used': 0,
'hypervisor_type': 'fake',
'hypervisor_version': 0,
'hypervisor_hostname': _NODENAME,
'cpu_info': '',
'numa_topology': None,
}
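# These values mimic what the mocked virt driver's get_available_resource()
# returns in these tests; the ComputeNode fixture below derives its free_*
# fields from the same numbers, e.g.
#   free_ram_mb  = memory_mb - memory_mb_used  (512 - 0 = 512)
#   free_disk_gb = local_gb  - local_gb_used   (  6 - 0 =   6)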
_COMPUTE_NODE_FIXTURES = [
objects.ComputeNode(
id=1,
uuid=uuids.cn1,
host=_HOSTNAME,
vcpus=_VIRT_DRIVER_AVAIL_RESOURCES['vcpus'],
memory_mb=_VIRT_DRIVER_AVAIL_RESOURCES['memory_mb'],
local_gb=_VIRT_DRIVER_AVAIL_RESOURCES['local_gb'],
vcpus_used=_VIRT_DRIVER_AVAIL_RESOURCES['vcpus_used'],
memory_mb_used=_VIRT_DRIVER_AVAIL_RESOURCES['memory_mb_used'],
local_gb_used=_VIRT_DRIVER_AVAIL_RESOURCES['local_gb_used'],
hypervisor_type='fake',
hypervisor_version=0,
hypervisor_hostname=_NODENAME,
free_ram_mb=(_VIRT_DRIVER_AVAIL_RESOURCES['memory_mb'] -
_VIRT_DRIVER_AVAIL_RESOURCES['memory_mb_used']),
free_disk_gb=(_VIRT_DRIVER_AVAIL_RESOURCES['local_gb'] -
_VIRT_DRIVER_AVAIL_RESOURCES['local_gb_used']),
current_workload=0,
running_vms=0,
cpu_info='{}',
disk_available_least=0,
host_ip='1.1.1.1',
supported_hv_specs=[
objects.HVSpec.from_list([
obj_fields.Architecture.I686,
obj_fields.HVType.KVM,
obj_fields.VMMode.HVM])
],
metrics=None,
pci_device_pools=None,
extra_resources=None,
stats={},
numa_topology=None,
cpu_allocation_ratio=16.0,
ram_allocation_ratio=1.5,
disk_allocation_ratio=1.0,
),
]
_INSTANCE_TYPE_FIXTURES = {
1: {
'id': 1,
'flavorid': 'fakeid-1',
'name': 'fake1.small',
'memory_mb': 128,
'vcpus': 1,
'root_gb': 1,
'ephemeral_gb': 0,
'swap': 0,
'rxtx_factor': 0,
'vcpu_weight': 1,
'extra_specs': {},
'deleted': 0,
},
2: {
'id': 2,
'flavorid': 'fakeid-2',
'name': 'fake1.medium',
'memory_mb': 256,
'vcpus': 2,
'root_gb': 5,
'ephemeral_gb': 0,
'swap': 0,
'rxtx_factor': 0,
'vcpu_weight': 1,
'extra_specs': {},
'deleted': 0,
},
}
_INSTANCE_TYPE_OBJ_FIXTURES = {
1: objects.Flavor(id=1, flavorid='fakeid-1', name='fake1.small',
memory_mb=128, vcpus=1, root_gb=1,
ephemeral_gb=0, swap=0, rxtx_factor=0,
vcpu_weight=1, extra_specs={}, deleted=False),
2: objects.Flavor(id=2, flavorid='fakeid-2', name='fake1.medium',
memory_mb=256, vcpus=2, root_gb=5,
ephemeral_gb=0, swap=0, rxtx_factor=0,
vcpu_weight=1, extra_specs={}, deleted=False),
}
_2MB = 2 * units.Mi / units.Ki
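# Arithmetic note: units.Mi == 1024 * 1024 and units.Ki == 1024, so _2MB
# evaluates to 2048 (2 * 1048576 / 1024), i.e. two mebibytes expressed in
# kibibytes; it is used as the memory value for the NUMA cell fixtures below.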
_INSTANCE_NUMA_TOPOLOGIES = {
'2mb': objects.InstanceNUMATopology(cells=[
objects.InstanceNUMACell(
id=0, cpuset=set([1]), pcpuset=set(), memory=_2MB, pagesize=0),
objects.InstanceNUMACell(
id=1, cpuset=set([3]), pcpuset=set(), memory=_2MB, pagesize=0)]),
}
_NUMA_LIMIT_TOPOLOGIES = {
'2mb': objects.NUMATopologyLimits(id=0,
cpu_allocation_ratio=1.0,
ram_allocation_ratio=1.0),
}
_NUMA_PAGE_TOPOLOGIES = {
'2mb*1024': objects.NUMAPagesTopology(size_kb=2048, total=1024, used=0)
}
_NUMA_HOST_TOPOLOGIES = {
'2mb': objects.NUMATopology(cells=[
objects.NUMACell(
id=0,
cpuset=set([1, 2]),
pcpuset=set(),
memory=_2MB,
cpu_usage=0,
memory_usage=0,
mempages=[_NUMA_PAGE_TOPOLOGIES['2mb*1024']],
siblings=[set([1]), set([2])],
pinned_cpus=set()),
objects.NUMACell(
id=1,
cpuset=set([3, 4]),
pcpuset=set(),
memory=_2MB,
cpu_usage=0,
memory_usage=0,
mempages=[_NUMA_PAGE_TOPOLOGIES['2mb*1024']],
siblings=[set([3]), set([4])],
pinned_cpus=set())]),
}
_INSTANCE_FIXTURES = [
objects.Instance(
id=1,
host=_HOSTNAME,
node=_NODENAME,
uuid='c17741a5-6f3d-44a8-ade8-773dc8c29124',
memory_mb=_INSTANCE_TYPE_FIXTURES[1]['memory_mb'],
vcpus=_INSTANCE_TYPE_FIXTURES[1]['vcpus'],
root_gb=_INSTANCE_TYPE_FIXTURES[1]['root_gb'],
ephemeral_gb=_INSTANCE_TYPE_FIXTURES[1]['ephemeral_gb'],
numa_topology=_INSTANCE_NUMA_TOPOLOGIES['2mb'],
pci_requests=None,
pci_devices=None,
instance_type_id=1,
vm_state=vm_states.ACTIVE,
power_state=power_state.RUNNING,
task_state=None,
os_type='fake-os', # Used by the stats collector.
project_id='fake-project', # Used by the stats collector.
user_id=uuids.user_id,
flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1],
old_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1],
new_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1],
deleted=False,
resources=None,
),
objects.Instance(
id=2,
host=_HOSTNAME,
node=_NODENAME,
uuid='33805b54-dea6-47b8-acb2-22aeb1b57919',
memory_mb=_INSTANCE_TYPE_FIXTURES[2]['memory_mb'],
vcpus=_INSTANCE_TYPE_FIXTURES[2]['vcpus'],
root_gb=_INSTANCE_TYPE_FIXTURES[2]['root_gb'],
ephemeral_gb=_INSTANCE_TYPE_FIXTURES[2]['ephemeral_gb'],
numa_topology=None,
pci_requests=None,
pci_devices=None,
instance_type_id=2,
vm_state=vm_states.DELETED,
power_state=power_state.SHUTDOWN,
task_state=None,
os_type='fake-os',
project_id='fake-project-2',
user_id=uuids.user_id,
flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
old_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
new_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
deleted=False,
resources=None,
),
]
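# Of the two instance fixtures above, only the first (vm_state ACTIVE) is
# expected to count toward usage in the resource audit; the second is in
# vm_state DELETED, and the tests below (see
# test_some_instances_no_migrations) rely on it being excluded from the
# accounting.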
_MIGRATION_FIXTURES = {
# A migration that has only this compute node as the source host
'source-only': objects.Migration(
id=1,
instance_uuid='f15ecfb0-9bf6-42db-9837-706eb2c4bf08',
source_compute=_HOSTNAME,
dest_compute='other-host',
source_node=_NODENAME,
dest_node='other-node',
old_instance_type_id=1,
new_instance_type_id=2,
migration_type='resize',
status='migrating',
uuid=uuids.source_only,
),
# A migration that has only this compute node as the dest host
'dest-only': objects.Migration(
id=2,
instance_uuid='f6ed631a-8645-4b12-8e1e-2fff55795765',
source_compute='other-host',
dest_compute=_HOSTNAME,
source_node='other-node',
dest_node=_NODENAME,
old_instance_type_id=1,
new_instance_type_id=2,
migration_type='resize',
status='migrating',
uuid=uuids.dest_only,
),
# A migration that has this compute node as both the source and dest host
'source-and-dest': objects.Migration(
id=3,
instance_uuid='f4f0bfea-fe7e-4264-b598-01cb13ef1997',
source_compute=_HOSTNAME,
dest_compute=_HOSTNAME,
source_node=_NODENAME,
dest_node=_NODENAME,
old_instance_type_id=1,
new_instance_type_id=2,
migration_type='resize',
status='migrating',
uuid=uuids.source_and_dest,
),
# A migration that has this compute node as destination and is an evac
'dest-only-evac': objects.Migration(
id=4,
instance_uuid='077fb63a-bdc8-4330-90ef-f012082703dc',
source_compute='other-host',
dest_compute=_HOSTNAME,
source_node='other-node',
dest_node=_NODENAME,
old_instance_type_id=2,
new_instance_type_id=None,
migration_type='evacuation',
status='pre-migrating',
uuid=uuids.dest_only_evac,
),
}
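# The four migration fixtures above cover the scenarios exercised below:
# this node as resize source only, resize destination only, both source and
# destination (same-host resize), and evacuation destination. Each is paired
# with an Instance in _MIGRATION_INSTANCE_FIXTURES and a MigrationContext in
# _MIGRATION_CONTEXT_FIXTURES, keyed by instance_uuid.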
_MIGRATION_INSTANCE_FIXTURES = {
# source-only
'f15ecfb0-9bf6-42db-9837-706eb2c4bf08': objects.Instance(
id=101,
host=None, # prevent RT trying to lazy-load this
node=None,
uuid='f15ecfb0-9bf6-42db-9837-706eb2c4bf08',
memory_mb=_INSTANCE_TYPE_FIXTURES[1]['memory_mb'],
vcpus=_INSTANCE_TYPE_FIXTURES[1]['vcpus'],
root_gb=_INSTANCE_TYPE_FIXTURES[1]['root_gb'],
ephemeral_gb=_INSTANCE_TYPE_FIXTURES[1]['ephemeral_gb'],
numa_topology=_INSTANCE_NUMA_TOPOLOGIES['2mb'],
pci_requests=None,
pci_devices=None,
instance_type_id=1,
vm_state=vm_states.ACTIVE,
power_state=power_state.RUNNING,
task_state=task_states.RESIZE_MIGRATING,
system_metadata={},
os_type='fake-os',
project_id='fake-project',
flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1],
old_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1],
new_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
resources=None,
),
# dest-only
'f6ed631a-8645-4b12-8e1e-2fff55795765': objects.Instance(
id=102,
host=None, # prevent RT trying to lazy-load this
node=None,
uuid='f6ed631a-8645-4b12-8e1e-2fff55795765',
memory_mb=_INSTANCE_TYPE_FIXTURES[2]['memory_mb'],
vcpus=_INSTANCE_TYPE_FIXTURES[2]['vcpus'],
root_gb=_INSTANCE_TYPE_FIXTURES[2]['root_gb'],
ephemeral_gb=_INSTANCE_TYPE_FIXTURES[2]['ephemeral_gb'],
numa_topology=None,
pci_requests=None,
pci_devices=None,
instance_type_id=2,
vm_state=vm_states.ACTIVE,
power_state=power_state.RUNNING,
task_state=task_states.RESIZE_MIGRATING,
system_metadata={},
os_type='fake-os',
project_id='fake-project',
flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
old_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1],
new_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
resources=None,
),
# source-and-dest
'f4f0bfea-fe7e-4264-b598-01cb13ef1997': objects.Instance(
id=3,
host=None, # prevent RT trying to lazy-load this
node=None,
uuid='f4f0bfea-fe7e-4264-b598-01cb13ef1997',
memory_mb=_INSTANCE_TYPE_FIXTURES[2]['memory_mb'],
vcpus=_INSTANCE_TYPE_FIXTURES[2]['vcpus'],
root_gb=_INSTANCE_TYPE_FIXTURES[2]['root_gb'],
ephemeral_gb=_INSTANCE_TYPE_FIXTURES[2]['ephemeral_gb'],
numa_topology=None,
pci_requests=None,
pci_devices=None,
instance_type_id=2,
vm_state=vm_states.ACTIVE,
power_state=power_state.RUNNING,
task_state=task_states.RESIZE_MIGRATING,
system_metadata={},
os_type='fake-os',
project_id='fake-project',
flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
old_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1],
new_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
resources=None,
),
# dest-only-evac
'077fb63a-bdc8-4330-90ef-f012082703dc': objects.Instance(
id=102,
host=None, # prevent RT trying to lazy-load this
node=None,
uuid='077fb63a-bdc8-4330-90ef-f012082703dc',
memory_mb=_INSTANCE_TYPE_FIXTURES[2]['memory_mb'],
vcpus=_INSTANCE_TYPE_FIXTURES[2]['vcpus'],
root_gb=_INSTANCE_TYPE_FIXTURES[2]['root_gb'],
ephemeral_gb=_INSTANCE_TYPE_FIXTURES[2]['ephemeral_gb'],
numa_topology=None,
pci_requests=None,
pci_devices=None,
instance_type_id=2,
vm_state=vm_states.ACTIVE,
power_state=power_state.RUNNING,
task_state=task_states.REBUILDING,
system_metadata={},
os_type='fake-os',
project_id='fake-project',
flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
old_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[1],
new_flavor=_INSTANCE_TYPE_OBJ_FIXTURES[2],
resources=None,
),
}
_MIGRATION_CONTEXT_FIXTURES = {
'f4f0bfea-fe7e-4264-b598-01cb13ef1997': objects.MigrationContext(
instance_uuid='f4f0bfea-fe7e-4264-b598-01cb13ef1997',
migration_id=3,
new_numa_topology=None,
old_numa_topology=None),
'c17741a5-6f3d-44a8-ade8-773dc8c29124': objects.MigrationContext(
instance_uuid='c17741a5-6f3d-44a8-ade8-773dc8c29124',
migration_id=3,
new_numa_topology=None,
old_numa_topology=None),
'f15ecfb0-9bf6-42db-9837-706eb2c4bf08': objects.MigrationContext(
instance_uuid='f15ecfb0-9bf6-42db-9837-706eb2c4bf08',
migration_id=1,
new_numa_topology=None,
old_numa_topology=_INSTANCE_NUMA_TOPOLOGIES['2mb']),
'f6ed631a-8645-4b12-8e1e-2fff55795765': objects.MigrationContext(
instance_uuid='f6ed631a-8645-4b12-8e1e-2fff55795765',
migration_id=2,
new_numa_topology=_INSTANCE_NUMA_TOPOLOGIES['2mb'],
old_numa_topology=None),
'077fb63a-bdc8-4330-90ef-f012082703dc': objects.MigrationContext(
instance_uuid='077fb63a-bdc8-4330-90ef-f012082703dc',
migration_id=2,
new_numa_topology=None,
old_numa_topology=None),
}
def setup_rt(hostname, virt_resources=_VIRT_DRIVER_AVAIL_RESOURCES):
"""Sets up the resource tracker instance with mock fixtures.
:param virt_resources: Optional override of the resource representation
returned by the virt driver's
`get_available_resource()` method.
"""
query_client_mock = mock.MagicMock()
report_client_mock = mock.MagicMock()
notifier_mock = mock.MagicMock()
vd = mock.MagicMock(autospec=driver.ComputeDriver)
# Make sure we don't change any global fixtures during tests
virt_resources = copy.deepcopy(virt_resources)
vd.get_available_resource.return_value = virt_resources
def fake_upt(provider_tree, nodename, allocations=None):
inventory = {
'VCPU': {
'total': virt_resources['vcpus'],
'min_unit': 1,
'max_unit': virt_resources['vcpus'],
'step_size': 1,
'allocation_ratio': (
CONF.cpu_allocation_ratio or
CONF.initial_cpu_allocation_ratio),
'reserved': CONF.reserved_host_cpus,
},
'MEMORY_MB': {
'total': virt_resources['memory_mb'],
'min_unit': 1,
'max_unit': virt_resources['memory_mb'],
'step_size': 1,
'allocation_ratio': (
CONF.ram_allocation_ratio or
CONF.initial_ram_allocation_ratio),
'reserved': CONF.reserved_host_memory_mb,
},
'DISK_GB': {
'total': virt_resources['local_gb'],
'min_unit': 1,
'max_unit': virt_resources['local_gb'],
'step_size': 1,
'allocation_ratio': (
CONF.disk_allocation_ratio or
CONF.initial_disk_allocation_ratio),
'reserved': compute_utils.convert_mb_to_ceil_gb(
CONF.reserved_host_disk_mb),
},
}
provider_tree.update_inventory(nodename, inventory)
vd.update_provider_tree.side_effect = fake_upt
vd.get_host_ip_addr.return_value = _NODENAME
vd.rebalances_nodes = False
with test.nested(
mock.patch('nova.scheduler.client.query.SchedulerQueryClient',
return_value=query_client_mock),
mock.patch('nova.scheduler.client.report.SchedulerReportClient',
return_value=report_client_mock),
mock.patch('nova.rpc.get_notifier', return_value=notifier_mock)):
rt = resource_tracker.ResourceTracker(hostname, vd)
return (rt, query_client_mock, report_client_mock, vd)
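# A minimal usage sketch for setup_rt() (illustrative only; the variable
# names are arbitrary and the call mirrors what BaseTestCase._setup_rt()
# does below):
#
#   rt, query_mock, report_mock, driver_mock = setup_rt(_HOSTNAME)
#   # the tracker now talks only to mocks, so tests can assert on e.g.
#   # driver_mock.get_available_resource or report_mock method calls.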
def compute_update_usage(resources, flavor, sign=1):
resources.vcpus_used += sign * flavor.vcpus
resources.memory_mb_used += sign * flavor.memory_mb
resources.local_gb_used += sign * (flavor.root_gb + flavor.ephemeral_gb)
resources.free_ram_mb = resources.memory_mb - resources.memory_mb_used
resources.free_disk_gb = resources.local_gb - resources.local_gb_used
return resources
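# Worked example for compute_update_usage(), using the fixtures above (a
# sketch only): applying flavor 1 (1 vcpu, 128 MB RAM, 1 GB root disk) with
# sign=+1 to a copy of _COMPUTE_NODE_FIXTURES[0] yields vcpus_used=1,
# memory_mb_used=128, local_gb_used=1, and therefore free_ram_mb=384 and
# free_disk_gb=5.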
class BaseTestCase(test.NoDBTestCase):
def setUp(self):
super(BaseTestCase, self).setUp()
self.rt = None
self.flags(my_ip='1.1.1.1',
reserved_host_disk_mb=0,
reserved_host_memory_mb=0,
reserved_host_cpus=0)
self.allocations = {
_COMPUTE_NODE_FIXTURES[0].uuid: {
"generation": 0,
"resources": {
"VCPU": 1,
"MEMORY_MB": 512
}
}
}
self.compute = _COMPUTE_NODE_FIXTURES[0]
self.resource_0 = objects.Resource(provider_uuid=self.compute.uuid,
resource_class="CUSTOM_RESOURCE_0",
identifier="bar")
self.resource_1 = objects.Resource(provider_uuid=self.compute.uuid,
resource_class="CUSTOM_RESOURCE_1",
identifier="foo_1")
self.resource_2 = objects.Resource(provider_uuid=self.compute.uuid,
resource_class="CUSTOM_RESOURCE_1",
identifier="foo_2")
def _setup_rt(self, virt_resources=_VIRT_DRIVER_AVAIL_RESOURCES):
(self.rt, self.sched_client_mock, self.report_client_mock,
self.driver_mock) = setup_rt(_HOSTNAME, virt_resources)
def _setup_ptree(self, compute):
"""Set up a ProviderTree with a compute node root, and mock the
ReportClient's get_provider_tree_and_ensure_root() to return
it.
update_traits() is mocked so that tests can specify a return
value. Returns the new ProviderTree so that tests can control
its behaviour further.
"""
ptree = provider_tree.ProviderTree()
ptree.new_root(compute.hypervisor_hostname, compute.uuid)
ptree.update_traits = mock.Mock()
resources = {"CUSTOM_RESOURCE_0": {self.resource_0},
"CUSTOM_RESOURCE_1": {self.resource_1, self.resource_2}}
ptree.update_resources(compute.uuid, resources)
rc_mock = self.rt.reportclient
gptaer_mock = rc_mock.get_provider_tree_and_ensure_root
gptaer_mock.return_value = ptree
return ptree
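# Note on _setup_ptree(): the ProviderTree it returns carries two custom
# resource classes (CUSTOM_RESOURCE_0 with one Resource, CUSTOM_RESOURCE_1
# with two), and tests that need to assert on trait updates can do so via
# the mocked ptree.update_traits (see the TestUpdateComputeNode tests
# further down for examples).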
class TestUpdateAvailableResources(BaseTestCase):
def _update_available_resources(self, **kwargs):
# We test RT._update() separately; since the complexity of the
# update_available_resource() function is high enough as it is, here we
# just focus on testing the resources parameter that
# update_available_resource() eventually passes to _update().
with mock.patch.object(self.rt, '_update') as update_mock:
self.rt.update_available_resource(mock.MagicMock(), _NODENAME,
**kwargs)
return update_mock
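# Typical pattern used throughout this class (a sketch; names are local to
# each test):
#
#   update_mock = self._update_available_resources()
#   actual_resources = update_mock.call_args[0][1]
#   self.assertTrue(obj_base.obj_equal_prims(expected, actual_resources))
#
# i.e. the ComputeNode handed to _update() is captured and compared against
# an expected fixture.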
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_disabled(self, get_mock, migr_mock, get_cn_mock, pci_mock,
instance_pci_mock):
self._setup_rt()
# Set up resource tracker in an enabled state and verify that all is
# good before simulating a disabled node.
get_mock.return_value = []
migr_mock.return_value = []
get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]
# This will call _init_compute_node() and create a ComputeNode object
# and will also call through to InstanceList.get_by_host_and_node()
# because the node is available.
self._update_available_resources()
self.assertTrue(get_mock.called)
get_mock.reset_mock()
# OK, now simulate a node being disabled by the Ironic virt driver.
vd = self.driver_mock
vd.node_is_available.return_value = False
self._update_available_resources()
self.assertFalse(get_mock.called)
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_no_instances_no_migrations_no_reserved(self, get_mock, migr_mock,
get_cn_mock, pci_mock,
instance_pci_mock):
self._setup_rt()
get_mock.return_value = []
migr_mock.return_value = []
get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]
update_mock = self._update_available_resources()
vd = self.driver_mock
vd.get_available_resource.assert_called_once_with(_NODENAME)
get_mock.assert_called_once_with(mock.ANY, _HOSTNAME,
_NODENAME,
expected_attrs=[
'system_metadata',
'numa_topology',
'flavor',
'migration_context',
'resources'])
get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME,
_NODENAME)
migr_mock.assert_called_once_with(mock.ANY, _HOSTNAME,
_NODENAME)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
'free_disk_gb': 6,
'local_gb': 6,
'free_ram_mb': 512,
'memory_mb_used': 0,
'vcpus_used': 0,
'local_gb_used': 0,
'memory_mb': 512,
'current_workload': 0,
'vcpus': 4,
'running_vms': 0
}
_update_compute_node(expected_resources, **vals)
actual_resources = update_mock.call_args[0][1]
self.assertTrue(obj_base.obj_equal_prims(expected_resources,
actual_resources))
update_mock.assert_called_once()
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_remove_deleted_instances_allocations')
def test_startup_makes_it_through(self, rdia, get_mock, migr_mock,
get_cn_mock, pci_mock,
instance_pci_mock):
"""Just make sure the startup kwarg makes it from
_update_available_resource all the way down the call stack to
_update. In this case a compute node record already exists.
"""
self._setup_rt()
get_mock.return_value = []
migr_mock.return_value = []
get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]
update_mock = self._update_available_resources(startup=True)
update_mock.assert_called_once_with(mock.ANY, mock.ANY, startup=True)
rdia.assert_called_once_with(
mock.ANY, get_cn_mock.return_value,
[], {})
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_init_compute_node', return_value=True)
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_remove_deleted_instances_allocations')
def test_startup_new_compute(self, rdia, get_mock, migr_mock, init_cn_mock,
pci_mock, instance_pci_mock):
"""Just make sure the startup kwarg makes it from
_update_available_resource all the way down the call stack to
_update. In this case a new compute node record is created.
"""
self._setup_rt()
cn = _COMPUTE_NODE_FIXTURES[0]
self.rt.compute_nodes[cn.hypervisor_hostname] = cn
mock_pci_tracker = mock.MagicMock()
mock_pci_tracker.stats.to_device_pools_obj.return_value = (
objects.PciDevicePoolList())
self.rt.pci_tracker = mock_pci_tracker
get_mock.return_value = []
migr_mock.return_value = []
update_mock = self._update_available_resources(startup=True)
update_mock.assert_called_once_with(mock.ANY, mock.ANY, startup=True)
rdia.assert_not_called()
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_no_instances_no_migrations_reserved_disk_ram_and_cpu(
self, get_mock, migr_mock, get_cn_mock, pci_mock,
instance_pci_mock):
self.flags(reserved_host_disk_mb=1024,
reserved_host_memory_mb=512,
reserved_host_cpus=1)
self._setup_rt()
get_mock.return_value = []
migr_mock.return_value = []
get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]
update_mock = self._update_available_resources()
get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME, _NODENAME)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
'free_disk_gb': 5, # 6GB avail - 1 GB reserved
'local_gb': 6,
'free_ram_mb': 0, # 512MB avail - 512MB reserved
'memory_mb_used': 512, # 0MB used + 512MB reserved
'vcpus_used': 1,
'local_gb_used': 1, # 0GB used + 1 GB reserved
'memory_mb': 512,
'current_workload': 0,
'vcpus': 4,
'running_vms': 0
}
_update_compute_node(expected_resources, **vals)
actual_resources = update_mock.call_args[0][1]
self.assertTrue(obj_base.obj_equal_prims(expected_resources,
actual_resources))
update_mock.assert_called_once()
@mock.patch('nova.compute.utils.is_volume_backed_instance')
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_some_instances_no_migrations(self, get_mock, migr_mock,
get_cn_mock, pci_mock,
instance_pci_mock, bfv_check_mock):
# Setup virt resources to match used resources to number
# of defined instances on the hypervisor
# Note that the usage numbers here correspond to only the first
# Instance object, because the second instance object fixture is in
# DELETED state and therefore we should not expect it to be accounted
# for in the auditing process.
virt_resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES)
virt_resources.update(vcpus_used=1,
memory_mb_used=128,
local_gb_used=1)
self._setup_rt(virt_resources=virt_resources)
get_mock.return_value = _INSTANCE_FIXTURES
migr_mock.return_value = []
get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]
bfv_check_mock.return_value = False
update_mock = self._update_available_resources()
get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME, _NODENAME)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
'free_disk_gb': 5, # 6 - 1 used
'local_gb': 6,
'free_ram_mb': 384, # 512 - 128 used
'memory_mb_used': 128,
'vcpus_used': 1,
'local_gb_used': 1,
'memory_mb': 512,
'current_workload': 0,
'vcpus': 4,
'running_vms': 1 # One active instance
}
_update_compute_node(expected_resources, **vals)
actual_resources = update_mock.call_args[0][1]
self.assertTrue(obj_base.obj_equal_prims(expected_resources,
actual_resources))
update_mock.assert_called_once()
@mock.patch('nova.compute.utils.is_volume_backed_instance',
return_value=False)
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_no_instances_source_migration(self, get_mock, get_inst_mock,
migr_mock, get_cn_mock, pci_mock,
instance_pci_mock,
mock_is_volume_backed_instance):
# We test the behavior of update_available_resource() when
# there is an active migration that involves this compute node
# as the source host not the destination host, and the resource
# tracker does not have any instances assigned to it. This is
# the case when a migration from this compute host to another
# has been completed, but the user has not confirmed the resize
# yet, so the resource tracker must continue to keep the resources
# for the original instance type available on the source compute
# node in case of a revert of the resize.
# Setup virt resources to match used resources to number
# of defined instances on the hypervisor
virt_resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES)
virt_resources.update(vcpus_used=4,
memory_mb_used=128,
local_gb_used=1)
self._setup_rt(virt_resources=virt_resources)
get_mock.return_value = []
migr_obj = _MIGRATION_FIXTURES['source-only']
migr_mock.return_value = [migr_obj]
get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]
# Migration.instance property is accessed in the migration
# processing code, and this property calls
# objects.Instance.get_by_uuid, so we mock that call to return the
# migrating instance fixture.
inst_uuid = migr_obj.instance_uuid
instance = _MIGRATION_INSTANCE_FIXTURES[inst_uuid].obj_clone()
get_inst_mock.return_value = instance
instance.migration_context = _MIGRATION_CONTEXT_FIXTURES[inst_uuid]
update_mock = self._update_available_resources()
get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME, _NODENAME)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
'free_disk_gb': 5,
'local_gb': 6,
'free_ram_mb': 384, # 512 total - 128 for possible revert of orig
'memory_mb_used': 128, # 128 possible revert amount
'vcpus_used': 1,
'local_gb_used': 1,
'memory_mb': 512,
'current_workload': 0,
'vcpus': 4,
'running_vms': 0
}
_update_compute_node(expected_resources, **vals)
actual_resources = update_mock.call_args[0][1]
self.assertTrue(obj_base.obj_equal_prims(expected_resources,
actual_resources))
update_mock.assert_called_once()
@mock.patch('nova.compute.utils.is_volume_backed_instance',
return_value=False)
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_no_instances_dest_migration(self, get_mock, get_inst_mock,
migr_mock, get_cn_mock, pci_mock,
instance_pci_mock,
mock_is_volume_backed_instance):
# We test the behavior of update_available_resource() when
# there is an active migration that involves this compute node
# as the destination host not the source host, and the resource
# tracker does not yet have any instances assigned to it. This is
# the case when a migration to this compute host from another host
# is in progress, but the user has not confirmed the resize
# yet, so the resource tracker must reserve the resources
# for the possibly-to-be-confirmed instance's new instance type on
# this node in case the resize is confirmed.
# Setup virt resources to match used resources to number
# of defined instances on the hypervisor
virt_resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES)
virt_resources.update(vcpus_used=2,
memory_mb_used=256,
local_gb_used=5)
self._setup_rt(virt_resources=virt_resources)
get_mock.return_value = []
migr_obj = _MIGRATION_FIXTURES['dest-only']
migr_mock.return_value = [migr_obj]
inst_uuid = migr_obj.instance_uuid
instance = _MIGRATION_INSTANCE_FIXTURES[inst_uuid].obj_clone()
get_inst_mock.return_value = instance
get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]
instance.migration_context = _MIGRATION_CONTEXT_FIXTURES[inst_uuid]
update_mock = self._update_available_resources()
get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME, _NODENAME)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
'free_disk_gb': 1,
'local_gb': 6,
'free_ram_mb': 256, # 512 total - 256 for possible confirm of new
'memory_mb_used': 256, # 256 possible confirmed amount
'vcpus_used': 2,
'local_gb_used': 5,
'memory_mb': 512,
'current_workload': 0,
'vcpus': 4,
'running_vms': 0
}
_update_compute_node(expected_resources, **vals)
actual_resources = update_mock.call_args[0][1]
self.assertTrue(obj_base.obj_equal_prims(expected_resources,
actual_resources))
update_mock.assert_called_once()
@mock.patch('nova.compute.utils.is_volume_backed_instance',
return_value=False)
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_no_instances_dest_evacuation(self, get_mock, get_inst_mock,
migr_mock, get_cn_mock, pci_mock,
instance_pci_mock,
mock_is_volume_backed_instance):
# We test the behavior of update_available_resource() when
# there is an active evacuation that involves this compute node
# as the destination host not the source host, and the resource
# tracker does not yet have any instances assigned to it. This is
# the case when a migration to this compute host from another host
# is in progress, but not finished yet.
# Setup virt resources to match used resources to number
# of defined instances on the hypervisor
virt_resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES)
virt_resources.update(vcpus_used=2,
memory_mb_used=256,
local_gb_used=5)
self._setup_rt(virt_resources=virt_resources)
get_mock.return_value = []
migr_obj = _MIGRATION_FIXTURES['dest-only-evac']
migr_mock.return_value = [migr_obj]
inst_uuid = migr_obj.instance_uuid
instance = _MIGRATION_INSTANCE_FIXTURES[inst_uuid].obj_clone()
get_inst_mock.return_value = instance
get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]
instance.migration_context = _MIGRATION_CONTEXT_FIXTURES[inst_uuid]
instance.migration_context.migration_id = migr_obj.id
update_mock = self._update_available_resources()
get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME, _NODENAME)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
'free_disk_gb': 1,
'free_ram_mb': 256, # 512 total - 256 for possible confirm of new
'memory_mb_used': 256, # 256 possible confirmed amount
'vcpus_used': 2,
'local_gb_used': 5,
'memory_mb': 512,
'current_workload': 0,
'vcpus': 4,
'running_vms': 0
}
_update_compute_node(expected_resources, **vals)
actual_resources = update_mock.call_args[0][1]
self.assertTrue(obj_base.obj_equal_prims(expected_resources,
actual_resources))
update_mock.assert_called_once()
@mock.patch('nova.compute.utils.is_volume_backed_instance')
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
@mock.patch('nova.objects.MigrationContext.get_by_instance_uuid',
return_value=None)
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_some_instances_source_and_dest_migration(self, get_mock,
get_inst_mock, migr_mock,
get_cn_mock,
get_mig_ctxt_mock,
pci_mock,
instance_pci_mock,
bfv_check_mock):
# We test the behavior of update_available_resource() when
# there is an active migration that involves this compute node
# as the destination host AND the source host, and the resource
# tracker has a few instances assigned to it, including the
# instance that is resizing to this same compute node. The tracking
# of resource amounts takes into account both the old and new
# resize instance types as taking up space on the node.
# Setup virt resources to match used resources to number
# of defined instances on the hypervisor
virt_resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES)
virt_resources.update(vcpus_used=4,
memory_mb_used=512,
local_gb_used=7)
self._setup_rt(virt_resources=virt_resources)
migr_obj = _MIGRATION_FIXTURES['source-and-dest']
migr_mock.return_value = [migr_obj]
inst_uuid = migr_obj.instance_uuid
# The resizing instance has already had its instance type
# changed to the *new* instance type (the bigger one, instance type 2)
resizing_instance = _MIGRATION_INSTANCE_FIXTURES[inst_uuid].obj_clone()
resizing_instance.migration_context = (
_MIGRATION_CONTEXT_FIXTURES[resizing_instance.uuid])
all_instances = _INSTANCE_FIXTURES + [resizing_instance]
get_mock.return_value = all_instances
get_inst_mock.return_value = resizing_instance
get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]
bfv_check_mock.return_value = False
update_mock = self._update_available_resources()
get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME, _NODENAME)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
# 6 total - 1G existing - 5G new flav - 1G old flav
'free_disk_gb': -1,
'local_gb': 6,
# 512 total - 128 existing - 256 new flav - 128 old flav
'free_ram_mb': 0,
'memory_mb_used': 512, # 128 exist + 256 new flav + 128 old flav
'vcpus_used': 4,
'local_gb_used': 7, # 1G existing, 5G new flav + 1 old flav
'memory_mb': 512,
'current_workload': 1, # One migrating instance...
'vcpus': 4,
'running_vms': 2
}
_update_compute_node(expected_resources, **vals)
actual_resources = update_mock.call_args[0][1]
self.assertTrue(obj_base.obj_equal_prims(expected_resources,
actual_resources))
update_mock.assert_called_once()
@mock.patch('nova.compute.utils.is_volume_backed_instance',
return_value=False)
@mock.patch('nova.objects.InstancePCIRequests.get_by_instance',
return_value=objects.InstancePCIRequests(requests=[]))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_no_instances_err_migration(
self, get_mock, get_inst_mock, migr_mock, get_cn_mock, pci_mock,
instance_pci_mock, mock_is_volume_backed_instance
):
# We test the behavior of update_available_resource() when
# there is an error migration that involves this compute node
# as the destination host not the source host, and the resource
# tracker does not yet have any instances assigned to it. This is
# the case when a migration to this compute host from another host
# has failed with an error; the resources claimed on the destination
# host might not have been cleaned up, so the resource tracker must
# keep the resources reserved for the errored migration.
# Setup virt resources to match used resources to number
# of defined instances on the hypervisor
virt_resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES)
virt_resources.update(
vcpus_used=2, memory_mb_used=256, local_gb_used=5)
self._setup_rt(virt_resources=virt_resources)
get_mock.return_value = []
# Clone the fixture so that setting an error status here does not leak
# into other tests that use the shared module-level migration fixtures.
migr_obj = _MIGRATION_FIXTURES['dest-only'].obj_clone()
migr_obj.status = 'error'
# in-process and error migrations
migr_mock.return_value = [migr_obj]
inst_uuid = migr_obj.instance_uuid
instance = _MIGRATION_INSTANCE_FIXTURES[inst_uuid].obj_clone()
get_inst_mock.return_value = instance
get_cn_mock.return_value = _COMPUTE_NODE_FIXTURES[0]
instance.migration_context = _MIGRATION_CONTEXT_FIXTURES[inst_uuid]
update_mock = self._update_available_resources()
get_cn_mock.assert_called_once_with(mock.ANY, _HOSTNAME, _NODENAME)
expected_resources = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
'free_disk_gb': 1,
'local_gb': 6,
'free_ram_mb': 256, # 512 total - 256 for possible confirm of new
'memory_mb_used': 256, # 256 possible confirmed amount
'vcpus_used': 2,
'local_gb_used': 5,
'memory_mb': 512,
'current_workload': 0,
'vcpus': 4,
'running_vms': 0
}
_update_compute_node(expected_resources, **vals)
actual_resources = update_mock.call_args[0][1]
self.assertTrue(
obj_base.obj_equal_prims(expected_resources, actual_resources))
update_mock.assert_called_once()
@mock.patch('nova.compute.utils.is_volume_backed_instance',
new=mock.Mock(return_value=False))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
new=mock.Mock(return_value=objects.PciDeviceList()))
@mock.patch('nova.objects.MigrationContext.get_by_instance_uuid',
new=mock.Mock(return_value=None))
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_populate_assigned_resources(self, mock_get_instances,
mock_get_instance,
mock_get_migrations,
mock_get_cn):
# When update_available_resource() runs, rt.assigned_resources is
# populated: resources assigned to tracked migrations and instances
# are recorded in rt.assigned_resources.
self._setup_rt()
# one instance is in the middle of being "resized" to the same host,
# meaning there are two related resource allocations - one against
# the instance and one against the migration record
# here resource_1 and resource_2 are assigned to resizing inst
migr_obj = _MIGRATION_FIXTURES['source-and-dest']
inst_uuid = migr_obj.instance_uuid
resizing_inst = _MIGRATION_INSTANCE_FIXTURES[inst_uuid].obj_clone()
mig_ctxt = _MIGRATION_CONTEXT_FIXTURES[resizing_inst.uuid]
mig_ctxt.old_resources = objects.ResourceList(
objects=[self.resource_1])
mig_ctxt.new_resources = objects.ResourceList(
objects=[self.resource_2])
resizing_inst.migration_context = mig_ctxt
# the other instance is not being resized and only has the single
# resource allocation for itself
# here resource_0 is assigned to inst
inst = _INSTANCE_FIXTURES[0]
inst.resources = objects.ResourceList(objects=[self.resource_0])
mock_get_instances.return_value = [inst, resizing_inst]
mock_get_instance.return_value = resizing_inst
mock_get_migrations.return_value = [migr_obj]
mock_get_cn.return_value = self.compute
update_mock = self._update_available_resources()
update_mock.assert_called_once()
expected_assigned_resources = {self.compute.uuid: {
"CUSTOM_RESOURCE_0": {self.resource_0},
"CUSTOM_RESOURCE_1": {self.resource_1, self.resource_2}
}}
self.assertEqual(expected_assigned_resources,
self.rt.assigned_resources)
@mock.patch('nova.compute.utils.is_volume_backed_instance',
new=mock.Mock(return_value=False))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
new=mock.Mock(return_value=objects.PciDeviceList()))
@mock.patch('nova.objects.MigrationContext.get_by_instance_uuid',
new=mock.Mock(return_value=None))
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.Instance.get_by_uuid')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_check_resources_startup_success(self, mock_get_instances,
mock_get_instance,
mock_get_migrations,
mock_get_cn):
# When update_available_resource() runs at startup, it triggers this
# check for assigned resources that are missing from the provider
# tree. If any are found, the likely cause is that an admin deleted
# the resources on the host or removed some resource configuration
# from the config file.
self._setup_rt()
# there are three resources in provider tree
self.rt.provider_tree = self._setup_ptree(self.compute)
migr_obj = _MIGRATION_FIXTURES['source-and-dest']
inst_uuid = migr_obj.instance_uuid
resizing_inst = _MIGRATION_INSTANCE_FIXTURES[inst_uuid].obj_clone()
mig_ctxt = _MIGRATION_CONTEXT_FIXTURES[resizing_inst.uuid]
mig_ctxt.old_resources = objects.ResourceList(
objects=[self.resource_1])
mig_ctxt.new_resources = objects.ResourceList(
objects=[self.resource_2])
resizing_inst.migration_context = mig_ctxt
inst = _INSTANCE_FIXTURES[0]
inst.resources = objects.ResourceList(objects=[self.resource_0])
mock_get_instances.return_value = [inst, resizing_inst]
mock_get_instance.return_value = resizing_inst
mock_get_migrations.return_value = [migr_obj]
mock_get_cn.return_value = self.compute
# The resource check is only triggered at startup.
update_mock = self._update_available_resources(startup=True)
update_mock.assert_called_once()
@mock.patch('nova.compute.utils.is_volume_backed_instance',
new=mock.Mock(return_value=False))
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
new=mock.Mock(return_value=objects.PciDeviceList()))
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.InstanceList.get_by_host_and_node')
def test_check_resources_startup_fail(self, mock_get_instances,
mock_get_migrations,
mock_get_cn):
# Similar to test_check_resources_startup_success, but this one covers
# the case where the startup resource check fails.
resource = objects.Resource(provider_uuid=self.compute.uuid,
resource_class="CUSTOM_RESOURCE_0",
identifier="notfound")
self._setup_rt()
# there are three resources in provider tree
self.rt.provider_tree = self._setup_ptree(self.compute)
inst = _INSTANCE_FIXTURES[0]
inst.resources = objects.ResourceList(objects=[resource])
mock_get_instances.return_value = [inst]
mock_get_migrations.return_value = []
mock_get_cn.return_value = self.compute
# There are assigned resources not found in provider tree
self.assertRaises(exc.AssignedResourceNotFound,
self._update_available_resources, startup=True)
class TestInitComputeNode(BaseTestCase):
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
@mock.patch('nova.objects.ComputeNode.create')
@mock.patch('nova.objects.Service.get_by_compute_host')
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_update')
def test_no_op_init_compute_node(self, update_mock, get_mock, service_mock,
create_mock, pci_mock):
self._setup_rt()
resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES)
compute_node = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
self.rt.compute_nodes[_NODENAME] = compute_node
self.assertFalse(
self.rt._init_compute_node(mock.sentinel.ctx, resources))
self.assertFalse(service_mock.called)
self.assertFalse(get_mock.called)
self.assertFalse(create_mock.called)
self.assertTrue(pci_mock.called)
self.assertFalse(update_mock.called)
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
@mock.patch('nova.objects.ComputeNode.create')
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_update')
def test_compute_node_loaded(self, update_mock, get_mock, create_mock,
pci_mock):
self._setup_rt()
def fake_get_node(_ctx, host, node):
res = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
return res
get_mock.side_effect = fake_get_node
resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES)
self.assertFalse(
self.rt._init_compute_node(mock.sentinel.ctx, resources))
get_mock.assert_called_once_with(mock.sentinel.ctx, _HOSTNAME,
_NODENAME)
self.assertFalse(create_mock.called)
self.assertFalse(update_mock.called)
@mock.patch('nova.objects.ComputeNodeList.get_by_hypervisor')
@mock.patch('nova.objects.PciDeviceList.get_by_compute_node',
return_value=objects.PciDeviceList())
@mock.patch('nova.objects.ComputeNode.create')
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_update')
def test_compute_node_rebalanced(self, update_mock, get_mock, create_mock,
pci_mock, get_by_hypervisor_mock):
self._setup_rt()
self.driver_mock.rebalances_nodes = True
cn = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
cn.host = "old-host"
def fake_get_all(_ctx, nodename):
return [cn]
get_mock.side_effect = exc.NotFound
get_by_hypervisor_mock.side_effect = fake_get_all
resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES)
self.assertFalse(
self.rt._init_compute_node(mock.sentinel.ctx, resources))
get_mock.assert_called_once_with(mock.sentinel.ctx, _HOSTNAME,
_NODENAME)
get_by_hypervisor_mock.assert_called_once_with(mock.sentinel.ctx,
_NODENAME)
create_mock.assert_not_called()
update_mock.assert_called_once_with(mock.sentinel.ctx, cn)
self.assertEqual(_HOSTNAME, self.rt.compute_nodes[_NODENAME].host)
@mock.patch('nova.objects.ComputeNodeList.get_by_hypervisor')
@mock.patch('nova.objects.ComputeNode.create')
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_update')
def test_compute_node_created_on_empty(self, update_mock, get_mock,
create_mock,
get_by_hypervisor_mock):
get_by_hypervisor_mock.return_value = []
self._test_compute_node_created(update_mock, get_mock, create_mock,
get_by_hypervisor_mock)
@mock.patch('nova.objects.ComputeNodeList.get_by_hypervisor')
@mock.patch('nova.objects.ComputeNode.create')
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_update')
def test_compute_node_created_on_empty_rebalance(self, update_mock,
get_mock,
create_mock,
get_by_hypervisor_mock):
get_by_hypervisor_mock.return_value = []
self._test_compute_node_created(update_mock, get_mock, create_mock,
get_by_hypervisor_mock,
rebalances_nodes=True)
@mock.patch('nova.objects.ComputeNodeList.get_by_hypervisor')
@mock.patch('nova.objects.ComputeNode.create')
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_update')
def test_compute_node_created_too_many(self, update_mock, get_mock,
create_mock,
get_by_hypervisor_mock):
get_by_hypervisor_mock.return_value = ["fake_node_1", "fake_node_2"]
self._test_compute_node_created(update_mock, get_mock, create_mock,
get_by_hypervisor_mock,
rebalances_nodes=True)
def _test_compute_node_created(self, update_mock, get_mock, create_mock,
get_by_hypervisor_mock,
rebalances_nodes=False):
self.flags(cpu_allocation_ratio=1.0, ram_allocation_ratio=1.0,
disk_allocation_ratio=1.0)
self._setup_rt()
self.driver_mock.rebalances_nodes = rebalances_nodes
get_mock.side_effect = exc.NotFound
resources = {
'host_ip': '1.1.1.1',
'numa_topology': None,
'metrics': '[]',
'cpu_info': '',
'hypervisor_hostname': _NODENAME,
'free_disk_gb': 6,
'hypervisor_version': 0,
'local_gb': 6,
'free_ram_mb': 512,
'memory_mb_used': 0,
'pci_device_pools': [],
'vcpus_used': 0,
'hypervisor_type': 'fake',
'local_gb_used': 0,
'memory_mb': 512,
'current_workload': 0,
'vcpus': 4,
'running_vms': 0,
'pci_passthrough_devices': '[]',
'uuid': uuids.compute_node_uuid
}
# The expected compute represents the initial values used
# when creating a compute node.
expected_compute = objects.ComputeNode(
host_ip=resources['host_ip'],
vcpus=resources['vcpus'],
memory_mb=resources['memory_mb'],
local_gb=resources['local_gb'],
cpu_info=resources['cpu_info'],
vcpus_used=resources['vcpus_used'],
memory_mb_used=resources['memory_mb_used'],
local_gb_used=resources['local_gb_used'],
numa_topology=resources['numa_topology'],
hypervisor_type=resources['hypervisor_type'],
hypervisor_version=resources['hypervisor_version'],
hypervisor_hostname=resources['hypervisor_hostname'],
# NOTE(sbauza): ResourceTracker adds host field
host=_HOSTNAME,
# NOTE(sbauza): ResourceTracker adds CONF allocation ratios
ram_allocation_ratio=CONF.initial_ram_allocation_ratio,
cpu_allocation_ratio=CONF.initial_cpu_allocation_ratio,
disk_allocation_ratio=CONF.initial_disk_allocation_ratio,
stats={'failed_builds': 0},
uuid=uuids.compute_node_uuid
)
with mock.patch.object(self.rt, '_setup_pci_tracker') as setup_pci:
self.assertTrue(
self.rt._init_compute_node(mock.sentinel.ctx, resources))
cn = self.rt.compute_nodes[_NODENAME]
get_mock.assert_called_once_with(mock.sentinel.ctx, _HOSTNAME,
_NODENAME)
if rebalances_nodes:
get_by_hypervisor_mock.assert_called_once_with(
mock.sentinel.ctx, _NODENAME)
else:
get_by_hypervisor_mock.assert_not_called()
create_mock.assert_called_once_with()
self.assertTrue(obj_base.obj_equal_prims(expected_compute, cn))
setup_pci.assert_called_once_with(mock.sentinel.ctx, cn, resources)
self.assertFalse(update_mock.called)
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_setup_pci_tracker')
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename',
side_effect=exc.ComputeHostNotFound(host=_HOSTNAME))
@mock.patch('nova.objects.ComputeNode.create',
side_effect=(test.TestingException, None))
def test_compute_node_create_fail_retry_works(self, mock_create, mock_get,
mock_setup_pci):
"""Tests that _init_compute_node will not save the ComputeNode object
in the compute_nodes dict if create() fails.
"""
self._setup_rt()
self.assertEqual({}, self.rt.compute_nodes)
ctxt = context.get_context()
# The first ComputeNode.create fails so rt.compute_nodes should
# remain empty.
resources = copy.deepcopy(_VIRT_DRIVER_AVAIL_RESOURCES)
resources['uuid'] = uuids.cn_uuid # for the LOG.info message
self.assertRaises(test.TestingException,
self.rt._init_compute_node, ctxt, resources)
self.assertEqual({}, self.rt.compute_nodes)
# Second create works so compute_nodes should have a mapping.
self.assertTrue(self.rt._init_compute_node(ctxt, resources))
self.assertIn(_NODENAME, self.rt.compute_nodes)
mock_get.assert_has_calls([mock.call(
ctxt, _HOSTNAME, _NODENAME)] * 2)
self.assertEqual(2, mock_create.call_count)
mock_setup_pci.assert_called_once_with(
ctxt, test.MatchType(objects.ComputeNode), resources)
@mock.patch('nova.objects.ComputeNodeList.get_by_hypervisor')
@mock.patch('nova.objects.ComputeNode.create')
@mock.patch('nova.objects.ComputeNode.get_by_host_and_nodename')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_update')
def test_node_removed(self, update_mock, get_mock,
create_mock, get_by_hypervisor_mock):
self._test_compute_node_created(update_mock, get_mock, create_mock,
get_by_hypervisor_mock)
self.rt.old_resources[_NODENAME] = mock.sentinel.foo
self.assertIn(_NODENAME, self.rt.compute_nodes)
self.assertIn(_NODENAME, self.rt.stats)
self.assertIn(_NODENAME, self.rt.old_resources)
self.rt.remove_node(_NODENAME)
self.assertNotIn(_NODENAME, self.rt.compute_nodes)
self.assertNotIn(_NODENAME, self.rt.stats)
self.assertNotIn(_NODENAME, self.rt.old_resources)
class TestUpdateComputeNode(BaseTestCase):
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_sync_compute_service_disabled_trait', new=mock.Mock())
@mock.patch('nova.objects.ComputeNode.save')
def test_existing_compute_node_updated_same_resources(self, save_mock):
self._setup_rt()
# This is the same set of resources as the fixture, deliberately. We
# are checking below to see that compute_node.save is not needlessly
# called when the resources don't actually change.
orig_compute = _COMPUTE_NODE_FIXTURES[0].obj_clone()
self.rt.compute_nodes[_NODENAME] = orig_compute
self.rt.old_resources[_NODENAME] = orig_compute
new_compute = orig_compute.obj_clone()
self.rt._update(mock.sentinel.ctx, new_compute)
self.assertFalse(save_mock.called)
# Even though the compute node is not updated, update_provider_tree
# is still called.
self.driver_mock.update_provider_tree.assert_called_once()
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_sync_compute_service_disabled_trait', new=mock.Mock())
@mock.patch('nova.objects.ComputeNode.save')
def test_existing_compute_node_updated_diff_updated_at(self, save_mock):
# if only updated_at is changed, it won't call compute_node.save()
self._setup_rt()
ts1 = timeutils.utcnow()
ts2 = ts1 + datetime.timedelta(seconds=10)
orig_compute = _COMPUTE_NODE_FIXTURES[0].obj_clone()
orig_compute.updated_at = ts1
self.rt.compute_nodes[_NODENAME] = orig_compute
self.rt.old_resources[_NODENAME] = orig_compute
# Make the new_compute object have a different timestamp
# from orig_compute.
new_compute = orig_compute.obj_clone()
new_compute.updated_at = ts2
self.rt._update(mock.sentinel.ctx, new_compute)
self.assertFalse(save_mock.called)
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_sync_compute_service_disabled_trait', new=mock.Mock())
@mock.patch('nova.objects.ComputeNode.save')
def test_existing_compute_node_updated_new_resources(self, save_mock):
self._setup_rt()
orig_compute = _COMPUTE_NODE_FIXTURES[0].obj_clone()
self.rt.compute_nodes[_NODENAME] = orig_compute
self.rt.old_resources[_NODENAME] = orig_compute
# Deliberately changing local_gb_used, vcpus_used, and memory_mb_used
# below to be different from the compute node fixture's base usages.
# We want to check that the code paths update the stored compute node
# usage records with what is supplied to _update().
new_compute = orig_compute.obj_clone()
new_compute.memory_mb_used = 128
new_compute.vcpus_used = 2
new_compute.local_gb_used = 4
self.rt._update(mock.sentinel.ctx, new_compute)
save_mock.assert_called_once_with()
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_sync_compute_service_disabled_trait')
def test_existing_node_capabilities_as_traits(self, mock_sync_disabled):
"""The capabilities_as_traits() driver method returns traits
information for a node/provider.
"""
self._setup_rt()
rc = self.rt.reportclient
rc.set_traits_for_provider = mock.MagicMock()
# Emulate a driver that has implemented the update_provider_tree()
# virt driver method
self.driver_mock.update_provider_tree = mock.Mock()
self.driver_mock.capabilities_as_traits.return_value = \
{mock.sentinel.trait: True}
orig_compute = _COMPUTE_NODE_FIXTURES[0].obj_clone()
self.rt.compute_nodes[_NODENAME] = orig_compute
self.rt.old_resources[_NODENAME] = orig_compute
new_compute = orig_compute.obj_clone()
ptree = self._setup_ptree(orig_compute)
self.rt._update(mock.sentinel.ctx, new_compute)
self.driver_mock.capabilities_as_traits.assert_called_once()
# We always decorate with COMPUTE_NODE
exp_traits = {mock.sentinel.trait, os_traits.COMPUTE_NODE}
# Can't predict the order of the traits list, so use ItemsMatcher
ptree.update_traits.assert_called_once_with(
new_compute.hypervisor_hostname, utils.ItemsMatcher(exp_traits))
mock_sync_disabled.assert_called_once_with(
mock.sentinel.ctx, exp_traits)
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_sync_compute_service_disabled_trait')
@mock.patch('nova.objects.ComputeNode.save')
def test_existing_node_update_provider_tree_implemented(
self, save_mock, mock_sync_disabled):
"""The update_provider_tree() virt driver method must be implemented
by all virt drivers. This method returns inventory, trait, and
aggregate information for resource providers in a tree associated with
the compute node.
"""
fake_inv = {
orc.VCPU: {
'total': 2,
'min_unit': 1,
'max_unit': 2,
'step_size': 1,
'allocation_ratio': 16.0,
'reserved': 1,
},
orc.MEMORY_MB: {
'total': 4096,
'min_unit': 1,
'max_unit': 4096,
'step_size': 1,
'allocation_ratio': 1.5,
'reserved': 512,
},
orc.DISK_GB: {
'total': 500,
'min_unit': 1,
'max_unit': 500,
'step_size': 1,
'allocation_ratio': 1.0,
'reserved': 1,
},
}
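# fake_inv mimics the inventory the driver would put on the provider
# tree: one entry per resource class using the standard placement
# inventory fields (total, reserved, min_unit, max_unit, step_size,
# allocation_ratio).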
def fake_upt(ptree, nodename, allocations=None):
self.assertIsNone(allocations)
ptree.update_inventory(nodename, fake_inv)
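# fake_upt stands in for the driver's update_provider_tree() hook: it
# mutates the passed ProviderTree in place (it returns nothing) and,
# since no allocations are supplied on this call path, asserts that the
# allocations argument is None.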
self._setup_rt()
# Emulate a driver that has implemented the update_provider_tree()
# virt driver method
self.driver_mock.update_provider_tree.side_effect = fake_upt
orig_compute = _COMPUTE_NODE_FIXTURES[0].obj_clone()
self.rt.compute_nodes[_NODENAME] = orig_compute
self.rt.old_resources[_NODENAME] = orig_compute
# Deliberately changing local_gb to trigger updating inventory
new_compute = orig_compute.obj_clone()
new_compute.local_gb = 210000
ptree = self._setup_ptree(orig_compute)
self.rt._update(mock.sentinel.ctx, new_compute)
save_mock.assert_called_once_with()
gptaer_mock = self.rt.reportclient.get_provider_tree_and_ensure_root
gptaer_mock.assert_called_once_with(
mock.sentinel.ctx, new_compute.uuid,
name=new_compute.hypervisor_hostname)
self.driver_mock.update_provider_tree.assert_called_once_with(
ptree, new_compute.hypervisor_hostname)
self.rt.reportclient.update_from_provider_tree.assert_called_once_with(
mock.sentinel.ctx, ptree, allocations=None)
ptree.update_traits.assert_called_once_with(
new_compute.hypervisor_hostname,
[os_traits.COMPUTE_NODE]
)
exp_inv = copy.deepcopy(fake_inv)
# These ratios and reserved amounts come from fake_upt
exp_inv[orc.VCPU]['allocation_ratio'] = 16.0
exp_inv[orc.MEMORY_MB]['allocation_ratio'] = 1.5
exp_inv[orc.DISK_GB]['allocation_ratio'] = 1.0
exp_inv[orc.VCPU]['reserved'] = 1
exp_inv[orc.MEMORY_MB]['reserved'] = 512
# 1024MB in GB
exp_inv[orc.DISK_GB]['reserved'] = 1
self.assertEqual(exp_inv, ptree.data(new_compute.uuid).inventory)
mock_sync_disabled.assert_called_once()
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_sync_compute_service_disabled_trait')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_resource_change', return_value=False)
def test_update_retry_success(self, mock_resource_change,
mock_sync_disabled):
self._setup_rt()
orig_compute = _COMPUTE_NODE_FIXTURES[0].obj_clone()
self.rt.compute_nodes[_NODENAME] = orig_compute
self.rt.old_resources[_NODENAME] = orig_compute
# Deliberately changing local_gb to trigger updating inventory
new_compute = orig_compute.obj_clone()
new_compute.local_gb = 210000
# Emulate a driver that has implemented the update_provider_tree()
# virt driver method, so we hit the update_from_provider_tree() path.
self.driver_mock.update_provider_tree.side_effect = lambda *a: None
ufpt_mock = self.rt.reportclient.update_from_provider_tree
ufpt_mock.side_effect = (
exc.ResourceProviderUpdateConflict(
uuid='uuid', generation=42, error='error'), None)
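# The first update_from_provider_tree() call fails with a generation
# conflict (placement data changed underneath us) and the second one
# succeeds, so exactly one retry of _update_to_placement is expected.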
self.rt._update(mock.sentinel.ctx, new_compute)
self.assertEqual(2, ufpt_mock.call_count)
self.assertEqual(2, mock_sync_disabled.call_count)
# The retry is restricted to _update_to_placement
self.assertEqual(1, mock_resource_change.call_count)
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_sync_compute_service_disabled_trait')
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_resource_change', return_value=False)
def test_update_retry_raises(self, mock_resource_change,
mock_sync_disabled):
self._setup_rt()
orig_compute = _COMPUTE_NODE_FIXTURES[0].obj_clone()
self.rt.compute_nodes[_NODENAME] = orig_compute
self.rt.old_resources[_NODENAME] = orig_compute
# Deliberately changing local_gb to trigger updating inventory
new_compute = orig_compute.obj_clone()
new_compute.local_gb = 210000
# Emulate a driver that has implemented the update_provider_tree()
# virt driver method, so we hit the update_from_provider_tree() path.
self.driver_mock.update_provider_tree.side_effect = lambda *a: None
ufpt_mock = self.rt.reportclient.update_from_provider_tree
ufpt_mock.side_effect = (
exc.ResourceProviderUpdateConflict(
uuid='uuid', generation=42, error='error'))
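# Unlike the success case above, the conflict is raised on every call,
# so the retries are exhausted and the exception bubbles up to the
# caller; the call counts below show all four attempts were made
# before giving up.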
self.assertRaises(exc.ResourceProviderUpdateConflict,
self.rt._update, mock.sentinel.ctx, new_compute)
self.assertEqual(4, ufpt_mock.call_count)
self.assertEqual(4, mock_sync_disabled.call_count)
# The retry is restricted to _update_to_placement
self.assertEqual(1, mock_resource_change.call_count)
@mock.patch('nova.objects.Service.get_by_compute_host',
return_value=objects.Service(disabled=True))
def test_sync_compute_service_disabled_trait_add(self, mock_get_by_host):
"""Tests the scenario that the compute service is disabled so the
COMPUTE_STATUS_DISABLED trait is added to the traits set.
"""
self._setup_rt()
ctxt = context.get_admin_context()
traits = set()
self.rt._sync_compute_service_disabled_trait(ctxt, traits)
self.assertEqual({os_traits.COMPUTE_STATUS_DISABLED}, traits)
mock_get_by_host.assert_called_once_with(ctxt, self.rt.host)
@mock.patch('nova.objects.Service.get_by_compute_host',
return_value=objects.Service(disabled=False))
def test_sync_compute_service_disabled_trait_remove(
self, mock_get_by_host):
"""Tests the scenario that the compute service is enabled so the
COMPUTE_STATUS_DISABLED trait is removed from the traits set.
"""
self._setup_rt()
ctxt = context.get_admin_context()
# First test with the trait actually in the set.
traits = {os_traits.COMPUTE_STATUS_DISABLED}
self.rt._sync_compute_service_disabled_trait(ctxt, traits)
self.assertEqual(set(), traits)
mock_get_by_host.assert_called_once_with(ctxt, self.rt.host)
# Now run it again with the empty set to make sure the method handles
# the trait not already being in the set (idempotency).
self.rt._sync_compute_service_disabled_trait(ctxt, traits)
self.assertEqual(0, len(traits))
@mock.patch('nova.objects.Service.get_by_compute_host',
# One might think Service.get_by_compute_host would raise
# ServiceNotFound but the DB API raises ComputeHostNotFound.
side_effect=exc.ComputeHostNotFound(host=_HOSTNAME))
@mock.patch('nova.compute.resource_tracker.LOG.error')
def test_sync_compute_service_disabled_trait_service_not_found(
self, mock_log_error, mock_get_by_host):
"""Tests the scenario that the compute service is not found so the
traits set is unmodified and an error is logged.
"""
self._setup_rt()
ctxt = context.get_admin_context()
traits = set()
self.rt._sync_compute_service_disabled_trait(ctxt, traits)
self.assertEqual(0, len(traits))
mock_get_by_host.assert_called_once_with(ctxt, self.rt.host)
mock_log_error.assert_called_once()
self.assertIn('Unable to find services table record for nova-compute',
mock_log_error.call_args[0][0])
def test_update_compute_node_save_fails_restores_old_resources(self):
"""Tests the scenario that compute_node.save() fails and the
old_resources value for the node is restored to its previous value
before calling _resource_change updated it.
"""
self._setup_rt()
orig_compute = _COMPUTE_NODE_FIXTURES[0].obj_clone()
# Pretend the ComputeNode was just created in the DB but not yet saved
# with the free_disk_gb field.
delattr(orig_compute, 'free_disk_gb')
nodename = orig_compute.hypervisor_hostname
self.rt.old_resources[nodename] = orig_compute
# Now have an updated compute node with free_disk_gb set which should
# make _resource_change modify old_resources and return True.
updated_compute = _COMPUTE_NODE_FIXTURES[0].obj_clone()
ctxt = context.get_admin_context()
# Mock ComputeNode.save() to trigger some failure (realistically this
# could be a DBConnectionError).
with mock.patch.object(updated_compute, 'save',
side_effect=test.TestingException('db error')):
self.assertRaises(test.TestingException,
self.rt._update,
ctxt, updated_compute, startup=True)
# Make sure that the old_resources entry for the node has not changed
# from the original.
self.assertTrue(self.rt._resource_change(updated_compute))
def test_copy_resources_no_update_allocation_ratios(self):
"""Tests that a ComputeNode object's allocation ratio fields are
not set if the configured allocation ratio values are default None.
"""
self._setup_rt()
compute = _COMPUTE_NODE_FIXTURES[0].obj_clone()
compute.obj_reset_changes() # make sure we start clean
self.rt._copy_resources(
compute, self.driver_mock.get_available_resource.return_value)
# Assert that the ComputeNode fields were not changed.
changes = compute.obj_get_changes()
for res in ('cpu', 'disk', 'ram'):
attr_name = '%s_allocation_ratio' % res
self.assertNotIn(attr_name, changes)
def test_copy_resources_update_allocation_zero_ratios(self):
"""Tests that a ComputeNode object's allocation ratio fields are
not set if the configured allocation ratio values are 0.0.
"""
# NOTE(yikun): In the Stein release the default value of
# (cpu|ram|disk)_allocation_ratio was changed from 0.0 to None, but
# 0.0 is still accepted for compatibility; this 0.0 handling will be
# removed in the next release (Train).
# Set explicit ratio config values to 0.0 (the default is None).
for res in ('cpu', 'disk', 'ram'):
opt_name = '%s_allocation_ratio' % res
CONF.set_override(opt_name, 0.0)
self._setup_rt()
compute = _COMPUTE_NODE_FIXTURES[0].obj_clone()
compute.obj_reset_changes() # make sure we start clean
self.rt._copy_resources(
compute, self.driver_mock.get_available_resource.return_value)
# Assert that the ComputeNode fields were not changed.
changes = compute.obj_get_changes()
for res in ('cpu', 'disk', 'ram'):
attr_name = '%s_allocation_ratio' % res
self.assertNotIn(attr_name, changes)
def test_copy_resources_update_allocation_ratios_from_config(self):
"""Tests that a ComputeNode object's allocation ratio fields are
set if the configured allocation ratio values are not default.
"""
# Set explicit ratio config values to 1.0 (the default is None).
for res in ('cpu', 'disk', 'ram'):
opt_name = '%s_allocation_ratio' % res
CONF.set_override(opt_name, 1.0)
self._setup_rt()
compute = _COMPUTE_NODE_FIXTURES[0].obj_clone()
compute.obj_reset_changes() # make sure we start clean
self.rt._copy_resources(
compute, self.driver_mock.get_available_resource.return_value)
# Assert that the ComputeNode fields were changed.
changes = compute.obj_get_changes()
for res in ('cpu', 'disk', 'ram'):
attr_name = '%s_allocation_ratio' % res
self.assertIn(attr_name, changes)
self.assertEqual(1.0, changes[attr_name])
class TestInstanceClaim(BaseTestCase):
def setUp(self):
super(TestInstanceClaim, self).setUp()
self.flags(reserved_host_disk_mb=0, reserved_host_memory_mb=0)
self._setup_rt()
cn = _COMPUTE_NODE_FIXTURES[0].obj_clone()
self.rt.compute_nodes[_NODENAME] = cn
self.rt.provider_tree = self._setup_ptree(cn)
# not using mock.sentinel.ctx because instance_claim calls elevated() on the context
self.ctx = mock.MagicMock()
self.elevated = mock.MagicMock()
self.ctx.elevated.return_value = self.elevated
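# instance_claim() elevates the request context before calling
# _update(), so the assertions in the tests below compare against
# self.elevated rather than self.ctx.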
self.instance = _INSTANCE_FIXTURES[0].obj_clone()
def assertEqualNUMAHostTopology(self, expected, got):
attrs = ('cpuset', 'pcpuset', 'memory', 'id', 'cpu_usage',
'memory_usage')
if None in (expected, got):
if expected != got:
raise AssertionError("Topologies don't match. Expected: "
"%(expected)s, but got: %(got)s" %
{'expected': expected, 'got': got})
else:
return
if len(expected) != len(got):
raise AssertionError("Topologies don't match due to different "
"number of cells. Expected: "
"%(expected)s, but got: %(got)s" %
{'expected': expected, 'got': got})
for exp_cell, got_cell in zip(expected.cells, got.cells):
for attr in attrs:
if getattr(exp_cell, attr) != getattr(got_cell, attr):
raise AssertionError("Topologies don't match. Expected: "
"%(expected)s, but got: %(got)s" %
{'expected': expected, 'got': got})
def test_claim_disabled(self):
self.rt.compute_nodes = {}
self.assertTrue(self.rt.disabled(_NODENAME))
with mock.patch.object(self.instance, 'save'):
claim = self.rt.instance_claim(mock.sentinel.ctx, self.instance,
_NODENAME, self.allocations, None)
self.assertEqual(self.rt.host, self.instance.host)
self.assertEqual(self.rt.host, self.instance.launched_on)
self.assertEqual(_NODENAME, self.instance.node)
self.assertIsInstance(claim, claims.NopClaim)
@mock.patch('nova.compute.utils.is_volume_backed_instance')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
def test_update_usage_with_claim(self, migr_mock, check_bfv_mock):
# Test that RT.update_usage() only changes the compute node
# resources if there has been a claim first.
self.instance.pci_requests = objects.InstancePCIRequests(requests=[])
check_bfv_mock.return_value = False
expected = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
self.rt.update_usage(self.ctx, self.instance, _NODENAME)
cn = self.rt.compute_nodes[_NODENAME]
self.assertTrue(obj_base.obj_equal_prims(expected, cn))
disk_used = self.instance.root_gb + self.instance.ephemeral_gb
vals = {
'local_gb_used': disk_used,
'memory_mb_used': self.instance.memory_mb,
'free_disk_gb': expected.local_gb - disk_used,
"free_ram_mb": expected.memory_mb - self.instance.memory_mb,
'running_vms': 1,
'vcpus_used': 1,
'pci_device_pools': objects.PciDevicePoolList(),
'stats': {
'io_workload': 0,
'num_instances': 1,
'num_task_None': 1,
'num_os_type_' + self.instance.os_type: 1,
'num_proj_' + self.instance.project_id: 1,
'num_vm_' + self.instance.vm_state: 1,
},
}
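# Apply the expected post-claim usage to the expected ComputeNode via
# the _update_compute_node() test helper so it can be compared
# field-by-field against the tracker's node after the claim.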
_update_compute_node(expected, **vals)
with mock.patch.object(self.rt, '_update') as update_mock:
with mock.patch.object(self.instance, 'save'):
self.rt.instance_claim(self.ctx, self.instance, _NODENAME,
self.allocations, None)
cn = self.rt.compute_nodes[_NODENAME]
update_mock.assert_called_once_with(self.elevated, cn)
self.assertTrue(obj_base.obj_equal_prims(expected, cn))
@mock.patch('nova.compute.utils.is_volume_backed_instance')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
def test_update_usage_removed(self, migr_mock, check_bfv_mock):
# Test that RT.update_usage() removes the instance when update is
# called in a removed state
self.instance.pci_requests = objects.InstancePCIRequests(requests=[])
check_bfv_mock.return_value = False
cn = self.rt.compute_nodes[_NODENAME]
allocations = {
cn.uuid: {
"generation": 0,
"resources": {
"VCPU": 1,
"MEMORY_MB": 512,
"CUSTOM_RESOURCE_0": 1,
"CUSTOM_RESOURCE_1": 2,
}
}
}
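# The allocations dict mirrors a placement-style allocation record:
# keyed by provider UUID, with a generation and a resources mapping.
# The two CUSTOM_RESOURCE_* classes exercise the tracker's
# assigned_resources bookkeeping verified further down.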
expected = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
disk_used = self.instance.root_gb + self.instance.ephemeral_gb
vals = {
'local_gb_used': disk_used,
'memory_mb_used': self.instance.memory_mb,
'free_disk_gb': expected.local_gb - disk_used,
"free_ram_mb": expected.memory_mb - self.instance.memory_mb,
'running_vms': 1,
'vcpus_used': 1,
'pci_device_pools': objects.PciDevicePoolList(),
'stats': {
'io_workload': 0,
'num_instances': 1,
'num_task_None': 1,
'num_os_type_' + self.instance.os_type: 1,
'num_proj_' + self.instance.project_id: 1,
'num_vm_' + self.instance.vm_state: 1,
},
}
_update_compute_node(expected, **vals)
with mock.patch.object(self.rt, '_update') as update_mock:
with mock.patch.object(self.instance, 'save'):
self.rt.instance_claim(self.ctx, self.instance, _NODENAME,
allocations, None)
cn = self.rt.compute_nodes[_NODENAME]
update_mock.assert_called_once_with(self.elevated, cn)
self.assertTrue(obj_base.obj_equal_prims(expected, cn))
# Verify that the assigned resources are tracked
for rc, amount in [("CUSTOM_RESOURCE_0", 1),
("CUSTOM_RESOURCE_1", 2)]:
self.assertEqual(amount,
len(self.rt.assigned_resources[cn.uuid][rc]))
expected_updated = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
'pci_device_pools': objects.PciDevicePoolList(),
'stats': {
'io_workload': 0,
'num_instances': 0,
'num_task_None': 0,
'num_os_type_' + self.instance.os_type: 0,
'num_proj_' + self.instance.project_id: 0,
'num_vm_' + self.instance.vm_state: 0,
},
}
_update_compute_node(expected_updated, **vals)
self.instance.vm_state = vm_states.SHELVED_OFFLOADED
with mock.patch.object(self.rt, '_update') as update_mock:
self.rt.update_usage(self.ctx, self.instance, _NODENAME)
cn = self.rt.compute_nodes[_NODENAME]
self.assertTrue(obj_base.obj_equal_prims(expected_updated, cn))
# Verify that the resources are released
for rc in ["CUSTOM_RESOURCE_0", "CUSTOM_RESOURCE_1"]:
self.assertEqual(0, len(self.rt.assigned_resources[cn.uuid][rc]))
@mock.patch('nova.compute.utils.is_volume_backed_instance')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
def test_claim(self, migr_mock, check_bfv_mock):
self.instance.pci_requests = objects.InstancePCIRequests(requests=[])
check_bfv_mock.return_value = False
disk_used = self.instance.root_gb + self.instance.ephemeral_gb
expected = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
'local_gb_used': disk_used,
'memory_mb_used': self.instance.memory_mb,
'free_disk_gb': expected.local_gb - disk_used,
"free_ram_mb": expected.memory_mb - self.instance.memory_mb,
'running_vms': 1,
'vcpus_used': 1,
'pci_device_pools': objects.PciDevicePoolList(),
'stats': {
'io_workload': 0,
'num_instances': 1,
'num_task_None': 1,
'num_os_type_' + self.instance.os_type: 1,
'num_proj_' + self.instance.project_id: 1,
'num_vm_' + self.instance.vm_state: 1,
},
}
_update_compute_node(expected, **vals)
with mock.patch.object(self.rt, '_update') as update_mock:
with mock.patch.object(self.instance, 'save'):
self.rt.instance_claim(self.ctx, self.instance, _NODENAME,
self.allocations, None)
cn = self.rt.compute_nodes[_NODENAME]
update_mock.assert_called_once_with(self.elevated, cn)
self.assertTrue(obj_base.obj_equal_prims(expected, cn))
self.assertEqual(self.rt.host, self.instance.host)
self.assertEqual(self.rt.host, self.instance.launched_on)
self.assertEqual(_NODENAME, self.instance.node)
@mock.patch('nova.compute.utils.is_volume_backed_instance')
@mock.patch('nova.pci.stats.PciDeviceStats.support_requests',
return_value=True)
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
def test_claim_with_pci(self, migr_mock, pci_stats_mock,
check_bfv_mock):
# Test that a claim involving PCI requests correctly claims
# PCI devices on the host and sends an updated pci_device_pools
# attribute of the ComputeNode object.
# TODO(jaypipes): Remove once the PCI tracker is always created
# upon the resource tracker being initialized...
with mock.patch.object(
objects.PciDeviceList, 'get_by_compute_node',
return_value=objects.PciDeviceList()
):
self.rt.pci_tracker = pci_manager.PciDevTracker(
mock.sentinel.ctx, _COMPUTE_NODE_FIXTURES[0])
pci_dev = pci_device.PciDevice.create(
None, fake_pci_device.dev_dict)
pci_devs = [pci_dev]
self.rt.pci_tracker.pci_devs = objects.PciDeviceList(objects=pci_devs)
request = objects.InstancePCIRequest(count=1,
spec=[{'vendor_id': 'v', 'product_id': 'p'}])
pci_requests = objects.InstancePCIRequests(
requests=[request],
instance_uuid=self.instance.uuid)
self.instance.pci_requests = pci_requests
check_bfv_mock.return_value = False
disk_used = self.instance.root_gb + self.instance.ephemeral_gb
expected = copy.deepcopy(_COMPUTE_NODE_FIXTURES[0])
vals = {
'local_gb_used': disk_used,
'memory_mb_used': self.instance.memory_mb,
'free_disk_gb': expected.local_gb - disk_used,
"free_ram_mb": expected.memory_mb - self.instance.memory_mb,
'running_vms': 1,
'vcpus_used': 1,
'pci_device_pools': objects.PciDevicePoolList(),
'stats': {
'io_workload': 0,
'num_instances': 1,
'num_task_None': 1,
'num_os_type_' + self.instance.os_type: 1,
'num_proj_' + self.instance.project_id: 1,
'num_vm_' + self.instance.vm_state: 1,
},
}
_update_compute_node(expected, **vals)
with mock.patch.object(self.rt, '_update') as update_mock:
with mock.patch.object(self.instance, 'save'):
self.rt.instance_claim(self.ctx, self.instance, _NODENAME,
self.allocations, None)
cn = self.rt.compute_nodes[_NODENAME]
update_mock.assert_called_once_with(self.elevated, cn)
pci_stats_mock.assert_called_once_with([request])
self.assertTrue(obj_base.obj_equal_prims(expected, cn))
@mock.patch('nova.compute.utils.is_volume_backed_instance',
new=mock.Mock(return_value=False))
def test_claim_with_resources(self):
self.instance.pci_requests = objects.InstancePCIRequests(requests=[])
cn = self.rt.compute_nodes[_NODENAME]
allocations = {
cn.uuid: {
"generation": 0,
"resources": {
"VCPU": 1,
"MEMORY_MB": 512,
"CUSTOM_RESOURCE_0": 1,
"CUSTOM_RESOURCE_1": 2,
}
}
}
expected_resources_0 = {self.resource_0}
expected_resources_1 = {self.resource_1, self.resource_2}
with mock.patch.object(self.rt, '_update'):
with mock.patch.object(self.instance, 'save'):
self.rt.instance_claim(self.ctx, self.instance, _NODENAME,
allocations, None)
self.assertEqual((expected_resources_0 | expected_resources_1),
set(self.instance.resources))
@mock.patch('nova.compute.utils.is_volume_backed_instance',
new=mock.Mock(return_value=False))
def test_claim_with_resources_from_free(self):
self.instance.pci_requests = objects.InstancePCIRequests(requests=[])
cn = self.rt.compute_nodes[_NODENAME]
self.rt.assigned_resources = {
self.resource_1.provider_uuid: {
self.resource_1.resource_class: {self.resource_1}}}
allocations = {
cn.uuid: {
"generation": 0,
"resources": {
"VCPU": 1,
"MEMORY_MB": 512,
"CUSTOM_RESOURCE_1": 1,
}
}
}
# resource_1 is assigned to other instances,
# so only resource_2 is available
expected_resources = {self.resource_2}
with mock.patch.object(self.rt, '_update'):
with mock.patch.object(self.instance, 'save'):
self.rt.instance_claim(self.ctx, self.instance, _NODENAME,
allocations, None)
self.assertEqual(expected_resources, set(self.instance.resources))
@mock.patch('nova.compute.utils.is_volume_backed_instance',
new=mock.Mock(return_value=False))
def test_claim_failed_with_resources(self):
self.instance.pci_requests = objects.InstancePCIRequests(requests=[])
cn = self.rt.compute_nodes[_NODENAME]
# Only one "CUSTOM_RESOURCE_0" resource is available
allocations = {
cn.uuid: {
"generation": 0,
"resources": {
"VCPU": 1,
"MEMORY_MB": 512,
"CUSTOM_RESOURCE_0": 2
}
}
}
with mock.patch.object(self.instance, 'save'):
self.assertRaises(exc.ComputeResourcesUnavailable,
self.rt.instance_claim, self.ctx, self.instance,
_NODENAME, allocations, None)
self.assertEqual(
0, len(self.rt.assigned_resources[cn.uuid]['CUSTOM_RESOURCE_0']))
@mock.patch('nova.compute.resource_tracker.ResourceTracker.'
'_sync_compute_service_disabled_trait', new=mock.Mock())
@mock.patch('nova.compute.utils.is_volume_backed_instance')
@mock.patch('nova.objects.MigrationList.get_in_progress_and_error')
@mock.patch('nova.objects.ComputeNode.save')
def test_claim_abort_context_manager(self, save_mock, migr_mock,
check_bfv_mock):
self.instance.pci_requests = objects.InstancePCIRequests(requests=[])
check_bfv_mock.return_value = False
cn = self.rt.compute_nodes[_NODENAME]
self.assertEqual(0, cn.local_gb_used)
self.assertEqual(0, cn.memory_mb_used)
self.assertEqual(0, cn.running_vms)
mock_save = mock.MagicMock()
mock_clear_numa = mock.MagicMock()
@mock.patch.object(self.instance, 'save', mock_save)
@mock.patch.object(self.instance, 'clear_numa_topology',
mock_clear_numa)
@mock.patch.object(objects.Instance, 'obj_clone',
return_value=self.instance)
def _doit(mock_clone):
with self.rt.instance_claim(self.ctx, self.instance, _NODENAME,
self.allocations, None):
# Raise an exception. Just make sure below that the abort()
# method of the claim object was called (and the resulting
# resources reset to the pre-claimed amounts)
raise test.TestingException(