nova/nova/tests/unit/scheduler/fakes.py
Stephen Finucane 278ab01c32 Add support for translating CPU policy extra specs, image meta
Map 'hw:cpu_policy' and 'hw:cpu_thread_policy' as follows:

  hw:cpu_policy
    dedicated -> resources:PCPU=${flavor.vcpus}
    shared    -> resources:VCPU=${flavor.vcpus}

  hw:cpu_thread_policy
    isolate -> trait:HW_CPU_HYPERTHREADING:forbidden
    require -> trait:HW_CPU_HYPERTHREADING:required
    prefer  -> (none, handled later during scheduling)

Ditto for the 'hw_cpu_policy' and 'hw_cpu_thread_policy' image metadata
equivalents.

In addition, increment the requested 'resources:PCPU' by 1 if the
'hw:emulator_threads_policy' extra spec is present and set to 'isolate'.
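
Concretely, the translation amounts to something like the following
(an illustrative sketch only; the helper name and dict shapes here are
hypothetical and are not the actual scheduler code):

  def cpu_policy_to_placement(extra_specs, vcpus):
      """Hypothetical helper mirroring the mapping described above."""
      resources = {}
      traits = {}

      policy = extra_specs.get('hw:cpu_policy')
      if policy == 'dedicated':
          resources['PCPU'] = vcpus
      elif policy == 'shared':
          resources['VCPU'] = vcpus

      thread_policy = extra_specs.get('hw:cpu_thread_policy')
      if thread_policy == 'isolate':
          traits['HW_CPU_HYPERTHREADING'] = 'forbidden'
      elif thread_policy == 'require':
          traits['HW_CPU_HYPERTHREADING'] = 'required'
      # 'prefer' adds nothing here; it is handled later during scheduling

      # one extra dedicated CPU for the isolated emulator thread
      if extra_specs.get('hw:emulator_threads_policy') == 'isolate':
          resources['PCPU'] = resources.get('PCPU', 0) + 1

      return resources, traits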

The scheduler will attempt to get PCPU allocations and fall back to
VCPU allocations if that fails. This is okay because the NUMA fitting
code from the 'hardware' module, used by both the 'NUMATopologyFilter'
and the libvirt driver, protects us. That code doesn't know anything
about PCPUs or VCPUs but rather cares about the 'NUMATopology.pcpuset'
field (starting in change I492803eaacc34c69af073689f9159449557919db),
which can be set to different values depending on whether this is Train
with new-style config, Train with old-style config, or Stein, as
sketched after the list below:

- For Train compute nodes with new-style config, 'NUMATopology.pcpuset'
  will be explicitly set by the virt driver to the value of
  '[compute] cpu_dedicated_set' or, if only '[compute] cpu_shared_set'
  is configured, to 'None' (it's nullable), so the calls to
  'hardware.numa_fit_instance_to_host' in the 'NUMATopologyFilter' or
  virt driver will fail if the instance can't actually fit.

- For Train compute nodes with old-style config, 'NUMATopology.pcpuset'
  will be set to the same value as 'NUMATopology.cpuset' by the virt
  driver.

- For Stein compute nodes, 'NUMATopology.pcpuset' will be unset and
  we'll detect this in 'hardware.numa_fit_instance_to_host' and simply
  set it to the same value as 'NUMATopology.cpuset'.
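
Roughly, then, only the Stein case needs a compatibility shim, along
the lines of the following sketch (illustrative only; the attribute
handling is approximate and assumes an unset field is simply absent
from the cell object, while the real check lives in
'hardware.numa_fit_instance_to_host'):

  def _ensure_pcpuset(host_cell):
      """Mirror 'cpuset' into 'pcpuset' for Stein-era compute nodes."""
      # Train, new-style config: 'pcpuset' is always set (possibly to
      # None when only '[compute] cpu_shared_set' is configured).
      # Train, old-style config: 'pcpuset' already mirrors 'cpuset'.
      # Stein: 'pcpuset' was never set, so fall back to 'cpuset'.
      if not hasattr(host_cell, 'pcpuset'):
          host_cell.pcpuset = host_cell.cpuset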

Part of blueprint cpu-resources

Change-Id: Ie38aa625dff543b5980fd437ad2febeba3b50079
Signed-off-by: Stephen Finucane <sfinucan@redhat.com>
2019-09-18 00:21:10 +01:00


# Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Fakes For Scheduler tests.
"""

import datetime

from oslo_utils.fixture import uuidsentinel

from nova import objects
from nova.scheduler import driver
from nova.scheduler import host_manager

# TODO(stephenfin): Rework these so they're functions instead of global
# variables that can be mutated
NUMA_TOPOLOGY = objects.NUMATopology(cells=[
objects.NUMACell(
id=0,
cpuset=set([0, 1]),
pcpuset=set([2, 3]),
memory=512,
cpu_usage=0,
memory_usage=0,
pinned_cpus=set(),
mempages=[
objects.NUMAPagesTopology(size_kb=16, total=387184, used=0),
objects.NUMAPagesTopology(size_kb=2048, total=512, used=0)],
siblings=[set([0]), set([1]), set([2]), set([3])]),
objects.NUMACell(
id=1,
cpuset=set([4, 5]),
pcpuset=set([6, 7]),
memory=512,
cpu_usage=0,
memory_usage=0,
pinned_cpus=set(),
mempages=[
objects.NUMAPagesTopology(size_kb=4, total=1548736, used=0),
objects.NUMAPagesTopology(size_kb=2048, total=512, used=0)],
siblings=[set([4]), set([5]), set([6]), set([7])])])

NUMA_TOPOLOGIES_W_HT = [
objects.NUMATopology(cells=[
objects.NUMACell(
id=0,
cpuset=set(),
pcpuset=set([1, 2, 5, 6]),
memory=512,
cpu_usage=0,
memory_usage=0,
pinned_cpus=set(),
mempages=[],
siblings=[set([1, 5]), set([2, 6])]),
objects.NUMACell(
id=1,
cpuset=set(),
pcpuset=set([3, 4, 7, 8]),
memory=512,
cpu_usage=0,
memory_usage=0,
pinned_cpus=set(),
mempages=[],
siblings=[set([3, 4]), set([7, 8])])
]),
objects.NUMATopology(cells=[
objects.NUMACell(
id=0,
cpuset=set(),
pcpuset=set(),
memory=512,
cpu_usage=0,
memory_usage=0,
pinned_cpus=set(),
mempages=[],
siblings=[]),
objects.NUMACell(
id=1,
cpuset=set(),
pcpuset=set([1, 2, 5, 6]),
memory=512,
cpu_usage=0,
memory_usage=0,
pinned_cpus=set(),
mempages=[],
siblings=[set([1, 5]), set([2, 6])]),
objects.NUMACell(
id=2,
cpuset=set(),
pcpuset=set([3, 4, 7, 8]),
memory=512,
cpu_usage=0,
memory_usage=0,
pinned_cpus=set(),
mempages=[],
siblings=[set([3, 4]), set([7, 8])]),
]),
]

COMPUTE_NODES = [
objects.ComputeNode(
uuid=uuidsentinel.cn1,
id=1, local_gb=1024, memory_mb=1024, vcpus=1,
disk_available_least=None, free_ram_mb=512, vcpus_used=1,
free_disk_gb=512, local_gb_used=0,
updated_at=datetime.datetime(2015, 11, 11, 11, 0, 0),
host='host1', hypervisor_hostname='node1', host_ip='127.0.0.1',
hypervisor_version=0, numa_topology=None,
hypervisor_type='foo', supported_hv_specs=[],
pci_device_pools=None, cpu_info=None, stats=None, metrics=None,
cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5,
disk_allocation_ratio=1.0),
objects.ComputeNode(
uuid=uuidsentinel.cn2,
id=2, local_gb=2048, memory_mb=2048, vcpus=2,
disk_available_least=1024, free_ram_mb=1024, vcpus_used=2,
free_disk_gb=1024, local_gb_used=0,
updated_at=datetime.datetime(2015, 11, 11, 11, 0, 0),
host='host2', hypervisor_hostname='node2', host_ip='127.0.0.1',
hypervisor_version=0, numa_topology=None,
hypervisor_type='foo', supported_hv_specs=[],
pci_device_pools=None, cpu_info=None, stats=None, metrics=None,
cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5,
disk_allocation_ratio=1.0),
objects.ComputeNode(
uuid=uuidsentinel.cn3,
id=3, local_gb=4096, memory_mb=4096, vcpus=4,
disk_available_least=3333, free_ram_mb=3072, vcpus_used=1,
free_disk_gb=3072, local_gb_used=0,
updated_at=datetime.datetime(2015, 11, 11, 11, 0, 0),
host='host3', hypervisor_hostname='node3', host_ip='127.0.0.1',
hypervisor_version=0, numa_topology=NUMA_TOPOLOGY._to_json(),
hypervisor_type='foo', supported_hv_specs=[],
pci_device_pools=None, cpu_info=None, stats=None, metrics=None,
cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5,
disk_allocation_ratio=1.0),
objects.ComputeNode(
uuid=uuidsentinel.cn4,
id=4, local_gb=8192, memory_mb=8192, vcpus=8,
disk_available_least=8192, free_ram_mb=8192, vcpus_used=0,
free_disk_gb=8888, local_gb_used=0,
updated_at=datetime.datetime(2015, 11, 11, 11, 0, 0),
host='host4', hypervisor_hostname='node4', host_ip='127.0.0.1',
hypervisor_version=0, numa_topology=None,
hypervisor_type='foo', supported_hv_specs=[],
pci_device_pools=None, cpu_info=None, stats=None, metrics=None,
cpu_allocation_ratio=16.0, ram_allocation_ratio=1.5,
disk_allocation_ratio=1.0),
# Broken entry
objects.ComputeNode(
uuid=uuidsentinel.cn5,
id=5, local_gb=1024, memory_mb=1024, vcpus=1,
host='fake', hypervisor_hostname='fake-hyp'),
]


def get_fake_alloc_reqs():
return [
{
'allocations': {
cn.uuid: {
'resources': {
'VCPU': 1,
'MEMORY_MB': 512,
'DISK_GB': 512,
},
}
}
} for cn in COMPUTE_NODES
]


RESOURCE_PROVIDERS = [
dict(
uuid=uuidsentinel.rp1,
name='host1',
generation=1),
dict(
uuid=uuidsentinel.rp2,
name='host2',
generation=1),
dict(
uuid=uuidsentinel.rp3,
name='host3',
generation=1),
dict(
uuid=uuidsentinel.rp4,
name='host4',
generation=1),
]

SERVICES = [
objects.Service(host='host1', disabled=False),
objects.Service(host='host2', disabled=True),
objects.Service(host='host3', disabled=False),
objects.Service(host='host4', disabled=False),
]


def get_service_by_host(host):
services = [service for service in SERVICES if service.host == host]
return services[0]


class FakeHostState(host_manager.HostState):
def __init__(self, host, node, attribute_dict, instances=None):
super(FakeHostState, self).__init__(host, node, None)
if instances:
self.instances = {inst.uuid: inst for inst in instances}
else:
self.instances = {}
for (key, val) in attribute_dict.items():
setattr(self, key, val)


class FakeScheduler(driver.Scheduler):
def select_destinations(self, context, spec_obj, instance_uuids,
alloc_reqs_by_rp_uuid, provider_summaries,
allocation_request_version=None, return_alternates=False):
return []
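
For orientation, a scheduler unit test typically wires these fakes up
along the following lines (illustrative usage only; the attributes
passed to FakeHostState are simply whatever the code under test reads):

    from nova.tests.unit.scheduler import fakes

    # FakeHostState copies every key in attribute_dict onto the host
    # state, so a test can stub exactly the fields a filter inspects.
    host = fakes.FakeHostState('host3', 'node3', {
        'numa_topology': fakes.NUMA_TOPOLOGY,
        'free_ram_mb': 2048,
        'ram_allocation_ratio': 1.5,
    })
    assert host.numa_topology is fakes.NUMA_TOPOLOGY
    assert host.free_ram_mb == 2048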