Merge "Support cpuset and scheduler for cpuset"

This commit is contained in:
Zuul 2018-12-11 06:33:11 +00:00 committed by Gerrit Code Review
commit dcd7cf35d4
29 changed files with 446 additions and 23 deletions

View File

@ -9,6 +9,9 @@ graphviz [doc test]
libffi-dev [platform:dpkg]
libffi-devel [platform:rpm]
# Tool to retrieve numa topology of compute host
numactl
# MySQL and PostgreSQL databases since some jobs are set up in
# OpenStack infra that need these like
libmysqlclient-dev [platform:dpkg test]

1
devstack/files/debs/zun Normal file
View File

@ -0,0 +1 @@
numactl

1
devstack/files/rpms/zun Normal file
View File

@ -0,0 +1 @@
numactl

View File

@ -375,6 +375,9 @@ class ContainersController(base.Controller):
requested_volumes = self._build_requested_volumes(context, mounts)
cpu_policy = container_dict.pop('cpu_policy', None)
container_dict['cpu_policy'] = cpu_policy
privileged = container_dict.pop('privileged', None)
if privileged is not None:
api_utils.version_check('privileged', '1.21')

View File

@ -19,6 +19,7 @@ _legacy_container_properties = {
'image': parameter_types.image_name,
'command': parameter_types.command,
'cpu': parameter_types.cpu,
'cpu_policy': parameter_types.cpu_policy,
'memory': parameter_types.memory,
'workdir': parameter_types.workdir,
'auto_remove': parameter_types.auto_remove,

View File

@ -97,6 +97,11 @@ cpu = {
'maximum': CONF.maximum_cpus,
}
cpu_policy = {
'type': 'string',
'enum': ['dedicated', 'shared']
}
# TODO(pksingh) Memory provided must be in MBs
# Will find another way if people dont find it useful.
memory = {

View File

@ -48,6 +48,7 @@ _basic_keys = (
'auto_heal',
'privileged',
'healthcheck',
'cpu_policy',
)

View File

@ -60,10 +60,11 @@ REST_API_VERSION_HISTORY = """REST API Version History:
* 1.25 - Encode/Decode archive file
* 1.26 - Introduce Quota support
* 1.27 - Add support for deleting networks
* 1.28 - Add support cpuset
"""
BASE_VER = '1.1'
CURRENT_MAX_VER = '1.27'
CURRENT_MAX_VER = '1.28'
class Version(object):

View File

@ -219,3 +219,9 @@ user documentation.
----
Introduce API for deleting network. By default, this is an admin API.
1.28
----
Add a new attribute 'cpu_policy'.
Users can use this attribute to determine which CPU policy the container uses.

View File

@ -18,6 +18,7 @@ Claim objects for use with resource tracking.
"""
from oslo_log import log as logging
import random
from zun.common import exception
from zun.common.i18n import _
@ -85,6 +86,10 @@ class Claim(NopClaim):
# Check claim at constructor to avoid mess code
# Raise exception ResourcesUnavailable if claim failed
self._claim_test(resources, limits)
if container.cpu_policy == 'dedicated':
container.cpuset = objects.container.Cpuset()
self.claim_cpuset_cpu_for_container(container, limits)
self.claim_cpuset_mem_for_container(container, limits)
@property
def memory(self):
@ -103,6 +108,15 @@ class Claim(NopClaim):
LOG.debug("Aborting claim: %s", self)
self.tracker.abort_container_claim(self.context, self.container)
def claim_cpuset_cpu_for_container(self, container, limits):
    """Pin dedicated CPUs for the container on the claimed host.

    Picks ``int(self.cpu)`` cores at random from the host cpuset cores
    that are not already pinned and records them on the container's
    cpuset object.
    """
    cpuset_info = limits['cpuset']
    # Cores on the target NUMA node that no other container has pinned.
    free_cores = list(set(cpuset_info['cpuset_cpu']) -
                      set(cpuset_info['cpuset_cpu_pinned']))
    # NOTE(review): int() truncates a fractional cpu request — confirm
    # that dedicated containers are limited to whole-cpu requests.
    chosen_cores = random.sample(free_cores, int(self.cpu))
    container.cpuset.cpuset_cpus = set(chosen_cores)
def claim_cpuset_mem_for_container(self, container, limits):
    """Record the NUMA memory node the container is bound to.

    ``limits['cpuset']['node']`` is a single NUMA node id (an int, as
    built by CpuSetFilter / _get_cpuset_limits), so it must be wrapped
    in a set literal; calling ``set()`` on the bare int raises
    TypeError ('int' object is not iterable).
    """
    container.cpuset.cpuset_mems = {limits['cpuset']['node']}
def _claim_test(self, resources, limits=None):
"""Test if this claim can be satisfied.
@ -122,6 +136,7 @@ class Claim(NopClaim):
memory_limit = limits.get('memory')
cpu_limit = limits.get('cpu')
disk_limit = limits.get('disk')
cpuset_limit = limits.get('cpuset', None)
LOG.info('Attempting claim: memory %(memory)s, '
'cpu %(cpu).02f CPU, disk %(disk)s',
@ -130,8 +145,9 @@ class Claim(NopClaim):
reasons = [self._test_memory(resources, memory_limit),
self._test_cpu(resources, cpu_limit),
self._test_disk(resources, disk_limit),
self._test_pci()]
# TODO(Shunli): test numa here
self._test_pci(),
self._test_cpuset_cpu(resources, cpuset_limit),
self._test_cpuset_mem(resources, cpuset_limit)]
reasons = [r for r in reasons if r is not None]
if len(reasons) > 0:
raise exception.ResourcesUnavailable(reason="; ".join(reasons))
@ -163,6 +179,32 @@ class Claim(NopClaim):
return self._test(type_, unit, total, used, requested, limit)
def _test_cpuset_cpu(self, resources, limit):
    """Check whether enough unpinned host cores exist for this claim.

    Returns None when no cpuset limit applies; otherwise delegates to
    self._test, which returns a failure-reason string or None.
    """
    if not limit:
        return
    total_cores = len(limit['cpuset_cpu'])
    pinned_cores = len(limit['cpuset_cpu_pinned'])
    return self._test(_("cpuset_cpu"), "core", total_cores,
                      pinned_cores, self.cpu, total_cores)
def _test_cpuset_mem(self, resources, limit):
    """Check whether the chosen NUMA node has memory for this claim.

    Returns None when no cpuset limit applies; otherwise delegates to
    self._test with the node's total memory and the requested amount.
    """
    if not limit:
        return
    # NOTE(review): limit['node'] is a NUMA node *id* used here as a
    # positional index into resources.numa_topology.nodes — this assumes
    # node ids are 0-based and dense; confirm against the host
    # capability code that builds the topology.
    node_total_mem = resources.numa_topology.nodes[limit['node']].mem_total
    return self._test(_("cpuset_mem"), "M", node_total_mem, 0,
                      self.memory, limit['cpuset_mem'])
def _test_disk(self, resources, limit):
type_ = _("disk")
unit = "GB"

View File

@ -226,8 +226,14 @@ class ComputeNodeTracker(object):
mem_usage = usage['memory']
cpus_usage = usage.get('cpu', 0)
disk_usage = usage['disk']
cpuset_cpus_usage = None
numa_node_id = 0
if 'cpuset_cpus' in usage.keys():
cpuset_cpus_usage = usage['cpuset_cpus']
numa_node_id = usage['node']
cn = self.compute_node
numa_topology = cn.numa_topology.nodes
cn.mem_used += sign * mem_usage
cn.cpu_used += sign * cpus_usage
cn.disk_used += sign * disk_usage
@ -237,7 +243,17 @@ class ComputeNodeTracker(object):
cn.running_containers += sign * 1
# TODO(Shunli): Calculate the numa usage here
if cpuset_cpus_usage:
for numa_node in numa_topology:
if numa_node.id == numa_node_id:
numa_node.mem_available = (numa_node.mem_available -
mem_usage * sign)
if sign > 0:
numa_node.pin_cpus(cpuset_cpus_usage)
cn._changed_fields.add('numa_topology')
else:
numa_node.unpin_cpus(cpuset_cpus_usage)
cn._changed_fields.add('numa_topology')
def _update(self, compute_node):
if not self._resource_change(compute_node):
@ -301,7 +317,9 @@ class ComputeNodeTracker(object):
usage = {'memory': memory,
'cpu': container.cpu or 0,
'disk': container.disk or 0}
# update numa usage here
if container.cpuset.cpuset_cpus:
usage['cpuset_cpus'] = container.cpuset.cpuset_cpus
usage['node'] = int(container.cpuset.cpuset_mems)
return usage

View File

@ -850,6 +850,9 @@ class Manager(periodic_task.PeriodicTasks):
# FIXME(hongbin): rt.compute_node could be None
limits = {'cpu': rt.compute_node.cpus,
'memory': rt.compute_node.mem_total}
if container.cpu_policy == 'dedicated':
limits['cpuset'] = self._get_cpuset_limits(rt.compute_node,
container)
with rt.container_update_claim(context, container, old_container,
limits):
self.driver.update(context, container)
@ -1103,6 +1106,20 @@ class Manager(periodic_task.PeriodicTasks):
rt = self._get_resource_tracker()
rt.update_available_resources(context)
def _get_cpuset_limits(self, compute_node, container):
for numa_node in compute_node.numa_topology.nodes:
if len(numa_node.cpuset) - len(
numa_node.pinned_cpus) >= container.cpu and \
numa_node.mem_available >= container.memory:
return {
'node': numa_node.id,
'cpuset_cpu': numa_node.cpuset,
'cpuset_cpu_pinned': numa_node.pinned_cpus,
'cpuset_mem': numa_node.mem_available
}
msg = _("There may be not enough numa resources.")
raise exception.NoValidHost(reason=msg)
def _get_resource_tracker(self):
if not self._resource_tracker:
rt = compute_node_tracker.ComputeNodeTracker(self.host,

View File

@ -25,6 +25,10 @@ compute_opts = [
'reserve_disk_for_image',
default=0.2,
help='reserve disk for docker images'),
cfg.BoolOpt(
'enable_cpu_pinning',
default=False,
help='allow the container with cpu_policy is dedicated'),
]
service_opts = [

View File

@ -311,6 +311,9 @@ class DockerDriver(driver.ContainerDriver):
if container.disk:
disk_size = str(container.disk) + 'G'
host_config['storage_opt'] = {'size': disk_size}
if container.cpu_policy == 'dedicated':
host_config['cpuset_cpus'] = container.cpuset.cpuset_cpus
host_config['cpuset_mems'] = str(container.cpuset.cpuset_mems)
# The time unit in docker of heath checking is us, and the unit
# of interval and timeout is seconds.
if container.healthcheck:

View File

@ -38,9 +38,11 @@ class Host(object):
# Replace this call with a more generic call when we obtain other
# NUMA related data like memory etc.
cpu_info = self.get_cpu_numa_info()
mem_info = self.get_mem_numa_info()
floating_cpus = utils.get_floating_cpu_set()
numa_node_obj = []
for node, cpuset in cpu_info.items():
for cpu, mem_total in zip(cpu_info.items(), mem_info):
node, cpuset = cpu
numa_node = objects.NUMANode()
if floating_cpus:
allowed_cpus = set(cpuset) - (floating_cpus & set(cpuset))
@ -52,6 +54,8 @@ class Host(object):
# in nature.
numa_node.cpuset = allowed_cpus
numa_node.pinned_cpus = set([])
numa_node.mem_total = mem_total
numa_node.mem_available = mem_total
numa_node_obj.append(numa_node)
numa_topo_obj.nodes = numa_node_obj

View File

@ -52,3 +52,17 @@ class LinuxHost(host_capability.Host):
elif len(val) == 2 and old_lscpu:
sock_map[val[0]].append(int(val[1]))
return sock_map
def get_mem_numa_info(self):
    """Return per-NUMA-node memory sizes as reported by ``numactl -H``.

    Returns a list of ints (one per node, in output order), or an empty
    list when numactl is unavailable or fails.
    """
    try:
        output = utils.execute('numactl', '-H')
    except exception.CommandError:
        LOG.info("Failed to execute 'numactl -H'; NUMA memory "
                 "information is not available.")
        return []
    # Raw string so '\d' is a regex digit class rather than a
    # (deprecated) string escape; capture just the number after each
    # 'size:' label, e.g. 'node 0 size: 32318 MB'.
    return [int(size) for size in re.findall(r"size: (\d+)", str(output))]

View File

@ -0,0 +1,38 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""support cpuset
Revision ID: 2b129060baff
Revises: 33cdd98bb9b2
Create Date: 2018-11-10 10:08:40.547664
"""
# revision identifiers, used by Alembic.
revision = '2b129060baff'
down_revision = '33cdd98bb9b2'
branch_labels = None
depends_on = None
from alembic import op
import sqlalchemy as sa
from zun.db.sqlalchemy import models
def upgrade():
    """Add the cpu_policy and cpuset columns to the container table.

    cpu_policy holds 'dedicated' or 'shared'; cpuset holds the
    JSON-encoded Cpuset object (pinned cpus and NUMA memory nodes) for
    dedicated containers.
    """
    op.add_column('container',
                  sa.Column('cpu_policy', sa.String(length=255)))
    op.add_column('container',
                  sa.Column('cpuset', models.JSONEncodedDict, nullable=True))

View File

@ -142,6 +142,8 @@ class Container(Base):
name = Column(String(255))
image = Column(String(255))
cpu = Column(Float)
cpu_policy = Column(String(255), default='shared')
cpuset = Column(JSONEncodedDict, nullable=True)
command = Column(JSONEncodedList)
memory = Column(String(255))
status = Column(String(20))

View File

@ -28,6 +28,33 @@ LOG = logging.getLogger(__name__)
CONTAINER_OPTIONAL_ATTRS = ["pci_devices", "exec_instances"]
@base.ZunObjectRegistry.register
class Cpuset(base.ZunObject):
    """Pinned CPUs and NUMA memory nodes of a 'dedicated' container."""

    VERSION = '1.0'

    fields = {
        'cpuset_cpus': fields.SetOfIntegersField(nullable=True),
        'cpuset_mems': fields.SetOfIntegersField(nullable=True),
    }

    def _to_dict(self):
        """Return a plain-dict form suitable for JSON storage in the DB."""
        return {'cpuset_cpus': self.cpuset_cpus,
                'cpuset_mems': self.cpuset_mems}

    @classmethod
    def _from_dict(cls, data_dict):
        """Build a Cpuset from its dict form.

        A missing/empty dict yields an object with both fields None.
        """
        data_dict = data_dict or {}
        return cls(cpuset_cpus=data_dict.get('cpuset_cpus'),
                   cpuset_mems=data_dict.get('cpuset_mems'))
@base.ZunObjectRegistry.register
class Container(base.ZunPersistentObject, base.ZunObject):
# Version 1.0: Initial version
@ -68,6 +95,7 @@ class Container(base.ZunPersistentObject, base.ZunObject):
# Version 1.35: Add 'healthcheck' attribute
# Version 1.36: Add 'get_count' method
# Version 1.37: Add 'exposed_ports' attribute
# Version 1.38: Add 'cpuset' attribute
VERSION = '1.37'
fields = {
@ -79,6 +107,8 @@ class Container(base.ZunPersistentObject, base.ZunObject):
'user_id': fields.StringField(nullable=True),
'image': fields.StringField(nullable=True),
'cpu': fields.FloatField(nullable=True),
'cpu_policy': fields.StringField(nullable=True),
'cpuset': fields.ObjectField("Cpuset", nullable=True),
'memory': fields.StringField(nullable=True),
'command': fields.ListOfStringsField(nullable=True),
'status': z_fields.ContainerStatusField(nullable=True),
@ -121,6 +151,10 @@ class Container(base.ZunPersistentObject, base.ZunObject):
for field in container.fields:
if field in ['pci_devices', 'exec_instances']:
continue
if field == 'cpuset':
container.cpuset = Cpuset._from_dict(
db_container['cpuset'])
continue
setattr(container, field, db_container[field])
container.obj_reset_changes()
@ -215,6 +249,9 @@ class Container(base.ZunPersistentObject, base.ZunObject):
"""
values = self.obj_get_changes()
cpuset_obj = values.pop('cpuset', None)
if cpuset_obj is not None:
values['cpuset'] = cpuset_obj._to_dict()
db_container = dbapi.create_container(context, values)
self._from_db_object(self, db_container)
@ -247,6 +284,9 @@ class Container(base.ZunPersistentObject, base.ZunObject):
object, e.g.: Container(context)
"""
updates = self.obj_get_changes()
cpuset_obj = updates.pop('cpuset', None)
if cpuset_obj is not None:
updates['cpuset'] = cpuset_obj._to_dict()
dbapi.update_container(context, self.uuid, updates)
self.obj_reset_changes()

View File

@ -28,6 +28,8 @@ class NUMANode(base.ZunObject):
'id': fields.IntegerField(read_only=True),
'cpuset': fields.SetOfIntegersField(),
'pinned_cpus': fields.SetOfIntegersField(),
'mem_total': fields.IntegerField(nullable=True),
'mem_available': fields.IntegerField(nullable=True),
}
@property
@ -61,7 +63,9 @@ class NUMANode(base.ZunObject):
return {
'id': self.id,
'cpuset': list(self.cpuset),
'pinned_cpus': list(self.pinned_cpus)
'pinned_cpus': list(self.pinned_cpus),
'mem_total': self.mem_total,
'mem_available': self.mem_available
}
@classmethod
@ -69,8 +73,12 @@ class NUMANode(base.ZunObject):
cpuset = set(data_dict.get('cpuset', ''))
node_id = data_dict.get('id')
pinned_cpus = set(data_dict.get('pinned_cpus'))
mem_total = data_dict.get('mem_total')
mem_available = data_dict.get('mem_available')
return cls(id=node_id, cpuset=cpuset,
pinned_cpus=pinned_cpus)
pinned_cpus=pinned_cpus,
mem_total=mem_total,
mem_available=mem_available)
@base.ZunObjectRegistry.register

View File

@ -0,0 +1,58 @@
# Copyright (c) 2018 National Engineering Laboratory of electronic
# commerce and electronic payment
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log as logging
import zun.conf
from zun.scheduler import filters
LOG = logging.getLogger(__name__)
CONF = zun.conf.CONF
class CpuSetFilter(filters.BaseHostFilter):
    """Filter the host by cpu and memory request of cpuset."""

    run_filter_once_per_request = True

    def host_passes(self, host_state, container, extra_spec):
        # Normalize unset fields. Note the policy default is written
        # back onto the container object (preserved original behavior).
        if container.cpu_policy is None:
            container.cpu_policy = 'shared'
        requested_memory = int(container.memory) if container.memory else 0

        if container.cpu_policy == 'dedicated':
            # Dedicated containers are only admitted on hosts that opted
            # in to pinning, and only when a single NUMA node can
            # satisfy both the cpu and the memory request.
            if not CONF.compute.enable_cpu_pinning:
                return False
            for numa_node in host_state.numa_topology.nodes:
                free_cpus = len(numa_node.cpuset) - len(numa_node.pinned_cpus)
                if (free_cpus >= container.cpu and
                        numa_node.mem_available >= requested_memory):
                    host_state.limits['cpuset'] = {
                        'node': numa_node.id,
                        'cpuset_cpu': numa_node.cpuset,
                        'cpuset_cpu_pinned': numa_node.pinned_cpus,
                        'cpuset_mem': numa_node.mem_available
                    }
                    return True
            return False

        if container.cpu_policy == 'shared':
            # Pinning-enabled hosts are reserved for dedicated containers.
            return not CONF.compute.enable_cpu_pinning

View File

@ -29,6 +29,7 @@ class HostState(object):
# Mutable available resources.
# These will change as resources are virtually "consumed".
self.mem_available = 0
self.mem_total = 0
self.mem_free = 0
self.mem_used = 0
@ -61,6 +62,7 @@ class HostState(object):
def _update_from_compute_node(self, compute_node):
"""Update information about a host from a Compute object"""
self.mem_available = compute_node.mem_available
self.mem_total = compute_node.mem_total
self.mem_free = compute_node.mem_free
self.mem_used = compute_node.mem_used

View File

@ -28,7 +28,7 @@ class TestRootController(api_base.FunctionalTest):
'default_version':
{'id': 'v1',
'links': [{'href': 'http://localhost/v1/', 'rel': 'self'}],
'max_version': '1.27',
'max_version': '1.28',
'min_version': '1.1',
'status': 'CURRENT'},
'description': 'Zun is an OpenStack project which '
@ -37,7 +37,7 @@ class TestRootController(api_base.FunctionalTest):
'versions': [{'id': 'v1',
'links': [{'href': 'http://localhost/v1/',
'rel': 'self'}],
'max_version': '1.27',
'max_version': '1.28',
'min_version': '1.1',
'status': 'CURRENT'}]}

View File

@ -10,6 +10,7 @@
# License for the specific language governing permissions and limitations
# under the License.
from collections import defaultdict
from docker import errors
import mock
@ -39,6 +40,8 @@ CONF = conf.CONF
_numa_node = {
'id': 0,
'cpuset': [8],
'mem_available': 32768,
'mem_total': 32768,
'pinned_cpus': []
}
@ -989,6 +992,10 @@ class TestDockerDriver(base.DriverTestCase):
security_groups=test_sec_group_id)
@mock.patch('zun.common.utils.execute')
@mock.patch('zun.container.os_capability.linux.os_capability_linux'
'.LinuxHost.get_mem_numa_info')
@mock.patch('zun.container.os_capability.linux.os_capability_linux'
'.LinuxHost.get_cpu_numa_info')
@mock.patch('zun.container.docker.driver.DockerDriver'
'.get_total_disk_for_container')
@mock.patch('zun.container.driver.ContainerDriver.get_host_mem')
@ -997,8 +1004,13 @@ class TestDockerDriver(base.DriverTestCase):
@mock.patch(
'zun.container.docker.driver.DockerDriver.get_cpu_used')
def test_get_available_resources(self, mock_cpu_used, mock_info, mock_mem,
mock_disk, mock_output):
mock_disk, mock_numa_cpu, mock_numa_mem,
mock_output):
self.driver = DockerDriver()
numa_cpu_info = defaultdict(list)
numa_cpu_info['0'] = [0, 8]
mock_numa_cpu.return_value = numa_cpu_info
mock_numa_mem.return_value = [1024 * 32]
mock_output.return_value = LSCPU_ON
conf.CONF.set_override('floating_cpu_set', "0")
mock_mem.return_value = (100 * units.Ki, 50 * units.Ki, 50 * units.Ki,

View File

@ -18,6 +18,7 @@ from oslo_serialization import jsonutils as json
from zun.common import name_generator
from zun.db import api as db_api
from zun.db.etcd import api as etcd_api
from zun.objects.container import Cpuset
CONF = cfg.CONF
@ -43,6 +44,15 @@ CAPSULE_SPEC = {"kind": "capsule",
"cinder": {"size": 5}}]}}
def get_cpuset_obj():
    """Return a Cpuset fixture: cpus {0, 1} pinned on NUMA memory node 0."""
    cpuset_data = {'cpuset_cpus': {0, 1}, 'cpuset_mems': {0}}
    return Cpuset._from_dict(cpuset_data)
def get_cpuset_dict():
    """Return the plain-dict form of the test cpuset fixture."""
    return {'cpuset_cpus': {0, 1}, 'cpuset_mems': {0}}
def get_test_container(**kwargs):
return {
'id': kwargs.get('id', 42),
@ -108,6 +118,8 @@ def get_test_container(**kwargs):
"test": "stat /etc/passwd || exit 1",
"interval": 3}),
'exposed_ports': kwargs.get('exposed_ports', {"80/tcp": {}}),
'cpu_policy': kwargs.get('cpu_policy', None),
'cpuset': kwargs.get('cpuset', None),
}
@ -354,12 +366,16 @@ def get_test_numa_topology(**kwargs):
{
"id": 0,
"cpuset": [1, 2],
"pinned_cpus": []
"pinned_cpus": [],
"mem_total": 65536,
"mem_available": 32768
},
{
"id": 1,
"cpuset": [3, 4],
"pinned_cpus": [3, 4]
"pinned_cpus": [3, 4],
"mem_total": 65536,
"mem_available": 32768
}
]
}

View File

@ -27,7 +27,9 @@ class TestContainerObject(base.DbTestCase):
def setUp(self):
    """Build a dedicated-cpu fake container fixture for each test."""
    super(TestContainerObject, self).setUp()
    self.fake_cpuset = utils.get_cpuset_dict()
    # Create the fixture once, directly with the cpuset values; the
    # previous bare utils.get_test_container() call was immediately
    # overwritten and therefore dead.
    self.fake_container = utils.get_test_container(
        cpuset=self.fake_cpuset, cpu_policy='dedicated')
def test_get_by_uuid(self):
uuid = self.fake_container['uuid']
@ -89,7 +91,10 @@ class TestContainerObject(base.DbTestCase):
with mock.patch.object(self.dbapi, 'create_container',
autospec=True) as mock_create_container:
mock_create_container.return_value = self.fake_container
container = objects.Container(self.context, **self.fake_container)
container_dict = dict(self.fake_container)
container_dict['cpuset'] = objects.container.Cpuset._from_dict(
container_dict['cpuset'])
container = objects.Container(self.context, **container_dict)
container.create(self.context)
mock_create_container.assert_called_once_with(self.context,
self.fake_container)
@ -99,7 +104,10 @@ class TestContainerObject(base.DbTestCase):
with mock.patch.object(self.dbapi, 'create_container',
autospec=True) as mock_create_container:
mock_create_container.return_value = self.fake_container
container = objects.Container(self.context, **self.fake_container)
container_dict = dict(self.fake_container)
container_dict['cpuset'] = objects.container.Cpuset._from_dict(
container_dict['cpuset'])
container = objects.Container(self.context, **container_dict)
self.assertTrue(hasattr(container, 'status_reason'))
container.status_reason = "Docker Error happened"
container.create(self.context)
@ -138,7 +146,10 @@ class TestContainerObject(base.DbTestCase):
None, uuid,
{'image': 'container.img',
'environment': {"key1": "val", "key2": "val2"},
'memory': '512m'})
'memory': '512m',
'cpuset':
{'cpuset_mems': set([0]),
'cpuset_cpus': set([0, 1])}})
self.assertEqual(self.context, container._context)
def test_refresh(self):

View File

@ -344,12 +344,13 @@ class TestObject(test_base.TestCase, _TestObject):
# For more information on object version testing, read
# https://docs.openstack.org/zun/latest/
object_data = {
'Container': '1.37-cdc1537de5adf3570b598da1a3728a68',
'Container': '1.37-193d8cd6635760882a27142760931af9',
'Cpuset': '1.0-06c4e6335683c18b87e2e54080f8c341',
'Volume': '1.0-4ec18c39ea49f898cc354f9ca178dfb7',
'VolumeMapping': '1.5-57febc66526185a75a744637e7a387c7',
'Image': '1.2-80504fdd797e9dd86128a91680e876ad',
'MyObj': '1.0-34c4b1aadefd177b13f9a2f894cc23cd',
'NUMANode': '1.0-cba878b70b2f8b52f1e031b41ac13b4e',
'NUMANode': '1.0-6da86e2dd7f28253e2b9ac60c002ea8f',
'NUMATopology': '1.0-b54086eda7e4b2e6145ecb6ee2c925ab',
'ResourceClass': '1.1-d661c7675b3cd5b8c3618b68ba64324e',
'ResourceProvider': '1.0-92b427359d5a4cf9ec6c72cbe630ee24',

View File

@ -0,0 +1,88 @@
# Copyright 2018 National Engineering Laboratory of electronic
# commerce and electronic payment.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from zun.common import context
import zun.conf
from zun import objects
from zun.scheduler.filters import cpuset_filter
from zun.tests import base
from zun.tests.unit.scheduler import fakes
CONF = zun.conf.CONF
class TestCpuSetFilter(base.TestCase):
    """Tests for CpuSetFilter.host_passes with the 'dedicated' policy."""

    def setUp(self):
        super(TestCpuSetFilter, self).setUp()
        self.context = context.RequestContext('fake_user', 'fake_project')

    def _make_container(self, cpu):
        """Return a 'dedicated' container requesting `cpu` cores, 1024 MB."""
        container = objects.Container(self.context)
        container.cpu_policy = 'dedicated'
        container.cpu = cpu
        container.memory = '1024'
        return container

    def _make_host(self):
        """Return a host with two NUMA nodes of three unpinned cpus each."""
        host = fakes.FakeHostState('testhost')
        host.cpus = 6
        host.cpu_used = 0.0
        host.numa_topology = objects.NUMATopology(nodes=[
            objects.NUMANode(id=0, cpuset=set([1, 2, 3]), pinned_cpus=set([]),
                             mem_total=32739, mem_available=32739),
            objects.NUMANode(id=1, cpuset=set([4, 5, 6]), pinned_cpus=set([]),
                             mem_total=32739, mem_available=32739)]
        )
        return host

    def test_cpuset_filter_pass_dedicated(self):
        # A 2-cpu request fits on a 3-cpu NUMA node when pinning is on.
        CONF.set_override('enable_cpu_pinning', True, 'compute')
        self.filt_cls = cpuset_filter.CpuSetFilter()
        self.assertTrue(self.filt_cls.host_passes(
            self._make_host(), self._make_container(2.0), {}))

    def test_cpuset_filter_fail_dedicated_1(self):
        # A 4-cpu request cannot fit on any single 3-cpu NUMA node.
        CONF.set_override('enable_cpu_pinning', True, 'compute')
        self.filt_cls = cpuset_filter.CpuSetFilter()
        self.assertFalse(self.filt_cls.host_passes(
            self._make_host(), self._make_container(4.0), {}))

    def test_cpuset_filter_fail_dedicated_2(self):
        # Even a fitting request fails when the host disables pinning.
        CONF.set_override('enable_cpu_pinning', False, 'compute')
        self.filt_cls = cpuset_filter.CpuSetFilter()
        self.assertFalse(self.filt_cls.host_passes(
            self._make_host(), self._make_container(2.0), {}))

View File

@ -50,6 +50,25 @@ class FilterSchedulerTestCase(base.TestCase):
self.driver.servicegroup_api.service_is_up = mock.Mock(
return_value=True)
mock_list_by_binary.side_effect = _return_services
numa_topology = {
"nodes": [
{
"id": 0,
"cpuset": [1, 2, 3, 4],
"pinned_cpus": [],
"mem_total": 1024 * 64,
"mem_available": 1024 * 64
},
{
"id": 1,
"cpuset": [5, 6, 7, 8],
"pinned_cpus": [],
"mem_total": 1024 * 64,
"mem_available": 1024 * 64
}
]
}
numa = objects.numa.NUMATopology._from_dict(numa_topology)
test_container = utils.get_test_container()
containers = [objects.Container(self.context, **test_container)]
node1 = objects.ComputeNode(self.context)
@ -58,10 +77,11 @@ class FilterSchedulerTestCase(base.TestCase):
node1.mem_total = 1024 * 128
node1.mem_used = 1024 * 4
node1.mem_free = 1024 * 124
node1.mem_available = 1024 * 124
node1.disk_total = 80
node1.disk_used = 20
node1.hostname = 'host1'
node1.numa_topology = None
node1.numa_topology = numa
node1.labels = {}
node1.pci_device_pools = None
node1.disk_quota_supported = True
@ -72,10 +92,11 @@ class FilterSchedulerTestCase(base.TestCase):
node2.mem_total = 1024 * 128
node2.mem_used = 1024 * 4
node2.mem_free = 1024 * 124
node2.mem_available = 1024 * 124
node2.disk_total = 80
node2.disk_used = 20
node2.hostname = 'host2'
node2.numa_topology = None
node2.numa_topology = numa
node2.labels = {}
node2.pci_device_pools = None
node2.disk_quota_supported = True
@ -86,10 +107,11 @@ class FilterSchedulerTestCase(base.TestCase):
node3.mem_total = 1024 * 128
node3.mem_used = 1024 * 4
node3.mem_free = 1024 * 124
node3.mem_available = 1024 * 124
node3.disk_total = 80
node3.disk_used = 20
node3.hostname = 'host3'
node3.numa_topology = None
node3.numa_topology = numa
node3.labels = {}
node3.pci_device_pools = None
node3.disk_quota_supported = True
@ -100,10 +122,11 @@ class FilterSchedulerTestCase(base.TestCase):
node4.mem_total = 1024 * 128
node4.mem_used = 1024 * 4
node4.mem_free = 1024 * 124
node4.mem_available = 1024 * 124
node4.disk_total = 80
node4.disk_used = 20
node4.hostname = 'host4'
node4.numa_topology = None
node4.numa_topology = numa
node4.labels = {}
node4.pci_device_pools = None
node4.disk_quota_supported = True