76e270ef96
CPU distribution mechanism should be changed due to incorrect requirements for nova and dpdk CPUs allocation Changes: * Change CPU distribution * Add function for recognizing DPDK NICs for node * Remove requirement of enabled hugepages for DPDK NICs (it's checked before deployment) * Change HugePages distribution. Now it takes into account Nova CPUs placement Requirements Before: DPDK's CPUs should be located on the same NUMAs as Nova CPUs Requirements Now: 1. DPDK component CPU pinning has two parts: * OVS pmd core CPUs - These CPUs must be placed on the NUMAs where DPDK NIC is located. Since a DPDK NIC can handle about 12 Mpps and 1 CPU can handle about 3 Mpps, there is no necessity to place more than 4 CPUs per NIC. Let's name all remaining CPUs as additional CPUs. * OVS Core CPUs - 1 CPU is enough and that CPU should be taken from any NUMA where at least 1 OVS pmd core CPU is located 2. To improve Nova and DPDK performance, all additional CPUs should be distributed along with Nova's CPUs as OVS pmd core CPUs. Change-Id: Ib2adf39c36b2e1536bb02b07fd8b5af50e3744b2 Closes-Bug: #1584006
189 lines
5.5 KiB
Python
189 lines
5.5 KiB
Python
# coding: utf-8
|
|
|
|
# Copyright 2016 Mirantis, Inc.
|
|
#
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
|
|
# not use this file except in compliance with the License. You may obtain
|
|
# a copy of the License at
|
|
#
|
|
# http://www.apache.org/licenses/LICENSE-2.0
|
|
#
|
|
# Unless required by applicable law or agreed to in writing, software
|
|
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
|
|
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
|
|
# License for the specific language governing permissions and limitations
|
|
# under the License.
|
|
|
|
import functools
import json

import mock

from nailgun.api.v1.validators import node as node_validator
from nailgun import consts
from nailgun import errors
from nailgun import objects
from nailgun.test import base
|
|
|
|
|
|
# Shortcut to the classmethod under test; invoked throughout this module
# as validator(json_data, node, cluster).
validator = node_validator.NodeAttributesValidator.validate
|
|
|
|
|
|
def mock_cluster_attributes(func):
    """Decorate a test to run against stubbed cluster editable attributes.

    Patches ``objects.Cluster.get_editable_attributes`` for the duration
    of the test call so every test sees a KVM cluster, which is the
    precondition for CPU pinning / hugepages validation.

    :param func: test method to wrap
    :returns: wrapped test method with the patch active during the call
    """
    # functools.wraps preserves the test's __name__/__doc__ so test
    # runners and tracebacks report the real test, not 'wrapper'.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        attr_mock = mock.patch.object(
            objects.Cluster,
            'get_editable_attributes',
            return_value={
                'common': {
                    'libvirt_type': {
                        'value': consts.HYPERVISORS.kvm,
                    }
                }
            }
        )
        with attr_mock:
            # Propagate the wrapped function's return value (the original
            # decorator silently discarded it).
            return func(*args, **kwargs)

    return wrapper
|
|
|
|
|
|
class BaseNodeAttributeValidatorTest(base.BaseTestCase):
    """Common fixture: a 4-CPU node with two 2 GiB NUMA nodes and
    zeroed hugepages / CPU pinning attributes."""

    def setUp(self):
        super(BaseNodeAttributeValidatorTest, self).setUp()

        gib = 1024 ** 3
        node_meta = self.env.default_metadata()
        node_meta['numa_topology'] = {
            "supported_hugepages": [2048, 1048576],
            "numa_nodes": [
                {"id": 0, "cpus": [0, 1], 'memory': 2 * gib},
                {"id": 1, "cpus": [2, 3], 'memory': 2 * gib},
            ]
        }
        node_meta['cpu']['total'] = 4

        # Each component starts with nothing pinned / no pages requested;
        # copies keep the attribute dicts independently mutable per test.
        zero_number = {'type': 'number', 'value': 0}
        node_attributes = {
            'hugepages': {
                'nova': {'type': 'custom_hugepages', 'value': {}},
                'dpdk': dict(zero_number),
            },
            'cpu_pinning': {
                'dpdk': dict(zero_number),
                'nova': dict(zero_number),
            }
        }

        self.node = mock.Mock(meta=node_meta, attributes=node_attributes)
        self.cluster = mock.Mock()
|
|
|
|
|
|
@mock.patch.object(objects.Node, 'dpdk_nics', return_value=[])
class TestNodeAttributesValidatorHugepages(BaseNodeAttributeValidatorTest):
    """Validation of the 'hugepages' node attribute section."""

    @mock_cluster_attributes
    def test_defaults(self, m_dpdk_nics):
        # An empty payload must validate against the fixture defaults.
        self.assertNotRaises(errors.InvalidData, validator,
                             json.dumps({}), self.node, self.cluster)

    @mock_cluster_attributes
    def test_valid_hugepages(self, m_dpdk_nics):
        # One page of each supported size plus a small DPDK reservation
        # fits in the node's memory.
        nova_pages = {
            '2048': 1,
            '1048576': 1,
        }
        data = {
            'hugepages': {
                'nova': {'value': nova_pages},
                'dpdk': {'value': 2},
            },
        }

        self.assertNotRaises(errors.InvalidData, validator,
                             json.dumps(data), self.node, self.cluster)

    @mock_cluster_attributes
    def test_too_much_hugepages(self, m_dpdk_nics):
        # Far more pages of each size than the node's 4 GiB can hold.
        oversized = {
            '2048': 100500,
            '1048576': 100500,
        }
        data = {'hugepages': {'nova': {'value': oversized}}}

        self.assertRaisesWithMessageIn(
            errors.InvalidData, 'Not enough memory for components',
            validator, json.dumps(data), self.node, self.cluster)

    @mock_cluster_attributes
    def test_dpdk_requires_too_much(self, m_dpdk_nics):
        # DPDK reservation alone exceeding node memory must be rejected.
        data = {'hugepages': {'dpdk': {'value': 2049}}}

        self.assertRaisesWithMessageIn(
            errors.InvalidData, 'could not require more memory than node has',
            validator, json.dumps(data), self.node, self.cluster)
|
|
|
|
|
|
@mock.patch.object(objects.Node, 'dpdk_nics', return_value=[])
class TestNodeAttributesValidatorCpuPinning(BaseNodeAttributeValidatorTest):
    """Validation of the 'cpu_pinning' node attribute section."""

    @staticmethod
    def _nova_pinning_payload(count):
        # JSON payload that pins `count` CPUs for nova.
        return json.dumps({'cpu_pinning': {'nova': {'value': count}}})

    @mock_cluster_attributes
    def test_valid_data(self, m_dpdk_nics):
        self.assertNotRaises(errors.InvalidData, validator,
                             self._nova_pinning_payload(1),
                             self.node, self.cluster)

    @mock_cluster_attributes
    def test_no_cpu_for_os(self, m_dpdk_nics):
        # Pinning every CPU leaves none for the host OS -> invalid.
        total = self.node.meta['cpu']['total']

        self.assertRaisesWithMessageIn(
            errors.InvalidData, 'at least one cpu',
            validator, self._nova_pinning_payload(total),
            self.node, self.cluster)

    @mock_cluster_attributes
    def test_one_cpu_for_os(self, m_dpdk_nics):
        # Leaving exactly one CPU unpinned is the boundary valid case.
        total = self.node.meta['cpu']['total']

        self.assertNotRaises(errors.InvalidData, validator,
                             self._nova_pinning_payload(total - 1),
                             self.node, self.cluster)
|