Limit the minimal RAM amount for OVS+DPDK to 1024MB
* min value was set in openstack.yaml
* appropriate validator was added
* tests for validator were changed
* migration of node and release attributes was added
* tests for migration were added

Change-Id: Iec723c7f02e874bf8567665a30af30c61aff7f34
Closes-Bug: #1653081
commit 6e4ef67269 (parent ca498021fd)
@@ -502,6 +502,14 @@ class NodeAttributesValidator(base.BasicAttributesValidator):
                     ", ".join(supported_hugepages)
                 )
             )
+        dpdk_hugepages = utils.get_in(attrs, 'hugepages', 'dpdk', 'value')
+        min_dpdk_hugepages = utils.get_in(attrs, 'hugepages', 'dpdk', 'min')
+        if dpdk_hugepages < min_dpdk_hugepages:
+            raise errors.InvalidData(
+                "Node {0} does not have enough hugepages for dpdk."
+                "Need to allocate at least {1} MB.".format(node.id,
+                                                           min_dpdk_hugepages)
+            )

         try:
             objects.NodeAttributes.distribute_hugepages(node, attrs)
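In essence, the new validator guard is a comparison of the requested DPDK hugepage amount against the configured minimum, raising when the request is too small. A minimal standalone sketch of that check, assuming the same dict-shaped attrs as above (a plain ValueError stands in for nailgun's errors.InvalidData, and the helper name is illustrative):

    def check_dpdk_hugepages(node_id, attrs):
        # Mirror utils.get_in(attrs, 'hugepages', 'dpdk', ...) with plain dict lookups.
        dpdk = attrs.get('hugepages', {}).get('dpdk', {})
        value = dpdk.get('value', 0)
        minimum = dpdk.get('min', 0)
        if value < minimum:
            raise ValueError(
                "Node {0} does not have enough hugepages for dpdk."
                "Need to allocate at least {1} MB.".format(node_id, minimum))

    # check_dpdk_hugepages(1, {'hugepages': {'dpdk': {'value': 512, 'min': 1024}}})
    # raises: Node 1 does not have enough hugepages for dpdk.Need to allocate at least 1024 MB.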
@@ -42,6 +42,7 @@ down_revision = 'f2314e5d63c9'
 def upgrade():
     upgrade_vmware_attributes_metadata()
     upgrade_attributes_metadata()
+    upgrade_attributes_node()
     upgrade_cluster_roles()
     upgrade_tags_meta()
     upgrade_primary_unit()
@@ -59,6 +60,7 @@ def downgrade():
     downgrade_primary_unit()
     downgrade_tags_meta()
     downgrade_cluster_roles()
+    downgrade_attributes_node()
     downgrade_attributes_metadata()
     downgrade_vmware_attributes_metadata()
@@ -237,6 +239,9 @@ DEFAULT_RELEASE_BOND_ATTRIBUTES = {
         'metadata': {'weight': 40, 'label': 'DPDK'}
     }
 }

+MIN_DPDK_HUGEPAGES_MEMORY = 1024
+
 # version of Fuel when security group switch was added
 FUEL_SECURITY_GROUPS_VERSION = '9.0'
@@ -340,6 +345,12 @@ def upgrade_attributes_metadata():
     upgrade_cluster_attributes(connection)


+def upgrade_attributes_node():
+    connection = op.get_bind()
+    upgrade_release_node_attributes(connection)
+    upgrade_node_attributes(connection)
+
+
 def upgrade_release_attributes_metadata(connection):
     select_query = sa.sql.text(
         'SELECT id, attributes_metadata, version FROM releases '
@@ -387,12 +398,57 @@ def upgrade_cluster_attributes(connection):
             editable=jsonutils.dumps(editable))


+def upgrade_release_node_attributes(connection):
+    select_query = sa.sql.text(
+        'SELECT id, node_attributes FROM releases '
+        'WHERE node_attributes IS NOT NULL')
+
+    update_query = sa.sql.text(
+        'UPDATE releases SET node_attributes = :node_attributes '
+        'WHERE id = :release_id')
+
+    for release_id, node_attrs in connection.execute(select_query):
+        node_attrs = jsonutils.loads(node_attrs)
+        dpdk = node_attrs.setdefault('hugepages', {}).setdefault('dpdk', {})
+        dpdk['min'] = MIN_DPDK_HUGEPAGES_MEMORY
+        dpdk['value'] = MIN_DPDK_HUGEPAGES_MEMORY
+        connection.execute(
+            update_query,
+            release_id=release_id,
+            node_attributes=jsonutils.dumps(node_attrs))
+
+
+def upgrade_node_attributes(connection):
+    select_query = sa.sql.text(
+        'SELECT id, attributes FROM nodes '
+        'WHERE attributes IS NOT NULL')
+
+    update_query = sa.sql.text(
+        'UPDATE nodes SET attributes = :attributes '
+        'WHERE id = :node_id')
+
+    for node_id, attrs in connection.execute(select_query):
+        attrs = jsonutils.loads(attrs)
+        dpdk = attrs.setdefault('hugepages', {}).setdefault('dpdk', {})
+        dpdk['min'] = MIN_DPDK_HUGEPAGES_MEMORY
+        connection.execute(
+            update_query,
+            node_id=node_id,
+            attributes=jsonutils.dumps(attrs))
+
+
 def downgrade_attributes_metadata():
     connection = op.get_bind()
     downgrade_cluster_attributes(connection)
     downgrade_release_attributes_metadata(connection)


+def downgrade_attributes_node():
+    connection = op.get_bind()
+    downgrade_release_node_attributes(connection)
+    downgrade_node_attributes(connection)
+
+
 def downgrade_release_attributes_metadata(connection):
     select_query = sa.sql.text(
         'SELECT id, attributes_metadata FROM releases '
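The upgrade helpers above all follow the same per-row pattern: decode the JSON column, force the new DPDK hugepages 'min' (and, for releases, also the default 'value'), and write the blob back. A sketch of that transformation in isolation, using the stdlib json module instead of oslo's jsonutils (function name is hypothetical):

    import json

    MIN_DPDK_HUGEPAGES_MEMORY = 1024

    def bump_dpdk_min(attributes_json, set_value=False):
        """Return the attributes JSON with hugepages.dpdk.min raised to 1024 MB."""
        attrs = json.loads(attributes_json)
        dpdk = attrs.setdefault('hugepages', {}).setdefault('dpdk', {})
        dpdk['min'] = MIN_DPDK_HUGEPAGES_MEMORY
        if set_value:  # upgrade_release_node_attributes() also resets the default value
            dpdk['value'] = MIN_DPDK_HUGEPAGES_MEMORY
        return json.dumps(attrs)

    # bump_dpdk_min('{"hugepages": {"dpdk": {"value": 0}}}')
    # -> '{"hugepages": {"dpdk": {"value": 0, "min": 1024}}}' (key order may differ)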
@@ -430,6 +486,44 @@ def downgrade_cluster_attributes(connection):
             editable=jsonutils.dumps(editable))


+def downgrade_release_node_attributes(connection):
+    select_query = sa.sql.text(
+        'SELECT id, node_attributes FROM releases '
+        'WHERE node_attributes IS NOT NULL')
+
+    update_query = sa.sql.text(
+        'UPDATE releases SET node_attributes = :node_attributes '
+        'WHERE id = :release_id')
+
+    for release_id, node_attrs in connection.execute(select_query):
+        node_attrs = jsonutils.loads(node_attrs)
+        dpdk = node_attrs.setdefault('hugepages', {}).setdefault('dpdk', {})
+        dpdk['min'] = 0
+        connection.execute(
+            update_query,
+            release_id=release_id,
+            node_attributes=jsonutils.dumps(node_attrs))
+
+
+def downgrade_node_attributes(connection):
+    select_query = sa.sql.text(
+        'SELECT id, attributes FROM nodes '
+        'WHERE attributes IS NOT NULL')
+
+    update_query = sa.sql.text(
+        'UPDATE nodes SET attributes = :attributes '
+        'WHERE id = :node_id')
+
+    for node_id, attrs in connection.execute(select_query):
+        attrs = jsonutils.loads(attrs)
+        dpdk = attrs.setdefault('hugepages', {}).setdefault('dpdk', {})
+        dpdk['min'] = 0
+        connection.execute(
+            update_query,
+            node_id=node_id,
+            attributes=jsonutils.dumps(attrs))
+
+
 def upgrade_cluster_roles():
     op.add_column(
         'clusters',
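The downgrade path is symmetric but conservative: it only resets the 'min' bound back to 0 in every stored attributes blob and leaves the stored 'value' untouched. A hypothetical inverse of the bump_dpdk_min() sketch above:

    import json

    def reset_dpdk_min(attributes_json):
        """Return the attributes JSON with hugepages.dpdk.min lowered back to 0."""
        attrs = json.loads(attributes_json)
        attrs.setdefault('hugepages', {}).setdefault('dpdk', {})['min'] = 0
        return json.dumps(attrs)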
@@ -2521,8 +2521,8 @@
           description: "DPDK Huge Pages per NUMA node in MB"
           label: "DPDK Huge Pages"
           type: "number"
-          value: 0
-          min: 0
+          value: 1024
+          min: 1024
           restrictions:
             - condition: "not ('experimental' in version:feature_groups)"
               action: "hide"
@@ -1563,7 +1563,8 @@ class NodeAttributes(object):
     @classmethod
     def is_dpdk_hugepages_enabled(cls, node, attributes=None):
         hugepages = cls._safe_get_hugepages(node, attributes=attributes)
-        return 'dpdk' in hugepages and bool(hugepages['dpdk']['value'])
+        return ('dpdk' in hugepages and bool(hugepages['dpdk']['value']) and
+                Node.dpdk_enabled(node))

     @classmethod
     def dpdk_hugepages_attrs(cls, node):
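The tightened predicate now requires both a non-zero DPDK hugepage allocation and DPDK actually being enabled on the node. A standalone restatement with hypothetical plain arguments in place of nailgun's node object:

    def dpdk_hugepages_enabled(hugepages, node_dpdk_enabled):
        # True only when DPDK hugepages are configured AND DPDK is enabled on the node.
        return ('dpdk' in hugepages and bool(hugepages['dpdk']['value']) and
                node_dpdk_enabled)

    # dpdk_hugepages_enabled({'dpdk': {'value': 1024}}, True)  -> True
    # dpdk_hugepages_enabled({'dpdk': {'value': 1024}}, False) -> False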
@@ -1577,7 +1578,8 @@ class NodeAttributes(object):
         """
         hugepages = cls._safe_get_hugepages(node)

-        if 'dpdk' not in hugepages or not hugepages['dpdk']['value']:
+        if (not Node.dpdk_enabled(node) and 'dpdk' not in hugepages or
+                not hugepages['dpdk']['value']):
             return {}

         dpdk_memory = hugepages['dpdk']['value']
@@ -424,9 +424,8 @@ class TestDeploymentAttributesSerialization90(
             numa_nodes.append({
                 'id': i,
                 'cpus': [i],
-                'memory': 1024 ** 3
+                'memory': 2 * 1024 ** 3
             })

         meta = {
             'numa_topology': {
                 'supported_hugepages': [2048],
@@ -437,17 +436,21 @@ class TestDeploymentAttributesSerialization90(
             cluster_id=self.cluster_db.id,
             roles=['compute'],
             meta=meta)

+        node.interfaces[0].attributes.get('dpdk', {}).get(
+            'enabled', {})['value'] = True
+
         node.attributes.update({
             'hugepages': {
                 'dpdk': {
-                    'value': 128},
+                    'value': 1024},
                 'nova': {
                     'value': {'2048': 1}}}}
         )
         serialized_for_astute = self.serialize()
         serialized_node = serialized_for_astute['nodes'][0]
         self.assertEquals(
-            [128, 128, 128],
+            [1024, 1024, 1024],
             serialized_node['dpdk']['ovs_socket_mem'])
         self.assertTrue(serialized_node['nova']['enable_hugepages'])
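The expected ovs_socket_mem in the updated test follows from the attribute's semantics: the DPDK hugepage amount is defined per NUMA node ("DPDK Huge Pages per NUMA node in MB"), so for the three NUMA nodes the test builds, the serializer is expected to emit the configured 1024 once per node. A small sketch of that relationship (helper name is illustrative, not nailgun's API):

    def expected_ovs_socket_mem(dpdk_hugepages_mb, numa_node_count):
        # One entry per NUMA node, each equal to the per-node DPDK hugepage amount.
        return [dpdk_hugepages_mb] * numa_node_count

    assert expected_ovs_socket_mem(1024, 3) == [1024, 1024, 1024]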
@@ -496,6 +496,7 @@ class TestProvisioningSerializer90(BaseIntegrationTest):

         node = self.env.nodes[0]
         node.attributes['hugepages']['nova']['value'] = {'2048': 5}
+        node.attributes['hugepages']['dpdk']['value'] = 0

         serialized_info = self.serializer.serialize(node.cluster, [node])
@@ -98,6 +98,17 @@ TAGS_META = {
     }
 }

+NODE_ATTRIBUTES = {
+    'hugepages':
+        {
+            'dpdk':
+                {
+                    'value': 1024,
+                    'min': 1024
+                }
+        }
+}
+

 def setup_module():
     dropdb()
@@ -137,7 +148,8 @@ def prepare():
         'tags_matadata': jsonutils.dumps(TAGS_META),
         'is_deployable': True,
         'networks_metadata': '{}',
-        'attributes_metadata': jsonutils.dumps(attrs)
+        'attributes_metadata': jsonutils.dumps(attrs),
+        'node_attributes': jsonutils.dumps(NODE_ATTRIBUTES),
     }
     result = db.execute(meta.tables['releases'].insert(), [release])
     release_id = result.inserted_primary_key[0]
@@ -176,6 +188,7 @@ def prepare():
             'status': 'ready',
             'roles': ['role_x', 'role_y'],
             'primary_tags': ['role_y', 'test'],
+            'attributes': jsonutils.dumps(NODE_ATTRIBUTES),
             'meta': '{}',
             'mac': 'bb:aa:aa:aa:aa:aa',
             'timestamp': datetime.datetime.utcnow(),
@@ -190,7 +203,8 @@ def prepare():
             'group_id': None,
             'status': 'discover',
             'mac': 'aa:aa:aa:aa:aa:aa',
-            'timestamp': datetime.datetime.utcnow()
+            'timestamp': datetime.datetime.utcnow(),
+            'attributes': jsonutils.dumps(NODE_ATTRIBUTES),
         }]
     )
     node_id = new_node.inserted_primary_key[0]
@@ -310,6 +324,16 @@ class TestAttributesDowngrade(base.BaseAlembicMigrationTest):
             common = attrs.setdefault('editable', {}).setdefault('common', {})
             self.assertEqual(common.get('security_groups'), None)

+    def test_release_node_attributes_downgrade(self):
+        releases = self.meta.tables['releases']
+        results = db.execute(
+            sa.select([releases.c.node_attributes]))
+        for node_attrs in results:
+            node_attrs = jsonutils.loads(node_attrs[0])
+            dpdk = node_attrs.setdefault('hugepages', {}).setdefault('dpdk',
+                                                                     {})
+            self.assertEqual(dpdk.get('min'), 0)
+

 class TestTags(base.BaseAlembicMigrationTest):
     def test_primary_tags_downgrade(self):
@@ -197,6 +197,20 @@ NODE_OFFLOADING_MODES = [
         'sub': []
     }
 ]

+NODE_ATTRIBUTES = {
+    'hugepages':
+        {
+            'dpdk':
+                {
+                    'value': 0,
+                    'min': 0
+                }
+        }
+}
+
+MIN_DPDK_HUGEPAGES_VALUE = 1024
+
 # version of Fuel when security group switch was added
 RELEASE_VERSION = '9.0'
 # version of Fuel when tags was introduced
@@ -246,6 +260,7 @@ def prepare():
         'state': 'available',
         'networks_metadata': '{}',
         'attributes_metadata': jsonutils.dumps(ATTRIBUTES_METADATA),
+        'node_attributes': jsonutils.dumps(NODE_ATTRIBUTES),
         'deployment_tasks': '{}',
         'roles': jsonutils.dumps([
             'controller',
@@ -333,6 +348,7 @@ def prepare():
             'status': 'ready',
             'roles': ['controller', 'ceph-osd'],
             'primary_roles': ['controller'],
+            'attributes': jsonutils.dumps(NODE_ATTRIBUTES),
             'meta': jsonutils.dumps({
                 'interfaces': [{
                     'mac': '00:00:00:00:00:01'
@@ -353,6 +369,7 @@ def prepare():
             'group_id': None,
             'status': 'ready',
             'roles': ['controller', 'ceph-osd'],
+            'attributes': jsonutils.dumps(NODE_ATTRIBUTES),
             'meta': jsonutils.dumps({
                 'interfaces': [
                     {
@@ -572,6 +589,25 @@ class TestAttributesUpdate(base.BaseAlembicMigrationTest):
             common = editable.setdefault('common', {})
             self.assertEqual(common.get('security_groups'), None)

+    def test_release_node_attributes_update(self):
+        releases = self.meta.tables['releases']
+        results = db.execute(
+            sa.select([releases.c.node_attributes]))
+        for node_attrs in results:
+            node_attrs = jsonutils.loads(node_attrs[0])
+            dpdk = node_attrs.setdefault('hugepages', {}).setdefault('dpdk',
+                                                                     {})
+            self.assertEqual(dpdk.get('min'), MIN_DPDK_HUGEPAGES_VALUE)
+
+    def test_node_attributes_update(self):
+        nodes = self.meta.tables['nodes']
+        results = db.execute(
+            sa.select([nodes.c.attributes]))
+        for attrs in results:
+            attrs = jsonutils.loads(attrs[0])
+            dpdk = attrs.setdefault('hugepages', {}).setdefault('dpdk', {})
+            self.assertEqual(dpdk.get('min'), MIN_DPDK_HUGEPAGES_VALUE)
+
     def get_release_ids(self, start_version, available=True):
         """Get release ids
@@ -28,7 +28,7 @@ validator = node_validator.NodeAttributesValidator.validate

 def mock_cluster_attributes(func):
     def wrapper(*args, **kwargs):
-        attr_mock = mock.patch.object(
+        cluster_attr_mock = mock.patch.object(
             objects.Cluster,
             'get_editable_attributes',
             return_value={
@@ -39,7 +39,12 @@ def mock_cluster_attributes(func):
                 }
             }
         )
-        with attr_mock:
+        node_dpdk_mock = mock.patch.object(
+            objects.Node,
+            'dpdk_enabled',
+            return_value=True
+        )
+        with cluster_attr_mock, node_dpdk_mock:
             func(*args, **kwargs)

     return wrapper
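The second patch works the same way as the first: mock.patch.object temporarily replaces the attribute with a MagicMock whose return_value is True, so every validator call inside the wrapped test sees DPDK as enabled on the node. A minimal self-contained illustration (the Node class here is a stand-in, not nailgun's):

    import mock


    class Node(object):
        """Stand-in for objects.Node, only for illustration."""
        @classmethod
        def dpdk_enabled(cls, node):
            return False


    # Inside the patch the method is a MagicMock returning True;
    # outside it, the original classmethod is restored.
    with mock.patch.object(Node, 'dpdk_enabled', return_value=True):
        assert Node.dpdk_enabled(None) is True
    assert Node.dpdk_enabled(None) is False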
@@ -54,8 +59,8 @@ class BaseNodeAttributeValidatorTest(base.BaseTestCase):
         meta['numa_topology'] = {
             "supported_hugepages": [2048, 1048576],
             "numa_nodes": [
-                {"id": 0, "cpus": [0, 1], 'memory': 2 * 1024 ** 3},
-                {"id": 1, "cpus": [2, 3], 'memory': 2 * 1024 ** 3},
+                {"id": 0, "cpus": [0, 1], 'memory': 3 * 1024 ** 3},
+                {"id": 1, "cpus": [2, 3], 'memory': 3 * 1024 ** 3},
             ]
         }
         meta['cpu']['total'] = 4
@@ -68,7 +73,8 @@ class BaseNodeAttributeValidatorTest(base.BaseTestCase):
             },
             'dpdk': {
                 'type': 'number',
-                'value': 0,
+                'value': 1024,
+                'min': 1024,
             },
         },
         'cpu_pinning': {
@@ -107,11 +113,11 @@ class TestNodeAttributesValidatorHugepages(BaseNodeAttributeValidatorTest):
                     },
                 },
                 'dpdk': {
-                    'value': 2,
+                    'value': 1024,
+                    'min': 1024
                 },
             }
         }

         self.assertNotRaises(errors.InvalidData, validator,
                              json.dumps(data), self.node, self.cluster)
@@ -132,6 +138,29 @@ class TestNodeAttributesValidatorHugepages(BaseNodeAttributeValidatorTest):
             errors.InvalidData, 'Not enough memory for components',
             validator, json.dumps(data), self.node, self.cluster)

+    @mock_cluster_attributes
+    def test_not_enough_dpdk_hugepages(self, m_dpdk_nics):
+        data = {
+            'hugepages': {
+                'nova': {
+                    'value': {
+                        '2048': 1,
+                        '1048576': 0,
+                    },
+                },
+                'dpdk': {
+                    'value': 1023,
+                    'min': 1024
+                },
+            }
+        }
+        message = ("Node {0} does not have enough hugepages for dpdk."
+                   "Need to allocate at least {1} MB.").format(self.node.id,
+                                                               1024)
+        self.assertRaisesWithMessageIn(
+            errors.InvalidData, message,
+            validator, json.dumps(data), self.node, self.cluster)
+
     @mock_cluster_attributes
     def test_dpdk_requires_too_much(self, m_dpdk_nics):
         data = {