Merge "DPDK and Host derive parameters workflows"

This commit is contained in:
Jenkins 2017-07-13 11:27:41 +00:00 committed by Gerrit Code Review
commit d9beb6d1da
9 changed files with 1437 additions and 11 deletions

View File

@ -74,6 +74,10 @@ mistral.actions =
tripleo.deployment.config = tripleo_common.actions.deployment:OrchestrationDeployAction tripleo.deployment.config = tripleo_common.actions.deployment:OrchestrationDeployAction
tripleo.deployment.deploy = tripleo_common.actions.deployment:DeployStackAction tripleo.deployment.deploy = tripleo_common.actions.deployment:DeployStackAction
tripleo.deployment.overcloudrc = tripleo_common.actions.deployment:OvercloudRcAction tripleo.deployment.overcloudrc = tripleo_common.actions.deployment:OvercloudRcAction
tripleo.derive_params.get_dpdk_nics_numa_info = tripleo_common.actions.derive_params:GetDpdkNicsNumaInfoAction
tripleo.derive_params.get_dpdk_core_list = tripleo_common.actions.derive_params:GetDpdkCoreListAction
tripleo.derive_params.get_dpdk_socket_memory = tripleo_common.actions.derive_params:GetDpdkSocketMemoryAction
tripleo.derive_params.get_host_cpus_list = tripleo_common.actions.derive_params:GetHostCpusListAction
tripleo.git.clean = tripleo_common.actions.vcs:GitCleanupAction tripleo.git.clean = tripleo_common.actions.vcs:GitCleanupAction
tripleo.git.clone = tripleo_common.actions.vcs:GitCloneAction tripleo.git.clone = tripleo_common.actions.vcs:GitCloneAction
tripleo.heat_capabilities.get = tripleo_common.actions.heat_capabilities:GetCapabilitiesAction tripleo.heat_capabilities.get = tripleo_common.actions.heat_capabilities:GetCapabilitiesAction
@ -82,6 +86,7 @@ mistral.actions =
tripleo.package_update.update_stack = tripleo_common.actions.package_update:UpdateStackAction tripleo.package_update.update_stack = tripleo_common.actions.package_update:UpdateStackAction
tripleo.parameters.get = tripleo_common.actions.parameters:GetParametersAction tripleo.parameters.get = tripleo_common.actions.parameters:GetParametersAction
tripleo.parameters.get_flatten = tripleo_common.actions.parameters:GetFlattenedParametersAction tripleo.parameters.get_flatten = tripleo_common.actions.parameters:GetFlattenedParametersAction
tripleo.parameters.get_network_config = tripleo_common.actions.parameters:GetNetworkConfigAction
tripleo.parameters.reset = tripleo_common.actions.parameters:ResetParametersAction tripleo.parameters.reset = tripleo_common.actions.parameters:ResetParametersAction
tripleo.parameters.update = tripleo_common.actions.parameters:UpdateParametersAction tripleo.parameters.update = tripleo_common.actions.parameters:UpdateParametersAction
tripleo.parameters.update_role = tripleo_common.actions.parameters:UpdateRoleParametersAction tripleo.parameters.update_role = tripleo_common.actions.parameters:UpdateRoleParametersAction

View File

@ -0,0 +1,329 @@
# Copyright 2017 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
from mistral_lib import actions
from tripleo_common.actions import base
class GetDpdkNicsNumaInfoAction(base.TripleOAction):
    """Gets the DPDK NICs with MTU for NUMA nodes.

    Find the DPDK interface names from the network config and
    translate it to physical interface names using the introspection
    data. And then find the NUMA node associated with the DPDK
    interface and the MTU value.

    :param network_configs: network config list
    :param inspect_data: introspection data
    :param mtu_default: mtu default value for NICs
    :return: DPDK NICs NUMA nodes info
    """

    def __init__(self, network_configs, inspect_data, mtu_default=1500):
        super(GetDpdkNicsNumaInfoAction, self).__init__()
        self.network_configs = network_configs
        self.inspect_data = inspect_data
        self.mtu_default = mtu_default

    # TODO(jpalanis): Expose this utility from os-net-config to sort
    # active nics
    def _natural_sort_key(self, s):
        # Split into digit/non-digit chunks so that e.g. 'eth2' orders
        # before 'eth10'.
        nsre = re.compile('([0-9]+)')
        return [int(text) if text.isdigit() else text
                for text in re.split(nsre, s)]

    # TODO(jpalanis): Expose this utility from os-net-config to sort
    # active nics
    def _is_embedded_nic(self, nic):
        """Returns True for on-board NIC name prefixes (em/eth/eno)."""
        if (nic.startswith('em') or nic.startswith('eth') or
                nic.startswith('eno')):
            return True
        return False

    # TODO(jpalanis): Expose this utility from os-net-config to sort
    # active nics
    def _ordered_nics(self, interfaces):
        """Orders NIC names embedded-first, each group naturally sorted."""
        embedded_nics = []
        nics = []
        for iface in interfaces:
            nic = iface.get('name', '')
            if self._is_embedded_nic(nic):
                embedded_nics.append(nic)
            else:
                nics.append(nic)
        active_nics = (sorted(
            embedded_nics, key=self._natural_sort_key) +
            sorted(nics, key=self._natural_sort_key))
        return active_nics

    def find_numa_node_id(self, numa_nics, nic_name):
        """Gets numa node id for physical NIC name (None if unknown)."""
        for nic_info in numa_nics:
            if nic_info.get('name', '') == nic_name:
                return nic_info.get('numa_node', None)
        return None

    def get_physical_iface_name(self, ordered_nics, nic_name):
        """Gets physical interface name for a 'nicN' style NIC name."""
        if nic_name.startswith('nic'):
            # Nic numbering, find the actual interface name
            nic_number = int(nic_name.replace('nic', ''))
            if nic_number > 0:
                # NOTE(review): assumes nic_number <= len(ordered_nics);
                # an out-of-range number raises IndexError — confirm the
                # callers guarantee this.
                iface_name = ordered_nics[nic_number - 1]
                return iface_name
        return nic_name

    def get_dpdk_interfaces(self, dpdk_objs):
        """Gets dpdk interfaces and mtu info for dpdk config.

        Default mtu (recommended 1500) is used if no MTU is set for the
        DPDK NIC. The last member's MTU wins when several are present.
        """
        mtu = self.mtu_default
        dpdk_ifaces = []
        for dpdk_obj in dpdk_objs:
            obj_type = dpdk_obj.get('type')
            mtu = dpdk_obj.get('mtu', self.mtu_default)
            if obj_type == 'ovs_dpdk_port':
                # Member interfaces of ovs_dpdk_port
                dpdk_ifaces.extend(dpdk_obj.get('members', []))
            elif obj_type == 'ovs_dpdk_bond':
                # ovs_dpdk_bond will have multiple ovs_dpdk_ports
                for bond_member in dpdk_obj.get('members', []):
                    if bond_member.get('type') == 'ovs_dpdk_port':
                        dpdk_ifaces.extend(bond_member.get('members', []))
        return (dpdk_ifaces, mtu)

    def run(self, context):
        """Derives [{name, numa_node, mtu}, ...] for every DPDK NIC.

        Returns actions.Result(error=...) when the introspection data is
        incomplete or a NIC cannot be mapped to a NUMA node.
        """
        interfaces = self.inspect_data.get('inventory',
                                           {}).get('interfaces', [])
        # Checks whether inventory interfaces information is not available
        # in introspection data.
        if not interfaces:
            msg = 'Introspection data does not have inventory.interfaces'
            return actions.Result(error=msg)

        numa_nics = self.inspect_data.get('numa_topology',
                                          {}).get('nics', [])
        # Checks whether numa topology nics information is not available
        # in introspection data.
        if not numa_nics:
            msg = 'Introspection data does not have numa_topology.nics'
            return actions.Result(error=msg)

        active_interfaces = [iface for iface in interfaces
                             if iface.get('has_carrier', False)]
        # Checks whether active interfaces are not available
        if not active_interfaces:
            msg = 'Unable to determine active interfaces (has_carrier)'
            return actions.Result(error=msg)

        dpdk_nics_numa_info = []
        ordered_nics = self._ordered_nics(active_interfaces)
        # Gets DPDK network config and parses to get DPDK NICs
        # with mtu and numa node id
        for config in self.network_configs:
            if config.get('type', '') == 'ovs_user_bridge':
                members = config.get('members', [])
                dpdk_ifaces, mtu = self.get_dpdk_interfaces(members)
                for dpdk_iface in dpdk_ifaces:
                    name = dpdk_iface.get('name', '')
                    phy_name = self.get_physical_iface_name(
                        ordered_nics, name)
                    node = self.find_numa_node_id(numa_nics, phy_name)
                    # BUG FIX: NUMA node 0 is a valid id but falsy, so the
                    # previous truthiness check ('if not node') wrongly
                    # rejected NICs on node 0. Compare against None.
                    if node is None:
                        msg = ('Unable to determine NUMA node for '
                               'DPDK NIC: %s' % phy_name)
                        return actions.Result(error=msg)
                    dpdk_nic_info = {'name': phy_name,
                                     'numa_node': node,
                                     'mtu': mtu}
                    dpdk_nics_numa_info.append(dpdk_nic_info)
        return dpdk_nics_numa_info
class GetDpdkCoreListAction(base.TripleOAction):
    """Gets the DPDK PMD Core List.

    With input as the number of physical cores for each NUMA node,
    find the right logical CPUs to be allocated along with its
    siblings for the PMD core list.

    :param inspect_data: introspection data
    :param numa_nodes_cores_count: physical cores count for each NUMA,
        indexed by NUMA node id
    :return: DPDK Core List (comma-separated thread ids)
    """

    def __init__(self, inspect_data, numa_nodes_cores_count):
        super(GetDpdkCoreListAction, self).__init__()
        self.inspect_data = inspect_data
        self.numa_nodes_cores_count = numa_nodes_cores_count

    def run(self, context):
        dpdk_core_list = []
        numa_cpus_info = self.inspect_data.get('numa_topology',
                                               {}).get('cpus', [])

        # Checks whether numa topology cpus information is not available
        # in introspection data.
        if not numa_cpus_info:
            msg = 'Introspection data does not have numa_topology.cpus'
            return actions.Result(error=msg)

        # Checks whether CPU physical cores count for each NUMA nodes is
        # not available
        if not self.numa_nodes_cores_count:
            msg = ('CPU physical cores count for each NUMA nodes '
                   'is not available')
            return actions.Result(error=msg)

        numa_nodes_threads = {}
        # Creates list for all available threads in each NUMA node
        for cpu in numa_cpus_info:
            if not cpu['numa_node'] in numa_nodes_threads:
                numa_nodes_threads[cpu['numa_node']] = []
            numa_nodes_threads[cpu['numa_node']].extend(cpu['thread_siblings'])

        # BUG FIX: the position in numa_nodes_cores_count *is* the NUMA
        # node id. The previous list.index(value) lookup returned the first
        # node with a matching count, so two nodes with equal core counts
        # were both resolved to the first node. enumerate() gives each
        # entry its own node id.
        for node, node_cores_count in enumerate(self.numa_nodes_cores_count):
            # Gets least thread in NUMA node
            numa_node_min = min(numa_nodes_threads[node])
            cores_count = node_cores_count
            for cpu in numa_cpus_info:
                if cpu['numa_node'] == node:
                    # Adds threads from core which is not having least thread
                    if numa_node_min not in cpu['thread_siblings']:
                        dpdk_core_list.extend(cpu['thread_siblings'])
                        cores_count -= 1
                        if cores_count == 0:
                            break
        return ','.join([str(thread) for thread in dpdk_core_list])
class GetHostCpusListAction(base.TripleOAction):
    """Gets the Host CPUs List.

    CPU threads from the first physical core are allocated for host
    processes on each NUMA node.

    :param inspect_data: introspection data
    :return: Host CPUs List (comma-separated thread ids)
    """

    def __init__(self, inspect_data):
        super(GetHostCpusListAction, self).__init__()
        self.inspect_data = inspect_data

    def run(self, context):
        cpus = self.inspect_data.get('numa_topology', {}).get('cpus', [])
        # Bail out when the introspection data lacks CPU topology.
        if not cpus:
            return actions.Result(
                error='Introspection data does not have numa_topology.cpus')

        # Group every hardware thread by its NUMA node.
        threads_by_node = {}
        for cpu_info in cpus:
            threads_by_node.setdefault(
                cpu_info['numa_node'], []).extend(cpu_info['thread_siblings'])

        host_cpus = []
        for node_id in threads_by_node.keys():
            lowest_thread = min(threads_by_node[node_id])
            # The core owning the lowest-numbered thread on this node is
            # reserved for the host; take it with all its siblings.
            for cpu_info in cpus:
                if (cpu_info['numa_node'] == int(node_id) and
                        lowest_thread in cpu_info['thread_siblings']):
                    host_cpus.extend(cpu_info['thread_siblings'])
                    break
        return ','.join(str(thread) for thread in host_cpus)
class GetDpdkSocketMemoryAction(base.TripleOAction):
    """Gets the DPDK Socket Memory List.

    For a NUMA node with a DPDK nic, socket memory is calculated based
    on MTU, overhead and packet size in buffer. For a NUMA node without
    a DPDK nic, the minimum socket memory is assigned (recommended 1GB).

    :param dpdk_nics_numa_info: DPDK nics numa info
    :param numa_nodes: list of numa nodes
    :param overhead: overhead value
    :param packet_size_in_buffer: packet size in buffer
    :param minimum_socket_memory: minimum socket memory
    :return: DPDK Socket Memory List (comma-separated MB values)
    """

    def __init__(self, dpdk_nics_numa_info, numa_nodes,
                 overhead, packet_size_in_buffer,
                 minimum_socket_memory=1024):
        super(GetDpdkSocketMemoryAction, self).__init__()
        self.dpdk_nics_numa_info = dpdk_nics_numa_info
        self.numa_nodes = numa_nodes
        self.overhead = overhead
        self.packet_size_in_buffer = packet_size_in_buffer
        self.minimum_socket_memory = minimum_socket_memory

    def calculate_node_socket_memory(
            self, numa_node, dpdk_nics_numa_info, overhead,
            packet_size_in_buffer, minimum_socket_memory):
        """Calculates socket memory (MB, rounded up to a whole GB) for
        one NUMA node."""
        seen_mtus = []
        required = 0
        # Sum the buffer requirement once per distinct MTU on this node.
        for nic_info in dpdk_nics_numa_info:
            if (numa_node == nic_info['numa_node'] and
                    nic_info['mtu'] not in seen_mtus):
                seen_mtus.append(nic_info['mtu'])
                required += (((nic_info['mtu'] + overhead)
                              * packet_size_in_buffer) /
                             (1024 * 1024))
        if required == 0:
            # Node without any DPDK nic: assign the minimum.
            required = minimum_socket_memory
        else:
            # Fixed headroom for nodes that do host DPDK nics.
            required += 500
        # Round up to the next whole GB, expressed in MB.
        whole_gbs = int(required / 1024)
        if required % 1024 > 0:
            whole_gbs += 1
        return (whole_gbs * 1024)

    def run(self, context):
        """Builds the comma-separated per-node socket memory string."""
        memory_per_node = [
            self.calculate_node_socket_memory(
                node, self.dpdk_nics_numa_info, self.overhead,
                self.packet_size_in_buffer, self.minimum_socket_memory)
            for node in self.numa_nodes]
        return ','.join(str(mem) for mem in memory_per_node)

View File

@ -26,6 +26,7 @@
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations # License for the specific language governing permissions and limitations
# under the License. # under the License.
import json
import logging import logging
import uuid import uuid
@ -101,9 +102,11 @@ class GetParametersAction(templates.ProcessTemplatesAction):
class ResetParametersAction(base.TripleOAction): class ResetParametersAction(base.TripleOAction):
"""Provides method to delete user set parameters.""" """Provides method to delete user set parameters."""
def __init__(self, container=constants.DEFAULT_CONTAINER_NAME): def __init__(self, container=constants.DEFAULT_CONTAINER_NAME,
key=constants.DEFAULT_PLAN_ENV_KEY):
super(ResetParametersAction, self).__init__() super(ResetParametersAction, self).__init__()
self.container = container self.container = container
self.key = key
def run(self, context): def run(self, context):
swift = self.get_object_client(context) swift = self.get_object_client(context)
@ -117,7 +120,7 @@ class ResetParametersAction(base.TripleOAction):
return actions.Result(error=err_msg) return actions.Result(error=err_msg)
try: try:
plan_utils.update_in_env(swift, env, 'parameter_defaults', plan_utils.update_in_env(swift, env, self.key,
delete_key=True) delete_key=True)
except swiftexceptions.ClientException as err: except swiftexceptions.ClientException as err:
err_msg = ("Error updating environment for plan %s: %s" % ( err_msg = ("Error updating environment for plan %s: %s" % (
@ -135,10 +138,12 @@ class UpdateParametersAction(base.TripleOAction):
"""Updates plan environment with parameters.""" """Updates plan environment with parameters."""
def __init__(self, parameters, def __init__(self, parameters,
container=constants.DEFAULT_CONTAINER_NAME): container=constants.DEFAULT_CONTAINER_NAME,
key=constants.DEFAULT_PLAN_ENV_KEY):
super(UpdateParametersAction, self).__init__() super(UpdateParametersAction, self).__init__()
self.container = container self.container = container
self.parameters = parameters self.parameters = parameters
self.key = key
def run(self, context): def run(self, context):
swift = self.get_object_client(context) swift = self.get_object_client(context)
@ -152,7 +157,7 @@ class UpdateParametersAction(base.TripleOAction):
return actions.Result(error=err_msg) return actions.Result(error=err_msg)
try: try:
plan_utils.update_in_env(swift, env, 'parameter_defaults', plan_utils.update_in_env(swift, env, self.key,
self.parameters) self.parameters)
except swiftexceptions.ClientException as err: except swiftexceptions.ClientException as err:
err_msg = ("Error updating environment for plan %s: %s" % ( err_msg = ("Error updating environment for plan %s: %s" % (
@ -546,3 +551,65 @@ class RotateFernetKeysAction(GetPasswordsAction):
for key_path in key_paths[1:keys_to_be_purged + 1]: for key_path in key_paths[1:keys_to_be_purged + 1]:
del keys_map[key_path] del keys_map[key_path]
return keys_map return keys_map
class GetNetworkConfigAction(templates.ProcessTemplatesAction):
    """Gets network configuration details from available heat parameters."""

    def __init__(self, role_name, container=constants.DEFAULT_CONTAINER_NAME):
        super(GetNetworkConfigAction, self).__init__(container=container)
        self.role_name = role_name

    def run(self, context):
        """Previews the plan's stack and extracts the role's network config.

        Returns the parsed network_config dict, None when it cannot be
        located, or the parent action's error Result unchanged.
        """
        processed_data = super(GetNetworkConfigAction, self).run(context)

        # If we receive a 'Result' instance it is because the parent action
        # had an error.
        if isinstance(processed_data, actions.Result):
            return processed_data

        fields = {
            'template': processed_data['template'],
            'files': processed_data['files'],
            'environment': processed_data['environment'],
            'stack_name': self.container,
        }
        orc = self.get_orchestration_client(context)
        preview_data = orc.stacks.preview(**fields)
        result = self.get_network_config(preview_data, self.container,
                                         self.role_name)
        return result

    def get_network_config(self, preview_data, stack_name, role_name):
        """Extracts the embedded network_config JSON from the preview.

        The os-net-config script embeds its JSON payload as
        ``echo '{"network_config": ...}' > ...``; only the first matching
        script found in the preview is considered.
        """
        result = None
        if preview_data:
            for res in preview_data.resources:
                net_script = self.process_preview_list(res,
                                                       stack_name,
                                                       role_name)
                if net_script:
                    # BUG FIX: str.find returns -1 when the marker is
                    # missing; previously that produced a garbage slice
                    # which was fed to json.loads. Guard explicitly.
                    marker = net_script.find("echo '{\"network_config\"")
                    if marker != -1:
                        # Skip past "echo '" to the start of the JSON.
                        start_index = marker + 6
                        end_index = net_script.find("'", start_index)
                        if end_index > start_index:
                            net_config = net_script[start_index:end_index]
                            if net_config:
                                result = json.loads(net_config)
                    break
        return result

    def process_preview_list(self, res, stack_name, role_name):
        """Recursively searches preview resources for this role's
        OsNetConfigImpl config script; returns it or None."""
        if isinstance(res, list):
            for item in res:
                out = self.process_preview_list(item, stack_name, role_name)
                if out:
                    return out
        elif isinstance(res, dict):
            res_stack_name = stack_name + '-' + role_name
            if (res['resource_name'] == "OsNetConfigImpl" and
                    res['resource_identity'] and
                    res_stack_name in res['resource_identity']['stack_name']):
                return res['properties']['config']
        return None

View File

@ -299,10 +299,17 @@ class ProcessTemplatesAction(base.TripleOAction):
# merge generated passwords into params first # merge generated passwords into params first
passwords = plan_env.get('passwords', {}) passwords = plan_env.get('passwords', {})
merged_params.update(passwords) merged_params.update(passwords)
# derived parameters are merged before 'parameter defaults'
# so that user-specified values can override the derived values.
derived_params = plan_env.get('derived_parameters', {})
merged_params.update(derived_params)
# handle user set parameter values next in case a user has set # handle user set parameter values next in case a user has set
# a new value for a password parameter # a new value for a password parameter
params = plan_env.get('parameter_defaults', {}) params = plan_env.get('parameter_defaults', {})
merged_params.update(params) merged_params = template_utils.deep_update(merged_params, params)
if merged_params: if merged_params:
env_temp_file = _create_temp_file( env_temp_file = _create_temp_file(
{'parameter_defaults': merged_params}) {'parameter_defaults': merged_params})

View File

@ -41,6 +41,9 @@ STACK_TIMEOUT_DEFAULT = 240
#: The default name to use for a plan container #: The default name to use for a plan container
DEFAULT_CONTAINER_NAME = 'overcloud' DEFAULT_CONTAINER_NAME = 'overcloud'
#: The default key to use for updating parameters in plan environment.
DEFAULT_PLAN_ENV_KEY = 'parameter_defaults'
#: The path to the tripleo heat templates installed on the undercloud #: The path to the tripleo heat templates installed on the undercloud
DEFAULT_TEMPLATES_PATH = '/usr/share/openstack-tripleo-heat-templates/' DEFAULT_TEMPLATES_PATH = '/usr/share/openstack-tripleo-heat-templates/'

View File

@ -0,0 +1,424 @@
# Copyright 2017 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from mistral_lib import actions
from tripleo_common.actions import derive_params
from tripleo_common.tests import base
class GetDpdkNicsNumaInfoActionTest(base.TestCase):
    """Tests for GetDpdkNicsNumaInfoAction.

    Each case feeds a network config plus fake introspection data
    (inventory.interfaces and numa_topology.nics) and checks either the
    derived DPDK NIC info or the error reported via actions.Result.
    """

    def test_run_dpdk_port(self):
        # Single ovs_dpdk_port whose member 'nic5' resolves (via the
        # ordered active-interface list) to physical NIC ens802f1.
        network_configs = [{
            "members": [{
                "members": [{"name": "nic5", "type": "interface"}],
                "name": "dpdk0",
                "type": "ovs_dpdk_port",
                "mtu": 8192,
                "rx_queue": 4}],
            "name": "br-link",
            "type": "ovs_user_bridge"}]

        inspect_data = {
            "numa_topology": {
                "nics": [{"name": "ens802f1", "numa_node": 1},
                         {"name": "ens802f0", "numa_node": 1},
                         {"name": "eno1", "numa_node": 0},
                         {"name": "eno2", "numa_node": 0},
                         {"name": "enp12s0f1", "numa_node": 0},
                         {"name": "enp12s0f0", "numa_node": 0},
                         {"name": "enp13s0f0", "numa_node": 0},
                         {"name": "enp13s0f1", "numa_node": 0}]
            },
            "inventory": {
                "interfaces": [{"has_carrier": True,
                                "name": "ens802f1"},
                               {"has_carrier": True,
                                "name": "ens802f0"},
                               {"has_carrier": True,
                                "name": "eno1"},
                               {"has_carrier": True,
                                "name": "eno2"},
                               {"has_carrier": True,
                                "name": "enp12s0f0"},
                               {"has_carrier": False,
                                "name": "enp13s0f0"},
                               {"has_carrier": False,
                                "name": "enp13s0f1"}]
            }
        }
        # The port MTU (8192) and the NIC's NUMA node (1) are carried over.
        expected_result = [{'name': 'ens802f1', 'mtu': 8192, 'numa_node': 1}]

        mock_ctx = mock.MagicMock()
        action = derive_params.GetDpdkNicsNumaInfoAction(network_configs,
                                                         inspect_data)
        result = action.run(mock_ctx)
        self.assertEqual(result, expected_result)

    def test_run_dpdk_bond(self):
        # ovs_dpdk_bond with two ovs_dpdk_ports ('nic4' and 'nic5');
        # both members must appear in the result with the bond's MTU.
        network_configs = [{
            "members": [{"type": "ovs_dpdk_bond", "name": "dpdkbond0",
                         "mtu": 9000, "rx_queue": 4,
                         "members": [{"type": "ovs_dpdk_port",
                                      "name": "dpdk0",
                                      "members": [{"type": "interface",
                                                   "name": "nic4"}]},
                                     {"type": "ovs_dpdk_port",
                                      "name": "dpdk1",
                                      "members": [{"type": "interface",
                                                   "name": "nic5"}]}]}],
            "name": "br-link",
            "type": "ovs_user_bridge"}]
        inspect_data = {
            "numa_topology": {
                "nics": [{"name": "ens802f1", "numa_node": 1},
                         {"name": "ens802f0", "numa_node": 1},
                         {"name": "eno1", "numa_node": 0},
                         {"name": "eno2", "numa_node": 0},
                         {"name": "enp12s0f1", "numa_node": 0},
                         {"name": "enp12s0f0", "numa_node": 0},
                         {"name": "enp13s0f0", "numa_node": 0},
                         {"name": "enp13s0f1", "numa_node": 0}]
            },
            "inventory": {
                "interfaces": [{"has_carrier": True,
                                "name": "ens802f1"},
                               {"has_carrier": True,
                                "name": "ens802f0"},
                               {"has_carrier": True,
                                "name": "eno1"},
                               {"has_carrier": True,
                                "name": "eno2"},
                               {"has_carrier": True,
                                "name": "enp12s0f0"},
                               {"has_carrier": False,
                                "name": "enp13s0f0"},
                               {"has_carrier": False,
                                "name": "enp13s0f1"}]
            }
        }
        expected_result = [{'mtu': 9000, 'numa_node': 1, 'name': 'ens802f0'},
                           {'mtu': 9000, 'numa_node': 1, 'name': 'ens802f1'}]

        mock_ctx = mock.MagicMock()
        action = derive_params.GetDpdkNicsNumaInfoAction(network_configs,
                                                         inspect_data)
        result = action.run(mock_ctx)
        self.assertEqual(result, expected_result)

    @mock.patch.object(actions, 'Result', autospec=True)
    def test_run_no_inspect_nics(self, mock_actions):
        # Empty numa_topology.nics must surface an error Result.
        network_configs = [{
            "members": [{
                "members": [{"name": "nic5", "type": "interface"}],
                "name": "dpdk0",
                "type": "ovs_dpdk_port",
                "mtu": 8192,
                "rx_queue": 4}],
            "name": "br-link",
            "type": "ovs_user_bridge"}]

        inspect_data = {
            "numa_topology": {
                "nics": []
            },
            "inventory": {
                "interfaces": [{"has_carrier": True,
                                "name": "ens802f1"},
                               {"has_carrier": True,
                                "name": "ens802f0"},
                               {"has_carrier": True,
                                "name": "eno1"},
                               {"has_carrier": True,
                                "name": "eno2"},
                               {"has_carrier": True,
                                "name": "enp12s0f0"},
                               {"has_carrier": False,
                                "name": "enp13s0f0"},
                               {"has_carrier": False,
                                "name": "enp13s0f1"}]
            }
        }

        mock_ctx = mock.MagicMock()
        action = derive_params.GetDpdkNicsNumaInfoAction(network_configs,
                                                         inspect_data)
        action.run(mock_ctx)
        msg = 'Introspection data does not have numa_topology.nics'
        mock_actions.assert_called_once_with(error=msg)

    @mock.patch.object(actions, 'Result', autospec=True)
    def test_run_no_inspect_interfaces(self, mock_actions):
        # Empty inventory.interfaces must surface an error Result
        # (checked before numa_topology.nics).
        network_configs = [{
            "members": [{
                "members": [{"name": "nic5", "type": "interface"}],
                "name": "dpdk0",
                "type": "ovs_dpdk_port",
                "mtu": 8192,
                "rx_queue": 4}],
            "name": "br-link",
            "type": "ovs_user_bridge"}]

        inspect_data = {
            "numa_topology": {
                "nics": []
            },
            "inventory": {
                "interfaces": []
            }
        }

        mock_ctx = mock.MagicMock()
        action = derive_params.GetDpdkNicsNumaInfoAction(network_configs,
                                                         inspect_data)
        action.run(mock_ctx)
        msg = 'Introspection data does not have inventory.interfaces'
        mock_actions.assert_called_once_with(error=msg)

    @mock.patch.object(actions, 'Result', autospec=True)
    def test_run_no_inspect_active_interfaces(self, mock_actions):
        # All interfaces report has_carrier=False, so no active NICs.
        network_configs = [{
            "members": [{
                "members": [{"name": "nic5", "type": "interface"}],
                "name": "dpdk0",
                "type": "ovs_dpdk_port",
                "mtu": 8192,
                "rx_queue": 4}],
            "name": "br-link",
            "type": "ovs_user_bridge"}]

        inspect_data = {
            "numa_topology": {
                "nics": [{"name": "ens802f1", "numa_node": 1},
                         {"name": "ens802f0", "numa_node": 1},
                         {"name": "eno1", "numa_node": 0},
                         {"name": "eno2", "numa_node": 0},
                         {"name": "enp12s0f1", "numa_node": 0},
                         {"name": "enp12s0f0", "numa_node": 0},
                         {"name": "enp13s0f0", "numa_node": 0},
                         {"name": "enp13s0f1", "numa_node": 0}]
            },
            "inventory": {
                "interfaces": [{"has_carrier": False,
                                "name": "enp13s0f0"},
                               {"has_carrier": False,
                                "name": "enp13s0f1"}]
            }
        }

        mock_ctx = mock.MagicMock()
        action = derive_params.GetDpdkNicsNumaInfoAction(network_configs,
                                                         inspect_data)
        action.run(mock_ctx)
        msg = 'Unable to determine active interfaces (has_carrier)'
        mock_actions.assert_called_once_with(error=msg)

    @mock.patch.object(actions, 'Result', autospec=True)
    def test_run_no_numa_node(self, mock_actions):
        # ens802f1 (which 'nic5' resolves to) has no 'numa_node' key in
        # numa_topology.nics, so mapping must fail with an error Result.
        network_configs = [{
            "members": [{
                "members": [{"name": "nic5", "type": "interface"}],
                "name": "dpdk0",
                "type": "ovs_dpdk_port",
                "mtu": 8192,
                "rx_queue": 4}],
            "name": "br-link",
            "type": "ovs_user_bridge"}]

        inspect_data = {
            "numa_topology": {
                "nics": [{"name": "ens802f1"},
                         {"name": "ens802f0", "numa_node": 1},
                         {"name": "eno1", "numa_node": 0},
                         {"name": "eno2", "numa_node": 0},
                         {"name": "enp12s0f1", "numa_node": 0},
                         {"name": "enp12s0f0", "numa_node": 0},
                         {"name": "enp13s0f0", "numa_node": 0},
                         {"name": "enp13s0f1", "numa_node": 0}]
            },
            "inventory": {
                "interfaces": [{"has_carrier": True,
                                "name": "ens802f1"},
                               {"has_carrier": True,
                                "name": "ens802f0"},
                               {"has_carrier": True,
                                "name": "eno1"},
                               {"has_carrier": True,
                                "name": "eno2"},
                               {"has_carrier": True,
                                "name": "enp12s0f0"},
                               {"has_carrier": False,
                                "name": "enp13s0f0"},
                               {"has_carrier": False,
                                "name": "enp13s0f1"}]
            }
        }

        mock_ctx = mock.MagicMock()
        action = derive_params.GetDpdkNicsNumaInfoAction(network_configs,
                                                         inspect_data)
        action.run(mock_ctx)
        msg = 'Unable to determine NUMA node for DPDK NIC: ens802f1'
        mock_actions.assert_called_once_with(error=msg)
class GetDpdkCoreListActionTest(base.TestCase):
    """Exercises GetDpdkCoreListAction against canned introspection data."""

    def test_run(self):
        # Three cores per node; the core owning each node's lowest
        # thread is reserved for the host and excluded from the PMD list.
        cpu_entries = [
            {"cpu": 21, "numa_node": 1, "thread_siblings": [38, 82]},
            {"cpu": 27, "numa_node": 0, "thread_siblings": [20, 64]},
            {"cpu": 3, "numa_node": 1, "thread_siblings": [25, 69]},
            {"cpu": 20, "numa_node": 0, "thread_siblings": [15, 59]},
            {"cpu": 17, "numa_node": 1, "thread_siblings": [34, 78]},
            {"cpu": 16, "numa_node": 0, "thread_siblings": [11, 55]},
        ]
        inspect_data = {"numa_topology": {"cpus": cpu_entries}}
        numa_nodes_cores_count = [2, 1]
        expected_result = "20,64,15,59,38,82"

        mock_ctx = mock.MagicMock()
        action = derive_params.GetDpdkCoreListAction(inspect_data,
                                                     numa_nodes_cores_count)
        self.assertEqual(action.run(mock_ctx), expected_result)

    @mock.patch.object(actions, 'Result', autospec=True)
    def test_run_invalid_inspect_data(self, mock_actions):
        # An empty cpus list must surface an error Result.
        inspect_data = {"numa_topology": {"cpus": []}}

        mock_ctx = mock.MagicMock()
        action = derive_params.GetDpdkCoreListAction(inspect_data, [2, 1])
        action.run(mock_ctx)
        mock_actions.assert_called_once_with(
            error='Introspection data does not have numa_topology.cpus')

    @mock.patch.object(actions, 'Result', autospec=True)
    def test_run_invalid_numa_nodes_cores_count(self, mock_actions):
        # Missing per-node core counts must surface an error Result.
        inspect_data = {"numa_topology": {"cpus": [
            {"cpu": 21, "numa_node": 1, "thread_siblings": [38, 82]},
            {"cpu": 27, "numa_node": 0, "thread_siblings": [20, 64]},
        ]}}

        mock_ctx = mock.MagicMock()
        action = derive_params.GetDpdkCoreListAction(inspect_data, [])
        action.run(mock_ctx)
        mock_actions.assert_called_once_with(
            error='CPU physical cores count for each NUMA nodes '
                  'is not available')
class GetHostCpusListActionTest(base.TestCase):
    """Exercises GetHostCpusListAction against canned introspection data."""

    def test_run_valid_inspect_data(self):
        # The core holding each node's lowest thread is picked for the
        # host; its thread siblings form the expected list.
        cpu_entries = [
            {"cpu": 21, "numa_node": 1, "thread_siblings": [38, 82]},
            {"cpu": 27, "numa_node": 0, "thread_siblings": [20, 64]},
            {"cpu": 3, "numa_node": 1, "thread_siblings": [25, 69]},
            {"cpu": 20, "numa_node": 0, "thread_siblings": [15, 59]},
        ]
        inspect_data = {"numa_topology": {"cpus": cpu_entries}}
        expected_result = "15,59,25,69"

        mock_ctx = mock.MagicMock()
        action = derive_params.GetHostCpusListAction(inspect_data)
        self.assertEqual(action.run(mock_ctx), expected_result)

    @mock.patch.object(actions, 'Result', autospec=True)
    def test_run_invalid_inspect_data(self, mock_actions):
        # An empty cpus list must surface an error Result.
        mock_ctx = mock.MagicMock()
        action = derive_params.GetHostCpusListAction(
            {"numa_topology": {"cpus": []}})
        action.run(mock_ctx)
        mock_actions.assert_called_once_with(
            error='Introspection data does not have numa_topology.cpus')
class GetDpdkSocketMemoryActionTest(base.TestCase):
    """Exercises GetDpdkSocketMemoryAction socket-memory derivation."""

    def _derive(self, dpdk_nics_numa_info):
        # Runs the action with the numa nodes [0, 1], overhead 800 and
        # packet-buffer size 4096 * 64 shared by all the cases below.
        mock_ctx = mock.MagicMock()
        action = derive_params.GetDpdkSocketMemoryAction(
            dpdk_nics_numa_info, [0, 1], 800, (4096 * 64))
        return action.run(mock_ctx)

    def test_run_valid_dpdk_nics_numa_info(self):
        # Node 0 has no DPDK nic (minimum 1024); node 1 rounds up to 3GB.
        nics = [{"name": "ens802f1", "numa_node": 1, "mtu": 8192}]
        self.assertEqual(self._derive(nics), "1024,3072")

    def test_run_multiple_mtu_in_same_numa_node(self):
        # Distinct MTUs on the same node are summed before rounding.
        nics = [{"name": "ens802f1", "numa_node": 1, "mtu": 1500},
                {"name": "ens802f2", "numa_node": 1, "mtu": 2048}]
        self.assertEqual(self._derive(nics), "1024,2048")

    def test_run_duplicate_mtu_in_same_numa_node(self):
        # A repeated MTU is only counted once per node.
        nics = [{"name": "ens802f1", "numa_node": 1, "mtu": 4096},
                {"name": "ens802f2", "numa_node": 1, "mtu": 4096}]
        self.assertEqual(self._derive(nics), "1024,2048")

View File

@ -293,6 +293,48 @@ class UpdateParametersActionTest(base.TestCase):
"tripleo.parameters.get" "tripleo.parameters.get"
) )
    @mock.patch('tripleo_common.actions.base.TripleOAction.'
                'cache_delete')
    @mock.patch('tripleo_common.actions.base.TripleOAction.get_object_client')
    def test_run_new_key(self, mock_get_object_client, mock_cache):
        """Parameters given a custom ``key`` land under that key in the
        plan environment instead of the default parameter_defaults."""
        mock_ctx = mock.MagicMock()
        swift = mock.MagicMock(url="http://test.com")
        # Plan environment as stored in Swift before the update.
        mock_env = yaml.safe_dump({
            'name': constants.DEFAULT_CONTAINER_NAME,
            'temp_environment': 'temp_environment',
            'template': 'template',
            'environments': [{u'path': u'environments/test.yaml'}],
        }, default_flow_style=False)
        swift.get_object.return_value = ({}, mock_env)
        mock_get_object_client.return_value = swift

        # Test
        test_parameters = {'SomeTestParameter': 42}
        action = parameters.UpdateParametersAction(test_parameters,
                                                   key='test_key')
        action.run(mock_ctx)

        # The updated environment must carry the parameters under
        # 'test_key' and be written back to the plan container.
        mock_env_updated = yaml.safe_dump({
            'name': constants.DEFAULT_CONTAINER_NAME,
            'temp_environment': 'temp_environment',
            'test_key': {'SomeTestParameter': 42},
            'template': 'template',
            'environments': [{u'path': u'environments/test.yaml'}]
        }, default_flow_style=False)

        swift.put_object.assert_called_once_with(
            constants.DEFAULT_CONTAINER_NAME,
            constants.PLAN_ENVIRONMENT,
            mock_env_updated
        )
        # The cached "tripleo.parameters.get" result for the plan must be
        # invalidated after the update.
        mock_cache.assert_called_once_with(
            mock_ctx,
            "overcloud",
            "tripleo.parameters.get"
        )
class UpdateRoleParametersActionTest(base.TestCase): class UpdateRoleParametersActionTest(base.TestCase):
@ -966,3 +1008,127 @@ class RotateFernetKeysActionTest(base.TestCase):
max_keys = 3 max_keys = 3
keys_map = action.purge_excess_keys(max_keys, keys_map) keys_map = action.purge_excess_keys(max_keys, keys_map)
self.assertEqual(2, len(keys_map)) self.assertEqual(2, len(keys_map))
class GetNetworkConfigActionTest(base.TestCase):
    """Tests for GetNetworkConfigAction.

    The action previews the overcloud stack via heat and extracts the
    'network_config' embedded in the role's OsNetConfigImpl resource.
    Note: the decorator order below fixes the mock-argument injection
    order (bottom decorator -> first argument).
    """

    @mock.patch('tripleo_common.actions.base.TripleOAction.'
                'cache_set')
    @mock.patch('tripleo_common.actions.base.TripleOAction.'
                'cache_get')
    @mock.patch('heatclient.common.template_utils.'
                'process_multiple_environments_and_files')
    @mock.patch('heatclient.common.template_utils.get_template_contents')
    @mock.patch('tripleo_common.actions.base.TripleOAction.'
                'get_orchestration_client')
    @mock.patch('tripleo_common.actions.base.TripleOAction.'
                'get_workflow_client')
    @mock.patch('tripleo_common.actions.base.TripleOAction.get_object_client')
    def test_run_valid_network_config(
        self, mock_get_object_client, mock_get_workflow_client,
        mock_get_orchestration_client, mock_get_template_contents,
        mock_process_multiple_environments_and_files,
        mock_cache_get,
        mock_cache_set):
        """A preview with a populated config yields the parsed dict."""
        mock_ctx = mock.MagicMock()
        swift = mock.MagicMock(url="http://test.com")
        mock_env = yaml.safe_dump({
            'temp_environment': 'temp_environment',
            'template': 'template',
            'environments': [{u'path': u'environments/test.yaml'}]
        }, default_flow_style=False)
        # Successive swift reads: plan env, a missing object, plan env.
        swift.get_object.side_effect = (
            ({}, mock_env),
            swiftexceptions.ClientException('atest2'),
            ({}, mock_env)
        )
        mock_get_object_client.return_value = swift
        mock_get_template_contents.return_value = ({}, {
            'heat_template_version': '2016-04-30'
        })
        mock_process_multiple_environments_and_files.return_value = ({}, {})
        mock_heat = mock.MagicMock()
        # Preview result carries the role's OsNetConfigImpl resource whose
        # config script echoes a JSON document with 'network_config'.
        mock_heat.stacks.preview.return_value = mock.Mock(resources=[{
            "resource_identity": {"stack_name": "overcloud-Compute-0"},
            "resource_name": "OsNetConfigImpl",
            "properties": {"config": "echo \'{\"network_config\": {}}\'"}
        }])
        mock_get_orchestration_client.return_value = mock_heat
        # Cache miss forces the action to run the heat preview.
        mock_cache_get.return_value = None
        expected = {"network_config": {}}
        # Test
        action = parameters.GetNetworkConfigAction(container='overcloud',
                                                   role_name='Compute')
        result = action.run(mock_ctx)
        self.assertEqual(expected, result)
        mock_heat.stacks.preview.assert_called_once_with(
            environment={},
            files={},
            template={'heat_template_version': '2016-04-30'},
            stack_name='overcloud',
        )

    @mock.patch('tripleo_common.actions.base.TripleOAction.'
                'cache_set')
    @mock.patch('tripleo_common.actions.base.TripleOAction.'
                'cache_get')
    @mock.patch('heatclient.common.template_utils.'
                'process_multiple_environments_and_files')
    @mock.patch('heatclient.common.template_utils.get_template_contents')
    @mock.patch('tripleo_common.actions.base.TripleOAction.'
                'get_orchestration_client')
    @mock.patch('tripleo_common.actions.base.TripleOAction.'
                'get_workflow_client')
    @mock.patch('tripleo_common.actions.base.TripleOAction.get_object_client')
    def test_run_invalid_network_config(
        self, mock_get_object_client,
        mock_get_workflow_client, mock_get_orchestration_client,
        mock_get_template_contents,
        mock_process_multiple_environments_and_files,
        mock_cache_get, mock_cache_set):
        """An empty config in the preview yields None (no network_config)."""
        mock_ctx = mock.MagicMock()
        swift = mock.MagicMock(url="http://test.com")
        mock_env = yaml.safe_dump({
            'temp_environment': 'temp_environment',
            'template': 'template',
            'environments': [{u'path': u'environments/test.yaml'}]
        }, default_flow_style=False)
        # Successive swift reads: plan env, a missing object, plan env.
        swift.get_object.side_effect = (
            ({}, mock_env),
            swiftexceptions.ClientException('atest2'),
            ({}, mock_env)
        )
        mock_get_object_client.return_value = swift
        mock_get_template_contents.return_value = ({}, {
            'heat_template_version': '2016-04-30'
        })
        mock_process_multiple_environments_and_files.return_value = ({}, {})
        mock_heat = mock.MagicMock()
        # Empty config string: nothing to extract from the preview.
        mock_heat.stacks.preview.return_value = mock.Mock(resources=[{
            "resource_identity": {"stack_name": "overcloud-Compute-0"},
            "resource_name": "OsNetConfigImpl",
            "properties": {"config": ""}
        }])
        mock_get_orchestration_client.return_value = mock_heat
        # Cache miss forces the action to run the heat preview.
        mock_cache_get.return_value = None
        # Test
        action = parameters.GetNetworkConfigAction(container='overcloud',
                                                   role_name='Compute')
        result = action.run(mock_ctx)
        self.assertIsNone(result)
        mock_heat.stacks.preview.assert_called_once_with(
            environment={},
            files={},
            template={'heat_template_version': '2016-04-30'},
            stack_name='overcloud',
        )

View File

@ -47,13 +47,35 @@ workflows:
concurrency: 1 concurrency: 1
workflow: _derive_parameters_per_role workflow: _derive_parameters_per_role
input: input:
plan: <% $.plan %>
role_name: <% $.role_name %> role_name: <% $.role_name %>
environment_parameters: <% $.environment_parameters %> environment_parameters: <% $.environment_parameters %>
heat_resource_tree: <% $.heat_resource_tree %> heat_resource_tree: <% $.heat_resource_tree %>
user_inputs: <% $.user_inputs %> user_inputs: <% $.user_inputs %>
on-success: send_message publish:
# Gets all the roles derived parameters as dictionary
result: <% task().result.select($.get('derived_parameters', {})).sum() %>
on-success: reset_derive_parameters_in_plan
on-error: set_status_failed_for_each_role on-error: set_status_failed_for_each_role
reset_derive_parameters_in_plan:
action: tripleo.parameters.reset
input:
container: <% $.plan %>
key: 'derived_parameters'
on-success:
- update_derive_parameters_in_plan: <% $.result %>
on-error: set_status_failed_reset_derive_parameters_in_plan
update_derive_parameters_in_plan:
action: tripleo.parameters.update
input:
container: <% $.plan %>
key: 'derived_parameters'
parameters: <% $.get('result', {}) %>
on-success: send_message
on-error: set_status_failed_update_derive_parameters_in_plan
set_status_failed_get_flattened_parameters: set_status_failed_get_flattened_parameters:
on-success: send_message on-success: send_message
publish: publish:
@ -84,6 +106,18 @@ workflows:
status: FAILED status: FAILED
message: <% task(for_each_role).result.select(dict('role_name' => $.role_name, 'status' => $.get('status', 'SUCCESS'), 'message' => $.get('message', ''))) %> message: <% task(for_each_role).result.select(dict('role_name' => $.role_name, 'status' => $.get('status', 'SUCCESS'), 'message' => $.get('message', ''))) %>
set_status_failed_reset_derive_parameters_in_plan:
on-success: send_message
publish:
status: FAILED
message: <% task(reset_derive_parameters_in_plan).result %>
set_status_failed_update_derive_parameters_in_plan:
on-success: send_message
publish:
status: FAILED
message: <% task(update_derive_parameters_in_plan).result %>
send_message: send_message:
action: zaqar.queue_post action: zaqar.queue_post
retry: count=5 delay=1 retry: count=5 delay=1
@ -95,6 +129,7 @@ workflows:
payload: payload:
status: <% $.get('status', 'SUCCESS') %> status: <% $.get('status', 'SUCCESS') %>
message: <% $.get('message', '') %> message: <% $.get('message', '') %>
result: <% $.get('result', '') %>
execution: <% execution() %> execution: <% execution() %>
on-success: on-success:
- fail: <% $.get('status') = 'FAILED' %> - fail: <% $.get('status') = 'FAILED' %>
@ -105,11 +140,17 @@ workflows:
Workflow which runs per role to get the introspection data on the first matching node assigned to role. Workflow which runs per role to get the introspection data on the first matching node assigned to role.
Once introspection data is fetched, this workflow will trigger the actual derive parameters workflow
input: input:
- plan
- role_name - role_name
- environment_parameters - environment_parameters
- heat_resource_tree - heat_resource_tree
- user_inputs - user_inputs
output:
derived_parameters: <% $.get('derived_parameters', {}) %>
# Need role_name in output parameter to display the status for all roles in main workflow when any role fails here.
role_name: <% $.role_name %>
tasks: tasks:
get_role_info: get_role_info:
workflow: _get_role_info workflow: _get_role_info
@ -155,11 +196,37 @@ workflows:
on-error: set_status_failed_on_error_get_profile_node on-error: set_status_failed_on_error_get_profile_node
get_introspection_data: get_introspection_data:
on-error: set_status_failed_get_introspection_data
action: baremetal_introspection.get_data uuid=<% $.profile_node_uuid %> action: baremetal_introspection.get_data uuid=<% $.profile_node_uuid %>
publish: publish:
introspection_data: <% task().result %> hw_data: <% task().result %>
# TODO-Follow up patches workflows will actually be used here to derive parameters for each role on-success:
- get_dpdk_derive_params: <% $.role_features.contains("DPDK") %>
# TODO: Needs to include condition to call other service derive params if DPDK is not available.
on-error: set_status_failed_get_introspection_data
get_dpdk_derive_params:
workflow: tripleo.derive_params_formulas.v1.dpdk_derive_params
input:
plan: <% $.plan %>
role_name: <% $.role_name %>
hw_data: <% $.hw_data %>
user_inputs: <% $.user_inputs %>
publish:
derived_parameters: <% task().result.get('derived_parameters', {}) %>
on-success: get_host_derive_params
on-error: set_status_failed_get_dpdk_derive_params
get_host_derive_params:
workflow: tripleo.derive_params_formulas.v1.host_derive_params
input:
role_name: <% $.role_name %>
hw_data: <% $.hw_data %>
user_inputs: <% $.user_inputs %>
derived_parameters: <% $.derived_parameters %>
publish:
derived_parameters: <% task().result.get('derived_parameters', {}) %>
on-error: set_status_failed_get_host_derive_params
# Workflow ends here because there are no more algorithms.
set_status_failed_get_role_info: set_status_failed_get_role_info:
publish: publish:
@ -203,6 +270,21 @@ workflows:
message: <% task(get_introspection_data).result %> message: <% task(get_introspection_data).result %>
on-success: fail on-success: fail
set_status_failed_get_dpdk_derive_params:
publish:
role_name: <% $.role_name %>
status: FAILED
message: <% task(get_dpdk_derive_params).result %>
on-success: fail
set_status_failed_get_host_derive_params:
publish:
role_name: <% $.role_name %>
status: FAILED
message: <% task(get_host_derive_params).result %>
on-success: fail
_get_role_info: _get_role_info:
description: > description: >
Workflow that determines the list of derived parameter features (DPDK, Workflow that determines the list of derived parameter features (DPDK,
@ -244,8 +326,9 @@ workflows:
check_features: check_features:
on-success: build_feature_dict on-success: build_feature_dict
publish: publish:
# The role supports the DPDK feature if the NeutronDpdkCoreList parameter is present. # TODO: Need to update this logic for ODL integration.
dpdk: <% $.role_services.any($.get('parameters', []).contains('NeutronDpdkCoreList')) %> # The role supports the DPDK feature if the NeutronDatapathType parameter is present.
dpdk: <% $.role_services.any($.get('parameters', []).contains('NeutronDatapathType')) %>
# The role supports the HCI feature if it includes both NovaCompute and CephOSD services. # The role supports the HCI feature if it includes both NovaCompute and CephOSD services.
hci: <% $.role_services.any($.get('type', '').endsWith('::NovaCompute')) and $.role_services.any($.get('type', '').endsWith('::CephOSD')) %> hci: <% $.role_services.any($.get('type', '').endsWith('::NovaCompute')) and $.role_services.any($.get('type', '').endsWith('::CephOSD')) %>

View File

@ -0,0 +1,342 @@
---
version: '2.0'
name: tripleo.derive_params_formulas.v1
description: TripleO Workflows to derive deployment parameters from the introspected data
workflows:
# Derives the OvS-DPDK tuning parameters for a role (PMD core list, host
# core list, socket memory and memory channels) from the node's
# introspection data and the role's network configuration.
dpdk_derive_params:
  description: >
    Workflow to derive parameters for DPDK service.
  input:
    - plan
    - role_name
    - hw_data # introspection data
    - user_inputs
    - derived_parameters: {}
  output:
    # Merge this workflow's results on top of any previously derived ones.
    derived_parameters: <% $.derived_parameters.mergeWith($.get('dpdk_parameters', {})) %>
  tasks:
    # Fetch the role's os-net-config network configuration from the plan.
    get_network_config:
      action: tripleo.parameters.get_network_config
      input:
        container: <% $.plan %>
        role_name: <% $.role_name %>
      publish:
        network_configs: <% task().result.get('network_config', []) %>
      on-success: get_dpdk_nics_numa_info
      on-error: set_status_failed_get_network_config
    # Map each DPDK NIC to its NUMA node and MTU.
    get_dpdk_nics_numa_info:
      action: tripleo.derive_params.get_dpdk_nics_numa_info
      input:
        network_configs: <% $.network_configs %>
        inspect_data: <% $.hw_data %>
      publish:
        dpdk_nics_numa_info: <% task().result %>
      on-success:
        # TODO: Need to remove conditions here
        # adding condition and throw error in action for empty check
        - get_dpdk_nics_numa_nodes: <% $.dpdk_nics_numa_info %>
        - set_status_failed_get_dpdk_nics_numa_info: <% not $.dpdk_nics_numa_info %>
      on-error: set_status_failed_on_error_get_dpdk_nics_numa_info
    # Distinct, ordered list of NUMA nodes that host a DPDK NIC.
    get_dpdk_nics_numa_nodes:
      publish:
        dpdk_nics_numa_nodes: <% $.dpdk_nics_numa_info.groupBy($.numa_node).select($[0]).orderBy($) %>
      on-success:
        - get_numa_nodes: <% $.dpdk_nics_numa_nodes %>
        - set_status_failed_get_dpdk_nics_numa_nodes: <% not $.dpdk_nics_numa_nodes %>
    # All NUMA nodes present on the node, from the RAM topology.
    get_numa_nodes:
      publish:
        numa_nodes: <% $.hw_data.numa_topology.ram.select($.numa_node).orderBy($) %>
      on-success:
        - get_num_cores_per_numa_nodes: <% $.numa_nodes %>
        - set_status_failed_get_numa_nodes: <% not $.numa_nodes %>
    # For NUMA node with DPDK nic, number of cores should be used from user input (defaults to 2)
    # For NUMA node without DPDK nic, number of cores should be 1
    get_num_cores_per_numa_nodes:
      publish:
        num_cores_per_numa_nodes: <% let(dpdk_nics_nodes => $.dpdk_nics_numa_nodes, cores => $.user_inputs.get('num_phy_cores_per_numa_node_for_pmd', 2)) -> $.numa_nodes.select(switch($ in $dpdk_nics_nodes => $cores, not $ in $dpdk_nics_nodes => 1)) %>
      on-success: get_pmd_cpus
    # Derive the PMD (poll-mode driver) CPU list -> OvsPmdCoreList.
    get_pmd_cpus:
      action: tripleo.derive_params.get_dpdk_core_list
      input:
        inspect_data: <% $.hw_data %>
        numa_nodes_cores_count: <% $.num_cores_per_numa_nodes %>
      publish:
        pmd_cpus: <% task().result %>
      on-success:
        - get_host_cpus: <% $.pmd_cpus %>
        - set_status_failed_get_pmd_cpus: <% not $.pmd_cpus %>
      on-error: set_status_failed_on_error_get_pmd_cpus
    # Derive the host (non-PMD) CPU list -> OvsDpdkCoreList.
    get_host_cpus:
      action: tripleo.derive_params.get_host_cpus_list inspect_data=<% $.hw_data %>
      publish:
        host_cpus: <% task().result %>
      on-success:
        - get_sock_mem: <% $.host_cpus %>
        - set_status_failed_get_host_cpus: <% not $.host_cpus %>
      on-error: set_status_failed_on_error_get_host_cpus
    # Derive per-NUMA-node socket memory -> OvsDpdkSocketMemory.
    get_sock_mem:
      action: tripleo.derive_params.get_dpdk_socket_memory
      input:
        dpdk_nics_numa_info: <% $.dpdk_nics_numa_info %>
        numa_nodes: <% $.numa_nodes %>
        overhead: <% $.user_inputs.get('overhead', 800) %>
        packet_size_in_buffer: <% 4096*64 %>
      publish:
        sock_mem: <% task().result %>
      on-success:
        - get_memory_slot_info: <% $.sock_mem %>
        - set_status_failed_get_sock_mem: <% not $.sock_mem %>
      on-error: set_status_failed_on_error_get_sock_mem
    # Collect the populated memory slot names from the introspected DMI data.
    get_memory_slot_info:
      publish:
        memory_slot_info: <% $.hw_data.extra.memory.values().select($.get("slot")).where($) %>
      on-success:
        - remove_slots_prefix_string: <% $.memory_slot_info %>
        - set_status_failed_get_memory_slot_info: <% not $.memory_slot_info %>
    # Memory channels are identified by the number of memory slots to the NUMA node
    # Memory slots will be of different formats in the introspection data, like P1-DIMMA1, DIMM_A1, etc
    # This task removes the prefix string format like 'P1-DIMM' or 'DIMM_' and provides the list with only slot names like 'A1'
    remove_slots_prefix_string:
      publish:
        updated_mem_slot_info: <% $.memory_slot_info.select($.replaceBy(regex("[A-Z0-9]*[-_]*DIMM{1}[-_]*"), '')) %>
      on-success: remove_slots_suffix_number
    # In the above list of slot names, this task removes the suffix number like '1' in 'A1' and provide the list with slot name as 'A'
    remove_slots_suffix_number:
      publish:
        updated_mem_slot_info: <% $.updated_mem_slot_info.select($.replaceBy(regex("[0-9]"), '')) %>
      on-success: get_memory_channels_per_node
    # The total memory slot names list will have both NUMA nodes slot.
    # Average out the length of slot name list with number of NUMA nodes to get the final value
    get_memory_channels_per_node:
      publish:
        mem_channel: <% $.updated_mem_slot_info.distinct().len() / $.numa_nodes.len() %>
      on-success:
        - get_dpdk_parameters: <% $.mem_channel %>
        - set_status_failed_get_memory_channels_per_node: <% not $.mem_channel %>
    # Assemble the '<Role>Parameters' dict consumed by the caller.
    get_dpdk_parameters:
      publish:
        dpdk_parameters: <% dict(concat($.role_name, 'Parameters') => dict('OvsPmdCoreList' => $.get('pmd_cpus', ''), 'OvsDpdkCoreList' => $.get('host_cpus', ''), 'OvsDpdkSocketMemory' => $.get('sock_mem', ''), 'OvsDpdkMemoryChannels' => $.get('mem_channel', ''))) %>
    # Failure handlers: publish a FAILED status with a diagnostic message.
    set_status_failed_get_network_config:
      publish:
        status: FAILED
        message: <% task(get_network_config).result %>
      on-success: fail
    set_status_failed_get_dpdk_nics_numa_info:
      publish:
        status: FAILED
        message: "Unable to determine DPDK NIC's NUMA information"
      on-success: fail
    set_status_failed_on_error_get_dpdk_nics_numa_info:
      publish:
        status: FAILED
        message: <% task(get_dpdk_nics_numa_info).result %>
      on-success: fail
    set_status_failed_get_dpdk_nics_numa_nodes:
      publish:
        status: FAILED
        message: "Unable to determine DPDK NIC's numa nodes"
      on-success: fail
    set_status_failed_get_numa_nodes:
      publish:
        status: FAILED
        message: 'Unable to determine available NUMA nodes'
      on-success: fail
    set_status_failed_get_pmd_cpus:
      publish:
        status: FAILED
        message: 'Unable to determine OvsPmdCoreList parameter'
      on-success: fail
    set_status_failed_on_error_get_pmd_cpus:
      publish:
        status: FAILED
        message: <% task(get_pmd_cpus).result %>
      on-success: fail
    set_status_failed_get_host_cpus:
      publish:
        status: FAILED
        message: 'Unable to determine OvsDpdkCoreList parameter'
      on-success: fail
    set_status_failed_on_error_get_host_cpus:
      publish:
        status: FAILED
        message: <% task(get_host_cpus).result %>
      on-success: fail
    set_status_failed_get_sock_mem:
      publish:
        status: FAILED
        message: 'Unable to determine OvsDpdkSocketMemory parameter'
      on-success: fail
    set_status_failed_on_error_get_sock_mem:
      publish:
        status: FAILED
        message: <% task(get_sock_mem).result %>
      on-success: fail
    set_status_failed_get_memory_slot_info:
      publish:
        status: FAILED
        message: 'Unable to determine memory slot name on NUMA nodes'
      on-success: fail
    set_status_failed_get_memory_channels_per_node:
      publish:
        status: FAILED
        message: 'Unable to determine OvsDpdkMemoryChannels parameter'
      on-success: fail
host_derive_params:
description: >
This workflow derives parameters for the Host process, and is mainly associated with CPU pinning and huge memory pages.
This workflow can be dependent on any feature or also can be invoked individually as well.
input:
- role_name
- hw_data # introspection data
- user_inputs
- derived_parameters: {}
output:
derived_parameters: <% $.derived_parameters.mergeWith($.get('host_parameters', {})) %>
tasks:
get_cpus:
publish:
cpus: <% $.hw_data.numa_topology.cpus %>
on-success:
- get_role_derive_params: <% $.cpus %>
- set_status_failed_get_cpus: <% not $.cpus %>
get_role_derive_params:
publish:
role_derive_params: <% $.derived_parameters.get(concat($.role_name, 'Parameters'), {}) %>
on-success: get_host_dpdk_combined_cpus
get_host_dpdk_combined_cpus:
publish:
host_dpdk_combined_cpus: <% let(params => $.role_derive_params) -> concat($params.get('OvsPmdCoreList', ''), ',', $params.get('OvsDpdkCoreList', '')).split(",").select(int($)) %>
on-success:
- get_nova_cpus: <% $.host_dpdk_combined_cpus %>
- set_status_failed_get_host_dpdk_combined_cpus: <% not $.host_dpdk_combined_cpus %>
get_nova_cpus:
publish:
nova_cpus: <% let(invalid_threads => $.host_dpdk_combined_cpus) -> $.cpus.select($.thread_siblings).flatten().where(not $ in $invalid_threads).join(',') %>
on-success:
- get_isol_cpus: <% $.nova_cpus %>
- set_status_failed_get_nova_cpus: <% not $.nova_cpus %>
get_isol_cpus:
publish:
isol_cpus: <% let(params => $.role_derive_params) -> concat($params.get('OvsPmdCoreList',''), ',', $.nova_cpus) %>
on-success: get_host_mem
get_host_mem:
publish:
host_mem: <% $.user_inputs.get('host_mem_default', 4096) %>
on-success: check_default_hugepage_supported
check_default_hugepage_supported:
publish:
default_hugepage_supported: <% $.hw_data.get('inventory', {}).get('cpu', {}).get('flags', []).contains('cpu_hugepages_1g') %>
on-success:
- get_total_memory: <% $.default_hugepage_supported %>
- set_status_failed_check_default_hugepage_supported: <% not $.default_hugepage_supported %>
get_total_memory:
publish:
total_memory: <% $.hw_data.get('inventory', {}).get('memory', {}).get('physical_mb', 0) %>
on-success:
- get_hugepages: <% $.total_memory %>
- set_status_failed_get_total_memory: <% not $.total_memory %>
get_hugepages:
publish:
hugepages: <% let(huge_page_perc => float($.user_inputs.get('huge_page_allocation_percentage', 90))/100)-> int((($.total_memory/1024)-4) * $huge_page_perc) %>
on-success:
- get_cpu_model: <% $.hugepages %>
- set_status_failed_get_hugepages: <% not $.hugepages %>
get_cpu_model:
publish:
intel_cpu_model: <% $.hw_data.get('inventory', {}).get('cpu', {}).get('model_name', '').startsWith('Intel') %>
on-success: get_iommu_info
get_iommu_info:
publish:
iommu_info: <% switch($.intel_cpu_model => 'intel_iommu=on iommu=pt', not $.intel_cpu_model => '') %>
on-success: get_kernel_args
get_kernel_args:
publish:
kernel_args: <% concat('default_hugepagesz=1GB hugepagesz=1G ', 'hugepages=', str($.hugepages), ' ', $.iommu_info, ' isolcpus=', $.isol_cpus) %>
on-success: get_host_parameters
get_host_parameters:
publish:
host_parameters: <% dict(concat($.role_name, 'Parameters') => dict('NovaVcpuPinSet' => $.get('nova_cpus', ''), 'NovaReservedHostMemory' => $.get('host_mem', ''), 'KernelArgs' => $.get('kernel_args', ''), 'IsolCpusList' => $.get('isol_cpus', ''))) %>
set_status_failed_get_cpus:
publish:
status: FAILED
message: "Unable to determine CPU's on NUMA nodes"
on-success: fail
set_status_failed_get_host_dpdk_combined_cpus:
publish:
status: FAILED
message: 'Unable to combine host and dpdk cpus list'
on-success: fail
set_status_failed_get_nova_cpus:
publish:
status: FAILED
message: 'Unable to determine nova vcpu pin set'
on-success: fail
set_status_failed_check_default_hugepage_supported:
publish:
status: FAILED
message: 'default huge page size 1GB is not supported'
on-success: fail
set_status_failed_get_total_memory:
publish:
status: FAILED
message: 'Unable to determine total memory'
on-success: fail
set_status_failed_get_hugepages:
publish:
status: FAILED
message: 'Unable to determine huge pages'
on-success: fail