Cleanup dead mistral actions and workbooks

- Removes unused mistral actions
- Removes all mistral workbooks
- Removes the custom filter for undercloud nova
- Removes mistral entry points, as mistral is no longer used

Depends-On: https://review.opendev.org/c/openstack/python-tripleoclient/+/775749
Change-Id: I7f826da829bbc7d7a4fcde8afcea3f61742a316a
This commit is contained in:
ramishra 2021-02-17 11:01:50 +05:30
parent 4a395662f5
commit 0b8717e0c8
27 changed files with 0 additions and 6048 deletions

View File

@ -18,7 +18,6 @@ Contents:
tripleo-mistral
contributing
uploads
reference/index
Indices and tables
==================

View File

@ -1,14 +0,0 @@
===========================
API Reference Documentation
===========================
Workbooks
=========
.. toctree::
:maxdepth: 2
:glob:
:titlesonly:
workbooks/*

View File

@ -1 +0,0 @@
*.rst

View File

@ -40,46 +40,7 @@ data_files =
share/tripleo-common = sudoers
share/tripleo-common/container-images = container-images/*
share/tripleo-common/image-yaml = image-yaml/*
share/tripleo-common/workbooks = workbooks/*
share/tripleo-common/healthcheck = healthcheck/*
share/ansible/roles/ = roles/*
share/ansible/plugins/ = ansible_plugins/*
[entry_points]
mistral.actions =
tripleo.baremetal.configure_boot = tripleo_common.actions.baremetal:ConfigureBootAction
tripleo.baremetal.configure_root_device = tripleo_common.actions.baremetal:ConfigureRootDeviceAction
tripleo.baremetal.get_node_hint = tripleo_common.actions.baremetal:GetNodeHintAction
tripleo.baremetal.get_profile = tripleo_common.actions.baremetal:GetProfileAction
tripleo.baremetal.update_node_capability = tripleo_common.actions.baremetal:UpdateNodeCapability
tripleo.baremetal.cell_v2_discover_hosts = tripleo_common.actions.baremetal:CellV2DiscoverHostsAction
tripleo.baremetal.validate_nodes = tripleo_common.actions.baremetal:ValidateNodes
tripleo.deployment.get_deployment_failures = tripleo_common.actions.deployment:DeploymentFailuresAction
tripleo.derive_params.convert_number_to_range_list = tripleo_common.actions.derive_params:ConvertNumberToRangeListAction
tripleo.derive_params.convert_range_to_number_list = tripleo_common.actions.derive_params:ConvertRangeToNumberListAction
tripleo.derive_params.get_dpdk_nics_numa_info = tripleo_common.actions.derive_params:GetDpdkNicsNumaInfoAction
tripleo.derive_params.get_dpdk_core_list = tripleo_common.actions.derive_params:GetDpdkCoreListAction
tripleo.derive_params.get_dpdk_socket_memory = tripleo_common.actions.derive_params:GetDpdkSocketMemoryAction
tripleo.derive_params.get_host_cpus_list = tripleo_common.actions.derive_params:GetHostCpusListAction
tripleo.parameters.get_flatten = tripleo_common.actions.parameters:GetFlattenedParametersAction
tripleo.parameters.get_network_config = tripleo_common.actions.parameters:GetNetworkConfigAction
tripleo.parameters.reset = tripleo_common.actions.parameters:ResetParametersAction
tripleo.parameters.update = tripleo_common.actions.parameters:UpdateParametersAction
tripleo.parameters.update_role = tripleo_common.actions.parameters:UpdateRoleParametersAction
tripleo.parameters.get_profile_of_flavor = tripleo_common.actions.parameters:GetProfileOfFlavorAction
tripleo.plan.delete = tripleo_common.actions.plan:DeletePlanAction
tripleo.plan.list = tripleo_common.actions.plan:ListPlansAction
tripleo.plan.export = tripleo_common.actions.plan:ExportPlanAction
tripleo.plan.update_roles = tripleo_common.actions.plan:UpdateRolesAction
tripleo.plan.validate_roles = tripleo_common.actions.plan:ValidateRolesDataAction
tripleo.plan.remove_noop_deploystep = tripleo_common.actions.plan:RemoveNoopDeployStepAction
tripleo.validations.get_pubkey = tripleo_common.actions.validations:GetPubkeyAction
tripleo.validations.get_privkey = tripleo_common.actions.validations:GetPrivkeyAction
tripleo.validations.enabled = tripleo_common.actions.validations:Enabled
# deprecated for pike release, will be removed in queens
tripleo.ansible-playbook = tripleo_common.actions.ansible:AnsiblePlaybookAction
# deprecated for rocky release, will be removed in the "S" cycle
tripleo.role.list = tripleo_common.actions.plan:ListRolesAction
[tool:pytest]
norecursedirs = .eggs .git .tox dist

View File

@ -299,90 +299,6 @@ class ConfigureRootDeviceAction(base.TripleOAction):
{'node': node.uuid, 'dev': root_device, 'local_gb': new_size})
class UpdateNodeCapability(base.TripleOAction):
    """Update a node's capability

    Set the node's capability to the specified value.

    :param node_uuid: The UUID of the node
    :param capability: The name of the capability to update
    :param value: The value to update token
    :return: Result of updating the node
    """

    def __init__(self, node_uuid, capability, value):
        super(UpdateNodeCapability, self).__init__()
        self.node_uuid = node_uuid
        self.capability = capability
        self.value = value

    def run(self, context):
        # Delegate the actual capability change to the shared nodes helper;
        # any failure is converted into an error Result for the workflow.
        client = self.get_baremetal_client(context)
        try:
            return nodes.update_node_capability(
                self.node_uuid, self.capability, self.value, client)
        except Exception as err:
            LOG.exception("Error updating node capability in ironic.")
            error_text = "%s: %s" % (type(err).__name__, str(err))
            return actions.Result(error=error_text)
class CellV2DiscoverHostsAction(base.TripleOAction):
    """Run cell_v2 host discovery

    Runs cell_v2 host discovery to map any newly available ironic nodes.
    """

    def run(self, context):
        # Best-effort discovery: on success only the stdout is logged
        # (no explicit return value); on failure an error Result is
        # returned to the caller.
        try:
            discovery = nodes.run_nova_cell_v2_discovery()
            LOG.info(
                'Successfully ran cell_v2 discover_hosts\n'
                'stdout: %(stdout)r\n',
                {"stdout": discovery[0]}
            )
        except Exception as err:
            LOG.exception("Error running cell_v2 discover_hosts")
            error_text = "%s: %s" % (type(err).__name__, str(err))
            return actions.Result(error=error_text)
class GetProfileAction(base.TripleOAction):
    """Return the profile associated with the given node """

    def __init__(self, node):
        super(GetProfileAction, self).__init__()
        self.node = node

    def run(self, context):
        # Pair the node's profile with its uuid for the caller.
        return {
            'profile': nodes.get_node_profile(self.node),
            'uuid': self.node.get('uuid'),
        }
class GetNodeHintAction(base.TripleOAction):
    """Return the scheduler hint associated with the given node """

    def __init__(self, node):
        super(GetNodeHintAction, self).__init__()
        self.node = node

    def run(self, context):
        # Pair the node's scheduler hint with its uuid for the caller.
        return {
            'hint': nodes.get_node_hint(self.node),
            'uuid': self.node.get('uuid'),
        }
class GetCandidateNodes(base.TripleOAction):
"""Given IPs, ports and credentials, return potential new nodes."""

View File

@ -1,465 +0,0 @@
# Copyright 2017 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import math
import re
from mistral_lib import actions
from tripleo_common.actions import base
from tripleo_common import exception
LOG = logging.getLogger(__name__)
class GetDpdkNicsNumaInfoAction(base.TripleOAction):
    """Gets the DPDK NICs with MTU for NUMA nodes.

    Find the DPDK interface names from the network config and
    translate it to physical interface names using the introspection
    data. And then find the NUMA node associated with the DPDK
    interface and the MTU value.

    :param network_configs: network config list
    :param inspect_data: introspection data
    :param mtu_default: mtu default value for NICs
    :return: DPDK NICs NUMA nodes info
    """

    def __init__(self, network_configs, inspect_data, mtu_default=1500):
        super(GetDpdkNicsNumaInfoAction, self).__init__()
        self.network_configs = network_configs
        self.inspect_data = inspect_data
        self.mtu_default = mtu_default

    # TODO(jpalanis): Expose this utility from os-net-config to sort
    # active nics
    def _natural_sort_key(self, s):
        # Split e.g. "eth10" into ['eth', 10, ''] so that embedded numbers
        # compare as integers rather than lexicographically.
        nsre = re.compile('([0-9]+)')
        return [int(text) if text.isdigit() else text
                for text in re.split(nsre, s)]

    # TODO(jpalanis): Expose this utility from os-net-config to sort
    # active nics
    def _is_embedded_nic(self, nic):
        # Onboard NIC name prefixes; these sort ahead of add-in cards
        # in _ordered_nics below.
        if (nic.startswith('em') or nic.startswith('eth') or
                nic.startswith('eno')):
            return True
        return False

    # TODO(jpalanis): Expose this utility from os-net-config to sort
    # active nics
    def _ordered_nics(self, interfaces):
        # Embedded NICs first, then the rest, each naturally sorted.
        # This ordering defines what "nicN" aliases resolve to in
        # get_physical_iface_name.
        embedded_nics = []
        nics = []
        for iface in interfaces:
            nic = iface.get('name', '')
            if self._is_embedded_nic(nic):
                embedded_nics.append(nic)
            else:
                nics.append(nic)
        active_nics = (sorted(
            embedded_nics, key=self._natural_sort_key) +
            sorted(nics, key=self._natural_sort_key))
        return active_nics

    # Gets numa node id for physical NIC name
    def find_numa_node_id(self, numa_nics, nic_name):
        # Linear scan of the introspection NUMA nic entries; None when
        # the NIC is not listed.
        for nic_info in numa_nics:
            if nic_info.get('name', '') == nic_name:
                return nic_info.get('numa_node', None)
        return None

    # Get physical interface name for NIC name
    def get_physical_iface_name(self, ordered_nics, nic_name):
        if nic_name.startswith('nic'):
            # Nic numbering, find the actual interface name
            # ("nic1" is 1-based into the ordered active-NIC list).
            nic_number = int(nic_name.replace('nic', ''))
            if nic_number > 0:
                iface_name = ordered_nics[nic_number - 1]
                return iface_name
        return nic_name

    # Gets dpdk interfaces and mtu info for dpdk config
    # Default mtu(recommended 1500) is used if no MTU is set for DPDK NIC
    def get_dpdk_interfaces(self, dpdk_objs):
        mtu = self.mtu_default
        dpdk_ifaces = []
        for dpdk_obj in dpdk_objs:
            obj_type = dpdk_obj.get('type')
            # NOTE(review): mtu is overwritten on every iteration, so the
            # value from the *last* object in dpdk_objs is applied to all
            # collected interfaces — confirm this is intended.
            mtu = dpdk_obj.get('mtu', self.mtu_default)
            if obj_type == 'ovs_dpdk_port':
                # Member interfaces of ovs_dpdk_port
                dpdk_ifaces.extend(dpdk_obj.get('members', []))
            elif obj_type == 'ovs_dpdk_bond':
                # ovs_dpdk_bond will have multiple ovs_dpdk_ports
                for bond_member in dpdk_obj.get('members', []):
                    if bond_member.get('type') == 'ovs_dpdk_port':
                        dpdk_ifaces.extend(bond_member.get('members', []))
        return (dpdk_ifaces, mtu)

    def run(self, context):
        """Build the DPDK NIC -> NUMA info list, or an error Result."""
        interfaces = self.inspect_data.get('inventory',
                                           {}).get('interfaces', [])
        # Checks whether inventory interfaces information is not available
        # in introspection data.
        if not interfaces:
            msg = 'Introspection data does not have inventory.interfaces'
            return actions.Result(error=msg)

        numa_nics = self.inspect_data.get('numa_topology',
                                          {}).get('nics', [])
        # Checks whether numa topology nics information is not available
        # in introspection data.
        if not numa_nics:
            msg = 'Introspection data does not have numa_topology.nics'
            return actions.Result(error=msg)

        active_interfaces = [iface for iface in interfaces
                             if iface.get('has_carrier', False)]
        # Checks whether active interfaces are not available
        if not active_interfaces:
            msg = 'Unable to determine active interfaces (has_carrier)'
            return actions.Result(error=msg)

        dpdk_nics_numa_info = []
        ordered_nics = self._ordered_nics(active_interfaces)
        # Gets DPDK network config and parses to get DPDK NICs
        # with mtu and numa node id
        for config in self.network_configs:
            if config.get('type', '') == 'ovs_user_bridge':
                bridge_name = config.get('name', '')
                addresses = config.get('addresses', [])
                members = config.get('members', [])
                dpdk_ifaces, mtu = self.get_dpdk_interfaces(members)
                for dpdk_iface in dpdk_ifaces:
                    type = dpdk_iface.get('type', '')
                    if type == 'sriov_vf':
                        # SR-IOV VFs carry the interface in 'device'.
                        name = dpdk_iface.get('device', '')
                    else:
                        name = dpdk_iface.get('name', '')
                    phy_name = self.get_physical_iface_name(
                        ordered_nics, name)
                    node = self.find_numa_node_id(numa_nics, phy_name)
                    if node is None:
                        msg = ('Unable to determine NUMA node for '
                               'DPDK NIC: %s' % phy_name)
                        return actions.Result(error=msg)
                    dpdk_nic_info = {'name': phy_name,
                                     'numa_node': node,
                                     'mtu': mtu,
                                     'bridge_name': bridge_name,
                                     'addresses': addresses}
                    dpdk_nics_numa_info.append(dpdk_nic_info)
        return dpdk_nics_numa_info
class GetDpdkCoreListAction(base.TripleOAction):
    """Gets the DPDK PMD Core List.

    With input as the number of physical cores for each NUMA node,
    find the right logical CPUs to be allocated along with its
    siblings for the PMD core list.

    :param inspect_data: introspection data
    :param numa_nodes_cores_count: physical cores count for each NUMA
    :return: DPDK Core List
    """

    def __init__(self, inspect_data, numa_nodes_cores_count):
        super(GetDpdkCoreListAction, self).__init__()
        self.inspect_data = inspect_data
        self.numa_nodes_cores_count = numa_nodes_cores_count

    def run(self, context):
        numa_cpus_info = self.inspect_data.get('numa_topology',
                                               {}).get('cpus', [])
        # Bail out when introspection data lacks the cpu topology.
        if not numa_cpus_info:
            msg = 'Introspection data does not have numa_topology.cpus'
            return actions.Result(error=msg)
        # Bail out when no per-node core counts were requested.
        if not self.numa_nodes_cores_count:
            msg = ('CPU physical cores count for each NUMA nodes '
                   'is not available')
            return actions.Result(error=msg)

        # Group every available thread by its NUMA node.
        threads_by_node = {}
        for cpu in numa_cpus_info:
            threads_by_node.setdefault(cpu['numa_node'], []).extend(
                cpu['thread_siblings'])

        selected_threads = []
        for node, requested_cores in enumerate(self.numa_nodes_cores_count):
            # The core owning the lowest-numbered thread on each node is
            # skipped (it is the one not having the least thread check in
            # the original implementation).
            lowest_thread = min(threads_by_node[node])
            remaining = requested_cores
            for cpu in numa_cpus_info:
                if cpu['numa_node'] != node:
                    continue
                if lowest_thread in cpu['thread_siblings']:
                    continue
                selected_threads.extend(cpu['thread_siblings'])
                remaining -= 1
                if remaining == 0:
                    break
        return ','.join(str(thread) for thread in selected_threads)
class GetHostCpusListAction(base.TripleOAction):
    """Gets the Host CPUs List.

    CPU threads from first physical core is allocated for host processes
    on each NUMA nodes.

    :param inspect_data: introspection data
    :return: Host CPUs List
    """

    def __init__(self, inspect_data):
        super(GetHostCpusListAction, self).__init__()
        self.inspect_data = inspect_data

    def run(self, context):
        numa_cpus_info = self.inspect_data.get('numa_topology',
                                               {}).get('cpus', [])
        # Bail out when introspection data lacks the cpu topology.
        if not numa_cpus_info:
            msg = 'Introspection data does not have numa_topology.cpus'
            return actions.Result(error=msg)

        # Group every available thread by its NUMA node.
        threads_by_node = {}
        for cpu in numa_cpus_info:
            threads_by_node.setdefault(cpu['numa_node'], []).extend(
                cpu['thread_siblings'])

        host_threads = []
        for numa_node in sorted(threads_by_node.keys()):
            node = int(numa_node)
            # The core owning the lowest-numbered thread on each node is
            # the one reserved for host processes.
            lowest_thread = min(threads_by_node[numa_node])
            for cpu in numa_cpus_info:
                if (cpu['numa_node'] == node and
                        lowest_thread in cpu['thread_siblings']):
                    host_threads.extend(cpu['thread_siblings'])
                    break
        return ','.join(str(thread) for thread in host_threads)
class GetDpdkSocketMemoryAction(base.TripleOAction):
    """Gets the DPDK Socket Memory List.

    For NUMA node with DPDK nic, socket memory is calculated
    based on MTU, Overhead and Packet size in buffer.
    For NUMA node without DPDK nic, minimum socket memory is
    assigned (recommended 1GB)

    :param dpdk_nics_numa_info: DPDK nics numa info
    :param numa_nodes: list of numa nodes
    :param overhead: overhead value
    :param packet_size_in_buffer: packet size in buffer
    :param minimum_socket_memory: minimum socket memory
    :return: DPDK Socket Memory List
    """

    def __init__(self, dpdk_nics_numa_info, numa_nodes,
                 overhead, packet_size_in_buffer,
                 minimum_socket_memory=1024):
        super(GetDpdkSocketMemoryAction, self).__init__()
        self.dpdk_nics_numa_info = dpdk_nics_numa_info
        self.numa_nodes = numa_nodes
        self.overhead = overhead
        self.packet_size_in_buffer = packet_size_in_buffer
        self.minimum_socket_memory = minimum_socket_memory

    # Computes round off MTU value in bytes
    # example: MTU value 9000 into 9216 bytes
    def roundup_mtu_bytes(self, mtu):
        # Round up to the next multiple of 1024 bytes.
        max_div_val = int(math.ceil(float(mtu) / float(1024)))
        return (max_div_val * 1024)

    # Calculates socket memory for a NUMA node
    def calculate_node_socket_memory(
            self, numa_node, dpdk_nics_numa_info, overhead,
            packet_size_in_buffer, minimum_socket_memory):
        distinct_mtu_per_node = []
        socket_memory = 0

        # For DPDK numa node
        # Each distinct MTU on this node contributes once to the total.
        for nics_info in dpdk_nics_numa_info:
            if (numa_node == nics_info['numa_node'] and
                    not nics_info['mtu'] in distinct_mtu_per_node):
                distinct_mtu_per_node.append(nics_info['mtu'])
                roundup_mtu = self.roundup_mtu_bytes(nics_info['mtu'])
                # Memory contribution for this MTU, in MB.
                socket_memory += (((roundup_mtu + overhead) *
                                  packet_size_in_buffer) /
                                  (1024 * 1024))

        # For Non DPDK numa node
        if socket_memory == 0:
            socket_memory = minimum_socket_memory
        # For DPDK numa node
        else:
            socket_memory += 512

        # Round the per-node total up to whole GBs, expressed in MB.
        socket_memory_in_gb = int(socket_memory / 1024)
        if socket_memory % 1024 > 0:
            socket_memory_in_gb += 1
        return (socket_memory_in_gb * 1024)

    def run(self, context):
        # One socket-memory value per NUMA node, joined as a CSV string.
        dpdk_socket_memory_list = []
        for node in self.numa_nodes:
            socket_mem = self.calculate_node_socket_memory(
                node, self.dpdk_nics_numa_info, self.overhead,
                self.packet_size_in_buffer,
                self.minimum_socket_memory)
            dpdk_socket_memory_list.append(socket_mem)
        return ','.join([str(sm) for sm in dpdk_socket_memory_list])
class ConvertNumberToRangeListAction(base.TripleOAction):
    """Converts number list into range list

    :param num_list: comma delimited number list as string
    :return: comma delimited range list as string
    """

    def __init__(self, num_list):
        super(ConvertNumberToRangeListAction, self).__init__()
        self.num_list = num_list

    # converts number list into range list.
    # here input parameter and return value as list
    # example: [12, 13, 14, 17] into ["12-14", "17"]
    def convert_number_to_range_list(self, num_list):
        """Collapse a list of ints into consecutive-range strings.

        Works on a sorted, de-duplicated copy, so the caller's list is
        not mutated and duplicate inputs no longer produce duplicate
        range entries. Runs in O(n log n); the previous implementation
        used ``in``/``index`` lookups on a list, which was O(n^2).
        """
        ordered = sorted(set(num_list))
        range_list = []
        if not ordered:
            return range_list
        range_min = ordered[0]
        for i, num in enumerate(ordered):
            # A run ends when the next value is not num + 1 (or at the end).
            if i + 1 == len(ordered) or ordered[i + 1] != num + 1:
                if range_min != num:
                    range_list.append(str(range_min) + '-' + str(num))
                else:
                    range_list.append(str(range_min))
                if i + 1 < len(ordered):
                    range_min = ordered[i + 1]
        # here, range_list is a list of strings
        return range_list

    def run(self, context):
        try:
            if not self.num_list:
                err_msg = ("Input param 'num_list' is blank.")
                raise exception.DeriveParamsError(err_msg)
            try:
                # splitting a string (comma delimited list) into
                # list of numbers
                # example: "12,13,14,17" string into [12,13,14,17]
                num_list = [int(num.strip(' '))
                            for num in self.num_list.split(",")]
            except ValueError as exc:
                err_msg = ("Invalid number in input param "
                           "'num_list': %s" % exc)
                raise exception.DeriveParamsError(err_msg)
            range_list = self.convert_number_to_range_list(num_list)
        except exception.DeriveParamsError as err:
            LOG.error('Derive Params Error: %s', err)
            return actions.Result(error=str(err))
        # converts into comma delimited range list as string
        return ','.join(range_list)
class ConvertRangeToNumberListAction(base.TripleOAction):
    """Converts range list to integer list

    :param range_list: comma delimited range list as string / list
    :return: comma delimited number list as string
    """

    def __init__(self, range_list):
        super(ConvertRangeToNumberListAction, self).__init__()
        self.range_list = range_list

    # converts range list into number list
    # example: ["12-14", "^13", "17"] into [12, 14, 17]
    def convert_range_to_number_list(self, range_list):
        included = []
        excluded = []
        try:
            for token in range_list:
                token = token.strip(' ')
                if '^' in token:
                    # "^N" marks N for exclusion from the final list.
                    excluded.append(int(token[1:]))
                elif '-' in token:
                    # "A-B" expands to every integer from A through B.
                    parts = token.split("-")
                    included.extend(range(int(parts[0]), int(parts[1]) + 1))
                else:
                    included.append(int(token))
        except ValueError as exc:
            err_msg = ("Invalid number in input param "
                       "'range_list': %s" % exc)
            raise exception.DeriveParamsError(err_msg)
        # Filter exclusions last so they apply to any earlier token.
        return [num for num in included if num not in excluded]

    def run(self, context):
        try:
            if not self.range_list:
                raise exception.DeriveParamsError(
                    "Input param 'range_list' is blank.")
            tokens = self.range_list
            # Accept either a list or a comma-delimited string.
            if not isinstance(tokens, list):
                tokens = self.range_list.split(",")
            num_list = self.convert_range_to_number_list(tokens)
        except exception.DeriveParamsError as err:
            LOG.error('Derive Params Error: %s', err)
            return actions.Result(error=str(err))
        # converts into comma delimited number list as string
        return ','.join(str(num) for num in num_list)

View File

@ -1,163 +0,0 @@
# Copyright 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from mistral_lib import actions
import six
from tripleo_common.actions import base
from tripleo_common import constants
from tripleo_common import exception
from tripleo_common.utils import parameters as parameter_utils
from tripleo_common.utils import stack_parameters as stack_param_utils
LOG = logging.getLogger(__name__)
class ResetParametersAction(base.TripleOAction):
    """Provides method to delete user set parameters.

    :param container: name of the Swift container / plan name
    :param key: plan environment key holding the user parameters
    """

    def __init__(self, container=constants.DEFAULT_CONTAINER_NAME,
                 key=constants.DEFAULT_PLAN_ENV_KEY):
        super(ResetParametersAction, self).__init__()
        self.container = container
        self.key = key

    def run(self, context):
        swift = self.get_object_client(context)
        try:
            return stack_param_utils.reset_parameters(
                swift, self.container, self.key)
        except Exception as err:
            LOG.exception(six.text_type(err))
            # Pass the message via the ``error`` keyword: Result's first
            # positional parameter is ``data``, so the previous positional
            # call reported the failure as successful output.
            return actions.Result(error=six.text_type(err))
class UpdateParametersAction(base.TripleOAction):
    """Updates plan environment with parameters.

    :param parameters: dict of parameter names to new values
    :param container: name of the Swift container / plan name
    :param key: plan environment key holding the user parameters
    :param validate: whether to validate after updating
    """

    def __init__(self, parameters,
                 container=constants.DEFAULT_CONTAINER_NAME,
                 key=constants.DEFAULT_PLAN_ENV_KEY,
                 validate=True):
        super(UpdateParametersAction, self).__init__()
        self.container = container
        self.parameters = parameters
        self.key = key
        self.validate = validate

    def run(self, context):
        swift = self.get_object_client(context)
        heat = self.get_orchestration_client(context)
        try:
            return stack_param_utils.update_parameters(
                swift, heat, self.parameters,
                self.container, self.key,
                self.validate)
        except Exception as err:
            LOG.exception(six.text_type(err))
            # ``error=`` keyword: Result's first positional parameter is
            # ``data``, so the previous positional call made failures look
            # like successful results.
            return actions.Result(error=six.text_type(err))
class UpdateRoleParametersAction(base.TripleOAction):
    """Updates role related parameters in plan environment .

    :param role: role to update parameters for
    :param container: name of the Swift container / plan name
    """

    def __init__(self, role, container=constants.DEFAULT_CONTAINER_NAME):
        super(UpdateRoleParametersAction, self).__init__()
        self.role = role
        self.container = container

    def run(self, context):
        swift = self.get_object_client(context)
        heat = self.get_orchestration_client(context)
        ironic = self.get_baremetal_client(context)
        nova = self.get_compute_client(context)
        try:
            return stack_param_utils.update_role_parameters(
                swift, heat, ironic, nova, self.role, self.container)
        except Exception as err:
            LOG.exception(six.text_type(err))
            # ``error=`` keyword: passing positionally would populate
            # ``data`` and mask the failure from callers.
            return actions.Result(error=six.text_type(err))
class GetFlattenedParametersAction(base.TripleOAction):
    """Get the heat stack tree and parameters in flattened structure.

    This method validates the stack of the container and returns the
    parameters and the heat stack tree. The heat stack tree is flattened
    for easy consumption.

    :param container: name of the Swift container / plan name
    """

    def __init__(self, container=constants.DEFAULT_CONTAINER_NAME):
        super(GetFlattenedParametersAction, self).__init__()
        self.container = container

    def run(self, context):
        heat = self.get_orchestration_client(context)
        swift = self.get_object_client(context)
        try:
            return stack_param_utils.get_flattened_parameters(
                swift, heat, self.container)
        except Exception as err:
            LOG.exception(six.text_type(err))
            # ``error=`` keyword: Result's first positional parameter is
            # ``data``, so the previous positional call reported failures
            # as successful output.
            return actions.Result(error=six.text_type(err))
class GetProfileOfFlavorAction(base.TripleOAction):
    """Gets the profile name for a given flavor name.

    Need flavor object to get profile name since get_keys method is
    not available for external access. so we have created an action
    to get profile name from flavor name.

    :param flavor_name: Flavor name
    :return: profile name
    """

    def __init__(self, flavor_name):
        super(GetProfileOfFlavorAction, self).__init__()
        self.flavor_name = flavor_name

    def run(self, context):
        # Resolve the profile via the compute (nova) client; known
        # derivation failures come back as an error Result.
        nova = self.get_compute_client(context)
        try:
            return parameter_utils.get_profile_of_flavor(
                self.flavor_name, nova)
        except exception.DeriveParamsError as err:
            LOG.error('Derive Params Error: %s', err)
            return actions.Result(error=str(err))
class GetNetworkConfigAction(base.TripleOAction):
    """Gets network configuration details from available heat parameters.

    :param role_name: role to fetch the network configuration for
    :param container: name of the Swift container / plan name
    """

    def __init__(self, role_name, container=constants.DEFAULT_CONTAINER_NAME):
        super(GetNetworkConfigAction, self).__init__()
        self.container = container
        self.role_name = role_name

    def run(self, context):
        try:
            return stack_param_utils.get_network_configs(
                self.get_baremetal_client(context),
                self.get_compute_client(context),
                self.container, self.role_name)
        except Exception as err:
            LOG.exception(six.text_type(err))
            # ``error=`` keyword so the failure is reported as an error
            # result instead of being passed positionally into ``data``.
            return actions.Result(error=six.text_type(err))

View File

@ -1,304 +0,0 @@
# Copyright 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from operator import itemgetter
import shutil
import tempfile
import yaml
from heatclient import exc as heatexceptions
from mistral_lib import actions
from oslo_concurrency import processutils
import six
from swiftclient import exceptions as swiftexceptions
from tripleo_common.actions import base
from tripleo_common import constants
from tripleo_common import exception
from tripleo_common.utils import plan as plan_utils
from tripleo_common.utils import roles as roles_utils
from tripleo_common.utils import swift as swiftutils
LOG = logging.getLogger(__name__)
class ListPlansAction(base.TripleOAction):
    """Lists deployment plans

    This action lists all deployment plans residing in the undercloud. A
    deployment plan consists of a container marked with metadata
    'x-container-meta-usage-tripleo'.
    """

    def run(self, context):
        # A plan is any Swift container carrying the tripleo usage
        # metadata key; collect into a set to de-duplicate.
        oc = self.get_object_client(context)
        plans = set()
        for item in oc.get_account()[1]:
            headers = oc.get_container(item['name'])[0]
            if constants.TRIPLEO_META_USAGE_KEY in headers.keys():
                plans.add(item['name'])
        return list(plans)
class DeletePlanAction(base.TripleOAction):
    """Deletes a plan and associated files

    Deletes a plan by deleting the container matching plan_name. It
    will not delete the plan if a stack exists with the same name.

    Raises StackInUseError if a stack with the same name as plan_name
    exists.
    """

    def __init__(self, container):
        super(DeletePlanAction, self).__init__()
        self.container = container

    def run(self, context):
        # Refuse to delete while a heat stack of the same name exists;
        # heat raises HTTPNotFound when there is no such stack.
        try:
            stack = self.get_orchestration_client(context).stacks.get(
                self.container, resolve_outputs=False)
        except heatexceptions.HTTPNotFound:
            pass
        else:
            if stack is not None:
                raise exception.StackInUseError(name=self.container)

        error_text = None
        try:
            swift = self.get_object_client(context)
            # Remove the plan container and its companion containers.
            for name in (self.container,
                         "%s-swift-rings" % self.container,
                         "%s-messages" % self.container):
                swiftutils.delete_container(swift, name)
        except swiftexceptions.ClientException as ce:
            LOG.exception("Swift error deleting plan.")
            error_text = ce.msg
        except Exception as err:
            LOG.exception("Error deleting plan.")
            error_text = six.text_type(err)
        if error_text:
            return actions.Result(error=error_text)
class ListRolesAction(base.TripleOAction):
    """Returns a deployment plan's roles

    DEPRECATED

    Parses roles_data.yaml and returns the names of all available roles.

    :param container: name of the Swift container / plan name
    :param detail: if false(default), displays role names only. if true,
                   returns all roles data
    :return: list of roles in the container's deployment plan
    """

    def __init__(self, container=constants.DEFAULT_CONTAINER_NAME,
                 role_file_name=constants.OVERCLOUD_J2_ROLES_NAME,
                 detail=False):
        super(ListRolesAction, self).__init__()
        self.container = container
        self.role_file_name = role_file_name
        self.detail = detail

    def run(self, context):
        # Any failure (client construction included) becomes an error
        # Result rather than propagating.
        try:
            swift = self.get_object_client(context)
            return roles_utils.get_roles_from_plan(
                swift, container=self.container,
                role_file_name=self.role_file_name,
                detail=self.detail)
        except Exception as err:
            return actions.Result(error=six.text_type(err))
class ExportPlanAction(base.TripleOAction):
    """Exports a deployment plan

    This action exports a deployment plan with a given name. The plan
    templates are downloaded from the Swift container, packaged up in a
    tarball and uploaded to Swift.
    """

    def __init__(self, plan, delete_after, exports_container):
        super(ExportPlanAction, self).__init__()
        self.plan = plan
        self.delete_after = delete_after
        self.exports_container = exports_container

    def run(self, context):
        swift = self.get_object_client(context)
        swift_service = self.get_object_service(context)
        tmp_dir = tempfile.mkdtemp()
        tarball_name = '%s.tar.gz' % self.plan
        try:
            # Download the plan into scratch space, then tar + upload it.
            swiftutils.download_container(swift, self.plan, tmp_dir)
            swiftutils.create_and_upload_tarball(
                swift_service, tmp_dir, self.exports_container, tarball_name,
                delete_after=self.delete_after)
        except swiftexceptions.ClientException as err:
            return actions.Result(
                error="Error attempting an operation on container: %s" % err)
        except (OSError, IOError) as err:
            return actions.Result(
                error="Error while writing file: %s" % err)
        except processutils.ProcessExecutionError as err:
            return actions.Result(
                error="Error while creating a tarball: %s" % err)
        except Exception as err:
            return actions.Result(
                error="Error exporting plan: %s" % err)
        finally:
            # Scratch directory is always removed, success or failure.
            shutil.rmtree(tmp_dir)
class ValidateRolesDataAction(base.TripleOAction):
    """Validates Roles Data

    Validates the format of input (verify that each role in input has the
    required attributes set. see README in roles directory in t-h-t),
    validates that roles in input exist in roles directory in deployment plan
    """

    def __init__(self, roles, available_roles,
                 container=constants.DEFAULT_CONTAINER_NAME):
        super(ValidateRolesDataAction, self).__init__()
        self.container = container
        self.roles = roles
        self.available_roles = available_roles

    def run(self, context):
        err_msg = ""
        # validate roles in input exist in roles directory in t-h-t
        try:
            roles_utils.check_role_exists(
                [role['name'] for role in self.available_roles],
                [role['name'] for role in self.roles])
        except Exception as chk_err:
            err_msg = str(chk_err)

        # validate each role's yaml individually, accumulating messages
        for role in self.roles:
            try:
                roles_utils.validate_role_yaml(yaml.safe_dump([role]))
            except exception.RoleMetadataError as rme:
                if 'name' in role:
                    err_msg += "\n%s for %s" % (str(rme), role['name'])
                else:
                    err_msg += "\n%s" % str(rme)

        if not err_msg:
            return actions.Result(data=True)
        return actions.Result(error=err_msg)
class UpdateRolesAction(base.TripleOAction):
    """Updates roles_data.yaml object in plan with given roles.

    :param roles: role input data (json)
    :param current_roles: data from roles_data.yaml file in plan (json)
    :param replace_all: boolean value indicating if input roles should merge
        with or replace data from roles_data.yaml. Defaults to False (merge)
    :param container: name of the Swift container / plan name
    """
    def __init__(self, roles, current_roles, replace_all=False,
                 container=constants.DEFAULT_CONTAINER_NAME):
        super(UpdateRolesAction, self).__init__()
        self.container = container
        self.roles = roles
        self.current_roles = current_roles
        self.replace_all = replace_all
    def run(self, context):
        if self.replace_all:
            # Replace mode: the input is the complete new role list.
            merged = self.roles
        else:
            # Merge mode: roles from the input override current roles that
            # share the same name; everything else is kept.
            by_name = {}
            for role in self.current_roles + self.roles:
                by_name[role['name']] = role
            merged = list(by_name.values())
        # At least one role to be saved must carry the 'primary' tag,
        # otherwise the overcloud would have no primary role.
        has_primary = any('tags' in role and 'primary' in role['tags']
                          for role in merged)
        if not has_primary:
            raise exception.RoleMetadataError("At least one role must contain"
                                              " a 'primary' tag.")
        # Sort descending by name so the result is predictable.
        ordered = sorted(merged, key=itemgetter('name'), reverse=True)
        return actions.Result(data={'roles': ordered})
class RemoveNoopDeployStepAction(base.TripleOAction):
    """Remove all the pre, post and deploy step in the plan-environment.

    :param container: name of the Swift container / plan name
    """
    def __init__(self, container=constants.DEFAULT_CONTAINER_NAME):
        super(RemoveNoopDeployStepAction, self).__init__()
        self.container = container
    def run(self, context):
        """Strip noop step mappings from both plan and user environments."""
        # get the stack. Error if doesn't exist
        heat = self.get_orchestration_client(context)
        try:
            stack = heat.stacks.get(self.container)
        except heatexceptions.HTTPNotFound:
            msg = "Error retrieving stack: %s" % self.container
            LOG.exception(msg)
            return actions.Result(error=msg)
        swift = self.get_object_client(context)
        # Get output and check if DeployStep are None
        # Build the removal candidates: the global DeploymentSteps mapping
        # plus the per-role Pre/PostConfig mappings for every role reported
        # in the stack's RoleData output.
        removals = ['OS::TripleO::DeploymentSteps']
        for output in stack.to_dict().get('outputs', {}):
            if output['output_key'] == 'RoleData':
                for role in output['output_value']:
                    removals.append("OS::TripleO::Tasks::%sPreConfig" % role)
                    removals.append("OS::TripleO::Tasks::%sPostConfig" % role)
        # Scrub the noop mappings from the plan environment and write back.
        plan_env = plan_utils.get_env(swift, self.container)
        self.remove_noops_from_env(removals, plan_env)
        plan_utils.put_env(swift, plan_env)
        # Same treatment for the user environment.
        user_env = plan_utils.get_user_env(swift, self.container)
        self.remove_noops_from_env(removals, user_env)
        plan_utils.put_user_env(swift, self.container, user_env)
    def remove_noops_from_env(self, removals, env):
        """Drop each entry of *removals* that maps to OS::Heat::None."""
        # Remove noop Steps
        for rm in removals:
            if rm in env.get('resource_registry', {}):
                # Only remove the mapping when it is actually the noop
                # resource; real mappings are left untouched.
                if env['resource_registry'][rm] == 'OS::Heat::None':
                    env['resource_registry'].pop(rm)

View File

@ -1,176 +0,0 @@
# Copyright 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import logging
from mistral_lib import actions
from tripleo_common.actions import base
from tripleo_common.actions import parameters as parameters_actions
from tripleo_common import constants
from tripleo_common import update
from tripleo_common.utils import template as template_utils
LOG = logging.getLogger(__name__)
def get_group_resources_after_delete(groupname, res_to_delete, resources):
    """Return the ResourceGroup members that remain after a scale-down.

    :param groupname: name of the Heat ResourceGroup resource
    :param res_to_delete: resources that are about to be deleted
    :param resources: full (nested) resource listing of the stack
    :returns: list of group member resources not being removed
    """
    target_group = next(
        item for item in resources
        if item.resource_name == groupname and
        item.resource_type == constants.RESOURCE_GROUP_TYPE)
    survivors = []
    for item in resources:
        # The parent stack id is the second-to-last path segment of the
        # resource's 'stack' link.
        stack_link = next(link['href'] for link in item.links
                          if link['rel'] == 'stack')
        _, parent_stack_id = stack_link.rsplit('/', 2)[1:]
        # Desired new count of nodes after the delete operation is the
        # count of existing group members that are neither being deleted
        # nor already in some DELETE_* state left over from a previous
        # failed update.
        if parent_stack_id != target_group.physical_resource_id:
            continue
        if item in res_to_delete:
            continue
        if item.resource_status.startswith('DELETE'):
            continue
        survivors.append(item)
    return survivors
class ScaleDownAction(base.TripleOAction):
    """Deletes overcloud nodes

    Before calling this method, ensure you have updated the plan
    with any templates or environment files as needed.

    :param timeout: stack update timeout in minutes
    :param nodes: nova server ids / hostnames of the nodes to delete
    :param container: name of the Swift container / plan name
    """
    def __init__(self, timeout, nodes=None,
                 container=constants.DEFAULT_CONTAINER_NAME):
        self.container = container
        # NOTE: ``nodes`` previously defaulted to a mutable ``[]`` shared
        # between every instance; use a None sentinel instead.
        self.nodes = nodes if nodes is not None else []
        self.timeout_mins = timeout
        super(ScaleDownAction, self).__init__()
    def _update_stack(self, parameters=None,
                      timeout_mins=constants.STACK_TIMEOUT_DEFAULT,
                      context=None):
        """PATCH-update the stack with the scaled-down parameters.

        Returns an error ``Result`` if the plan parameter update fails,
        otherwise triggers the Heat stack update and returns None.
        """
        # NOTE: ``parameters`` previously defaulted to a mutable ``{}``;
        # use a None sentinel instead.
        if parameters is None:
            parameters = {}
        heat = self.get_orchestration_client(context)
        swift = self.get_object_client(context)
        # TODO(rbrady): migrate _update_stack to it's own action and update
        # the workflow for scale down
        # update the plan parameters with the scaled down parameters
        update_params_action = parameters_actions.UpdateParametersAction(
            parameters, self.container)
        updated_plan = update_params_action.run(context)
        if isinstance(updated_plan, actions.Result):
            # UpdateParametersAction only returns a Result on failure;
            # propagate the error unchanged.
            return updated_plan
        processed_data = template_utils.process_templates(
            swift, heat, container=self.container
        )
        update.add_breakpoints_cleanup_into_env(processed_data['environment'])
        fields = processed_data.copy()
        fields['timeout_mins'] = timeout_mins
        fields['existing'] = True
        # As we do a PATCH update when deleting nodes, parameters set for a
        # stack before upgrade to newton (ex. ComputeRemovalPolicies),
        # would still take precedence over the ones set in parameter_defaults
        # after upgrade. Clear these parameters for backward compatibility.
        fields['clear_parameters'] = list(parameters.keys())
        LOG.debug('stack update params: %s', fields)
        heat.stacks.update(self.container, **fields)
    def _get_removal_params_from_heat(self, resources_by_role, resources):
        """Build the *Count / *RemovalPolicies stack parameters per role."""
        stack_params = {}
        for role, role_resources in resources_by_role.items():
            param_name = "{0}Count".format(role)
            # get real count of nodes for each role. *Count stack parameters
            # can not be used because stack parameters return parameters
            # passed by user no matter if previous update operation succeeded
            # or not
            group_members = get_group_resources_after_delete(
                role, role_resources, resources)
            stack_params[param_name] = str(len(group_members))
            # add instance resource names into removal_policies
            # so heat knows which instances should be removed
            removal_param = "{0}RemovalPolicies".format(role)
            stack_params[removal_param] = [{
                'resource_list': [r.resource_name for r in role_resources]
            }]
            # force reset the removal_policies_mode to 'append'
            # as 'update' can lead to deletion of unintended nodes.
            removal_mode = "{0}RemovalPoliciesMode".format(role)
            stack_params[removal_mode] = 'append'
        return stack_params
    def _match_hostname(self, heatclient, instance_list, res, stack_name):
        """Remove *res*'s hostname from instance_list if it matches.

        Only server-ish resources are considered; returns True when a
        matching hostname was found and removed.
        """
        type_patterns = ['DeployedServer', 'Server']
        if any(res.resource_type.endswith(x) for x in type_patterns):
            res_details = heatclient.resources.get(
                stack_name, res.resource_name)
            if 'name' in res_details.attributes:
                try:
                    instance_list.remove(res_details.attributes['name'])
                    return True
                except ValueError:
                    return False
        return False
    def run(self, context):
        heatclient = self.get_orchestration_client(context)
        resources = heatclient.resources.list(self.container, nested_depth=5)
        resources_by_role = collections.defaultdict(list)
        instance_list = list(self.nodes)
        for res in resources:
            stack_name, stack_id = next(
                x['href'] for x in res.links if
                x['rel'] == 'stack').rsplit('/', 2)[1:]
            try:
                # Match either by nova server id...
                instance_list.remove(res.physical_resource_id)
            except ValueError:
                # ...or by hostname; otherwise this resource is not one of
                # the nodes being removed.
                if not self._match_hostname(
                        heatclient, instance_list, res, stack_name):
                    continue
            # get resource to remove from resource group (it's parent resource
            # of nova server)
            role_resource = next(x for x in resources if
                                 x.physical_resource_id == stack_id)
            # get the role name which is parent resource name in Heat
            role = role_resource.parent_resource
            resources_by_role[role].append(role_resource)
        resources_by_role = dict(resources_by_role)
        if instance_list:
            raise ValueError(
                "Couldn't find following instances in stack %s: %s" %
                (self.container, ','.join(instance_list)))
        # decrease count for each role (or resource group) and set removal
        # policy for each resource group
        stack_params = self._get_removal_params_from_heat(
            resources_by_role, resources)
        return self._update_stack(parameters=stack_params, context=context)

View File

@ -1,77 +0,0 @@
# Copyright 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from mistral_lib import actions
from tripleo_common.actions import base
from tripleo_common.utils import passwords as password_utils
class GetSshKeyAction(base.TripleOAction):
    """Fetch one half of the validations SSH keypair.

    Subclasses select which half via the ``key_type`` attribute.  The key
    is read from the Mistral ``ssh_keys`` environment; if that lookup
    fails, a fresh keypair is generated and stored there first.
    """
    def run(self, context):
        workflow_client = self.get_workflow_client(context)
        try:
            stored = workflow_client.environments.get('ssh_keys')
            key_material = stored.variables[self.key_type]
        except Exception:
            # No usable environment yet -- create a keypair and persist it
            # so subsequent calls reuse the same keys.
            keypair = password_utils.create_ssh_keypair()
            key_material = keypair[self.key_type]
            workflow_client.environments.create(
                name='ssh_keys',
                description='SSH keys for TripleO validations',
                variables=keypair)
        return key_material
class GetPubkeyAction(GetSshKeyAction):
    """Return the public key of the validations SSH keypair."""
    key_type = 'public_key'
class GetPrivkeyAction(GetSshKeyAction):
    """Return the private key of the validations SSH keypair."""
    key_type = 'private_key'
class Enabled(base.TripleOAction):
    """Indicate whether the validations have been enabled."""
    def _validations_enabled(self, context):
        """Detect whether the validations are enabled on the undercloud."""
        workflow_client = self.get_workflow_client(context)
        try:
            # NOTE: the `ssh_keys` environment is created by
            # instack-undercloud only when the validations are enabled on the
            # undercloud (or when they're installed manually). Therefore, we
            # can check for its presence here:
            workflow_client.environments.get('ssh_keys')
            return True
        except Exception:
            return False
    def run(self, context):
        output = {'stderr': ''}
        if self._validations_enabled(context):
            output['stdout'] = 'Validations are enabled'
            return actions.Result(data=output)
        output['stdout'] = 'Validations are disabled'
        return actions.Result(error=output)

View File

@ -1,46 +0,0 @@
# Copyright 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.scheduler import filters
from oslo_log import log as logging
LOG = logging.getLogger(__name__)
class TripleOCapabilitiesFilter(filters.BaseHostFilter):
    """Filter hosts based on capabilities in boot request

    The standard Nova ComputeCapabilitiesFilter does not respect capabilities
    requested in the scheduler_hints field, so we need a custom one in order
    to be able to do predictable placement of nodes.
    """
    # list of hosts doesn't change within a request
    run_filter_once_per_request = True
    def host_passes(self, host_state, spec_obj):
        tagged_node = host_state.stats.get('node')
        requested = spec_obj.scheduler_hints.get('capabilities:node')
        # The instance didn't request a specific node: accept any host.
        if not requested:
            LOG.debug('No specific node requested')
            return True
        # Accept only the host whose node tag matches the requested one.
        if tagged_node == requested[0]:
            LOG.debug('Node tagged %s matches requested node %s', tagged_node,
                      requested[0])
            return True
        LOG.debug('Node tagged %s does not match requested node %s',
                  tagged_node, requested[0])
        return False

View File

@ -1,27 +0,0 @@
# Copyright 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import nova
from tripleo_common.filters import capabilities_filter
def tripleo_filters():
    """Return a list of filter classes for TripleO

    This is a wrapper around the Nova all_filters function so we can add our
    filters to the resulting list.
    """
    upstream_filters = nova.scheduler.filters.all_filters()
    return upstream_filters + [capabilities_filter.TripleOCapabilitiesFilter]

View File

@ -387,66 +387,6 @@ class TestConfigureRootDeviceAction(base.TestCase):
self.assertEqual(self.ironic.node.update.call_count, 0)
class TestCellV2DiscoverHostsAction(base.TestCase):
    """Tests for CellV2DiscoverHostsAction (nova cell v2 host discovery)."""
    @mock.patch('tripleo_common.utils.nodes.run_nova_cell_v2_discovery')
    def test_run(self, mock_command):
        """The action delegates to the nodes utility exactly once."""
        action = baremetal.CellV2DiscoverHostsAction()
        action.run(mock.MagicMock())
        mock_command.assert_called_once()
    @mock.patch('tripleo_common.utils.nodes.run_nova_cell_v2_discovery')
    def test_failure(self, mock_command):
        """A failing discovery command is surfaced as an error Result."""
        mock_command.side_effect = processutils.ProcessExecutionError(
            exit_code=1,
            stdout='captured stdout',
            stderr='captured stderr',
            cmd='command'
        )
        action = baremetal.CellV2DiscoverHostsAction()
        result = action.run(mock.MagicMock())
        self.assertTrue(result.is_error())
        mock_command.assert_called_once()
class TestGetProfileAction(base.TestCase):
    """Tests for GetProfileAction (profile lookup from node capabilities)."""
    def test_run(self):
        """The 'profile:<x>' capability is extracted alongside the uuid."""
        mock_ctx = mock.MagicMock()
        node = {
            'uuid': 'abcd1',
            'properties': {
                'capabilities': 'profile:compute'
            }
        }
        action = baremetal.GetProfileAction(node=node)
        result = action.run(mock_ctx)
        expected_result = {
            'uuid': 'abcd1',
            'profile': 'compute'
        }
        self.assertEqual(expected_result, result)
class TestGetNodeHintAction(base.TestCase):
    """Tests for GetNodeHintAction (scheduler hint from capabilities)."""
    def test_run(self):
        """The 'node:<hint>' capability is extracted alongside the uuid."""
        mock_ctx = mock.MagicMock()
        node = {
            'uuid': 'abcd1',
            'properties': {
                'capabilities': 'profile:compute,node:compute-0'
            }
        }
        action = baremetal.GetNodeHintAction(node=node)
        result = action.run(mock_ctx)
        expected_result = {
            'uuid': 'abcd1',
            'hint': 'compute-0'
        }
        self.assertEqual(expected_result, result)
@mock.patch.object(baremetal.socket, 'gethostbyname', lambda x: x)
class TestGetCandidateNodes(base.TestCase):
def setUp(self):

View File

@ -1,561 +0,0 @@
# Copyright 2017 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from mistral_lib import actions
from tripleo_common.actions import derive_params
from tripleo_common.tests import base
class GetDpdkNicsNumaInfoActionTest(base.TestCase):
    """Tests for GetDpdkNicsNumaInfoAction.

    Each test feeds an os-net-config style ``network_configs`` list plus
    introspection data and checks the derived DPDK NIC/NUMA information,
    or the error produced via the mocked mistral_lib ``actions.Result``.
    """
    def test_run_dpdk_port(self):
        """A single DPDK port resolves to its NUMA node and bridge name."""
        network_configs = [{
            "members": [{
                "members": [{"name": "nic5", "type": "interface"}],
                "name": "dpdk0",
                "type": "ovs_dpdk_port",
                "mtu": 8192,
                "rx_queue": 4}],
            "name": "br-link",
            "type": "ovs_user_bridge",
            "addresses": [{"ip_netmask": ""}]}]
        inspect_data = {
            "numa_topology": {
                "nics": [{"name": "ens802f1", "numa_node": 1},
                         {"name": "ens802f0", "numa_node": 1},
                         {"name": "eno1", "numa_node": 0},
                         {"name": "eno2", "numa_node": 0},
                         {"name": "enp12s0f1", "numa_node": 0},
                         {"name": "enp12s0f0", "numa_node": 0},
                         {"name": "enp13s0f0", "numa_node": 0},
                         {"name": "enp13s0f1", "numa_node": 0}]
            },
            "inventory": {
                "interfaces": [{"has_carrier": True,
                                "name": "ens802f1"},
                               {"has_carrier": True,
                                "name": "ens802f0"},
                               {"has_carrier": True,
                                "name": "eno1"},
                               {"has_carrier": True,
                                "name": "eno2"},
                               {"has_carrier": True,
                                "name": "enp12s0f0"},
                               {"has_carrier": False,
                                "name": "enp13s0f0"},
                               {"has_carrier": False,
                                "name": "enp13s0f1"}]
            }
        }
        expected_result = [{'bridge_name': 'br-link', 'name': 'ens802f1',
                            'mtu': 8192, 'numa_node': 1,
                            'addresses': [{'ip_netmask': ''}]}]
        mock_ctx = mock.MagicMock()
        action = derive_params.GetDpdkNicsNumaInfoAction(network_configs,
                                                         inspect_data)
        result = action.run(mock_ctx)
        self.assertEqual(result, expected_result)
    def test_run_dpdk_bond(self):
        """Both members of a DPDK bond are reported, sharing the bond MTU."""
        network_configs = [{
            "members": [{"type": "ovs_dpdk_bond", "name": "dpdkbond0",
                         "mtu": 9000, "rx_queue": 4,
                         "members": [{"type": "ovs_dpdk_port",
                                      "name": "dpdk0",
                                      "members": [{"type": "interface",
                                                   "name": "nic4"}]},
                                     {"type": "ovs_dpdk_port",
                                      "name": "dpdk1",
                                      "members": [{"type": "interface",
                                                   "name": "nic5"}]}]}],
            "name": "br-link",
            "type": "ovs_user_bridge",
            "addresses": [{"ip_netmask": "172.16.10.0/24"}]}]
        inspect_data = {
            "numa_topology": {
                "nics": [{"name": "ens802f1", "numa_node": 1},
                         {"name": "ens802f0", "numa_node": 1},
                         {"name": "eno1", "numa_node": 0},
                         {"name": "eno2", "numa_node": 0},
                         {"name": "enp12s0f1", "numa_node": 0},
                         {"name": "enp12s0f0", "numa_node": 0},
                         {"name": "enp13s0f0", "numa_node": 0},
                         {"name": "enp13s0f1", "numa_node": 0}]
            },
            "inventory": {
                "interfaces": [{"has_carrier": True,
                                "name": "ens802f1"},
                               {"has_carrier": True,
                                "name": "ens802f0"},
                               {"has_carrier": True,
                                "name": "eno1"},
                               {"has_carrier": True,
                                "name": "eno2"},
                               {"has_carrier": True,
                                "name": "enp12s0f0"},
                               {"has_carrier": False,
                                "name": "enp13s0f0"},
                               {"has_carrier": False,
                                "name": "enp13s0f1"}]
            }
        }
        expected_result = [{'bridge_name': 'br-link', 'mtu': 9000,
                            'numa_node': 1, 'name': 'ens802f0',
                            'addresses': [{'ip_netmask': '172.16.10.0/24'}]},
                           {'bridge_name': 'br-link', 'mtu': 9000,
                            'numa_node': 1, 'name': 'ens802f1',
                            'addresses': [{'ip_netmask': '172.16.10.0/24'}]}]
        mock_ctx = mock.MagicMock()
        action = derive_params.GetDpdkNicsNumaInfoAction(network_configs,
                                                         inspect_data)
        result = action.run(mock_ctx)
        self.assertEqual(result, expected_result)
    @mock.patch.object(actions, 'Result', autospec=True)
    def test_run_no_inspect_nics(self, mock_actions):
        """Missing numa_topology.nics introspection data is an error."""
        network_configs = [{
            "members": [{
                "members": [{"name": "nic5", "type": "interface"}],
                "name": "dpdk0",
                "type": "ovs_dpdk_port",
                "mtu": 8192,
                "rx_queue": 4}],
            "name": "br-link",
            "type": "ovs_user_bridge"}]
        inspect_data = {
            "numa_topology": {
                "nics": []
            },
            "inventory": {
                "interfaces": [{"has_carrier": True,
                                "name": "ens802f1"},
                               {"has_carrier": True,
                                "name": "ens802f0"},
                               {"has_carrier": True,
                                "name": "eno1"},
                               {"has_carrier": True,
                                "name": "eno2"},
                               {"has_carrier": True,
                                "name": "enp12s0f0"},
                               {"has_carrier": False,
                                "name": "enp13s0f0"},
                               {"has_carrier": False,
                                "name": "enp13s0f1"}]
            }
        }
        mock_ctx = mock.MagicMock()
        action = derive_params.GetDpdkNicsNumaInfoAction(network_configs,
                                                         inspect_data)
        action.run(mock_ctx)
        msg = 'Introspection data does not have numa_topology.nics'
        mock_actions.assert_called_once_with(error=msg)
    @mock.patch.object(actions, 'Result', autospec=True)
    def test_run_no_inspect_interfaces(self, mock_actions):
        """Missing inventory.interfaces introspection data is an error."""
        network_configs = [{
            "members": [{
                "members": [{"name": "nic5", "type": "interface"}],
                "name": "dpdk0",
                "type": "ovs_dpdk_port",
                "mtu": 8192,
                "rx_queue": 4}],
            "name": "br-link",
            "type": "ovs_user_bridge"}]
        inspect_data = {
            "numa_topology": {
                "nics": []
            },
            "inventory": {
                "interfaces": []
            }
        }
        mock_ctx = mock.MagicMock()
        action = derive_params.GetDpdkNicsNumaInfoAction(network_configs,
                                                         inspect_data)
        action.run(mock_ctx)
        msg = 'Introspection data does not have inventory.interfaces'
        mock_actions.assert_called_once_with(error=msg)
    @mock.patch.object(actions, 'Result', autospec=True)
    def test_run_no_inspect_active_interfaces(self, mock_actions):
        """No interface with carrier means active NICs can't be derived."""
        network_configs = [{
            "members": [{
                "members": [{"name": "nic5", "type": "interface"}],
                "name": "dpdk0",
                "type": "ovs_dpdk_port",
                "mtu": 8192,
                "rx_queue": 4}],
            "name": "br-link",
            "type": "ovs_user_bridge"}]
        inspect_data = {
            "numa_topology": {
                "nics": [{"name": "ens802f1", "numa_node": 1},
                         {"name": "ens802f0", "numa_node": 1},
                         {"name": "eno1", "numa_node": 0},
                         {"name": "eno2", "numa_node": 0},
                         {"name": "enp12s0f1", "numa_node": 0},
                         {"name": "enp12s0f0", "numa_node": 0},
                         {"name": "enp13s0f0", "numa_node": 0},
                         {"name": "enp13s0f1", "numa_node": 0}]
            },
            "inventory": {
                "interfaces": [{"has_carrier": False,
                                "name": "enp13s0f0"},
                               {"has_carrier": False,
                                "name": "enp13s0f1"}]
            }
        }
        mock_ctx = mock.MagicMock()
        action = derive_params.GetDpdkNicsNumaInfoAction(network_configs,
                                                         inspect_data)
        action.run(mock_ctx)
        msg = 'Unable to determine active interfaces (has_carrier)'
        mock_actions.assert_called_once_with(error=msg)
    @mock.patch.object(actions, 'Result', autospec=True)
    def test_run_no_numa_node(self, mock_actions):
        """A DPDK NIC whose entry lacks numa_node is reported by name."""
        network_configs = [{
            "members": [{
                "members": [{"name": "nic5", "type": "interface"}],
                "name": "dpdk0",
                "type": "ovs_dpdk_port",
                "mtu": 8192,
                "rx_queue": 4}],
            "name": "br-link",
            "type": "ovs_user_bridge"}]
        inspect_data = {
            "numa_topology": {
                "nics": [{"name": "ens802f1"},
                         {"name": "ens802f0", "numa_node": 1},
                         {"name": "eno1", "numa_node": 0},
                         {"name": "eno2", "numa_node": 0},
                         {"name": "enp12s0f1", "numa_node": 0},
                         {"name": "enp12s0f0", "numa_node": 0},
                         {"name": "enp13s0f0", "numa_node": 0},
                         {"name": "enp13s0f1", "numa_node": 0}]
            },
            "inventory": {
                "interfaces": [{"has_carrier": True,
                                "name": "ens802f1"},
                               {"has_carrier": True,
                                "name": "ens802f0"},
                               {"has_carrier": True,
                                "name": "eno1"},
                               {"has_carrier": True,
                                "name": "eno2"},
                               {"has_carrier": True,
                                "name": "enp12s0f0"},
                               {"has_carrier": False,
                                "name": "enp13s0f0"},
                               {"has_carrier": False,
                                "name": "enp13s0f1"}]
            }
        }
        mock_ctx = mock.MagicMock()
        action = derive_params.GetDpdkNicsNumaInfoAction(network_configs,
                                                         inspect_data)
        action.run(mock_ctx)
        msg = 'Unable to determine NUMA node for DPDK NIC: ens802f1'
        mock_actions.assert_called_once_with(error=msg)
class GetDpdkCoreListActionTest(base.TestCase):
    """Tests for GetDpdkCoreListAction (PMD core selection per NUMA node)."""
    def test_run(self):
        """Thread siblings are picked per-node per the requested counts."""
        inspect_data = {
            "numa_topology": {
                "cpus": [{"cpu": 21, "numa_node": 1,
                          "thread_siblings": [38, 82]},
                         {"cpu": 27, "numa_node": 0,
                          "thread_siblings": [20, 64]},
                         {"cpu": 3, "numa_node": 1,
                          "thread_siblings": [25, 69]},
                         {"cpu": 20, "numa_node": 0,
                          "thread_siblings": [15, 59]},
                         {"cpu": 17, "numa_node": 1,
                          "thread_siblings": [34, 78]},
                         {"cpu": 16, "numa_node": 0,
                          "thread_siblings": [11, 55]}]
            }
        }
        numa_nodes_cores_count = [2, 1]
        expected_result = "20,64,15,59,38,82"
        mock_ctx = mock.MagicMock()
        action = derive_params.GetDpdkCoreListAction(inspect_data,
                                                     numa_nodes_cores_count)
        result = action.run(mock_ctx)
        self.assertEqual(result, expected_result)
    @mock.patch.object(actions, 'Result', autospec=True)
    def test_run_invalid_inspect_data(self, mock_actions):
        """An empty cpus list in the introspection data is an error."""
        inspect_data = {"numa_topology": {"cpus": []}}
        numa_nodes_cores_count = [2, 1]
        mock_ctx = mock.MagicMock()
        action = derive_params.GetDpdkCoreListAction(inspect_data,
                                                     numa_nodes_cores_count)
        action.run(mock_ctx)
        msg = 'Introspection data does not have numa_topology.cpus'
        mock_actions.assert_called_once_with(error=msg)
    @mock.patch.object(actions, 'Result', autospec=True)
    def test_run_invalid_numa_nodes_cores_count(self, mock_actions):
        """An empty per-node core count list is an error."""
        inspect_data = {"numa_topology": {
            "cpus": [{"cpu": 21, "numa_node": 1, "thread_siblings": [38, 82]},
                     {"cpu": 27, "numa_node": 0, "thread_siblings": [20, 64]}]
        }}
        numa_nodes_cores_count = []
        mock_ctx = mock.MagicMock()
        action = derive_params.GetDpdkCoreListAction(inspect_data,
                                                     numa_nodes_cores_count)
        action.run(mock_ctx)
        msg = 'CPU physical cores count for each NUMA nodes is not available'
        mock_actions.assert_called_once_with(error=msg)
class GetHostCpusListActionTest(base.TestCase):
    """Tests for GetHostCpusListAction (host OS core reservation)."""
    def test_run_valid_inspect_data(self):
        """First physical core's thread siblings per NUMA node are used."""
        inspect_data = {
            "numa_topology": {
                "cpus": [{"cpu": 21, "numa_node": 1,
                          "thread_siblings": [38, 82]},
                         {"cpu": 27, "numa_node": 0,
                          "thread_siblings": [20, 64]},
                         {"cpu": 3, "numa_node": 1,
                          "thread_siblings": [25, 69]},
                         {"cpu": 20, "numa_node": 0,
                          "thread_siblings": [15, 59]}]
            }
        }
        expected_result = "15,59,25,69"
        mock_ctx = mock.MagicMock()
        action = derive_params.GetHostCpusListAction(inspect_data)
        result = action.run(mock_ctx)
        self.assertEqual(result, expected_result)
    @mock.patch.object(actions, 'Result', autospec=True)
    def test_run_invalid_inspect_data(self, mock_actions):
        """An empty cpus list in the introspection data is an error."""
        inspect_data = {"numa_topology": {"cpus": []}}
        mock_ctx = mock.MagicMock()
        action = derive_params.GetHostCpusListAction(inspect_data)
        action.run(mock_ctx)
        msg = 'Introspection data does not have numa_topology.cpus'
        mock_actions.assert_called_once_with(error=msg)
class GetDpdkSocketMemoryActionTest(base.TestCase):
    """Tests for GetDpdkSocketMemoryAction (per-NUMA-node socket memory)."""
    def test_run_valid_dpdk_nics_numa_info(self):
        """Socket memory is sized from the NIC's MTU on its NUMA node."""
        dpdk_nics_numa_info = [{"name": "ens802f1", "numa_node": 1,
                                "mtu": 8192}]
        numa_nodes = [0, 1]
        overhead = 800
        packet_size_in_buffer = (4096 * 64)
        expected_result = "1024,3072"
        mock_ctx = mock.MagicMock()
        action = derive_params.GetDpdkSocketMemoryAction(
            dpdk_nics_numa_info, numa_nodes, overhead,
            packet_size_in_buffer)
        result = action.run(mock_ctx)
        self.assertEqual(result, expected_result)
    def test_run_multiple_mtu_in_same_numa_node(self):
        """Multiple MTUs on one node contribute to that node's total."""
        dpdk_nics_numa_info = [{"name": "ens802f1", "numa_node": 1,
                                "mtu": 1500},
                               {"name": "ens802f2", "numa_node": 1,
                                "mtu": 2048}]
        numa_nodes = [0, 1]
        overhead = 800
        packet_size_in_buffer = (4096 * 64)
        expected_result = "1024,2048"
        mock_ctx = mock.MagicMock()
        action = derive_params.GetDpdkSocketMemoryAction(
            dpdk_nics_numa_info, numa_nodes, overhead, packet_size_in_buffer)
        result = action.run(mock_ctx)
        self.assertEqual(result, expected_result)
    def test_run_duplicate_mtu_in_same_numa_node(self):
        """Duplicate MTU values on the same node are not double-counted."""
        dpdk_nics_numa_info = [{"name": "ens802f1", "numa_node": 1,
                                "mtu": 4096},
                               {"name": "ens802f2", "numa_node": 1,
                                "mtu": 4096}]
        numa_nodes = [0, 1]
        overhead = 800
        packet_size_in_buffer = (4096 * 64)
        expected_result = "1024,2048"
        mock_ctx = mock.MagicMock()
        action = derive_params.GetDpdkSocketMemoryAction(
            dpdk_nics_numa_info, numa_nodes, overhead, packet_size_in_buffer)
        result = action.run(mock_ctx)
        self.assertEqual(result, expected_result)
    def test_run_valid_roundup_mtu(self):
        """A non-power-of-two MTU is rounded up before sizing memory."""
        dpdk_nics_numa_info = [{"name": "ens802f1", "numa_node": 1,
                                "mtu": 1200}]
        numa_nodes = [0, 1]
        overhead = 800
        packet_size_in_buffer = (4096 * 64)
        expected_result = "1024,2048"
        mock_ctx = mock.MagicMock()
        action = derive_params.GetDpdkSocketMemoryAction(
            dpdk_nics_numa_info, numa_nodes, overhead,
            packet_size_in_buffer)
        result = action.run(mock_ctx)
        self.assertEqual(result, expected_result)
class ConvertNumberToRangeListActionTest(base.TestCase):
    """Tests for ConvertNumberToRangeListAction (compress numbers to ranges)."""
    def test_run_with_ranges(self):
        """Consecutive runs are collapsed into 'a-b' range notation."""
        num_list = "0,22,23,24,25,60,65,66,67"
        expected_result = "0,22-25,60,65-67"
        mock_ctx = mock.MagicMock()
        action = derive_params.ConvertNumberToRangeListAction(num_list)
        result = action.run(mock_ctx)
        self.assertEqual(result, expected_result)
    # NOTE(review): the Result patch appears unused in this test (the plain
    # string result is asserted directly) -- confirm the decorator is needed.
    @mock.patch.object(actions, 'Result', autospec=True)
    def test_run_with_no_range(self, mock_actions):
        """Non-consecutive numbers pass through unchanged."""
        num_list = "0,22,24,60,65,67"
        expected_result = "0,22,24,60,65,67"
        mock_ctx = mock.MagicMock()
        action = derive_params.ConvertNumberToRangeListAction(num_list)
        result = action.run(mock_ctx)
        self.assertEqual(result, expected_result)
    @mock.patch.object(actions, 'Result', autospec=True)
    def test_run_with_empty_input(self, mock_actions):
        """A blank input string produces an error Result."""
        num_list = ""
        mock_ctx = mock.MagicMock()
        action = derive_params.ConvertNumberToRangeListAction(num_list)
        action.run(mock_ctx)
        msg = "Input param 'num_list' is blank."
        mock_actions.assert_called_once_with(error=msg)
    @mock.patch.object(actions, 'Result', autospec=True)
    def test_run_with_invalid_input(self, mock_actions):
        """A non-numeric token produces an error Result with the cause."""
        num_list = ",d"
        mock_ctx = mock.MagicMock()
        action = derive_params.ConvertNumberToRangeListAction(num_list)
        action.run(mock_ctx)
        msg = ("Invalid number in input param 'num_list': invalid "
               "literal for int() with base 10: ''")
        mock_actions.assert_called_once_with(error=msg)
class ConvertRangeToNumberListActionTest(base.TestCase):
    """Tests for ConvertRangeToNumberListAction (expand ranges to numbers)."""
    def test_run_with_ranges_in_comma_delimited_str(self):
        """'a-b' ranges in a comma-delimited string are expanded."""
        range_list = "24-27,60,65-67"
        expected_result = "24,25,26,27,60,65,66,67"
        mock_ctx = mock.MagicMock()
        action = derive_params.ConvertRangeToNumberListAction(range_list)
        result = action.run(mock_ctx)
        self.assertEqual(result, expected_result)
    def test_run_with_ranges_in_comma_delimited_list(self):
        """A list of range strings is accepted as input too."""
        range_list = ['24-27', '60', '65-67']
        expected_result = "24,25,26,27,60,65,66,67"
        mock_ctx = mock.MagicMock()
        action = derive_params.ConvertRangeToNumberListAction(range_list)
        result = action.run(mock_ctx)
        self.assertEqual(result, expected_result)
    # NOTE(review): the Result patch appears unused in this test (the plain
    # string result is asserted directly) -- confirm the decorator is needed.
    @mock.patch.object(actions, 'Result', autospec=True)
    def test_run_with_ranges_exclude_num(self, mock_actions):
        """A '^n' entry excludes n from the expanded output."""
        range_list = "24-27,^25,60,65-67"
        expected_result = "24,26,27,60,65,66,67"
        mock_ctx = mock.MagicMock()
        action = derive_params.ConvertRangeToNumberListAction(range_list)
        result = action.run(mock_ctx)
        self.assertEqual(result, expected_result)
    def test_run_with_no_ranges(self):
        """Plain numbers pass through unchanged."""
        range_list = "24,25,26,27,60,65,66,67"
        expected_result = "24,25,26,27,60,65,66,67"
        mock_ctx = mock.MagicMock()
        action = derive_params.ConvertRangeToNumberListAction(range_list)
        result = action.run(mock_ctx)
        self.assertEqual(result, expected_result)
    @mock.patch.object(actions, 'Result', autospec=True)
    def test_run_with_empty_input(self, mock_actions):
        """A blank input produces an error Result."""
        range_list = ""
        mock_ctx = mock.MagicMock()
        action = derive_params.ConvertRangeToNumberListAction(range_list)
        action.run(mock_ctx)
        msg = "Input param 'range_list' is blank."
        mock_actions.assert_called_once_with(error=msg)
    @mock.patch.object(actions, 'Result', autospec=True)
    def test_run_with_invalid_input(self, mock_actions):
        """A malformed token produces an error Result with the cause."""
        range_list = ",d"
        mock_ctx = mock.MagicMock()
        action = derive_params.ConvertRangeToNumberListAction(range_list)
        action.run(mock_ctx)
        msg = ("Invalid number in input param 'range_list': invalid "
               "literal for int() with base 10: ''")
        mock_actions.assert_called_once_with(error=msg)
    @mock.patch.object(actions, 'Result', autospec=True)
    def test_run_with_invalid_exclude_number(self, mock_actions):
        """Excluding a number outside the ranges is silently ignored."""
        range_list = "12-15,^17"
        expected_result = "12,13,14,15"
        mock_ctx = mock.MagicMock()
        action = derive_params.ConvertRangeToNumberListAction(range_list)
        result = action.run(mock_ctx)
        self.assertEqual(result, expected_result)

View File

@ -1,47 +0,0 @@
# Copyright 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from tripleo_common.actions import parameters
from tripleo_common import exception
from tripleo_common.tests import base
class GetProfileOfFlavorActionTest(base.TestCase):
    """Tests for GetProfileOfFlavorAction (flavor -> profile lookup)."""
    @mock.patch('tripleo_common.utils.parameters.get_profile_of_flavor')
    @mock.patch('tripleo_common.actions.base.TripleOAction.'
                'get_compute_client')
    def test_profile_found(self, mock_get_compute_client,
                           mock_get_profile_of_flavor):
        """The profile returned by the utility is passed straight through."""
        mock_ctx = mock.MagicMock()
        mock_get_profile_of_flavor.return_value = 'compute'
        action = parameters.GetProfileOfFlavorAction('oooq_compute')
        result = action.run(mock_ctx)
        expected_result = "compute"
        self.assertEqual(result, expected_result)
    @mock.patch('tripleo_common.utils.parameters.get_profile_of_flavor')
    @mock.patch('tripleo_common.actions.base.TripleOAction.'
                'get_compute_client')
    def test_profile_not_found(self, mock_get_compute_client,
                               mock_get_profile_of_flavor):
        """A DeriveParamsError from the utility becomes an error Result."""
        mock_ctx = mock.MagicMock()
        profile = (exception.DeriveParamsError, )
        mock_get_profile_of_flavor.side_effect = profile
        action = parameters.GetProfileOfFlavorAction('no_profile')
        result = action.run(mock_ctx)
        self.assertTrue(result.is_error())
        mock_get_profile_of_flavor.assert_called_once()

View File

@ -1,455 +0,0 @@
# Copyright 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from unittest import mock
from heatclient import exc as heatexceptions
from oslo_concurrency import processutils
from swiftclient import exceptions as swiftexceptions
from tripleo_common.actions import plan
from tripleo_common import exception
from tripleo_common.tests import base
ENV_YAML_CONTENTS = """
version: 1.0
template: overcloud.yaml
environments:
- path: overcloud-resource-registry-puppet.yaml
- path: environments/services/sahara.yaml
parameter_defaults:
BlockStorageCount: 42
OvercloudControlFlavor: yummy
passwords:
AdminPassword: aaaa
ZaqarPassword: zzzz
"""
RESOURCES_YAML_CONTENTS = """heat_template_version: 2016-04-08
resources:
Controller:
type: OS::Heat::ResourceGroup
NotRoleContoller:
type: OS::Dummy::DummyGroup
Compute:
type: OS::Heat::ResourceGroup
notresources:
BlockStorageDummy:
type: OS::Heat::ResourceGroup
"""
SAMPLE_ROLE = """
###############################################################################
# Role: sample #
###############################################################################
- name: sample
description: |
Sample!
networks:
- InternalApi
HostnameFormatDefault: '%stackname%-sample-%index%'
ServicesDefault:
- OS::TripleO::Services::Timesync
"""
SAMPLE_ROLE_OBJ = {
'HostnameFormatDefault': '%stackname%-sample-%index%',
'ServicesDefault': ['OS::TripleO::Services::Timesync'],
'description': 'Sample!\n',
'name': 'sample',
'networks': ['InternalApi']
}
SAMPLE_ROLE_2 = """
###############################################################################
# Role: sample2 #
###############################################################################
- name: sample2
description: |
Sample2!
networks:
- InternalApi
HostnameFormatDefault: '%stackname%-sample-%index%'
ServicesDefault:
- OS::TripleO::Services::Timesync
"""
SAMPLE_ROLE_2_OBJ = {
'HostnameFormatDefault': '%stackname%-sample-%index%',
'ServicesDefault': ['OS::TripleO::Services::Timesync'],
'description': 'Sample2!\n',
'name': 'sample2',
'networks': ['InternalApi']
}
UPDATED_ROLE = """
###############################################################################
# Role: sample #
###############################################################################
- name: sample
description: |
Sample!
networks:
- InternalApi
- ExternalApi
tags:
- primary
HostnameFormatDefault: '%stackname%-sample-%index%'
ServicesDefault:
- OS::TripleO::Services::Timesync
"""
UPDATED_ROLE_OBJ = {
'HostnameFormatDefault': '%stackname%-sample-%index%',
'ServicesDefault': ['OS::TripleO::Services::Timesync'],
'description': 'Sample!\n',
'name': 'sample',
'networks': ['InternalApi', 'ExternalApi'],
'tags': ['primary']
}
class ListPlansActionTest(base.TestCase):
    """Tests for ListPlansAction, which lists Swift-backed plan containers."""

    def setUp(self):
        super(ListPlansActionTest, self).setUp()
        self.container = 'overcloud'
        self.ctx = mock.MagicMock()

    @mock.patch('tripleo_common.actions.base.TripleOAction.get_object_client')
    def test_run(self, get_obj_client_mock):
        # setup swift: one account container, tagged as a TripleO plan
        swift = mock.MagicMock()
        swift.get_account.return_value = ({}, [
            {
                'count': 1,
                'bytes': 55,
                'name': 'overcloud'
            },
        ])
        swift.get_container.return_value = ({
            'x-container-meta-usage-tripleo': 'plan',
        }, [])
        get_obj_client_mock.return_value = swift

        # Test: invoke the action once.  (The previous version of this test
        # called run() twice — once standalone and once inside the assert —
        # doubling every mocked Swift request for no benefit.)
        action = plan.ListPlansAction()
        result = action.run(self.ctx)

        # verify: only containers tagged as plans are returned
        self.assertEqual([self.container], result)
        swift.get_account.assert_called()
        swift.get_container.assert_called_with(self.container)
class DeletePlanActionTest(base.TestCase):
    """Tests for DeletePlanAction.

    A plan may only be deleted when no Heat stack of the same name exists;
    otherwise the action must refuse with StackInUseError.
    """
    def setUp(self):
        super(DeletePlanActionTest, self).setUp()
        self.container_name = 'overcloud'
        # A live stack sharing the plan's name blocks deletion.
        self.stack = mock.MagicMock(
            id='123',
            status='CREATE_COMPLETE',
            stack_name=self.container_name
        )
        self.ctx = mock.MagicMock()
    @mock.patch(
        'tripleo_common.actions.base.TripleOAction.get_orchestration_client')
    def test_run_stack_exists(self, get_orchestration_client):
        # setup heat: the stack lookup succeeds, so deletion must be refused
        heat = mock.MagicMock()
        heat.stacks.get.return_value = self.stack
        get_orchestration_client.return_value = heat
        # test that stack exists
        action = plan.DeletePlanAction(self.container_name)
        self.assertRaises(exception.StackInUseError, action.run, self.ctx)
        # resolve_outputs=False skips expensive output resolution; only the
        # stack's existence matters here.
        heat.stacks.get.assert_called_with(
            self.container_name, resolve_outputs=False)
    @mock.patch('tripleo_common.actions.base.TripleOAction.get_object_client')
    @mock.patch(
        'tripleo_common.actions.base.TripleOAction.get_orchestration_client')
    def test_run(self, get_orchestration_client, get_obj_client_mock):
        # setup swift: the plan container holds four template objects
        swift = mock.MagicMock()
        swift.get_account.return_value = ({}, [
            {'name': self.container_name},
            {'name': 'test'},
        ])
        swift.get_container.return_value = (
            {'x-container-meta-usage-tripleo': 'plan'}, [
                {'name': 'some-name.yaml'},
                {'name': 'some-other-name.yaml'},
                {'name': 'yet-some-other-name.yaml'},
                {'name': 'finally-another-name.yaml'}
            ]
        )
        get_obj_client_mock.return_value = swift
        # setup heat: no matching stack exists, so deletion may proceed
        heat = mock.MagicMock()
        heat.stacks.get = mock.Mock(
            side_effect=heatexceptions.HTTPNotFound)
        get_orchestration_client.return_value = heat
        action = plan.DeletePlanAction(self.container_name)
        action.run(self.ctx)
        # every object in the container is deleted, then the container itself
        mock_calls = [
            mock.call('overcloud', 'some-name.yaml'),
            mock.call('overcloud', 'some-other-name.yaml'),
            mock.call('overcloud', 'yet-some-other-name.yaml'),
            mock.call('overcloud', 'finally-another-name.yaml')
        ]
        swift.delete_object.assert_has_calls(
            mock_calls, any_order=True)
        swift.delete_container.assert_called_with(self.container_name)
class ExportPlanActionTest(base.TestCase):
    """Tests for ExportPlanAction (tarball export of a plan container)."""
    def setUp(self):
        super(ExportPlanActionTest, self).setUp()
        self.plan = 'overcloud'
        self.delete_after = 3600
        self.exports_container = 'plan-exports'
        # setup swift: the plan container lists four template objects, and
        # every object read returns the same sample resources YAML
        self.template_files = (
            'some-name.yaml',
            'some-other-name.yaml',
            'yet-some-other-name.yaml',
            'finally-another-name.yaml'
        )
        self.swift = mock.MagicMock()
        self.swift.get_container.return_value = (
            {'x-container-meta-usage-tripleo': 'plan'}, [
                {'name': tf} for tf in self.template_files
            ]
        )
        self.swift.get_object.return_value = ({}, RESOURCES_YAML_CONTENTS)
        swift_patcher = mock.patch(
            'tripleo_common.actions.base.TripleOAction.get_object_client',
            return_value=self.swift)
        swift_patcher.start()
        self.addCleanup(swift_patcher.stop)
        self.ctx = mock.MagicMock()
    @mock.patch('tripleo_common.actions.base.TripleOAction.get_object_service')
    @mock.patch('tripleo_common.utils.tarball.create_tarball')
    def test_run_success(self,
                         mock_create_tarball,
                         mock_get_obj_service):
        # Happy path: every template is fetched, tarred once, uploaded once.
        get_object_mock_calls = [
            mock.call(self.plan, tf) for tf in self.template_files
        ]
        swift_service = mock.MagicMock()
        swift_service.upload.return_value = ([
            {'success': True},
        ])
        mock_get_obj_service.return_value = swift_service
        action = plan.ExportPlanAction(self.plan, self.delete_after,
                                       self.exports_container)
        action.run(self.ctx)
        self.swift.get_container.assert_called_once_with(self.plan)
        self.swift.get_object.assert_has_calls(
            get_object_mock_calls, any_order=True)
        swift_service.upload.assert_called_once()
        mock_create_tarball.assert_called_once()
    @mock.patch('tripleo_common.actions.base.TripleOAction.get_object_service')
    def test_run_container_does_not_exist(self,
                                          mock_get_obj_service):
        # A missing plan container is reported as an error Result.
        self.swift.get_container.side_effect = swiftexceptions.ClientException(
            self.plan)
        swift_service = mock.MagicMock()
        swift_service.delete.return_value = ([
            {'success': True},
        ])
        mock_get_obj_service.return_value = swift_service
        action = plan.ExportPlanAction(self.plan, self.delete_after,
                                       self.exports_container)
        result = action.run(self.ctx)
        error = "Error attempting an operation on container: %s" % self.plan
        self.assertIn(error, result.error)
    @mock.patch('tripleo_common.actions.base.TripleOAction.get_object_service')
    @mock.patch('tripleo_common.utils.tarball.create_tarball')
    def test_run_error_creating_tarball(self,
                                        mock_create_tarball,
                                        mock_get_obj_service):
        # A tar failure is reported as an error Result, not raised.
        mock_create_tarball.side_effect = processutils.ProcessExecutionError
        swift_service = mock.MagicMock()
        swift_service.delete.return_value = ([
            {'success': True},
        ])
        mock_get_obj_service.return_value = swift_service
        action = plan.ExportPlanAction(self.plan, self.delete_after,
                                       self.exports_container)
        result = action.run(self.ctx)
        error = "Error while creating a tarball"
        self.assertIn(error, result.error)
class ValidateRolesDataActionTest(base.TestCase):
    """Tests for ValidateRolesDataAction."""

    def setUp(self):
        super(ValidateRolesDataActionTest, self).setUp()
        self.container = 'overcloud'
        self.ctx = mock.MagicMock()

    def _validate(self, requested, current):
        # Run the validation action and hand back its Result.
        return plan.ValidateRolesDataAction(requested, current).run(self.ctx)

    def test_valid_roles(self):
        # Requesting exactly the current role set passes validation.
        outcome = self._validate([SAMPLE_ROLE_OBJ], [SAMPLE_ROLE_OBJ])
        self.assertTrue(outcome.data)

    def test_invalid_roles(self):
        # Requesting a role absent from the current set is an error.
        outcome = self._validate([SAMPLE_ROLE_OBJ, ], [SAMPLE_ROLE_2_OBJ])
        self.assertTrue(outcome.error)

    def test_validate_role_yaml_missing_name(self):
        # Every role definition must carry a 'name' key.
        nameless = SAMPLE_ROLE_OBJ.copy()
        del nameless['name']
        outcome = self._validate([nameless, ], [SAMPLE_ROLE_OBJ])
        self.assertTrue(outcome.error)

    def test_validate_role_yaml_invalid_type(self):
        # Fields with a known schema type are type-checked.
        broken = SAMPLE_ROLE_OBJ.copy()
        broken['CountDefault'] = 'should not be a string'
        outcome = self._validate([broken, ], [SAMPLE_ROLE_OBJ])
        self.assertTrue(outcome.error)
class UpdateRolesActionTest(base.TestCase):
    """Tests for UpdateRolesAction (merging or replacing plan roles)."""
    def setUp(self):
        super(UpdateRolesActionTest, self).setUp()
        self.container = 'overcloud'
        self.ctx = mock.MagicMock()
        self.current_roles = [SAMPLE_ROLE_OBJ, SAMPLE_ROLE_2_OBJ]
    def test_no_primary_roles(self):
        # Replacing all roles with a set lacking a 'primary'-tagged role
        # must be rejected with RoleMetadataError.
        updated_role = UPDATED_ROLE_OBJ.copy()
        del updated_role['tags']
        action = plan.UpdateRolesAction([updated_role],
                                        self.current_roles,
                                        True, self.container)
        self.assertRaises(exception.RoleMetadataError, action.run, self.ctx)
    @mock.patch('tripleo_common.actions.base.TripleOAction.get_object_client')
    def test_update_some_roles(self, get_obj_client_mock):
        # Setup
        swift = mock.MagicMock()
        get_obj_client_mock.return_value = swift
        # replace_all=False merges the update into the current role list
        action = plan.UpdateRolesAction([UPDATED_ROLE_OBJ],
                                        self.current_roles,
                                        False, self.container)
        result = action.run(self.ctx)
        self.assertEqual(result.data,
                         {'roles': [SAMPLE_ROLE_2_OBJ, UPDATED_ROLE_OBJ]})
    @mock.patch('tripleo_common.actions.base.TripleOAction.get_object_client')
    def test_update_replace_roles(self, get_obj_client_mock):
        # Setup
        swift = mock.MagicMock()
        get_obj_client_mock.return_value = swift
        # replace_all=True discards roles missing from the update
        action = plan.UpdateRolesAction([UPDATED_ROLE_OBJ],
                                        self.current_roles,
                                        True, self.container)
        result = action.run(self.ctx)
        self.assertEqual(result.data, {'roles': [UPDATED_ROLE_OBJ, ]})
class RemoveNoopDeployStepActionTest(base.TestCase):
    """Tests for RemoveNoopDeployStepAction.

    The action must drop the OS::TripleO::DeploymentSteps -> OS::Heat::None
    mapping from both the plan environment and the user environment.
    """
    def setUp(self):
        super(RemoveNoopDeployStepActionTest, self).setUp()
        self.container = 'overcloud'
        self.ctx = mock.MagicMock()
        self.heat = mock.MagicMock()
        self.swift = mock.MagicMock()
        # registry before: contains the noop DeploymentSteps mapping
        self.rr_before = {
            'OS::TripleO::Foo': 'bar.yaml',
            'OS::TripleO::DeploymentSteps': 'OS::Heat::None',
        }
        # registry after: only the unrelated mapping survives
        self.rr_after = {
            'OS::TripleO::Foo': 'bar.yaml',
        }
    @mock.patch('tripleo_common.actions.plan.plan_utils')
    @mock.patch('tripleo_common.actions.base.TripleOAction.get_object_client')
    @mock.patch(
        'tripleo_common.actions.base.TripleOAction.get_orchestration_client')
    def test_roles_gathered(self, mock_orch_client, mock_obj_client,
                            mock_plan_utils):
        mock_obj_client.return_value = self.swift
        mock_orch_client.return_value = self.heat
        mock_plan_utils.get_env.return_value = {
            'name': self.container,
            'resource_registry': self.rr_before,
        }
        mock_plan_utils.get_user_env.return_value = {
            'resource_registry': self.rr_before,
        }
        action = plan.RemoveNoopDeployStepAction(self.container)
        action.run(self.ctx)
        # both the plan env and the user env are rewritten without the noop
        mock_plan_utils.put_env.assert_called_with(
            self.swift,
            {
                'name': self.container,
                'resource_registry': self.rr_after,
            })
        mock_plan_utils.put_user_env.assert_called_with(
            self.swift, self.container, {'resource_registry': self.rr_after})

View File

@ -1,306 +0,0 @@
# Copyright 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from unittest import mock
import yaml
from mistral_lib import actions
from swiftclient import exceptions as swiftexceptions
from tripleo_common.actions import scale
from tripleo_common import constants
from tripleo_common.tests import base
def mock_stack():
    """Return a fake Heat stack named 'My Stack' with ComputeCount=2."""
    fake = mock.Mock()
    fake.name = 'My Stack'
    fake.parameters = {'ComputeCount': '2'}
    # to_dict() mirrors what the heatclient Stack object would serialize to.
    fake.to_dict.return_value = {
        'uuid': 5,
        'name': 'My Stack',
        'parameters': fake.parameters,
    }
    return fake
class ScaleDownActionTest(base.TestCase):
    """Tests for ScaleDownAction (removing nodes from a deployed stack).

    NOTE(review): the swift.get_object.side_effect tuples below are strictly
    order-dependent — the action re-reads the plan environment, roles,
    network and exclude files several times in a fixed sequence.  Do not
    reorder the entries.
    """
    def setUp(self):
        super(ScaleDownActionTest, self).setUp()
        self.image = collections.namedtuple('image', ['id'])
    @mock.patch('tripleo_common.actions.base.TripleOAction.'
                'get_orchestration_client')
    @mock.patch('heatclient.common.template_utils.'
                'process_multiple_environments_and_files')
    @mock.patch('heatclient.common.template_utils.get_template_contents')
    @mock.patch('tripleo_common.actions.base.TripleOAction.get_object_client')
    def test_run(self, mock_get_object_client,
                 mock_get_template_contents, mock_env_files,
                 mock_get_heat_client):
        # Scale down by physical resource id ('124').
        mock_env_files.return_value = ({}, {})
        heatclient = mock.MagicMock()
        # Two resources: the Compute ResourceGroup and one member node.
        heatclient.resources.list.return_value = [
            mock.MagicMock(
                links=[{'rel': 'stack',
                        'href': 'http://192.0.2.1:8004/v1/'
                                'a959ac7d6a4a475daf2428df315c41ef/'
                                'stacks/overcloud/123'}],
                logical_resource_id='logical_id',
                physical_resource_id='resource_id',
                resource_type='OS::Heat::ResourceGroup',
                resource_name='Compute'
            ),
            mock.MagicMock(
                links=[{'rel': 'stack',
                        'href': 'http://192.0.2.1:8004/v1/'
                                'a959ac7d6a4a475daf2428df315c41ef/'
                                'stacks/overcloud/124'}],
                logical_resource_id='node0',
                physical_resource_id='124',
                resource_type='OS::TripleO::Compute',
                parent_resource='Compute',
                resource_name='node0',
            )
        ]
        heatclient.stacks.get.return_value = mock_stack()
        heatclient.stacks.validate.return_value = {}
        mock_get_heat_client.return_value = heatclient
        mock_ctx = mock.MagicMock()
        swift = mock.MagicMock(url="http://test.com")
        mock_env = yaml.safe_dump({
            'name': 'overcloud',
            'temp_environment': 'temp_environment',
            'template': 'template',
            'environments': [{u'path': u'environments/test.yaml'}]
        }, default_flow_style=False)
        mock_roles = yaml.safe_dump([{"name": "foo"}])
        mock_network = yaml.safe_dump([{'enabled': False}])
        mock_exclude = yaml.safe_dump({"name": "foo"})
        # Ordered read sequence; the final ClientException makes any
        # unexpected extra read fail loudly.
        swift.get_object.side_effect = (
            ({}, mock_env),
            ({}, mock_env),
            ({}, mock_roles),
            ({}, mock_network),
            ({}, mock_exclude),
            ({}, mock_env),
            ({}, mock_env),
            ({}, mock_env),
            ({}, mock_roles),
            ({}, mock_network),
            ({}, mock_exclude),
            ({}, mock_env),
            ({}, mock_env),
            swiftexceptions.ClientException('atest2')
        )
        def return_container_files(*args):
            return ('headers', [{'name': 'foo.role.j2.yaml'}])
        swift.get_container = mock.MagicMock(
            side_effect=return_container_files)
        mock_get_object_client.return_value = swift
        # The scale-down update is expected to disable UpdateDeployment
        # hooks on every node via this registry override.
        env = {
            'resource_registry': {
                'resources': {'*': {'*': {'UpdateDeployment': {'hooks': []}}}}
            }
        }
        mock_get_template_contents.return_value = ({}, {
            'heat_template_version': '2016-04-30'
        })
        # Test
        action = scale.ScaleDownAction(
            constants.STACK_TIMEOUT_DEFAULT, ['124'], 'stack')
        result = action.run(mock_ctx)
        heatclient.stacks.validate.assert_called_once_with(
            environment=env,
            files={},
            show_nested=True,
            template={'heat_template_version': '2016-04-30'}
        )
        # The role's Count/RemovalPolicies parameters are cleared so the
        # next stack update recomputes them.
        clear_list = list(['ComputeCount', 'ComputeRemovalPolicies',
                           'ComputeRemovalPoliciesMode'])
        _, kwargs = heatclient.stacks.update.call_args
        self.assertEqual(set(kwargs['clear_parameters']), set(clear_list))
        self.assertEqual(kwargs['environment'], env)
        self.assertEqual(kwargs['existing'], True)
        self.assertEqual(kwargs['files'], {})
        self.assertEqual(None, result)
    @mock.patch('tripleo_common.actions.scale.ScaleDownAction.'
                '_get_removal_params_from_heat')
    @mock.patch('tripleo_common.actions.scale.ScaleDownAction._update_stack')
    @mock.patch('tripleo_common.actions.base.TripleOAction.'
                'get_orchestration_client')
    def test_run_bad_update(self, mock_get_heat_client,
                            mock__update_stack,
                            mock__get_removal_params_from_heat):
        # A failed stack update propagates the error Result unchanged.
        mock__update_stack.return_value = actions.Result(error='Update error')
        mock__get_removal_params_from_heat.return_value = {}
        heatclient = mock.MagicMock()
        heatclient.resources.list.return_value = [
            mock.MagicMock(
                links=[{'rel': 'stack',
                        'href': 'http://192.0.2.1:8004/v1/'
                                'a959ac7d6a4a475daf2428df315c41ef/'
                                'stacks/overcloud/123'}],
                logical_resource_id='logical_id',
                physical_resource_id='resource_id',
                resource_type='OS::Heat::ResourceGroup',
                resource_name='Compute'
            ),
            mock.MagicMock(
                links=[{'rel': 'stack',
                        'href': 'http://192.0.2.1:8004/v1/'
                                'a959ac7d6a4a475daf2428df315c41ef/'
                                'stacks/overcloud/124'}],
                logical_resource_id='node0',
                physical_resource_id='123',
                resource_type='OS::TripleO::Compute',
                parent_resource='Compute',
                resource_name='node0',
            )
        ]
        heatclient.stacks.get.return_value = mock_stack()
        heatclient.stacks.validate.return_value = {}
        mock_get_heat_client.return_value = heatclient
        mock_ctx = mock.MagicMock()
        # Test
        action = scale.ScaleDownAction(
            constants.STACK_TIMEOUT_DEFAULT, ['resource_id'], 'stack')
        result = action.run(mock_ctx)
        self.assertEqual(actions.Result(error='Update error'), result)
    @mock.patch('tripleo_common.actions.base.TripleOAction.'
                'get_orchestration_client')
    @mock.patch('heatclient.common.template_utils.'
                'process_multiple_environments_and_files')
    @mock.patch('heatclient.common.template_utils.get_template_contents')
    @mock.patch('tripleo_common.actions.base.TripleOAction.get_object_client')
    def test_run_with_hostmatch(self, mock_get_object_client,
                                mock_get_template_contents, mock_env_files,
                                mock_get_heat_client):
        # Scale down by hostname ('node0') instead of resource id; the
        # action resolves the name via heatclient.resources.get attributes.
        mock_env_files.return_value = ({}, {})
        heatclient = mock.MagicMock()
        heatclient.resources.list.return_value = [
            mock.MagicMock(
                links=[{'rel': 'stack',
                        'href': 'http://192.0.2.1:8004/v1/'
                                'a959ac7d6a4a475daf2428df315c41ef/'
                                'stacks/overcloud/123'}],
                logical_resource_id='logical_id',
                physical_resource_id='resource_id',
                resource_type='OS::Heat::ResourceGroup',
                resource_name='Compute'
            ),
            mock.MagicMock(
                links=[{'rel': 'stack',
                        'href': 'http://192.0.2.1:8004/v1/'
                                'a959ac7d6a4a475daf2428df315c41ef/'
                                'stacks/overcloud/124'}],
                logical_resource_id='node0',
                physical_resource_id='124',
                resource_type='OS::TripleO::ComputeServer',
                parent_resource='Compute',
                resource_name='node0',
            )
        ]
        heatclient.resources.get.return_value = mock.MagicMock(
            attributes={'name': 'node0'})
        heatclient.stacks.get.return_value = mock_stack()
        heatclient.stacks.validate.return_value = {}
        mock_get_heat_client.return_value = heatclient
        mock_ctx = mock.MagicMock()
        swift = mock.MagicMock(url="http://test.com")
        mock_env = yaml.safe_dump({
            'name': 'overcloud',
            'temp_environment': 'temp_environment',
            'template': 'template',
            'environments': [{u'path': u'environments/test.yaml'}]
        }, default_flow_style=False)
        mock_roles = yaml.safe_dump([{"name": "foo"}])
        mock_network = yaml.safe_dump([{'enabled': False}])
        mock_exclude = yaml.safe_dump({"name": "foo"})
        # Same ordered read sequence as test_run; see class docstring.
        swift.get_object.side_effect = (
            ({}, mock_env),
            ({}, mock_env),
            ({}, mock_roles),
            ({}, mock_network),
            ({}, mock_exclude),
            ({}, mock_env),
            ({}, mock_env),
            ({}, mock_env),
            ({}, mock_roles),
            ({}, mock_network),
            ({}, mock_exclude),
            ({}, mock_env),
            ({}, mock_env),
            swiftexceptions.ClientException('atest2')
        )
        def return_container_files(*args):
            return ('headers', [{'name': 'foo.role.j2.yaml'}])
        swift.get_container = mock.MagicMock(
            side_effect=return_container_files)
        mock_get_object_client.return_value = swift
        env = {
            'resource_registry': {
                'resources': {'*': {'*': {'UpdateDeployment': {'hooks': []}}}}
            }
        }
        mock_get_template_contents.return_value = ({}, {
            'heat_template_version': '2016-04-30'
        })
        # Test
        action = scale.ScaleDownAction(
            constants.STACK_TIMEOUT_DEFAULT, ['node0'], 'stack')
        result = action.run(mock_ctx)
        heatclient.stacks.validate.assert_called_once_with(
            environment=env,
            files={},
            show_nested=True,
            template={'heat_template_version': '2016-04-30'}
        )
        clear_list = list(['ComputeCount', 'ComputeRemovalPolicies',
                           'ComputeRemovalPoliciesMode'])
        _, kwargs = heatclient.stacks.update.call_args
        self.assertEqual(set(kwargs['clear_parameters']), set(clear_list))
        self.assertEqual(kwargs['environment'], env)
        self.assertEqual(kwargs['existing'], True)
        self.assertEqual(kwargs['files'], {})
        self.assertEqual(None, result)

View File

@ -1,105 +0,0 @@
# Copyright 2016 Red Hat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
from unittest import mock
from tripleo_common.actions import validations
from tripleo_common.tests import base
class GetPubkeyActionTest(base.TestCase):
    """Tests for GetPubkeyAction (SSH public key retrieval/creation)."""
    @mock.patch(
        'tripleo_common.actions.base.TripleOAction.get_workflow_client')
    def test_run_existing_pubkey(self, get_workflow_client_mock):
        # When the Mistral environment already stores a public key,
        # the action returns it unchanged.
        mock_ctx = mock.MagicMock()
        mistral = mock.MagicMock()
        get_workflow_client_mock.return_value = mistral
        environment = collections.namedtuple('environment', ['variables'])
        mistral.environments.get.return_value = environment(variables={
            'public_key': 'existing_pubkey'
        })
        action = validations.GetPubkeyAction()
        self.assertEqual('existing_pubkey', action.run(mock_ctx))
    @mock.patch(
        'tripleo_common.actions.base.TripleOAction.get_workflow_client')
    @mock.patch('tripleo_common.utils.passwords.create_ssh_keypair')
    def test_run_no_pubkey(self, mock_create_keypair,
                           get_workflow_client_mock):
        mock_ctx = mock.MagicMock()
        mistral = mock.MagicMock()
        get_workflow_client_mock.return_value = mistral
        # NOTE(review): a plain-string side_effect is consumed by mock as an
        # iterable of characters, so get() returns 'n' here rather than
        # raising; presumably the action still falls back to generating a
        # keypair — confirm against GetPubkeyAction's error handling.
        mistral.environments.get.side_effect = 'nope, sorry'
        mock_create_keypair.return_value = {
            'public_key': 'public_key',
            'private_key': 'private_key',
        }
        action = validations.GetPubkeyAction()
        self.assertEqual('public_key', action.run(mock_ctx))
class Enabled(base.TestCase):
    """Tests for validations.Enabled (are validations turned on?)."""
    @mock.patch(
        'tripleo_common.actions.base.TripleOAction.get_workflow_client')
    def test_validations_enabled(self, get_workflow_client_mock):
        # A successful environment lookup means validations are enabled.
        mock_ctx = mock.MagicMock()
        mistral = mock.MagicMock()
        get_workflow_client_mock.return_value = mistral
        mistral.environments.get.return_value = {}
        action = validations.Enabled()
        result = action._validations_enabled(mock_ctx)
        self.assertEqual(result, True)
    @mock.patch(
        'tripleo_common.actions.base.TripleOAction.get_workflow_client')
    def test_validations_disabled(self, get_workflow_client_mock):
        # Any lookup failure is treated as "validations disabled".
        mock_ctx = mock.MagicMock()
        mistral = mock.MagicMock()
        get_workflow_client_mock.return_value = mistral
        mistral.environments.get.side_effect = Exception()
        action = validations.Enabled()
        result = action._validations_enabled(mock_ctx)
        self.assertEqual(result, False)
    @mock.patch(
        'tripleo_common.actions.validations.Enabled._validations_enabled')
    @mock.patch(
        'tripleo_common.actions.base.TripleOAction.get_workflow_client')
    def test_success_with_validations_enabled(self, get_workflow_client_mock,
                                              validations_enabled_mock):
        # Enabled: data carries the stdout message, error is None.
        mock_ctx = mock.MagicMock()
        validations_enabled_mock.return_value = True
        action = validations.Enabled()
        action_result = action.run(mock_ctx)
        self.assertIsNone(action_result.error)
        self.assertEqual('Validations are enabled',
                         action_result.data['stdout'])
    @mock.patch(
        'tripleo_common.actions.validations.Enabled._validations_enabled')
    @mock.patch(
        'tripleo_common.actions.base.TripleOAction.get_workflow_client')
    def test_success_with_validations_disabled(self, get_workflow_client_mock,
                                               validations_enabled_mock):
        # Disabled: error carries the stdout message, data is None.
        mock_ctx = mock.MagicMock()
        validations_enabled_mock.return_value = False
        action = validations.Enabled()
        action_result = action.run(mock_ctx)
        self.assertIsNone(action_result.data)
        self.assertEqual('Validations are disabled',
                         action_result.error['stdout'])

View File

@ -1,65 +0,0 @@
# Copyright 2016 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from unittest import mock
from tripleo_common.tests import base
from tripleo_common.tests import fake_nova
# See the README file in the fake_nova module directory for details on why
# this is being done.
if 'nova' not in sys.modules:
sys.modules['nova'] = fake_nova
else:
raise RuntimeError('nova module already found in sys.modules. The '
'fake_nova injection should be removed.')
from tripleo_common.filters import capabilities_filter # noqa
class TestCapabilitiesFilter(base.TestCase):
    """Tests for the TripleOCapabilitiesFilter nova scheduler filter."""

    def test_no_requested_node(self):
        # With no capabilities:node hint, every host passes.
        scheduler_filter = capabilities_filter.TripleOCapabilitiesFilter()
        host = mock.Mock()
        host.stats.get.return_value = ''
        spec = mock.Mock()
        spec.scheduler_hints.get.return_value = []
        self.assertTrue(scheduler_filter.host_passes(host, spec))

    def test_requested_node_matches(self):
        # The host whose node name equals the hint passes the filter.
        def fake_host_stat(key):
            if key == 'node':
                return 'compute-0'
            self.fail('Unexpected key requested by filter')

        def fake_hint(key):
            if key == 'capabilities:node':
                return ['compute-0']
            self.fail('Unexpected key requested by filter')

        scheduler_filter = capabilities_filter.TripleOCapabilitiesFilter()
        host = mock.Mock()
        host.stats.get.side_effect = fake_host_stat
        spec = mock.Mock()
        spec.scheduler_hints.get.side_effect = fake_hint
        self.assertTrue(scheduler_filter.host_passes(host, spec))

    def test_requested_node_no_match(self):
        # A host with a different node name is filtered out.
        scheduler_filter = capabilities_filter.TripleOCapabilitiesFilter()
        host = mock.Mock()
        host.stats.get.return_value = 'controller-0'
        spec = mock.Mock()
        spec.scheduler_hints.get.return_value = ['compute-0']
        self.assertFalse(scheduler_filter.host_passes(host, spec))

View File

@ -1,43 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import yaml
from tripleo_common.tests import base
WORKBOOK_DIRECTORY = os.path.join(os.path.dirname(__file__),
'..', '..', 'workbooks')
class TestWorkflowStructure(base.TestCase):
    """Sanity checks over every Mistral workbook shipped in workbooks/."""

    def setUp(self):
        self.workbooks = os.listdir(WORKBOOK_DIRECTORY)
        super(TestWorkflowStructure, self).setUp()

    def test_tags_are_set(self):
        """Every workflow in every workbook must carry the managed tag."""
        for workbook in self.workbooks:
            if workbook.startswith('.'):
                # skip hidden files such as .gitignore
                continue
            full_path = os.path.join(WORKBOOK_DIRECTORY, workbook)
            with open(full_path) as f:
                # safe_load: workbooks are plain data, and yaml.load without
                # an explicit Loader is deprecated and unsafe.
                wb_yaml = yaml.safe_load(f)
            message = ("tripleo-common-managed tag is missing from a "
                       "workflow in {}").format(full_path)
            for wf_name, wf_spec in wb_yaml['workflows'].items():
                self.assertIn('tags', wf_spec, message)
                self.assertIn('tripleo-common-managed', wf_spec['tags'],
                              message)

File diff suppressed because it is too large Load Diff

View File

@ -1,512 +0,0 @@
---
version: '2.0'
name: tripleo.derive_params.v1
description: TripleO Workflows to derive deployment parameters from the introspected data
workflows:
# Entry-point workflow: flattens the plan's parameters, finds the roles that
# actually have nodes assigned (<RoleName>Count > 0), runs the per-role
# derivation sub-workflow for each, and finally stores the merged result in
# the plan (key 'derived_parameters') and/or reports it over the messaging
# queue, depending on 'persist_params_in_plan'.
derive_parameters:
description: The main workflow for deriving parameters from the introspected data
input:
- plan: overcloud
- queue_name: tripleo
- persist_params_in_plan: true
- user_inputs: {}
tags:
- tripleo-common-managed
tasks:
get_flattened_parameters:
action: tripleo.parameters.get_flatten container=<% $.plan %>
publish:
environment_parameters: <% task().result.environment_parameters %>
heat_resource_tree: <% task().result.heat_resource_tree %>
on-success:
- get_roles: <% $.environment_parameters and $.heat_resource_tree %>
- set_status_failed_get_flattened_parameters: <% (not $.environment_parameters) or (not $.heat_resource_tree) %>
on-error: set_status_failed_get_flattened_parameters
get_roles:
action: tripleo.role.list container=<% $.plan %>
publish:
role_name_list: <% task().result %>
on-success:
- get_valid_roles: <% $.role_name_list %>
- set_status_failed_get_roles: <% not $.role_name_list %>
on-error: set_status_failed_on_error_get_roles
# Obtain only the roles which has count > 0, by checking <RoleName>Count parameter, like ComputeCount
get_valid_roles:
publish:
valid_role_name_list: <% let(hr => $.heat_resource_tree.parameters) -> $.role_name_list.where(int($hr.get(concat($, 'Count'), {}).get('default', 0)) > 0) %>
on-success:
- for_each_role: <% $.valid_role_name_list %>
- set_status_failed_get_valid_roles: <% not $.valid_role_name_list %>
# Execute the basic preparation workflow for each role to get introspection data
# (concurrency: 1 — roles are processed strictly one at a time).
for_each_role:
with-items: role_name in <% $.valid_role_name_list %>
concurrency: 1
workflow: _derive_parameters_per_role
input:
plan: <% $.plan %>
role_name: <% $.role_name %>
environment_parameters: <% $.environment_parameters %>
heat_resource_tree: <% $.heat_resource_tree %>
user_inputs: <% $.user_inputs %>
publish:
# Gets all the roles derived parameters as dictionary
result: <% task().result.select($.get('derived_parameters', {})).sum() %>
on-success:
- send_message: <% not $.persist_params_in_plan %>
- reset_derive_parameters_in_plan: <% $.persist_params_in_plan %>
on-error: set_status_failed_for_each_role
# Clear any previously stored derivation before writing the new one.
reset_derive_parameters_in_plan:
action: tripleo.parameters.reset
input:
container: <% $.plan %>
key: 'derived_parameters'
on-success:
# Add the derived parameters to the deployment plan only when $.result
# (the derived parameters) is non-empty. Otherwise, we're done.
- update_derive_parameters_in_plan: <% $.result %>
- send_message: <% not $.result %>
on-error: set_status_failed_reset_derive_parameters_in_plan
update_derive_parameters_in_plan:
action: tripleo.parameters.update
input:
container: <% $.plan %>
key: 'derived_parameters'
parameters: <% $.get('result', {}) %>
on-success: send_message
on-error: set_status_failed_update_derive_parameters_in_plan
# The set_status_failed_* tasks below only publish a FAILED status and a
# diagnostic message, then fall through to send_message, which reports the
# outcome on the queue.
set_status_failed_get_flattened_parameters:
on-success: send_message
publish:
status: FAILED
message: <% task(get_flattened_parameters).result %>
set_status_failed_get_roles:
on-success: send_message
publish:
status: FAILED
message: "Unable to determine the list of roles in the deployment plan"
set_status_failed_on_error_get_roles:
on-success: send_message
publish:
status: FAILED
message: <% task(get_roles).result %>
set_status_failed_get_valid_roles:
on-success: send_message
publish:
status: FAILED
message: 'Unable to determine the list of valid roles in the deployment plan.'
set_status_failed_for_each_role:
on-success: update_message_format
publish:
status: FAILED
# gets the status and message for all roles from task result.
message: <% task(for_each_role).result.select(dict('role_name' => $.role_name, 'status' => $.get('status', 'SUCCESS'), 'message' => $.get('message', ''))) %>
update_message_format:
on-success: send_message
publish:
# updates the message format(Role 'role name': message) for each roles which are failed and joins the message list as string with ', ' separator.
message: <% $.message.where($.get('status', 'SUCCESS') != 'SUCCESS').select(concat("Role '{}':".format($.role_name), " ", $.get('message', '(error unknown)'))).join(', ') %>
set_status_failed_reset_derive_parameters_in_plan:
on-success: send_message
publish:
status: FAILED
message: <% task(reset_derive_parameters_in_plan).result %>
set_status_failed_update_derive_parameters_in_plan:
on-success: send_message
publish:
status: FAILED
message: <% task(update_derive_parameters_in_plan).result %>
send_message:
workflow: tripleo.messaging.v1.send
input:
queue_name: <% $.queue_name %>
type: <% execution().name %>
status: <% $.get('status', 'SUCCESS') %>
execution: <% execution() %>
message: <% $.get('message', '') %>
plan_name: <% $.plan %>
payload:
result: <% $.get('result', '') %>
# Utility workflow: flatten the plan's Heat parameter tree via the
# tripleo.parameters.get_flatten action and hand the result back to the
# caller through the standard tripleo messaging workflow.
get_flattened_parameters:
description: Workflow to retreive a list of flattened parameters
input:
- plan: overcloud
- queue_name: tripleo
output:
flattened_parameters: <% $.flattened_parameters %>
tags:
- tripleo-common-managed
tasks:
get_flattened_parameters:
action: tripleo.parameters.get_flatten container=<% $.plan %>
on-success: send_message
publish:
status: SUCCESS
message: <% task().result %>
flattened_parameters: <% task().result %>
publish-on-error:
status: FAILED
message: <% task().result %>
send_message:
workflow: tripleo.messaging.v1.send
input:
queue_name: <% $.queue_name %>
type: <% execution().name %>
# Fixed: the YAQL expression was missing its closing '%>' delimiter,
# which breaks parsing of the whole workbook.
status: <% $.get('status', 'SUCCESS') %>
execution: <% execution() %>
# Fixed: tripleo.messaging.v1.send is invoked with the singular
# 'message' input everywhere else in this workbook; 'messages' is not
# a declared input and would be rejected by Mistral.
message: <% $.get('message', '') %>
payload:
flattened_parameters: <% $.get('flattened_parameters', []) %>
# Per-role driver: locates one representative baremetal node for the role
# (first via <Role>SchedulerHints / 'capabilities:node', falling back to the
# role's flavor -> profile), fetches its introspection data, then chains the
# feature sub-workflows (DPDK -> SRIOV -> HOST -> HCI) that each merge into
# 'derived_parameters'.
_derive_parameters_per_role:
description: >
Workflow which runs per role to get the introspection data on the first matching node assigned to role.
Once introspection data is fetched, this worklow will trigger the actual derive parameters workflow
input:
- plan
- role_name
- environment_parameters
- heat_resource_tree
- user_inputs
output:
derived_parameters: <% $.get('derived_parameters', {}) %>
# Need role_name in output parameter to display the status for all roles in main workflow when any role fails here.
role_name: <% $.role_name %>
tags:
- tripleo-common-managed
tasks:
get_role_info:
workflow: _get_role_info
input:
role_name: <% $.role_name %>
heat_resource_tree: <% $.heat_resource_tree %>
publish:
role_features: <% task().result.get('role_features', []) %>
role_services: <% task().result.get('role_services', []) %>
on-success:
# Continue only if there are features associated with this role. Otherwise, we're done.
- get_scheduler_hints: <% $.role_features %>
on-error: set_status_failed_get_role_info
# Find a node associated with this role. Look for nodes matching any scheduler hints
# associated with the role, and if there are no scheduler hints then locate nodes
# with a profile matching the role's flavor.
get_scheduler_hints:
publish:
scheduler_hints: <% let(param_name => concat($.role_name, 'SchedulerHints')) -> $.heat_resource_tree.parameters.get($param_name, {}).get('default', {}) %>
on-success:
- get_hint_regex: <% $.scheduler_hints %>
# If there are no scheduler hints then move on to use the flavor
- get_flavor_name: <% not $.scheduler_hints %>
get_hint_regex:
publish:
hint_regex: <% $.scheduler_hints.get('capabilities:node', '').replace('%index%', '(\d+)') %>
on-success:
- get_node_with_hint: <% $.hint_regex %>
# If there is no 'capabilities:node' hint then move on to use the flavor
- get_flavor_name: <% not $.hint_regex %>
get_node_with_hint:
workflow: tripleo.baremetal.v1.nodes_with_hint
input:
hint_regex: <% concat('^', $.hint_regex, '$') %>
publish:
role_node_uuid: <% task().result.matching_nodes.first('') %>
on-success:
- get_introspection_data: <% $.role_node_uuid %>
# If no nodes match the scheduler hint then move on to use the flavor
- get_flavor_name: <% not $.role_node_uuid %>
on-error: set_status_failed_on_error_get_node_with_hint
# The Controller role historically uses the 'OvercloudControlFlavor'
# parameter name, hence the replace() below.
get_flavor_name:
publish:
flavor_name: <% let(param_name => concat('Overcloud', $.role_name, 'Flavor').replace('OvercloudControllerFlavor', 'OvercloudControlFlavor')) -> $.heat_resource_tree.parameters.get($param_name, {}).get('default', '') %>
on-success:
- get_profile_name: <% $.flavor_name %>
- set_status_failed_get_flavor_name: <% not $.flavor_name %>
get_profile_name:
action: tripleo.parameters.get_profile_of_flavor flavor_name=<% $.flavor_name %>
publish:
profile_name: <% task().result %>
on-success: get_profile_node
on-error: set_status_failed_get_profile_name
get_profile_node:
workflow: tripleo.baremetal.v1.nodes_with_profile
input:
profile: <% $.profile_name %>
publish:
role_node_uuid: <% task().result.matching_nodes.first('') %>
on-success:
- get_introspection_data: <% $.role_node_uuid %>
- set_status_failed_no_matching_node_get_profile_node: <% not $.role_node_uuid %>
on-error: set_status_failed_on_error_get_profile_node
get_introspection_data:
action: baremetal_introspection.get_data uuid=<% $.role_node_uuid %>
publish:
hw_data: <% task().result %>
# Establish an empty dictionary of derived_parameters prior to
# invoking the individual "feature" algorithms
derived_parameters: <% dict() %>
on-success: handle_dpdk_feature
on-error: set_status_failed_get_introspection_data
# Each handle_*_feature task below is a pure branch point: run the feature's
# sub-workflow when the role has the feature, otherwise skip to the next one.
handle_dpdk_feature:
on-success:
- get_dpdk_derive_params: <% $.role_features.contains('DPDK') %>
- handle_sriov_feature: <% not $.role_features.contains('DPDK') %>
get_dpdk_derive_params:
workflow: tripleo.derive_params_formulas.v1.dpdk_derive_params
input:
plan: <% $.plan %>
role_name: <% $.role_name %>
heat_resource_tree: <% $.heat_resource_tree %>
hw_data: <% $.hw_data %>
user_inputs: <% $.user_inputs %>
publish:
derived_parameters: <% task().result.get('derived_parameters', {}) %>
on-success: handle_sriov_feature
on-error: set_status_failed_get_dpdk_derive_params
handle_sriov_feature:
on-success:
- get_sriov_derive_params: <% $.role_features.contains('SRIOV') %>
- handle_host_feature: <% not $.role_features.contains('SRIOV') %>
get_sriov_derive_params:
workflow: tripleo.derive_params_formulas.v1.sriov_derive_params
input:
role_name: <% $.role_name %>
hw_data: <% $.hw_data %>
derived_parameters: <% $.derived_parameters %>
publish:
derived_parameters: <% task().result.get('derived_parameters', {}) %>
on-success: handle_host_feature
on-error: set_status_failed_get_sriov_derive_params
handle_host_feature:
on-success:
- get_host_derive_params: <% $.role_features.contains('HOST') %>
- handle_hci_feature: <% not $.role_features.contains('HOST') %>
get_host_derive_params:
workflow: tripleo.derive_params_formulas.v1.host_derive_params
input:
role_name: <% $.role_name %>
hw_data: <% $.hw_data %>
user_inputs: <% $.user_inputs %>
derived_parameters: <% $.derived_parameters %>
publish:
derived_parameters: <% task().result.get('derived_parameters', {}) %>
on-success: handle_hci_feature
on-error: set_status_failed_get_host_derive_params
handle_hci_feature:
on-success:
- get_hci_derive_params: <% $.role_features.contains('HCI') %>
get_hci_derive_params:
workflow: tripleo.derive_params_formulas.v1.hci_derive_params
input:
role_name: <% $.role_name %>
environment_parameters: <% $.environment_parameters %>
heat_resource_tree: <% $.heat_resource_tree %>
introspection_data: <% $.hw_data %>
user_inputs: <% $.user_inputs %>
derived_parameters: <% $.derived_parameters %>
publish:
derived_parameters: <% task().result.get('derived_parameters', {}) %>
on-error: set_status_failed_get_hci_derive_params
# Done (no more derived parameter features)
# Failure handlers: publish role_name + FAILED + a diagnostic message (read
# back by the parent workflow's set_status_failed_for_each_role), then fail.
set_status_failed_get_role_info:
publish:
role_name: <% $.role_name %>
status: FAILED
message: <% task(get_role_info).result.get('message', '') %>
on-success: fail
set_status_failed_get_flavor_name:
publish:
role_name: <% $.role_name %>
status: FAILED
message: <% "Unable to determine flavor for role '{0}'".format($.role_name) %>
on-success: fail
set_status_failed_get_profile_name:
publish:
role_name: <% $.role_name %>
status: FAILED
message: <% task(get_profile_name).result %>
on-success: fail
set_status_failed_no_matching_node_get_profile_node:
publish:
role_name: <% $.role_name %>
status: FAILED
message: <% "Unable to determine matching node for profile '{0}'".format($.profile_name) %>
on-success: fail
set_status_failed_on_error_get_profile_node:
publish:
role_name: <% $.role_name %>
status: FAILED
message: <% task(get_profile_node).result %>
on-success: fail
set_status_failed_on_error_get_node_with_hint:
publish:
role_name: <% $.role_name %>
status: FAILED
message: <% task(get_node_with_hint).result %>
on-success: fail
set_status_failed_get_introspection_data:
publish:
role_name: <% $.role_name %>
status: FAILED
message: <% task(get_introspection_data).result %>
on-success: fail
set_status_failed_get_dpdk_derive_params:
publish:
role_name: <% $.role_name %>
status: FAILED
message: <% task(get_dpdk_derive_params).result.message %>
on-success: fail
set_status_failed_get_sriov_derive_params:
publish:
role_name: <% $.role_name %>
status: FAILED
message: <% task(get_sriov_derive_params).result.message %>
on-success: fail
set_status_failed_get_host_derive_params:
publish:
role_name: <% $.role_name %>
status: FAILED
message: <% task(get_host_derive_params).result.message %>
on-success: fail
set_status_failed_get_hci_derive_params:
publish:
role_name: <% $.role_name %>
status: FAILED
message: <% task(get_hci_derive_params).result.message %>
on-success: fail
# Helper workflow: walks the Heat resource tree to find the role's service
# chain, collects its service resources, and maps service types/parameters to
# the feature flags (DPDK / SRIOV / HOST / HCI) consumed by
# _derive_parameters_per_role.
_get_role_info:
description: >
Workflow that determines the list of derived parameter features (DPDK,
HCI, etc.) for a role based on the services assigned to the role.
input:
- role_name
- heat_resource_tree
tags:
- tripleo-common-managed
tasks:
get_resource_chains:
publish:
resource_chains: <% $.heat_resource_tree.resources.values().where($.get('type', '') = 'OS::Heat::ResourceChain') %>
on-success:
- get_role_chain: <% $.resource_chains %>
- set_status_failed_get_resource_chains: <% not $.resource_chains %>
# The role's chain resource is named '<RoleName>ServiceChain'.
get_role_chain:
publish:
role_chain: <% let(chain_name => concat($.role_name, 'ServiceChain'))-> $.heat_resource_tree.resources.values().where($.name = $chain_name).first({}) %>
on-success:
- get_service_chain: <% $.role_chain %>
- set_status_failed_get_role_chain: <% not $.role_chain %>
get_service_chain:
publish:
service_chain: <% let(resources => $.role_chain.resources)-> $.resource_chains.where($resources.contains($.id)).first('') %>
on-success:
- get_role_services: <% $.service_chain %>
- set_status_failed_get_service_chain: <% not $.service_chain %>
get_role_services:
publish:
role_services: <% let(resources => $.heat_resource_tree.resources)-> $.service_chain.resources.select($resources.get($)) %>
on-success:
- check_features: <% $.role_services %>
- set_status_failed_get_role_services: <% not $.role_services %>
check_features:
on-success: build_feature_dict
publish:
# The role supports the DPDK feature if the NeutronDatapathType parameter is present
dpdk: <% let(resources => $.heat_resource_tree.resources) -> $.role_services.any($.get('parameters', []).contains('NeutronDatapathType') or $.get('resources', []).select($resources.get($)).any($.get('parameters', []).contains('NeutronDatapathType'))) %>
# The role supports the SRIOV feature if it includes NeutronSriovAgent services.
sriov: <% $.role_services.any($.get('type', '').endsWith('::NeutronSriovAgent')) %>
# The role supports the HCI feature if it includes both NovaCompute and CephOSD services.
hci: <% $.role_services.any($.get('type', '').endsWith('::NovaCompute')) and $.role_services.any($.get('type', '').endsWith('::CephOSD')) %>
build_feature_dict:
on-success: filter_features
publish:
# HOST is derived, not detected: it is enabled whenever DPDK or SRIOV is.
feature_dict: <% dict(DPDK => $.dpdk, SRIOV => $.sriov, HOST => ($.dpdk or $.sriov), HCI => $.hci) %>
filter_features:
publish:
# The list of features that are enabled (i.e. are true in the feature_dict).
role_features: <% let(feature_dict => $.feature_dict)-> $feature_dict.keys().where($feature_dict[$]) %>
set_status_failed_get_resource_chains:
publish:
message: <% 'Unable to locate any resource chains in the heat resource tree' %>
on-success: fail
set_status_failed_get_role_chain:
publish:
message: <% "Unable to determine the service chain resource for role '{0}'".format($.role_name) %>
on-success: fail
set_status_failed_get_service_chain:
publish:
message: <% "Unable to determine the service chain for role '{0}'".format($.role_name) %>
on-success: fail
set_status_failed_get_role_services:
publish:
message: <% "Unable to determine list of services for role '{0}'".format($.role_name) %>
on-success: fail

View File

@ -1,836 +0,0 @@
---
version: '2.0'
name: tripleo.derive_params_formulas.v1
description: TripleO Workflows to derive deployment parameters from the introspected data
workflows:
# Derives the OVS-DPDK tuning parameters (OvsPmdCoreList, OvsDpdkCoreList,
# OvsDpdkSocketMemory, plus the Neutron physnet/tunnel NUMA mappings) from the
# role's network config and the node's introspected NUMA topology.
dpdk_derive_params:
description: >
Workflow to derive parameters for DPDK service.
input:
- plan
- role_name
- heat_resource_tree
- hw_data # introspection data
- user_inputs
- derived_parameters: {}
output:
derived_parameters: <% $.derived_parameters.mergeWith($.get('dpdk_parameters', {})) %>
tags:
- tripleo-common-managed
tasks:
get_network_config:
action: tripleo.parameters.get_network_config
input:
container: <% $.plan %>
role_name: <% $.role_name %>
publish:
network_configs: <% task().result.get('network_config', []) %>
on-success: get_dpdk_nics_numa_info
on-error: set_status_failed_get_network_config
get_dpdk_nics_numa_info:
action: tripleo.derive_params.get_dpdk_nics_numa_info
input:
network_configs: <% $.network_configs %>
inspect_data: <% $.hw_data %>
publish:
dpdk_nics_numa_info: <% task().result %>
on-success:
# TODO: Need to remove condtions here
# adding condition and throw error in action for empty check
- get_dpdk_nics_numa_nodes: <% $.dpdk_nics_numa_info %>
- set_status_failed_get_dpdk_nics_numa_info: <% not $.dpdk_nics_numa_info %>
on-error: set_status_failed_on_error_get_dpdk_nics_numa_info
# Distinct, sorted list of NUMA nodes that host a DPDK NIC.
get_dpdk_nics_numa_nodes:
publish:
dpdk_nics_numa_nodes: <% $.dpdk_nics_numa_info.groupBy($.numa_node).select($[0]).orderBy($) %>
on-success:
- get_numa_nodes: <% $.dpdk_nics_numa_nodes %>
- set_status_failed_get_dpdk_nics_numa_nodes: <% not $.dpdk_nics_numa_nodes %>
get_numa_nodes:
publish:
numa_nodes: <% $.hw_data.numa_topology.ram.select($.numa_node).orderBy($) %>
on-success:
- get_num_phy_cores_per_numa_for_pmd: <% $.numa_nodes %>
- set_status_failed_get_numa_nodes: <% not $.numa_nodes %>
get_num_phy_cores_per_numa_for_pmd:
publish:
num_phy_cores_per_numa_node_for_pmd: <% $.user_inputs.get('num_phy_cores_per_numa_node_for_pmd', 0) %>
on-success:
- get_num_cores_per_numa_nodes: <% isInteger($.num_phy_cores_per_numa_node_for_pmd) and $.num_phy_cores_per_numa_node_for_pmd > 0 %>
- set_status_failed_get_num_phy_cores_per_numa_for_pmd_invalid: <% not isInteger($.num_phy_cores_per_numa_node_for_pmd) %>
- set_status_failed_get_num_phy_cores_per_numa_for_pmd_not_provided: <% $.num_phy_cores_per_numa_node_for_pmd = 0 %>
# For NUMA node with DPDK nic, number of cores should be used from user input
# For NUMA node without DPDK nic, number of cores should be 1
get_num_cores_per_numa_nodes:
publish:
num_cores_per_numa_nodes: <% let(dpdk_nics_nodes => $.dpdk_nics_numa_nodes, cores => $.num_phy_cores_per_numa_node_for_pmd) -> $.numa_nodes.select(switch($ in $dpdk_nics_nodes => $cores, not $ in $dpdk_nics_nodes => 1)) %>
on-success: get_pmd_cpus
get_pmd_cpus:
action: tripleo.derive_params.get_dpdk_core_list
input:
inspect_data: <% $.hw_data %>
numa_nodes_cores_count: <% $.num_cores_per_numa_nodes %>
publish:
pmd_cpus: <% task().result %>
on-success:
- get_pmd_cpus_range_list: <% $.pmd_cpus %>
- set_status_failed_get_pmd_cpus: <% not $.pmd_cpus %>
on-error: set_status_failed_on_error_get_pmd_cpus
# Convert the PMD cpu list into compact range notation (e.g. '2,3,4' -> '2-4').
get_pmd_cpus_range_list:
action: tripleo.derive_params.convert_number_to_range_list
input:
num_list: <% $.pmd_cpus %>
publish:
pmd_cpus: <% task().result %>
on-success: get_host_cpus
on-error: set_status_failed_get_pmd_cpus_range_list
get_host_cpus:
workflow: tripleo.derive_params_formulas.v1.get_host_cpus
input:
role_name: <% $.role_name %>
hw_data: <% $.hw_data %>
publish:
host_cpus: <% task().result.get('host_cpus', '') %>
on-success: get_sock_mem
on-error: set_status_failed_get_host_cpus
get_sock_mem:
action: tripleo.derive_params.get_dpdk_socket_memory
input:
dpdk_nics_numa_info: <% $.dpdk_nics_numa_info %>
numa_nodes: <% $.numa_nodes %>
overhead: <% $.user_inputs.get('overhead', 800) %>
packet_size_in_buffer: <% 4096*64 %>
publish:
sock_mem: <% task().result %>
on-success:
- get_neutron_bridge_mappings: <% $.sock_mem %>
- set_status_failed_get_sock_mem: <% not $.sock_mem %>
on-error: set_status_failed_on_error_get_sock_mem
get_neutron_bridge_mappings:
publish:
neutron_bridge_mappings: <% $.heat_resource_tree.parameters.get('NeutronBridgeMappings', {}).get('default', '') %>
on-success:
- get_phy_nw_bridge_mappings: <% $.neutron_bridge_mappings %>
- get_neutron_network_type: <% not $.neutron_bridge_mappings %>
# Gets the physical network and ovs bridge mappings
get_phy_nw_bridge_mappings:
publish:
phy_nw_bridge_mappings: <% $.neutron_bridge_mappings.split(',').select(let(mapping => $.split(':')) -> dict($mapping[0] => $mapping[1])).sum() %>
on-success: get_bridge_numa_nodes_mappings
# Gets the ovs bridge and NUMA nodes mappings
get_bridge_numa_nodes_mappings:
publish:
bridge_numa_nodes_mappings: <% $.dpdk_nics_numa_info.groupBy($.bridge_name).select(dict($[0]=>$[1].select($.numa_node).distinct())).sum() %>
on-success: get_phy_nw_numa_nodes_mappings
# Gets the physical network and NUMA nodes mappings
get_phy_nw_numa_nodes_mappings:
publish:
phy_nw_numa_nodes_mappings: <% let(nw_bridge_mappings => $.phy_nw_bridge_mappings) -> $.bridge_numa_nodes_mappings.items().select(let(br => $[0], nodes => $[1]) -> $nw_bridge_mappings.items().where($[1]=$br).select(dict($[0] => $nodes)).sum()).sum() %>
on-success: get_neutron_network_type
get_neutron_network_type:
publish:
neutron_network_type: <% $.heat_resource_tree.parameters.get('NeutronNetworkType', {}).get('default', '') %>
on-success:
- get_tunnel_numa_nodes_mappings: <% 'vxlan' in $.neutron_network_type %>
- get_dpdk_parameters: <% not 'vxlan' in $.neutron_network_type %>
# Gets the list of NUMA nodes associated to all tunneled networks
# OVS-DPDK on VxLAN tunnel requires Tenant Network IP to be applied on the OVS User Bridge itself.
# With this assumption, if the IP is set on the OVS User Bridge, then OVS-DPDK is used for VxLAN tunnels also.
# Here dpdk_nics_numa_info will have the OVS User Bridges with DPDK ports only.
get_tunnel_numa_nodes_mappings:
publish:
tunnel_numa_nodes_mappings: <% $.dpdk_nics_numa_info.where($.addresses.any($.ip_netmask)).select($.numa_node).distinct() %>
on-success: get_dpdk_parameters
# Assemble the final '<RoleName>Parameters' dictionary; the two add_* tasks
# below optionally merge in the NUMA mapping entries computed above.
get_dpdk_parameters:
publish:
dpdk_parameters: <% dict(concat($.role_name, 'Parameters') => dict('OvsPmdCoreList' => $.get('pmd_cpus', ''), 'OvsDpdkCoreList' => $.get('host_cpus', ''), 'OvsDpdkSocketMemory' => $.get('sock_mem', ''))) %>
on-success:
- add_phy_nw_numa_nodes_mappings: <% $.get('phy_nw_numa_nodes_mappings', {}) %>
- add_tunnel_numa_nodes_mappings: <% $.get('tunnel_numa_nodes_mappings', []) %>
add_phy_nw_numa_nodes_mappings:
publish:
dpdk_parameters: <% $.dpdk_parameters.mergeWith(dict(concat($.role_name, 'Parameters') => dict('NeutronPhysnetNUMANodesMapping' => $.get('phy_nw_numa_nodes_mappings', {})))) %>
add_tunnel_numa_nodes_mappings:
publish:
dpdk_parameters: <% $.dpdk_parameters.mergeWith(dict(concat($.role_name, 'Parameters') => dict('NeutronTunnelNUMANodes' => $.get('tunnel_numa_nodes_mappings', [])))) %>
# Failure handlers: publish FAILED + diagnostic message, then fail.
set_status_failed_get_network_config:
publish:
status: FAILED
message: <% task(get_network_config).result %>
on-success: fail
set_status_failed_get_dpdk_nics_numa_info:
publish:
status: FAILED
message: "Unable to determine DPDK NIC's NUMA information"
on-success: fail
set_status_failed_on_error_get_dpdk_nics_numa_info:
publish:
status: FAILED
message: <% task(get_dpdk_nics_numa_info).result %>
on-success: fail
set_status_failed_get_dpdk_nics_numa_nodes:
publish:
status: FAILED
message: "Unable to determine DPDK NIC's numa nodes"
on-success: fail
set_status_failed_get_numa_nodes:
publish:
status: FAILED
message: 'Unable to determine available NUMA nodes'
on-success: fail
set_status_failed_get_num_phy_cores_per_numa_for_pmd_invalid:
publish:
status: FAILED
message: <% "num_phy_cores_per_numa_node_for_pmd user input '{0}' is invalid".format($.num_phy_cores_per_numa_node_for_pmd) %>
on-success: fail
set_status_failed_get_num_phy_cores_per_numa_for_pmd_not_provided:
publish:
status: FAILED
message: 'num_phy_cores_per_numa_node_for_pmd user input is not provided'
on-success: fail
set_status_failed_get_pmd_cpus:
publish:
status: FAILED
message: 'Unable to determine OvsPmdCoreList parameter'
on-success: fail
set_status_failed_on_error_get_pmd_cpus:
publish:
status: FAILED
message: <% task(get_pmd_cpus).result %>
on-success: fail
set_status_failed_get_pmd_cpus_range_list:
publish:
status: FAILED
message: <% task(get_pmd_cpus_range_list).result %>
on-success: fail
set_status_failed_get_host_cpus:
publish:
status: FAILED
message: <% task(get_host_cpus).result.get('message', '') %>
on-success: fail
set_status_failed_get_sock_mem:
publish:
status: FAILED
message: 'Unable to determine OvsDpdkSocketMemory parameter'
on-success: fail
set_status_failed_on_error_get_sock_mem:
publish:
status: FAILED
message: <% task(get_sock_mem).result %>
on-success: fail
# Derives parameters for SR-IOV roles: only computes the host CPU list and
# stashes it under the transient 'SriovHostCpusList' key, which
# host_derive_params later consumes and strips from the final result.
sriov_derive_params:
description: >
This workflow derives parameters for the SRIOV feature.
input:
- role_name
- hw_data # introspection data
- derived_parameters: {}
output:
derived_parameters: <% $.derived_parameters.mergeWith($.get('sriov_parameters', {})) %>
tags:
- tripleo-common-managed
tasks:
get_host_cpus:
workflow: tripleo.derive_params_formulas.v1.get_host_cpus
input:
role_name: <% $.role_name %>
hw_data: <% $.hw_data %>
publish:
host_cpus: <% task().result.get('host_cpus', '') %>
on-success: get_sriov_parameters
on-error: set_status_failed_get_host_cpus
get_sriov_parameters:
publish:
# SriovHostCpusList parameter is added temporarily and it's removed later from derived parameters result.
sriov_parameters: <% dict(concat($.role_name, 'Parameters') => dict('SriovHostCpusList' => $.get('host_cpus', ''))) %>
set_status_failed_get_host_cpus:
publish:
status: FAILED
message: <% task(get_host_cpus).result.get('message', '') %>
on-success: fail
# Shared helper workflow used by the DPDK and SRIOV formulas: asks the
# tripleo.derive_params.get_host_cpus_list action for the host-reserved CPUs
# and converts the raw number list to range notation.
# NOTE(review): both callers in this workbook also pass a 'role_name' input,
# but only 'hw_data' is declared here — confirm Mistral accepts the extra
# input, otherwise the callers' input blocks need trimming.
get_host_cpus:
description: >
Fetching the host CPU list from the introspection data, and then converting the raw list into a range list.
input:
- hw_data # introspection data
output:
host_cpus: <% $.get('host_cpus', '') %>
tags:
- tripleo-common-managed
tasks:
get_host_cpus:
action: tripleo.derive_params.get_host_cpus_list inspect_data=<% $.hw_data %>
publish:
host_cpus: <% task().result %>
on-success:
- get_host_cpus_range_list: <% $.host_cpus %>
- set_status_failed_get_host_cpus: <% not $.host_cpus %>
on-error: set_status_failed_on_error_get_host_cpus
# e.g. '0,1,2,3' -> '0-3'
get_host_cpus_range_list:
action: tripleo.derive_params.convert_number_to_range_list
input:
num_list: <% $.host_cpus %>
publish:
host_cpus: <% task().result %>
on-error: set_status_failed_get_host_cpus_range_list
set_status_failed_get_host_cpus:
publish:
status: FAILED
message: 'Unable to determine host cpus'
on-success: fail
set_status_failed_on_error_get_host_cpus:
publish:
status: FAILED
message: <% task(get_host_cpus).result %>
on-success: fail
set_status_failed_get_host_cpus_range_list:
publish:
status: FAILED
message: <% task(get_host_cpus_range_list).result %>
on-success: fail
host_derive_params:
description: >
This workflow derives parameters for the Host process, and is mainly associated with CPU pinning and huge memory pages.
This workflow can be dependent on any feature or also can be invoked individually as well.
input:
- role_name
- hw_data # introspection data
- user_inputs
- derived_parameters: {}
output:
derived_parameters: <% $.derived_parameters.mergeWith($.get('host_parameters', {})) %>
tags:
- tripleo-common-managed
tasks:
get_cpus:
publish:
cpus: <% $.hw_data.numa_topology.cpus %>
on-success:
- get_role_derive_params: <% $.cpus %>
- set_status_failed_get_cpus: <% not $.cpus %>
get_role_derive_params:
publish:
role_derive_params: <% $.derived_parameters.get(concat($.role_name, 'Parameters'), {}) %>
# removing the role parameters (eg. ComputeParameters) in derived_parameters dictionary since already copied in role_derive_params.
derived_parameters: <% $.derived_parameters.delete(concat($.role_name, 'Parameters')) %>
on-success: get_host_cpus
get_host_cpus:
publish:
host_cpus: <% $.role_derive_params.get('OvsDpdkCoreList', '') or $.role_derive_params.get('SriovHostCpusList', '') %>
# SriovHostCpusList parameter is added temporarily for host_cpus and not needed in derived_parameters result.
# SriovHostCpusList parameter is deleted in derived_parameters list and adding the updated role parameters
# back in the derived_parameters.
derived_parameters: <% $.derived_parameters + dict(concat($.role_name, 'Parameters') => $.role_derive_params.delete('SriovHostCpusList')) %>
on-success: get_host_dpdk_combined_cpus
get_host_dpdk_combined_cpus:
publish:
host_dpdk_combined_cpus: <% let(pmd_cpus => $.role_derive_params.get('OvsPmdCoreList', '')) -> switch($pmd_cpus => concat($pmd_cpus, ',', $.host_cpus), not $pmd_cpus => $.host_cpus) %>
reserved_cpus: []
on-success:
- get_host_dpdk_combined_cpus_num_list: <% $.host_dpdk_combined_cpus %>
- set_status_failed_get_host_dpdk_combined_cpus: <% not $.host_dpdk_combined_cpus %>
get_host_dpdk_combined_cpus_num_list:
action: tripleo.derive_params.convert_range_to_number_list
input:
range_list: <% $.host_dpdk_combined_cpus %>
publish:
host_dpdk_combined_cpus: <% task().result %>
reserved_cpus: <% task().result.split(',') %>
on-success: get_nova_cpus
on-error: set_status_failed_get_host_dpdk_combined_cpus_num_list
get_nova_cpus:
publish:
nova_cpus: <% let(reserved_cpus => $.reserved_cpus) -> $.cpus.select($.thread_siblings).flatten().where(not (str($) in $reserved_cpus)).join(',') %>
on-success:
- get_isol_cpus: <% $.nova_cpus %>
- set_status_failed_get_nova_cpus: <% not $.nova_cpus %>
# concatinates OvsPmdCoreList range format and NovaVcpuPinSet in range format. it may not be in perfect range format.
# example: concatinates '12-15,19' and 16-18' ranges '12-15,19,16-18'
get_isol_cpus:
publish:
isol_cpus: <% let(pmd_cpus => $.role_derive_params.get('OvsPmdCoreList','')) -> switch($pmd_cpus => concat($pmd_cpus, ',', $.nova_cpus), not $pmd_cpus => $.nova_cpus) %>
on-success: get_isol_cpus_num_list
# Gets the isol_cpus in the number list
# example: '12-15,19,16-18' into '12,13,14,15,16,17,18,19'
get_isol_cpus_num_list:
action: tripleo.derive_params.convert_range_to_number_list
input:
range_list: <% $.isol_cpus %>
publish:
isol_cpus: <% task().result %>
on-success: get_nova_cpus_range_list
on-error: set_status_failed_get_isol_cpus_num_list
get_nova_cpus_range_list:
action: tripleo.derive_params.convert_number_to_range_list
input:
num_list: <% $.nova_cpus %>
publish:
nova_cpus: <% task().result %>
on-success: get_isol_cpus_range_list
on-error: set_status_failed_get_nova_cpus_range_list
# converts number format isol_cpus into range format
# example: '12,13,14,15,16,17,18,19' into '12-19'
get_isol_cpus_range_list:
action: tripleo.derive_params.convert_number_to_range_list
input:
num_list: <% $.isol_cpus %>
publish:
isol_cpus: <% task().result %>
on-success: get_host_mem
on-error: set_status_failed_get_isol_cpus_range_list
      # Memory (MB) to reserve for the host; defaults to 4096 when the
      # 'host_mem_default' user input is not provided.
      get_host_mem:
        publish:
          host_mem: <% $.user_inputs.get('host_mem_default', 4096) %>
        on-success: check_default_hugepage_supported
      # 1GB huge pages require the 'pdpe1gb' CPU flag in the introspected
      # hardware data; fail the workflow if it is absent.
      check_default_hugepage_supported:
        publish:
          default_hugepage_supported: <% $.hw_data.get('inventory', {}).get('cpu', {}).get('flags', []).contains('pdpe1gb') %>
        on-success:
          - get_total_memory: <% $.default_hugepage_supported %>
          - set_status_failed_check_default_hugepage_supported: <% not $.default_hugepage_supported %>
      get_total_memory:
        publish:
          total_memory: <% $.hw_data.get('inventory', {}).get('memory', {}).get('physical_mb', 0) %>
        on-success:
          - get_hugepage_allocation_percentage: <% $.total_memory %>
          - set_status_failed_get_total_memory: <% not $.total_memory %>
      # The percentage user input must be a positive integer; zero or a
      # non-integer value routes to a dedicated failure task.
      get_hugepage_allocation_percentage:
        publish:
          huge_page_allocation_percentage: <% $.user_inputs.get('huge_page_allocation_percentage', 0) %>
        on-success:
          - get_hugepages: <% isInteger($.huge_page_allocation_percentage) and $.huge_page_allocation_percentage > 0 %>
          - set_status_failed_get_hugepage_allocation_percentage_invalid: <% not isInteger($.huge_page_allocation_percentage) %>
          - set_status_failed_get_hugepage_allocation_percentage_not_provided: <% $.huge_page_allocation_percentage = 0 %>
      # Number of 1GB huge pages: the requested percentage of total memory
      # expressed in GB, after subtracting 4 GB kept for the host.
      get_hugepages:
        publish:
          hugepages: <% let(huge_page_perc => float($.huge_page_allocation_percentage)/100)-> int((($.total_memory/1024)-4) * $huge_page_perc) %>
        on-success:
          - get_cpu_model: <% $.hugepages %>
          - set_status_failed_get_hugepages: <% not $.hugepages %>
      get_cpu_model:
        publish:
          intel_cpu_model: <% $.hw_data.get('inventory', {}).get('cpu', {}).get('model_name', '').startsWith('Intel') %>
        on-success: get_iommu_info
      # IOMMU kernel arguments are only emitted for Intel CPUs.
      get_iommu_info:
        publish:
          iommu_info: <% switch($.intel_cpu_model => 'intel_iommu=on iommu=pt', not $.intel_cpu_model => '') %>
        on-success: get_kernel_args
      get_kernel_args:
        publish:
          kernel_args: <% concat('default_hugepagesz=1GB hugepagesz=1G ', 'hugepages=', str($.hugepages), ' ', $.iommu_info, ' isolcpus=', $.isol_cpus) %>
        on-success: get_host_parameters
      # Assemble the derived <RoleName>Parameters dict that the workflow
      # publishes as its result.
      get_host_parameters:
        publish:
          host_parameters: <% dict(concat($.role_name, 'Parameters') => dict('NovaComputeCpuDedicatedSet' => $.get('nova_cpus', ''), 'NovaComputeCpuSharedSet' => $.get('host_cpus', ''), 'NovaReservedHostMemory' => $.get('host_mem', ''), 'KernelArgs' => $.get('kernel_args', ''), 'IsolCpusList' => $.get('isol_cpus', ''))) %>
      # Each task below publishes a FAILED status with a descriptive message
      # and then terminates the workflow via the 'fail' engine command.
      set_status_failed_get_cpus:
        publish:
          status: FAILED
          message: "Unable to determine CPU's on NUMA nodes"
        on-success: fail
      set_status_failed_get_host_dpdk_combined_cpus:
        publish:
          status: FAILED
          message: 'Unable to combine host and dpdk cpus list'
        on-success: fail
      set_status_failed_get_host_dpdk_combined_cpus_num_list:
        publish:
          status: FAILED
          message: <% task(get_host_dpdk_combined_cpus_num_list).result %>
        on-success: fail
      set_status_failed_get_nova_cpus:
        publish:
          status: FAILED
          message: 'Unable to determine nova vcpu pin set'
        on-success: fail
      set_status_failed_get_nova_cpus_range_list:
        publish:
          status: FAILED
          message: <% task(get_nova_cpus_range_list).result %>
        on-success: fail
      set_status_failed_get_isol_cpus_num_list:
        publish:
          status: FAILED
          message: <% task(get_isol_cpus_num_list).result %>
        on-success: fail
      set_status_failed_get_isol_cpus_range_list:
        publish:
          status: FAILED
          message: <% task(get_isol_cpus_range_list).result %>
        on-success: fail
      set_status_failed_check_default_hugepage_supported:
        publish:
          status: FAILED
          message: 'default huge page size 1GB is not supported'
        on-success: fail
      set_status_failed_get_total_memory:
        publish:
          status: FAILED
          message: 'Unable to determine total memory'
        on-success: fail
      set_status_failed_get_hugepage_allocation_percentage_invalid:
        publish:
          status: FAILED
          message: <% "huge_page_allocation_percentage user input '{0}' is invalid".format($.huge_page_allocation_percentage) %>
        on-success: fail
      set_status_failed_get_hugepage_allocation_percentage_not_provided:
        publish:
          status: FAILED
          message: 'huge_page_allocation_percentage user input is not provided'
        on-success: fail
      set_status_failed_get_hugepages:
        publish:
          status: FAILED
          message: 'Unable to determine huge pages'
        on-success: fail
  # Derives NovaReservedHostMemory and nova::cpu_allocation_ratio for a
  # hyperconverged role (Nova compute colocated with Ceph OSDs), based on
  # the selected hci_profile, introspection data and OSD configuration.
  hci_derive_params:
    description: Derive the deployment parameters for HCI
    input:
      - role_name
      - environment_parameters
      - heat_resource_tree
      - introspection_data
      - user_inputs
      - derived_parameters: {}
    output:
      derived_parameters: <% $.derived_parameters.mergeWith($.get('hci_parameters', {})) %>
    tags:
      - tripleo-common-managed
    tasks:
      get_hci_inputs:
        publish:
          hci_profile: <% $.user_inputs.get('hci_profile', '') %>
          hci_profile_config: <% $.user_inputs.get('hci_profile_config', {}) %>
          MB_PER_GB: 1024
        on-success:
          - get_average_guest_memory_size_in_mb: <% $.hci_profile and $.hci_profile_config.get($.hci_profile, {}) %>
          - set_failed_invalid_hci_profile: <% $.hci_profile and not $.hci_profile_config.get($.hci_profile, {}) %>
        # When no hci_profile is specified, the workflow terminates without deriving any HCI parameters.
      get_average_guest_memory_size_in_mb:
        publish:
          average_guest_memory_size_in_mb: <% $.hci_profile_config.get($.hci_profile, {}).get('average_guest_memory_size_in_mb', 0) %>
        on-success:
          - get_average_guest_cpu_utilization_percentage: <% isInteger($.average_guest_memory_size_in_mb) %>
          - set_failed_invalid_average_guest_memory_size_in_mb: <% not isInteger($.average_guest_memory_size_in_mb) %>
      get_average_guest_cpu_utilization_percentage:
        publish:
          average_guest_cpu_utilization_percentage: <% $.hci_profile_config.get($.hci_profile, {}).get('average_guest_cpu_utilization_percentage', 0) %>
        on-success:
          - get_gb_overhead_per_guest: <% isInteger($.average_guest_cpu_utilization_percentage) %>
          - set_failed_invalid_average_guest_cpu_utilization_percentage: <% not isInteger($.average_guest_cpu_utilization_percentage) %>
      # The next three tasks read tunable overheads from user_inputs,
      # validating each is numeric before continuing.
      get_gb_overhead_per_guest:
        publish:
          gb_overhead_per_guest: <% $.user_inputs.get('gb_overhead_per_guest', 0.5) %>
        on-success:
          - get_gb_per_osd: <% isNumber($.gb_overhead_per_guest) %>
          - set_failed_invalid_gb_overhead_per_guest: <% not isNumber($.gb_overhead_per_guest) %>
      get_gb_per_osd:
        publish:
          gb_per_osd: <% $.user_inputs.get('gb_per_osd', 5) %>
        on-success:
          - get_cores_per_osd: <% isNumber($.gb_per_osd) %>
          - set_failed_invalid_gb_per_osd: <% not isNumber($.gb_per_osd) %>
      get_cores_per_osd:
        publish:
          cores_per_osd: <% $.user_inputs.get('cores_per_osd', 1.0) %>
        on-success:
          - get_extra_configs: <% isNumber($.cores_per_osd) %>
          - set_failed_invalid_cores_per_osd: <% not isNumber($.cores_per_osd) %>
      # Collect the role-specific and global parameter/ExtraConfig maps that
      # later tasks consult for OSD and NovaVcpuPinSet settings.
      get_extra_configs:
        publish:
          extra_config: <% $.environment_parameters.get('ExtraConfig', {}) %>
          role_extra_config: <% $.environment_parameters.get(concat($.role_name, 'ExtraConfig'), {}) %>
          role_env_params: <% $.environment_parameters.get(concat($.role_name, 'Parameters'), {}) %>
          role_derive_params: <% $.derived_parameters.get(concat($.role_name, 'Parameters'), {}) %>
        on-success: calc_osds
      # Count OSDs from CephAnsibleDisksConfig (devices + lvm_volumes).
      calc_osds:
        publish:
          num_dev: <% $.heat_resource_tree.parameters.get('CephAnsibleDisksConfig', {}).get('default', {}).get('devices', []).count() %>
          num_lvm: <% $.heat_resource_tree.parameters.get('CephAnsibleDisksConfig', {}).get('default', {}).get('lvm_volumes', []).count() %>
        on-success: get_num_osds
      get_num_osds:
        publish:
          num_osds: <% int($.num_dev + $.num_lvm) %>
        on-success:
          - get_memory_mb: <% $.num_osds %>
          # If there's no CephAnsibleDisksConfig then look for OSD configuration in hiera data
          - get_num_osds_from_hiera: <% not $.num_osds %>
      get_num_osds_from_hiera:
        publish:
          num_osds: <% $.role_extra_config.get('ceph::profile::params::osds', $.extra_config.get('ceph::profile::params::osds', {})).keys().count() %>
        on-success:
          - get_memory_mb: <% $.num_osds %>
          - set_failed_no_osds: <% not $.num_osds %>
      get_memory_mb:
        publish:
          memory_mb: <% $.introspection_data.get('memory_mb', 0) %>
        on-success:
          - get_nova_vcpu_pin_set: <% $.memory_mb %>
          - set_failed_get_memory_mb: <% not $.memory_mb %>
      # Determine the number of CPU cores available to Nova and Ceph. If
      # NovaVcpuPinSet is defined then use the number of vCPUs in the set,
      # otherwise use all of the cores identified in the introspection data.
      get_nova_vcpu_pin_set:
        publish:
          # NovaVcpuPinSet can be defined in multiple locations, and it's
          # important to select the value in order of precedence:
          #
          # 1) User specified value for this role
          # 2) User specified default value for all roles
          # 3) Value derived by another derived parameters workflow
          nova_vcpu_pin_set: <% $.role_env_params.get('NovaVcpuPinSet', $.environment_parameters.get('NovaVcpuPinSet', $.role_derive_params.get('NovaVcpuPinSet', ''))) %>
        on-success:
          - get_nova_vcpu_count: <% $.nova_vcpu_pin_set %>
          - get_num_cores: <% not $.nova_vcpu_pin_set %>
      # Expand the pin set ranges and count the comma-separated entries.
      get_nova_vcpu_count:
        action: tripleo.derive_params.convert_range_to_number_list
        input:
          range_list: <% $.nova_vcpu_pin_set %>
        publish:
          num_cores: <% task().result.split(',').count() %>
        on-success: calculate_nova_parameters
        on-error: set_failed_get_nova_vcpu_count
      get_num_cores:
        publish:
          num_cores: <% $.introspection_data.get('cpus', 0) %>
        on-success:
          - calculate_nova_parameters: <% $.num_cores %>
          - set_failed_get_num_cores: <% not $.num_cores %>
      # HCI calculations are broken into multiple steps. This is necessary
      # because variables published by a Mistral task are not available
      # for use by that same task. Variables computed and published in a task
      # are only available in subsequent tasks.
      #
      # The HCI calculations compute two Nova parameters:
      # - reserved_host_memory
      # - cpu_allocation_ratio
      #
      # The reserved_host_memory calculation computes the amount of memory
      # that needs to be reserved for Ceph and the total amount of "guest
      # overhead" memory that is based on the anticipated number of guests.
      # Pseudo-code for the calculation (disregarding MB and GB units) is
      # as follows:
      #
      #   ceph_memory = mem_per_osd * num_osds
      #   nova_memory = total_memory - ceph_memory
      #   num_guests = nova_memory /
      #                (average_guest_memory_size + overhead_per_guest)
      #   reserved_memory = ceph_memory + (num_guests * overhead_per_guest)
      #
      # The cpu_allocation_ratio calculation is similar in that it takes into
      # account the number of cores that must be reserved for Ceph.
      #
      #   ceph_cores = cores_per_osd * num_osds
      #   guest_cores = num_cores - ceph_cores
      #   guest_vcpus = guest_cores / average_guest_utilization
      #   cpu_allocation_ratio = guest_vcpus / num_cores
      calculate_nova_parameters:
        publish:
          avg_guest_util: <% $.average_guest_cpu_utilization_percentage / 100.0 %>
          avg_guest_size_gb: <% $.average_guest_memory_size_in_mb / float($.MB_PER_GB) %>
          memory_gb: <% $.memory_mb / float($.MB_PER_GB) %>
          ceph_mem_gb: <% $.gb_per_osd * $.num_osds %>
          nonceph_cores: <% $.num_cores - int($.cores_per_osd * $.num_osds) %>
        on-success: calc_step_2
      calc_step_2:
        publish:
          num_guests: <% int(($.memory_gb - $.ceph_mem_gb) / ($.avg_guest_size_gb + $.gb_overhead_per_guest)) %>
          guest_vcpus: <% $.nonceph_cores / $.avg_guest_util %>
        on-success: calc_step_3
      calc_step_3:
        publish:
          reserved_host_memory: <% $.MB_PER_GB * int($.ceph_mem_gb + ($.num_guests * $.gb_overhead_per_guest)) %>
          cpu_allocation_ratio: <% $.guest_vcpus / $.num_cores %>
        on-success: validate_results
      validate_results:
        publish:
          # Verify whether HCI is viable:
          # - At least 80% of the memory is reserved for Ceph and guest overhead
          # - At least half of the CPU cores must be available to Nova
          mem_ok: <% $.reserved_host_memory <= ($.memory_mb * 0.8) %>
          cpu_ok: <% $.cpu_allocation_ratio >= 0.5 %>
        on-success:
          - set_failed_insufficient_mem: <% not $.mem_ok %>
          - set_failed_insufficient_cpu: <% not $.cpu_ok %>
          - publish_hci_parameters: <% $.mem_ok and $.cpu_ok %>
      publish_hci_parameters:
        publish:
          # TODO(abishop): Update this when the cpu_allocation_ratio can be set
          # via a THT parameter (no such parameter currently exists). Until a
          # THT parameter exists, use hiera data to set the cpu_allocation_ratio.
          hci_parameters: <% dict(concat($.role_name, 'Parameters') => dict('NovaReservedHostMemory' => $.reserved_host_memory)) + dict(concat($.role_name, 'ExtraConfig') => dict('nova::cpu_allocation_ratio' => $.cpu_allocation_ratio)) %>
      # Failure tasks: each publishes a human-readable message and then
      # terminates the workflow via the 'fail' engine command.
      set_failed_invalid_hci_profile:
        publish:
          message: "'<% $.hci_profile %>' is not a valid HCI profile."
        on-success: fail
      set_failed_invalid_average_guest_memory_size_in_mb:
        publish:
          message: "'<% $.average_guest_memory_size_in_mb %>' is not a valid average_guest_memory_size_in_mb value."
        on-success: fail
      set_failed_invalid_gb_overhead_per_guest:
        publish:
          message: "'<% $.gb_overhead_per_guest %>' is not a valid gb_overhead_per_guest value."
        on-success: fail
      set_failed_invalid_gb_per_osd:
        publish:
          message: "'<% $.gb_per_osd %>' is not a valid gb_per_osd value."
        on-success: fail
      set_failed_invalid_cores_per_osd:
        publish:
          message: "'<% $.cores_per_osd %>' is not a valid cores_per_osd value."
        on-success: fail
      set_failed_invalid_average_guest_cpu_utilization_percentage:
        publish:
          message: "'<% $.average_guest_cpu_utilization_percentage %>' is not a valid average_guest_cpu_utilization_percentage value."
        on-success: fail
      set_failed_no_osds:
        publish:
          message: "No Ceph OSDs found in the overcloud definition ('ceph::profile::params::osds')."
        on-success: fail
      set_failed_get_memory_mb:
        publish:
          message: "Unable to determine the amount of physical memory (no 'memory_mb' found in introspection_data)."
        on-success: fail
      set_failed_get_nova_vcpu_count:
        publish:
          message: <% task(get_nova_vcpu_count).result %>
        on-success: fail
      set_failed_get_num_cores:
        publish:
          message: "Unable to determine the number of CPU cores (no 'cpus' found in introspection_data)."
        on-success: fail
      set_failed_insufficient_mem:
        publish:
          message: "<% $.memory_mb %> MB is not enough memory to run hyperconverged."
        on-success: fail
      set_failed_insufficient_cpu:
        publish:
          message: "<% $.num_cores %> CPU cores are not enough to run hyperconverged."
        on-success: fail

View File

@ -1,155 +0,0 @@
---
version: '2.0'
name: tripleo.messaging.v1
description: TripleO Zaqar Workflows
workflows:
  send:
    description: >-
      Send a message to a Zaqar queue and optionally persist it to Swift
      This workflow sends a standard message to Zaqar (taking care of the
      error handling and retry logic) and optionally persists the message
      to Swift. The output of the workflow directly matches the input, this
      means it can be used to send a message and set the output of the parent
      calling workflow.
      If plan_name is provided the message will be persisted to Swift. In a
      container named "{plan_name}-messages". The swift objects will be named:
      {TYPE}/{TIMESTAMP}.yaml
      If a deployment_status is provided, the top-level deployment_status.yaml
      will also be updated, which will contain the deployment_status and the
      related message.
      The standard message format will be::
        body: {
          type: 'tripleo.workflow.name', # Matches the workflow name
          payload: {
            status: 'STATUS', # One of RUNNING, SUCCESS, FAILED
            root_execution_id: 'UUID of the root execution',
            execution_id: 'UUID',
            message: "Human readable description",
            < Arbitrary data. This should match the workflow output data >
          }
        }
      Workflow Input:
      queue_name - The Zaqar queue name to post to.
      type - The message type, this should match the calling workflows name
      execution - Details about the workflow execution. Should be passed by using <% execution() %>
      status - Optional. The status of the message. SUCCESS/RUNNING/FAILED.
      message - Optional. A human readable message to be included
      payload - Optional. A dictionary output data to be sent in the message.
      plan_name - Optional. The deployment plan name. This is used for the swift messages container.
      deployment_status - Optional. If set the top-level deployment_status.yaml will be updated.
    input:
      - queue_name
      - type
      - execution
      - status: 'SUCCESS'
      - message: null
      - payload: {}
      - plan_name: null
      - deployment_status: null
    tags:
      - tripleo-common-managed
    output:
      type: <% $.type %>
      payload: <% $.payload %>
    tasks:
      merge_payload:
        on-success:
          publish:
            branch:
              # The payload with arbitrary keys is merged with the status, message and execution.
              payload: <% {status => $.status, message => $.message, root_execution_id => $.execution.root_execution_id, execution_id => $.execution.id, plan_name => $.plan_name, deployment_status => $.deployment_status} + $.payload %>
          next: prepare_messages
      # Pre-compute the Swift object bodies and the per-plan container name.
      prepare_messages:
        on-success:
          publish:
            branch:
              swift_message: <% {type => $.type, payload => $.payload} %>
              deployment_status_message: <% {deployment_status => $.deployment_status, workflow_status => {type => $.type, payload => $.payload}} %>
              container: <% "{0}-messages".format($.plan_name) %>
          next: branch_workflow
      # It should be possible for this to happen in the next section above, but
      # there seems to be a Mistral bug... to be confirmed...
      # Fans out: always post to Zaqar; persist to Swift only when a
      # plan_name was given.
      branch_workflow:
        on-success:
          - send_message
          - complete_swift: <% not bool($.plan_name) %>
          - verify_container_exists: <% bool($.plan_name) %>
      send_message:
        action: zaqar.queue_post
        retry:
          delay: 4
          count: 16
        input:
          queue_name: <% $.queue_name %>
          messages:
            body: <% {type => $.type, payload => $.payload} %>
        on-success: check_status
      verify_container_exists:
        workflow: tripleo.swift.v1.container_exists container=<% $.container %>
        input:
          create_container: true
        retry:
          delay: 4
          count: 16
        on-success:
          - wait_for_swift: <% not bool($.deployment_status) %>
          - persist_to_swift_plan_latest: <% bool($.deployment_status) %>
          - persist_to_swift
      # Store the timestamped per-message object: {TYPE}/{TIMESTAMP}.yaml
      persist_to_swift:
        action: swift.put_object
        retry:
          delay: 4
          count: 16
        input:
          container: <% $.container %>
          obj: <% "{0}/{1}.yaml".format($.type, now().format("%Y-%m-%d_%H:%M:%S")) %>
          contents: <% yaml_dump($.swift_message) %>
        on-success: wait_for_swift
      # Overwrite the top-level deployment_status.yaml with the latest status.
      persist_to_swift_plan_latest:
        action: swift.put_object
        retry:
          delay: 4
          count: 16
        input:
          container: <% $.container %>
          obj: <% "deployment_status.yaml" %>
          contents: <% yaml_dump($.deployment_status_message) %>
        on-success: wait_for_swift
      wait_for_swift:
        # We want persist_to_swift and either persist_to_swift_plan_latest or
        # verify_container_exists to join here. Two of the three tasks.
        join: 2
        on-success: complete_swift
      complete_swift:
        on-success: check_status
      check_status:
        # We want both complete_swift and send_message to join here. This means
        # that zaqar and swift (if enabled) will all be finished.
        join: all
        on-complete:
          - fail(msg=<% "Workflow failed due to message status. Status:{} Message:{}".format($.get('status'), $.get('message')) %>): <% $.get('status') = "FAILED" %>

View File

@ -1,79 +0,0 @@
---
version: '2.0'
name: tripleo.swift.v1
description: TripleO Swift Utility Workflows
workflows:
  container_exists:
    description: >-
      Verify if a Swift container exists
      Given the name of a Swift container this workflow will verify if it
      already exists. The workflow will ERROR if it doesn't and end in
      SUCCESS if it does.
      If create_container: true is passed in, then the container will be
      created if it doesn't exist. In this case, if the workflow fails the
      container failed to create.
    input:
      - container
      - headers: {}
      - create_container: false
    tags:
      - tripleo-common-managed
    tasks:
      # We use the "prefix" input to only list the containers starting with
      # the container name we want to find. There is no way to do an exact
      # match, but this will greatly reduce the possible number of results.
      get_containers:
        action: swiftservice.list
        input:
          options:
            prefix: <% $.container %>
        publish:
          container_exists: <% bool(task().result) and $.container in task().result[0].listing.name %>
          # Only include headers in the create call when some were supplied.
          create_input: <% $.headers and {"container" => $.container, "headers" => $.headers} or {"container" => $.container} %>
        on-success:
          - succeed: <% $.container_exists %>
          - fail: <% not $.create_container and not $.container_exists %>
          - create_container: <% $.create_container and not $.container_exists %>
      create_container:
        action: swift.put_container
        input: <% $.create_input %>
  object_exists:
    description: >
      Verify if a Swift object exists
      Given the name of a Swift container and object this workflow will
      verify if it already exists. The workflow will ERROR if it doesn't
      and end in SUCCESS if it does.
    input:
      - container
      - object
    tags:
      - tripleo-common-managed
    tasks:
      # List objects with the target name as prefix (no exact-match API),
      # then check for the exact name in the listing.
      get_objects:
        action: swift.get_container
        input:
          container: <% $.container %>
          prefix: <% $.object %>
        publish:
          object_exists: <% bool(task().result) and $.object in task().result[1].select($.name) %>
        on-success:
          - succeed: <% $.object_exists %>
          - fail: <% not $.object_exists %>

View File

@ -1,78 +0,0 @@
---
version: '2.0'
name: tripleo.validations.v1
description: TripleO Validations Workflows v1
workflows:
  # Inject (or clear) the validations SSH public key into the plan's
  # node_admin_extra_ssh_keys parameter, depending on whether validations
  # are enabled.
  add_validation_ssh_key_parameter:
    input:
      - container
      - queue_name: tripleo
    tags:
      - tripleo-common-managed
    tasks:
      # on-error here is the "validations disabled" path, not a failure:
      # it clears any key left over from a previous deployment.
      test_validations_enabled:
        action: tripleo.validations.enabled
        on-success: get_pubkey
        on-error: unset_validation_key_parameter
      get_pubkey:
        action: tripleo.validations.get_pubkey
        on-success: set_validation_key_parameter
        publish:
          pubkey: <% task().result %>
      set_validation_key_parameter:
        action: tripleo.parameters.update
        input:
          parameters:
            node_admin_extra_ssh_keys: <% $.pubkey %>
          container: <% $.container %>
      # NOTE(shadower): We need to clear keys from a previous deployment
      unset_validation_key_parameter:
        action: tripleo.parameters.update
        input:
          parameters:
            node_admin_extra_ssh_keys: ""
          container: <% $.container %>
  # Append the validations public key to the overcloud admin user's
  # authorized_keys on every Nova server, via a per-server deploy script.
  copy_ssh_key:
    input:
      # FIXME: we should stop using heat-admin as e.g. split-stack
      # environments (where Nova didn't create overcloud nodes) don't
      # have it present
      - overcloud_admin: heat-admin
      - queue_name: tripleo
    tags:
      - tripleo-common-managed
    tasks:
      get_servers:
        action: nova.servers_list
        on-success: get_pubkey
        publish:
          servers: <% task().result._info %>
      get_pubkey:
        action: tripleo.validations.get_pubkey
        on-success: deploy_ssh_key
        publish:
          pubkey: <% task().result %>
      # Run the key-append script on each server; the grep guard keeps the
      # operation idempotent across repeated runs.
      deploy_ssh_key:
        workflow: tripleo.deployment.v1.deploy_on_server
        with-items: server in <% $.servers %>
        input:
          server_name: <% $.server.name %>
          server_uuid: <% $.server.id %>
          config: |
            #!/bin/bash
            if ! grep "<% $.pubkey %>" /home/<% $.overcloud_admin %>/.ssh/authorized_keys; then
              echo "<% $.pubkey %>" >> /home/<% $.overcloud_admin %>/.ssh/authorized_keys
            fi
          config_name: copy_ssh_key
          group: script
          queue_name: <% $.queue_name %>