147335f799
Kept the one used for derive parameters, which includes the playbooks for derive parameters. Depends-On: https://review.opendev.org/c/openstack/python-tripleoclient/+/777293 Change-Id: I7b82364bee7fcc3a89d36255b0c8507cdd1d5a38
version: 1.0

playbook_parameters:
  cli-derive-parameters.yaml:
    ########### Role-Specific derive parameters ##############
    # Introspection hardware data is mandatory for role-specific
    # derive parameters. The role-specific derive parameters workflows
    # are invoked if this parameter is true, otherwise only the common
    # derive parameters workflows are invoked.
    hw_data_required: true

    ######### DPDK Parameters #########
    # Specifies the minimum number of CPU physical cores to be allocated
    # for DPDK PMD threads. The actual allocation is based on the network
    # config: if a DPDK port is associated with a NUMA node, this value is
    # used, otherwise 1.
    num_phy_cores_per_numa_node_for_pmd: 1
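    # Illustrative example (assumed values, not part of this sample): with
    # this set to 2, a NUMA node that has a DPDK port associated with it
    # would be allocated 2 physical cores for PMD threads, while a NUMA
    # node with no DPDK port would get 1.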

    # Amount of memory to be configured as huge pages, as a percentage. Out
    # of the total available memory (excluding NovaReservedHostMemory), the
    # specified percentage of the remainder is configured as huge pages.
    huge_page_allocation_percentage: 50
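    # Illustrative arithmetic (assumed figures, not from a real deployment):
    # with 64 GiB of total memory and 4 GiB of NovaReservedHostMemory, a
    # value of 50 would configure 50% of the remaining 60 GiB, i.e. 30 GiB,
    # as huge pages.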

    ######### HCI Parameters #########
    hci_profile: default
    hci_profile_config:
      default:
        # By default we do not know the expected workload. At least by
        # defaulting these values to zero we can reserve memory for OSDs.
        average_guest_memory_size_in_mb: 0
        average_guest_cpu_utilization_percentage: 0
      many_small_vms:
        average_guest_memory_size_in_mb: 1024
        average_guest_cpu_utilization_percentage: 20
      few_large_vms:
        average_guest_memory_size_in_mb: 4096
        average_guest_cpu_utilization_percentage: 80
      nfv_default:
        average_guest_memory_size_in_mb: 8192
        average_guest_cpu_utilization_percentage: 90
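    # Illustrative note (interpretation, not authoritative): hci_profile
    # selects which entry under hci_profile_config is applied; the other
    # profiles are samples that can be copied and tuned to the expected
    # per-guest memory size and CPU utilization, which the HCI derivation
    # takes into account when reserving resources for the Ceph OSDs.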

# NOTE(cloudnull): While this interface still exists, it will be removed soon.
#                  Use playbook_parameters to ensure that there is no loss of
#                  functionality for a given deployment.
# workflow_parameters:
#   tripleo.derive_params.v1.derive_parameters:
#     ########### Role-Specific derive parameters ##############
#     # Introspection hardware data is mandatory for role-specific
#     # derive parameters. The role-specific derive parameters workflows
#     # are invoked if this parameter is true, otherwise only the common
#     # derive parameters workflows are invoked.
#     hw_data_required: true
#
#     ######### DPDK Parameters #########
#     # Specifies the minimum number of CPU physical cores to be allocated
#     # for DPDK PMD threads. The actual allocation is based on the network
#     # config: if a DPDK port is associated with a NUMA node, this value is
#     # used, otherwise 1.
#     num_phy_cores_per_numa_node_for_pmd: 1
#
#     # Amount of memory to be configured as huge pages, as a percentage. Out
#     # of the total available memory (excluding NovaReservedHostMemory), the
#     # specified percentage of the remainder is configured as huge pages.
#     huge_page_allocation_percentage: 50
#
#     ######### HCI Parameters #########
#     hci_profile: default
#     hci_profile_config:
#       default:
#         average_guest_memory_size_in_mb: 2048
#         average_guest_cpu_utilization_percentage: 50
#       many_small_vms:
#         average_guest_memory_size_in_mb: 1024
#         average_guest_cpu_utilization_percentage: 20
#       few_large_vms:
#         average_guest_memory_size_in_mb: 4096
#         average_guest_cpu_utilization_percentage: 80
#       nfv_default:
#         average_guest_memory_size_in_mb: 8192
#         average_guest_cpu_utilization_percentage: 90