# Commit: dfcff68e33
# ... because the interface was used by Mistral, which was already removed.
# Change-Id: I315ab6132b9185f169ae94476dc892db81976775
version: 1.0

playbook_parameters:
  cli-derive-parameters.yaml:
    ########### Role-Specific derive parameters ##############
    # Introspection hardware data is mandatory for role-specific
    # derive parameters. Role specific derive parameters workflows
    # will be invoked if this parameter is true, otherwise only
    # derive parameters common workflows will be invoked.
    hw_data_required: true

    ######### DPDK Parameters #########
    # Specifies the minimum number of CPU physical cores to be allocated for DPDK
    # PMD threads. The actual allocation will be based on network config, if
    # a DPDK port is associated with a numa node, then this configuration
    # will be used, else 1.
    num_phy_cores_per_numa_node_for_pmd: 1

    # Amount of memory to be configured as huge pages in percentage. Out of
    # the total available memory (excluding the NovaReservedHostMemory), the
    # specified percentage of the remaining is configured as huge pages.
    huge_page_allocation_percentage: 50

    ######### HCI Parameters #########
    hci_profile: default
    hci_profile_config:
      default:
        # By default we do not know the expected workload. At least by
        # defaulting these values to zero we can reserve memory for OSDs
        average_guest_memory_size_in_mb: 0
        average_guest_cpu_utilization_percentage: 0
      many_small_vms:
        average_guest_memory_size_in_mb: 1024
        average_guest_cpu_utilization_percentage: 20
      few_large_vms:
        average_guest_memory_size_in_mb: 4096
        average_guest_cpu_utilization_percentage: 80
      nfv_default:
        average_guest_memory_size_in_mb: 8192
        average_guest_cpu_utilization_percentage: 90