Merge "Update plan-sample to reference new interface"

Zuul authored 2020-09-19 00:07:13 +00:00, committed by Gerrit Code Review
commit 9edc46938f


@@ -6,8 +6,8 @@ description: >
 template: overcloud.yaml
 environments:
   - path: overcloud-resource-registry-puppet.yaml
-workflow_parameters:
-  tripleo.derive_params.v1.derive_parameters:
+playbook_parameters:
+  cli-derive-parameters.yaml:
     ########### Role-Specific derive parameters ##############
     # Introspection hardware data is mandatory for role-specific
     # derive parameters. Role specific derive parameters workflows
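
For orientation, a minimal sketch of how the top of the sample plan environment
reads once this change is applied; it only restates keys already visible in the
diff, with the derive-parameter settings nested under playbook_parameters
instead of workflow_parameters:

  template: overcloud.yaml
  environments:
    - path: overcloud-resource-registry-puppet.yaml
  playbook_parameters:
    cli-derive-parameters.yaml:
      # Role-specific derive parameters need introspection hardware data
      hw_data_required: true
      # The DPDK, huge page and HCI settings that follow in the file keep
      # their existing names and nesting under the new key.
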
@@ -28,8 +28,10 @@ workflow_parameters:
     hci_profile: default
     hci_profile_config:
       default:
-        average_guest_memory_size_in_mb: 2048
-        average_guest_cpu_utilization_percentage: 50
+        # By default we do not know the expected workload. At least by
+        # defaulting these values to zero we can reserve memory for OSDs.
+        average_guest_memory_size_in_mb: 0
+        average_guest_cpu_utilization_percentage: 0
       many_small_vms:
         average_guest_memory_size_in_mb: 1024
         average_guest_cpu_utilization_percentage: 20
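
The zeroed defaults deliberately assume nothing about the workload so that
memory can be reserved for the OSDs. Where the expected workload is known, a
plan can instead point hci_profile at one of the other profiles defined in
this sample (or supply its own numbers); a minimal sketch using only names
already present in this file:

  playbook_parameters:
    cli-derive-parameters.yaml:
      # Select a predefined profile instead of the zeroed default
      hci_profile: many_small_vms
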
@@ -39,3 +41,39 @@ workflow_parameters:
       nfv_default:
         average_guest_memory_size_in_mb: 8192
         average_guest_cpu_utilization_percentage: 90
+# NOTE(cloudnull): While this interface still exists, it will be removed soon.
+#                  Use playbook_parameters to ensure that there is no loss of
+#                  functionality for a given deployment.
+# workflow_parameters:
+#   tripleo.derive_params.v1.derive_parameters:
+#     ########### Role-Specific derive parameters ##############
+#     # Introspection hardware data is mandatory for role-specific
+#     # derive parameters. Role specific derive parameters workflows
+#     # will be invoked if this parameter is true, otherwise only
+#     # derive parameters common workflows will be invoked.
+#     hw_data_required: true
+#     ######### DPDK Parameters #########
+#     # Specifies the minimum number of CPU physical cores to be allocated for
+#     # DPDK PMD threads. The actual allocation is based on the network config:
+#     # if a DPDK port is associated with a NUMA node, this configuration
+#     # will be used, else 1.
+#     num_phy_cores_per_numa_node_for_pmd: 1
+#     # Amount of memory to be configured as huge pages, in percent. Out of the
+#     # total available memory (excluding the NovaReservedHostMemory), the
+#     # specified percentage of the remainder is configured as huge pages.
+#     huge_page_allocation_percentage: 50
+#     ######### HCI Parameters #########
+#     hci_profile: default
+#     hci_profile_config:
+#       default:
+#         average_guest_memory_size_in_mb: 2048
+#         average_guest_cpu_utilization_percentage: 50
+#       many_small_vms:
+#         average_guest_memory_size_in_mb: 1024
+#         average_guest_cpu_utilization_percentage: 20
+#       few_large_vms:
+#         average_guest_memory_size_in_mb: 4096
+#         average_guest_cpu_utilization_percentage: 80
+#       nfv_default:
+#         average_guest_memory_size_in_mb: 8192
+#         average_guest_cpu_utilization_percentage: 90
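
For orientation only, an illustrative reading of two of the commented-out
parameters above, num_phy_cores_per_numa_node_for_pmd and
huge_page_allocation_percentage, with made-up host numbers; the real
derivation run by the playbook has more inputs than are shown here:

  total host memory        = 196608 MB  (192 GB, example value)
  NovaReservedHostMemory   =   4096 MB  (example value)
  huge pages at 50 percent ~ (196608 - 4096) * 0.50 = 96256 MB

  two NUMA nodes, each with a DPDK port, and num_phy_cores_per_numa_node_for_pmd: 1
  => at least one physical core per NUMA node (two in total) for PMD threads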