tripleo-heat-templates/environments/neutron-opendaylight-dpdk.yaml
Itzik Brown cfd0d185a9 Disables QoS with OpenDaylight deployments
QoS is not fully supported and fails to load correctly with
networking-odl, so it is disabled in the Neutron extension drivers
until it is fully working.

Change-Id: I89aa3628c1400305f9659f5c0c99942a7fa7d19e
Closes-Bug: 1708131
2017-09-06 16:40:53 -04:00

# A Heat environment that can be used to deploy OpenDaylight with L3 DVR and DPDK
resource_registry:
  OS::TripleO::Services::NeutronOvsAgent: OS::Heat::None
  OS::TripleO::Services::ComputeNeutronOvsAgent: OS::Heat::None
  OS::TripleO::Services::ComputeNeutronCorePlugin: OS::Heat::None
  OS::TripleO::Services::OpenDaylightApi: ../puppet/services/opendaylight-api.yaml
  OS::TripleO::Services::OpenDaylightOvs: ../puppet/services/opendaylight-ovs.yaml
  OS::TripleO::Services::NeutronL3Agent: OS::Heat::None

parameter_defaults:
  NeutronEnableForceMetadata: true
  NeutronPluginExtensions: 'port_security'
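  # Note: 'qos' is deliberately left out of NeutronPluginExtensions above, since
  # QoS does not yet load correctly with networking-odl (bug 1708131).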
  NeutronMechanismDrivers: 'opendaylight_v2'
  NeutronServicePlugins: 'odl-router_v2,trunk'
  NovaSchedulerDefaultFilters: "RamFilter,ComputeFilter,AvailabilityZoneFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,NUMATopologyFilter"
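  # NUMATopologyFilter is kept in the filter list so Nova can honour the NUMA,
  # hugepage and CPU-pinning requirements that DPDK guests typically carry.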
  OpenDaylightSNATMechanism: 'controller'
  ComputeOvsDpdkParameters:
    OvsEnableDpdk: True

    ## Host configuration Parameters
    #TunedProfileName: "cpu-partitioning"
    #IsolCpusList: ""              # Logical CPUs list to be isolated from the host processes (applied via the cpu-partitioning tuned profile).
                                   # It is mandatory to provide isolated CPUs for tuned to achieve optimal performance.
                                   # Example: "3-8,12-15,18"
    #KernelArgs: ""                # Space-separated kernel args to configure hugepages and IOMMU.
                                   # Deploying DPDK requires enabling hugepages for the overcloud compute nodes.
                                   # It also requires enabling IOMMU when using the VFIO (vfio-pci) OvsDpdkDriverType.
                                   # This should be done by configuring these parameters via the host-config-and-reboot.yaml environment file.
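    # Illustrative KernelArgs value only (an assumption for an Intel host using 1G
    # hugepages; adjust the hugepage count and isolcpus range to your hardware):
    #   KernelArgs: "default_hugepagesz=1GB hugepagesz=1G hugepages=32 iommu=pt intel_iommu=on isolcpus=2-19,22-39"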
    ## Attempting to deploy DPDK without appropriate values for the below parameters may lead to unstable deployments
    ## due to CPU contention of DPDK PMD threads.
    ## It is highly recommended to enable isolcpus (via KernelArgs) on compute overcloud nodes and set the following parameters:
    #OvsDpdkSocketMemory: ""       # Sets the amount of hugepage memory to assign per NUMA node.
                                   # It is recommended to use the socket closest to the PCIe slot used for the
                                   # desired DPDK NIC. The format is a comma-separated per-socket string, such as:
                                   # "<socket 0 mem MB>,<socket 1 mem MB>", for example: "1024,0".
    #OvsDpdkDriverType: "vfio-pci" # Ensure the Overcloud NIC to be used for DPDK supports this UIO/PMD driver.
    #OvsPmdCoreList: ""            # List or range of CPU cores for PMD threads to be pinned to. Note that NIC
                                   # placement relative to the cores on a socket, the number of hyper-threaded
                                   # logical cores, and the desired number of PMD threads can all play a role in
                                   # configuring this setting.
                                   # These cores should be on the same socket where OvsDpdkSocketMemory is assigned.
                                   # If using hyper-threading, specify both logical cores that map to the same
                                   # physical core. Also, specifying more than one core will trigger multiple PMD
                                   # threads to be spawned, which may improve dataplane performance.
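    # Illustrative OvsPmdCoreList value only (assumes the DPDK NIC sits on socket 0
    # and that physical cores 2 and 3, with hyper-thread siblings 22 and 23, are
    # isolated via IsolCpusList/KernelArgs):
    #   OvsPmdCoreList: "2,22,3,23"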
    #NovaVcpuPinSet: ""            # Cores to pin Nova instances to. For maximum performance, select cores
                                   # on the same NUMA node(s) selected for previous settings.
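    # Illustrative NovaVcpuPinSet value only (assumes cores 4-19 and their
    # hyper-thread siblings 24-39 are reserved for guests on the same NUMA node
    # as the PMD cores):
    #   NovaVcpuPinSet: "4-19,24-39"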