tripleo-heat-templates/deployment/neutron/neutron-ovs-dpdk-agent-cont...

heat_template_version: rocky

description: >
  OpenStack Neutron OVS DPDK configured with Puppet for Compute Role (Containerized)

parameters:
  ContainerNeutronConfigImage:
    description: The container image to use for the neutron config_volume
    type: string
  ServiceData:
    default: {}
    description: Dictionary packing service data
    type: json
  ServiceNetMap:
    default: {}
    description: Mapping of service_name -> network name. Typically set
                 via parameter_defaults in the resource registry. This
                 mapping overrides those in ServiceNetMapDefaults.
    type: json
  DefaultPasswords:
    default: {}
    type: json
  RoleName:
    default: ''
    description: Role name on which the service is applied
    type: string
  RoleParameters:
    default: {}
    description: Parameters specific to the role
    type: json
  EndpointMap:
    default: {}
    description: Mapping of service endpoint -> protocol. Typically set
                 via parameter_defaults in the resource registry.
    type: json
  # The parameters below have to be set in the neutron agent only for compute nodes.
  # As of now there is no use case for these parameters other than DPDK; they
  # should be moved to a compute-only OVS agent if any other use case arises.
  # See the commented example after the parameter definitions below.
  NeutronDatapathType:
    default: ""
    description: Datapath type for ovs bridges
    type: string
    tags:
      - role_specific
  NeutronVhostuserSocketDir:
    default: ""
    description: The vhost-user socket directory for OVS
    type: string
    tags:
      - role_specific
  VhostuserSocketGroup:
    default: "qemu"
    description: >
      The vhost-user socket directory group name.
      Defaults to 'qemu'. When vhostuser mode is 'dpdkvhostuserclient'
      (which is the default mode), the vhost socket is created by qemu.
    type: string
    tags:
      - role_specific
  VhostuserSocketUser:
    default: "qemu"
    description: >
      The vhost-user socket directory user name.
      Defaults to 'qemu'. When vhostuser mode is 'dpdkvhostuserclient'
      (which is the default mode), the vhost socket is created by qemu.
    type: string
    tags:
      - role_specific
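  # A minimal sketch (not part of this template) of how these role-specific
  # parameters are typically overridden via parameter_defaults in an environment
  # file. The role parameter map name 'ComputeOvsDpdkParameters' and the values
  # shown are assumptions for a hypothetical ComputeOvsDpdk role:
  #
  #   parameter_defaults:
  #     ComputeOvsDpdkParameters:
  #       NeutronDatapathType: netdev
  #       NeutronVhostuserSocketDir: /var/lib/vhost_sockets
  #       VhostuserSocketGroup: hugetlbfs
  #       VhostuserSocketUser: qemu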
resources:

  NeutronOvsAgent:
    type: ./neutron-ovs-agent-container-puppet.yaml
    properties:
      ServiceData: {get_param: ServiceData}
      ServiceNetMap: {get_param: ServiceNetMap}
      DefaultPasswords: {get_param: DefaultPasswords}
      EndpointMap: {get_param: EndpointMap}
      RoleName: {get_param: RoleName}
      RoleParameters: {get_param: RoleParameters}

  OpenvswitchDpdk:
    type: ./../openvswitch/openvswitch-dpdk-baremetal-ansible.yaml
    properties:
      ServiceData: {get_param: ServiceData}
      ServiceNetMap: {get_param: ServiceNetMap}
      DefaultPasswords: {get_param: DefaultPasswords}
      EndpointMap: {get_param: EndpointMap}
      RoleName: {get_param: RoleName}
      RoleParameters: {get_param: RoleParameters}
  # Merge the role-specific parameters (RoleParameters) with the default
  # parameters; RoleParameters takes precedence over the defaults. See the
  # commented walk-through after this resource.
  RoleParametersValue:
    type: OS::Heat::Value
    properties:
      type: json
      value:
        map_replace:
          - map_replace:
              - neutron::agents::ml2::ovs::datapath_type: NeutronDatapathType
                neutron::agents::ml2::ovs::vhostuser_socket_dir: NeutronVhostuserSocketDir
                vhostuser_socket_group: VhostuserSocketGroup
                vhostuser_socket_user: VhostuserSocketUser
              - values: {get_param: [RoleParameters]}
          - values:
              NeutronDatapathType: {get_param: NeutronDatapathType}
              NeutronVhostuserSocketDir: {get_param: NeutronVhostuserSocketDir}
              VhostuserSocketGroup: {get_param: VhostuserSocketGroup}
              VhostuserSocketUser: {get_param: VhostuserSocketUser}
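  # How the nested map_replace above resolves (illustrative only; the values
  # are assumptions): given a role-specific override such as
  #   RoleParameters: {NeutronDatapathType: netdev}
  # the inner map_replace substitutes the placeholder values that appear as
  # keys of RoleParameters, and the outer map_replace substitutes whatever
  # placeholders remain with the global parameter defaults, yielding e.g.:
  #   neutron::agents::ml2::ovs::datapath_type: netdev
  #   neutron::agents::ml2::ovs::vhostuser_socket_dir: ""   # global default
  #   vhostuser_socket_group: qemu                          # global default
  #   vhostuser_socket_user: qemu                           # global default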
outputs:
  role_data:
    description: Role data for Neutron openvswitch DPDK service
    value:
      service_name: neutron_ovs_dpdk_agent
      config_settings:
        map_merge:
          - get_attr: [NeutronOvsAgent, role_data, config_settings]
          - nova::compute::libvirt::qemu::group: {get_attr: [RoleParametersValue, value, vhostuser_socket_group]}
          - get_attr: [RoleParametersValue, value]
      service_config_settings:
        map_merge:
          - get_attr: [NeutronOvsAgent, role_data, service_config_settings]
          - collectd:
              tripleo.collectd.plugins.neutron_ovs_dpdk_agent:
                - ovs_events
                - ovs_stats
              collectd::plugin::ovs_events::socket: '/run/openvswitch/db.sock'
              collectd::plugin::ovs_stats::socket: '/run/openvswitch/db.sock'
      puppet_config:
        config_volume: neutron
        puppet_tags: neutron_config,neutron_agent_ovs,neutron_plugin_ml2
        step_config:
          get_attr: [NeutronOvsAgent, role_data, puppet_config, step_config]
        config_image: {get_param: ContainerNeutronConfigImage}
        # We need to mount /run for the puppet_config step. This is because
        # puppet-vswitch runs the command "ovs-vsctl list open_vswitch ."
        # when vswitch::ovs::enable_hw_offload: true is set, and ovs-vsctl
        # talks to the ovsdb-server (hosting conf.db) over the unix domain
        # socket /run/openvswitch/db.sock.
        volumes:
          - /lib/modules:/lib/modules:ro
          - /run/openvswitch:/run/openvswitch
          - /etc/modules-load.d/:/etc/modules-load.d
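        # Illustrative equivalent of the lookup puppet-vswitch performs inside
        # the config container, shown here only to motivate the /run/openvswitch
        # mount (this command is not run by this template):
        #   ovs-vsctl --db=unix:/run/openvswitch/db.sock list open_vswitch .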
      kolla_config:
        get_attr: [NeutronOvsAgent, role_data, kolla_config]
      container_config_scripts:
        get_attr: [NeutronOvsAgent, role_data, container_config_scripts]
      docker_config:
        get_attr: [NeutronOvsAgent, role_data, docker_config]
      metadata_settings:
        get_attr: [NeutronOvsAgent, role_data, metadata_settings]
      host_prep_tasks:
        get_attr: [NeutronOvsAgent, role_data, host_prep_tasks]
      deploy_steps_tasks:
        list_concat:
          - get_attr: [NeutronOvsAgent, role_data, deploy_steps_tasks]
          - get_attr: [OpenvswitchDpdk, role_data, deploy_steps_tasks]
      upgrade_tasks:
        get_attr: [NeutronOvsAgent, role_data, upgrade_tasks]
      update_tasks:
        get_attr: [NeutronOvsAgent, role_data, update_tasks]
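# A brief usage sketch (not part of this template; names are assumptions):
# deployments typically map the compute role's OVS agent service to this DPDK
# variant through the resource_registry of an OVS-DPDK environment file, e.g.:
#
#   resource_registry:
#     OS::TripleO::Services::ComputeNeutronOvsDpdk: <path to this template>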