tripleo-heat-templates/deployment/neutron/neutron-ovs-dpdk-agent-container-puppet.yaml
Alex Schultz ebab335f38 Role specific container support
We may want to be able to specify different containers at a role level.
This requires switching the container image parameters to be role
specific to allow for role-based overrides.

Change-Id: I4090e889a32abd51e7c11139737a7a18e27d18e7
2022-01-21 14:18:02 -07:00

183 lines
7.3 KiB
YAML

heat_template_version: wallaby

description: >
  OpenStack Neutron OVS DPDK configured with Puppet for Compute Role (Containerized)

parameters:
  ContainerNeutronConfigImage:
    description: The container image to use for the neutron config_volume
    type: string
    tags:
      - role_specific
  ServiceData:
    default: {}
    description: Dictionary packing service data
    type: json
  ServiceNetMap:
    default: {}
    description: Mapping of service_name -> network name. Typically set
                 via parameter_defaults in the resource registry. Use
                 parameter_merge_strategies to merge it with the defaults.
    type: json
  RoleName:
    default: ''
    description: Role name on which the service is applied
    type: string
  RoleParameters:
    default: {}
    description: Parameters specific to the role
    type: json
  EndpointMap:
    default: {}
    description: Mapping of service endpoint -> protocol. Typically set
                 via parameter_defaults in the resource registry.
    type: json
  # below parameters has to be set in neutron agent only for compute nodes.
  # as of now there is no other usecase for these parameters except dpdk.
  # should be moved to compute only ovs agent in case of any other usecases.
  NeutronDatapathType:
    default: ""
    description: Datapath type for ovs bridges
    type: string
    tags:
      - role_specific
  NeutronVhostuserSocketDir:
    default: ""
    description: The vhost-user socket directory for OVS
    type: string
    tags:
      - role_specific
  VhostuserSocketGroup:
    default: "qemu"
    description: >
      The vhost-user socket directory group name.
      Defaults to 'qemu'. When vhostuser mode is 'dpdkvhostuserclient'
      (which is the default mode), the vhost socket is created by qemu.
    type: string
    tags:
      - role_specific
  VhostuserSocketUser:
    default: "qemu"
    description: >
      The vhost-user socket directory user name.
      Defaults to 'qemu'. When vhostuser mode is 'dpdkvhostuserclient'
      (which is the default mode), the vhost socket is created by qemu.
    type: string
    tags:
      - role_specific

resources:

  NeutronOvsAgent:
    type: ./neutron-ovs-agent-container-puppet.yaml
    properties:
      ServiceData: {get_param: ServiceData}
      ServiceNetMap: {get_param: ServiceNetMap}
      EndpointMap: {get_param: EndpointMap}
      RoleName: {get_param: RoleName}
      RoleParameters: {get_param: RoleParameters}

  OpenvswitchDpdk:
    type: ./../openvswitch/openvswitch-dpdk-baremetal-ansible.yaml
    properties:
      ServiceData: {get_param: ServiceData}
      ServiceNetMap: {get_param: ServiceNetMap}
      EndpointMap: {get_param: EndpointMap}
      RoleName: {get_param: RoleName}
      RoleParameters: {get_param: RoleParameters}

  # Merging role-specific parameters (RoleParameters) with the default parameters.
  # RoleParameters will have the precedence over the default parameters.
  RoleParametersValue:
    type: OS::Heat::Value
    properties:
      type: json
      value:
        map_replace:
          - map_replace:
              - neutron::agents::ml2::ovs::datapath_type: NeutronDatapathType
                neutron::agents::ml2::ovs::vhostuser_socket_dir: NeutronVhostuserSocketDir
                vhostuser_socket_group: VhostuserSocketGroup
                vhostuser_socket_user: VhostuserSocketUser
                ContainerNeutronConfigImage: ContainerNeutronConfigImage
              - values: {get_param: [RoleParameters]}
          - values:
              NeutronDatapathType: {get_param: NeutronDatapathType}
              NeutronVhostuserSocketDir: {get_param: NeutronVhostuserSocketDir}
              VhostuserSocketGroup: {get_param: VhostuserSocketGroup}
              VhostuserSocketUser: {get_param: VhostuserSocketUser}
              ContainerNeutronConfigImage: {get_param: ContainerNeutronConfigImage}

outputs:
  role_data:
    description: Role data for Neutron openvswitch DPDK service
    value:
      service_name: neutron_ovs_dpdk_agent
      config_settings:
        map_merge:
          - get_attr: [NeutronOvsAgent, role_data, config_settings]
          - nova::compute::libvirt::qemu::group: {get_attr: [RoleParametersValue, value, vhostuser_socket_group]}
          - get_attr: [RoleParametersValue, value]
      service_config_settings:
        map_merge:
          - get_attr: [NeutronOvsAgent, role_data, service_config_settings]
          - collectd:
              tripleo.collectd.plugins.neutron_ovs_dpdk_agent:
                - ovs_events
                - ovs_stats
              collectd::plugin::ovs_events::socket: '/run/openvswitch/db.sock'
              collectd::plugin::ovs_stats::socket: '/run/openvswitch/db.sock'
      puppet_config:
        config_volume: neutron
        puppet_tags: neutron_config,neutron_agent_ovs,neutron_plugin_ml2
        step_config:
          get_attr: [NeutronOvsAgent, role_data, puppet_config, step_config]
        config_image: {get_attr: [RoleParametersValue, value, ContainerNeutronConfigImage]}
        # We need to mount /run for puppet_config step. This is because
        # puppet-vswitch runs the commands "ovs-vsctl list open_vswitch ."
        # when running vswitch::ovs::enable_hw_offload: true
        # ovs-vsctl talks to the ovsdb-server (hosting conf.db)
        # on the unix domain socket - /run/openvswitch/db.sock
        volumes:
          - /lib/modules:/lib/modules:ro
          - /run/openvswitch:/run/openvswitch
          - /etc/modules-load.d:/etc/modules-load.d
      kolla_config:
        get_attr: [NeutronOvsAgent, role_data, kolla_config]
      container_config_scripts:
        get_attr: [NeutronOvsAgent, role_data, container_config_scripts]
      docker_config:
        get_attr: [NeutronOvsAgent, role_data, docker_config]
      metadata_settings:
        get_attr: [NeutronOvsAgent, role_data, metadata_settings]
      host_prep_tasks:
        get_attr: [NeutronOvsAgent, role_data, host_prep_tasks]
      deploy_steps_tasks:
        list_concat:
          - get_attr: [NeutronOvsAgent, role_data, deploy_steps_tasks]
          - get_attr: [OpenvswitchDpdk, role_data, deploy_steps_tasks]
      upgrade_tasks:
        list_concat:
          - get_attr: [NeutronOvsAgent, role_data, upgrade_tasks]
          - - name: upgrade prepare for leapp to vfio-pci.conf
              tags:
                - never
                - system_upgrade
                - system_upgrade_prepare
              when:
                - step|int == 3
                - upgrade_leapp_enabled
              block:
                # With an existing BZ #1898664 on dracut does not create ramfs with vfio_iommu_type1
                # module, because which loading vfio-pci during the initramfs fails to load this
                # module. Because of this dpdk ports are added in ERROR state. It requires a
                # restart ovs to bring to normal state after ffu is complete. As a workaround,
                # the module-load file vfio-pci.conf can be removed before upgrade, which will
                # ensure that vfio-pci is not loaded during initramfs and it will be loaded
                # when driverctl configures the vfio-pci driver to the interface.
                - name: delete the vfio-pci.conf file before upgrade
                  file:
                    state: absent
                    path: /etc/modules-load.d/vfio-pci.conf
      update_tasks:
        get_attr: [NeutronOvsAgent, role_data, update_tasks]