resource_registry:
  OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode.yaml
  OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode.yaml
  OS::TripleO::Services::NovaIronic: ../../deployment/nova/nova-ironic-container-puppet.yaml
  OS::TripleO::Services::IronicApi: ../../deployment/ironic/ironic-api-container-puppet.yaml
  OS::TripleO::Services::IronicConductor: ../../deployment/ironic/ironic-conductor-container-puppet.yaml
  OS::TripleO::Services::IronicPxe: ../../deployment/ironic/ironic-pxe-container-puppet.yaml
  # These enable Pacemaker
  OS::TripleO::Services::OsloMessagingRpc: ../../deployment/rabbitmq/rabbitmq-messaging-rpc-pacemaker-puppet.yaml
  OS::TripleO::Services::OsloMessagingNotify: ../../deployment/rabbitmq/rabbitmq-messaging-notify-shared-puppet.yaml
  OS::TripleO::Services::HAproxy: ../../deployment/haproxy/haproxy-pacemaker-puppet.yaml
  OS::TripleO::Services::Pacemaker: ../../puppet/services/pacemaker.yaml
  OS::TripleO::Services::PacemakerRemote: ../../puppet/services/pacemaker_remote.yaml
  OS::TripleO::Services::Clustercheck: ../../deployment/pacemaker/clustercheck-container-puppet.yaml
  OS::TripleO::Services::MySQL: ../../deployment/database/mysql-pacemaker-puppet.yaml
  # Some infra instances don't pass the ping test but are otherwise working.
  # Since the OVB jobs also test this functionality we can shut it off here.
  OS::TripleO::AllNodes::Validation: ../common/all-nodes-validation-disabled.yaml
  OS::TripleO::NodeExtraConfigPost: ../common/ironic_standalone_post.yaml
  OS::TripleO::Services::CinderVolume: ../../deployment/cinder/cinder-volume-pacemaker-puppet.yaml
  OS::TripleO::Services::NeutronCorePlugin: OS::TripleO::Services::NeutronCorePluginML2Ansible
  OS::TripleO::Services::NovaCompute: OS::Heat::None
  OS::TripleO::Services::NovaLibvirt: OS::Heat::None

parameter_defaults:
  Debug: true
  DockerPuppetDebug: True
  NotificationDriver: 'noop'
  SwiftCeilometerPipelineEnabled: false
  IronicCleaningDiskErase: 'metadata'
  NovaSchedulerDiscoverHostsInCellsInterval: 15
  NeutronMechanismDrivers: openvswitch,ansible
  NeutronNetworkType: vlan
  NeutronTypeDrivers: local,vxlan,vlan,flat
  IronicDefaultNetworkInterface: neutron
  IronicAutomatedClean: false
  NeutronFlatNetworks: datacentre
  NeutronNetworkVLANRanges: 'datacentre:500:599,tenant:300:399'
  ML2HostConfigs: { "net-ans-br": { "ansible_network_os": "openvswitch", "ansible_host": "127.0.0.1", "ansible_user": "root", "ansible_ssh_private_key_file": "/etc/puppet/ci-key"}}
  NeutronBridgeMappings: 'datacentre:br-ex,tenant:br-tenant'
  # Remove ContainerCli once this scenario is tested on CentOS8
  ContainerCli: docker