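# Environment for the scenario001 multinode CI job. Broadly, this scenario
# exercises the telemetry stack (Aodh, Ceilometer, Gnocchi, Panko) backed by
# a ceph-ansible deployed Ceph cluster, with HA services (RabbitMQ, MySQL,
# Redis, HAProxy, Cinder volume/backup) managed by Pacemaker.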
resource_registry:
OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode.yaml
OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode.yaml
OS::TripleO::Services::CephMgr: ../../deployment/ceph-ansible/ceph-mgr.yaml
OS::TripleO::Services::CephMon: ../../deployment/ceph-ansible/ceph-mon.yaml
OS::TripleO::Services::CephOSD: ../../deployment/ceph-ansible/ceph-osd.yaml
OS::TripleO::Services::CephClient: ../../deployment/ceph-ansible/ceph-client.yaml
OS::TripleO::Services::PankoApi: ../../deployment/panko/panko-api-container-puppet.yaml
OS::TripleO::Services::Collectd: ../../deployment/metrics/collectd-container-puppet.yaml
OS::TripleO::Services::MetricsQdr: ../../deployment/metrics/qdr-container-puppet.yaml
OS::TripleO::Services::OsloMessagingRpc: ../../deployment/rabbitmq/rabbitmq-messaging-rpc-pacemaker-puppet.yaml
OS::TripleO::Services::OsloMessagingNotify: ../../deployment/rabbitmq/rabbitmq-messaging-notify-shared-puppet.yaml
OS::TripleO::Services::HAproxy: ../../deployment/haproxy/haproxy-pacemaker-puppet.yaml
OS::TripleO::Services::Pacemaker: ../../puppet/services/pacemaker.yaml
OS::TripleO::Services::PacemakerRemote: ../../puppet/services/pacemaker_remote.yaml
OS::TripleO::Services::Clustercheck: ../../deployment/pacemaker/clustercheck-container-puppet.yaml
OS::TripleO::Services::Redis: ../../deployment/database/redis-pacemaker-puppet.yaml
OS::TripleO::Services::MySQL: ../../deployment/database/mysql-pacemaker-puppet.yaml
OS::TripleO::Services::CinderBackup: ../../deployment/cinder/cinder-backup-pacemaker-puppet.yaml
OS::TripleO::Services::CinderVolume: ../../deployment/cinder/cinder-volume-pacemaker-puppet.yaml
OS::TripleO::Services::Keepalived: OS::Heat::None
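# Mapping a service to OS::Heat::None disables it. Keepalived is not needed
# here, presumably because the Pacemaker-managed HAProxy above takes over
# virtual IP management.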
# Fluentd needs to run in scenario001
OS::TripleO::Services::Fluentd: ../../deployment/deprecated/logging/fluentd-container-puppet.yaml
# NOTE(mmagr): We need to disable Sensu client deployment for now, as the container health check is based
# on a successful RabbitMQ connection, which does not happen in this case. We can enable it again once we
# implement a default connection to the overcloud RabbitMQ instance.
#OS::TripleO::Services::SensuClient: ../../deployment/deprecated/monitoring/sensu-client-container-puppet.yaml
# Some infra instances don't pass the ping test but are otherwise working.
# Since the OVB jobs also test this functionality, we can shut it off here.
OS::TripleO::AllNodes::Validation: ../common/all-nodes-validation-disabled.yaml
parameter_defaults:
ControllerServices:
- OS::TripleO::Services::CACerts
- OS::TripleO::Services::Clustercheck
- OS::TripleO::Services::ContainerImagePrepare
- OS::TripleO::Services::Docker
- OS::TripleO::Services::Kernel
- OS::TripleO::Services::Keystone
- OS::TripleO::Services::LoginDefs
- OS::TripleO::Services::GlanceApi
- OS::TripleO::Services::HeatApi
- OS::TripleO::Services::HeatApiCfn
- OS::TripleO::Services::HeatEngine
- OS::TripleO::Services::MySQL
- OS::TripleO::Services::MySQLClient
- OS::TripleO::Services::NeutronServer
- OS::TripleO::Services::NeutronCorePlugin
- OS::TripleO::Services::OVNDBs
- OS::TripleO::Services::OVNController
- OS::TripleO::Services::OVNMetadataAgent
- OS::TripleO::Services::OsloMessagingRpc
- OS::TripleO::Services::OsloMessagingNotify
- OS::TripleO::Services::HAproxy
- OS::TripleO::Services::Keepalived
- OS::TripleO::Services::Memcached
- OS::TripleO::Services::Pacemaker
- OS::TripleO::Services::NovaConductor
- OS::TripleO::Services::NovaApi
- OS::TripleO::Services::PlacementApi
- OS::TripleO::Services::NovaMetadata
- OS::TripleO::Services::NovaScheduler
- OS::TripleO::Services::Ntp
- OS::TripleO::Services::Snmp
- OS::TripleO::Services::Sshd
- OS::TripleO::Services::Securetty
- OS::TripleO::Services::Timezone
- OS::TripleO::Services::NovaCompute
- OS::TripleO::Services::NovaLibvirt
- OS::TripleO::Services::NovaMigrationTarget
- OS::TripleO::Services::Redis
- OS::TripleO::Services::AodhApi
- OS::TripleO::Services::AodhEvaluator
- OS::TripleO::Services::AodhNotifier
- OS::TripleO::Services::AodhListener
- OS::TripleO::Services::CeilometerAgentCentral
- OS::TripleO::Services::CeilometerAgentIpmi
- OS::TripleO::Services::CeilometerAgentNotification
- OS::TripleO::Services::ComputeCeilometerAgent
- OS::TripleO::Services::GnocchiApi
- OS::TripleO::Services::GnocchiMetricd
- OS::TripleO::Services::GnocchiStatsd
- OS::TripleO::Services::PankoApi
- OS::TripleO::Services::CephMgr
- OS::TripleO::Services::CephMon
- OS::TripleO::Services::CephOSD
- OS::TripleO::Services::CephClient
- OS::TripleO::Services::CinderApi
- OS::TripleO::Services::CinderBackup
- OS::TripleO::Services::CinderScheduler
- OS::TripleO::Services::CinderVolume
- OS::TripleO::Services::Collectd
- OS::TripleO::Services::MetricsQdr
- OS::TripleO::Services::TripleoPackages
- OS::TripleO::Services::TripleoFirewall
- OS::TripleO::Services::Fluentd
#- OS::TripleO::Services::SensuClient
- OS::TripleO::Services::Iscsid
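# Note that compute services (NovaCompute, NovaLibvirt, NovaMigrationTarget)
# are included in the controller role here: in this multinode CI layout the
# controller node also acts as the compute host.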
ControllerExtraConfig:
nova::compute::libvirt::services::libvirt_virt_type: qemu
nova::compute::libvirt::libvirt_virt_type: qemu
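# CI nodes are themselves virtual machines without nested KVM, so libvirt is
# switched to plain QEMU (software) emulation. Both hiera keys are set,
# presumably to cover older and newer puppet-nova parameter paths.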
# NOTE(sileht): To decrease the time test_telemetry_integration takes, we
# configure Ceilometer to poll more frequently and configure the
# 'ceilometer-high-rate' Gnocchi archive policy to keep 1 point every 60s.
# The test then takes 2 minutes instead of 10. Note that the tempest
# telemetry.alarm_granularity setting must be in sync with the archive
# policy, i.e. 60s too.
ceilometer::agent::polling::polling_interval: 15
ManagePolling: true
ManagePipeline: true
PipelinePublishers:
- gnocchi://?archive_policy=ceilometer-high-rate
EventPipelinePublishers:
- gnocchi://?archive_policy=ceilometer-high-rate
- panko://
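# For reference, the matching tempest option mentioned in the note above
# would look something like this in tempest.conf (illustrative only, not
# part of this environment file):
#   [telemetry]
#   alarm_granularity = 60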
Debug: true
DockerPuppetDebug: true
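# Single-OSD Ceph layout for CI: /dev/loop3 is presumably a loopback device
# prepared by the CI job beforehand, and the 'collocated' osd_scenario puts
# the journal on the same device as the data.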
CephAnsibleDisksConfig:
devices:
- /dev/loop3
journal_size: 512
osd_scenario: collocated
CephPoolDefaultPgNum: 32
CephPoolDefaultSize: 1
CephPools:
- name: altrbd
pg_num: 8
rule_name: replicated_rule
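# The pool sizing (pg_num 32, size 1, i.e. no replication) only makes sense
# for a throwaway single-node CI cluster. The extra 'altrbd' pool is handed
# to Cinder via CinderRbdExtraPools further down.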
CephAnsibleExtraConfig:
centos_package_dependencies: []
ceph_osd_docker_memory_limit: '1g'
ceph_mds_docker_memory_limit: '1g'
CephAnsibleSkipTags: ''
# NOTE: These IDs and keys should be regenerated for
# a production deployment. What is here is suitable for
# developer and CI testing only.
CephClusterFSID: '4b5c8c0a-ff60-454b-a1b4-9747aa737d19'
CephMonKey: 'AQC+Ox1VmEr3BxAALZejqeHj50Nj6wJDvs96OQ=='
CephAdminKey: 'AQDLOh1VgEp6FRAAFzT7Zw+Y9V6JJExQAsRnRQ=='
CephClientKey: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw=='
CephAnsiblePlaybookVerbosity: 1
CephAnsibleEnvironmentVariables:
ANSIBLE_SSH_RETRIES: '4'
DEFAULT_FORKS: '3'
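# The memory limits, fork count and SSH retry settings above keep
# ceph-ansible within the resources of a CI node. The block below switches
# Nova, Cinder (including backup), Glance and Gnocchi to RBD backends on
# that cluster and disables Cinder's default iSCSI backend.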
NovaEnableRbdBackend: true
CinderEnableRbdBackend: true
CinderRbdExtraPools: altrbd
CinderBackupBackend: ceph
GlanceBackend: rbd
GnocchiBackend: rbd
CinderEnableIscsiBackend: false
GnocchiArchivePolicy: 'ceilometer-high-rate'
BannerText: |
******************************************************************
* This system is for the use of authorized users only. Usage of *
* this system may be monitored and recorded by system personnel. *
* Anyone using this system expressly consents to such monitoring *
* and is advised that if such monitoring reveals possible *
* evidence of criminal activity, system personnel may provide *
* the evidence from such monitoring to law enforcement officials.*
******************************************************************
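# BannerText is presumably consumed by the Sshd service enabled above to set
# the login banner; it is included here so the CI job exercises that setting.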
CollectdExtraPlugins:
- rrdtool
LoggingServers:
- host: 127.0.0.1
port: 24224
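# 24224 is the default fluentd forward port; pointing Fluentd at localhost
# keeps log forwarding self-contained within the CI node.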
#MonitoringRabbitHost: 127.0.0.1
#MonitoringRabbitPort: 5676
#MonitoringRabbitPassword: sensu
TtyValues:
- console
- tty1
- tty2
- tty3
- tty4
- tty5
- tty6
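# TtyValues feeds the Securetty service enabled above, which writes the list
# of consoles allowed for root login into /etc/securetty.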
# Remove ContainerCli once this scenario is tested on CentOS 8
ContainerCli: docker