tripleo-heat-templates/ci/environments/scenario001-multinode-containers.yaml
Steve Baker 93d87cf18d Always enable image prepare service for docker clouds
This change includes the service
OS::TripleO::Services::ContainerImagePrepare by default in the overcloud,
which triggers a container image prepare in the same way as is currently
done for the containerized undercloud.

Along with the Mistral action that populates the container image
parameters, this change makes blueprint container-prepare-workflow
functionally complete.

Change-Id: I8b0c5e630e63ef6a2e6f70f1eb00fd02f4cfd1c0
Blueprint: container-prepare-workflow
2018-08-15 12:09:23 +00:00

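For reference, the prepare parameters this service consumes are normally
supplied via the ContainerImagePrepare parameter. A minimal sketch in the
Rocky-era format (values illustrative only, not part of this change):

    parameter_defaults:
      ContainerImagePrepare:
        - push_destination: true
          set:
            namespace: docker.io/tripleomaster
            name_prefix: centos-binary-
            tag: current-tripleo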

resource_registry:
  OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode.yaml
  OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode.yaml
  OS::TripleO::Services::CephMgr: ../../docker/services/ceph-ansible/ceph-mgr.yaml
  OS::TripleO::Services::CephMon: ../../docker/services/ceph-ansible/ceph-mon.yaml
  OS::TripleO::Services::CephOSD: ../../docker/services/ceph-ansible/ceph-osd.yaml
  OS::TripleO::Services::CephClient: ../../docker/services/ceph-ansible/ceph-client.yaml
  OS::TripleO::Services::PankoApi: ../../docker/services/panko-api.yaml
  OS::TripleO::Services::Collectd: ../../docker/services/metrics/collectd.yaml
  OS::TripleO::Services::MetricsQdr: ../../docker/services/metrics/qdr.yaml
  OS::TripleO::Services::OsloMessagingRpc: ../../docker/services/pacemaker/rpc-rabbitmq.yaml
  OS::TripleO::Services::OsloMessagingNotify: ../../docker/services/messaging/notify-rabbitmq-shared.yaml
  OS::TripleO::Services::HAproxy: ../../docker/services/pacemaker/haproxy.yaml
  OS::TripleO::Services::Pacemaker: ../../puppet/services/pacemaker.yaml
  OS::TripleO::Services::PacemakerRemote: ../../puppet/services/pacemaker_remote.yaml
  OS::TripleO::Services::Clustercheck: ../../docker/services/pacemaker/clustercheck.yaml
  OS::TripleO::Services::Redis: ../../docker/services/pacemaker/database/redis.yaml
  OS::TripleO::Services::MySQL: ../../docker/services/pacemaker/database/mysql.yaml
  OS::TripleO::Services::CinderBackup: ../../docker/services/pacemaker/cinder-backup.yaml
  OS::TripleO::Services::CinderVolume: ../../docker/services/pacemaker/cinder-volume.yaml
  OS::TripleO::Services::Keepalived: OS::Heat::None
  OS::TripleO::Tasks::ControllerPreConfig: OS::Heat::None
  OS::TripleO::Tasks::ControllerPostConfig: OS::Heat::None
  OS::TripleO::Tasks::ControllerPostPuppetRestart: ../../extraconfig/tasks/post_puppet_pacemaker_restart.yaml
  # Needed to run scenario001
  OS::TripleO::Services::Fluentd: ../../docker/services/fluentd.yaml
  # NOTE(mmagr): We need to disable Sensu client deployment for now, as the
  # container health check is based on a successful RabbitMQ connection,
  # which does not happen in this case. We can enable it again once we
  # implement a default connection to the overcloud RabbitMQ instance.
  #OS::TripleO::Services::SensuClient: ../../docker/services/sensu-client.yaml
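  # If the Sensu client is ever re-enabled here, the MonitoringRabbit*
  # parameters (commented out near the end of parameter_defaults below)
  # would presumably need to be set as well so the client can reach a
  # RabbitMQ instance.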
  # Some infra instances don't pass the ping test but are otherwise working.
  # Since the OVB jobs also test this functionality, we can shut it off here.
  OS::TripleO::AllNodes::Validation: ../common/all-nodes-validation-disabled.yaml
parameter_defaults:
  ControllerServices:
    - OS::TripleO::Services::CACerts
    - OS::TripleO::Services::Clustercheck
    - OS::TripleO::Services::ContainerImagePrepare
    - OS::TripleO::Services::Docker
    - OS::TripleO::Services::Kernel
    - OS::TripleO::Services::Keystone
    - OS::TripleO::Services::LoginDefs
    - OS::TripleO::Services::GlanceApi
    - OS::TripleO::Services::HeatApi
    - OS::TripleO::Services::HeatApiCfn
    - OS::TripleO::Services::HeatEngine
    - OS::TripleO::Services::MySQL
    - OS::TripleO::Services::MySQLClient
    - OS::TripleO::Services::NeutronDhcpAgent
    - OS::TripleO::Services::NeutronL3Agent
    - OS::TripleO::Services::NeutronMetadataAgent
    - OS::TripleO::Services::NeutronServer
    - OS::TripleO::Services::NeutronCorePlugin
    - OS::TripleO::Services::NeutronOvsAgent
    - OS::TripleO::Services::OsloMessagingRpc
    - OS::TripleO::Services::OsloMessagingNotify
    - OS::TripleO::Services::HAproxy
    - OS::TripleO::Services::Keepalived
    - OS::TripleO::Services::Memcached
    - OS::TripleO::Services::Pacemaker
    - OS::TripleO::Services::NovaConductor
    - OS::TripleO::Services::NovaApi
    - OS::TripleO::Services::NovaPlacement
    - OS::TripleO::Services::NovaMetadata
    - OS::TripleO::Services::NovaScheduler
    - OS::TripleO::Services::Ntp
    - OS::TripleO::Services::Snmp
    - OS::TripleO::Services::Sshd
    - OS::TripleO::Services::Securetty
    - OS::TripleO::Services::Timezone
    - OS::TripleO::Services::NovaCompute
    - OS::TripleO::Services::NovaLibvirt
    - OS::TripleO::Services::NovaMigrationTarget
    - OS::TripleO::Services::Redis
    - OS::TripleO::Services::AodhApi
    - OS::TripleO::Services::AodhEvaluator
    - OS::TripleO::Services::AodhNotifier
    - OS::TripleO::Services::AodhListener
    - OS::TripleO::Services::CeilometerAgentCentral
    - OS::TripleO::Services::CeilometerAgentIpmi
    - OS::TripleO::Services::CeilometerAgentNotification
    - OS::TripleO::Services::ComputeCeilometerAgent
    - OS::TripleO::Services::GnocchiApi
    - OS::TripleO::Services::GnocchiMetricd
    - OS::TripleO::Services::GnocchiStatsd
    - OS::TripleO::Services::PankoApi
    - OS::TripleO::Services::CephMgr
    - OS::TripleO::Services::CephMon
    - OS::TripleO::Services::CephOSD
    - OS::TripleO::Services::CephClient
    - OS::TripleO::Services::CinderApi
    - OS::TripleO::Services::CinderBackup
    - OS::TripleO::Services::CinderScheduler
    - OS::TripleO::Services::CinderVolume
    - OS::TripleO::Services::Collectd
    - OS::TripleO::Services::MetricsQdr
    - OS::TripleO::Services::TripleoPackages
    - OS::TripleO::Services::TripleoFirewall
    - OS::TripleO::Services::Fluentd
    #- OS::TripleO::Services::SensuClient
    - OS::TripleO::Services::Iscsid
  ControllerExtraConfig:
    nova::compute::libvirt::services::libvirt_virt_type: qemu
    nova::compute::libvirt::libvirt_virt_type: qemu
    # NOTE(sileht): To decrease the time test_telemetry_integration takes, we
    # configure Ceilometer to poll more frequently and configure the 'high'
    # Gnocchi archive policy to keep one point every 60s. The test then takes
    # 2 minutes instead of 10. Note that the tempest telemetry.alarm_granularity
    # option must be in sync with the archive policy, i.e. 60s too.
    ceilometer::agent::polling::polling_interval: 15
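    # The matching tempest option would look roughly like this in tempest.conf
    # (a sketch only; it is not managed by this environment file):
    #   [telemetry]
    #   alarm_granularity = 60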
  ManagePolling: true
  ManagePipeline: true
  PipelinePublishers:
    - gnocchi://?archive_policy=high
  EventPipelinePublishers:
    - gnocchi://?archive_policy=high
    - panko://
  Debug: true
  DockerPuppetDebug: True
  CephAnsibleDisksConfig:
    devices:
      - /dev/loop3
    journal_size: 512
    osd_scenario: collocated
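  # /dev/loop3 is assumed to exist already on the CI node; such a device is
  # typically created beforehand with something along these lines (illustrative):
  #   dd if=/dev/zero of=/var/lib/ceph-osd.img bs=1M count=10240
  #   losetup /dev/loop3 /var/lib/ceph-osd.img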
  CephPoolDefaultPgNum: 32
  CephPoolDefaultSize: 1
  CephPools:
    - name: altrbd
      pg_num: 8
      rule_name: replicated_rule
  CephAnsibleExtraConfig:
    centos_package_dependencies: []
    ceph_osd_docker_memory_limit: '1g'
    ceph_mds_docker_memory_limit: '1g'
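  # The '1g' limits above are presumably sized for the CI node's limited RAM;
  # a production deployment would leave these at their defaults or size them
  # for real hardware.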
  CephAnsibleSkipTags: ''
  # NOTE: These IDs and keys should be regenerated for a production
  # deployment. What is here is suitable for developer and CI testing only.
  CephClusterFSID: '4b5c8c0a-ff60-454b-a1b4-9747aa737d19'
  CephMonKey: 'AQC+Ox1VmEr3BxAALZejqeHj50Nj6wJDvs96OQ=='
  CephAdminKey: 'AQDLOh1VgEp6FRAAFzT7Zw+Y9V6JJExQAsRnRQ=='
  CephClientKey: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw=='
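  # Fresh values can be generated with, for example:
  #   uuidgen                        # for CephClusterFSID
  #   ceph-authtool --gen-print-key  # for the Mon/Admin/Client keys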
  CephAnsiblePlaybookVerbosity: 1
  CephAnsibleEnvironmentVariables:
    ANSIBLE_SSH_RETRIES: '4'
    DEFAULT_FORKS: '3'
  NovaEnableRbdBackend: true
  CinderEnableRbdBackend: true
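  # altrbd is the extra pool defined under CephPools above; listing it here
  # exposes it to Cinder as an additional RBD pool.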
  CinderRbdExtraPools: altrbd
  CinderBackupBackend: ceph
  GlanceBackend: rbd
  GnocchiBackend: rbd
  CinderEnableIscsiBackend: false
  GnocchiArchivePolicy: 'high'
  BannerText: |
    ******************************************************************
    * This system is for the use of authorized users only.  Usage of *
    * this system may be monitored and recorded by system personnel. *
    * Anyone using this system expressly consents to such monitoring *
    * and is advised that if such monitoring reveals possible        *
    * evidence of criminal activity, system personnel may provide    *
    * the evidence from such monitoring to law enforcement officials.*
    ******************************************************************
  CollectdExtraPlugins:
    - rrdtool
  LoggingServers:
    - host: 127.0.0.1
      port: 24224
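  # 24224 is the default fluentd forward port; pointing at 127.0.0.1 assumes
  # a collector on the node itself, which suits CI rather than production.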
  #MonitoringRabbitHost: 127.0.0.1
  #MonitoringRabbitPort: 5676
  #MonitoringRabbitPassword: sensu
  TtyValues:
    - console
    - tty1
    - tty2
    - tty3
    - tty4
    - tty5
    - tty6