We don't deploy Keepalived in multi-node environments, since our HA
story is handled by Pacemaker. The VRRP failover that Keepalived
provides to keep the VIPs alive is therefore unused, and the service
itself isn't really needed. Instead, the VIPs can be configured
directly on the br-ctlplane interface, which already carries local_ip
and now also carries the public and admin IPs.

Keepalived is now deprecated and will be removed in the next cycle.

blueprint replace-keepalived-undercloud
Change-Id: I3192be07cb6c19d5e26cb4cddbe68213e7e48937
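As a rough illustration of the resulting layout (a sketch only, not
taken from this change: the addresses are the stock undercloud
defaults, and the /32 netmasks on the VIPs are an assumption), the
os-net-config rendering of br-ctlplane ends up with all three
addresses on the one bridge:

network_config:
  - type: ovs_bridge
    name: br-ctlplane
    use_dhcp: false
    addresses:
      - ip_netmask: 192.168.24.1/24  # local_ip
      - ip_netmask: 192.168.24.2/32  # public VIP (undercloud_public_host)
      - ip_netmask: 192.168.24.3/32  # admin VIP (undercloud_admin_host)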
resource_registry:
  OS::TripleO::Services::CephMgr: ../../deployment/ceph-ansible/ceph-mgr.yaml
  OS::TripleO::Services::CephMon: ../../deployment/ceph-ansible/ceph-mon.yaml
  OS::TripleO::Services::CephOSD: ../../deployment/ceph-ansible/ceph-osd.yaml
  OS::TripleO::Services::CephClient: ../../deployment/ceph-ansible/ceph-client.yaml
  OS::TripleO::Services::OsloMessagingRpc: ../../deployment/rabbitmq/rabbitmq-messaging-rpc-pacemaker-puppet.yaml
  OS::TripleO::Services::OsloMessagingNotify: ../../deployment/rabbitmq/rabbitmq-messaging-notify-shared-puppet.yaml
  # Some infra instances don't pass the ping test but are otherwise working.
  # Since the OVB jobs also test this functionality we can shut it off here.
  OS::TripleO::AllNodes::Validation: ../common/all-nodes-validation-disabled.yaml
  OS::TripleO::Services::OctaviaApi: ../../deployment/octavia/octavia-api-container-puppet.yaml
  OS::TripleO::Services::OctaviaHousekeeping: ../../deployment/octavia/octavia-housekeeping-container-puppet.yaml
  OS::TripleO::Services::OctaviaHealthManager: ../../deployment/octavia/octavia-health-manager-container-puppet.yaml
  OS::TripleO::Services::OctaviaWorker: ../../deployment/octavia/octavia-worker-container-puppet.yaml
  OS::TripleO::Services::OctaviaDeploymentConfig: ../../deployment/octavia/octavia-deployment-config.yaml
  OS::TripleO::Services::CinderApi: OS::Heat::None
  OS::TripleO::Services::CinderBackup: OS::Heat::None
  OS::TripleO::Services::CinderScheduler: OS::Heat::None
  OS::TripleO::Services::CinderVolume: OS::Heat::None
  OS::TripleO::Services::SwiftProxy: OS::Heat::None
  OS::TripleO::Services::SwiftDispersion: OS::Heat::None
  OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None
  OS::TripleO::Services::SwiftStorage: OS::Heat::None
  OS::TripleO::Services::Horizon: OS::Heat::None

parameter_defaults:
  StandaloneExtraConfig:
    octavia::controller::connection_retry_interval: 10
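  # StandaloneExtraConfig entries are passed straight through as hieradata
  # overrides; the key above tunes puppet-octavia's connection retry
  # interval, presumably so CI fails (or recovers) faster than the default.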
  OctaviaAmphoraSshKeyFile: /home/zuul/.ssh/id_rsa.pub
  OctaviaAmphoraImageFilename: /home/zuul/amphora.qcow2
  OctaviaCaKeyPassphrase: 'upstreamci'
  OctaviaManageNovaFlavor: true
  OctaviaGenerateCerts: true
  OctaviaEnableDriverAgent: false
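  # NodeDataLookup is keyed by each node's hardware UUID (as reported by
  # `dmidecode -s system-uuid`) and the values become per-node hieradata;
  # the foo/bar entries below are CI smoke-test data, not real settings.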
  NodeDataLookup:
    AB4114B1-9C9D-409A-BEFB-D88C151BF2C3: {"foo": "bar"}
    8CF1A7EA-7B4B-4433-AC83-17675514B1B8: {"foo2": "bar2"}
  Debug: true
  HideSensitiveLogs: false
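  # Each lvm_volumes entry below maps to one ceph-volume "lvm" OSD; the
  # named data/db/wal logical volumes are assumed to already exist in the
  # ceph_vg volume group before the deployment runs.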
  CephAnsibleDisksConfig:
    osd_objectstore: bluestore
    osd_scenario: lvm
    lvm_volumes:
      - data: ceph_lv_data
        data_vg: ceph_vg
        db: ceph_lv_db
        db_vg: ceph_vg
        wal: ceph_lv_wal
        wal_vg: ceph_vg
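  # The PG count and a replica size of 1 below are sized for a single-node
  # CI job; production clusters should keep the Ceph default size of 3.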
  CephPoolDefaultPgNum: 32
  CephPoolDefaultSize: 1
  CephAnsibleExtraConfig:
    centos_package_dependencies: []
    ceph_osd_docker_memory_limit: '1g'
    ceph_mds_docker_memory_limit: '1g'
    mon_host_v1: { 'enabled': False }
    handler_health_mon_check_retries: 10
    handler_health_mon_check_delay: 20
  # NOTE: These IDs and keys should be regenerated for a production
  # deployment. What is here is suitable for developer and CI testing only.
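  # A sketch of regenerating them (assumes util-linux and ceph-common are
  # installed on the host):
  #   uuidgen                        # fresh CephClusterFSID
  #   ceph-authtool --gen-print-key  # fresh value for each Ceph*Key below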
  CephClusterFSID: '4b5c8c0a-ff60-454b-a1b4-9747aa737d19'
  CephMonKey: 'AQC+Ox1VmEr3BxAALZejqeHj50Nj6wJDvs96OQ=='
  CephAdminKey: 'AQDLOh1VgEp6FRAAFzT7Zw+Y9V6JJExQAsRnRQ=='
  CephClientKey: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw=='
  CephAnsiblePlaybookVerbosity: 1
  CephAnsibleEnvironmentVariables:
    ANSIBLE_SSH_RETRIES: '4'
    DEFAULT_FORKS: '3'
  NovaEnableRbdBackend: true
  CinderEnableRbdBackend: true
  CephAnsibleRepo: "tripleo-centos-ceph-nautilus"
  CinderBackupBackend: ceph
  GlanceBackend: rbd
  CinderEnableIscsiBackend: false
  BannerText: |
    ******************************************************************
    * This system is for the use of authorized users only. Usage of *
    * this system may be monitored and recorded by system personnel. *
    * Anyone using this system expressly consents to such monitoring *
    * and is advised that if such monitoring reveals possible *
    * evidence of criminal activity, system personnel may provide *
    * the evidence from such monitoring to law enforcement officials.*
    ******************************************************************
  CollectdExtraPlugins:
    - rrdtool
  LoggingServers:
    - host: 127.0.0.1
      port: 24224
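  # 24224 above is fluentd's conventional "forward" input port; pointing it
  # at 127.0.0.1 presumably just exercises the log-forwarding path locally
  # within the CI node.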
  TtyValues:
    - console
    - tty1
    - tty2
    - tty3
    - tty4
    - tty5
    - tty6
  ContainerCli: podman