resource_registry:
  OS::TripleO::Services::CephClient: ../../deployment/cephadm/ceph-client.yaml
  OS::TripleO::Services::CephMgr: ../../deployment/cephadm/ceph-mgr.yaml
  OS::TripleO::Services::CephMon: ../../deployment/cephadm/ceph-mon.yaml
  OS::TripleO::Services::CephOSD: ../../deployment/cephadm/ceph-osd.yaml
  OS::TripleO::Services::CinderApi: OS::Heat::None
  OS::TripleO::Services::CinderBackup: OS::Heat::None
  OS::TripleO::Services::CinderScheduler: OS::Heat::None
  OS::TripleO::Services::CinderVolume: OS::Heat::None
  OS::TripleO::Services::Horizon: OS::Heat::None
  OS::TripleO::Services::OctaviaApi: ../../deployment/octavia/octavia-api-container-puppet.yaml
  OS::TripleO::Services::OctaviaDeploymentConfig: ../../deployment/octavia/octavia-deployment-config.yaml
  OS::TripleO::Services::OctaviaHealthManager: ../../deployment/octavia/octavia-health-manager-container-puppet.yaml
  OS::TripleO::Services::OctaviaHousekeeping: ../../deployment/octavia/octavia-housekeeping-container-puppet.yaml
  OS::TripleO::Services::OctaviaWorker: ../../deployment/octavia/octavia-worker-container-puppet.yaml
  OS::TripleO::Services::OsloMessagingNotify: ../../deployment/rabbitmq/rabbitmq-messaging-notify-shared-puppet.yaml
  OS::TripleO::Services::OsloMessagingRpc: ../../deployment/rabbitmq/rabbitmq-messaging-rpc-pacemaker-puppet.yaml
  OS::TripleO::Services::SwiftDispersion: OS::Heat::None
  OS::TripleO::Services::SwiftProxy: OS::Heat::None
  OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None
  OS::TripleO::Services::SwiftStorage: OS::Heat::None

parameter_defaults:
  StandaloneExtraConfig:
    octavia::controller::connection_retry_interval: 10
  OctaviaAmphoraSshKeyFile: /home/zuul/.ssh/id_rsa.pub
  OctaviaAmphoraImageFilename: /home/zuul/amphora.qcow2
  OctaviaCaKeyPassphrase: 'upstreamci'
  OctaviaManageNovaFlavor: true
  OctaviaGenerateCerts: true
  NodeDataLookup:
    AB4114B1-9C9D-409A-BEFB-D88C151BF2C3: {"foo": "bar"}
    8CF1A7EA-7B4B-4433-AC83-17675514B1B8: {"foo2": "bar2"}
  Debug: true
  HideSensitiveLogs: false
  CephPoolDefaultPgNum: 8
  CephPoolDefaultSize: 1
  # NOTE: These IDs and keys should be regenerated for
  # a production deployment. What is here is suitable for
  # developer and CI testing only.
  CephClusterFSID: '4b5c8c0a-ff60-454b-a1b4-9747aa737d19'
  CephClientKey: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw=='
  NovaEnableRbdBackend: true
  CinderEnableRbdBackend: true
  CinderBackupBackend: ceph
  GlanceBackend: rbd
  CinderEnableIscsiBackend: false
  BannerText: |
    ******************************************************************
    * This system is for the use of authorized users only. Usage of  *
    * this system may be monitored and recorded by system personnel. *
    * Anyone using this system expressly consents to such monitoring *
    * and is advised that if such monitoring reveals possible        *
    * evidence of criminal activity, system personnel may provide    *
    * the evidence from such monitoring to law enforcement officials.*
    ******************************************************************
  CollectdExtraPlugins:
    - rrdtool
  LoggingServers:
    - host: 127.0.0.1
      port: 24224
  TtyValues:
    - console
    - tty1
    - tty2
    - tty3
    - tty4
    - tty5
    - tty6
  ContainerCli: podman
  CephConfigPath: "/etc/ceph"
  CephClientConfigVars: "{{ playbook_dir }}/cephadm/ceph_client.yml"
  CephSpecFqdn: true
  CephOsdSpec:
    data_devices:
      paths:
        - /dev/ceph_vg/ceph_lv_data
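
# Usage sketch (not part of the original deployment data): this is a Heat
# environment file, and the ../../deployment/... paths in resource_registry
# assume it sits two directory levels below the tripleo-heat-templates root
# (e.g. under ci/environments/). It would typically be applied by passing it
# to the deploy command with -e, roughly as below; the exact flags depend on
# the tripleoclient version, and <this-file> is a placeholder:
#
#   sudo openstack tripleo deploy \
#     --templates /usr/share/openstack-tripleo-heat-templates \
#     --standalone \
#     -e /usr/share/openstack-tripleo-heat-templates/ci/environments/<this-file>.yaml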