---
# Because we have three haproxy nodes, we need to have one active LB IP,
# and we use keepalived for that.
# These variables must be defined when external_lb_vip_address or
# internal_lb_vip_address is set to an FQDN.

## Load Balancer Configuration (haproxy/keepalived)
haproxy_keepalived_external_vip_cidr: "/" # left unset here; see the example note at the end of this file
haproxy_keepalived_internal_vip_cidr: "172.29.236.9/32"
haproxy_keepalived_external_interface: ens2
haproxy_keepalived_internal_interface: br-mgmt

## Ceph cluster fsid (must be generated before first run)
## Generate a uuid using: python -c 'import uuid; print(str(uuid.uuid4()))'
generate_fsid: false
fsid: 116f14c4-7fe1-40e4-94eb-9240b63de5c1 # Replace with your generated UUID

## ceph-ansible settings
## See https://github.com/ceph/ceph-ansible/tree/master/group_vars for
## additional configuration options available.
monitor_address_block: "{{ cidr_networks.storage }}"
public_network: "{{ cidr_networks.storage }}"
cluster_network: "{{ cidr_networks.storage }}"
journal_size: 10240 # size in MB

# ceph-ansible automatically creates pools & keys for OpenStack services
openstack_config: true
cinder_ceph_client: cinder
glance_ceph_client: glance
glance_default_store: rbd
glance_rbd_store_pool: images
nova_libvirt_images_rbd_pool: vms

cinder_backends:
  rbd_volumes:
    volume_driver: cinder.volume.drivers.rbd.RBDDriver
    rbd_pool: volumes
    rbd_ceph_conf: /etc/ceph/ceph.conf
    rbd_store_chunk_size: 8
    volume_backend_name: rbddriver
    rbd_user: "{{ cinder_ceph_client }}"
    rbd_secret_uuid: "{{ cinder_ceph_client_uuid }}"
    report_discard_supported: true
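
# NOTE: the keepalived external VIP CIDR above is left as a placeholder ("/")
# because the public address depends on your environment. A hypothetical
# completed value would look like the commented line below; the address shown
# is only an example from the reserved documentation range (203.0.113.0/24),
# not a value from this deployment. Substitute your own public VIP in
# <address>/32 form.
#
#   haproxy_keepalived_external_vip_cidr: "203.0.113.10/32"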