bb1287555c
With current "Ceph production example" the difference between ceph's public and storage network is not clear. We assign Storage Network to compute nodes, but it's not used there. We also asign Storage Network to ceph monitors, but it's not used there as well. Same problems apply to AIO environment. As Dmitriy suggested in [1], ceph should not use mgmt network for storage traffic. This change makes ceph use storage network for: - OSD<>OSD communication - client<>OSD communication - client<>MON communication I think it's the most common scenario where all ceph-related traffic uses dedicated(storage) network and do not depend on mgmt network. This change affects both "Ceph production example" docs and AIO environments. [1] https://review.opendev.org/c/openstack/openstack-ansible/+/856566 Change-Id: I74387a2e961e2b8355ea6a0c889b2f5674233ebf
---
# Because we have three haproxy nodes, we need
# one active LB IP, and we use keepalived for that.
## Load Balancer Configuration (haproxy/keepalived)
haproxy_keepalived_external_vip_cidr: "<external_ip_address>/<netmask>"
haproxy_keepalived_internal_vip_cidr: "172.29.236.9/32"
haproxy_keepalived_external_interface: ens2
haproxy_keepalived_internal_interface: br-mgmt
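# keepalived holds the internal VIP on exactly one of the three haproxy
# nodes at a time. 172.29.236.9 is assumed to live inside the br-mgmt
# (container) subnet and is typically also listed in used_ips so no
# container gets assigned that address.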

## Ceph cluster fsid (must be generated before first run)
## Generate a uuid using: python -c 'import uuid; print(str(uuid.uuid4()))'
generate_fsid: false
fsid: 116f14c4-7fe1-40e4-94eb-9240b63de5c1 # Replace with your generated UUID
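# With generate_fsid set to false, ceph-ansible uses the fsid supplied above
# instead of generating its own, so the cluster identity stays stable across
# playbook reruns.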

## ceph-ansible settings
## See https://github.com/ceph/ceph-ansible/tree/master/group_vars for
## additional configuration options available.
monitor_address_block: "{{ cidr_networks.storage }}"
public_network: "{{ cidr_networks.storage }}"
cluster_network: "{{ cidr_networks.storage }}"
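# These three settings put everything on the storage network: monitors bind
# there (monitor_address_block), client<>OSD and client<>MON traffic uses it
# (public_network), and OSD<>OSD replication uses it (cluster_network).
# Assuming storage is 172.29.244.0/22, the rendered ceph.conf would carry
# roughly:
#   public_network = 172.29.244.0/22
#   cluster_network = 172.29.244.0/22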
journal_size: 10240 # size in MB

# ceph-ansible automatically creates pools & keys for OpenStack services
openstack_config: true
cinder_ceph_client: cinder
glance_ceph_client: glance
glance_default_store: rbd
glance_rbd_store_pool: images
nova_libvirt_images_rbd_pool: vms
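# With openstack_config enabled, ceph-ansible creates the pools referenced
# in this file (images, vms, volumes) and matching cephx keys for the glance
# and cinder clients, so no manual pool or key creation is needed.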

cinder_backends:
  rbd_volumes:
    volume_driver: cinder.volume.drivers.rbd.RBDDriver
    rbd_pool: volumes
    rbd_ceph_conf: /etc/ceph/ceph.conf
    rbd_store_chunk_size: 8
    volume_backend_name: rbddriver
    rbd_user: "{{ cinder_ceph_client }}"
    rbd_secret_uuid: "{{ cinder_ceph_client_uuid }}"
    report_discard_supported: true
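A note on the last backend option: rbd_secret_uuid references cinder_ceph_client_uuid, which openstack-ansible treats as a deployment secret and, in the standard deployment, registers as a libvirt secret on compute hosts so nova can attach cinder's RBD volumes. A minimal sketch of the matching entry in /etc/openstack_deploy/user_secrets.yml, left empty so the password generation script (scripts/pw-token-gen.py) fills it in:

cinder_ceph_client_uuid: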