Fix cinder-volume AIO ceph scenario

At the moment we don't configure cinder-volume properly for the ceph
scenario: the LVM backend is always hardcoded, even when ceph is
deployed. We fix this by moving the cinder_backends definition from
conf.d to the AIO templates. With that, a proper tempest test has been
added to verify cinder-volume functionality.

Change-Id: I545f4098e899ab80045c9dba03101873b80f9a6c
Dmitriy Rabotyagov 2022-09-26 12:54:18 +02:00 committed by Dmitriy Rabotyagov
parent 8acc9802c3
commit 091ae6369d
4 changed files with 23 additions and 19 deletions
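For context on the mechanism the fix relies on: the os_cinder role turns each backend key of the cinder_backends dict into a backend section in cinder.conf (and, roughly, a matching volume type), and cinder_default_volume_type must name one of those keys. A minimal sketch of that contract, with the key names taken from the diffs below (the rendering detail is role behavior, not part of this change):

    cinder_backends:
      aio_ceph:                            # becomes the backend section / type name
        volume_driver: cinder.volume.drivers.rbd.RBDDriver
        rbd_pool: volumes
    cinder_default_volume_type: aio_ceph   # must match a cinder_backends key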

@@ -29,16 +29,3 @@ storage-infra_hosts:
 storage_hosts:
   aio1:
     ip: 172.29.236.100
-    container_vars:
-      cinder_backends:
-        limit_container_types: cinder_volume
-        lvm:
-          volume_group: cinder-volumes
-          volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
-          volume_backend_name: LVM_iSCSI
-          iscsi_ip_address: "172.29.236.100"
-          lvm_type: "thin"
-      extra_volume_types:
-        - low-iops
-        - high-iops
-        - ultra-high-iops
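With those lines gone, the conf.d entry keeps only the host mapping from the context lines above, i.e. the file reduces to:

    storage_hosts:
      aio1:
        ip: 172.29.236.100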

@@ -91,15 +91,20 @@
           data: lv-{{ d | basename }}
       {% endfor %}
       cinder_backends:
-        "rbd_volumes":
+        aio_ceph:
           volume_driver: cinder.volume.drivers.rbd.RBDDriver
           rbd_pool: volumes
           rbd_ceph_conf: /etc/ceph/ceph.conf
           rbd_store_chunk_size: 8
+          rbd_exclusive_cinder_pool: true
           volume_backend_name: rbddriver
-          rbd_user: cinder
+          rbd_user: "{% raw %}{{ cinder_ceph_client }}{% endraw %}"
           rbd_secret_uuid: "{% raw %}{{ cinder_ceph_client_uuid }}{% endraw %}"
           report_discard_supported: true
+      extra_volume_types:
+        - low-iops
+        - high-iops
+        - ultra-high-iops
     dest: /etc/openstack_deploy/user_ceph_aio.yml
     force: no
   become: false
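One subtlety worth calling out: the values that reference role variables are wrapped in {% raw %}, so the bootstrap-time template render writes the Jinja expressions through literally, and they resolve only when the playbooks later load user_ceph_aio.yml. The generated file should therefore contain, roughly:

    cinder_backends:
      aio_ceph:
        volume_driver: cinder.volume.drivers.rbd.RBDDriver
        rbd_pool: volumes
        rbd_user: "{{ cinder_ceph_client }}"
        rbd_secret_uuid: "{{ cinder_ceph_client_uuid }}"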

@@ -287,3 +287,16 @@ deployment_environment_variables:
 
 lxc_container_networks: {}
 {% endif %}
+{% if 'ceph' not in bootstrap_host_scenarios_expanded %}
+cinder_backends:
+  lvm:
+    volume_group: cinder-volumes
+    volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
+    volume_backend_name: LVM_iSCSI
+    iscsi_ip_address: "172.29.236.100"
+    lvm_type: "thin"
+extra_volume_types:
+  - low-iops
+  - high-iops
+  - ultra-high-iops
+{% endif %}
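Note the one intentional difference from the old conf.d block: limit_container_types is dropped, since it is an inventory-level filter that has no meaning in user variables. For a scenario without ceph, the conditional renders to plain YAML in the AIO user variables, e.g. (rendered form, assuming the default AIO address):

    cinder_backends:
      lvm:
        volume_group: cinder-volumes
        volume_driver: cinder.volume.drivers.lvm.LVMVolumeDriver
        volume_backend_name: LVM_iSCSI
        iscsi_ip_address: "172.29.236.100"
        lvm_type: "thin"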

@@ -24,12 +24,11 @@ ceph_conf_overrides_custom:
   global:
     mon_max_pg_per_osd: 500
 openstack_config: true # Ceph ansible automatically creates pools & keys
-cinder_ceph_client: cinder
-cinder_default_volume_type: rbd_volumes
+cinder_default_volume_type: aio_ceph
 glance_ceph_client: glance
 glance_default_store: rbd
-glance_rbd_store_pool: images
-nova_libvirt_images_rbd_pool: vms
 # NOTE(noonedeadpunk): ceph bug to track the issue https://tracker.ceph.com/issues/46295
 tempest_test_excludelist:
   - tempest.scenario.test_object_storage_basic_ops.TestObjectStorageBasicOps.test_swift_acl_anonymous_download
+tempest_test_includelist:
+  - tempest.scenario.test_volume_boot_pattern.TestVolumeBootPattern.test_volume_boot_pattern
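Since tempest_test_includelist feeds the os_tempest include list, the volume boot pattern scenario now exercises cinder-volume on every run of the ceph scenario. The list can be extended the same way; an illustrative addition (the second test is hypothetical here, not part of this change):

    tempest_test_includelist:
      - tempest.scenario.test_volume_boot_pattern.TestVolumeBootPattern.test_volume_boot_pattern
      - tempest.api.volume.test_volumes_get.VolumesGetTest.test_volume_create_get_update_delete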