From e3f697df6e1ff57b6f9e2cf3d455984dbf1faf84 Mon Sep 17 00:00:00 2001 From: John Fulton Date: Fri, 23 Feb 2018 17:10:08 -0500 Subject: [PATCH] Switch scenario00{1,4}-standalone to Ceph bluestore Modify scenario00{1,4}-standalone to use Ceph's bluestore in place of filestore. Bluestore is the default deployment method as of version 3.2 of ceph-ansible so we should test it in CI. Use pre-created lvm_volumes parameter to avoid issue with 'ceph-volume batch' mode which does not work on loopback devices. Will use https://review.openstack.org/#/c/638364 to quickly disable and then re-enable these jobs to help land this without disruption. blueprint: bluestore Depends-On: I10426b4ff3ff17acf96fa9b17bf0ef3f89b0f542 Change-Id: Id2658ae814b580971d559af616b8ba034dff681b --- ci/environments/scenario001-standalone.yaml | 13 +++++++++---- ci/environments/scenario004-standalone.yaml | 13 +++++++++---- 2 files changed, 18 insertions(+), 8 deletions(-) diff --git a/ci/environments/scenario001-standalone.yaml b/ci/environments/scenario001-standalone.yaml index 74be368af3..8ee4909ff3 100644 --- a/ci/environments/scenario001-standalone.yaml +++ b/ci/environments/scenario001-standalone.yaml @@ -63,10 +63,15 @@ parameter_defaults: # fetch dir needed for standalone LocalCephAnsibleFetchDirectoryBackup: /var/lib/ceph_ansible_fetch CephAnsibleDisksConfig: - devices: - - /dev/loop3 - journal_size: 512 - osd_scenario: collocated + osd_objectstore: bluestore + osd_scenario: lvm + lvm_volumes: + - data: ceph_lv_data + data_vg: ceph_vg + db: ceph_lv_db + db_vg: ceph_vg + wal: ceph_lv_wal + wal_vg: ceph_vg CephPoolDefaultPgNum: 32 CephPoolDefaultSize: 1 CephAnsibleExtraConfig: diff --git a/ci/environments/scenario004-standalone.yaml b/ci/environments/scenario004-standalone.yaml index 8c6b17857b..36e0fee462 100644 --- a/ci/environments/scenario004-standalone.yaml +++ b/ci/environments/scenario004-standalone.yaml @@ -39,10 +39,15 @@ parameter_defaults: # fetch dir needed for standalone 
LocalCephAnsibleFetchDirectoryBackup: /var/lib/ceph_ansible_fetch CephAnsibleDisksConfig: - devices: - - /dev/loop3 - journal_size: 512 - osd_scenario: collocated + osd_objectstore: bluestore + osd_scenario: lvm + lvm_volumes: + - data: ceph_lv_data + data_vg: ceph_vg + db: ceph_lv_db + db_vg: ceph_vg + wal: ceph_lv_wal + wal_vg: ceph_vg CephPoolDefaultPgNum: 32 CephPoolDefaultSize: 1 CephAnsibleExtraConfig: