From 4e85813d368b94f588471d9bdc4a04d04d3be541 Mon Sep 17 00:00:00 2001
From: Jiri Stransky
Date: Tue, 26 Sep 2017 15:00:13 +0200
Subject: [PATCH] Switch scenario004-containers to use ceph-ansible
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Use ceph-ansible to match the non-containerized variant of scenario004.

Depends-On: I137ca9a005df6e95a59a4d629eb94bda6ef00d3a
Depends-On: I6acac1826271efcd4d1acf6633bde6eb8a653f44
Co-Authored-By: Martin André
Co-Authored-By: Giulio Fidente
Change-Id: I1b3c57a2cfda9e74457f17504f51d5b30c5d381d
---
 .../scenario001-multinode-containers.yaml | 12 ++-----
 .../scenario004-multinode-containers.yaml | 36 +++++++++++--------
 2 files changed, 24 insertions(+), 24 deletions(-)

diff --git a/ci/environments/scenario001-multinode-containers.yaml b/ci/environments/scenario001-multinode-containers.yaml
index fa0ef8d197..a6bf5aeec4 100644
--- a/ci/environments/scenario001-multinode-containers.yaml
+++ b/ci/environments/scenario001-multinode-containers.yaml
@@ -107,20 +107,14 @@ parameter_defaults:
     # This makes the job twice as fast
     ceilometer::agent::polling::polling_interval: 15
   Debug: true
-  #NOTE(gfidente): not great but we need this to deploy on ext4
-  #http://docs.ceph.com/docs/jewel/rados/configuration/filesystem-recommendations/
   CephAnsibleDisksConfig:
     devices:
       - /dev/loop3
     journal_size: 512
     osd_scenario: collocated
+  CephPoolDefaultPgNum: 32
+  CephPoolDefaultSize: 1
   CephAnsibleExtraConfig:
-    ceph_conf_overrides:
-      global:
-        osd_pool_default_size: 1
-        osd_pool_default_pg_num: 32
-        osd_max_object_name_len: 256
-        osd_max_object_namespace_len: 64
     centos_package_dependencies: []
   CephAnsibleSkipTags: ''
   #NOTE: These ID's and keys should be regenerated for
@@ -130,8 +124,6 @@ parameter_defaults:
   CephMonKey: 'AQC+Ox1VmEr3BxAALZejqeHj50Nj6wJDvs96OQ=='
   CephAdminKey: 'AQDLOh1VgEp6FRAAFzT7Zw+Y9V6JJExQAsRnRQ=='
   CephClientKey: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw=='
-  CephPoolDefaultSize: 1
-  DockerCephDaemonImage: ceph/daemon:tag-stable-3.0-jewel-centos-7
   NovaEnableRbdBackend: true
   CinderEnableRbdBackend: true
   CinderBackupBackend: ceph
diff --git a/ci/environments/scenario004-multinode-containers.yaml b/ci/environments/scenario004-multinode-containers.yaml
index ecd3f6ada2..cc7a497991 100644
--- a/ci/environments/scenario004-multinode-containers.yaml
+++ b/ci/environments/scenario004-multinode-containers.yaml
@@ -1,11 +1,11 @@
 resource_registry:
   OS::TripleO::Controller::Net::SoftwareConfig: ../common/net-config-multinode.yaml
   OS::TripleO::Compute::Net::SoftwareConfig: ../common/net-config-multinode.yaml
-  # TODO deploy ceph with ceph-ansible: https://review.openstack.org/#/c/465066/
-  OS::TripleO::Services::CephMds: ../../puppet/services/ceph-mds.yaml
-  OS::TripleO::Services::CephMon: ../../puppet/services/ceph-mon.yaml
-  OS::TripleO::Services::CephOSD: ../../puppet/services/ceph-osd.yaml
-  OS::TripleO::Services::CephRgw: ../../puppet/services/ceph-rgw.yaml
+  OS::TripleO::Services::CephMon: ../../docker/services/ceph-ansible/ceph-mon.yaml
+  OS::TripleO::Services::CephOSD: ../../docker/services/ceph-ansible/ceph-osd.yaml
+  OS::TripleO::Services::CephMds: ../../docker/services/ceph-ansible/ceph-mds.yaml
+  OS::TripleO::Services::CephRgw: ../../docker/services/ceph-ansible/ceph-rgw.yaml
+  OS::TripleO::Services::CephClient: ../../docker/services/ceph-ansible/ceph-client.yaml
   OS::TripleO::Services::SwiftProxy: OS::Heat::None
   OS::TripleO::Services::SwiftStorage: OS::Heat::None
   OS::TripleO::Services::SwiftRingBuilder: OS::Heat::None
@@ -92,14 +92,17 @@ parameter_defaults:
     nova::compute::libvirt::services::libvirt_virt_type: qemu
     nova::compute::libvirt::libvirt_virt_type: qemu
   Debug: true
-  #NOTE(gfidente): not great but we need this to deploy on ext4
-  #http://docs.ceph.com/docs/jewel/rados/configuration/filesystem-recommendations/
-  ExtraConfig:
-    ceph::profile::params::osd_max_object_name_len: 256
-    ceph::profile::params::osd_max_object_namespace_len: 64
-    #NOTE(gfidente): necessary when deploying a single OSD
-    ceph::profile::params::osd_pool_default_pg_num: 32
-    ceph::profile::params::osd_pool_default_pgp_num: 32
+  CephAnsibleDisksConfig:
+    devices:
+      - /dev/loop3
+    journal_size: 512
+    journal_collocation: true
+    osd_scenario: collocated
+  CephPoolDefaultPgNum: 32
+  CephPoolDefaultSize: 1
+  CephAnsibleExtraConfig:
+    centos_package_dependencies: []
+  CephAnsibleSkipTags: ''
   #NOTE: These ID's and keys should be regenerated for
   # a production deployment. What is here is suitable for
   # developer and CI testing only.
@@ -107,7 +110,12 @@ parameter_defaults:
   CephMonKey: 'AQC+Ox1VmEr3BxAALZejqeHj50Nj6wJDvs96OQ=='
   CephAdminKey: 'AQDLOh1VgEp6FRAAFzT7Zw+Y9V6JJExQAsRnRQ=='
   CephClientKey: 'AQC+vYNXgDAgAhAAc8UoYt+OTz5uhV7ItLdwUw=='
-  CephPoolDefaultSize: 1
+  NovaEnableRbdBackend: true
+  CinderEnableRbdBackend: true
+  CinderBackupBackend: ceph
+  GlanceBackend: rbd
+  GnocchiBackend: rbd
+  CinderEnableIscsiBackend: false
   SwiftCeilometerPipelineEnabled: false
   # TODO: in Queens, re-add bgp-vpn and l2gw services when
   # containerized.