diff --git a/cinder/volume/drivers/rbd.py b/cinder/volume/drivers/rbd.py
index b1838e84ab1..cda8ff69699 100644
--- a/cinder/volume/drivers/rbd.py
+++ b/cinder/volume/drivers/rbd.py
@@ -102,13 +102,16 @@ RBD_OPTS = [
                      'dynamic value (used + current free) and to False to '
                      'report a static value (quota max bytes if defined and '
                      'global size of cluster if not).'),
-    cfg.BoolOpt('rbd_exclusive_cinder_pool', default=False,
-                help="Set to True if the pool is used exclusively by Cinder. "
+    cfg.BoolOpt('rbd_exclusive_cinder_pool', default=True,
+                help="Set to False if the pool is shared with other services. "
                      "On exclusive use driver won't query images' provisioned "
                      "size as they will match the value calculated by the "
                      "Cinder core code for allocated_capacity_gb. This "
                      "reduces the load on the Ceph cluster as well as on the "
-                     "volume service."),
+                     "volume service. On non exclusive use the driver will "
+                     "query the Ceph cluster for the space used by each "
+                     "image, which is an intensive operation that issues an "
+                     "independent request per image."),
     cfg.BoolOpt('enable_deferred_deletion', default=False,
                 help='Enable deferred deletion. Upon deletion, volumes are '
                      'tagged for deletion but will only be removed '
diff --git a/doc/source/configuration/block-storage/drivers/ceph-rbd-volume-driver.rst b/doc/source/configuration/block-storage/drivers/ceph-rbd-volume-driver.rst
index 78c564b9ab9..cdbeed3d7a9 100644
--- a/doc/source/configuration/block-storage/drivers/ceph-rbd-volume-driver.rst
+++ b/doc/source/configuration/block-storage/drivers/ceph-rbd-volume-driver.rst
@@ -81,6 +81,51 @@ Ceph exposes RADOS; you can access it through the following interfaces:
   Linux kernel and QEMU block devices that stripe data across multiple
   objects.
 
+RBD pool
+~~~~~~~~
+
+The RBD pool used by the Cinder backend is configured with the ``rbd_pool``
+option. By default the driver expects exclusive management access to that
+pool, meaning it is the only system creating and deleting resources in it,
+since that is the recommended deployment choice.
+
+Pool sharing is strongly discouraged: if the pool is shared with other
+services, whether within OpenStack (Nova, Glance, another Cinder backend) or
+outside of OpenStack (oVirt), the stats returned by the driver to the
+scheduler will not be entirely accurate.
+
+The inaccuracy is that the space actually used by the Cinder volumes will be
+lower than the reported value, because the report also includes the space
+used by the other services.
+
+Setting the ``rbd_exclusive_cinder_pool`` configuration option to ``false``
+fixes this inaccuracy, but it has a performance impact.
+
+.. warning::
+
+   Setting ``rbd_exclusive_cinder_pool`` to ``false`` increases the load on
+   the Cinder driver and the Ceph cluster, since a separate request is made
+   for each existing image to retrieve its size during the stats gathering
+   process.
+
+   For deployments with a large number of volumes it is recommended to keep
+   the default value of ``true`` and accept the inaccuracy, as it should not
+   be particularly problematic.
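+
+Deployments that do need to share the pool might configure the backend along
+these lines in ``cinder.conf`` (the section, pool, and user names below are
+illustrative, not defaults):
+
+.. code-block:: ini
+
+   [ceph-shared]
+   volume_driver = cinder.volume.drivers.rbd.RBDDriver
+   volume_backend_name = ceph-shared
+   rbd_pool = shared-volumes
+   rbd_ceph_conf = /etc/ceph/ceph.conf
+   rbd_user = cinder
+   rbd_exclusive_cinder_pool = false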
+
 Driver options
 ~~~~~~~~~~~~~~
 
diff --git a/releasenotes/notes/change-default-rbd_exclusive_cinder_pool-e59c528c7f728780.yaml b/releasenotes/notes/change-default-rbd_exclusive_cinder_pool-e59c528c7f728780.yaml
new file mode 100644
index 00000000000..6ae6f6747cb
--- /dev/null
+++ b/releasenotes/notes/change-default-rbd_exclusive_cinder_pool-e59c528c7f728780.yaml
@@ -0,0 +1,17 @@
+---
+upgrade:
+  - |
+    Ceph/RBD volume backends will now assume exclusive Cinder pools, as if
+    they had ``rbd_exclusive_cinder_pool = true`` in their configuration.
+
+    This helps deployments with a large number of volumes and prevents issues
+    in deployments with a growing number of volumes, at the small cost of
+    slightly less accurate stats being reported to the scheduler.
+fixes:
+  - |
+    Ceph/RBD: Fix Cinder taking a long time to start for Ceph/RBD backends.
+    (`Related-Bug #1704106 `_)
+  - |
+    Ceph/RBD: Fix Cinder becoming non-responsive and stats gathering taking
+    longer than its period. (`Related-Bug #1704106
+    `_)