Add support for rbd_exclusive_cinder_pool
As of the Queens release, cinder supports this config option which, if enabled, stops cinder from querying all volumes in a pool every time it does a delete in order to get accurate pool usage stats. The problem is that this querying causes tons of non-fatal race conditions and slows down deletes to the point where the RPC thread pool fills up, blocking further requests. Our charms do not configure this pool for use by any other service by default, and we are not aware of anyone doing this in the field, so this patch enables this option by default. Change-Id: I5377e2886a6e206d30bd7dc38a7e43a085aa524c Closes-Bug: 1789828
This commit is contained in:
parent
f2d17851a4
commit
b04b5f3b5f
@ -50,19 +50,15 @@ class CephSubordinateContext(OSContextGenerator):
|
|||||||
volume_driver = 'cinder.volume.drivers.rbd.RBDDriver'
|
volume_driver = 'cinder.volume.drivers.rbd.RBDDriver'
|
||||||
else:
|
else:
|
||||||
volume_driver = 'cinder.volume.driver.RBDDriver'
|
volume_driver = 'cinder.volume.driver.RBDDriver'
|
||||||
return {
|
|
||||||
"cinder": {
|
section = {service: [('volume_backend_name', service),
|
||||||
"/etc/cinder/cinder.conf": {
|
|
||||||
"sections": {
|
|
||||||
service: [
|
|
||||||
('volume_backend_name', service),
|
|
||||||
('volume_driver', volume_driver),
|
('volume_driver', volume_driver),
|
||||||
('rbd_pool', service),
|
('rbd_pool', service),
|
||||||
('rbd_user', service),
|
('rbd_user', service),
|
||||||
('rbd_secret_uuid', leader_get('secret-uuid')),
|
('rbd_secret_uuid', leader_get('secret-uuid')),
|
||||||
('rbd_ceph_conf', ceph_config_file()),
|
('rbd_ceph_conf', ceph_config_file())]}
|
||||||
]
|
|
||||||
}
|
if CompareOpenStackReleases(os_codename) >= "queens":
|
||||||
}
|
section[service].append(('rbd_exclusive_cinder_pool', True))
|
||||||
}
|
|
||||||
}
|
return {'cinder': {'/etc/cinder/cinder.conf': {'sections': section}}}
|
||||||
|
@ -358,6 +358,10 @@ class CinderCephBasicDeployment(OpenStackAmuletDeployment):
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if self._get_openstack_release() >= self.xenial_queens:
|
||||||
|
section = sub_dict['cinder']["/etc/cinder/cinder.conf"]["sections"]
|
||||||
|
section["cinder-ceph"].append(('rbd_exclusive_cinder_pool', True))
|
||||||
|
|
||||||
expected = {
|
expected = {
|
||||||
'subordinate_configuration': json.dumps(sub_dict),
|
'subordinate_configuration': json.dumps(sub_dict),
|
||||||
'private-address': u.valid_ip,
|
'private-address': u.valid_ip,
|
||||||
|
@ -84,3 +84,28 @@ class TestCinderContext(CharmTestCase):
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}})
|
}})
|
||||||
|
|
||||||
|
def test_ceph_related_queens(self):
|
||||||
|
self.is_relation_made.return_value = True
|
||||||
|
self.get_os_codename_package.return_value = "queens"
|
||||||
|
service = 'mycinder'
|
||||||
|
self.service_name.return_value = service
|
||||||
|
self.assertEqual(
|
||||||
|
contexts.CephSubordinateContext()(),
|
||||||
|
{"cinder": {
|
||||||
|
"/etc/cinder/cinder.conf": {
|
||||||
|
"sections": {
|
||||||
|
service: [
|
||||||
|
('volume_backend_name', service),
|
||||||
|
('volume_driver',
|
||||||
|
'cinder.volume.drivers.rbd.RBDDriver'),
|
||||||
|
('rbd_pool', service),
|
||||||
|
('rbd_user', service),
|
||||||
|
('rbd_secret_uuid', 'libvirt-uuid'),
|
||||||
|
('rbd_ceph_conf',
|
||||||
|
'/var/lib/charm/mycinder/ceph.conf'),
|
||||||
|
('rbd_exclusive_cinder_pool', True)
|
||||||
|
]
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}})
|
||||||
|
Loading…
Reference in New Issue
Block a user