Ensure add-disk and hooks handle disks the same way

Depends-On: I2ea119f5a1b2a36ccd36df4db094f208a1db100e
Depends-On: Ie19e5318ea35c38e5d02963260b85fec0f233df6
Change-Id: Idebe45504233fb5559a3e9ddd9b2d6534cba7bb2
Closes-Bug: #1820271
This commit is contained in:
Chris MacNaughton 2019-03-15 14:06:02 +01:00 committed by Chris MacNaughton
parent 0b8a583892
commit ea56c2c197
4 changed files with 21 additions and 15 deletions

View File

@@ -68,7 +68,7 @@ def add_device(request, device_path, bucket=None,
ceph_hooks.get_journal_devices(),
hookenv.config('ignore-device-errors'),
hookenv.config('osd-encrypt'),
hookenv.config('bluestore'),
charms_ceph.utils.use_bluestore(),
hookenv.config('osd-encrypt-keymanager'),
osd_id)
# Make it fast!

View File

@@ -271,16 +271,6 @@ def use_vaultlocker():
return False
def use_bluestore():
"""Determine whether bluestore should be used for OSD's
:returns: whether bluestore disk format should be used
:rtype: bool"""
if cmp_pkgrevno('ceph', '12.2.0') < 0:
return False
return config('bluestore')
def install_apparmor_profile():
"""
Install ceph apparmor profiles and configure
@@ -402,7 +392,7 @@ def get_ceph_context(upgrading=False):
'dio': str(config('use-direct-io')).lower(),
'short_object_len': use_short_objects(),
'upgrade_in_progress': upgrading,
'bluestore': use_bluestore(),
'bluestore': ceph.use_bluestore(),
'bluestore_experimental': cmp_pkgrevno('ceph', '12.1.0') < 0,
'bluestore_block_wal_size': config('bluestore-block-wal-size'),
'bluestore_block_db_size': config('bluestore-block-db-size'),
@@ -547,8 +537,8 @@ def prepare_disks_and_activate():
if is_osd_bootstrap_ready():
log('ceph bootstrapped, rescanning disks')
emit_cephconf()
bluestore = use_bluestore()
ceph.udevadm_settle()
bluestore = ceph.use_bluestore()
for dev in get_devices():
ceph.osdize(dev, config('osd-format'),
osd_journal,

View File

@@ -25,9 +25,11 @@ class AddDiskActionTests(CharmTestCase):
add_disk, ['hookenv', 'kv'])
self.kv.return_value = self.kv
@mock.patch.object(add_disk.charms_ceph.utils, 'use_bluestore')
@mock.patch.object(add_disk.ceph_hooks, 'get_journal_devices')
@mock.patch.object(add_disk.charms_ceph.utils, 'osdize')
def test_add_device(self, mock_osdize, mock_get_journal_devices):
def test_add_device(self, mock_osdize, mock_get_journal_devices,
mock_use_bluestore):
def fake_config(key):
return {
@@ -41,6 +43,7 @@ class AddDiskActionTests(CharmTestCase):
self.hookenv.config.side_effect = fake_config
mock_get_journal_devices.return_value = ''
self.hookenv.relation_ids.return_value = ['ceph:0']
mock_use_bluestore.return_value = True
db = mock.MagicMock()
self.kv.return_value = db

View File

@@ -26,6 +26,8 @@ with patch('charmhelpers.contrib.hardening.harden.harden') as mock_dec:
lambda *args, **kwargs: f(*args, **kwargs))
import ceph_hooks
import charms_ceph.utils as ceph_utils
CHARM_CONFIG = {'config-flags': '',
'loglevel': 1,
'use-syslog': True,
@@ -63,6 +65,7 @@ class CephHooksTestCase(unittest.TestCase):
@patch.object(ceph_hooks, 'get_public_addr', lambda *args: "10.0.0.1")
@patch.object(ceph_hooks, 'get_cluster_addr', lambda *args: "10.1.0.1")
@patch.object(ceph_hooks, 'cmp_pkgrevno', lambda *args: 1)
@patch.object(ceph_utils, 'use_bluestore', lambda *args: False)
@patch.object(ceph_hooks, 'get_mon_hosts', lambda *args: ['10.0.0.1',
'10.0.0.2'])
@patch.object(ceph_hooks, 'get_networks', lambda *args: "")
@@ -110,6 +113,7 @@ class CephHooksTestCase(unittest.TestCase):
@patch.object(ceph_hooks, 'get_mon_hosts', lambda *args: ['10.0.0.1',
'10.0.0.2'])
@patch.object(ceph_hooks, 'get_networks', lambda *args: "")
@patch.object(ceph_utils, 'use_bluestore', lambda *args: False)
@patch.object(ceph, 'config')
@patch.object(ceph_hooks, 'config')
def test_get_ceph_context_invalid_bdev_enable_discard(self, mock_config,
@@ -154,6 +158,7 @@ class CephHooksTestCase(unittest.TestCase):
@patch.object(ceph_hooks, 'get_cluster_addr', lambda *args: "10.1.0.1")
@patch.object(ceph_hooks, 'cmp_pkgrevno',
lambda pkg, ver: -1 if ver == '12.1.0' else 1)
@patch.object(ceph_utils, 'use_bluestore', lambda *args: False)
@patch.object(ceph_hooks, 'get_mon_hosts', lambda *args: ['10.0.0.1',
'10.0.0.2'])
@patch.object(ceph_hooks, 'get_networks', lambda *args: "")
@@ -198,6 +203,7 @@ class CephHooksTestCase(unittest.TestCase):
@patch.object(ceph_hooks, 'get_public_addr', lambda *args: "10.0.0.1")
@patch.object(ceph_hooks, 'get_cluster_addr', lambda *args: "10.1.0.1")
@patch.object(ceph_hooks, 'cmp_pkgrevno', lambda *args: 1)
@patch.object(ceph_utils, 'use_bluestore', lambda *args: True)
@patch.object(ceph_hooks, 'get_mon_hosts', lambda *args: ['10.0.0.1',
'10.0.0.2'])
@patch.object(ceph_hooks, 'get_networks', lambda *args: "")
@@ -250,6 +256,7 @@ class CephHooksTestCase(unittest.TestCase):
@patch.object(ceph_hooks, 'get_cluster_addr', lambda *args: "10.1.0.1")
@patch.object(ceph_hooks, 'cmp_pkgrevno',
lambda pkg, ver: -1 if ver == '12.1.0' else 1)
@patch.object(ceph_utils, 'use_bluestore', lambda *args: True)
@patch.object(ceph_hooks, 'get_mon_hosts', lambda *args: ['10.0.0.1',
'10.0.0.2'])
@patch.object(ceph_hooks, 'get_networks', lambda *args: "")
@@ -298,6 +305,7 @@ class CephHooksTestCase(unittest.TestCase):
@patch.object(ceph_hooks, 'get_public_addr', lambda *args: "10.0.0.1")
@patch.object(ceph_hooks, 'get_cluster_addr', lambda *args: "10.1.0.1")
@patch.object(ceph_hooks, 'cmp_pkgrevno', lambda *args: 1)
@patch.object(ceph_utils, 'use_bluestore', lambda *args: False)
@patch.object(ceph_hooks, 'get_mon_hosts', lambda *args: ['10.0.0.1',
'10.0.0.2'])
@patch.object(ceph_hooks, 'get_networks', lambda *args: "")
@@ -344,6 +352,7 @@ class CephHooksTestCase(unittest.TestCase):
@patch.object(ceph_hooks, 'get_public_addr', lambda *args: "10.0.0.1")
@patch.object(ceph_hooks, 'get_cluster_addr', lambda *args: "10.1.0.1")
@patch.object(ceph_hooks, 'cmp_pkgrevno', lambda *args: 1)
@patch.object(ceph_utils, 'use_bluestore', lambda *args: False)
@patch.object(ceph_hooks, 'get_mon_hosts', lambda *args: ['10.0.0.1',
'10.0.0.2'])
@patch.object(ceph_hooks, 'get_networks', lambda *args: "")
@@ -384,6 +393,7 @@ class CephHooksTestCase(unittest.TestCase):
'bluestore_block_db_size': 0}
self.assertEqual(ctxt, expected)
@patch.object(ceph_utils, 'cmp_pkgrevno', lambda *args: 1)
@patch.object(ceph_hooks.ch_context, 'CephBlueStoreCompressionContext')
@patch.object(ceph_hooks.ch_ceph, 'get_osd_settings', lambda *args: {})
@patch.object(ceph_hooks, 'get_fsid', lambda *args: '1234')
@@ -394,13 +404,16 @@ class CephHooksTestCase(unittest.TestCase):
@patch.object(ceph_hooks, 'get_mon_hosts', lambda *args: ['10.0.0.1',
'10.0.0.2'])
@patch.object(ceph_hooks, 'get_networks', lambda *args: "")
@patch.object(ceph_utils, 'config')
@patch.object(ceph, 'config')
@patch.object(ceph_hooks, 'config')
def test_get_ceph_context_bluestore_compression(
self, mock_config, mock_config2, mock_bluestore_compression):
self, mock_config, mock_config2, mock_config3,
mock_bluestore_compression):
config = copy.deepcopy(CHARM_CONFIG)
mock_config.side_effect = lambda key: config[key]
mock_config2.side_effect = lambda key: config[key]
mock_config3.side_effect = lambda key: config[key]
mock_bluestore_compression().return_value = {
'fake-bluestore-compression-key': 'fake-value'}
ctxt = ceph_hooks.get_ceph_context()