Refactor osd pool checks for flexibility and reusability in other ceph-related charms (radosgw).

Ryan Beisner
2015-06-22 16:11:44 +00:00
parent a6568a090c
commit 9115a56fd1
4 changed files with 87 additions and 38 deletions


@@ -397,8 +397,7 @@ class CephOsdBasicDeployment(OpenStackAmuletDeployment):
         identical, and expect specific pools to be present."""
         u.log.debug('Checking pools on ceph units...')
 
-        cmd = 'sudo ceph osd lspools'
-        pools = self.get_ceph_expected_pools()
+        expected_pools = self.get_ceph_expected_pools()
         results = []
         sentries = [
             self.ceph_osd_sentry,
@@ -407,33 +406,31 @@ class CephOsdBasicDeployment(OpenStackAmuletDeployment):
             self.ceph2_sentry
         ]
 
         # Check for presence of expected pools on each unit
+        u.log.debug('Expected pools: {}'.format(expected_pools))
         for sentry_unit in sentries:
-            output, code = sentry_unit.run(cmd)
-            results.append(output)
-            msg = ('{} `{}` returned {} '
-                   '{}'.format(sentry_unit.info['unit_name'],
-                               cmd, code, output))
-            u.log.debug(msg)
-            if code != 0:
-                amulet.raise_status(amulet.FAIL, msg=msg)
-
-            for pool in pools:
-                if pool not in output:
+            pools = u.get_ceph_pools(sentry_unit)
+            results.append(pools)
+            # Check for presence of all pools on this unit
+            for expected_pool in expected_pools:
+                if expected_pool not in pools:
                     msg = ('{} does not have pool: '
-                           '{}'.format(sentry_unit.info['unit_name'], pool))
+                           '{}'.format(sentry_unit.info['unit_name'],
+                                       expected_pool))
                     amulet.raise_status(amulet.FAIL, msg=msg)
-            u.log.debug('{} has the expected '
+            u.log.debug('{} has (at least) the expected '
                         'pools.'.format(sentry_unit.info['unit_name']))
 
-        # Check that lspool produces the same output on all units
-        if len(set(results)) == 1:
+        # Check that all units returned the same pool name:id data
+        ret = u.validate_list_of_identical_dicts(results)
+        if ret:
+            u.log.debug('Pool list results: {}'.format(results))
+            msg = ('{}; Pool list results are not identical on all '
+                   'ceph units.'.format(ret))
+            amulet.raise_status(amulet.FAIL, msg=msg)
+        else:
             u.log.debug('Pool list on all ceph units produced the '
                         'same results (OK).')
-        else:
-            u.log.debug('Pool list results: {}'.format(results))
-            msg = 'Pool list results are not identical on all ceph units.'
-            amulet.raise_status(amulet.FAIL, msg=msg)
 
     def test_410_ceph_cinder_vol_create(self):
         """Create and confirm a ceph-backed cinder volume, and inspect
@@ -442,7 +439,7 @@ class CephOsdBasicDeployment(OpenStackAmuletDeployment):
         sentry_unit = self.ceph0_sentry
         obj_count_samples = []
         pool_size_samples = []
-        pools = self.get_ceph_expected_pools()
+        pools = u.get_ceph_pools(self.ceph0_sentry)
         cinder_pool = pools['cinder']
 
         # Check ceph cinder pool object count, disk space usage and pool name
@@ -499,7 +496,7 @@ class CephOsdBasicDeployment(OpenStackAmuletDeployment):
         sentry_unit = self.ceph0_sentry
         obj_count_samples = []
         pool_size_samples = []
-        pools = self.get_ceph_expected_pools()
+        pools = u.get_ceph_pools(self.ceph0_sentry)
         glance_pool = pools['glance']
 
         # Check ceph glance pool object count, disk space usage and pool name


@@ -523,3 +523,19 @@ class AmuletUtils(object):
                                                         a_pids))
             self.log.debug(msg)
             return None
+
+    def validate_list_of_identical_dicts(self, list_of_dicts):
+        """Check that all dicts within a list are identical."""
+        hashes = []
+        for _dict in list_of_dicts:
+            hashes.append(hash(frozenset(_dict.items())))
+
+        self.log.debug('Hashes: {}'.format(hashes))
+        if len(set(hashes)) == 1:
+            msg = 'Dicts within list are identical'
+            self.log.debug(msg)
+        else:
+            msg = 'Dicts within list are not identical'
+            return msg
+
+        return None
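
Note: the helper returns None when every dict matches and a message string otherwise, so callers treat any truthy return as a failure. A hypothetical standalone restatement of the comparison, assuming all dict values are hashable (hash(frozenset(...)) requires it):

    # Hypothetical restatement of validate_list_of_identical_dicts() core logic.
    def identical_dicts(list_of_dicts):
        hashes = [hash(frozenset(d.items())) for d in list_of_dicts]
        return len(set(hashes)) == 1  # True when all dicts are identical

    assert identical_dicts([{'rbd': 0, 'cinder': 1}, {'rbd': 0, 'cinder': 1}])
    assert not identical_dicts([{'rbd': 0}, {'rbd': 1}])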


@@ -149,22 +149,34 @@ class OpenStackAmuletDeployment(AmuletDeployment):
         else:
             return releases[self.series]
 
-    def get_ceph_expected_pools(self):
-        """Return a dict of expected ceph pools based on
-        Ubuntu-OpenStack release"""
+    def get_ceph_expected_pools(self, radosgw=False):
+        """Return a list of expected ceph pools based on Ubuntu-OpenStack
+        release and whether ceph radosgw is flagged as present or not."""
         if self._get_openstack_release() >= self.trusty_kilo:
             # Kilo or later
-            return {
-                'rbd': 0,
-                'cinder': 1,
-                'glance': 2
-            }
+            pools = [
+                'rbd',
+                'cinder',
+                'glance'
+            ]
         else:
             # Juno or earlier
-            return {
-                'data': 0,
-                'metadata': 1,
-                'rbd': 2,
-                'cinder': 3,
-                'glance': 4
-            }
+            pools = [
+                'data',
+                'metadata',
+                'rbd',
+                'cinder',
+                'glance'
+            ]
+
+        if radosgw:
+            pools.extend([
+                '.rgw.root',
+                '.rgw.control',
+                '.rgw',
+                '.rgw.gc',
+                '.users.uid'
+            ])
+
+        return pools
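
Note: a sketch of how the new radosgw flag is meant to be used; the call site below is hypothetical and assumes a deployment on kilo or later:

    # Hypothetical call site inside a radosgw-aware amulet test.
    expected = self.get_ceph_expected_pools(radosgw=True)
    # On kilo or later this yields:
    # ['rbd', 'cinder', 'glance',
    #  '.rgw.root', '.rgw.control', '.rgw', '.rgw.gc', '.users.uid']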


@@ -445,6 +445,30 @@ class OpenStackAmuletUtils(AmuletUtils):
                " | grep -o '[0-9]*'`".format(index + 1))
         return cmd
 
+    def get_ceph_pools(self, sentry_unit):
+        """Return a dict of ceph pools from a single ceph unit, with
+        pool name as keys, pool id as vals."""
+        pools = {}
+        cmd = 'sudo ceph osd lspools'
+        output, code = sentry_unit.run(cmd)
+        if code != 0:
+            msg = ('{} `{}` returned {} '
+                   '{}'.format(sentry_unit.info['unit_name'],
+                               cmd, code, output))
+            raise RuntimeError(msg)
+
+        # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance,
+        for pool in str(output).split(','):
+            pool_id_name = pool.split(' ')
+            if len(pool_id_name) == 2:
+                pool_id = pool_id_name[0]
+                pool_name = pool_id_name[1]
+                pools[pool_name] = int(pool_id)
+
+        self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'],
+                                                pools))
+        return pools
+
     def get_ceph_df(self, sentry_unit):
         """Return dict of ceph df json output, including ceph pool state.