sync tests/charmhelpers
parent e8024e0c66
commit 01dbb884ae

Makefile
@@ -21,7 +21,7 @@ bin/charm_helpers_sync.py:
	> bin/charm_helpers_sync.py

sync: bin/charm_helpers_sync.py
#	@$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml
	@$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-hooks.yaml
	@$(PYTHON) bin/charm_helpers_sync.py -c charm-helpers-tests.yaml

publish: lint unit_test
@@ -1,5 +1,4 @@
#branch: lp:charm-helpers
branch: lp:~1chb1n/charm-helpers/amulet-ceph-cinder-updates/
branch: lp:charm-helpers
destination: tests/charmhelpers
include:
    - contrib.amulet
@@ -185,10 +185,23 @@ class AmuletUtils(object):
        for k in expected.keys():
            if not config.has_option(section, k):
                return "section [{}] is missing option {}".format(section, k)
            if config.get(section, k) != expected[k]:

            actual = config.get(section, k)
            v = expected[k]
            if (isinstance(v, six.string_types) or
                    isinstance(v, bool) or
                    isinstance(v, six.integer_types)):
                # handle explicit values
                if actual != v:
                    return "section [{}] {}:{} != expected {}:{}".format(
                    section, k, config.get(section, k), k, expected[k])
                        section, k, actual, k, expected[k])
            else:
                # handle not_null, valid_ip boolean comparison methods, etc.
                if v(actual):
                    return None
                else:
                    return "section [{}] {}:{} != expected {}:{}".format(
                        section, k, actual, k, expected[k])

    def _validate_dict_data(self, expected, actual):
        """Validate dictionary data.
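Reviewer note: the practical effect of this hunk is that validate_config_data() now accepts either literal expected values or callable validators per option. A hedged usage sketch follows; the config file, section and option names are illustrative assumptions, and the valid_ip/not_null helpers are the comparison methods referenced in the comment above, not something introduced here.

def check_auth_config(u, sentry_unit):
    """Sketch: validate selected keystone_authtoken options on one unit.

    u is an AmuletUtils instance; sentry_unit is an amulet sentry.
    """
    expected = {
        'auth_port': '35357',       # literal value: compared with !=
        'auth_host': u.valid_ip,    # callable: must return True for the value
        'signing_dir': u.not_null,  # callable: option just has to be set
    }
    # Returns None on success, or a failure message string.
    return u.validate_config_data(sentry_unit, '/etc/cinder/cinder.conf',
                                  'keystone_authtoken', expected)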
@@ -406,3 +419,123 @@ class AmuletUtils(object):
        """Convert a relative file path to a file URL."""
        _abs_path = os.path.abspath(file_rel_path)
        return urlparse.urlparse(_abs_path, scheme='file').geturl()

    def check_commands_on_units(self, commands, sentry_units):
        """Check that all commands in a list exit zero on all
        sentry units in a list.

        :param commands: list of bash commands
        :param sentry_units: list of sentry unit pointers
        :returns: None if successful; Failure message otherwise
        """
        self.log.debug('Checking exit codes for {} commands on {} '
                       'sentry units...'.format(len(commands),
                                                len(sentry_units)))
        for sentry_unit in sentry_units:
            for cmd in commands:
                output, code = sentry_unit.run(cmd)
                if code == 0:
                    msg = ('{} `{}` returned {} '
                           '(OK)'.format(sentry_unit.info['unit_name'],
                                         cmd, code))
                    self.log.debug(msg)
                else:
                    msg = ('{} `{}` returned {} '
                           '{}'.format(sentry_unit.info['unit_name'],
                                       cmd, code, output))
                    return msg
        return None

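A hedged usage sketch for the new check_commands_on_units helper; the command list and the way sentry units are gathered from an amulet deployment are assumptions, not part of this diff.

import amulet

def check_services(u, sentry_units):
    """Sketch: fail an amulet test if any command exits non-zero anywhere.

    u is an AmuletUtils instance; sentry_units is a list of amulet sentry
    units obtained from a deployed environment.
    """
    commands = [
        'sudo ceph -s',                  # assumed service checks, for
        'sudo cinder-manage db version',  # illustration only
    ]
    ret = u.check_commands_on_units(commands, sentry_units)
    if ret:
        amulet.raise_status(amulet.FAIL, msg=ret)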
    def get_process_id_list(self, sentry_unit, process_name):
        """Get a list of process ID(s) from a single sentry juju unit
        for a single process name.

        :param sentry_unit: Pointer to amulet sentry instance (juju unit)
        :param process_name: Process name
        :returns: List of process IDs
        """
        cmd = 'pidof {}'.format(process_name)
        output, code = sentry_unit.run(cmd)
        if code != 0:
            msg = ('{} `{}` returned {} '
                   '{}'.format(sentry_unit.info['unit_name'],
                               cmd, code, output))
            raise RuntimeError(msg)
        return str(output).split()

    def get_unit_process_ids(self, unit_processes):
        """Construct a dict containing unit sentries, process names, and
        process IDs."""
        pid_dict = {}
        for sentry_unit, process_list in unit_processes.iteritems():
            pid_dict[sentry_unit] = {}
            for process in process_list:
                pids = self.get_process_id_list(sentry_unit, process)
                pid_dict[sentry_unit].update({process: pids})
        return pid_dict

    def validate_unit_process_ids(self, expected, actual):
        """Validate process id quantities for services on units."""
        self.log.debug('Checking units for running processes...')
        self.log.debug('Expected PIDs: {}'.format(expected))
        self.log.debug('Actual PIDs: {}'.format(actual))

        if len(actual) != len(expected):
            msg = ('Unit count mismatch. expected, actual: {}, '
                   '{} '.format(len(expected), len(actual)))
            return msg

        for (e_sentry, e_proc_names) in expected.iteritems():
            e_sentry_name = e_sentry.info['unit_name']
            if e_sentry in actual.keys():
                a_proc_names = actual[e_sentry]
            else:
                msg = ('Expected sentry ({}) not found in actual dict data.'
                       '{}'.format(e_sentry_name, e_sentry))
                return msg

            if len(e_proc_names.keys()) != len(a_proc_names.keys()):
                msg = ('Process name count mismatch. expected, actual: {}, '
                       '{}'.format(len(expected), len(actual)))
                return msg

            for (e_proc_name, e_pids_length), (a_proc_name, a_pids) in \
                    zip(e_proc_names.items(), a_proc_names.items()):
                if e_proc_name != a_proc_name:
                    msg = ('Process name mismatch. expected, actual: {}, '
                           '{}'.format(e_proc_name, a_proc_name))
                    return msg

                a_pids_length = len(a_pids)
                if e_pids_length != a_pids_length:
                    msg = ('PID count mismatch. {} ({}) expected, actual: {}, '
                           '{} ({})'.format(e_sentry_name,
                                            e_proc_name,
                                            e_pids_length,
                                            a_pids_length,
                                            a_pids))
                    return msg
                else:
                    msg = ('PID check OK: {} {} {}: '
                           '{}'.format(e_sentry_name,
                                       e_proc_name,
                                       e_pids_length,
                                       a_pids))
                    self.log.debug(msg)
        return None

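The three helpers above are designed to be used together: build an expected map of sentry unit to {process name: expected PID count}, snapshot the actual PIDs, then validate. A hedged sketch; the unit names, process names and counts are illustrative assumptions.

def check_process_counts(u, cinder_sentry, ceph_sentry):
    """Sketch of the intended workflow for the PID helpers above."""
    # Expected shape: {sentry: {process_name: expected_pid_count}}
    expected_processes = {
        cinder_sentry: {'cinder-api': 2,
                        'cinder-scheduler': 1,
                        'cinder-volume': 1},
        ceph_sentry: {'ceph-osd': 2},
    }

    # Actual shape: {sentry: {process_name: [pids found on that unit]}}
    actual_pids = u.get_unit_process_ids(expected_processes)

    ret = u.validate_unit_process_ids(expected_processes, actual_pids)
    if ret:
        raise RuntimeError(ret)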
    def validate_list_of_identical_dicts(self, list_of_dicts):
        """Check that all dicts within a list are identical."""
        hashes = []
        for _dict in list_of_dicts:
            hashes.append(hash(frozenset(_dict.items())))

        self.log.debug('Hashes: {}'.format(hashes))
        if len(set(hashes)) == 1:
            msg = 'Dicts within list are identical'
            self.log.debug(msg)
        else:
            msg = 'Dicts within list are not identical'
            return msg

        return None
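validate_list_of_identical_dicts is handy for asserting that some piece of state agrees across units; a hedged sketch, where the choice of relation data as the thing being compared is an assumption.

def check_relation_data_consistent(u, sentry_units, relation_name, remote):
    """Sketch: assert every unit presents identical data on one relation."""
    dicts = [unit.relation(relation_name, remote) for unit in sentry_units]
    ret = u.validate_list_of_identical_dicts(dicts)
    if ret:
        raise RuntimeError(ret)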
@@ -148,3 +148,35 @@ class OpenStackAmuletDeployment(AmuletDeployment):
            return os_origin.split('%s-' % self.series)[1].split('/')[0]
        else:
            return releases[self.series]

    def get_ceph_expected_pools(self, radosgw=False):
        """Return a list of expected ceph pools based on Ubuntu-OpenStack
        release and whether ceph radosgw is flagged as present or not."""

        if self._get_openstack_release() >= self.trusty_kilo:
            # Kilo or later
            pools = [
                'rbd',
                'cinder',
                'glance'
            ]
        else:
            # Juno or earlier
            pools = [
                'data',
                'metadata',
                'rbd',
                'cinder',
                'glance'
            ]

        if radosgw:
            pools.extend([
                '.rgw.root',
                '.rgw.control',
                '.rgw',
                '.rgw.gc',
                '.users.uid'
            ])

        return pools
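A hedged sketch of how the expected-pool list is meant to be consumed, together with get_ceph_pools from OpenStackAmuletUtils further down in this diff; the parameter names and the idea of comparing against a single ceph sentry are assumptions.

def check_ceph_pools(deployment, u, ceph_sentry):
    """Sketch: compare expected pool names against pools on one ceph unit.

    deployment: an OpenStackAmuletDeployment-based test instance
    u: an OpenStackAmuletUtils instance
    ceph_sentry: amulet sentry for a ceph unit
    """
    expected_pools = deployment.get_ceph_expected_pools(radosgw=False)

    # {pool_name: pool_id} as parsed from `sudo ceph osd lspools`
    actual_pools = u.get_ceph_pools(ceph_sentry)

    missing = [p for p in expected_pools if p not in actual_pools]
    if missing:
        raise RuntimeError('missing ceph pools: {}'.format(missing))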
@@ -14,16 +14,19 @@
# You should have received a copy of the GNU Lesser General Public License
# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.

import json
import logging
import os
import six
import time
import urllib

import cinderclient.v1.client as cinder_client
import glanceclient.v1.client as glance_client
import heatclient.v1.client as heat_client
import keystoneclient.v2_0 as keystone_client
import novaclient.v1_1.client as nova_client
import swiftclient

from charmhelpers.contrib.amulet.utils import (
    AmuletUtils
@@ -171,6 +174,15 @@ class OpenStackAmuletUtils(AmuletUtils):
        self.log.debug('Checking if tenant exists ({})...'.format(tenant))
        return tenant in [t.name for t in keystone.tenants.list()]

    def authenticate_cinder_admin(self, keystone_sentry, username,
                                  password, tenant):
        """Authenticates admin user with cinder."""
        service_ip = \
            keystone_sentry.relation('shared-db',
                                     'mysql:shared-db')['private-address']
        ept = "http://{}:5000/v2.0".format(service_ip.strip().decode('utf-8'))
        return cinder_client.Client(username, password, tenant, ept)

    def authenticate_keystone_admin(self, keystone_sentry, user, password,
                                    tenant):
        """Authenticates admin user with the keystone admin endpoint."""
@@ -212,9 +224,29 @@ class OpenStackAmuletUtils(AmuletUtils):
        return nova_client.Client(username=user, api_key=password,
                                  project_id=tenant, auth_url=ep)

    def authenticate_swift_user(self, keystone, user, password, tenant):
        """Authenticates a regular user with swift api."""
        self.log.debug('Authenticating swift user ({})...'.format(user))
        ep = keystone.service_catalog.url_for(service_type='identity',
                                              endpoint_type='publicURL')
        return swiftclient.Connection(authurl=ep,
                                      user=user,
                                      key=password,
                                      tenant_name=tenant,
                                      auth_version='2.0')

    def create_cirros_image(self, glance, image_name):
        """Download the latest cirros image and upload it to glance."""
        self.log.debug('Creating glance image ({})...'.format(image_name))
        """Download the latest cirros image and upload it to glance,
        validate and return a resource pointer.

        :param glance: pointer to authenticated glance connection
        :param image_name: display name for new image
        :returns: glance image pointer
        """
        self.log.debug('Creating glance cirros image '
                       '({})...'.format(image_name))

        # Download cirros image
        http_proxy = os.getenv('AMULET_HTTP_PROXY')
        self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
        if http_proxy:
@@ -223,33 +255,51 @@ class OpenStackAmuletUtils(AmuletUtils):
        else:
            opener = urllib.FancyURLopener()

        f = opener.open("http://download.cirros-cloud.net/version/released")
        f = opener.open('http://download.cirros-cloud.net/version/released')
        version = f.read().strip()
        cirros_img = "cirros-{}-x86_64-disk.img".format(version)
        cirros_img = 'cirros-{}-x86_64-disk.img'.format(version)
        local_path = os.path.join('tests', cirros_img)

        if not os.path.exists(local_path):
            cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
            cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net',
                                                  version, cirros_img)
            opener.retrieve(cirros_url, local_path)
        f.close()

        # Create glance image
        with open(local_path) as f:
            image = glance.images.create(name=image_name, is_public=True,
                                         disk_format='qcow2',
                                         container_format='bare', data=f)
        count = 1
        status = image.status
        while status != 'active' and count < 10:
            time.sleep(3)
            image = glance.images.get(image.id)
            status = image.status
            self.log.debug('image status: {}'.format(status))
            count += 1

        if status != 'active':
            self.log.error('image creation timed out')
            return None
        # Wait for image to reach active status
        img_id = image.id
        ret = self.resource_reaches_status(glance.images, img_id,
                                           expected_stat='active',
                                           msg='Image status wait')
        if not ret:
            msg = 'Glance image failed to reach expected state.'
            raise RuntimeError(msg)

        # Re-validate new image
        self.log.debug('Validating image attributes...')
        val_img_name = glance.images.get(img_id).name
        val_img_stat = glance.images.get(img_id).status
        val_img_pub = glance.images.get(img_id).is_public
        val_img_cfmt = glance.images.get(img_id).container_format
        val_img_dfmt = glance.images.get(img_id).disk_format
        msg_attr = ('Image attributes - name:{} public:{} id:{} stat:{} '
                    'container fmt:{} disk fmt:{}'.format(
                        val_img_name, val_img_pub, img_id,
                        val_img_stat, val_img_cfmt, val_img_dfmt))

        if val_img_name == image_name and val_img_stat == 'active' \
                and val_img_pub is True and val_img_cfmt == 'bare' \
                and val_img_dfmt == 'qcow2':
            self.log.debug(msg_attr)
        else:
            msg = ('Volume validation failed, {}'.format(msg_attr))
            raise RuntimeError(msg)

        return image

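A hedged sketch of calling the reworked create_cirros_image, which now raises on failure instead of returning None; the image name and how the glance client is obtained are assumptions.

def make_test_image(u, glance):
    """Sketch: create a cirros image and hand back its id.

    u is an OpenStackAmuletUtils instance; glance is an authenticated
    glance client (e.g. obtained via the utils' glance auth helper).
    """
    # Raises RuntimeError if the image never reaches 'active' or fails
    # attribute validation, so there is no None return to check any more.
    image = u.create_cirros_image(glance, 'cirros-image-1')
    return image.id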
@@ -260,22 +310,7 @@ class OpenStackAmuletUtils(AmuletUtils):
        self.log.warn('/!\\ DEPRECATION WARNING: use '
                      'delete_resource instead of delete_image.')
        self.log.debug('Deleting glance image ({})...'.format(image))
        num_before = len(list(glance.images.list()))
        glance.images.delete(image)

        count = 1
        num_after = len(list(glance.images.list()))
        while num_after != (num_before - 1) and count < 10:
            time.sleep(3)
            num_after = len(list(glance.images.list()))
            self.log.debug('number of images: {}'.format(num_after))
            count += 1

        if num_after != (num_before - 1):
            self.log.error('image deletion timed out')
            return False

        return True
        return self.delete_resource(glance.images, image, msg='glance image')

    def create_instance(self, nova, image_name, instance_name, flavor):
        """Create the specified instance."""
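Both deprecated delete helpers now delegate to the generic delete_resource; a hedged sketch of calling it directly, where the particular resources being cleaned up are illustrative.

def cleanup(u, glance, nova, image_id, instance_id):
    """Sketch: delete a glance image and a nova instance via delete_resource.

    Returns True only if both resources disappear within the default
    max_wait window.
    """
    ok_image = u.delete_resource(glance.images, image_id,
                                 msg='glance image')
    ok_server = u.delete_resource(nova.servers, instance_id,
                                  msg='nova instance')
    return ok_image and ok_server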
@@ -308,22 +343,7 @@ class OpenStackAmuletUtils(AmuletUtils):
        self.log.warn('/!\\ DEPRECATION WARNING: use '
                      'delete_resource instead of delete_instance.')
        self.log.debug('Deleting instance ({})...'.format(instance))
        num_before = len(list(nova.servers.list()))
        nova.servers.delete(instance)

        count = 1
        num_after = len(list(nova.servers.list()))
        while num_after != (num_before - 1) and count < 10:
            time.sleep(3)
            num_after = len(list(nova.servers.list()))
            self.log.debug('number of instances: {}'.format(num_after))
            count += 1

        if num_after != (num_before - 1):
            self.log.error('instance deletion timed out')
            return False

        return True
        return self.delete_resource(nova.servers, instance, msg='nova instance')

    def create_or_get_keypair(self, nova, keypair_name="testkey"):
        """Create a new keypair, or return pointer if it already exists."""
@@ -339,6 +359,84 @@ class OpenStackAmuletUtils(AmuletUtils):
            _keypair = nova.keypairs.create(name=keypair_name)
            return _keypair

    def create_cinder_volume(self, cinder, vol_name="demo-vol", vol_size=1,
                             img_id=None, src_vol_id=None, snap_id=None):
        """Create cinder volume, optionally from a glance image, or
        optionally as a clone of an existing volume, or optionally
        from a snapshot.  Wait for the new volume status to reach
        the expected status, validate and return a resource pointer.

        :param vol_name: cinder volume display name
        :param vol_size: size in gigabytes
        :param img_id: optional glance image id
        :param src_vol_id: optional source volume id to clone
        :param snap_id: optional snapshot id to use
        :returns: cinder volume pointer
        """
        # Handle parameter input
        if img_id and not src_vol_id and not snap_id:
            self.log.debug('Creating cinder volume from glance image '
                           '({})...'.format(img_id))
            bootable = 'true'
        elif src_vol_id and not img_id and not snap_id:
            self.log.debug('Cloning cinder volume...')
            bootable = cinder.volumes.get(src_vol_id).bootable
        elif snap_id and not src_vol_id and not img_id:
            self.log.debug('Creating cinder volume from snapshot...')
            snap = cinder.volume_snapshots.find(id=snap_id)
            vol_size = snap.size
            snap_vol_id = cinder.volume_snapshots.get(snap_id).volume_id
            bootable = cinder.volumes.get(snap_vol_id).bootable
        elif not img_id and not src_vol_id and not snap_id:
            self.log.debug('Creating cinder volume...')
            bootable = 'false'
        else:
            msg = ('Invalid method use - name:{} size:{} img_id:{} '
                   'src_vol_id:{} snap_id:{}'.format(vol_name, vol_size,
                                                     img_id, src_vol_id,
                                                     snap_id))
            raise RuntimeError(msg)

        # Create new volume
        try:
            vol_new = cinder.volumes.create(display_name=vol_name,
                                            imageRef=img_id,
                                            size=vol_size,
                                            source_volid=src_vol_id,
                                            snapshot_id=snap_id)
            vol_id = vol_new.id
        except Exception as e:
            msg = 'Failed to create volume: {}'.format(e)
            raise RuntimeError(msg)

        # Wait for volume to reach available status
        ret = self.resource_reaches_status(cinder.volumes, vol_id,
                                           expected_stat="available",
                                           msg="Volume status wait")
        if not ret:
            msg = 'Cinder volume failed to reach expected state.'
            raise RuntimeError(msg)

        # Re-validate new volume
        self.log.debug('Validating volume attributes...')
        val_vol_name = cinder.volumes.get(vol_id).display_name
        val_vol_boot = cinder.volumes.get(vol_id).bootable
        val_vol_stat = cinder.volumes.get(vol_id).status
        val_vol_size = cinder.volumes.get(vol_id).size
        msg_attr = ('Volume attributes - name:{} id:{} stat:{} boot:'
                    '{} size:{}'.format(val_vol_name, vol_id,
                                        val_vol_stat, val_vol_boot,
                                        val_vol_size))

        if val_vol_boot == bootable and val_vol_stat == 'available' \
                and val_vol_name == vol_name and val_vol_size == vol_size:
            self.log.debug(msg_attr)
        else:
            msg = ('Volume validation failed, {}'.format(msg_attr))
            raise RuntimeError(msg)

        return vol_new

    def delete_resource(self, resource, resource_id,
                        msg="resource", max_wait=120):
        """Delete one openstack resource, such as one instance, keypair,
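A hedged sketch of the three creation modes accepted by create_cinder_volume; the volume names and sizes are illustrative assumptions.

def exercise_volume_paths(u, cinder, image_id):
    """Sketch: create a blank volume, a volume from an image, and a clone."""
    # 1. Plain (non-bootable) volume
    vol = u.create_cinder_volume(cinder, vol_name='demo-vol', vol_size=1)

    # 2. Bootable volume built from a glance image
    vol_from_img = u.create_cinder_volume(cinder, vol_name='demo-vol-img',
                                          vol_size=1, img_id=image_id)

    # 3. Clone of the first volume (bootable flag is inherited)
    vol_clone = u.create_cinder_volume(cinder, vol_name='demo-vol-clone',
                                       vol_size=1, src_vol_id=vol.id)
    return [vol, vol_from_img, vol_clone]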
@@ -350,6 +448,8 @@ class OpenStackAmuletUtils(AmuletUtils):
        :param max_wait: maximum wait time in seconds
        :returns: True if successful, otherwise False
        """
        self.log.debug('Deleting OpenStack resource '
                       '{} ({})'.format(resource_id, msg))
        num_before = len(list(resource.list()))
        resource.delete(resource_id)

@@ -411,3 +511,90 @@ class OpenStackAmuletUtils(AmuletUtils):
        self.log.debug('{} never reached expected status: '
                       '{}'.format(resource_id, expected_stat))
        return False

    def get_ceph_osd_id_cmd(self, index):
        """Produce a shell command that will return a ceph-osd id."""
        cmd = ("`initctl list | grep 'ceph-osd ' | awk 'NR=={} {{ print $2 }}'"
               " | grep -o '[0-9]*'`".format(index + 1))
        return cmd

    def get_ceph_pools(self, sentry_unit):
        """Return a dict of ceph pools from a single ceph unit, with
        pool name as keys, pool id as vals."""
        pools = {}
        cmd = 'sudo ceph osd lspools'
        output, code = sentry_unit.run(cmd)
        if code != 0:
            msg = ('{} `{}` returned {} '
                   '{}'.format(sentry_unit.info['unit_name'],
                               cmd, code, output))
            raise RuntimeError(msg)

        # Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance,
        for pool in str(output).split(','):
            pool_id_name = pool.split(' ')
            if len(pool_id_name) == 2:
                pool_id = pool_id_name[0]
                pool_name = pool_id_name[1]
                pools[pool_name] = int(pool_id)

        self.log.debug('Pools on {}: {}'.format(sentry_unit.info['unit_name'],
                                                pools))
        return pools

    def get_ceph_df(self, sentry_unit):
        """Return dict of ceph df json output, including ceph pool state.

        :param sentry_unit: Pointer to amulet sentry instance (juju unit)
        :returns: Dict of ceph df output
        """
        cmd = 'sudo ceph df --format=json'
        output, code = sentry_unit.run(cmd)
        if code != 0:
            msg = ('{} `{}` returned {} '
                   '{}'.format(sentry_unit.info['unit_name'],
                               cmd, code, output))
            raise RuntimeError(msg)
        return json.loads(output)

    def get_ceph_pool_sample(self, sentry_unit, pool_id=0):
        """Take a sample of attributes of a ceph pool, returning ceph
        pool name, object count and disk space used for the specified
        pool ID number.

        :param sentry_unit: Pointer to amulet sentry instance (juju unit)
        :param pool_id: Ceph pool ID
        :returns: List of pool name, object count, kb disk space used
        """
        df = self.get_ceph_df(sentry_unit)
        pool_name = df['pools'][pool_id]['name']
        obj_count = df['pools'][pool_id]['stats']['objects']
        kb_used = df['pools'][pool_id]['stats']['kb_used']
        self.log.debug('Ceph {} pool (ID {}): {} objects, '
                       '{} kb used'.format(pool_name,
                                           pool_id,
                                           obj_count,
                                           kb_used))
        return pool_name, obj_count, kb_used

    def validate_ceph_pool_samples(self, samples, sample_type="resource pool"):
        """Validate ceph pool samples taken over time, such as pool
        object counts or pool kb used, before adding, after adding, and
        after deleting items which affect those pool attributes. The
        2nd element is expected to be greater than the 1st; 3rd is expected
        to be less than the 2nd.

        :param samples: List containing 3 data samples
        :param sample_type: String for logging and usage context
        :returns: None if successful, Failure message otherwise
        """
        original, created, deleted = range(3)
        if samples[created] <= samples[original] or \
                samples[deleted] >= samples[created]:
            msg = ('Ceph {} samples ({}) '
                   'unexpected.'.format(sample_type, samples))
            return msg
        else:
            self.log.debug('Ceph {} samples (OK): '
                           '{}'.format(sample_type, samples))
            return None
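The sampling helpers at the end of this hunk are meant to be used in a before/during/after pattern around an operation that changes pool usage. A hedged sketch; the pool id and the choice of a cinder volume as the mutating operation are assumptions.

def check_pool_object_counts(u, cinder, ceph_sentry, pool_id):
    """Sketch: sample a ceph pool before/after creating and deleting a volume."""
    samples = []

    # get_ceph_pool_sample returns (pool_name, object_count, kb_used)
    _, obj_count, _ = u.get_ceph_pool_sample(ceph_sentry, pool_id)
    samples.append(obj_count)

    # Create a cinder volume expected to land in this pool, then sample again
    vol = u.create_cinder_volume(cinder, vol_name='ceph-sample-vol')
    _, obj_count, _ = u.get_ceph_pool_sample(ceph_sentry, pool_id)
    samples.append(obj_count)

    # Delete the volume and take the final sample
    u.delete_resource(cinder.volumes, vol.id, msg='cinder volume')
    _, obj_count, _ = u.get_ceph_pool_sample(ceph_sentry, pool_id)
    samples.append(obj_count)

    # 2nd sample must exceed the 1st; 3rd must drop below the 2nd
    ret = u.validate_ceph_pool_samples(samples, 'cinder pool object count')
    if ret:
        raise RuntimeError(ret)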