Set appropriate application tag for pools created

Use cases are emerging for the Ceph pool application tags. Let's set
an appropriate application name for the pools created for RadosGW.

Reference:
http://docs.ceph.com/docs/master/rados/operations/pools/#associate-pool-to-application

Sync charm-helpers.
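
For context, the tag maps onto the 'ceph osd pool application enable
<pool> <app>' operation described in the reference above; with this
change a consuming charm can request a tagged pool through the broker
API in a single call. A minimal sketch (the pool name and weight below
are illustrative):

    from charmhelpers.contrib.storage.linux.ceph import CephBrokerRq

    rq = CephBrokerRq()
    # Request a replicated pool tagged with the 'rgw' application name.
    rq.add_op_create_pool(name='default.rgw.buckets.data', replica_count=3,
                          weight=19, group='objects', app_name='rgw')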

Change-Id: I5c944d806ef458a82234dcc413cdd5ba34be7c18
Frode Nordahl 2019-02-20 06:50:29 +01:00
parent a25ae87994
commit 09703c286b
3 changed files with 86 additions and 71 deletions


@@ -31,6 +31,7 @@ from charmhelpers.contrib.storage.linux.ceph import (
 CEPH_DIR = '/etc/ceph'
 CEPH_RADOSGW_DIR = '/var/lib/ceph/radosgw'
 _radosgw_keyring = "keyring.rados.gateway"
+CEPH_POOL_APP_NAME = 'rgw'


 def import_radosgw_key(key, name=None):
@@ -99,10 +100,12 @@ def get_create_rgw_pools_rq(prefix=None):
         pool = "{prefix}{pool}".format(prefix=prefix, pool=pool)
         if pg_num > 0:
             rq.add_op_create_pool(name=pool, replica_count=replicas,
-                                  pg_num=pg_num, group='objects')
+                                  pg_num=pg_num, group='objects',
+                                  app_name=CEPH_POOL_APP_NAME)
         else:
             rq.add_op_create_pool(name=pool, replica_count=replicas,
-                                  weight=w, group='objects')
+                                  weight=w, group='objects',
+                                  app_name=CEPH_POOL_APP_NAME)

     from apt import apt_pkg
@@ -121,7 +124,8 @@ def get_create_rgw_pools_rq(prefix=None):
     for pool in heavy:
         pool = "{prefix}{pool}".format(prefix=prefix, pool=pool)
         rq.add_op_create_pool(name=pool, replica_count=replicas,
-                              weight=bucket_weight, group='objects')
+                              weight=bucket_weight, group='objects',
+                              app_name=CEPH_POOL_APP_NAME)

     # NOTE: we want these pools to have a smaller pg_num/pgp_num than the
     # others since they are not expected to contain as much data


@@ -59,6 +59,7 @@ from charmhelpers.core.host import (
     service_stop,
     service_running,
     umount,
+    cmp_pkgrevno,
 )
 from charmhelpers.fetch import (
     apt_install,
@@ -178,7 +179,6 @@ class Pool(object):
         """
         # read-only is easy, writeback is much harder
         mode = get_cache_mode(self.service, cache_pool)
-        version = ceph_version()
         if mode == 'readonly':
             check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none'])
             check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
@@ -186,7 +186,7 @@ class Pool(object):
         elif mode == 'writeback':
             pool_forward_cmd = ['ceph', '--id', self.service, 'osd', 'tier',
                                 'cache-mode', cache_pool, 'forward']
-            if version >= '10.1':
+            if cmp_pkgrevno('ceph', '10.1') >= 0:
                 # Jewel added a mandatory flag
                 pool_forward_cmd.append('--yes-i-really-mean-it')
@@ -196,7 +196,8 @@ class Pool(object):
             check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name])
             check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])

-    def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT):
+    def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT,
+                device_class=None):
         """Return the number of placement groups to use when creating the pool.

         Returns the number of placement groups which should be specified when
@@ -229,6 +230,9 @@ class Pool(object):
             increased. NOTE: the default is primarily to handle the scenario
             where related charms requiring pools has not been upgraded to
             include an update to indicate their relative usage of the pools.
+        :param device_class: str. class of storage to use for basis of pgs
+            calculation; ceph supports nvme, ssd and hdd by default based
+            on presence of devices of each type in the deployment.
         :return: int. The number of pgs to use.
         """
@@ -243,17 +247,20 @@ class Pool(object):
         # If the expected-osd-count is specified, then use the max between
         # the expected-osd-count and the actual osd_count
-        osd_list = get_osds(self.service)
+        osd_list = get_osds(self.service, device_class)
         expected = config('expected-osd-count') or 0

         if osd_list:
-            osd_count = max(expected, len(osd_list))
+            if device_class:
+                osd_count = len(osd_list)
+            else:
+                osd_count = max(expected, len(osd_list))

             # Log a message to provide some insight if the calculations claim
             # to be off because someone is setting the expected count and
             # there are more OSDs in reality. Try to make a proper guess
             # based upon the cluster itself.
-            if expected and osd_count != expected:
+            if not device_class and expected and osd_count != expected:
                 log("Found more OSDs than provided expected count. "
                     "Using the actual count instead", INFO)
         elif expected:
@@ -626,7 +633,8 @@ def remove_erasure_profile(service, profile_name):
 def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure',
                            failure_domain='host',
                            data_chunks=2, coding_chunks=1,
-                           locality=None, durability_estimator=None):
+                           locality=None, durability_estimator=None,
+                           device_class=None):
     """
     Create a new erasure code profile if one does not already exist for it. Updates
     the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/
@@ -640,10 +648,9 @@ def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure'
     :param coding_chunks: int
     :param locality: int
     :param durability_estimator: int
+    :param device_class: six.string_types
     :return: None. Can raise CalledProcessError
     """
-    version = ceph_version()
     # Ensure this failure_domain is allowed by Ceph
     validator(failure_domain, six.string_types,
               ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row'])
@@ -654,12 +661,20 @@ def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure'
     if locality is not None and durability_estimator is not None:
         raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.")

+    luminous_or_later = cmp_pkgrevno('ceph', '12.0.0') >= 0
     # failure_domain changed in luminous
-    if version and version >= '12.0.0':
+    if luminous_or_later:
         cmd.append('crush-failure-domain=' + failure_domain)
     else:
         cmd.append('ruleset-failure-domain=' + failure_domain)

+    # device class new in luminous
+    if luminous_or_later and device_class:
+        cmd.append('crush-device-class={}'.format(device_class))
+    else:
+        log('Skipping device class configuration (ceph < 12.0.0)',
+            level=DEBUG)
+
     # Add plugin specific information
     if locality is not None:
         # For local erasure codes
@@ -744,12 +759,20 @@ def pool_exists(service, name):
     return name in out.split()


-def get_osds(service):
+def get_osds(service, device_class=None):
     """Return a list of all Ceph Object Storage Daemons currently in the
-    cluster.
+    cluster (optionally filtered by storage device class).
+
+    :param device_class: Class of storage device for OSD's
+    :type device_class: str
     """
-    version = ceph_version()
-    if version and version >= '0.56':
+    luminous_or_later = cmp_pkgrevno('ceph', '12.0.0') >= 0
+    if luminous_or_later and device_class:
+        out = check_output(['ceph', '--id', service,
+                            'osd', 'crush', 'class',
+                            'ls-osd', device_class,
+                            '--format=json'])
+    else:
         out = check_output(['ceph', '--id', service,
                             'osd', 'ls',
                             '--format=json'])
@@ -757,8 +780,6 @@ def get_osds(service):
         out = out.decode('UTF-8')
     return json.loads(out)

-    return None
-

 def install():
     """Basic Ceph client installation."""
@@ -811,7 +832,7 @@ def set_app_name_for_pool(client, pool, name):

     :raises: CalledProcessError if ceph call fails
     """
-    if ceph_version() >= '12.0.0':
+    if cmp_pkgrevno('ceph', '12.0.0') >= 0:
         cmd = ['ceph', '--id', client, 'osd', 'pool',
                'application', 'enable', pool, name]
         check_call(cmd)
@@ -1091,22 +1112,6 @@ def ensure_ceph_keyring(service, user=None, group=None,
     return True


-def ceph_version():
-    """Retrieve the local version of ceph."""
-    if os.path.exists('/usr/bin/ceph'):
-        cmd = ['ceph', '-v']
-        output = check_output(cmd)
-        if six.PY3:
-            output = output.decode('UTF-8')
-        output = output.split()
-        if len(output) > 3:
-            return output[2]
-        else:
-            return None
-    else:
-        return None
-
-
 class CephBrokerRq(object):
     """Ceph broker request.
@@ -1147,7 +1152,8 @@ class CephBrokerRq(object):
                          'object-prefix-permissions': object_prefix_permissions})

     def add_op_create_pool(self, name, replica_count=3, pg_num=None,
-                           weight=None, group=None, namespace=None):
+                           weight=None, group=None, namespace=None,
+                           app_name=None):
         """Adds an operation to create a pool.

         @param pg_num setting: optional setting. If not provided, this value
@@ -1155,6 +1161,11 @@ class CephBrokerRq(object):
         cluster at the time of creation. Note that, if provided, this value
         will be capped at the current available maximum.
         @param weight: the percentage of data the pool makes up
+        :param app_name: (Optional) Tag pool with application name. Note that
+                         there is certain protocols emerging upstream with
+                         regard to meaningful application names to use.
+                         Examples are ``rbd`` and ``rgw``.
+        :type app_name: str
         """
         if pg_num and weight:
             raise ValueError('pg_num and weight are mutually exclusive')
@@ -1162,7 +1173,7 @@ class CephBrokerRq(object):
         self.ops.append({'op': 'create-pool', 'name': name,
                          'replicas': replica_count, 'pg_num': pg_num,
                          'weight': weight, 'group': group,
-                         'group-namespace': namespace})
+                         'group-namespace': namespace, 'app-name': app_name})

     def set_ops(self, ops):
         """Set request ops to provided value.


@@ -67,35 +67,35 @@ class CephRadosGWCephTests(CharmTestCase):
         ceph.get_create_rgw_pools_rq(prefix='us-east')
         mock_broker.assert_has_calls([
             call(replica_count=3, weight=19, name='us-east.rgw.buckets.data',
-                 group='objects'),
+                 group='objects', app_name='rgw'),
             call(pg_num=10, replica_count=3, name='us-east.rgw.control',
-                 group='objects'),
+                 group='objects', app_name='rgw'),
             call(pg_num=10, replica_count=3, name='us-east.rgw.data.root',
-                 group='objects'),
+                 group='objects', app_name='rgw'),
             call(pg_num=10, replica_count=3, name='us-east.rgw.gc',
-                 group='objects'),
+                 group='objects', app_name='rgw'),
             call(pg_num=10, replica_count=3, name='us-east.rgw.log',
-                 group='objects'),
+                 group='objects', app_name='rgw'),
             call(pg_num=10, replica_count=3, name='us-east.rgw.intent-log',
-                 group='objects'),
+                 group='objects', app_name='rgw'),
             call(pg_num=10, replica_count=3, name='us-east.rgw.meta',
-                 group='objects'),
+                 group='objects', app_name='rgw'),
             call(pg_num=10, replica_count=3, name='us-east.rgw.usage',
-                 group='objects'),
+                 group='objects', app_name='rgw'),
             call(pg_num=10, replica_count=3, name='us-east.rgw.users.keys',
-                 group='objects'),
+                 group='objects', app_name='rgw'),
             call(pg_num=10, replica_count=3, name='us-east.rgw.users.email',
-                 group='objects'),
+                 group='objects', app_name='rgw'),
             call(pg_num=10, replica_count=3, name='us-east.rgw.users.swift',
-                 group='objects'),
+                 group='objects', app_name='rgw'),
             call(pg_num=10, replica_count=3, name='us-east.rgw.users.uid',
-                 group='objects'),
+                 group='objects', app_name='rgw'),
             call(pg_num=10, replica_count=3, name='us-east.rgw.buckets.extra',
-                 group='objects'),
+                 group='objects', app_name='rgw'),
             call(pg_num=10, replica_count=3, name='us-east.rgw.buckets.index',
-                 group='objects'),
+                 group='objects', app_name='rgw'),
             call(pg_num=10, replica_count=3, name='.rgw.root',
-                 group='objects')],
+                 group='objects', app_name='rgw')],
         )

     @patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq'
@@ -111,37 +111,37 @@ class CephRadosGWCephTests(CharmTestCase):
         ceph.get_create_rgw_pools_rq(prefix=None)
         mock_broker.assert_has_calls([
             call(replica_count=3, weight=19, name='default.rgw.buckets.data',
-                 group='objects'),
+                 group='objects', app_name='rgw'),
             call(weight=0.10, replica_count=3, name='default.rgw.control',
-                 group='objects'),
+                 group='objects', app_name='rgw'),
             call(weight=0.10, replica_count=3, name='default.rgw.data.root',
-                 group='objects'),
+                 group='objects', app_name='rgw'),
             call(weight=0.10, replica_count=3, name='default.rgw.gc',
-                 group='objects'),
+                 group='objects', app_name='rgw'),
             call(weight=0.10, replica_count=3, name='default.rgw.log',
-                 group='objects'),
+                 group='objects', app_name='rgw'),
             call(weight=0.10, replica_count=3, name='default.rgw.intent-log',
-                 group='objects'),
+                 group='objects', app_name='rgw'),
             call(weight=0.10, replica_count=3, name='default.rgw.meta',
-                 group='objects'),
+                 group='objects', app_name='rgw'),
             call(weight=0.10, replica_count=3, name='default.rgw.usage',
-                 group='objects'),
+                 group='objects', app_name='rgw'),
             call(weight=0.10, replica_count=3, name='default.rgw.users.keys',
-                 group='objects'),
+                 group='objects', app_name='rgw'),
             call(weight=0.10, replica_count=3, name='default.rgw.users.email',
-                 group='objects'),
+                 group='objects', app_name='rgw'),
             call(weight=0.10, replica_count=3, name='default.rgw.users.swift',
-                 group='objects'),
+                 group='objects', app_name='rgw'),
             call(weight=0.10, replica_count=3, name='default.rgw.users.uid',
-                 group='objects'),
+                 group='objects', app_name='rgw'),
             call(weight=1.00, replica_count=3,
                  name='default.rgw.buckets.extra',
-                 group='objects'),
+                 group='objects', app_name='rgw'),
             call(weight=3.00, replica_count=3,
                  name='default.rgw.buckets.index',
-                 group='objects'),
+                 group='objects', app_name='rgw'),
             call(weight=0.10, replica_count=3, name='.rgw.root',
-                 group='objects')],
+                 group='objects', app_name='rgw')],
         )

         mock_request_access.assert_called_with(key_name='radosgw.gateway',
                                                name='objects',