Request Ceph application name ``rbd`` for pools we create

Use cases for Ceph pool application-name tagging are emerging, and thus
far the convention appears to be ``rbd`` or ``rgw``; others may emerge
too. We make use of this to provide "it just works" behaviour for the
ongoing ``rbd-mirror`` feature work in the Ceph charms.

Sync charm-helpers.

Change-Id: Id8e59abdf5aaf578e9f11a223a79209fa971f51c
parent 42de983c5a · commit 00b069df3f
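For context, a minimal sketch of how a consuming charm is expected to use
the new ``app_name`` parameter on the broker request; the pool name,
weight and group below are hypothetical examples, not part of this
change::

    # Hypothetical charm-side usage, assuming the synced charm-helpers
    # from this commit.
    from charmhelpers.contrib.storage.linux.ceph import CephBrokerRq

    rq = CephBrokerRq()
    # Tag the pool with the ``rbd`` application at creation time so the
    # rbd-mirror work and Luminous' pool application checks "just work".
    rq.add_op_create_pool(name='cinder-ceph', replica_count=3,
                          weight=40, group='volumes', app_name='rbd')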
@@ -59,6 +59,7 @@ from charmhelpers.core.host import (
     service_stop,
     service_running,
     umount,
+    cmp_pkgrevno,
 )
 from charmhelpers.fetch import (
     apt_install,
@@ -178,7 +179,6 @@ class Pool(object):
         """
         # read-only is easy, writeback is much harder
         mode = get_cache_mode(self.service, cache_pool)
-        version = ceph_version()
         if mode == 'readonly':
             check_call(['ceph', '--id', self.service, 'osd', 'tier', 'cache-mode', cache_pool, 'none'])
             check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])
@@ -186,7 +186,7 @@ class Pool(object):
         elif mode == 'writeback':
             pool_forward_cmd = ['ceph', '--id', self.service, 'osd', 'tier',
                                 'cache-mode', cache_pool, 'forward']
-            if version >= '10.1':
+            if cmp_pkgrevno('ceph', '10.1') >= 0:
                 # Jewel added a mandatory flag
                 pool_forward_cmd.append('--yes-i-really-mean-it')

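Why replace the string comparison: Python compares version strings
lexically, so ``'9.2.1' >= '10.1'`` is ``True`` (Infernalis would wrongly
pass a Jewel check), and ``ceph_version()`` could return ``None``, which
does not compare against ``str`` at all on Python 3. A small
illustration; ``cmp_pkgrevno`` lives in ``charmhelpers.core.host`` and
compares against the installed package version, returning 1, 0 or -1::

    from charmhelpers.core.host import cmp_pkgrevno

    '9.2.1' >= '10.1'   # True lexically -- wrong for version semantics

    if cmp_pkgrevno('ceph', '10.1') >= 0:
        pass  # installed ceph really is Jewel (10.1) or later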
@@ -196,7 +196,8 @@ class Pool(object):
             check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove-overlay', self.name])
             check_call(['ceph', '--id', self.service, 'osd', 'tier', 'remove', self.name, cache_pool])

-    def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT):
+    def get_pgs(self, pool_size, percent_data=DEFAULT_POOL_WEIGHT,
+                device_class=None):
         """Return the number of placement groups to use when creating the pool.

         Returns the number of placement groups which should be specified when
@@ -229,6 +230,9 @@ class Pool(object):
             increased. NOTE: the default is primarily to handle the scenario
             where related charms requiring pools has not been upgraded to
             include an update to indicate their relative usage of the pools.
+        :param device_class: str. class of storage to use for basis of pgs
+            calculation; ceph supports nvme, ssd and hdd by default based
+            on presence of devices of each type in the deployment.
         :return: int. The number of pgs to use.
         """

@@ -243,17 +247,20 @@ class Pool(object):

         # If the expected-osd-count is specified, then use the max between
         # the expected-osd-count and the actual osd_count
-        osd_list = get_osds(self.service)
+        osd_list = get_osds(self.service, device_class)
         expected = config('expected-osd-count') or 0

         if osd_list:
-            osd_count = max(expected, len(osd_list))
+            if device_class:
+                osd_count = len(osd_list)
+            else:
+                osd_count = max(expected, len(osd_list))

             # Log a message to provide some insight if the calculations claim
             # to be off because someone is setting the expected count and
             # there are more OSDs in reality. Try to make a proper guess
             # based upon the cluster itself.
-            if expected and osd_count != expected:
+            if not device_class and expected and osd_count != expected:
                 log("Found more OSDs than provided expected count. "
                     "Using the actual count instead", INFO)
             elif expected:
@@ -626,7 +633,8 @@ def remove_erasure_profile(service, profile_name):
 def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure',
                            failure_domain='host',
                            data_chunks=2, coding_chunks=1,
-                           locality=None, durability_estimator=None):
+                           locality=None, durability_estimator=None,
+                           device_class=None):
     """
     Create a new erasure code profile if one does not already exist for it. Updates
     the profile if it exists. Please see http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/
@@ -640,10 +648,9 @@ def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure'
     :param coding_chunks: int
     :param locality: int
     :param durability_estimator: int
+    :param device_class: six.string_types
     :return: None. Can raise CalledProcessError
     """
-    version = ceph_version()
-
     # Ensure this failure_domain is allowed by Ceph
     validator(failure_domain, six.string_types,
               ['chassis', 'datacenter', 'host', 'osd', 'pdu', 'pod', 'rack', 'region', 'room', 'root', 'row'])
@@ -654,12 +661,20 @@ def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure'
     if locality is not None and durability_estimator is not None:
         raise ValueError("create_erasure_profile should be called with k, m and one of l or c but not both.")

+    luminous_or_later = cmp_pkgrevno('ceph', '12.0.0') >= 0
     # failure_domain changed in luminous
-    if version and version >= '12.0.0':
+    if luminous_or_later:
         cmd.append('crush-failure-domain=' + failure_domain)
     else:
         cmd.append('ruleset-failure-domain=' + failure_domain)

+    # device class new in luminous
+    if luminous_or_later and device_class:
+        cmd.append('crush-device-class={}'.format(device_class))
+    else:
+        log('Skipping device class configuration (ceph < 12.0.0)',
+            level=DEBUG)
+
     # Add plugin specific information
     if locality is not None:
         # For local erasure codes
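For a Luminous cluster, the command assembled above would come out
roughly as follows; the profile name, k/m values and ``ssd`` class are
illustrative only, not taken from this change::

    # ceph osd erasure-code-profile set my-ssd-profile plugin=jerasure \
    #     k=2 m=1 crush-failure-domain=host crush-device-class=ssd
    from subprocess import check_call

    check_call(['ceph', '--id', 'admin',
                'osd', 'erasure-code-profile', 'set', 'my-ssd-profile',
                'plugin=jerasure', 'k=2', 'm=1',
                'crush-failure-domain=host',
                'crush-device-class=ssd'])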
@@ -744,20 +759,26 @@ def pool_exists(service, name):
     return name in out.split()


-def get_osds(service):
+def get_osds(service, device_class=None):
     """Return a list of all Ceph Object Storage Daemons currently in the
-    cluster.
+    cluster (optionally filtered by storage device class).
+
+    :param device_class: Class of storage device for OSD's
+    :type device_class: str
     """
-    version = ceph_version()
-    if version and version >= '0.56':
+    luminous_or_later = cmp_pkgrevno('ceph', '12.0.0') >= 0
+    if luminous_or_later and device_class:
+        out = check_output(['ceph', '--id', service,
+                            'osd', 'crush', 'class',
+                            'ls-osd', device_class,
+                            '--format=json'])
+    else:
         out = check_output(['ceph', '--id', service,
                             'osd', 'ls',
                             '--format=json'])
-        if six.PY3:
-            out = out.decode('UTF-8')
-        return json.loads(out)
-
-    return None
+    if six.PY3:
+        out = out.decode('UTF-8')
+    return json.loads(out)


 def install():
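With a device class given, OSD selection is delegated to the CRUSH map on
Luminous or later. A hedged sketch of the two call paths; the OSD ids are
made up::

    from charmhelpers.contrib.storage.linux.ceph import get_osds

    get_osds('admin')         # ceph --id admin osd ls --format=json
                              # -> [0, 1, 2, 3, 4, 5]  (every OSD)
    get_osds('admin', 'ssd')  # ceph --id admin osd crush class ls-osd ssd
                              #     --format=json
                              # -> [0, 2, 4]  (only SSD-backed OSDs)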
@@ -811,7 +832,7 @@ def set_app_name_for_pool(client, pool, name):

     :raises: CalledProcessError if ceph call fails
     """
-    if ceph_version() >= '12.0.0':
+    if cmp_pkgrevno('ceph', '12.0.0') >= 0:
         cmd = ['ceph', '--id', client, 'osd', 'pool',
                'application', 'enable', pool, name]
         check_call(cmd)
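``set_app_name_for_pool`` maps straight onto the Luminous CLI; the
``osd pool application`` subcommand does not exist on earlier releases,
hence the version guard. An equivalent invocation, with a hypothetical
pool and client::

    # Equivalent to:
    #     ceph --id glance osd pool application enable glance rbd
    set_app_name_for_pool(client='glance', pool='glance', name='rbd')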
@@ -1091,22 +1112,6 @@ def ensure_ceph_keyring(service, user=None, group=None,
     return True


-def ceph_version():
-    """Retrieve the local version of ceph."""
-    if os.path.exists('/usr/bin/ceph'):
-        cmd = ['ceph', '-v']
-        output = check_output(cmd)
-        if six.PY3:
-            output = output.decode('UTF-8')
-        output = output.split()
-        if len(output) > 3:
-            return output[2]
-        else:
-            return None
-    else:
-        return None
-
-
 class CephBrokerRq(object):
     """Ceph broker request.

@@ -1147,7 +1152,8 @@ class CephBrokerRq(object):
                 'object-prefix-permissions': object_prefix_permissions})

     def add_op_create_pool(self, name, replica_count=3, pg_num=None,
-                           weight=None, group=None, namespace=None):
+                           weight=None, group=None, namespace=None,
+                           app_name=None):
         """Adds an operation to create a pool.

         @param pg_num setting: optional setting. If not provided, this value
@@ -1155,6 +1161,11 @@ class CephBrokerRq(object):
         cluster at the time of creation. Note that, if provided, this value
         will be capped at the current available maximum.
         @param weight: the percentage of data the pool makes up
+        :param app_name: (Optional) Tag pool with application name. Note that
+                         there are certain protocols emerging upstream with
+                         regard to meaningful application names to use.
+                         Examples are ``rbd`` and ``rgw``.
+        :type app_name: str
         """
         if pg_num and weight:
             raise ValueError('pg_num and weight are mutually exclusive')
@@ -1162,7 +1173,7 @@ class CephBrokerRq(object):
         self.ops.append({'op': 'create-pool', 'name': name,
                          'replicas': replica_count, 'pg_num': pg_num,
                          'weight': weight, 'group': group,
-                         'group-namespace': namespace})
+                         'group-namespace': namespace, 'app-name': app_name})

     def set_ops(self, ops):
         """Set request ops to provided value.
@@ -111,18 +111,18 @@ def get_ceph_request():
     pool_name = config('rbd-pool-name') or service
     rq.add_op_create_pool(name=pool_name, replica_count=replicas,
                           weight=weight,
-                          group="volumes")
+                          group='volumes', app_name='rbd')
     if config('restrict-ceph-pools'):
         rq.add_op_request_access_to_group(
-            name="volumes",
+            name='volumes',
             object_prefix_permissions={'class-read': ['rbd_children']},
             permission='rwx')
         rq.add_op_request_access_to_group(
-            name="images",
+            name='images',
             object_prefix_permissions={'class-read': ['rbd_children']},
             permission='rwx')
         rq.add_op_request_access_to_group(
-            name="vms",
+            name='vms',
             object_prefix_permissions={'class-read': ['rbd_children']},
             permission='rwx')
     return rq
@@ -129,13 +129,15 @@ class TestCinderHooks(CharmTestCase):
         self.test_config.set('ceph-pool-weight', 20)
         hooks.get_ceph_request()
         mock_create_pool.assert_called_with(name='cinder', replica_count=4,
-                                            weight=20, group='volumes')
+                                            weight=20, group='volumes',
+                                            app_name='rbd')
         mock_request_access.assert_not_called()

         self.test_config.set('restrict-ceph-pools', True)
         hooks.get_ceph_request()
         mock_create_pool.assert_called_with(name='cinder', replica_count=4,
-                                            weight=20, group='volumes')
+                                            weight=20, group='volumes',
+                                            app_name='rbd')
         mock_request_access.assert_has_calls([
             call(
                 name='volumes',
@@ -165,7 +167,8 @@ class TestCinderHooks(CharmTestCase):
         mock_create_pool.assert_called_with(name='cinder-test',
                                             replica_count=4,
                                             weight=20,
-                                            group='volumes')
+                                            group='volumes',
+                                            app_name='rbd')

     @patch('charmhelpers.core.hookenv.config')
     def test_ceph_changed_no_keys(self, mock_config):