Merge "Add BlueStore Compression support"
@@ -3245,6 +3245,18 @@ class CephBlueStoreCompressionContext(OSContextGenerator):
         """
         return self.op
 
+    def get_kwargs(self):
+        """Get values for use as keyword arguments.
+
+        :returns: Context values with key suitable for use as kwargs to
+                  CephBrokerRq add_op_create_*_pool methods.
+        :rtype: Dict[str,any]
+        """
+        return {
+            k.replace('-', '_'): v
+            for k, v in self.op.items()
+        }
+
     def validate(self):
         """Validate options.
 
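The added get_kwargs() simply rewrites dash-separated context keys into underscore form so they can be passed as Python keyword arguments. A minimal standalone illustration (values hypothetical):

    op = {'compression-algorithm': 'lz4', 'compression-mode': 'aggressive'}
    kwargs = {k.replace('-', '_'): v for k, v in op.items()}
    assert kwargs == {'compression_algorithm': 'lz4',
                      'compression_mode': 'aggressive'}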
@@ -705,12 +705,12 @@ class ErasurePool(BasePool):
             # from different handling of this in the `charms.ceph` library.
             self.erasure_code_profile = op.get('erasure-profile',
                                                'default-canonical')
             self.allow_ec_overwrites = op.get('allow-ec-overwrites')
         else:
             # We keep the class default when initialized from keyword arguments
             # to not break the API for any other consumers.
             self.erasure_code_profile = erasure_code_profile or 'default'
 
-            self.allow_ec_overwrites = allow_ec_overwrites
+        self.allow_ec_overwrites = allow_ec_overwrites
 
     def _create(self):
         # Try to find the erasure profile information in order to properly
@@ -1972,12 +1972,14 @@ class CephBrokerRq(object):
                            'request-id': self.request_id})
 
     def _ops_equal(self, other):
+        keys_to_compare = [
+            'replicas', 'name', 'op', 'pg_num', 'group-permission',
+            'object-prefix-permissions',
+        ]
+        keys_to_compare += list(self._partial_build_common_op_create().keys())
         if len(self.ops) == len(other.ops):
             for req_no in range(0, len(self.ops)):
-                for key in [
-                        'replicas', 'name', 'op', 'pg_num', 'weight',
-                        'group', 'group-namespace', 'group-permission',
-                        'object-prefix-permissions']:
+                for key in keys_to_compare:
                     if self.ops[req_no].get(key) != other.ops[req_no].get(key):
                         return False
         else:
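The rewritten _ops_equal() treats two broker requests as equal when every compared key matches op-by-op, with the common pool-creation keys now derived from _partial_build_common_op_create() instead of a hard-coded list. A standalone sketch of the comparison (example data hypothetical):

    keys_to_compare = ['replicas', 'name', 'op', 'pg_num', 'group-permission',
                       'object-prefix-permissions']
    op_a = {'op': 'create-pool', 'name': 'volumes', 'replicas': 3}
    op_b = {'op': 'create-pool', 'name': 'volumes', 'replicas': 3}
    assert all(op_a.get(key) == op_b.get(key) for key in keys_to_compare)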
config.yaml
@@ -151,3 +151,69 @@ options:
         Device class from CRUSH map to use for placement groups for
         erasure profile - valid values: ssd, hdd or nvme (or leave
         unset to not use a device class).
+  bluestore-compression-algorithm:
+    type: string
+    default:
+    description: |
+      Compressor to use (if any) for pools requested by this charm.
+      .
+      NOTE: The ceph-osd charm sets a global default for this value (defaults
+      to 'lz4' unless configured by the end user) which will be used unless
+      specified for individual pools.
+  bluestore-compression-mode:
+    type: string
+    default:
+    description: |
+      Policy for using compression on pools requested by this charm.
+      .
+      'none' means never use compression.
+      'passive' means use compression when clients hint that data is
+      compressible.
+      'aggressive' means use compression unless clients hint that
+      data is not compressible.
+      'force' means use compression under all circumstances even if the clients
+      hint that the data is not compressible.
+  bluestore-compression-required-ratio:
+    type: float
+    default:
+    description: |
+      The ratio of the size of the data chunk after compression relative to the
+      original size must be at least this small in order to store the
+      compressed version on pools requested by this charm.
+  bluestore-compression-min-blob-size:
+    type: int
+    default:
+    description: |
+      Chunks smaller than this are never compressed on pools requested by
+      this charm.
+  bluestore-compression-min-blob-size-hdd:
+    type: int
+    default:
+    description: |
+      Value of bluestore compression min blob size for rotational media on
+      pools requested by this charm.
+  bluestore-compression-min-blob-size-ssd:
+    type: int
+    default:
+    description: |
+      Value of bluestore compression min blob size for solid state media on
+      pools requested by this charm.
+  bluestore-compression-max-blob-size:
+    type: int
+    default:
+    description: |
+      Chunks larger than this are broken into smaller blobs sized bluestore
+      compression max blob size before being compressed on pools requested by
+      this charm.
+  bluestore-compression-max-blob-size-hdd:
+    type: int
+    default:
+    description: |
+      Value of bluestore compression max blob size for rotational media on
+      pools requested by this charm.
+  bluestore-compression-max-blob-size-ssd:
+    type: int
+    default:
+    description: |
+      Value of bluestore compression max blob size for solid state media on
+      pools requested by this charm.
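To make bluestore-compression-required-ratio concrete, a worked example with hypothetical numbers: at a ratio of 0.875, a 64 KiB chunk is stored compressed only if the compressed result is at most 56 KiB.

    required_ratio = 0.875            # hypothetical configured value
    chunk_size = 64 * 1024
    compressed_size = 50 * 1024
    # Ceph keeps the compressed copy only when it is small enough.
    assert compressed_size <= chunk_size * required_ratio   # 50 KiB <= 56 KiB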
@@ -32,7 +32,10 @@ def _add_path(path):
 _add_path(_root)
 
 from charmhelpers.contrib.openstack.alternatives import remove_alternative
-from charmhelpers.contrib.openstack.context import CephContext
+from charmhelpers.contrib.openstack.context import (
+    CephBlueStoreCompressionContext,
+    CephContext,
+)
 from charmhelpers.contrib.openstack.utils import (
     clear_unit_paused,
     clear_unit_upgrading,
@@ -49,6 +52,7 @@ from charmhelpers.contrib.storage.linux.ceph import (
     send_request_if_needed,
 )
 from charmhelpers.core.hookenv import (
+    DEBUG,
     config,
     Hooks,
     is_leader,
@@ -109,6 +113,7 @@ def get_ceph_request():
     pool_name = config('rbd-pool-name') or service
     weight = config('ceph-pool-weight')
     replicas = config('ceph-osd-replication-count')
+    bluestore_compression = CephBlueStoreCompressionContext()
 
     if config('pool-type') == 'erasure-coded':
         # General EC plugin config
@@ -160,20 +165,35 @@ def get_ceph_request():
         )
 
         # Create EC data pool
-        rq.add_op_create_erasure_pool(
-            name=pool_name,
-            erasure_profile=profile_name,
-            weight=weight,
-            group="volumes",
-            app_name="rbd",
-            allow_ec_overwrites=True
-        )
-    else:
-        rq.add_op_create_pool(name=pool_name,
-                              replica_count=replicas,
-                              weight=weight,
-                              group='volumes', app_name='rbd')
+
+        # NOTE(fnordahl): once we deprecate Python 3.5 support we can do
+        # the unpacking of the BlueStore compression arguments as part of
+        # the function arguments. Until then we need to build the dict
+        # prior to the function call.
+        kwargs = {
+            'name': pool_name,
+            'erasure_profile': profile_name,
+            'weight': weight,
+            'group': "volumes",
+            'app_name': "rbd",
+            'allow_ec_overwrites': True,
+        }
+        kwargs.update(bluestore_compression.get_kwargs())
+        rq.add_op_create_erasure_pool(**kwargs)
+    else:
+        # NOTE(fnordahl): once we deprecate Python 3.5 support we can do
+        # the unpacking of the BlueStore compression arguments as part of
+        # the function arguments. Until then we need to build the dict
+        # prior to the function call.
+        kwargs = {
+            'name': pool_name,
+            'replica_count': replicas,
+            'weight': weight,
+            'group': 'volumes',
+            'app_name': 'rbd',
+        }
+        kwargs.update(bluestore_compression.get_kwargs())
+        rq.add_op_create_replicated_pool(**kwargs)
     if config('restrict-ceph-pools'):
         rq.add_op_request_access_to_group(
             name='volumes',
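The NOTE(fnordahl) comments refer to eventually passing the compression arguments in the call itself rather than building the kwargs dict beforehand. A self-contained sketch of that style (function and values hypothetical, not part of this change):

    def add_op_create_replicated_pool(**op):
        return op

    compression_kwargs = {'compression_mode': 'aggressive'}
    op = add_op_create_replicated_pool(
        name='cinder', replica_count=3, weight=40,
        **compression_kwargs)
    assert op['compression_mode'] == 'aggressive'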
@@ -203,18 +223,27 @@ def ceph_changed():
         log('Could not create ceph keyring: peer not ready?')
         return
 
-    if is_request_complete(get_ceph_request()):
-        log('Request complete')
-        CONFIGS.write_all()
-        for rid in relation_ids('storage-backend'):
-            storage_backend(rid)
-        for r_id in relation_ids('ceph-access'):
-            ceph_access_joined(r_id)
-        # Ensure that cinder-volume is restarted since only now can we
-        # guarantee that ceph resources are ready.
-        service_restart('cinder-volume')
-    else:
-        send_request_if_needed(get_ceph_request())
+    try:
+        if is_request_complete(get_ceph_request()):
+            log('Request complete')
+            CONFIGS.write_all()
+            for rid in relation_ids('storage-backend'):
+                storage_backend(rid)
+            for r_id in relation_ids('ceph-access'):
+                ceph_access_joined(r_id)
+            # Ensure that cinder-volume is restarted since only now can we
+            # guarantee that ceph resources are ready.
+            service_restart('cinder-volume')
+        else:
+            send_request_if_needed(get_ceph_request())
+    except ValueError as e:
+        # The end user has most likely provided an invalid value for a
+        # configuration option. Just log the traceback here, the end user will
+        # be notified by assess_status() called at the end of the hook
+        # execution.
+        log('Caught ValueError, invalid value provided for configuration?: '
+            '"{}"'.format(str(e)),
+            level=DEBUG)
 
 
 @hooks.hook('ceph-relation-broken')
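The try/except keeps an invalid configuration value from aborting the hook: the hook only logs at DEBUG level, and assess_status() reports the problem at the end of execution. A self-contained sketch of this deferred-error pattern (names hypothetical):

    import logging

    log = logging.getLogger(__name__)

    def run_hook(build_request, process):
        try:
            process(build_request())
        except ValueError as e:
            # Log only; user-facing status is set later by assess_status().
            log.debug('invalid value provided for configuration?: "%s"', e)

    def bad_builder():
        raise ValueError('bad compression mode')

    run_hook(bad_builder, lambda req: None)   # does not raise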
@@ -330,10 +359,21 @@ def dummy_update_status():
     pass
 
 
+def assess_status():
+    """Assess status of current unit."""
+    os_application_version_set(VERSION_PACKAGE)
+    set_os_workload_status(CONFIGS, REQUIRED_INTERFACES)
+
+    try:
+        bluestore_compression = CephBlueStoreCompressionContext()
+        bluestore_compression.validate()
+    except ValueError as e:
+        status_set('blocked', 'Invalid configuration: {}'.format(str(e)))
+
+
 if __name__ == '__main__':
     try:
         hooks.execute(sys.argv)
     except UnregisteredHookError as e:
         log('Unknown hook {} - skipping.'.format(e))
-    set_os_workload_status(CONFIGS, REQUIRED_INTERFACES)
-    os_application_version_set(VERSION_PACKAGE)
+    assess_status()
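CephBlueStoreCompressionContext.validate() lives in charm-helpers and is not shown in this diff; presumably it raises ValueError for out-of-range options, which assess_status() converts into a 'blocked' workload status. A hypothetical sketch of that kind of check, assuming mode validation:

    def validate_mode(mode, allowed=('none', 'passive', 'aggressive', 'force')):
        # Raise ValueError so the caller can set a 'blocked' status.
        if mode is not None and mode not in allowed:
            raise ValueError('invalid bluestore-compression-mode: {}'.format(mode))

    validate_mode('aggressive')    # valid, passes silently
    try:
        validate_mode('sometimes')
    except ValueError as e:
        print('blocked: Invalid configuration: {}'.format(e))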
@@ -10,7 +10,7 @@
 charm-tools>=2.4.4
 requests>=2.18.4
 mock>=1.2
-flake8>=2.2.4,<=2.4.1
+flake8>=2.2.4
 stestr>=2.2.0
 coverage>=4.5.2
 pyudev # for ceph-* charm unit tests (need to fix the ceph-* charm unit tests/mocking)
@@ -38,6 +38,7 @@ tests:
   - zaza.openstack.charm_tests.ceph.tests.CephLowLevelTest
   - zaza.openstack.charm_tests.ceph.tests.CephRelationTest
   - zaza.openstack.charm_tests.ceph.tests.CephTest
+  - zaza.openstack.charm_tests.ceph.tests.BlueStoreCompressionCharmOperation
   - zaza.openstack.charm_tests.ceph.osd.tests.SecurityTest
   - zaza.openstack.charm_tests.policyd.tests.CinderTests
   - zaza.openstack.charm_tests.ceph.tests.CheckPoolTypes
@@ -94,6 +94,13 @@ class TestCinderHooks(CharmTestCase):
     @patch('charmhelpers.core.hookenv.config')
     def test_ceph_changed(self, mock_config, mock_get_ceph_request):
         '''It ensures ceph assets created on ceph changed'''
+        # confirm ValueError is caught and logged
+        self.is_request_complete.side_effect = ValueError
+        hooks.hooks.execute(['hooks/ceph-relation-changed'])
+        self.assertFalse(self.CONFIGS.write_all.called)
+        self.assertTrue(self.log.called)
+        self.is_request_complete.side_effect = None
+        # normal operation
         self.is_request_complete.return_value = True
         self.CONFIGS.complete_contexts.return_value = ['ceph']
         self.service_name.return_value = 'cinder'
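The new assertions rely on standard unittest.mock behavior: assigning an exception class to side_effect makes every call raise it, and resetting side_effect to None restores normal return values. A quick standalone check:

    from unittest import mock

    m = mock.Mock(side_effect=ValueError)
    try:
        m()
    except ValueError:
        pass                      # raised as configured
    m.side_effect = None
    m.return_value = True
    assert m() is True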
@@ -119,12 +126,13 @@ class TestCinderHooks(CharmTestCase):
                                  group='cinder')
         self.send_request_if_needed.assert_called_with('cephreq')
 
+    @patch.object(hooks, 'CephBlueStoreCompressionContext')
     @patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq'
            '.add_op_request_access_to_group')
     @patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq'
-           '.add_op_create_pool')
+           '.add_op_create_replicated_pool')
     def test_create_pool_op(self, mock_create_pool,
-                            mock_request_access):
+                            mock_request_access, mock_bluestore_compression):
         self.service_name.return_value = 'cinder'
         self.test_config.set('ceph-osd-replication-count', 4)
         self.test_config.set('ceph-pool-weight', 20)
@@ -154,12 +162,14 @@ class TestCinderHooks(CharmTestCase):
                 permission='rwx'),
         ])
 
+    @patch.object(hooks, 'CephBlueStoreCompressionContext')
     @patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq'
            '.add_op_request_access_to_group')
     @patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq'
-           '.add_op_create_pool')
+           '.add_op_create_replicated_pool')
     def test_create_pool_wth_name_op(self, mock_create_pool,
-                                     mock_request_access):
+                                     mock_request_access,
+                                     mock_bluestore_compression):
         self.service_name.return_value = 'cinder'
         self.test_config.set('ceph-osd-replication-count', 4)
         self.test_config.set('ceph-pool-weight', 20)
@@ -170,7 +180,20 @@ class TestCinderHooks(CharmTestCase):
                                                  weight=20,
                                                  group='volumes',
                                                  app_name='rbd')
+        # confirm operation with bluestore compression
+        mock_create_pool.reset_mock()
+        mock_bluestore_compression().get_kwargs.return_value = {
+            'compression_mode': 'fake',
+        }
+        hooks.get_ceph_request()
+        mock_create_pool.assert_called_once_with(name='cinder-test',
+                                                 replica_count=4,
+                                                 weight=20,
+                                                 group='volumes',
+                                                 app_name='rbd',
+                                                 compression_mode='fake')
 
+    @patch.object(hooks, 'CephBlueStoreCompressionContext')
     @patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq'
            '.add_op_create_erasure_pool')
     @patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq'
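Configuring mock_bluestore_compression().get_kwargs after the mock is injected works because calling a MagicMock returns the same return_value object every time, so the test configures the very instance the hook code constructs. A standalone check:

    from unittest import mock

    m = mock.MagicMock()
    assert m() is m()             # every call returns the same child mock
    m().get_kwargs.return_value = {'compression_mode': 'fake'}
    assert m().get_kwargs() == {'compression_mode': 'fake'}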
@@ -182,7 +205,8 @@ class TestCinderHooks(CharmTestCase):
     def test_create_pool_erasure_coded(self, mock_create_pool,
                                        mock_request_access,
                                        mock_create_erasure_profile,
-                                       mock_create_erasure_pool):
+                                       mock_create_erasure_pool,
+                                       mock_bluestore_compression):
         self.service_name.return_value = 'cinder'
         self.test_config.set('ceph-osd-replication-count', 4)
         self.test_config.set('ceph-pool-weight', 20)
@@ -216,6 +240,21 @@ class TestCinderHooks(CharmTestCase):
             erasure_type='isa',
             erasure_technique=None
         )
+        # confirm operation with bluestore compression
+        mock_create_erasure_pool.reset_mock()
+        mock_bluestore_compression().get_kwargs.return_value = {
+            'compression_mode': 'fake',
+        }
+        hooks.get_ceph_request()
+        mock_create_erasure_pool.assert_called_with(
+            name='cinder',
+            erasure_profile='cinder-profile',
+            weight=19.8,
+            group='volumes',
+            app_name='rbd',
+            allow_ec_overwrites=True,
+            compression_mode='fake',
+        )
 
     @patch('charmhelpers.core.hookenv.config')
     def test_ceph_changed_no_keys(self, mock_config):
@@ -348,3 +387,38 @@ class TestCinderHooks(CharmTestCase):
             relation_settings={'key': 'mykey',
                                'secret-uuid': 'newuuid'}
         )
+
+    @patch.object(hooks, 'ceph_changed')
+    @patch.object(hooks.uuid, 'uuid4')
+    def test_write_and_restart(self, mock_uuid4, mock_ceph_changed):
+        # confirm normal operation for any unit type
+        mock_ceph_changed.side_effect = None
+        hooks.write_and_restart()
+        self.CONFIGS.write_all.assert_called_once_with()
+        # confirm normal operation for leader
+        self.leader_get.reset_mock()
+        self.leader_get.return_value = None
+        self.is_leader.return_value = True
+        mock_uuid4.return_value = 42
+        hooks.write_and_restart()
+        self.leader_get.assert_called_once_with('secret-uuid')
+        self.leader_set.assert_called_once_with({'secret-uuid': '42'})
+
+    @patch.object(hooks, 'CephBlueStoreCompressionContext')
+    @patch.object(hooks, 'set_os_workload_status')
+    def test_assess_status(self,
+                           mock_set_os_workload_status,
+                           mock_bluestore_compression):
+        hooks.assess_status()
+        self.os_application_version_set.assert_called_once_with(
+            hooks.VERSION_PACKAGE)
+        mock_set_os_workload_status.assert_called_once_with(
+            ANY, hooks.REQUIRED_INTERFACES)
+        mock_bluestore_compression().validate.assert_called_once_with()
+        self.assertFalse(self.status_set.called)
+        # confirm operation when the user has provided invalid configuration
+        mock_bluestore_compression().validate.side_effect = ValueError(
+            'fake message')
+        hooks.assess_status()
+        self.status_set.assert_called_once_with(
+            'blocked', 'Invalid configuration: fake message')