From 6eac16fda45da13994a10820b5d7baec2ba5543a Mon Sep 17 00:00:00 2001
From: Liam Young
Date: Mon, 26 Oct 2020 09:47:49 +0000
Subject: [PATCH] Add bluestore compression support

Add bluestore compression support and update update_status to work
with changes in ops_openstack.core.

Depends-On: Id04426c564b9413d50c5c28a49bce9511142a801
Depends-On: I3953d28029d6daa6d771617c596a6e75fbacf258
Change-Id: I1941a13fc402ae91d3fc091e3f181ac49e3c2768
---
 README.md                           | 13 +++++++
 config.yaml                         | 66 +++++++++++++++++++++++++++++
 src/charm.py                        | 36 +++++++++++++----
 test-requirements.txt               |  1 +
 tests/bundles/focal-ec.yaml         |  2 +-
 tests/bundles/focal.yaml            |  2 +-
 tests/tests.yaml                    |  1 +
 unit_tests/test_ceph_iscsi_charm.py |  9 ++++-
 8 files changed, 119 insertions(+), 11 deletions(-)

diff --git a/README.md b/README.md
index 68001da..4fa3882 100644
--- a/README.md
+++ b/README.md
@@ -11,6 +11,19 @@ with the [ceph-osd][ceph-osd-charm] and [ceph-mon][ceph-mon-charm] charms.
 See file `config.yaml` for the full list of options, along with their
 descriptions and default values.
 
+## Ceph BlueStore compression
+
+This charm supports [BlueStore inline compression][ceph-bluestore-compression]
+for its associated Ceph storage pool(s). The feature is enabled by assigning a
+compression mode via the `bluestore-compression-mode` configuration option. The
+default behaviour is to disable compression.
+
+The efficiency of compression depends heavily on what type of data is stored
+in the pool and the charm provides a set of configuration options to fine tune
+the compression behaviour.
+
+**Note**: BlueStore compression is supported starting with Ceph Mimic.
+
 ## Deployment
 
 We are assuming a pre-existing Ceph cluster.
diff --git a/config.yaml b/config.yaml
index 4edf2aa..47127af 100644
--- a/config.yaml
+++ b/config.yaml
@@ -180,3 +180,69 @@ options:
       Device class from CRUSH map to use for placement groups for erasure
       profile - valid values: ssd, hdd or nvme (or leave unset to not use a
       device class).
+  bluestore-compression-algorithm:
+    type: string
+    default:
+    description: |
+      Compressor to use (if any) for pools requested by this charm.
+      .
+      NOTE: The ceph-osd charm sets a global default for this value (defaults
+      to 'lz4' unless configured by the end user) which will be used unless
+      specified for individual pools.
+  bluestore-compression-mode:
+    type: string
+    default:
+    description: |
+      Policy for using compression on pools requested by this charm.
+      .
+      'none' means never use compression.
+      'passive' means use compression when clients hint that data is
+      compressible.
+      'aggressive' means use compression unless clients hint that
+      data is not compressible.
+      'force' means use compression under all circumstances even if the clients
+      hint that the data is not compressible.
+  bluestore-compression-required-ratio:
+    type: float
+    default:
+    description: |
+      The ratio of the size of the data chunk after compression relative to the
+      original size must be at least this small in order to store the
+      compressed version on pools requested by this charm.
+  bluestore-compression-min-blob-size:
+    type: int
+    default:
+    description: |
+      Chunks smaller than this are never compressed on pools requested by
+      this charm.
+  bluestore-compression-min-blob-size-hdd:
+    type: int
+    default:
+    description: |
+      Value of bluestore compression min blob size for rotational media on
+      pools requested by this charm.
+  bluestore-compression-min-blob-size-ssd:
+    type: int
+    default:
+    description: |
+      Value of bluestore compression min blob size for solid state media on
+      pools requested by this charm.
+  bluestore-compression-max-blob-size:
+    type: int
+    default:
+    description: |
+      Chunks larger than this are broken into smaller blobs no larger than
+      bluestore compression max blob size before being compressed on pools
+      requested by this charm.
+  bluestore-compression-max-blob-size-hdd:
+    type: int
+    default:
+    description: |
+      Value of bluestore compression max blob size for rotational media on
+      pools requested by this charm.
+  bluestore-compression-max-blob-size-ssd:
+    type: int
+    default:
+    description: |
+      Value of bluestore compression max blob size for solid state media on
+      pools requested by this charm.
diff --git a/src/charm.py b/src/charm.py
index d9a6023..d7b961b 100755
--- a/src/charm.py
+++ b/src/charm.py
@@ -41,6 +41,7 @@ import interface_tls_certificates.ca_client as ca_client
 import ops_openstack.adapters
 import ops_openstack.core
+import ops_openstack.plugins.classes
 import gwcli_client
 import cryptography.hazmat.primitives.serialization as serialization
 
 logger = logging.getLogger(__name__)
@@ -132,7 +133,8 @@ class CephISCSIGatewayAdapters(
     }
 
 
-class CephISCSIGatewayCharmBase(ops_openstack.core.OSBaseCharm):
+class CephISCSIGatewayCharmBase(
+        ops_openstack.plugins.classes.BaseCephClientCharm):
     """Ceph iSCSI Base Charm."""
 
     _stored = StoredState()
@@ -173,6 +175,7 @@ class CephISCSIGatewayCharmBase(ops_openstack.core.OSBaseCharm):
     def __init__(self, framework):
         """Setup adapters and observers."""
         super().__init__(framework)
+        super().register_status_check(self.custom_status_check)
         logging.info("Using %s class", self.release)
         self._stored.set_default(
             target_created=False,
@@ -210,6 +213,9 @@
         self.framework.observe(
             self.on.config_changed,
             self.render_config)
+        self.framework.observe(
+            self.on.config_changed,
+            self.request_ceph_pool)
         self.framework.observe(
             self.on.upgrade_charm,
             self.render_config)
@@ -270,7 +276,21 @@
 
     def request_ceph_pool(self, event):
         """Request pools from Ceph cluster."""
+        logging.debug("request_ceph_pool")
+        if not self.ceph_client.broker_available:
+            logging.info("Cannot request ceph setup at this time")
+            return
         logging.info("Requesting replicated pool")
+        try:
+            bcomp_kwargs = self.get_bluestore_compression()
+        except ValueError as e:
+            # The end user has most likely provided an invalid value for
+            # a configuration option. Just log the error here, the
+            # end user will be notified by assess_status() called at
+            # the end of the hook execution.
+            logging.warning('Caught ValueError, invalid value provided for '
+                            'configuration?: "{}"'.format(str(e)))
+            return
         self.ceph_client.create_replicated_pool(
             self.config_get('gateway-metadata-pool'))
         weight = self.config_get('ceph-pool-weight')
@@ -320,7 +340,8 @@
                 name=self.data_pool_name,
                 erasure_profile=profile_name,
                 weight=weight,
-                allow_ec_overwrites=True
+                allow_ec_overwrites=True,
+                **bcomp_kwargs
             )
             self.ceph_client.create_replicated_pool(
                 name=self.metadata_pool_name,
@@ -330,7 +351,8 @@
             self.ceph_client.create_replicated_pool(
                 name=self.data_pool_name,
                 replicas=replicas,
-                weight=weight)
+                weight=weight,
+                **bcomp_kwargs)
         logging.info("Requesting permissions")
         self.ceph_client.request_ceph_permissions(
             'ceph-iscsi',
@@ -425,14 +447,12 @@
 
     def custom_status_check(self):
         """Custom update status checks."""
         if ch_host.is_container():
-            self.unit.status = ops.model.BlockedStatus(
+            return ops.model.BlockedStatus(
                 'Charm cannot be deployed into a container')
-            return False
         if self.peers.unit_count not in self.ALLOWED_UNIT_COUNTS:
-            self.unit.status = ops.model.BlockedStatus(
+            return ops.model.BlockedStatus(
                 '{} is an invalid unit count'.format(self.peers.unit_count))
-            return False
-        return True
+        return ops.model.ActiveStatus()
 
     # Actions
diff --git a/test-requirements.txt b/test-requirements.txt
index da47a9b..358e1bc 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -7,6 +7,7 @@ mock>=1.2
 flake8>=2.2.4,<=2.4.1
 stestr>=2.2.0
 requests>=2.18.4
+psutil
 # oslo.i18n dropped py35 support
 oslo.i18n<4.0.0
 git+https://github.com/openstack-charmers/zaza.git#egg=zaza
diff --git a/tests/bundles/focal-ec.yaml b/tests/bundles/focal-ec.yaml
index a562729..1dc9b33 100644
--- a/tests/bundles/focal-ec.yaml
+++ b/tests/bundles/focal-ec.yaml
@@ -32,7 +32,7 @@ applications:
     charm: ../../ceph-iscsi.charm
     num_units: 2
     options:
-      gateway-metadata-pool: tmbtil
+      gateway-metadata-pool: iscsi-foo-metadata
       pool-type: erasure-coded
       ec-profile-k: 4
       ec-profile-m: 2
diff --git a/tests/bundles/focal.yaml b/tests/bundles/focal.yaml
index 61057dd..a980504 100644
--- a/tests/bundles/focal.yaml
+++ b/tests/bundles/focal.yaml
@@ -32,7 +32,7 @@ applications:
     charm: ../../ceph-iscsi.charm
     num_units: 2
     options:
-      gateway-metadata-pool: tmbtil
+      gateway-metadata-pool: iscsi-foo-metadata
     to:
     - '0'
     - '1'
diff --git a/tests/tests.yaml b/tests/tests.yaml
index 9f8e4bd..7371f24 100644
--- a/tests/tests.yaml
+++ b/tests/tests.yaml
@@ -9,6 +9,7 @@ configure:
 - zaza.openstack.charm_tests.ceph.iscsi.setup.basic_guest_setup
 tests:
 - zaza.openstack.charm_tests.ceph.iscsi.tests.CephISCSIGatewayTest
+- zaza.openstack.charm_tests.ceph.tests.BlueStoreCompressionCharmOperation
 target_deploy_status:
   vault:
     workload-status: blocked
diff --git a/unit_tests/test_ceph_iscsi_charm.py b/unit_tests/test_ceph_iscsi_charm.py
index 4adf580..d07f17a 100644
--- a/unit_tests/test_ceph_iscsi_charm.py
+++ b/unit_tests/test_ceph_iscsi_charm.py
@@ -127,6 +127,13 @@ class CharmTestCase(unittest.TestCase):
             setattr(self, method, self.patch(method))
 
 
+class _CephISCSIGatewayCharmBase(charm.CephISCSIGatewayCharmBase):
+
+    @staticmethod
+    def get_bluestore_compression():
+        return {}
+
+
 class TestCephISCSIGatewayCharmBase(CharmTestCase):
 
     PATCHES = [
@@ -139,7 +146,7 @@
     def setUp(self):
         super().setUp(charm, self.PATCHES)
         self.harness = Harness(
-            charm.CephISCSIGatewayCharmBase,
+            _CephISCSIGatewayCharmBase,
         )
         self.gwc = MagicMock()
         self.gwcli_client.GatewayClient.return_value = self.gwc
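
Reviewer note: the `**bcomp_kwargs` splatted into the pool requests above come
from `get_bluestore_compression()`, which this charm now inherits from
`ops_openstack.plugins.classes.BaseCephClientCharm` (hence the new base class
and the stub in the unit tests). As a rough sketch of the idea only, here is
one way such a helper could map the charm's `bluestore-compression-*` options
to pool-request keyword arguments. Everything below except the option names
from this patch is an assumption for illustration, not the actual
ops-openstack implementation:

```python
# Illustrative only: real helper lives in
# ops_openstack.plugins.classes.BaseCephClientCharm and may differ.
VALID_MODES = ('none', 'passive', 'aggressive', 'force')

OPTIONS = (
    ('bluestore-compression-algorithm', str),
    ('bluestore-compression-mode', str),
    ('bluestore-compression-required-ratio', float),
    ('bluestore-compression-min-blob-size', int),
    ('bluestore-compression-min-blob-size-hdd', int),
    ('bluestore-compression-min-blob-size-ssd', int),
    ('bluestore-compression-max-blob-size', int),
    ('bluestore-compression-max-blob-size-hdd', int),
    ('bluestore-compression-max-blob-size-ssd', int),
)


def get_bluestore_compression(config):
    """Map bluestore-compression-* charm config to pool request kwargs.

    Unset options are omitted so the Ceph broker can fall back to its
    global defaults; a bad value raises ValueError, which is what the
    try/except in request_ceph_pool() guards against.
    """
    kwargs = {}
    for option, caster in OPTIONS:
        value = config.get(option)
        if value is None:
            continue
        value = caster(value)  # may raise ValueError on junk input
        if (option == 'bluestore-compression-mode'
                and value not in VALID_MODES):
            raise ValueError(
                '{} is not a valid bluestore compression mode'.format(value))
        # e.g. 'bluestore-compression-mode' -> 'compression_mode'
        kwargs[option[len('bluestore-'):].replace('-', '_')] = value
    return kwargs
```

With a dict like `{'compression_mode': 'aggressive'}` in hand, the charm can
pass it straight through as `create_replicated_pool(..., **bcomp_kwargs)`,
exactly as the hunks above do, and operators would enable the feature with
something like `juju config ceph-iscsi bluestore-compression-mode=aggressive`.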