Add bluestore compression support

Add bluestore compression support and update update_status to
work with changes in ops_openstack.core.

Depends-On: Id04426c564b9413d50c5c28a49bce9511142a801
Depends-On: I3953d28029d6daa6d771617c596a6e75fbacf258

Change-Id: I1941a13fc402ae91d3fc091e3f181ac49e3c2768
This commit is contained in:
Liam Young 2020-10-26 09:47:49 +00:00
parent 3fbea31190
commit 6eac16fda4
8 changed files with 113 additions and 11 deletions

View File

@ -11,6 +11,19 @@ with the [ceph-osd][ceph-osd-charm] and [ceph-mon][ceph-mon-charm] charms.
See file `config.yaml` for the full list of options, along with their
descriptions and default values.
## Ceph BlueStore compression
This charm supports [BlueStore inline compression][ceph-bluestore-compression]
for its associated Ceph storage pool(s). The feature is enabled by assigning a
compression mode via the `bluestore-compression-mode` configuration option. The
default behaviour is to disable compression.
The efficiency of compression depends heavily on what type of data is stored
in the pool and the charm provides a set of configuration options to fine tune
the compression behaviour.
**Note**: BlueStore compression is supported starting with Ceph Mimic.
## Deployment
We are assuming a pre-existing Ceph cluster.

View File

@ -180,3 +180,63 @@ options:
Device class from CRUSH map to use for placement groups for
erasure profile - valid values: ssd, hdd or nvme (or leave
unset to not use a device class).
bluestore-compression-algorithm:
type: string
default:
description: |
Compressor to use (if any) for pools requested by this charm.
.
NOTE: The ceph-osd charm sets a global default for this value (defaults
to 'lz4' unless configured by the end user) which will be used unless
specified for individual pools.
bluestore-compression-mode:
type: string
default:
description: |
Policy for using compression on pools requested by this charm.
.
'none' means never use compression.
'passive' means use compression when clients hint that data is
compressible.
'aggressive' means use compression unless clients hint that
data is not compressible.
'force' means use compression under all circumstances even if the clients
hint that the data is not compressible.
bluestore-compression-required-ratio:
type: float
default:
description: |
The ratio of the size of the data chunk after compression relative to the
original size must be at least this small in order to store the
compressed version on pools requested by this charm.
bluestore-compression-min-blob-size:
type: int
default:
description: |
Chunks smaller than this are never compressed on pools requested by
this charm.
bluestore-compression-min-blob-size-hdd:
type: int
default:
description: |
Value of bluestore compression min blob size for rotational media on
pools requested by this charm.
bluestore-compression-min-blob-size-ssd:
type: int
default:
description: |
Value of bluestore compression min blob size for solid state media on
pools requested by this charm.
bluestore-compression-max-blob-size:
type: int
default:
description: |
Chunks larger than this are broken into smaller blobs sized at most
bluestore compression max blob size before being compressed on pools
requested by this charm.
bluestore-compression-max-blob-size-hdd:
type: int
default:
description: |
Value of bluestore compression max blob size for rotational media on
pools requested by this charm.

View File

@ -41,6 +41,7 @@ import interface_tls_certificates.ca_client as ca_client
import ops_openstack.adapters
import ops_openstack.core
import ops_openstack.plugins.classes
import gwcli_client
import cryptography.hazmat.primitives.serialization as serialization
logger = logging.getLogger(__name__)
@ -132,7 +133,8 @@ class CephISCSIGatewayAdapters(
}
class CephISCSIGatewayCharmBase(ops_openstack.core.OSBaseCharm):
class CephISCSIGatewayCharmBase(
ops_openstack.plugins.classes.BaseCephClientCharm):
"""Ceph iSCSI Base Charm."""
_stored = StoredState()
@ -173,6 +175,7 @@ class CephISCSIGatewayCharmBase(ops_openstack.core.OSBaseCharm):
def __init__(self, framework):
"""Setup adapters and observers."""
super().__init__(framework)
super().register_status_check(self.custom_status_check)
logging.info("Using %s class", self.release)
self._stored.set_default(
target_created=False,
@ -210,6 +213,9 @@ class CephISCSIGatewayCharmBase(ops_openstack.core.OSBaseCharm):
self.framework.observe(
self.on.config_changed,
self.render_config)
self.framework.observe(
self.on.config_changed,
self.request_ceph_pool)
self.framework.observe(
self.on.upgrade_charm,
self.render_config)
@ -270,7 +276,21 @@ class CephISCSIGatewayCharmBase(ops_openstack.core.OSBaseCharm):
def request_ceph_pool(self, event):
"""Request pools from Ceph cluster."""
print("request_ceph_pool")
if not self.ceph_client.broker_available:
logging.info("Cannot request ceph setup at this time")
return
logging.info("Requesting replicated pool")
try:
bcomp_kwargs = self.get_bluestore_compression()
except ValueError as e:
# The end user has most likely provided an invalid value for
# a configuration option. Just log the error here, the
# end user will be notified by assess_status() called at
# the end of the hook execution.
logging.warn('Caught ValueError, invalid value provided for '
'configuration?: "{}"'.format(str(e)))
return
self.ceph_client.create_replicated_pool(
self.config_get('gateway-metadata-pool'))
weight = self.config_get('ceph-pool-weight')
@ -320,7 +340,8 @@ class CephISCSIGatewayCharmBase(ops_openstack.core.OSBaseCharm):
name=self.data_pool_name,
erasure_profile=profile_name,
weight=weight,
allow_ec_overwrites=True
allow_ec_overwrites=True,
**bcomp_kwargs
)
self.ceph_client.create_replicated_pool(
name=self.metadata_pool_name,
@ -330,7 +351,8 @@ class CephISCSIGatewayCharmBase(ops_openstack.core.OSBaseCharm):
self.ceph_client.create_replicated_pool(
name=self.data_pool_name,
replicas=replicas,
weight=weight)
weight=weight,
**bcomp_kwargs)
logging.info("Requesting permissions")
self.ceph_client.request_ceph_permissions(
'ceph-iscsi',
@ -425,14 +447,12 @@ class CephISCSIGatewayCharmBase(ops_openstack.core.OSBaseCharm):
def custom_status_check(self):
    """Run charm-specific status checks.

    Called by the ops_openstack.core status-check machinery (registered
    via ``register_status_check`` in ``__init__``). Unlike the previous
    implementation, this does not set ``self.unit.status`` directly; it
    returns the status object for the framework to apply.

    :returns: BlockedStatus when the charm is deployed into a container
        or with a unit count not in ``ALLOWED_UNIT_COUNTS``, otherwise
        ActiveStatus.
    :rtype: ops.model.StatusBase
    """
    # This charm does not support container-based deployment.
    if ch_host.is_container():
        return ops.model.BlockedStatus(
            'Charm cannot be deployed into a container')
    # Only the cluster sizes listed in ALLOWED_UNIT_COUNTS are valid.
    if self.peers.unit_count not in self.ALLOWED_UNIT_COUNTS:
        return ops.model.BlockedStatus(
            '{} is an invalid unit count'.format(self.peers.unit_count))
    return ops.model.ActiveStatus()
# Actions

View File

@ -7,6 +7,7 @@ mock>=1.2
flake8>=2.2.4,<=2.4.1
stestr>=2.2.0
requests>=2.18.4
psutil
# oslo.i18n dropped py35 support
oslo.i18n<4.0.0
git+https://github.com/openstack-charmers/zaza.git#egg=zaza

View File

@ -32,7 +32,7 @@ applications:
charm: ../../ceph-iscsi.charm
num_units: 2
options:
gateway-metadata-pool: tmbtil
gateway-metadata-pool: iscsi-foo-metadata
pool-type: erasure-coded
ec-profile-k: 4
ec-profile-m: 2

View File

@ -32,7 +32,7 @@ applications:
charm: ../../ceph-iscsi.charm
num_units: 2
options:
gateway-metadata-pool: tmbtil
gateway-metadata-pool: iscsi-foo-metadata
to:
- '0'
- '1'

View File

@ -9,6 +9,7 @@ configure:
- zaza.openstack.charm_tests.ceph.iscsi.setup.basic_guest_setup
tests:
- zaza.openstack.charm_tests.ceph.iscsi.tests.CephISCSIGatewayTest
- zaza.openstack.charm_tests.ceph.tests.BlueStoreCompressionCharmOperation
target_deploy_status:
vault:
workload-status: blocked

View File

@ -127,6 +127,13 @@ class CharmTestCase(unittest.TestCase):
setattr(self, method, self.patch(method))
class _CephISCSIGatewayCharmBase(charm.CephISCSIGatewayCharmBase):
    """Test double for CephISCSIGatewayCharmBase.

    Overrides the BlueStore compression lookup so unit tests do not
    depend on real charm configuration.
    """

    @staticmethod
    def get_bluestore_compression():
        # Empty kwargs dict: pool-creation requests made during tests
        # carry no compression settings.
        return {}
class TestCephISCSIGatewayCharmBase(CharmTestCase):
PATCHES = [
@ -139,7 +146,7 @@ class TestCephISCSIGatewayCharmBase(CharmTestCase):
def setUp(self):
super().setUp(charm, self.PATCHES)
self.harness = Harness(
charm.CephISCSIGatewayCharmBase,
_CephISCSIGatewayCharmBase,
)
self.gwc = MagicMock()
self.gwcli_client.GatewayClient.return_value = self.gwc