From d8eb7370468baa2a4bf604c778d153901eec3661 Mon Sep 17 00:00:00 2001 From: Frode Nordahl Date: Mon, 21 Sep 2020 12:43:04 +0200 Subject: [PATCH] Add Ceph BlueStore Compression support Ceph BlueStore compression is a post-deploy configurable option, so being able to update the broker request is required. Drop the code that gates the sending of the pool broker request; the original issue has been fixed in the interface code and it is now safe to call multiple times. Unpin flake8, fix minor lints. Change-Id: Ib8a209fffddc882c4b42a620f284a0d0504a749f --- src/config.yaml | 66 +++++++++++++++++++++++++++++ src/reactive/gnocchi_handlers.py | 18 +++----- src/tests/tests.yaml | 4 +- test-requirements.txt | 2 +- unit_tests/__init__.py | 1 + unit_tests/test_gnocchi_handlers.py | 26 +++--------- 6 files changed, 82 insertions(+), 35 deletions(-) diff --git a/src/config.yaml b/src/config.yaml index c89ae89..349c860 100644 --- a/src/config.yaml +++ b/src/config.yaml @@ -55,3 +55,69 @@ options: override YAML files in the service's policy.d directory. The resource file should be a ZIP file containing at least one yaml file with a .yaml or .yml extension. If False then remove the overrides. + bluestore-compression-algorithm: + type: string + default: + description: | + Compressor to use (if any) for pools requested by this charm. + . + NOTE: The ceph-osd charm sets a global default for this value (defaults + to 'lz4' unless configured by the end user) which will be used unless + specified for individual pools. + bluestore-compression-mode: + type: string + default: + description: | + Policy for using compression on pools requested by this charm. + . + 'none' means never use compression. + 'passive' means use compression when clients hint that data is + compressible. + 'aggressive' means use compression unless clients hint that + data is not compressible. + 'force' means use compression under all circumstances even if the clients + hint that the data is not compressible. 
+ bluestore-compression-required-ratio: + type: float + default: + description: | + The ratio of the size of the data chunk after compression relative to the + original size must be at least this small in order to store the + compressed version on pools requested by this charm. + bluestore-compression-min-blob-size: + type: int + default: + description: | + Chunks smaller than this are never compressed on pools requested by + this charm. + bluestore-compression-min-blob-size-hdd: + type: int + default: + description: | + Value of bluestore compression min blob size for rotational media on + pools requested by this charm. + bluestore-compression-min-blob-size-ssd: + type: int + default: + description: | + Value of bluestore compression min blob size for solid state media on + pools requested by this charm. + bluestore-compression-max-blob-size: + type: int + default: + description: | + Chunks larger than this are broken into smaller blobs sizing bluestore + compression max blob size before being compressed on pools requested by + this charm. + bluestore-compression-max-blob-size-hdd: + type: int + default: + description: | + Value of bluestore compression max blob size for rotational media on + pools requested by this charm. + bluestore-compression-max-blob-size-ssd: + type: int + default: + description: | + Value of bluestore compression max blob size for solid state media on + pools requested by this charm. 
diff --git a/src/reactive/gnocchi_handlers.py b/src/reactive/gnocchi_handlers.py index 9c63f97..b56a611 100644 --- a/src/reactive/gnocchi_handlers.py +++ b/src/reactive/gnocchi_handlers.py @@ -89,7 +89,7 @@ def storage_backend_connection(): "{}".format(e), hookenv.DEBUG) reactive.clear_flag('gnocchi-storage-network.ready') return - except botocore.exceptions.SSLError as e: + except botocore.exceptions.SSLError: # this status check does not check for ssl validation reactive.set_flag('gnocchi-upgrade.ready') return @@ -149,11 +149,11 @@ def cluster_connected(hacluster): @reactive.when_not('is-update-status-hook') -@reactive.when_not('ceph.create_pool.req.sent') @reactive.when('storage-ceph.connected') -def storage_ceph_connected(ceph): - ceph.create_pool(hookenv.service_name()) - reactive.set_state('ceph.create_pool.req.sent') +def storage_ceph_connected(): + storage_ceph = reactive.endpoint_from_flag('storage-ceph.connected') + with charm.provide_charm_instance() as charm_instance: + charm_instance.create_pool(storage_ceph) @reactive.when_not('is-update-status-hook') @@ -201,11 +201,3 @@ def provide_gnocchi_url(metric_service): hookenv.log("Providing gnocchi URL: {}" .format(charm_class.public_url), hookenv.DEBUG) metric_service.set_gnocchi_url(charm_class.public_url) - - -@reactive.when_not('is-update-status-hook') -@reactive.when('storage-ceph.needed') -@reactive.when_not('storage-ceph.connected') -@reactive.when_not('storage-ceph.pools.available') -def reset_state_create_pool_req_sent(): - reactive.remove_state('ceph.create_pool.req.sent') diff --git a/src/tests/tests.yaml b/src/tests/tests.yaml index e4164eb..d7361d8 100644 --- a/src/tests/tests.yaml +++ b/src/tests/tests.yaml @@ -17,8 +17,7 @@ gate_bundles: - test-s3: focal-ussuri-s3 - test-s3: focal-victoria-s3 smoke_bundles: - - bionic-train - - test-s3: bionic-train-s3 + - focal-ussuri dev_bundles: - eoan-train - groovy-victoria @@ -33,6 +32,7 @@ configure: tests: - 
zaza.openstack.charm_tests.gnocchi.tests.GnocchiTest - zaza.openstack.charm_tests.gnocchi.tests.GnocchiExternalCATest + - zaza.openstack.charm_tests.ceph.tests.BlueStoreCompressionCharmOperation - test-s3: - zaza.openstack.charm_tests.gnocchi.tests.GnocchiS3Test - zaza.openstack.charm_tests.gnocchi.tests.GnocchiTest diff --git a/test-requirements.txt b/test-requirements.txt index 0ab97f6..1c8aff7 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -4,7 +4,7 @@ # https://github.com/openstack-charmers/release-tools # # Lint and unit test requirements -flake8>=2.2.4,<=2.4.1 +flake8>=2.2.4 stestr>=2.2.0 requests>=2.18.4 charms.reactive diff --git a/unit_tests/__init__.py b/unit_tests/__init__.py index b55ca32..c87bd3d 100644 --- a/unit_tests/__init__.py +++ b/unit_tests/__init__.py @@ -39,6 +39,7 @@ def mock_more_stuff(): charmhelpers.contrib.storage.linux.ceph ) + boto3 = mock.MagicMock() botocore = mock.MagicMock() sys.modules['boto3'] = boto3 diff --git a/unit_tests/test_gnocchi_handlers.py b/unit_tests/test_gnocchi_handlers.py index bcb7733..0cda5e9 100644 --- a/unit_tests/test_gnocchi_handlers.py +++ b/unit_tests/test_gnocchi_handlers.py @@ -90,7 +90,6 @@ class TestRegisteredHooks(test_utils.TestRegisteredHooks): 'is-update-status-hook', ), 'storage_ceph_connected': ( - 'ceph.create_pool.req.sent', 'is-update-status-hook', ), 'configure_ceph': ( @@ -103,11 +102,6 @@ class TestRegisteredHooks(test_utils.TestRegisteredHooks): 'is-update-status-hook', 'storage-ceph.pools.available', ), - 'reset_state_create_pool_req_sent': ( - 'is-update-status-hook', - 'storage-ceph.connected', - 'storage-ceph.pools.available', - ), }, } # test that the hooks were registered via the @@ -143,14 +137,13 @@ class TestHandlers(test_utils.PatchHelper): handlers.init_db() self.gnocchi_charm.db_sync.assert_called_once_with() - @mock.patch.object(handlers, 'hookenv') - def test_storage_ceph_connected(self, hookenv): - mock_ceph = mock.MagicMock() - 
hookenv.service_name.return_value = 'mygnocchi' - handlers.storage_ceph_connected(mock_ceph) - mock_ceph.create_pool.assert_called_once_with( - 'mygnocchi', - ) + def test_storage_ceph_connected(self): + self.patch_object(handlers.reactive, 'endpoint_from_flag') + handlers.storage_ceph_connected() + self.endpoint_from_flag.assert_called_once_with( + 'storage-ceph.connected') + self.gnocchi_charm.create_pool.assert_called_once_with( + self.endpoint_from_flag()) def test_configure_ceph(self): mock_ceph = mock.MagicMock() @@ -189,8 +182,3 @@ class TestHandlers(test_utils.PatchHelper): mock_gnocchi.set_gnocchi_url.assert_called_once_with( "http://gnocchi:8041" ) - - def test_reset_state_create_pool_req_sent(self): - self.patch_object(handlers.reactive, 'remove_state') - handlers.reset_state_create_pool_req_sent() - self.remove_state.assert_called_once_with('ceph.create_pool.req.sent')