Waiting state if broker request is incomplete

When assessing the charm's status, check that the current
Ceph broker request has been completed. If it has not,
put the charm in a 'waiting' state and update the status
message.

Change-Id: I779b6933242ce88cb1577ae11ca258b96ec46b38
Closes-Bug: #1899918
This commit is contained in:
Liam Young 2020-10-15 10:25:25 +00:00
parent b7267d23a7
commit 6c6d491280
4 changed files with 225 additions and 214 deletions

View File

@ -57,6 +57,7 @@ from glance_utils import (
resume_unit_helper,
remove_old_packages,
deprecated_services,
get_ceph_request,
)
from charmhelpers.core.hookenv import (
charm_dir,
@ -113,7 +114,6 @@ from charmhelpers.contrib.storage.linux.ceph import (
send_request_if_needed,
is_request_complete,
ensure_ceph_keyring,
CephBrokerRq,
delete_keyring,
)
from charmhelpers.payload.execd import (
@ -128,7 +128,6 @@ from charmhelpers.contrib.openstack.ip import (
)
from charmhelpers.contrib.openstack.context import (
ADDRESS_TYPES,
CephBlueStoreCompressionContext,
)
from charmhelpers.contrib.charmsupport import nrpe
from charmhelpers.contrib.hardening.harden import harden
@ -303,106 +302,6 @@ def ceph_joined():
send_application_name()
def get_ceph_request():
    """Assemble the Ceph broker request describing the pools glance needs.

    Depending on the 'pool-type' charm option this requests either a
    plain replicated data pool, or an erasure-coded data pool together
    with its erasure profile and a small replicated metadata pool.
    When 'restrict-ceph-pools' is set, key access is restricted to the
    'images' group.

    :returns: broker request ready to be sent to the Ceph cluster.
    :rtype: CephBrokerRq
    """
    svc = service_name()
    # An explicit 'rbd-pool-name' wins over the service-derived default.
    data_pool = config('rbd-pool-name') or svc
    request = CephBrokerRq()
    data_weight = config('ceph-pool-weight')
    replica_count = config('ceph-osd-replication-count')
    compression_ctxt = CephBlueStoreCompressionContext()
    if config('pool-type') == 'erasure-coded':
        metadata_pool = (
            config('ec-rbd-metadata-pool') or "{}-metadata".format(svc)
        )
        profile = config('ec-profile-name') or "{}-profile".format(svc)
        # Metadata sizing is approximately 1% of the overall data
        # weight: it is driven by the number of rbd's rather than
        # their size, so it can be very lightweight.
        metadata_weight = data_weight * 0.01
        # Shrink the data pool weight to make room for the metadata.
        data_weight = data_weight - metadata_weight
        # Replicated metadata pool.
        request.add_op_create_pool(
            name=metadata_pool, replica_count=replica_count,
            weight=metadata_weight, group='images', app_name='rbd')
        # Erasure profile carrying the general, LRC, SHEC and CLAY
        # plugin options (unused ones are simply None).
        request.add_op_create_erasure_profile(
            name=profile,
            k=config('ec-profile-k'),
            m=config('ec-profile-m'),
            lrc_locality=config('ec-profile-locality'),
            lrc_crush_locality=config('ec-profile-crush-locality'),
            shec_durability_estimator=config(
                'ec-profile-durability-estimator'),
            clay_helper_chunks=config('ec-profile-helper-chunks'),
            clay_scalar_mds=config('ec-profile-scalar-mds'),
            device_class=config('ec-profile-device-class'),
            erasure_type=config('ec-profile-plugin'),
            erasure_technique=config('ec-profile-technique'))
        # EC data pool.
        # NOTE(fnordahl): once we deprecate Python 3.5 support we can do
        # the unpacking of the BlueStore compression arguments as part
        # of the function arguments. Until then we need to build the
        # dict prior to the function call.
        pool_kwargs = dict(
            name=data_pool,
            erasure_profile=profile,
            weight=data_weight,
            group="images",
            app_name="rbd",
            allow_ec_overwrites=True)
        pool_kwargs.update(compression_ctxt.get_kwargs())
        request.add_op_create_erasure_pool(**pool_kwargs)
    else:
        # Replicated data pool; kwargs built as a dict for the same
        # Python 3.5 reason as above.
        pool_kwargs = dict(
            name=data_pool,
            replica_count=replica_count,
            weight=data_weight,
            group='images',
            app_name='rbd')
        pool_kwargs.update(compression_ctxt.get_kwargs())
        request.add_op_create_replicated_pool(**pool_kwargs)
    if config('restrict-ceph-pools'):
        request.add_op_request_access_to_group(
            name="images",
            object_prefix_permissions={'class-read': ['rbd_children']},
            permission='rwx')
    return request
@hooks.hook('ceph-relation-changed')
@restart_on_change(restart_map())
def ceph_changed():

View File

@ -82,6 +82,13 @@ from charmhelpers.contrib.openstack.utils import (
from charmhelpers.core.decorators import (
retry_on_exception,
)
from charmhelpers.contrib.storage.linux.ceph import (
CephBrokerRq,
is_request_complete,
)
from charmhelpers.contrib.openstack.context import (
CephBlueStoreCompressionContext,
)
from charmhelpers.core.unitdata import kv
@ -494,7 +501,11 @@ def check_optional_config_and_relations(configs):
pass
except ValueError as e:
return ('blocked', 'Invalid configuration: {}'.format(str(e)))
# ceph pkgs are only installed after the ceph relation is established
# so gate checking broker requests on ceph relation being completed.
if ('ceph' in configs.complete_contexts()
and not is_request_complete(get_ceph_request())):
return ('waiting', 'Ceph broker request incomplete')
# return 'unknown' as the lowest priority to not clobber an existing
# status.
return "unknown", ""
@ -701,3 +712,103 @@ def update_image_location_policy(configs=None):
"'{}': '{}'".format(policy_key, policy_value), level=INFO)
update_json_file(GLANCE_POLICY_FILE, new_policies)
def get_ceph_request():
    """Build the Ceph broker request for the pools glance requires.

    Depending on the 'pool-type' option this creates either a
    replicated data pool, or an erasure-coded data pool together with
    its erasure profile and a small replicated metadata pool.  When
    'restrict-ceph-pools' is set, access is limited to the 'images'
    group.

    :returns: broker request describing the required pools.
    :rtype: CephBrokerRq
    """
    service = service_name()
    # An explicit 'rbd-pool-name' overrides the service-named default.
    if config('rbd-pool-name'):
        pool_name = config('rbd-pool-name')
    else:
        pool_name = service
    rq = CephBrokerRq()
    weight = config('ceph-pool-weight')
    replicas = config('ceph-osd-replication-count')
    bluestore_compression = CephBlueStoreCompressionContext()
    if config('pool-type') == 'erasure-coded':
        # General EC plugin config
        plugin = config('ec-profile-plugin')
        technique = config('ec-profile-technique')
        device_class = config('ec-profile-device-class')
        metadata_pool_name = (
            config('ec-rbd-metadata-pool') or
            "{}-metadata".format(service)
        )
        bdm_k = config('ec-profile-k')
        bdm_m = config('ec-profile-m')
        # LRC plugin config
        bdm_l = config('ec-profile-locality')
        crush_locality = config('ec-profile-crush-locality')
        # SHEC plugin config
        bdm_c = config('ec-profile-durability-estimator')
        # CLAY plugin config
        bdm_d = config('ec-profile-helper-chunks')
        scalar_mds = config('ec-profile-scalar-mds')
        # Profile name
        profile_name = (
            config('ec-profile-name') or "{}-profile".format(service)
        )
        # Metadata sizing is approximately 1% of overall data weight
        # but is in effect driven by the number of rbd's rather than
        # their size - so it can be very lightweight.
        metadata_weight = weight * 0.01
        # Resize data pool weight to accommodate metadata weight
        weight = weight - metadata_weight
        # Create metadata pool
        rq.add_op_create_pool(
            name=metadata_pool_name, replica_count=replicas,
            weight=metadata_weight, group='images', app_name='rbd'
        )
        # Create erasure profile
        rq.add_op_create_erasure_profile(
            name=profile_name,
            k=bdm_k, m=bdm_m,
            lrc_locality=bdm_l,
            lrc_crush_locality=crush_locality,
            shec_durability_estimator=bdm_c,
            clay_helper_chunks=bdm_d,
            clay_scalar_mds=scalar_mds,
            device_class=device_class,
            erasure_type=plugin,
            erasure_technique=technique
        )
        # Create EC data pool
        # NOTE(fnordahl): once we deprecate Python 3.5 support we can do
        # the unpacking of the BlueStore compression arguments as part of
        # the function arguments. Until then we need to build the dict
        # prior to the function call.
        kwargs = {
            'name': pool_name,
            'erasure_profile': profile_name,
            'weight': weight,
            'group': "images",
            'app_name': "rbd",
            'allow_ec_overwrites': True,
        }
        kwargs.update(bluestore_compression.get_kwargs())
        rq.add_op_create_erasure_pool(**kwargs)
    else:
        # NOTE(fnordahl): once we deprecate Python 3.5 support we can do
        # the unpacking of the BlueStore compression arguments as part of
        # the function arguments. Until then we need to build the dict
        # prior to the function call.
        kwargs = {
            'name': pool_name,
            'replica_count': replicas,
            'weight': weight,
            'group': 'images',
            'app_name': 'rbd',
        }
        kwargs.update(bluestore_compression.get_kwargs())
        rq.add_op_create_replicated_pool(**kwargs)
    if config('restrict-ceph-pools'):
        rq.add_op_request_access_to_group(
            name="images",
            object_prefix_permissions={'class-read': ['rbd_children']},
            permission='rwx')
    return rq

View File

@ -356,116 +356,6 @@ class GlanceRelationTests(CharmTestCase):
for c in [call('/etc/glance/glance.conf')]:
self.assertNotIn(c, configs.write.call_args_list)
    @patch.object(relations, 'CephBlueStoreCompressionContext')
    @patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq'
           '.add_op_request_access_to_group')
    @patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq'
           '.add_op_create_replicated_pool')
    def test_create_pool_op(self, mock_create_pool,
                            mock_request_access,
                            mock_bluestore_compression):
        """get_ceph_request builds a replicated-pool broker request.

        Covers the default case, the 'restrict-ceph-pools' case and
        the BlueStore-compression case.
        """
        self.service_name.return_value = 'glance'
        self.test_config.set('ceph-osd-replication-count', 3)
        self.test_config.set('ceph-pool-weight', 6)
        relations.get_ceph_request()
        mock_create_pool.assert_called_once_with(
            name='glance',
            replica_count=3,
            weight=6,
            group='images',
            app_name='rbd')
        # No access restriction op unless 'restrict-ceph-pools' is set.
        mock_request_access.assert_not_called()
        self.test_config.set('restrict-ceph-pools', True)
        relations.get_ceph_request()
        mock_create_pool.assert_called_with(name='glance', replica_count=3,
                                            weight=6, group='images',
                                            app_name='rbd')
        mock_request_access.assert_has_calls([
            call(
                name='images',
                object_prefix_permissions={'class-read': ['rbd_children']},
                permission='rwx'),
        ])
        # confirm operation with bluestore compression
        mock_create_pool.reset_mock()
        mock_bluestore_compression().get_kwargs.return_value = {
            'compression_mode': 'fake',
        }
        relations.get_ceph_request()
        # The compression kwargs are merged into the pool-create op.
        mock_create_pool.assert_called_once_with(
            name='glance',
            replica_count=3,
            weight=6,
            group='images',
            app_name='rbd',
            compression_mode='fake')
    @patch.object(relations, 'CephBlueStoreCompressionContext')
    @patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq'
           '.add_op_create_erasure_pool')
    @patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq'
           '.add_op_create_erasure_profile')
    @patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq'
           '.add_op_request_access_to_group')
    @patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq'
           '.add_op_create_pool')
    def test_create_ec_pool_op(self, mock_create_pool,
                               mock_request_access,
                               mock_create_erasure_profile,
                               mock_create_erasure_pool,
                               mock_bluestore_compression):
        """get_ceph_request builds an erasure-coded pool broker request.

        Checks the metadata pool (1% of the configured weight), the
        erasure profile, the EC data pool (remaining 99% of weight)
        and the BlueStore-compression variant.
        """
        self.service_name.return_value = 'glance'
        self.test_config.set('ceph-osd-replication-count', 3)
        self.test_config.set('ceph-pool-weight', 6)
        self.test_config.set('pool-type', 'erasure-coded')
        self.test_config.set('ec-profile-plugin', 'isa')
        self.test_config.set('ec-profile-k', 6)
        self.test_config.set('ec-profile-m', 2)
        relations.get_ceph_request()
        # Metadata pool gets 1% of the configured weight (6 * 0.01).
        mock_create_pool.assert_called_once_with(
            name='glance-metadata',
            replica_count=3,
            weight=0.06,
            group='images',
            app_name='rbd')
        mock_create_erasure_profile.assert_called_once_with(
            name='glance-profile',
            k=6, m=2,
            lrc_locality=None,
            lrc_crush_locality=None,
            shec_durability_estimator=None,
            clay_helper_chunks=None,
            clay_scalar_mds=None,
            device_class=None,
            erasure_type='isa',
            erasure_technique=None,
        )
        # Data pool gets the remaining weight (6 - 0.06).
        mock_create_erasure_pool.assert_called_once_with(
            name='glance',
            erasure_profile='glance-profile',
            weight=5.94,
            group='images',
            app_name='rbd',
            allow_ec_overwrites=True)
        mock_request_access.assert_not_called()
        # confirm operation with bluestore compression
        mock_create_erasure_pool.reset_mock()
        mock_bluestore_compression().get_kwargs.return_value = {
            'compression_mode': 'fake',
        }
        relations.get_ceph_request()
        mock_create_erasure_pool.assert_called_once_with(
            name='glance',
            erasure_profile='glance-profile',
            weight=5.94,
            group='images',
            app_name='rbd',
            allow_ec_overwrites=True,
            compression_mode='fake')
@patch.object(relations, 'get_ceph_request')
@patch.object(relations, 'send_request_if_needed')
@patch.object(relations, 'is_request_complete')

View File

@ -62,7 +62,7 @@ class TestGlanceUtils(CharmTestCase):
def setUp(self):
super(TestGlanceUtils, self).setUp(utils, TO_PATCH)
self.config.side_effect = self.test_config.get_all
self.config.side_effect = self.test_config.get
@patch('subprocess.check_call')
def test_migrate_database(self, check_call):
@ -498,3 +498,114 @@ class TestGlanceUtils(CharmTestCase):
db_obj.get.assert_has_calls([call('policy_get_image_location'),
call('policy_set_image_location'),
call('policy_delete_image_location')])
    @patch.object(utils, 'CephBlueStoreCompressionContext')
    @patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq'
           '.add_op_request_access_to_group')
    @patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq'
           '.add_op_create_replicated_pool')
    def test_create_pool_op(self, mock_create_pool,
                            mock_request_access,
                            mock_bluestore_compression):
        """utils.get_ceph_request builds a replicated-pool broker request.

        Covers the default case, the 'restrict-ceph-pools' case and
        the BlueStore-compression case.
        """
        self.service_name.return_value = 'glance'
        self.test_config.set('ceph-osd-replication-count', 3)
        self.test_config.set('ceph-pool-weight', 6)
        utils.get_ceph_request()
        mock_create_pool.assert_called_once_with(
            name='glance',
            replica_count=3,
            weight=6,
            group='images',
            app_name='rbd')
        # No access restriction op unless 'restrict-ceph-pools' is set.
        mock_request_access.assert_not_called()
        self.test_config.set('restrict-ceph-pools', True)
        utils.get_ceph_request()
        mock_create_pool.assert_called_with(name='glance', replica_count=3,
                                            weight=6, group='images',
                                            app_name='rbd')
        mock_request_access.assert_has_calls([
            call(
                name='images',
                object_prefix_permissions={'class-read': ['rbd_children']},
                permission='rwx'),
        ])
        # confirm operation with bluestore compression
        mock_create_pool.reset_mock()
        mock_bluestore_compression().get_kwargs.return_value = {
            'compression_mode': 'fake',
        }
        utils.get_ceph_request()
        # The compression kwargs are merged into the pool-create op.
        mock_create_pool.assert_called_once_with(
            name='glance',
            replica_count=3,
            weight=6,
            group='images',
            app_name='rbd',
            compression_mode='fake')
    @patch.object(utils, 'CephBlueStoreCompressionContext')
    @patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq'
           '.add_op_create_erasure_pool')
    @patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq'
           '.add_op_create_erasure_profile')
    @patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq'
           '.add_op_request_access_to_group')
    @patch('charmhelpers.contrib.storage.linux.ceph.CephBrokerRq'
           '.add_op_create_pool')
    def test_create_ec_pool_op(self, mock_create_pool,
                               mock_request_access,
                               mock_create_erasure_profile,
                               mock_create_erasure_pool,
                               mock_bluestore_compression):
        """utils.get_ceph_request builds an erasure-coded pool request.

        Checks the metadata pool (1% of the configured weight), the
        erasure profile, the EC data pool (remaining 99% of weight,
        with an explicit 'rbd-pool-name') and the
        BlueStore-compression variant.
        """
        self.service_name.return_value = 'glance'
        self.test_config.set('ceph-osd-replication-count', 3)
        self.test_config.set('ceph-pool-weight', 6)
        self.test_config.set('pool-type', 'erasure-coded')
        self.test_config.set('ec-profile-plugin', 'isa')
        self.test_config.set('ec-profile-k', 6)
        self.test_config.set('ec-profile-m', 2)
        self.test_config.set('rbd-pool-name', 'glance')
        utils.get_ceph_request()
        # Metadata pool gets 1% of the configured weight (6 * 0.01).
        mock_create_pool.assert_called_once_with(
            name='glance-metadata',
            replica_count=3,
            weight=0.06,
            group='images',
            app_name='rbd')
        mock_create_erasure_profile.assert_called_once_with(
            name='glance-profile',
            k=6, m=2,
            lrc_locality=None,
            lrc_crush_locality=None,
            shec_durability_estimator=None,
            clay_helper_chunks=None,
            clay_scalar_mds=None,
            device_class=None,
            erasure_type='isa',
            erasure_technique=None,
        )
        # Data pool gets the remaining weight (6 - 0.06).
        mock_create_erasure_pool.assert_called_once_with(
            name='glance',
            erasure_profile='glance-profile',
            weight=5.94,
            group='images',
            app_name='rbd',
            allow_ec_overwrites=True)
        mock_request_access.assert_not_called()
        # confirm operation with bluestore compression
        mock_create_erasure_pool.reset_mock()
        mock_bluestore_compression().get_kwargs.return_value = {
            'compression_mode': 'fake',
        }
        utils.get_ceph_request()
        mock_create_erasure_pool.assert_called_once_with(
            name='glance',
            erasure_profile='glance-profile',
            weight=5.94,
            group='images',
            app_name='rbd',
            allow_ec_overwrites=True,
            compression_mode='fake')