Apply OSD settings from mons.

Apply OSD settings requested by the mons via the juju relation.
Add the OSD settings to the rendered config too. Before applying the
settings, config-flags is checked to ensure there is no overlap.

Change-Id: Id69222217a1c99d0269831913abdf488791cb572
This commit is contained in:
Liam Young 2020-04-23 13:04:51 +00:00
parent cb0f757f18
commit 3e795f6a62
10 changed files with 620 additions and 32 deletions

View File

@ -93,8 +93,7 @@ from charmhelpers.contrib.network.ip import (
format_ipv6_addr,
get_relation_ip,
)
from charmhelpers.contrib.storage.linux.ceph import (
CephConfContext)
import charmhelpers.contrib.storage.linux.ceph as ch_ceph
from charmhelpers.contrib.storage.linux.utils import (
is_device_mounted,
is_block_device,
@ -435,7 +434,8 @@ def get_ceph_context(upgrading=False):
# NOTE(dosaboy): these sections must correspond to what is supported in the
# config template.
sections = ['global', 'osd']
cephcontext.update(CephConfContext(permitted_sections=sections)())
cephcontext.update(
ch_ceph.CephOSDConfContext(permitted_sections=sections)())
return cephcontext
@ -656,6 +656,9 @@ def mon_relation():
import_osd_bootstrap_key(bootstrap_key)
import_osd_upgrade_key(upgrade_key)
prepare_disks_and_activate()
_, settings, _ = (ch_ceph.CephOSDConfContext()
.filter_osd_from_mon_settings())
ceph.apply_osd_settings(settings)
else:
log('mon cluster has not yet provided conf')

View File

@ -13,7 +13,7 @@
# limitations under the License.
# Common python helper functions used for OpenStack charms.
from collections import OrderedDict
from collections import OrderedDict, namedtuple
from functools import wraps
import subprocess
@ -36,15 +36,20 @@ from charmhelpers.contrib.network import ip
from charmhelpers.core import unitdata
from charmhelpers.core.hookenv import (
WORKLOAD_STATES,
action_fail,
action_set,
config,
expected_peer_units,
expected_related_units,
log as juju_log,
charm_dir,
INFO,
ERROR,
metadata,
related_units,
relation_get,
relation_id,
relation_ids,
relation_set,
status_set,
@ -53,6 +58,7 @@ from charmhelpers.core.hookenv import (
cached,
leader_set,
leader_get,
local_unit,
)
from charmhelpers.core.strutils import (
@ -108,6 +114,10 @@ from charmhelpers.contrib.openstack.policyd import (
POLICYD_CONFIG_NAME,
)
from charmhelpers.contrib.openstack.ha.utils import (
expect_ha,
)
CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
@ -1810,6 +1820,16 @@ def os_application_version_set(package):
application_version_set(application_version)
def os_application_status_set(check_function):
    """Evaluate the supplied function and publish the result as the
    application-level workload status.

    :param check_function: Function to run to get app states and messages.
    :type check_function: function
    """
    app_state, app_message = check_function()
    status_set(app_state, app_message, application=True)
def enable_memcache(source=None, release=None, package=None):
"""Determine if memcache should be enabled on the local unit
@ -2046,3 +2066,287 @@ def is_db_maintenance_mode(relid=None):
'WARN')
pass
return True in notifications
@cached
def container_scoped_relations():
    """Get all the container scoped relations.

    :returns: List of relation names
    :rtype: List
    """
    md = metadata()
    return [
        name
        for section in ('provides', 'requires', 'peers')
        for name in md.get(section, {})
        if md[section][name].get('scope') == 'container'
    ]
def is_db_ready(use_current_context=False, rel_name=None):
    """Check remote database is ready to be used.

    Database relations are expected to provide a list of 'allowed' units to
    confirm that the database is ready for use by those units.

    If the db relation has provided this information and the local unit is a
    member, returns True, otherwise False.

    :param use_current_context: Whether to limit checks to current hook
                                context.
    :type use_current_context: bool
    :param rel_name: Name of relation to check
    :type rel_name: string
    :returns: Whether remote db is ready.
    :rtype: bool
    :raises: Exception
    """
    rel_name = rel_name or 'shared-db'
    this_unit = local_unit()

    if use_current_context:
        # Only valid when running inside a hook for this relation type.
        if relation_id() not in relation_ids(rel_name):
            raise Exception("use_current_context=True but not in {} "
                            "rel hook contexts (currently in {})."
                            .format(rel_name, relation_id()))
        rids_units = [(None, None)]
    else:
        rids_units = [(r_id, u)
                      for r_id in relation_ids(rel_name)
                      for u in related_units(r_id)]

    for rid, unit in rids_units:
        allowed = relation_get(rid=rid, unit=unit, attribute='allowed_units')
        if allowed and this_unit in allowed.split():
            juju_log("This unit ({}) is in allowed unit list from {}".format(
                this_unit,
                unit), 'DEBUG')
            return True
    juju_log("This unit was not found in any allowed unit list")
    return False
def is_expected_scale(peer_relation_name='cluster'):
    """Query juju goal-state to determine whether our peer- and dependency-
    relations are at the expected scale.

    Useful for deferring per unit per relation housekeeping work until we are
    ready to complete it successfully and without unnecessary repetition.

    Always returns True if version of juju used does not support goal-state.

    :param peer_relation_name: Name of peer relation
    :type peer_relation_name: string
    :returns: True or False
    :rtype: bool
    """
    def _get_relation_id(rel_type):
        # First relation id of the given type, or None when not related.
        return next((rid for rid in relation_ids(reltype=rel_type)), None)

    Relation = namedtuple('Relation', 'rel_type rel_id')
    peer_rid = _get_relation_id(peer_relation_name)
    # Units with no peers should still have a peer relation.
    if not peer_rid:
        juju_log('Not at expected scale, no peer relation found', 'DEBUG')
        return False
    expected_relations = [
        Relation(rel_type='shared-db', rel_id=_get_relation_id('shared-db'))]
    if expect_ha():
        expected_relations.append(
            Relation(
                rel_type='ha',
                rel_id=_get_relation_id('ha')))
    juju_log(
        'Checking scale of {} relations'.format(
            ','.join([r.rel_type for r in expected_relations])),
        'DEBUG')
    try:
        # Peer relation first: all expected peers must be present.
        if (len(related_units(relid=peer_rid)) <
                len(list(expected_peer_units()))):
            return False
        for rel in expected_relations:
            if not rel.rel_id:
                juju_log(
                    'Expected to find {} relation, but it is missing'.format(
                        rel.rel_type),
                    'DEBUG')
                return False
            # Goal state returns every unit even for container scoped
            # relations but the charm only ever has a relation with
            # the local unit.
            if rel.rel_type in container_scoped_relations():
                expected_count = 1
            else:
                expected_count = len(
                    list(expected_related_units(reltype=rel.rel_type)))
            if len(related_units(relid=rel.rel_id)) < expected_count:
                juju_log(
                    ('Not at expected scale, not enough units on {} '
                     'relation'.format(rel.rel_type)),
                    'DEBUG')
                return False
    except NotImplementedError:
        # Juju without goal-state support: assume expected scale.
        return True
    juju_log('All checks have passed, unit is at expected scale', 'DEBUG')
    return True
def get_peer_key(unit_name):
    """Get the peer key for this unit.

    The peer key is the key a unit uses to publish its status down the peer
    relation.

    :param unit_name: Name of unit
    :type unit_name: string
    :returns: Peer key for given unit
    :rtype: string
    """
    sanitized_name = unit_name.replace('/', '-')
    return 'unit-state-{}'.format(sanitized_name)
UNIT_READY = 'READY'
UNIT_NOTREADY = 'NOTREADY'
UNIT_UNKNOWN = 'UNKNOWN'
UNIT_STATES = [UNIT_READY, UNIT_NOTREADY, UNIT_UNKNOWN]
def inform_peers_unit_state(state, relation_name='cluster'):
    """Inform peers of the state of this unit.

    :param state: State of unit to publish
    :type state: string
    :param relation_name: Name of relation to publish state on
    :type relation_name: string
    :raises: ValueError if state is not a recognised unit state
    """
    if state not in UNIT_STATES:
        raise ValueError(
            "Setting invalid state {} for unit".format(state))
    # The key is derived from this unit's name, so it is the same for
    # every relation we publish on.
    settings = {get_peer_key(local_unit()): state}
    for rid in relation_ids(relation_name):
        relation_set(relation_id=rid, relation_settings=settings)
def get_peers_unit_state(relation_name='cluster'):
    """Get the state of all peers.

    :param relation_name: Name of relation to check peers on.
    :type relation_name: string
    :returns: Unit states keyed on unit name.
    :rtype: dict
    :raises: ValueError if a peer published an unrecognised state
    """
    unit_states = {}
    for rid in relation_ids(relation_name):
        for unit in related_units(rid):
            settings = relation_get(unit=unit, rid=rid)
            state = settings.get(get_peer_key(unit), UNIT_UNKNOWN)
            if state not in UNIT_STATES:
                raise ValueError(
                    "Unit in unknown state {}".format(state))
            unit_states[unit] = state
    return unit_states
def are_peers_ready(relation_name='cluster'):
    """Check if all peers are ready.

    :param relation_name: Name of relation to check peers on.
    :type relation_name: string
    :returns: Whether all units are ready.
    :rtype: bool
    """
    states = get_peers_unit_state(relation_name).values()
    return all(state == UNIT_READY for state in states)
def inform_peers_if_ready(check_unit_ready_func, relation_name='cluster'):
    """Inform peers if this unit is ready.

    The check function should return a tuple (ready, message) where a truthy
    first element indicates the unit is READY.

    :param check_unit_ready_func: Function to run to check readiness
    :type check_unit_ready_func: function
    :param relation_name: Name of relation to check peers on.
    :type relation_name: string
    """
    is_ready, _ = check_unit_ready_func()
    state = UNIT_READY if is_ready else UNIT_NOTREADY
    juju_log('Telling peers this unit is: {}'.format(state), 'DEBUG')
    inform_peers_unit_state(state, relation_name)
def check_api_unit_ready(check_db_ready=True):
    """Check if this unit is ready.

    :param check_db_ready: Include checks of database readiness.
    :type check_db_ready: bool
    :returns: Whether unit state is ready and status message
    :rtype: (bool, str)
    """
    state, message = get_api_unit_status(check_db_ready=check_db_ready)
    is_active = state == WORKLOAD_STATES.ACTIVE
    return is_active, message
def get_api_unit_status(check_db_ready=True):
    """Return a workload status and message for this unit.

    Checks run in priority order; the first one that trips determines the
    reported state. If none trip the unit is ACTIVE.

    :param check_db_ready: Include checks of database readiness.
    :type check_db_ready: bool
    :returns: Workload state and message
    :rtype: (WORKLOAD_STATES, str)
    """
    # Lambdas keep later (possibly expensive) checks lazy.
    checks = [
        (is_db_maintenance_mode,
         WORKLOAD_STATES.MAINTENANCE,
         'Database in maintenance mode.'),
        (is_unit_paused_set,
         WORKLOAD_STATES.BLOCKED,
         'Unit paused.'),
        (lambda: check_db_ready and not is_db_ready(),
         WORKLOAD_STATES.WAITING,
         'Allowed_units list provided but this unit not present'),
        (lambda: not is_db_initialised(),
         WORKLOAD_STATES.WAITING,
         'Database not initialised'),
        (lambda: not is_expected_scale(),
         WORKLOAD_STATES.WAITING,
         'Charm and its dependencies not yet at expected scale'),
    ]
    unit_state = WORKLOAD_STATES.ACTIVE
    msg = 'Unit is ready'
    for predicate, state, message in checks:
        if predicate():
            unit_state, msg = state, message
            break
    juju_log(msg, 'DEBUG')
    return unit_state, msg
def check_api_application_ready():
    """Check if this application is ready.

    :returns: Whether application state is ready and status message
    :rtype: (bool, str)
    """
    state, message = get_api_application_status()
    return state == WORKLOAD_STATES.ACTIVE, message
def get_api_application_status():
    """Return a workload status and message for this application.

    The application is only ACTIVE when this unit and all its peers are
    ready.

    :returns: Workload state and message
    :rtype: (WORKLOAD_STATES, str)
    """
    state, message = get_api_unit_status()
    if state != WORKLOAD_STATES.ACTIVE:
        return state, message
    if are_peers_ready():
        return WORKLOAD_STATES.ACTIVE, 'Application Ready'
    return WORKLOAD_STATES.WAITING, 'Some units are not ready'

View File

@ -140,9 +140,16 @@ def vault_relation_complete(backend=None):
:ptype backend: string
:returns: whether the relation to vault is complete
:rtype: bool"""
vault_kv = VaultKVContext(secret_backend=backend or VAULTLOCKER_BACKEND)
vault_kv()
return vault_kv.complete
try:
import hvac
except ImportError:
return False
try:
vault_kv = VaultKVContext(secret_backend=backend or VAULTLOCKER_BACKEND)
vault_kv()
return vault_kv.complete
except hvac.exceptions.InvalidRequest:
return False
# TODO: contrib a high level unwrap method to hvac that works

View File

@ -22,6 +22,7 @@
# Adam Gandelman <adamg@ubuntu.com>
#
import collections
import errno
import hashlib
import math
@ -93,6 +94,88 @@ LEGACY_PG_COUNT = 200
DEFAULT_MINIMUM_PGS = 2
class OsdPostUpgradeError(Exception):
    """Error class for OSD post-upgrade operations."""
class OSDSettingConflict(Exception):
    """Error class for conflicting osd setting requests."""
class OSDSettingNotAllowed(Exception):
    """Error class for a disallowed setting."""
# Exceptions raised by get_osd_settings(); grouped so callers can catch
# both problem classes in one except clause.
OSD_SETTING_EXCEPTIONS = (OSDSettingConflict, OSDSettingNotAllowed)

# Only settings named here may be requested by clients via the
# 'osd-settings' relation data; anything else raises OSDSettingNotAllowed.
OSD_SETTING_WHITELIST = [
    'osd heartbeat grace',
    'osd heartbeat interval',
]
def _order_dict_by_key(rdict):
"""Convert a dictionary into an OrderedDict sorted by key.
:param rdict: Dictionary to be ordered.
:type rdict: dict
:returns: Ordered Dictionary.
:rtype: collections.OrderedDict
"""
return collections.OrderedDict(sorted(rdict.items(), key=lambda k: k[0]))
def get_osd_settings(relation_name):
    """Consolidate requested osd settings from all clients.

    Consolidate requested osd settings from all clients. Check that each
    requested setting is on the whitelist and that it does not conflict
    with any other requested settings.

    :param relation_name: Name of relation to gather settings from.
    :type relation_name: str
    :returns: Dictionary of settings
    :rtype: dict
    :raises: OSDSettingNotAllowed
    :raises: OSDSettingConflict
    """
    osd_settings = {}
    for relid in relation_ids(relation_name):
        for unit in related_units(relid):
            raw = relation_get('osd-settings', unit, relid) or '{}'
            for key, value in json.loads(raw).items():
                if key not in OSD_SETTING_WHITELIST:
                    raise OSDSettingNotAllowed(
                        'Illegal settings "{}"'.format(key))
                if key not in osd_settings:
                    osd_settings[key] = value
                elif osd_settings[key] != value:
                    # Two clients asked for different values of the same
                    # setting; refuse rather than pick one arbitrarily.
                    raise OSDSettingConflict(
                        'Conflicting settings for "{}"'.format(key))
    return _order_dict_by_key(osd_settings)
def send_osd_settings():
    """Pass on requested OSD settings to osd units."""
    try:
        settings = get_osd_settings('client')
    except OSD_SETTING_EXCEPTIONS as e:
        # There is a problem with the settings, not passing them on. Update
        # status will notify the user.
        log(e, level=ERROR)
        return
    payload = {'osd-settings': json.dumps(settings, sort_keys=True)}
    for relid in relation_ids('osd'):
        relation_set(relation_id=relid, relation_settings=payload)
def validator(value, valid_type, valid_range=None):
"""
Used to validate these: http://docs.ceph.com/docs/master/rados/operations/pools/#set-pool-values
@ -1635,5 +1718,67 @@ class CephConfContext(object):
continue
ceph_conf[key] = conf[key]
return ceph_conf
class CephOSDConfContext(CephConfContext):
    """Ceph config (ceph.conf) context.

    Consolidates settings from config-flags via CephConfContext with
    settings provided by the mons. The config-flag values are preserved in
    conf['osd'], settings from the mons which do not clash with config-flag
    settings are in conf['osd_from_client'] and finally settings which do
    clash are in conf['osd_from_client_conflict']. Rather than silently drop
    the conflicting settings they are provided in the context so they can be
    rendered commented out to give some visibility to the admin.
    """

    def __init__(self, permitted_sections=None):
        super(CephOSDConfContext, self).__init__(
            permitted_sections=permitted_sections)
        try:
            self.settings_from_mons = get_osd_settings('mon')
        except OSDSettingConflict:
            # Conflicting requests cannot be reconciled; fall back to an
            # empty set of mon-provided settings.
            log(
                "OSD settings from mons are inconsistent, ignoring them",
                level=WARNING)
            self.settings_from_mons = {}

    def filter_osd_from_mon_settings(self):
        """Filter settings from client relation against config-flags.

        A mon-provided setting whose key matches a config-flag entry with
        the same value is dropped entirely (it is already rendered).

        :returns: A tuple (
            ,config-flag values,
            ,client settings which do not conflict with config-flag values,
            ,client settings which conflict with config-flag values)
        :rtype: (OrderedDict, OrderedDict, OrderedDict)
        """
        ceph_conf = super(CephOSDConfContext, self).__call__()
        conflicting_entries = {}
        clear_entries = {}
        for key, value in self.settings_from_mons.items():
            if key in ceph_conf.get('osd', {}):
                # Key also set via config-flags; only a conflict if the
                # values actually differ.
                if ceph_conf['osd'][key] != value:
                    conflicting_entries[key] = value
            else:
                clear_entries[key] = value
        clear_entries = _order_dict_by_key(clear_entries)
        conflicting_entries = _order_dict_by_key(conflicting_entries)
        return ceph_conf, clear_entries, conflicting_entries

    def __call__(self):
        """Construct OSD config context.

        Standard context with two additional special keys.
            osd_from_client_conflict: client settings which conflict with
                                      config-flag values
            osd_from_client: settings which do not conflict with config-flag
                             values

        :returns: OSD config context dict.
        :rtype: dict
        """
        conf, osd_clear, osd_conflict = self.filter_osd_from_mon_settings()
        conf['osd_from_client_conflict'] = osd_conflict
        conf['osd_from_client'] = osd_clear
        return conf

View File

@ -32,6 +32,10 @@ def loopback_devices():
/dev/loop0: [0807]:961814 (/tmp/my.img)
or:
/dev/loop0: [0807]:961814 (/tmp/my.img (deleted))
:returns: dict: a dict mapping {loopback_dev: backing_file}
'''
loopbacks = {}
@ -39,9 +43,9 @@ def loopback_devices():
output = check_output(cmd)
if six.PY3:
output = output.decode('utf-8')
devs = [d.strip().split(' ') for d in output.splitlines() if d != '']
devs = [d.strip().split(' ', 2) for d in output.splitlines() if d != '']
for dev, _, f in devs:
loopbacks[dev.replace(':', '')] = re.search(r'\((\S+)\)', f).groups()[0]
loopbacks[dev.replace(':', '')] = re.search(r'\((.+)\)', f).groups()[0]
return loopbacks

View File

@ -21,6 +21,7 @@
from __future__ import print_function
import copy
from distutils.version import LooseVersion
from enum import Enum
from functools import wraps
from collections import namedtuple
import glob
@ -57,6 +58,14 @@ RANGE_WARNING = ('Passing NO_PROXY string that includes a cidr. '
'This may not be compatible with software you are '
'running in your shell.')
class WORKLOAD_STATES(Enum):
    # Valid Juju workload states; the value is the string passed to the
    # status-set hook tool.
    ACTIVE = 'active'
    BLOCKED = 'blocked'
    MAINTENANCE = 'maintenance'
    WAITING = 'waiting'
cache = {}
@ -1088,22 +1097,33 @@ def function_tag():
return os.environ.get('JUJU_FUNCTION_TAG') or action_tag()
def status_set(workload_state, message):
def status_set(workload_state, message, application=False):
"""Set the workload state with a message
Use status-set to set the workload state with a message which is visible
to the user via juju status. If the status-set command is not found then
assume this is juju < 1.23 and juju-log the message unstead.
assume this is juju < 1.23 and juju-log the message instead.
workload_state -- valid juju workload state.
message -- status update message
workload_state -- valid juju workload state. str or WORKLOAD_STATES
message -- status update message
application -- Whether this is an application state set
"""
valid_states = ['maintenance', 'blocked', 'waiting', 'active']
if workload_state not in valid_states:
raise ValueError(
'{!r} is not a valid workload state'.format(workload_state)
)
cmd = ['status-set', workload_state, message]
bad_state_msg = '{!r} is not a valid workload state'
if isinstance(workload_state, str):
try:
# Convert string to enum.
workload_state = WORKLOAD_STATES[workload_state.upper()]
except KeyError:
raise ValueError(bad_state_msg.format(workload_state))
if workload_state not in WORKLOAD_STATES:
raise ValueError(bad_state_msg.format(workload_state))
cmd = ['status-set']
if application:
cmd.append('--application')
cmd.extend([workload_state.value, message])
try:
ret = subprocess.call(cmd)
if ret == 0:
@ -1111,7 +1131,7 @@ def status_set(workload_state, message):
except OSError as e:
if e.errno != errno.ENOENT:
raise
log_message = 'status-set failed: {} {}'.format(workload_state,
log_message = 'status-set failed: {} {}'.format(workload_state.value,
message)
log(log_message, level='INFO')
@ -1526,13 +1546,13 @@ def env_proxy_settings(selected_settings=None):
"""Get proxy settings from process environment variables.
Get charm proxy settings from environment variables that correspond to
juju-http-proxy, juju-https-proxy and juju-no-proxy (available as of 2.4.2,
see lp:1782236) in a format suitable for passing to an application that
reacts to proxy settings passed as environment variables. Some applications
support lowercase or uppercase notation (e.g. curl), some support only
lowercase (e.g. wget), there are also subjectively rare cases of only
uppercase notation support. no_proxy CIDR and wildcard support also varies
between runtimes and applications as there is no enforced standard.
juju-http-proxy, juju-https-proxy juju-no-proxy (available as of 2.4.2, see
lp:1782236) and juju-ftp-proxy in a format suitable for passing to an
application that reacts to proxy settings passed as environment variables.
Some applications support lowercase or uppercase notation (e.g. curl), some
support only lowercase (e.g. wget), there are also subjectively rare cases
of only uppercase notation support. no_proxy CIDR and wildcard support also
varies between runtimes and applications as there is no enforced standard.
Some applications may connect to multiple destinations and expose config
options that would affect only proxy settings for a specific destination

View File

@ -17,14 +17,17 @@
import yaml
from subprocess import check_call
from subprocess import check_call, CalledProcessError
from charmhelpers.core.hookenv import (
log,
DEBUG,
ERROR,
WARNING,
)
from charmhelpers.core.host import is_container
__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
@ -62,4 +65,11 @@ def create(sysctl_dict, sysctl_file, ignore=False):
if ignore:
call.append("-e")
check_call(call)
try:
check_call(call)
except CalledProcessError as e:
if is_container():
log("Error setting some sysctl keys in this container: {}".format(e.output),
level=WARNING)
else:
raise e

View File

@ -1113,6 +1113,7 @@ _default_caps = collections.OrderedDict([
admin_caps = collections.OrderedDict([
('mds', ['allow *']),
('mgr', ['allow *']),
('mon', ['allow *']),
('osd', ['allow *'])
])
@ -2640,6 +2641,7 @@ def get_osd_state(osd_num, osd_goal_state=None):
return osd_state
if osd_state == osd_goal_state:
return osd_state
time.sleep(3)
def get_all_osd_states(osd_goal_states=None):
@ -2898,6 +2900,7 @@ UPGRADE_PATHS = collections.OrderedDict([
('jewel', 'luminous'),
('luminous', 'mimic'),
('mimic', 'nautilus'),
('nautilus', 'octopus'),
])
# Map UCA codenames to ceph codenames
@ -2914,6 +2917,7 @@ UCA_CODENAME_MAP = {
'rocky': 'mimic',
'stein': 'mimic',
'train': 'nautilus',
'ussuri': 'octopus',
}
@ -3066,3 +3070,57 @@ def osd_noout(enable):
except subprocess.CalledProcessError as e:
log(e)
raise
class OSDConfigSetError(Exception):
    """Error occurred applying OSD settings."""
def apply_osd_settings(settings):
    """Apply the provided osd settings.

    Apply the provided settings to all local OSDs unless the settings are
    already present. Settings stop being applied on encountering an error.

    :param settings: Dictionary of settings to apply.
    :type settings: dict
    :returns: True if commands ran successfully, False if a current value
              could not be retrieved.
    :rtype: bool
    :raises: OSDConfigSetError
    """
    observed = {}
    base_cmd = 'ceph daemon osd.{osd_id} config --format=json'
    get_cmd = base_cmd + ' get {key}'
    set_cmd = base_cmd + ' set {key} {value}'

    def _get_cli_key(key):
        # The admin socket expects underscores rather than spaces.
        return key.replace(' ', '_')

    def _run_json(cmd):
        # Run the admin-socket command and decode its JSON response.
        return json.loads(
            subprocess.check_output(cmd.split()).decode('UTF-8'))

    # Retrieve the current values to check keys are correct and to make this
    # a noop if settings are already applied.
    for osd_id in get_local_osd_ids():
        for key in sorted(settings):
            cli_key = _get_cli_key(key)
            out = _run_json(get_cmd.format(osd_id=osd_id, key=cli_key))
            if 'error' in out:
                log("Error retrieving osd setting: {}".format(out['error']),
                    level=ERROR)
                return False
            observed[key] = out[cli_key]
        # Only issue 'set' for keys whose current value differs.
        pending = {
            k: v for k, v in settings.items()
            if str(v) != str(observed[k])}
        for key, value in sorted(pending.items()):
            log("Setting {} to {}".format(key, value), level=DEBUG)
            out = _run_json(set_cmd.format(
                osd_id=osd_id, key=_get_cli_key(key), value=value))
            if 'error' in out:
                log("Error applying osd setting: {}".format(out['error']),
                    level=ERROR)
                raise OSDConfigSetError
    return True

View File

@ -88,6 +88,20 @@ osd max backfills = {{ osd_max_backfills }}
{%- if osd_recovery_max_active %}
osd recovery max active = {{ osd_recovery_max_active }}
{%- endif %}
{% if osd_from_client -%}
# The following are charm provided options provided via the mon relation.
{% for key in osd_from_client -%}
{{ key }} = {{ osd_from_client[key] }}
{% endfor %}
{% endif %}
{% if osd_from_client_conflict -%}
# The following are charm provided options which conflict with options from
# config-flags.
{% for key in osd_from_client_conflict -%}
# {{ key }} = {{ osd_from_client_conflict[key] }}
{% endfor %}
{% endif %}
{% if osd -%}
# The following are user-provided options provided via the config-flags charm option.
{% for key in osd -%}

View File

@ -13,6 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
import copy
import unittest
@ -54,6 +55,7 @@ class CephHooksTestCase(unittest.TestCase):
def setUp(self):
super(CephHooksTestCase, self).setUp()
@patch.object(ceph_hooks.ch_ceph, 'get_osd_settings', lambda *args: {})
@patch.object(ceph_hooks, 'get_fsid', lambda *args: '1234')
@patch.object(ceph_hooks, 'get_auth', lambda *args: False)
@patch.object(ceph_hooks, 'get_public_addr', lambda *args: "10.0.0.1")
@ -82,6 +84,8 @@ class CephHooksTestCase(unittest.TestCase):
'osd_journal_size': 1024,
'osd_max_backfills': 1,
'osd_recovery_max_active': 2,
'osd_from_client': OrderedDict(),
'osd_from_client_conflict': OrderedDict(),
'public_addr': '10.0.0.1',
'short_object_len': True,
'upgrade_in_progress': False,
@ -93,6 +97,7 @@ class CephHooksTestCase(unittest.TestCase):
'bluestore_block_db_size': 0}
self.assertEqual(ctxt, expected)
@patch.object(ceph_hooks.ch_ceph, 'get_osd_settings', lambda *args: {})
@patch.object(ceph_hooks, 'get_fsid', lambda *args: '1234')
@patch.object(ceph_hooks, 'get_auth', lambda *args: False)
@patch.object(ceph_hooks, 'get_public_addr', lambda *args: "10.0.0.1")
@ -123,6 +128,8 @@ class CephHooksTestCase(unittest.TestCase):
'osd_journal_size': 1024,
'osd_max_backfills': 1,
'osd_recovery_max_active': 2,
'osd_from_client': OrderedDict(),
'osd_from_client_conflict': OrderedDict(),
'public_addr': '10.0.0.1',
'short_object_len': True,
'upgrade_in_progress': False,
@ -134,6 +141,7 @@ class CephHooksTestCase(unittest.TestCase):
'bluestore_block_db_size': 0}
self.assertEqual(ctxt, expected)
@patch.object(ceph_hooks.ch_ceph, 'get_osd_settings', lambda *args: {})
@patch.object(ceph_hooks, 'get_fsid', lambda *args: '1234')
@patch.object(ceph_hooks, 'get_auth', lambda *args: False)
@patch.object(ceph_hooks, 'get_public_addr', lambda *args: "10.0.0.1")
@ -163,6 +171,8 @@ class CephHooksTestCase(unittest.TestCase):
'osd_journal_size': 1024,
'osd_max_backfills': 1,
'osd_recovery_max_active': 2,
'osd_from_client': OrderedDict(),
'osd_from_client_conflict': OrderedDict(),
'public_addr': '10.0.0.1',
'short_object_len': True,
'upgrade_in_progress': False,
@ -174,6 +184,7 @@ class CephHooksTestCase(unittest.TestCase):
'bluestore_block_db_size': 0}
self.assertEqual(ctxt, expected)
@patch.object(ceph_hooks.ch_ceph, 'get_osd_settings', lambda *args: {})
@patch.object(ceph_hooks, 'get_fsid', lambda *args: '1234')
@patch.object(ceph_hooks, 'get_auth', lambda *args: False)
@patch.object(ceph_hooks, 'get_public_addr', lambda *args: "10.0.0.1")
@ -209,6 +220,8 @@ class CephHooksTestCase(unittest.TestCase):
'osd_journal_size': 1024,
'osd_max_backfills': 1,
'osd_recovery_max_active': 2,
'osd_from_client': OrderedDict(),
'osd_from_client_conflict': OrderedDict(),
'public_addr': '10.0.0.1',
'short_object_len': True,
'upgrade_in_progress': False,
@ -220,6 +233,7 @@ class CephHooksTestCase(unittest.TestCase):
'bluestore_block_db_size': BLUESTORE_DB_TEST_SIZE}
self.assertEqual(ctxt, expected)
@patch.object(ceph_hooks.ch_ceph, 'get_osd_settings', lambda *args: {})
@patch.object(ceph_hooks, 'get_fsid', lambda *args: '1234')
@patch.object(ceph_hooks, 'get_auth', lambda *args: False)
@patch.object(ceph_hooks, 'get_public_addr', lambda *args: "10.0.0.1")
@ -232,6 +246,7 @@ class CephHooksTestCase(unittest.TestCase):
@patch.object(ceph, 'config')
@patch.object(ceph_hooks, 'config')
def test_get_ceph_context_bluestore_old(self, mock_config, mock_config2):
self.maxDiff = None
config = copy.deepcopy(CHARM_CONFIG)
config['bluestore'] = True
config['bluestore-block-wal-size'] = BLUESTORE_WAL_TEST_SIZE
@ -252,6 +267,8 @@ class CephHooksTestCase(unittest.TestCase):
'osd_journal_size': 1024,
'osd_max_backfills': 1,
'osd_recovery_max_active': 2,
'osd_from_client': OrderedDict(),
'osd_from_client_conflict': OrderedDict(),
'public_addr': '10.0.0.1',
'short_object_len': True,
'upgrade_in_progress': False,
@ -263,6 +280,7 @@ class CephHooksTestCase(unittest.TestCase):
'bluestore_block_db_size': BLUESTORE_DB_TEST_SIZE}
self.assertEqual(ctxt, expected)
@patch.object(ceph_hooks.ch_ceph, 'get_osd_settings', lambda *args: {})
@patch.object(ceph_hooks, 'get_fsid', lambda *args: '1234')
@patch.object(ceph_hooks, 'get_auth', lambda *args: False)
@patch.object(ceph_hooks, 'get_public_addr', lambda *args: "10.0.0.1")
@ -288,11 +306,13 @@ class CephHooksTestCase(unittest.TestCase):
'loglevel': 1,
'mon_hosts': '10.0.0.1 10.0.0.2',
'old_auth': False,
'osd': {'osd max write size': 1024},
'osd': OrderedDict([('osd max write size', 1024)]),
'crush_initial_weight': '0',
'osd_journal_size': 1024,
'osd_max_backfills': 1,
'osd_recovery_max_active': 2,
'osd_from_client': OrderedDict(),
'osd_from_client_conflict': OrderedDict(),
'public_addr': '10.0.0.1',
'short_object_len': True,
'upgrade_in_progress': False,
@ -304,6 +324,7 @@ class CephHooksTestCase(unittest.TestCase):
'bluestore_block_db_size': 0}
self.assertEqual(ctxt, expected)
@patch.object(ceph_hooks.ch_ceph, 'get_osd_settings', lambda *args: {})
@patch.object(ceph_hooks, 'get_fsid', lambda *args: '1234')
@patch.object(ceph_hooks, 'get_auth', lambda *args: False)
@patch.object(ceph_hooks, 'get_public_addr', lambda *args: "10.0.0.1")
@ -331,11 +352,13 @@ class CephHooksTestCase(unittest.TestCase):
'loglevel': 1,
'mon_hosts': '10.0.0.1 10.0.0.2',
'old_auth': False,
'osd': {'osd max write size': 1024},
'osd': OrderedDict([('osd max write size', 1024)]),
'crush_initial_weight': '0',
'osd_journal_size': 1024,
'osd_max_backfills': 1,
'osd_recovery_max_active': 2,
'osd_from_client': OrderedDict(),
'osd_from_client_conflict': OrderedDict(),
'public_addr': '10.0.0.1',
'short_object_len': True,
'upgrade_in_progress': False,