Pre-freeze 'make sync'

Change-Id: Ic587b0a7b5f737258fcb069d0978cb7341d51158
Aurelien Lourot 2020-05-06 19:02:47 +02:00
parent f57bb43fe8
commit d4986ad101
21 changed files with 1653 additions and 53 deletions


@@ -12,11 +12,14 @@ include:
     - lvm
     - payload.execd
     - contrib.openstack:
-      - utils
-      - exceptions
       - alternatives
+      - exceptions
+      - ha
+      - ip
+      - utils
     - contrib.network.ip
     - contrib.charmsupport
     - contrib.hardening|inc=*
     - contrib.python
     - contrib.openstack.policyd
+    - contrib.hahelpers


@@ -0,0 +1,13 @@
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


@@ -0,0 +1,86 @@
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2012 Canonical Ltd.
#
# This file is sourced from lp:openstack-charm-helpers
#
# Authors:
# James Page <james.page@ubuntu.com>
# Adam Gandelman <adamg@ubuntu.com>
#
import os
from charmhelpers.core import host
from charmhelpers.core.hookenv import (
config as config_get,
relation_get,
relation_ids,
related_units as relation_list,
log,
INFO,
)
def get_cert(cn=None):
# TODO: deal with multiple https endpoints via charm config
cert = config_get('ssl_cert')
key = config_get('ssl_key')
if not (cert and key):
log("Inspecting identity-service relations for SSL certificate.",
level=INFO)
cert = key = None
if cn:
ssl_cert_attr = 'ssl_cert_{}'.format(cn)
ssl_key_attr = 'ssl_key_{}'.format(cn)
else:
ssl_cert_attr = 'ssl_cert'
ssl_key_attr = 'ssl_key'
for r_id in relation_ids('identity-service'):
for unit in relation_list(r_id):
if not cert:
cert = relation_get(ssl_cert_attr,
rid=r_id, unit=unit)
if not key:
key = relation_get(ssl_key_attr,
rid=r_id, unit=unit)
return (cert, key)
def get_ca_cert():
ca_cert = config_get('ssl_ca')
if ca_cert is None:
log("Inspecting identity-service relations for CA SSL certificate.",
level=INFO)
for r_id in (relation_ids('identity-service') +
relation_ids('identity-credentials')):
for unit in relation_list(r_id):
if ca_cert is None:
ca_cert = relation_get('ca_cert',
rid=r_id, unit=unit)
return ca_cert
def retrieve_ca_cert(cert_file):
cert = None
if os.path.isfile(cert_file):
with open(cert_file, 'rb') as crt:
cert = crt.read()
return cert
def install_ca_cert(ca_cert):
host.install_ca_cert(ca_cert, 'keystone_juju_ca_cert')
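For context, a minimal sketch of how a charm hook might consume these helpers (the call site, CN and module path are illustrative assumptions, not part of this sync):

    # Hypothetical charm-side usage of the helpers above.
    from charmhelpers.contrib.hahelpers.apache import (  # assumed module path
        get_cert, get_ca_cert, install_ca_cert)

    def configure_tls():
        # Prefer charm config; fall back to identity-service relation data.
        cert, key = get_cert(cn='keystone.example.com')  # example CN
        ca = get_ca_cert()
        if ca:
            # Installed into the system trust store as 'keystone_juju_ca_cert'.
            install_ca_cert(ca)
        return cert, key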


@@ -0,0 +1,451 @@
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2012 Canonical Ltd.
#
# Authors:
# James Page <james.page@ubuntu.com>
# Adam Gandelman <adamg@ubuntu.com>
#
"""
Helpers for clustering and determining "cluster leadership" and other
clustering-related helpers.
"""
import functools
import subprocess
import os
import time
from socket import gethostname as get_unit_hostname
import six
from charmhelpers.core.hookenv import (
log,
relation_ids,
related_units as relation_list,
relation_get,
config as config_get,
INFO,
DEBUG,
WARNING,
unit_get,
is_leader as juju_is_leader,
status_set,
)
from charmhelpers.core.host import (
modulo_distribution,
)
from charmhelpers.core.decorators import (
retry_on_exception,
)
from charmhelpers.core.strutils import (
bool_from_string,
)
DC_RESOURCE_NAME = 'DC'
class HAIncompleteConfig(Exception):
pass
class HAIncorrectConfig(Exception):
pass
class CRMResourceNotFound(Exception):
pass
class CRMDCNotFound(Exception):
pass
def is_elected_leader(resource):
"""
Returns True if the charm executing this is the elected cluster leader.
It relies on three mechanisms to determine leadership:
1. If juju is sufficiently new and leadership election is supported,
the is_leader command will be used.
2. If the charm is part of a corosync cluster, call corosync to
determine leadership.
3. If the charm is not part of a corosync cluster, the leader is
determined as being "the alive unit with the lowest unit number". In
other words, the oldest surviving unit.
"""
try:
return juju_is_leader()
except NotImplementedError:
log('Juju leadership election feature not enabled'
', using fallback support',
level=WARNING)
if is_clustered():
if not is_crm_leader(resource):
log('Deferring action to CRM leader.', level=INFO)
return False
else:
peers = peer_units()
if peers and not oldest_peer(peers):
log('Deferring action to oldest service unit.', level=INFO)
return False
return True
def is_clustered():
for r_id in (relation_ids('ha') or []):
for unit in (relation_list(r_id) or []):
clustered = relation_get('clustered',
rid=r_id,
unit=unit)
if clustered:
return True
return False
def is_crm_dc():
"""
Determine leadership by querying the pacemaker Designated Controller
"""
cmd = ['crm', 'status']
try:
status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
if not isinstance(status, six.text_type):
status = six.text_type(status, "utf-8")
except subprocess.CalledProcessError as ex:
raise CRMDCNotFound(str(ex))
current_dc = ''
for line in status.split('\n'):
if line.startswith('Current DC'):
# Current DC: juju-lytrusty-machine-2 (168108163) - partition with quorum
current_dc = line.split(':')[1].split()[0]
if current_dc == get_unit_hostname():
return True
elif current_dc == 'NONE':
raise CRMDCNotFound('Current DC: NONE')
return False
@retry_on_exception(5, base_delay=2,
exc_type=(CRMResourceNotFound, CRMDCNotFound))
def is_crm_leader(resource, retry=False):
"""
Returns True if the charm calling this is the elected corosync leader,
as returned by calling the external "crm" command.
We allow this operation to be retried to avoid the possibility of getting a
false negative. See LP #1396246 for more info.
"""
if resource == DC_RESOURCE_NAME:
return is_crm_dc()
cmd = ['crm', 'resource', 'show', resource]
try:
status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
if not isinstance(status, six.text_type):
status = six.text_type(status, "utf-8")
except subprocess.CalledProcessError:
status = None
if status and get_unit_hostname() in status:
return True
if status and "resource %s is NOT running" % (resource) in status:
raise CRMResourceNotFound("CRM resource %s not found" % (resource))
return False
def is_leader(resource):
log("is_leader is deprecated. Please consider using is_crm_leader "
"instead.", level=WARNING)
return is_crm_leader(resource)
def peer_units(peer_relation="cluster"):
peers = []
for r_id in (relation_ids(peer_relation) or []):
for unit in (relation_list(r_id) or []):
peers.append(unit)
return peers
def peer_ips(peer_relation='cluster', addr_key='private-address'):
'''Return a dict of peers and their private-address'''
peers = {}
for r_id in relation_ids(peer_relation):
for unit in relation_list(r_id):
peers[unit] = relation_get(addr_key, rid=r_id, unit=unit)
return peers
def oldest_peer(peers):
"""Determines who the oldest peer is by comparing unit numbers."""
local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
for peer in peers:
remote_unit_no = int(peer.split('/')[1])
if remote_unit_no < local_unit_no:
return False
return True
def eligible_leader(resource):
log("eligible_leader is deprecated. Please consider using "
"is_elected_leader instead.", level=WARNING)
return is_elected_leader(resource)
def https():
'''
Determines whether enough data has been provided in configuration
or relation data to configure HTTPS.

returns: boolean
'''
use_https = config_get('use-https')
if use_https and bool_from_string(use_https):
return True
if config_get('ssl_cert') and config_get('ssl_key'):
return True
for r_id in relation_ids('certificates'):
for unit in relation_list(r_id):
ca = relation_get('ca', rid=r_id, unit=unit)
if ca:
return True
for r_id in relation_ids('identity-service'):
for unit in relation_list(r_id):
# TODO - needs fixing for new helper as ssl_cert/key suffixes with CN
rel_state = [
relation_get('https_keystone', rid=r_id, unit=unit),
relation_get('ca_cert', rid=r_id, unit=unit),
]
# NOTE: works around (LP: #1203241)
if (None not in rel_state) and ('' not in rel_state):
return True
return False
def determine_api_port(public_port, singlenode_mode=False):
'''
Determine correct API server listening port based on
existence of HTTPS reverse proxy and/or haproxy.
public_port: int: standard public port for given service
singlenode_mode: boolean: Shuffle ports when only a single unit is present
returns: int: the correct listening port for the API service
'''
i = 0
if singlenode_mode:
i += 1
elif len(peer_units()) > 0 or is_clustered():
i += 1
if https():
i += 1
return public_port - (i * 10)
def determine_apache_port(public_port, singlenode_mode=False):
'''
Determine correct apache listening port based on public port and
state of the cluster.
public_port: int: standard public port for given service
singlenode_mode: boolean: Shuffle ports when only a single unit is present
returns: int: the correct listening port for the HAProxy service
'''
i = 0
if singlenode_mode:
i += 1
elif len(peer_units()) > 0 or is_clustered():
i += 1
return public_port - (i * 10)
determine_apache_port_single = functools.partial(
determine_apache_port, singlenode_mode=True)
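# Worked example of the port arithmetic above, assuming a hypothetical
# public port of 9696 on a clustered unit with HTTPS enabled:
#   determine_api_port(9696)    -> 9676  (i = 1 cluster + 1 https)
#   determine_apache_port(9696) -> 9686  (i = 1 cluster)
# giving the request path:
#   client -> 9696 (haproxy) -> 9686 (apache TLS) -> 9676 (API worker)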
def get_hacluster_config(exclude_keys=None):
'''
Obtains all relevant configuration from charm configuration required
for initiating a relation to hacluster:
ha-bindiface, ha-mcastport, vip, os-internal-hostname,
os-admin-hostname, os-public-hostname, os-access-hostname
param: exclude_keys: list of setting key(s) to be excluded.
returns: dict: A dict containing settings keyed by setting name.
raises: HAIncorrectConfig if settings are missing or incorrect.
'''
settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'os-internal-hostname',
'os-admin-hostname', 'os-public-hostname', 'os-access-hostname']
conf = {}
for setting in settings:
if exclude_keys and setting in exclude_keys:
continue
conf[setting] = config_get(setting)
if not valid_hacluster_config():
raise HAIncorrectConfig('Insufficient or incorrect config data to '
'configure hacluster.')
return conf
def valid_hacluster_config():
'''
Check that either vip or dns-ha is set. If dns-ha then one of os-*-hostname
must be set.
Note: ha-bindiface and ha-mcastport both have defaults and will always
be set. We only care that either vip or dns-ha is set.
:returns: boolean: valid config returns true.
raises: HAIncorrectConfig if settings conflict.
raises: HAIncompleteConfig if settings are missing.
'''
vip = config_get('vip')
dns = config_get('dns-ha')
if not(bool(vip) ^ bool(dns)):
msg = ('HA: Either vip or dns-ha must be set but not both in order to '
'use high availability')
status_set('blocked', msg)
raise HAIncorrectConfig(msg)
# If dns-ha then one of os-*-hostname must be set
if dns:
dns_settings = ['os-internal-hostname', 'os-admin-hostname',
'os-public-hostname', 'os-access-hostname']
# At this point it is unknown if one or all of the possible
# network spaces are in HA. Validate at least one is set which is
# the minimum required.
for setting in dns_settings:
if config_get(setting):
log('DNS HA: At least one hostname is set {}: {}'
''.format(setting, config_get(setting)),
level=DEBUG)
return True
msg = ('DNS HA: At least one os-*-hostname(s) must be set to use '
'DNS HA')
status_set('blocked', msg)
raise HAIncompleteConfig(msg)
log('VIP HA: VIP is set {}'.format(vip), level=DEBUG)
return True
def canonical_url(configs, vip_setting='vip'):
'''
Returns the correct HTTP URL to this host given the state of HTTPS
configuration and hacluster.
:configs: OSTemplateRenderer: A config templating object to inspect for
a complete https context.
:vip_setting: str: Setting in charm config that specifies
VIP address.
'''
scheme = 'http'
if 'https' in configs.complete_contexts():
scheme = 'https'
if is_clustered():
addr = config_get(vip_setting)
else:
addr = unit_get('private-address')
return '%s://%s' % (scheme, addr)
def distributed_wait(modulo=None, wait=None, operation_name='operation'):
''' Distribute operations by waiting based on modulo_distribution
If modulo and/or wait are not set, check config_get for those values.
If config values are not set, default to modulo=3 and wait=30.
:param modulo: int The modulo number creates the group distribution
:param wait: int The constant time wait value
:param operation_name: string Operation name for status message
i.e. 'restart'
:side effect: Calls config_get()
:side effect: Calls log()
:side effect: Calls status_set()
:side effect: Calls time.sleep()
'''
if modulo is None:
modulo = config_get('modulo-nodes') or 3
if wait is None:
wait = config_get('known-wait') or 30
if juju_is_leader():
# The leader should never wait
calculated_wait = 0
else:
# non_zero_wait=True guarantees the non-leader who gets modulo 0
# will still wait
calculated_wait = modulo_distribution(modulo=modulo, wait=wait,
non_zero_wait=True)
msg = "Waiting {} seconds for {} ...".format(calculated_wait,
operation_name)
log(msg, DEBUG)
status_set('maintenance', msg)
time.sleep(calculated_wait)
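# Illustrative use (assumed call site): stagger a service restart across
# peers so they do not all bounce at once. With modulo=3, wait=30 and
# non_zero_wait=True, non-leader units sleep 30, 60 or 90 seconds based
# on their unit number; the leader continues immediately.
#
#     distributed_wait(modulo=3, wait=30, operation_name='restart')
#     service_restart('my-api-service')   # hypothetical service name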
def get_managed_services_and_ports(services, external_ports,
external_services=None,
port_conv_f=determine_apache_port_single):
"""Get the services and ports managed by this charm.
Return only the services and corresponding ports that are managed by this
charm. This excludes haproxy when there is a relation with hacluster. This
is because this charm passes responsibility for stopping and starting
haproxy to hacluster.
Similarly, if a relation with hacluster exists then the ports returned by
this method correspond to those managed by the apache server rather than
haproxy.
:param services: List of services.
:type services: List[str]
:param external_ports: List of ports managed by external services.
:type external_ports: List[int]
:param external_services: List of services to be removed if ha relation is
present.
:type external_services: List[str]
:param port_conv_f: Function to apply to ports to calculate the ports
managed by services controlled by this charm.
:type port_conv_f: f()
:returns: A tuple containing a list of services first followed by a list of
ports.
:rtype: Tuple[List[str], List[int]]
"""
if external_services is None:
external_services = ['haproxy']
if relation_ids('ha'):
for svc in external_services:
try:
services.remove(svc)
except ValueError:
pass
external_ports = [port_conv_f(p) for p in external_ports]
return services, external_ports
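A usage sketch with hypothetical inputs, for a charm running haproxy in front of apache on public port 8776; with an 'ha' relation present haproxy is handed over to hacluster and the port is shifted via determine_apache_port_single:

    services, ports = get_managed_services_and_ports(
        ['haproxy', 'apache2'], [8776])
    # without an 'ha' relation: (['haproxy', 'apache2'], [8776])
    # with an 'ha' relation:    (['apache2'], [8766])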


@@ -0,0 +1,13 @@
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


@@ -0,0 +1,348 @@
# Copyright 2014-2016 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Copyright 2016 Canonical Ltd.
#
# Authors:
# Openstack Charmers <
#
"""
Helpers for high availability.
"""
import hashlib
import json
import re
from charmhelpers.core.hookenv import (
expected_related_units,
log,
relation_set,
charm_name,
config,
status_set,
DEBUG,
)
from charmhelpers.core.host import (
lsb_release
)
from charmhelpers.contrib.openstack.ip import (
resolve_address,
is_ipv6,
)
from charmhelpers.contrib.network.ip import (
get_iface_for_address,
get_netmask_for_address,
)
from charmhelpers.contrib.hahelpers.cluster import (
get_hacluster_config
)
JSON_ENCODE_OPTIONS = dict(
sort_keys=True,
allow_nan=False,
indent=None,
separators=(',', ':'),
)
VIP_GROUP_NAME = 'grp_{service}_vips'
DNSHA_GROUP_NAME = 'grp_{service}_hostnames'
class DNSHAException(Exception):
"""Raised when an error occurs setting up DNS HA
"""
pass
def update_dns_ha_resource_params(resources, resource_params,
relation_id=None,
crm_ocf='ocf:maas:dns'):
""" Configure DNS-HA resources based on provided configuration and
update resource dictionaries for the HA relation.
@param resources: Pointer to dictionary of resources.
Usually instantiated in ha_joined().
@param resource_params: Pointer to dictionary of resource parameters.
Usually instantiated in ha_joined()
@param relation_id: Relation ID of the ha relation
@param crm_ocf: Corosync Open Cluster Framework resource agent to use for
DNS HA
"""
_relation_data = {'resources': {}, 'resource_params': {}}
update_hacluster_dns_ha(charm_name(),
_relation_data,
crm_ocf)
resources.update(_relation_data['resources'])
resource_params.update(_relation_data['resource_params'])
relation_set(relation_id=relation_id, groups=_relation_data['groups'])
def assert_charm_supports_dns_ha():
"""Validate prerequisites for DNS HA
The MAAS client is only available on Xenial or greater
:raises DNSHAException: if release is < 16.04
"""
if lsb_release().get('DISTRIB_RELEASE') < '16.04':
msg = ('DNS HA is only supported on 16.04 and greater '
'versions of Ubuntu.')
status_set('blocked', msg)
raise DNSHAException(msg)
return True
def expect_ha():
""" Determine if the unit expects to be in HA
Check juju goal-state to see if an ha relation is expected, and check for
VIP or dns-ha settings, which indicate the unit should expect to be
related to hacluster.
@returns boolean
"""
ha_related_units = []
try:
ha_related_units = list(expected_related_units(reltype='ha'))
except (NotImplementedError, KeyError):
pass
return len(ha_related_units) > 0 or config('vip') or config('dns-ha')
def generate_ha_relation_data(service,
extra_settings=None,
haproxy_enabled=True):
""" Generate relation data for ha relation
Based on configuration options and unit interfaces, generate a json
encoded dict of relation data items for the hacluster relation,
providing configuration for DNS HA or VIPs + haproxy clone sets.
Example of supplying additional settings::
COLO_CONSOLEAUTH = 'inf: res_nova_consoleauth grp_nova_vips'
AGENT_CONSOLEAUTH = 'ocf:openstack:nova-consoleauth'
AGENT_CA_PARAMS = 'op monitor interval="5s"'
ha_console_settings = {
'colocations': {'vip_consoleauth': COLO_CONSOLEAUTH},
'init_services': {'res_nova_consoleauth': 'nova-consoleauth'},
'resources': {'res_nova_consoleauth': AGENT_CONSOLEAUTH},
'resource_params': {'res_nova_consoleauth': AGENT_CA_PARAMS}}
generate_ha_relation_data('nova', extra_settings=ha_console_settings)
@param service: Name of the service being configured
@param extra_settings: Dict of additional resource data
@returns dict: json encoded data for use with relation_set
"""
_relation_data = {'resources': {}, 'resource_params': {}}
if haproxy_enabled:
_meta = 'meta migration-threshold="INFINITY" failure-timeout="5s"'
_haproxy_res = 'res_{}_haproxy'.format(service)
_relation_data['resources'] = {_haproxy_res: 'lsb:haproxy'}
_relation_data['resource_params'] = {
_haproxy_res: '{} op monitor interval="5s"'.format(_meta)
}
_relation_data['init_services'] = {_haproxy_res: 'haproxy'}
_relation_data['clones'] = {
'cl_{}_haproxy'.format(service): _haproxy_res
}
if extra_settings:
for k, v in extra_settings.items():
if _relation_data.get(k):
_relation_data[k].update(v)
else:
_relation_data[k] = v
if config('dns-ha'):
update_hacluster_dns_ha(service, _relation_data)
else:
update_hacluster_vip(service, _relation_data)
return {
'json_{}'.format(k): json.dumps(v, **JSON_ENCODE_OPTIONS)
for k, v in _relation_data.items() if v
}
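# For a hypothetical 'nova' service with one VIP, the returned dict has
# JSON-encoded values keyed for relation_set, roughly:
#   json_resources       '{"res_nova_<sha1:7>_vip": "ocf:heartbeat:IPaddr2",
#                          "res_nova_haproxy": "lsb:haproxy"}'
#   json_init_services   '{"res_nova_haproxy": "haproxy"}'
#   json_clones          '{"cl_nova_haproxy": "res_nova_haproxy"}'
#   json_groups          '{"grp_nova_vips": "res_nova_<sha1:7>_vip"}'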
def update_hacluster_dns_ha(service, relation_data,
crm_ocf='ocf:maas:dns'):
""" Configure DNS-HA resources based on provided configuration
@param service: Name of the service being configured
@param relation_data: Pointer to dictionary of relation data.
@param crm_ocf: Corosync Open Cluster Framework resource agent to use for
DNS HA
"""
# Validate the charm environment for DNS HA
assert_charm_supports_dns_ha()
settings = ['os-admin-hostname', 'os-internal-hostname',
'os-public-hostname', 'os-access-hostname']
# Check which DNS settings are set and update dictionaries
hostname_group = []
for setting in settings:
hostname = config(setting)
if hostname is None:
log('DNS HA: Hostname setting {} is None. Ignoring.'
''.format(setting),
DEBUG)
continue
m = re.search('os-(.+?)-hostname', setting)
if m:
endpoint_type = m.group(1)
# resolve_address's ADDRESS_MAP uses 'int' not 'internal'
if endpoint_type == 'internal':
endpoint_type = 'int'
else:
msg = ('Unexpected DNS hostname setting: {}. '
'Cannot determine endpoint_type name'
''.format(setting))
status_set('blocked', msg)
raise DNSHAException(msg)
hostname_key = 'res_{}_{}_hostname'.format(service, endpoint_type)
if hostname_key in hostname_group:
log('DNS HA: Resource {}: {} already exists in '
'hostname group - skipping'.format(hostname_key, hostname),
DEBUG)
continue
hostname_group.append(hostname_key)
relation_data['resources'][hostname_key] = crm_ocf
relation_data['resource_params'][hostname_key] = (
'params fqdn="{}" ip_address="{}"'
.format(hostname, resolve_address(endpoint_type=endpoint_type,
override=False)))
if len(hostname_group) >= 1:
log('DNS HA: Hostname group is set with {} as members. '
'Informing the ha relation'.format(' '.join(hostname_group)),
DEBUG)
relation_data['groups'] = {
DNSHA_GROUP_NAME.format(service=service): ' '.join(hostname_group)
}
else:
msg = 'DNS HA: Hostname group has no members.'
status_set('blocked', msg)
raise DNSHAException(msg)
def get_vip_settings(vip):
"""Calculate which nic is on the correct network for the given vip.
If nic or netmask discovery fails then fall back to using charm-supplied
config. If fallback is used, this is indicated via the fallback variable.
@param vip: VIP to lookup nic and cidr for.
@returns (str, str, bool): eg (iface, netmask, fallback)
"""
iface = get_iface_for_address(vip)
netmask = get_netmask_for_address(vip)
fallback = False
if iface is None:
iface = config('vip_iface')
fallback = True
if netmask is None:
netmask = config('vip_cidr')
fallback = True
return iface, netmask, fallback
def update_hacluster_vip(service, relation_data):
""" Configure VIP resources based on provided configuration
@param service: Name of the service being configured
@param relation_data: Pointer to dictionary of relation data.
"""
cluster_config = get_hacluster_config()
vip_group = []
vips_to_delete = []
for vip in cluster_config['vip'].split():
if is_ipv6(vip):
res_vip = 'ocf:heartbeat:IPv6addr'
vip_params = 'ipv6addr'
else:
res_vip = 'ocf:heartbeat:IPaddr2'
vip_params = 'ip'
iface, netmask, fallback = get_vip_settings(vip)
vip_monitoring = 'op monitor timeout="20s" interval="10s" depth="0"'
if iface is not None:
# NOTE(jamespage): Delete old VIP resources
# Old style naming encoding iface in name
# does not work well in environments where
# interface/subnet wiring is not consistent
vip_key = 'res_{}_{}_vip'.format(service, iface)
if vip_key in vips_to_delete:
vip_key = '{}_{}'.format(vip_key, vip_params)
vips_to_delete.append(vip_key)
vip_key = 'res_{}_{}_vip'.format(
service,
hashlib.sha1(vip.encode('UTF-8')).hexdigest()[:7])
relation_data['resources'][vip_key] = res_vip
# NOTE(jamespage):
# Use option-provided vip params if these were used
# instead of auto-detected values
if fallback:
relation_data['resource_params'][vip_key] = (
'params {ip}="{vip}" cidr_netmask="{netmask}" '
'nic="{iface}" {vip_monitoring}'.format(
ip=vip_params,
vip=vip,
iface=iface,
netmask=netmask,
vip_monitoring=vip_monitoring))
else:
# NOTE(jamespage):
# let heartbeat figure out which interface and
# netmask to configure, which works nicely
# when network interface naming is not
# consistent across units.
relation_data['resource_params'][vip_key] = (
'params {ip}="{vip}" {vip_monitoring}'.format(
ip=vip_params,
vip=vip,
vip_monitoring=vip_monitoring))
vip_group.append(vip_key)
if vips_to_delete:
try:
relation_data['delete_resources'].extend(vips_to_delete)
except KeyError:
relation_data['delete_resources'] = vips_to_delete
if len(vip_group) >= 1:
key = VIP_GROUP_NAME.format(service=service)
try:
relation_data['groups'][key] = ' '.join(vip_group)
except KeyError:
relation_data['groups'] = {
key: ' '.join(vip_group)
}
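A short sketch of the new VIP key derivation (values are illustrative): keys now embed a truncated SHA-1 of the VIP instead of the interface name, so inconsistent NIC naming across units no longer yields mismatched resource names:

    import hashlib
    vip = '10.5.100.1'  # hypothetical VIP
    vip_key = 'res_keystone_{}_vip'.format(
        hashlib.sha1(vip.encode('UTF-8')).hexdigest()[:7])
    # -> 'res_keystone_<7 hex chars>_vip', identical on every unit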


@@ -0,0 +1,197 @@
# Copyright 2014-2015 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from charmhelpers.core.hookenv import (
NoNetworkBinding,
config,
unit_get,
service_name,
network_get_primary_address,
)
from charmhelpers.contrib.network.ip import (
get_address_in_network,
is_address_in_network,
is_ipv6,
get_ipv6_addr,
resolve_network_cidr,
)
from charmhelpers.contrib.hahelpers.cluster import is_clustered
PUBLIC = 'public'
INTERNAL = 'int'
ADMIN = 'admin'
ACCESS = 'access'
ADDRESS_MAP = {
PUBLIC: {
'binding': 'public',
'config': 'os-public-network',
'fallback': 'public-address',
'override': 'os-public-hostname',
},
INTERNAL: {
'binding': 'internal',
'config': 'os-internal-network',
'fallback': 'private-address',
'override': 'os-internal-hostname',
},
ADMIN: {
'binding': 'admin',
'config': 'os-admin-network',
'fallback': 'private-address',
'override': 'os-admin-hostname',
},
ACCESS: {
'binding': 'access',
'config': 'access-network',
'fallback': 'private-address',
'override': 'os-access-hostname',
},
}
def canonical_url(configs, endpoint_type=PUBLIC):
"""Returns the correct HTTP URL to this host given the state of HTTPS
configuration, hacluster and charm configuration.
:param configs: OSTemplateRenderer config templating object to inspect
for a complete https context.
:param endpoint_type: str endpoint type to resolve.
:returns: str base URL for services on the current service unit.
"""
scheme = _get_scheme(configs)
address = resolve_address(endpoint_type)
if is_ipv6(address):
address = "[{}]".format(address)
return '%s://%s' % (scheme, address)
def _get_scheme(configs):
"""Returns the scheme to use for the url (either http or https)
depending upon whether https is in the configs value.
:param configs: OSTemplateRenderer config templating object to inspect
for a complete https context.
:returns: either 'http' or 'https' depending on whether https is
configured within the configs context.
"""
scheme = 'http'
if configs and 'https' in configs.complete_contexts():
scheme = 'https'
return scheme
def _get_address_override(endpoint_type=PUBLIC):
"""Returns any address overrides that the user has defined based on the
endpoint type.
Note: this function allows for the service name to be inserted into the
address if the user specifies {service_name}.somehost.org.
:param endpoint_type: the type of endpoint to retrieve the override
value for.
:returns: any endpoint address or hostname that the user has overridden
or None if an override is not present.
"""
override_key = ADDRESS_MAP[endpoint_type]['override']
addr_override = config(override_key)
if not addr_override:
return None
else:
return addr_override.format(service_name=service_name())
def resolve_address(endpoint_type=PUBLIC, override=True):
"""Return unit address depending on net config.
If unit is clustered with vip(s) and has net splits defined, return vip on
correct network. If clustered with no nets defined, return primary vip.
If not clustered, return the unit address, ensuring the address is on the
configured net split if one is configured, or that a Juju 2.0
extra-binding has been used.
:param endpoint_type: Network endpoint type
:param override: Accept hostname overrides or not
"""
resolved_address = None
if override:
resolved_address = _get_address_override(endpoint_type)
if resolved_address:
return resolved_address
vips = config('vip')
if vips:
vips = vips.split()
net_type = ADDRESS_MAP[endpoint_type]['config']
net_addr = config(net_type)
net_fallback = ADDRESS_MAP[endpoint_type]['fallback']
binding = ADDRESS_MAP[endpoint_type]['binding']
clustered = is_clustered()
if clustered and vips:
if net_addr:
for vip in vips:
if is_address_in_network(net_addr, vip):
resolved_address = vip
break
else:
# NOTE: endeavour to check vips against network space
# bindings
try:
bound_cidr = resolve_network_cidr(
network_get_primary_address(binding)
)
for vip in vips:
if is_address_in_network(bound_cidr, vip):
resolved_address = vip
break
except (NotImplementedError, NoNetworkBinding):
# If no net-splits are configured and there is no support for extra
# bindings/network spaces, we expect a single vip
resolved_address = vips[0]
else:
if config('prefer-ipv6'):
fallback_addr = get_ipv6_addr(exc_list=vips)[0]
else:
fallback_addr = unit_get(net_fallback)
if net_addr:
resolved_address = get_address_in_network(net_addr, fallback_addr)
else:
# NOTE: only try to use extra bindings if legacy network
# configuration is not in use
try:
resolved_address = network_get_primary_address(binding)
except (NotImplementedError, NoNetworkBinding):
resolved_address = fallback_addr
if resolved_address is None:
raise ValueError("Unable to resolve a suitable IP address based on "
"charm state and configuration. (net_type=%s, "
"clustered=%s)" % (net_type, clustered))
return resolved_address
def get_vip_in_network(network):
matching_vip = None
vips = config('vip')
if vips:
for vip in vips.split():
if is_address_in_network(network, vip):
matching_vip = vip
return matching_vip
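Typical consumption of this module by a charm looks roughly like the following (the configs object is the charm's templating renderer; addresses shown are placeholders):

    public_url = canonical_url(configs, PUBLIC)      # e.g. 'https://10.5.100.1'
    internal_url = canonical_url(configs, INTERNAL)  # e.g. 'http://10.5.0.20'
    # Raw address, ignoring any os-*-hostname override:
    admin_addr = resolve_address(endpoint_type=ADMIN, override=False)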


@@ -13,7 +13,7 @@
 # limitations under the License.

 # Common python helper functions used for OpenStack charms.
-from collections import OrderedDict
+from collections import OrderedDict, namedtuple
 from functools import wraps

 import subprocess
@@ -36,15 +36,20 @@ from charmhelpers.contrib.network import ip
 from charmhelpers.core import unitdata
 from charmhelpers.core.hookenv import (
+    WORKLOAD_STATES,
     action_fail,
     action_set,
     config,
+    expected_peer_units,
+    expected_related_units,
     log as juju_log,
     charm_dir,
     INFO,
     ERROR,
+    metadata,
     related_units,
     relation_get,
+    relation_id,
     relation_ids,
     relation_set,
     status_set,
@@ -53,6 +58,7 @@ from charmhelpers.core.hookenv import (
     cached,
     leader_set,
     leader_get,
+    local_unit,
 )

 from charmhelpers.core.strutils import (
@@ -108,6 +114,10 @@ from charmhelpers.contrib.openstack.policyd import (
     POLICYD_CONFIG_NAME,
 )

+from charmhelpers.contrib.openstack.ha.utils import (
+    expect_ha,
+)
+
 CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
 CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
@@ -1810,6 +1820,16 @@ def os_application_version_set(package):
     application_version_set(application_version)


+def os_application_status_set(check_function):
+    """Run the supplied function and set the application status accordingly.
+
+    :param check_function: Function to run to get app states and messages.
+    :type check_function: function
+    """
+    state, message = check_function()
+    status_set(state, message, application=True)
+
+
 def enable_memcache(source=None, release=None, package=None):
     """Determine if memcache should be enabled on the local unit
@@ -2046,3 +2066,287 @@ def is_db_maintenance_mode(relid=None):
              'WARN')
         pass
     return True in notifications
@cached
def container_scoped_relations():
"""Get all the container scoped relations
:returns: List of relation names
:rtype: List
"""
md = metadata()
relations = []
for relation_type in ('provides', 'requires', 'peers'):
for relation in md.get(relation_type, []):
if md[relation_type][relation].get('scope') == 'container':
relations.append(relation)
return relations
def is_db_ready(use_current_context=False, rel_name=None):
"""Check remote database is ready to be used.
Database relations are expected to provide a list of 'allowed' units to
confirm that the database is ready for use by those units.
If db relation has provided this information and local unit is a member,
returns True otherwise False.
:param use_current_context: Whether to limit checks to current hook
context.
:type use_current_context: bool
:param rel_name: Name of relation to check
:type rel_name: string
:returns: Whether remote db is ready.
:rtype: bool
:raises: Exception
"""
key = 'allowed_units'
rel_name = rel_name or 'shared-db'
this_unit = local_unit()
if use_current_context:
if relation_id() in relation_ids(rel_name):
rids_units = [(None, None)]
else:
raise Exception("use_current_context=True but not in {} "
"rel hook contexts (currently in {})."
.format(rel_name, relation_id()))
else:
rids_units = [(r_id, u)
for r_id in relation_ids(rel_name)
for u in related_units(r_id)]
for rid, unit in rids_units:
allowed_units = relation_get(rid=rid, unit=unit, attribute=key)
if allowed_units and this_unit in allowed_units.split():
juju_log("This unit ({}) is in allowed unit list from {}".format(
this_unit,
unit), 'DEBUG')
return True
juju_log("This unit was not found in any allowed unit list")
return False
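# Illustration (hypothetical relation data): the shared-db provider
# publishes a space-separated allowed_units list, e.g.
#   relation_get('allowed_units', rid='shared-db:5', unit='mysql/0')
#     -> 'keystone/0 keystone/1 keystone/2'
# and is_db_ready() is True only when local_unit() appears in that list.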
def is_expected_scale(peer_relation_name='cluster'):
"""Query juju goal-state to determine whether our peer- and dependency-
relations are at the expected scale.
Useful for deferring per-unit, per-relation housekeeping work until we are
ready to complete it successfully and without unnecessary repetition.
Always returns True if version of juju used does not support goal-state.
:param peer_relation_name: Name of peer relation
:type peer_relation_name: string
:returns: True or False
:rtype: bool
"""
def _get_relation_id(rel_type):
return next((rid for rid in relation_ids(reltype=rel_type)), None)
Relation = namedtuple('Relation', 'rel_type rel_id')
peer_rid = _get_relation_id(peer_relation_name)
# Units with no peers should still have a peer relation.
if not peer_rid:
juju_log('Not at expected scale, no peer relation found', 'DEBUG')
return False
expected_relations = [
Relation(rel_type='shared-db', rel_id=_get_relation_id('shared-db'))]
if expect_ha():
expected_relations.append(
Relation(
rel_type='ha',
rel_id=_get_relation_id('ha')))
juju_log(
'Checking scale of {} relations'.format(
','.join([r.rel_type for r in expected_relations])),
'DEBUG')
try:
if (len(related_units(relid=peer_rid)) <
len(list(expected_peer_units()))):
return False
for rel in expected_relations:
if not rel.rel_id:
juju_log(
'Expected to find {} relation, but it is missing'.format(
rel.rel_type),
'DEBUG')
return False
# Goal state returns every unit even for container scoped
# relations but the charm only ever has a relation with
# the local unit.
if rel.rel_type in container_scoped_relations():
expected_count = 1
else:
expected_count = len(
list(expected_related_units(reltype=rel.rel_type)))
if len(related_units(relid=rel.rel_id)) < expected_count:
juju_log(
('Not at expected scale, not enough units on {} '
'relation'.format(rel.rel_type)),
'DEBUG')
return False
except NotImplementedError:
return True
juju_log('All checks have passed, unit is at expected scale', 'DEBUG')
return True
def get_peer_key(unit_name):
"""Get the peer key for this unit.
The peer key is the key a unit uses to publish its status down the peer
relation
:param unit_name: Name of unit
:type unit_name: string
:returns: Peer key for given unit
:rtype: string
"""
return 'unit-state-{}'.format(unit_name.replace('/', '-'))
UNIT_READY = 'READY'
UNIT_NOTREADY = 'NOTREADY'
UNIT_UNKNOWN = 'UNKNOWN'
UNIT_STATES = [UNIT_READY, UNIT_NOTREADY, UNIT_UNKNOWN]
def inform_peers_unit_state(state, relation_name='cluster'):
"""Inform peers of the state of this unit.
:param state: State of unit to publish
:type state: string
:param relation_name: Name of relation to publish state on
:type relation_name: string
"""
if state not in UNIT_STATES:
raise ValueError(
"Setting invalid state {} for unit".format(state))
for r_id in relation_ids(relation_name):
relation_set(relation_id=r_id,
relation_settings={
get_peer_key(local_unit()): state})
def get_peers_unit_state(relation_name='cluster'):
"""Get the state of all peers.
:param relation_name: Name of relation to check peers on.
:type relation_name: string
:returns: Unit states keyed on unit name.
:rtype: dict
:raises: ValueError
"""
r_ids = relation_ids(relation_name)
rids_units = [(r, u) for r in r_ids for u in related_units(r)]
unit_states = {}
for r_id, unit in rids_units:
settings = relation_get(unit=unit, rid=r_id)
unit_states[unit] = settings.get(get_peer_key(unit), UNIT_UNKNOWN)
if unit_states[unit] not in UNIT_STATES:
raise ValueError(
"Unit in unknown state {}".format(unit_states[unit]))
return unit_states
def are_peers_ready(relation_name='cluster'):
"""Check if all peers are ready.
:param relation_name: Name of relation to check peers on.
:type relation_name: string
:returns: Whether all units are ready.
:rtype: bool
"""
unit_states = get_peers_unit_state(relation_name)
return all(v == UNIT_READY for v in unit_states.values())
def inform_peers_if_ready(check_unit_ready_func, relation_name='cluster'):
"""Inform peers if this unit is ready.
The check function should return a tuple (state, message). A state
of 'READY' indicates the unit is READY.
:param check_unit_ready_func: Function to run to check readiness
:type check_unit_ready_func: function
:param relation_name: Name of relation to check peers on.
:type relation_name: string
"""
unit_ready, msg = check_unit_ready_func()
if unit_ready:
state = UNIT_READY
else:
state = UNIT_NOTREADY
juju_log('Telling peers this unit is: {}'.format(state), 'DEBUG')
inform_peers_unit_state(state, relation_name)
def check_api_unit_ready(check_db_ready=True):
"""Check if this unit is ready.
:param check_db_ready: Include checks of database readiness.
:type check_db_ready: bool
:returns: Whether unit state is ready and status message
:rtype: (bool, str)
"""
unit_state, msg = get_api_unit_status(check_db_ready=check_db_ready)
return unit_state == WORKLOAD_STATES.ACTIVE, msg
def get_api_unit_status(check_db_ready=True):
"""Return a workload status and message for this unit.
:param check_db_ready: Include checks of database readiness.
:type check_db_ready: bool
:returns: Workload state and message
:rtype: (WORKLOAD_STATES, str)
"""
unit_state = WORKLOAD_STATES.ACTIVE
msg = 'Unit is ready'
if is_db_maintenance_mode():
unit_state = WORKLOAD_STATES.MAINTENANCE
msg = 'Database in maintenance mode.'
elif is_unit_paused_set():
unit_state = WORKLOAD_STATES.BLOCKED
msg = 'Unit paused.'
elif check_db_ready and not is_db_ready():
unit_state = WORKLOAD_STATES.WAITING
msg = 'Allowed_units list provided but this unit not present'
elif not is_db_initialised():
unit_state = WORKLOAD_STATES.WAITING
msg = 'Database not initialised'
elif not is_expected_scale():
unit_state = WORKLOAD_STATES.WAITING
msg = 'Charm and its dependencies not yet at expected scale'
juju_log(msg, 'DEBUG')
return unit_state, msg
def check_api_application_ready():
"""Check if this application is ready.
:returns: Whether application state is ready and status message
:rtype: (bool, str)
"""
app_state, msg = get_api_application_status()
return app_state == WORKLOAD_STATES.ACTIVE, msg
def get_api_application_status():
"""Return a workload status and message for this application.
:returns: Workload state and message
:rtype: (WORKLOAD_STATES, str)
"""
app_state, msg = get_api_unit_status()
if app_state == WORKLOAD_STATES.ACTIVE:
if are_peers_ready():
return WORKLOAD_STATES.ACTIVE, 'Application Ready'
else:
return WORKLOAD_STATES.WAITING, 'Some units are not ready'
return app_state, msg
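A sketch of how a charm could wire these helpers together in its status assessment path (the hook name and leader gating are assumptions, not part of this sync):

    from charmhelpers.core.hookenv import is_leader

    def assess_status():
        # Publish this unit's readiness so peers can aggregate it.
        inform_peers_if_ready(check_api_unit_ready)
        # Unit-level workload status.
        state, message = get_api_unit_status()
        status_set(state, message)
        # The leader rolls peer states up into application-level status.
        if is_leader():
            os_application_status_set(get_api_application_status)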


@@ -22,6 +22,7 @@
 # Adam Gandelman <adamg@ubuntu.com>
 #
+import collections
 import errno
 import hashlib
 import math
@@ -91,6 +92,89 @@ DEFAULT_PGS_PER_OSD_TARGET = 100
 DEFAULT_POOL_WEIGHT = 10.0
 LEGACY_PG_COUNT = 200
 DEFAULT_MINIMUM_PGS = 2
AUTOSCALER_DEFAULT_PGS = 32
class OsdPostUpgradeError(Exception):
"""Error class for OSD post-upgrade operations."""
pass
class OSDSettingConflict(Exception):
"""Error class for conflicting osd setting requests."""
pass
class OSDSettingNotAllowed(Exception):
"""Error class for a disallowed setting."""
pass
OSD_SETTING_EXCEPTIONS = (OSDSettingConflict, OSDSettingNotAllowed)
OSD_SETTING_WHITELIST = [
'osd heartbeat grace',
'osd heartbeat interval',
]
def _order_dict_by_key(rdict):
"""Convert a dictionary into an OrderedDict sorted by key.
:param rdict: Dictionary to be ordered.
:type rdict: dict
:returns: Ordered Dictionary.
:rtype: collections.OrderedDict
"""
return collections.OrderedDict(sorted(rdict.items(), key=lambda k: k[0]))
def get_osd_settings(relation_name):
"""Consolidate requested osd settings from all clients.
Consolidate requested osd settings from all clients. Check that the
requested setting is on the whitelist and it does not conflict with
any other requested settings.
:returns: Dictionary of settings
:rtype: dict
:raises: OSDSettingNotAllowed
:raises: OSDSettingConflict
"""
rel_ids = relation_ids(relation_name)
osd_settings = {}
for relid in rel_ids:
for unit in related_units(relid):
unit_settings = relation_get('osd-settings', unit, relid) or '{}'
unit_settings = json.loads(unit_settings)
for key, value in unit_settings.items():
if key not in OSD_SETTING_WHITELIST:
msg = 'Illegal settings "{}"'.format(key)
raise OSDSettingNotAllowed(msg)
if key in osd_settings:
if osd_settings[key] != unit_settings[key]:
msg = 'Conflicting settings for "{}"'.format(key)
raise OSDSettingConflict(msg)
else:
osd_settings[key] = value
return _order_dict_by_key(osd_settings)
def send_osd_settings():
"""Pass on requested OSD settings to osd units."""
try:
settings = get_osd_settings('client')
except OSD_SETTING_EXCEPTIONS as e:
# There is a problem with the settings, not passing them on. Update
# status will notify the user.
log(e, level=ERROR)
return
data = {
'osd-settings': json.dumps(settings, sort_keys=True)}
for relid in relation_ids('osd'):
relation_set(relation_id=relid,
relation_settings=data)
def validator(value, valid_type, valid_range=None):
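Illustration of the new osd-settings plumbing (hypothetical relation traffic): a client unit requests a whitelisted setting, the mon side consolidates it, and conflicts are surfaced rather than silently merged:

    # A client unit sets, e.g.:
    #   relation_set(relation_settings={
    #       'osd-settings': json.dumps({'osd heartbeat grace': 20})})
    # get_osd_settings('client') then returns
    #   OrderedDict([('osd heartbeat grace', 20)])
    # or raises OSDSettingConflict if another client asked for a
    # different value for the same key.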
@@ -316,16 +400,28 @@ class ReplicatedPool(Pool):
     def create(self):
         if not pool_exists(self.service, self.name):
+            nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0
             # Create it
-            cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create',
-                   self.name, str(self.pg_num)]
+            if nautilus_or_later:
+                cmd = [
+                    'ceph', '--id', self.service, 'osd', 'pool', 'create',
+                    '--pg-num-min={}'.format(
+                        min(AUTOSCALER_DEFAULT_PGS, self.pg_num)
+                    ),
+                    self.name, str(self.pg_num)
+                ]
+            else:
+                cmd = [
+                    'ceph', '--id', self.service, 'osd', 'pool', 'create',
+                    self.name, str(self.pg_num)
+                ]
             try:
                 check_call(cmd)
                 # Set the pool replica size
                 update_pool(client=self.service,
                             pool=self.name,
                             settings={'size': str(self.replicas)})
-                nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0
                 if nautilus_or_later:
                     # Ensure we set the expected pool ratio
                     update_pool(client=self.service,
@@ -383,10 +479,24 @@ class ErasurePool(Pool):
         k = int(erasure_profile['k'])
         m = int(erasure_profile['m'])
         pgs = self.get_pgs(k + m, self.percent_data)
+        nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0
         # Create it
-        cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create',
-               self.name, str(pgs), str(pgs),
-               'erasure', self.erasure_code_profile]
+        if nautilus_or_later:
+            cmd = [
+                'ceph', '--id', self.service, 'osd', 'pool', 'create',
+                '--pg-num-min={}'.format(
+                    min(AUTOSCALER_DEFAULT_PGS, pgs)
+                ),
+                self.name, str(pgs), str(pgs),
+                'erasure', self.erasure_code_profile
+            ]
+        else:
+            cmd = [
+                'ceph', '--id', self.service, 'osd', 'pool', 'create',
+                self.name, str(pgs), str(pgs),
+                'erasure', self.erasure_code_profile
+            ]
         try:
             check_call(cmd)
             try:
@@ -395,7 +505,6 @@ class ErasurePool(Pool):
                 name=self.app_name)
         except CalledProcessError:
             log('Could not set app name for pool {}'.format(self.name, level=WARNING))
-        nautilus_or_later = cmp_pkgrevno('ceph-common', '14.2.0') >= 0
         if nautilus_or_later:
             # Ensure we set the expected pool ratio
             update_pool(client=self.service,
@@ -1635,5 +1744,67 @@ class CephConfContext(object):
                 continue
             ceph_conf[key] = conf[key]
         return ceph_conf
class CephOSDConfContext(CephConfContext):
"""Ceph config (ceph.conf) context.
Consolidates settings from config-flags via CephConfContext with
settings provided by the mons. The config-flag values are preserved in
conf['osd'], settings from the mons which do not clash with config-flag
settings are in conf['osd_from_client'] and finally settings which do
clash are in conf['osd_from_client_conflict']. Rather than silently drop
the conflicting settings they are provided in the context so they can be
rendered commented out to give some visibility to the admin.
"""
def __init__(self, permitted_sections=None):
super(CephOSDConfContext, self).__init__(
permitted_sections=permitted_sections)
try:
self.settings_from_mons = get_osd_settings('mon')
except OSDSettingConflict:
log(
"OSD settings from mons are inconsistent, ignoring them",
level=WARNING)
self.settings_from_mons = {}
def filter_osd_from_mon_settings(self):
"""Filter settings from client relation against config-flags.
:returns: A tuple of (
    config-flag values,
    client settings which do not conflict with config-flag values,
    client settings which conflict with config-flag values)
:rtype: (OrderedDict, OrderedDict, OrderedDict)
"""
ceph_conf = super(CephOSDConfContext, self).__call__()
conflicting_entries = {}
clear_entries = {}
for key, value in self.settings_from_mons.items():
if key in ceph_conf.get('osd', {}):
if ceph_conf['osd'][key] != value:
conflicting_entries[key] = value
else:
clear_entries[key] = value
clear_entries = _order_dict_by_key(clear_entries)
conflicting_entries = _order_dict_by_key(conflicting_entries)
return ceph_conf, clear_entries, conflicting_entries
def __call__(self):
"""Construct OSD config context.
Standard context with two additional special keys.
osd_from_client_conflict: client settings which conflict with
config-flag values
osd_from_client: settings which do not conflict with config-flag
values
:returns: OSD config context dict.
:rtype: dict
"""
conf, osd_clear, osd_conflict = self.filter_osd_from_mon_settings()
conf['osd_from_client_conflict'] = osd_conflict
conf['osd_from_client'] = osd_clear
return conf
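A hedged sketch of how an osd charm might consume this context when rendering ceph.conf (the call site is illustrative):

    ctxt = CephOSDConfContext(permitted_sections=['osd'])()
    # ctxt['osd']                      -> operator config-flags (win conflicts)
    # ctxt['osd_from_client']          -> mon-supplied, non-conflicting
    # ctxt['osd_from_client_conflict'] -> rendered commented out for visibility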


@@ -32,6 +32,10 @@ def loopback_devices():
         /dev/loop0: [0807]:961814 (/tmp/my.img)

+    or:
+
+        /dev/loop0: [0807]:961814 (/tmp/my.img (deleted))
+
     :returns: dict: a dict mapping {loopback_dev: backing_file}
     '''
     loopbacks = {}
@@ -39,9 +43,9 @@ def loopback_devices():
     output = check_output(cmd)
     if six.PY3:
         output = output.decode('utf-8')
-    devs = [d.strip().split(' ') for d in output.splitlines() if d != '']
+    devs = [d.strip().split(' ', 2) for d in output.splitlines() if d != '']
     for dev, _, f in devs:
-        loopbacks[dev.replace(':', '')] = re.search(r'\((\S+)\)', f).groups()[0]
+        loopbacks[dev.replace(':', '')] = re.search(r'\((.+)\)', f).groups()[0]
     return loopbacks
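Worked example of why the parsing changed (input line is hypothetical): losetup reports deleted backing files with a parenthesised suffix, which broke the old whitespace split:

    import re
    line = '/dev/loop0: [0807]:961814 (/tmp/my.img (deleted))'
    dev, _, f = line.strip().split(' ', 2)  # plain split(' ') gave 4 fields
    backing = re.search(r'\((.+)\)', f).groups()[0]
    # backing == '/tmp/my.img (deleted)'; the old split made the
    # three-way unpack in the loop raise ValueError on such lines.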


@@ -21,6 +21,7 @@
 from __future__ import print_function
 import copy
 from distutils.version import LooseVersion
+from enum import Enum
 from functools import wraps
 from collections import namedtuple
 import glob
@@ -57,6 +58,14 @@ RANGE_WARNING = ('Passing NO_PROXY string that includes a cidr. '
                  'This may not be compatible with software you are '
                  'running in your shell.')

+
+class WORKLOAD_STATES(Enum):
+    ACTIVE = 'active'
+    BLOCKED = 'blocked'
+    MAINTENANCE = 'maintenance'
+    WAITING = 'waiting'
+

 cache = {}
@@ -1088,22 +1097,33 @@ def function_tag():
     return os.environ.get('JUJU_FUNCTION_TAG') or action_tag()


-def status_set(workload_state, message):
+def status_set(workload_state, message, application=False):
     """Set the workload state with a message

     Use status-set to set the workload state with a message which is visible
     to the user via juju status. If the status-set command is not found then
-    assume this is juju < 1.23 and juju-log the message unstead.
+    assume this is juju < 1.23 and juju-log the message instead.

-    workload_state -- valid juju workload state.
+    workload_state -- valid juju workload state. str or WORKLOAD_STATES
     message        -- status update message
+    application    -- Whether this is an application state set
     """
-    valid_states = ['maintenance', 'blocked', 'waiting', 'active']
-    if workload_state not in valid_states:
-        raise ValueError(
-            '{!r} is not a valid workload state'.format(workload_state)
-        )
-    cmd = ['status-set', workload_state, message]
+    bad_state_msg = '{!r} is not a valid workload state'
+
+    if isinstance(workload_state, str):
+        try:
+            # Convert string to enum.
+            workload_state = WORKLOAD_STATES[workload_state.upper()]
+        except KeyError:
+            raise ValueError(bad_state_msg.format(workload_state))
+
+    if workload_state not in WORKLOAD_STATES:
+        raise ValueError(bad_state_msg.format(workload_state))
+
+    cmd = ['status-set']
+    if application:
+        cmd.append('--application')
+    cmd.extend([workload_state.value, message])
     try:
         ret = subprocess.call(cmd)
         if ret == 0:
@@ -1111,7 +1131,7 @@ def status_set(workload_state, message):
     except OSError as e:
         if e.errno != errno.ENOENT:
             raise
-    log_message = 'status-set failed: {} {}'.format(workload_state,
+    log_message = 'status-set failed: {} {}'.format(workload_state.value,
                                                     message)
     log(log_message, level='INFO')
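With this change both spellings are accepted and application-level status becomes reachable; a hypothetical call site:

    status_set('active', 'Unit is ready')             # str coerced to enum
    status_set(WORKLOAD_STATES.WAITING, 'Waiting for peers')
    status_set(WORKLOAD_STATES.ACTIVE, 'Application Ready',
               application=True)                      # status-set --application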
@@ -1526,13 +1546,13 @@ def env_proxy_settings(selected_settings=None):
     """Get proxy settings from process environment variables.

     Get charm proxy settings from environment variables that correspond to
-    juju-http-proxy, juju-https-proxy and juju-no-proxy (available as of 2.4.2,
-    see lp:1782236) in a format suitable for passing to an application that
-    reacts to proxy settings passed as environment variables. Some applications
-    support lowercase or uppercase notation (e.g. curl), some support only
-    lowercase (e.g. wget), there are also subjectively rare cases of only
-    uppercase notation support. no_proxy CIDR and wildcard support also varies
-    between runtimes and applications as there is no enforced standard.
+    juju-http-proxy, juju-https-proxy juju-no-proxy (available as of 2.4.2, see
+    lp:1782236) and juju-ftp-proxy in a format suitable for passing to an
+    application that reacts to proxy settings passed as environment variables.
+    Some applications support lowercase or uppercase notation (e.g. curl), some
+    support only lowercase (e.g. wget), there are also subjectively rare cases
+    of only uppercase notation support. no_proxy CIDR and wildcard support also
+    varies between runtimes and applications as there is no enforced standard.

     Some applications may connect to multiple destinations and expose config
     options that would affect only proxy settings for a specific destination


@@ -17,14 +17,17 @@

 import yaml

-from subprocess import check_call
+from subprocess import check_call, CalledProcessError

 from charmhelpers.core.hookenv import (
     log,
     DEBUG,
     ERROR,
+    WARNING,
 )

+from charmhelpers.core.host import is_container
+
 __author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
@@ -62,4 +65,11 @@ def create(sysctl_dict, sysctl_file, ignore=False):
     if ignore:
         call.append("-e")

-    check_call(call)
+    try:
+        check_call(call)
+    except CalledProcessError as e:
+        if is_container():
+            log("Error setting some sysctl keys in this container: {}".format(e.output),
+                level=WARNING)
+        else:
+            raise e


@@ -32,9 +32,6 @@ applications:
   keystone:
     charm: 'cs:~openstack-charmers-next/keystone'
     num_units: 1
-    options:
-      admin-password: openstack
-      admin-token: ubuntutesting
     constraints: mem=1024
   percona-cluster:
     charm: 'cs:~openstack-charmers-next/percona-cluster'


@@ -42,8 +42,6 @@ applications:
     num_units: 1
     options:
       openstack-origin: cloud:bionic-rocky
-      admin-password: openstack
-      admin-token: ubuntutesting
     constraints: mem=1024
   percona-cluster:
     charm: 'cs:~openstack-charmers-next/percona-cluster'


@@ -42,8 +42,6 @@ applications:
     num_units: 1
     options:
       openstack-origin: cloud:bionic-stein
-      admin-password: openstack
-      admin-token: ubuntutesting
     constraints: mem=1024
   percona-cluster:
     charm: 'cs:~openstack-charmers-next/percona-cluster'


@@ -42,8 +42,6 @@ applications:
     num_units: 1
     options:
       openstack-origin: cloud:bionic-train
-      admin-password: openstack
-      admin-token: ubuntutesting
     constraints: mem=1024
   percona-cluster:
     charm: 'cs:~openstack-charmers-next/percona-cluster'


@@ -41,8 +41,6 @@ applications:
     charm: 'cs:~openstack-charmers-next/keystone'
     num_units: 1
     options:
-      admin-password: openstack
-      admin-token: ubuntutesting
       openstack-origin: cloud:trusty-mitaka
     constraints: mem=1024
   percona-cluster:


@@ -35,9 +35,6 @@ applications:
   keystone:
     charm: 'cs:~openstack-charmers-next/keystone'
     num_units: 1
-    options:
-      admin-password: openstack
-      admin-token: ubuntutesting
     constraints: mem=1024
   percona-cluster:
     charm: 'cs:~openstack-charmers-next/percona-cluster'


@@ -42,8 +42,6 @@ applications:
     num_units: 1
     options:
       openstack-origin: cloud:xenial-ocata
-      admin-password: openstack
-      admin-token: ubuntutesting
     constraints: mem=1024
   percona-cluster:
     charm: 'cs:~openstack-charmers-next/percona-cluster'


@@ -41,8 +41,6 @@ applications:
     charm: 'cs:~openstack-charmers-next/keystone'
     num_units: 1
     options:
-      admin-password: openstack
-      admin-token: ubuntutesting
       openstack-origin: cloud:xenial-pike
     constraints: mem=1024
   percona-cluster:


@@ -42,8 +42,6 @@ applications:
     num_units: 1
     options:
       openstack-origin: cloud:xenial-queens
-      admin-password: openstack
-      admin-token: ubuntutesting
     constraints: mem=1024
   percona-cluster:
     charm: 'cs:~openstack-charmers-next/percona-cluster'