Support IPv6 for swift.
@@ -187,4 +187,7 @@ options:
      192.168.0.0/24)
      .
      This network will be used for public endpoints.

  prefer-ipv6:
    type: boolean
    default: false
    description: "Enable IPv6."
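Note (illustration only, not part of the diff): a charm hook would typically consume the new option through charmhelpers' `config()` helper; the helper function name below is hypothetical, the option and helpers come from this change::

    from charmhelpers.core.hookenv import config, unit_get
    from charmhelpers.contrib.network.ip import get_ipv6_addr

    def pick_unit_address():
        # prefer-ipv6=true switches endpoints from the IPv4 private-address
        # to the unit's global IPv6 address.
        if config('prefer-ipv6'):
            return get_ipv6_addr()
        return unit_get('private-address')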
@@ -6,6 +6,11 @@
#  Adam Gandelman <adamg@ubuntu.com>
#

"""
Helpers for clustering and determining "cluster leadership" and other
clustering-related helpers.
"""

import subprocess
import os

@@ -19,6 +24,7 @@ from charmhelpers.core.hookenv import (
    config as config_get,
    INFO,
    ERROR,
    WARNING,
    unit_get,
)

@@ -27,6 +33,29 @@ class HAIncompleteConfig(Exception):
    pass


def is_elected_leader(resource):
    """
    Returns True if the charm executing this is the elected cluster leader.

    It relies on two mechanisms to determine leadership:
        1. If the charm is part of a corosync cluster, call corosync to
        determine leadership.
        2. If the charm is not part of a corosync cluster, the leader is
        determined as being "the alive unit with the lowest unit number". In
        other words, the oldest surviving unit.
    """
    if is_clustered():
        if not is_crm_leader(resource):
            log('Deferring action to CRM leader.', level=INFO)
            return False
    else:
        peers = peer_units()
        if peers and not oldest_peer(peers):
            log('Deferring action to oldest service unit.', level=INFO)
            return False
    return True


def is_clustered():
    for r_id in (relation_ids('ha') or []):
        for unit in (relation_list(r_id) or []):
@@ -38,7 +67,11 @@ def is_clustered():
    return False


def is_leader(resource):
def is_crm_leader(resource):
    """
    Returns True if the charm calling this is the elected corosync leader,
    as returned by calling the external "crm" command.
    """
    cmd = [
        "crm", "resource",
        "show", resource
@@ -54,9 +87,15 @@ def is_leader(resource):
            return False


def peer_units():
def is_leader(resource):
    log("is_leader is deprecated. Please consider using is_crm_leader "
        "instead.", level=WARNING)
    return is_crm_leader(resource)


def peer_units(peer_relation="cluster"):
    peers = []
    for r_id in (relation_ids('cluster') or []):
    for r_id in (relation_ids(peer_relation) or []):
        for unit in (relation_list(r_id) or []):
            peers.append(unit)
    return peers
@@ -72,6 +111,7 @@ def peer_ips(peer_relation='cluster', addr_key='private-address'):


def oldest_peer(peers):
    """Determines who the oldest peer is by comparing unit numbers."""
    local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
    for peer in peers:
        remote_unit_no = int(peer.split('/')[1])
@@ -81,16 +121,9 @@ def oldest_peer(peers):


def eligible_leader(resource):
    if is_clustered():
        if not is_leader(resource):
            log('Deferring action to CRM leader.', level=INFO)
            return False
    else:
        peers = peer_units()
        if peers and not oldest_peer(peers):
            log('Deferring action to oldest service unit.', level=INFO)
            return False
    return True
    log("eligible_leader is deprecated. Please consider using "
        "is_elected_leader instead.", level=WARNING)
    return is_elected_leader(resource)


def https():
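Illustration only (hook name and resource name are examples, the helper is from the diff): the renamed API is meant to guard leader-only work in a hook, roughly::

    from charmhelpers.contrib.hahelpers.cluster import is_elected_leader

    def cluster_relation_changed():
        # Only the elected leader (CRM leader when clustered, otherwise the
        # oldest surviving peer) performs one-off cluster configuration.
        if not is_elected_leader('res_swift_haproxy'):
            return
        # ... leader-only work would go here ...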
@@ -4,7 +4,7 @@ from functools import partial

from charmhelpers.fetch import apt_install
from charmhelpers.core.hookenv import (
    ERROR, log,
    ERROR, log, config,
)

try:
@@ -154,3 +154,21 @@ def _get_for_address(address, key):
get_iface_for_address = partial(_get_for_address, key='iface')

get_netmask_for_address = partial(_get_for_address, key='netmask')


def get_ipv6_addr(iface="eth0"):
    try:
        iface_addrs = netifaces.ifaddresses(iface)
        if netifaces.AF_INET6 not in iface_addrs:
            raise Exception("Interface '%s' doesn't have an ipv6 address." % iface)

        addresses = netifaces.ifaddresses(iface)[netifaces.AF_INET6]
        ipv6_addr = [a['addr'] for a in addresses if not a['addr'].startswith('fe80')
                     and config('vip') != a['addr']]
        if not ipv6_addr:
            raise Exception("Interface '%s' doesn't have global ipv6 address." % iface)

        return ipv6_addr[0]

    except ValueError:
        raise ValueError("Invalid interface '%s'" % iface)
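For context, a rough usage sketch of the new helper (it relies on python-netifaces; the URL formatting line is hypothetical)::

    from charmhelpers.contrib.network.ip import get_ipv6_addr

    # First global IPv6 address on eth0; link-local (fe80::) addresses and the
    # configured 'vip' are filtered out.  Raises if the interface has none.
    addr = get_ipv6_addr(iface='eth0')
    endpoint = '[%s]:8080' % addr   # example of wrapping the address for a URL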
@@ -44,7 +44,10 @@ from charmhelpers.contrib.openstack.neutron import (
    neutron_plugin_attribute,
)

from charmhelpers.contrib.network.ip import get_address_in_network
from charmhelpers.contrib.network.ip import (
    get_address_in_network,
    get_ipv6_addr,
)

CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'

@@ -401,9 +404,12 @@ class HAProxyContext(OSContextGenerator):

        cluster_hosts = {}
        l_unit = local_unit().replace('/', '-')
        cluster_hosts[l_unit] = \
            get_address_in_network(config('os-internal-network'),
                                   unit_get('private-address'))
        if config('prefer-ipv6'):
            addr = get_ipv6_addr()
        else:
            addr = unit_get('private-address')
        cluster_hosts[l_unit] = get_address_in_network(config('os-internal-network'),
                                                       addr)

        for rid in relation_ids('cluster'):
            for unit in related_units(rid):
@@ -414,6 +420,16 @@ class HAProxyContext(OSContextGenerator):
        ctxt = {
            'units': cluster_hosts,
        }

        if config('prefer-ipv6'):
            ctxt['local_host'] = 'ip6-localhost'
            ctxt['haproxy_host'] = '::'
            ctxt['stat_port'] = ':::8888'
        else:
            ctxt['local_host'] = '127.0.0.1'
            ctxt['haproxy_host'] = '0.0.0.0'
            ctxt['stat_port'] = ':8888'

        if len(cluster_hosts.keys()) > 1:
            # Enable haproxy when we have enough peers.
            log('Ensuring haproxy enabled in /etc/default/haproxy.')
@@ -7,6 +7,7 @@ from charmhelpers.contrib.network.ip import (
    get_address_in_network,
    is_address_in_network,
    is_ipv6,
    get_ipv6_addr,
)

from charmhelpers.contrib.hahelpers.cluster import is_clustered
@@ -64,10 +65,13 @@ def resolve_address(endpoint_type=PUBLIC):
                        vip):
                    resolved_address = vip
    else:
        if config('prefer-ipv6'):
            fallback_addr = get_ipv6_addr()
        else:
            fallback_addr = unit_get(_address_map[endpoint_type]['fallback'])
        resolved_address = get_address_in_network(
            config(_address_map[endpoint_type]['config']),
            unit_get(_address_map[endpoint_type]['fallback'])
        )
            config(_address_map[endpoint_type]['config']), fallback_addr)

    if resolved_address is None:
        raise ValueError('Unable to resolve a suitable IP address'
                         ' based on charm state and configuration')
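Aside (not part of the change): assuming this hunk is charmhelpers' openstack address helper, the import path below is inferred rather than shown in the diff; the net effect for an unclustered unit could be exercised roughly like this::

    from charmhelpers.contrib.openstack.ip import resolve_address, PUBLIC

    # With prefer-ipv6=true, no VIP and no matching os-public-network, this now
    # falls back to the unit's global IPv6 address instead of private-address.
    public_addr = resolve_address(endpoint_type=PUBLIC)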
@@ -1,6 +1,6 @@
global
    log 127.0.0.1 local0
    log 127.0.0.1 local1 notice
    log {{ local_host }} local0
    log {{ local_host }} local1 notice
    maxconn 20000
    user haproxy
    group haproxy
@@ -17,7 +17,7 @@ defaults
    timeout client 30000
    timeout server 30000

listen stats :8888
listen stats {{ stat_port }}
    mode http
    stats enable
    stats hide-version
@@ -46,5 +46,8 @@ def is_device_mounted(device):
    :returns: boolean: True if the path represents a mounted device, False if
        it doesn't.
    '''
    is_partition = bool(re.search(r".*[0-9]+\b", device))
    out = check_output(['mount'])
    if is_partition:
        return bool(re.search(device + r"\b", out))
    return bool(re.search(device + r"[0-9]+\b", out))
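Illustrative usage (device paths are examples; the import path is assumed, not shown in the diff). With the new is_partition check, a partition argument is matched literally instead of requiring a trailing partition number::

    from charmhelpers.contrib.storage.linux.utils import is_device_mounted

    is_device_mounted('/dev/sda')    # whole disk: matches any mounted /dev/sdaN
    is_device_mounted('/dev/sda1')   # partition: now matched as-is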
@@ -12,6 +12,8 @@ import random
import string
import subprocess
import hashlib
import shutil
from contextlib import contextmanager

from collections import OrderedDict

@@ -52,7 +54,7 @@ def service(action, service_name):
def service_running(service):
    """Determine whether a system service is running"""
    try:
        output = subprocess.check_output(['service', service, 'status'])
        output = subprocess.check_output(['service', service, 'status'], stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError:
        return False
    else:
@@ -62,6 +64,16 @@ def service_running(service):
            return False


def service_available(service_name):
    """Determine whether a system service is available"""
    try:
        subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError:
        return False
    else:
        return True


def adduser(username, password=None, shell='/bin/bash', system_user=False):
    """Add a user to the system"""
    try:
@@ -329,3 +341,24 @@ def cmp_pkgrevno(package, revno, pkgcache=None):
        pkgcache = apt_pkg.Cache()
    pkg = pkgcache[package]
    return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)


@contextmanager
def chdir(d):
    cur = os.getcwd()
    try:
        yield os.chdir(d)
    finally:
        os.chdir(cur)


def chownr(path, owner, group):
    uid = pwd.getpwnam(owner).pw_uid
    gid = grp.getgrnam(group).gr_gid

    for root, dirs, files in os.walk(path):
        for name in dirs + files:
            full = os.path.join(root, name)
            broken_symlink = os.path.lexists(full) and not os.path.exists(full)
            if not broken_symlink:
                os.chown(full, uid, gid)
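Hypothetical usage of the new host helpers (service name and paths are examples)::

    from charmhelpers.core import host

    if host.service_available('haproxy') and not host.service_running('haproxy'):
        host.service_start('haproxy')

    # chdir() temporarily switches the working directory and always restores it.
    with host.chdir('/etc/swift'):
        pass  # work relative to /etc/swift here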
							
								
								
									
hooks/charmhelpers/core/services/__init__.py (new file: 2 lines)
@@ -0,0 +1,2 @@
from .base import *
from .helpers import *
							
								
								
									
hooks/charmhelpers/core/services/base.py (new file: 305 lines)
@@ -0,0 +1,305 @@
import os
import re
import json
from collections import Iterable

from charmhelpers.core import host
from charmhelpers.core import hookenv


__all__ = ['ServiceManager', 'ManagerCallback',
           'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
           'service_restart', 'service_stop']


class ServiceManager(object):
    def __init__(self, services=None):
        """
        Register a list of services, given their definitions.

        Traditional charm authoring is focused on implementing hooks.  That is,
        the charm author is thinking in terms of "What hook am I handling; what
        does this hook need to do?"  However, in most cases, the real question
        should be "Do I have the information I need to configure and start this
        piece of software and, if so, what are the steps for doing so?"  The
        ServiceManager framework tries to bring the focus to the data and the
        setup tasks, in the most declarative way possible.

        Service definitions are dicts in the following formats (all keys except
        'service' are optional)::

            {
                "service": <service name>,
                "required_data": <list of required data contexts>,
                "data_ready": <one or more callbacks>,
                "data_lost": <one or more callbacks>,
                "start": <one or more callbacks>,
                "stop": <one or more callbacks>,
                "ports": <list of ports to manage>,
            }

        The 'required_data' list should contain dicts of required data (or
        dependency managers that act like dicts and know how to collect the data).
        Only when all items in the 'required_data' list are populated are the
        'data_ready' and 'start' callbacks executed.  See `is_ready()` for more
        information.

        The 'data_ready' value should be either a single callback, or a list of
        callbacks, to be called when all items in 'required_data' pass `is_ready()`.
        Each callback will be called with the service name as the only parameter.
        After all of the 'data_ready' callbacks are called, the 'start' callbacks
        are fired.

        The 'data_lost' value should be either a single callback, or a list of
        callbacks, to be called when a 'required_data' item no longer passes
        `is_ready()`.  Each callback will be called with the service name as the
        only parameter.  After all of the 'data_lost' callbacks are called,
        the 'stop' callbacks are fired.

        The 'start' value should be either a single callback, or a list of
        callbacks, to be called when starting the service, after the 'data_ready'
        callbacks are complete.  Each callback will be called with the service
        name as the only parameter.  This defaults to
        `[host.service_start, services.open_ports]`.

        The 'stop' value should be either a single callback, or a list of
        callbacks, to be called when stopping the service.  If the service is
        being stopped because it no longer has all of its 'required_data', this
        will be called after all of the 'data_lost' callbacks are complete.
        Each callback will be called with the service name as the only parameter.
        This defaults to `[services.close_ports, host.service_stop]`.

        The 'ports' value should be a list of ports to manage.  The default
        'start' handler will open the ports after the service is started,
        and the default 'stop' handler will close the ports prior to stopping
        the service.


        Examples:

        The following registers an Upstart service called bingod that depends on
        a mongodb relation and which runs a custom `db_migrate` function prior to
        restarting the service, and a Runit service called spadesd::

            manager = services.ServiceManager([
                {
                    'service': 'bingod',
                    'ports': [80, 443],
                    'required_data': [MongoRelation(), config(), {'my': 'data'}],
                    'data_ready': [
                        services.template(source='bingod.conf'),
                        services.template(source='bingod.ini',
                                          target='/etc/bingod.ini',
                                          owner='bingo', perms=0400),
                    ],
                },
                {
                    'service': 'spadesd',
                    'data_ready': services.template(source='spadesd_run.j2',
                                                    target='/etc/sv/spadesd/run',
                                                    perms=0555),
                    'start': runit_start,
                    'stop': runit_stop,
                },
            ])
            manager.manage()
        """
        self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
        self._ready = None
        self.services = {}
        for service in services or []:
            service_name = service['service']
            self.services[service_name] = service

    def manage(self):
        """
        Handle the current hook by doing The Right Thing with the registered services.
        """
        hook_name = hookenv.hook_name()
        if hook_name == 'stop':
            self.stop_services()
        else:
            self.provide_data()
            self.reconfigure_services()

    def provide_data(self):
        hook_name = hookenv.hook_name()
        for service in self.services.values():
            for provider in service.get('provided_data', []):
                if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name):
                    data = provider.provide_data()
                    if provider._is_ready(data):
                        hookenv.relation_set(None, data)

    def reconfigure_services(self, *service_names):
        """
        Update all files for one or more registered services, and,
        if ready, optionally restart them.

        If no service names are given, reconfigures all registered services.
        """
        for service_name in service_names or self.services.keys():
            if self.is_ready(service_name):
                self.fire_event('data_ready', service_name)
                self.fire_event('start', service_name, default=[
                    service_restart,
                    manage_ports])
                self.save_ready(service_name)
            else:
                if self.was_ready(service_name):
                    self.fire_event('data_lost', service_name)
                self.fire_event('stop', service_name, default=[
                    manage_ports,
                    service_stop])
                self.save_lost(service_name)

    def stop_services(self, *service_names):
        """
        Stop one or more registered services, by name.

        If no service names are given, stops all registered services.
        """
        for service_name in service_names or self.services.keys():
            self.fire_event('stop', service_name, default=[
                manage_ports,
                service_stop])

    def get_service(self, service_name):
        """
        Given the name of a registered service, return its service definition.
        """
        service = self.services.get(service_name)
        if not service:
            raise KeyError('Service not registered: %s' % service_name)
        return service

    def fire_event(self, event_name, service_name, default=None):
        """
        Fire a data_ready, data_lost, start, or stop event on a given service.
        """
        service = self.get_service(service_name)
        callbacks = service.get(event_name, default)
        if not callbacks:
            return
        if not isinstance(callbacks, Iterable):
            callbacks = [callbacks]
        for callback in callbacks:
            if isinstance(callback, ManagerCallback):
                callback(self, service_name, event_name)
            else:
                callback(service_name)

    def is_ready(self, service_name):
        """
        Determine if a registered service is ready, by checking its 'required_data'.

        A 'required_data' item can be any mapping type, and is considered ready
        if `bool(item)` evaluates as True.
        """
        service = self.get_service(service_name)
        reqs = service.get('required_data', [])
        return all(bool(req) for req in reqs)

    def _load_ready_file(self):
        if self._ready is not None:
            return
        if os.path.exists(self._ready_file):
            with open(self._ready_file) as fp:
                self._ready = set(json.load(fp))
        else:
            self._ready = set()

    def _save_ready_file(self):
        if self._ready is None:
            return
        with open(self._ready_file, 'w') as fp:
            json.dump(list(self._ready), fp)

    def save_ready(self, service_name):
        """
        Save an indicator that the given service is now data_ready.
        """
        self._load_ready_file()
        self._ready.add(service_name)
        self._save_ready_file()

    def save_lost(self, service_name):
        """
        Save an indicator that the given service is no longer data_ready.
        """
        self._load_ready_file()
        self._ready.discard(service_name)
        self._save_ready_file()

    def was_ready(self, service_name):
        """
        Determine if the given service was previously data_ready.
        """
        self._load_ready_file()
        return service_name in self._ready


class ManagerCallback(object):
    """
    Special case of a callback that takes the `ServiceManager` instance
    in addition to the service name.

    Subclasses should implement `__call__` which should accept three parameters:

        * `manager`       The `ServiceManager` instance
        * `service_name`  The name of the service it's being triggered for
        * `event_name`    The name of the event that this callback is handling
    """
    def __call__(self, manager, service_name, event_name):
        raise NotImplementedError()


class PortManagerCallback(ManagerCallback):
    """
    Callback class that will open or close ports, for use as either
    a start or stop action.
    """
    def __call__(self, manager, service_name, event_name):
        service = manager.get_service(service_name)
        new_ports = service.get('ports', [])
        port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
        if os.path.exists(port_file):
            with open(port_file) as fp:
                old_ports = fp.read().split(',')
            for old_port in old_ports:
                if bool(old_port):
                    old_port = int(old_port)
                    if old_port not in new_ports:
                        hookenv.close_port(old_port)
        with open(port_file, 'w') as fp:
            fp.write(','.join(str(port) for port in new_ports))
        for port in new_ports:
            if event_name == 'start':
                hookenv.open_port(port)
            elif event_name == 'stop':
                hookenv.close_port(port)


def service_stop(service_name):
    """
    Wrapper around host.service_stop to prevent spurious "unknown service"
    messages in the logs.
    """
    if host.service_running(service_name):
        host.service_stop(service_name)


def service_restart(service_name):
    """
    Wrapper around host.service_restart to prevent spurious "unknown service"
    messages in the logs.
    """
    if host.service_available(service_name):
        if host.service_running(service_name):
            host.service_restart(service_name)
        else:
            host.service_start(service_name)


# Convenience aliases
open_ports = close_ports = manage_ports = PortManagerCallback()
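Condensed from the docstring above (service name, port and template paths are placeholders), a minimal hooks.py built on the new framework might look like this::

    from charmhelpers.core import services

    manager = services.ServiceManager([
        {
            'service': 'swift-proxy',            # placeholder service name
            'ports': [8080],                     # opened on start, closed on stop
            'required_data': [{'proxy_ip': '10.0.0.1'}],  # any dict-like context
            'data_ready': [
                services.template(source='proxy-server.conf',
                                  target='/etc/swift/proxy-server.conf'),
            ],
        },
    ])
    manager.manage()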
							
								
								
									
hooks/charmhelpers/core/services/helpers.py (new file: 125 lines)
@@ -0,0 +1,125 @@
from charmhelpers.core import hookenv
from charmhelpers.core import templating

from charmhelpers.core.services.base import ManagerCallback


__all__ = ['RelationContext', 'TemplateCallback',
           'render_template', 'template']


class RelationContext(dict):
    """
    Base class for a context generator that gets relation data from juju.

    Subclasses must provide the attributes `name`, which is the name of the
    interface of interest, `interface`, which is the type of the interface of
    interest, and `required_keys`, which is the set of keys required for the
    relation to be considered complete.  The data for all interfaces matching
    the `name` attribute that are complete will be used to populate the
    dictionary values (see `get_data`, below).

    The generated context will be namespaced under the interface type, to prevent
    potential naming conflicts.
    """
    name = None
    interface = None
    required_keys = []

    def __init__(self, *args, **kwargs):
        super(RelationContext, self).__init__(*args, **kwargs)
        self.get_data()

    def __bool__(self):
        """
        Returns True if all of the required_keys are available.
        """
        return self.is_ready()

    __nonzero__ = __bool__

    def __repr__(self):
        return super(RelationContext, self).__repr__()

    def is_ready(self):
        """
        Returns True if all of the `required_keys` are available from any units.
        """
        ready = len(self.get(self.name, [])) > 0
        if not ready:
            hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
        return ready

    def _is_ready(self, unit_data):
        """
        Helper method that tests a set of relation data and returns True if
        all of the `required_keys` are present.
        """
        return set(unit_data.keys()).issuperset(set(self.required_keys))

    def get_data(self):
        """
        Retrieve the relation data for each unit involved in a relation and,
        if complete, store it in a list under `self[self.name]`.  This
        is automatically called when the RelationContext is instantiated.

        The units are sorted lexicographically first by the service ID, then by
        the unit ID.  Thus, if an interface has two other services, 'db:1'
        and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
        and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
        set of data, the relation data for the units will be stored in the
        order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.

        If you only care about a single unit on the relation, you can just
        access it as `{{ interface[0]['key'] }}`.  However, if you can at all
        support multiple units on a relation, you should iterate over the list,
        like::

            {% for unit in interface -%}
                {{ unit['key'] }}{% if not loop.last %},{% endif %}
            {%- endfor %}

        Note that since all sets of relation data from all related services and
        units are in a single list, if you need to know which service or unit a
        set of data came from, you'll need to extend this class to preserve
        that information.
        """
        if not hookenv.relation_ids(self.name):
            return

        ns = self.setdefault(self.name, [])
        for rid in sorted(hookenv.relation_ids(self.name)):
            for unit in sorted(hookenv.related_units(rid)):
                reldata = hookenv.relation_get(rid=rid, unit=unit)
                if self._is_ready(reldata):
                    ns.append(reldata)

    def provide_data(self):
        """
        Return data to be relation_set for this interface.
        """
        return {}


class TemplateCallback(ManagerCallback):
    """
    Callback class that will render a template, for use as a ready action.
    """
    def __init__(self, source, target, owner='root', group='root', perms=0444):
        self.source = source
        self.target = target
        self.owner = owner
        self.group = group
        self.perms = perms

    def __call__(self, manager, service_name, event_name):
        service = manager.get_service(service_name)
        context = {}
        for ctx in service.get('required_data', []):
            context.update(ctx)
        templating.render(self.source, self.target, context,
                          self.owner, self.group, self.perms)


# Convenience aliases for templates
render_template = template = TemplateCallback
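A sketch of how RelationContext is meant to be specialised (the relation name, interface and keys below are invented for illustration)::

    from charmhelpers.core.services.helpers import RelationContext

    class DatabaseRelation(RelationContext):
        name = 'db'            # relation name from metadata.yaml (example)
        interface = 'mysql'    # interface type (example)
        required_keys = ['host', 'database', 'user', 'password']

    # bool(DatabaseRelation()) only becomes True once every required key is
    # present on some remote unit, so an instance can be listed directly in a
    # service definition's 'required_data'.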
							
								
								
									
hooks/charmhelpers/core/templating.py (new file: 51 lines)
@@ -0,0 +1,51 @@
import os

from charmhelpers.core import host
from charmhelpers.core import hookenv


def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None):
    """
    Render a template.

    The `source` path, if not absolute, is relative to the `templates_dir`.

    The `target` path should be absolute.

    The context should be a dict containing the values to be replaced in the
    template.

    The `owner`, `group`, and `perms` options will be passed to `write_file`.

    If omitted, `templates_dir` defaults to the `templates` folder in the charm.

    Note: Using this requires python-jinja2; if it is not installed, calling
    this will attempt to use charmhelpers.fetch.apt_install to install it.
    """
    try:
        from jinja2 import FileSystemLoader, Environment, exceptions
    except ImportError:
        try:
            from charmhelpers.fetch import apt_install
        except ImportError:
            hookenv.log('Could not import jinja2, and could not import '
                        'charmhelpers.fetch to install it',
                        level=hookenv.ERROR)
            raise
        apt_install('python-jinja2', fatal=True)
        from jinja2 import FileSystemLoader, Environment, exceptions

    if templates_dir is None:
        templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
    loader = Environment(loader=FileSystemLoader(templates_dir))
    try:
        template = loader.get_template(source)
    except exceptions.TemplateNotFound as e:
        hookenv.log('Could not load template %s from %s.' %
                    (source, templates_dir),
                    level=hookenv.ERROR)
        raise e
    content = template.render(context)
    host.mkdir(os.path.dirname(target))
    host.write_file(target, content, owner, group, perms)
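Example call for the new render() helper (the source and target paths are hypothetical; the context keys mirror the haproxy template variables introduced in this change)::

    from charmhelpers.core import templating

    templating.render(source='haproxy.cfg',             # looked up in templates/
                      target='/etc/haproxy/haproxy.cfg',
                      context={'local_host': 'ip6-localhost',
                               'stat_port': ':::8888'},
                      perms=0444)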
@@ -19,6 +19,10 @@ from charmhelpers.contrib.hahelpers.cluster import (
    determine_apache_port,
)

from charmhelpers.contrib.network.ip import (
    get_ipv6_addr,
)

from charmhelpers.contrib.openstack.utils import get_host_ip
import subprocess
import os
@@ -134,8 +138,12 @@ class SwiftIdentityContext(OSContextGenerator):
        if workers == '0':
            import multiprocessing
            workers = multiprocessing.cpu_count()
        if config('prefer-ipv6'):
            proxy_ip = '%s' % get_ipv6_addr()
        else:
            proxy_ip = get_host_ip(unit_get('private-address'))
        ctxt = {
            'proxy_ip': get_host_ip(unit_get('private-address')),
            'proxy_ip': proxy_ip,
            'bind_port': determine_api_port(bind_port),
            'workers': workers,
            'operator_roles': config('operator-roles'),
@@ -196,9 +204,13 @@ class SwiftIdentityContext(OSContextGenerator):
class MemcachedContext(OSContextGenerator):

    def __call__(self):
        ctxt = {
            'proxy_ip': get_host_ip(unit_get('private-address'))
        }
        ctxt = {}
        if config('prefer-ipv6'):
            ctxt['proxy_ip'] = get_ipv6_addr()
        else:
            ctxt['proxy_ip'] = get_host_ip(unit_get('private-address'))

        return ctxt

SWIFT_HASH_FILE = '/var/lib/juju/swift-hash-path.conf'
@@ -249,22 +249,29 @@ def ha_relation_joined():
        'res_swift_haproxy': 'op monitor interval="5s"'
    }

    if config('prefer-ipv6'):
        res_swift_vip = 'ocf:heartbeat:IPv6addr'
        vip_params = 'ipv6addr'
    else:
        res_swift_vip = 'ocf:heartbeat:IPaddr2'
        vip_params = 'ip'

    vip_group = []
    for vip in vip.split():
        iface = get_iface_for_address(vip)
        if iface is not None:
            vip_key = 'res_swift_{}_vip'.format(iface)
            resources[vip_key] = 'ocf:heartbeat:IPaddr2'
            resources[vip_key] = res_swift_vip
            resource_params[vip_key] = (
                'params ip="{vip}" cidr_netmask="{netmask}"'
                ' nic="{iface}"'.format(vip=vip,
                'params {ip}="{vip}" cidr_netmask="{netmask}"'
                ' nic="{iface}"'.format(ip=vip_params,
                                        vip=vip,
                                        iface=iface,
                                        netmask=get_netmask_for_address(vip))
            )
            vip_group.append(vip_key)

    if len(vip_group) > 1:
        relation_set(groups={'grp_swift_vips': ' '.join(vip_group)})
    relation_set(groups={'grp_swift_vips': ' '.join(vip_group)})

    init_services = {
        'res_swift_haproxy': 'haproxy'
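For reference, a sketch of the string the corrected formatting produces for an IPv6 VIP (the address, interface and netmask values are examples only)::

    vip_params = 'ipv6addr'
    params = ('params {ip}="{vip}" cidr_netmask="{netmask}"'
              ' nic="{iface}"'.format(ip=vip_params,
                                      vip='2001:db8::10',
                                      iface='eth0',
                                      netmask='64'))
    # params == 'params ipv6addr="2001:db8::10" cidr_netmask="64" nic="eth0"'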