[gnuoy,r=james-page] Add support for revised hacluster charm.
This commit is contained in:

config.yaml (12 changed lines)
@@ -34,15 +34,8 @@ options:
            Setting this to True will force all services to log to the syslog.
    vip:
        type: string
        default:
        description: "Virtual IP to use to front openstack dashboard ha configuration"
    vip_iface:
        type: string
        default: eth0
        description: "Network Interface where to place the Virtual IP"
    vip_cidr:
        type: int
        default: 24
        description: "Netmask that will be used for the Virtual IP"
    ha-bindiface:
        type: string
        default: eth0
@@ -58,6 +51,7 @@ options:
    # User provided SSL cert and key
    ssl_cert:
        type: string
        default:
        description: |
          Base64 encoded SSL certificate to install and use for API ports.
          .
@@ -70,6 +64,7 @@ options:
          do so).
    ssl_key:
        type: string
        default:
        description: |
          Base64 encoded SSL key to use with certificate specified as ssl_cert.
    offline-compression:
@@ -86,6 +81,7 @@ options:
        description: Use Ubuntu theme for the dashboard.
    secret:
        type: string
        default:
        description: Secret for Horizon to use when securing internal data; set this when using multiple dashboard units.
    neutron-network-lb:
        type: boolean

@@ -20,20 +20,27 @@ from charmhelpers.core.hookenv import (
)


def get_cert():
def get_cert(cn=None):
    # TODO: deal with multiple https endpoints via charm config
    cert = config_get('ssl_cert')
    key = config_get('ssl_key')
    if not (cert and key):
        log("Inspecting identity-service relations for SSL certificate.",
            level=INFO)
        cert = key = None
        if cn:
            ssl_cert_attr = 'ssl_cert_{}'.format(cn)
            ssl_key_attr = 'ssl_key_{}'.format(cn)
        else:
            ssl_cert_attr = 'ssl_cert'
            ssl_key_attr = 'ssl_key'
        for r_id in relation_ids('identity-service'):
            for unit in relation_list(r_id):
                if not cert:
                    cert = relation_get('ssl_cert',
                    cert = relation_get(ssl_cert_attr,
                                        rid=r_id, unit=unit)
                if not key:
                    key = relation_get('ssl_key',
                    key = relation_get(ssl_key_attr,
                                       rid=r_id, unit=unit)
    return (cert, key)


@@ -139,10 +139,9 @@ def https():
        return True
    for r_id in relation_ids('identity-service'):
        for unit in relation_list(r_id):
            # TODO - needs fixing for new helper as ssl_cert/key suffixes with CN
            rel_state = [
                relation_get('https_keystone', rid=r_id, unit=unit),
                relation_get('ssl_cert', rid=r_id, unit=unit),
                relation_get('ssl_key', rid=r_id, unit=unit),
                relation_get('ca_cert', rid=r_id, unit=unit),
            ]
            # NOTE: works around (LP: #1203241)

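Illustrative usage sketch for the new cn argument to get_cert (the hostname below is an assumption, not taken from this commit):

    # Hypothetical caller: fetch a per-CN certificate pair published over the
    # identity-service relation; without cn the legacy ssl_cert/ssl_key keys are used.
    from charmhelpers.contrib.hahelpers.apache import get_cert

    cert, key = get_cert(cn='keystone.internal.example.com')
    if not (cert and key):
        cert, key = get_cert()  # fall back to the unsuffixed attributes
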
@@ -1,10 +1,16 @@
import glob
import re
import subprocess
import sys

from functools import partial

from charmhelpers.core.hookenv import unit_get
from charmhelpers.fetch import apt_install
from charmhelpers.core.hookenv import (
    ERROR, log, config,
    WARNING,
    ERROR,
    log
)

try:
@@ -156,19 +162,182 @@ get_iface_for_address = partial(_get_for_address, key='iface')
get_netmask_for_address = partial(_get_for_address, key='netmask')


def get_ipv6_addr(iface="eth0"):
def format_ipv6_addr(address):
    """
    IPv6 needs to be wrapped with [] in url link to parse correctly.
    """
    if is_ipv6(address):
        address = "[%s]" % address
    else:
        log("Not a valid ipv6 address: %s" % address, level=WARNING)
        address = None

    return address


def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
                   fatal=True, exc_list=None):
    """
    Return the assigned IP address for a given interface, if any, or [].
    """
    # Extract nic if passed /dev/ethX
    if '/' in iface:
        iface = iface.split('/')[-1]
    if not exc_list:
        exc_list = []
    try:
        iface_addrs = netifaces.ifaddresses(iface)
        if netifaces.AF_INET6 not in iface_addrs:
            raise Exception("Interface '%s' doesn't have an ipv6 address." % iface)
        inet_num = getattr(netifaces, inet_type)
    except AttributeError:
        raise Exception('Unknown inet type ' + str(inet_type))

        addresses = netifaces.ifaddresses(iface)[netifaces.AF_INET6]
        ipv6_addr = [a['addr'] for a in addresses if not a['addr'].startswith('fe80')
                     and config('vip') != a['addr']]
        if not ipv6_addr:
            raise Exception("Interface '%s' doesn't have global ipv6 address." % iface)
    interfaces = netifaces.interfaces()
    if inc_aliases:
        ifaces = []
        for _iface in interfaces:
            if iface == _iface or _iface.split(':')[0] == iface:
                ifaces.append(_iface)
        if fatal and not ifaces:
            raise Exception("Invalid interface '%s'" % iface)
        ifaces.sort()
    else:
        if iface not in interfaces:
            if fatal:
                raise Exception("%s not found " % (iface))
            else:
                return []
        else:
            ifaces = [iface]

        return ipv6_addr[0]
    addresses = []
    for netiface in ifaces:
        net_info = netifaces.ifaddresses(netiface)
        if inet_num in net_info:
            for entry in net_info[inet_num]:
                if 'addr' in entry and entry['addr'] not in exc_list:
                    addresses.append(entry['addr'])
    if fatal and not addresses:
        raise Exception("Interface '%s' doesn't have any %s addresses." %
                        (iface, inet_type))
    return addresses

    except ValueError:
        raise ValueError("Invalid interface '%s'" % iface)
get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET')


def get_iface_from_addr(addr):
    """Work out on which interface the provided address is configured."""
    for iface in netifaces.interfaces():
        addresses = netifaces.ifaddresses(iface)
        for inet_type in addresses:
            for _addr in addresses[inet_type]:
                _addr = _addr['addr']
                # link local
                ll_key = re.compile("(.+)%.*")
                raw = re.match(ll_key, _addr)
                if raw:
                    _addr = raw.group(1)
                if _addr == addr:
                    log("Address '%s' is configured on iface '%s'" %
                        (addr, iface))
                    return iface

    msg = "Unable to infer net iface on which '%s' is configured" % (addr)
    raise Exception(msg)


def sniff_iface(f):
    """If no iface provided, inject net iface inferred from unit private
    address.
    """
    def iface_sniffer(*args, **kwargs):
        if not kwargs.get('iface', None):
            kwargs['iface'] = get_iface_from_addr(unit_get('private-address'))

        return f(*args, **kwargs)

    return iface_sniffer


@sniff_iface
def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
                  dynamic_only=True):
    """Get assigned IPv6 address for a given interface.

    Returns list of addresses found. If no address found, returns empty list.

    If iface is None, we infer the current primary interface by doing a reverse
    lookup on the unit private-address.

    We currently only support scope global IPv6 addresses i.e. non-temporary
    addresses. If no global IPv6 address is found, return the first one found
    in the ipv6 address list.
    """
    addresses = get_iface_addr(iface=iface, inet_type='AF_INET6',
                               inc_aliases=inc_aliases, fatal=fatal,
                               exc_list=exc_list)

    if addresses:
        global_addrs = []
        for addr in addresses:
            key_scope_link_local = re.compile("^fe80::..(.+)%(.+)")
            m = re.match(key_scope_link_local, addr)
            if m:
                eui_64_mac = m.group(1)
                iface = m.group(2)
            else:
                global_addrs.append(addr)

        if global_addrs:
            # Make sure any found global addresses are not temporary
            cmd = ['ip', 'addr', 'show', iface]
            out = subprocess.check_output(cmd)
            if dynamic_only:
                key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*")
            else:
                key = re.compile("inet6 (.+)/[0-9]+ scope global.*")

            addrs = []
            for line in out.split('\n'):
                line = line.strip()
                m = re.match(key, line)
                if m and 'temporary' not in line:
                    # Return the first valid address we find
                    for addr in global_addrs:
                        if m.group(1) == addr:
                            if not dynamic_only or \
                                    m.group(1).endswith(eui_64_mac):
                                addrs.append(addr)

            if addrs:
                return addrs

    if fatal:
        raise Exception("Interface '%s' doesn't have a scope global "
                        "non-temporary ipv6 address." % iface)

    return []


def get_bridges(vnic_dir='/sys/devices/virtual/net'):
    """
    Return a list of bridges on the system or []
    """
    b_rgex = vnic_dir + '/*/bridge'
    return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_rgex)]


def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'):
    """
    Return a list of nics comprising a given bridge on the system or []
    """
    brif_rgex = "%s/%s/brif/*" % (vnic_dir, bridge)
    return [x.split('/')[-1] for x in glob.glob(brif_rgex)]


def is_bridge_member(nic):
    """
    Check if a given nic is a member of a bridge
    """
    for bridge in get_bridges():
        if nic in get_bridge_nics(bridge):
            return True
    return False

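Illustrative usage sketch for the new IPv6 helpers (addresses here are made-up examples):

    # Hypothetical: pick a global, non-temporary IPv6 address on the primary
    # interface, skipping the configured VIP, then wrap it for use in a URL.
    from charmhelpers.contrib.network.ip import get_ipv6_addr, format_ipv6_addr

    addr = get_ipv6_addr(exc_list=['2001:db8::10'])[0]   # e.g. '2001:db8::5'
    url_host = format_ipv6_addr(addr)                    # -> '[2001:db8::5]'
    db_uri = 'mysql://user:pass@%s/mydb' % url_host
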
@@ -10,32 +10,62 @@ class OpenStackAmuletDeployment(AmuletDeployment):
       that is specifically for use by OpenStack charms.
       """

    def __init__(self, series=None, openstack=None, source=None):
    def __init__(self, series=None, openstack=None, source=None, stable=True):
        """Initialize the deployment environment."""
        super(OpenStackAmuletDeployment, self).__init__(series)
        self.openstack = openstack
        self.source = source
        self.stable = stable
        # Note(coreycb): this needs to be changed when new next branches come
        # out.
        self.current_next = "trusty"

    def _determine_branch_locations(self, other_services):
        """Determine the branch locations for the other services.

           Determine if the local branch being tested is derived from its
           stable or next (dev) branch, and based on this, use the corresponding
           stable or next branches for the other_services."""
        base_charms = ['mysql', 'mongodb', 'rabbitmq-server']

        if self.stable:
            for svc in other_services:
                temp = 'lp:charms/{}'
                svc['location'] = temp.format(svc['name'])
        else:
            for svc in other_services:
                if svc['name'] in base_charms:
                    temp = 'lp:charms/{}'
                    svc['location'] = temp.format(svc['name'])
                else:
                    temp = 'lp:~openstack-charmers/charms/{}/{}/next'
                    svc['location'] = temp.format(self.current_next,
                                                  svc['name'])
        return other_services

    def _add_services(self, this_service, other_services):
        """Add services to the deployment and set openstack-origin."""
        """Add services to the deployment and set openstack-origin/source."""
        other_services = self._determine_branch_locations(other_services)

        super(OpenStackAmuletDeployment, self)._add_services(this_service,
                                                             other_services)
        name = 0

        services = other_services
        services.append(this_service)
        use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph']
        use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
                      'ceph-osd', 'ceph-radosgw']

        if self.openstack:
            for svc in services:
                if svc[name] not in use_source:
                if svc['name'] not in use_source:
                    config = {'openstack-origin': self.openstack}
                    self.d.configure(svc[name], config)
                    self.d.configure(svc['name'], config)

        if self.source:
            for svc in services:
                if svc[name] in use_source:
                if svc['name'] in use_source:
                    config = {'source': self.source}
                    self.d.configure(svc[name], config)
                    self.d.configure(svc['name'], config)

    def _configure_services(self, configs):
        """Configure all of the services."""

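Illustrative test snippet for the new stable flag and branch resolution (charm names are assumptions, not from this commit):

    # Hypothetical: deploy against next (development) branches of sibling charms.
    from charmhelpers.contrib.openstack.amulet.deployment import (
        OpenStackAmuletDeployment,
    )

    d = OpenStackAmuletDeployment(series='trusty', stable=False)
    others = d._determine_branch_locations([{'name': 'keystone'},
                                            {'name': 'mysql'}])
    # mysql stays on lp:charms/mysql, while keystone resolves to
    # lp:~openstack-charmers/charms/trusty/keystone/next.
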
@@ -187,15 +187,16 @@ class OpenStackAmuletUtils(AmuletUtils):

        f = opener.open("http://download.cirros-cloud.net/version/released")
        version = f.read().strip()
        cirros_img = "tests/cirros-{}-x86_64-disk.img".format(version)
        cirros_img = "cirros-{}-x86_64-disk.img".format(version)
        local_path = os.path.join('tests', cirros_img)

        if not os.path.exists(cirros_img):
        if not os.path.exists(local_path):
            cirros_url = "http://{}/{}/{}".format("download.cirros-cloud.net",
                                                  version, cirros_img)
            opener.retrieve(cirros_url, cirros_img)
            opener.retrieve(cirros_url, local_path)
        f.close()

        with open(cirros_img) as f:
        with open(local_path) as f:
            image = glance.images.create(name=image_name, is_public=True,
                                         disk_format='qcow2',
                                         container_format='bare', data=f)

@@ -8,7 +8,6 @@ from subprocess import (
    check_call
)


from charmhelpers.fetch import (
    apt_install,
    filter_installed_packages,
@@ -28,6 +27,11 @@ from charmhelpers.core.hookenv import (
    INFO
)

from charmhelpers.core.host import (
    mkdir,
    write_file
)

from charmhelpers.contrib.hahelpers.cluster import (
    determine_apache_port,
    determine_api_port,
@@ -38,6 +42,7 @@ from charmhelpers.contrib.hahelpers.cluster import (
from charmhelpers.contrib.hahelpers.apache import (
    get_cert,
    get_ca_cert,
    install_ca_cert,
)

from charmhelpers.contrib.openstack.neutron import (
@@ -47,6 +52,8 @@ from charmhelpers.contrib.openstack.neutron import (
from charmhelpers.contrib.network.ip import (
    get_address_in_network,
    get_ipv6_addr,
    format_ipv6_addr,
    is_address_in_network
)

CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
@@ -168,8 +175,10 @@ class SharedDBContext(OSContextGenerator):
        for rid in relation_ids('shared-db'):
            for unit in related_units(rid):
                rdata = relation_get(rid=rid, unit=unit)
                host = rdata.get('db_host')
                host = format_ipv6_addr(host) or host
                ctxt = {
                    'database_host': rdata.get('db_host'),
                    'database_host': host,
                    'database': self.database,
                    'database_user': self.user,
                    'database_password': rdata.get(password_setting),
@@ -245,10 +254,15 @@ class IdentityServiceContext(OSContextGenerator):
        for rid in relation_ids('identity-service'):
            for unit in related_units(rid):
                rdata = relation_get(rid=rid, unit=unit)
                serv_host = rdata.get('service_host')
                serv_host = format_ipv6_addr(serv_host) or serv_host
                auth_host = rdata.get('auth_host')
                auth_host = format_ipv6_addr(auth_host) or auth_host

                ctxt = {
                    'service_port': rdata.get('service_port'),
                    'service_host': rdata.get('service_host'),
                    'auth_host': rdata.get('auth_host'),
                    'service_host': serv_host,
                    'auth_host': auth_host,
                    'auth_port': rdata.get('auth_port'),
                    'admin_tenant_name': rdata.get('service_tenant'),
                    'admin_user': rdata.get('service_username'),
@@ -297,11 +311,13 @@ class AMQPContext(OSContextGenerator):
            for unit in related_units(rid):
                if relation_get('clustered', rid=rid, unit=unit):
                    ctxt['clustered'] = True
                    ctxt['rabbitmq_host'] = relation_get('vip', rid=rid,
                                                         unit=unit)
                    vip = relation_get('vip', rid=rid, unit=unit)
                    vip = format_ipv6_addr(vip) or vip
                    ctxt['rabbitmq_host'] = vip
                else:
                    ctxt['rabbitmq_host'] = relation_get('private-address',
                                                         rid=rid, unit=unit)
                    host = relation_get('private-address', rid=rid, unit=unit)
                    host = format_ipv6_addr(host) or host
                    ctxt['rabbitmq_host'] = host
                ctxt.update({
                    'rabbitmq_user': username,
                    'rabbitmq_password': relation_get('password', rid=rid,
@@ -340,8 +356,9 @@ class AMQPContext(OSContextGenerator):
                    and len(related_units(rid)) > 1:
                rabbitmq_hosts = []
                for unit in related_units(rid):
                    rabbitmq_hosts.append(relation_get('private-address',
                                                       rid=rid, unit=unit))
                    host = relation_get('private-address', rid=rid, unit=unit)
                    host = format_ipv6_addr(host) or host
                    rabbitmq_hosts.append(host)
                ctxt['rabbitmq_hosts'] = ','.join(rabbitmq_hosts)
        if not context_complete(ctxt):
            return {}
@@ -370,6 +387,7 @@ class CephContext(OSContextGenerator):
                ceph_addr = \
                    relation_get('ceph-public-address', rid=rid, unit=unit) or \
                    relation_get('private-address', rid=rid, unit=unit)
                ceph_addr = format_ipv6_addr(ceph_addr) or ceph_addr
                mon_hosts.append(ceph_addr)

        ctxt = {
@@ -404,10 +422,12 @@ class HAProxyContext(OSContextGenerator):

        cluster_hosts = {}
        l_unit = local_unit().replace('/', '-')

        if config('prefer-ipv6'):
            addr = get_ipv6_addr()
            addr = get_ipv6_addr(exc_list=[config('vip')])[0]
        else:
            addr = unit_get('private-address')

        cluster_hosts[l_unit] = get_address_in_network(config('os-internal-network'),
                                                       addr)

@@ -421,6 +441,11 @@ class HAProxyContext(OSContextGenerator):
            'units': cluster_hosts,
        }

        if config('haproxy-server-timeout'):
            ctxt['haproxy_server_timeout'] = config('haproxy-server-timeout')
        if config('haproxy-client-timeout'):
            ctxt['haproxy_client_timeout'] = config('haproxy-client-timeout')

        if config('prefer-ipv6'):
            ctxt['local_host'] = 'ip6-localhost'
            ctxt['haproxy_host'] = '::'
@@ -490,22 +515,36 @@ class ApacheSSLContext(OSContextGenerator):
        cmd = ['a2enmod', 'ssl', 'proxy', 'proxy_http']
        check_call(cmd)

    def configure_cert(self):
        if not os.path.isdir('/etc/apache2/ssl'):
            os.mkdir('/etc/apache2/ssl')
    def configure_cert(self, cn=None):
        ssl_dir = os.path.join('/etc/apache2/ssl/', self.service_namespace)
        if not os.path.isdir(ssl_dir):
            os.mkdir(ssl_dir)
        cert, key = get_cert()
        with open(os.path.join(ssl_dir, 'cert'), 'w') as cert_out:
            cert_out.write(b64decode(cert))
        with open(os.path.join(ssl_dir, 'key'), 'w') as key_out:
            key_out.write(b64decode(key))
        mkdir(path=ssl_dir)
        cert, key = get_cert(cn)
        if cn:
            cert_filename = 'cert_{}'.format(cn)
            key_filename = 'key_{}'.format(cn)
        else:
            cert_filename = 'cert'
            key_filename = 'key'
        write_file(path=os.path.join(ssl_dir, cert_filename),
                   content=b64decode(cert))
        write_file(path=os.path.join(ssl_dir, key_filename),
                   content=b64decode(key))

    def configure_ca(self):
        ca_cert = get_ca_cert()
        if ca_cert:
            with open(CA_CERT_PATH, 'w') as ca_out:
                ca_out.write(b64decode(ca_cert))
            check_call(['update-ca-certificates'])
            install_ca_cert(b64decode(ca_cert))

    def canonical_names(self):
        '''Figure out which canonical names clients will access this service'''
        cns = []
        for r_id in relation_ids('identity-service'):
            for unit in related_units(r_id):
                rdata = relation_get(rid=r_id, unit=unit)
                for k in rdata:
                    if k.startswith('ssl_key_'):
                        cns.append(k.lstrip('ssl_key_'))
        return list(set(cns))

    def __call__(self):
        if isinstance(self.external_ports, basestring):
@@ -513,21 +552,47 @@ class ApacheSSLContext(OSContextGenerator):
        if (not self.external_ports or not https()):
            return {}

        self.configure_cert()
        self.configure_ca()
        self.enable_modules()

        ctxt = {
            'namespace': self.service_namespace,
            'private_address': unit_get('private-address'),
            'endpoints': []
            'endpoints': [],
            'ext_ports': []
        }
        if is_clustered():
            ctxt['private_address'] = config('vip')

        for cn in self.canonical_names():
            self.configure_cert(cn)

        addresses = []
        vips = []
        if config('vip'):
            vips = config('vip').split()

        for network_type in ['os-internal-network',
                             'os-admin-network',
                             'os-public-network']:
            address = get_address_in_network(config(network_type),
                                             unit_get('private-address'))
            if len(vips) > 0 and is_clustered():
                for vip in vips:
                    if is_address_in_network(config(network_type),
                                             vip):
                        addresses.append((address, vip))
                        break
            elif is_clustered():
                addresses.append((address, config('vip')))
            else:
                addresses.append((address, address))

        for address, endpoint in set(addresses):
            for api_port in self.external_ports:
                ext_port = determine_apache_port(api_port)
                int_port = determine_api_port(api_port)
            portmap = (int(ext_port), int(int_port))
                portmap = (address, endpoint, int(ext_port), int(int_port))
                ctxt['endpoints'].append(portmap)
                ctxt['ext_ports'].append(int(ext_port))
        ctxt['ext_ports'] = list(set(ctxt['ext_ports']))
        return ctxt

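Rough illustration of the context shape the revised __call__ now produces (addresses and ports are invented for the example, not from this commit):

    # Illustrative only: with two networks and a clustered deployment, each
    # endpoint is a (local_address, endpoint_vip, ext_port, int_port) tuple,
    # plus the de-duplicated external ports.
    ctxt = {
        'namespace': 'cinder',
        'private_address': '10.0.1.5',
        'endpoints': [
            ('10.0.1.5', '10.0.1.100', 8776, 8766),
            ('10.0.2.5', '10.0.2.100', 8776, 8766),
        ],
        'ext_ports': [8776],
    }
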
@@ -787,3 +852,16 @@ class SyslogContext(OSContextGenerator):
            'use_syslog': config('use-syslog')
        }
        return ctxt


class BindHostContext(OSContextGenerator):

    def __call__(self):
        if config('prefer-ipv6'):
            return {
                'bind_host': '::'
            }
        else:
            return {
                'bind_host': '0.0.0.0'
            }

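Minimal, hypothetical example of wiring the new BindHostContext into a charm's template registry (file name and release are assumptions):

    # Illustrative: expose bind_host to a templated service config file.
    from charmhelpers.contrib.openstack import context, templating

    configs = templating.OSConfigRenderer(templates_dir='templates/',
                                          openstack_release='icehouse')
    configs.register('local_settings.py', [context.BindHostContext()])
    # The template can then use {{ bind_host }}, which resolves to '::'
    # when prefer-ipv6 is set and '0.0.0.0' otherwise.
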
@@ -66,7 +66,7 @@ def resolve_address(endpoint_type=PUBLIC):
                    resolved_address = vip
    else:
        if config('prefer-ipv6'):
            fallback_addr = get_ipv6_addr()
            fallback_addr = get_ipv6_addr(exc_list=[config('vip')])[0]
        else:
            fallback_addr = unit_get(_address_map[endpoint_type]['fallback'])
        resolved_address = get_address_in_network(

@@ -4,6 +4,7 @@
from collections import OrderedDict

import subprocess
import json
import os
import socket
import sys
@@ -13,7 +14,9 @@ from charmhelpers.core.hookenv import (
    log as juju_log,
    charm_dir,
    ERROR,
    INFO
    INFO,
    relation_ids,
    relation_set
)

from charmhelpers.contrib.storage.linux.lvm import (
@@ -22,6 +25,10 @@ from charmhelpers.contrib.storage.linux.lvm import (
    remove_lvm_physical_volume,
)

from charmhelpers.contrib.network.ip import (
    get_ipv6_addr
)

from charmhelpers.core.host import lsb_release, mounts, umount
from charmhelpers.fetch import apt_install, apt_cache
from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
@@ -70,6 +77,7 @@ SWIFT_CODENAMES = OrderedDict([
    ('1.13.0', 'icehouse'),
    ('1.12.0', 'icehouse'),
    ('1.11.0', 'icehouse'),
    ('2.0.0', 'juno'),
])

DEFAULT_LOOPBACK_SIZE = '5G'
@@ -456,3 +464,21 @@ def get_hostname(address, fqdn=True):
            return result
    else:
        return result.split('.')[0]


def sync_db_with_multi_ipv6_addresses(database, database_user,
                                      relation_prefix=None):
    hosts = get_ipv6_addr(dynamic_only=False)

    kwargs = {'database': database,
              'username': database_user,
              'hostname': json.dumps(hosts)}

    if relation_prefix:
        keys = kwargs.keys()
        for key in keys:
            kwargs["%s_%s" % (relation_prefix, key)] = kwargs[key]
            del kwargs[key]

    for rid in relation_ids('shared-db'):
        relation_set(relation_id=rid, **kwargs)

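Hypothetical hook snippet showing how a charm might call the new helper when prefer-ipv6 is set (database name and prefix are examples, not from this commit):

    # Illustrative: advertise all global IPv6 addresses on the shared-db relation.
    from charmhelpers.core.hookenv import config
    from charmhelpers.contrib.openstack.utils import (
        sync_db_with_multi_ipv6_addresses,
    )

    def shared_db_joined():
        if config('prefer-ipv6'):
            sync_db_with_multi_ipv6_addresses('cinder', 'cinder',
                                              relation_prefix='cinder')
        # otherwise fall back to the usual single-address relation data
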
@@ -156,12 +156,15 @@ def hook_name():


class Config(dict):
    """A Juju charm config dictionary that can write itself to
    disk (as json) and track which values have changed since
    the previous hook invocation.
    """A dictionary representation of the charm's config.yaml, with some
    extra features:

    Do not instantiate this object directly - instead call
    ``hookenv.config()``
    - See which values in the dictionary have changed since the previous hook.
    - For values that have changed, see what the previous value was.
    - Store arbitrary data for use in a later hook.

    NOTE: Do not instantiate this object directly - instead call
    ``hookenv.config()``, which will return an instance of :class:`Config`.

    Example usage::

@@ -170,8 +173,8 @@ class Config(dict):
        >>> config = hookenv.config()
        >>> config['foo']
        'bar'
        >>> # store a new key/value for later use
        >>> config['mykey'] = 'myval'
        >>> config.save()


        >>> # user runs `juju set mycharm foo=baz`
@@ -188,22 +191,34 @@ class Config(dict):
        >>> # keys/values that we add are preserved across hooks
        >>> config['mykey']
        'myval'
        >>> # don't forget to save at the end of hook!
        >>> config.save()

    """
    CONFIG_FILE_NAME = '.juju-persistent-config'

    def __init__(self, *args, **kw):
        super(Config, self).__init__(*args, **kw)
        self.implicit_save = True
        self._prev_dict = None
        self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
        if os.path.exists(self.path):
            self.load_previous()

    def __getitem__(self, key):
        """For regular dict lookups, check the current juju config first,
        then the previous (saved) copy. This ensures that user-saved values
        will be returned by a dict lookup.

        """
        try:
            return dict.__getitem__(self, key)
        except KeyError:
            return (self._prev_dict or {})[key]

    def load_previous(self, path=None):
        """Load previous copy of config from disk so that current values
        can be compared to previous values.
        """Load previous copy of config from disk.

        In normal usage you don't need to call this method directly - it
        is called automatically at object initialization.

        :param path:

@@ -218,8 +233,8 @@ class Config(dict):
            self._prev_dict = json.load(f)

    def changed(self, key):
        """Return true if the value for this key has changed since
        the last save.
        """Return True if the current value for this key is different from
        the previous value.

        """
        if self._prev_dict is None:
@@ -228,7 +243,7 @@ class Config(dict):

    def previous(self, key):
        """Return previous value for this key, or None if there
        is no "previous" value.
        is no previous value.

        """
        if self._prev_dict:
@@ -238,7 +253,13 @@ class Config(dict):
    def save(self):
        """Save this config to disk.

        Preserves items in _prev_dict that do not exist in self.
        If the charm is using the :mod:`Services Framework <services.base>`
        or :meth:'@hook <Hooks.hook>' decorator, this
        is called automatically at the end of successful hook execution.
        Otherwise, it should be called directly by user code.

        To disable automatic saves, set ``implicit_save=False`` on this
        instance.

        """
        if self._prev_dict:
@@ -465,9 +486,10 @@ class Hooks(object):
            hooks.execute(sys.argv)
    """

    def __init__(self):
    def __init__(self, config_save=True):
        super(Hooks, self).__init__()
        self._hooks = {}
        self._config_save = config_save

    def register(self, name, function):
        """Register a hook"""
@@ -478,6 +500,10 @@ class Hooks(object):
        hook_name = os.path.basename(args[0])
        if hook_name in self._hooks:
            self._hooks[hook_name]()
            if self._config_save:
                cfg = config()
                if cfg.implicit_save:
                    cfg.save()
        else:
            raise UnregisteredHookError(hook_name)

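Small, hedged example of opting out of the new automatic config save (the hook name is illustrative):

    # Illustrative: a charm that manages config persistence itself.
    from charmhelpers.core import hookenv

    hooks = hookenv.Hooks(config_save=False)

    @hooks.hook('config-changed')
    def config_changed():
        cfg = hookenv.config()
        # ... apply changes ...
        cfg.save()  # explicit save, since implicit saving was disabled
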
@@ -68,8 +68,8 @@ def service_available(service_name):
    """Determine whether a system service is available"""
    try:
        subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError:
        return False
    except subprocess.CalledProcessError as e:
        return 'unrecognized service' not in e.output
    else:
        return True

@@ -209,10 +209,15 @@ def mounts():
    return system_mounts


def file_hash(path):
    """Generate a md5 hash of the contents of 'path' or None if not found """
def file_hash(path, hash_type='md5'):
    """
    Generate a hash checksum of the contents of 'path' or None if not found.

    :param str hash_type: Any hash algorithm supported by :mod:`hashlib`,
                          such as md5, sha1, sha256, sha512, etc.
    """
    if os.path.exists(path):
        h = hashlib.md5()
        h = getattr(hashlib, hash_type)()
        with open(path, 'r') as source:
            h.update(source.read())  # IGNORE:E1101 - it does have update
        return h.hexdigest()
@@ -220,6 +225,26 @@ def file_hash(path):
        return None


def check_hash(path, checksum, hash_type='md5'):
    """
    Validate a file using a cryptographic checksum.

    :param str checksum: Value of the checksum used to validate the file.
    :param str hash_type: Hash algorithm used to generate `checksum`.
        Can be any hash algorithm supported by :mod:`hashlib`,
        such as md5, sha1, sha256, sha512, etc.
    :raises ChecksumError: If the file fails the checksum

    """
    actual_checksum = file_hash(path, hash_type)
    if checksum != actual_checksum:
        raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum))


class ChecksumError(ValueError):
    pass


def restart_on_change(restart_map, stopstart=False):
    """Restart services based on configuration files changing

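Brief sketch of the new hashing helpers in use (the file path and digest below are placeholders):

    # Illustrative: verify a downloaded payload before unpacking it.
    from charmhelpers.core.host import file_hash, check_hash, ChecksumError

    digest = file_hash('/tmp/payload.tgz', hash_type='sha256')
    try:
        check_hash('/tmp/payload.tgz', 'expected-sha256-digest', 'sha256')
    except ChecksumError:
        # refuse to install a corrupted or tampered archive
        raise
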
@@ -118,6 +118,9 @@ class ServiceManager(object):
        else:
            self.provide_data()
            self.reconfigure_services()
        cfg = hookenv.config()
        if cfg.implicit_save:
            cfg.save()

    def provide_data(self):
        """

@@ -1,3 +1,5 @@
import os
import yaml
from charmhelpers.core import hookenv
from charmhelpers.core import templating

@@ -19,15 +21,21 @@ class RelationContext(dict):
    the `name` attribute that are complete will be used to populate the dictionary
    values (see `get_data`, below).

    The generated context will be namespaced under the interface type, to prevent
    potential naming conflicts.
    The generated context will be namespaced under the relation :attr:`name`,
    to prevent potential naming conflicts.

    :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
    :param list additional_required_keys: Extend the list of :attr:`required_keys`
    """
    name = None
    interface = None
    required_keys = []

    def __init__(self, *args, **kwargs):
        super(RelationContext, self).__init__(*args, **kwargs)
    def __init__(self, name=None, additional_required_keys=None):
        if name is not None:
            self.name = name
        if additional_required_keys is not None:
            self.required_keys.extend(additional_required_keys)
        self.get_data()

    def __bool__(self):
@@ -101,9 +109,115 @@ class RelationContext(dict):
        return {}


class MysqlRelation(RelationContext):
    """
    Relation context for the `mysql` interface.

    :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
    :param list additional_required_keys: Extend the list of :attr:`required_keys`
    """
    name = 'db'
    interface = 'mysql'
    required_keys = ['host', 'user', 'password', 'database']


class HttpRelation(RelationContext):
    """
    Relation context for the `http` interface.

    :param str name: Override the relation :attr:`name`, since it can vary from charm to charm
    :param list additional_required_keys: Extend the list of :attr:`required_keys`
    """
    name = 'website'
    interface = 'http'
    required_keys = ['host', 'port']

    def provide_data(self):
        return {
            'host': hookenv.unit_get('private-address'),
            'port': 80,
        }


class RequiredConfig(dict):
    """
    Data context that loads config options with one or more mandatory options.

    Once the required options have been changed from their default values, all
    config options will be available, namespaced under `config` to prevent
    potential naming conflicts (for example, between a config option and a
    relation property).

    :param list *args: List of options that must be changed from their default values.
    """

    def __init__(self, *args):
        self.required_options = args
        self['config'] = hookenv.config()
        with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp:
            self.config = yaml.load(fp).get('options', {})

    def __bool__(self):
        for option in self.required_options:
            if option not in self['config']:
                return False
            current_value = self['config'][option]
            default_value = self.config[option].get('default')
            if current_value == default_value:
                return False
            if current_value in (None, '') and default_value in (None, ''):
                return False
        return True

    def __nonzero__(self):
        return self.__bool__()


class StoredContext(dict):
    """
    A data context that always returns the data that it was first created with.

    This is useful to do a one-time generation of things like passwords, that
    will thereafter use the same value that was originally generated, instead
    of generating a new value each time it is run.
    """
    def __init__(self, file_name, config_data):
        """
        If the file exists, populate `self` with the data from the file.
        Otherwise, populate with the given data and persist it to the file.
        """
        if os.path.exists(file_name):
            self.update(self.read_context(file_name))
        else:
            self.store_context(file_name, config_data)
            self.update(config_data)

    def store_context(self, file_name, config_data):
        if not os.path.isabs(file_name):
            file_name = os.path.join(hookenv.charm_dir(), file_name)
        with open(file_name, 'w') as file_stream:
            os.fchmod(file_stream.fileno(), 0600)
            yaml.dump(config_data, file_stream)

    def read_context(self, file_name):
        if not os.path.isabs(file_name):
            file_name = os.path.join(hookenv.charm_dir(), file_name)
        with open(file_name, 'r') as file_stream:
            data = yaml.load(file_stream)
            if not data:
                raise OSError("%s is empty" % file_name)
            return data


class TemplateCallback(ManagerCallback):
    """
    Callback class that will render a template, for use as a ready action.
    Callback class that will render a Jinja2 template, for use as a ready action.

    :param str source: The template source file, relative to `$CHARM_DIR/templates`
    :param str target: The target to write the rendered template to
    :param str owner: The owner of the rendered file
    :param str group: The group of the rendered file
    :param int perms: The permissions of the rendered file
    """
    def __init__(self, source, target, owner='root', group='root', perms=0444):
        self.source = source

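Hedged sketch of how a charm might use the revised RelationContext constructor (the relation name, extra key, and render function are invented for illustration):

    # Illustrative: reuse MysqlRelation under a charm-specific relation name
    # and require an extra key before the context is considered complete.
    from charmhelpers.core.services.helpers import MysqlRelation

    db = MysqlRelation(name='shared-db',
                       additional_required_keys=['allowed_units'])
    if db:
        # data is namespaced under the relation name,
        # e.g. db['shared-db'][0]['host']
        render_db_config(db)  # render_db_config is a hypothetical helper
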
@@ -208,7 +208,8 @@ def add_source(source, key=None):
    """Add a package source to this system.

    @param source: a URL or sources.list entry, as supported by
    add-apt-repository(1). Examples:
    add-apt-repository(1). Examples::

        ppa:charmers/example
        deb https://stub:key@private.example.com/ubuntu trusty main

@@ -311,22 +312,35 @@ def configure_sources(update=False,
        apt_update(fatal=True)


def install_remote(source):
def install_remote(source, *args, **kwargs):
    """
    Install a file tree from a remote source

    The specified source should be a url of the form:
        scheme://[host]/path[#[option=value][&...]]

    Schemes supported are based on this module's submodules
    Options supported are submodule-specific"""
    Schemes supported are based on this module's submodules.
    Options supported are submodule-specific.
    Additional arguments are passed through to the submodule.

    For example::

        dest = install_remote('http://example.com/archive.tgz',
                              checksum='deadbeef',
                              hash_type='sha1')

    This will download `archive.tgz`, validate it using SHA1 and, if
    the file is ok, extract it and return the directory in which it
    was extracted.  If the checksum fails, it will raise
    :class:`charmhelpers.core.host.ChecksumError`.
    """
    # We ONLY check for True here because can_handle may return a string
    # explaining why it can't handle a given source.
    handlers = [h for h in plugins() if h.can_handle(source) is True]
    installed_to = None
    for handler in handlers:
        try:
            installed_to = handler.install(source)
            installed_to = handler.install(source, *args, **kwargs)
        except UnhandledSource:
            pass
    if not installed_to:

@@ -1,6 +1,8 @@
import os
import urllib2
from urllib import urlretrieve
import urlparse
import hashlib

from charmhelpers.fetch import (
    BaseFetchHandler,
@@ -10,11 +12,19 @@ from charmhelpers.payload.archive import (
    get_archive_handler,
    extract,
)
from charmhelpers.core.host import mkdir
from charmhelpers.core.host import mkdir, check_hash


class ArchiveUrlFetchHandler(BaseFetchHandler):
    """Handler for archives via generic URLs"""
    """
    Handler to download archive files from arbitrary URLs.

    Can fetch from http, https, ftp, and file URLs.

    Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files.

    Installs the contents of the archive in $CHARM_DIR/fetched/.
    """
    def can_handle(self, source):
        url_parts = self.parse_url(source)
        if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
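can_handle() accepts or rejects a source mainly on its URL scheme; a rough sketch of the behaviour described above (placeholder URLs, return values shown as comments)::

    from charmhelpers.fetch.archiveurl import ArchiveUrlFetchHandler

    handler = ArchiveUrlFetchHandler()
    handler.can_handle('http://example.com/files.tgz')  # True
    handler.can_handle('ppa:charmers/example')          # a string explaining the rejection, not True
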
@@ -24,6 +34,12 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
        return False

    def download(self, source, dest):
        """
        Download an archive file.

        :param str source: URL pointing to an archive file.
        :param str dest: Local path location to download archive file to.
        """
        # propagate all exceptions
        # URLError, OSError, etc
        proto, netloc, path, params, query, fragment = urlparse.urlparse(source)
@@ -48,7 +64,30 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
                os.unlink(dest)
            raise e

    def install(self, source):
    # Mandatory file validation via SHA1 or MD5 hashing.
    def download_and_validate(self, url, hashsum, validate="sha1"):
        tempfile, headers = urlretrieve(url)
        check_hash(tempfile, hashsum, validate)
        return tempfile

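A standalone sketch of the new download_and_validate() helper (placeholder URL and digest; check_hash raises charmhelpers.core.host.ChecksumError on a mismatch)::

    from charmhelpers.fetch.archiveurl import ArchiveUrlFetchHandler

    handler = ArchiveUrlFetchHandler()
    # Fetch to a temporary file and verify its SHA1 digest before using it.
    path = handler.download_and_validate(
        'http://example.com/pkg.tgz',
        'deadbeefdeadbeefdeadbeefdeadbeefdeadbeef')
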
    def install(self, source, dest=None, checksum=None, hash_type='sha1'):
        """
        Download and install an archive file, with optional checksum validation.

        The checksum can also be given on the `source` URL's fragment.
        For example::

            handler.install('http://example.com/file.tgz#sha1=deadbeef')

        :param str source: URL pointing to an archive file.
        :param str dest: Local destination path to install to. If not given,
            installs to `$CHARM_DIR/archives/archive_file_name`.
        :param str checksum: If given, validate the archive file after download.
        :param str hash_type: Algorithm used to generate `checksum`.
            Can be any hash algorithm supported by :mod:`hashlib`,
            such as md5, sha1, sha256, sha512, etc.

        """
        url_parts = self.parse_url(source)
        dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
        if not os.path.exists(dest_dir):
@@ -60,4 +99,10 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
            raise UnhandledSource(e.reason)
        except OSError as e:
            raise UnhandledSource(e.strerror)
        return extract(dld_file)
        options = urlparse.parse_qs(url_parts.fragment)
        for key, value in options.items():
            if key in hashlib.algorithms:
                check_hash(dld_file, value, key)
        if checksum:
            check_hash(dld_file, checksum, hash_type)
        return extract(dld_file, dest)

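For clarity on the fragment handling added above, this is roughly what the Python 2 urlparse calls yield for a checksum-carrying URL (placeholder digest)::

    import urlparse

    fragment = urlparse.urlparse('http://example.com/file.tgz#sha1=deadbeef').fragment
    print(urlparse.parse_qs(fragment))
    # {'sha1': ['deadbeef']}  -- each key matching a hashlib algorithm is then checked
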
@@ -32,6 +32,10 @@ from horizon_utils import (
    enable_ssl,
    do_openstack_upgrade
)
from charmhelpers.contrib.network.ip import (
    get_iface_for_address,
    get_netmask_for_address,
)
from charmhelpers.contrib.hahelpers.apache import install_ca_cert
from charmhelpers.contrib.hahelpers.cluster import get_hacluster_config
from charmhelpers.payload.execd import execd_preinstall
@@ -114,15 +118,30 @@ def cluster_relation():
def ha_relation_joined():
    config = get_hacluster_config()
    resources = {
        'res_horizon_vip': 'ocf:heartbeat:IPaddr2',
        'res_horizon_haproxy': 'lsb:haproxy'
    }
    vip_params = 'params ip="{}" cidr_netmask="{}" nic="{}"'.format(
        config['vip'], config['vip_cidr'], config['vip_iface'])

    resource_params = {
        'res_horizon_vip': vip_params,
        'res_horizon_haproxy': 'op monitor interval="5s"'
    }

    vip_group = []
    for vip in config['vip'].split():
        iface = get_iface_for_address(vip)
        if iface is not None:
            vip_key = 'res_horizon_{}_vip'.format(iface)
            resources[vip_key] = 'ocf:heartbeat:IPaddr2'
            resource_params[vip_key] = (
                'params ip="{vip}" cidr_netmask="{netmask}"'
                ' nic="{iface}"'.format(vip=vip,
                                        iface=iface,
                                        netmask=get_netmask_for_address(vip))
            )
            vip_group.append(vip_key)

    if len(vip_group) > 1:
        relation_set(groups={'grp_horizon_vips': ' '.join(vip_group)})

    init_services = {
        'res_horizon_haproxy': 'haproxy'
    }

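With this change the hook derives the interface and netmask per VIP instead of reading vip_iface/vip_cidr; using the values from the unit test below (192.168.25.163 on eth101 with a /19 netmask), the data handed to the hacluster relation ends up roughly as::

    resources = {
        'res_horizon_eth101_vip': 'ocf:heartbeat:IPaddr2',
        'res_horizon_haproxy': 'lsb:haproxy',
    }
    resource_params = {
        'res_horizon_eth101_vip': ('params ip="192.168.25.163" '
                                   'cidr_netmask="19" nic="eth101"'),
        'res_horizon_haproxy': 'op monitor interval="5s"',
    }
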
@@ -29,7 +29,10 @@ TO_PATCH = [
    'log',
    'execd_preinstall',
    'b64decode',
    'os_release']
    'os_release',
    'get_iface_for_address',
    'get_netmask_for_address',
]


def passthrough(value):
@@ -42,6 +45,7 @@ class TestHorizonHooks(CharmTestCase):
        super(TestHorizonHooks, self).setUp(hooks, TO_PATCH)
        self.config.side_effect = self.test_config.get
        self.b64decode.side_effect = passthrough
        hooks.hooks._config_save = False

    def _call_hook(self, hookname):
        hooks.hooks.execute([
@@ -59,14 +63,18 @@ class TestHorizonHooks(CharmTestCase):
        self.os_release.return_value = 'icehouse'
        self._call_hook('install')
        for pkg in ['nodejs', 'node-less']:
            self.assertFalse(pkg in self.filter_installed_packages.call_args[0][0])
            self.assertFalse(
                pkg in self.filter_installed_packages.call_args[0][0]
            )
        self.apt_install.assert_called()

    def test_install_hook_pre_icehouse_pkgs(self):
        self.os_release.return_value = 'grizzly'
        self._call_hook('install')
        for pkg in ['nodejs', 'node-less']:
            self.assertTrue(pkg in self.filter_installed_packages.call_args[0][0])
            self.assertTrue(
                pkg in self.filter_installed_packages.call_args[0][0]
            )
        self.apt_install.assert_called()

    @patch('charmhelpers.core.host.file_hash')
@@ -94,6 +102,8 @@ class TestHorizonHooks(CharmTestCase):
            'vip_iface': 'eth101',
            'vip_cidr': '19'
        }
        self.get_iface_for_address.return_value = 'eth101'
        self.get_netmask_for_address.return_value = '19'
        self.get_hacluster_config.return_value = conf
        self._call_hook('ha-relation-joined')
        ex_args = {
@@ -101,7 +111,7 @@ class TestHorizonHooks(CharmTestCase):
            'init_services': {
                'res_horizon_haproxy': 'haproxy'},
            'resource_params': {
                'res_horizon_vip':
                'res_horizon_eth101_vip':
                'params ip="192.168.25.163" cidr_netmask="19"'
                ' nic="eth101"',
                'res_horizon_haproxy': 'op monitor interval="5s"'},
@@ -109,7 +119,7 @@ class TestHorizonHooks(CharmTestCase):
            'clones': {
                'cl_horizon_haproxy': 'res_horizon_haproxy'},
            'resources': {
                'res_horizon_vip': 'ocf:heartbeat:IPaddr2',
                'res_horizon_eth101_vip': 'ocf:heartbeat:IPaddr2',
                'res_horizon_haproxy': 'lsb:haproxy'}
        }
        self.relation_set.assert_called_with(**ex_args)