[xianghui,dosaboy,r=james-page,t=gema] Add IPv6 support using prefer-ipv6 flag

This commit is contained in: 18ae01a279

Makefile (4 changes)
@@ -5,9 +5,5 @@ lint:
	@flake8 --exclude hooks/charmhelpers hooks
	@charm proof

test:
	@echo Starting tests...
	@$(PYTHON) /usr/bin/nosetests --nologcapture --with-coverage unit_tests

sync:
	@charm-helper-sync -c charm-helpers.yaml
@@ -6,3 +6,4 @@ include:
    - contrib.storage.linux
    - contrib.hahelpers
    - contrib.storage
+   - contrib.network.ip
config.yaml (16 changes)
@@ -48,3 +48,19 @@ options:
    description: |
      Time period between checks of resource health. It consists of a number
      and a time factor, e.g. 5s = 5 seconds. 2m = 2 minutes.
+  netmtu:
+    type: int
+    default: 1500
+    description: MTU size corosync uses for communication.
+  prefer-ipv6:
+    type: boolean
+    default: False
+    description: |
+      If True, enables IPv6 support. The charm will expect network interfaces
+      to be configured with an IPv6 address. If set to False (default), IPv4
+      is expected.
+      .
+      NOTE: these charms do not currently support IPv6 privacy extension. In
+      order for this charm to function correctly, the privacy extension must be
+      disabled and a non-temporary address must be configured/available on
+      your network interface.
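For context, a charm hook would typically consume this new option through charmhelpers; a minimal sketch, assuming the option names above (the fallback logic is illustrative, not part of this commit):

    from charmhelpers.core.hookenv import config, unit_get
    from charmhelpers.contrib.network.ip import get_ipv6_addr

    def get_host_addr():
        # With prefer-ipv6=True, resolve a global (non-temporary) IPv6
        # address instead of relying on the Juju private-address.
        if config('prefer-ipv6'):
            return get_ipv6_addr()[0]
        return unit_get('private-address')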
@@ -20,20 +20,27 @@ from charmhelpers.core.hookenv import (
)


-def get_cert():
+def get_cert(cn=None):
    # TODO: deal with multiple https endpoints via charm config
    cert = config_get('ssl_cert')
    key = config_get('ssl_key')
    if not (cert and key):
        log("Inspecting identity-service relations for SSL certificate.",
            level=INFO)
        cert = key = None
+        if cn:
+            ssl_cert_attr = 'ssl_cert_{}'.format(cn)
+            ssl_key_attr = 'ssl_key_{}'.format(cn)
+        else:
+            ssl_cert_attr = 'ssl_cert'
+            ssl_key_attr = 'ssl_key'
        for r_id in relation_ids('identity-service'):
            for unit in relation_list(r_id):
                if not cert:
-                    cert = relation_get('ssl_cert',
+                    cert = relation_get(ssl_cert_attr,
                                        rid=r_id, unit=unit)
                if not key:
-                    key = relation_get('ssl_key',
+                    key = relation_get(ssl_key_attr,
                                       rid=r_id, unit=unit)
    return (cert, key)
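A sketch of how a consumer might use the new cn parameter to pick up per-hostname certificates published on the identity-service relation (module path and CN value are assumptions, not confirmed by this diff):

    from charmhelpers.contrib.hahelpers.apache import get_cert

    # Looks for ssl_cert_<cn>/ssl_key_<cn> on the relation, after first
    # consulting the charm's own ssl_cert/ssl_key config.
    cert, key = get_cert(cn='keystone.example.com')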
@@ -6,6 +6,11 @@
#  Adam Gandelman <adamg@ubuntu.com>
#

+"""
+Helpers for clustering and determining "cluster leadership" and other
+clustering-related helpers.
+"""
+
import subprocess
import os

@@ -19,6 +24,7 @@ from charmhelpers.core.hookenv import (
    config as config_get,
    INFO,
    ERROR,
+    WARNING,
    unit_get,
)

@@ -27,6 +33,29 @@ class HAIncompleteConfig(Exception):
    pass


+def is_elected_leader(resource):
+    """
+    Returns True if the charm executing this is the elected cluster leader.
+
+    It relies on two mechanisms to determine leadership:
+        1. If the charm is part of a corosync cluster, call corosync to
+           determine leadership.
+        2. If the charm is not part of a corosync cluster, the leader is
+           determined as being "the alive unit with the lowest unit number".
+           In other words, the oldest surviving unit.
+    """
+    if is_clustered():
+        if not is_crm_leader(resource):
+            log('Deferring action to CRM leader.', level=INFO)
+            return False
+    else:
+        peers = peer_units()
+        if peers and not oldest_peer(peers):
+            log('Deferring action to oldest service unit.', level=INFO)
+            return False
+    return True
+
+
def is_clustered():
    for r_id in (relation_ids('ha') or []):
        for unit in (relation_list(r_id) or []):
@@ -38,7 +67,11 @@ def is_clustered():
    return False


-def is_leader(resource):
+def is_crm_leader(resource):
+    """
+    Returns True if the charm calling this is the elected corosync leader,
+    as returned by calling the external "crm" command.
+    """
    cmd = [
        "crm", "resource",
        "show", resource
@@ -54,15 +87,31 @@ def is_leader(resource):
    return False


-def peer_units():
+def is_leader(resource):
+    log("is_leader is deprecated. Please consider using is_crm_leader "
+        "instead.", level=WARNING)
+    return is_crm_leader(resource)
+
+
+def peer_units(peer_relation="cluster"):
    peers = []
-    for r_id in (relation_ids('cluster') or []):
+    for r_id in (relation_ids(peer_relation) or []):
        for unit in (relation_list(r_id) or []):
            peers.append(unit)
    return peers
+
+
+def peer_ips(peer_relation='cluster', addr_key='private-address'):
+    '''Return a dict of peers and their private-address'''
+    peers = {}
+    for r_id in relation_ids(peer_relation):
+        for unit in relation_list(r_id):
+            peers[unit] = relation_get(addr_key, rid=r_id, unit=unit)
+    return peers


def oldest_peer(peers):
    """Determines who the oldest peer is by comparing unit numbers."""
    local_unit_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
    for peer in peers:
        remote_unit_no = int(peer.split('/')[1])
@@ -72,16 +121,9 @@ def oldest_peer(peers):


def eligible_leader(resource):
-    if is_clustered():
-        if not is_leader(resource):
-            log('Deferring action to CRM leader.', level=INFO)
-            return False
-    else:
-        peers = peer_units()
-        if peers and not oldest_peer(peers):
-            log('Deferring action to oldest service unit.', level=INFO)
-            return False
-    return True
+    log("eligible_leader is deprecated. Please consider using "
+        "is_elected_leader instead.", level=WARNING)
+    return is_elected_leader(resource)


def https():
@@ -97,10 +139,9 @@ def https():
        return True
    for r_id in relation_ids('identity-service'):
        for unit in relation_list(r_id):
+            # TODO - needs fixing for new helper as ssl_cert/key suffixes with CN
            rel_state = [
                relation_get('https_keystone', rid=r_id, unit=unit),
-                relation_get('ssl_cert', rid=r_id, unit=unit),
-                relation_get('ssl_key', rid=r_id, unit=unit),
                relation_get('ca_cert', rid=r_id, unit=unit),
            ]
            # NOTE: works around (LP: #1203241)
@@ -146,12 +187,12 @@ def get_hacluster_config():
    Obtains all relevant configuration from charm configuration required
    for initiating a relation to hacluster:

-        ha-bindiface, ha-mcastport, vip, vip_iface, vip_cidr
+        ha-bindiface, ha-mcastport, vip

    returns: dict: A dict containing settings keyed by setting name.
    raises: HAIncompleteConfig if settings are missing.
    '''
-    settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'vip_iface', 'vip_cidr']
+    settings = ['ha-bindiface', 'ha-mcastport', 'vip']
    conf = {}
    for setting in settings:
        conf[setting] = config_get(setting)
@@ -170,6 +211,7 @@ def canonical_url(configs, vip_setting='vip'):

    :configs    : OSTemplateRenderer: A config templating object to inspect for
                  a complete https context.

    :vip_setting: str: Setting in charm config that specifies
                       VIP address.
    '''
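A minimal sketch of guarding a one-shot action behind the new helper, assuming a corosync resource named res_ks_vip (both names illustrative):

    from charmhelpers.contrib.hahelpers.cluster import is_elected_leader

    def maybe_migrate_database():
        if not is_elected_leader('res_ks_vip'):
            return  # a peer unit will (or already did) run the migration
        migrate_database()  # hypothetical charm-specific helper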
hooks/charmhelpers/contrib/network/__init__.py (new file, 0 lines)

hooks/charmhelpers/contrib/network/ip.py (new file, 343 lines)
@@ -0,0 +1,343 @@
import glob
import re
import subprocess
import sys

from functools import partial

from charmhelpers.core.hookenv import unit_get
from charmhelpers.fetch import apt_install
from charmhelpers.core.hookenv import (
    WARNING,
    ERROR,
    log
)

try:
    import netifaces
except ImportError:
    apt_install('python-netifaces')
    import netifaces

try:
    import netaddr
except ImportError:
    apt_install('python-netaddr')
    import netaddr


def _validate_cidr(network):
    try:
        netaddr.IPNetwork(network)
    except (netaddr.core.AddrFormatError, ValueError):
        raise ValueError("Network (%s) is not in CIDR presentation format" %
                         network)


def get_address_in_network(network, fallback=None, fatal=False):
    """
    Get an IPv4 or IPv6 address within the network from the host.

    :param network (str): CIDR presentation format. For example,
        '192.168.1.0/24'.
    :param fallback (str): If no address is found, return fallback.
    :param fatal (boolean): If no address is found, fallback is not
        set and fatal is True then exit(1).

    """

    def not_found_error_out():
        log("No IP address found in network: %s" % network,
            level=ERROR)
        sys.exit(1)

    if network is None:
        if fallback is not None:
            return fallback
        else:
            if fatal:
                not_found_error_out()

    _validate_cidr(network)
    network = netaddr.IPNetwork(network)
    for iface in netifaces.interfaces():
        addresses = netifaces.ifaddresses(iface)
        if network.version == 4 and netifaces.AF_INET in addresses:
            addr = addresses[netifaces.AF_INET][0]['addr']
            netmask = addresses[netifaces.AF_INET][0]['netmask']
            cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
            if cidr in network:
                return str(cidr.ip)
        if network.version == 6 and netifaces.AF_INET6 in addresses:
            for addr in addresses[netifaces.AF_INET6]:
                if not addr['addr'].startswith('fe80'):
                    cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
                                                        addr['netmask']))
                    if cidr in network:
                        return str(cidr.ip)

    if fallback is not None:
        return fallback

    if fatal:
        not_found_error_out()

    return None
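For example, a charm might select its cluster address like this (network and fallback illustrative):

    from charmhelpers.contrib.network.ip import get_address_in_network
    from charmhelpers.core.hookenv import unit_get

    addr = get_address_in_network('10.0.3.0/24',
                                  fallback=unit_get('private-address'))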
def is_ipv6(address):
    '''Determine whether provided address is IPv6 or not'''
    try:
        address = netaddr.IPAddress(address)
    except netaddr.AddrFormatError:
        # probably a hostname - so not an address at all!
        return False
    else:
        return address.version == 6


def is_address_in_network(network, address):
    """
    Determine whether the provided address is within a network range.

    :param network (str): CIDR presentation format. For example,
        '192.168.1.0/24'.
    :param address: An individual IPv4 or IPv6 address without a net
        mask or subnet prefix. For example, '192.168.1.1'.
    :returns boolean: Flag indicating whether address is in network.
    """
    try:
        network = netaddr.IPNetwork(network)
    except (netaddr.core.AddrFormatError, ValueError):
        raise ValueError("Network (%s) is not in CIDR presentation format" %
                         network)
    try:
        address = netaddr.IPAddress(address)
    except (netaddr.core.AddrFormatError, ValueError):
        raise ValueError("Address (%s) is not in correct presentation format" %
                         address)
    if address in network:
        return True
    else:
        return False
def _get_for_address(address, key):
    """Retrieve an attribute of or the physical interface that
    the IP address provided could be bound to.

    :param address (str): An individual IPv4 or IPv6 address without a net
        mask or subnet prefix. For example, '192.168.1.1'.
    :param key: 'iface' for the physical interface name or an attribute
        of the configured interface, for example 'netmask'.
    :returns str: Requested attribute or None if address is not bindable.
    """
    address = netaddr.IPAddress(address)
    for iface in netifaces.interfaces():
        addresses = netifaces.ifaddresses(iface)
        if address.version == 4 and netifaces.AF_INET in addresses:
            addr = addresses[netifaces.AF_INET][0]['addr']
            netmask = addresses[netifaces.AF_INET][0]['netmask']
            cidr = netaddr.IPNetwork("%s/%s" % (addr, netmask))
            if address in cidr:
                if key == 'iface':
                    return iface
                else:
                    return addresses[netifaces.AF_INET][0][key]
        if address.version == 6 and netifaces.AF_INET6 in addresses:
            for addr in addresses[netifaces.AF_INET6]:
                if not addr['addr'].startswith('fe80'):
                    cidr = netaddr.IPNetwork("%s/%s" % (addr['addr'],
                                                        addr['netmask']))
                    if address in cidr:
                        if key == 'iface':
                            return iface
                        else:
                            return addr[key]
    return None


get_iface_for_address = partial(_get_for_address, key='iface')

get_netmask_for_address = partial(_get_for_address, key='netmask')
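The two partials cover the common lookups; a usage sketch (address illustrative):

    from charmhelpers.contrib.network.ip import (
        get_iface_for_address,
        get_netmask_for_address,
    )

    iface = get_iface_for_address('192.168.1.10')      # e.g. 'eth0'
    netmask = get_netmask_for_address('192.168.1.10')  # e.g. '255.255.255.0'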
def format_ipv6_addr(address):
    """
    An IPv6 address needs to be wrapped in [] in a URL to parse correctly.
    """
    if is_ipv6(address):
        address = "[%s]" % address
    else:
        log("Not a valid ipv6 address: %s" % address, level=WARNING)
        address = None

    return address
def get_iface_addr(iface='eth0', inet_type='AF_INET', inc_aliases=False,
                   fatal=True, exc_list=None):
    """
    Return the assigned IP address for a given interface, if any, or [].
    """
    # Extract nic if passed /dev/ethX
    if '/' in iface:
        iface = iface.split('/')[-1]
    if not exc_list:
        exc_list = []
    try:
        inet_num = getattr(netifaces, inet_type)
    except AttributeError:
        raise Exception('Unknown inet type ' + str(inet_type))

    interfaces = netifaces.interfaces()
    if inc_aliases:
        ifaces = []
        for _iface in interfaces:
            if iface == _iface or _iface.split(':')[0] == iface:
                ifaces.append(_iface)
        if fatal and not ifaces:
            raise Exception("Invalid interface '%s'" % iface)
        ifaces.sort()
    else:
        if iface not in interfaces:
            if fatal:
                raise Exception("%s not found " % (iface))
            else:
                return []
        else:
            ifaces = [iface]

    addresses = []
    for netiface in ifaces:
        net_info = netifaces.ifaddresses(netiface)
        if inet_num in net_info:
            for entry in net_info[inet_num]:
                if 'addr' in entry and entry['addr'] not in exc_list:
                    addresses.append(entry['addr'])
    if fatal and not addresses:
        raise Exception("Interface '%s' doesn't have any %s addresses." %
                        (iface, inet_type))
    return addresses


get_ipv4_addr = partial(get_iface_addr, inet_type='AF_INET')
def get_iface_from_addr(addr):
    """Work out on which interface the provided address is configured."""
    for iface in netifaces.interfaces():
        addresses = netifaces.ifaddresses(iface)
        for inet_type in addresses:
            for _addr in addresses[inet_type]:
                _addr = _addr['addr']
                # link local
                ll_key = re.compile("(.+)%.*")
                raw = re.match(ll_key, _addr)
                if raw:
                    _addr = raw.group(1)
                if _addr == addr:
                    log("Address '%s' is configured on iface '%s'" %
                        (addr, iface))
                    return iface

    msg = "Unable to infer net iface on which '%s' is configured" % (addr)
    raise Exception(msg)


def sniff_iface(f):
    """If no iface provided, inject net iface inferred from unit private
    address.
    """
    def iface_sniffer(*args, **kwargs):
        if not kwargs.get('iface', None):
            kwargs['iface'] = get_iface_from_addr(unit_get('private-address'))

        return f(*args, **kwargs)

    return iface_sniffer


@sniff_iface
def get_ipv6_addr(iface=None, inc_aliases=False, fatal=True, exc_list=None,
                  dynamic_only=True):
    """Get assigned IPv6 address for a given interface.

    Returns list of addresses found. If no address found, returns empty list.

    If iface is None, we infer the current primary interface by doing a reverse
    lookup on the unit private-address.

    We currently only support scope global IPv6 addresses i.e. non-temporary
    addresses. If no global IPv6 address is found, return the first one found
    in the ipv6 address list.
    """
    addresses = get_iface_addr(iface=iface, inet_type='AF_INET6',
                               inc_aliases=inc_aliases, fatal=fatal,
                               exc_list=exc_list)

    if addresses:
        global_addrs = []
        for addr in addresses:
            key_scope_link_local = re.compile("^fe80::..(.+)%(.+)")
            m = re.match(key_scope_link_local, addr)
            if m:
                eui_64_mac = m.group(1)
                iface = m.group(2)
            else:
                global_addrs.append(addr)

        if global_addrs:
            # Make sure any found global addresses are not temporary
            cmd = ['ip', 'addr', 'show', iface]
            out = subprocess.check_output(cmd)
            if dynamic_only:
                key = re.compile("inet6 (.+)/[0-9]+ scope global dynamic.*")
            else:
                key = re.compile("inet6 (.+)/[0-9]+ scope global.*")

            addrs = []
            for line in out.split('\n'):
                line = line.strip()
                m = re.match(key, line)
                if m and 'temporary' not in line:
                    # Return the first valid address we find
                    for addr in global_addrs:
                        if m.group(1) == addr:
                            if not dynamic_only or \
                                    m.group(1).endswith(eui_64_mac):
                                addrs.append(addr)

            if addrs:
                return addrs

    if fatal:
        raise Exception("Interface '%s' doesn't have a scope global "
                        "non-temporary ipv6 address." % iface)

    return []
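A sketch of the intended call pattern for an IPv6-enabled charm (port illustrative):

    from charmhelpers.contrib.network.ip import format_ipv6_addr, get_ipv6_addr

    # First global, non-temporary IPv6 address on the primary interface.
    addr = get_ipv6_addr()[0]
    # Bracket the address before embedding it in a URL.
    url = 'http://%s:8080/' % format_ipv6_addr(addr)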
def get_bridges(vnic_dir='/sys/devices/virtual/net'):
    """
    Return a list of bridges on the system or []
    """
    b_rgex = vnic_dir + '/*/bridge'
    return [x.replace(vnic_dir, '').split('/')[1] for x in glob.glob(b_rgex)]


def get_bridge_nics(bridge, vnic_dir='/sys/devices/virtual/net'):
    """
    Return a list of nics comprising a given bridge on the system or []
    """
    brif_rgex = "%s/%s/brif/*" % (vnic_dir, bridge)
    return [x.split('/')[-1] for x in glob.glob(brif_rgex)]


def is_bridge_member(nic):
    """
    Check if a given nic is a member of a bridge
    """
    for bridge in get_bridges():
        if nic in get_bridge_nics(bridge):
            return True
    return False
@@ -303,7 +303,7 @@ def ensure_ceph_storage(service, pool, rbd_img, sizemb, mount_point,
                        blk_device, fstype, system_services=[]):
    """
    NOTE: This function must only be called from a single service unit for
-   the same rbd_img otherwise data loss will occur.
+    the same rbd_img otherwise data loss will occur.

    Ensures given pool and RBD image exists, is mapped to a block device,
    and the device is formatted and mounted at the given mount_point.
@@ -62,7 +62,7 @@ def list_lvm_volume_group(block_device):
    pvd = check_output(['pvdisplay', block_device]).splitlines()
    for l in pvd:
        if l.strip().startswith('VG Name'):
-            vg = ' '.join(l.split()).split(' ').pop()
+            vg = ' '.join(l.strip().split()[2:])
    return vg
@@ -1,8 +1,11 @@
-from os import stat
+import os
+import re
from stat import S_ISBLK

from subprocess import (
-    check_call
+    check_call,
+    check_output,
+    call
)


@@ -12,7 +15,9 @@ def is_block_device(path):

    :returns: boolean: True if path is a block device, False if not.
    '''
-    return S_ISBLK(stat(path).st_mode)
+    if not os.path.exists(path):
+        return False
+    return S_ISBLK(os.stat(path).st_mode)


def zap_disk(block_device):
@@ -22,5 +27,27 @@ def zap_disk(block_device):

    :param block_device: str: Full path of block device to clean.
    '''
-    check_call(['sgdisk', '--zap-all', '--clear',
-                '--mbrtogpt', block_device])
+    # sometimes sgdisk exits non-zero; this is OK, dd will clean up
+    call(['sgdisk', '--zap-all', '--mbrtogpt',
+          '--clear', block_device])
+    dev_end = check_output(['blockdev', '--getsz', block_device])
+    gpt_end = int(dev_end.split()[0]) - 100
+    check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
+                'bs=1M', 'count=1'])
+    check_call(['dd', 'if=/dev/zero', 'of=%s' % (block_device),
+                'bs=512', 'count=100', 'seek=%s' % (gpt_end)])
+
+
+def is_device_mounted(device):
+    '''Given a device path, return True if that device is mounted, and False
+    if it isn't.
+
+    :param device: str: Full path of the device to check.
+    :returns: boolean: True if the path represents a mounted device, False if
+        it doesn't.
+    '''
+    is_partition = bool(re.search(r".*[0-9]+\b", device))
+    out = check_output(['mount'])
+    if is_partition:
+        return bool(re.search(device + r"\b", out))
+    return bool(re.search(device + r"[0-9]+\b", out))
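A short sketch of the intended call pattern for the new helpers (device path illustrative; zap_disk is destructive):

    from charmhelpers.contrib.storage.linux.utils import (
        is_device_mounted,
        zap_disk,
    )

    if not is_device_mounted('/dev/vdb'):
        # Clears partition metadata at both ends of the disk.
        zap_disk('/dev/vdb')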
hooks/charmhelpers/core/fstab.py (new file, 116 lines)
@@ -0,0 +1,116 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'

import os


class Fstab(file):
    """This class extends file in order to implement a file reader/writer
    for file `/etc/fstab`
    """

    class Entry(object):
        """Entry class represents a non-comment line on the `/etc/fstab` file
        """
        def __init__(self, device, mountpoint, filesystem,
                     options, d=0, p=0):
            self.device = device
            self.mountpoint = mountpoint
            self.filesystem = filesystem

            if not options:
                options = "defaults"

            self.options = options
            self.d = d
            self.p = p

        def __eq__(self, o):
            return str(self) == str(o)

        def __str__(self):
            return "{} {} {} {} {} {}".format(self.device,
                                              self.mountpoint,
                                              self.filesystem,
                                              self.options,
                                              self.d,
                                              self.p)

    DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab')

    def __init__(self, path=None):
        if path:
            self._path = path
        else:
            self._path = self.DEFAULT_PATH
        file.__init__(self, self._path, 'r+')

    def _hydrate_entry(self, line):
        # NOTE: use split with no arguments to split on any
        #       whitespace including tabs
        return Fstab.Entry(*filter(
            lambda x: x not in ('', None),
            line.strip("\n").split()))

    @property
    def entries(self):
        self.seek(0)
        for line in self.readlines():
            try:
                if not line.startswith("#"):
                    yield self._hydrate_entry(line)
            except ValueError:
                pass

    def get_entry_by_attr(self, attr, value):
        for entry in self.entries:
            e_attr = getattr(entry, attr)
            if e_attr == value:
                return entry
        return None

    def add_entry(self, entry):
        if self.get_entry_by_attr('device', entry.device):
            return False

        self.write(str(entry) + '\n')
        self.truncate()
        return entry

    def remove_entry(self, entry):
        self.seek(0)

        lines = self.readlines()

        found = False
        for index, line in enumerate(lines):
            if not line.startswith("#"):
                if self._hydrate_entry(line) == entry:
                    found = True
                    break

        if not found:
            return False

        lines.remove(line)

        self.seek(0)
        self.write(''.join(lines))
        self.truncate()
        return True

    @classmethod
    def remove_by_mountpoint(cls, mountpoint, path=None):
        fstab = cls(path=path)
        entry = fstab.get_entry_by_attr('mountpoint', mountpoint)
        if entry:
            return fstab.remove_entry(entry)
        return False

    @classmethod
    def add(cls, device, mountpoint, filesystem, options=None, path=None):
        return cls(path=path).add_entry(Fstab.Entry(device,
                                                    mountpoint, filesystem,
                                                    options=options))
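The classmethods give a convenience API for one-off edits; a sketch (device and mountpoint illustrative):

    from charmhelpers.core.fstab import Fstab

    # Persist a mount across reboots, then remove it again.
    Fstab.add('/dev/vdb1', '/srv/data', 'ext4', options='noatime')
    Fstab.remove_by_mountpoint('/srv/data')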
@@ -25,7 +25,7 @@ cache = {}
def cached(func):
    """Cache return values for multiple executions of func + args

-    For example:
+    For example::

        @cached
        def unit_get(attribute):
@@ -155,6 +155,121 @@ def hook_name():
    return os.path.basename(sys.argv[0])


+class Config(dict):
+    """A dictionary representation of the charm's config.yaml, with some
+    extra features:
+
+    - See which values in the dictionary have changed since the previous hook.
+    - For values that have changed, see what the previous value was.
+    - Store arbitrary data for use in a later hook.
+
+    NOTE: Do not instantiate this object directly - instead call
+    ``hookenv.config()``, which will return an instance of :class:`Config`.
+
+    Example usage::
+
+        >>> # inside a hook
+        >>> from charmhelpers.core import hookenv
+        >>> config = hookenv.config()
+        >>> config['foo']
+        'bar'
+        >>> # store a new key/value for later use
+        >>> config['mykey'] = 'myval'
+
+
+        >>> # user runs `juju set mycharm foo=baz`
+        >>> # now we're inside subsequent config-changed hook
+        >>> config = hookenv.config()
+        >>> config['foo']
+        'baz'
+        >>> # test to see if this val has changed since last hook
+        >>> config.changed('foo')
+        True
+        >>> # what was the previous value?
+        >>> config.previous('foo')
+        'bar'
+        >>> # keys/values that we add are preserved across hooks
+        >>> config['mykey']
+        'myval'
+
+    """
+    CONFIG_FILE_NAME = '.juju-persistent-config'
+
+    def __init__(self, *args, **kw):
+        super(Config, self).__init__(*args, **kw)
+        self.implicit_save = True
+        self._prev_dict = None
+        self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
+        if os.path.exists(self.path):
+            self.load_previous()
+
+    def __getitem__(self, key):
+        """For regular dict lookups, check the current juju config first,
+        then the previous (saved) copy. This ensures that user-saved values
+        will be returned by a dict lookup.
+
+        """
+        try:
+            return dict.__getitem__(self, key)
+        except KeyError:
+            return (self._prev_dict or {})[key]
+
+    def load_previous(self, path=None):
+        """Load previous copy of config from disk.
+
+        In normal usage you don't need to call this method directly - it
+        is called automatically at object initialization.
+
+        :param path:
+
+            File path from which to load the previous config. If `None`,
+            config is loaded from the default location. If `path` is
+            specified, subsequent `save()` calls will write to the same
+            path.
+
+        """
+        self.path = path or self.path
+        with open(self.path) as f:
+            self._prev_dict = json.load(f)
+
+    def changed(self, key):
+        """Return True if the current value for this key is different from
+        the previous value.
+
+        """
+        if self._prev_dict is None:
+            return True
+        return self.previous(key) != self.get(key)
+
+    def previous(self, key):
+        """Return previous value for this key, or None if there
+        is no previous value.
+
+        """
+        if self._prev_dict:
+            return self._prev_dict.get(key)
+        return None
+
+    def save(self):
+        """Save this config to disk.
+
+        If the charm is using the :mod:`Services Framework <services.base>`
+        or :meth:`@hook <Hooks.hook>` decorator, this
+        is called automatically at the end of successful hook execution.
+        Otherwise, it should be called directly by user code.
+
+        To disable automatic saves, set ``implicit_save=False`` on this
+        instance.
+
+        """
+        if self._prev_dict:
+            for k, v in self._prev_dict.iteritems():
+                if k not in self:
+                    self[k] = v
+        with open(self.path, 'w') as f:
+            json.dump(self, f)
+
+
@cached
def config(scope=None):
    """Juju charm configuration"""
@@ -163,7 +278,10 @@ def config(scope=None):
    config_cmd_line.append(scope)
    config_cmd_line.append('--format=json')
    try:
-        return json.loads(subprocess.check_output(config_cmd_line))
+        config_data = json.loads(subprocess.check_output(config_cmd_line))
+        if scope is not None:
+            return config_data
+        return Config(config_data)
    except ValueError:
        return None
@@ -188,8 +306,9 @@ def relation_get(attribute=None, unit=None, rid=None):
        raise


-def relation_set(relation_id=None, relation_settings={}, **kwargs):
+def relation_set(relation_id=None, relation_settings=None, **kwargs):
    """Set relation information for the current unit"""
+    relation_settings = relation_settings if relation_settings else {}
    relation_cmd_line = ['relation-set']
    if relation_id is not None:
        relation_cmd_line.extend(('-r', relation_id))
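The signature change sidesteps Python's shared-mutable-default pitfall; a minimal illustration of the bug the old signature invited:

    def buggy(settings={}):
        settings['touched'] = True  # mutates the one shared default dict
        return settings

    buggy()             # {'touched': True}
    buggy.__defaults__  # ({'touched': True},) - state leaks between calls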
@@ -348,27 +467,29 @@ class UnregisteredHookError(Exception):
class Hooks(object):
    """A convenient handler for hook functions.

-    Example:
+    Example::

        hooks = Hooks()

        # register a hook, taking its name from the function name
        @hooks.hook()
        def install():
-            ...
+            pass # your code here

        # register a hook, providing a custom hook name
        @hooks.hook("config-changed")
        def config_changed():
-            ...
+            pass # your code here

        if __name__ == "__main__":
            # execute a hook based on the name the program is called by
            hooks.execute(sys.argv)
    """

-    def __init__(self):
+    def __init__(self, config_save=True):
        super(Hooks, self).__init__()
        self._hooks = {}
+        self._config_save = config_save

    def register(self, name, function):
        """Register a hook"""
@@ -379,6 +500,10 @@ class Hooks(object):
        hook_name = os.path.basename(args[0])
        if hook_name in self._hooks:
            self._hooks[hook_name]()
+            if self._config_save:
+                cfg = config()
+                if cfg.implicit_save:
+                    cfg.save()
        else:
            raise UnregisteredHookError(hook_name)
@@ -12,10 +12,13 @@ import random
import string
import subprocess
import hashlib
+import shutil
+from contextlib import contextmanager

from collections import OrderedDict

from hookenv import log
+from fstab import Fstab


def service_start(service_name):
@@ -34,7 +37,8 @@ def service_restart(service_name):


def service_reload(service_name, restart_on_failure=False):
-    """Reload a system service, optionally falling back to restart if reload fails"""
+    """Reload a system service, optionally falling back to restart if
+    reload fails"""
    service_result = service('reload', service_name)
    if not service_result and restart_on_failure:
        service_result = service('restart', service_name)
@@ -50,7 +54,7 @@ def service(action, service_name):
def service_running(service):
    """Determine whether a system service is running"""
    try:
-        output = subprocess.check_output(['service', service, 'status'])
+        output = subprocess.check_output(['service', service, 'status'], stderr=subprocess.STDOUT)
    except subprocess.CalledProcessError:
        return False
    else:
@@ -60,6 +64,16 @@ def service_running(service):
    return False


+def service_available(service_name):
+    """Determine whether a system service is available"""
+    try:
+        subprocess.check_output(['service', service_name, 'status'], stderr=subprocess.STDOUT)
+    except subprocess.CalledProcessError as e:
+        return 'unrecognized service' not in e.output
+    else:
+        return True
+
+
def adduser(username, password=None, shell='/bin/bash', system_user=False):
    """Add a user to the system"""
    try:
@@ -143,7 +157,19 @@ def write_file(path, content, owner='root', group='root', perms=0444):
        target.write(content)


-def mount(device, mountpoint, options=None, persist=False):
+def fstab_remove(mp):
+    """Remove the given mountpoint entry from /etc/fstab
+    """
+    return Fstab.remove_by_mountpoint(mp)
+
+
+def fstab_add(dev, mp, fs, options=None):
+    """Adds the given device entry to the /etc/fstab file
+    """
+    return Fstab.add(dev, mp, fs, options=options)
+
+
+def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
    """Mount a filesystem at a particular mountpoint"""
    cmd_args = ['mount']
    if options is not None:
@@ -154,9 +180,9 @@ def mount(device, mountpoint, options=None, persist=False):
    except subprocess.CalledProcessError, e:
        log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
        return False

    if persist:
-        # TODO: update fstab
-        pass
+        return fstab_add(device, mountpoint, filesystem, options=options)
    return True


@@ -168,9 +194,9 @@ def umount(mountpoint, persist=False):
    except subprocess.CalledProcessError, e:
        log('Error unmounting {}\n{}'.format(mountpoint, e.output))
        return False

    if persist:
-        # TODO: update fstab
-        pass
+        return fstab_remove(mountpoint)
    return True
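With persist=True a mount now survives reboots via the Fstab helpers instead of the old TODO; a sketch (paths illustrative):

    from charmhelpers.core.host import mount

    # Mounts the device and also writes an /etc/fstab entry using the
    # given filesystem type.
    mount('/dev/vdb1', '/srv/data', options='noatime',
          persist=True, filesystem='ext4')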
@@ -183,10 +209,15 @@ def mounts():
    return system_mounts


-def file_hash(path):
-    """Generate an md5 hash of the contents of 'path' or None if not found """
+def file_hash(path, hash_type='md5'):
+    """
+    Generate a hash checksum of the contents of 'path' or None if not found.
+
+    :param str hash_type: Any hash algorithm supported by :mod:`hashlib`,
+        such as md5, sha1, sha256, sha512, etc.
+    """
    if os.path.exists(path):
-        h = hashlib.md5()
+        h = getattr(hashlib, hash_type)()
        with open(path, 'r') as source:
            h.update(source.read())  # IGNORE:E1101 - it does have update
        return h.hexdigest()
@@ -194,16 +225,36 @@ def file_hash(path):
    return None


+def check_hash(path, checksum, hash_type='md5'):
+    """
+    Validate a file using a cryptographic checksum.
+
+    :param str checksum: Value of the checksum used to validate the file.
+    :param str hash_type: Hash algorithm used to generate `checksum`.
+        Can be any hash algorithm supported by :mod:`hashlib`,
+        such as md5, sha1, sha256, sha512, etc.
+    :raises ChecksumError: If the file fails the checksum
+
+    """
+    actual_checksum = file_hash(path, hash_type)
+    if checksum != actual_checksum:
+        raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum))
+
+
+class ChecksumError(ValueError):
+    pass
+
+
def restart_on_change(restart_map, stopstart=False):
    """Restart services based on configuration files changing

-    This function is used as a decorator, for example
+    This function is used as a decorator, for example::

        @restart_on_change({
            '/etc/ceph/ceph.conf': [ 'cinder-api', 'cinder-volume' ]
        })
        def ceph_client_changed():
-            ...
+            pass # your code here

    In this example, the cinder-api and cinder-volume services
    would be restarted if /etc/ceph/ceph.conf is changed by the
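A sketch of validating a download with the new helper (path and checksum illustrative):

    from charmhelpers.core.hookenv import log
    from charmhelpers.core.host import ChecksumError, check_hash

    try:
        check_hash('/tmp/payload.tgz',
                   'd41d8cd98f00b204e9800998ecf8427e', hash_type='md5')
    except ChecksumError as e:
        log('corrupt download: %s' % e)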
@@ -295,3 +346,40 @@ def get_nic_hwaddr(nic):
        if 'link/ether' in words:
            hwaddr = words[words.index('link/ether') + 1]
    return hwaddr
+
+
+def cmp_pkgrevno(package, revno, pkgcache=None):
+    '''Compare supplied revno with the revno of the installed package
+
+    *  1 => Installed revno is greater than supplied arg
+    *  0 => Installed revno is the same as supplied arg
+    * -1 => Installed revno is less than supplied arg
+
+    '''
+    import apt_pkg
+    from charmhelpers.fetch import apt_cache
+    if not pkgcache:
+        pkgcache = apt_cache()
+    pkg = pkgcache[package]
+    return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
+
+
+@contextmanager
+def chdir(d):
+    cur = os.getcwd()
+    try:
+        yield os.chdir(d)
+    finally:
+        os.chdir(cur)
+
+
+def chownr(path, owner, group):
+    uid = pwd.getpwnam(owner).pw_uid
+    gid = grp.getgrnam(group).gr_gid
+
+    for root, dirs, files in os.walk(path):
+        for name in dirs + files:
+            full = os.path.join(root, name)
+            broken_symlink = os.path.lexists(full) and not os.path.exists(full)
+            if not broken_symlink:
+                os.chown(full, uid, gid)
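A sketch of gating behaviour on an installed package version (package, version and follow-up helper illustrative):

    from charmhelpers.core.host import cmp_pkgrevno

    # > 0 if installed ceph is newer than 0.56, 0 if equal, < 0 if older.
    if cmp_pkgrevno('ceph', '0.56') >= 0:
        enable_cephx()  # hypothetical charm-specific helper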
hooks/charmhelpers/core/services/__init__.py (new file, 2 lines)
@@ -0,0 +1,2 @@
from .base import *
from .helpers import *
hooks/charmhelpers/core/services/base.py (new file, 313 lines)
@@ -0,0 +1,313 @@
import os
import re
import json
from collections import Iterable

from charmhelpers.core import host
from charmhelpers.core import hookenv


__all__ = ['ServiceManager', 'ManagerCallback',
           'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
           'service_restart', 'service_stop']


class ServiceManager(object):
    def __init__(self, services=None):
        """
        Register a list of services, given their definitions.

        Service definitions are dicts in the following formats (all keys except
        'service' are optional)::

            {
                "service": <service name>,
                "required_data": <list of required data contexts>,
                "provided_data": <list of provided data contexts>,
                "data_ready": <one or more callbacks>,
                "data_lost": <one or more callbacks>,
                "start": <one or more callbacks>,
                "stop": <one or more callbacks>,
                "ports": <list of ports to manage>,
            }

        The 'required_data' list should contain dicts of required data (or
        dependency managers that act like dicts and know how to collect the data).
        Only when all items in the 'required_data' list are populated are the list
        of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more
        information.

        The 'provided_data' list should contain relation data providers, most likely
        a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`,
        that will indicate a set of data to set on a given relation.

        The 'data_ready' value should be either a single callback, or a list of
        callbacks, to be called when all items in 'required_data' pass `is_ready()`.
        Each callback will be called with the service name as the only parameter.
        After all of the 'data_ready' callbacks are called, the 'start' callbacks
        are fired.

        The 'data_lost' value should be either a single callback, or a list of
        callbacks, to be called when a 'required_data' item no longer passes
        `is_ready()`. Each callback will be called with the service name as the
        only parameter. After all of the 'data_lost' callbacks are called,
        the 'stop' callbacks are fired.

        The 'start' value should be either a single callback, or a list of
        callbacks, to be called when starting the service, after the 'data_ready'
        callbacks are complete. Each callback will be called with the service
        name as the only parameter. This defaults to
        `[host.service_start, services.open_ports]`.

        The 'stop' value should be either a single callback, or a list of
        callbacks, to be called when stopping the service. If the service is
        being stopped because it no longer has all of its 'required_data', this
        will be called after all of the 'data_lost' callbacks are complete.
        Each callback will be called with the service name as the only parameter.
        This defaults to `[services.close_ports, host.service_stop]`.

        The 'ports' value should be a list of ports to manage. The default
        'start' handler will open the ports after the service is started,
        and the default 'stop' handler will close the ports prior to stopping
        the service.


        Examples:

        The following registers an Upstart service called bingod that depends on
        a mongodb relation and which runs a custom `db_migrate` function prior to
        restarting the service, and a Runit service called spadesd::

            manager = services.ServiceManager([
                {
                    'service': 'bingod',
                    'ports': [80, 443],
                    'required_data': [MongoRelation(), config(), {'my': 'data'}],
                    'data_ready': [
                        services.template(source='bingod.conf'),
                        services.template(source='bingod.ini',
                                          target='/etc/bingod.ini',
                                          owner='bingo', perms=0400),
                    ],
                },
                {
                    'service': 'spadesd',
                    'data_ready': services.template(source='spadesd_run.j2',
                                                    target='/etc/sv/spadesd/run',
                                                    perms=0555),
                    'start': runit_start,
                    'stop': runit_stop,
                },
            ])
            manager.manage()
        """
        self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
        self._ready = None
        self.services = {}
        for service in services or []:
            service_name = service['service']
            self.services[service_name] = service

    def manage(self):
        """
        Handle the current hook by doing The Right Thing with the registered services.
        """
        hook_name = hookenv.hook_name()
        if hook_name == 'stop':
            self.stop_services()
        else:
            self.provide_data()
            self.reconfigure_services()
        cfg = hookenv.config()
        if cfg.implicit_save:
            cfg.save()

    def provide_data(self):
        """
        Set the relation data for each provider in the ``provided_data`` list.

        A provider must have a `name` attribute, which indicates which relation
        to set data on, and a `provide_data()` method, which returns a dict of
        data to set.
        """
        hook_name = hookenv.hook_name()
        for service in self.services.values():
            for provider in service.get('provided_data', []):
                if re.match(r'{}-relation-(joined|changed)'.format(provider.name), hook_name):
                    data = provider.provide_data()
                    _ready = provider._is_ready(data) if hasattr(provider, '_is_ready') else data
                    if _ready:
                        hookenv.relation_set(None, data)

    def reconfigure_services(self, *service_names):
        """
        Update all files for one or more registered services, and,
        if ready, optionally restart them.

        If no service names are given, reconfigures all registered services.
        """
        for service_name in service_names or self.services.keys():
            if self.is_ready(service_name):
                self.fire_event('data_ready', service_name)
                self.fire_event('start', service_name, default=[
                    service_restart,
                    manage_ports])
                self.save_ready(service_name)
            else:
                if self.was_ready(service_name):
                    self.fire_event('data_lost', service_name)
                self.fire_event('stop', service_name, default=[
                    manage_ports,
                    service_stop])
                self.save_lost(service_name)

    def stop_services(self, *service_names):
        """
        Stop one or more registered services, by name.

        If no service names are given, stops all registered services.
        """
        for service_name in service_names or self.services.keys():
            self.fire_event('stop', service_name, default=[
                manage_ports,
                service_stop])

    def get_service(self, service_name):
        """
        Given the name of a registered service, return its service definition.
        """
        service = self.services.get(service_name)
        if not service:
            raise KeyError('Service not registered: %s' % service_name)
        return service

    def fire_event(self, event_name, service_name, default=None):
        """
        Fire a data_ready, data_lost, start, or stop event on a given service.
        """
        service = self.get_service(service_name)
        callbacks = service.get(event_name, default)
        if not callbacks:
            return
        if not isinstance(callbacks, Iterable):
            callbacks = [callbacks]
        for callback in callbacks:
            if isinstance(callback, ManagerCallback):
                callback(self, service_name, event_name)
            else:
                callback(service_name)

    def is_ready(self, service_name):
        """
        Determine if a registered service is ready, by checking its 'required_data'.

        A 'required_data' item can be any mapping type, and is considered ready
        if `bool(item)` evaluates as True.
        """
        service = self.get_service(service_name)
        reqs = service.get('required_data', [])
        return all(bool(req) for req in reqs)

    def _load_ready_file(self):
        if self._ready is not None:
            return
        if os.path.exists(self._ready_file):
            with open(self._ready_file) as fp:
                self._ready = set(json.load(fp))
        else:
            self._ready = set()

    def _save_ready_file(self):
        if self._ready is None:
            return
        with open(self._ready_file, 'w') as fp:
            json.dump(list(self._ready), fp)

    def save_ready(self, service_name):
        """
        Save an indicator that the given service is now data_ready.
        """
        self._load_ready_file()
        self._ready.add(service_name)
        self._save_ready_file()

    def save_lost(self, service_name):
        """
        Save an indicator that the given service is no longer data_ready.
        """
        self._load_ready_file()
        self._ready.discard(service_name)
        self._save_ready_file()

    def was_ready(self, service_name):
        """
        Determine if the given service was previously data_ready.
        """
        self._load_ready_file()
        return service_name in self._ready


class ManagerCallback(object):
    """
    Special case of a callback that takes the `ServiceManager` instance
    in addition to the service name.

    Subclasses should implement `__call__` which should accept three parameters:

        * `manager`       The `ServiceManager` instance
        * `service_name`  The name of the service it's being triggered for
        * `event_name`    The name of the event that this callback is handling
    """
    def __call__(self, manager, service_name, event_name):
        raise NotImplementedError()


class PortManagerCallback(ManagerCallback):
    """
    Callback class that will open or close ports, for use as either
    a start or stop action.
    """
    def __call__(self, manager, service_name, event_name):
        service = manager.get_service(service_name)
        new_ports = service.get('ports', [])
        port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
        if os.path.exists(port_file):
            with open(port_file) as fp:
                old_ports = fp.read().split(',')
            for old_port in old_ports:
                if bool(old_port):
                    old_port = int(old_port)
                    if old_port not in new_ports:
                        hookenv.close_port(old_port)
        with open(port_file, 'w') as fp:
            fp.write(','.join(str(port) for port in new_ports))
        for port in new_ports:
            if event_name == 'start':
                hookenv.open_port(port)
            elif event_name == 'stop':
                hookenv.close_port(port)


def service_stop(service_name):
    """
    Wrapper around host.service_stop to prevent spurious "unknown service"
    messages in the logs.
    """
    if host.service_running(service_name):
        host.service_stop(service_name)


def service_restart(service_name):
    """
    Wrapper around host.service_restart to prevent spurious "unknown service"
    messages in the logs.
    """
    if host.service_available(service_name):
        if host.service_running(service_name):
            host.service_restart(service_name)
        else:
            host.service_start(service_name)


# Convenience aliases
open_ports = close_ports = manage_ports = PortManagerCallback()
hooks/charmhelpers/core/services/helpers.py (new file, 239 lines)
@ -0,0 +1,239 @@
|
||||
import os
|
||||
import yaml
|
||||
from charmhelpers.core import hookenv
|
||||
from charmhelpers.core import templating
|
||||
|
||||
from charmhelpers.core.services.base import ManagerCallback
|
||||
|
||||
|
||||
__all__ = ['RelationContext', 'TemplateCallback',
|
||||
'render_template', 'template']
|
||||
|
||||
|
||||
class RelationContext(dict):
|
||||
"""
|
||||
Base class for a context generator that gets relation data from juju.
|
||||
|
||||
Subclasses must provide the attributes `name`, which is the name of the
|
||||
interface of interest, `interface`, which is the type of the interface of
|
||||
interest, and `required_keys`, which is the set of keys required for the
|
||||
relation to be considered complete. The data for all interfaces matching
|
||||
the `name` attribute that are complete will used to populate the dictionary
|
||||
values (see `get_data`, below).
|
||||
|
||||
The generated context will be namespaced under the relation :attr:`name`,
|
||||
to prevent potential naming conflicts.
|
||||
|
||||
:param str name: Override the relation :attr:`name`, since it can vary from charm to charm
|
||||
:param list additional_required_keys: Extend the list of :attr:`required_keys`
|
||||
"""
|
||||
name = None
|
||||
interface = None
|
||||
required_keys = []
|
||||
|
||||
def __init__(self, name=None, additional_required_keys=None):
|
||||
if name is not None:
|
||||
self.name = name
|
||||
if additional_required_keys is not None:
|
||||
self.required_keys.extend(additional_required_keys)
|
||||
self.get_data()
|
||||
|
||||
def __bool__(self):
|
||||
"""
|
||||
Returns True if all of the required_keys are available.
|
||||
"""
|
||||
return self.is_ready()
|
||||
|
||||
__nonzero__ = __bool__
|
||||
|
||||
def __repr__(self):
|
||||
return super(RelationContext, self).__repr__()
|
||||
|
||||
def is_ready(self):
|
||||
"""
|
||||
Returns True if all of the `required_keys` are available from any units.
|
||||
"""
|
||||
ready = len(self.get(self.name, [])) > 0
|
||||
if not ready:
|
||||
hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
|
||||
return ready
|
||||
|
||||
def _is_ready(self, unit_data):
|
||||
"""
|
||||
Helper method that tests a set of relation data and returns True if
|
||||
all of the `required_keys` are present.
|
||||
"""
|
||||
return set(unit_data.keys()).issuperset(set(self.required_keys))
|
||||
|
||||
def get_data(self):
|
||||
"""
|
||||
Retrieve the relation data for each unit involved in a relation and,
|
||||
if complete, store it in a list under `self[self.name]`. This
|
||||
is automatically called when the RelationContext is instantiated.
|
||||
|
||||
The units are sorted lexographically first by the service ID, then by
|
||||
the unit ID. Thus, if an interface has two other services, 'db:1'
|
||||
and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
|
||||
and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
|
||||
set of data, the relation data for the units will be stored in the
|
||||
order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.
|
||||
|
||||
If you only care about a single unit on the relation, you can just
|
||||
access it as `{{ interface[0]['key'] }}`. However, if you can at all
|
||||
support multiple units on a relation, you should iterate over the list,
|
||||
like::
|
||||
|
||||
{% for unit in interface -%}
|
||||
{{ unit['key'] }}{% if not loop.last %},{% endif %}
|
||||
{%- endfor %}
|
||||
|
||||
Note that since all sets of relation data from all related services and
|
||||
units are in a single list, if you need to know which service or unit a
|
||||
set of data came from, you'll need to extend this class to preserve
|
||||
that information.
|
||||
"""
|
||||
if not hookenv.relation_ids(self.name):
|
||||
return
|
||||
|
||||
ns = self.setdefault(self.name, [])
|
||||
for rid in sorted(hookenv.relation_ids(self.name)):
|
||||
for unit in sorted(hookenv.related_units(rid)):
|
||||
reldata = hookenv.relation_get(rid=rid, unit=unit)
|
||||
if self._is_ready(reldata):
|
||||
ns.append(reldata)
|
||||
|
||||
def provide_data(self):
|
||||
"""
|
||||
Return data to be relation_set for this interface.
|
||||
"""
|
||||
return {}
|
||||
|
||||
|
||||
class MysqlRelation(RelationContext):
|
||||
"""
|
||||
Relation context for the `mysql` interface.
|
||||
|
||||
:param str name: Override the relation :attr:`name`, since it can vary from charm to charm
|
||||
:param list additional_required_keys: Extend the list of :attr:`required_keys`
|
||||
"""
|
||||
name = 'db'
|
||||
interface = 'mysql'
|
||||
required_keys = ['host', 'user', 'password', 'database']
|
||||
|
||||
|
||||
class HttpRelation(RelationContext):
|
||||
"""
|
||||
Relation context for the `http` interface.
|
||||
|
||||
:param str name: Override the relation :attr:`name`, since it can vary from charm to charm
|
||||
:param list additional_required_keys: Extend the list of :attr:`required_keys`
|
||||
"""
|
||||
name = 'website'
|
||||
interface = 'http'
|
||||
required_keys = ['host', 'port']
|
||||
|
||||
def provide_data(self):
|
||||
return {
|
||||
'host': hookenv.unit_get('private-address'),
|
||||
'port': 80,
|
||||
}
|
||||
|
||||
|
||||
class RequiredConfig(dict):
|
||||
"""
|
||||
Data context that loads config options with one or more mandatory options.
|
||||
|
||||
Once the required options have been changed from their default values, all
|
||||
config options will be available, namespaced under `config` to prevent
|
||||
potential naming conflicts (for example, between a config option and a
|
||||
relation property).
|
||||
|
||||
:param list *args: List of options that must be changed from their default values.
|
||||
"""
|
||||
|
||||
def __init__(self, *args):
|
||||
self.required_options = args
|
||||
self['config'] = hookenv.config()
|
||||
with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp:
|
||||
self.config = yaml.load(fp).get('options', {})
|
||||
|
||||
def __bool__(self):
|
||||
for option in self.required_options:
|
||||
if option not in self['config']:
|
||||
return False
|
||||
current_value = self['config'][option]
|
||||
default_value = self.config[option].get('default')
|
||||
if current_value == default_value:
|
||||
return False
|
||||
if current_value in (None, '') and default_value in (None, ''):
|
||||
return False
|
||||
return True
|
||||
|
||||
def __nonzero__(self):
|
||||
return self.__bool__()
|
||||
|
||||
|
||||
class StoredContext(dict):
    """
    A data context that always returns the data that it was first created with.

    This is useful to do a one-time generation of things like passwords, that
    will thereafter use the same value that was originally generated, instead
    of generating a new value each time it is run.
    """
    def __init__(self, file_name, config_data):
        """
        If the file exists, populate `self` with the data from the file.
        Otherwise, populate with the given data and persist it to the file.
        """
        if os.path.exists(file_name):
            self.update(self.read_context(file_name))
        else:
            self.store_context(file_name, config_data)
            self.update(config_data)

    def store_context(self, file_name, config_data):
        if not os.path.isabs(file_name):
            file_name = os.path.join(hookenv.charm_dir(), file_name)
        with open(file_name, 'w') as file_stream:
            os.fchmod(file_stream.fileno(), 0600)
            yaml.dump(config_data, file_stream)

    def read_context(self, file_name):
        if not os.path.isabs(file_name):
            file_name = os.path.join(hookenv.charm_dir(), file_name)
        with open(file_name, 'r') as file_stream:
            data = yaml.load(file_stream)
            if not data:
                raise OSError("%s is empty" % file_name)
            return data

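A sketch of the one-time-password pattern the docstring describes; `host.pwgen()` is assumed from charmhelpers.core.host and the file name is illustrative:

    from charmhelpers.core import host

    # First run generates and persists the password (mode 0600);
    # every later run reloads the stored value instead.
    secrets = StoredContext('app-secrets.yaml', {'password': host.pwgen()})
    password = secrets['password']
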
class TemplateCallback(ManagerCallback):
    """
    Callback class that will render a Jinja2 template, for use as a ready action.

    :param str source: The template source file, relative to `$CHARM_DIR/templates`
    :param str target: The target to write the rendered template to
    :param str owner: The owner of the rendered file
    :param str group: The group of the rendered file
    :param int perms: The permissions of the rendered file
    """
    def __init__(self, source, target, owner='root', group='root', perms=0444):
        self.source = source
        self.target = target
        self.owner = owner
        self.group = group
        self.perms = perms

    def __call__(self, manager, service_name, event_name):
        service = manager.get_service(service_name)
        context = {}
        for ctx in service.get('required_data', []):
            context.update(ctx)
        templating.render(self.source, self.target, context,
                          self.owner, self.group, self.perms)


# Convenience aliases for templates
render_template = template = TemplateCallback
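For context, a sketch of how the alias is typically wired into a services definition; the 'service'/'required_data'/'data_ready' keys are assumed from charmhelpers.core.services and are not shown in this diff, and the service and file names are illustrative:

    SERVICES = [{
        'service': 'haproxy',
        'required_data': [HttpRelation()],
        'data_ready': [
            render_template(source='haproxy.cfg',
                            target='/etc/haproxy/haproxy.cfg'),
        ],
    }]
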
51
hooks/charmhelpers/core/templating.py
Normal file
51
hooks/charmhelpers/core/templating.py
Normal file
@ -0,0 +1,51 @@
import os

from charmhelpers.core import host
from charmhelpers.core import hookenv


def render(source, target, context, owner='root', group='root', perms=0444, templates_dir=None):
    """
    Render a template.

    The `source` path, if not absolute, is relative to the `templates_dir`.

    The `target` path should be absolute.

    The context should be a dict containing the values to be replaced in the
    template.

    The `owner`, `group`, and `perms` options will be passed to `write_file`.

    If omitted, `templates_dir` defaults to the `templates` folder in the charm.

    Note: Using this requires python-jinja2; if it is not installed, calling
    this will attempt to use charmhelpers.fetch.apt_install to install it.
    """
    try:
        from jinja2 import FileSystemLoader, Environment, exceptions
    except ImportError:
        try:
            from charmhelpers.fetch import apt_install
        except ImportError:
            hookenv.log('Could not import jinja2, and could not import '
                        'charmhelpers.fetch to install it',
                        level=hookenv.ERROR)
            raise
        apt_install('python-jinja2', fatal=True)
        from jinja2 import FileSystemLoader, Environment, exceptions

    if templates_dir is None:
        templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
    loader = Environment(loader=FileSystemLoader(templates_dir))
    try:
        template = loader.get_template(source)
    except exceptions.TemplateNotFound as e:
        hookenv.log('Could not load template %s from %s.' %
                    (source, templates_dir),
                    level=hookenv.ERROR)
        raise e
    content = template.render(context)
    host.mkdir(os.path.dirname(target))
    host.write_file(target, content, owner, group, perms)
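A usage sketch for the new helper; paths and context values are illustrative:

    from charmhelpers.core.templating import render

    # Renders $CHARM_DIR/templates/corosync.conf into place, root-owned
    # and world-readable (0444), creating the target directory if needed.
    render('corosync.conf', '/etc/corosync/corosync.conf',
           {'corosync_bindnetaddr': '192.168.1.0',
            'corosync_mcastport': '5405',
            'corosync_mcastaddr': '226.94.1.1',
            'ip_version': 'ipv4'})
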
@ -1,4 +1,6 @@
import importlib
from tempfile import NamedTemporaryFile
import time
from yaml import safe_load
from charmhelpers.core.host import (
    lsb_release
@ -12,9 +14,9 @@ from charmhelpers.core.hookenv import (
    config,
    log,
)
import apt_pkg
import os


CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
"""
@ -54,176 +56,17 @@ CLOUD_ARCHIVE_POCKETS = {
    'icehouse/proposed': 'precise-proposed/icehouse',
    'precise-icehouse/proposed': 'precise-proposed/icehouse',
    'precise-proposed/icehouse': 'precise-proposed/icehouse',
    # Juno
    'juno': 'trusty-updates/juno',
    'trusty-juno': 'trusty-updates/juno',
    'trusty-juno/updates': 'trusty-updates/juno',
    'trusty-updates/juno': 'trusty-updates/juno',
    'juno/proposed': 'trusty-proposed/juno',
    'trusty-juno/proposed': 'trusty-proposed/juno',
    'trusty-proposed/juno': 'trusty-proposed/juno',
}
def filter_installed_packages(packages):
    """Returns a list of packages that require installation"""
    apt_pkg.init()
    cache = apt_pkg.Cache()
    _pkgs = []
    for package in packages:
        try:
            p = cache[package]
            p.current_ver or _pkgs.append(package)
        except KeyError:
            log('Package {} has no installation candidate.'.format(package),
                level='WARNING')
            _pkgs.append(package)
    return _pkgs


def apt_install(packages, options=None, fatal=False):
    """Install one or more packages"""
    if options is None:
        options = ['--option=Dpkg::Options::=--force-confold']

    cmd = ['apt-get', '--assume-yes']
    cmd.extend(options)
    cmd.append('install')
    if isinstance(packages, basestring):
        cmd.append(packages)
    else:
        cmd.extend(packages)
    log("Installing {} with options: {}".format(packages,
                                                options))
    env = os.environ.copy()
    if 'DEBIAN_FRONTEND' not in env:
        env['DEBIAN_FRONTEND'] = 'noninteractive'

    if fatal:
        subprocess.check_call(cmd, env=env)
    else:
        subprocess.call(cmd, env=env)


def apt_upgrade(options=None, fatal=False, dist=False):
    """Upgrade all packages"""
    if options is None:
        options = ['--option=Dpkg::Options::=--force-confold']

    cmd = ['apt-get', '--assume-yes']
    cmd.extend(options)
    if dist:
        cmd.append('dist-upgrade')
    else:
        cmd.append('upgrade')
    log("Upgrading with options: {}".format(options))

    env = os.environ.copy()
    if 'DEBIAN_FRONTEND' not in env:
        env['DEBIAN_FRONTEND'] = 'noninteractive'

    if fatal:
        subprocess.check_call(cmd, env=env)
    else:
        subprocess.call(cmd, env=env)


def apt_update(fatal=False):
    """Update local apt cache"""
    cmd = ['apt-get', 'update']
    if fatal:
        subprocess.check_call(cmd)
    else:
        subprocess.call(cmd)


def apt_purge(packages, fatal=False):
    """Purge one or more packages"""
    cmd = ['apt-get', '--assume-yes', 'purge']
    if isinstance(packages, basestring):
        cmd.append(packages)
    else:
        cmd.extend(packages)
    log("Purging {}".format(packages))
    if fatal:
        subprocess.check_call(cmd)
    else:
        subprocess.call(cmd)


def apt_hold(packages, fatal=False):
    """Hold one or more packages"""
    cmd = ['apt-mark', 'hold']
    if isinstance(packages, basestring):
        cmd.append(packages)
    else:
        cmd.extend(packages)
    log("Holding {}".format(packages))
    if fatal:
        subprocess.check_call(cmd)
    else:
        subprocess.call(cmd)


def add_source(source, key=None):
    if source is None:
        log('Source is not present. Skipping')
        return

    if (source.startswith('ppa:') or
        source.startswith('http') or
        source.startswith('deb ') or
            source.startswith('cloud-archive:')):
        subprocess.check_call(['add-apt-repository', '--yes', source])
    elif source.startswith('cloud:'):
        apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
                    fatal=True)
        pocket = source.split(':')[-1]
        if pocket not in CLOUD_ARCHIVE_POCKETS:
            raise SourceConfigError(
                'Unsupported cloud: source option %s' %
                pocket)
        actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
        with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
            apt.write(CLOUD_ARCHIVE.format(actual_pocket))
    elif source == 'proposed':
        release = lsb_release()['DISTRIB_CODENAME']
        with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
            apt.write(PROPOSED_POCKET.format(release))
    if key:
        subprocess.check_call(['apt-key', 'adv', '--keyserver',
                               'keyserver.ubuntu.com', '--recv',
                               key])


class SourceConfigError(Exception):
    pass


def configure_sources(update=False,
                      sources_var='install_sources',
                      keys_var='install_keys'):
    """
    Configure multiple sources from charm configuration

    Example config:
        install_sources:
          - "ppa:foo"
          - "http://example.com/repo precise main"
        install_keys:
          - null
          - "a1b2c3d4"

    Note that 'null' (a.k.a. None) should not be quoted.
    """
    sources = safe_load(config(sources_var))
    keys = config(keys_var)
    if keys is not None:
        keys = safe_load(keys)
    if isinstance(sources, basestring) and (
            keys is None or isinstance(keys, basestring)):
        add_source(sources, keys)
    else:
        if not len(sources) == len(keys):
            msg = 'Install sources and keys lists are different lengths'
            raise SourceConfigError(msg)
        for src_num in range(len(sources)):
            add_source(sources[src_num], keys[src_num])
    if update:
        apt_update(fatal=True)

# The order of this list is very important. Handlers should be listed from
# least- to most-specific URL matching.
FETCH_HANDLERS = (
@ -231,38 +74,21 @@ FETCH_HANDLERS = (
    'charmhelpers.fetch.bzrurl.BzrUrlFetchHandler',
)

APT_NO_LOCK = 100  # The return code for "couldn't acquire lock" in APT.
APT_NO_LOCK_RETRY_DELAY = 10  # Wait 10 seconds between apt lock checks.
APT_NO_LOCK_RETRY_COUNT = 30  # Retry to acquire the lock X times.


class SourceConfigError(Exception):
    pass


class UnhandledSource(Exception):
    pass


def install_remote(source):
    """
    Install a file tree from a remote source

    The specified source should be a url of the form:
        scheme://[host]/path[#[option=value][&...]]

    Schemes supported are based on this module's submodules.
    Options supported are submodule-specific.
    """
    # We ONLY check for True here because can_handle may return a string
    # explaining why it can't handle a given source.
    handlers = [h for h in plugins() if h.can_handle(source) is True]
    installed_to = None
    for handler in handlers:
        try:
            installed_to = handler.install(source)
        except UnhandledSource:
            pass
    if not installed_to:
        raise UnhandledSource("No handler found for source {}".format(source))
    return installed_to


def install_from_config(config_var_name):
    charm_config = config()
    source = charm_config[config_var_name]
    return install_remote(source)
class AptLockError(Exception):
    pass


class BaseFetchHandler(object):
@ -289,6 +115,245 @@ class BaseFetchHandler(object):
        return urlunparse(parts)

def filter_installed_packages(packages):
    """Returns a list of packages that require installation"""
    cache = apt_cache()
    _pkgs = []
    for package in packages:
        try:
            p = cache[package]
            p.current_ver or _pkgs.append(package)
        except KeyError:
            log('Package {} has no installation candidate.'.format(package),
                level='WARNING')
            _pkgs.append(package)
    return _pkgs


def apt_cache(in_memory=True):
    """Build and return an apt cache"""
    import apt_pkg
    apt_pkg.init()
    if in_memory:
        apt_pkg.config.set("Dir::Cache::pkgcache", "")
        apt_pkg.config.set("Dir::Cache::srcpkgcache", "")
    return apt_pkg.Cache()


def apt_install(packages, options=None, fatal=False):
    """Install one or more packages"""
    if options is None:
        options = ['--option=Dpkg::Options::=--force-confold']

    cmd = ['apt-get', '--assume-yes']
    cmd.extend(options)
    cmd.append('install')
    if isinstance(packages, basestring):
        cmd.append(packages)
    else:
        cmd.extend(packages)
    log("Installing {} with options: {}".format(packages,
                                                options))
    _run_apt_command(cmd, fatal)


def apt_upgrade(options=None, fatal=False, dist=False):
    """Upgrade all packages"""
    if options is None:
        options = ['--option=Dpkg::Options::=--force-confold']

    cmd = ['apt-get', '--assume-yes']
    cmd.extend(options)
    if dist:
        cmd.append('dist-upgrade')
    else:
        cmd.append('upgrade')
    log("Upgrading with options: {}".format(options))
    _run_apt_command(cmd, fatal)


def apt_update(fatal=False):
    """Update local apt cache"""
    cmd = ['apt-get', 'update']
    _run_apt_command(cmd, fatal)


def apt_purge(packages, fatal=False):
    """Purge one or more packages"""
    cmd = ['apt-get', '--assume-yes', 'purge']
    if isinstance(packages, basestring):
        cmd.append(packages)
    else:
        cmd.extend(packages)
    log("Purging {}".format(packages))
    _run_apt_command(cmd, fatal)


def apt_hold(packages, fatal=False):
    """Hold one or more packages"""
    cmd = ['apt-mark', 'hold']
    if isinstance(packages, basestring):
        cmd.append(packages)
    else:
        cmd.extend(packages)
    log("Holding {}".format(packages))

    if fatal:
        subprocess.check_call(cmd)
    else:
        subprocess.call(cmd)

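A sketch of the usual idiom these helpers enable; package names are illustrative:

    # Only invoke apt-get for packages not already installed; fatal=True
    # raises CalledProcessError (after the lock retries below) on failure.
    missing = filter_installed_packages(['corosync', 'pacemaker'])
    if missing:
        apt_install(missing, fatal=True)
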
def add_source(source, key=None):
    """Add a package source to this system.

    @param source: a URL or sources.list entry, as supported by
    add-apt-repository(1). Examples::

        ppa:charmers/example
        deb https://stub:key@private.example.com/ubuntu trusty main

    In addition:
        'proposed:' may be used to enable the standard 'proposed'
        pocket for the release.
        'cloud:' may be used to activate official cloud archive pockets,
        such as 'cloud:icehouse'

    @param key: A key to be added to the system's APT keyring and used
    to verify the signatures on packages. Ideally, this should be an
    ASCII format GPG public key including the block headers. A GPG key
    id may also be used, but be aware that only insecure protocols are
    available to retrieve the actual public key from a public keyserver
    placing your Juju environment at risk. ppa and cloud archive keys
    are securely added automatically, so should not be provided.
    """
    if source is None:
        log('Source is not present. Skipping')
        return

    if (source.startswith('ppa:') or
        source.startswith('http') or
        source.startswith('deb ') or
            source.startswith('cloud-archive:')):
        subprocess.check_call(['add-apt-repository', '--yes', source])
    elif source.startswith('cloud:'):
        apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
                    fatal=True)
        pocket = source.split(':')[-1]
        if pocket not in CLOUD_ARCHIVE_POCKETS:
            raise SourceConfigError(
                'Unsupported cloud: source option %s' %
                pocket)
        actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
        with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
            apt.write(CLOUD_ARCHIVE.format(actual_pocket))
    elif source == 'proposed':
        release = lsb_release()['DISTRIB_CODENAME']
        with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
            apt.write(PROPOSED_POCKET.format(release))
    else:
        raise SourceConfigError("Unknown source: {!r}".format(source))

    if key:
        if '-----BEGIN PGP PUBLIC KEY BLOCK-----' in key:
            with NamedTemporaryFile() as key_file:
                key_file.write(key)
                key_file.flush()
                key_file.seek(0)
                subprocess.check_call(['apt-key', 'add', '-'], stdin=key_file)
        else:
            # Note that hkp: is in no way a secure protocol. Using a
            # GPG key id is pointless from a security POV unless you
            # absolutely trust your network and DNS.
            subprocess.check_call(['apt-key', 'adv', '--keyserver',
                                   'hkp://keyserver.ubuntu.com:80', '--recv',
                                   key])

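Some usage sketches matching the docstring above; the repository URL and key id are illustrative:

    add_source('cloud:icehouse')                 # cloud archive pocket
    add_source('ppa:charmers/example')           # PPA, key added automatically
    add_source('deb http://example.com/repo precise main',
               key='a1b2c3d4')                   # key id fetched over hkp
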
def configure_sources(update=False,
                      sources_var='install_sources',
                      keys_var='install_keys'):
    """
    Configure multiple sources from charm configuration.

    The lists are encoded as yaml fragments in the configuration.
    The fragment needs to be included as a string. Sources and their
    corresponding keys are of the types supported by add_source().

    Example config:
        install_sources: |
          - "ppa:foo"
          - "http://example.com/repo precise main"
        install_keys: |
          - null
          - "a1b2c3d4"

    Note that 'null' (a.k.a. None) should not be quoted.
    """
    sources = safe_load((config(sources_var) or '').strip()) or []
    keys = safe_load((config(keys_var) or '').strip()) or None

    if isinstance(sources, basestring):
        sources = [sources]

    if keys is None:
        for source in sources:
            add_source(source, None)
    else:
        if isinstance(keys, basestring):
            keys = [keys]

        if len(sources) != len(keys):
            raise SourceConfigError(
                'Install sources and keys lists are different lengths')
        for source, key in zip(sources, keys):
            add_source(source, key)
    if update:
        apt_update(fatal=True)

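Typically this is called once from the install hook; a sketch:

    # Apply all operator-configured sources/keys, then refresh the index.
    configure_sources(update=True)
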
def install_remote(source, *args, **kwargs):
    """
    Install a file tree from a remote source

    The specified source should be a url of the form:
        scheme://[host]/path[#[option=value][&...]]

    Schemes supported are based on this module's submodules.
    Options supported are submodule-specific.
    Additional arguments are passed through to the submodule.

    For example::

        dest = install_remote('http://example.com/archive.tgz',
                              checksum='deadbeef',
                              hash_type='sha1')

    This will download `archive.tgz`, validate it using SHA1 and, if
    the file is ok, extract it and return the directory in which it
    was extracted. If the checksum fails, it will raise
    :class:`charmhelpers.core.host.ChecksumError`.
    """
    # We ONLY check for True here because can_handle may return a string
    # explaining why it can't handle a given source.
    handlers = [h for h in plugins() if h.can_handle(source) is True]
    installed_to = None
    for handler in handlers:
        try:
            installed_to = handler.install(source, *args, **kwargs)
        except UnhandledSource:
            pass
    if not installed_to:
        raise UnhandledSource("No handler found for source {}".format(source))
    return installed_to


def install_from_config(config_var_name):
    charm_config = config()
    source = charm_config[config_var_name]
    return install_remote(source)


def plugins(fetch_handlers=None):
    if not fetch_handlers:
        fetch_handlers = FETCH_HANDLERS
@ -306,3 +371,40 @@ def plugins(fetch_handlers=None):
            log("FetchHandler {} not found, skipping plugin".format(
                handler_name))
    return plugin_list


def _run_apt_command(cmd, fatal=False):
    """
    Run an APT command, checking output and retrying if the fatal flag is set
    to True.

    :param: cmd: str: The apt command to run.
    :param: fatal: bool: Whether the command's output should be checked and
        retried.
    """
    env = os.environ.copy()

    if 'DEBIAN_FRONTEND' not in env:
        env['DEBIAN_FRONTEND'] = 'noninteractive'

    if fatal:
        retry_count = 0
        result = None

        # If the command is considered "fatal", we need to retry if the apt
        # lock was not acquired.

        while result is None or result == APT_NO_LOCK:
            try:
                result = subprocess.check_call(cmd, env=env)
            except subprocess.CalledProcessError, e:
                retry_count = retry_count + 1
                if retry_count > APT_NO_LOCK_RETRY_COUNT:
                    raise
                result = e.returncode
                log("Couldn't acquire DPKG lock. Will retry in {} seconds."
                    "".format(APT_NO_LOCK_RETRY_DELAY))
                time.sleep(APT_NO_LOCK_RETRY_DELAY)

    else:
        subprocess.call(cmd, env=env)
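A sketch of the retry behaviour: with the constants above, a fatal call keeps retrying while apt-get exits with APT_NO_LOCK (100), for at most 30 retries at 10-second intervals, i.e. roughly five minutes, before the CalledProcessError propagates:

    # Equivalent of apt_install(['corosync'], fatal=True) at this layer.
    _run_apt_command(['apt-get', '--assume-yes', 'install', 'corosync'],
                     fatal=True)
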
@ -1,6 +1,8 @@
import os
import urllib2
from urllib import urlretrieve
import urlparse
import hashlib

from charmhelpers.fetch import (
    BaseFetchHandler,
@ -10,11 +12,19 @@ from charmhelpers.payload.archive import (
    get_archive_handler,
    extract,
)
from charmhelpers.core.host import mkdir
from charmhelpers.core.host import mkdir, check_hash


class ArchiveUrlFetchHandler(BaseFetchHandler):
    """Handler for archives via generic URLs"""
    """
    Handler to download archive files from arbitrary URLs.

    Can fetch from http, https, ftp, and file URLs.

    Can install either tarballs (.tar, .tgz, .tbz2, etc) or zip files.

    Installs the contents of the archive in $CHARM_DIR/fetched/.
    """
    def can_handle(self, source):
        url_parts = self.parse_url(source)
        if url_parts.scheme not in ('http', 'https', 'ftp', 'file'):
@ -24,6 +34,12 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
        return False

    def download(self, source, dest):
        """
        Download an archive file.

        :param str source: URL pointing to an archive file.
        :param str dest: Local path location to download archive file to.
        """
        # propagate all exceptions
        # URLError, OSError, etc
        proto, netloc, path, params, query, fragment = urlparse.urlparse(source)
@ -48,7 +64,30 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
            os.unlink(dest)
            raise e

    def install(self, source):
    # Mandatory file validation via SHA1 or MD5 hashing.
    def download_and_validate(self, url, hashsum, validate="sha1"):
        tempfile, headers = urlretrieve(url)
        check_hash(tempfile, hashsum, validate)
        return tempfile

    def install(self, source, dest=None, checksum=None, hash_type='sha1'):
        """
        Download and install an archive file, with optional checksum validation.

        The checksum can also be given on the `source` URL's fragment.
        For example::

            handler.install('http://example.com/file.tgz#sha1=deadbeef')

        :param str source: URL pointing to an archive file.
        :param str dest: Local destination path to install to. If not given,
            installs to `$CHARM_DIR/archives/archive_file_name`.
        :param str checksum: If given, validate the archive file after download.
        :param str hash_type: Algorithm used to generate `checksum`.
            Can be any hash algorithm supported by :mod:`hashlib`,
            such as md5, sha1, sha256, sha512, etc.
        """
        url_parts = self.parse_url(source)
        dest_dir = os.path.join(os.environ.get('CHARM_DIR'), 'fetched')
        if not os.path.exists(dest_dir):
@ -60,4 +99,10 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
            raise UnhandledSource(e.reason)
        except OSError as e:
            raise UnhandledSource(e.strerror)
        return extract(dld_file)
        options = urlparse.parse_qs(url_parts.fragment)
        for key, value in options.items():
            if key in hashlib.algorithms:
                check_hash(dld_file, value, key)
        if checksum:
            check_hash(dld_file, checksum, hash_type)
        return extract(dld_file, dest)

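A sketch of checksum-validated fetching with the new API; the URL and digests are illustrative:

    handler = ArchiveUrlFetchHandler()

    # Fragment-based checksum, validated automatically during install():
    handler.install('http://example.com/file.tgz#sha1=deadbeef')

    # Or explicit download-and-verify of a single file by md5:
    path = handler.download_and_validate(
        'http://example.com/file.tgz',
        'd41d8cd98f00b204e9800998ecf8427e',  # expected md5 hexdigest
        validate='md5')
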
@ -39,7 +39,8 @@ class BzrUrlFetchHandler(BaseFetchHandler):
    def install(self, source):
        url_parts = self.parse_url(source)
        branch_name = url_parts.path.strip("/").split("/")[-1]
        dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched", branch_name)
        dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
                                branch_name)
        if not os.path.exists(dest_dir):
            mkdir(dest_dir, perms=0755)
        try:
@ -12,7 +12,15 @@ import subprocess
import socket
import fcntl
import struct

from charmhelpers.fetch import apt_install
from charmhelpers.contrib.network import ip as utils

try:
    import netifaces
except ImportError:
    apt_install('python-netifaces')
    import netifaces

try:
    from netaddr import IPNetwork
@ -79,3 +87,23 @@ def get_network_address(iface):
        return str(ip.network)
    else:
        return None


def get_ipv6_network_address(iface):
    # Behave in same way as ipv4 get_network_address() above if iface is None.
    if not iface:
        return None

    try:
        ipv6_addr = utils.get_ipv6_addr(iface=iface)[0]
        all_addrs = netifaces.ifaddresses(iface)

        for addr in all_addrs[netifaces.AF_INET6]:
            if ipv6_addr == addr['addr']:
                network = "{}/{}".format(addr['addr'], addr['netmask'])
                return str(IPNetwork(network).network)

    except ValueError:
        raise Exception("Invalid interface '%s'" % iface)

    raise Exception("No valid network found for interface '%s'" % iface)
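A sketch of what the helper yields; the interface name and addresses are illustrative. This value is what ends up as corosync's bindnetaddr under prefer-ipv6:

    # For eth0 carrying 2001:db8:1::10/64 (a non-temporary, global address,
    # per the prefer-ipv6 note in config.yaml), this returns the network
    # address '2001:db8:1::'.
    bindnetaddr = get_ipv6_network_address('eth0')
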
@ -25,7 +25,8 @@ from charmhelpers.core.hookenv import (
    relation_set,
    unit_get,
    config,
    Hooks, UnregisteredHookError
    Hooks, UnregisteredHookError,
    local_unit,
)

from charmhelpers.core.host import (
@ -33,6 +34,7 @@ from charmhelpers.core.host import (
    service_start,
    service_restart,
    service_running,
    lsb_release
)

from charmhelpers.fetch import (
@ -61,18 +63,31 @@ def install():


def get_corosync_conf():
    conf = {}
    if config('prefer-ipv6'):
        ip_version = 'ipv6'
        bindnetaddr = hacluster.get_ipv6_network_address
    else:
        ip_version = 'ipv4'
        bindnetaddr = hacluster.get_network_address

    for relid in relation_ids('ha'):
        for unit in related_units(relid):
            bindiface = relation_get('corosync_bindiface',
                                     unit, relid)
            conf = {
                'corosync_bindnetaddr':
                hacluster.get_network_address(
                    relation_get('corosync_bindiface',
                                 unit, relid)
                ),
                'corosync_bindnetaddr': bindnetaddr(bindiface),
                'corosync_mcastport': relation_get('corosync_mcastport',
                                                   unit, relid),
                'corosync_mcastaddr': config('corosync_mcastaddr'),
                'ip_version': ip_version,
            }

            if config('prefer-ipv6'):
                local_unit_no = int(local_unit().split('/')[1])
                # nodeid should not be 0
                conf['nodeid'] = local_unit_no + 1
                conf['netmtu'] = config('netmtu')

            if None not in conf.itervalues():
                return conf
    missing = [k for k, v in conf.iteritems() if v is None]
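A worked example of the nodeid derivation above; the unit name is illustrative. An explicit, non-zero nodeid is needed here because corosync cannot auto-derive node ids when ip_version is ipv6:

    # local_unit() == 'hacluster/3'  ->  unit number 3  ->  nodeid 4
    local_unit_no = int('hacluster/3'.split('/')[1])
    nodeid = local_unit_no + 1
    assert nodeid == 4
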
@ -105,6 +120,9 @@ def emit_base_conf():

@hooks.hook()
def config_changed():
    if config('prefer-ipv6'):
        assert_charm_supports_ipv6()

    corosync_key = config('corosync_key')
    if not corosync_key:
        log('CRITICAL',
@ -273,7 +291,7 @@ def configure_cluster():
    # Put the services in HA, if not already done so
    # if not pcmk.is_resource_present(res_name):
    if not pcmk.crm_opt_exists(res_name):
        if not res_name in resource_params:
        if res_name not in resource_params:
            cmd = 'crm -w -F configure primitive %s %s' % (res_name,
                                                           res_type)
        else:
@ -449,6 +467,13 @@ def render_template(template_name, context, template_dir=TEMPLATES_DIR):
    return template.render(context)


def assert_charm_supports_ipv6():
    """Check whether we are able to support charms ipv6."""
    if lsb_release()['DISTRIB_CODENAME'].lower() < "trusty":
        raise Exception("IPv6 is not supported in the charms for Ubuntu "
                        "versions less than Trusty 14.04")


if __name__ == '__main__':
    try:
        hooks.execute(sys.argv)
@ -1,4 +1,3 @@
#import lib.utils as utils
import commands
import subprocess
import socket
@ -25,18 +25,25 @@ totem {
        clear_node_high_bit: yes

        # Disable encryption
       secauth: off
        secauth: off

        # How many threads to use for encryption/decryption
       threads: 0
        threads: 0

        # Optionally assign a fixed node id (integer)
        # nodeid: 1234
{% if nodeid %}
        nodeid: {{ nodeid }}
{% endif %}
{% if ip_version %}
        ip_version: {{ ip_version }}
{% endif %}
{% if netmtu %}
        netmtu: {{ netmtu }}
{% endif %}

        # This specifies the mode of redundant ring, which may be none, active, or passive.
       rrp_mode: none
        rrp_mode: none

       interface {
        interface {
                # The following values need to be set based on your environment
                ringnumber: 0
                bindnetaddr: {{ corosync_bindnetaddr }}
@ -46,21 +53,21 @@ totem {
}

quorum {
       # Enable and configure quorum subsystem (default: off)
       # see also corosync.conf.5 and votequorum.5
       provider: corosync_votequorum
       expected_votes: 2
        # Enable and configure quorum subsystem (default: off)
        # see also corosync.conf.5 and votequorum.5
        provider: corosync_votequorum
        expected_votes: 2
}

logging {
       fileline: off
       to_stderr: yes
       to_logfile: no
       to_syslog: yes
       syslog_facility: daemon
       debug: off
       logger_subsys {
               subsys: QUORUM
               debug: off
       }
        fileline: off
        to_stderr: yes
        to_logfile: no
        to_syslog: yes
        syslog_facility: daemon
        debug: off
        logger_subsys {
                subsys: QUORUM
                debug: off
        }
}
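For reference, a sketch of the totem stanza this template might render under prefer-ipv6; the values are illustrative:

    totem {
            ...
            nodeid: 4
            ip_version: ipv6
            netmtu: 1500
            ...
    }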