Resync charm-helpers

Change-Id: I0ca6981c427a3078825766b84ddb0b55dbb57019
Author: James Page   Date: 2016-03-02 11:07:09 +00:00
parent 3374cb6313
commit 528d63c1e8
12 changed files with 237 additions and 48 deletions

.gitignore (vendored)

@@ -4,3 +4,4 @@ bin
 .tox
 tags
 *.sw[nop]
+*.pyc

@@ -456,3 +456,18 @@ def get_hostname(address, fqdn=True):
             return result
     else:
         return result.split('.')[0]
+
+
+def port_has_listener(address, port):
+    """
+    Returns True if the address:port is open and being listened to,
+    else False.
+
+    @param address: an IP address or hostname
+    @param port: integer port
+
+    Note: calls 'nc -z' via a subprocess
+    """
+    cmd = ['nc', '-z', address, str(port)]
+    result = subprocess.call(cmd)
+    return not(bool(result))
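For reference, a minimal standalone sketch of the check port_has_listener() performs; the loopback address and port are illustrative, and nc must be installed on the unit:

import subprocess

def check_listening(address, port):
    # nc -z exits 0 when something accepts connections on address:port
    return subprocess.call(['nc', '-z', address, str(port)]) == 0

print(check_listening('127.0.0.1', 22))  # True if sshd is listening locally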

@@ -121,7 +121,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
         # Charms which should use the source config option
         use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
-                      'ceph-osd', 'ceph-radosgw']
+                      'ceph-osd', 'ceph-radosgw', 'ceph-mon']
         # Charms which can not use openstack-origin, ie. many subordinates
         no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',

@@ -90,6 +90,12 @@ from charmhelpers.contrib.network.ip import (
 from charmhelpers.contrib.openstack.utils import get_host_ip
 from charmhelpers.core.unitdata import kv
 
+try:
+    import psutil
+except ImportError:
+    apt_install('python-psutil', fatal=True)
+    import psutil
+
 CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
 
 ADDRESS_TYPES = ['admin', 'internal', 'public']
@@ -404,6 +410,7 @@ class IdentityServiceContext(OSContextGenerator):
                 auth_host = format_ipv6_addr(auth_host) or auth_host
                 svc_protocol = rdata.get('service_protocol') or 'http'
                 auth_protocol = rdata.get('auth_protocol') or 'http'
+                api_version = rdata.get('api_version') or '2.0'
                 ctxt.update({'service_port': rdata.get('service_port'),
                              'service_host': serv_host,
                              'auth_host': auth_host,
@@ -412,7 +419,8 @@ class IdentityServiceContext(OSContextGenerator):
                              'admin_user': rdata.get('service_username'),
                              'admin_password': rdata.get('service_password'),
                              'service_protocol': svc_protocol,
-                             'auth_protocol': auth_protocol})
+                             'auth_protocol': auth_protocol,
+                             'api_version': api_version})
 
                 if self.context_complete(ctxt):
                     # NOTE(jamespage) this is required for >= icehouse
@@ -1258,13 +1266,11 @@ class WorkerConfigContext(OSContextGenerator):
 
     @property
     def num_cpus(self):
-        try:
-            from psutil import NUM_CPUS
-        except ImportError:
-            apt_install('python-psutil', fatal=True)
-            from psutil import NUM_CPUS
-
-        return NUM_CPUS
+        # NOTE: use cpu_count if present (16.04 support)
+        if hasattr(psutil, 'cpu_count'):
+            return psutil.cpu_count()
+        else:
+            return psutil.NUM_CPUS
 
     def __call__(self):
         multiplier = config('worker-multiplier') or 0
@@ -1467,6 +1473,8 @@ class NetworkServiceContext(OSContextGenerator):
                     rdata.get('service_protocol') or 'http',
                     'auth_protocol':
                     rdata.get('auth_protocol') or 'http',
+                    'api_version':
+                    rdata.get('api_version') or '2.0',
                 }
                 if self.context_complete(ctxt):
                     return ctxt
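The num_cpus change above relies on the module-level psutil import added in the first hunk; a minimal sketch of the same compatibility check outside the charm (assuming python-psutil is installed):

import psutil

# psutil >= 2.0 (as shipped on Ubuntu 16.04) provides cpu_count();
# older releases only expose the NUM_CPUS constant.
if hasattr(psutil, 'cpu_count'):
    cpus = psutil.cpu_count()
else:
    cpus = psutil.NUM_CPUS
print(cpus)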

@@ -237,10 +237,16 @@ def neutron_plugins():
         plugins['midonet']['driver'] = (
             'neutron.plugins.midonet.plugin.MidonetPluginV2')
         if release >= 'liberty':
-            midonet_origin = config('midonet-origin')
-            if midonet_origin is not None and midonet_origin[4:5] == '1':
-                plugins['midonet']['driver'] = (
-                    'midonet.neutron.plugin_v1.MidonetPluginV2')
+            plugins['midonet']['driver'] = (
+                'midonet.neutron.plugin_v1.MidonetPluginV2')
+            plugins['midonet']['server_packages'].remove(
+                'python-neutron-plugin-midonet')
+            plugins['midonet']['server_packages'].append(
+                'python-networking-midonet')
+            plugins['plumgrid']['driver'] = (
+                'networking_plumgrid.neutron.plugins.plugin.NeutronPluginPLUMgridV2')
+            plugins['plumgrid']['server_packages'].remove(
+                'neutron-plugin-plumgrid')
     return plugins
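A hedged sketch of how a charm typically consumes the returned map; this only works inside a hook environment, since neutron_plugins() reads charm config and the detected OpenStack release:

from charmhelpers.contrib.openstack.neutron import neutron_plugins

plugins = neutron_plugins()
midonet = plugins['midonet']
print(midonet['driver'])           # midonet.neutron.plugin_v1.MidonetPluginV2 on liberty+
print(midonet['server_packages'])  # now lists python-networking-midonet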

@@ -6,6 +6,8 @@ Listen {{ ext_port }}
 <VirtualHost {{ address }}:{{ ext }}>
     ServerName {{ endpoint }}
     SSLEngine on
+    SSLProtocol +TLSv1 +TLSv1.1 +TLSv1.2
+    SSLCipherSuite HIGH:!RC4:!MD5:!aNULL:!eNULL:!EXP:!LOW:!MEDIUM
     SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
     SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }}
     ProxyPass / http://localhost:{{ int }}/
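To see the effect of the two hardened SSL directives in a rendered vhost, here is a small Jinja2 sketch; the endpoint, address and port values are purely illustrative:

from jinja2 import Template

snippet = Template("""\
<VirtualHost {{ address }}:{{ ext }}>
    ServerName {{ endpoint }}
    SSLEngine on
    SSLProtocol +TLSv1 +TLSv1.1 +TLSv1.2
    SSLCipherSuite HIGH:!RC4:!MD5:!aNULL:!eNULL:!EXP:!LOW:!MEDIUM
    ProxyPass / http://localhost:{{ int }}/
</VirtualHost>
""")

print(snippet.render(address='10.0.0.10', ext=443, int=8776,
                     endpoint='cinder.example.com'))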

@@ -6,6 +6,8 @@ Listen {{ ext_port }}
 <VirtualHost {{ address }}:{{ ext }}>
     ServerName {{ endpoint }}
     SSLEngine on
+    SSLProtocol +TLSv1 +TLSv1.1 +TLSv1.2
+    SSLCipherSuite HIGH:!RC4:!MD5:!aNULL:!eNULL:!EXP:!LOW:!MEDIUM
     SSLCertificateFile /etc/apache2/ssl/{{ namespace }}/cert_{{ endpoint }}
     SSLCertificateKeyFile /etc/apache2/ssl/{{ namespace }}/key_{{ endpoint }}
     ProxyPass / http://localhost:{{ int }}/

@@ -1,4 +1,14 @@
 {% if auth_host -%}
+{% if api_version == '3' -%}
+[keystone_authtoken]
+auth_url = {{ service_protocol }}://{{ service_host }}:{{ service_port }}
+project_name = {{ admin_tenant_name }}
+username = {{ admin_user }}
+password = {{ admin_password }}
+project_domain_name = default
+user_domain_name = default
+auth_plugin = password
+{% else -%}
 [keystone_authtoken]
 identity_uri = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/{{ auth_admin_prefix }}
 auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/{{ service_admin_prefix }}
@@ -7,3 +17,4 @@ admin_user = {{ admin_user }}
 admin_password = {{ admin_password }}
 signing_dir = {{ signing_dir }}
 {% endif -%}
+{% endif -%}
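With api_version set to '3' and illustrative relation data (keystone.example.com, service user cinder, tenant services, password secret), the new branch renders roughly as:

[keystone_authtoken]
auth_url = http://keystone.example.com:5000
project_name = services
username = cinder
password = secret
project_domain_name = default
user_domain_name = default
auth_plugin = password

Any other api_version value (including the '2.0' default set by the contexts above) falls through to the existing identity_uri/auth_uri block.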

@@ -23,8 +23,10 @@ import json
 import os
 import sys
 import re
+import itertools
 
 import six
+import tempfile
 import traceback
 import uuid
 import yaml
@@ -41,6 +43,7 @@ from charmhelpers.core.hookenv import (
     config,
     log as juju_log,
     charm_dir,
+    DEBUG,
     INFO,
     related_units,
     relation_ids,
@@ -58,6 +61,7 @@ from charmhelpers.contrib.storage.linux.lvm import (
 from charmhelpers.contrib.network.ip import (
     get_ipv6_addr,
     is_ipv6,
+    port_has_listener,
 )
 
 from charmhelpers.contrib.python.packages import (
@@ -65,7 +69,7 @@ from charmhelpers.contrib.python.packages import (
     pip_install,
 )
 
-from charmhelpers.core.host import lsb_release, mounts, umount
+from charmhelpers.core.host import lsb_release, mounts, umount, service_running
 from charmhelpers.fetch import apt_install, apt_cache, install_remote
 from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
 from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device
@@ -347,12 +351,42 @@ def os_release(package, base='essex'):
 
 
 def import_key(keyid):
-    cmd = "apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 " \
-          "--recv-keys %s" % keyid
-    try:
-        subprocess.check_call(cmd.split(' '))
-    except subprocess.CalledProcessError:
-        error_out("Error importing repo key %s" % keyid)
+    key = keyid.strip()
+    if (key.startswith('-----BEGIN PGP PUBLIC KEY BLOCK-----') and
+            key.endswith('-----END PGP PUBLIC KEY BLOCK-----')):
+        juju_log("PGP key found (looks like ASCII Armor format)", level=DEBUG)
+        juju_log("Importing ASCII Armor PGP key", level=DEBUG)
+        with tempfile.NamedTemporaryFile() as keyfile:
+            with open(keyfile.name, 'w') as fd:
+                fd.write(key)
+                fd.write("\n")
+            cmd = ['apt-key', 'add', keyfile.name]
+            try:
+                subprocess.check_call(cmd)
+            except subprocess.CalledProcessError:
+                error_out("Error importing PGP key '%s'" % key)
+    else:
+        juju_log("PGP key found (looks like Radix64 format)", level=DEBUG)
+        juju_log("Importing PGP key from keyserver", level=DEBUG)
+        cmd = ['apt-key', 'adv', '--keyserver',
+               'hkp://keyserver.ubuntu.com:80', '--recv-keys', key]
+        try:
+            subprocess.check_call(cmd)
+        except subprocess.CalledProcessError:
+            error_out("Error importing PGP key '%s'" % key)
+
+
+def get_source_and_pgp_key(input):
+    """Look for a pgp key ID or ascii-armor key in the given input."""
+    index = input.strip()
+    index = input.rfind('|')
+    if index < 0:
+        return input, None
+
+    key = input[index + 1:].strip('|')
+    source = input[:index]
+    return source, key
 
 
 def configure_installation_source(rel):
@@ -364,16 +398,16 @@ def configure_installation_source(rel):
         with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
             f.write(DISTRO_PROPOSED % ubuntu_rel)
     elif rel[:4] == "ppa:":
-        src = rel
+        src, key = get_source_and_pgp_key(rel)
+        if key:
+            import_key(key)
+
         subprocess.check_call(["add-apt-repository", "-y", src])
     elif rel[:3] == "deb":
-        l = len(rel.split('|'))
-        if l == 2:
-            src, key = rel.split('|')
-            juju_log("Importing PPA key from keyserver for %s" % src)
+        src, key = get_source_and_pgp_key(rel)
+        if key:
             import_key(key)
-        elif l == 1:
-            src = rel
+
         with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
             f.write(src)
     elif rel[:6] == 'cloud:':
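For reference, a hedged sketch of what the new helper returns for the two source styles handled above; the repository URL and key ID are illustrative only:

from charmhelpers.contrib.openstack.utils import get_source_and_pgp_key

src, key = get_source_and_pgp_key(
    'deb http://ubuntu-cloud.archive.canonical.com/ubuntu '
    'trusty-updates/liberty main|5EDB1B62EC4926EA')
print(src)  # 'deb http://ubuntu-cloud.archive.canonical.com/ubuntu trusty-updates/liberty main'
print(key)  # '5EDB1B62EC4926EA'

src, key = get_source_and_pgp_key('ppa:ubuntu-cloud-archive/liberty-staging')
print(key)  # None: no '|' separator, so nothing to import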
@@ -828,13 +862,23 @@ def os_workload_status(configs, required_interfaces, charm_func=None):
     return wrap
 
 
-def set_os_workload_status(configs, required_interfaces, charm_func=None):
+def set_os_workload_status(configs, required_interfaces, charm_func=None, services=None, ports=None):
     """
     Set workload status based on complete contexts.
     status-set missing or incomplete contexts
     and juju-log details of missing required data.
     charm_func is a charm specific function to run checking
     for charm specific requirements such as a VIP setting.
+
+    This function also checks for whether the services defined are ACTUALLY
+    running and that the ports they advertise are open and being listened to.
+
+    @param services - OPTIONAL: a [{'service': <string>, 'ports': [<int>]}]
+                      The ports are optional.
+                      If services is a [<string>] then ports are ignored.
+    @param ports - OPTIONAL: an [<int>] representing ports that should be
+                   open.
+    @returns None
     """
     incomplete_rel_data = incomplete_relation_data(configs, required_interfaces)
     state = 'active'
@@ -913,6 +957,65 @@ def set_os_workload_status(configs, required_interfaces, charm_func=None):
         else:
             message = charm_message
 
+    # If the charm thinks the unit is active, check that the actual services
+    # really are active.
+    if services is not None and state == 'active':
+        # if we're passed the dict() then just grab the values as a list.
+        if isinstance(services, dict):
+            services = services.values()
+        # either extract the list of services from the dictionary, or if
+        # it is a simple string, use that. i.e. works with mixed lists.
+        _s = []
+        for s in services:
+            if isinstance(s, dict) and 'service' in s:
+                _s.append(s['service'])
+            if isinstance(s, str):
+                _s.append(s)
+        services_running = [service_running(s) for s in _s]
+        if not all(services_running):
+            not_running = [s for s, running in zip(_s, services_running)
+                           if not running]
+            message = ("Services not running that should be: {}"
+                       .format(", ".join(not_running)))
+            state = 'blocked'
+        # also verify that the ports that should be open are open
+        # NB, that ServiceManager objects only OPTIONALLY have ports
+        port_map = OrderedDict([(s['service'], s['ports'])
+                                for s in services if 'ports' in s])
+        if state == 'active' and port_map:
+            all_ports = list(itertools.chain(*port_map.values()))
+            ports_open = [port_has_listener('0.0.0.0', p)
+                          for p in all_ports]
+            if not all(ports_open):
+                not_opened = [p for p, opened in zip(all_ports, ports_open)
+                              if not opened]
+                map_not_open = OrderedDict()
+                for service, ports in port_map.items():
+                    closed_ports = set(ports).intersection(not_opened)
+                    if closed_ports:
+                        map_not_open[service] = closed_ports
+                # find which service has missing ports. They are in service
+                # order which makes it a bit easier.
+                message = (
+                    "Services with ports not open that should be: {}"
+                    .format(
+                        ", ".join([
+                            "{}: [{}]".format(
+                                service,
+                                ", ".join([str(v) for v in ports]))
+                            for service, ports in map_not_open.items()])))
+                state = 'blocked'
+
+    if ports is not None and state == 'active':
+        # and we can also check ports which we don't know the service for
+        ports_open = [port_has_listener('0.0.0.0', p) for p in ports]
+        if not all(ports_open):
+            message = (
+                "Ports which should be open, but are not: {}"
+                .format(", ".join([str(p) for p, v in zip(ports, ports_open)
+                                   if not v])))
+            state = 'blocked'
+
     # Set to active if all requirements have been met
     if state == 'active':
         message = "Unit is ready"

@@ -19,20 +19,35 @@
 import os
 import subprocess
+import sys
 
 from charmhelpers.fetch import apt_install, apt_update
 from charmhelpers.core.hookenv import charm_dir, log
 
-try:
-    from pip import main as pip_execute
-except ImportError:
-    apt_update()
-    apt_install('python-pip')
-    from pip import main as pip_execute
-
 __author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
 
 
+def pip_execute(*args, **kwargs):
+    """Overridden pip_execute() to stop sys.path being changed.
+
+    The act of importing main from the pip module seems to cause wheels from
+    /usr/share/python-wheels (installed by various tools) to be added to
+    sys.path.  This function ensures that sys.path remains the same after
+    the call is executed.
+    """
+    try:
+        _path = sys.path
+        try:
+            from pip import main as _pip_execute
+        except ImportError:
+            apt_update()
+            apt_install('python-pip')
+            from pip import main as _pip_execute
+        _pip_execute(*args, **kwargs)
+    finally:
+        sys.path = _path
+
+
 def parse_options(given, available):
     """Given a set of options, check if available"""
     for key, value in sorted(given.items()):
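A hedged usage sketch (the package name is illustrative): pip_execute() passes its arguments straight through to pip's main(), so the call below is the programmatic equivalent of `pip install --upgrade six` from a hook, with the sys.path guard from the docstring applied around the pip import:

from charmhelpers.contrib.python.packages import pip_execute

pip_execute(['install', '--upgrade', 'six'])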

@@ -120,6 +120,7 @@ class PoolCreationError(Exception):
     """
     A custom error to inform the caller that a pool creation failed. Provides an error message
     """
+
     def __init__(self, message):
         super(PoolCreationError, self).__init__(message)
@@ -129,6 +130,7 @@ class Pool(object):
     An object oriented approach to Ceph pool creation. This base class is inherited by ReplicatedPool and ErasurePool.
     Do not call create() on this base class as it will not do anything. Instantiate a child class and call create().
     """
+
     def __init__(self, service, name):
         self.service = service
         self.name = name
@@ -180,36 +182,41 @@ class Pool(object):
         :return: int. The number of pgs to use.
         """
         validator(value=pool_size, valid_type=int)
-        osds = get_osds(self.service)
-        if not osds:
+        osd_list = get_osds(self.service)
+        if not osd_list:
             # NOTE(james-page): Default to 200 for older ceph versions
             # which don't support OSD query from cli
             return 200
 
+        osd_list_length = len(osd_list)
         # Calculate based on Ceph best practices
-        if osds < 5:
+        if osd_list_length < 5:
             return 128
-        elif 5 < osds < 10:
+        elif 5 < osd_list_length < 10:
             return 512
-        elif 10 < osds < 50:
+        elif 10 < osd_list_length < 50:
             return 4096
         else:
-            estimate = (osds * 100) / pool_size
+            estimate = (osd_list_length * 100) / pool_size
             # Return the next nearest power of 2
             index = bisect.bisect_right(powers_of_two, estimate)
             return powers_of_two[index]
 
 
 class ReplicatedPool(Pool):
-    def __init__(self, service, name, replicas=2):
+    def __init__(self, service, name, pg_num=None, replicas=2):
         super(ReplicatedPool, self).__init__(service=service, name=name)
         self.replicas = replicas
+        if pg_num is None:
+            self.pg_num = self.get_pgs(self.replicas)
+        else:
+            self.pg_num = pg_num
 
     def create(self):
         if not pool_exists(self.service, self.name):
             # Create it
-            pgs = self.get_pgs(self.replicas)
-            cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs)]
+            cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create',
+                   self.name, str(self.pg_num)]
             try:
                 check_call(cmd)
             except CalledProcessError:
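A hedged sketch of the new pg_num override; the pool name, replica count and the 'admin' cephx id are illustrative:

from charmhelpers.contrib.storage.linux.ceph import ReplicatedPool

# Pin the placement group count instead of letting get_pgs() derive it.
pool = ReplicatedPool(service='admin', name='cinder-ceph', pg_num=64, replicas=3)
pool.create()  # when the pool is absent, runs: ceph --id admin osd pool create cinder-ceph 64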
@@ -241,7 +248,7 @@ class ErasurePool(Pool):
             pgs = self.get_pgs(int(erasure_profile['k']) + int(erasure_profile['m']))
             # Create it
-            cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs),
+            cmd = ['ceph', '--id', self.service, 'osd', 'pool', 'create', self.name, str(pgs), str(pgs),
                    'erasure', self.erasure_code_profile]
             try:
                 check_call(cmd)
@@ -322,7 +329,8 @@ def set_pool_quota(service, pool_name, max_bytes):
     :return: None. Can raise CalledProcessError
     """
     # Set a byte quota on a RADOS pool in ceph.
-    cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name, 'max_bytes', max_bytes]
+    cmd = ['ceph', '--id', service, 'osd', 'pool', 'set-quota', pool_name,
+           'max_bytes', str(max_bytes)]
     try:
         check_call(cmd)
     except CalledProcessError:
@@ -343,7 +351,25 @@ def remove_pool_quota(service, pool_name):
         raise
 
 
-def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure', failure_domain='host',
+def remove_erasure_profile(service, profile_name):
+    """
+    Remove an existing erasure code profile. Please see
+    http://docs.ceph.com/docs/master/rados/operations/erasure-code-profile/
+    for more details
+    :param service: six.string_types. The Ceph user name to run the command under
+    :param profile_name: six.string_types
+    :return: None. Can raise CalledProcessError
+    """
+    cmd = ['ceph', '--id', service, 'osd', 'erasure-code-profile', 'rm',
+           profile_name]
+    try:
+        check_call(cmd)
+    except CalledProcessError:
+        raise
+
+
+def create_erasure_profile(service, profile_name, erasure_plugin_name='jerasure',
+                           failure_domain='host',
                            data_chunks=2, coding_chunks=1,
                            locality=None, durability_estimator=None):
     """

@@ -121,7 +121,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
         # Charms which should use the source config option
         use_source = ['mysql', 'mongodb', 'rabbitmq-server', 'ceph',
-                      'ceph-osd', 'ceph-radosgw']
+                      'ceph-osd', 'ceph-radosgw', 'ceph-mon']
         # Charms which can not use openstack-origin, ie. many subordinates
         no_origin = ['cinder-ceph', 'hacluster', 'neutron-openvswitch', 'nrpe',