Add support for certs relation
Add support for rabbitmq using the certificates relation to secure traffic with clients. Support for users supplying their own certs and for rabbit acting as its own Certificate Authority should be unchanged. The following additional changes were needed: * Charm helper sync * Update rabbit to use get_relation_ip from charm helpers which is the standard helper for calculating endpoint ip addresses. * Create network helpers file to avoid circular dependencies. Change-Id: Ie60893e660efe1f8b0a0d42aaaecfbd9aae6f97c
This commit is contained in:
parent
79a1ea26d6
commit
9d3bb3e3d0
|
@ -11,6 +11,7 @@ include:
|
|||
- contrib.peerstorage
|
||||
- contrib.python.packages
|
||||
- contrib.ssl
|
||||
- contrib.hahelpers.apache
|
||||
- contrib.hahelpers.cluster
|
||||
- contrib.network.ip
|
||||
- contrib.hardening|inc=*
|
||||
|
|
|
@ -23,22 +23,22 @@ import subprocess
|
|||
import sys
|
||||
|
||||
try:
|
||||
import six # flake8: noqa
|
||||
import six # NOQA:F401
|
||||
except ImportError:
|
||||
if sys.version_info.major == 2:
|
||||
subprocess.check_call(['apt-get', 'install', '-y', 'python-six'])
|
||||
else:
|
||||
subprocess.check_call(['apt-get', 'install', '-y', 'python3-six'])
|
||||
import six # flake8: noqa
|
||||
import six # NOQA:F401
|
||||
|
||||
try:
|
||||
import yaml # flake8: noqa
|
||||
import yaml # NOQA:F401
|
||||
except ImportError:
|
||||
if sys.version_info.major == 2:
|
||||
subprocess.check_call(['apt-get', 'install', '-y', 'python-yaml'])
|
||||
else:
|
||||
subprocess.check_call(['apt-get', 'install', '-y', 'python3-yaml'])
|
||||
import yaml # flake8: noqa
|
||||
import yaml # NOQA:F401
|
||||
|
||||
|
||||
# Holds a list of mapping of mangled function names that have been deprecated
|
||||
|
|
|
@ -0,0 +1,86 @@
|
|||
# Copyright 2014-2015 Canonical Limited.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
#
|
||||
# Copyright 2012 Canonical Ltd.
|
||||
#
|
||||
# This file is sourced from lp:openstack-charm-helpers
|
||||
#
|
||||
# Authors:
|
||||
# James Page <james.page@ubuntu.com>
|
||||
# Adam Gandelman <adamg@ubuntu.com>
|
||||
#
|
||||
|
||||
import os
|
||||
|
||||
from charmhelpers.core import host
|
||||
from charmhelpers.core.hookenv import (
|
||||
config as config_get,
|
||||
relation_get,
|
||||
relation_ids,
|
||||
related_units as relation_list,
|
||||
log,
|
||||
INFO,
|
||||
)
|
||||
|
||||
|
||||
def get_cert(cn=None):
    """Fetch an SSL certificate/key pair for this service.

    The 'ssl_cert'/'ssl_key' charm config options are preferred; when
    either is missing, the 'identity-service' relations are scanned for
    certificate data published by remote units (CN-specific keys when a
    CN is supplied).

    :param cn: optional Canonical Name; when set, the relation keys
               'ssl_cert_<cn>'/'ssl_key_<cn>' are searched instead of the
               plain 'ssl_cert'/'ssl_key' keys.
    :returns: tuple of (cert, key); either element may be None when no
              source provides it.
    """
    # TODO: deal with multiple https endpoints via charm config
    cert = config_get('ssl_cert')
    key = config_get('ssl_key')
    if cert and key:
        return (cert, key)
    log("Inspecting identity-service relations for SSL certificate.",
        level=INFO)
    if cn:
        cert_attr = 'ssl_cert_{}'.format(cn)
        key_attr = 'ssl_key_{}'.format(cn)
    else:
        cert_attr = 'ssl_cert'
        key_attr = 'ssl_key'
    cert = key = None
    for rid in relation_ids('identity-service'):
        for peer in relation_list(rid):
            # Keep the first non-empty value seen for each attribute.
            if not cert:
                cert = relation_get(cert_attr, rid=rid, unit=peer)
            if not key:
                key = relation_get(key_attr, rid=rid, unit=peer)
    return (cert, key)
|
||||
|
||||
|
||||
def get_ca_cert():
    """Fetch the CA certificate for this service.

    The 'ssl_ca' charm config option is preferred; when it is unset the
    'identity-service' and 'identity-credentials' relations are scanned
    for a 'ca_cert' value published by remote units.

    :returns: CA certificate data, or None if no source provides one.
    """
    ca_cert = config_get('ssl_ca')
    if ca_cert is not None:
        return ca_cert
    log("Inspecting identity-service relations for CA SSL certificate.",
        level=INFO)
    rids = (relation_ids('identity-service') +
            relation_ids('identity-credentials'))
    for rid in rids:
        for peer in relation_list(rid):
            # Only the first value found is kept.
            if ca_cert is None:
                ca_cert = relation_get('ca_cert', rid=rid, unit=peer)
    return ca_cert
|
||||
|
||||
|
||||
def retrieve_ca_cert(cert_file):
    """Read a CA certificate from disk.

    :param cert_file: path to the certificate file.
    :returns: raw bytes of the file, or None when the path is not a
              regular file.
    """
    if not os.path.isfile(cert_file):
        return None
    with open(cert_file, 'rb') as source:
        return source.read()
|
||||
|
||||
|
||||
def install_ca_cert(ca_cert):
    """Install ca_cert as a system-trusted CA certificate.

    Delegates to charmhelpers.core.host.install_ca_cert with the fixed
    name 'keystone_juju_ca_cert', so repeated calls replace the same
    trusted-CA file rather than accumulating new ones.
    """
    host.install_ca_cert(ca_cert, 'keystone_juju_ca_cert')
|
|
@ -14,6 +14,7 @@
|
|||
|
||||
import os
|
||||
import re
|
||||
import six
|
||||
import subprocess
|
||||
|
||||
|
||||
|
@ -95,6 +96,8 @@ class ApacheConfContext(object):
|
|||
ctxt = settings['hardening']
|
||||
|
||||
out = subprocess.check_output(['apache2', '-v'])
|
||||
if six.PY3:
|
||||
out = out.decode('utf-8')
|
||||
ctxt['apache_version'] = re.search(r'.+version: Apache/(.+?)\s.+',
|
||||
out).group(1)
|
||||
ctxt['apache_icondir'] = '/usr/share/apache2/icons/'
|
||||
|
|
|
@ -15,7 +15,7 @@
|
|||
import re
|
||||
import subprocess
|
||||
|
||||
from six import string_types
|
||||
import six
|
||||
|
||||
from charmhelpers.core.hookenv import (
|
||||
log,
|
||||
|
@ -35,7 +35,7 @@ class DisabledModuleAudit(BaseAudit):
|
|||
def __init__(self, modules):
|
||||
if modules is None:
|
||||
self.modules = []
|
||||
elif isinstance(modules, string_types):
|
||||
elif isinstance(modules, six.string_types):
|
||||
self.modules = [modules]
|
||||
else:
|
||||
self.modules = modules
|
||||
|
@ -69,6 +69,8 @@ class DisabledModuleAudit(BaseAudit):
|
|||
def _get_loaded_modules():
|
||||
"""Returns the modules which are enabled in Apache."""
|
||||
output = subprocess.check_output(['apache2ctl', '-M'])
|
||||
if six.PY3:
|
||||
output = output.decode('utf-8')
|
||||
modules = []
|
||||
for line in output.splitlines():
|
||||
# Each line of the enabled module output looks like:
|
||||
|
|
|
@ -27,6 +27,8 @@ from charmhelpers.contrib.hardening.ssh.checks import run_ssh_checks
|
|||
from charmhelpers.contrib.hardening.mysql.checks import run_mysql_checks
|
||||
from charmhelpers.contrib.hardening.apache.checks import run_apache_checks
|
||||
|
||||
_DISABLE_HARDENING_FOR_UNIT_TEST = False
|
||||
|
||||
|
||||
def harden(overrides=None):
|
||||
"""Hardening decorator.
|
||||
|
@ -47,16 +49,28 @@ def harden(overrides=None):
|
|||
provided with 'harden' config.
|
||||
:returns: Returns value returned by decorated function once executed.
|
||||
"""
|
||||
if overrides is None:
|
||||
overrides = []
|
||||
|
||||
def _harden_inner1(f):
|
||||
log("Hardening function '%s'" % (f.__name__), level=DEBUG)
|
||||
# As this has to be py2.7 compat, we can't use nonlocal. Use a trick
|
||||
# to capture the dictionary that can then be updated.
|
||||
_logged = {'done': False}
|
||||
|
||||
def _harden_inner2(*args, **kwargs):
|
||||
# knock out hardening via a config var; normally it won't get
|
||||
# disabled.
|
||||
if _DISABLE_HARDENING_FOR_UNIT_TEST:
|
||||
return f(*args, **kwargs)
|
||||
if not _logged['done']:
|
||||
log("Hardening function '%s'" % (f.__name__), level=DEBUG)
|
||||
_logged['done'] = True
|
||||
RUN_CATALOG = OrderedDict([('os', run_os_checks),
|
||||
('ssh', run_ssh_checks),
|
||||
('mysql', run_mysql_checks),
|
||||
('apache', run_apache_checks)])
|
||||
|
||||
enabled = overrides or (config("harden") or "").split()
|
||||
enabled = overrides[:] or (config("harden") or "").split()
|
||||
if enabled:
|
||||
modules_to_run = []
|
||||
# modules will always be performed in the following order
|
||||
|
|
|
@ -618,12 +618,12 @@ class OpenStackAmuletUtils(AmuletUtils):
|
|||
return self.authenticate_keystone(keystone_ip, user, password,
|
||||
project_name=tenant)
|
||||
|
||||
def authenticate_glance_admin(self, keystone):
|
||||
def authenticate_glance_admin(self, keystone, force_v1_client=False):
|
||||
"""Authenticates admin user with glance."""
|
||||
self.log.debug('Authenticating glance admin...')
|
||||
ep = keystone.service_catalog.url_for(service_type='image',
|
||||
interface='adminURL')
|
||||
if keystone.session:
|
||||
if not force_v1_client and keystone.session:
|
||||
return glance_clientv2.Client("2", session=keystone.session)
|
||||
else:
|
||||
return glance_client.Client(ep, token=keystone.auth_token)
|
||||
|
@ -680,18 +680,30 @@ class OpenStackAmuletUtils(AmuletUtils):
|
|||
nova.flavors.create(name, ram, vcpus, disk, flavorid,
|
||||
ephemeral, swap, rxtx_factor, is_public)
|
||||
|
||||
def create_cirros_image(self, glance, image_name):
|
||||
"""Download the latest cirros image and upload it to glance,
|
||||
validate and return a resource pointer.
|
||||
def glance_create_image(self, glance, image_name, image_url,
|
||||
download_dir='tests',
|
||||
hypervisor_type=None,
|
||||
disk_format='qcow2',
|
||||
architecture='x86_64',
|
||||
container_format='bare'):
|
||||
"""Download an image and upload it to glance, validate its status
|
||||
and return an image object pointer. KVM defaults, can override for
|
||||
LXD.
|
||||
|
||||
:param glance: pointer to authenticated glance connection
|
||||
:param glance: pointer to authenticated glance api connection
|
||||
:param image_name: display name for new image
|
||||
:param image_url: url to retrieve
|
||||
:param download_dir: directory to store downloaded image file
|
||||
:param hypervisor_type: glance image hypervisor property
|
||||
:param disk_format: glance image disk format
|
||||
:param architecture: glance image architecture property
|
||||
:param container_format: glance image container format
|
||||
:returns: glance image pointer
|
||||
"""
|
||||
self.log.debug('Creating glance cirros image '
|
||||
'({})...'.format(image_name))
|
||||
self.log.debug('Creating glance image ({}) from '
|
||||
'{}...'.format(image_name, image_url))
|
||||
|
||||
# Download cirros image
|
||||
# Download image
|
||||
http_proxy = os.getenv('AMULET_HTTP_PROXY')
|
||||
self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
|
||||
if http_proxy:
|
||||
|
@ -700,31 +712,34 @@ class OpenStackAmuletUtils(AmuletUtils):
|
|||
else:
|
||||
opener = urllib.FancyURLopener()
|
||||
|
||||
f = opener.open('http://download.cirros-cloud.net/version/released')
|
||||
version = f.read().strip()
|
||||
cirros_img = 'cirros-{}-x86_64-disk.img'.format(version)
|
||||
local_path = os.path.join('tests', cirros_img)
|
||||
|
||||
if not os.path.exists(local_path):
|
||||
cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net',
|
||||
version, cirros_img)
|
||||
opener.retrieve(cirros_url, local_path)
|
||||
f.close()
|
||||
abs_file_name = os.path.join(download_dir, image_name)
|
||||
if not os.path.exists(abs_file_name):
|
||||
opener.retrieve(image_url, abs_file_name)
|
||||
|
||||
# Create glance image
|
||||
glance_properties = {
|
||||
'architecture': architecture,
|
||||
}
|
||||
if hypervisor_type:
|
||||
glance_properties['hypervisor_type'] = hypervisor_type
|
||||
# Create glance image
|
||||
if float(glance.version) < 2.0:
|
||||
with open(local_path) as fimage:
|
||||
image = glance.images.create(name=image_name, is_public=True,
|
||||
disk_format='qcow2',
|
||||
container_format='bare',
|
||||
data=fimage)
|
||||
with open(abs_file_name) as f:
|
||||
image = glance.images.create(
|
||||
name=image_name,
|
||||
is_public=True,
|
||||
disk_format=disk_format,
|
||||
container_format=container_format,
|
||||
properties=glance_properties,
|
||||
data=f)
|
||||
else:
|
||||
image = glance.images.create(
|
||||
name=image_name,
|
||||
disk_format="qcow2",
|
||||
visibility="public",
|
||||
container_format="bare")
|
||||
glance.images.upload(image.id, open(local_path, 'rb'))
|
||||
disk_format=disk_format,
|
||||
container_format=container_format)
|
||||
glance.images.upload(image.id, open(abs_file_name, 'rb'))
|
||||
glance.images.update(image.id, **glance_properties)
|
||||
|
||||
# Wait for image to reach active status
|
||||
img_id = image.id
|
||||
|
@ -753,15 +768,54 @@ class OpenStackAmuletUtils(AmuletUtils):
|
|||
val_img_stat, val_img_cfmt, val_img_dfmt))
|
||||
|
||||
if val_img_name == image_name and val_img_stat == 'active' \
|
||||
and val_img_pub is True and val_img_cfmt == 'bare' \
|
||||
and val_img_dfmt == 'qcow2':
|
||||
and val_img_pub is True and val_img_cfmt == container_format \
|
||||
and val_img_dfmt == disk_format:
|
||||
self.log.debug(msg_attr)
|
||||
else:
|
||||
msg = ('Volume validation failed, {}'.format(msg_attr))
|
||||
msg = ('Image validation failed, {}'.format(msg_attr))
|
||||
amulet.raise_status(amulet.FAIL, msg=msg)
|
||||
|
||||
return image
|
||||
|
||||
def create_cirros_image(self, glance, image_name, hypervisor_type=None):
|
||||
"""Download the latest cirros image and upload it to glance,
|
||||
validate and return a resource pointer.
|
||||
|
||||
:param glance: pointer to authenticated glance connection
|
||||
:param image_name: display name for new image
|
||||
:param hypervisor_type: glance image hypervisor property
|
||||
:returns: glance image pointer
|
||||
"""
|
||||
# /!\ DEPRECATION WARNING
|
||||
self.log.warn('/!\\ DEPRECATION WARNING: use '
|
||||
'glance_create_image instead of '
|
||||
'create_cirros_image.')
|
||||
|
||||
self.log.debug('Creating glance cirros image '
|
||||
'({})...'.format(image_name))
|
||||
|
||||
# Get cirros image URL
|
||||
http_proxy = os.getenv('AMULET_HTTP_PROXY')
|
||||
self.log.debug('AMULET_HTTP_PROXY: {}'.format(http_proxy))
|
||||
if http_proxy:
|
||||
proxies = {'http': http_proxy}
|
||||
opener = urllib.FancyURLopener(proxies)
|
||||
else:
|
||||
opener = urllib.FancyURLopener()
|
||||
|
||||
f = opener.open('http://download.cirros-cloud.net/version/released')
|
||||
version = f.read().strip()
|
||||
cirros_img = 'cirros-{}-x86_64-disk.img'.format(version)
|
||||
cirros_url = 'http://{}/{}/{}'.format('download.cirros-cloud.net',
|
||||
version, cirros_img)
|
||||
f.close()
|
||||
|
||||
return self.glance_create_image(
|
||||
glance,
|
||||
image_name,
|
||||
cirros_url,
|
||||
hypervisor_type=hypervisor_type)
|
||||
|
||||
def delete_image(self, glance, image):
|
||||
"""Delete the specified image."""
|
||||
|
||||
|
@ -1013,6 +1067,9 @@ class OpenStackAmuletUtils(AmuletUtils):
|
|||
cmd, code, output))
|
||||
amulet.raise_status(amulet.FAIL, msg=msg)
|
||||
|
||||
# For mimic ceph osd lspools output
|
||||
output = output.replace("\n", ",")
|
||||
|
||||
# Example output: 0 data,1 metadata,2 rbd,3 cinder,4 glance,
|
||||
for pool in str(output).split(','):
|
||||
pool_id_name = pool.split(' ')
|
||||
|
|
|
@ -25,7 +25,9 @@ from charmhelpers.core.hookenv import (
|
|||
local_unit,
|
||||
network_get_primary_address,
|
||||
config,
|
||||
related_units,
|
||||
relation_get,
|
||||
relation_ids,
|
||||
unit_get,
|
||||
NoNetworkBinding,
|
||||
log,
|
||||
|
@ -225,3 +227,49 @@ def process_certificates(service_name, relation_id, unit,
|
|||
create_ip_cert_links(
|
||||
ssl_dir,
|
||||
custom_hostname_link=custom_hostname_link)
|
||||
|
||||
|
||||
def get_requests_for_local_unit(relation_name=None):
    """Extract any certificates data targeted at this unit down relation_name.

    :param relation_name: str Name of relation to check for data
                          (defaults to 'certificates').
    :returns: List of bundles of certificates.
    :rtype: List of dicts
    """
    relation_name = relation_name or 'certificates'
    # Remote units publish processed requests under a key derived from
    # this unit's name, e.g. 'myapp_0.processed_requests'.
    unit_name = local_unit().replace('/', '_')
    raw_certs_key = '{}.processed_requests'.format(unit_name)
    results = []
    for rid in relation_ids(relation_name):
        for remote_unit in related_units(rid):
            rdata = relation_get(rid=rid, unit=remote_unit)
            raw = rdata.get(raw_certs_key)
            if not raw:
                continue
            results.append({
                'ca': rdata['ca'],
                'chain': rdata.get('chain'),
                'certs': json.loads(raw),
            })
    return results
|
||||
|
||||
|
||||
def get_bundle_for_cn(cn, relation_name=None):
    """Extract certificates for the given cn.

    :param cn: str Canonical Name on certificate.
    :param relation_name: str Relation to check for certificates down.
    :returns: Dictionary of certificate data ('cert', 'key', 'chain',
              'ca'); empty dict when no match is found.
    :rtype: dict
    """
    for entry in get_requests_for_local_unit(relation_name):
        for candidate_cn, bundle in entry['certs'].items():
            if candidate_cn == cn:
                # First match wins.
                return {
                    'cert': bundle['cert'],
                    'key': bundle['key'],
                    'chain': entry['chain'],
                    'ca': entry['ca']}
    return {}
|
||||
|
|
|
@ -1519,6 +1519,10 @@ class NeutronAPIContext(OSContextGenerator):
|
|||
'rel_key': 'enable-qos',
|
||||
'default': False,
|
||||
},
|
||||
'enable_nsg_logging': {
|
||||
'rel_key': 'enable-nsg-logging',
|
||||
'default': False,
|
||||
},
|
||||
}
|
||||
ctxt = self.get_neutron_options({})
|
||||
for rid in relation_ids('neutron-plugin-api'):
|
||||
|
@ -1530,10 +1534,15 @@ class NeutronAPIContext(OSContextGenerator):
|
|||
if 'l2-population' in rdata:
|
||||
ctxt.update(self.get_neutron_options(rdata))
|
||||
|
||||
extension_drivers = []
|
||||
|
||||
if ctxt['enable_qos']:
|
||||
ctxt['extension_drivers'] = 'qos'
|
||||
else:
|
||||
ctxt['extension_drivers'] = ''
|
||||
extension_drivers.append('qos')
|
||||
|
||||
if ctxt['enable_nsg_logging']:
|
||||
extension_drivers.append('log')
|
||||
|
||||
ctxt['extension_drivers'] = ','.join(extension_drivers)
|
||||
|
||||
return ctxt
|
||||
|
||||
|
@ -1893,7 +1902,7 @@ class EnsureDirContext(OSContextGenerator):
|
|||
Some software requires a user to create a target directory to be
|
||||
scanned for drop-in files with a specific format. This is why this
|
||||
context is needed to do that before rendering a template.
|
||||
'''
|
||||
'''
|
||||
|
||||
def __init__(self, dirname, **kwargs):
|
||||
'''Used merely to ensure that a given directory exists.'''
|
||||
|
@ -1903,3 +1912,23 @@ class EnsureDirContext(OSContextGenerator):
|
|||
def __call__(self):
|
||||
mkdir(self.dirname, **self.kwargs)
|
||||
return {}
|
||||
|
||||
|
||||
class VersionsContext(OSContextGenerator):
    """Context exposing the openstack and operating system releases."""

    def __init__(self, pkg='python-keystone'):
        """Initialise context.

        :param pkg: Package used to extrapolate the openstack release.
        :type pkg: str
        """
        self.pkg = pkg

    def __call__(self):
        # os_release falls back to 'icehouse' when the package gives no
        # answer; the OS codename comes from lsb_release.
        return {
            'openstack_release': os_release(self.pkg, base='icehouse'),
            'operating_system_release':
                lsb_release()['DISTRIB_CODENAME'].lower(),
        }
|
||||
|
|
|
@ -28,6 +28,7 @@ import json
|
|||
import re
|
||||
|
||||
from charmhelpers.core.hookenv import (
|
||||
expected_related_units,
|
||||
log,
|
||||
relation_set,
|
||||
charm_name,
|
||||
|
@ -110,12 +111,17 @@ def assert_charm_supports_dns_ha():
|
|||
def expect_ha():
|
||||
""" Determine if the unit expects to be in HA
|
||||
|
||||
Check for VIP or dns-ha settings which indicate the unit should expect to
|
||||
be related to hacluster.
|
||||
Check juju goal-state if ha relation is expected, check for VIP or dns-ha
|
||||
settings which indicate the unit should expect to be related to hacluster.
|
||||
|
||||
@returns boolean
|
||||
"""
|
||||
return config('vip') or config('dns-ha')
|
||||
ha_related_units = []
|
||||
try:
|
||||
ha_related_units = list(expected_related_units(reltype='ha'))
|
||||
except (NotImplementedError, KeyError):
|
||||
pass
|
||||
return len(ha_related_units) > 0 or config('vip') or config('dns-ha')
|
||||
|
||||
|
||||
def generate_ha_relation_data(service):
|
||||
|
|
|
@ -1,12 +1,14 @@
|
|||
{% if auth_host -%}
|
||||
[keystone_authtoken]
|
||||
auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}
|
||||
auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}
|
||||
auth_type = password
|
||||
{% if api_version == "3" -%}
|
||||
auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/v3
|
||||
auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/v3
|
||||
project_domain_name = {{ admin_domain_name }}
|
||||
user_domain_name = {{ admin_domain_name }}
|
||||
{% else -%}
|
||||
auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}
|
||||
auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}
|
||||
project_domain_name = default
|
||||
user_domain_name = default
|
||||
{% endif -%}
|
||||
|
|
|
@ -186,7 +186,7 @@ SWIFT_CODENAMES = OrderedDict([
|
|||
('queens',
|
||||
['2.16.0', '2.17.0']),
|
||||
('rocky',
|
||||
['2.18.0']),
|
||||
['2.18.0', '2.19.0']),
|
||||
])
|
||||
|
||||
# >= Liberty version->codename mapping
|
||||
|
@ -375,7 +375,7 @@ def get_swift_codename(version):
|
|||
return codenames[0]
|
||||
|
||||
# NOTE: fallback - attempt to match with just major.minor version
|
||||
match = re.match('^(\d+)\.(\d+)', version)
|
||||
match = re.match(r'^(\d+)\.(\d+)', version)
|
||||
if match:
|
||||
major_minor_version = match.group(0)
|
||||
for codename, versions in six.iteritems(SWIFT_CODENAMES):
|
||||
|
@ -395,7 +395,7 @@ def get_os_codename_package(package, fatal=True):
|
|||
out = subprocess.check_output(cmd)
|
||||
if six.PY3:
|
||||
out = out.decode('UTF-8')
|
||||
except subprocess.CalledProcessError as e:
|
||||
except subprocess.CalledProcessError:
|
||||
return None
|
||||
lines = out.split('\n')
|
||||
for line in lines:
|
||||
|
@ -427,11 +427,11 @@ def get_os_codename_package(package, fatal=True):
|
|||
vers = apt.upstream_version(pkg.current_ver.ver_str)
|
||||
if 'swift' in pkg.name:
|
||||
# Fully x.y.z match for swift versions
|
||||
match = re.match('^(\d+)\.(\d+)\.(\d+)', vers)
|
||||
match = re.match(r'^(\d+)\.(\d+)\.(\d+)', vers)
|
||||
else:
|
||||
# x.y match only for 20XX.X
|
||||
# and ignore patch level for other packages
|
||||
match = re.match('^(\d+)\.(\d+)', vers)
|
||||
match = re.match(r'^(\d+)\.(\d+)', vers)
|
||||
|
||||
if match:
|
||||
vers = match.group(0)
|
||||
|
@ -1450,20 +1450,33 @@ def pausable_restart_on_change(restart_map, stopstart=False,
|
|||
|
||||
see core.utils.restart_on_change() for more details.
|
||||
|
||||
Note restart_map can be a callable, in which case, restart_map is only
|
||||
evaluated at runtime. This means that it is lazy and the underlying
|
||||
function won't be called if the decorated function is never called. Note,
|
||||
retains backwards compatibility for passing a non-callable dictionary.
|
||||
|
||||
@param f: the function to decorate
|
||||
@param restart_map: the restart map {conf_file: [services]}
|
||||
@param restart_map: (optionally callable, which then returns the
|
||||
restart_map) the restart map {conf_file: [services]}
|
||||
@param stopstart: DEFAULT false; whether to stop, start or just restart
|
||||
@returns decorator to use a restart_on_change with pausability
|
||||
"""
|
||||
def wrap(f):
|
||||
# py27 compatible nonlocal variable. When py3 only, replace with
|
||||
# nonlocal keyword
|
||||
__restart_map_cache = {'cache': None}
|
||||
|
||||
@functools.wraps(f)
|
||||
def wrapped_f(*args, **kwargs):
|
||||
if is_unit_paused_set():
|
||||
return f(*args, **kwargs)
|
||||
if __restart_map_cache['cache'] is None:
|
||||
__restart_map_cache['cache'] = restart_map() \
|
||||
if callable(restart_map) else restart_map
|
||||
# otherwise, normal restart_on_change functionality
|
||||
return restart_on_change_helper(
|
||||
(lambda: f(*args, **kwargs)), restart_map, stopstart,
|
||||
restart_functions)
|
||||
(lambda: f(*args, **kwargs)), __restart_map_cache['cache'],
|
||||
stopstart, restart_functions)
|
||||
return wrapped_f
|
||||
return wrap
|
||||
|
||||
|
@ -1733,3 +1746,31 @@ def is_unit_upgrading_set():
|
|||
return not(not(kv.get('unit-upgrading')))
|
||||
except Exception:
|
||||
return False
|
||||
|
||||
|
||||
def series_upgrade_prepare(pause_unit_helper=None, configs=None):
    """Run common series upgrade prepare tasks.

    Flags the unit as upgrading and, when both a pause helper and a
    config renderer are supplied, pauses the unit unless it is already
    paused.

    :param pause_unit_helper: function: Function to pause unit
    :param configs: OSConfigRenderer object: Configurations
    :returns None:
    """
    set_unit_upgrading()
    if pause_unit_helper and configs and not is_unit_paused_set():
        pause_unit_helper(configs)
|
||||
|
||||
|
||||
def series_upgrade_complete(resume_unit_helper=None, configs=None):
    """Run common series upgrade complete tasks.

    Clears the paused/upgrading flags; when a config renderer is given
    its files are rewritten and the unit is resumed via the supplied
    helper.

    :param resume_unit_helper: function: Function to resume unit
    :param configs: OSConfigRenderer object: Configurations
    :returns None:
    """
    clear_unit_paused()
    clear_unit_upgrading()
    if not configs:
        return
    configs.write_all()
    if resume_unit_helper:
        resume_unit_helper(configs)
|
||||
|
|
|
@ -39,7 +39,7 @@ def loopback_devices():
|
|||
devs = [d.strip().split(' ') for d in
|
||||
check_output(cmd).splitlines() if d != '']
|
||||
for dev, _, f in devs:
|
||||
loopbacks[dev.replace(':', '')] = re.search('\((\S+)\)', f).groups()[0]
|
||||
loopbacks[dev.replace(':', '')] = re.search(r'\((\S+)\)', f).groups()[0]
|
||||
return loopbacks
|
||||
|
||||
|
||||
|
|
|
@ -48,6 +48,7 @@ INFO = "INFO"
|
|||
DEBUG = "DEBUG"
|
||||
TRACE = "TRACE"
|
||||
MARKER = object()
|
||||
SH_MAX_ARG = 131071
|
||||
|
||||
cache = {}
|
||||
|
||||
|
@ -98,7 +99,7 @@ def log(message, level=None):
|
|||
command += ['-l', level]
|
||||
if not isinstance(message, six.string_types):
|
||||
message = repr(message)
|
||||
command += [message]
|
||||
command += [message[:SH_MAX_ARG]]
|
||||
# Missing juju-log should not cause failures in unit tests
|
||||
# Send log output to stderr
|
||||
try:
|
||||
|
@ -509,6 +510,67 @@ def related_units(relid=None):
|
|||
subprocess.check_output(units_cmd_line).decode('UTF-8')) or []
|
||||
|
||||
|
||||
def expected_peer_units():
    """Get a generator for units we expect to join peer relation based on
    goal-state.

    The local unit is excluded from the result so callers can compare
    the count against already-joined peers with existing hook tools:

        log('peer {} of {} joined peer relation'
            .format(len(related_units()),
                    len(list(expected_peer_units()))))

    This function will raise NotImplementedError if used with juju
    versions without goal-state support.

    :returns: iterator
    :rtype: types.GeneratorType
    :raises: NotImplementedError
    """
    # goal-state first appeared in juju 2.4.0.
    if not has_juju_version("2.4.0"):
        raise NotImplementedError("goal-state")
    units = goal_state()['units']
    return (unit_name for unit_name in units
            if '/' in unit_name and unit_name != local_unit())
|
||||
|
||||
|
||||
def expected_related_units(reltype=None):
    """Get a generator for units we expect to join relation based on
    goal-state.

    Note that you can not use this function for the peer relation; take
    a look at expected_peer_units() for that.

    This function will raise KeyError if you request information for a
    relation type for which juju goal-state does not have information.
    It will raise NotImplementedError if used with juju versions without
    goal-state support.

    Example usage:
        log('participant {} of {} joined relation {}'
            .format(len(related_units()),
                    len(list(expected_related_units())),
                    relation_type()))

    :param reltype: Relation type to list data for; defaults to the
                    relation type we are currently executing a hook for.
    :type reltype: str
    :returns: iterator
    :rtype: types.GeneratorType
    :raises: KeyError, NotImplementedError
    """
    # goal-state existed in 2.4.0, but did not list individual units to
    # join a relation in 2.4.1 through 2.4.3. (LP: #1794739)
    if not has_juju_version("2.4.4"):
        raise NotImplementedError("goal-state relation unit count")
    relation_name = reltype or relation_type()
    members = goal_state()['relations'][relation_name]
    return (unit_name for unit_name in members if '/' in unit_name)
|
||||
|
||||
|
||||
@cached
|
||||
def relation_for_unit(unit=None, rid=None):
|
||||
"""Get the json represenation of a unit's relation"""
|
||||
|
@ -997,6 +1059,7 @@ def application_version_set(version):
|
|||
|
||||
|
||||
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
|
||||
@cached
|
||||
def goal_state():
|
||||
"""Juju goal state values"""
|
||||
cmd = ['goal-state', '--format=json']
|
||||
|
|
|
@ -34,13 +34,13 @@ import six
|
|||
|
||||
from contextlib import contextmanager
|
||||
from collections import OrderedDict
|
||||
from .hookenv import log, DEBUG, local_unit
|
||||
from .hookenv import log, INFO, DEBUG, local_unit, charm_name
|
||||
from .fstab import Fstab
|
||||
from charmhelpers.osplatform import get_platform
|
||||
|
||||
__platform__ = get_platform()
|
||||
if __platform__ == "ubuntu":
|
||||
from charmhelpers.core.host_factory.ubuntu import (
|
||||
from charmhelpers.core.host_factory.ubuntu import ( # NOQA:F401
|
||||
service_available,
|
||||
add_new_group,
|
||||
lsb_release,
|
||||
|
@ -48,7 +48,7 @@ if __platform__ == "ubuntu":
|
|||
CompareHostReleases,
|
||||
) # flake8: noqa -- ignore F401 for this import
|
||||
elif __platform__ == "centos":
|
||||
from charmhelpers.core.host_factory.centos import (
|
||||
from charmhelpers.core.host_factory.centos import ( # NOQA:F401
|
||||
service_available,
|
||||
add_new_group,
|
||||
lsb_release,
|
||||
|
@ -58,6 +58,7 @@ elif __platform__ == "centos":
|
|||
|
||||
UPDATEDB_PATH = '/etc/updatedb.conf'
|
||||
|
||||
|
||||
def service_start(service_name, **kwargs):
|
||||
"""Start a system service.
|
||||
|
||||
|
@ -287,8 +288,8 @@ def service_running(service_name, **kwargs):
|
|||
for key, value in six.iteritems(kwargs):
|
||||
parameter = '%s=%s' % (key, value)
|
||||
cmd.append(parameter)
|
||||
output = subprocess.check_output(cmd,
|
||||
stderr=subprocess.STDOUT).decode('UTF-8')
|
||||
output = subprocess.check_output(
|
||||
cmd, stderr=subprocess.STDOUT).decode('UTF-8')
|
||||
except subprocess.CalledProcessError:
|
||||
return False
|
||||
else:
|
||||
|
@ -442,7 +443,7 @@ def add_user_to_group(username, group):
|
|||
|
||||
|
||||
def chage(username, lastday=None, expiredate=None, inactive=None,
|
||||
mindays=None, maxdays=None, root=None, warndays=None):
|
||||
mindays=None, maxdays=None, root=None, warndays=None):
|
||||
"""Change user password expiry information
|
||||
|
||||
:param str username: User to update
|
||||
|
@ -482,8 +483,10 @@ def chage(username, lastday=None, expiredate=None, inactive=None,
|
|||
cmd.append(username)
|
||||
subprocess.check_call(cmd)
|
||||
|
||||
|
||||
remove_password_expiry = functools.partial(chage, expiredate='-1', inactive='-1', mindays='0', maxdays='-1')
|
||||
|
||||
|
||||
def rsync(from_path, to_path, flags='-r', options=None, timeout=None):
|
||||
"""Replicate the contents of a path"""
|
||||
options = options or ['--delete', '--executability']
|
||||
|
@ -535,13 +538,15 @@ def write_file(path, content, owner='root', group='root', perms=0o444):
|
|||
# lets see if we can grab the file and compare the context, to avoid doing
|
||||
# a write.
|
||||
existing_content = None
|
||||
existing_uid, existing_gid = None, None
|
||||
existing_uid, existing_gid, existing_perms = None, None, None
|
||||
try:
|
||||
with open(path, 'rb') as target:
|
||||
existing_content = target.read()
|
||||
stat = os.stat(path)
|
||||
existing_uid, existing_gid = stat.st_uid, stat.st_gid
|
||||
except:
|
||||
existing_uid, existing_gid, existing_perms = (
|
||||
stat.st_uid, stat.st_gid, stat.st_mode
|
||||
)
|
||||
except Exception:
|
||||
pass
|
||||
if content != existing_content:
|
||||
log("Writing file {} {}:{} {:o}".format(path, owner, group, perms),
|
||||
|
@ -554,7 +559,7 @@ def write_file(path, content, owner='root', group='root', perms=0o444):
|
|||
target.write(content)
|
||||
return
|
||||
# the contents were the same, but we might still need to change the
|
||||
# ownership.
|
||||
# ownership or permissions.
|
||||
if existing_uid != uid:
|
||||
log("Changing uid on already existing content: {} -> {}"
|
||||
.format(existing_uid, uid), level=DEBUG)
|
||||
|
@ -563,6 +568,10 @@ def write_file(path, content, owner='root', group='root', perms=0o444):
|
|||
log("Changing gid on already existing content: {} -> {}"
|
||||
.format(existing_gid, gid), level=DEBUG)
|
||||
os.chown(path, -1, gid)
|
||||
if existing_perms != perms:
|
||||
log("Changing permissions on existing content: {} -> {}"
|
||||
.format(existing_perms, perms), level=DEBUG)
|
||||
os.chmod(path, perms)
|
||||
|
||||
|
||||
def fstab_remove(mp):
|
||||
|
@ -827,7 +836,7 @@ def list_nics(nic_type=None):
|
|||
ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
|
||||
ip_output = (line.strip() for line in ip_output if line)
|
||||
|
||||
key = re.compile('^[0-9]+:\s+(.+):')
|
||||
key = re.compile(r'^[0-9]+:\s+(.+):')
|
||||
for line in ip_output:
|
||||
matched = re.search(key, line)
|
||||
if matched:
|
||||
|
@ -1040,3 +1049,27 @@ def modulo_distribution(modulo=3, wait=30, non_zero_wait=False):
|
|||
return modulo * wait
|
||||
else:
|
||||
return calculated_wait_time
|
||||
|
||||
|
||||
def install_ca_cert(ca_cert, name=None):
|
||||
"""
|
||||
Install the given cert as a trusted CA.
|
||||
|
||||
The ``name`` is the stem of the filename where the cert is written, and if
|
||||
not provided, it will default to ``juju-{charm_name}``.
|
||||
|
||||
If the cert is empty or None, or is unchanged, nothing is done.
|
||||
"""
|
||||
if not ca_cert:
|
||||
return
|
||||
if not isinstance(ca_cert, bytes):
|
||||
ca_cert = ca_cert.encode('utf8')
|
||||
if not name:
|
||||
name = 'juju-{}'.format(charm_name())
|
||||
cert_file = '/usr/local/share/ca-certificates/{}.crt'.format(name)
|
||||
new_hash = hashlib.md5(ca_cert).hexdigest()
|
||||
if file_hash(cert_file) == new_hash:
|
||||
return
|
||||
log("Installing new CA cert at: {}".format(cert_file), level=INFO)
|
||||
write_file(cert_file, ca_cert)
|
||||
subprocess.check_call(['update-ca-certificates', '--fresh'])
|
||||
|
|
|
@ -26,12 +26,12 @@ from charmhelpers.core.hookenv import (
|
|||
|
||||
__platform__ = get_platform()
|
||||
if __platform__ == "ubuntu":
|
||||
from charmhelpers.core.kernel_factory.ubuntu import (
|
||||
from charmhelpers.core.kernel_factory.ubuntu import ( # NOQA:F401
|
||||
persistent_modprobe,
|
||||
update_initramfs,
|
||||
) # flake8: noqa -- ignore F401 for this import
|
||||
elif __platform__ == "centos":
|
||||
from charmhelpers.core.kernel_factory.centos import (
|
||||
from charmhelpers.core.kernel_factory.centos import ( # NOQA:F401
|
||||
persistent_modprobe,
|
||||
update_initramfs,
|
||||
) # flake8: noqa -- ignore F401 for this import
|
||||
|
|
|
@ -84,6 +84,7 @@ module = "charmhelpers.fetch.%s" % __platform__
|
|||
fetch = importlib.import_module(module)
|
||||
|
||||
filter_installed_packages = fetch.filter_installed_packages
|
||||
filter_missing_packages = fetch.filter_missing_packages
|
||||
install = fetch.apt_install
|
||||
upgrade = fetch.apt_upgrade
|
||||
update = _fetch_update = fetch.apt_update
|
||||
|
@ -96,6 +97,7 @@ if __platform__ == "ubuntu":
|
|||
apt_update = fetch.apt_update
|
||||
apt_upgrade = fetch.apt_upgrade
|
||||
apt_purge = fetch.apt_purge
|
||||
apt_autoremove = fetch.apt_autoremove
|
||||
apt_mark = fetch.apt_mark
|
||||
apt_hold = fetch.apt_hold
|
||||
apt_unhold = fetch.apt_unhold
|
||||
|
|
|
@ -13,7 +13,7 @@
|
|||
# limitations under the License.
|
||||
|
||||
import os
|
||||
from subprocess import check_call
|
||||
from subprocess import STDOUT, check_output
|
||||
from charmhelpers.fetch import (
|
||||
BaseFetchHandler,
|
||||
UnhandledSource,
|
||||
|
@ -55,7 +55,7 @@ class BzrUrlFetchHandler(BaseFetchHandler):
|
|||
cmd = ['bzr', 'branch']
|
||||
cmd += cmd_opts
|
||||
cmd += [source, dest]
|
||||
check_call(cmd)
|
||||
check_output(cmd, stderr=STDOUT)
|
||||
|
||||
def install(self, source, dest=None, revno=None):
|
||||
url_parts = self.parse_url(source)
|
||||
|
|
|
@ -13,7 +13,7 @@
|
|||
# limitations under the License.
|
||||
|
||||
import os
|
||||
from subprocess import check_call, CalledProcessError
|
||||
from subprocess import check_output, CalledProcessError, STDOUT
|
||||
from charmhelpers.fetch import (
|
||||
BaseFetchHandler,
|
||||
UnhandledSource,
|
||||
|
@ -50,7 +50,7 @@ class GitUrlFetchHandler(BaseFetchHandler):
|
|||
cmd = ['git', 'clone', source, dest, '--branch', branch]
|
||||
if depth:
|
||||
cmd.extend(['--depth', depth])
|
||||
check_call(cmd)
|
||||
check_output(cmd, stderr=STDOUT)
|
||||
|
||||
def install(self, source, branch="master", dest=None, depth=None):
|
||||
url_parts = self.parse_url(source)
|
||||
|
|
|
@ -189,6 +189,18 @@ def filter_installed_packages(packages):
|
|||
return _pkgs
|
||||
|
||||
|
||||
def filter_missing_packages(packages):
|
||||
"""Return a list of packages that are installed.
|
||||
|
||||
:param packages: list of packages to evaluate.
|
||||
:returns list: Packages that are installed.
|
||||
"""
|
||||
return list(
|
||||
set(packages) -
|
||||
set(filter_installed_packages(packages))
|
||||
)
|
||||
|
||||
|
||||
def apt_cache(in_memory=True, progress=None):
|
||||
"""Build and return an apt cache."""
|
||||
from apt import apt_pkg
|
||||
|
@ -248,6 +260,14 @@ def apt_purge(packages, fatal=False):
|
|||
_run_apt_command(cmd, fatal)
|
||||
|
||||
|
||||
def apt_autoremove(purge=True, fatal=False):
|
||||
"""Purge one or more packages."""
|
||||
cmd = ['apt-get', '--assume-yes', 'autoremove']
|
||||
if purge:
|
||||
cmd.append('--purge')
|
||||
_run_apt_command(cmd, fatal)
|
||||
|
||||
|
||||
def apt_mark(packages, mark, fatal=False):
|
||||
"""Flag one or more packages using apt-mark."""
|
||||
log("Marking {} as {}".format(packages, mark))
|
||||
|
@ -274,7 +294,7 @@ def apt_unhold(packages, fatal=False):
|
|||
def import_key(key):
|
||||
"""Import an ASCII Armor key.
|
||||
|
||||
/!\ A Radix64 format keyid is also supported for backwards
|
||||
A Radix64 format keyid is also supported for backwards
|
||||
compatibility, but should never be used; the key retrieval
|
||||
mechanism is insecure and subject to man-in-the-middle attacks
|
||||
voiding all signature checks using that key.
|
||||
|
@ -434,6 +454,9 @@ def _add_apt_repository(spec):
|
|||
|
||||
:param spec: the parameter to pass to add_apt_repository
|
||||
"""
|
||||
if '{series}' in spec:
|
||||
series = lsb_release()['DISTRIB_CODENAME']
|
||||
spec = spec.replace('{series}', series)
|
||||
_run_with_retries(['add-apt-repository', '--yes', spec])
|
||||
|
||||
|
||||
|
|
|
@ -0,0 +1 @@
|
|||
rabbitmq_server_relations.py
|
|
@ -0,0 +1 @@
|
|||
rabbitmq_server_relations.py
|
|
@ -0,0 +1,18 @@
|
|||
# Copyright 2018 Canonical Ltd
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
AMQP_OVERRIDE_CONFIG = 'access-network'
|
||||
CLUSTER_OVERRIDE_CONFIG = 'cluster-network'
|
||||
AMQP_INTERFACE = 'amqp'
|
||||
CLUSTER_INTERFACE = 'cluster'
|
|
@ -34,7 +34,6 @@ from charmhelpers.core.templating import render
|
|||
from charmhelpers.contrib.openstack.utils import (
|
||||
_determine_os_workload_status,
|
||||
get_hostname,
|
||||
get_host_ip,
|
||||
pause_unit,
|
||||
resume_unit,
|
||||
is_unit_paused_set,
|
||||
|
@ -44,11 +43,6 @@ from charmhelpers.contrib.hahelpers.cluster import (
|
|||
distributed_wait,
|
||||
)
|
||||
|
||||
from charmhelpers.contrib.network.ip import (
|
||||
get_ipv6_addr,
|
||||
get_address_in_network,
|
||||
)
|
||||
|
||||
from charmhelpers.core.hookenv import (
|
||||
relation_id,
|
||||
relation_ids,
|
||||
|
@ -60,12 +54,10 @@ from charmhelpers.core.hookenv import (
|
|||
service_name,
|
||||
status_set,
|
||||
cached,
|
||||
unit_get,
|
||||
relation_set,
|
||||
relation_get,
|
||||
application_version_set,
|
||||
config,
|
||||
network_get_primary_address,
|
||||
is_leader,
|
||||
leader_get,
|
||||
local_unit,
|
||||
|
@ -75,11 +67,9 @@ from charmhelpers.core.host import (
|
|||
pwgen,
|
||||
mkdir,
|
||||
write_file,
|
||||
lsb_release,
|
||||
cmp_pkgrevno,
|
||||
path_hash,
|
||||
service as system_service,
|
||||
CompareHostReleases,
|
||||
)
|
||||
|
||||
from charmhelpers.contrib.peerstorage import (
|
||||
|
@ -108,10 +98,6 @@ ENABLED_PLUGINS = '/etc/rabbitmq/enabled_plugins'
|
|||
RABBIT_USER = 'rabbitmq'
|
||||
LIB_PATH = '/var/lib/rabbitmq/'
|
||||
HOSTS_FILE = '/etc/hosts'
|
||||
AMQP_OVERRIDE_CONFIG = 'access-network'
|
||||
CLUSTER_OVERRIDE_CONFIG = 'cluster-network'
|
||||
AMQP_INTERFACE = 'amqp'
|
||||
CLUSTER_INTERFACE = 'cluster'
|
||||
|
||||
_named_passwd = '/var/lib/charm/{}/{}.passwd'
|
||||
_local_named_passwd = '/var/lib/charm/{}/{}.local_passwd'
|
||||
|
@ -713,14 +699,6 @@ def update_hosts_file(map):
|
|||
os.chmod(HOSTS_FILE, 0o644)
|
||||
|
||||
|
||||
def assert_charm_supports_ipv6():
|
||||
"""Check whether we are able to support charms ipv6."""
|
||||
_release = lsb_release()['DISTRIB_CODENAME'].lower()
|
||||
if CompareHostReleases(_release) < "trusty":
|
||||
raise Exception("IPv6 is not supported in the charms for Ubuntu "
|
||||
"versions less than Trusty 14.04")
|
||||
|
||||
|
||||
def restart_map():
|
||||
'''Determine the correct resource map to be passed to
|
||||
charmhelpers.core.restart_on_change() based on the services configured.
|
||||
|
@ -970,41 +948,6 @@ def _pause_resume_helper(f, configs):
|
|||
ports=None)
|
||||
|
||||
|
||||
def get_unit_ip(config_override=AMQP_OVERRIDE_CONFIG,
|
||||
interface=AMQP_INTERFACE):
|
||||
"""Return this unit's IP.
|
||||
Future proof to allow for network spaces or other more complex addresss
|
||||
selection.
|
||||
|
||||
@param config_override: string name of the config option for network
|
||||
override. Default to amqp-network
|
||||
@param interface: string name of the relation. Default to amqp.
|
||||
@raises Exception if prefer-ipv6 is configured but IPv6 unsupported.
|
||||
@returns IPv6 or IPv4 address
|
||||
"""
|
||||
|
||||
fallback = get_host_ip(unit_get('private-address'))
|
||||
if config('prefer-ipv6'):
|
||||
assert_charm_supports_ipv6()
|
||||
return get_ipv6_addr()[0]
|
||||
elif config(config_override):
|
||||
# NOTE(jamespage)
|
||||
# override private-address settings if access-network is
|
||||
# configured and an appropriate network interface is
|
||||
# configured.
|
||||
return get_address_in_network(config(config_override),
|
||||
fallback)
|
||||
else:
|
||||
# NOTE(jamespage)
|
||||
# Try using network spaces if access-network is not
|
||||
# configured, fallback to private address if not
|
||||
# supported
|
||||
try:
|
||||
return network_get_primary_address(interface)
|
||||
except NotImplementedError:
|
||||
return fallback
|
||||
|
||||
|
||||
def get_unit_hostname():
|
||||
"""Return this unit's hostname.
|
||||
|
||||
|
|
|
@ -131,36 +131,42 @@ class RabbitMQSSLContext(object):
|
|||
ssl_enabled = False, ssl = on -> ssl enabled
|
||||
"""
|
||||
ssl_mode, external_ca = ssl_utils.get_ssl_mode()
|
||||
|
||||
ctxt = {
|
||||
'ssl_mode': ssl_mode,
|
||||
}
|
||||
|
||||
if ssl_mode == 'off':
|
||||
close_port(config('ssl_port'))
|
||||
ssl_utils.reconfigure_client_ssl()
|
||||
return ctxt
|
||||
|
||||
ssl_key = convert_from_base64(config('ssl_key'))
|
||||
ssl_cert = convert_from_base64(config('ssl_cert'))
|
||||
ssl_ca = convert_from_base64(config('ssl_ca'))
|
||||
ssl_port = config('ssl_port')
|
||||
if ssl_mode == ssl_utils.CERTS_FROM_RELATION:
|
||||
relation_certs = ssl_utils.get_relation_cert_data()
|
||||
ctxt['ssl_mode'] = 'on'
|
||||
ssl_key = convert_from_base64(relation_certs['key'])
|
||||
ssl_cert = convert_from_base64(relation_certs['cert'])
|
||||
ssl_ca = convert_from_base64(relation_certs['ca'])
|
||||
ssl_port = config('ssl_port')
|
||||
else:
|
||||
|
||||
# If external managed certs then we need all the fields.
|
||||
if (ssl_mode in ('on', 'only') and any((ssl_key, ssl_cert)) and
|
||||
not all((ssl_key, ssl_cert))):
|
||||
log('If ssl_key or ssl_cert are specified both are required.',
|
||||
level=ERROR)
|
||||
sys.exit(1)
|
||||
ssl_key = convert_from_base64(config('ssl_key'))
|
||||
ssl_cert = convert_from_base64(config('ssl_cert'))
|
||||
ssl_ca = convert_from_base64(config('ssl_ca'))
|
||||
ssl_port = config('ssl_port')
|
||||
|
||||
if not external_ca:
|
||||
ssl_cert, ssl_key, ssl_ca = ServiceCA.get_service_cert()
|
||||
# If external managed certs then we need all the fields.
|
||||
if (ssl_mode in ('on', 'only') and any((ssl_key, ssl_cert)) and
|
||||
not all((ssl_key, ssl_cert))):
|
||||
log('If ssl_key or ssl_cert are specified both are required.',
|
||||
level=ERROR)
|
||||
sys.exit(1)
|
||||
|
||||
if not external_ca:
|
||||
ssl_cert, ssl_key, ssl_ca = ServiceCA.get_service_cert()
|
||||
|
||||
ctxt.update(self.enable_ssl(
|
||||
ssl_key, ssl_cert, ssl_port, ssl_ca,
|
||||
ssl_only=(ssl_mode == "only"), ssl_client=False
|
||||
))
|
||||
|
||||
ssl_utils.reconfigure_client_ssl(True)
|
||||
open_port(ssl_port)
|
||||
|
||||
|
|
|
@ -20,6 +20,7 @@ import shutil
|
|||
import sys
|
||||
import subprocess
|
||||
|
||||
|
||||
_path = os.path.dirname(os.path.realpath(__file__))
|
||||
_root = os.path.abspath(os.path.join(_path, '..'))
|
||||
|
||||
|
@ -50,6 +51,7 @@ except ImportError:
|
|||
'python3-requests'])
|
||||
import requests # flake8: noqa
|
||||
|
||||
import rabbit_net_utils
|
||||
import rabbit_utils as rabbit
|
||||
import ssl_utils
|
||||
from rabbitmq_context import SSL_CA_FILE
|
||||
|
@ -124,6 +126,10 @@ from charmhelpers.contrib.peerstorage import (
|
|||
|
||||
from charmhelpers.core.unitdata import kv
|
||||
|
||||
import charmhelpers.contrib.openstack.cert_utils as ch_cert_utils
|
||||
|
||||
import charmhelpers.contrib.network.ip as ch_ip
|
||||
|
||||
hooks = Hooks()
|
||||
|
||||
SERVICE_NAME = os.getenv('JUJU_UNIT_NAME').split('/')[0]
|
||||
|
@ -256,7 +262,9 @@ def update_clients():
|
|||
@hooks.hook('amqp-relation-changed')
|
||||
def amqp_changed(relation_id=None, remote_unit=None):
|
||||
singleset = set(['username', 'vhost'])
|
||||
host_addr = rabbit.get_unit_ip()
|
||||
host_addr = ch_ip.get_relation_ip(
|
||||
rabbit_net_utils.AMQP_INTERFACE,
|
||||
cidr_network=config(rabbit_net_utils.AMQP_OVERRIDE_CONFIG))
|
||||
|
||||
if rabbit.leader_node_is_ready():
|
||||
relation_settings = {'hostname': host_addr,
|
||||
|
@ -348,8 +356,9 @@ def cluster_joined(relation_id=None):
|
|||
relation_settings = {
|
||||
'hostname': rabbit.get_unit_hostname(),
|
||||
'private-address':
|
||||
rabbit.get_unit_ip(config_override=rabbit.CLUSTER_OVERRIDE_CONFIG,
|
||||
interface=rabbit.CLUSTER_INTERFACE),
|
||||
ch_ip.get_relation_ip(
|
||||
rabbit_net_utils.CLUSTER_INTERFACE,
|
||||
cidr_network=config(rabbit_net_utils.CLUSTER_OVERRIDE_CONFIG)),
|
||||
}
|
||||
|
||||
relation_set(relation_id=relation_id,
|
||||
|
@ -752,10 +761,10 @@ def config_changed():
|
|||
return
|
||||
|
||||
# Update hosts with this unit's information
|
||||
rabbit.update_hosts_file(
|
||||
{rabbit.get_unit_ip(config_override=rabbit.CLUSTER_OVERRIDE_CONFIG,
|
||||
interface=rabbit.CLUSTER_INTERFACE):
|
||||
rabbit.get_unit_hostname()})
|
||||
cluster_ip = ch_ip.get_relation_ip(
|
||||
rabbit_net_utils.CLUSTER_INTERFACE,
|
||||
cidr_network=config(rabbit_net_utils.CLUSTER_OVERRIDE_CONFIG))
|
||||
rabbit.update_hosts_file({cluster_ip: rabbit.get_unit_hostname()})
|
||||
|
||||
# Add archive source if provided and not in the upgrade process
|
||||
if not leader_get("cluster_series_upgrading"):
|
||||
|
@ -887,6 +896,28 @@ def series_upgrade_complete():
|
|||
rabbit.resume_unit_helper(rabbit.ConfigRenderer(rabbit.CONFIG_FILES))
|
||||
|
||||
|
||||
@hooks.hook('certificates-relation-joined')
|
||||
def certs_joined(relation_id=None):
|
||||
req = ch_cert_utils.CertRequest()
|
||||
ip, target_cn = ssl_utils.get_unit_amqp_endpoint_data()
|
||||
req.add_entry(None, target_cn, [ip])
|
||||
relation_set(
|
||||
relation_id=relation_id,
|
||||
relation_settings=req.get_request())
|
||||
|
||||
|
||||
@hooks.hook('certificates-relation-changed')
|
||||
def certs_changed(relation_id=None, unit=None):
|
||||
# Ensure Rabbit has restart before telling the clients as rabbit may
|
||||
# take time to restart.
|
||||
@rabbit.restart_on_change(rabbit.restart_map())
|
||||
def render_and_restart():
|
||||
rabbit.ConfigRenderer(
|
||||
rabbit.CONFIG_FILES).write_all()
|
||||
render_and_restart()
|
||||
update_clients()
|
||||
|
||||
|
||||
@hooks.hook('update-status')
|
||||
@harden()
|
||||
def update_status():
|
||||
|
|
|
@ -12,6 +12,8 @@
|
|||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
import base64
|
||||
|
||||
from charmhelpers.contrib.ssl.service import ServiceCA
|
||||
|
||||
from charmhelpers.core.hookenv import (
|
||||
|
@ -21,29 +23,67 @@ from charmhelpers.core.hookenv import (
|
|||
relation_get,
|
||||
local_unit,
|
||||
)
|
||||
from charmhelpers.contrib.network.ip import (
|
||||
get_hostname,
|
||||
get_relation_ip,
|
||||
)
|
||||
import charmhelpers.contrib.openstack.cert_utils as ch_cert_utils
|
||||
|
||||
import base64
|
||||
import rabbit_net_utils
|
||||
|
||||
CERTS_FROM_RELATION = 'certs-relation'
|
||||
|
||||
|
||||
def get_unit_amqp_endpoint_data():
|
||||
"""Get the hostname and ip address associated with amqp interface.
|
||||
|
||||
:returns: Tuple containing ip address and hostname.
|
||||
:rtype: (str, str)
|
||||
"""
|
||||
ip = get_relation_ip(
|
||||
rabbit_net_utils.AMQP_INTERFACE,
|
||||
cidr_network=config(rabbit_net_utils.AMQP_OVERRIDE_CONFIG))
|
||||
return ip, get_hostname(ip)
|
||||
|
||||
|
||||
def get_relation_cert_data():
|
||||
"""Get certificate bundle associated with the amqp interface.
|
||||
|
||||
:returns: Dict with key, cert, ca and, optional, chain keys.
|
||||
:rtype: Dict
|
||||
"""
|
||||
_, hostname = get_unit_amqp_endpoint_data()
|
||||
return ch_cert_utils.get_bundle_for_cn(hostname)
|
||||
|
||||
|
||||
def get_ssl_mode():
|
||||
ssl_mode = config('ssl')
|
||||
external_ca = False
|
||||
|
||||
# Legacy config boolean option
|
||||
ssl_on = config('ssl_enabled')
|
||||
if ssl_mode == 'off' and ssl_on is False:
|
||||
ssl_mode = 'off'
|
||||
elif ssl_mode == 'off' and ssl_on:
|
||||
ssl_mode = 'on'
|
||||
|
||||
ssl_key = config('ssl_key')
|
||||
ssl_cert = config('ssl_cert')
|
||||
|
||||
if all((ssl_key, ssl_cert)):
|
||||
relation_certs = get_relation_cert_data()
|
||||
if relation_certs:
|
||||
ssl_mode = CERTS_FROM_RELATION
|
||||
external_ca = True
|
||||
else:
|
||||
ssl_mode = config('ssl')
|
||||
external_ca = False
|
||||
|
||||
# Legacy config boolean option
|
||||
ssl_on = config('ssl_enabled')
|
||||
if ssl_mode == 'off' and ssl_on is False:
|
||||
ssl_mode = 'off'
|
||||
elif ssl_mode == 'off' and ssl_on:
|
||||
ssl_mode = 'on'
|
||||
|
||||
ssl_key = config('ssl_key')
|
||||
ssl_cert = config('ssl_cert')
|
||||
|
||||
if all((ssl_key, ssl_cert)):
|
||||
external_ca = True
|
||||
return ssl_mode, external_ca
|
||||
|
||||
|
||||
def b64encoded_string(ss):
|
||||
return base64.b64encode(ss.encode('ascii')).decode('ascii')
|
||||
|
||||
|
||||
def configure_client_ssl(relation_data):
|
||||
"""Configure client with ssl
|
||||
"""
|
||||
|
@ -51,19 +91,23 @@ def configure_client_ssl(relation_data):
|
|||
if ssl_mode == 'off':
|
||||
return
|
||||
relation_data['ssl_port'] = config('ssl_port')
|
||||
if external_ca:
|
||||
if config('ssl_ca'):
|
||||
if "BEGIN CERTIFICATE" in config('ssl_ca'):
|
||||
ssl_ca_encoded = (base64
|
||||
.b64encode(config('ssl_ca').encode('ascii'))
|
||||
.decode('ascii'))
|
||||
else:
|
||||
ssl_ca_encoded = config('ssl_ca')
|
||||
relation_data['ssl_ca'] = ssl_ca_encoded
|
||||
return
|
||||
ca = ServiceCA.get_ca()
|
||||
relation_data['ssl_ca'] = (
|
||||
base64.b64encode(ca.get_ca_bundle().encode('ascii')).decode('ascii'))
|
||||
if ssl_mode == CERTS_FROM_RELATION:
|
||||
relation_certs = get_relation_cert_data()
|
||||
ca_data = relation_certs['ca']
|
||||
if relation_certs.get('chain'):
|
||||
ca_data = ca_data + relation_certs.get('chain')
|
||||
relation_data['ssl_ca'] = b64encoded_string(ca_data)
|
||||
else:
|
||||
if external_ca:
|
||||
if config('ssl_ca'):
|
||||
if "BEGIN CERTIFICATE" in config('ssl_ca'):
|
||||
ssl_ca_encoded = b64encoded_string(config('ssl_ca'))
|
||||
else:
|
||||
ssl_ca_encoded = config('ssl_ca')
|
||||
relation_data['ssl_ca'] = ssl_ca_encoded
|
||||
return
|
||||
ca = ServiceCA.get_ca()
|
||||
relation_data['ssl_ca'] = b64encoded_string(ca.get_ca_bundle())
|
||||
|
||||
|
||||
def reconfigure_client_ssl(ssl_enabled=False):
|
||||
|
|
|
@ -26,6 +26,8 @@ requires:
|
|||
scope: container
|
||||
ceph:
|
||||
interface: ceph-client
|
||||
certificates:
|
||||
interface: tls-certificates
|
||||
peers:
|
||||
cluster:
|
||||
interface: rabbitmq-ha
|
||||
|
|
|
@ -44,11 +44,6 @@ TO_PATCH = [
|
|||
'leader_get',
|
||||
'config',
|
||||
'is_unit_paused_set',
|
||||
'assert_charm_supports_ipv6',
|
||||
'get_ipv6_addr',
|
||||
'unit_get',
|
||||
'network_get_primary_address',
|
||||
'get_address_in_network',
|
||||
]
|
||||
|
||||
|
||||
|
@ -626,61 +621,6 @@ class UtilsTests(CharmTestCase):
|
|||
mock_cluster_ready.return_value = True
|
||||
self.assertTrue(rabbit_utils.leader_node_is_ready())
|
||||
|
||||
def test_get_unit_ip(self):
|
||||
AMQP_IP = '10.200.1.1'
|
||||
OVERRIDE_AMQP_IP = '10.250.1.1'
|
||||
CLUSTER_IP = '10.100.1.1'
|
||||
OVERRIDE_CLUSTER_IP = '10.150.1.1'
|
||||
IPV6_IP = '2001:DB8::1'
|
||||
DEFAULT_IP = '172.16.1.1'
|
||||
self.assert_charm_supports_ipv6.return_value = True
|
||||
self.get_ipv6_addr.return_value = [IPV6_IP]
|
||||
self.unit_get.return_value = DEFAULT_IP
|
||||
self.get_address_in_network.return_value = DEFAULT_IP
|
||||
self.network_get_primary_address.return_value = DEFAULT_IP
|
||||
|
||||
# IPv6
|
||||
_config = {'prefer-ipv6': True,
|
||||
'cluster-network': '10.100.1.0/24',
|
||||
'access-network': '10.200.1.0/24'}
|
||||
self.config.side_effect = lambda key: _config.get(key)
|
||||
self.assertEqual(IPV6_IP, rabbit_utils.get_unit_ip())
|
||||
|
||||
# Overrides
|
||||
_config = {'prefer-ipv6': False,
|
||||
'cluster-network': '10.100.1.0/24',
|
||||
'access-network': '10.200.1.0/24'}
|
||||
self.config.side_effect = lambda key: _config.get(key)
|
||||
|
||||
self.get_address_in_network.return_value = OVERRIDE_AMQP_IP
|
||||
self.assertEqual(OVERRIDE_AMQP_IP, rabbit_utils.get_unit_ip())
|
||||
|
||||
self.get_address_in_network.return_value = OVERRIDE_CLUSTER_IP
|
||||
self.assertEqual(OVERRIDE_CLUSTER_IP,
|
||||
rabbit_utils.get_unit_ip(
|
||||
config_override='cluster-network',
|
||||
interface='cluster'))
|
||||
|
||||
# Network-get calls
|
||||
_config = {'prefer-ipv6': False,
|
||||
'cluster-network': None,
|
||||
'access-network': None}
|
||||
self.config.side_effect = lambda key: _config.get(key)
|
||||
|
||||
self.network_get_primary_address.return_value = AMQP_IP
|
||||
self.assertEqual(AMQP_IP, rabbit_utils.get_unit_ip())
|
||||
|
||||
self.network_get_primary_address.return_value = CLUSTER_IP
|
||||
self.assertEqual(CLUSTER_IP,
|
||||
rabbit_utils.get_unit_ip(
|
||||
config_override='cluster-network',
|
||||
interface='cluster'))
|
||||
|
||||
# Default
|
||||
self.network_get_primary_address.return_value = AMQP_IP
|
||||
self.network_get_primary_address.side_effect = NotImplementedError
|
||||
self.assertEqual(DEFAULT_IP, rabbit_utils.get_unit_ip())
|
||||
|
||||
@mock.patch.object(rabbit_utils, 'get_upstream_version')
|
||||
def test_get_managment_port_legacy(self, mock_get_upstream_version):
|
||||
mock_get_upstream_version.return_value = '2.7.1'
|
||||
|
|
|
@ -63,14 +63,14 @@ class RelationUtil(CharmTestCase):
|
|||
@patch('rabbitmq_server_relations.cmp_pkgrevno')
|
||||
@patch('rabbitmq_server_relations.is_clustered')
|
||||
@patch('rabbitmq_server_relations.ssl_utils.configure_client_ssl')
|
||||
@patch('rabbitmq_server_relations.rabbit.get_unit_ip')
|
||||
@patch('rabbitmq_server_relations.ch_ip.get_relation_ip')
|
||||
@patch('rabbitmq_server_relations.relation_get')
|
||||
@patch('rabbitmq_server_relations.is_elected_leader')
|
||||
def test_amqp_changed_compare_versions_ha_queues(
|
||||
self,
|
||||
is_elected_leader,
|
||||
relation_get,
|
||||
get_unit_ip,
|
||||
get_relation_ip,
|
||||
configure_client_ssl,
|
||||
is_clustered,
|
||||
cmp_pkgrevno,
|
||||
|
@ -92,7 +92,7 @@ class RelationUtil(CharmTestCase):
|
|||
mock_leader_node_is_ready.return_value = True
|
||||
mock_config.side_effect = config
|
||||
host_addr = "10.1.2.3"
|
||||
get_unit_ip.return_value = host_addr
|
||||
get_relation_ip.return_value = host_addr
|
||||
is_elected_leader.return_value = True
|
||||
relation_get.return_value = {}
|
||||
is_clustered.return_value = False
|
||||
|
@ -119,14 +119,14 @@ class RelationUtil(CharmTestCase):
|
|||
@patch('rabbitmq_server_relations.cmp_pkgrevno')
|
||||
@patch('rabbitmq_server_relations.is_clustered')
|
||||
@patch('rabbitmq_server_relations.ssl_utils.configure_client_ssl')
|
||||
@patch('rabbitmq_server_relations.rabbit.get_unit_ip')
|
||||
@patch('rabbitmq_server_relations.ch_ip.get_relation_ip')
|
||||
@patch('rabbitmq_server_relations.relation_get')
|
||||
@patch('rabbitmq_server_relations.is_elected_leader')
|
||||
def test_amqp_changed_compare_versions_ha_queues_prefer_ipv6(
|
||||
self,
|
||||
is_elected_leader,
|
||||
relation_get,
|
||||
get_unit_ip,
|
||||
get_relation_ip,
|
||||
configure_client_ssl,
|
||||
is_clustered,
|
||||
cmp_pkgrevno,
|
||||
|
@ -148,7 +148,7 @@ class RelationUtil(CharmTestCase):
|
|||
mock_leader_node_is_ready.return_value = True
|
||||
mock_config.side_effect = config
|
||||
ipv6_addr = "2001:db8:1:0:f816:3eff:fed6:c140"
|
||||
get_unit_ip.return_value = ipv6_addr
|
||||
get_relation_ip.return_value = ipv6_addr
|
||||
is_elected_leader.return_value = True
|
||||
relation_get.return_value = {}
|
||||
is_clustered.return_value = False
|
||||
|
|
|
@ -52,7 +52,38 @@ class TestSSLUtils(CharmTestCase):
|
|||
def setUp(self):
|
||||
super(TestSSLUtils, self).setUp(ssl_utils, TO_PATCH)
|
||||
|
||||
def test_get_ssl_mode_off(self):
|
||||
@patch('ssl_utils.get_hostname')
|
||||
@patch('ssl_utils.get_relation_ip')
|
||||
def test_get_unit_amqp_endpoint_data(self, get_relation_ip, get_hostname):
|
||||
self.config.return_value = '10.0.0.0/24'
|
||||
get_relation_ip.return_value = '10.0.0.10'
|
||||
get_hostname.return_value = 'myhost'
|
||||
self.assertEqual(
|
||||
ssl_utils.get_unit_amqp_endpoint_data(),
|
||||
('10.0.0.10', 'myhost'))
|
||||
get_relation_ip.assert_called_once_with(
|
||||
'amqp',
|
||||
cidr_network='10.0.0.0/24')
|
||||
get_hostname.assert_called_once_with('10.0.0.10')
|
||||
|
||||
@patch('ssl_utils.ch_cert_utils.get_bundle_for_cn')
|
||||
@patch('ssl_utils.get_unit_amqp_endpoint_data')
|
||||
def test_get_relation_cert_data(self, get_unit_amqp_endpoint_data,
|
||||
get_bundle_for_cn):
|
||||
get_unit_amqp_endpoint_data.return_value = ('10.0.0.10',
|
||||
'juju-345.lcd')
|
||||
get_bundle_for_cn.return_value = {
|
||||
'ca': 'vaultca',
|
||||
'cert': 'vaultcert',
|
||||
'key': 'vaultkey'}
|
||||
self.assertEqual(
|
||||
ssl_utils.get_relation_cert_data(),
|
||||
{'ca': 'vaultca', 'cert': 'vaultcert', 'key': 'vaultkey'})
|
||||
get_bundle_for_cn.assert_called_once_with('juju-345.lcd')
|
||||
|
||||
@patch('ssl_utils.get_relation_cert_data')
|
||||
def test_get_ssl_mode_off(self, get_relation_cert_data):
|
||||
get_relation_cert_data.return_value = {}
|
||||
test_config = {
|
||||
'ssl': 'off',
|
||||
'ssl_enabled': False,
|
||||
|
@ -64,7 +95,9 @@ class TestSSLUtils(CharmTestCase):
|
|||
ssl_utils.get_ssl_mode(),
|
||||
('off', False))
|
||||
|
||||
def test_get_ssl_enabled_true(self):
|
||||
@patch('ssl_utils.get_relation_cert_data')
|
||||
def test_get_ssl_enabled_true(self, get_relation_cert_data):
|
||||
get_relation_cert_data.return_value = {}
|
||||
test_config = {
|
||||
'ssl': 'off',
|
||||
'ssl_enabled': True,
|
||||
|
@ -76,7 +109,9 @@ class TestSSLUtils(CharmTestCase):
|
|||
ssl_utils.get_ssl_mode(),
|
||||
('on', False))
|
||||
|
||||
def test_get_ssl_enabled_false(self):
|
||||
@patch('ssl_utils.get_relation_cert_data')
|
||||
def test_get_ssl_enabled_false(self, get_relation_cert_data):
|
||||
get_relation_cert_data.return_value = {}
|
||||
test_config = {
|
||||
'ssl': 'on',
|
||||
'ssl_enabled': False,
|
||||
|
@ -88,7 +123,9 @@ class TestSSLUtils(CharmTestCase):
|
|||
ssl_utils.get_ssl_mode(),
|
||||
('on', False))
|
||||
|
||||
def test_get_ssl_enabled_external_ca(self):
|
||||
@patch('ssl_utils.get_relation_cert_data')
|
||||
def test_get_ssl_enabled_external_ca(self, get_relation_cert_data):
|
||||
get_relation_cert_data.return_value = {}
|
||||
test_config = {
|
||||
'ssl': 'on',
|
||||
'ssl_enabled': False,
|
||||
|
@ -100,9 +137,21 @@ class TestSSLUtils(CharmTestCase):
|
|||
ssl_utils.get_ssl_mode(),
|
||||
('on', True))
|
||||
|
||||
@patch('ssl_utils.get_relation_cert_data')
|
||||
def test_get_ssl_enabled_relation_certs(self, get_relation_cert_data):
|
||||
get_relation_cert_data.return_value = {
|
||||
'cert': 'vaultcert',
|
||||
'key': 'vaultkey',
|
||||
'ca': 'vaultca'}
|
||||
self.assertEqual(
|
||||
ssl_utils.get_ssl_mode(),
|
||||
('certs-relation', True))
|
||||
|
||||
@patch('ssl_utils.get_relation_cert_data')
|
||||
@patch('ssl_utils.get_ssl_mode')
|
||||
def test_get_ssl_mode_ssl_off(self, get_ssl_mode):
|
||||
def test_get_ssl_mode_ssl_off(self, get_ssl_mode, get_relation_cert_data):
|
||||
get_ssl_mode.return_value = ('off', False)
|
||||
get_relation_cert_data.return_value = {}
|
||||
relation_data = {}
|
||||
ssl_utils.configure_client_ssl(relation_data)
|
||||
self.assertEqual(relation_data, {})
|
||||
|
|
Loading…
Reference in New Issue