Add support for block device encryption

Add new secrets-storage relation to vault, supporting the
use of block device encryption using dm-crypt/vaultlocker.

Prepared devices are now recorded in the local unit kv
store; this information is used to provide a list of
configured block devices to the swift-proxy charm,
rather than the previous best guess provided by
determine_block_devices.  This allows us to use the
dm-crypt device name, rather than the underlying
block device.

Encrypted block devices are unlocked on boot using
vaultlocker-decrypt systemd units (enabled by vaultlocker);
/etc/fstab entries for such devices make use of a
x-systemd.requires option to ensure that the block device
is unlocked prior to attempting to mount it.

Add new storage binding to allow charm to be used with
Juju storage.

Add new ephemeral-unmount configuration option to allow
cloud ephemeral storage to be used for testing purposes;
update functional testing to use this option.  The behaviour
of 'overwrite' was changed to accommodate the use of
encrypted block devices.

Change-Id: I9b3f8cd2de412ee96e0139dba4d4abdf998ecaf2
This commit is contained in:
James Page 2018-04-25 17:50:09 +01:00
parent 18d0a891db
commit 591c084532
44 changed files with 861 additions and 182 deletions

2
.gitignore vendored
View File

@ -8,3 +8,5 @@ tags
*.pyc *.pyc
tests/cirros-* tests/cirros-*
func-results.json func-results.json
.settings
**/__pycache__

View File

@ -65,7 +65,8 @@ def get_ca_cert():
if ca_cert is None: if ca_cert is None:
log("Inspecting identity-service relations for CA SSL certificate.", log("Inspecting identity-service relations for CA SSL certificate.",
level=INFO) level=INFO)
for r_id in relation_ids('identity-service'): for r_id in (relation_ids('identity-service') +
relation_ids('identity-credentials')):
for unit in relation_list(r_id): for unit in relation_list(r_id):
if ca_cert is None: if ca_cert is None:
ca_cert = relation_get('ca_cert', ca_cert = relation_get('ca_cert',
@ -76,7 +77,7 @@ def get_ca_cert():
def retrieve_ca_cert(cert_file): def retrieve_ca_cert(cert_file):
cert = None cert = None
if os.path.isfile(cert_file): if os.path.isfile(cert_file):
with open(cert_file, 'r') as crt: with open(cert_file, 'rb') as crt:
cert = crt.read() cert = crt.read()
return cert return cert

View File

@ -371,6 +371,7 @@ def distributed_wait(modulo=None, wait=None, operation_name='operation'):
''' Distribute operations by waiting based on modulo_distribution ''' Distribute operations by waiting based on modulo_distribution
If modulo and or wait are not set, check config_get for those values. If modulo and or wait are not set, check config_get for those values.
If config values are not set, default to modulo=3 and wait=30.
:param modulo: int The modulo number creates the group distribution :param modulo: int The modulo number creates the group distribution
:param wait: int The constant time wait value :param wait: int The constant time wait value
@ -382,10 +383,17 @@ def distributed_wait(modulo=None, wait=None, operation_name='operation'):
:side effect: Calls time.sleep() :side effect: Calls time.sleep()
''' '''
if modulo is None: if modulo is None:
modulo = config_get('modulo-nodes') modulo = config_get('modulo-nodes') or 3
if wait is None: if wait is None:
wait = config_get('known-wait') wait = config_get('known-wait') or 30
calculated_wait = modulo_distribution(modulo=modulo, wait=wait) if juju_is_leader():
# The leader should never wait
calculated_wait = 0
else:
# non_zero_wait=True guarantees the non-leader who gets modulo 0
# will still wait
calculated_wait = modulo_distribution(modulo=modulo, wait=wait,
non_zero_wait=True)
msg = "Waiting {} seconds for {} ...".format(calculated_wait, msg = "Waiting {} seconds for {} ...".format(calculated_wait,
operation_name) operation_name)
log(msg, DEBUG) log(msg, DEBUG)

View File

@ -441,7 +441,7 @@ class OpenStackAmuletUtils(AmuletUtils):
if rel.get('api_version') != str(api_version): if rel.get('api_version') != str(api_version):
raise Exception("api_version not propagated through relation" raise Exception("api_version not propagated through relation"
" data yet ('{}' != '{}')." " data yet ('{}' != '{}')."
"".format(rel['api_version'], api_version)) "".format(rel.get('api_version'), api_version))
def keystone_configure_api_version(self, sentry_relation_pairs, deployment, def keystone_configure_api_version(self, sentry_relation_pairs, deployment,
api_version): api_version):
@ -463,16 +463,13 @@ class OpenStackAmuletUtils(AmuletUtils):
deployment._auto_wait_for_status() deployment._auto_wait_for_status()
self.keystone_wait_for_propagation(sentry_relation_pairs, api_version) self.keystone_wait_for_propagation(sentry_relation_pairs, api_version)
def authenticate_cinder_admin(self, keystone_sentry, username, def authenticate_cinder_admin(self, keystone, api_version=2):
password, tenant, api_version=2):
"""Authenticates admin user with cinder.""" """Authenticates admin user with cinder."""
# NOTE(beisner): cinder python client doesn't accept tokens. self.log.debug('Authenticating cinder admin...')
keystone_ip = keystone_sentry.info['public-address']
ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8'))
_clients = { _clients = {
1: cinder_client.Client, 1: cinder_client.Client,
2: cinder_clientv2.Client} 2: cinder_clientv2.Client}
return _clients[api_version](username, password, tenant, ept) return _clients[api_version](session=keystone.session)
def authenticate_keystone(self, keystone_ip, username, password, def authenticate_keystone(self, keystone_ip, username, password,
api_version=False, admin_port=False, api_version=False, admin_port=False,

View File

@ -797,9 +797,9 @@ class ApacheSSLContext(OSContextGenerator):
key_filename = 'key' key_filename = 'key'
write_file(path=os.path.join(ssl_dir, cert_filename), write_file(path=os.path.join(ssl_dir, cert_filename),
content=b64decode(cert)) content=b64decode(cert), perms=0o640)
write_file(path=os.path.join(ssl_dir, key_filename), write_file(path=os.path.join(ssl_dir, key_filename),
content=b64decode(key)) content=b64decode(key), perms=0o640)
def configure_ca(self): def configure_ca(self):
ca_cert = get_ca_cert() ca_cert = get_ca_cert()
@ -1873,10 +1873,11 @@ class EnsureDirContext(OSContextGenerator):
context is needed to do that before rendering a template. context is needed to do that before rendering a template.
''' '''
def __init__(self, dirname): def __init__(self, dirname, **kwargs):
'''Used merely to ensure that a given directory exists.''' '''Used merely to ensure that a given directory exists.'''
self.dirname = dirname self.dirname = dirname
self.kwargs = kwargs
def __call__(self): def __call__(self):
mkdir(self.dirname) mkdir(self.dirname, **self.kwargs)
return {} return {}

View File

@ -6,6 +6,7 @@ global
group haproxy group haproxy
spread-checks 0 spread-checks 0
stats socket /var/run/haproxy/admin.sock mode 600 level admin stats socket /var/run/haproxy/admin.sock mode 600 level admin
stats socket /var/run/haproxy/operator.sock mode 600 level operator
stats timeout 2m stats timeout 2m
defaults defaults

View File

@ -0,0 +1,5 @@
[oslo_middleware]
# Bug #1758675
enable_proxy_headers_parsing = true

View File

@ -5,4 +5,7 @@ transport_url = {{ transport_url }}
{% if notification_topics -%} {% if notification_topics -%}
topics = {{ notification_topics }} topics = {{ notification_topics }}
{% endif -%} {% endif -%}
{% if notification_format -%}
notification_format = {{ notification_format }}
{% endif -%}
{% endif -%} {% endif -%}

View File

@ -306,7 +306,7 @@ def get_os_codename_install_source(src):
if src.startswith('cloud:'): if src.startswith('cloud:'):
ca_rel = src.split(':')[1] ca_rel = src.split(':')[1]
ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0] ca_rel = ca_rel.split('-')[1].split('/')[0]
return ca_rel return ca_rel
# Best guess match based on deb string provided # Best guess match based on deb string provided

View File

@ -0,0 +1,126 @@
# Copyright 2018 Canonical Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import os
import charmhelpers.contrib.openstack.alternatives as alternatives
import charmhelpers.contrib.openstack.context as context
import charmhelpers.core.hookenv as hookenv
import charmhelpers.core.host as host
import charmhelpers.core.templating as templating
import charmhelpers.core.unitdata as unitdata
VAULTLOCKER_BACKEND = 'charm-vaultlocker'
class VaultKVContext(context.OSContextGenerator):
    """Vault KV context for interaction with vault-kv interfaces"""
    interfaces = ['secrets-storage']

    def __init__(self, secret_backend=None):
        # NOTE: the original called super(context.OSContextGenerator, self),
        # naming the base class itself, which skips OSContextGenerator in
        # the MRO and resolves straight to object. Name this class instead.
        super(VaultKVContext, self).__init__()
        self.secret_backend = (
            secret_backend or 'charm-{}'.format(hookenv.service_name())
        )

    def __call__(self):
        """Build the vault access context from the secrets-storage relation.

        Scans all secrets-storage relations for this unit's vault_url,
        role_id and one-shot token; caches the unwrapped secret_id in the
        local unit kv store so a token is only consumed once.

        :returns: dict with vault_url/role_id/secret_id/secret_backend
            (plus vault_ca when published) once the relation is complete,
            otherwise an empty dict.
        """
        db = unitdata.kv()
        last_token = db.get('last-token')
        secret_id = db.get('secret-id')
        for relation_id in hookenv.relation_ids(self.interfaces[0]):
            for unit in hookenv.related_units(relation_id):
                data = hookenv.relation_get(unit=unit,
                                            rid=relation_id)
                vault_url = data.get('vault_url')
                role_id = data.get('{}_role_id'.format(hookenv.local_unit()))
                token = data.get('{}_token'.format(hookenv.local_unit()))

                if all([vault_url, role_id, token]):
                    # Values are JSON encoded on the relation.
                    token = json.loads(token)
                    vault_url = json.loads(vault_url)

                    # Tokens may change when secret_id's are being
                    # reissued - if so use token to get new secret_id
                    if token != last_token:
                        secret_id = retrieve_secret_id(
                            url=vault_url,
                            token=token
                        )
                        db.set('secret-id', secret_id)
                        db.set('last-token', token)
                        db.flush()

                    ctxt = {
                        'vault_url': vault_url,
                        'role_id': json.loads(role_id),
                        'secret_id': secret_id,
                        'secret_backend': self.secret_backend,
                    }
                    vault_ca = data.get('vault_ca')
                    if vault_ca:
                        ctxt['vault_ca'] = json.loads(vault_ca)
                    self.complete = True
                    return ctxt
        return {}
def write_vaultlocker_conf(context, priority=100):
    """Write vaultlocker configuration to disk and install alternative

    :param context: Dict of data from vault-kv relation
    :ptype: context: dict
    :param priority: Priority of alternative configuration
    :ptype: priority: int
    """
    charm_vl_path = "/var/lib/charm/{}/vaultlocker.conf".format(
        hookenv.service_name()
    )
    # Config contains the vault token/secret - restrict access to root.
    host.mkdir(os.path.dirname(charm_vl_path), perms=0o700)
    # NOTE: the original statement ended with a stray trailing comma,
    # turning the render() call into a one-element tuple expression.
    templating.render(source='vaultlocker.conf.j2',
                      target=charm_vl_path,
                      context=context, perms=0o600)
    alternatives.install_alternative('vaultlocker.conf',
                                     '/etc/vaultlocker/vaultlocker.conf',
                                     charm_vl_path, priority)
def vault_relation_complete(backend=None):
    """Determine whether vault relation is complete

    :param backend: Name of secrets backend requested
    :ptype backend: string
    :returns: whether the relation to vault is complete
    :rtype: bool"""
    kv_context = VaultKVContext(
        secret_backend=(backend or VAULTLOCKER_BACKEND))
    # Evaluating the context populates its .complete flag.
    kv_context()
    return kv_context.complete
# TODO: contrib a high level unwrap method to hvac that works
def retrieve_secret_id(url, token):
    """Retrieve a response-wrapped secret_id from Vault

    :param url: URL to Vault Server
    :ptype url: str
    :param token: One shot Token to use
    :ptype token: str
    :returns: secret_id to use for Vault Access
    :rtype: str"""
    # hvac is imported lazily so this module can be loaded before the
    # charm's python dependencies are installed.
    import hvac
    client = hvac.Client(url=url, token=token)
    # NOTE(review): uses hvac's private _post helper - no public unwrap
    # API at this hvac version (see TODO above).
    response = client._post('/v1/sys/wrapping/unwrap')
    if response.status_code == 200:
        data = response.json()
        return data['data']['secret_id']
    # NOTE(review): implicitly returns None on any non-200 response;
    # callers should be prepared for that - confirm intended.

View File

@ -291,7 +291,7 @@ class Pool(object):
class ReplicatedPool(Pool): class ReplicatedPool(Pool):
def __init__(self, service, name, pg_num=None, replicas=2, def __init__(self, service, name, pg_num=None, replicas=2,
percent_data=10.0): percent_data=10.0, app_name=None):
super(ReplicatedPool, self).__init__(service=service, name=name) super(ReplicatedPool, self).__init__(service=service, name=name)
self.replicas = replicas self.replicas = replicas
if pg_num: if pg_num:
@ -301,6 +301,10 @@ class ReplicatedPool(Pool):
self.pg_num = min(pg_num, max_pgs) self.pg_num = min(pg_num, max_pgs)
else: else:
self.pg_num = self.get_pgs(self.replicas, percent_data) self.pg_num = self.get_pgs(self.replicas, percent_data)
if app_name:
self.app_name = app_name
else:
self.app_name = 'unknown'
def create(self): def create(self):
if not pool_exists(self.service, self.name): if not pool_exists(self.service, self.name):
@ -313,6 +317,12 @@ class ReplicatedPool(Pool):
update_pool(client=self.service, update_pool(client=self.service,
pool=self.name, pool=self.name,
settings={'size': str(self.replicas)}) settings={'size': str(self.replicas)})
try:
    set_app_name_for_pool(client=self.service,
                          pool=self.name,
                          name=self.app_name)
except CalledProcessError:
    # BUG FIX: 'level' is a kwarg of log(), not of str.format();
    # the original passed it inside format(), so the message was
    # logged at the default level and WARNING was silently dropped.
    log('Could not set app name for pool {}'.format(self.name),
        level=WARNING)
except CalledProcessError: except CalledProcessError:
raise raise
@ -320,10 +330,14 @@ class ReplicatedPool(Pool):
# Default jerasure erasure coded pool # Default jerasure erasure coded pool
class ErasurePool(Pool): class ErasurePool(Pool):
def __init__(self, service, name, erasure_code_profile="default", def __init__(self, service, name, erasure_code_profile="default",
percent_data=10.0): percent_data=10.0, app_name=None):
super(ErasurePool, self).__init__(service=service, name=name) super(ErasurePool, self).__init__(service=service, name=name)
self.erasure_code_profile = erasure_code_profile self.erasure_code_profile = erasure_code_profile
self.percent_data = percent_data self.percent_data = percent_data
if app_name:
self.app_name = app_name
else:
self.app_name = 'unknown'
def create(self): def create(self):
if not pool_exists(self.service, self.name): if not pool_exists(self.service, self.name):
@ -355,6 +369,12 @@ class ErasurePool(Pool):
'erasure', self.erasure_code_profile] 'erasure', self.erasure_code_profile]
try: try:
check_call(cmd) check_call(cmd)
try:
    set_app_name_for_pool(client=self.service,
                          pool=self.name,
                          name=self.app_name)
except CalledProcessError:
    # BUG FIX: 'level' belongs to log(), not str.format(); the
    # original passed it inside format(), losing the WARNING level.
    log('Could not set app name for pool {}'.format(self.name),
        level=WARNING)
except CalledProcessError: except CalledProcessError:
raise raise
@ -778,6 +798,25 @@ def update_pool(client, pool, settings):
check_call(cmd) check_call(cmd)
def set_app_name_for_pool(client, pool, name):
    """
    Calls `osd pool application enable` for the specified pool name

    :param client: Name of the ceph client to use
    :type client: str
    :param pool: Pool to set app name for
    :type pool: str
    :param name: app name for the specified pool
    :type name: str
    :raises: CalledProcessError if ceph call fails
    """
    from distutils.version import LooseVersion
    # BUG FIX: compare versions numerically - plain string comparison
    # ranks e.g. '9.2.1' above '12.0.0' and would run this on old ceph.
    if LooseVersion(ceph_version()) >= LooseVersion('12.0.0'):
        cmd = ['ceph', '--id', client, 'osd', 'pool',
               'application', 'enable', pool, name]
        check_call(cmd)
def create_pool(service, name, replicas=3, pg_num=None): def create_pool(service, name, replicas=3, pg_num=None):
"""Create a new RADOS pool.""" """Create a new RADOS pool."""
if pool_exists(service, name): if pool_exists(service, name):

View File

@ -151,3 +151,32 @@ def extend_logical_volume_by_device(lv_name, block_device):
''' '''
cmd = ['lvextend', lv_name, block_device] cmd = ['lvextend', lv_name, block_device]
check_call(cmd) check_call(cmd)
def create_logical_volume(lv_name, volume_group, size=None):
    '''
    Create a new logical volume in an existing volume group

    :param lv_name: str: name of logical volume to be created.
    :param volume_group: str: Name of volume group to use for the new volume.
    :param size: str: Size of logical volume to create (100% if not supplied)
    :raises subprocess.CalledProcessError: in the event that the lvcreate fails.
    '''
    # lvcreate takes a different sizing flag depending on whether an
    # explicit size was requested (-L <size>) or all remaining space
    # should be consumed (-l 100%FREE).
    if size:
        sizing = ['-L', '{}'.format(size)]
    else:
        sizing = ['-l', '100%FREE']
    check_call(['lvcreate', '--yes'] + sizing + ['-n', lv_name, volume_group])

View File

@ -67,3 +67,19 @@ def is_device_mounted(device):
except Exception: except Exception:
return False return False
return bool(re.search(r'MOUNTPOINT=".+"', out)) return bool(re.search(r'MOUNTPOINT=".+"', out))
def mkfs_xfs(device, force=False):
    """Format device with XFS filesystem.

    By default this should fail if the device already has a filesystem on it.
    :param device: Full path to device to format
    :ptype device: str
    :param force: Force operation
    :ptype: force: boolean"""
    cmd = ['mkfs.xfs']
    if force:
        # -f: overwrite any existing filesystem on the device.
        cmd.append("-f")

    # -i size=1024: larger inodes, leaving room for extended attributes.
    cmd += ['-i', 'size=1024', device]
    check_call(cmd)

View File

@ -27,6 +27,7 @@ import glob
import os import os
import json import json
import yaml import yaml
import re
import subprocess import subprocess
import sys import sys
import errno import errno
@ -67,7 +68,7 @@ def cached(func):
@wraps(func) @wraps(func)
def wrapper(*args, **kwargs): def wrapper(*args, **kwargs):
global cache global cache
key = str((func, args, kwargs)) key = json.dumps((func, args, kwargs), sort_keys=True, default=str)
try: try:
return cache[key] return cache[key]
except KeyError: except KeyError:
@ -289,7 +290,7 @@ class Config(dict):
self.implicit_save = True self.implicit_save = True
self._prev_dict = None self._prev_dict = None
self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
if os.path.exists(self.path): if os.path.exists(self.path) and os.stat(self.path).st_size:
self.load_previous() self.load_previous()
atexit(self._implicit_save) atexit(self._implicit_save)
@ -309,7 +310,11 @@ class Config(dict):
""" """
self.path = path or self.path self.path = path or self.path
with open(self.path) as f: with open(self.path) as f:
self._prev_dict = json.load(f) try:
self._prev_dict = json.load(f)
except ValueError as e:
log('Unable to parse previous config data - {}'.format(str(e)),
level=ERROR)
for k, v in copy.deepcopy(self._prev_dict).items(): for k, v in copy.deepcopy(self._prev_dict).items():
if k not in self: if k not in self:
self[k] = v self[k] = v
@ -353,22 +358,40 @@ class Config(dict):
self.save() self.save()
@cached _cache_config = None
def config(scope=None): def config(scope=None):
"""Juju charm configuration""" """
config_cmd_line = ['config-get'] Get the juju charm configuration (scope==None) or individual key,
if scope is not None: (scope=str). The returned value is a Python data structure loaded as
config_cmd_line.append(scope) JSON from the Juju config command.
else:
config_cmd_line.append('--all') :param scope: If set, return the value for the specified key.
config_cmd_line.append('--format=json') :type scope: Optional[str]
:returns: Either the whole config as a Config, or a key from it.
:rtype: Any
"""
global _cache_config
config_cmd_line = ['config-get', '--all', '--format=json']
try: try:
config_data = json.loads( # JSON Decode Exception for Python3.5+
subprocess.check_output(config_cmd_line).decode('UTF-8')) exc_json = json.decoder.JSONDecodeError
except AttributeError:
# JSON Decode Exception for Python2.7 through Python3.4
exc_json = ValueError
try:
if _cache_config is None:
config_data = json.loads(
subprocess.check_output(config_cmd_line).decode('UTF-8'))
_cache_config = Config(config_data)
if scope is not None: if scope is not None:
return config_data return _cache_config.get(scope)
return Config(config_data) return _cache_config
except ValueError: except (exc_json, UnicodeDecodeError) as e:
log('Unable to parse output from config-get: config_cmd_line="{}" '
'message="{}"'
.format(config_cmd_line, str(e)), level=ERROR)
return None return None
@ -1043,7 +1066,6 @@ def juju_version():
universal_newlines=True).strip() universal_newlines=True).strip()
@cached
def has_juju_version(minimum_version): def has_juju_version(minimum_version):
"""Return True if the Juju version is at least the provided version""" """Return True if the Juju version is at least the provided version"""
return LooseVersion(juju_version()) >= LooseVersion(minimum_version) return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
@ -1103,6 +1125,8 @@ def _run_atexit():
@translate_exc(from_exc=OSError, to_exc=NotImplementedError) @translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def network_get_primary_address(binding): def network_get_primary_address(binding):
''' '''
Deprecated since Juju 2.3; use network_get()
Retrieve the primary network address for a named binding Retrieve the primary network address for a named binding
:param binding: string. The name of a relation of extra-binding :param binding: string. The name of a relation of extra-binding
@ -1123,7 +1147,6 @@ def network_get_primary_address(binding):
return response return response
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def network_get(endpoint, relation_id=None): def network_get(endpoint, relation_id=None):
""" """
Retrieve the network details for a relation endpoint Retrieve the network details for a relation endpoint
@ -1131,24 +1154,20 @@ def network_get(endpoint, relation_id=None):
:param endpoint: string. The name of a relation endpoint :param endpoint: string. The name of a relation endpoint
:param relation_id: int. The ID of the relation for the current context. :param relation_id: int. The ID of the relation for the current context.
:return: dict. The loaded YAML output of the network-get query. :return: dict. The loaded YAML output of the network-get query.
:raise: NotImplementedError if run on Juju < 2.1 :raise: NotImplementedError if request not supported by the Juju version.
""" """
if not has_juju_version('2.2'):
raise NotImplementedError(juju_version()) # earlier versions require --primary-address
if relation_id and not has_juju_version('2.3'):
raise NotImplementedError # 2.3 added the -r option
cmd = ['network-get', endpoint, '--format', 'yaml'] cmd = ['network-get', endpoint, '--format', 'yaml']
if relation_id: if relation_id:
cmd.append('-r') cmd.append('-r')
cmd.append(relation_id) cmd.append(relation_id)
try: response = subprocess.check_output(
response = subprocess.check_output( cmd,
cmd, stderr=subprocess.STDOUT).decode('UTF-8').strip()
stderr=subprocess.STDOUT).decode('UTF-8').strip()
except CalledProcessError as e:
# Early versions of Juju 2.0.x required the --primary-address argument.
# We catch that condition here and raise NotImplementedError since
# the requested semantics are not available - the caller can then
# use the network_get_primary_address() method instead.
if '--primary-address is currently required' in e.output.decode('UTF-8'):
raise NotImplementedError
raise
return yaml.safe_load(response) return yaml.safe_load(response)
@ -1204,9 +1223,23 @@ def iter_units_for_relation_name(relation_name):
def ingress_address(rid=None, unit=None): def ingress_address(rid=None, unit=None):
""" """
Retrieve the ingress-address from a relation when available. Otherwise, Retrieve the ingress-address from a relation when available.
return the private-address. This function is to be used on the consuming Otherwise, return the private-address.
side of the relation.
When used on the consuming side of the relation (unit is a remote
unit), the ingress-address is the IP address that this unit needs
to use to reach the provided service on the remote unit.
When used on the providing side of the relation (unit == local_unit()),
the ingress-address is the IP address that is advertised to remote
units on this relation. Remote units need to use this address to
reach the local provided service on this unit.
Note that charms may document some other method to use in
preference to the ingress_address(), such as an address provided
on a different relation attribute or a service discovery mechanism.
This allows charms to redirect inbound connections to their peers
or different applications such as load balancers.
Usage: Usage:
addresses = [ingress_address(rid=u.rid, unit=u.unit) addresses = [ingress_address(rid=u.rid, unit=u.unit)
@ -1220,3 +1253,40 @@ def ingress_address(rid=None, unit=None):
settings = relation_get(rid=rid, unit=unit) settings = relation_get(rid=rid, unit=unit)
return (settings.get('ingress-address') or return (settings.get('ingress-address') or
settings.get('private-address')) settings.get('private-address'))
def egress_subnets(rid=None, unit=None):
    """
    Retrieve the egress-subnets from a relation.

    This function is to be used on the providing side of the
    relation, and provides the ranges of addresses that client
    connections may come from. The result is uninteresting on
    the consuming side of a relation (unit == local_unit()).

    Returns a stable list of subnets in CIDR format.
    eg. ['192.168.1.0/24', '2001::F00F/128']

    If egress-subnets is not available, falls back to using the published
    ingress-address, or finally private-address.

    :param rid: string relation id
    :param unit: string unit name
    :side effect: calls relation_get
    :return: list of subnets in CIDR format. eg. ['192.168.1.0/24', '2001::F00F/128']
    """
    def _as_cidr(address):
        # Bare IPv4 -> /32, bare IPv6 -> /128; already-ranged unchanged.
        if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', address) is not None:
            return address + '/32'
        if ':' in address and '/' not in address:  # IPv6
            return address + '/128'
        return address

    settings = relation_get(rid=rid, unit=unit)
    if 'egress-subnets' in settings:
        return [s.strip() for s in settings['egress-subnets'].split(',')
                if s.strip()]
    for fallback in ('ingress-address', 'private-address'):
        if fallback in settings:
            return [_as_cidr(settings[fallback])]
    return []  # Should never happen

View File

@ -993,7 +993,7 @@ def updatedb(updatedb_text, new_path):
return output return output
def modulo_distribution(modulo=3, wait=30): def modulo_distribution(modulo=3, wait=30, non_zero_wait=False):
""" Modulo distribution """ Modulo distribution
This helper uses the unit number, a modulo value and a constant wait time This helper uses the unit number, a modulo value and a constant wait time
@ -1015,7 +1015,14 @@ def modulo_distribution(modulo=3, wait=30):
@param modulo: int The modulo number creates the group distribution @param modulo: int The modulo number creates the group distribution
@param wait: int The constant time wait value @param wait: int The constant time wait value
@param non_zero_wait: boolean Override unit % modulo == 0,
return modulo * wait. Used to avoid collisions with
leader nodes which are often given priority.
@return: int Calculated time to wait for unit operation @return: int Calculated time to wait for unit operation
""" """
unit_number = int(local_unit().split('/')[1]) unit_number = int(local_unit().split('/')[1])
return (unit_number % modulo) * wait calculated_wait_time = (unit_number % modulo) * wait
if non_zero_wait and calculated_wait_time == 0:
return modulo * wait
else:
return calculated_wait_time

View File

@ -307,23 +307,34 @@ class PortManagerCallback(ManagerCallback):
""" """
def __call__(self, manager, service_name, event_name): def __call__(self, manager, service_name, event_name):
service = manager.get_service(service_name) service = manager.get_service(service_name)
new_ports = service.get('ports', []) # turn this generator into a list,
# as we'll be going over it multiple times
new_ports = list(service.get('ports', []))
port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name)) port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
if os.path.exists(port_file): if os.path.exists(port_file):
with open(port_file) as fp: with open(port_file) as fp:
old_ports = fp.read().split(',') old_ports = fp.read().split(',')
for old_port in old_ports: for old_port in old_ports:
if bool(old_port): if bool(old_port) and not self.ports_contains(old_port, new_ports):
old_port = int(old_port) hookenv.close_port(old_port)
if old_port not in new_ports:
hookenv.close_port(old_port)
with open(port_file, 'w') as fp: with open(port_file, 'w') as fp:
fp.write(','.join(str(port) for port in new_ports)) fp.write(','.join(str(port) for port in new_ports))
for port in new_ports: for port in new_ports:
# A port is either a number or 'ICMP'
protocol = 'TCP'
if str(port).upper() == 'ICMP':
protocol = 'ICMP'
if event_name == 'start': if event_name == 'start':
hookenv.open_port(port) hookenv.open_port(port, protocol)
elif event_name == 'stop': elif event_name == 'stop':
hookenv.close_port(port) hookenv.close_port(port, protocol)
def ports_contains(self, port, ports):
    """Return True if *port* (as read from the ports file) is in *ports*.

    Empty entries never match; numeric entries are compared as ints,
    while the literal 'ICMP' marker is compared as the string itself.
    """
    if not port:
        return False
    needle = port if str(port).upper() == 'ICMP' else int(port)
    return needle in ports
def service_stop(service_name): def service_stop(service_name):

View File

@ -31,18 +31,22 @@ __author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
def create(sysctl_dict, sysctl_file): def create(sysctl_dict, sysctl_file):
"""Creates a sysctl.conf file from a YAML associative array """Creates a sysctl.conf file from a YAML associative array
:param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }" :param sysctl_dict: a dict or YAML-formatted string of sysctl
options eg "{ 'kernel.max_pid': 1337 }"
:type sysctl_dict: str :type sysctl_dict: str
:param sysctl_file: path to the sysctl file to be saved :param sysctl_file: path to the sysctl file to be saved
:type sysctl_file: str or unicode :type sysctl_file: str or unicode
:returns: None :returns: None
""" """
try: if type(sysctl_dict) is not dict:
sysctl_dict_parsed = yaml.safe_load(sysctl_dict) try:
except yaml.YAMLError: sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict), except yaml.YAMLError:
level=ERROR) log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
return level=ERROR)
return
else:
sysctl_dict_parsed = sysctl_dict
with open(sysctl_file, "w") as fd: with open(sysctl_file, "w") as fd:
for key, value in sysctl_dict_parsed.items(): for key, value in sysctl_dict_parsed.items():

View File

@ -166,6 +166,10 @@ class Storage(object):
To support dicts, lists, integer, floats, and booleans values To support dicts, lists, integer, floats, and booleans values
are automatically json encoded/decoded. are automatically json encoded/decoded.
Note: to facilitate unit testing, ':memory:' can be passed as the
path parameter which causes sqlite3 to only build the db in memory.
This should only be used for testing purposes.
""" """
def __init__(self, path=None): def __init__(self, path=None):
self.db_path = path self.db_path = path
@ -175,8 +179,9 @@ class Storage(object):
else: else:
self.db_path = os.path.join( self.db_path = os.path.join(
os.environ.get('CHARM_DIR', ''), '.unit-state.db') os.environ.get('CHARM_DIR', ''), '.unit-state.db')
with open(self.db_path, 'a') as f: if self.db_path != ':memory:':
os.fchmod(f.fileno(), 0o600) with open(self.db_path, 'a') as f:
os.fchmod(f.fileno(), 0o600)
self.conn = sqlite3.connect('%s' % self.db_path) self.conn = sqlite3.connect('%s' % self.db_path)
self.cursor = self.conn.cursor() self.cursor = self.conn.cursor()
self.revision = None self.revision = None

View File

@ -44,6 +44,7 @@ ARCH_TO_PROPOSED_POCKET = {
'x86_64': PROPOSED_POCKET, 'x86_64': PROPOSED_POCKET,
'ppc64le': PROPOSED_PORTS_POCKET, 'ppc64le': PROPOSED_PORTS_POCKET,
'aarch64': PROPOSED_PORTS_POCKET, 'aarch64': PROPOSED_PORTS_POCKET,
's390x': PROPOSED_PORTS_POCKET,
} }
CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu" CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA' CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'

View File

@ -40,6 +40,17 @@ options:
description: | description: |
If true, charm will attempt to unmount and overwrite existing and in-use If true, charm will attempt to unmount and overwrite existing and in-use
block-devices (WARNING). block-devices (WARNING).
ephemeral-unmount:
type: string
default:
description: |
Cloud instances provide ephermeral storage which is normally mounted
on /mnt.
.
Setting this option to the path of the ephemeral mountpoint will force
an unmount of the corresponding device so that it can be used as a swift
storage device. This is useful for testing purposes (cloud deployment
is not a typical use case).
zone: zone:
default: 1 default: 1
type: int type: int
@ -189,3 +200,9 @@ options:
be loaded, the charm will fail to install. be loaded, the charm will fail to install.
type: boolean type: boolean
default: False default: False
encrypt:
default: false
type: boolean
description: |
Encrypt block devices used by swift using dm-crypt, making use of
vault for encryption key management; requires a relation to vault.

View File

@ -0,0 +1 @@
storage.bootstrap

View File

@ -0,0 +1 @@
storage.bootstrap

View File

@ -0,0 +1 @@
swift_storage_hooks.py

View File

@ -0,0 +1 @@
swift_storage_hooks.py

View File

@ -0,0 +1 @@
swift_storage_hooks.py

View File

@ -0,0 +1 @@
swift_storage_hooks.py

8
hooks/storage.bootstrap Executable file
View File

@ -0,0 +1,8 @@
#!/bin/sh
if ! dpkg -s swift > /dev/null 2>&1; then
juju-log "Swift not yet installed."
exit 0
fi
./hooks/storage.real

1
hooks/storage.real Symbolic link
View File

@ -0,0 +1 @@
swift_storage_hooks.py

View File

@ -14,16 +14,20 @@
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
import base64
import copy
import json
import os import os
import shutil import shutil
import sys import sys
import socket
import subprocess
import tempfile import tempfile
from lib.swift_storage_utils import ( from lib.swift_storage_utils import (
PACKAGES, PACKAGES,
RESTART_MAP, RESTART_MAP,
SWIFT_SVCS, SWIFT_SVCS,
determine_block_devices,
do_openstack_upgrade, do_openstack_upgrade,
ensure_swift_directories, ensure_swift_directories,
fetch_swift_rings, fetch_swift_rings,
@ -53,16 +57,20 @@ from charmhelpers.core.hookenv import (
relations_of_type, relations_of_type,
status_set, status_set,
ingress_address, ingress_address,
DEBUG,
) )
from charmhelpers.fetch import ( from charmhelpers.fetch import (
apt_install, apt_install,
apt_update, apt_update,
add_source,
filter_installed_packages filter_installed_packages
) )
from charmhelpers.core.host import ( from charmhelpers.core.host import (
add_to_updatedb_prunepath, add_to_updatedb_prunepath,
rsync, rsync,
write_file,
umount,
) )
from charmhelpers.core.sysctl import create as create_sysctl from charmhelpers.core.sysctl import create as create_sysctl
@ -81,9 +89,12 @@ from charmhelpers.contrib.network.ip import (
from charmhelpers.contrib.network import ufw from charmhelpers.contrib.network import ufw
from charmhelpers.contrib.charmsupport import nrpe from charmhelpers.contrib.charmsupport import nrpe
from charmhelpers.contrib.hardening.harden import harden from charmhelpers.contrib.hardening.harden import harden
from charmhelpers.core.unitdata import kv
from distutils.dir_util import mkpath from distutils.dir_util import mkpath
import charmhelpers.contrib.openstack.vaultlocker as vaultlocker
hooks = Hooks() hooks = Hooks()
CONFIGS = register_configs() CONFIGS = register_configs()
NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins' NAGIOS_PLUGINS = '/usr/local/lib/nagios/plugins'
@ -173,8 +184,6 @@ def install():
apt_update() apt_update()
apt_install(PACKAGES, fatal=True) apt_install(PACKAGES, fatal=True)
initialize_ufw() initialize_ufw()
status_set('maintenance', 'Setting up storage')
setup_storage()
ensure_swift_directories() ensure_swift_directories()
@ -186,6 +195,10 @@ def config_changed():
initialize_ufw() initialize_ufw()
else: else:
ufw.disable() ufw.disable()
if config('ephemeral-unmount'):
umount(config('ephemeral-unmount'), persist=True)
if config('prefer-ipv6'): if config('prefer-ipv6'):
status_set('maintenance', 'Configuring ipv6') status_set('maintenance', 'Configuring ipv6')
assert_charm_supports_ipv6() assert_charm_supports_ipv6()
@ -198,10 +211,9 @@ def config_changed():
status_set('maintenance', 'Running openstack upgrade') status_set('maintenance', 'Running openstack upgrade')
do_openstack_upgrade(configs=CONFIGS) do_openstack_upgrade(configs=CONFIGS)
setup_storage() install_vaultlocker()
for rid in relation_ids('swift-storage'): configure_storage()
swift_storage_relation_joined(rid=rid)
CONFIGS.write_all() CONFIGS.write_all()
@ -216,6 +228,17 @@ def config_changed():
add_to_updatedb_prunepath(STORAGE_MOUNT_PATH) add_to_updatedb_prunepath(STORAGE_MOUNT_PATH)
def install_vaultlocker():
"""Determine whether vaultlocker is required and install"""
if config('encrypt'):
pkgs = ['vaultlocker', 'python-hvac']
installed = len(filter_installed_packages(pkgs)) == 0
if not installed:
add_source('ppa:openstack-charmers/vaultlocker')
apt_update(fatal=True)
apt_install(pkgs, fatal=True)
@hooks.hook('upgrade-charm') @hooks.hook('upgrade-charm')
@harden() @harden()
def upgrade_charm(): def upgrade_charm():
@ -227,6 +250,10 @@ def upgrade_charm():
@hooks.hook() @hooks.hook()
def swift_storage_relation_joined(rid=None): def swift_storage_relation_joined(rid=None):
if config('encrypt') and not vaultlocker.vault_relation_complete():
log('Encryption configured and vault not ready, deferring',
level=DEBUG)
return
rel_settings = { rel_settings = {
'zone': config('zone'), 'zone': config('zone'),
'object_port': config('object-server-port'), 'object_port': config('object-server-port'),
@ -234,7 +261,8 @@ def swift_storage_relation_joined(rid=None):
'account_port': config('account-server-port'), 'account_port': config('account-server-port'),
} }
devs = determine_block_devices() or [] db = kv()
devs = db.get('prepared-devices', [])
devs = [os.path.basename(d) for d in devs] devs = [os.path.basename(d) for d in devs]
rel_settings['device'] = ':'.join(devs) rel_settings['device'] = ':'.join(devs)
# Keep a reference of devices we are adding to the ring # Keep a reference of devices we are adding to the ring
@ -272,6 +300,34 @@ def swift_storage_relation_departed():
revoke_access(removed_client, port) revoke_access(removed_client, port)
@hooks.hook('secrets-storage-relation-joined')
def secrets_storage_joined(relation_id=None):
relation_set(relation_id=relation_id,
secret_backend='charm-vaultlocker',
isolated=True,
access_address=get_relation_ip('secrets-storage'),
hostname=socket.gethostname())
@hooks.hook('secrets-storage-relation-changed')
def secrets_storage_changed():
vault_ca = relation_get('vault_ca')
if vault_ca:
vault_ca = base64.decodestring(json.loads(vault_ca).encode())
write_file('/usr/local/share/ca-certificates/vault-ca.crt',
vault_ca, perms=0o644)
subprocess.check_call(['update-ca-certificates', '--fresh'])
configure_storage()
@hooks.hook('storage.real')
def configure_storage():
setup_storage(config('encrypt'))
for rid in relation_ids('swift-storage'):
swift_storage_relation_joined(rid=rid)
@hooks.hook('nrpe-external-master-relation-joined') @hooks.hook('nrpe-external-master-relation-joined')
@hooks.hook('nrpe-external-master-relation-changed') @hooks.hook('nrpe-external-master-relation-changed')
def update_nrpe_config(): def update_nrpe_config():
@ -318,7 +374,10 @@ def main():
hooks.execute(sys.argv) hooks.execute(sys.argv)
except UnregisteredHookError as e: except UnregisteredHookError as e:
log('Unknown hook {} - skipping.'.format(e)) log('Unknown hook {} - skipping.'.format(e))
set_os_workload_status(CONFIGS, REQUIRED_INTERFACES, required_interfaces = copy.deepcopy(REQUIRED_INTERFACES)
if config('encrypt'):
required_interfaces['vault'] = ['secrets-storage']
set_os_workload_status(CONFIGS, required_interfaces,
charm_func=assess_status) charm_func=assess_status)
os_application_version_set(VERSION_PACKAGE) os_application_version_set(VERSION_PACKAGE)

View File

@ -4,6 +4,7 @@ import re
import subprocess import subprocess
import shutil import shutil
import tempfile import tempfile
import uuid
from subprocess import check_call, call, CalledProcessError, check_output from subprocess import check_call, call, CalledProcessError, check_output
@ -54,6 +55,8 @@ from charmhelpers.core.hookenv import (
relation_ids, relation_ids,
iter_units_for_relation_name, iter_units_for_relation_name,
ingress_address, ingress_address,
storage_list,
storage_get,
) )
from charmhelpers.contrib.network import ufw from charmhelpers.contrib.network import ufw
@ -62,6 +65,7 @@ from charmhelpers.contrib.network.ip import get_host_ip
from charmhelpers.contrib.storage.linux.utils import ( from charmhelpers.contrib.storage.linux.utils import (
is_block_device, is_block_device,
is_device_mounted, is_device_mounted,
mkfs_xfs,
) )
from charmhelpers.contrib.storage.linux.loopback import ( from charmhelpers.contrib.storage.linux.loopback import (
@ -84,6 +88,10 @@ from charmhelpers.core.decorators import (
retry_on_exception, retry_on_exception,
) )
import charmhelpers.contrib.openstack.vaultlocker as vaultlocker
from charmhelpers.core.unitdata import kv
PACKAGES = [ PACKAGES = [
'swift', 'swift-account', 'swift-container', 'swift-object', 'swift', 'swift-account', 'swift-container', 'swift-object',
'xfsprogs', 'gdisk', 'lvm2', 'python-jinja2', 'python-psutil', 'xfsprogs', 'gdisk', 'lvm2', 'python-jinja2', 'python-psutil',
@ -162,11 +170,14 @@ def register_configs():
[SwiftStorageContext()]) [SwiftStorageContext()])
configs.register('/etc/rsync-juju.d/050-swift-storage.conf', configs.register('/etc/rsync-juju.d/050-swift-storage.conf',
[RsyncContext(), SwiftStorageServerContext()]) [RsyncContext(), SwiftStorageServerContext()])
# NOTE: add VaultKVContext so interface status can be assessed
for server in ['account', 'object', 'container']: for server in ['account', 'object', 'container']:
configs.register('/etc/swift/%s-server.conf' % server, configs.register('/etc/swift/%s-server.conf' % server,
[SwiftStorageServerContext(), [SwiftStorageServerContext(),
context.BindHostContext(), context.BindHostContext(),
context.WorkerConfigContext()]), context.WorkerConfigContext(),
vaultlocker.VaultKVContext(
vaultlocker.VAULTLOCKER_BACKEND)]),
return configs return configs
@ -269,6 +280,12 @@ def determine_block_devices():
else: else:
bdevs = block_device.split(' ') bdevs = block_device.split(' ')
# List storage instances for the 'block-devices'
# store declared for this charm too, and add
# their block device paths to the list.
storage_ids = storage_list('block-devices')
bdevs.extend((storage_get('location', s) for s in storage_ids))
bdevs = list(set(bdevs)) bdevs = list(set(bdevs))
# attempt to ensure block devices, but filter out missing devs # attempt to ensure block devices, but filter out missing devs
_none = ['None', 'none'] _none = ['None', 'none']
@ -279,19 +296,6 @@ def determine_block_devices():
return valid_bdevs return valid_bdevs
def mkfs_xfs(bdev, force=False):
"""Format device with XFS filesystem.
By default this should fail if the device already has a filesystem on it.
"""
cmd = ['mkfs.xfs']
if force:
cmd.append("-f")
cmd += ['-i', 'size=1024', bdev]
check_call(cmd)
def devstore_safe_load(devstore): def devstore_safe_load(devstore):
"""Attempt to decode json data and return None if an error occurs while """Attempt to decode json data and return None if an error occurs while
also printing a log. also printing a log.
@ -446,21 +450,73 @@ def ensure_devs_tracked():
is_device_in_ring(dev, skip_rel_check=True) is_device_in_ring(dev, skip_rel_check=True)
def setup_storage(): def setup_storage(encrypt=False):
# Preflight check vault relation if encryption is enabled
vault_kv = vaultlocker.VaultKVContext(vaultlocker.VAULTLOCKER_BACKEND)
context = vault_kv()
if encrypt and not vault_kv.complete:
log("Encryption requested but vault relation not complete",
level=DEBUG)
return
elif encrypt and vault_kv.complete:
# NOTE: only write vaultlocker configuration once relation is complete
# otherwise we run the chance of an empty configuration file
# being installed on a machine with other vaultlocker based
# services
vaultlocker.write_vaultlocker_conf(context, priority=90)
# Ensure /srv/node exists just in case no disks # Ensure /srv/node exists just in case no disks
# are detected and used. # are detected and used.
mkdir(os.path.join('/srv', 'node'), mkdir(os.path.join('/srv', 'node'),
owner='swift', group='swift', owner='swift', group='swift',
perms=0o755) perms=0o755)
reformat = str(config('overwrite')).lower() == "true" reformat = str(config('overwrite')).lower() == "true"
db = kv()
prepared_devices = db.get('prepared-devices', [])
for dev in determine_block_devices(): for dev in determine_block_devices():
if dev in prepared_devices:
log('Device {} already processed by charm,'
' skipping'.format(dev))
continue
if is_device_in_ring(os.path.basename(dev)): if is_device_in_ring(os.path.basename(dev)):
log("Device '%s' already in the ring - ignoring" % (dev)) log("Device '%s' already in the ring - ignoring" % (dev))
# NOTE: record existing use of device dealing with
# upgrades from older versions of charms without
# this feature
prepared_devices.append(dev)
db.set('prepared-devices', prepared_devices)
db.flush()
continue
# NOTE: this deals with a dm-crypt'ed block device already in
# use
if is_device_mounted(dev):
log("Device '{}' is already mounted, ignoring".format(dev))
continue continue
if reformat: if reformat:
clean_storage(dev) clean_storage(dev)
loopback_device = is_mapped_loopback_device(dev)
options = None
if encrypt and not loopback_device:
dev_uuid = str(uuid.uuid4())
check_call(['vaultlocker', 'encrypt',
'--uuid', dev_uuid,
dev])
dev = '/dev/mapper/crypt-{}'.format(dev_uuid)
options = ','.join([
"defaults",
"nofail",
("x-systemd.requires="
"vaultlocker-decrypt@{uuid}.service".format(uuid=dev_uuid)),
"comment=vaultlocker",
])
try: try:
# If not cleaned and in use, mkfs should fail. # If not cleaned and in use, mkfs should fail.
mkfs_xfs(dev, force=reformat) mkfs_xfs(dev, force=reformat)
@ -475,8 +531,6 @@ def setup_storage():
_mp = os.path.join('/srv', 'node', basename) _mp = os.path.join('/srv', 'node', basename)
mkdir(_mp, owner='swift', group='swift') mkdir(_mp, owner='swift', group='swift')
options = None
loopback_device = is_mapped_loopback_device(dev)
mountpoint = '/srv/node/%s' % basename mountpoint = '/srv/node/%s' % basename
if loopback_device: if loopback_device:
# If an exiting fstab entry exists using the image file as the # If an exiting fstab entry exists using the image file as the
@ -497,6 +551,12 @@ def setup_storage():
check_call(['chown', '-R', 'swift:swift', mountpoint]) check_call(['chown', '-R', 'swift:swift', mountpoint])
check_call(['chmod', '-R', '0755', mountpoint]) check_call(['chmod', '-R', '0755', mountpoint])
# NOTE: record preparation of device - this will be used when
# providing block device configuration for ring builders.
prepared_devices.append(dev)
db.set('prepared-devices', prepared_devices)
db.flush()
@retry_on_exception(3, base_delay=2, exc_type=CalledProcessError) @retry_on_exception(3, base_delay=2, exc_type=CalledProcessError)
def fetch_swift_rings(rings_url): def fetch_swift_rings(rings_url):

View File

@ -28,3 +28,12 @@ provides:
scope: container scope: container
swift-storage: swift-storage:
interface: swift interface: swift
requires:
secrets-storage:
interface: vault-kv
storage:
block-devices:
type: block
multiple:
range: 0-
minimum-size: 1G

View File

@ -0,0 +1,6 @@
# vaultlocker configuration from swift-proxy charm
[vault]
url = {{ vault_url }}
approle = {{ role_id }}
backend = {{ secret_backend }}
secret_id = {{ secret_id }}

View File

@ -5,12 +5,12 @@ coverage>=3.6
mock>=1.2 mock>=1.2
flake8>=2.2.4,<=2.4.1 flake8>=2.2.4,<=2.4.1
os-testr>=0.4.1 os-testr>=0.4.1
charm-tools>=2.0.0 charm-tools>=2.0.0;python_version=='2.7' # cheetah templates aren't availble in Python 3+
requests==2.6.0 requests==2.6.0
# BEGIN: Amulet OpenStack Charm Helper Requirements # BEGIN: Amulet OpenStack Charm Helper Requirements
# Liberty client lower constraints # Liberty client lower constraints
amulet>=1.14.3,<2.0 amulet>=1.14.3,<2.0
bundletester>=0.6.1,<1.0 bundletester>=0.6.1,<1.0;python_version=='2.7' # cheetah templates aren't availble in Python 3+
python-ceilometerclient>=1.5.0 python-ceilometerclient>=1.5.0
python-cinderclient>=1.4.0 python-cinderclient>=1.4.0
python-glanceclient>=1.1.0 python-glanceclient>=1.1.0

View File

@ -96,6 +96,7 @@ class SwiftStorageBasicDeployment(OpenStackAmuletDeployment):
'zone': '1', 'zone': '1',
'block-device': 'vdb', 'block-device': 'vdb',
'overwrite': 'true', 'overwrite': 'true',
'ephemeral-unmount': '/mnt',
} }
pxc_config = { pxc_config = {
'innodb-buffer-pool-size': '256M', 'innodb-buffer-pool-size': '256M',

View File

@ -441,7 +441,7 @@ class OpenStackAmuletUtils(AmuletUtils):
if rel.get('api_version') != str(api_version): if rel.get('api_version') != str(api_version):
raise Exception("api_version not propagated through relation" raise Exception("api_version not propagated through relation"
" data yet ('{}' != '{}')." " data yet ('{}' != '{}')."
"".format(rel['api_version'], api_version)) "".format(rel.get('api_version'), api_version))
def keystone_configure_api_version(self, sentry_relation_pairs, deployment, def keystone_configure_api_version(self, sentry_relation_pairs, deployment,
api_version): api_version):
@ -463,16 +463,13 @@ class OpenStackAmuletUtils(AmuletUtils):
deployment._auto_wait_for_status() deployment._auto_wait_for_status()
self.keystone_wait_for_propagation(sentry_relation_pairs, api_version) self.keystone_wait_for_propagation(sentry_relation_pairs, api_version)
def authenticate_cinder_admin(self, keystone_sentry, username, def authenticate_cinder_admin(self, keystone, api_version=2):
password, tenant, api_version=2):
"""Authenticates admin user with cinder.""" """Authenticates admin user with cinder."""
# NOTE(beisner): cinder python client doesn't accept tokens. self.log.debug('Authenticating cinder admin...')
keystone_ip = keystone_sentry.info['public-address']
ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8'))
_clients = { _clients = {
1: cinder_client.Client, 1: cinder_client.Client,
2: cinder_clientv2.Client} 2: cinder_clientv2.Client}
return _clients[api_version](username, password, tenant, ept) return _clients[api_version](session=keystone.session)
def authenticate_keystone(self, keystone_ip, username, password, def authenticate_keystone(self, keystone_ip, username, password,
api_version=False, admin_port=False, api_version=False, admin_port=False,

View File

@ -27,6 +27,7 @@ import glob
import os import os
import json import json
import yaml import yaml
import re
import subprocess import subprocess
import sys import sys
import errno import errno
@ -67,7 +68,7 @@ def cached(func):
@wraps(func) @wraps(func)
def wrapper(*args, **kwargs): def wrapper(*args, **kwargs):
global cache global cache
key = str((func, args, kwargs)) key = json.dumps((func, args, kwargs), sort_keys=True, default=str)
try: try:
return cache[key] return cache[key]
except KeyError: except KeyError:
@ -289,7 +290,7 @@ class Config(dict):
self.implicit_save = True self.implicit_save = True
self._prev_dict = None self._prev_dict = None
self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME) self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
if os.path.exists(self.path): if os.path.exists(self.path) and os.stat(self.path).st_size:
self.load_previous() self.load_previous()
atexit(self._implicit_save) atexit(self._implicit_save)
@ -309,7 +310,11 @@ class Config(dict):
""" """
self.path = path or self.path self.path = path or self.path
with open(self.path) as f: with open(self.path) as f:
self._prev_dict = json.load(f) try:
self._prev_dict = json.load(f)
except ValueError as e:
log('Unable to parse previous config data - {}'.format(str(e)),
level=ERROR)
for k, v in copy.deepcopy(self._prev_dict).items(): for k, v in copy.deepcopy(self._prev_dict).items():
if k not in self: if k not in self:
self[k] = v self[k] = v
@ -353,22 +358,40 @@ class Config(dict):
self.save() self.save()
@cached _cache_config = None
def config(scope=None): def config(scope=None):
"""Juju charm configuration""" """
config_cmd_line = ['config-get'] Get the juju charm configuration (scope==None) or individual key,
if scope is not None: (scope=str). The returned value is a Python data structure loaded as
config_cmd_line.append(scope) JSON from the Juju config command.
else:
config_cmd_line.append('--all') :param scope: If set, return the value for the specified key.
config_cmd_line.append('--format=json') :type scope: Optional[str]
:returns: Either the whole config as a Config, or a key from it.
:rtype: Any
"""
global _cache_config
config_cmd_line = ['config-get', '--all', '--format=json']
try: try:
config_data = json.loads( # JSON Decode Exception for Python3.5+
subprocess.check_output(config_cmd_line).decode('UTF-8')) exc_json = json.decoder.JSONDecodeError
except AttributeError:
# JSON Decode Exception for Python2.7 through Python3.4
exc_json = ValueError
try:
if _cache_config is None:
config_data = json.loads(
subprocess.check_output(config_cmd_line).decode('UTF-8'))
_cache_config = Config(config_data)
if scope is not None: if scope is not None:
return config_data return _cache_config.get(scope)
return Config(config_data) return _cache_config
except ValueError: except (exc_json, UnicodeDecodeError) as e:
log('Unable to parse output from config-get: config_cmd_line="{}" '
'message="{}"'
.format(config_cmd_line, str(e)), level=ERROR)
return None return None
@ -1043,7 +1066,6 @@ def juju_version():
universal_newlines=True).strip() universal_newlines=True).strip()
@cached
def has_juju_version(minimum_version): def has_juju_version(minimum_version):
"""Return True if the Juju version is at least the provided version""" """Return True if the Juju version is at least the provided version"""
return LooseVersion(juju_version()) >= LooseVersion(minimum_version) return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
@ -1103,6 +1125,8 @@ def _run_atexit():
@translate_exc(from_exc=OSError, to_exc=NotImplementedError) @translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def network_get_primary_address(binding): def network_get_primary_address(binding):
''' '''
Deprecated since Juju 2.3; use network_get()
Retrieve the primary network address for a named binding Retrieve the primary network address for a named binding
:param binding: string. The name of a relation of extra-binding :param binding: string. The name of a relation of extra-binding
@ -1123,7 +1147,6 @@ def network_get_primary_address(binding):
return response return response
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def network_get(endpoint, relation_id=None): def network_get(endpoint, relation_id=None):
""" """
Retrieve the network details for a relation endpoint Retrieve the network details for a relation endpoint
@ -1131,24 +1154,20 @@ def network_get(endpoint, relation_id=None):
:param endpoint: string. The name of a relation endpoint :param endpoint: string. The name of a relation endpoint
:param relation_id: int. The ID of the relation for the current context. :param relation_id: int. The ID of the relation for the current context.
:return: dict. The loaded YAML output of the network-get query. :return: dict. The loaded YAML output of the network-get query.
:raise: NotImplementedError if run on Juju < 2.1 :raise: NotImplementedError if request not supported by the Juju version.
""" """
if not has_juju_version('2.2'):
raise NotImplementedError(juju_version()) # earlier versions require --primary-address
if relation_id and not has_juju_version('2.3'):
raise NotImplementedError # 2.3 added the -r option
cmd = ['network-get', endpoint, '--format', 'yaml'] cmd = ['network-get', endpoint, '--format', 'yaml']
if relation_id: if relation_id:
cmd.append('-r') cmd.append('-r')
cmd.append(relation_id) cmd.append(relation_id)
try: response = subprocess.check_output(
response = subprocess.check_output( cmd,
cmd, stderr=subprocess.STDOUT).decode('UTF-8').strip()
stderr=subprocess.STDOUT).decode('UTF-8').strip()
except CalledProcessError as e:
# Early versions of Juju 2.0.x required the --primary-address argument.
# We catch that condition here and raise NotImplementedError since
# the requested semantics are not available - the caller can then
# use the network_get_primary_address() method instead.
if '--primary-address is currently required' in e.output.decode('UTF-8'):
raise NotImplementedError
raise
return yaml.safe_load(response) return yaml.safe_load(response)
@ -1204,9 +1223,23 @@ def iter_units_for_relation_name(relation_name):
def ingress_address(rid=None, unit=None): def ingress_address(rid=None, unit=None):
""" """
Retrieve the ingress-address from a relation when available. Otherwise, Retrieve the ingress-address from a relation when available.
return the private-address. This function is to be used on the consuming Otherwise, return the private-address.
side of the relation.
When used on the consuming side of the relation (unit is a remote
unit), the ingress-address is the IP address that this unit needs
to use to reach the provided service on the remote unit.
When used on the providing side of the relation (unit == local_unit()),
the ingress-address is the IP address that is advertised to remote
units on this relation. Remote units need to use this address to
reach the local provided service on this unit.
Note that charms may document some other method to use in
preference to the ingress_address(), such as an address provided
on a different relation attribute or a service discovery mechanism.
This allows charms to redirect inbound connections to their peers
or different applications such as load balancers.
Usage: Usage:
addresses = [ingress_address(rid=u.rid, unit=u.unit) addresses = [ingress_address(rid=u.rid, unit=u.unit)
@ -1220,3 +1253,40 @@ def ingress_address(rid=None, unit=None):
settings = relation_get(rid=rid, unit=unit) settings = relation_get(rid=rid, unit=unit)
return (settings.get('ingress-address') or return (settings.get('ingress-address') or
settings.get('private-address')) settings.get('private-address'))
def egress_subnets(rid=None, unit=None):
"""
Retrieve the egress-subnets from a relation.
This function is to be used on the providing side of the
relation, and provides the ranges of addresses that client
connections may come from. The result is uninteresting on
the consuming side of a relation (unit == local_unit()).
Returns a stable list of subnets in CIDR format.
eg. ['192.168.1.0/24', '2001::F00F/128']
If egress-subnets is not available, falls back to using the published
ingress-address, or finally private-address.
:param rid: string relation id
:param unit: string unit name
:side effect: calls relation_get
:return: list of subnets in CIDR format. eg. ['192.168.1.0/24', '2001::F00F/128']
"""
def _to_range(addr):
if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None:
addr += '/32'
elif ':' in addr and '/' not in addr: # IPv6
addr += '/128'
return addr
settings = relation_get(rid=rid, unit=unit)
if 'egress-subnets' in settings:
return [n.strip() for n in settings['egress-subnets'].split(',') if n.strip()]
if 'ingress-address' in settings:
return [_to_range(settings['ingress-address'])]
if 'private-address' in settings:
return [_to_range(settings['private-address'])]
return [] # Should never happen

View File

@ -993,7 +993,7 @@ def updatedb(updatedb_text, new_path):
return output return output
def modulo_distribution(modulo=3, wait=30): def modulo_distribution(modulo=3, wait=30, non_zero_wait=False):
""" Modulo distribution """ Modulo distribution
This helper uses the unit number, a modulo value and a constant wait time This helper uses the unit number, a modulo value and a constant wait time
@ -1015,7 +1015,14 @@ def modulo_distribution(modulo=3, wait=30):
@param modulo: int The modulo number creates the group distribution @param modulo: int The modulo number creates the group distribution
@param wait: int The constant time wait value @param wait: int The constant time wait value
@param non_zero_wait: boolean Override unit % modulo == 0,
return modulo * wait. Used to avoid collisions with
leader nodes which are often given priority.
@return: int Calculated time to wait for unit operation @return: int Calculated time to wait for unit operation
""" """
unit_number = int(local_unit().split('/')[1]) unit_number = int(local_unit().split('/')[1])
return (unit_number % modulo) * wait calculated_wait_time = (unit_number % modulo) * wait
if non_zero_wait and calculated_wait_time == 0:
return modulo * wait
else:
return calculated_wait_time

View File

@ -307,23 +307,34 @@ class PortManagerCallback(ManagerCallback):
""" """
def __call__(self, manager, service_name, event_name): def __call__(self, manager, service_name, event_name):
service = manager.get_service(service_name) service = manager.get_service(service_name)
new_ports = service.get('ports', []) # turn this generator into a list,
# as we'll be going over it multiple times
new_ports = list(service.get('ports', []))
port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name)) port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
if os.path.exists(port_file): if os.path.exists(port_file):
with open(port_file) as fp: with open(port_file) as fp:
old_ports = fp.read().split(',') old_ports = fp.read().split(',')
for old_port in old_ports: for old_port in old_ports:
if bool(old_port): if bool(old_port) and not self.ports_contains(old_port, new_ports):
old_port = int(old_port) hookenv.close_port(old_port)
if old_port not in new_ports:
hookenv.close_port(old_port)
with open(port_file, 'w') as fp: with open(port_file, 'w') as fp:
fp.write(','.join(str(port) for port in new_ports)) fp.write(','.join(str(port) for port in new_ports))
for port in new_ports: for port in new_ports:
# A port is either a number or 'ICMP'
protocol = 'TCP'
if str(port).upper() == 'ICMP':
protocol = 'ICMP'
if event_name == 'start': if event_name == 'start':
hookenv.open_port(port) hookenv.open_port(port, protocol)
elif event_name == 'stop': elif event_name == 'stop':
hookenv.close_port(port) hookenv.close_port(port, protocol)
def ports_contains(self, port, ports):
if not bool(port):
return False
if str(port).upper() != 'ICMP':
port = int(port)
return port in ports
def service_stop(service_name): def service_stop(service_name):

View File

@ -31,18 +31,22 @@ __author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
def create(sysctl_dict, sysctl_file): def create(sysctl_dict, sysctl_file):
"""Creates a sysctl.conf file from a YAML associative array """Creates a sysctl.conf file from a YAML associative array
:param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }" :param sysctl_dict: a dict or YAML-formatted string of sysctl
options eg "{ 'kernel.max_pid': 1337 }"
:type sysctl_dict: str :type sysctl_dict: str
:param sysctl_file: path to the sysctl file to be saved :param sysctl_file: path to the sysctl file to be saved
:type sysctl_file: str or unicode :type sysctl_file: str or unicode
:returns: None :returns: None
""" """
try: if type(sysctl_dict) is not dict:
sysctl_dict_parsed = yaml.safe_load(sysctl_dict) try:
except yaml.YAMLError: sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict), except yaml.YAMLError:
level=ERROR) log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
return level=ERROR)
return
else:
sysctl_dict_parsed = sysctl_dict
with open(sysctl_file, "w") as fd: with open(sysctl_file, "w") as fd:
for key, value in sysctl_dict_parsed.items(): for key, value in sysctl_dict_parsed.items():

View File

@ -166,6 +166,10 @@ class Storage(object):
To support dicts, lists, integer, floats, and booleans values To support dicts, lists, integer, floats, and booleans values
are automatically json encoded/decoded. are automatically json encoded/decoded.
Note: to facilitate unit testing, ':memory:' can be passed as the
path parameter which causes sqlite3 to only build the db in memory.
This should only be used for testing purposes.
""" """
def __init__(self, path=None): def __init__(self, path=None):
self.db_path = path self.db_path = path
@ -175,8 +179,9 @@ class Storage(object):
else: else:
self.db_path = os.path.join( self.db_path = os.path.join(
os.environ.get('CHARM_DIR', ''), '.unit-state.db') os.environ.get('CHARM_DIR', ''), '.unit-state.db')
with open(self.db_path, 'a') as f: if self.db_path != ':memory:':
os.fchmod(f.fileno(), 0o600) with open(self.db_path, 'a') as f:
os.fchmod(f.fileno(), 0o600)
self.conn = sqlite3.connect('%s' % self.db_path) self.conn = sqlite3.connect('%s' % self.db_path)
self.cursor = self.conn.cursor() self.cursor = self.conn.cursor()
self.revision = None self.revision = None

View File

@ -26,6 +26,11 @@ basepython = python3.5
deps = -r{toxinidir}/requirements.txt deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt -r{toxinidir}/test-requirements.txt
[testenv:py36]
basepython = python3.6
deps = -r{toxinidir}/requirements.txt
-r{toxinidir}/test-requirements.txt
[testenv:pep8] [testenv:pep8]
basepython = python2.7 basepython = python2.7
deps = -r{toxinidir}/requirements.txt deps = -r{toxinidir}/requirements.txt

View File

@ -18,7 +18,7 @@ import json
import uuid import uuid
import tempfile import tempfile
from test_utils import CharmTestCase, patch_open from test_utils import CharmTestCase, TestKV, patch_open
with patch('hooks.charmhelpers.contrib.hardening.harden.harden') as mock_dec: with patch('hooks.charmhelpers.contrib.hardening.harden.harden') as mock_dec:
mock_dec.side_effect = (lambda *dargs, **dkwargs: lambda f: mock_dec.side_effect = (lambda *dargs, **dkwargs: lambda f:
@ -47,7 +47,6 @@ TO_PATCH = [
'configure_installation_source', 'configure_installation_source',
'openstack_upgrade_available', 'openstack_upgrade_available',
# swift_storage_utils # swift_storage_utils
'determine_block_devices',
'do_openstack_upgrade', 'do_openstack_upgrade',
'ensure_swift_directories', 'ensure_swift_directories',
'execd_preinstall', 'execd_preinstall',
@ -66,6 +65,7 @@ TO_PATCH = [
'ufw', 'ufw',
'setup_ufw', 'setup_ufw',
'revoke_access', 'revoke_access',
'kv',
] ]
@ -93,6 +93,8 @@ class SwiftStorageRelationsTests(CharmTestCase):
self.config.side_effect = self.test_config.get self.config.side_effect = self.test_config.get
self.relation_get.side_effect = self.test_relation.get self.relation_get.side_effect = self.test_relation.get
self.get_relation_ip.return_value = '10.10.10.2' self.get_relation_ip.return_value = '10.10.10.2'
self.test_kv = TestKV()
self.kv.return_value = self.test_kv
@patch.object(hooks, 'add_ufw_gre_rule', lambda *args: None) @patch.object(hooks, 'add_ufw_gre_rule', lambda *args: None)
def test_prunepath(self): def test_prunepath(self):
@ -108,8 +110,6 @@ class SwiftStorageRelationsTests(CharmTestCase):
) )
self.assertTrue(self.apt_update.called) self.assertTrue(self.apt_update.called)
self.apt_install.assert_called_with(PACKAGES, fatal=True) self.apt_install.assert_called_with(PACKAGES, fatal=True)
self.assertTrue(self.setup_storage.called)
self.assertTrue(self.execd_preinstall.called) self.assertTrue(self.execd_preinstall.called)
@patch.object(hooks, 'add_ufw_gre_rule', lambda *args: None) @patch.object(hooks, 'add_ufw_gre_rule', lambda *args: None)
@ -197,7 +197,7 @@ class SwiftStorageRelationsTests(CharmTestCase):
kvstore = mock_kvstore.return_value kvstore = mock_kvstore.return_value
kvstore.__enter__.return_value = kvstore kvstore.__enter__.return_value = kvstore
kvstore.get.return_value = None kvstore.get.return_value = None
self.determine_block_devices.return_value = ['/dev/vdb'] self.test_kv.set('prepared-devices', ['/dev/vdb'])
hooks.swift_storage_relation_joined() hooks.swift_storage_relation_joined()
@ -254,8 +254,8 @@ class SwiftStorageRelationsTests(CharmTestCase):
test_uuid = uuid.uuid4() test_uuid = uuid.uuid4()
test_environ = {'JUJU_ENV_UUID': test_uuid} test_environ = {'JUJU_ENV_UUID': test_uuid}
mock_environ.get.side_effect = test_environ.get mock_environ.get.side_effect = test_environ.get
self.determine_block_devices.return_value = ['/dev/vdb', '/dev/vdc', self.test_kv.set('prepared-devices', ['/dev/vdb', '/dev/vdc',
'/dev/vdd'] '/dev/vdd'])
mock_local_unit.return_value = 'test/0' mock_local_unit.return_value = 'test/0'
kvstore = mock_kvstore.return_value kvstore = mock_kvstore.return_value
kvstore.__enter__.return_value = kvstore kvstore.__enter__.return_value = kvstore
@ -298,8 +298,8 @@ class SwiftStorageRelationsTests(CharmTestCase):
test_uuid = uuid.uuid4() test_uuid = uuid.uuid4()
test_environ = {'JUJU_ENV_UUID': test_uuid} test_environ = {'JUJU_ENV_UUID': test_uuid}
mock_environ.get.side_effect = test_environ.get mock_environ.get.side_effect = test_environ.get
self.determine_block_devices.return_value = ['/dev/vdb', '/dev/vdc', self.test_kv.set('prepared-devices', ['/dev/vdb', '/dev/vdc',
'/dev/vdd'] '/dev/vdd'])
mock_local_unit.return_value = 'test/0' mock_local_unit.return_value = 'test/0'
kvstore = mock_kvstore.return_value kvstore = mock_kvstore.return_value
kvstore.__enter__.return_value = kvstore kvstore.__enter__.return_value = kvstore

View File

@ -17,7 +17,7 @@ import tempfile
from collections import namedtuple from collections import namedtuple
from mock import call, patch, MagicMock from mock import call, patch, MagicMock
from test_utils import CharmTestCase, patch_open from test_utils import CharmTestCase, TestKV, patch_open
import lib.swift_storage_utils as swift_utils import lib.swift_storage_utils as swift_utils
@ -50,6 +50,8 @@ TO_PATCH = [
'iter_units_for_relation_name', 'iter_units_for_relation_name',
'ingress_address', 'ingress_address',
'relation_ids', 'relation_ids',
'vaultlocker',
'kv',
] ]
@ -104,11 +106,14 @@ TARGET SOURCE FSTYPE OPTIONS
""" """
class SwiftStorageUtilsTests(CharmTestCase): class SwiftStorageUtilsTests(CharmTestCase):
def setUp(self): def setUp(self):
super(SwiftStorageUtilsTests, self).setUp(swift_utils, TO_PATCH) super(SwiftStorageUtilsTests, self).setUp(swift_utils, TO_PATCH)
self.config.side_effect = self.test_config.get self.config.side_effect = self.test_config.get
self.test_kv = TestKV()
self.kv.return_value = self.test_kv
def test_ensure_swift_directories(self): def test_ensure_swift_directories(self):
with patch('os.path.isdir') as isdir: with patch('os.path.isdir') as isdir:
@ -229,18 +234,6 @@ class SwiftStorageUtilsTests(CharmTestCase):
self.assertTrue(_find.called) self.assertTrue(_find.called)
self.assertEqual(result, []) self.assertEqual(result, [])
def test_mkfs_xfs(self):
swift_utils.mkfs_xfs('/dev/sdb')
self.check_call.assert_called_with(
['mkfs.xfs', '-i', 'size=1024', '/dev/sdb']
)
def test_mkfs_xfs_force(self):
swift_utils.mkfs_xfs('/dev/sdb', force=True)
self.check_call.assert_called_with(
['mkfs.xfs', '-f', '-i', 'size=1024', '/dev/sdb']
)
@patch.object(swift_utils.charmhelpers.core.fstab, "Fstab") @patch.object(swift_utils.charmhelpers.core.fstab, "Fstab")
@patch.object(swift_utils, 'is_device_in_ring') @patch.object(swift_utils, 'is_device_in_ring')
@patch.object(swift_utils, 'clean_storage') @patch.object(swift_utils, 'clean_storage')
@ -249,6 +242,7 @@ class SwiftStorageUtilsTests(CharmTestCase):
def test_setup_storage_no_overwrite(self, determine, mkfs, clean, def test_setup_storage_no_overwrite(self, determine, mkfs, clean,
mock_is_device_in_ring, mock_Fstab): mock_is_device_in_ring, mock_Fstab):
mock_is_device_in_ring.return_value = False mock_is_device_in_ring.return_value = False
self.is_device_mounted.return_value = False
determine.return_value = ['/dev/vdb'] determine.return_value = ['/dev/vdb']
swift_utils.setup_storage() swift_utils.setup_storage()
self.assertFalse(clean.called) self.assertFalse(clean.called)
@ -260,6 +254,8 @@ class SwiftStorageUtilsTests(CharmTestCase):
perms=0o755), perms=0o755),
call('/srv/node/vdb', group='swift', owner='swift') call('/srv/node/vdb', group='swift', owner='swift')
]) ])
self.assertEqual(self.test_kv.get('prepared-devices'),
['/dev/vdb'])
@patch.object(swift_utils, 'is_device_in_ring') @patch.object(swift_utils, 'is_device_in_ring')
@patch.object(swift_utils, 'clean_storage') @patch.object(swift_utils, 'clean_storage')
@ -270,6 +266,7 @@ class SwiftStorageUtilsTests(CharmTestCase):
self.test_config.set('overwrite', True) self.test_config.set('overwrite', True)
mock_is_device_in_ring.return_value = False mock_is_device_in_ring.return_value = False
self.is_mapped_loopback_device.return_value = None self.is_mapped_loopback_device.return_value = None
self.is_device_mounted.return_value = False
determine.return_value = ['/dev/vdb'] determine.return_value = ['/dev/vdb']
swift_utils.setup_storage() swift_utils.setup_storage()
clean.assert_called_with('/dev/vdb') clean.assert_called_with('/dev/vdb')
@ -288,6 +285,8 @@ class SwiftStorageUtilsTests(CharmTestCase):
perms=0o755), perms=0o755),
call('/srv/node/vdb', group='swift', owner='swift') call('/srv/node/vdb', group='swift', owner='swift')
]) ])
self.assertEqual(self.test_kv.get('prepared-devices'),
['/dev/vdb'])
@patch.object(swift_utils, 'is_device_in_ring') @patch.object(swift_utils, 'is_device_in_ring')
@patch.object(swift_utils, 'determine_block_devices') @patch.object(swift_utils, 'determine_block_devices')
@ -304,6 +303,71 @@ class SwiftStorageUtilsTests(CharmTestCase):
swift_utils.setup_storage() swift_utils.setup_storage()
self.assertEqual(self.check_call.call_count, 0) self.assertEqual(self.check_call.call_count, 0)
@patch.object(swift_utils, "uuid")
@patch.object(swift_utils, "vaultlocker")
@patch.object(swift_utils.charmhelpers.core.fstab, "Fstab")
@patch.object(swift_utils, 'is_device_in_ring')
@patch.object(swift_utils, 'clean_storage')
@patch.object(swift_utils, 'mkfs_xfs')
@patch.object(swift_utils, 'determine_block_devices')
def test_setup_storage_encrypt(self, determine, mkfs, clean,
mock_is_device_in_ring, mock_Fstab,
mock_vaultlocker, mock_uuid):
mock_context = MagicMock()
mock_context.complete = True
mock_context.return_value = 'test_context'
mock_vaultlocker.VaultKVContext.return_value = mock_context
mock_uuid.uuid4.return_value = '7c3ff7c8-fd20-4dca-9be6-6f44f213d3fe'
mock_is_device_in_ring.return_value = False
self.is_device_mounted.return_value = False
self.is_mapped_loopback_device.return_value = None
determine.return_value = ['/dev/vdb']
swift_utils.setup_storage(encrypt=True)
self.assertFalse(clean.called)
calls = [
call(['vaultlocker', 'encrypt',
'--uuid', '7c3ff7c8-fd20-4dca-9be6-6f44f213d3fe',
'/dev/vdb']),
call(['chown', '-R', 'swift:swift',
'/srv/node/crypt-7c3ff7c8-fd20-4dca-9be6-6f44f213d3fe']),
call(['chmod', '-R', '0755',
'/srv/node/crypt-7c3ff7c8-fd20-4dca-9be6-6f44f213d3fe'])
]
self.check_call.assert_has_calls(calls)
self.mkdir.assert_has_calls([
call('/srv/node', owner='swift', group='swift',
perms=0o755),
call('/srv/node/crypt-7c3ff7c8-fd20-4dca-9be6-6f44f213d3fe',
group='swift', owner='swift')
])
self.assertEqual(self.test_kv.get('prepared-devices'),
['/dev/mapper/crypt-7c3ff7c8-fd20-4dca-9be6-6f44f213d3fe'])
mock_vaultlocker.write_vaultlocker_conf.assert_called_with(
'test_context',
priority=90
)
@patch.object(swift_utils, "uuid")
@patch.object(swift_utils, "vaultlocker")
@patch.object(swift_utils.charmhelpers.core.fstab, "Fstab")
@patch.object(swift_utils, 'is_device_in_ring')
@patch.object(swift_utils, 'clean_storage')
@patch.object(swift_utils, 'mkfs_xfs')
@patch.object(swift_utils, 'determine_block_devices')
def test_setup_storage_encrypt_noready(self, determine, mkfs, clean,
mock_is_device_in_ring, mock_Fstab,
mock_vaultlocker, mock_uuid):
mock_context = MagicMock()
mock_context.complete = False
mock_context.return_value = {}
mock_vaultlocker.VaultKVContext.return_value = mock_context
swift_utils.setup_storage(encrypt=True)
mock_vaultlocker.write_vaultlocker_conf.assert_not_called()
clean.assert_not_called()
self.check_call.assert_not_called()
self.mkdir.assert_not_called()
self.assertEqual(self.test_kv.get('prepared-devices'), None)
def _fake_is_device_mounted(self, device): def _fake_is_device_mounted(self, device):
if device in ["/dev/sda", "/dev/vda", "/dev/cciss/c0d0"]: if device in ["/dev/sda", "/dev/vda", "/dev/cciss/c0d0"]:
return True return True
@ -373,6 +437,7 @@ class SwiftStorageUtilsTests(CharmTestCase):
server.return_value = 'swift_server_context' server.return_value = 'swift_server_context'
bind_context.return_value = 'bind_host_context' bind_context.return_value = 'bind_host_context'
worker_context.return_value = 'worker_context' worker_context.return_value = 'worker_context'
self.vaultlocker.VaultKVContext.return_value = 'vl_context'
self.get_os_codename_package.return_value = 'grizzly' self.get_os_codename_package.return_value = 'grizzly'
configs = MagicMock() configs = MagicMock()
configs.register = MagicMock() configs.register = MagicMock()
@ -386,13 +451,16 @@ class SwiftStorageUtilsTests(CharmTestCase):
['rsync_context', 'swift_context']), ['rsync_context', 'swift_context']),
call('/etc/swift/account-server.conf', ['swift_context', call('/etc/swift/account-server.conf', ['swift_context',
'bind_host_context', 'bind_host_context',
'worker_context']), 'worker_context',
'vl_context']),
call('/etc/swift/object-server.conf', ['swift_context', call('/etc/swift/object-server.conf', ['swift_context',
'bind_host_context', 'bind_host_context',
'worker_context']), 'worker_context',
'vl_context']),
call('/etc/swift/container-server.conf', ['swift_context', call('/etc/swift/container-server.conf', ['swift_context',
'bind_host_context', 'bind_host_context',
'worker_context']) 'worker_context',
'vl_context'])
] ]
self.assertEqual(ex, configs.register.call_args_list) self.assertEqual(ex, configs.register.call_args_list)
@ -434,6 +502,7 @@ class SwiftStorageUtilsTests(CharmTestCase):
mock_is_device_in_ring.return_value = False mock_is_device_in_ring.return_value = False
determine.return_value = ["/dev/loop0", ] determine.return_value = ["/dev/loop0", ]
self.is_mapped_loopback_device.return_value = "/srv/test.img" self.is_mapped_loopback_device.return_value = "/srv/test.img"
self.is_device_mounted.return_value = False
swift_utils.setup_storage() swift_utils.setup_storage()
self.mount.assert_called_with( self.mount.assert_called_with(
"/dev/loop0", "/dev/loop0",
@ -476,6 +545,7 @@ class SwiftStorageUtilsTests(CharmTestCase):
mock_is_device_in_ring.return_value = False mock_is_device_in_ring.return_value = False
determine.return_value = ["/dev/loop0", ] determine.return_value = ["/dev/loop0", ]
self.is_mapped_loopback_device.return_value = "/srv/test.img" self.is_mapped_loopback_device.return_value = "/srv/test.img"
self.is_device_mounted.return_value = False
swift_utils.setup_storage() swift_utils.setup_storage()
self.mount.assert_called_with( self.mount.assert_called_with(
"/srv/test.img", "/srv/test.img",

View File

@ -117,6 +117,23 @@ class TestRelation(object):
return None return None
class TestKV(dict):
def __init__(self):
super(TestKV, self).__init__()
self.flushed = False
self.data = {}
def get(self, attribute, default=None):
return self.data.get(attribute, default)
def set(self, attribute, value):
self.data[attribute] = value
def flush(self):
self.flushed = True
@contextmanager @contextmanager
def patch_open(): def patch_open():
'''Patch open() to allow mocking both open() itself and the file that is '''Patch open() to allow mocking both open() itself and the file that is