Add support for instance storage encryption
Add support for encryption of the underlying block device providing storage for local instances.

This commit introduces a new Juju storage binding and a configuration option to provide a single block device for use as local instance storage; this block device is formatted and mounted at /var/lib/nova/instances. In a MAAS deployment, this could be a bcache-fronted device. The configuration option is preferred over the Juju storage binding if both are supplied.

The block device can optionally be encrypted using dm-crypt/LUKS, with encryption keys stored in HashiCorp Vault using vaultlocker. vaultlocker ensures that keys are never persisted to local storage, providing assurance around security of data at rest in the event that disks or servers are stolen.

Charm support is implemented using a new configuration option, 'encrypt', which when set enforces a mandatory relation to an instance of the vault application.

Copy the 'ephemeral-unmount' config option and associated code from the ceph-osd and swift-storage charms to enable testing in cloudy environments.

Change-Id: I772baa61f45ff430f706ec4864f3018488026148
parent 59cce54c67
commit d10dd7795e
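Before the diff itself, a sketch of the overall flow the commit message describes: take one block device, optionally wrap it with dm-crypt/LUKS via vaultlocker, format it as XFS, and mount it at /var/lib/nova/instances. The helper that actually implements this in the charm (configure_local_ephemeral_storage, referenced by the hooks below) is not part of this diff, so the following Python is only an illustrative approximation; the device path, the 'vaultlocker encrypt --uuid' invocation and the resulting crypt-<uuid> device-mapper name are assumptions rather than code from the charm.

import subprocess
import uuid


def prepare_instance_storage(device, encrypt=False,
                             mountpoint='/var/lib/nova/instances'):
    # Hypothetical helper mirroring the commit message; not the charm's
    # actual configure_local_ephemeral_storage() implementation.
    if encrypt:
        # vaultlocker encrypts the device with dm-crypt/LUKS and stores
        # the LUKS key in Vault; the UUID also names the device-mapper
        # node (assumed behaviour of the vaultlocker CLI).
        dev_uuid = str(uuid.uuid4())
        subprocess.check_call(['vaultlocker', 'encrypt',
                               '--uuid', dev_uuid, device])
        device = '/dev/mapper/crypt-{}'.format(dev_uuid)
    # XFS matches the mkfs_xfs helper added to charmhelpers in this diff.
    subprocess.check_call(['mkfs.xfs', '-i', 'size=1024', device])
    subprocess.check_call(['mount', device, mountpoint])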
config.yaml
@@ -247,8 +247,8 @@ options:
    type: string
    default:
    description: |
-      Tell Nova which libvirt image backend to use. Supported backends are rbd,
-      lvm and qcow2. If no backend is specified, the Nova default (qcow2) is
+      Tell Nova which libvirt image backend to use. Supported backends are rbd
+      and qcow2. If no backend is specified, the Nova default (qcow2) is
      used. Note that rbd imagebackend is only supported with >= Juno.
  force-raw-images:
    type: boolean
@@ -388,4 +388,28 @@ options:
      presenting the disk via device mapper (/dev/mapper/XX) to the VM instead
      of a single path (/dev/disk/by-path/XX). If changed after deployment,
      each VM will require a full stop/start for changes to take effect.
+  ephemeral-device:
+    type: string
+    default:
+    description: |
+      Block devices to use for storage of ephemeral disks to support nova
+      instances; generally used in conjunction with 'encrypt' to support
+      data-at-rest encryption of instance direct attached storage volumes.
+  encrypt:
+    default: False
+    type: boolean
+    description: |
+      Encrypt block devices used for nova instances using dm-crypt, making use
+      of vault for encryption key management; requires a relation to vault.
+  ephemeral-unmount:
+    type: string
+    default:
+    description: |
+      Cloud instances provide ephemeral storage which is normally mounted
+      on /mnt.
+      .
+      Setting this option to the path of the ephemeral mountpoint will force
+      an unmount of the corresponding device so that it can be used as the
+      backing store for local instances. This is useful for testing purposes
+      (cloud deployment is not a typical use case).
+
@@ -65,7 +65,8 @@ def get_ca_cert():
    if ca_cert is None:
        log("Inspecting identity-service relations for CA SSL certificate.",
            level=INFO)
-        for r_id in relation_ids('identity-service'):
+        for r_id in (relation_ids('identity-service') +
+                     relation_ids('identity-credentials')):
            for unit in relation_list(r_id):
                if ca_cert is None:
                    ca_cert = relation_get('ca_cert',
@@ -76,7 +77,7 @@ def get_ca_cert():
def retrieve_ca_cert(cert_file):
    cert = None
    if os.path.isfile(cert_file):
-        with open(cert_file, 'r') as crt:
+        with open(cert_file, 'rb') as crt:
            cert = crt.read()
    return cert

@@ -371,6 +371,7 @@ def distributed_wait(modulo=None, wait=None, operation_name='operation'):
    ''' Distribute operations by waiting based on modulo_distribution

    If modulo and or wait are not set, check config_get for those values.
+    If config values are not set, default to modulo=3 and wait=30.

    :param modulo: int The modulo number creates the group distribution
    :param wait: int The constant time wait value
@@ -382,10 +383,17 @@ def distributed_wait(modulo=None, wait=None, operation_name='operation'):
    :side effect: Calls time.sleep()
    '''
    if modulo is None:
-        modulo = config_get('modulo-nodes')
+        modulo = config_get('modulo-nodes') or 3
    if wait is None:
-        wait = config_get('known-wait')
-    calculated_wait = modulo_distribution(modulo=modulo, wait=wait)
+        wait = config_get('known-wait') or 30
+    if juju_is_leader():
+        # The leader should never wait
+        calculated_wait = 0
+    else:
+        # non_zero_wait=True guarantees the non-leader who gets modulo 0
+        # will still wait
+        calculated_wait = modulo_distribution(modulo=modulo, wait=wait,
+                                              non_zero_wait=True)
    msg = "Waiting {} seconds for {} ...".format(calculated_wait,
                                                 operation_name)
    log(msg, DEBUG)
@@ -102,6 +102,8 @@ def add_ovsbridge_linuxbridge(name, bridge):
            log('Interface {} already exists'.format(interface), level=INFO)
            return

+    check_for_eni_source()
+
    with open('/etc/network/interfaces.d/{}.cfg'.format(
            linuxbridge_port), 'w') as config:
        config.write(BRIDGE_TEMPLATE.format(linuxbridge_port=linuxbridge_port,
@@ -155,9 +157,40 @@ def get_certificate():
    return None


+def check_for_eni_source():
+    ''' Juju removes the source line when setting up interfaces,
+    replace if missing '''
+
+    with open('/etc/network/interfaces', 'r') as eni:
+        for line in eni:
+            if line == 'source /etc/network/interfaces.d/*':
+                return
+    with open('/etc/network/interfaces', 'a') as eni:
+        eni.write('\nsource /etc/network/interfaces.d/*')
+
+
def full_restart():
    ''' Full restart and reload of openvswitch '''
    if os.path.exists('/etc/init/openvswitch-force-reload-kmod.conf'):
        service('start', 'openvswitch-force-reload-kmod')
    else:
        service('force-reload-kmod', 'openvswitch-switch')
+
+
+def enable_ipfix(bridge, target):
+    '''Enable IPfix on bridge to target.
+    :param bridge: Bridge to monitor
+    :param target: IPfix remote endpoint
+    '''
+    cmd = ['ovs-vsctl', 'set', 'Bridge', bridge, 'ipfix=@i', '--',
+           '--id=@i', 'create', 'IPFIX', 'targets="{}"'.format(target)]
+    log('Enabling IPfix on {}.'.format(bridge))
+    subprocess.check_call(cmd)
+
+
+def disable_ipfix(bridge):
+    '''Disable IPfix on target bridge.
+    :param bridge: Bridge to modify
+    '''
+    cmd = ['ovs-vsctl', 'clear', 'Bridge', bridge, 'ipfix']
+    subprocess.check_call(cmd)
@@ -151,6 +151,29 @@ def enable(soft_fail=False):
    return True


+def reload():
+    """
+    Reload ufw
+
+    :returns: True if ufw is successfully reloaded
+    """
+    output = subprocess.check_output(['ufw', 'reload'],
+                                     universal_newlines=True,
+                                     env={'LANG': 'en_US',
+                                          'PATH': os.environ['PATH']})
+
+    m = re.findall('^Firewall reloaded\n',
+                   output, re.M)
+    hookenv.log(output, level='DEBUG')
+
+    if len(m) == 0:
+        hookenv.log("ufw couldn't be reloaded", level='WARN')
+        return False
+    else:
+        hookenv.log("ufw reloaded", level='INFO')
+        return True
+
+
def disable():
    """
    Disable ufw
@@ -21,6 +21,9 @@ from collections import OrderedDict
from charmhelpers.contrib.amulet.deployment import (
    AmuletDeployment
)
+from charmhelpers.contrib.openstack.amulet.utils import (
+    OPENSTACK_RELEASES_PAIRS
+)

DEBUG = logging.DEBUG
ERROR = logging.ERROR
@@ -271,11 +274,8 @@ class OpenStackAmuletDeployment(AmuletDeployment):
           release.
        """
        # Must be ordered by OpenStack release (not by Ubuntu release):
-        (self.trusty_icehouse, self.trusty_kilo, self.trusty_liberty,
-         self.trusty_mitaka, self.xenial_mitaka, self.xenial_newton,
-         self.yakkety_newton, self.xenial_ocata, self.zesty_ocata,
-         self.xenial_pike, self.artful_pike, self.xenial_queens,
-         self.bionic_queens,) = range(13)
+        for i, os_pair in enumerate(OPENSTACK_RELEASES_PAIRS):
+            setattr(self, os_pair, i)

        releases = {
            ('trusty', None): self.trusty_icehouse,
@@ -50,6 +50,13 @@ ERROR = logging.ERROR

NOVA_CLIENT_VERSION = "2"

+OPENSTACK_RELEASES_PAIRS = [
+    'trusty_icehouse', 'trusty_kilo', 'trusty_liberty',
+    'trusty_mitaka', 'xenial_mitaka', 'xenial_newton',
+    'yakkety_newton', 'xenial_ocata', 'zesty_ocata',
+    'xenial_pike', 'artful_pike', 'xenial_queens',
+    'bionic_queens']
+

class OpenStackAmuletUtils(AmuletUtils):
    """OpenStack amulet utilities.
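The two hunks above replace a hard-coded 13-element tuple unpacking with attributes derived from the shared OPENSTACK_RELEASES_PAIRS list, so release-ordering checks become integer comparisons. A minimal standalone sketch of the pattern (Deployment here is a stand-in for the real OpenStackAmuletDeployment):

OPENSTACK_RELEASES_PAIRS = [
    'trusty_icehouse', 'trusty_kilo', 'trusty_liberty',
    'trusty_mitaka', 'xenial_mitaka', 'xenial_newton',
    'yakkety_newton', 'xenial_ocata', 'zesty_ocata',
    'xenial_pike', 'artful_pike', 'xenial_queens',
    'bionic_queens']


class Deployment(object):
    def __init__(self):
        # Each pair becomes an integer attribute: its list position.
        for i, os_pair in enumerate(OPENSTACK_RELEASES_PAIRS):
            setattr(self, os_pair, i)


d = Deployment()
assert d.xenial_queens > d.xenial_mitaka          # ordering comparison
assert OPENSTACK_RELEASES_PAIRS.index('xenial_queens') == d.xenial_queens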
@@ -63,7 +70,34 @@ class OpenStackAmuletUtils(AmuletUtils):
        super(OpenStackAmuletUtils, self).__init__(log_level)

    def validate_endpoint_data(self, endpoints, admin_port, internal_port,
-                               public_port, expected):
+                               public_port, expected, openstack_release=None):
+        """Validate endpoint data. Pick the correct validator based on
+           OpenStack release. Expected data should be in the v2 format:
+           {
+               'id': id,
+               'region': region,
+               'adminurl': adminurl,
+               'internalurl': internalurl,
+               'publicurl': publicurl,
+               'service_id': service_id}
+
+        """
+        validation_function = self.validate_v2_endpoint_data
+        xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens')
+        if openstack_release and openstack_release >= xenial_queens:
+            validation_function = self.validate_v3_endpoint_data
+            expected = {
+                'id': expected['id'],
+                'region': expected['region'],
+                'region_id': 'RegionOne',
+                'url': self.valid_url,
+                'interface': self.not_null,
+                'service_id': expected['service_id']}
+        return validation_function(endpoints, admin_port, internal_port,
+                                   public_port, expected)
+
+    def validate_v2_endpoint_data(self, endpoints, admin_port, internal_port,
+                                  public_port, expected):
        """Validate endpoint data.

        Validate actual endpoint data vs expected endpoint data. The ports
@@ -141,7 +175,86 @@ class OpenStackAmuletUtils(AmuletUtils):
        if len(found) != expected_num_eps:
            return 'Unexpected number of endpoints found'

-    def validate_svc_catalog_endpoint_data(self, expected, actual):
+    def convert_svc_catalog_endpoint_data_to_v3(self, ep_data):
+        """Convert v2 endpoint data into v3.
+
+           {
+               'service_name1': [
+                   {
+                       'adminURL': adminURL,
+                       'id': id,
+                       'region': region,
+                       'publicURL': publicURL,
+                       'internalURL': internalURL
+                   }],
+               'service_name2': [
+                   {
+                       'adminURL': adminURL,
+                       'id': id,
+                       'region': region,
+                       'publicURL': publicURL,
+                       'internalURL': internalURL
+                   }],
+           }
+        """
+        self.log.warn("Endpoint ID and Region ID validation is limited to not "
+                      "null checks after v2 to v3 conversion")
+        for svc in ep_data.keys():
+            assert len(ep_data[svc]) == 1, "Unknown data format"
+            svc_ep_data = ep_data[svc][0]
+            ep_data[svc] = [
+                {
+                    'url': svc_ep_data['adminURL'],
+                    'interface': 'admin',
+                    'region': svc_ep_data['region'],
+                    'region_id': self.not_null,
+                    'id': self.not_null},
+                {
+                    'url': svc_ep_data['publicURL'],
+                    'interface': 'public',
+                    'region': svc_ep_data['region'],
+                    'region_id': self.not_null,
+                    'id': self.not_null},
+                {
+                    'url': svc_ep_data['internalURL'],
+                    'interface': 'internal',
+                    'region': svc_ep_data['region'],
+                    'region_id': self.not_null,
+                    'id': self.not_null}]
+        return ep_data
+
+    def validate_svc_catalog_endpoint_data(self, expected, actual,
+                                           openstack_release=None):
+        """Validate service catalog endpoint data. Pick the correct validator
+           for the OpenStack version. Expected data should be in the v2 format:
+           {
+               'service_name1': [
+                   {
+                       'adminURL': adminURL,
+                       'id': id,
+                       'region': region,
+                       'publicURL': publicURL,
+                       'internalURL': internalURL
+                   }],
+               'service_name2': [
+                   {
+                       'adminURL': adminURL,
+                       'id': id,
+                       'region': region,
+                       'publicURL': publicURL,
+                       'internalURL': internalURL
+                   }],
+           }
+
+        """
+        validation_function = self.validate_v2_svc_catalog_endpoint_data
+        xenial_queens = OPENSTACK_RELEASES_PAIRS.index('xenial_queens')
+        if openstack_release and openstack_release >= xenial_queens:
+            validation_function = self.validate_v3_svc_catalog_endpoint_data
+            expected = self.convert_svc_catalog_endpoint_data_to_v3(expected)
+        return validation_function(expected, actual)
+
+    def validate_v2_svc_catalog_endpoint_data(self, expected, actual):
        """Validate service catalog endpoint data.

        Validate a list of actual service catalog endpoints vs a list of
@@ -328,7 +441,7 @@ class OpenStackAmuletUtils(AmuletUtils):
        if rel.get('api_version') != str(api_version):
            raise Exception("api_version not propagated through relation"
                            " data yet ('{}' != '{}')."
-                            "".format(rel['api_version'], api_version))
+                            "".format(rel.get('api_version'), api_version))

    def keystone_configure_api_version(self, sentry_relation_pairs, deployment,
                                       api_version):
@@ -350,16 +463,13 @@ class OpenStackAmuletUtils(AmuletUtils):
        deployment._auto_wait_for_status()
        self.keystone_wait_for_propagation(sentry_relation_pairs, api_version)

-    def authenticate_cinder_admin(self, keystone_sentry, username,
-                                  password, tenant, api_version=2):
+    def authenticate_cinder_admin(self, keystone, api_version=2):
        """Authenticates admin user with cinder."""
-        # NOTE(beisner): cinder python client doesn't accept tokens.
-        keystone_ip = keystone_sentry.info['public-address']
-        ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8'))
+        self.log.debug('Authenticating cinder admin...')
        _clients = {
            1: cinder_client.Client,
            2: cinder_clientv2.Client}
-        return _clients[api_version](username, password, tenant, ept)
+        return _clients[api_version](session=keystone.session)

    def authenticate_keystone(self, keystone_ip, username, password,
                              api_version=False, admin_port=False,
@@ -384,6 +384,7 @@ class IdentityServiceContext(OSContextGenerator):
                # so a missing value just indicates keystone needs
                # upgrading
                ctxt['admin_tenant_id'] = rdata.get('service_tenant_id')
+                ctxt['admin_domain_id'] = rdata.get('service_domain_id')
                return ctxt

        return {}
@@ -796,9 +797,9 @@ class ApacheSSLContext(OSContextGenerator):
        key_filename = 'key'

        write_file(path=os.path.join(ssl_dir, cert_filename),
-                   content=b64decode(cert))
+                   content=b64decode(cert), perms=0o640)
        write_file(path=os.path.join(ssl_dir, key_filename),
-                   content=b64decode(key))
+                   content=b64decode(key), perms=0o640)

    def configure_ca(self):
        ca_cert = get_ca_cert()
@@ -1872,10 +1873,11 @@ class EnsureDirContext(OSContextGenerator):
    context is needed to do that before rendering a template.
    '''

-    def __init__(self, dirname):
+    def __init__(self, dirname, **kwargs):
        '''Used merely to ensure that a given directory exists.'''
        self.dirname = dirname
+        self.kwargs = kwargs

    def __call__(self):
-        mkdir(self.dirname)
+        mkdir(self.dirname, **self.kwargs)
        return {}
@@ -6,6 +6,7 @@ global
    group haproxy
    spread-checks 0
    stats socket /var/run/haproxy/admin.sock mode 600 level admin
+    stats socket /var/run/haproxy/operator.sock mode 600 level operator
    stats timeout 2m

defaults
@@ -0,0 +1,5 @@
+[oslo_middleware]
+
+# Bug #1758675
+enable_proxy_headers_parsing = true
+
@@ -5,4 +5,7 @@ transport_url = {{ transport_url }}
{% if notification_topics -%}
topics = {{ notification_topics }}
{% endif -%}
+{% if notification_format -%}
+notification_format = {{ notification_format }}
+{% endif -%}
{% endif -%}
@@ -306,7 +306,7 @@ def get_os_codename_install_source(src):

    if src.startswith('cloud:'):
        ca_rel = src.split(':')[1]
-        ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0]
+        ca_rel = ca_rel.split('-')[1].split('/')[0]
        return ca_rel

    # Best guess match based on deb string provided
hooks/charmhelpers/contrib/openstack/vaultlocker.py (new file, 126 lines)
@@ -0,0 +1,126 @@
+# Copyright 2018 Canonical Limited.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#  http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import json
+import os
+
+import charmhelpers.contrib.openstack.alternatives as alternatives
+import charmhelpers.contrib.openstack.context as context
+
+import charmhelpers.core.hookenv as hookenv
+import charmhelpers.core.host as host
+import charmhelpers.core.templating as templating
+import charmhelpers.core.unitdata as unitdata
+
+VAULTLOCKER_BACKEND = 'charm-vaultlocker'
+
+
+class VaultKVContext(context.OSContextGenerator):
+    """Vault KV context for interaction with vault-kv interfaces"""
+    interfaces = ['secrets-storage']
+
+    def __init__(self, secret_backend=None):
+        super(context.OSContextGenerator, self).__init__()
+        self.secret_backend = (
+            secret_backend or 'charm-{}'.format(hookenv.service_name())
+        )
+
+    def __call__(self):
+        db = unitdata.kv()
+        last_token = db.get('last-token')
+        secret_id = db.get('secret-id')
+        for relation_id in hookenv.relation_ids(self.interfaces[0]):
+            for unit in hookenv.related_units(relation_id):
+                data = hookenv.relation_get(unit=unit,
+                                            rid=relation_id)
+                vault_url = data.get('vault_url')
+                role_id = data.get('{}_role_id'.format(hookenv.local_unit()))
+                token = data.get('{}_token'.format(hookenv.local_unit()))
+
+                if all([vault_url, role_id, token]):
+                    token = json.loads(token)
+                    vault_url = json.loads(vault_url)
+
+                    # Tokens may change when secret_id's are being
+                    # reissued - if so use token to get new secret_id
+                    if token != last_token:
+                        secret_id = retrieve_secret_id(
+                            url=vault_url,
+                            token=token
+                        )
+                        db.set('secret-id', secret_id)
+                        db.set('last-token', token)
+                        db.flush()
+
+                    ctxt = {
+                        'vault_url': vault_url,
+                        'role_id': json.loads(role_id),
+                        'secret_id': secret_id,
+                        'secret_backend': self.secret_backend,
+                    }
+                    vault_ca = data.get('vault_ca')
+                    if vault_ca:
+                        ctxt['vault_ca'] = json.loads(vault_ca)
+                    self.complete = True
+                    return ctxt
+        return {}
+
+
+def write_vaultlocker_conf(context, priority=100):
+    """Write vaultlocker configuration to disk and install alternative
+
+    :param context: Dict of data from vault-kv relation
+    :ptype: context: dict
+    :param priority: Priority of alternative configuration
+    :ptype: priority: int"""
+    charm_vl_path = "/var/lib/charm/{}/vaultlocker.conf".format(
+        hookenv.service_name()
+    )
+    host.mkdir(os.path.dirname(charm_vl_path), perms=0o700)
+    templating.render(source='vaultlocker.conf.j2',
+                      target=charm_vl_path,
+                      context=context, perms=0o600)
+    alternatives.install_alternative('vaultlocker.conf',
+                                     '/etc/vaultlocker/vaultlocker.conf',
+                                     charm_vl_path, priority)
+
+
+def vault_relation_complete(backend=None):
+    """Determine whether vault relation is complete
+
+    :param backend: Name of secrets backend requested
+    :ptype backend: string
+    :returns: whether the relation to vault is complete
+    :rtype: bool"""
+    vault_kv = VaultKVContext(secret_backend=backend or VAULTLOCKER_BACKEND)
+    vault_kv()
+    return vault_kv.complete
+
+
+# TODO: contrib a high level unwrap method to hvac that works
+def retrieve_secret_id(url, token):
+    """Retrieve a response-wrapped secret_id from Vault
+
+    :param url: URL to Vault Server
+    :ptype url: str
+    :param token: One shot Token to use
+    :ptype token: str
+    :returns: secret_id to use for Vault Access
+    :rtype: str"""
+    import hvac
+    client = hvac.Client(url=url, token=token)
+    response = client._post('/v1/sys/wrapping/unwrap')
+    if response.status_code == 200:
+        data = response.json()
+        return data['data']['secret_id']
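For orientation, a rough sketch of how the context assembled by VaultKVContext feeds vaultlocker's configuration. The relation values below are invented for illustration, and the vaultlocker.conf.j2 template is not included in this diff, so the rendered layout is an assumption based on the context keys:

# Hypothetical relation data as VaultKVContext would return it:
ctxt = {
    'vault_url': 'http://10.0.0.10:8200',   # assumed Vault address
    'role_id': 'example-role-id',
    'secret_id': 'example-secret-id',
    'secret_backend': 'charm-vaultlocker',
}

# Assumed shape of the rendered vaultlocker.conf:
template = ("[vault]\n"
            "url = {vault_url}\n"
            "approle = {role_id}\n"
            "backend = {secret_backend}\n"
            "secret_id = {secret_id}\n")
print(template.format(**ctxt))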
@@ -291,7 +291,7 @@ class Pool(object):

class ReplicatedPool(Pool):
    def __init__(self, service, name, pg_num=None, replicas=2,
-                 percent_data=10.0):
+                 percent_data=10.0, app_name=None):
        super(ReplicatedPool, self).__init__(service=service, name=name)
        self.replicas = replicas
        if pg_num:
@@ -301,6 +301,10 @@ class ReplicatedPool(Pool):
            self.pg_num = min(pg_num, max_pgs)
        else:
            self.pg_num = self.get_pgs(self.replicas, percent_data)
+        if app_name:
+            self.app_name = app_name
+        else:
+            self.app_name = 'unknown'

    def create(self):
        if not pool_exists(self.service, self.name):
@@ -313,6 +317,12 @@ class ReplicatedPool(Pool):
                update_pool(client=self.service,
                            pool=self.name,
                            settings={'size': str(self.replicas)})
+            try:
+                set_app_name_for_pool(client=self.service,
+                                      pool=self.name,
+                                      name=self.app_name)
+            except CalledProcessError:
+                log('Could not set app name for pool {}'.format(self.name), level=WARNING)
        except CalledProcessError:
            raise

@@ -320,10 +330,14 @@ class ReplicatedPool(Pool):
# Default jerasure erasure coded pool
class ErasurePool(Pool):
    def __init__(self, service, name, erasure_code_profile="default",
-                 percent_data=10.0):
+                 percent_data=10.0, app_name=None):
        super(ErasurePool, self).__init__(service=service, name=name)
        self.erasure_code_profile = erasure_code_profile
        self.percent_data = percent_data
+        if app_name:
+            self.app_name = app_name
+        else:
+            self.app_name = 'unknown'

    def create(self):
        if not pool_exists(self.service, self.name):
@@ -355,6 +369,12 @@ class ErasurePool(Pool):
               'erasure', self.erasure_code_profile]
        try:
            check_call(cmd)
+            try:
+                set_app_name_for_pool(client=self.service,
+                                      pool=self.name,
+                                      name=self.app_name)
+            except CalledProcessError:
+                log('Could not set app name for pool {}'.format(self.name), level=WARNING)
        except CalledProcessError:
            raise

@@ -778,6 +798,25 @@ def update_pool(client, pool, settings):
    check_call(cmd)


+def set_app_name_for_pool(client, pool, name):
+    """
+    Calls `osd pool application enable` for the specified pool name
+
+    :param client: Name of the ceph client to use
+    :type client: str
+    :param pool: Pool to set app name for
+    :type pool: str
+    :param name: app name for the specified pool
+    :type name: str
+
+    :raises: CalledProcessError if ceph call fails
+    """
+    if ceph_version() >= '12.0.0':
+        cmd = ['ceph', '--id', client, 'osd', 'pool',
+               'application', 'enable', pool, name]
+        check_call(cmd)
+
+
def create_pool(service, name, replicas=3, pg_num=None):
    """Create a new RADOS pool."""
    if pool_exists(service, name):
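For reference, the command set_app_name_for_pool() builds on Ceph >= 12.0.0 (Luminous), shown here with hypothetical client and pool names:

# Illustrative only; 'nova-compute' and 'nova' are made-up names.
cmd = ['ceph', '--id', 'nova-compute', 'osd', 'pool',
       'application', 'enable', 'nova', 'rbd']
print(' '.join(cmd))
# ceph --id nova-compute osd pool application enable nova rbd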
@@ -151,3 +151,32 @@ def extend_logical_volume_by_device(lv_name, block_device):
    '''
    cmd = ['lvextend', lv_name, block_device]
    check_call(cmd)
+
+
+def create_logical_volume(lv_name, volume_group, size=None):
+    '''
+    Create a new logical volume in an existing volume group
+
+    :param lv_name: str: name of logical volume to be created.
+    :param volume_group: str: Name of volume group to use for the new volume.
+    :param size: str: Size of logical volume to create (100% if not supplied)
+    :raises subprocess.CalledProcessError: in the event that the lvcreate fails.
+    '''
+    if size:
+        check_call([
+            'lvcreate',
+            '--yes',
+            '-L',
+            '{}'.format(size),
+            '-n', lv_name, volume_group
+        ])
+    # create the lv with all the space available, this is needed because the
+    # system call is different for LVM
+    else:
+        check_call([
+            'lvcreate',
+            '--yes',
+            '-l',
+            '100%FREE',
+            '-n', lv_name, volume_group
+        ])
@@ -67,3 +67,19 @@ def is_device_mounted(device):
    except Exception:
        return False
    return bool(re.search(r'MOUNTPOINT=".+"', out))
+
+
+def mkfs_xfs(device, force=False):
+    """Format device with XFS filesystem.
+
+    By default this should fail if the device already has a filesystem on it.
+    :param device: Full path to device to format
+    :ptype device: str
+    :param force: Force operation
+    :ptype: force: boolean"""
+    cmd = ['mkfs.xfs']
+    if force:
+        cmd.append("-f")
+
+    cmd += ['-i', 'size=1024', device]
+    check_call(cmd)
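A small sketch of the command mkfs_xfs() constructs (device path hypothetical); without force=True the mkfs.xfs call fails if the device already carries a filesystem:

def build_mkfs_cmd(device, force=False):
    # Mirrors mkfs_xfs() above, returning the argv instead of running it.
    cmd = ['mkfs.xfs']
    if force:
        cmd.append('-f')
    cmd += ['-i', 'size=1024', device]
    return cmd


print(build_mkfs_cmd('/dev/vdb'))
print(build_mkfs_cmd('/dev/vdb', force=True))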
@@ -27,6 +27,7 @@ import glob
import os
import json
import yaml
+import re
import subprocess
import sys
import errno
@@ -67,7 +68,7 @@ def cached(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        global cache
-        key = str((func, args, kwargs))
+        key = json.dumps((func, args, kwargs), sort_keys=True, default=str)
        try:
            return cache[key]
        except KeyError:
@@ -289,7 +290,7 @@ class Config(dict):
        self.implicit_save = True
        self._prev_dict = None
        self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
-        if os.path.exists(self.path):
+        if os.path.exists(self.path) and os.stat(self.path).st_size:
            self.load_previous()
        atexit(self._implicit_save)

@@ -309,7 +310,11 @@ class Config(dict):
        """
        self.path = path or self.path
        with open(self.path) as f:
-            self._prev_dict = json.load(f)
+            try:
+                self._prev_dict = json.load(f)
+            except ValueError as e:
+                log('Unable to parse previous config data - {}'.format(str(e)),
+                    level=ERROR)
        for k, v in copy.deepcopy(self._prev_dict).items():
            if k not in self:
                self[k] = v
@@ -353,22 +358,40 @@ class Config(dict):
        self.save()


-@cached
+_cache_config = None
+
+
def config(scope=None):
-    """Juju charm configuration"""
-    config_cmd_line = ['config-get']
-    if scope is not None:
-        config_cmd_line.append(scope)
-    else:
-        config_cmd_line.append('--all')
-    config_cmd_line.append('--format=json')
+    """
+    Get the juju charm configuration (scope==None) or individual key,
+    (scope=str). The returned value is a Python data structure loaded as
+    JSON from the Juju config command.
+
+    :param scope: If set, return the value for the specified key.
+    :type scope: Optional[str]
+    :returns: Either the whole config as a Config, or a key from it.
+    :rtype: Any
+    """
+    global _cache_config
+    config_cmd_line = ['config-get', '--all', '--format=json']
    try:
-        config_data = json.loads(
-            subprocess.check_output(config_cmd_line).decode('UTF-8'))
-        if scope is not None:
-            return config_data
-        return Config(config_data)
-    except ValueError:
+        # JSON Decode Exception for Python3.5+
+        exc_json = json.decoder.JSONDecodeError
+    except AttributeError:
+        # JSON Decode Exception for Python2.7 through Python3.4
+        exc_json = ValueError
+    try:
+        if _cache_config is None:
+            config_data = json.loads(
+                subprocess.check_output(config_cmd_line).decode('UTF-8'))
+            _cache_config = Config(config_data)
+        if scope is not None:
+            return _cache_config.get(scope)
+        return _cache_config
+    except (exc_json, UnicodeDecodeError) as e:
+        log('Unable to parse output from config-get: config_cmd_line="{}" '
+            'message="{}"'
+            .format(config_cmd_line, str(e)), level=ERROR)
        return None


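The config() rewrite above replaces per-call 'config-get <key>' subprocess calls (memoised with @cached) by a single 'config-get --all' whose result is held in a module-level cache. A self-contained sketch of the same pattern, with echo standing in for Juju's config-get:

import json
import subprocess

_cache_config = None


def config(scope=None):
    # First call shells out once; later calls reuse the cached dict.
    global _cache_config
    if _cache_config is None:
        out = subprocess.check_output(
            ['echo', '{"encrypt": true, "ephemeral-device": "/dev/vdb"}'])
        _cache_config = json.loads(out.decode('UTF-8'))
    if scope is not None:
        return _cache_config.get(scope)
    return _cache_config


assert config('encrypt') is True
assert config()['ephemeral-device'] == '/dev/vdb'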
@@ -1043,7 +1066,6 @@ def juju_version():
                                   universal_newlines=True).strip()


-@cached
def has_juju_version(minimum_version):
    """Return True if the Juju version is at least the provided version"""
    return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
@@ -1103,6 +1125,8 @@ def _run_atexit():
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def network_get_primary_address(binding):
    '''
+    Deprecated since Juju 2.3; use network_get()
+
    Retrieve the primary network address for a named binding

    :param binding: string. The name of a relation of extra-binding
@@ -1123,7 +1147,6 @@ def network_get_primary_address(binding):
    return response


-@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
def network_get(endpoint, relation_id=None):
    """
    Retrieve the network details for a relation endpoint
@@ -1131,24 +1154,20 @@ def network_get(endpoint, relation_id=None):
    :param endpoint: string. The name of a relation endpoint
    :param relation_id: int. The ID of the relation for the current context.
    :return: dict. The loaded YAML output of the network-get query.
-    :raise: NotImplementedError if run on Juju < 2.1
+    :raise: NotImplementedError if request not supported by the Juju version.
    """
+    if not has_juju_version('2.2'):
+        raise NotImplementedError(juju_version())  # earlier versions require --primary-address
+    if relation_id and not has_juju_version('2.3'):
+        raise NotImplementedError  # 2.3 added the -r option
+
    cmd = ['network-get', endpoint, '--format', 'yaml']
    if relation_id:
        cmd.append('-r')
        cmd.append(relation_id)
-    try:
-        response = subprocess.check_output(
-            cmd,
-            stderr=subprocess.STDOUT).decode('UTF-8').strip()
-    except CalledProcessError as e:
-        # Early versions of Juju 2.0.x required the --primary-address argument.
-        # We catch that condition here and raise NotImplementedError since
-        # the requested semantics are not available - the caller can then
-        # use the network_get_primary_address() method instead.
-        if '--primary-address is currently required' in e.output.decode('UTF-8'):
-            raise NotImplementedError
-        raise
+    response = subprocess.check_output(
+        cmd,
+        stderr=subprocess.STDOUT).decode('UTF-8').strip()
    return yaml.safe_load(response)


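network_get() now fails fast on explicit Juju version checks instead of parsing CalledProcessError output. has_juju_version() (shown earlier in this diff) compares LooseVersion objects; a minimal illustration with example version strings:

from distutils.version import LooseVersion


def has_juju_version(current, minimum):
    # Same comparison as charmhelpers' has_juju_version(), with the
    # running version passed in rather than read from the juju binary.
    return LooseVersion(current) >= LooseVersion(minimum)


assert has_juju_version('2.3.1', '2.2')        # network-get supported
assert not has_juju_version('2.1.0', '2.2')    # would raise NotImplementedError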
@@ -1204,9 +1223,23 @@ def iter_units_for_relation_name(relation_name):

def ingress_address(rid=None, unit=None):
    """
-    Retrieve the ingress-address from a relation when available. Otherwise,
-    return the private-address. This function is to be used on the consuming
-    side of the relation.
+    Retrieve the ingress-address from a relation when available.
+    Otherwise, return the private-address.
+
+    When used on the consuming side of the relation (unit is a remote
+    unit), the ingress-address is the IP address that this unit needs
+    to use to reach the provided service on the remote unit.
+
+    When used on the providing side of the relation (unit == local_unit()),
+    the ingress-address is the IP address that is advertised to remote
+    units on this relation. Remote units need to use this address to
+    reach the local provided service on this unit.
+
+    Note that charms may document some other method to use in
+    preference to the ingress_address(), such as an address provided
+    on a different relation attribute or a service discovery mechanism.
+    This allows charms to redirect inbound connections to their peers
+    or different applications such as load balancers.

    Usage:
    addresses = [ingress_address(rid=u.rid, unit=u.unit)
@@ -1220,3 +1253,40 @@ def ingress_address(rid=None, unit=None):
    settings = relation_get(rid=rid, unit=unit)
    return (settings.get('ingress-address') or
            settings.get('private-address'))
+
+
+def egress_subnets(rid=None, unit=None):
+    """
+    Retrieve the egress-subnets from a relation.
+
+    This function is to be used on the providing side of the
+    relation, and provides the ranges of addresses that client
+    connections may come from. The result is uninteresting on
+    the consuming side of a relation (unit == local_unit()).
+
+    Returns a stable list of subnets in CIDR format.
+    eg. ['192.168.1.0/24', '2001::F00F/128']
+
+    If egress-subnets is not available, falls back to using the published
+    ingress-address, or finally private-address.
+
+    :param rid: string relation id
+    :param unit: string unit name
+    :side effect: calls relation_get
+    :return: list of subnets in CIDR format. eg. ['192.168.1.0/24', '2001::F00F/128']
+    """
+    def _to_range(addr):
+        if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None:
+            addr += '/32'
+        elif ':' in addr and '/' not in addr:  # IPv6
+            addr += '/128'
+        return addr
+
+    settings = relation_get(rid=rid, unit=unit)
+    if 'egress-subnets' in settings:
+        return [n.strip() for n in settings['egress-subnets'].split(',') if n.strip()]
+    if 'ingress-address' in settings:
+        return [_to_range(settings['ingress-address'])]
+    if 'private-address' in settings:
+        return [_to_range(settings['private-address'])]
+    return []  # Should never happen
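The nested _to_range() helper normalises bare addresses into CIDR notation. Extracted as a standalone sketch:

import re


def _to_range(addr):
    # Bare IPv4 -> /32, bare IPv6 -> /128, existing CIDR passes through.
    if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None:
        addr += '/32'
    elif ':' in addr and '/' not in addr:  # IPv6
        addr += '/128'
    return addr


assert _to_range('192.168.1.5') == '192.168.1.5/32'
assert _to_range('2001::F00F') == '2001::F00F/128'
assert _to_range('10.0.0.0/24') == '10.0.0.0/24'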
@@ -993,7 +993,7 @@ def updatedb(updatedb_text, new_path):
    return output


-def modulo_distribution(modulo=3, wait=30):
+def modulo_distribution(modulo=3, wait=30, non_zero_wait=False):
    """ Modulo distribution

    This helper uses the unit number, a modulo value and a constant wait time
@@ -1015,7 +1015,14 @@ def modulo_distribution(modulo=3, wait=30, non_zero_wait=False):

    @param modulo: int The modulo number creates the group distribution
    @param wait: int The constant time wait value
+    @param non_zero_wait: boolean Override unit % modulo == 0,
+                          return modulo * wait. Used to avoid collisions with
+                          leader nodes which are often given priority.
    @return: int Calculated time to wait for unit operation
    """
    unit_number = int(local_unit().split('/')[1])
-    return (unit_number % modulo) * wait
+    calculated_wait_time = (unit_number % modulo) * wait
+    if non_zero_wait and calculated_wait_time == 0:
+        return modulo * wait
+    else:
+        return calculated_wait_time
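A worked example of the new non_zero_wait behaviour, with unit_number passed in directly instead of parsed from local_unit():

def modulo_distribution(unit_number, modulo=3, wait=30, non_zero_wait=False):
    # Same arithmetic as the helper above.
    calculated_wait_time = (unit_number % modulo) * wait
    if non_zero_wait and calculated_wait_time == 0:
        return modulo * wait
    return calculated_wait_time


assert modulo_distribution(4) == 30                      # unit 4 -> 1 * 30
assert modulo_distribution(3) == 0                       # unit 3 would not wait
assert modulo_distribution(3, non_zero_wait=True) == 90  # bumped to modulo * wait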
@@ -307,23 +307,34 @@ class PortManagerCallback(ManagerCallback):
    """
    def __call__(self, manager, service_name, event_name):
        service = manager.get_service(service_name)
-        new_ports = service.get('ports', [])
+        # turn this generator into a list,
+        # as we'll be going over it multiple times
+        new_ports = list(service.get('ports', []))
        port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
        if os.path.exists(port_file):
            with open(port_file) as fp:
                old_ports = fp.read().split(',')
            for old_port in old_ports:
-                if bool(old_port):
-                    old_port = int(old_port)
-                    if old_port not in new_ports:
-                        hookenv.close_port(old_port)
+                if bool(old_port) and not self.ports_contains(old_port, new_ports):
+                    hookenv.close_port(old_port)
        with open(port_file, 'w') as fp:
            fp.write(','.join(str(port) for port in new_ports))
        for port in new_ports:
+            # A port is either a number or 'ICMP'
+            protocol = 'TCP'
+            if str(port).upper() == 'ICMP':
+                protocol = 'ICMP'
            if event_name == 'start':
-                hookenv.open_port(port)
+                hookenv.open_port(port, protocol)
            elif event_name == 'stop':
-                hookenv.close_port(port)
+                hookenv.close_port(port, protocol)
+
+    def ports_contains(self, port, ports):
+        if not bool(port):
+            return False
+        if str(port).upper() != 'ICMP':
+            port = int(port)
+        return port in ports


def service_stop(service_name):
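ports_contains() compares string entries read back from the ports file against the current port list, treating 'ICMP' specially. A standalone sketch:

def ports_contains(port, ports):
    # Entries from the ports file are strings; 'ICMP' is compared as-is,
    # anything else as an integer.
    if not bool(port):
        return False
    if str(port).upper() != 'ICMP':
        port = int(port)
    return port in ports


assert ports_contains('8080', [8080, 'ICMP'])
assert ports_contains('ICMP', [8080, 'ICMP'])
assert not ports_contains('22', [8080])
assert not ports_contains('', [8080])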
@@ -31,18 +31,22 @@ __author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
def create(sysctl_dict, sysctl_file):
    """Creates a sysctl.conf file from a YAML associative array

-    :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }"
+    :param sysctl_dict: a dict or YAML-formatted string of sysctl
+        options eg "{ 'kernel.max_pid': 1337 }"
    :type sysctl_dict: str
    :param sysctl_file: path to the sysctl file to be saved
    :type sysctl_file: str or unicode
    :returns: None
    """
-    try:
-        sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
-    except yaml.YAMLError:
-        log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
-            level=ERROR)
-        return
+    if type(sysctl_dict) is not dict:
+        try:
+            sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
+        except yaml.YAMLError:
+            log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
+                level=ERROR)
+            return
+    else:
+        sysctl_dict_parsed = sysctl_dict

    with open(sysctl_file, "w") as fd:
        for key, value in sysctl_dict_parsed.items():
@@ -166,6 +166,10 @@ class Storage(object):

    To support dicts, lists, integer, floats, and booleans values
    are automatically json encoded/decoded.
+
+    Note: to facilitate unit testing, ':memory:' can be passed as the
+    path parameter which causes sqlite3 to only build the db in memory.
+    This should only be used for testing purposes.
    """
    def __init__(self, path=None):
        self.db_path = path
@@ -175,8 +179,9 @@ class Storage(object):
        else:
            self.db_path = os.path.join(
                os.environ.get('CHARM_DIR', ''), '.unit-state.db')
-        with open(self.db_path, 'a') as f:
-            os.fchmod(f.fileno(), 0o600)
+        if self.db_path != ':memory:':
+            with open(self.db_path, 'a') as f:
+                os.fchmod(f.fileno(), 0o600)
        self.conn = sqlite3.connect('%s' % self.db_path)
        self.cursor = self.conn.cursor()
        self.revision = None
@@ -44,6 +44,7 @@ ARCH_TO_PROPOSED_POCKET = {
    'x86_64': PROPOSED_POCKET,
    'ppc64le': PROPOSED_PORTS_POCKET,
    'aarch64': PROPOSED_PORTS_POCKET,
+    's390x': PROPOSED_PORTS_POCKET,
}
CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
hooks/ephemeral-device-storage-attached (new symbolic link)
@@ -0,0 +1 @@
+storage.bootstrap

hooks/ephemeral-device-storage-detached (new symbolic link)
@@ -0,0 +1 @@
+storage.bootstrap
@@ -14,11 +14,14 @@
# See the License for the specific language governing permissions and
# limitations under the License.

+import base64
+import json
import platform
import sys
import uuid
import yaml
import os
+import subprocess


import charmhelpers.core.unitdata as unitdata
@@ -42,12 +45,15 @@ from charmhelpers.core.templating import (
)
from charmhelpers.core.host import (
    service_restart,
+    write_file,
+    umount,
)
from charmhelpers.fetch import (
    apt_install,
    apt_purge,
    apt_update,
    filter_installed_packages,
+    add_source,
)

from charmhelpers.contrib.openstack.utils import (
@@ -94,6 +100,7 @@ from nova_compute_utils import (
    network_manager,
    libvirt_daemon,
    LIBVIRT_TYPES,
+    configure_local_ephemeral_storage,
)

from charmhelpers.contrib.network.ip import (
@@ -116,6 +123,8 @@ from charmhelpers.contrib.hardening.harden import harden

from socket import gethostname

+import charmhelpers.contrib.openstack.vaultlocker as vaultlocker
+
hooks = Hooks()
CONFIGS = register_configs()
MIGRATION_AUTH_TYPES = ["ssh"]
@@ -137,6 +146,9 @@ def install():
@restart_on_change(restart_map())
@harden()
def config_changed():
+    if config('ephemeral-unmount'):
+        umount(config('ephemeral-unmount'), persist=True)
+
    if config('prefer-ipv6'):
        status_set('maintenance', 'configuring ipv6')
        assert_charm_supports_ipv6()
@@ -217,6 +229,20 @@ def config_changed():
    NovaAPIAppArmorContext().setup_aa_profile()
    NovaNetworkAppArmorContext().setup_aa_profile()

+    install_vaultlocker()
+
+    configure_local_ephemeral_storage()
+
+
+def install_vaultlocker():
+    """Determine whether vaultlocker is required and install"""
+    if config('encrypt'):
+        installed = len(filter_installed_packages(['vaultlocker'])) == 0
+        if not installed:
+            add_source('ppa:openstack-charmers/vaultlocker')
+            apt_update(fatal=True)
+            apt_install('vaultlocker', fatal=True)
+
+
@hooks.hook('amqp-relation-joined')
def amqp_joined(relation_id=None):
@@ -516,6 +542,31 @@ def ceph_access(rid=None, unit=None):
                 key=key)


+@hooks.hook('secrets-storage-relation-joined')
+def secrets_storage_joined(relation_id=None):
+    relation_set(relation_id=relation_id,
+                 secret_backend=vaultlocker.VAULTLOCKER_BACKEND,
+                 isolated=True,
+                 access_address=get_relation_ip('secrets-storage'),
+                 hostname=gethostname())
+
+
+@hooks.hook('secrets-storage-relation-changed')
+def secrets_storage_changed():
+    vault_ca = relation_get('vault_ca')
+    if vault_ca:
+        vault_ca = base64.decodestring(json.loads(vault_ca).encode())
+        write_file('/usr/local/share/ca-certificates/vault-ca.crt',
+                   vault_ca, perms=0o644)
+        subprocess.check_call(['update-ca-certificates', '--fresh'])
+    configure_local_ephemeral_storage()
+
+
+@hooks.hook('storage.real')
+def storage_changed():
+    configure_local_ephemeral_storage()
+
+
@hooks.hook('update-status')
@harden()
def update_status():
@@ -17,6 +17,7 @@ import re
 import pwd
 import subprocess
 import platform
+import uuid

 from itertools import chain
 from base64 import b64decode
@@ -40,6 +41,8 @@ from charmhelpers.core.host import (
     lsb_release,
     rsync,
     CompareHostReleases,
+    mount,
+    fstab_add,
 )

 from charmhelpers.core.hookenv import (
@@ -53,6 +56,8 @@ from charmhelpers.core.hookenv import (
     DEBUG,
     INFO,
     WARNING,
+    storage_list,
+    storage_get,
 )

 from charmhelpers.core.decorators import retry_on_exception
@@ -99,6 +104,16 @@ from nova_compute_context import (
     NovaComputeAvailabilityZoneContext,
 )

+import charmhelpers.contrib.openstack.vaultlocker as vaultlocker
+
+from charmhelpers.core.unitdata import kv
+
+from charmhelpers.contrib.storage.linux.utils import (
+    is_block_device,
+    is_device_mounted,
+    mkfs_xfs,
+)
+
 CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'

 TEMPLATES = 'templates/'
@@ -109,6 +124,7 @@ BASE_PACKAGES = [
     'librbd1',  # bug 1440953
     'python-six',
     'python-psutil',
+    'xfsprogs',
 ]

 VERSION_PACKAGE = 'nova-common'
@@ -159,7 +175,9 @@ BASE_RESOURCE_MAP = {
                      context.VolumeAPIContext('nova-common'),
                      SerialConsoleContext(),
                      NovaComputeAvailabilityZoneContext(),
-                     context.WorkerConfigContext()],
+                     context.WorkerConfigContext(),
+                     vaultlocker.VaultKVContext(
+                         vaultlocker.VAULTLOCKER_BACKEND)],
     },
     NOVA_API_AA_PROFILE_PATH: {
         'services': ['nova-api'],
@@ -719,6 +737,8 @@ def get_optional_relations():
         optional_interfaces['neutron-plugin'] = ['neutron-plugin']
     if relation_ids('shared-db'):
         optional_interfaces['database'] = ['shared-db']
+    if config('encrypt'):
+        optional_interfaces['vault'] = ['secrets-storage']
     return optional_interfaces
@@ -789,3 +809,101 @@ def _pause_resume_helper(f, configs):
     f(assess_status_func(configs),
       services=services(),
       ports=None)
+
+
+def determine_block_device():
+    """Determine the block device to use for ephemeral storage
+
+    :returns: Block device to use for storage
+    :rtype: str or None if not configured"""
+    config_dev = config('ephemeral-device')
+
+    if config_dev and os.path.exists(config_dev):
+        return config_dev
+
+    storage_ids = storage_list('ephemeral-device')
+    storage_devs = [storage_get('location', s) for s in storage_ids]
+
+    if storage_devs:
+        return storage_devs[0]
+
+    return None
+
+
+def configure_local_ephemeral_storage():
+    """Configure local block device for use as ephemeral instance storage"""
+    # Preflight check vault relation if encryption is enabled
+    vault_kv = vaultlocker.VaultKVContext(
+        secret_backend=vaultlocker.VAULTLOCKER_BACKEND
+    )
+    context = vault_kv()
+    encrypt = config('encrypt')
+    if encrypt and not vault_kv.complete:
+        log("Encryption requested but vault relation not complete",
+            level=DEBUG)
+        return
+    elif encrypt and vault_kv.complete:
+        # NOTE: only write vaultlocker configuration once relation is
+        #       complete otherwise we run the chance of an empty
+        #       configuration file being installed on a machine with other
+        #       vaultlocker based services
+        vaultlocker.write_vaultlocker_conf(context, priority=80)

+    db = kv()
+    storage_configured = db.get('storage-configured', False)
+    if storage_configured:
+        log("Ephemeral storage already configured, skipping",
+            level=DEBUG)
+        return
+
+    dev = determine_block_device()
+
+    if not dev:
+        log('No block device configuration found, skipping',
+            level=DEBUG)
+        return
+
+    if not is_block_device(dev):
+        log("Device '{}' is not a block device, "
+            "unable to configure storage".format(dev),
+            level=DEBUG)
+        return
+
+    # NOTE: this deals with a dm-crypt'ed block device already in use
+    if is_device_mounted(dev):
+        log("Device '{}' is already mounted, "
+            "unable to configure storage".format(dev),
+            level=DEBUG)
+        return
+
+    options = None
+    if encrypt:
+        dev_uuid = str(uuid.uuid4())
+        check_call(['vaultlocker', 'encrypt',
+                    '--uuid', dev_uuid,
+                    dev])
+        dev = '/dev/mapper/crypt-{}'.format(dev_uuid)
+        options = ','.join([
+            "defaults",
+            "nofail",
+            ("x-systemd.requires="
+             "vaultlocker-decrypt@{uuid}.service".format(uuid=dev_uuid)),
+            "comment=vaultlocker",
+        ])
+
+    # If not cleaned and in use, mkfs should fail.
+    mkfs_xfs(dev, force=True)
+
+    mountpoint = '/var/lib/nova/instances'
+    filesystem = "xfs"
+    mount(dev, mountpoint, filesystem=filesystem)
+    fstab_add(dev, mountpoint, filesystem, options=options)
+
+    check_call(['chown', '-R', 'nova:nova', mountpoint])
+    check_call(['chmod', '-R', '0755', mountpoint])
+
+    # NOTE: record preparation of device - this ensures that ephemeral
+    #       storage is never reconfigured by mistake, losing instance disks
+    db.set('storage-configured', True)
+    db.flush()
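Review note: for an encrypted device, the options string assembled in configure_local_ephemeral_storage() ties the fstab entry to the per-device vaultlocker systemd unit, deferring the mount until the key has been retrieved from Vault. A worked sketch with a hypothetical UUID:

```python
dev_uuid = 'f8b7e1c0-1111-2222-3333-444455556666'  # hypothetical example
options = ','.join([
    "defaults",
    "nofail",
    "x-systemd.requires=vaultlocker-decrypt@{}.service".format(dev_uuid),
    "comment=vaultlocker",
])
# -> defaults,nofail,x-systemd.requires=vaultlocker-decrypt@f8b7e1c0-1111-2222-3333-444455556666.service,comment=vaultlocker
```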
hooks/secrets-storage-relation-broken (symbolic link)
@@ -0,0 +1 @@
+nova_compute_hooks.py

hooks/secrets-storage-relation-changed (symbolic link)
@@ -0,0 +1 @@
+nova_compute_hooks.py

hooks/secrets-storage-relation-departed (symbolic link)
@@ -0,0 +1 @@
+nova_compute_hooks.py

hooks/secrets-storage-relation-joined (symbolic link)
@@ -0,0 +1 @@
+nova_compute_hooks.py

hooks/storage.bootstrap (new executable file)
@@ -0,0 +1,8 @@
+#!/bin/sh
+
+if ! dpkg -s nova-compute > /dev/null 2>&1; then
+    juju-log "Nova not yet installed."
+    exit 0
+fi
+
+./hooks/storage.real

hooks/storage.real (symbolic link)
@@ -0,0 +1 @@
+nova_compute_hooks.py
@@ -48,6 +48,14 @@ requires:
     scope: container
   ceph-access:
     interface: cinder-ceph-key
+  secrets-storage:
+    interface: vault-kv
 peers:
   compute-peer:
     interface: nova
+storage:
+  ephemeral-device:
+    type: block
+    multiple:
+      range: 0-1
+    minimum-size: 10G
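Review note: with the storage binding declared above, a block device can be attached at deploy time instead of (or as well as) setting 'ephemeral-device' in config; the size below is illustrative and must satisfy the declared 10G minimum:

```
juju deploy nova-compute --storage ephemeral-device=10G
```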
6
templates/vaultlocker.conf.j2
Normal file
6
templates/vaultlocker.conf.j2
Normal file
@ -0,0 +1,6 @@
|
|||||||
|
# vaultlocker configuration from nova-compute charm
|
||||||
|
[vault]
|
||||||
|
url = {{ vault_url }}
|
||||||
|
approle = {{ role_id }}
|
||||||
|
backend = {{ secret_backend }}
|
||||||
|
secret_id = {{ secret_id }}
|
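Review note: rendered with values sourced from the vault relation via VaultKVContext, the template might produce something like the following (all values hypothetical except the backend name, which the unit tests pin to 'charm-vaultlocker'):

```
# vaultlocker configuration from nova-compute charm
[vault]
url = http://10.5.0.10:8200
approle = 2db3e33f-0000-0000-0000-000000000000
backend = charm-vaultlocker
secret_id = 7c3ba8d9-0000-0000-0000-000000000000
```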
@@ -137,6 +137,9 @@ class NovaBasicDeployment(OpenStackAmuletDeployment):
         nova_config = {'config-flags': 'auto_assign_floating_ip=False',
                        'enable-live-migration': 'False',
                        'aa-profile-mode': 'enforce'}
+        if self._get_openstack_release() > self.trusty_mitaka:
+            nova_config.update({'ephemeral-device': '/dev/vdb',
+                                'ephemeral-unmount': '/mnt'})
         nova_cc_config = {}
         if self._get_openstack_release() >= self.xenial_ocata:
             nova_cc_config['network-manager'] = 'Neutron'
@@ -441,7 +441,7 @@ class OpenStackAmuletUtils(AmuletUtils):
         if rel.get('api_version') != str(api_version):
             raise Exception("api_version not propagated through relation"
                             " data yet ('{}' != '{}')."
-                            "".format(rel['api_version'], api_version))
+                            "".format(rel.get('api_version'), api_version))

     def keystone_configure_api_version(self, sentry_relation_pairs, deployment,
                                        api_version):
@@ -463,16 +463,13 @@ class OpenStackAmuletUtils(AmuletUtils):
         deployment._auto_wait_for_status()
         self.keystone_wait_for_propagation(sentry_relation_pairs, api_version)

-    def authenticate_cinder_admin(self, keystone_sentry, username,
-                                  password, tenant, api_version=2):
+    def authenticate_cinder_admin(self, keystone, api_version=2):
         """Authenticates admin user with cinder."""
-        # NOTE(beisner): cinder python client doesn't accept tokens.
-        keystone_ip = keystone_sentry.info['public-address']
-        ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8'))
+        self.log.debug('Authenticating cinder admin...')
         _clients = {
             1: cinder_client.Client,
             2: cinder_clientv2.Client}
-        return _clients[api_version](username, password, tenant, ept)
+        return _clients[api_version](session=keystone.session)

     def authenticate_keystone(self, keystone_ip, username, password,
                               api_version=False, admin_port=False,
@@ -306,7 +306,7 @@ def get_os_codename_install_source(src):

     if src.startswith('cloud:'):
         ca_rel = src.split(':')[1]
-        ca_rel = ca_rel.split('%s-' % ubuntu_rel)[1].split('/')[0]
+        ca_rel = ca_rel.split('-')[1].split('/')[0]
         return ca_rel

     # Best guess match based on deb string provided
@@ -291,7 +291,7 @@

 class ReplicatedPool(Pool):
     def __init__(self, service, name, pg_num=None, replicas=2,
-                 percent_data=10.0):
+                 percent_data=10.0, app_name=None):
         super(ReplicatedPool, self).__init__(service=service, name=name)
         self.replicas = replicas
         if pg_num:

@@ -301,6 +301,10 @@ class ReplicatedPool(Pool):
             self.pg_num = min(pg_num, max_pgs)
         else:
             self.pg_num = self.get_pgs(self.replicas, percent_data)
+        if app_name:
+            self.app_name = app_name
+        else:
+            self.app_name = 'unknown'

     def create(self):
         if not pool_exists(self.service, self.name):

@@ -313,6 +317,12 @@ class ReplicatedPool(Pool):
                 update_pool(client=self.service,
                             pool=self.name,
                             settings={'size': str(self.replicas)})
+                try:
+                    set_app_name_for_pool(client=self.service,
+                                          pool=self.name,
+                                          name=self.app_name)
+                except CalledProcessError:
+                    log('Could not set app name for pool {}'.format(self.name), level=WARNING)
             except CalledProcessError:
                 raise

@@ -320,10 +330,14 @@ class ReplicatedPool(Pool):
 # Default jerasure erasure coded pool
 class ErasurePool(Pool):
     def __init__(self, service, name, erasure_code_profile="default",
-                 percent_data=10.0):
+                 percent_data=10.0, app_name=None):
         super(ErasurePool, self).__init__(service=service, name=name)
         self.erasure_code_profile = erasure_code_profile
         self.percent_data = percent_data
+        if app_name:
+            self.app_name = app_name
+        else:
+            self.app_name = 'unknown'

     def create(self):
         if not pool_exists(self.service, self.name):

@@ -355,6 +369,12 @@ class ErasurePool(Pool):
                    'erasure', self.erasure_code_profile]
             try:
                 check_call(cmd)
+                try:
+                    set_app_name_for_pool(client=self.service,
+                                          pool=self.name,
+                                          name=self.app_name)
+                except CalledProcessError:
+                    log('Could not set app name for pool {}'.format(self.name), level=WARNING)
             except CalledProcessError:
                 raise

@@ -778,6 +798,25 @@ def update_pool(client, pool, settings):
     check_call(cmd)


+def set_app_name_for_pool(client, pool, name):
+    """
+    Calls `osd pool application enable` for the specified pool name
+
+    :param client: Name of the ceph client to use
+    :type client: str
+    :param pool: Pool to set app name for
+    :type pool: str
+    :param name: app name for the specified pool
+    :type name: str
+
+    :raises: CalledProcessError if ceph call fails
+    """
+    if ceph_version() >= '12.0.0':
+        cmd = ['ceph', '--id', client, 'osd', 'pool',
+               'application', 'enable', pool, name]
+        check_call(cmd)
+
+
 def create_pool(service, name, replicas=3, pg_num=None):
     """Create a new RADOS pool."""
     if pool_exists(service, name):
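Review note: on a Luminous (>= 12.0.0) cluster, set_app_name_for_pool() shells out to the standard pool application command; an equivalent invocation with hypothetical client/pool/app names:

```python
from subprocess import check_call

# Equivalent of set_app_name_for_pool(client='admin', pool='mypool', name='rbd');
# requires Ceph >= 12.0.0 (Luminous), earlier releases are silently skipped.
check_call(['ceph', '--id', 'admin', 'osd', 'pool',
            'application', 'enable', 'mypool', 'rbd'])
```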
@@ -151,3 +151,32 @@ def extend_logical_volume_by_device(lv_name, block_device):
     '''
     cmd = ['lvextend', lv_name, block_device]
     check_call(cmd)
+
+
+def create_logical_volume(lv_name, volume_group, size=None):
+    '''
+    Create a new logical volume in an existing volume group
+
+    :param lv_name: str: name of logical volume to be created.
+    :param volume_group: str: Name of volume group to use for the new volume.
+    :param size: str: Size of logical volume to create (100% if not supplied)
+    :raises subprocess.CalledProcessError: in the event that the lvcreate fails.
+    '''
+    if size:
+        check_call([
+            'lvcreate',
+            '--yes',
+            '-L',
+            '{}'.format(size),
+            '-n', lv_name, volume_group
+        ])
+    # create the lv with all the space available, this is needed because the
+    # system call is different for LVM
+    else:
+        check_call([
+            'lvcreate',
+            '--yes',
+            '-l',
+            '100%FREE',
+            '-n', lv_name, volume_group
+        ])
@@ -67,3 +67,19 @@ def is_device_mounted(device):
     except Exception:
         return False
     return bool(re.search(r'MOUNTPOINT=".+"', out))
+
+
+def mkfs_xfs(device, force=False):
+    """Format device with XFS filesystem.
+
+    By default this should fail if the device already has a filesystem on it.
+    :param device: Full path to device to format
+    :ptype device: str
+    :param force: Force operation
+    :ptype: force: boolean"""
+    cmd = ['mkfs.xfs']
+    if force:
+        cmd.append("-f")
+
+    cmd += ['-i', 'size=1024', device]
+    check_call(cmd)
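Review note: a usage sketch for the new helper (device path hypothetical):

```python
# mkfs_xfs('/dev/sdb', force=True) executes the equivalent of:
#   mkfs.xfs -f -i size=1024 /dev/sdb
# Without force=True the -f flag is omitted and mkfs.xfs refuses to
# overwrite an existing filesystem.
```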
@@ -27,6 +27,7 @@ import glob
 import os
 import json
 import yaml
+import re
 import subprocess
 import sys
 import errno
|
|||||||
@wraps(func)
|
@wraps(func)
|
||||||
def wrapper(*args, **kwargs):
|
def wrapper(*args, **kwargs):
|
||||||
global cache
|
global cache
|
||||||
key = str((func, args, kwargs))
|
key = json.dumps((func, args, kwargs), sort_keys=True, default=str)
|
||||||
try:
|
try:
|
||||||
return cache[key]
|
return cache[key]
|
||||||
except KeyError:
|
except KeyError:
|
||||||
@ -289,7 +290,7 @@ class Config(dict):
|
|||||||
self.implicit_save = True
|
self.implicit_save = True
|
||||||
self._prev_dict = None
|
self._prev_dict = None
|
||||||
self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
|
self.path = os.path.join(charm_dir(), Config.CONFIG_FILE_NAME)
|
||||||
if os.path.exists(self.path):
|
if os.path.exists(self.path) and os.stat(self.path).st_size:
|
||||||
self.load_previous()
|
self.load_previous()
|
||||||
atexit(self._implicit_save)
|
atexit(self._implicit_save)
|
||||||
|
|
||||||
@ -309,7 +310,11 @@ class Config(dict):
|
|||||||
"""
|
"""
|
||||||
self.path = path or self.path
|
self.path = path or self.path
|
||||||
with open(self.path) as f:
|
with open(self.path) as f:
|
||||||
self._prev_dict = json.load(f)
|
try:
|
||||||
|
self._prev_dict = json.load(f)
|
||||||
|
except ValueError as e:
|
||||||
|
log('Unable to parse previous config data - {}'.format(str(e)),
|
||||||
|
level=ERROR)
|
||||||
for k, v in copy.deepcopy(self._prev_dict).items():
|
for k, v in copy.deepcopy(self._prev_dict).items():
|
||||||
if k not in self:
|
if k not in self:
|
||||||
self[k] = v
|
self[k] = v
|
||||||
@@ -353,22 +358,40 @@ class Config(dict):
             self.save()


-@cached
-def config(scope=None):
-    """Juju charm configuration"""
-    config_cmd_line = ['config-get']
-    if scope is not None:
-        config_cmd_line.append(scope)
-    else:
-        config_cmd_line.append('--all')
-    config_cmd_line.append('--format=json')
-    try:
-        config_data = json.loads(
-            subprocess.check_output(config_cmd_line).decode('UTF-8'))
-        if scope is not None:
-            return config_data
-        return Config(config_data)
-    except ValueError:
-        return None
+_cache_config = None
+
+
+def config(scope=None):
+    """
+    Get the juju charm configuration (scope==None) or individual key,
+    (scope=str). The returned value is a Python data structure loaded as
+    JSON from the Juju config command.
+
+    :param scope: If set, return the value for the specified key.
+    :type scope: Optional[str]
+    :returns: Either the whole config as a Config, or a key from it.
+    :rtype: Any
+    """
+    global _cache_config
+    config_cmd_line = ['config-get', '--all', '--format=json']
+    try:
+        # JSON Decode Exception for Python3.5+
+        exc_json = json.decoder.JSONDecodeError
+    except AttributeError:
+        # JSON Decode Exception for Python2.7 through Python3.4
+        exc_json = ValueError
+    try:
+        if _cache_config is None:
+            config_data = json.loads(
+                subprocess.check_output(config_cmd_line).decode('UTF-8'))
+            _cache_config = Config(config_data)
+        if scope is not None:
+            return _cache_config.get(scope)
+        return _cache_config
+    except (exc_json, UnicodeDecodeError) as e:
+        log('Unable to parse output from config-get: config_cmd_line="{}" '
+            'message="{}"'
+            .format(config_cmd_line, str(e)), level=ERROR)
+        return None
@@ -1043,7 +1066,6 @@ def juju_version():
                                     universal_newlines=True).strip()


-@cached
 def has_juju_version(minimum_version):
     """Return True if the Juju version is at least the provided version"""
     return LooseVersion(juju_version()) >= LooseVersion(minimum_version)
|
|||||||
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
|
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
|
||||||
def network_get_primary_address(binding):
|
def network_get_primary_address(binding):
|
||||||
'''
|
'''
|
||||||
|
Deprecated since Juju 2.3; use network_get()
|
||||||
|
|
||||||
Retrieve the primary network address for a named binding
|
Retrieve the primary network address for a named binding
|
||||||
|
|
||||||
:param binding: string. The name of a relation of extra-binding
|
:param binding: string. The name of a relation of extra-binding
|
||||||
@ -1123,7 +1147,6 @@ def network_get_primary_address(binding):
|
|||||||
return response
|
return response
|
||||||
|
|
||||||
|
|
||||||
@translate_exc(from_exc=OSError, to_exc=NotImplementedError)
|
|
||||||
def network_get(endpoint, relation_id=None):
|
def network_get(endpoint, relation_id=None):
|
||||||
"""
|
"""
|
||||||
Retrieve the network details for a relation endpoint
|
Retrieve the network details for a relation endpoint
|
||||||
@ -1131,24 +1154,20 @@ def network_get(endpoint, relation_id=None):
|
|||||||
:param endpoint: string. The name of a relation endpoint
|
:param endpoint: string. The name of a relation endpoint
|
||||||
:param relation_id: int. The ID of the relation for the current context.
|
:param relation_id: int. The ID of the relation for the current context.
|
||||||
:return: dict. The loaded YAML output of the network-get query.
|
:return: dict. The loaded YAML output of the network-get query.
|
||||||
:raise: NotImplementedError if run on Juju < 2.1
|
:raise: NotImplementedError if request not supported by the Juju version.
|
||||||
"""
|
"""
|
||||||
|
if not has_juju_version('2.2'):
|
||||||
|
raise NotImplementedError(juju_version()) # earlier versions require --primary-address
|
||||||
|
if relation_id and not has_juju_version('2.3'):
|
||||||
|
raise NotImplementedError # 2.3 added the -r option
|
||||||
|
|
||||||
cmd = ['network-get', endpoint, '--format', 'yaml']
|
cmd = ['network-get', endpoint, '--format', 'yaml']
|
||||||
if relation_id:
|
if relation_id:
|
||||||
cmd.append('-r')
|
cmd.append('-r')
|
||||||
cmd.append(relation_id)
|
cmd.append(relation_id)
|
||||||
try:
|
response = subprocess.check_output(
|
||||||
response = subprocess.check_output(
|
cmd,
|
||||||
cmd,
|
stderr=subprocess.STDOUT).decode('UTF-8').strip()
|
||||||
stderr=subprocess.STDOUT).decode('UTF-8').strip()
|
|
||||||
except CalledProcessError as e:
|
|
||||||
# Early versions of Juju 2.0.x required the --primary-address argument.
|
|
||||||
# We catch that condition here and raise NotImplementedError since
|
|
||||||
# the requested semantics are not available - the caller can then
|
|
||||||
# use the network_get_primary_address() method instead.
|
|
||||||
if '--primary-address is currently required' in e.output.decode('UTF-8'):
|
|
||||||
raise NotImplementedError
|
|
||||||
raise
|
|
||||||
return yaml.safe_load(response)
|
return yaml.safe_load(response)
|
||||||
|
|
||||||
|
|
||||||
@@ -1204,9 +1223,23 @@ def iter_units_for_relation_name(relation_name):

 def ingress_address(rid=None, unit=None):
     """
-    Retrieve the ingress-address from a relation when available. Otherwise,
-    return the private-address. This function is to be used on the consuming
-    side of the relation.
+    Retrieve the ingress-address from a relation when available.
+    Otherwise, return the private-address.
+
+    When used on the consuming side of the relation (unit is a remote
+    unit), the ingress-address is the IP address that this unit needs
+    to use to reach the provided service on the remote unit.
+
+    When used on the providing side of the relation (unit == local_unit()),
+    the ingress-address is the IP address that is advertised to remote
+    units on this relation. Remote units need to use this address to
+    reach the local provided service on this unit.
+
+    Note that charms may document some other method to use in
+    preference to the ingress_address(), such as an address provided
+    on a different relation attribute or a service discovery mechanism.
+    This allows charms to redirect inbound connections to their peers
+    or different applications such as load balancers.

     Usage:
     addresses = [ingress_address(rid=u.rid, unit=u.unit)
@@ -1220,3 +1253,40 @@ def ingress_address(rid=None, unit=None):
     settings = relation_get(rid=rid, unit=unit)
     return (settings.get('ingress-address') or
             settings.get('private-address'))
+
+
+def egress_subnets(rid=None, unit=None):
+    """
+    Retrieve the egress-subnets from a relation.
+
+    This function is to be used on the providing side of the
+    relation, and provides the ranges of addresses that client
+    connections may come from. The result is uninteresting on
+    the consuming side of a relation (unit == local_unit()).
+
+    Returns a stable list of subnets in CIDR format.
+    eg. ['192.168.1.0/24', '2001::F00F/128']
+
+    If egress-subnets is not available, falls back to using the published
+    ingress-address, or finally private-address.
+
+    :param rid: string relation id
+    :param unit: string unit name
+    :side effect: calls relation_get
+    :return: list of subnets in CIDR format. eg. ['192.168.1.0/24', '2001::F00F/128']
+    """
+    def _to_range(addr):
+        if re.search(r'^(?:\d{1,3}\.){3}\d{1,3}$', addr) is not None:
+            addr += '/32'
+        elif ':' in addr and '/' not in addr:  # IPv6
+            addr += '/128'
+        return addr
+
+    settings = relation_get(rid=rid, unit=unit)
+    if 'egress-subnets' in settings:
+        return [n.strip() for n in settings['egress-subnets'].split(',') if n.strip()]
+    if 'ingress-address' in settings:
+        return [_to_range(settings['ingress-address'])]
+    if 'private-address' in settings:
+        return [_to_range(settings['private-address'])]
+    return []  # Should never happen
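Review note: behaviour sketch for the fallback chain in egress_subnets() (relation data hypothetical):

```python
# {'egress-subnets': '10.5.0.0/24, 10.6.0.0/24'} -> ['10.5.0.0/24', '10.6.0.0/24']
# {'ingress-address': '10.5.0.23'}               -> ['10.5.0.23/32']  (bare IPv4 widened)
# {'ingress-address': '2001:db8::f00f'}          -> ['2001:db8::f00f/128']  (bare IPv6)
# {}                                             -> []
```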
@@ -993,7 +993,7 @@ def updatedb(updatedb_text, new_path):
     return output


-def modulo_distribution(modulo=3, wait=30):
+def modulo_distribution(modulo=3, wait=30, non_zero_wait=False):
     """ Modulo distribution

     This helper uses the unit number, a modulo value and a constant wait time

@@ -1015,7 +1015,14 @@ def modulo_distribution(modulo=3, wait=30):

     @param modulo: int The modulo number creates the group distribution
     @param wait: int The constant time wait value
+    @param non_zero_wait: boolean Override unit % modulo == 0,
+                          return modulo * wait. Used to avoid collisions with
+                          leader nodes which are often given priority.
     @return: int Calculated time to wait for unit operation
     """
     unit_number = int(local_unit().split('/')[1])
-    return (unit_number % modulo) * wait
+    calculated_wait_time = (unit_number % modulo) * wait
+    if non_zero_wait and calculated_wait_time == 0:
+        return modulo * wait
+    else:
+        return calculated_wait_time
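Review note: worked example for the new non_zero_wait flag with modulo=3, wait=30:

```python
# unit 0: (0 % 3) * 30 = 0   -> 90 when non_zero_wait=True (3 * 30)
# unit 1: (1 % 3) * 30 = 30  -> 30 (unchanged)
# unit 2: (2 % 3) * 30 = 60  -> 60 (unchanged)
# unit 3: (3 % 3) * 30 = 0   -> 90 when non_zero_wait=True
```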
@@ -307,23 +307,34 @@ class PortManagerCallback(ManagerCallback):
     """
     def __call__(self, manager, service_name, event_name):
         service = manager.get_service(service_name)
-        new_ports = service.get('ports', [])
+        # turn this generator into a list,
+        # as we'll be going over it multiple times
+        new_ports = list(service.get('ports', []))
         port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
         if os.path.exists(port_file):
             with open(port_file) as fp:
                 old_ports = fp.read().split(',')
             for old_port in old_ports:
-                if bool(old_port):
-                    old_port = int(old_port)
-                    if old_port not in new_ports:
-                        hookenv.close_port(old_port)
+                if bool(old_port) and not self.ports_contains(old_port, new_ports):
+                    hookenv.close_port(old_port)
         with open(port_file, 'w') as fp:
             fp.write(','.join(str(port) for port in new_ports))
         for port in new_ports:
+            # A port is either a number or 'ICMP'
+            protocol = 'TCP'
+            if str(port).upper() == 'ICMP':
+                protocol = 'ICMP'
             if event_name == 'start':
-                hookenv.open_port(port)
+                hookenv.open_port(port, protocol)
             elif event_name == 'stop':
-                hookenv.close_port(port)
+                hookenv.close_port(port, protocol)
+
+    def ports_contains(self, port, ports):
+        if not bool(port):
+            return False
+        if str(port).upper() != 'ICMP':
+            port = int(port)
+        return port in ports


 def service_stop(service_name):
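Review note: expected behaviour of the new ports_contains() helper, which reconciles the string entries read back from the ports file against mixed numeric/'ICMP' entries:

```python
# ports_contains('8080', [8080, 'ICMP'])  -> True   ('8080' coerced to int)
# ports_contains('ICMP', [8080, 'ICMP'])  -> True   (left as a string)
# ports_contains('', [8080])              -> False  (empty entries ignored)
```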
@@ -31,18 +31,22 @@ __author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
 def create(sysctl_dict, sysctl_file):
     """Creates a sysctl.conf file from a YAML associative array

-    :param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }"
+    :param sysctl_dict: a dict or YAML-formatted string of sysctl
+        options eg "{ 'kernel.max_pid': 1337 }"
     :type sysctl_dict: str
     :param sysctl_file: path to the sysctl file to be saved
     :type sysctl_file: str or unicode
     :returns: None
     """
-    try:
-        sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
-    except yaml.YAMLError:
-        log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
-            level=ERROR)
-        return
+    if type(sysctl_dict) is not dict:
+        try:
+            sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
+        except yaml.YAMLError:
+            log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
+                level=ERROR)
+            return
+    else:
+        sysctl_dict_parsed = sysctl_dict

     with open(sysctl_file, "w") as fd:
         for key, value in sysctl_dict_parsed.items():
@@ -166,6 +166,10 @@ class Storage(object):

     To support dicts, lists, integer, floats, and booleans values
     are automatically json encoded/decoded.
+
+    Note: to facilitate unit testing, ':memory:' can be passed as the
+    path parameter which causes sqlite3 to only build the db in memory.
+    This should only be used for testing purposes.
     """
     def __init__(self, path=None):
         self.db_path = path

@@ -175,8 +179,9 @@ class Storage(object):
         else:
             self.db_path = os.path.join(
                 os.environ.get('CHARM_DIR', ''), '.unit-state.db')
-        with open(self.db_path, 'a') as f:
-            os.fchmod(f.fileno(), 0o600)
+        if self.db_path != ':memory:':
+            with open(self.db_path, 'a') as f:
+                os.fchmod(f.fileno(), 0o600)
         self.conn = sqlite3.connect('%s' % self.db_path)
         self.cursor = self.conn.cursor()
         self.revision = None
@@ -44,6 +44,7 @@ ARCH_TO_PROPOSED_POCKET = {
     'x86_64': PROPOSED_POCKET,
     'ppc64le': PROPOSED_PORTS_POCKET,
     'aarch64': PROPOSED_PORTS_POCKET,
+    's390x': PROPOSED_PORTS_POCKET,
 }
 CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
 CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'
@@ -77,6 +77,7 @@ TO_PATCH = [
     'update_nrpe_config',
     'network_manager',
     'libvirt_daemon',
+    'configure_local_ephemeral_storage',
     # misc_utils
     'ensure_ceph_keyring',
     'execd_preinstall',
@@ -133,6 +134,7 @@ class NovaComputeRelationsTests(CharmTestCase):
         self.assertTrue(self.do_openstack_upgrade.called)
         neutron_plugin_joined.assert_called_with('rid1', remote_restart=True)
         ceph_changed.assert_called_with(rid='ceph:0', unit='ceph/0')
+        self.configure_local_ephemeral_storage.assert_called_once_with()

     def test_config_changed_with_openstack_upgrade_action(self):
         self.openstack_upgrade_available.return_value = True
|
|||||||
group='nova',
|
group='nova',
|
||||||
key='mykey'
|
key='mykey'
|
||||||
)
|
)
|
||||||
|
|
||||||
|
def test_secrets_storage_relation_joined(self):
|
||||||
|
self.get_relation_ip.return_value = '10.23.1.2'
|
||||||
|
self.gethostname.return_value = 'testhost'
|
||||||
|
hooks.secrets_storage_joined()
|
||||||
|
self.get_relation_ip.assert_called_with('secrets-storage')
|
||||||
|
self.relation_set.assert_called_with(
|
||||||
|
relation_id=None,
|
||||||
|
secret_backend='charm-vaultlocker',
|
||||||
|
isolated=True,
|
||||||
|
access_address='10.23.1.2',
|
||||||
|
hostname='testhost'
|
||||||
|
)
|
||||||
|
self.gethostname.assert_called_once_with()
|
||||||
|
|
||||||
|
def test_secrets_storage_relation_changed(self,):
|
||||||
|
self.relation_get.return_value = None
|
||||||
|
hooks.secrets_storage_changed()
|
||||||
|
self.configure_local_ephemeral_storage.assert_called_once_with()
|
||||||
|
@@ -24,7 +24,8 @@ from mock import (
 )
 from test_utils import (
     CharmTestCase,
-    patch_open
+    patch_open,
+    TestKV,
 )
@@ -55,6 +56,16 @@ TO_PATCH = [
     'Fstab',
     'os_application_version_set',
     'lsb_release',
+    'storage_list',
+    'storage_get',
+    'vaultlocker',
+    'kv',
+    'check_call',
+    'mkfs_xfs',
+    'is_block_device',
+    'is_device_mounted',
+    'fstab_add',
+    'mount',
 ]
@@ -65,6 +76,8 @@ class NovaComputeUtilsTests(CharmTestCase):
         self.config.side_effect = self.test_config.get
         self.charm_dir.return_value = 'mycharm'
         self.lsb_release.return_value = {'DISTRIB_CODENAME': 'precise'}
+        self.test_kv = TestKV()
+        self.kv.return_value = self.test_kv

     @patch.object(utils, 'nova_metadata_requirement')
     @patch.object(utils, 'network_manager')
@@ -803,3 +816,139 @@ class NovaComputeUtilsTests(CharmTestCase):
             'DISTRIB_CODENAME': 'xenial'
         }
         self.assertEqual(utils.libvirt_daemon(), utils.LIBVIRT_BIN_DAEMON)
+
+    @patch.object(utils, 'os')
+    def test_determine_block_device(self, mock_os):
+        self.test_config.set('ephemeral-device', '/dev/sdd')
+        mock_os.path.exists.return_value = True
+        self.assertEqual(utils.determine_block_device(), '/dev/sdd')
+        self.config.assert_called_with('ephemeral-device')
+
+    def test_determine_block_device_storage(self):
+        _test_devices = {
+            'a': '/dev/bcache0'
+        }
+        self.storage_list.side_effect = _test_devices.keys()
+        self.storage_get.side_effect = lambda _, key: _test_devices.get(key)
+        self.assertEqual(utils.determine_block_device(), '/dev/bcache0')
+        self.config.assert_called_with('ephemeral-device')
+        self.storage_get.assert_called_with('location', 'a')
+        self.storage_list.assert_called_with('ephemeral-device')
+
+    def test_determine_block_device_none(self):
+        self.storage_list.return_value = []
+        self.assertEqual(utils.determine_block_device(), None)
+        self.config.assert_called_with('ephemeral-device')
+        self.storage_list.assert_called_with('ephemeral-device')
+
+    @patch.object(utils, 'uuid')
+    @patch.object(utils, 'determine_block_device')
+    def test_configure_local_ephemeral_storage_encrypted(
+            self,
+            determine_block_device,
+            uuid):
+        determine_block_device.return_value = '/dev/sdb'
+        uuid.uuid4.return_value = 'test'
+
+        mock_context = MagicMock()
+        mock_context.complete = True
+        mock_context.return_value = 'test_context'
+
+        self.test_config.set('encrypt', True)
+        self.vaultlocker.VaultKVContext.return_value = mock_context
+        self.is_block_device.return_value = True
+        self.is_device_mounted.return_value = False
+
+        utils.configure_local_ephemeral_storage()
+
+        self.mkfs_xfs.assert_called_with(
+            '/dev/mapper/crypt-test',
+            force=True
+        )
+        self.check_call.assert_has_calls([
+            call(['vaultlocker', 'encrypt',
+                  '--uuid', 'test', '/dev/sdb']),
+            call(['chown', '-R', 'nova:nova',
+                  '/var/lib/nova/instances']),
+            call(['chmod', '-R', '0755',
+                  '/var/lib/nova/instances'])
+        ])
+        self.mount.assert_called_with(
+            '/dev/mapper/crypt-test',
+            '/var/lib/nova/instances',
+            filesystem='xfs')
+        self.fstab_add.assert_called_with(
+            '/dev/mapper/crypt-test',
+            '/var/lib/nova/instances',
+            'xfs',
+            options='defaults,nofail,'
+                    'x-systemd.requires=vaultlocker-decrypt@test.service,'
+                    'comment=vaultlocker'
+        )
+        self.assertTrue(self.test_kv.get('storage-configured'))
+        self.vaultlocker.write_vaultlocker_conf.assert_called_with(
+            'test_context',
+            priority=80
+        )
+
+    @patch.object(utils, 'uuid')
+    @patch.object(utils, 'determine_block_device')
+    def test_configure_local_ephemeral_storage(self,
+                                               determine_block_device,
+                                               uuid):
+        determine_block_device.return_value = '/dev/sdb'
+        uuid.uuid4.return_value = 'test'
+
+        mock_context = MagicMock()
+        mock_context.complete = False
+        mock_context.return_value = {}
+
+        self.test_config.set('encrypt', False)
+        self.vaultlocker.VaultKVContext.return_value = mock_context
+        self.is_block_device.return_value = True
+        self.is_device_mounted.return_value = False
+
+        utils.configure_local_ephemeral_storage()
+
+        self.mkfs_xfs.assert_called_with(
+            '/dev/sdb',
+            force=True
+        )
+        self.check_call.assert_has_calls([
+            call(['chown', '-R', 'nova:nova',
+                  '/var/lib/nova/instances']),
+            call(['chmod', '-R', '0755',
+                  '/var/lib/nova/instances'])
+        ])
+        self.mount.assert_called_with(
+            '/dev/sdb',
+            '/var/lib/nova/instances',
+            filesystem='xfs')
+        self.fstab_add.assert_called_with(
+            '/dev/sdb',
+            '/var/lib/nova/instances',
+            'xfs',
+            options=None
+        )
+        self.assertTrue(self.test_kv.get('storage-configured'))
+        self.vaultlocker.write_vaultlocker_conf.assert_not_called()
+
+    def test_configure_local_ephemeral_storage_done(self):
+        self.test_kv.set('storage-configured', True)
+
+        mock_context = MagicMock()
+        mock_context.complete = True
+        mock_context.return_value = 'test_context'
+
+        self.test_config.set('encrypt', True)
+        self.vaultlocker.VaultKVContext.return_value = mock_context
+
+        utils.configure_local_ephemeral_storage()
+
+        # NOTE: vaultlocker conf should always be re-written to
+        #       pickup any changes to secret_id over time.
+        self.vaultlocker.write_vaultlocker_conf.assert_called_with(
+            'test_context',
+            priority=80
+        )
+        self.is_block_device.assert_not_called()
@@ -122,6 +122,23 @@ class TestRelation(object):
         return None


+class TestKV(dict):
+
+    def __init__(self):
+        super(TestKV, self).__init__()
+        self.flushed = False
+        self.data = {}
+
+    def get(self, attribute, default=None):
+        return self.data.get(attribute, default)
+
+    def set(self, attribute, value):
+        self.data[attribute] = value
+
+    def flush(self):
+        self.flushed = True
+
+
 @contextmanager
 def patch_open():
     '''Patch open() to allow mocking both open() itself and the file that is