Pre-release charm-helpers sync 17.02
Get each charm up to date with lp:charm-helpers for release testing. Change-Id: I7da8173028ee482fdd84ac08414102314deb5738
This commit is contained in:
parent
93d7a0403a
commit
8f890ac84e
@ -3,3 +3,4 @@ destination: tests/charmhelpers
|
|||||||
include:
|
include:
|
||||||
- contrib.amulet
|
- contrib.amulet
|
||||||
- contrib.openstack.amulet
|
- contrib.openstack.amulet
|
||||||
|
- core
|
||||||
|
@ -424,7 +424,11 @@ def ns_query(address):
|
|||||||
else:
|
else:
|
||||||
return None
|
return None
|
||||||
|
|
||||||
answers = dns.resolver.query(address, rtype)
|
try:
|
||||||
|
answers = dns.resolver.query(address, rtype)
|
||||||
|
except dns.resolver.NXDOMAIN:
|
||||||
|
return None
|
||||||
|
|
||||||
if answers:
|
if answers:
|
||||||
return str(answers[0])
|
return str(answers[0])
|
||||||
return None
|
return None
|
||||||
|
@ -20,6 +20,7 @@ import re
|
|||||||
import six
|
import six
|
||||||
import time
|
import time
|
||||||
import urllib
|
import urllib
|
||||||
|
import urlparse
|
||||||
|
|
||||||
import cinderclient.v1.client as cinder_client
|
import cinderclient.v1.client as cinder_client
|
||||||
import glanceclient.v1.client as glance_client
|
import glanceclient.v1.client as glance_client
|
||||||
@ -37,6 +38,7 @@ import swiftclient
|
|||||||
from charmhelpers.contrib.amulet.utils import (
|
from charmhelpers.contrib.amulet.utils import (
|
||||||
AmuletUtils
|
AmuletUtils
|
||||||
)
|
)
|
||||||
|
from charmhelpers.core.decorators import retry_on_exception
|
||||||
|
|
||||||
DEBUG = logging.DEBUG
|
DEBUG = logging.DEBUG
|
||||||
ERROR = logging.ERROR
|
ERROR = logging.ERROR
|
||||||
@ -303,6 +305,46 @@ class OpenStackAmuletUtils(AmuletUtils):
|
|||||||
self.log.debug('Checking if tenant exists ({})...'.format(tenant))
|
self.log.debug('Checking if tenant exists ({})...'.format(tenant))
|
||||||
return tenant in [t.name for t in keystone.tenants.list()]
|
return tenant in [t.name for t in keystone.tenants.list()]
|
||||||
|
|
||||||
|
@retry_on_exception(5, base_delay=10)
|
||||||
|
def keystone_wait_for_propagation(self, sentry_relation_pairs,
|
||||||
|
api_version):
|
||||||
|
"""Iterate over list of sentry and relation tuples and verify that
|
||||||
|
api_version has the expected value.
|
||||||
|
|
||||||
|
:param sentry_relation_pairs: list of sentry, relation name tuples used
|
||||||
|
for monitoring propagation of relation
|
||||||
|
data
|
||||||
|
:param api_version: api_version to expect in relation data
|
||||||
|
:returns: None if successful. Raise on error.
|
||||||
|
"""
|
||||||
|
for (sentry, relation_name) in sentry_relation_pairs:
|
||||||
|
rel = sentry.relation('identity-service',
|
||||||
|
relation_name)
|
||||||
|
self.log.debug('keystone relation data: {}'.format(rel))
|
||||||
|
if rel['api_version'] != str(api_version):
|
||||||
|
raise Exception("api_version not propagated through relation"
|
||||||
|
" data yet ('{}' != '{}')."
|
||||||
|
"".format(rel['api_version'], api_version))
|
||||||
|
|
||||||
|
def keystone_configure_api_version(self, sentry_relation_pairs, deployment,
|
||||||
|
api_version):
|
||||||
|
"""Configure preferred-api-version of keystone in deployment and
|
||||||
|
monitor provided list of relation objects for propagation
|
||||||
|
before returning to caller.
|
||||||
|
|
||||||
|
:param sentry_relation_pairs: list of sentry, relation tuples used for
|
||||||
|
monitoring propagation of relation data
|
||||||
|
:param deployment: deployment to configure
|
||||||
|
:param api_version: value preferred-api-version will be set to
|
||||||
|
:returns: None if successful. Raise on error.
|
||||||
|
"""
|
||||||
|
self.log.debug("Setting keystone preferred-api-version: '{}'"
|
||||||
|
"".format(api_version))
|
||||||
|
|
||||||
|
config = {'preferred-api-version': api_version}
|
||||||
|
deployment.d.configure('keystone', config)
|
||||||
|
self.keystone_wait_for_propagation(sentry_relation_pairs, api_version)
|
||||||
|
|
||||||
def authenticate_cinder_admin(self, keystone_sentry, username,
|
def authenticate_cinder_admin(self, keystone_sentry, username,
|
||||||
password, tenant):
|
password, tenant):
|
||||||
"""Authenticates admin user with cinder."""
|
"""Authenticates admin user with cinder."""
|
||||||
@ -311,6 +353,37 @@ class OpenStackAmuletUtils(AmuletUtils):
|
|||||||
ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8'))
|
ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8'))
|
||||||
return cinder_client.Client(username, password, tenant, ept)
|
return cinder_client.Client(username, password, tenant, ept)
|
||||||
|
|
||||||
|
def authenticate_keystone(self, keystone_ip, username, password,
|
||||||
|
api_version=False, admin_port=False,
|
||||||
|
user_domain_name=None, domain_name=None,
|
||||||
|
project_domain_name=None, project_name=None):
|
||||||
|
"""Authenticate with Keystone"""
|
||||||
|
self.log.debug('Authenticating with keystone...')
|
||||||
|
port = 5000
|
||||||
|
if admin_port:
|
||||||
|
port = 35357
|
||||||
|
base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'),
|
||||||
|
port)
|
||||||
|
if not api_version or api_version == 2:
|
||||||
|
ep = base_ep + "/v2.0"
|
||||||
|
return keystone_client.Client(username=username, password=password,
|
||||||
|
tenant_name=project_name,
|
||||||
|
auth_url=ep)
|
||||||
|
else:
|
||||||
|
ep = base_ep + "/v3"
|
||||||
|
auth = keystone_id_v3.Password(
|
||||||
|
user_domain_name=user_domain_name,
|
||||||
|
username=username,
|
||||||
|
password=password,
|
||||||
|
domain_name=domain_name,
|
||||||
|
project_domain_name=project_domain_name,
|
||||||
|
project_name=project_name,
|
||||||
|
auth_url=ep
|
||||||
|
)
|
||||||
|
return keystone_client_v3.Client(
|
||||||
|
session=keystone_session.Session(auth=auth)
|
||||||
|
)
|
||||||
|
|
||||||
def authenticate_keystone_admin(self, keystone_sentry, user, password,
|
def authenticate_keystone_admin(self, keystone_sentry, user, password,
|
||||||
tenant=None, api_version=None,
|
tenant=None, api_version=None,
|
||||||
keystone_ip=None):
|
keystone_ip=None):
|
||||||
@ -319,30 +392,28 @@ class OpenStackAmuletUtils(AmuletUtils):
|
|||||||
if not keystone_ip:
|
if not keystone_ip:
|
||||||
keystone_ip = keystone_sentry.info['public-address']
|
keystone_ip = keystone_sentry.info['public-address']
|
||||||
|
|
||||||
base_ep = "http://{}:35357".format(keystone_ip.strip().decode('utf-8'))
|
user_domain_name = None
|
||||||
if not api_version or api_version == 2:
|
domain_name = None
|
||||||
ep = base_ep + "/v2.0"
|
if api_version == 3:
|
||||||
return keystone_client.Client(username=user, password=password,
|
user_domain_name = 'admin_domain'
|
||||||
tenant_name=tenant, auth_url=ep)
|
domain_name = user_domain_name
|
||||||
else:
|
|
||||||
ep = base_ep + "/v3"
|
return self.authenticate_keystone(keystone_ip, user, password,
|
||||||
auth = keystone_id_v3.Password(
|
project_name=tenant,
|
||||||
user_domain_name='admin_domain',
|
api_version=api_version,
|
||||||
username=user,
|
user_domain_name=user_domain_name,
|
||||||
password=password,
|
domain_name=domain_name,
|
||||||
domain_name='admin_domain',
|
admin_port=True)
|
||||||
auth_url=ep,
|
|
||||||
)
|
|
||||||
sess = keystone_session.Session(auth=auth)
|
|
||||||
return keystone_client_v3.Client(session=sess)
|
|
||||||
|
|
||||||
def authenticate_keystone_user(self, keystone, user, password, tenant):
|
def authenticate_keystone_user(self, keystone, user, password, tenant):
|
||||||
"""Authenticates a regular user with the keystone public endpoint."""
|
"""Authenticates a regular user with the keystone public endpoint."""
|
||||||
self.log.debug('Authenticating keystone user ({})...'.format(user))
|
self.log.debug('Authenticating keystone user ({})...'.format(user))
|
||||||
ep = keystone.service_catalog.url_for(service_type='identity',
|
ep = keystone.service_catalog.url_for(service_type='identity',
|
||||||
endpoint_type='publicURL')
|
endpoint_type='publicURL')
|
||||||
return keystone_client.Client(username=user, password=password,
|
keystone_ip = urlparse.urlparse(ep).hostname
|
||||||
tenant_name=tenant, auth_url=ep)
|
|
||||||
|
return self.authenticate_keystone(keystone_ip, user, password,
|
||||||
|
project_name=tenant)
|
||||||
|
|
||||||
def authenticate_glance_admin(self, keystone):
|
def authenticate_glance_admin(self, keystone):
|
||||||
"""Authenticates admin user with glance."""
|
"""Authenticates admin user with glance."""
|
||||||
@ -1133,3 +1204,70 @@ class OpenStackAmuletUtils(AmuletUtils):
|
|||||||
else:
|
else:
|
||||||
msg = 'No message retrieved.'
|
msg = 'No message retrieved.'
|
||||||
amulet.raise_status(amulet.FAIL, msg)
|
amulet.raise_status(amulet.FAIL, msg)
|
||||||
|
|
||||||
|
def validate_memcache(self, sentry_unit, conf, os_release,
|
||||||
|
earliest_release=5, section='keystone_authtoken',
|
||||||
|
check_kvs=None):
|
||||||
|
"""Check Memcache is running and is configured to be used
|
||||||
|
|
||||||
|
Example call from Amulet test:
|
||||||
|
|
||||||
|
def test_110_memcache(self):
|
||||||
|
u.validate_memcache(self.neutron_api_sentry,
|
||||||
|
'/etc/neutron/neutron.conf',
|
||||||
|
self._get_openstack_release())
|
||||||
|
|
||||||
|
:param sentry_unit: sentry unit
|
||||||
|
:param conf: OpenStack config file to check memcache settings
|
||||||
|
:param os_release: Current OpenStack release int code
|
||||||
|
:param earliest_release: Earliest Openstack release to check int code
|
||||||
|
:param section: OpenStack config file section to check
|
||||||
|
:param check_kvs: Dict of settings to check in config file
|
||||||
|
:returns: None
|
||||||
|
"""
|
||||||
|
if os_release < earliest_release:
|
||||||
|
self.log.debug('Skipping memcache checks for deployment. {} <'
|
||||||
|
'mitaka'.format(os_release))
|
||||||
|
return
|
||||||
|
_kvs = check_kvs or {'memcached_servers': 'inet6:[::1]:11211'}
|
||||||
|
self.log.debug('Checking memcached is running')
|
||||||
|
ret = self.validate_services_by_name({sentry_unit: ['memcached']})
|
||||||
|
if ret:
|
||||||
|
amulet.raise_status(amulet.FAIL, msg='Memcache running check'
|
||||||
|
'failed {}'.format(ret))
|
||||||
|
else:
|
||||||
|
self.log.debug('OK')
|
||||||
|
self.log.debug('Checking memcache url is configured in {}'.format(
|
||||||
|
conf))
|
||||||
|
if self.validate_config_data(sentry_unit, conf, section, _kvs):
|
||||||
|
message = "Memcache config error in: {}".format(conf)
|
||||||
|
amulet.raise_status(amulet.FAIL, msg=message)
|
||||||
|
else:
|
||||||
|
self.log.debug('OK')
|
||||||
|
self.log.debug('Checking memcache configuration in '
|
||||||
|
'/etc/memcached.conf')
|
||||||
|
contents = self.file_contents_safe(sentry_unit, '/etc/memcached.conf',
|
||||||
|
fatal=True)
|
||||||
|
ubuntu_release, _ = self.run_cmd_unit(sentry_unit, 'lsb_release -cs')
|
||||||
|
if ubuntu_release <= 'trusty':
|
||||||
|
memcache_listen_addr = 'ip6-localhost'
|
||||||
|
else:
|
||||||
|
memcache_listen_addr = '::1'
|
||||||
|
expected = {
|
||||||
|
'-p': '11211',
|
||||||
|
'-l': memcache_listen_addr}
|
||||||
|
found = []
|
||||||
|
for key, value in expected.items():
|
||||||
|
for line in contents.split('\n'):
|
||||||
|
if line.startswith(key):
|
||||||
|
self.log.debug('Checking {} is set to {}'.format(
|
||||||
|
key,
|
||||||
|
value))
|
||||||
|
assert value == line.split()[-1]
|
||||||
|
self.log.debug(line.split()[-1])
|
||||||
|
found.append(key)
|
||||||
|
if sorted(found) == sorted(expected.keys()):
|
||||||
|
self.log.debug('OK')
|
||||||
|
else:
|
||||||
|
message = "Memcache config error in: /etc/memcached.conf"
|
||||||
|
amulet.raise_status(amulet.FAIL, msg=message)
|
||||||
|
@ -14,6 +14,7 @@
|
|||||||
|
|
||||||
import glob
|
import glob
|
||||||
import json
|
import json
|
||||||
|
import math
|
||||||
import os
|
import os
|
||||||
import re
|
import re
|
||||||
import time
|
import time
|
||||||
@ -90,6 +91,9 @@ from charmhelpers.contrib.network.ip import (
|
|||||||
from charmhelpers.contrib.openstack.utils import (
|
from charmhelpers.contrib.openstack.utils import (
|
||||||
config_flags_parser,
|
config_flags_parser,
|
||||||
get_host_ip,
|
get_host_ip,
|
||||||
|
git_determine_usr_bin,
|
||||||
|
git_determine_python_path,
|
||||||
|
enable_memcache,
|
||||||
)
|
)
|
||||||
from charmhelpers.core.unitdata import kv
|
from charmhelpers.core.unitdata import kv
|
||||||
|
|
||||||
@ -1207,6 +1211,43 @@ class WorkerConfigContext(OSContextGenerator):
|
|||||||
return ctxt
|
return ctxt
|
||||||
|
|
||||||
|
|
||||||
|
class WSGIWorkerConfigContext(WorkerConfigContext):
|
||||||
|
|
||||||
|
def __init__(self, name=None, script=None, admin_script=None,
|
||||||
|
public_script=None, process_weight=1.00,
|
||||||
|
admin_process_weight=0.75, public_process_weight=0.25):
|
||||||
|
self.service_name = name
|
||||||
|
self.user = name
|
||||||
|
self.group = name
|
||||||
|
self.script = script
|
||||||
|
self.admin_script = admin_script
|
||||||
|
self.public_script = public_script
|
||||||
|
self.process_weight = process_weight
|
||||||
|
self.admin_process_weight = admin_process_weight
|
||||||
|
self.public_process_weight = public_process_weight
|
||||||
|
|
||||||
|
def __call__(self):
|
||||||
|
multiplier = config('worker-multiplier') or 1
|
||||||
|
total_processes = self.num_cpus * multiplier
|
||||||
|
ctxt = {
|
||||||
|
"service_name": self.service_name,
|
||||||
|
"user": self.user,
|
||||||
|
"group": self.group,
|
||||||
|
"script": self.script,
|
||||||
|
"admin_script": self.admin_script,
|
||||||
|
"public_script": self.public_script,
|
||||||
|
"processes": int(math.ceil(self.process_weight * total_processes)),
|
||||||
|
"admin_processes": int(math.ceil(self.admin_process_weight *
|
||||||
|
total_processes)),
|
||||||
|
"public_processes": int(math.ceil(self.public_process_weight *
|
||||||
|
total_processes)),
|
||||||
|
"threads": 1,
|
||||||
|
"usr_bin": git_determine_usr_bin(),
|
||||||
|
"python_path": git_determine_python_path(),
|
||||||
|
}
|
||||||
|
return ctxt
|
||||||
|
|
||||||
|
|
||||||
class ZeroMQContext(OSContextGenerator):
|
class ZeroMQContext(OSContextGenerator):
|
||||||
interfaces = ['zeromq-configuration']
|
interfaces = ['zeromq-configuration']
|
||||||
|
|
||||||
@ -1512,3 +1553,36 @@ class AppArmorContext(OSContextGenerator):
|
|||||||
"".format(self.ctxt['aa_profile'],
|
"".format(self.ctxt['aa_profile'],
|
||||||
self.ctxt['aa_profile_mode']))
|
self.ctxt['aa_profile_mode']))
|
||||||
raise e
|
raise e
|
||||||
|
|
||||||
|
|
||||||
|
class MemcacheContext(OSContextGenerator):
|
||||||
|
"""Memcache context
|
||||||
|
|
||||||
|
This context provides options for configuring a local memcache client and
|
||||||
|
server
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, package=None):
|
||||||
|
"""
|
||||||
|
@param package: Package to examine to extrapolate OpenStack release.
|
||||||
|
Used when charms have no openstack-origin config
|
||||||
|
option (ie subordinates)
|
||||||
|
"""
|
||||||
|
self.package = package
|
||||||
|
|
||||||
|
def __call__(self):
|
||||||
|
ctxt = {}
|
||||||
|
ctxt['use_memcache'] = enable_memcache(package=self.package)
|
||||||
|
if ctxt['use_memcache']:
|
||||||
|
# Trusty version of memcached does not support ::1 as a listen
|
||||||
|
# address so use host file entry instead
|
||||||
|
if lsb_release()['DISTRIB_CODENAME'].lower() > 'trusty':
|
||||||
|
ctxt['memcache_server'] = '::1'
|
||||||
|
else:
|
||||||
|
ctxt['memcache_server'] = 'ip6-localhost'
|
||||||
|
ctxt['memcache_server_formatted'] = '[::1]'
|
||||||
|
ctxt['memcache_port'] = '11211'
|
||||||
|
ctxt['memcache_url'] = 'inet6:{}:{}'.format(
|
||||||
|
ctxt['memcache_server_formatted'],
|
||||||
|
ctxt['memcache_port'])
|
||||||
|
return ctxt
|
||||||
|
@ -0,0 +1,53 @@
|
|||||||
|
###############################################################################
|
||||||
|
# [ WARNING ]
|
||||||
|
# memcached configuration file maintained by Juju
|
||||||
|
# local changes may be overwritten.
|
||||||
|
###############################################################################
|
||||||
|
|
||||||
|
# memcached default config file
|
||||||
|
# 2003 - Jay Bonci <jaybonci@debian.org>
|
||||||
|
# This configuration file is read by the start-memcached script provided as
|
||||||
|
# part of the Debian GNU/Linux distribution.
|
||||||
|
|
||||||
|
# Run memcached as a daemon. This command is implied, and is not needed for the
|
||||||
|
# daemon to run. See the README.Debian that comes with this package for more
|
||||||
|
# information.
|
||||||
|
-d
|
||||||
|
|
||||||
|
# Log memcached's output to /var/log/memcached
|
||||||
|
logfile /var/log/memcached.log
|
||||||
|
|
||||||
|
# Be verbose
|
||||||
|
# -v
|
||||||
|
|
||||||
|
# Be even more verbose (print client commands as well)
|
||||||
|
# -vv
|
||||||
|
|
||||||
|
# Start with a cap of 64 megs of memory. It's reasonable, and the daemon default
|
||||||
|
# Note that the daemon will grow to this size, but does not start out holding this much
|
||||||
|
# memory
|
||||||
|
-m 64
|
||||||
|
|
||||||
|
# Default connection port is 11211
|
||||||
|
-p {{ memcache_port }}
|
||||||
|
|
||||||
|
# Run the daemon as root. The start-memcached will default to running as root if no
|
||||||
|
# -u command is present in this config file
|
||||||
|
-u memcache
|
||||||
|
|
||||||
|
# Specify which IP address to listen on. The default is to listen on all IP addresses
|
||||||
|
# This parameter is one of the only security measures that memcached has, so make sure
|
||||||
|
# it's listening on a firewalled interface.
|
||||||
|
-l {{ memcache_server }}
|
||||||
|
|
||||||
|
# Limit the number of simultaneous incoming connections. The daemon default is 1024
|
||||||
|
# -c 1024
|
||||||
|
|
||||||
|
# Lock down all paged memory. Consult with the README and homepage before you do this
|
||||||
|
# -k
|
||||||
|
|
||||||
|
# Return error when memory is exhausted (rather than removing items)
|
||||||
|
# -M
|
||||||
|
|
||||||
|
# Maximize core file limit
|
||||||
|
# -r
|
@ -14,4 +14,7 @@ project_name = {{ admin_tenant_name }}
|
|||||||
username = {{ admin_user }}
|
username = {{ admin_user }}
|
||||||
password = {{ admin_password }}
|
password = {{ admin_password }}
|
||||||
signing_dir = {{ signing_dir }}
|
signing_dir = {{ signing_dir }}
|
||||||
|
{% if use_memcache == true %}
|
||||||
|
memcached_servers = {{ memcache_url }}
|
||||||
|
{% endif -%}
|
||||||
{% endif -%}
|
{% endif -%}
|
||||||
|
@ -0,0 +1,100 @@
|
|||||||
|
# Configuration file maintained by Juju. Local changes may be overwritten.
|
||||||
|
|
||||||
|
{% if port -%}
|
||||||
|
Listen {{ port }}
|
||||||
|
{% endif -%}
|
||||||
|
|
||||||
|
{% if admin_port -%}
|
||||||
|
Listen {{ admin_port }}
|
||||||
|
{% endif -%}
|
||||||
|
|
||||||
|
{% if public_port -%}
|
||||||
|
Listen {{ public_port }}
|
||||||
|
{% endif -%}
|
||||||
|
|
||||||
|
{% if port -%}
|
||||||
|
<VirtualHost *:{{ port }}>
|
||||||
|
WSGIDaemonProcess {{ service_name }} processes={{ processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \
|
||||||
|
{% if python_path -%}
|
||||||
|
python-path={{ python_path }} \
|
||||||
|
{% endif -%}
|
||||||
|
display-name=%{GROUP}
|
||||||
|
WSGIProcessGroup {{ service_name }}
|
||||||
|
WSGIScriptAlias / {{ script }}
|
||||||
|
WSGIApplicationGroup %{GLOBAL}
|
||||||
|
WSGIPassAuthorization On
|
||||||
|
<IfVersion >= 2.4>
|
||||||
|
ErrorLogFormat "%{cu}t %M"
|
||||||
|
</IfVersion>
|
||||||
|
ErrorLog /var/log/apache2/{{ service_name }}_error.log
|
||||||
|
CustomLog /var/log/apache2/{{ service_name }}_access.log combined
|
||||||
|
|
||||||
|
<Directory {{ usr_bin }}>
|
||||||
|
<IfVersion >= 2.4>
|
||||||
|
Require all granted
|
||||||
|
</IfVersion>
|
||||||
|
<IfVersion < 2.4>
|
||||||
|
Order allow,deny
|
||||||
|
Allow from all
|
||||||
|
</IfVersion>
|
||||||
|
</Directory>
|
||||||
|
</VirtualHost>
|
||||||
|
{% endif -%}
|
||||||
|
|
||||||
|
{% if admin_port -%}
|
||||||
|
<VirtualHost *:{{ admin_port }}>
|
||||||
|
WSGIDaemonProcess {{ service_name }}-admin processes={{ admin_processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \
|
||||||
|
{% if python_path -%}
|
||||||
|
python-path={{ python_path }} \
|
||||||
|
{% endif -%}
|
||||||
|
display-name=%{GROUP}
|
||||||
|
WSGIProcessGroup {{ service_name }}-admin
|
||||||
|
WSGIScriptAlias / {{ admin_script }}
|
||||||
|
WSGIApplicationGroup %{GLOBAL}
|
||||||
|
WSGIPassAuthorization On
|
||||||
|
<IfVersion >= 2.4>
|
||||||
|
ErrorLogFormat "%{cu}t %M"
|
||||||
|
</IfVersion>
|
||||||
|
ErrorLog /var/log/apache2/{{ service_name }}_error.log
|
||||||
|
CustomLog /var/log/apache2/{{ service_name }}_access.log combined
|
||||||
|
|
||||||
|
<Directory {{ usr_bin }}>
|
||||||
|
<IfVersion >= 2.4>
|
||||||
|
Require all granted
|
||||||
|
</IfVersion>
|
||||||
|
<IfVersion < 2.4>
|
||||||
|
Order allow,deny
|
||||||
|
Allow from all
|
||||||
|
</IfVersion>
|
||||||
|
</Directory>
|
||||||
|
</VirtualHost>
|
||||||
|
{% endif -%}
|
||||||
|
|
||||||
|
{% if public_port -%}
|
||||||
|
<VirtualHost *:{{ public_port }}>
|
||||||
|
WSGIDaemonProcess {{ service_name }}-public processes={{ public_processes }} threads={{ threads }} user={{ service_name }} group={{ service_name }} \
|
||||||
|
{% if python_path -%}
|
||||||
|
python-path={{ python_path }} \
|
||||||
|
{% endif -%}
|
||||||
|
display-name=%{GROUP}
|
||||||
|
WSGIProcessGroup {{ service_name }}-public
|
||||||
|
WSGIScriptAlias / {{ public_script }}
|
||||||
|
WSGIApplicationGroup %{GLOBAL}
|
||||||
|
WSGIPassAuthorization On
|
||||||
|
<IfVersion >= 2.4>
|
||||||
|
ErrorLogFormat "%{cu}t %M"
|
||||||
|
</IfVersion>
|
||||||
|
ErrorLog /var/log/apache2/{{ service_name }}_error.log
|
||||||
|
CustomLog /var/log/apache2/{{ service_name }}_access.log combined
|
||||||
|
|
||||||
|
<Directory {{ usr_bin }}>
|
||||||
|
<IfVersion >= 2.4>
|
||||||
|
Require all granted
|
||||||
|
</IfVersion>
|
||||||
|
<IfVersion < 2.4>
|
||||||
|
Order allow,deny
|
||||||
|
Allow from all
|
||||||
|
</IfVersion>
|
||||||
|
</Directory>
|
||||||
|
</VirtualHost>
|
||||||
|
{% endif -%}
|
@ -153,7 +153,7 @@ SWIFT_CODENAMES = OrderedDict([
|
|||||||
('newton',
|
('newton',
|
||||||
['2.8.0', '2.9.0', '2.10.0']),
|
['2.8.0', '2.9.0', '2.10.0']),
|
||||||
('ocata',
|
('ocata',
|
||||||
['2.11.0']),
|
['2.11.0', '2.12.0']),
|
||||||
])
|
])
|
||||||
|
|
||||||
# >= Liberty version->codename mapping
|
# >= Liberty version->codename mapping
|
||||||
@ -549,9 +549,9 @@ def configure_installation_source(rel):
|
|||||||
'newton': 'xenial-updates/newton',
|
'newton': 'xenial-updates/newton',
|
||||||
'newton/updates': 'xenial-updates/newton',
|
'newton/updates': 'xenial-updates/newton',
|
||||||
'newton/proposed': 'xenial-proposed/newton',
|
'newton/proposed': 'xenial-proposed/newton',
|
||||||
'zesty': 'zesty-updates/ocata',
|
'ocata': 'xenial-updates/ocata',
|
||||||
'zesty/updates': 'xenial-updates/ocata',
|
'ocata/updates': 'xenial-updates/ocata',
|
||||||
'zesty/proposed': 'xenial-proposed/ocata',
|
'ocata/proposed': 'xenial-proposed/ocata',
|
||||||
}
|
}
|
||||||
|
|
||||||
try:
|
try:
|
||||||
@ -1119,6 +1119,35 @@ def git_generate_systemd_init_files(templates_dir):
|
|||||||
shutil.copyfile(service_source, service_dest)
|
shutil.copyfile(service_source, service_dest)
|
||||||
|
|
||||||
|
|
||||||
|
def git_determine_usr_bin():
|
||||||
|
"""Return the /usr/bin path for Apache2 config.
|
||||||
|
|
||||||
|
The /usr/bin path will be located in the virtualenv if the charm
|
||||||
|
is configured to deploy from source.
|
||||||
|
"""
|
||||||
|
if git_install_requested():
|
||||||
|
projects_yaml = config('openstack-origin-git')
|
||||||
|
projects_yaml = git_default_repos(projects_yaml)
|
||||||
|
return os.path.join(git_pip_venv_dir(projects_yaml), 'bin')
|
||||||
|
else:
|
||||||
|
return '/usr/bin'
|
||||||
|
|
||||||
|
|
||||||
|
def git_determine_python_path():
|
||||||
|
"""Return the python-path for Apache2 config.
|
||||||
|
|
||||||
|
Returns 'None' unless the charm is configured to deploy from source,
|
||||||
|
in which case the path of the virtualenv's site-packages is returned.
|
||||||
|
"""
|
||||||
|
if git_install_requested():
|
||||||
|
projects_yaml = config('openstack-origin-git')
|
||||||
|
projects_yaml = git_default_repos(projects_yaml)
|
||||||
|
return os.path.join(git_pip_venv_dir(projects_yaml),
|
||||||
|
'lib/python2.7/site-packages')
|
||||||
|
else:
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
def os_workload_status(configs, required_interfaces, charm_func=None):
|
def os_workload_status(configs, required_interfaces, charm_func=None):
|
||||||
"""
|
"""
|
||||||
Decorator to set workload status based on complete contexts
|
Decorator to set workload status based on complete contexts
|
||||||
@ -1925,3 +1954,36 @@ def os_application_version_set(package):
|
|||||||
application_version_set(os_release(package))
|
application_version_set(os_release(package))
|
||||||
else:
|
else:
|
||||||
application_version_set(application_version)
|
application_version_set(application_version)
|
||||||
|
|
||||||
|
|
||||||
|
def enable_memcache(source=None, release=None, package=None):
|
||||||
|
"""Determine if memcache should be enabled on the local unit
|
||||||
|
|
||||||
|
@param release: release of OpenStack currently deployed
|
||||||
|
@param package: package to derive OpenStack version deployed
|
||||||
|
@returns boolean Whether memcache should be enabled
|
||||||
|
"""
|
||||||
|
_release = None
|
||||||
|
if release:
|
||||||
|
_release = release
|
||||||
|
else:
|
||||||
|
_release = os_release(package, base='icehouse')
|
||||||
|
if not _release:
|
||||||
|
_release = get_os_codename_install_source(source)
|
||||||
|
|
||||||
|
# TODO: this should be changed to a numeric comparison using a known list
|
||||||
|
# of releases and comparing by index.
|
||||||
|
return _release >= 'mitaka'
|
||||||
|
|
||||||
|
|
||||||
|
def token_cache_pkgs(source=None, release=None):
|
||||||
|
"""Determine additional packages needed for token caching
|
||||||
|
|
||||||
|
@param source: source string for charm
|
||||||
|
@param release: release of OpenStack currently deployed
|
||||||
|
@returns List of package to enable token caching
|
||||||
|
"""
|
||||||
|
packages = []
|
||||||
|
if enable_memcache(source=source, release=release):
|
||||||
|
packages.extend(['memcached', 'python-memcache'])
|
||||||
|
return packages
|
||||||
|
@ -40,6 +40,7 @@ from subprocess import (
|
|||||||
)
|
)
|
||||||
from charmhelpers.core.hookenv import (
|
from charmhelpers.core.hookenv import (
|
||||||
config,
|
config,
|
||||||
|
service_name,
|
||||||
local_unit,
|
local_unit,
|
||||||
relation_get,
|
relation_get,
|
||||||
relation_ids,
|
relation_ids,
|
||||||
@ -1043,8 +1044,18 @@ class CephBrokerRq(object):
|
|||||||
self.request_id = str(uuid.uuid1())
|
self.request_id = str(uuid.uuid1())
|
||||||
self.ops = []
|
self.ops = []
|
||||||
|
|
||||||
|
def add_op_request_access_to_group(self, name, namespace=None,
|
||||||
|
permission=None, key_name=None):
|
||||||
|
"""
|
||||||
|
Adds the requested permissions to the current service's Ceph key,
|
||||||
|
allowing the key to access only the specified pools
|
||||||
|
"""
|
||||||
|
self.ops.append({'op': 'add-permissions-to-key', 'group': name,
|
||||||
|
'namespace': namespace, 'name': key_name or service_name(),
|
||||||
|
'group-permission': permission})
|
||||||
|
|
||||||
def add_op_create_pool(self, name, replica_count=3, pg_num=None,
|
def add_op_create_pool(self, name, replica_count=3, pg_num=None,
|
||||||
weight=None):
|
weight=None, group=None, namespace=None):
|
||||||
"""Adds an operation to create a pool.
|
"""Adds an operation to create a pool.
|
||||||
|
|
||||||
@param pg_num setting: optional setting. If not provided, this value
|
@param pg_num setting: optional setting. If not provided, this value
|
||||||
@ -1058,7 +1069,8 @@ class CephBrokerRq(object):
|
|||||||
|
|
||||||
self.ops.append({'op': 'create-pool', 'name': name,
|
self.ops.append({'op': 'create-pool', 'name': name,
|
||||||
'replicas': replica_count, 'pg_num': pg_num,
|
'replicas': replica_count, 'pg_num': pg_num,
|
||||||
'weight': weight})
|
'weight': weight, 'group': group,
|
||||||
|
'group-namespace': namespace})
|
||||||
|
|
||||||
def set_ops(self, ops):
|
def set_ops(self, ops):
|
||||||
"""Set request ops to provided value.
|
"""Set request ops to provided value.
|
||||||
|
@ -616,6 +616,20 @@ def close_port(port, protocol="TCP"):
|
|||||||
subprocess.check_call(_args)
|
subprocess.check_call(_args)
|
||||||
|
|
||||||
|
|
||||||
|
def open_ports(start, end, protocol="TCP"):
|
||||||
|
"""Opens a range of service network ports"""
|
||||||
|
_args = ['open-port']
|
||||||
|
_args.append('{}-{}/{}'.format(start, end, protocol))
|
||||||
|
subprocess.check_call(_args)
|
||||||
|
|
||||||
|
|
||||||
|
def close_ports(start, end, protocol="TCP"):
|
||||||
|
"""Close a range of service network ports"""
|
||||||
|
_args = ['close-port']
|
||||||
|
_args.append('{}-{}/{}'.format(start, end, protocol))
|
||||||
|
subprocess.check_call(_args)
|
||||||
|
|
||||||
|
|
||||||
@cached
|
@cached
|
||||||
def unit_get(attribute):
|
def unit_get(attribute):
|
||||||
"""Get the unit ID for the remote unit"""
|
"""Get the unit ID for the remote unit"""
|
||||||
@ -1021,3 +1035,34 @@ def network_get_primary_address(binding):
|
|||||||
'''
|
'''
|
||||||
cmd = ['network-get', '--primary-address', binding]
|
cmd = ['network-get', '--primary-address', binding]
|
||||||
return subprocess.check_output(cmd).decode('UTF-8').strip()
|
return subprocess.check_output(cmd).decode('UTF-8').strip()
|
||||||
|
|
||||||
|
|
||||||
|
def add_metric(*args, **kwargs):
|
||||||
|
"""Add metric values. Values may be expressed with keyword arguments. For
|
||||||
|
metric names containing dashes, these may be expressed as one or more
|
||||||
|
'key=value' positional arguments. May only be called from the collect-metrics
|
||||||
|
hook."""
|
||||||
|
_args = ['add-metric']
|
||||||
|
_kvpairs = []
|
||||||
|
_kvpairs.extend(args)
|
||||||
|
_kvpairs.extend(['{}={}'.format(k, v) for k, v in kwargs.items()])
|
||||||
|
_args.extend(sorted(_kvpairs))
|
||||||
|
try:
|
||||||
|
subprocess.check_call(_args)
|
||||||
|
return
|
||||||
|
except EnvironmentError as e:
|
||||||
|
if e.errno != errno.ENOENT:
|
||||||
|
raise
|
||||||
|
log_message = 'add-metric failed: {}'.format(' '.join(_kvpairs))
|
||||||
|
log(log_message, level='INFO')
|
||||||
|
|
||||||
|
|
||||||
|
def meter_status():
|
||||||
|
"""Get the meter status, if running in the meter-status-changed hook."""
|
||||||
|
return os.environ.get('JUJU_METER_STATUS')
|
||||||
|
|
||||||
|
|
||||||
|
def meter_info():
|
||||||
|
"""Get the meter status information, if running in the meter-status-changed
|
||||||
|
hook."""
|
||||||
|
return os.environ.get('JUJU_METER_INFO')
|
||||||
|
@ -54,38 +54,138 @@ elif __platform__ == "centos":
|
|||||||
cmp_pkgrevno,
|
cmp_pkgrevno,
|
||||||
) # flake8: noqa -- ignore F401 for this import
|
) # flake8: noqa -- ignore F401 for this import
|
||||||
|
|
||||||
|
UPDATEDB_PATH = '/etc/updatedb.conf'
|
||||||
|
|
||||||
def service_start(service_name):
|
def service_start(service_name, **kwargs):
|
||||||
"""Start a system service"""
|
"""Start a system service.
|
||||||
return service('start', service_name)
|
|
||||||
|
The specified service name is managed via the system level init system.
|
||||||
|
Some init systems (e.g. upstart) require that additional arguments be
|
||||||
|
provided in order to directly control service instances whereas other init
|
||||||
|
systems allow for addressing instances of a service directly by name (e.g.
|
||||||
|
systemd).
|
||||||
|
|
||||||
|
The kwargs allow for the additional parameters to be passed to underlying
|
||||||
|
init systems for those systems which require/allow for them. For example,
|
||||||
|
the ceph-osd upstart script requires the id parameter to be passed along
|
||||||
|
in order to identify which running daemon should be reloaded. The follow-
|
||||||
|
ing example stops the ceph-osd service for instance id=4:
|
||||||
|
|
||||||
|
service_stop('ceph-osd', id=4)
|
||||||
|
|
||||||
|
:param service_name: the name of the service to stop
|
||||||
|
:param **kwargs: additional parameters to pass to the init system when
|
||||||
|
managing services. These will be passed as key=value
|
||||||
|
parameters to the init system's commandline. kwargs
|
||||||
|
are ignored for systemd enabled systems.
|
||||||
|
"""
|
||||||
|
return service('start', service_name, **kwargs)
|
||||||
|
|
||||||
|
|
||||||
def service_stop(service_name):
|
def service_stop(service_name, **kwargs):
|
||||||
"""Stop a system service"""
|
"""Stop a system service.
|
||||||
return service('stop', service_name)
|
|
||||||
|
The specified service name is managed via the system level init system.
|
||||||
|
Some init systems (e.g. upstart) require that additional arguments be
|
||||||
|
provided in order to directly control service instances whereas other init
|
||||||
|
systems allow for addressing instances of a service directly by name (e.g.
|
||||||
|
systemd).
|
||||||
|
|
||||||
|
The kwargs allow for the additional parameters to be passed to underlying
|
||||||
|
init systems for those systems which require/allow for them. For example,
|
||||||
|
the ceph-osd upstart script requires the id parameter to be passed along
|
||||||
|
in order to identify which running daemon should be reloaded. The follow-
|
||||||
|
ing example stops the ceph-osd service for instance id=4:
|
||||||
|
|
||||||
|
service_stop('ceph-osd', id=4)
|
||||||
|
|
||||||
|
:param service_name: the name of the service to stop
|
||||||
|
:param **kwargs: additional parameters to pass to the init system when
|
||||||
|
managing services. These will be passed as key=value
|
||||||
|
parameters to the init system's commandline. kwargs
|
||||||
|
are ignored for systemd enabled systems.
|
||||||
|
"""
|
||||||
|
return service('stop', service_name, **kwargs)
|
||||||
|
|
||||||
|
|
||||||
def service_restart(service_name):
|
def service_restart(service_name, **kwargs):
|
||||||
"""Restart a system service"""
|
"""Restart a system service.
|
||||||
|
|
||||||
|
The specified service name is managed via the system level init system.
|
||||||
|
Some init systems (e.g. upstart) require that additional arguments be
|
||||||
|
provided in order to directly control service instances whereas other init
|
||||||
|
systems allow for addressing instances of a service directly by name (e.g.
|
||||||
|
systemd).
|
||||||
|
|
||||||
|
The kwargs allow for the additional parameters to be passed to underlying
|
||||||
|
init systems for those systems which require/allow for them. For example,
|
||||||
|
the ceph-osd upstart script requires the id parameter to be passed along
|
||||||
|
in order to identify which running daemon should be restarted. The follow-
|
||||||
|
ing example restarts the ceph-osd service for instance id=4:
|
||||||
|
|
||||||
|
service_restart('ceph-osd', id=4)
|
||||||
|
|
||||||
|
:param service_name: the name of the service to restart
|
||||||
|
:param **kwargs: additional parameters to pass to the init system when
|
||||||
|
managing services. These will be passed as key=value
|
||||||
|
parameters to the init system's commandline. kwargs
|
||||||
|
are ignored for init systems not allowing additional
|
||||||
|
parameters via the commandline (systemd).
|
||||||
|
"""
|
||||||
return service('restart', service_name)
|
return service('restart', service_name)
|
||||||
|
|
||||||
|
|
||||||
def service_reload(service_name, restart_on_failure=False):
|
def service_reload(service_name, restart_on_failure=False, **kwargs):
|
||||||
"""Reload a system service, optionally falling back to restart if
|
"""Reload a system service, optionally falling back to restart if
|
||||||
reload fails"""
|
reload fails.
|
||||||
service_result = service('reload', service_name)
|
|
||||||
|
The specified service name is managed via the system level init system.
|
||||||
|
Some init systems (e.g. upstart) require that additional arguments be
|
||||||
|
provided in order to directly control service instances whereas other init
|
||||||
|
systems allow for addressing instances of a service directly by name (e.g.
|
||||||
|
systemd).
|
||||||
|
|
||||||
|
The kwargs allow for the additional parameters to be passed to underlying
|
||||||
|
init systems for those systems which require/allow for them. For example,
|
||||||
|
the ceph-osd upstart script requires the id parameter to be passed along
|
||||||
|
in order to identify which running daemon should be reloaded. The follow-
|
||||||
|
ing example restarts the ceph-osd service for instance id=4:
|
||||||
|
|
||||||
|
service_reload('ceph-osd', id=4)
|
||||||
|
|
||||||
|
:param service_name: the name of the service to reload
|
||||||
|
:param restart_on_failure: boolean indicating whether to fallback to a
|
||||||
|
restart if the reload fails.
|
||||||
|
:param **kwargs: additional parameters to pass to the init system when
|
||||||
|
managing services. These will be passed as key=value
|
||||||
|
parameters to the init system's commandline. kwargs
|
||||||
|
are ignored for init systems not allowing additional
|
||||||
|
parameters via the commandline (systemd).
|
||||||
|
"""
|
||||||
|
service_result = service('reload', service_name, **kwargs)
|
||||||
if not service_result and restart_on_failure:
|
if not service_result and restart_on_failure:
|
||||||
service_result = service('restart', service_name)
|
service_result = service('restart', service_name, **kwargs)
|
||||||
return service_result
|
return service_result
|
||||||
|
|
||||||
|
|
||||||
def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"):
|
def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d",
|
||||||
|
**kwargs):
|
||||||
"""Pause a system service.
|
"""Pause a system service.
|
||||||
|
|
||||||
Stop it, and prevent it from starting again at boot."""
|
Stop it, and prevent it from starting again at boot.
|
||||||
|
|
||||||
|
:param service_name: the name of the service to pause
|
||||||
|
:param init_dir: path to the upstart init directory
|
||||||
|
:param initd_dir: path to the sysv init directory
|
||||||
|
:param **kwargs: additional parameters to pass to the init system when
|
||||||
|
managing services. These will be passed as key=value
|
||||||
|
parameters to the init system's commandline. kwargs
|
||||||
|
are ignored for init systems which do not support
|
||||||
|
key=value arguments via the commandline.
|
||||||
|
"""
|
||||||
stopped = True
|
stopped = True
|
||||||
if service_running(service_name):
|
if service_running(service_name, **kwargs):
|
||||||
stopped = service_stop(service_name)
|
stopped = service_stop(service_name, **kwargs)
|
||||||
upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
|
upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
|
||||||
sysv_file = os.path.join(initd_dir, service_name)
|
sysv_file = os.path.join(initd_dir, service_name)
|
||||||
if init_is_systemd():
|
if init_is_systemd():
|
||||||
@ -106,10 +206,19 @@ def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d"):
|
|||||||
|
|
||||||
|
|
||||||
def service_resume(service_name, init_dir="/etc/init",
|
def service_resume(service_name, init_dir="/etc/init",
|
||||||
initd_dir="/etc/init.d"):
|
initd_dir="/etc/init.d", **kwargs):
|
||||||
"""Resume a system service.
|
"""Resume a system service.
|
||||||
|
|
||||||
Reenable starting again at boot. Start the service"""
|
Reenable starting again at boot. Start the service.
|
||||||
|
|
||||||
|
:param service_name: the name of the service to resume
|
||||||
|
:param init_dir: the path to the init dir
|
||||||
|
:param initd dir: the path to the initd dir
|
||||||
|
:param **kwargs: additional parameters to pass to the init system when
|
||||||
|
managing services. These will be passed as key=value
|
||||||
|
parameters to the init system's commandline. kwargs
|
||||||
|
are ignored for systemd enabled systems.
|
||||||
|
"""
|
||||||
upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
|
upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
|
||||||
sysv_file = os.path.join(initd_dir, service_name)
|
sysv_file = os.path.join(initd_dir, service_name)
|
||||||
if init_is_systemd():
|
if init_is_systemd():
|
||||||
@ -126,19 +235,28 @@ def service_resume(service_name, init_dir="/etc/init",
|
|||||||
"Unable to detect {0} as SystemD, Upstart {1} or"
|
"Unable to detect {0} as SystemD, Upstart {1} or"
|
||||||
" SysV {2}".format(
|
" SysV {2}".format(
|
||||||
service_name, upstart_file, sysv_file))
|
service_name, upstart_file, sysv_file))
|
||||||
|
started = service_running(service_name, **kwargs)
|
||||||
|
|
||||||
started = service_running(service_name)
|
|
||||||
if not started:
|
if not started:
|
||||||
started = service_start(service_name)
|
started = service_start(service_name, **kwargs)
|
||||||
return started
|
return started
|
||||||
|
|
||||||
|
|
||||||
def service(action, service_name):
|
def service(action, service_name, **kwargs):
|
||||||
"""Control a system service"""
|
"""Control a system service.
|
||||||
|
|
||||||
|
:param action: the action to take on the service
|
||||||
|
:param service_name: the name of the service to perform th action on
|
||||||
|
:param **kwargs: additional params to be passed to the service command in
|
||||||
|
the form of key=value.
|
||||||
|
"""
|
||||||
if init_is_systemd():
|
if init_is_systemd():
|
||||||
cmd = ['systemctl', action, service_name]
|
cmd = ['systemctl', action, service_name]
|
||||||
else:
|
else:
|
||||||
cmd = ['service', service_name, action]
|
cmd = ['service', service_name, action]
|
||||||
|
for key, value in six.iteritems(kwargs):
|
||||||
|
parameter = '%s=%s' % (key, value)
|
||||||
|
cmd.append(parameter)
|
||||||
return subprocess.call(cmd) == 0
|
return subprocess.call(cmd) == 0
|
||||||
|
|
||||||
|
|
||||||
@ -146,15 +264,26 @@ _UPSTART_CONF = "/etc/init/{}.conf"
|
|||||||
_INIT_D_CONF = "/etc/init.d/{}"
|
_INIT_D_CONF = "/etc/init.d/{}"
|
||||||
|
|
||||||
|
|
||||||
def service_running(service_name):
|
def service_running(service_name, **kwargs):
|
||||||
"""Determine whether a system service is running"""
|
"""Determine whether a system service is running.
|
||||||
|
|
||||||
|
:param service_name: the name of the service
|
||||||
|
:param **kwargs: additional args to pass to the service command. This is
|
||||||
|
used to pass additional key=value arguments to the
|
||||||
|
service command line for managing specific instance
|
||||||
|
units (e.g. service ceph-osd status id=2). The kwargs
|
||||||
|
are ignored in systemd services.
|
||||||
|
"""
|
||||||
if init_is_systemd():
|
if init_is_systemd():
|
||||||
return service('is-active', service_name)
|
return service('is-active', service_name)
|
||||||
else:
|
else:
|
||||||
if os.path.exists(_UPSTART_CONF.format(service_name)):
|
if os.path.exists(_UPSTART_CONF.format(service_name)):
|
||||||
try:
|
try:
|
||||||
output = subprocess.check_output(
|
cmd = ['status', service_name]
|
||||||
['status', service_name],
|
for key, value in six.iteritems(kwargs):
|
||||||
|
parameter = '%s=%s' % (key, value)
|
||||||
|
cmd.append(parameter)
|
||||||
|
output = subprocess.check_output(cmd,
|
||||||
stderr=subprocess.STDOUT).decode('UTF-8')
|
stderr=subprocess.STDOUT).decode('UTF-8')
|
||||||
except subprocess.CalledProcessError:
|
except subprocess.CalledProcessError:
|
||||||
return False
|
return False
|
||||||
@ -306,15 +435,17 @@ def add_user_to_group(username, group):
|
|||||||
subprocess.check_call(cmd)
|
subprocess.check_call(cmd)
|
||||||
|
|
||||||
|
|
||||||
def rsync(from_path, to_path, flags='-r', options=None):
|
def rsync(from_path, to_path, flags='-r', options=None, timeout=None):
|
||||||
"""Replicate the contents of a path"""
|
"""Replicate the contents of a path"""
|
||||||
options = options or ['--delete', '--executability']
|
options = options or ['--delete', '--executability']
|
||||||
cmd = ['/usr/bin/rsync', flags]
|
cmd = ['/usr/bin/rsync', flags]
|
||||||
|
if timeout:
|
||||||
|
cmd = ['timeout', str(timeout)] + cmd
|
||||||
cmd.extend(options)
|
cmd.extend(options)
|
||||||
cmd.append(from_path)
|
cmd.append(from_path)
|
||||||
cmd.append(to_path)
|
cmd.append(to_path)
|
||||||
log(" ".join(cmd))
|
log(" ".join(cmd))
|
||||||
return subprocess.check_output(cmd).decode('UTF-8').strip()
|
return subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('UTF-8').strip()
|
||||||
|
|
||||||
|
|
||||||
def symlink(source, destination):
|
def symlink(source, destination):
|
||||||
@ -684,7 +815,7 @@ def chownr(path, owner, group, follow_links=True, chowntopdir=False):
|
|||||||
:param str path: The string path to start changing ownership.
|
:param str path: The string path to start changing ownership.
|
||||||
:param str owner: The owner string to use when looking up the uid.
|
:param str owner: The owner string to use when looking up the uid.
|
||||||
:param str group: The group string to use when looking up the gid.
|
:param str group: The group string to use when looking up the gid.
|
||||||
:param bool follow_links: Also Chown links if True
|
:param bool follow_links: Also follow and chown links if True
|
||||||
:param bool chowntopdir: Also chown path itself if True
|
:param bool chowntopdir: Also chown path itself if True
|
||||||
"""
|
"""
|
||||||
uid = pwd.getpwnam(owner).pw_uid
|
uid = pwd.getpwnam(owner).pw_uid
|
||||||
@ -698,7 +829,7 @@ def chownr(path, owner, group, follow_links=True, chowntopdir=False):
|
|||||||
broken_symlink = os.path.lexists(path) and not os.path.exists(path)
|
broken_symlink = os.path.lexists(path) and not os.path.exists(path)
|
||||||
if not broken_symlink:
|
if not broken_symlink:
|
||||||
chown(path, uid, gid)
|
chown(path, uid, gid)
|
||||||
for root, dirs, files in os.walk(path):
|
for root, dirs, files in os.walk(path, followlinks=follow_links):
|
||||||
for name in dirs + files:
|
for name in dirs + files:
|
||||||
full = os.path.join(root, name)
|
full = os.path.join(root, name)
|
||||||
broken_symlink = os.path.lexists(full) and not os.path.exists(full)
|
broken_symlink = os.path.lexists(full) and not os.path.exists(full)
|
||||||
@ -718,6 +849,20 @@ def lchownr(path, owner, group):
|
|||||||
chownr(path, owner, group, follow_links=False)
|
chownr(path, owner, group, follow_links=False)
|
||||||
|
|
||||||
|
|
||||||
|
def owner(path):
|
||||||
|
"""Returns a tuple containing the username & groupname owning the path.
|
||||||
|
|
||||||
|
:param str path: the string path to retrieve the ownership
|
||||||
|
:return tuple(str, str): A (username, groupname) tuple containing the
|
||||||
|
name of the user and group owning the path.
|
||||||
|
:raises OSError: if the specified path does not exist
|
||||||
|
"""
|
||||||
|
stat = os.stat(path)
|
||||||
|
username = pwd.getpwuid(stat.st_uid)[0]
|
||||||
|
groupname = grp.getgrgid(stat.st_gid)[0]
|
||||||
|
return username, groupname
|
||||||
|
|
||||||
|
|
||||||
def get_total_ram():
|
def get_total_ram():
|
||||||
"""The total amount of system RAM in bytes.
|
"""The total amount of system RAM in bytes.
|
||||||
|
|
||||||
@ -749,3 +894,25 @@ def is_container():
|
|||||||
else:
|
else:
|
||||||
# Detect using upstart container file marker
|
# Detect using upstart container file marker
|
||||||
return os.path.exists(UPSTART_CONTAINER_TYPE)
|
return os.path.exists(UPSTART_CONTAINER_TYPE)
|
||||||
|
|
||||||
|
|
||||||
|
def add_to_updatedb_prunepath(path, updatedb_path=UPDATEDB_PATH):
|
||||||
|
with open(updatedb_path, 'r+') as f_id:
|
||||||
|
updatedb_text = f_id.read()
|
||||||
|
output = updatedb(updatedb_text, path)
|
||||||
|
f_id.seek(0)
|
||||||
|
f_id.write(output)
|
||||||
|
f_id.truncate()
|
||||||
|
|
||||||
|
|
||||||
|
def updatedb(updatedb_text, new_path):
|
||||||
|
lines = [line for line in updatedb_text.split("\n")]
|
||||||
|
for i, line in enumerate(lines):
|
||||||
|
if line.startswith("PRUNEPATHS="):
|
||||||
|
paths_line = line.split("=")[1].replace('"', '')
|
||||||
|
paths = paths_line.split(" ")
|
||||||
|
if new_path not in paths:
|
||||||
|
paths.append(new_path)
|
||||||
|
lines[i] = 'PRUNEPATHS="{}"'.format(' '.join(paths))
|
||||||
|
output = "\n".join(lines)
|
||||||
|
return output
|
||||||
|
@ -8,12 +8,18 @@ def get_platform():
|
|||||||
will be returned (which is the name of the module).
|
will be returned (which is the name of the module).
|
||||||
This string is used to decide which platform module should be imported.
|
This string is used to decide which platform module should be imported.
|
||||||
"""
|
"""
|
||||||
|
# linux_distribution is deprecated and will be removed in Python 3.7
|
||||||
|
# Warings *not* disabled, as we certainly need to fix this.
|
||||||
tuple_platform = platform.linux_distribution()
|
tuple_platform = platform.linux_distribution()
|
||||||
current_platform = tuple_platform[0]
|
current_platform = tuple_platform[0]
|
||||||
if "Ubuntu" in current_platform:
|
if "Ubuntu" in current_platform:
|
||||||
return "ubuntu"
|
return "ubuntu"
|
||||||
elif "CentOS" in current_platform:
|
elif "CentOS" in current_platform:
|
||||||
return "centos"
|
return "centos"
|
||||||
|
elif "debian" in current_platform:
|
||||||
|
# Stock Python does not detect Ubuntu and instead returns debian.
|
||||||
|
# Or at least it does in some build environments like Travis CI
|
||||||
|
return "ubuntu"
|
||||||
else:
|
else:
|
||||||
raise RuntimeError("This module is not supported on {}."
|
raise RuntimeError("This module is not supported on {}."
|
||||||
.format(current_platform))
|
.format(current_platform))
|
||||||
|
@ -148,7 +148,8 @@ class AmuletUtils(object):
|
|||||||
|
|
||||||
for service_name in services_list:
|
for service_name in services_list:
|
||||||
if (self.ubuntu_releases.index(release) >= systemd_switch or
|
if (self.ubuntu_releases.index(release) >= systemd_switch or
|
||||||
service_name in ['rabbitmq-server', 'apache2']):
|
service_name in ['rabbitmq-server', 'apache2',
|
||||||
|
'memcached']):
|
||||||
# init is systemd (or regular sysv)
|
# init is systemd (or regular sysv)
|
||||||
cmd = 'sudo service {} status'.format(service_name)
|
cmd = 'sudo service {} status'.format(service_name)
|
||||||
output, code = sentry_unit.run(cmd)
|
output, code = sentry_unit.run(cmd)
|
||||||
|
@ -20,6 +20,7 @@ import re
|
|||||||
import six
|
import six
|
||||||
import time
|
import time
|
||||||
import urllib
|
import urllib
|
||||||
|
import urlparse
|
||||||
|
|
||||||
import cinderclient.v1.client as cinder_client
|
import cinderclient.v1.client as cinder_client
|
||||||
import glanceclient.v1.client as glance_client
|
import glanceclient.v1.client as glance_client
|
||||||
@ -37,6 +38,7 @@ import swiftclient
|
|||||||
from charmhelpers.contrib.amulet.utils import (
|
from charmhelpers.contrib.amulet.utils import (
|
||||||
AmuletUtils
|
AmuletUtils
|
||||||
)
|
)
|
||||||
|
from charmhelpers.core.decorators import retry_on_exception
|
||||||
|
|
||||||
DEBUG = logging.DEBUG
|
DEBUG = logging.DEBUG
|
||||||
ERROR = logging.ERROR
|
ERROR = logging.ERROR
|
||||||
@ -303,6 +305,46 @@ class OpenStackAmuletUtils(AmuletUtils):
|
|||||||
self.log.debug('Checking if tenant exists ({})...'.format(tenant))
|
self.log.debug('Checking if tenant exists ({})...'.format(tenant))
|
||||||
return tenant in [t.name for t in keystone.tenants.list()]
|
return tenant in [t.name for t in keystone.tenants.list()]
|
||||||
|
|
||||||
|
@retry_on_exception(5, base_delay=10)
|
||||||
|
def keystone_wait_for_propagation(self, sentry_relation_pairs,
|
||||||
|
api_version):
|
||||||
|
"""Iterate over list of sentry and relation tuples and verify that
|
||||||
|
api_version has the expected value.
|
||||||
|
|
||||||
|
:param sentry_relation_pairs: list of sentry, relation name tuples used
|
||||||
|
for monitoring propagation of relation
|
||||||
|
data
|
||||||
|
:param api_version: api_version to expect in relation data
|
||||||
|
:returns: None if successful. Raise on error.
|
||||||
|
"""
|
||||||
|
for (sentry, relation_name) in sentry_relation_pairs:
|
||||||
|
rel = sentry.relation('identity-service',
|
||||||
|
relation_name)
|
||||||
|
self.log.debug('keystone relation data: {}'.format(rel))
|
||||||
|
if rel['api_version'] != str(api_version):
|
||||||
|
raise Exception("api_version not propagated through relation"
|
||||||
|
" data yet ('{}' != '{}')."
|
||||||
|
"".format(rel['api_version'], api_version))
|
||||||
|
|
||||||
|
def keystone_configure_api_version(self, sentry_relation_pairs, deployment,
|
||||||
|
api_version):
|
||||||
|
"""Configure preferred-api-version of keystone in deployment and
|
||||||
|
monitor provided list of relation objects for propagation
|
||||||
|
before returning to caller.
|
||||||
|
|
||||||
|
:param sentry_relation_pairs: list of sentry, relation tuples used for
|
||||||
|
monitoring propagation of relation data
|
||||||
|
:param deployment: deployment to configure
|
||||||
|
:param api_version: value preferred-api-version will be set to
|
||||||
|
:returns: None if successful. Raise on error.
|
||||||
|
"""
|
||||||
|
self.log.debug("Setting keystone preferred-api-version: '{}'"
|
||||||
|
"".format(api_version))
|
||||||
|
|
||||||
|
config = {'preferred-api-version': api_version}
|
||||||
|
deployment.d.configure('keystone', config)
|
||||||
|
self.keystone_wait_for_propagation(sentry_relation_pairs, api_version)
|
||||||
|
|
||||||
def authenticate_cinder_admin(self, keystone_sentry, username,
|
def authenticate_cinder_admin(self, keystone_sentry, username,
|
||||||
password, tenant):
|
password, tenant):
|
||||||
"""Authenticates admin user with cinder."""
|
"""Authenticates admin user with cinder."""
|
||||||
@ -311,6 +353,37 @@ class OpenStackAmuletUtils(AmuletUtils):
|
|||||||
ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8'))
|
ept = "http://{}:5000/v2.0".format(keystone_ip.strip().decode('utf-8'))
|
||||||
return cinder_client.Client(username, password, tenant, ept)
|
return cinder_client.Client(username, password, tenant, ept)
|
||||||
|
|
||||||
|
def authenticate_keystone(self, keystone_ip, username, password,
|
||||||
|
api_version=False, admin_port=False,
|
||||||
|
user_domain_name=None, domain_name=None,
|
||||||
|
project_domain_name=None, project_name=None):
|
||||||
|
"""Authenticate with Keystone"""
|
||||||
|
self.log.debug('Authenticating with keystone...')
|
||||||
|
port = 5000
|
||||||
|
if admin_port:
|
||||||
|
port = 35357
|
||||||
|
base_ep = "http://{}:{}".format(keystone_ip.strip().decode('utf-8'),
|
||||||
|
port)
|
||||||
|
if not api_version or api_version == 2:
|
||||||
|
ep = base_ep + "/v2.0"
|
||||||
|
return keystone_client.Client(username=username, password=password,
|
||||||
|
tenant_name=project_name,
|
||||||
|
auth_url=ep)
|
||||||
|
else:
|
||||||
|
ep = base_ep + "/v3"
|
||||||
|
auth = keystone_id_v3.Password(
|
||||||
|
user_domain_name=user_domain_name,
|
||||||
|
username=username,
|
||||||
|
password=password,
|
||||||
|
domain_name=domain_name,
|
||||||
|
project_domain_name=project_domain_name,
|
||||||
|
project_name=project_name,
|
||||||
|
auth_url=ep
|
||||||
|
)
|
||||||
|
return keystone_client_v3.Client(
|
||||||
|
session=keystone_session.Session(auth=auth)
|
||||||
|
)
|
||||||
|
|
||||||
def authenticate_keystone_admin(self, keystone_sentry, user, password,
|
def authenticate_keystone_admin(self, keystone_sentry, user, password,
|
||||||
tenant=None, api_version=None,
|
tenant=None, api_version=None,
|
||||||
keystone_ip=None):
|
keystone_ip=None):
|
||||||
@ -319,30 +392,28 @@ class OpenStackAmuletUtils(AmuletUtils):
|
|||||||
if not keystone_ip:
|
if not keystone_ip:
|
||||||
keystone_ip = keystone_sentry.info['public-address']
|
keystone_ip = keystone_sentry.info['public-address']
|
||||||
|
|
||||||
base_ep = "http://{}:35357".format(keystone_ip.strip().decode('utf-8'))
|
user_domain_name = None
|
||||||
if not api_version or api_version == 2:
|
domain_name = None
|
||||||
ep = base_ep + "/v2.0"
|
if api_version == 3:
|
||||||
return keystone_client.Client(username=user, password=password,
|
user_domain_name = 'admin_domain'
|
||||||
tenant_name=tenant, auth_url=ep)
|
domain_name = user_domain_name
|
||||||
else:
|
|
||||||
ep = base_ep + "/v3"
|
return self.authenticate_keystone(keystone_ip, user, password,
|
||||||
auth = keystone_id_v3.Password(
|
project_name=tenant,
|
||||||
user_domain_name='admin_domain',
|
api_version=api_version,
|
||||||
username=user,
|
user_domain_name=user_domain_name,
|
||||||
password=password,
|
domain_name=domain_name,
|
||||||
domain_name='admin_domain',
|
admin_port=True)
|
||||||
auth_url=ep,
|
|
||||||
)
|
|
||||||
sess = keystone_session.Session(auth=auth)
|
|
||||||
return keystone_client_v3.Client(session=sess)
|
|
||||||
|
|
||||||
def authenticate_keystone_user(self, keystone, user, password, tenant):
|
def authenticate_keystone_user(self, keystone, user, password, tenant):
|
||||||
"""Authenticates a regular user with the keystone public endpoint."""
|
"""Authenticates a regular user with the keystone public endpoint."""
|
||||||
self.log.debug('Authenticating keystone user ({})...'.format(user))
|
self.log.debug('Authenticating keystone user ({})...'.format(user))
|
||||||
ep = keystone.service_catalog.url_for(service_type='identity',
|
ep = keystone.service_catalog.url_for(service_type='identity',
|
||||||
endpoint_type='publicURL')
|
endpoint_type='publicURL')
|
||||||
return keystone_client.Client(username=user, password=password,
|
keystone_ip = urlparse.urlparse(ep).hostname
|
||||||
tenant_name=tenant, auth_url=ep)
|
|
||||||
|
return self.authenticate_keystone(keystone_ip, user, password,
|
||||||
|
project_name=tenant)
|
||||||
|
|
||||||
def authenticate_glance_admin(self, keystone):
|
def authenticate_glance_admin(self, keystone):
|
||||||
"""Authenticates admin user with glance."""
|
"""Authenticates admin user with glance."""
|
||||||
@ -1133,3 +1204,70 @@ class OpenStackAmuletUtils(AmuletUtils):
|
|||||||
else:
|
else:
|
||||||
msg = 'No message retrieved.'
|
msg = 'No message retrieved.'
|
||||||
amulet.raise_status(amulet.FAIL, msg)
|
amulet.raise_status(amulet.FAIL, msg)
|
||||||
|
|
||||||
|
def validate_memcache(self, sentry_unit, conf, os_release,
|
||||||
|
earliest_release=5, section='keystone_authtoken',
|
||||||
|
check_kvs=None):
|
||||||
|
"""Check Memcache is running and is configured to be used
|
||||||
|
|
||||||
|
Example call from Amulet test:
|
||||||
|
|
||||||
|
def test_110_memcache(self):
|
||||||
|
u.validate_memcache(self.neutron_api_sentry,
|
||||||
|
'/etc/neutron/neutron.conf',
|
||||||
|
self._get_openstack_release())
|
||||||
|
|
||||||
|
:param sentry_unit: sentry unit
|
||||||
|
:param conf: OpenStack config file to check memcache settings
|
||||||
|
:param os_release: Current OpenStack release int code
|
||||||
|
:param earliest_release: Earliest Openstack release to check int code
|
||||||
|
:param section: OpenStack config file section to check
|
||||||
|
:param check_kvs: Dict of settings to check in config file
|
||||||
|
:returns: None
|
||||||
|
"""
|
||||||
|
if os_release < earliest_release:
|
||||||
|
self.log.debug('Skipping memcache checks for deployment. {} <'
|
||||||
|
'mitaka'.format(os_release))
|
||||||
|
return
|
||||||
|
_kvs = check_kvs or {'memcached_servers': 'inet6:[::1]:11211'}
|
||||||
|
self.log.debug('Checking memcached is running')
|
||||||
|
ret = self.validate_services_by_name({sentry_unit: ['memcached']})
|
||||||
|
if ret:
|
||||||
|
amulet.raise_status(amulet.FAIL, msg='Memcache running check'
|
||||||
|
'failed {}'.format(ret))
|
||||||
|
else:
|
||||||
|
self.log.debug('OK')
|
||||||
|
self.log.debug('Checking memcache url is configured in {}'.format(
|
||||||
|
conf))
|
||||||
|
if self.validate_config_data(sentry_unit, conf, section, _kvs):
|
||||||
|
message = "Memcache config error in: {}".format(conf)
|
||||||
|
amulet.raise_status(amulet.FAIL, msg=message)
|
||||||
|
else:
|
||||||
|
self.log.debug('OK')
|
||||||
|
self.log.debug('Checking memcache configuration in '
|
||||||
|
'/etc/memcached.conf')
|
||||||
|
contents = self.file_contents_safe(sentry_unit, '/etc/memcached.conf',
|
||||||
|
fatal=True)
|
||||||
|
ubuntu_release, _ = self.run_cmd_unit(sentry_unit, 'lsb_release -cs')
|
||||||
|
if ubuntu_release <= 'trusty':
|
||||||
|
memcache_listen_addr = 'ip6-localhost'
|
||||||
|
else:
|
||||||
|
memcache_listen_addr = '::1'
|
||||||
|
expected = {
|
||||||
|
'-p': '11211',
|
||||||
|
'-l': memcache_listen_addr}
|
||||||
|
found = []
|
||||||
|
for key, value in expected.items():
|
||||||
|
for line in contents.split('\n'):
|
||||||
|
if line.startswith(key):
|
||||||
|
self.log.debug('Checking {} is set to {}'.format(
|
||||||
|
key,
|
||||||
|
value))
|
||||||
|
assert value == line.split()[-1]
|
||||||
|
self.log.debug(line.split()[-1])
|
||||||
|
found.append(key)
|
||||||
|
if sorted(found) == sorted(expected.keys()):
|
||||||
|
self.log.debug('OK')
|
||||||
|
else:
|
||||||
|
message = "Memcache config error in: /etc/memcached.conf"
|
||||||
|
amulet.raise_status(amulet.FAIL, msg=message)
|
||||||
|
13
tests/charmhelpers/core/__init__.py
Normal file
13
tests/charmhelpers/core/__init__.py
Normal file
@ -0,0 +1,13 @@
|
|||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
55
tests/charmhelpers/core/decorators.py
Normal file
55
tests/charmhelpers/core/decorators.py
Normal file
@ -0,0 +1,55 @@
|
|||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
#
|
||||||
|
# Copyright 2014 Canonical Ltd.
|
||||||
|
#
|
||||||
|
# Authors:
|
||||||
|
# Edward Hope-Morley <opentastic@gmail.com>
|
||||||
|
#
|
||||||
|
|
||||||
|
import time
|
||||||
|
|
||||||
|
from charmhelpers.core.hookenv import (
|
||||||
|
log,
|
||||||
|
INFO,
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def retry_on_exception(num_retries, base_delay=0, exc_type=Exception):
|
||||||
|
"""If the decorated function raises exception exc_type, allow num_retries
|
||||||
|
retry attempts before raise the exception.
|
||||||
|
"""
|
||||||
|
def _retry_on_exception_inner_1(f):
|
||||||
|
def _retry_on_exception_inner_2(*args, **kwargs):
|
||||||
|
retries = num_retries
|
||||||
|
multiplier = 1
|
||||||
|
while True:
|
||||||
|
try:
|
||||||
|
return f(*args, **kwargs)
|
||||||
|
except exc_type:
|
||||||
|
if not retries:
|
||||||
|
raise
|
||||||
|
|
||||||
|
delay = base_delay * multiplier
|
||||||
|
multiplier += 1
|
||||||
|
log("Retrying '%s' %d more times (delay=%s)" %
|
||||||
|
(f.__name__, retries, delay), level=INFO)
|
||||||
|
retries -= 1
|
||||||
|
if delay:
|
||||||
|
time.sleep(delay)
|
||||||
|
|
||||||
|
return _retry_on_exception_inner_2
|
||||||
|
|
||||||
|
return _retry_on_exception_inner_1
|
43
tests/charmhelpers/core/files.py
Normal file
43
tests/charmhelpers/core/files.py
Normal file
@ -0,0 +1,43 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
__author__ = 'Jorge Niedbalski <niedbalski@ubuntu.com>'
|
||||||
|
|
||||||
|
import os
|
||||||
|
import subprocess
|
||||||
|
|
||||||
|
|
||||||
|
def sed(filename, before, after, flags='g'):
|
||||||
|
"""
|
||||||
|
Search and replaces the given pattern on filename.
|
||||||
|
|
||||||
|
:param filename: relative or absolute file path.
|
||||||
|
:param before: expression to be replaced (see 'man sed')
|
||||||
|
:param after: expression to replace with (see 'man sed')
|
||||||
|
:param flags: sed-compatible regex flags in example, to make
|
||||||
|
the search and replace case insensitive, specify ``flags="i"``.
|
||||||
|
The ``g`` flag is always specified regardless, so you do not
|
||||||
|
need to remember to include it when overriding this parameter.
|
||||||
|
:returns: If the sed command exit code was zero then return,
|
||||||
|
otherwise raise CalledProcessError.
|
||||||
|
"""
|
||||||
|
expression = r's/{0}/{1}/{2}'.format(before,
|
||||||
|
after, flags)
|
||||||
|
|
||||||
|
return subprocess.check_call(["sed", "-i", "-r", "-e",
|
||||||
|
expression,
|
||||||
|
os.path.expanduser(filename)])
|
132
tests/charmhelpers/core/fstab.py
Normal file
132
tests/charmhelpers/core/fstab.py
Normal file
@ -0,0 +1,132 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
import io
|
||||||
|
import os
|
||||||
|
|
||||||
|
__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
|
||||||
|
|
||||||
|
|
||||||
|
class Fstab(io.FileIO):
|
||||||
|
"""This class extends file in order to implement a file reader/writer
|
||||||
|
for file `/etc/fstab`
|
||||||
|
"""
|
||||||
|
|
||||||
|
class Entry(object):
|
||||||
|
"""Entry class represents a non-comment line on the `/etc/fstab` file
|
||||||
|
"""
|
||||||
|
def __init__(self, device, mountpoint, filesystem,
|
||||||
|
options, d=0, p=0):
|
||||||
|
self.device = device
|
||||||
|
self.mountpoint = mountpoint
|
||||||
|
self.filesystem = filesystem
|
||||||
|
|
||||||
|
if not options:
|
||||||
|
options = "defaults"
|
||||||
|
|
||||||
|
self.options = options
|
||||||
|
self.d = int(d)
|
||||||
|
self.p = int(p)
|
||||||
|
|
||||||
|
def __eq__(self, o):
|
||||||
|
return str(self) == str(o)
|
||||||
|
|
||||||
|
def __str__(self):
|
||||||
|
return "{} {} {} {} {} {}".format(self.device,
|
||||||
|
self.mountpoint,
|
||||||
|
self.filesystem,
|
||||||
|
self.options,
|
||||||
|
self.d,
|
||||||
|
self.p)
|
||||||
|
|
||||||
|
DEFAULT_PATH = os.path.join(os.path.sep, 'etc', 'fstab')
|
||||||
|
|
||||||
|
def __init__(self, path=None):
|
||||||
|
if path:
|
||||||
|
self._path = path
|
||||||
|
else:
|
||||||
|
self._path = self.DEFAULT_PATH
|
||||||
|
super(Fstab, self).__init__(self._path, 'rb+')
|
||||||
|
|
||||||
|
def _hydrate_entry(self, line):
|
||||||
|
# NOTE: use split with no arguments to split on any
|
||||||
|
# whitespace including tabs
|
||||||
|
return Fstab.Entry(*filter(
|
||||||
|
lambda x: x not in ('', None),
|
||||||
|
line.strip("\n").split()))
|
||||||
|
|
||||||
|
@property
|
||||||
|
def entries(self):
|
||||||
|
self.seek(0)
|
||||||
|
for line in self.readlines():
|
||||||
|
line = line.decode('us-ascii')
|
||||||
|
try:
|
||||||
|
if line.strip() and not line.strip().startswith("#"):
|
||||||
|
yield self._hydrate_entry(line)
|
||||||
|
except ValueError:
|
||||||
|
pass
|
||||||
|
|
||||||
|
def get_entry_by_attr(self, attr, value):
|
||||||
|
for entry in self.entries:
|
||||||
|
e_attr = getattr(entry, attr)
|
||||||
|
if e_attr == value:
|
||||||
|
return entry
|
||||||
|
return None
|
||||||
|
|
||||||
|
def add_entry(self, entry):
|
||||||
|
if self.get_entry_by_attr('device', entry.device):
|
||||||
|
return False
|
||||||
|
|
||||||
|
self.write((str(entry) + '\n').encode('us-ascii'))
|
||||||
|
self.truncate()
|
||||||
|
return entry
|
||||||
|
|
||||||
|
def remove_entry(self, entry):
|
||||||
|
self.seek(0)
|
||||||
|
|
||||||
|
lines = [l.decode('us-ascii') for l in self.readlines()]
|
||||||
|
|
||||||
|
found = False
|
||||||
|
for index, line in enumerate(lines):
|
||||||
|
if line.strip() and not line.strip().startswith("#"):
|
||||||
|
if self._hydrate_entry(line) == entry:
|
||||||
|
found = True
|
||||||
|
break
|
||||||
|
|
||||||
|
if not found:
|
||||||
|
return False
|
||||||
|
|
||||||
|
lines.remove(line)
|
||||||
|
|
||||||
|
self.seek(0)
|
||||||
|
self.write(''.join(lines).encode('us-ascii'))
|
||||||
|
self.truncate()
|
||||||
|
return True
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def remove_by_mountpoint(cls, mountpoint, path=None):
|
||||||
|
fstab = cls(path=path)
|
||||||
|
entry = fstab.get_entry_by_attr('mountpoint', mountpoint)
|
||||||
|
if entry:
|
||||||
|
return fstab.remove_entry(entry)
|
||||||
|
return False
|
||||||
|
|
||||||
|
@classmethod
|
||||||
|
def add(cls, device, mountpoint, filesystem, options=None, path=None):
|
||||||
|
return cls(path=path).add_entry(Fstab.Entry(device,
|
||||||
|
mountpoint, filesystem,
|
||||||
|
options=options))
|
1068
tests/charmhelpers/core/hookenv.py
Normal file
1068
tests/charmhelpers/core/hookenv.py
Normal file
File diff suppressed because it is too large
Load Diff
918
tests/charmhelpers/core/host.py
Normal file
918
tests/charmhelpers/core/host.py
Normal file
@ -0,0 +1,918 @@
|
|||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
"""Tools for working with the host system"""
|
||||||
|
# Copyright 2012 Canonical Ltd.
|
||||||
|
#
|
||||||
|
# Authors:
|
||||||
|
# Nick Moffitt <nick.moffitt@canonical.com>
|
||||||
|
# Matthew Wedgwood <matthew.wedgwood@canonical.com>
|
||||||
|
|
||||||
|
import os
|
||||||
|
import re
|
||||||
|
import pwd
|
||||||
|
import glob
|
||||||
|
import grp
|
||||||
|
import random
|
||||||
|
import string
|
||||||
|
import subprocess
|
||||||
|
import hashlib
|
||||||
|
import functools
|
||||||
|
import itertools
|
||||||
|
import six
|
||||||
|
|
||||||
|
from contextlib import contextmanager
|
||||||
|
from collections import OrderedDict
|
||||||
|
from .hookenv import log
|
||||||
|
from .fstab import Fstab
|
||||||
|
from charmhelpers.osplatform import get_platform
|
||||||
|
|
||||||
|
__platform__ = get_platform()
|
||||||
|
if __platform__ == "ubuntu":
|
||||||
|
from charmhelpers.core.host_factory.ubuntu import (
|
||||||
|
service_available,
|
||||||
|
add_new_group,
|
||||||
|
lsb_release,
|
||||||
|
cmp_pkgrevno,
|
||||||
|
) # flake8: noqa -- ignore F401 for this import
|
||||||
|
elif __platform__ == "centos":
|
||||||
|
from charmhelpers.core.host_factory.centos import (
|
||||||
|
service_available,
|
||||||
|
add_new_group,
|
||||||
|
lsb_release,
|
||||||
|
cmp_pkgrevno,
|
||||||
|
) # flake8: noqa -- ignore F401 for this import
|
||||||
|
|
||||||
|
UPDATEDB_PATH = '/etc/updatedb.conf'
|
||||||
|
|
||||||
|
def service_start(service_name, **kwargs):
|
||||||
|
"""Start a system service.
|
||||||
|
|
||||||
|
The specified service name is managed via the system level init system.
|
||||||
|
Some init systems (e.g. upstart) require that additional arguments be
|
||||||
|
provided in order to directly control service instances whereas other init
|
||||||
|
systems allow for addressing instances of a service directly by name (e.g.
|
||||||
|
systemd).
|
||||||
|
|
||||||
|
The kwargs allow for the additional parameters to be passed to underlying
|
||||||
|
init systems for those systems which require/allow for them. For example,
|
||||||
|
the ceph-osd upstart script requires the id parameter to be passed along
|
||||||
|
in order to identify which running daemon should be reloaded. The follow-
|
||||||
|
ing example stops the ceph-osd service for instance id=4:
|
||||||
|
|
||||||
|
service_stop('ceph-osd', id=4)
|
||||||
|
|
||||||
|
:param service_name: the name of the service to stop
|
||||||
|
:param **kwargs: additional parameters to pass to the init system when
|
||||||
|
managing services. These will be passed as key=value
|
||||||
|
parameters to the init system's commandline. kwargs
|
||||||
|
are ignored for systemd enabled systems.
|
||||||
|
"""
|
||||||
|
return service('start', service_name, **kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
def service_stop(service_name, **kwargs):
|
||||||
|
"""Stop a system service.
|
||||||
|
|
||||||
|
The specified service name is managed via the system level init system.
|
||||||
|
Some init systems (e.g. upstart) require that additional arguments be
|
||||||
|
provided in order to directly control service instances whereas other init
|
||||||
|
systems allow for addressing instances of a service directly by name (e.g.
|
||||||
|
systemd).
|
||||||
|
|
||||||
|
The kwargs allow for the additional parameters to be passed to underlying
|
||||||
|
init systems for those systems which require/allow for them. For example,
|
||||||
|
the ceph-osd upstart script requires the id parameter to be passed along
|
||||||
|
in order to identify which running daemon should be reloaded. The follow-
|
||||||
|
ing example stops the ceph-osd service for instance id=4:
|
||||||
|
|
||||||
|
service_stop('ceph-osd', id=4)
|
||||||
|
|
||||||
|
:param service_name: the name of the service to stop
|
||||||
|
:param **kwargs: additional parameters to pass to the init system when
|
||||||
|
managing services. These will be passed as key=value
|
||||||
|
parameters to the init system's commandline. kwargs
|
||||||
|
are ignored for systemd enabled systems.
|
||||||
|
"""
|
||||||
|
return service('stop', service_name, **kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
def service_restart(service_name, **kwargs):
|
||||||
|
"""Restart a system service.
|
||||||
|
|
||||||
|
The specified service name is managed via the system level init system.
|
||||||
|
Some init systems (e.g. upstart) require that additional arguments be
|
||||||
|
provided in order to directly control service instances whereas other init
|
||||||
|
systems allow for addressing instances of a service directly by name (e.g.
|
||||||
|
systemd).
|
||||||
|
|
||||||
|
The kwargs allow for the additional parameters to be passed to underlying
|
||||||
|
init systems for those systems which require/allow for them. For example,
|
||||||
|
the ceph-osd upstart script requires the id parameter to be passed along
|
||||||
|
in order to identify which running daemon should be restarted. The follow-
|
||||||
|
ing example restarts the ceph-osd service for instance id=4:
|
||||||
|
|
||||||
|
service_restart('ceph-osd', id=4)
|
||||||
|
|
||||||
|
:param service_name: the name of the service to restart
|
||||||
|
:param **kwargs: additional parameters to pass to the init system when
|
||||||
|
managing services. These will be passed as key=value
|
||||||
|
parameters to the init system's commandline. kwargs
|
||||||
|
are ignored for init systems not allowing additional
|
||||||
|
parameters via the commandline (systemd).
|
||||||
|
"""
|
||||||
|
return service('restart', service_name)
|
||||||
|
|
||||||
|
|
||||||
|
def service_reload(service_name, restart_on_failure=False, **kwargs):
|
||||||
|
"""Reload a system service, optionally falling back to restart if
|
||||||
|
reload fails.
|
||||||
|
|
||||||
|
The specified service name is managed via the system level init system.
|
||||||
|
Some init systems (e.g. upstart) require that additional arguments be
|
||||||
|
provided in order to directly control service instances whereas other init
|
||||||
|
systems allow for addressing instances of a service directly by name (e.g.
|
||||||
|
systemd).
|
||||||
|
|
||||||
|
The kwargs allow for the additional parameters to be passed to underlying
|
||||||
|
init systems for those systems which require/allow for them. For example,
|
||||||
|
the ceph-osd upstart script requires the id parameter to be passed along
|
||||||
|
in order to identify which running daemon should be reloaded. The follow-
|
||||||
|
ing example restarts the ceph-osd service for instance id=4:
|
||||||
|
|
||||||
|
service_reload('ceph-osd', id=4)
|
||||||
|
|
||||||
|
:param service_name: the name of the service to reload
|
||||||
|
:param restart_on_failure: boolean indicating whether to fallback to a
|
||||||
|
restart if the reload fails.
|
||||||
|
:param **kwargs: additional parameters to pass to the init system when
|
||||||
|
managing services. These will be passed as key=value
|
||||||
|
parameters to the init system's commandline. kwargs
|
||||||
|
are ignored for init systems not allowing additional
|
||||||
|
parameters via the commandline (systemd).
|
||||||
|
"""
|
||||||
|
service_result = service('reload', service_name, **kwargs)
|
||||||
|
if not service_result and restart_on_failure:
|
||||||
|
service_result = service('restart', service_name, **kwargs)
|
||||||
|
return service_result
|
||||||
|
|
||||||
|
|
||||||
|
def service_pause(service_name, init_dir="/etc/init", initd_dir="/etc/init.d",
|
||||||
|
**kwargs):
|
||||||
|
"""Pause a system service.
|
||||||
|
|
||||||
|
Stop it, and prevent it from starting again at boot.
|
||||||
|
|
||||||
|
:param service_name: the name of the service to pause
|
||||||
|
:param init_dir: path to the upstart init directory
|
||||||
|
:param initd_dir: path to the sysv init directory
|
||||||
|
:param **kwargs: additional parameters to pass to the init system when
|
||||||
|
managing services. These will be passed as key=value
|
||||||
|
parameters to the init system's commandline. kwargs
|
||||||
|
are ignored for init systems which do not support
|
||||||
|
key=value arguments via the commandline.
|
||||||
|
"""
|
||||||
|
stopped = True
|
||||||
|
if service_running(service_name, **kwargs):
|
||||||
|
stopped = service_stop(service_name, **kwargs)
|
||||||
|
upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
|
||||||
|
sysv_file = os.path.join(initd_dir, service_name)
|
||||||
|
if init_is_systemd():
|
||||||
|
service('disable', service_name)
|
||||||
|
elif os.path.exists(upstart_file):
|
||||||
|
override_path = os.path.join(
|
||||||
|
init_dir, '{}.override'.format(service_name))
|
||||||
|
with open(override_path, 'w') as fh:
|
||||||
|
fh.write("manual\n")
|
||||||
|
elif os.path.exists(sysv_file):
|
||||||
|
subprocess.check_call(["update-rc.d", service_name, "disable"])
|
||||||
|
else:
|
||||||
|
raise ValueError(
|
||||||
|
"Unable to detect {0} as SystemD, Upstart {1} or"
|
||||||
|
" SysV {2}".format(
|
||||||
|
service_name, upstart_file, sysv_file))
|
||||||
|
return stopped
|
||||||
|
|
||||||
|
|
||||||
|
def service_resume(service_name, init_dir="/etc/init",
|
||||||
|
initd_dir="/etc/init.d", **kwargs):
|
||||||
|
"""Resume a system service.
|
||||||
|
|
||||||
|
Reenable starting again at boot. Start the service.
|
||||||
|
|
||||||
|
:param service_name: the name of the service to resume
|
||||||
|
:param init_dir: the path to the init dir
|
||||||
|
:param initd dir: the path to the initd dir
|
||||||
|
:param **kwargs: additional parameters to pass to the init system when
|
||||||
|
managing services. These will be passed as key=value
|
||||||
|
parameters to the init system's commandline. kwargs
|
||||||
|
are ignored for systemd enabled systems.
|
||||||
|
"""
|
||||||
|
upstart_file = os.path.join(init_dir, "{}.conf".format(service_name))
|
||||||
|
sysv_file = os.path.join(initd_dir, service_name)
|
||||||
|
if init_is_systemd():
|
||||||
|
service('enable', service_name)
|
||||||
|
elif os.path.exists(upstart_file):
|
||||||
|
override_path = os.path.join(
|
||||||
|
init_dir, '{}.override'.format(service_name))
|
||||||
|
if os.path.exists(override_path):
|
||||||
|
os.unlink(override_path)
|
||||||
|
elif os.path.exists(sysv_file):
|
||||||
|
subprocess.check_call(["update-rc.d", service_name, "enable"])
|
||||||
|
else:
|
||||||
|
raise ValueError(
|
||||||
|
"Unable to detect {0} as SystemD, Upstart {1} or"
|
||||||
|
" SysV {2}".format(
|
||||||
|
service_name, upstart_file, sysv_file))
|
||||||
|
started = service_running(service_name, **kwargs)
|
||||||
|
|
||||||
|
if not started:
|
||||||
|
started = service_start(service_name, **kwargs)
|
||||||
|
return started
|
||||||
|
|
||||||
|
|
||||||
|
def service(action, service_name, **kwargs):
|
||||||
|
"""Control a system service.
|
||||||
|
|
||||||
|
:param action: the action to take on the service
|
||||||
|
:param service_name: the name of the service to perform th action on
|
||||||
|
:param **kwargs: additional params to be passed to the service command in
|
||||||
|
the form of key=value.
|
||||||
|
"""
|
||||||
|
if init_is_systemd():
|
||||||
|
cmd = ['systemctl', action, service_name]
|
||||||
|
else:
|
||||||
|
cmd = ['service', service_name, action]
|
||||||
|
for key, value in six.iteritems(kwargs):
|
||||||
|
parameter = '%s=%s' % (key, value)
|
||||||
|
cmd.append(parameter)
|
||||||
|
return subprocess.call(cmd) == 0
|
||||||
|
|
||||||
|
|
||||||
|
_UPSTART_CONF = "/etc/init/{}.conf"
|
||||||
|
_INIT_D_CONF = "/etc/init.d/{}"
|
||||||
|
|
||||||
|
|
||||||
|
def service_running(service_name, **kwargs):
|
||||||
|
"""Determine whether a system service is running.
|
||||||
|
|
||||||
|
:param service_name: the name of the service
|
||||||
|
:param **kwargs: additional args to pass to the service command. This is
|
||||||
|
used to pass additional key=value arguments to the
|
||||||
|
service command line for managing specific instance
|
||||||
|
units (e.g. service ceph-osd status id=2). The kwargs
|
||||||
|
are ignored in systemd services.
|
||||||
|
"""
|
||||||
|
if init_is_systemd():
|
||||||
|
return service('is-active', service_name)
|
||||||
|
else:
|
||||||
|
if os.path.exists(_UPSTART_CONF.format(service_name)):
|
||||||
|
try:
|
||||||
|
cmd = ['status', service_name]
|
||||||
|
for key, value in six.iteritems(kwargs):
|
||||||
|
parameter = '%s=%s' % (key, value)
|
||||||
|
cmd.append(parameter)
|
||||||
|
output = subprocess.check_output(cmd,
|
||||||
|
stderr=subprocess.STDOUT).decode('UTF-8')
|
||||||
|
except subprocess.CalledProcessError:
|
||||||
|
return False
|
||||||
|
else:
|
||||||
|
# This works for upstart scripts where the 'service' command
|
||||||
|
# returns a consistent string to represent running
|
||||||
|
# 'start/running'
|
||||||
|
if ("start/running" in output or
|
||||||
|
"is running" in output or
|
||||||
|
"up and running" in output):
|
||||||
|
return True
|
||||||
|
elif os.path.exists(_INIT_D_CONF.format(service_name)):
|
||||||
|
# Check System V scripts init script return codes
|
||||||
|
return service('status', service_name)
|
||||||
|
return False
|
||||||
|
|
||||||
|
|
||||||
|
SYSTEMD_SYSTEM = '/run/systemd/system'
|
||||||
|
|
||||||
|
|
||||||
|
def init_is_systemd():
|
||||||
|
"""Return True if the host system uses systemd, False otherwise."""
|
||||||
|
return os.path.isdir(SYSTEMD_SYSTEM)
|
||||||
|
|
||||||
|
|
||||||
|
def adduser(username, password=None, shell='/bin/bash',
|
||||||
|
system_user=False, primary_group=None,
|
||||||
|
secondary_groups=None, uid=None, home_dir=None):
|
||||||
|
"""Add a user to the system.
|
||||||
|
|
||||||
|
Will log but otherwise succeed if the user already exists.
|
||||||
|
|
||||||
|
:param str username: Username to create
|
||||||
|
:param str password: Password for user; if ``None``, create a system user
|
||||||
|
:param str shell: The default shell for the user
|
||||||
|
:param bool system_user: Whether to create a login or system user
|
||||||
|
:param str primary_group: Primary group for user; defaults to username
|
||||||
|
:param list secondary_groups: Optional list of additional groups
|
||||||
|
:param int uid: UID for user being created
|
||||||
|
:param str home_dir: Home directory for user
|
||||||
|
|
||||||
|
:returns: The password database entry struct, as returned by `pwd.getpwnam`
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
user_info = pwd.getpwnam(username)
|
||||||
|
log('user {0} already exists!'.format(username))
|
||||||
|
if uid:
|
||||||
|
user_info = pwd.getpwuid(int(uid))
|
||||||
|
log('user with uid {0} already exists!'.format(uid))
|
||||||
|
except KeyError:
|
||||||
|
log('creating user {0}'.format(username))
|
||||||
|
cmd = ['useradd']
|
||||||
|
if uid:
|
||||||
|
cmd.extend(['--uid', str(uid)])
|
||||||
|
if home_dir:
|
||||||
|
cmd.extend(['--home', str(home_dir)])
|
||||||
|
if system_user or password is None:
|
||||||
|
cmd.append('--system')
|
||||||
|
else:
|
||||||
|
cmd.extend([
|
||||||
|
'--create-home',
|
||||||
|
'--shell', shell,
|
||||||
|
'--password', password,
|
||||||
|
])
|
||||||
|
if not primary_group:
|
||||||
|
try:
|
||||||
|
grp.getgrnam(username)
|
||||||
|
primary_group = username # avoid "group exists" error
|
||||||
|
except KeyError:
|
||||||
|
pass
|
||||||
|
if primary_group:
|
||||||
|
cmd.extend(['-g', primary_group])
|
||||||
|
if secondary_groups:
|
||||||
|
cmd.extend(['-G', ','.join(secondary_groups)])
|
||||||
|
cmd.append(username)
|
||||||
|
subprocess.check_call(cmd)
|
||||||
|
user_info = pwd.getpwnam(username)
|
||||||
|
return user_info
|
||||||
|
|
||||||
|
|
||||||
|
def user_exists(username):
|
||||||
|
"""Check if a user exists"""
|
||||||
|
try:
|
||||||
|
pwd.getpwnam(username)
|
||||||
|
user_exists = True
|
||||||
|
except KeyError:
|
||||||
|
user_exists = False
|
||||||
|
return user_exists
|
||||||
|
|
||||||
|
|
||||||
|
def uid_exists(uid):
|
||||||
|
"""Check if a uid exists"""
|
||||||
|
try:
|
||||||
|
pwd.getpwuid(uid)
|
||||||
|
uid_exists = True
|
||||||
|
except KeyError:
|
||||||
|
uid_exists = False
|
||||||
|
return uid_exists
|
||||||
|
|
||||||
|
|
||||||
|
def group_exists(groupname):
|
||||||
|
"""Check if a group exists"""
|
||||||
|
try:
|
||||||
|
grp.getgrnam(groupname)
|
||||||
|
group_exists = True
|
||||||
|
except KeyError:
|
||||||
|
group_exists = False
|
||||||
|
return group_exists
|
||||||
|
|
||||||
|
|
||||||
|
def gid_exists(gid):
|
||||||
|
"""Check if a gid exists"""
|
||||||
|
try:
|
||||||
|
grp.getgrgid(gid)
|
||||||
|
gid_exists = True
|
||||||
|
except KeyError:
|
||||||
|
gid_exists = False
|
||||||
|
return gid_exists
|
||||||
|
|
||||||
|
|
||||||
|
def add_group(group_name, system_group=False, gid=None):
|
||||||
|
"""Add a group to the system
|
||||||
|
|
||||||
|
Will log but otherwise succeed if the group already exists.
|
||||||
|
|
||||||
|
:param str group_name: group to create
|
||||||
|
:param bool system_group: Create system group
|
||||||
|
:param int gid: GID for user being created
|
||||||
|
|
||||||
|
:returns: The password database entry struct, as returned by `grp.getgrnam`
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
group_info = grp.getgrnam(group_name)
|
||||||
|
log('group {0} already exists!'.format(group_name))
|
||||||
|
if gid:
|
||||||
|
group_info = grp.getgrgid(gid)
|
||||||
|
log('group with gid {0} already exists!'.format(gid))
|
||||||
|
except KeyError:
|
||||||
|
log('creating group {0}'.format(group_name))
|
||||||
|
add_new_group(group_name, system_group, gid)
|
||||||
|
group_info = grp.getgrnam(group_name)
|
||||||
|
return group_info
|
||||||
|
|
||||||
|
|
||||||
|
def add_user_to_group(username, group):
|
||||||
|
"""Add a user to a group"""
|
||||||
|
cmd = ['gpasswd', '-a', username, group]
|
||||||
|
log("Adding user {} to group {}".format(username, group))
|
||||||
|
subprocess.check_call(cmd)
|
||||||
|
|
||||||
|
|
||||||
|
def rsync(from_path, to_path, flags='-r', options=None, timeout=None):
|
||||||
|
"""Replicate the contents of a path"""
|
||||||
|
options = options or ['--delete', '--executability']
|
||||||
|
cmd = ['/usr/bin/rsync', flags]
|
||||||
|
if timeout:
|
||||||
|
cmd = ['timeout', str(timeout)] + cmd
|
||||||
|
cmd.extend(options)
|
||||||
|
cmd.append(from_path)
|
||||||
|
cmd.append(to_path)
|
||||||
|
log(" ".join(cmd))
|
||||||
|
return subprocess.check_output(cmd, stderr=subprocess.STDOUT).decode('UTF-8').strip()
|
||||||
|
|
||||||
|
|
||||||
|
def symlink(source, destination):
|
||||||
|
"""Create a symbolic link"""
|
||||||
|
log("Symlinking {} as {}".format(source, destination))
|
||||||
|
cmd = [
|
||||||
|
'ln',
|
||||||
|
'-sf',
|
||||||
|
source,
|
||||||
|
destination,
|
||||||
|
]
|
||||||
|
subprocess.check_call(cmd)
|
||||||
|
|
||||||
|
|
||||||
|
def mkdir(path, owner='root', group='root', perms=0o555, force=False):
|
||||||
|
"""Create a directory"""
|
||||||
|
log("Making dir {} {}:{} {:o}".format(path, owner, group,
|
||||||
|
perms))
|
||||||
|
uid = pwd.getpwnam(owner).pw_uid
|
||||||
|
gid = grp.getgrnam(group).gr_gid
|
||||||
|
realpath = os.path.abspath(path)
|
||||||
|
path_exists = os.path.exists(realpath)
|
||||||
|
if path_exists and force:
|
||||||
|
if not os.path.isdir(realpath):
|
||||||
|
log("Removing non-directory file {} prior to mkdir()".format(path))
|
||||||
|
os.unlink(realpath)
|
||||||
|
os.makedirs(realpath, perms)
|
||||||
|
elif not path_exists:
|
||||||
|
os.makedirs(realpath, perms)
|
||||||
|
os.chown(realpath, uid, gid)
|
||||||
|
os.chmod(realpath, perms)
|
||||||
|
|
||||||
|
|
||||||
|
def write_file(path, content, owner='root', group='root', perms=0o444):
|
||||||
|
"""Create or overwrite a file with the contents of a byte string."""
|
||||||
|
log("Writing file {} {}:{} {:o}".format(path, owner, group, perms))
|
||||||
|
uid = pwd.getpwnam(owner).pw_uid
|
||||||
|
gid = grp.getgrnam(group).gr_gid
|
||||||
|
with open(path, 'wb') as target:
|
||||||
|
os.fchown(target.fileno(), uid, gid)
|
||||||
|
os.fchmod(target.fileno(), perms)
|
||||||
|
target.write(content)
|
||||||
|
|
||||||
|
|
||||||
|
def fstab_remove(mp):
|
||||||
|
"""Remove the given mountpoint entry from /etc/fstab"""
|
||||||
|
return Fstab.remove_by_mountpoint(mp)
|
||||||
|
|
||||||
|
|
||||||
|
def fstab_add(dev, mp, fs, options=None):
|
||||||
|
"""Adds the given device entry to the /etc/fstab file"""
|
||||||
|
return Fstab.add(dev, mp, fs, options=options)
|
||||||
|
|
||||||
|
|
||||||
|
def mount(device, mountpoint, options=None, persist=False, filesystem="ext3"):
|
||||||
|
"""Mount a filesystem at a particular mountpoint"""
|
||||||
|
cmd_args = ['mount']
|
||||||
|
if options is not None:
|
||||||
|
cmd_args.extend(['-o', options])
|
||||||
|
cmd_args.extend([device, mountpoint])
|
||||||
|
try:
|
||||||
|
subprocess.check_output(cmd_args)
|
||||||
|
except subprocess.CalledProcessError as e:
|
||||||
|
log('Error mounting {} at {}\n{}'.format(device, mountpoint, e.output))
|
||||||
|
return False
|
||||||
|
|
||||||
|
if persist:
|
||||||
|
return fstab_add(device, mountpoint, filesystem, options=options)
|
||||||
|
return True
|
||||||
|
|
||||||
|
|
||||||
|
def umount(mountpoint, persist=False):
|
||||||
|
"""Unmount a filesystem"""
|
||||||
|
cmd_args = ['umount', mountpoint]
|
||||||
|
try:
|
||||||
|
subprocess.check_output(cmd_args)
|
||||||
|
except subprocess.CalledProcessError as e:
|
||||||
|
log('Error unmounting {}\n{}'.format(mountpoint, e.output))
|
||||||
|
return False
|
||||||
|
|
||||||
|
if persist:
|
||||||
|
return fstab_remove(mountpoint)
|
||||||
|
return True
|
||||||
|
|
||||||
|
|
||||||
|
def mounts():
|
||||||
|
"""Get a list of all mounted volumes as [[mountpoint,device],[...]]"""
|
||||||
|
with open('/proc/mounts') as f:
|
||||||
|
# [['/mount/point','/dev/path'],[...]]
|
||||||
|
system_mounts = [m[1::-1] for m in [l.strip().split()
|
||||||
|
for l in f.readlines()]]
|
||||||
|
return system_mounts
|
||||||
|
|
||||||
|
|
||||||
|
def fstab_mount(mountpoint):
|
||||||
|
"""Mount filesystem using fstab"""
|
||||||
|
cmd_args = ['mount', mountpoint]
|
||||||
|
try:
|
||||||
|
subprocess.check_output(cmd_args)
|
||||||
|
except subprocess.CalledProcessError as e:
|
||||||
|
log('Error unmounting {}\n{}'.format(mountpoint, e.output))
|
||||||
|
return False
|
||||||
|
return True
|
||||||
|
|
||||||
|
|
||||||
|
def file_hash(path, hash_type='md5'):
|
||||||
|
"""Generate a hash checksum of the contents of 'path' or None if not found.
|
||||||
|
|
||||||
|
:param str hash_type: Any hash alrgorithm supported by :mod:`hashlib`,
|
||||||
|
such as md5, sha1, sha256, sha512, etc.
|
||||||
|
"""
|
||||||
|
if os.path.exists(path):
|
||||||
|
h = getattr(hashlib, hash_type)()
|
||||||
|
with open(path, 'rb') as source:
|
||||||
|
h.update(source.read())
|
||||||
|
return h.hexdigest()
|
||||||
|
else:
|
||||||
|
return None
|
||||||
|
|
||||||
|
|
||||||
|
def path_hash(path):
    """Generate a hash checksum for every file matching 'path'.

    Standard wildcards like '*' and '?' are supported; see the 'glob'
    module documentation for details.

    :return: dict: A { filename: hash } dictionary for all matched files.
        Empty if none found.
    """
    hashes = {}
    for match in glob.iglob(path):
        hashes[match] = file_hash(match)
    return hashes
|
||||||
|
|
||||||
|
|
||||||
|
def check_hash(path, checksum, hash_type='md5'):
    """Validate a file using a cryptographic checksum.

    :param str path: Path of the file to validate.
    :param str checksum: Value of the checksum used to validate the file.
    :param str hash_type: Hash algorithm used to generate `checksum`.
        Can be any hash algorithm supported by :mod:`hashlib`,
        such as md5, sha1, sha256, sha512, etc.
    :raises ChecksumError: If the file fails the checksum check.
    """
    actual_checksum = file_hash(path, hash_type)
    if checksum != actual_checksum:
        raise ChecksumError("'%s' != '%s'" % (checksum, actual_checksum))
|
||||||
|
|
||||||
|
|
||||||
|
class ChecksumError(ValueError):
    """Raised by check_hash() when a file's checksum does not match."""
    pass
|
||||||
|
|
||||||
|
|
||||||
|
def restart_on_change(restart_map, stopstart=False, restart_functions=None):
    """Restart services based on configuration files changing.

    This function is used as a decorator, for example::

        @restart_on_change({
            '/etc/ceph/ceph.conf': ['cinder-api', 'cinder-volume'],
            '/etc/apache/sites-enabled/*': ['apache2'],
            })
        def config_changed():
            pass  # your code here

    In this example, the cinder-api and cinder-volume services
    would be restarted if /etc/ceph/ceph.conf is changed by the
    decorated function. The apache2 service would be restarted if any
    file matching the pattern got changed, created or removed.
    Standard wildcards are supported, see documentation for the 'glob'
    module for more information.

    @param restart_map: {path_file_name: [service_name, ...]}
    @param stopstart: DEFAULT false; whether to stop, start OR restart
    @param restart_functions: nonstandard functions to use to restart services
                              {svc: func, ...}
    @returns result from decorated function
    """
    def wrap(f):
        @functools.wraps(f)
        def wrapped_f(*args, **kwargs):
            # Delegate hashing/restart logic to the helper so other
            # decorators can reuse it.
            return restart_on_change_helper(
                (lambda: f(*args, **kwargs)), restart_map, stopstart,
                restart_functions)
        return wrapped_f
    return wrap
|
||||||
|
|
||||||
|
|
||||||
|
def restart_on_change_helper(lambda_f, restart_map, stopstart=False,
                             restart_functions=None):
    """Helper function to perform the restart_on_change function.

    This is provided for decorators to restart services if files described
    in the restart_map have changed after an invocation of lambda_f().

    @param lambda_f: function to call.
    @param restart_map: {file: [service, ...]}
    @param stopstart: whether to stop, start or restart a service
    @param restart_functions: nonstandard functions to use to restart services
                              {svc: func, ...}
    @returns result of lambda_f()
    """
    if restart_functions is None:
        restart_functions = {}
    # Snapshot file hashes before running the wrapped function so any
    # change it makes can be detected afterwards.
    checksums = {path: path_hash(path) for path in restart_map}
    r = lambda_f()
    # create a list of lists of the services to restart
    restarts = [restart_map[path]
                for path in restart_map
                if path_hash(path) != checksums[path]]
    # create a flat list of ordered services without duplicates from lists
    services_list = list(OrderedDict.fromkeys(itertools.chain(*restarts)))
    if services_list:
        actions = ('stop', 'start') if stopstart else ('restart',)
        for service_name in services_list:
            # Custom restart functions take precedence over plain
            # service stop/start/restart.
            if service_name in restart_functions:
                restart_functions[service_name](service_name)
            else:
                for action in actions:
                    service(action, service_name)
    return r
|
||||||
|
|
||||||
|
|
||||||
|
def pwgen(length=None):
    """Generate a random password.

    :param length: desired password length; when None a random length in
        the range 35-44 is chosen.
    :returns: str of letters and digits, excluding easily-confused
        characters (e.g. 'l', '0', '1') and vowels.
    """
    if length is None:
        # A random length is ok to use a weak PRNG
        length = random.choice(range(35, 45))
    alphanumeric_chars = [
        l for l in (string.ascii_letters + string.digits)
        if l not in 'l0QD1vAEIOUaeiou']
    # Use a crypto-friendly PRNG (e.g. /dev/urandom) for making the
    # actual password
    random_generator = random.SystemRandom()
    random_chars = [
        random_generator.choice(alphanumeric_chars) for _ in range(length)]
    return(''.join(random_chars))
|
||||||
|
|
||||||
|
|
||||||
|
def is_phy_iface(interface):
    """Return True if 'interface' is a physical (non-virtual) NIC."""
    if not interface:
        return False
    sysfs_root = '/sys/class/net'
    if not os.path.isdir(sysfs_root):
        return False
    for entry in glob.glob(os.path.join(sysfs_root, '*')):
        # Virtual devices resolve under a '/virtual/' sysfs path.
        if '/virtual/' in os.path.realpath(entry):
            continue
        if os.path.basename(entry) == interface:
            return True
    return False
|
||||||
|
|
||||||
|
|
||||||
|
def get_bond_master(interface):
    """Return the bond master if 'interface' is a bond slave, else None.

    NOTE: the provided interface is expected to be physical
    """
    if not interface:
        return None
    sysfs_iface = '/sys/class/net/%s' % (interface)
    if not os.path.exists(sysfs_iface):
        return None
    if '/virtual/' in os.path.realpath(sysfs_iface):
        return None
    master_link = os.path.join(sysfs_iface, 'master')
    if not os.path.exists(master_link):
        return None
    master_path = os.path.realpath(master_link)
    # Only report it when the link target really is a bonding master.
    if os.path.exists(os.path.join(master_path, 'bonding')):
        return os.path.basename(master_path)
    return None
|
||||||
|
|
||||||
|
|
||||||
|
def list_nics(nic_type=None):
    """Return a list of nics of given type(s).

    :param nic_type: a prefix string (e.g. 'eth') or list of prefixes;
        when None or empty, all interfaces reported by ip(8) are listed.
    :returns: list of interface names, without duplicates.
    """
    if isinstance(nic_type, six.string_types):
        int_types = [nic_type]
    else:
        int_types = nic_type

    interfaces = []
    if nic_type:
        for int_type in int_types:
            cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
            ip_output = subprocess.check_output(cmd).decode('UTF-8')
            ip_output = ip_output.split('\n')
            ip_output = (line for line in ip_output if line)
            for line in ip_output:
                if line.split()[1].startswith(int_type):
                    # VLAN sub-interfaces appear as e.g. 'eth1.10@eth1';
                    # strip the '@parent' suffix.
                    matched = re.search('.*: (' + int_type +
                                        r'[0-9]+\.[0-9]+)@.*', line)
                    if matched:
                        iface = matched.groups()[0]
                    else:
                        iface = line.split()[1].replace(":", "")

                    if iface not in interfaces:
                        interfaces.append(iface)
    else:
        cmd = ['ip', 'a']
        ip_output = subprocess.check_output(cmd).decode('UTF-8').split('\n')
        ip_output = (line.strip() for line in ip_output if line)

        # Raw string: '\s' is an invalid escape sequence in a normal
        # string literal (a warning on modern Python).
        key = re.compile(r'^[0-9]+:\s+(.+):')
        for line in ip_output:
            matched = re.search(key, line)
            if matched:
                iface = matched.group(1)
                iface = iface.partition("@")[0]
                if iface not in interfaces:
                    interfaces.append(iface)

    return interfaces
|
||||||
|
|
||||||
|
|
||||||
|
def set_nic_mtu(nic, mtu):
    """Set the Maximum Transmission Unit (MTU) on a network interface.

    :param nic: interface name as known to ip(8), e.g. 'eth0'.
    :param mtu: MTU value (passed verbatim on the command line).
    :raises subprocess.CalledProcessError: if the ip command fails.
    """
    cmd = ['ip', 'link', 'set', nic, 'mtu', mtu]
    subprocess.check_call(cmd)
|
||||||
|
|
||||||
|
|
||||||
|
def get_nic_mtu(nic):
    """Return the MTU for a network interface (as a string; '' if unknown)."""
    output = subprocess.check_output(
        ['ip', 'addr', 'show', nic]).decode('UTF-8')
    mtu = ""
    for line in output.split('\n'):
        tokens = line.split()
        # The MTU value follows the 'mtu' token in ip(8) output; keep
        # the last occurrence, matching the original behaviour.
        if 'mtu' in tokens:
            mtu = tokens[tokens.index("mtu") + 1]
    return mtu
|
||||||
|
|
||||||
|
|
||||||
|
def get_nic_hwaddr(nic):
    """Return the Media Access Control (MAC) for a network interface.

    :param nic: interface name, e.g. 'eth0'.
    :returns: MAC address string, or '' when none is reported.
    """
    cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
    ip_output = subprocess.check_output(cmd).decode('UTF-8')
    hwaddr = ""
    words = ip_output.split()
    # The MAC follows the 'link/ether' token in ip(8) output.
    if 'link/ether' in words:
        hwaddr = words[words.index('link/ether') + 1]
    return hwaddr
|
||||||
|
|
||||||
|
|
||||||
|
@contextmanager
def chdir(directory):
    """Temporarily change the current working directory.

    On entering the 'with' block the process chdirs into 'directory';
    on exit (normal or via exception) the previous directory is restored.

    :param str directory: The directory path to change to for this context.
    """
    previous = os.getcwd()
    try:
        # os.chdir() returns None, so the 'as' target of the with
        # statement is None -- same contract as the original.
        yield os.chdir(directory)
    finally:
        os.chdir(previous)
|
||||||
|
|
||||||
|
|
||||||
|
def chownr(path, owner, group, follow_links=True, chowntopdir=False):
    """Recursively change user and group ownership of files and directories
    in given path. Doesn't chown path itself by default, only its children.

    :param str path: The string path to start changing ownership.
    :param str owner: The owner string to use when looking up the uid.
    :param str group: The group string to use when looking up the gid.
    :param bool follow_links: Also follow and chown links if True
    :param bool chowntopdir: Also chown path itself if True
    """
    uid = pwd.getpwnam(owner).pw_uid
    gid = grp.getgrnam(group).gr_gid
    # lchown changes the link itself rather than its target.
    if follow_links:
        chown = os.chown
    else:
        chown = os.lchown

    if chowntopdir:
        # Skip dangling symlinks: lexists() is true for them, but chown
        # on the (missing) target would raise.
        broken_symlink = os.path.lexists(path) and not os.path.exists(path)
        if not broken_symlink:
            chown(path, uid, gid)
    for root, dirs, files in os.walk(path, followlinks=follow_links):
        for name in dirs + files:
            full = os.path.join(root, name)
            broken_symlink = os.path.lexists(full) and not os.path.exists(full)
            if not broken_symlink:
                chown(full, uid, gid)
|
||||||
|
|
||||||
|
|
||||||
|
def lchownr(path, owner, group):
    """Recursively change user and group ownership of files and directories
    in a given path, not following symbolic links. See the documentation for
    'os.lchown' for more information.

    :param str path: The string path to start changing ownership.
    :param str owner: The owner string to use when looking up the uid.
    :param str group: The group string to use when looking up the gid.
    """
    # Thin wrapper: identical to chownr() with link-following disabled.
    chownr(path, owner, group, follow_links=False)
|
||||||
|
|
||||||
|
|
||||||
|
def owner(path):
    """Returns a tuple containing the username & groupname owning the path.

    :param str path: the string path to retrieve the ownership
    :return tuple(str, str): A (username, groupname) tuple containing the
                             name of the user and group owning the path.
    :raises OSError: if the specified path does not exist
    """
    info = os.stat(path)
    user_name = pwd.getpwuid(info.st_uid)[0]
    group_name = grp.getgrgid(info.st_gid)[0]
    return user_name, group_name
|
||||||
|
|
||||||
|
|
||||||
|
def get_total_ram():
    """The total amount of system RAM in bytes.

    This is what is reported by the OS, and may be overcommitted when
    there are multiple containers hosted on the same machine.

    :returns: int, total memory in bytes.
    :raises NotImplementedError: if /proc/meminfo has no MemTotal entry.
    """
    with open('/proc/meminfo', 'r') as f:
        for line in f:
            fields = line.split()
            # Some /proc/meminfo lines (e.g. 'HugePages_Total: 0') have
            # no unit column; match on the key before unpacking so those
            # lines cannot raise a ValueError.
            if len(fields) == 3 and fields[0] == 'MemTotal:':
                key, value, unit = fields
                assert unit == 'kB', 'Unknown unit'
                return int(value) * 1024  # Classic, not KiB.
    raise NotImplementedError()
|
||||||
|
|
||||||
|
|
||||||
|
# Marker file written by upstart when the unit runs inside a container.
UPSTART_CONTAINER_TYPE = '/run/container_type'


def is_container():
    """Determine whether unit is running in a container

    @return: boolean indicating if unit is in a container
    """
    if init_is_systemd():
        # Detect using systemd-detect-virt
        return subprocess.call(['systemd-detect-virt',
                                '--container']) == 0
    else:
        # Detect using upstart container file marker
        return os.path.exists(UPSTART_CONTAINER_TYPE)
|
||||||
|
|
||||||
|
|
||||||
|
def add_to_updatedb_prunepath(path, updatedb_path=UPDATEDB_PATH):
    """Add 'path' to the PRUNEPATHS list in the updatedb.conf file.

    :param path: path to exclude from updatedb/locate indexing.
    :param updatedb_path: config file to rewrite in place.
    """
    with open(updatedb_path, 'r+') as f_id:
        updatedb_text = f_id.read()
        output = updatedb(updatedb_text, path)
        f_id.seek(0)
        f_id.write(output)
        # Drop trailing bytes left behind when the new text is shorter.
        f_id.truncate()
|
||||||
|
|
||||||
|
|
||||||
|
def updatedb(updatedb_text, new_path):
    """Return updatedb.conf text with 'new_path' added to PRUNEPATHS.

    Lines other than PRUNEPATHS= pass through unchanged; the path is
    only appended when it is not already present.
    """
    result_lines = []
    for line in updatedb_text.split("\n"):
        if line.startswith("PRUNEPATHS="):
            existing = line.split("=")[1].replace('"', '').split(" ")
            if new_path not in existing:
                existing.append(new_path)
                line = 'PRUNEPATHS="{}"'.format(' '.join(existing))
        result_lines.append(line)
    return "\n".join(result_lines)
|
0
tests/charmhelpers/core/host_factory/__init__.py
Normal file
0
tests/charmhelpers/core/host_factory/__init__.py
Normal file
56
tests/charmhelpers/core/host_factory/centos.py
Normal file
56
tests/charmhelpers/core/host_factory/centos.py
Normal file
@ -0,0 +1,56 @@
|
|||||||
|
import subprocess
|
||||||
|
import yum
|
||||||
|
import os
|
||||||
|
|
||||||
|
|
||||||
|
def service_available(service_name):
    """Determine whether a system service is available (CentOS)."""
    # systemd is detected by the presence of its runtime directory.
    if os.path.isdir('/run/systemd/system'):
        cmd = ['systemctl', 'is-enabled', service_name]
    else:
        cmd = ['service', service_name, 'is-enabled']
    return subprocess.call(cmd) == 0
|
||||||
|
|
||||||
|
|
||||||
|
def add_new_group(group_name, system_group=False, gid=None):
    """Create a new group via groupadd(8).

    :param group_name: name of the group to create.
    :param system_group: create a system group ('-r') when True.
    :param gid: optional numeric gid to assign.
    """
    command = ['groupadd']
    if gid:
        command += ['--gid', str(gid)]
    if system_group:
        command += ['-r']
    command += [group_name]
    subprocess.check_call(command)
|
||||||
|
|
||||||
|
|
||||||
|
def lsb_release():
    """Return /etc/os-release in a dict."""
    d = {}
    with open('/etc/os-release', 'r') as lsb:
        for l in lsb:
            s = l.split('=')
            # Skip lines that are not simple KEY=value pairs.
            if len(s) != 2:
                continue
            d[s[0].strip()] = s[1].strip()
    return d
|
||||||
|
|
||||||
|
|
||||||
|
def cmp_pkgrevno(package, revno, pkgcache=None):
    """Compare supplied revno with the revno of the installed package.

    * 1 => Installed revno is greater than supplied arg
    * 0 => Installed revno is the same as supplied arg
    * -1 => Installed revno is less than supplied arg

    This function imports YumBase function if the pkgcache argument
    is None.
    """
    # NOTE(review): the comparison below is a plain string comparison,
    # so e.g. '1.10' < '1.9'; rpm's labelCompare would be needed for
    # true version ordering -- confirm whether callers rely on this.
    if not pkgcache:
        y = yum.YumBase()
        packages = y.doPackageLists()
        # NOTE(review): yum package objects normally expose 'name'
        # (lowercase); verify 'i.Name' resolves on the target platform.
        pkgcache = {i.Name: i.version for i in packages['installed']}
    pkg = pkgcache[package]
    if pkg > revno:
        return 1
    if pkg < revno:
        return -1
    return 0
|
56
tests/charmhelpers/core/host_factory/ubuntu.py
Normal file
56
tests/charmhelpers/core/host_factory/ubuntu.py
Normal file
@ -0,0 +1,56 @@
|
|||||||
|
import subprocess
|
||||||
|
|
||||||
|
|
||||||
|
def service_available(service_name):
    """Determine whether a system service is available"""
    try:
        # 'service <name> status' exits non-zero for both unknown
        # services and known-but-stopped ones; only the former counts
        # as unavailable, so inspect the error output.
        subprocess.check_output(
            ['service', service_name, 'status'],
            stderr=subprocess.STDOUT).decode('UTF-8')
    except subprocess.CalledProcessError as e:
        return b'unrecognized service' not in e.output
    else:
        return True
|
||||||
|
|
||||||
|
|
||||||
|
def add_new_group(group_name, system_group=False, gid=None):
    """Create a new group via addgroup(8).

    :param group_name: name of the group to create.
    :param system_group: create a system group when True; otherwise a
        regular user group ('--group') is created.
    :param gid: optional numeric gid to assign.
    """
    command = ['addgroup']
    if gid:
        command += ['--gid', str(gid)]
    if system_group:
        command += ['--system']
    else:
        command += ['--group']
    command += [group_name]
    subprocess.check_call(command)
|
||||||
|
|
||||||
|
|
||||||
|
def lsb_release():
    """Return /etc/lsb-release in a dict"""
    d = {}
    with open('/etc/lsb-release', 'r') as lsb:
        for l in lsb:
            # Split on the first '=' only, so values containing '=' (or
            # malformed lines) cannot raise ValueError; the CentOS
            # counterpart of this helper guards the same way.
            key, sep, value = l.partition('=')
            if not sep:
                continue
            d[key.strip()] = value.strip()
    return d
|
||||||
|
|
||||||
|
|
||||||
|
def cmp_pkgrevno(package, revno, pkgcache=None):
    """Compare supplied revno with the revno of the installed package.

    * 1 => Installed revno is greater than supplied arg
    * 0 => Installed revno is the same as supplied arg
    * -1 => Installed revno is less than supplied arg

    This function imports apt_cache function from charmhelpers.fetch if
    the pkgcache argument is None. Be sure to add charmhelpers.fetch if
    you call this function, or pass an apt_pkg.Cache() instance.
    """
    # apt_pkg is only available on Debian/Ubuntu; import lazily so this
    # module can still be imported elsewhere.
    import apt_pkg
    if not pkgcache:
        from charmhelpers.fetch import apt_cache
        pkgcache = apt_cache()
    pkg = pkgcache[package]
    return apt_pkg.version_compare(pkg.current_ver.ver_str, revno)
|
69
tests/charmhelpers/core/hugepage.py
Normal file
69
tests/charmhelpers/core/hugepage.py
Normal file
@ -0,0 +1,69 @@
|
|||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
import yaml
|
||||||
|
from charmhelpers.core import fstab
|
||||||
|
from charmhelpers.core import sysctl
|
||||||
|
from charmhelpers.core.host import (
|
||||||
|
add_group,
|
||||||
|
add_user_to_group,
|
||||||
|
fstab_mount,
|
||||||
|
mkdir,
|
||||||
|
)
|
||||||
|
from charmhelpers.core.strutils import bytes_from_string
|
||||||
|
from subprocess import check_output
|
||||||
|
|
||||||
|
|
||||||
|
def hugepage_support(user, group='hugetlb', nr_hugepages=256,
                     max_map_count=65536, mnt_point='/run/hugepages/kvm',
                     pagesize='2MB', mount=True, set_shmmax=False):
    """Enable hugepages on system.

    Args:
    user (str)  -- Username to allow access to hugepages to
    group (str) -- Group name to own hugepages
    nr_hugepages (int) -- Number of pages to reserve
    max_map_count (int) -- Number of Virtual Memory Areas a process can own
    mnt_point (str) -- Directory to mount hugepages on
    pagesize (str) -- Size of hugepages
    mount (bool) -- Whether to Mount hugepages
    set_shmmax (bool) -- Whether to raise kernel.shmmax if it is too small
    """
    group_info = add_group(group)
    gid = group_info.gr_gid
    add_user_to_group(user, group)
    # Each hugepage needs up to two VMAs (page + reservation), so make
    # sure max_map_count can accommodate them all.
    if max_map_count < 2 * nr_hugepages:
        max_map_count = 2 * nr_hugepages
    sysctl_settings = {
        'vm.nr_hugepages': nr_hugepages,
        'vm.max_map_count': max_map_count,
        'vm.hugetlb_shm_group': gid,
    }
    if set_shmmax:
        shmmax_current = int(check_output(['sysctl', '-n', 'kernel.shmmax']))
        shmmax_minsize = bytes_from_string(pagesize) * nr_hugepages
        # Only ever raise shmmax, never lower it.
        if shmmax_minsize > shmmax_current:
            sysctl_settings['kernel.shmmax'] = shmmax_minsize
    sysctl.create(yaml.dump(sysctl_settings), '/etc/sysctl.d/10-hugepage.conf')
    mkdir(mnt_point, owner='root', group='root', perms=0o755, force=False)
    lfstab = fstab.Fstab()
    # Replace any stale fstab entry for this mount point before adding
    # the hugetlbfs entry.
    fstab_entry = lfstab.get_entry_by_attr('mountpoint', mnt_point)
    if fstab_entry:
        lfstab.remove_entry(fstab_entry)
    entry = lfstab.Entry('nodev', mnt_point, 'hugetlbfs',
                         'mode=1770,gid={},pagesize={}'.format(gid, pagesize), 0, 0)
    lfstab.add_entry(entry)
    if mount:
        fstab_mount(mnt_point)
|
72
tests/charmhelpers/core/kernel.py
Normal file
72
tests/charmhelpers/core/kernel.py
Normal file
@ -0,0 +1,72 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
import re
|
||||||
|
import subprocess
|
||||||
|
|
||||||
|
from charmhelpers.osplatform import get_platform
|
||||||
|
from charmhelpers.core.hookenv import (
|
||||||
|
log,
|
||||||
|
INFO
|
||||||
|
)
|
||||||
|
|
||||||
|
__platform__ = get_platform()
|
||||||
|
if __platform__ == "ubuntu":
|
||||||
|
from charmhelpers.core.kernel_factory.ubuntu import (
|
||||||
|
persistent_modprobe,
|
||||||
|
update_initramfs,
|
||||||
|
) # flake8: noqa -- ignore F401 for this import
|
||||||
|
elif __platform__ == "centos":
|
||||||
|
from charmhelpers.core.kernel_factory.centos import (
|
||||||
|
persistent_modprobe,
|
||||||
|
update_initramfs,
|
||||||
|
) # flake8: noqa -- ignore F401 for this import
|
||||||
|
|
||||||
|
__author__ = "Jorge Niedbalski <jorge.niedbalski@canonical.com>"
|
||||||
|
|
||||||
|
|
||||||
|
def modprobe(module, persist=True):
    """Load a kernel module and configure for auto-load on reboot.

    :param module: module name to pass to modprobe(8).
    :param persist: when True, also register the module for load at boot.
    :raises subprocess.CalledProcessError: if modprobe fails.
    """
    cmd = ['modprobe', module]

    log('Loading kernel module %s' % module, level=INFO)

    subprocess.check_call(cmd)
    if persist:
        persistent_modprobe(module)
|
||||||
|
|
||||||
|
|
||||||
|
def rmmod(module, force=False):
    """Remove a module from the linux kernel"""
    command = ['rmmod']
    if force:
        command.append('-f')
    command.append(module)
    log('Removing kernel module %s' % module, level=INFO)
    return subprocess.check_call(command)
|
||||||
|
|
||||||
|
|
||||||
|
def lsmod():
    """Shows what kernel modules are currently loaded

    :returns: str, the raw lsmod(8) output.
    """
    return subprocess.check_output(['lsmod'],
                                   universal_newlines=True)
|
||||||
|
|
||||||
|
|
||||||
|
def is_module_loaded(module):
    """Checks if a kernel module is already loaded

    :param module: exact module name as shown in lsmod output.
    :returns: bool, True when the module appears at the start of a line.
    """
    # re.M anchors '^' at every line start of the multi-line lsmod output.
    matches = re.findall('^%s[ ]+' % module, lsmod(), re.M)
    return len(matches) > 0
|
0
tests/charmhelpers/core/kernel_factory/__init__.py
Normal file
0
tests/charmhelpers/core/kernel_factory/__init__.py
Normal file
17
tests/charmhelpers/core/kernel_factory/centos.py
Normal file
17
tests/charmhelpers/core/kernel_factory/centos.py
Normal file
@ -0,0 +1,17 @@
|
|||||||
|
import subprocess
|
||||||
|
import os
|
||||||
|
|
||||||
|
|
||||||
|
def persistent_modprobe(module):
    """Register 'module' in /etc/rc.modules so it is loaded on every boot.

    :param module: kernel module name to persist.
    """
    if not os.path.exists('/etc/rc.modules'):
        # Create the file and close the handle (the original leaked it).
        open('/etc/rc.modules', 'a').close()
        # 0o111 (a+x) was intended; the original passed decimal 111,
        # which is mode 0o157 -- an unintended permission set.
        os.chmod('/etc/rc.modules', 0o111)
    with open('/etc/rc.modules', 'r+') as modules:
        if module not in modules.read():
            modules.write('modprobe %s\n' % module)
|
||||||
|
|
||||||
|
|
||||||
|
def update_initramfs(version='all'):
    """Updates an initramfs image via dracut (CentOS).

    :param version: kernel version to rebuild for, or 'all'.
    :returns: 0 on success; check_call raises otherwise.
    """
    return subprocess.check_call(["dracut", "-f", version])
|
13
tests/charmhelpers/core/kernel_factory/ubuntu.py
Normal file
13
tests/charmhelpers/core/kernel_factory/ubuntu.py
Normal file
@ -0,0 +1,13 @@
|
|||||||
|
import subprocess
|
||||||
|
|
||||||
|
|
||||||
|
def persistent_modprobe(module):
    """Register 'module' in /etc/modules so it is loaded on every boot.

    :param module: kernel module name to persist.
    """
    with open('/etc/modules', 'r+') as modules:
        # Only append when the name is not already present anywhere in
        # the file.
        if module not in modules.read():
            modules.write(module + "\n")
|
||||||
|
|
||||||
|
|
||||||
|
def update_initramfs(version='all'):
    """Updates an initramfs image (Ubuntu).

    :param version: kernel version to rebuild for, or 'all'.
    :returns: 0 on success; check_call raises otherwise.
    """
    return subprocess.check_call(["update-initramfs", "-k", version, "-u"])
|
16
tests/charmhelpers/core/services/__init__.py
Normal file
16
tests/charmhelpers/core/services/__init__.py
Normal file
@ -0,0 +1,16 @@
|
|||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
from .base import * # NOQA
|
||||||
|
from .helpers import * # NOQA
|
351
tests/charmhelpers/core/services/base.py
Normal file
351
tests/charmhelpers/core/services/base.py
Normal file
@ -0,0 +1,351 @@
|
|||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
import os
|
||||||
|
import json
|
||||||
|
from inspect import getargspec
|
||||||
|
from collections import Iterable, OrderedDict
|
||||||
|
|
||||||
|
from charmhelpers.core import host
|
||||||
|
from charmhelpers.core import hookenv
|
||||||
|
|
||||||
|
|
||||||
|
__all__ = ['ServiceManager', 'ManagerCallback',
|
||||||
|
'PortManagerCallback', 'open_ports', 'close_ports', 'manage_ports',
|
||||||
|
'service_restart', 'service_stop']
|
||||||
|
|
||||||
|
|
||||||
|
class ServiceManager(object):
|
||||||
|
def __init__(self, services=None):
|
||||||
|
"""
|
||||||
|
Register a list of services, given their definitions.
|
||||||
|
|
||||||
|
Service definitions are dicts in the following formats (all keys except
|
||||||
|
'service' are optional)::
|
||||||
|
|
||||||
|
{
|
||||||
|
"service": <service name>,
|
||||||
|
"required_data": <list of required data contexts>,
|
||||||
|
"provided_data": <list of provided data contexts>,
|
||||||
|
"data_ready": <one or more callbacks>,
|
||||||
|
"data_lost": <one or more callbacks>,
|
||||||
|
"start": <one or more callbacks>,
|
||||||
|
"stop": <one or more callbacks>,
|
||||||
|
"ports": <list of ports to manage>,
|
||||||
|
}
|
||||||
|
|
||||||
|
The 'required_data' list should contain dicts of required data (or
|
||||||
|
dependency managers that act like dicts and know how to collect the data).
|
||||||
|
Only when all items in the 'required_data' list are populated are the list
|
||||||
|
of 'data_ready' and 'start' callbacks executed. See `is_ready()` for more
|
||||||
|
information.
|
||||||
|
|
||||||
|
The 'provided_data' list should contain relation data providers, most likely
|
||||||
|
a subclass of :class:`charmhelpers.core.services.helpers.RelationContext`,
|
||||||
|
that will indicate a set of data to set on a given relation.
|
||||||
|
|
||||||
|
The 'data_ready' value should be either a single callback, or a list of
|
||||||
|
callbacks, to be called when all items in 'required_data' pass `is_ready()`.
|
||||||
|
Each callback will be called with the service name as the only parameter.
|
||||||
|
After all of the 'data_ready' callbacks are called, the 'start' callbacks
|
||||||
|
are fired.
|
||||||
|
|
||||||
|
The 'data_lost' value should be either a single callback, or a list of
|
||||||
|
callbacks, to be called when a 'required_data' item no longer passes
|
||||||
|
`is_ready()`. Each callback will be called with the service name as the
|
||||||
|
only parameter. After all of the 'data_lost' callbacks are called,
|
||||||
|
the 'stop' callbacks are fired.
|
||||||
|
|
||||||
|
The 'start' value should be either a single callback, or a list of
|
||||||
|
callbacks, to be called when starting the service, after the 'data_ready'
|
||||||
|
callbacks are complete. Each callback will be called with the service
|
||||||
|
name as the only parameter. This defaults to
|
||||||
|
`[host.service_start, services.open_ports]`.
|
||||||
|
|
||||||
|
The 'stop' value should be either a single callback, or a list of
|
||||||
|
callbacks, to be called when stopping the service. If the service is
|
||||||
|
being stopped because it no longer has all of its 'required_data', this
|
||||||
|
will be called after all of the 'data_lost' callbacks are complete.
|
||||||
|
Each callback will be called with the service name as the only parameter.
|
||||||
|
This defaults to `[services.close_ports, host.service_stop]`.
|
||||||
|
|
||||||
|
The 'ports' value should be a list of ports to manage. The default
|
||||||
|
'start' handler will open the ports after the service is started,
|
||||||
|
and the default 'stop' handler will close the ports prior to stopping
|
||||||
|
the service.
|
||||||
|
|
||||||
|
|
||||||
|
Examples:
|
||||||
|
|
||||||
|
The following registers an Upstart service called bingod that depends on
|
||||||
|
a mongodb relation and which runs a custom `db_migrate` function prior to
|
||||||
|
restarting the service, and a Runit service called spadesd::
|
||||||
|
|
||||||
|
manager = services.ServiceManager([
|
||||||
|
{
|
||||||
|
'service': 'bingod',
|
||||||
|
'ports': [80, 443],
|
||||||
|
'required_data': [MongoRelation(), config(), {'my': 'data'}],
|
||||||
|
'data_ready': [
|
||||||
|
services.template(source='bingod.conf'),
|
||||||
|
services.template(source='bingod.ini',
|
||||||
|
target='/etc/bingod.ini',
|
||||||
|
owner='bingo', perms=0400),
|
||||||
|
],
|
||||||
|
},
|
||||||
|
{
|
||||||
|
'service': 'spadesd',
|
||||||
|
'data_ready': services.template(source='spadesd_run.j2',
|
||||||
|
target='/etc/sv/spadesd/run',
|
||||||
|
perms=0555),
|
||||||
|
'start': runit_start,
|
||||||
|
'stop': runit_stop,
|
||||||
|
},
|
||||||
|
])
|
||||||
|
manager.manage()
|
||||||
|
"""
|
||||||
|
self._ready_file = os.path.join(hookenv.charm_dir(), 'READY-SERVICES.json')
|
||||||
|
self._ready = None
|
||||||
|
self.services = OrderedDict()
|
||||||
|
for service in services or []:
|
||||||
|
service_name = service['service']
|
||||||
|
self.services[service_name] = service
|
||||||
|
|
||||||
|
def manage(self):
|
||||||
|
"""
|
||||||
|
Handle the current hook by doing The Right Thing with the registered services.
|
||||||
|
"""
|
||||||
|
hookenv._run_atstart()
|
||||||
|
try:
|
||||||
|
hook_name = hookenv.hook_name()
|
||||||
|
if hook_name == 'stop':
|
||||||
|
self.stop_services()
|
||||||
|
else:
|
||||||
|
self.reconfigure_services()
|
||||||
|
self.provide_data()
|
||||||
|
except SystemExit as x:
|
||||||
|
if x.code is None or x.code == 0:
|
||||||
|
hookenv._run_atexit()
|
||||||
|
hookenv._run_atexit()
|
||||||
|
|
||||||
|
def provide_data(self):
|
||||||
|
"""
|
||||||
|
Set the relation data for each provider in the ``provided_data`` list.
|
||||||
|
|
||||||
|
A provider must have a `name` attribute, which indicates which relation
|
||||||
|
to set data on, and a `provide_data()` method, which returns a dict of
|
||||||
|
data to set.
|
||||||
|
|
||||||
|
The `provide_data()` method can optionally accept two parameters:
|
||||||
|
|
||||||
|
* ``remote_service`` The name of the remote service that the data will
|
||||||
|
be provided to. The `provide_data()` method will be called once
|
||||||
|
for each connected service (not unit). This allows the method to
|
||||||
|
tailor its data to the given service.
|
||||||
|
* ``service_ready`` Whether or not the service definition had all of
|
||||||
|
its requirements met, and thus the ``data_ready`` callbacks run.
|
||||||
|
|
||||||
|
Note that the ``provided_data`` methods are now called **after** the
|
||||||
|
``data_ready`` callbacks are run. This gives the ``data_ready`` callbacks
|
||||||
|
a chance to generate any data necessary for the providing to the remote
|
||||||
|
services.
|
||||||
|
"""
|
||||||
|
for service_name, service in self.services.items():
|
||||||
|
service_ready = self.is_ready(service_name)
|
||||||
|
for provider in service.get('provided_data', []):
|
||||||
|
for relid in hookenv.relation_ids(provider.name):
|
||||||
|
units = hookenv.related_units(relid)
|
||||||
|
if not units:
|
||||||
|
continue
|
||||||
|
remote_service = units[0].split('/')[0]
|
||||||
|
argspec = getargspec(provider.provide_data)
|
||||||
|
if len(argspec.args) > 1:
|
||||||
|
data = provider.provide_data(remote_service, service_ready)
|
||||||
|
else:
|
||||||
|
data = provider.provide_data()
|
||||||
|
if data:
|
||||||
|
hookenv.relation_set(relid, data)
|
||||||
|
|
||||||
|
def reconfigure_services(self, *service_names):
|
||||||
|
"""
|
||||||
|
Update all files for one or more registered services, and,
|
||||||
|
if ready, optionally restart them.
|
||||||
|
|
||||||
|
If no service names are given, reconfigures all registered services.
|
||||||
|
"""
|
||||||
|
for service_name in service_names or self.services.keys():
|
||||||
|
if self.is_ready(service_name):
|
||||||
|
self.fire_event('data_ready', service_name)
|
||||||
|
self.fire_event('start', service_name, default=[
|
||||||
|
service_restart,
|
||||||
|
manage_ports])
|
||||||
|
self.save_ready(service_name)
|
||||||
|
else:
|
||||||
|
if self.was_ready(service_name):
|
||||||
|
self.fire_event('data_lost', service_name)
|
||||||
|
self.fire_event('stop', service_name, default=[
|
||||||
|
manage_ports,
|
||||||
|
service_stop])
|
||||||
|
self.save_lost(service_name)
|
||||||
|
|
||||||
|
def stop_services(self, *service_names):
|
||||||
|
"""
|
||||||
|
Stop one or more registered services, by name.
|
||||||
|
|
||||||
|
If no service names are given, stops all registered services.
|
||||||
|
"""
|
||||||
|
for service_name in service_names or self.services.keys():
|
||||||
|
self.fire_event('stop', service_name, default=[
|
||||||
|
manage_ports,
|
||||||
|
service_stop])
|
||||||
|
|
||||||
|
def get_service(self, service_name):
|
||||||
|
"""
|
||||||
|
Given the name of a registered service, return its service definition.
|
||||||
|
"""
|
||||||
|
service = self.services.get(service_name)
|
||||||
|
if not service:
|
||||||
|
raise KeyError('Service not registered: %s' % service_name)
|
||||||
|
return service
|
||||||
|
|
||||||
|
def fire_event(self, event_name, service_name, default=None):
|
||||||
|
"""
|
||||||
|
Fire a data_ready, data_lost, start, or stop event on a given service.
|
||||||
|
"""
|
||||||
|
service = self.get_service(service_name)
|
||||||
|
callbacks = service.get(event_name, default)
|
||||||
|
if not callbacks:
|
||||||
|
return
|
||||||
|
if not isinstance(callbacks, Iterable):
|
||||||
|
callbacks = [callbacks]
|
||||||
|
for callback in callbacks:
|
||||||
|
if isinstance(callback, ManagerCallback):
|
||||||
|
callback(self, service_name, event_name)
|
||||||
|
else:
|
||||||
|
callback(service_name)
|
||||||
|
|
||||||
|
def is_ready(self, service_name):
|
||||||
|
"""
|
||||||
|
Determine if a registered service is ready, by checking its 'required_data'.
|
||||||
|
|
||||||
|
A 'required_data' item can be any mapping type, and is considered ready
|
||||||
|
if `bool(item)` evaluates as True.
|
||||||
|
"""
|
||||||
|
service = self.get_service(service_name)
|
||||||
|
reqs = service.get('required_data', [])
|
||||||
|
return all(bool(req) for req in reqs)
|
||||||
|
|
||||||
|
def _load_ready_file(self):
|
||||||
|
if self._ready is not None:
|
||||||
|
return
|
||||||
|
if os.path.exists(self._ready_file):
|
||||||
|
with open(self._ready_file) as fp:
|
||||||
|
self._ready = set(json.load(fp))
|
||||||
|
else:
|
||||||
|
self._ready = set()
|
||||||
|
|
||||||
|
def _save_ready_file(self):
|
||||||
|
if self._ready is None:
|
||||||
|
return
|
||||||
|
with open(self._ready_file, 'w') as fp:
|
||||||
|
json.dump(list(self._ready), fp)
|
||||||
|
|
||||||
|
def save_ready(self, service_name):
|
||||||
|
"""
|
||||||
|
Save an indicator that the given service is now data_ready.
|
||||||
|
"""
|
||||||
|
self._load_ready_file()
|
||||||
|
self._ready.add(service_name)
|
||||||
|
self._save_ready_file()
|
||||||
|
|
||||||
|
def save_lost(self, service_name):
|
||||||
|
"""
|
||||||
|
Save an indicator that the given service is no longer data_ready.
|
||||||
|
"""
|
||||||
|
self._load_ready_file()
|
||||||
|
self._ready.discard(service_name)
|
||||||
|
self._save_ready_file()
|
||||||
|
|
||||||
|
def was_ready(self, service_name):
|
||||||
|
"""
|
||||||
|
Determine if the given service was previously data_ready.
|
||||||
|
"""
|
||||||
|
self._load_ready_file()
|
||||||
|
return service_name in self._ready
|
||||||
|
|
||||||
|
|
||||||
|
class ManagerCallback(object):
|
||||||
|
"""
|
||||||
|
Special case of a callback that takes the `ServiceManager` instance
|
||||||
|
in addition to the service name.
|
||||||
|
|
||||||
|
Subclasses should implement `__call__` which should accept three parameters:
|
||||||
|
|
||||||
|
* `manager` The `ServiceManager` instance
|
||||||
|
* `service_name` The name of the service it's being triggered for
|
||||||
|
* `event_name` The name of the event that this callback is handling
|
||||||
|
"""
|
||||||
|
def __call__(self, manager, service_name, event_name):
|
||||||
|
raise NotImplementedError()
|
||||||
|
|
||||||
|
|
||||||
|
class PortManagerCallback(ManagerCallback):
|
||||||
|
"""
|
||||||
|
Callback class that will open or close ports, for use as either
|
||||||
|
a start or stop action.
|
||||||
|
"""
|
||||||
|
def __call__(self, manager, service_name, event_name):
|
||||||
|
service = manager.get_service(service_name)
|
||||||
|
new_ports = service.get('ports', [])
|
||||||
|
port_file = os.path.join(hookenv.charm_dir(), '.{}.ports'.format(service_name))
|
||||||
|
if os.path.exists(port_file):
|
||||||
|
with open(port_file) as fp:
|
||||||
|
old_ports = fp.read().split(',')
|
||||||
|
for old_port in old_ports:
|
||||||
|
if bool(old_port):
|
||||||
|
old_port = int(old_port)
|
||||||
|
if old_port not in new_ports:
|
||||||
|
hookenv.close_port(old_port)
|
||||||
|
with open(port_file, 'w') as fp:
|
||||||
|
fp.write(','.join(str(port) for port in new_ports))
|
||||||
|
for port in new_ports:
|
||||||
|
if event_name == 'start':
|
||||||
|
hookenv.open_port(port)
|
||||||
|
elif event_name == 'stop':
|
||||||
|
hookenv.close_port(port)
|
||||||
|
|
||||||
|
|
||||||
|
def service_stop(service_name):
|
||||||
|
"""
|
||||||
|
Wrapper around host.service_stop to prevent spurious "unknown service"
|
||||||
|
messages in the logs.
|
||||||
|
"""
|
||||||
|
if host.service_running(service_name):
|
||||||
|
host.service_stop(service_name)
|
||||||
|
|
||||||
|
|
||||||
|
def service_restart(service_name):
|
||||||
|
"""
|
||||||
|
Wrapper around host.service_restart to prevent spurious "unknown service"
|
||||||
|
messages in the logs.
|
||||||
|
"""
|
||||||
|
if host.service_available(service_name):
|
||||||
|
if host.service_running(service_name):
|
||||||
|
host.service_restart(service_name)
|
||||||
|
else:
|
||||||
|
host.service_start(service_name)
|
||||||
|
|
||||||
|
|
||||||
|
# Convenience aliases
|
||||||
|
open_ports = close_ports = manage_ports = PortManagerCallback()
|
290
tests/charmhelpers/core/services/helpers.py
Normal file
290
tests/charmhelpers/core/services/helpers.py
Normal file
@ -0,0 +1,290 @@
|
|||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
import os
|
||||||
|
import yaml
|
||||||
|
|
||||||
|
from charmhelpers.core import hookenv
|
||||||
|
from charmhelpers.core import host
|
||||||
|
from charmhelpers.core import templating
|
||||||
|
|
||||||
|
from charmhelpers.core.services.base import ManagerCallback
|
||||||
|
|
||||||
|
|
||||||
|
__all__ = ['RelationContext', 'TemplateCallback',
|
||||||
|
'render_template', 'template']
|
||||||
|
|
||||||
|
|
||||||
|
class RelationContext(dict):
|
||||||
|
"""
|
||||||
|
Base class for a context generator that gets relation data from juju.
|
||||||
|
|
||||||
|
Subclasses must provide the attributes `name`, which is the name of the
|
||||||
|
interface of interest, `interface`, which is the type of the interface of
|
||||||
|
interest, and `required_keys`, which is the set of keys required for the
|
||||||
|
relation to be considered complete. The data for all interfaces matching
|
||||||
|
the `name` attribute that are complete will used to populate the dictionary
|
||||||
|
values (see `get_data`, below).
|
||||||
|
|
||||||
|
The generated context will be namespaced under the relation :attr:`name`,
|
||||||
|
to prevent potential naming conflicts.
|
||||||
|
|
||||||
|
:param str name: Override the relation :attr:`name`, since it can vary from charm to charm
|
||||||
|
:param list additional_required_keys: Extend the list of :attr:`required_keys`
|
||||||
|
"""
|
||||||
|
name = None
|
||||||
|
interface = None
|
||||||
|
|
||||||
|
def __init__(self, name=None, additional_required_keys=None):
|
||||||
|
if not hasattr(self, 'required_keys'):
|
||||||
|
self.required_keys = []
|
||||||
|
|
||||||
|
if name is not None:
|
||||||
|
self.name = name
|
||||||
|
if additional_required_keys:
|
||||||
|
self.required_keys.extend(additional_required_keys)
|
||||||
|
self.get_data()
|
||||||
|
|
||||||
|
def __bool__(self):
|
||||||
|
"""
|
||||||
|
Returns True if all of the required_keys are available.
|
||||||
|
"""
|
||||||
|
return self.is_ready()
|
||||||
|
|
||||||
|
__nonzero__ = __bool__
|
||||||
|
|
||||||
|
def __repr__(self):
|
||||||
|
return super(RelationContext, self).__repr__()
|
||||||
|
|
||||||
|
def is_ready(self):
|
||||||
|
"""
|
||||||
|
Returns True if all of the `required_keys` are available from any units.
|
||||||
|
"""
|
||||||
|
ready = len(self.get(self.name, [])) > 0
|
||||||
|
if not ready:
|
||||||
|
hookenv.log('Incomplete relation: {}'.format(self.__class__.__name__), hookenv.DEBUG)
|
||||||
|
return ready
|
||||||
|
|
||||||
|
def _is_ready(self, unit_data):
|
||||||
|
"""
|
||||||
|
Helper method that tests a set of relation data and returns True if
|
||||||
|
all of the `required_keys` are present.
|
||||||
|
"""
|
||||||
|
return set(unit_data.keys()).issuperset(set(self.required_keys))
|
||||||
|
|
||||||
|
def get_data(self):
|
||||||
|
"""
|
||||||
|
Retrieve the relation data for each unit involved in a relation and,
|
||||||
|
if complete, store it in a list under `self[self.name]`. This
|
||||||
|
is automatically called when the RelationContext is instantiated.
|
||||||
|
|
||||||
|
The units are sorted lexographically first by the service ID, then by
|
||||||
|
the unit ID. Thus, if an interface has two other services, 'db:1'
|
||||||
|
and 'db:2', with 'db:1' having two units, 'wordpress/0' and 'wordpress/1',
|
||||||
|
and 'db:2' having one unit, 'mediawiki/0', all of which have a complete
|
||||||
|
set of data, the relation data for the units will be stored in the
|
||||||
|
order: 'wordpress/0', 'wordpress/1', 'mediawiki/0'.
|
||||||
|
|
||||||
|
If you only care about a single unit on the relation, you can just
|
||||||
|
access it as `{{ interface[0]['key'] }}`. However, if you can at all
|
||||||
|
support multiple units on a relation, you should iterate over the list,
|
||||||
|
like::
|
||||||
|
|
||||||
|
{% for unit in interface -%}
|
||||||
|
{{ unit['key'] }}{% if not loop.last %},{% endif %}
|
||||||
|
{%- endfor %}
|
||||||
|
|
||||||
|
Note that since all sets of relation data from all related services and
|
||||||
|
units are in a single list, if you need to know which service or unit a
|
||||||
|
set of data came from, you'll need to extend this class to preserve
|
||||||
|
that information.
|
||||||
|
"""
|
||||||
|
if not hookenv.relation_ids(self.name):
|
||||||
|
return
|
||||||
|
|
||||||
|
ns = self.setdefault(self.name, [])
|
||||||
|
for rid in sorted(hookenv.relation_ids(self.name)):
|
||||||
|
for unit in sorted(hookenv.related_units(rid)):
|
||||||
|
reldata = hookenv.relation_get(rid=rid, unit=unit)
|
||||||
|
if self._is_ready(reldata):
|
||||||
|
ns.append(reldata)
|
||||||
|
|
||||||
|
def provide_data(self):
|
||||||
|
"""
|
||||||
|
Return data to be relation_set for this interface.
|
||||||
|
"""
|
||||||
|
return {}
|
||||||
|
|
||||||
|
|
||||||
|
class MysqlRelation(RelationContext):
|
||||||
|
"""
|
||||||
|
Relation context for the `mysql` interface.
|
||||||
|
|
||||||
|
:param str name: Override the relation :attr:`name`, since it can vary from charm to charm
|
||||||
|
:param list additional_required_keys: Extend the list of :attr:`required_keys`
|
||||||
|
"""
|
||||||
|
name = 'db'
|
||||||
|
interface = 'mysql'
|
||||||
|
|
||||||
|
def __init__(self, *args, **kwargs):
|
||||||
|
self.required_keys = ['host', 'user', 'password', 'database']
|
||||||
|
RelationContext.__init__(self, *args, **kwargs)
|
||||||
|
|
||||||
|
|
||||||
|
class HttpRelation(RelationContext):
|
||||||
|
"""
|
||||||
|
Relation context for the `http` interface.
|
||||||
|
|
||||||
|
:param str name: Override the relation :attr:`name`, since it can vary from charm to charm
|
||||||
|
:param list additional_required_keys: Extend the list of :attr:`required_keys`
|
||||||
|
"""
|
||||||
|
name = 'website'
|
||||||
|
interface = 'http'
|
||||||
|
|
||||||
|
def __init__(self, *args, **kwargs):
|
||||||
|
self.required_keys = ['host', 'port']
|
||||||
|
RelationContext.__init__(self, *args, **kwargs)
|
||||||
|
|
||||||
|
def provide_data(self):
|
||||||
|
return {
|
||||||
|
'host': hookenv.unit_get('private-address'),
|
||||||
|
'port': 80,
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
class RequiredConfig(dict):
|
||||||
|
"""
|
||||||
|
Data context that loads config options with one or more mandatory options.
|
||||||
|
|
||||||
|
Once the required options have been changed from their default values, all
|
||||||
|
config options will be available, namespaced under `config` to prevent
|
||||||
|
potential naming conflicts (for example, between a config option and a
|
||||||
|
relation property).
|
||||||
|
|
||||||
|
:param list *args: List of options that must be changed from their default values.
|
||||||
|
"""
|
||||||
|
|
||||||
|
def __init__(self, *args):
|
||||||
|
self.required_options = args
|
||||||
|
self['config'] = hookenv.config()
|
||||||
|
with open(os.path.join(hookenv.charm_dir(), 'config.yaml')) as fp:
|
||||||
|
self.config = yaml.load(fp).get('options', {})
|
||||||
|
|
||||||
|
def __bool__(self):
|
||||||
|
for option in self.required_options:
|
||||||
|
if option not in self['config']:
|
||||||
|
return False
|
||||||
|
current_value = self['config'][option]
|
||||||
|
default_value = self.config[option].get('default')
|
||||||
|
if current_value == default_value:
|
||||||
|
return False
|
||||||
|
if current_value in (None, '') and default_value in (None, ''):
|
||||||
|
return False
|
||||||
|
return True
|
||||||
|
|
||||||
|
def __nonzero__(self):
|
||||||
|
return self.__bool__()
|
||||||
|
|
||||||
|
|
||||||
|
class StoredContext(dict):
|
||||||
|
"""
|
||||||
|
A data context that always returns the data that it was first created with.
|
||||||
|
|
||||||
|
This is useful to do a one-time generation of things like passwords, that
|
||||||
|
will thereafter use the same value that was originally generated, instead
|
||||||
|
of generating a new value each time it is run.
|
||||||
|
"""
|
||||||
|
def __init__(self, file_name, config_data):
|
||||||
|
"""
|
||||||
|
If the file exists, populate `self` with the data from the file.
|
||||||
|
Otherwise, populate with the given data and persist it to the file.
|
||||||
|
"""
|
||||||
|
if os.path.exists(file_name):
|
||||||
|
self.update(self.read_context(file_name))
|
||||||
|
else:
|
||||||
|
self.store_context(file_name, config_data)
|
||||||
|
self.update(config_data)
|
||||||
|
|
||||||
|
def store_context(self, file_name, config_data):
|
||||||
|
if not os.path.isabs(file_name):
|
||||||
|
file_name = os.path.join(hookenv.charm_dir(), file_name)
|
||||||
|
with open(file_name, 'w') as file_stream:
|
||||||
|
os.fchmod(file_stream.fileno(), 0o600)
|
||||||
|
yaml.dump(config_data, file_stream)
|
||||||
|
|
||||||
|
def read_context(self, file_name):
|
||||||
|
if not os.path.isabs(file_name):
|
||||||
|
file_name = os.path.join(hookenv.charm_dir(), file_name)
|
||||||
|
with open(file_name, 'r') as file_stream:
|
||||||
|
data = yaml.load(file_stream)
|
||||||
|
if not data:
|
||||||
|
raise OSError("%s is empty" % file_name)
|
||||||
|
return data
|
||||||
|
|
||||||
|
|
||||||
|
class TemplateCallback(ManagerCallback):
|
||||||
|
"""
|
||||||
|
Callback class that will render a Jinja2 template, for use as a ready
|
||||||
|
action.
|
||||||
|
|
||||||
|
:param str source: The template source file, relative to
|
||||||
|
`$CHARM_DIR/templates`
|
||||||
|
|
||||||
|
:param str target: The target to write the rendered template to (or None)
|
||||||
|
:param str owner: The owner of the rendered file
|
||||||
|
:param str group: The group of the rendered file
|
||||||
|
:param int perms: The permissions of the rendered file
|
||||||
|
:param partial on_change_action: functools partial to be executed when
|
||||||
|
rendered file changes
|
||||||
|
:param jinja2 loader template_loader: A jinja2 template loader
|
||||||
|
|
||||||
|
:return str: The rendered template
|
||||||
|
"""
|
||||||
|
def __init__(self, source, target,
|
||||||
|
owner='root', group='root', perms=0o444,
|
||||||
|
on_change_action=None, template_loader=None):
|
||||||
|
self.source = source
|
||||||
|
self.target = target
|
||||||
|
self.owner = owner
|
||||||
|
self.group = group
|
||||||
|
self.perms = perms
|
||||||
|
self.on_change_action = on_change_action
|
||||||
|
self.template_loader = template_loader
|
||||||
|
|
||||||
|
def __call__(self, manager, service_name, event_name):
|
||||||
|
pre_checksum = ''
|
||||||
|
if self.on_change_action and os.path.isfile(self.target):
|
||||||
|
pre_checksum = host.file_hash(self.target)
|
||||||
|
service = manager.get_service(service_name)
|
||||||
|
context = {'ctx': {}}
|
||||||
|
for ctx in service.get('required_data', []):
|
||||||
|
context.update(ctx)
|
||||||
|
context['ctx'].update(ctx)
|
||||||
|
|
||||||
|
result = templating.render(self.source, self.target, context,
|
||||||
|
self.owner, self.group, self.perms,
|
||||||
|
template_loader=self.template_loader)
|
||||||
|
if self.on_change_action:
|
||||||
|
if pre_checksum == host.file_hash(self.target):
|
||||||
|
hookenv.log(
|
||||||
|
'No change detected: {}'.format(self.target),
|
||||||
|
hookenv.DEBUG)
|
||||||
|
else:
|
||||||
|
self.on_change_action()
|
||||||
|
|
||||||
|
return result
|
||||||
|
|
||||||
|
|
||||||
|
# Convenience aliases for templates
|
||||||
|
render_template = template = TemplateCallback
|
70
tests/charmhelpers/core/strutils.py
Normal file
70
tests/charmhelpers/core/strutils.py
Normal file
@ -0,0 +1,70 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
import six
|
||||||
|
import re
|
||||||
|
|
||||||
|
|
||||||
|
def bool_from_string(value):
|
||||||
|
"""Interpret string value as boolean.
|
||||||
|
|
||||||
|
Returns True if value translates to True otherwise False.
|
||||||
|
"""
|
||||||
|
if isinstance(value, six.string_types):
|
||||||
|
value = six.text_type(value)
|
||||||
|
else:
|
||||||
|
msg = "Unable to interpret non-string value '%s' as boolean" % (value)
|
||||||
|
raise ValueError(msg)
|
||||||
|
|
||||||
|
value = value.strip().lower()
|
||||||
|
|
||||||
|
if value in ['y', 'yes', 'true', 't', 'on']:
|
||||||
|
return True
|
||||||
|
elif value in ['n', 'no', 'false', 'f', 'off']:
|
||||||
|
return False
|
||||||
|
|
||||||
|
msg = "Unable to interpret string value '%s' as boolean" % (value)
|
||||||
|
raise ValueError(msg)
|
||||||
|
|
||||||
|
|
||||||
|
def bytes_from_string(value):
|
||||||
|
"""Interpret human readable string value as bytes.
|
||||||
|
|
||||||
|
Returns int
|
||||||
|
"""
|
||||||
|
BYTE_POWER = {
|
||||||
|
'K': 1,
|
||||||
|
'KB': 1,
|
||||||
|
'M': 2,
|
||||||
|
'MB': 2,
|
||||||
|
'G': 3,
|
||||||
|
'GB': 3,
|
||||||
|
'T': 4,
|
||||||
|
'TB': 4,
|
||||||
|
'P': 5,
|
||||||
|
'PB': 5,
|
||||||
|
}
|
||||||
|
if isinstance(value, six.string_types):
|
||||||
|
value = six.text_type(value)
|
||||||
|
else:
|
||||||
|
msg = "Unable to interpret non-string value '%s' as boolean" % (value)
|
||||||
|
raise ValueError(msg)
|
||||||
|
matches = re.match("([0-9]+)([a-zA-Z]+)", value)
|
||||||
|
if not matches:
|
||||||
|
msg = "Unable to interpret string value '%s' as bytes" % (value)
|
||||||
|
raise ValueError(msg)
|
||||||
|
return int(matches.group(1)) * (1024 ** BYTE_POWER[matches.group(2)])
|
54
tests/charmhelpers/core/sysctl.py
Normal file
54
tests/charmhelpers/core/sysctl.py
Normal file
@ -0,0 +1,54 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
|
||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
import yaml
|
||||||
|
|
||||||
|
from subprocess import check_call
|
||||||
|
|
||||||
|
from charmhelpers.core.hookenv import (
|
||||||
|
log,
|
||||||
|
DEBUG,
|
||||||
|
ERROR,
|
||||||
|
)
|
||||||
|
|
||||||
|
__author__ = 'Jorge Niedbalski R. <jorge.niedbalski@canonical.com>'
|
||||||
|
|
||||||
|
|
||||||
|
def create(sysctl_dict, sysctl_file):
|
||||||
|
"""Creates a sysctl.conf file from a YAML associative array
|
||||||
|
|
||||||
|
:param sysctl_dict: a YAML-formatted string of sysctl options eg "{ 'kernel.max_pid': 1337 }"
|
||||||
|
:type sysctl_dict: str
|
||||||
|
:param sysctl_file: path to the sysctl file to be saved
|
||||||
|
:type sysctl_file: str or unicode
|
||||||
|
:returns: None
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
sysctl_dict_parsed = yaml.safe_load(sysctl_dict)
|
||||||
|
except yaml.YAMLError:
|
||||||
|
log("Error parsing YAML sysctl_dict: {}".format(sysctl_dict),
|
||||||
|
level=ERROR)
|
||||||
|
return
|
||||||
|
|
||||||
|
with open(sysctl_file, "w") as fd:
|
||||||
|
for key, value in sysctl_dict_parsed.items():
|
||||||
|
fd.write("{}={}\n".format(key, value))
|
||||||
|
|
||||||
|
log("Updating sysctl_file: %s values: %s" % (sysctl_file, sysctl_dict_parsed),
|
||||||
|
level=DEBUG)
|
||||||
|
|
||||||
|
check_call(["sysctl", "-p", sysctl_file])
|
84
tests/charmhelpers/core/templating.py
Normal file
84
tests/charmhelpers/core/templating.py
Normal file
@ -0,0 +1,84 @@
|
|||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
|
||||||
|
import os
|
||||||
|
import sys
|
||||||
|
|
||||||
|
from charmhelpers.core import host
|
||||||
|
from charmhelpers.core import hookenv
|
||||||
|
|
||||||
|
|
||||||
|
def render(source, target, context, owner='root', group='root',
|
||||||
|
perms=0o444, templates_dir=None, encoding='UTF-8', template_loader=None):
|
||||||
|
"""
|
||||||
|
Render a template.
|
||||||
|
|
||||||
|
The `source` path, if not absolute, is relative to the `templates_dir`.
|
||||||
|
|
||||||
|
The `target` path should be absolute. It can also be `None`, in which
|
||||||
|
case no file will be written.
|
||||||
|
|
||||||
|
The context should be a dict containing the values to be replaced in the
|
||||||
|
template.
|
||||||
|
|
||||||
|
The `owner`, `group`, and `perms` options will be passed to `write_file`.
|
||||||
|
|
||||||
|
If omitted, `templates_dir` defaults to the `templates` folder in the charm.
|
||||||
|
|
||||||
|
The rendered template will be written to the file as well as being returned
|
||||||
|
as a string.
|
||||||
|
|
||||||
|
Note: Using this requires python-jinja2 or python3-jinja2; if it is not
|
||||||
|
installed, calling this will attempt to use charmhelpers.fetch.apt_install
|
||||||
|
to install it.
|
||||||
|
"""
|
||||||
|
try:
|
||||||
|
from jinja2 import FileSystemLoader, Environment, exceptions
|
||||||
|
except ImportError:
|
||||||
|
try:
|
||||||
|
from charmhelpers.fetch import apt_install
|
||||||
|
except ImportError:
|
||||||
|
hookenv.log('Could not import jinja2, and could not import '
|
||||||
|
'charmhelpers.fetch to install it',
|
||||||
|
level=hookenv.ERROR)
|
||||||
|
raise
|
||||||
|
if sys.version_info.major == 2:
|
||||||
|
apt_install('python-jinja2', fatal=True)
|
||||||
|
else:
|
||||||
|
apt_install('python3-jinja2', fatal=True)
|
||||||
|
from jinja2 import FileSystemLoader, Environment, exceptions
|
||||||
|
|
||||||
|
if template_loader:
|
||||||
|
template_env = Environment(loader=template_loader)
|
||||||
|
else:
|
||||||
|
if templates_dir is None:
|
||||||
|
templates_dir = os.path.join(hookenv.charm_dir(), 'templates')
|
||||||
|
template_env = Environment(loader=FileSystemLoader(templates_dir))
|
||||||
|
try:
|
||||||
|
source = source
|
||||||
|
template = template_env.get_template(source)
|
||||||
|
except exceptions.TemplateNotFound as e:
|
||||||
|
hookenv.log('Could not load template %s from %s.' %
|
||||||
|
(source, templates_dir),
|
||||||
|
level=hookenv.ERROR)
|
||||||
|
raise e
|
||||||
|
content = template.render(context)
|
||||||
|
if target is not None:
|
||||||
|
target_dir = os.path.dirname(target)
|
||||||
|
if not os.path.exists(target_dir):
|
||||||
|
# This is a terrible default directory permission, as the file
|
||||||
|
# or its siblings will often contain secrets.
|
||||||
|
host.mkdir(os.path.dirname(target), owner, group, perms=0o755)
|
||||||
|
host.write_file(target, content.encode(encoding), owner, group, perms)
|
||||||
|
return content
|
518
tests/charmhelpers/core/unitdata.py
Normal file
518
tests/charmhelpers/core/unitdata.py
Normal file
@ -0,0 +1,518 @@
|
|||||||
|
#!/usr/bin/env python
|
||||||
|
# -*- coding: utf-8 -*-
|
||||||
|
#
|
||||||
|
# Copyright 2014-2015 Canonical Limited.
|
||||||
|
#
|
||||||
|
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
# you may not use this file except in compliance with the License.
|
||||||
|
# You may obtain a copy of the License at
|
||||||
|
#
|
||||||
|
# http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
#
|
||||||
|
# Unless required by applicable law or agreed to in writing, software
|
||||||
|
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
# See the License for the specific language governing permissions and
|
||||||
|
# limitations under the License.
|
||||||
|
#
|
||||||
|
# Authors:
|
||||||
|
# Kapil Thangavelu <kapil.foss@gmail.com>
|
||||||
|
#
|
||||||
|
"""
|
||||||
|
Intro
|
||||||
|
-----
|
||||||
|
|
||||||
|
A simple way to store state in units. This provides a key value
|
||||||
|
storage with support for versioned, transactional operation,
|
||||||
|
and can calculate deltas from previous values to simplify unit logic
|
||||||
|
when processing changes.
|
||||||
|
|
||||||
|
|
||||||
|
Hook Integration
|
||||||
|
----------------
|
||||||
|
|
||||||
|
There are several extant frameworks for hook execution, including
|
||||||
|
|
||||||
|
- charmhelpers.core.hookenv.Hooks
|
||||||
|
- charmhelpers.core.services.ServiceManager
|
||||||
|
|
||||||
|
The storage classes are framework agnostic, one simple integration is
|
||||||
|
via the HookData contextmanager. It will record the current hook
|
||||||
|
execution environment (including relation data, config data, etc.),
|
||||||
|
setup a transaction and allow easy access to the changes from
|
||||||
|
previously seen values. One consequence of the integration is the
|
||||||
|
reservation of particular keys ('rels', 'unit', 'env', 'config',
|
||||||
|
'charm_revisions') for their respective values.
|
||||||
|
|
||||||
|
Here's a fully worked integration example using hookenv.Hooks::
|
||||||
|
|
||||||
|
from charmhelper.core import hookenv, unitdata
|
||||||
|
|
||||||
|
hook_data = unitdata.HookData()
|
||||||
|
db = unitdata.kv()
|
||||||
|
hooks = hookenv.Hooks()
|
||||||
|
|
||||||
|
@hooks.hook
|
||||||
|
def config_changed():
|
||||||
|
# Print all changes to configuration from previously seen
|
||||||
|
# values.
|
||||||
|
for changed, (prev, cur) in hook_data.conf.items():
|
||||||
|
print('config changed', changed,
|
||||||
|
'previous value', prev,
|
||||||
|
'current value', cur)
|
||||||
|
|
||||||
|
# Get some unit specific bookkeeping
|
||||||
|
if not db.get('pkg_key'):
|
||||||
|
key = urllib.urlopen('https://example.com/pkg_key').read()
|
||||||
|
db.set('pkg_key', key)
|
||||||
|
|
||||||
|
# Directly access all charm config as a mapping.
|
||||||
|
conf = db.getrange('config', True)
|
||||||
|
|
||||||
|
# Directly access all relation data as a mapping
|
||||||
|
rels = db.getrange('rels', True)
|
||||||
|
|
||||||
|
if __name__ == '__main__':
|
||||||
|
with hook_data():
|
||||||
|
hook.execute()
|
||||||
|
|
||||||
|
|
||||||
|
A more basic integration is via the hook_scope context manager which simply
|
||||||
|
manages transaction scope (and records hook name, and timestamp)::
|
||||||
|
|
||||||
|
>>> from unitdata import kv
|
||||||
|
>>> db = kv()
|
||||||
|
>>> with db.hook_scope('install'):
|
||||||
|
... # do work, in transactional scope.
|
||||||
|
... db.set('x', 1)
|
||||||
|
>>> db.get('x')
|
||||||
|
1
|
||||||
|
|
||||||
|
|
||||||
|
Usage
|
||||||
|
-----
|
||||||
|
|
||||||
|
Values are automatically json de/serialized to preserve basic typing
|
||||||
|
and complex data struct capabilities (dicts, lists, ints, booleans, etc).
|
||||||
|
|
||||||
|
Individual values can be manipulated via get/set::
|
||||||
|
|
||||||
|
>>> kv.set('y', True)
|
||||||
|
>>> kv.get('y')
|
||||||
|
True
|
||||||
|
|
||||||
|
# We can set complex values (dicts, lists) as a single key.
|
||||||
|
>>> kv.set('config', {'a': 1, 'b': True})
|
||||||
|
|
||||||
|
# Also supports returning dictionaries as a record which
|
||||||
|
# provides attribute access.
|
||||||
|
>>> config = kv.get('config', record=True)
|
||||||
|
>>> config.b
|
||||||
|
True
|
||||||
|
|
||||||
|
|
||||||
|
Groups of keys can be manipulated with update/getrange::
|
||||||
|
|
||||||
|
>>> kv.update({'z': 1, 'y': 2}, prefix="gui.")
|
||||||
|
>>> kv.getrange('gui.', strip=True)
|
||||||
|
{'z': 1, 'y': 2}
|
||||||
|
|
||||||
|
When updating values, its very helpful to understand which values
|
||||||
|
have actually changed and how have they changed. The storage
|
||||||
|
provides a delta method to provide for this::
|
||||||
|
|
||||||
|
>>> data = {'debug': True, 'option': 2}
|
||||||
|
>>> delta = kv.delta(data, 'config.')
|
||||||
|
>>> delta.debug.previous
|
||||||
|
None
|
||||||
|
>>> delta.debug.current
|
||||||
|
True
|
||||||
|
>>> delta
|
||||||
|
{'debug': (None, True), 'option': (None, 2)}
|
||||||
|
|
||||||
|
Note the delta method does not persist the actual change, it needs to
|
||||||
|
be explicitly saved via 'update' method::
|
||||||
|
|
||||||
|
>>> kv.update(data, 'config.')
|
||||||
|
|
||||||
|
Values modified in the context of a hook scope retain historical values
|
||||||
|
associated to the hookname.
|
||||||
|
|
||||||
|
>>> with db.hook_scope('config-changed'):
|
||||||
|
... db.set('x', 42)
|
||||||
|
>>> db.gethistory('x')
|
||||||
|
[(1, u'x', 1, u'install', u'2015-01-21T16:49:30.038372'),
|
||||||
|
(2, u'x', 42, u'config-changed', u'2015-01-21T16:49:30.038786')]
|
||||||
|
|
||||||
|
"""
|
||||||
|
|
||||||
|
import collections
|
||||||
|
import contextlib
|
||||||
|
import datetime
|
||||||
|
import itertools
|
||||||
|
import json
|
||||||
|
import os
|
||||||
|
import pprint
|
||||||
|
import sqlite3
|
||||||
|
import sys
|
||||||
|
|
||||||
|
__author__ = 'Kapil Thangavelu <kapil.foss@gmail.com>'
|
||||||
|
|
||||||
|
|
||||||
|
class Storage(object):
    """Simple key value database for local unit state within charms.

    Modifications are not persisted unless :meth:`flush` is called.

    To support dicts, lists, integer, floats, and booleans values
    are automatically json encoded/decoded.
    """
    def __init__(self, path=None):
        """Open (or create) the backing sqlite database.

        :param str path: Optional path to the database file. Defaults to
            ``$UNIT_STATE_DB`` when set, otherwise
            ``$CHARM_DIR/.unit-state.db``.
        """
        self.db_path = path
        if path is None:
            if 'UNIT_STATE_DB' in os.environ:
                self.db_path = os.environ['UNIT_STATE_DB']
            else:
                self.db_path = os.path.join(
                    os.environ.get('CHARM_DIR', ''), '.unit-state.db')
        # str() keeps the previous "'%s' % path" coercion while also
        # accepting path-like objects.
        self.conn = sqlite3.connect(str(self.db_path))
        self.cursor = self.conn.cursor()
        # Set to the current hook's row id while inside hook_scope().
        self.revision = None
        self._closed = False
        self._init()

    def close(self):
        """Discard any uncommitted changes and close the connection."""
        if self._closed:
            return
        self.flush(False)
        self.cursor.close()
        self.conn.close()
        self._closed = True

    def get(self, key, default=None, record=False):
        """Fetch the value stored under ``key``.

        :param str key: Key to look up.
        :param default: Value returned when the key is absent.
        :param bool record: When True, wrap the decoded value in a
            :class:`Record` for attribute access (expects a dict value).
        """
        self.cursor.execute('select data from kv where key=?', [key])
        result = self.cursor.fetchone()
        if not result:
            return default
        if record:
            return Record(json.loads(result[0]))
        return json.loads(result[0])

    def getrange(self, key_prefix, strip=False):
        """
        Get a range of keys starting with a common prefix as a mapping of
        keys to values.

        :param str key_prefix: Common prefix among all keys
        :param bool strip: Optionally strip the common prefix from the key
            names in the returned dict
        :return dict: A (possibly empty) dict of key-value mappings
        """
        self.cursor.execute("select key, data from kv where key like ?",
                            ['%s%%' % key_prefix])
        result = self.cursor.fetchall()

        if not result:
            return {}
        if not strip:
            key_prefix = ''
        return dict([
            (k[len(key_prefix):], json.loads(v)) for k, v in result])

    def update(self, mapping, prefix=""):
        """
        Set the values of multiple keys at once.

        :param dict mapping: Mapping of keys to values
        :param str prefix: Optional prefix to apply to all keys in `mapping`
            before setting
        """
        for k, v in mapping.items():
            self.set("%s%s" % (prefix, k), v)

    def unset(self, key):
        """
        Remove a key from the database entirely.
        """
        self.cursor.execute('delete from kv where key=?', [key])
        if self.revision and self.cursor.rowcount:
            # Record the deletion against the current hook revision.
            self.cursor.execute(
                'insert into kv_revisions values (?, ?, ?)',
                [key, self.revision, json.dumps('DELETED')])

    def unsetrange(self, keys=None, prefix=""):
        """
        Remove a range of keys starting with a common prefix, from the
        database entirely.

        :param list keys: List of keys to remove.
        :param str prefix: Optional prefix to apply to all keys in ``keys``
            before removing.
        """
        if keys is not None:
            keys = ['%s%s' % (prefix, key) for key in keys]
            self.cursor.execute('delete from kv where key in (%s)' % ','.join(['?'] * len(keys)), keys)
            if self.revision and self.cursor.rowcount:
                self.cursor.execute(
                    'insert into kv_revisions values %s' % ','.join(['(?, ?, ?)'] * len(keys)),
                    list(itertools.chain.from_iterable((key, self.revision, json.dumps('DELETED')) for key in keys)))
        else:
            self.cursor.execute('delete from kv where key like ?',
                                ['%s%%' % prefix])
            if self.revision and self.cursor.rowcount:
                self.cursor.execute(
                    'insert into kv_revisions values (?, ?, ?)',
                    ['%s%%' % prefix, self.revision, json.dumps('DELETED')])

    def set(self, key, value):
        """
        Set a value in the database.

        :param str key: Key to set the value for
        :param value: Any JSON-serializable value to be set
        :return: ``value``, unchanged
        """
        serialized = json.dumps(value)

        self.cursor.execute('select data from kv where key=?', [key])
        exists = self.cursor.fetchone()

        # Skip mutations to the same value
        if exists:
            if exists[0] == serialized:
                return value

        if not exists:
            self.cursor.execute(
                'insert into kv (key, data) values (?, ?)',
                (key, serialized))
        else:
            self.cursor.execute('''
            update kv
            set data = ?
            where key = ?''', [serialized, key])

        # Outside a hook_scope there is no revision history to maintain.
        if not self.revision:
            return value

        self.cursor.execute(
            'select 1 from kv_revisions where key=? and revision=?',
            [key, self.revision])
        exists = self.cursor.fetchone()

        if not exists:
            self.cursor.execute(
                '''insert into kv_revisions (
                revision, key, data) values (?, ?, ?)''',
                (self.revision, key, serialized))
        else:
            self.cursor.execute(
                '''
                update kv_revisions
                set data = ?
                where key = ?
                 and revision = ?''',
                [serialized, key, self.revision])

        return value

    def delta(self, mapping, prefix):
        """
        return a delta containing values that have changed.
        """
        previous = self.getrange(prefix, strip=True)
        if not previous:
            pk = set()
        else:
            pk = set(previous.keys())
        ck = set(mapping.keys())
        delta = DeltaSet()

        # added
        for k in ck.difference(pk):
            delta[k] = Delta(None, mapping[k])

        # removed
        for k in pk.difference(ck):
            delta[k] = Delta(previous[k], None)

        # changed
        for k in pk.intersection(ck):
            c = mapping[k]
            p = previous[k]
            if c != p:
                delta[k] = Delta(p, c)

        return delta

    @contextlib.contextmanager
    def hook_scope(self, name=""):
        """Scope all future interactions to the current hook execution
        revision."""
        assert not self.revision
        self.cursor.execute(
            'insert into hooks (hook, date) values (?, ?)',
            (name or sys.argv[0],
             datetime.datetime.utcnow().isoformat()))
        self.revision = self.cursor.lastrowid
        try:
            yield self.revision
            self.revision = None
        except:
            # Bare except is deliberate: roll back on *any* error
            # (including BaseException) so a failed hook leaves no
            # partial state, then re-raise.
            self.flush(False)
            self.revision = None
            raise
        else:
            self.flush()

    def flush(self, save=True):
        """Commit (default) or roll back the open transaction."""
        if save:
            self.conn.commit()
        elif self._closed:
            return
        else:
            self.conn.rollback()

    def _init(self):
        # Create the schema on first use; idempotent via "if not exists".
        self.cursor.execute('''
            create table if not exists kv (
               key text,
               data text,
               primary key (key)
               )''')
        self.cursor.execute('''
            create table if not exists kv_revisions (
               key text,
               revision integer,
               data text,
               primary key (key, revision)
               )''')
        self.cursor.execute('''
            create table if not exists hooks (
               version integer primary key autoincrement,
               hook text,
               date text
               )''')
        self.conn.commit()

    def gethistory(self, key, deserialize=False):
        """Return the revision history of ``key`` as
        ``(revision, key, data, hook, date)`` rows.

        :param bool deserialize: When True, JSON-decode the data column and
            parse the date column via :func:`_parse_history`.
        :return list: List of history rows (possibly empty).
        """
        self.cursor.execute(
            '''
            select kv.revision, kv.key, kv.data, h.hook, h.date
            from kv_revisions kv,
                 hooks h
            where kv.key=?
             and kv.revision = h.version
            ''', [key])
        if deserialize is False:
            return self.cursor.fetchall()
        # list() so Python 2 and 3 both return a list here; a bare py3
        # map() would be a lazy iterator, inconsistent with the other path.
        return list(map(_parse_history, self.cursor.fetchall()))

    def debug(self, fh=sys.stderr):
        """Dump the raw kv and kv_revisions tables to ``fh``."""
        self.cursor.execute('select * from kv')
        pprint.pprint(self.cursor.fetchall(), stream=fh)
        self.cursor.execute('select * from kv_revisions')
        pprint.pprint(self.cursor.fetchall(), stream=fh)
|
||||||
|
|
||||||
|
|
||||||
|
def _parse_history(d):
|
||||||
|
return (d[0], d[1], json.loads(d[2]), d[3],
|
||||||
|
datetime.datetime.strptime(d[-1], "%Y-%m-%dT%H:%M:%S.%f"))
|
||||||
|
|
||||||
|
|
||||||
|
class HookData(object):
    """Simple integration for existing hook exec frameworks.

    Records all unit information, and stores deltas for processing
    by the hook.

    Sample::

       from charmhelper.core import hookenv, unitdata

       changes = unitdata.HookData()
       db = unitdata.kv()
       hooks = hookenv.Hooks()

       @hooks.hook
       def config_changed():
           # View all changes to configuration
           for changed, (prev, cur) in changes.conf.items():
               print('config changed', changed,
                     'previous value', prev,
                     'current value', cur)

           # Get some unit specific bookkeeping
           if not db.get('pkg_key'):
               key = urllib.urlopen('https://example.com/pkg_key').read()
               db.set('pkg_key', key)

       if __name__ == '__main__':
           with changes():
               hook.execute()

    """
    def __init__(self):
        self.kv = kv()
        # conf/rels hold the config and relation deltas computed by
        # _record_hook() for the current hook execution.
        self.conf = None
        self.rels = None

    @contextlib.contextmanager
    def __call__(self):
        # Imported lazily to avoid a hard dependency at module import time.
        from charmhelpers.core import hookenv
        hook_name = hookenv.hook_name()

        with self.kv.hook_scope(hook_name):
            self._record_charm_version(hookenv.charm_dir())
            delta_config, delta_relation = self._record_hook(hookenv)
            yield self.kv, delta_config, delta_relation

    def _record_charm_version(self, charm_dir):
        # Record revisions.. charm revisions are meaningless
        # to charm authors as they don't control the revision.
        # so logic dependent on revision is not particularly
        # useful, however it is useful for debugging analysis.
        # Use a context manager so the file handle is always closed
        # (the previous open().read() leaked the handle).
        with open(os.path.join(charm_dir, 'revision')) as f:
            charm_rev = f.read().strip()
        charm_rev = charm_rev or '0'
        revs = self.kv.get('charm_revisions', [])
        if charm_rev not in revs:
            revs.append(charm_rev.strip() or '0')
            self.kv.set('charm_revisions', revs)

    def _record_hook(self, hookenv):
        # Snapshot the hook execution environment into the kv store and
        # return (config delta, relation delta) against prior values.
        data = hookenv.execution_environment()
        self.conf = conf_delta = self.kv.delta(data['conf'], 'config')
        self.rels = rels_delta = self.kv.delta(data['rels'], 'rels')
        self.kv.set('env', dict(data['env']))
        self.kv.set('unit', data['unit'])
        self.kv.set('relid', data.get('relid'))
        return conf_delta, rels_delta
|
||||||
|
|
||||||
|
|
||||||
|
class Record(dict):
    """A dict whose keys are also readable as attributes.

    Missing attribute lookups raise :class:`AttributeError`, matching
    normal attribute semantics.
    """

    __slots__ = ()

    def __getattr__(self, name):
        # Only called when normal attribute lookup fails; fall back to
        # the mapping contents.
        if name not in self:
            raise AttributeError(name)
        return self[name]
|
||||||
|
|
||||||
|
|
||||||
|
class DeltaSet(Record):
    """A Record mapping changed keys to Delta(previous, current) pairs;
    built and returned by Storage.delta()."""

    __slots__ = ()
|
||||||
|
|
||||||
|
|
||||||
|
# Immutable (previous, current) value pair stored in DeltaSet mappings.
Delta = collections.namedtuple('Delta', ['previous', 'current'])


# Process-wide Storage singleton, lazily created by kv().
_KV = None
|
||||||
|
|
||||||
|
|
||||||
|
def kv():
    """Return the process-wide :class:`Storage` singleton, creating it
    (with default path resolution) on first use."""
    global _KV
    if _KV is None:
        _KV = Storage()
    return _KV
|
Loading…
Reference in New Issue
Block a user