Switch to using openstack helpers for templating

This commit is contained in:
James Page 2013-10-18 16:58:17 +01:00
parent cc18a846ac
commit 06059b380f
13 changed files with 541 additions and 115 deletions

View File

@ -1,6 +1,7 @@
branch: lp:~openstack-charmers/charm-helpers/to_upstream
branch: lp:charm-helpers
destination: hooks/charmhelpers
include:
- core
- fetch
- contrib.openstack
- contrib.hahelpers

View File

@ -0,0 +1,74 @@
import os
import uuid
from charmhelpers.core.hookenv import (
relation_ids,
relation_get,
related_units,
config
)
from charmhelpers.contrib.openstack.context import (
OSContextGenerator,
context_complete
)
CEILOMETER_DB = 'ceilometer'
class LoggingConfigContext(OSContextGenerator):
    """Expose the charm's debug/verbose config options to templates."""

    def __call__(self):
        ctxt = {}
        for opt in ('debug', 'verbose'):
            ctxt[opt] = config(opt)
        return ctxt
class MongoDBContext(OSContextGenerator):
    """Collect MongoDB connection details from the shared-db relation."""

    interfaces = ['mongodb']

    def __call__(self):
        # Scan every unit on every shared-db relation; the first unit
        # publishing a complete set of settings wins.
        for rid in relation_ids('shared-db'):
            for db_unit in related_units(rid):
                settings = {
                    "db_host": relation_get('hostname', db_unit, rid),
                    "db_port": relation_get('port', db_unit, rid),
                    "db_name": CEILOMETER_DB,
                }
                if context_complete(settings):
                    return settings
        # No complete relation data yet.
        return {}
# On-disk location of the metering secret shared between services.
SHARED_SECRET = "/etc/ceilometer/secret.txt"


def get_shared_secret():
    """Return the metering secret, generating and persisting one on
    first use so subsequent hook invocations see the same value.
    """
    if os.path.exists(SHARED_SECRET):
        with open(SHARED_SECRET, 'r') as secret_file:
            return secret_file.read().strip()
    # First run: create a fresh random secret and store it.
    secret = str(uuid.uuid4())
    with open(SHARED_SECRET, 'w') as secret_file:
        secret_file.write(secret)
    return secret
# Port the ceilometer API listens on.
CEILOMETER_PORT = 8777


class CeilometerContext(OSContextGenerator):
    """Provide the API port and the metering secret to templates."""

    def __call__(self):
        return {
            'port': CEILOMETER_PORT,
            'metering_secret': get_shared_secret(),
        }
class CeilometerServiceContext(OSContextGenerator):
    """Mirror the settings published on the ceilometer-service relation.

    Returns the first remote unit's settings that form a complete
    context; an empty dict while the relation is incomplete.
    """

    interfaces = ['ceilometer-service']

    def __call__(self):
        for rid in relation_ids('ceilometer-service'):
            for svc_unit in related_units(rid):
                settings = relation_get(unit=svc_unit, rid=rid)
                if context_complete(settings):
                    return settings
        return {}

View File

@ -1,15 +1,10 @@
#!/usr/bin/python
import sys
import os
import ceilometer_utils
from socket import gethostname as get_host_name
from charmhelpers.fetch import (
apt_install, filter_installed_packages,
apt_update
)
from charmhelpers.core.hookenv import (
config,
relation_ids,
@ -19,75 +14,38 @@ from charmhelpers.core.hookenv import (
log
)
from charmhelpers.core.host import (
service_restart
restart_on_change
)
from charmhelpers.contrib.openstack.utils import (
configure_installation_source
)
from ceilometer_utils import (
restart_map,
register_configs,
CEILOMETER_AGENT_PACKAGES
)
from charmhelpers.contrib.openstack.utils import configure_installation_source
hooks = Hooks()
CONFIGS = register_configs()
@hooks.hook()
def install():
    """Install hook: configure the OpenStack package origin and install
    the ceilometer compute-agent packages.
    """
    configure_installation_source(config('openstack-origin'))
    apt_update(fatal=True)
    # NOTE(review): this diff view shows both the legacy module-qualified
    # apt_install call and the new direct-import call; only one of the two
    # exists in the final tree — confirm against the applied revision.
    apt_install(filter_installed_packages(ceilometer_utils.CEILOMETER_AGENT_PACKAGES),
                fatal=True)
    apt_install(
        filter_installed_packages(CEILOMETER_AGENT_PACKAGES),
        fatal=True)
    # TODO(jamespage): Locally scoped relation for nova and others
    #ceilometer_utils.modify_config_file(ceilometer_utils.NOVA_CONF,
    #                                    ceilometer_utils.NOVA_SETTINGS)
def get_conf():
    """Assemble rabbit/keystone credentials from the ceilometer-service
    relation.

    Returns the first complete settings dict found, or None while any
    required value is still missing.
    """
    # Settings that must be supplied by the remote ceilometer service.
    remote_keys = [
        'rabbit_host', 'rabbit_password',
        'keystone_os_username', 'keystone_os_password',
        'keystone_os_tenant', 'keystone_host', 'keystone_port',
        'metering_secret',
    ]
    for relid in relation_ids('ceilometer-service'):
        for unit in related_units(relid):
            conf = dict((key, relation_get(key, unit, relid))
                        for key in remote_keys)
            # Locally fixed rabbit identity.
            conf["rabbit_virtual_host"] = ceilometer_utils.RABBIT_VHOST
            conf["rabbit_userid"] = ceilometer_utils.RABBIT_USER
            if None not in conf.itervalues():
                return conf
    return None
def render_ceilometer_conf(context):
    """Write ceilometer.conf from *context* and restart the compute agents.

    Returns True when the file was (re)written; False when there is no
    context yet or the config file does not exist on disk.
    """
    if not context or not os.path.exists(ceilometer_utils.CEILOMETER_CONF):
        return False
    context['service_port'] = ceilometer_utils.CEILOMETER_PORT
    context['ceilometer_host'] = get_host_name()
    rendered = ceilometer_utils.render_template(
        os.path.basename(ceilometer_utils.CEILOMETER_CONF), context)
    with open(ceilometer_utils.CEILOMETER_CONF, "w") as conf:
        conf.write(rendered)
    for svc in ceilometer_utils.CEILOMETER_COMPUTE_SERVICES:
        service_restart(svc)
    return True
@hooks.hook("ceilometer-service-relation-changed")
@restart_on_change(restart_map())
def ceilometer_changed():
# check if we have rabbit and keystone already set
context = get_conf()
if context:
render_ceilometer_conf(context)
else:
# still waiting
log("ceilometer: rabbit and keystone "
"credentials not yet received from peer.")
CONFIGS.write_all()
if __name__ == '__main__':

View File

@ -1,29 +1,21 @@
import os
import uuid
from charmhelpers.fetch import apt_install as install
from charmhelpers.contrib.openstack import (
templating,
)
from ceilometer_contexts import (
CeilometerServiceContext
)
from charmhelpers.contrib.openstack.utils import (
get_os_codename_package
)
RABBIT_USER = "ceilometer"
RABBIT_VHOST = "ceilometer"
CEILOMETER_CONF = "/etc/ceilometer/ceilometer.conf"
SHARED_SECRET = "/etc/ceilometer/secret.txt"
CEILOMETER_SERVICES = [
'ceilometer-agent-central', 'ceilometer-collector',
'ceilometer-api'
]
CEILOMETER_DB = "ceilometer"
CEILOMETER_SERVICE = "ceilometer"
CEILOMETER_COMPUTE_SERVICES = ['ceilometer-agent-compute']
CEILOMETER_PACKAGES = [
'python-ceilometer', 'ceilometer-common',
'ceilometer-agent-central', 'ceilometer-collector', 'ceilometer-api'
]
CEILOMETER_AGENT_SERVICES = ['ceilometer-agent-compute']
CEILOMETER_AGENT_PACKAGES = [
'python-ceilometer', 'ceilometer-common',
'ceilometer-agent-compute'
]
CEILOMETER_PORT = 8777
CEILOMETER_ROLE = "ResellerAdmin"
NOVA_CONF = "/etc/nova/nova.conf"
NOVA_SETTINGS = [
@ -32,30 +24,49 @@ NOVA_SETTINGS = [
('DEFAULT', 'notification_driver', 'ceilometer.compute.nova_notifier')
]
CONFIG_FILES = {
CEILOMETER_CONF: {
'hook_contexts': [CeilometerServiceContext()],
'services': CEILOMETER_AGENT_SERVICES
}
}
def get_shared_secret():
    """Fetch the metering secret from disk, creating it on first use."""
    if not os.path.exists(SHARED_SECRET):
        # First run: generate a fresh secret and persist it for later hooks.
        new_secret = str(uuid.uuid4())
        with open(SHARED_SECRET, 'w') as secret_file:
            secret_file.write(new_secret)
        return new_secret
    with open(SHARED_SECRET, 'r') as secret_file:
        return secret_file.read().strip()
TEMPLATES = 'templates'
TEMPLATES_DIR = 'templates'
def register_configs():
    """
    Register config files with their respective contexts.

    Registration of some configs may not be required depending on
    the existence of certain relations.

    :returns: an OSConfigRenderer with every CONFIG_FILES entry registered.
    """
    # if called without anything installed (eg during install hook)
    # just default to earliest supported release. configs dont get touched
    # till post-install, anyway.
    release = get_os_codename_package('ceilometer-common', fatal=False) \
        or 'grizzly'
    configs = templating.OSConfigRenderer(templates_dir=TEMPLATES,
                                          openstack_release=release)
    # NOTE(review): the jinja2 bootstrap below appears to be legacy code
    # retained in this diff view; OSConfigRenderer handles templating
    # itself — confirm against the applied revision.
    try:
        import jinja2
    except ImportError:
        install(['python-jinja2'])
        import jinja2
    for conf in CONFIG_FILES:
        configs.register(conf, CONFIG_FILES[conf]['hook_contexts'])
    return configs
def render_template(template_name, context, template_dir=TEMPLATES_DIR):
    """Render *template_name* from *template_dir* with *context* and
    return the result as a string."""
    env = jinja2.Environment(
        loader=jinja2.FileSystemLoader(template_dir))
    return env.get_template(template_name).render(context)
def restart_map():
    '''
    Determine the correct resource map to be passed to
    charmhelpers.core.restart_on_change() based on the services configured.

    :returns: dict: A dictionary mapping config file to lists of services
                    that should be restarted when file changes.
    '''
    # Idiom fix: the original built each service list with a manual
    # append loop; a dict comprehension expresses the same mapping
    # directly (entries with no services are still omitted).
    return {f: list(ctxt['services'])
            for f, ctxt in CONFIG_FILES.iteritems()
            if ctxt['services']}

View File

@ -0,0 +1,58 @@
#
# Copyright 2012 Canonical Ltd.
#
# This file is sourced from lp:openstack-charm-helpers
#
# Authors:
# James Page <james.page@ubuntu.com>
# Adam Gandelman <adamg@ubuntu.com>
#
import subprocess
from charmhelpers.core.hookenv import (
config as config_get,
relation_get,
relation_ids,
related_units as relation_list,
log,
INFO,
)
def get_cert():
    """Return an (ssl_cert, ssl_key) pair.

    Prefers charm configuration; when config does not supply both values,
    falls back to scanning identity-service relations.
    """
    cert = config_get('ssl_cert')
    key = config_get('ssl_key')
    if cert and key:
        return (cert, key)
    log("Inspecting identity-service relations for SSL certificate.",
        level=INFO)
    cert = key = None
    for r_id in relation_ids('identity-service'):
        for unit in relation_list(r_id):
            # Only fetch values that have not been found yet.
            cert = cert or relation_get('ssl_cert', rid=r_id, unit=unit)
            key = key or relation_get('ssl_key', rid=r_id, unit=unit)
    return (cert, key)
def get_ca_cert():
    """Return the CA certificate advertised on an identity-service
    relation, or None when no unit has published one."""
    log("Inspecting identity-service relations for CA SSL certificate.",
        level=INFO)
    ca_cert = None
    for rid in relation_ids('identity-service'):
        for peer in relation_list(rid):
            # Keep scanning until a truthy value has been captured.
            if not ca_cert:
                ca_cert = relation_get('ca_cert', rid=rid, unit=peer)
    return ca_cert
def install_ca_cert(ca_cert):
    """Install *ca_cert* into the system trust store.

    No-op when *ca_cert* is empty/None.  Refreshes the system CA bundle
    via update-ca-certificates after writing the cert.
    """
    if not ca_cert:
        return
    crt_path = ('/usr/local/share/ca-certificates/'
                'keystone_juju_ca_cert.crt')
    with open(crt_path, 'w') as crt:
        crt.write(ca_cert)
    subprocess.check_call(['update-ca-certificates', '--fresh'])

View File

@ -0,0 +1,183 @@
#
# Copyright 2012 Canonical Ltd.
#
# Authors:
# James Page <james.page@ubuntu.com>
# Adam Gandelman <adamg@ubuntu.com>
#
import subprocess
import os
from socket import gethostname as get_unit_hostname
from charmhelpers.core.hookenv import (
log,
relation_ids,
related_units as relation_list,
relation_get,
config as config_get,
INFO,
ERROR,
unit_get,
)
class HAIncompleteConfig(Exception):
    """Raised when required hacluster settings are missing from config."""
def is_clustered():
    """Return True if any unit on an 'ha' relation reports 'clustered'."""
    for rid in (relation_ids('ha') or []):
        for peer in (relation_list(rid) or []):
            if relation_get('clustered', rid=rid, unit=peer):
                return True
    return False
def is_leader(resource):
    """Return True when this unit currently owns *resource* according
    to the CRM (pacemaker) resource status."""
    cmd = ['crm', 'resource', 'show', resource]
    try:
        status = subprocess.check_output(cmd)
    except subprocess.CalledProcessError:
        # crm failed or resource unknown -- treat as not leader.
        return False
    return get_unit_hostname() in status
def peer_units():
    """Return all peer units present on 'cluster' relations."""
    return [unit
            for rid in (relation_ids('cluster') or [])
            for unit in (relation_list(rid) or [])]
def oldest_peer(peers):
    """Return True when the local unit has the lowest unit number of
    all *peers*, i.e. it is the eldest (and acts as leader when there
    is no CRM)."""
    local_no = int(os.getenv('JUJU_UNIT_NAME').split('/')[1])
    return all(int(peer.split('/')[1]) >= local_no for peer in peers)
def eligible_leader(resource):
    """Decide whether this unit should act for *resource*.

    Clustered: defer to the CRM resource owner.
    Unclustered: defer to the oldest peer unit.
    """
    if is_clustered():
        if not is_leader(resource):
            log('Deferring action to CRM leader.', level=INFO)
            return False
        return True
    peers = peer_units()
    if peers and not oldest_peer(peers):
        log('Deferring action to oldest service unit.', level=INFO)
        return False
    return True
def https():
    '''
    Determines whether enough data has been provided in configuration
    or relation data to configure HTTPS
    .
    returns: boolean
    '''
    if config_get('use-https') == "yes":
        return True
    if config_get('ssl_cert') and config_get('ssl_key'):
        return True
    required = ['https_keystone', 'ssl_cert', 'ssl_key', 'ca_cert']
    for rid in relation_ids('identity-service'):
        for peer in relation_list(rid):
            rel_state = [relation_get(setting, rid=rid, unit=peer)
                         for setting in required]
            # NOTE: works around (LP: #1203241)
            if all(v is not None and v != '' for v in rel_state):
                return True
    return False
def determine_api_port(public_port):
    '''
    Determine correct API server listening port based on
    existence of HTTPS reverse proxy and/or haproxy.

    public_port: int: standard public port for given service

    returns: int: the correct listening port for the API service
    '''
    # Step down 10 for each proxy layer in front of the API.
    offset = 0
    if peer_units() or is_clustered():
        offset += 10
    if https():
        offset += 10
    return public_port - offset
def determine_haproxy_port(public_port):
    '''
    Description: Determine correct proxy listening port based on public IP +
    existence of HTTPS reverse proxy.

    public_port: int: standard public port for given service

    returns: int: the correct listening port for the HAProxy service
    '''
    # HAProxy sits one step (10 ports) below the SSL terminator, if any.
    return public_port - (10 if https() else 0)
def get_hacluster_config():
    '''
    Obtains all relevant configuration from charm configuration required
    for initiating a relation to hacluster:
        ha-bindiface, ha-mcastport, vip, vip_iface, vip_cidr

    returns: dict: A dict containing settings keyed by setting name.
    raises: HAIncompleteConfig if settings are missing.
    '''
    settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'vip_iface', 'vip_cidr']
    conf = dict((setting, config_get(setting)) for setting in settings)
    # Idiom fix: filter with a plain comprehension instead of abusing a
    # list comprehension for its append side effect.
    missing = [s for s, v in conf.iteritems() if v is None]
    if missing:
        log('Insufficient config data to configure hacluster.', level=ERROR)
        raise HAIncompleteConfig
    return conf
def canonical_url(configs, vip_setting='vip'):
    '''
    Returns the correct HTTP URL to this host given the state of HTTPS
    configuration and hacluster.

    :configs    : OSTemplateRenderer: A config templating object to inspect
                  for a complete https context.
    :vip_setting: str: Setting in charm config that specifies
                  VIP address.
    '''
    secure = 'https' in configs.complete_contexts()
    scheme = 'https' if secure else 'http'
    # Clustered units answer on the VIP; standalone units on their own
    # private address.
    if is_clustered():
        addr = config_get(vip_setting)
    else:
        addr = unit_get('private-address')
    return '%s://%s' % (scheme, addr)

View File

@ -1,3 +1,4 @@
import json
import os
from base64 import b64decode
@ -21,6 +22,7 @@ from charmhelpers.core.hookenv import (
related_units,
unit_get,
unit_private_ip,
ERROR,
WARNING,
)
@ -431,3 +433,90 @@ class OSConfigFlagContext(OSContextGenerator):
flags[k.strip()] = v
ctxt = {'user_config_flags': flags}
return ctxt
class SubordinateConfigContext(OSContextGenerator):
    """
    Responsible for inspecting relations to subordinates that
    may be exporting required config via a json blob.

    The subordinate interface allows subordinates to export their
    configuration requirements to the principal for multiple config
    files and multiple services.  Ie, a subordinate that has interfaces
    to both glance and nova may export the following yaml blob as json:

        glance:
            /etc/glance/glance-api.conf:
                sections:
                    DEFAULT:
                        - [key1, value1]
            /etc/glance/glance-registry.conf:
                    MYSECTION:
                        - [key2, value2]
        nova:
            /etc/nova/nova.conf:
                sections:
                    DEFAULT:
                        - [key3, value3]

    It is then up to the principal charms to subscribe this context to
    the service+config file it is interested in.  Configuration data will
    be available in the template context, in glance's case, as:

        ctxt = {
            ... other context ...
            'subordinate_config': {
                'DEFAULT': {
                    'key1': 'value1',
                },
                'MYSECTION': {
                    'key2': 'value2',
                },
            }
        }
    """

    def __init__(self, service, config_file, interface):
        """
        :param service     : Service name key to query in any subordinate
                             data found.
        :param config_file : Service's config file to query sections.
        :param interface   : Subordinate interface to inspect.
        """
        self.service = service
        self.config_file = config_file
        self.interface = interface

    def __call__(self):
        ctxt = {}
        for rid in relation_ids(self.interface):
            for unit in related_units(rid):
                sub_config = relation_get('subordinate_configuration',
                                          rid=rid, unit=unit)
                if not sub_config:
                    continue
                try:
                    sub_config = json.loads(sub_config)
                # Fixed: catch only JSON decode failures (ValueError on
                # Python 2) instead of a bare except that swallowed
                # everything, including KeyboardInterrupt.
                except ValueError:
                    log('Could not parse JSON from subordinate_config '
                        'setting from %s' % rid, level=ERROR)
                    continue
                if self.service not in sub_config:
                    # Fixed: missing space produced "containednothing".
                    log('Found subordinate_config on %s but it contained '
                        'nothing for %s service' % (rid, self.service))
                    continue
                sub_config = sub_config[self.service]
                if self.config_file not in sub_config:
                    log('Found subordinate_config on %s but it contained '
                        'nothing for %s' % (rid, self.config_file))
                    continue
                sub_config = sub_config[self.config_file]
                for k, v in sub_config.iteritems():
                    ctxt[k] = v
        if not ctxt:
            # Guarantee templates always see a 'sections' key.
            ctxt['sections'] = {}
        return ctxt

View File

@ -85,7 +85,7 @@ def neutron_plugin_attribute(plugin, attr, net_manager=None):
_plugin = plugins[plugin]
except KeyError:
log('Unrecognised plugin for %s: %s' % (manager, plugin), level=ERROR)
raise
raise Exception
try:
return _plugin[attr]
@ -108,7 +108,7 @@ def network_manager():
if release in ['essex']:
# E does not support neutron
log('Neutron networking not supported in Essex.', level=ERROR)
raise
raise Exception
elif release in ['folsom', 'grizzly']:
# neutron is named quantum in F and G
return 'quantum'

View File

@ -1,15 +0,0 @@
[DEFAULT]
debug=True
verbose=True
metering_secret={{ metering_secret }}
rabbit_host={{ rabbit_host }}
rabbit_port=5672
rabbit_userid={{ rabbit_userid }}
rabbit_password={{ rabbit_password }}
rabbit_virtual_host={{ rabbit_virtual_host }}
os_auth_url=http://{{ keystone_host }}:{{ keystone_port }}/v2.0
os_tenant_name = {{ keystone_os_tenant }}
os_username = {{ keystone_os_username }}
os_password = {{ keystone_os_password }}
logdir = /var/log/ceilometer
host = {{ ceilometer_host }}

View File

@ -0,0 +1,21 @@
[DEFAULT]
debug = {{ debug }}
verbose = {{ verbose }}
metering_secret = {{ metering_secret }}
rabbit_host = {{ rabbitmq_host }}
rabbit_userid = {{ rabbitmq_user }}
rabbit_password = {{ rabbitmq_password }}
rabbit_virtual_host = {{ rabbitmq_virtual_host }}
os_auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/v2.0
os_tenant_name = {{ admin_tenant_name }}
os_username = {{ admin_user }}
os_password = {{ admin_password }}
logdir = /var/log/ceilometer
host = {{ ceilometer_host }}
# from socket import gethostname as get_host_name

View File

@ -0,0 +1,23 @@
# grizzly
###############################################################################
# [ WARNING ]
# ceilometer configuration file maintained by Juju
# local changes may be overwritten.
###############################################################################
[DEFAULT]
debug = {{ debug }}
verbose = {{ verbose }}
metering_secret = {{ metering_secret }}
rabbit_host = {{ rabbitmq_host }}
rabbit_userid = {{ rabbitmq_user }}
rabbit_password = {{ rabbitmq_password }}
rabbit_virtual_host = {{ rabbitmq_virtual_host }}
os_auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/v2.0
os_tenant_name = {{ admin_tenant_name }}
os_username = {{ admin_user }}
os_password = {{ admin_password }}
logdir = /var/log/ceilometer

View File

@ -0,0 +1,23 @@
# havana
###############################################################################
# [ WARNING ]
# ceilometer configuration file maintained by Juju
# local changes may be overwritten.
###############################################################################
[DEFAULT]
debug = {{ debug }}
verbose = {{ verbose }}
rabbit_host = {{ rabbitmq_host }}
rabbit_userid = {{ rabbitmq_user }}
rabbit_password = {{ rabbitmq_password }}
rabbit_virtual_host = {{ rabbitmq_virtual_host }}
[service_credentials]
os_auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/v2.0
os_tenant_name = {{ admin_tenant_name }}
os_username = {{ admin_user }}
os_password = {{ admin_password }}
[publisher_rpc]
metering_secret = {{ metering_secret }}