2011-12-08 09:52:12 -08:00
|
|
|
#!/usr/bin/python
|
2013-03-18 12:56:57 +00:00
|
|
|
import subprocess
|
|
|
|
import os
|
2014-12-12 15:21:32 +00:00
|
|
|
import uuid
|
2014-02-25 12:34:13 +01:00
|
|
|
import urlparse
|
2014-02-26 16:54:26 +00:00
|
|
|
import time
|
2012-10-02 17:36:25 -07:00
|
|
|
|
2014-02-25 12:34:13 +01:00
|
|
|
from base64 import b64encode
|
|
|
|
from collections import OrderedDict
|
|
|
|
from copy import deepcopy
|
|
|
|
|
|
|
|
from charmhelpers.contrib.hahelpers.cluster import(
|
|
|
|
eligible_leader,
|
|
|
|
determine_api_port,
|
|
|
|
https,
|
2014-12-12 15:21:32 +00:00
|
|
|
is_clustered,
|
|
|
|
is_elected_leader,
|
2014-03-03 09:14:09 +00:00
|
|
|
)
|
2011-12-08 09:52:12 -08:00
|
|
|
|
2014-02-25 12:34:13 +01:00
|
|
|
from charmhelpers.contrib.openstack import context, templating
|
2014-07-04 12:43:53 +01:00
|
|
|
from charmhelpers.contrib.network.ip import (
|
2014-08-04 21:47:53 +08:00
|
|
|
is_ipv6,
|
2014-09-18 19:56:23 +08:00
|
|
|
get_ipv6_addr
|
2014-07-04 12:43:53 +01:00
|
|
|
)
|
2013-03-18 12:56:57 +00:00
|
|
|
|
2014-07-28 15:30:41 +01:00
|
|
|
from charmhelpers.contrib.openstack.ip import (
|
|
|
|
resolve_address,
|
|
|
|
PUBLIC,
|
|
|
|
INTERNAL,
|
|
|
|
ADMIN
|
|
|
|
)
|
|
|
|
|
2014-02-25 12:34:13 +01:00
|
|
|
from charmhelpers.contrib.openstack.utils import (
|
|
|
|
configure_installation_source,
|
|
|
|
error_out,
|
|
|
|
get_os_codename_install_source,
|
|
|
|
os_release,
|
|
|
|
save_script_rc as _save_script_rc)
|
|
|
|
|
|
|
|
import charmhelpers.contrib.unison as unison
|
|
|
|
|
|
|
|
from charmhelpers.core.hookenv import (
|
|
|
|
config,
|
|
|
|
log,
|
2014-12-12 18:56:49 +00:00
|
|
|
local_unit,
|
2014-02-25 12:34:13 +01:00
|
|
|
relation_get,
|
|
|
|
relation_set,
|
2014-12-12 15:21:32 +00:00
|
|
|
relation_ids,
|
2014-12-12 15:58:04 +00:00
|
|
|
DEBUG,
|
2014-02-25 12:34:13 +01:00
|
|
|
INFO,
|
|
|
|
)
|
|
|
|
|
|
|
|
from charmhelpers.fetch import (
|
|
|
|
apt_install,
|
|
|
|
apt_update,
|
2014-03-28 11:45:58 +00:00
|
|
|
apt_upgrade,
|
2014-09-18 19:23:52 +08:00
|
|
|
add_source
|
2014-02-25 12:34:13 +01:00
|
|
|
)
|
|
|
|
|
2014-02-26 16:54:26 +00:00
|
|
|
from charmhelpers.core.host import (
|
|
|
|
service_stop,
|
|
|
|
service_start,
|
2014-09-18 19:23:52 +08:00
|
|
|
pwgen,
|
2014-09-18 19:56:23 +08:00
|
|
|
lsb_release
|
2014-03-28 10:39:49 +00:00
|
|
|
)
|
|
|
|
|
|
|
|
from charmhelpers.contrib.peerstorage import (
|
2014-08-19 12:39:31 +00:00
|
|
|
peer_store_and_set,
|
2014-03-28 10:39:49 +00:00
|
|
|
peer_store,
|
|
|
|
peer_retrieve,
|
2014-02-26 16:54:26 +00:00
|
|
|
)
|
|
|
|
|
2014-02-25 12:34:13 +01:00
|
|
|
import keystone_context
|
|
|
|
import keystone_ssl as ssl
|
2013-02-07 21:03:44 -08:00
|
|
|
|
2014-02-25 12:34:13 +01:00
|
|
|
# Directory containing the charm's configuration file templates.
TEMPLATES = 'templates/'

# removed from original: charm-helper-sh
# Packages installed on every unit regardless of OpenStack release.
BASE_PACKAGES = [
    'apache2',
    'haproxy',
    'openssl',
    'python-keystoneclient',
    'python-mysqldb',
    'python-psycopg2',
    'pwgen',
    'unison',
    'uuid',
]

# Services always managed by this charm.
BASE_SERVICES = [
    'keystone',
]

# Map of logical API names to their configured listening ports.
API_PORTS = {
    'keystone-admin': config('admin-port'),
    'keystone-public': config('service-port')
}

KEYSTONE_CONF = "/etc/keystone/keystone.conf"
KEYSTONE_CONF_DIR = os.path.dirname(KEYSTONE_CONF)
# Files where a generated admin password/token are persisted so repeated
# hook runs stay idempotent.
STORED_PASSWD = "/var/lib/keystone/keystone.passwd"
STORED_TOKEN = "/var/lib/keystone/keystone.token"
# Legacy on-disk store of per-service passwords (migrated to peer storage
# by _migrate_service_passwords()).
SERVICE_PASSWD_PATH = '/var/lib/keystone/services.passwd'

HAPROXY_CONF = '/etc/haproxy/haproxy.cfg'
APACHE_CONF = '/etc/apache2/sites-available/openstack_https_frontend'
# Apache >= 2.4 only loads site files carrying a .conf suffix.
APACHE_24_CONF = '/etc/apache2/sites-available/openstack_https_frontend.conf'

# Directory holding CA/SSL material that is synchronized between peers.
SSL_DIR = '/var/lib/keystone/juju_ssl/'
SSL_CA_NAME = 'Ubuntu Cloud'
# Cluster resource name used when determining leadership/eligibility.
CLUSTER_RES = 'grp_ks_vips'
# System user used for unison-over-ssh peer synchronization.
SSH_USER = 'juju_keystone'
|
2013-02-07 21:03:44 -08:00
|
|
|
|
2014-02-25 12:34:13 +01:00
|
|
|
# Base mapping of config file path -> the services to restart when it
# changes and the template contexts used to render it.  resource_map()
# prunes this per host (Apache 2.4 vs older site-file layout).
BASE_RESOURCE_MAP = OrderedDict([
    (KEYSTONE_CONF, {
        'services': BASE_SERVICES,
        'contexts': [keystone_context.KeystoneContext(),
                     context.SharedDBContext(ssl_dir=KEYSTONE_CONF_DIR),
                     context.PostgresqlDBContext(),
                     context.SyslogContext(),
                     keystone_context.HAProxyContext(),
                     context.BindHostContext(),
                     context.WorkerConfigContext()],
    }),
    (HAPROXY_CONF, {
        'contexts': [context.HAProxyContext(),
                     keystone_context.HAProxyContext()],
        'services': ['haproxy'],
    }),
    (APACHE_CONF, {
        'contexts': [keystone_context.ApacheSSLContext()],
        'services': ['apache2'],
    }),
    (APACHE_24_CONF, {
        'contexts': [keystone_context.ApacheSSLContext()],
        'services': ['apache2'],
    }),
])
|
|
|
|
|
|
|
|
CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'

# Services this charm knows how to register, mapped to their keystone
# service type and a human-readable description.
valid_services = {
    "nova": {
        "type": "compute",
        "desc": "Nova Compute Service"
    },
    "nova-volume": {
        "type": "volume",
        "desc": "Nova Volume Service"
    },
    "cinder": {
        "type": "volume",
        "desc": "Cinder Volume Service"
    },
    "ec2": {
        "type": "ec2",
        "desc": "EC2 Compatibility Layer"
    },
    "glance": {
        "type": "image",
        "desc": "Glance Image Service"
    },
    "s3": {
        "type": "s3",
        "desc": "S3 Compatible object-store"
    },
    "swift": {
        "type": "object-store",
        "desc": "Swift Object Storage Service"
    },
    "quantum": {
        "type": "network",
        "desc": "Quantum Networking Service"
    },
    "oxygen": {
        "type": "oxygen",
        "desc": "Oxygen Cloud Image Service"
    },
    "ceilometer": {
        "type": "metering",
        "desc": "Ceilometer Metering Service"
    },
    "heat": {
        "type": "orchestration",
        "desc": "Heat Orchestration API"
    },
    "heat-cfn": {
        "type": "cloudformation",
        "desc": "Heat CloudFormation API"
    },
    "image-stream": {
        "type": "product-streams",
        "desc": "Ubuntu Product Streams"
    }
}
|
|
|
|
|
2014-02-26 16:54:26 +00:00
|
|
|
|
2014-02-25 12:34:13 +01:00
|
|
|
def resource_map():
    '''
    Dynamically generate a map of resources that will be managed for a single
    hook execution.
    '''
    # Work on a copy so the module-level base map stays pristine.
    rmap = deepcopy(BASE_RESOURCE_MAP)

    # Apache >= 2.4 uses conf-available and .conf site files; keep only the
    # site definition matching the installed Apache layout.
    if os.path.exists('/etc/apache2/conf-available'):
        rmap.pop(APACHE_CONF)
    else:
        rmap.pop(APACHE_24_CONF)
    return rmap
|
2011-12-23 17:34:15 -08:00
|
|
|
|
|
|
|
|
2014-02-25 12:34:13 +01:00
|
|
|
def register_configs():
    """Register every managed config file and its contexts with a template
    renderer targeted at the deployed OpenStack release."""
    renderer = templating.OSConfigRenderer(
        templates_dir=TEMPLATES,
        openstack_release=os_release('keystone'))
    for cfg, resource in resource_map().iteritems():
        renderer.register(cfg, resource['contexts'])
    return renderer
|
|
|
|
|
|
|
|
|
|
|
|
def restart_map():
    """Map each config file to the services restarted when it changes,
    skipping entries that manage no services."""
    restarts = OrderedDict()
    for cfg, resource in resource_map().iteritems():
        if resource['services']:
            restarts[cfg] = resource['services']
    return restarts
|
|
|
|
|
|
|
|
|
|
|
|
def determine_ports():
    '''Assemble a list of API ports for services we are managing'''
    # De-duplicate in case admin and service ports are configured equal.
    return list(set([config('admin-port'), config('service-port')]))
|
|
|
|
|
|
|
|
|
|
|
|
def api_port(service):
    # Look up the configured listening port for a named keystone API
    # ('keystone-admin' or 'keystone-public'); raises KeyError otherwise.
    return API_PORTS[service]
|
|
|
|
|
|
|
|
|
|
|
|
def determine_packages():
    """Return the unique set of packages this charm should install."""
    # currently all packages match service names
    packages = set(BASE_PACKAGES)
    for resource in resource_map().itervalues():
        packages.update(resource['services'])
    return list(packages)
|
|
|
|
|
|
|
|
|
|
|
|
def save_script_rc():
    """Persist the environment variables consumed by charm scripts."""
    admin_port = determine_api_port(api_port('keystone-admin'))
    public_port = determine_api_port(api_port('keystone-public'))
    _save_script_rc(OPENSTACK_SERVICE_KEYSTONE='keystone',
                    OPENSTACK_PORT_ADMIN=admin_port,
                    OPENSTACK_PORT_PUBLIC=public_port)
|
|
|
|
|
|
|
|
|
|
|
|
def do_openstack_upgrade(configs):
    """Upgrade packages to a new OpenStack release and regenerate configs.

    :param configs: OSConfigRenderer that is re-targeted at the new release.
    """
    new_src = config('openstack-origin')
    new_os_rel = get_os_codename_install_source(new_src)
    log('Performing OpenStack upgrade to %s.' % (new_os_rel))

    configure_installation_source(new_src)
    apt_update()

    # dpkg options controlling conffile handling during the upgrade.
    dpkg_opts = [
        '--option', 'Dpkg::Options::=--force-confnew',
        '--option', 'Dpkg::Options::=--force-confdef',
    ]
    apt_upgrade(options=dpkg_opts, fatal=True, dist=True)
    apt_install(packages=determine_packages(), options=dpkg_opts, fatal=True)

    # set CONFIGS to load templates from new release and regenerate config
    configs.set_release(openstack_release=new_os_rel)
    configs.write_all()

    # Only the eligible leader runs the DB migration, avoiding concurrent
    # schema changes from multiple units.
    if eligible_leader(CLUSTER_RES):
        migrate_database()
|
|
|
|
|
2014-02-26 16:54:26 +00:00
|
|
|
|
2014-02-25 12:34:13 +01:00
|
|
|
def migrate_database():
    '''Runs keystone-manage to initialize a new database or migrate existing'''
    log('Migrating the keystone database.', level=INFO)
    # Stop the service while the schema is changed underneath it.
    service_stop('keystone')
    # NOTE(jamespage) > icehouse creates a log file as root so use
    # sudo to execute as keystone otherwise keystone won't start
    # afterwards.
    cmd = ['sudo', '-u', 'keystone', 'keystone-manage', 'db_sync']
    subprocess.check_output(cmd)
    service_start('keystone')
    # Give keystone a moment to come back up before callers hit the API.
    time.sleep(10)
|
2014-02-25 12:34:13 +01:00
|
|
|
|
|
|
|
|
2014-03-03 09:14:09 +00:00
|
|
|
# OLD
|
2014-02-25 12:34:13 +01:00
|
|
|
|
2013-03-18 15:49:00 +00:00
|
|
|
def get_local_endpoint():
    """ Returns the URL for the local end-point bypassing haproxy/ssl """
    if config('prefer-ipv6'):
        # IPv6 hosts must be bracketed in URLs; exclude the VIP from
        # candidate addresses.
        ipv6_addr = get_ipv6_addr(exc_list=[config('vip')])[0]
        endpoint = 'http://[%s]:%s/v2.0/' % (
            ipv6_addr, determine_api_port(api_port('keystone-admin')))
    else:
        endpoint = 'http://localhost:%s/v2.0/' % (
            determine_api_port(api_port('keystone-admin')))

    return endpoint
|
|
|
|
|
2013-03-18 15:49:00 +00:00
|
|
|
|
2014-02-25 12:34:13 +01:00
|
|
|
def set_admin_token(admin_token='None'):
    """Set admin token according to deployment config or use a randomly
    generated token if none is specified (default).
    """
    # The charm config default is the literal string 'None'.
    if admin_token != 'None':
        log('Configuring Keystone to use a pre-configured admin token.')
        return admin_token

    log('Configuring Keystone to use a random admin token.')
    if os.path.isfile(STORED_TOKEN):
        log('Loading a previously generated admin token from %s'
            % STORED_TOKEN)
        with open(STORED_TOKEN, 'r') as f:
            return f.read().strip()

    # First run on this unit: generate and persist a fresh token so the
    # same value is reused on subsequent hook executions.
    token = pwgen(length=64)
    with open(STORED_TOKEN, 'w') as out:
        out.write('%s\n' % token)
    return token
|
2012-10-02 17:36:25 -07:00
|
|
|
|
2013-03-18 12:56:57 +00:00
|
|
|
|
2012-03-01 12:35:39 -08:00
|
|
|
def get_admin_token():
    """Temporary utility to grab the admin token as configured in
    keystone.conf
    """
    with open(KEYSTONE_CONF, 'r') as f:
        for l in f.readlines():
            # Lines are expected in the form 'admin_token = <token>'.
            if l.split(' ')[0] == 'admin_token':
                try:
                    return l.split('=')[1].strip()
                except IndexError:
                    # Narrowed from a bare 'except:' — only a missing '='
                    # can make the index lookup fail here; anything else
                    # should propagate rather than be masked.
                    error_out('Could not parse admin_token line from %s' %
                              KEYSTONE_CONF)
    error_out('Could not find admin_token line in %s' % KEYSTONE_CONF)
|
2011-12-08 09:52:12 -08:00
|
|
|
|
2013-03-18 12:56:57 +00:00
|
|
|
|
2012-02-29 11:59:37 -08:00
|
|
|
def create_service_entry(service_name, service_type, service_desc, owner=None):
    """ Add a new service entry to keystone if one does not already exist """
    # NOTE: 'owner' is accepted for interface compatibility but unused here.
    import manager
    manager = manager.KeystoneManager(endpoint=get_local_endpoint(),
                                      token=get_admin_token())
    # Idempotency: bail out if a service with this name already exists.
    for service in [s._info for s in manager.api.services.list()]:
        if service['name'] == service_name:
            log("Service entry for '%s' already exists." % service_name)
            return
    manager.api.services.create(name=service_name,
                                service_type=service_type,
                                description=service_desc)
    log("Created new service entry '%s'" % service_name)
|
2013-03-18 12:56:57 +00:00
|
|
|
|
2011-12-08 09:52:12 -08:00
|
|
|
|
2014-03-03 09:14:09 +00:00
|
|
|
def create_endpoint_template(region, service, publicurl, adminurl,
                             internalurl):
    """ Create a new endpoint template for service if one does not already
    exist matching name *and* region """
    import manager
    manager = manager.KeystoneManager(endpoint=get_local_endpoint(),
                                      token=get_admin_token())
    service_id = manager.resolve_service_id(service)
    for ep in [e._info for e in manager.api.endpoints.list()]:
        if ep['service_id'] == service_id and ep['region'] == region:
            log("Endpoint template already exists for '%s' in '%s'"
                % (service, region))

            up_to_date = True
            # Compare each stored URL against the argument of the same
            # name (looked up via locals()).
            for k in ['publicurl', 'adminurl', 'internalurl']:
                if ep[k] != locals()[k]:
                    up_to_date = False

            if up_to_date:
                return
            else:
                # delete endpoint and recreate if endpoint urls need updating.
                log("Updating endpoint template with new endpoint urls.")
                manager.api.endpoints.delete(ep['id'])

    # Reached when no matching endpoint existed, or a stale one was deleted.
    manager.api.endpoints.create(region=region,
                                 service_id=service_id,
                                 publicurl=publicurl,
                                 adminurl=adminurl,
                                 internalurl=internalurl)
    log("Created new endpoint template for '%s' in '%s'" % (region, service))
|
2013-03-18 12:56:57 +00:00
|
|
|
|
2011-12-08 09:52:12 -08:00
|
|
|
|
2012-02-29 11:59:37 -08:00
|
|
|
def create_tenant(name):
    """ creates a tenant if it does not already exist """
    import manager
    manager = manager.KeystoneManager(endpoint=get_local_endpoint(),
                                      token=get_admin_token())
    tenants = [t._info for t in manager.api.tenants.list()]
    if not tenants or name not in [t['name'] for t in tenants]:
        manager.api.tenants.create(tenant_name=name,
                                   description='Created by Juju')
        log("Created new tenant: %s" % name)
        return
    log("Tenant '%s' already exists." % name)
|
2013-03-18 12:56:57 +00:00
|
|
|
|
2011-12-08 09:52:12 -08:00
|
|
|
|
2012-02-29 11:59:37 -08:00
|
|
|
def create_user(name, password, tenant):
    """ creates a user if it doesn't already exist, as a member of tenant """
    import manager
    manager = manager.KeystoneManager(endpoint=get_local_endpoint(),
                                      token=get_admin_token())
    users = [u._info for u in manager.api.users.list()]
    if not users or name not in [u['name'] for u in users]:
        # The users API needs the tenant id, not its name.
        tenant_id = manager.resolve_tenant_id(tenant)
        if not tenant_id:
            error_out('Could not resolve tenant_id for tenant %s' % tenant)
        manager.api.users.create(name=name,
                                 password=password,
                                 email='juju@localhost',
                                 tenant_id=tenant_id)
        log("Created new user '%s' tenant: %s" % (name, tenant_id))
        return
    log("A user named '%s' already exists" % name)
|
2013-03-18 12:56:57 +00:00
|
|
|
|
2011-12-08 09:52:12 -08:00
|
|
|
|
2012-12-11 19:52:01 -08:00
|
|
|
def create_role(name, user=None, tenant=None):
    """ creates a role if it doesn't already exist. grants role to user """
    import manager
    manager = manager.KeystoneManager(endpoint=get_local_endpoint(),
                                      token=get_admin_token())
    roles = [r._info for r in manager.api.roles.list()]
    if not roles or name not in [r['name'] for r in roles]:
        manager.api.roles.create(name=name)
        log("Created new role '%s'" % name)
    else:
        log("A role named '%s' already exists" % name)

    # Creating the role is all that was asked unless a grant was requested.
    if not user and not tenant:
        return

    # NOTE(adam_g): Keystone client requires id's for add_user_role, not names
    user_id = manager.resolve_user_id(user)
    role_id = manager.resolve_role_id(name)
    tenant_id = manager.resolve_tenant_id(tenant)

    if None in [user_id, role_id, tenant_id]:
        error_out("Could not resolve [%s, %s, %s]" %
                  (user_id, role_id, tenant_id))

    grant_role(user, name, tenant)
|
2011-12-08 09:52:12 -08:00
|
|
|
|
2013-03-18 12:56:57 +00:00
|
|
|
|
2012-03-02 12:46:20 -08:00
|
|
|
def grant_role(user, role, tenant):
    """grant user+tenant a specific role"""
    import manager
    manager = manager.KeystoneManager(endpoint=get_local_endpoint(),
                                      token=get_admin_token())
    log("Granting user '%s' role '%s' on tenant '%s'" %
        (user, role, tenant))
    user_id = manager.resolve_user_id(user)
    role_id = manager.resolve_role_id(role)
    tenant_id = manager.resolve_tenant_id(tenant)

    # Idempotency: only add the grant if the user does not already hold
    # this role on the tenant.
    cur_roles = manager.api.roles.roles_for_user(user_id, tenant_id)
    if not cur_roles or role_id not in [r.id for r in cur_roles]:
        manager.api.roles.add_user_role(user=user_id,
                                        role=role_id,
                                        tenant=tenant_id)
        log("Granted user '%s' role '%s' on tenant '%s'" %
            (user, role, tenant))
    else:
        log("User '%s' already has role '%s' on tenant '%s'" %
            (user, role, tenant))
|
2011-12-08 09:52:12 -08:00
|
|
|
|
2013-03-18 12:56:57 +00:00
|
|
|
|
2011-12-08 09:52:12 -08:00
|
|
|
def ensure_initial_admin(config):
    """ Ensures the minimum admin stuff exists in whatever database we're
    using.

    This and the helper functions it calls are meant to be idempotent and
    run during install as well as during db-changed. This will maintain
    the admin tenant, user, role, service entry and endpoint across every
    datastore we might use.

    TODO: Possibly migrate data from one backend to another after it
    changes?
    """
    create_tenant("admin")
    create_tenant(config("service-tenant"))

    # Resolve the admin password: explicit config wins, then a previously
    # generated one, else generate and persist a new one.
    passwd = ""
    if config("admin-password") != "None":
        passwd = config("admin-password")
    elif os.path.isfile(STORED_PASSWD):
        log("Loading stored passwd from %s" % STORED_PASSWD)
        # Context manager fixes the leaked file handle in the original.
        with open(STORED_PASSWD, 'r') as f:
            passwd = f.readline().strip('\n')
    if passwd == "":
        log("Generating new passwd for user: %s" %
            config("admin-user"))
        cmd = ['pwgen', '-c', '16', '1']
        passwd = str(subprocess.check_output(cmd)).strip()
        # Persist so subsequent hook runs reuse the same password.
        with open(STORED_PASSWD, 'w+') as f:
            f.writelines("%s\n" % passwd)
    # User is managed by ldap backend when using ldap identity
    if not (config('identity-backend') == 'ldap' and config('ldap-readonly')):
        create_user(config('admin-user'), passwd, tenant='admin')
        update_user_password(config('admin-user'), passwd)
        create_role(config('admin-role'), config('admin-user'), 'admin')
    create_service_entry("keystone", "identity", "Keystone Identity Service")

    # Register/refresh keystone's own endpoint in every configured region.
    for region in config('region').split():
        create_keystone_endpoint(public_ip=resolve_address(PUBLIC),
                                 service_port=config("service-port"),
                                 internal_ip=resolve_address(INTERNAL),
                                 admin_ip=resolve_address(ADMIN),
                                 auth_port=config("admin-port"),
                                 region=region)
|
2012-12-18 12:00:48 +00:00
|
|
|
|
2014-09-23 11:11:26 +01:00
|
|
|
|
2014-09-21 18:57:48 +01:00
|
|
|
def endpoint_url(ip, port):
    """Build a keystone v2.0 endpoint URL for ip:port, using https when
    SSL is enabled and bracketing IPv6 hosts."""
    proto = 'https' if https() else 'http'
    host = "[{}]".format(ip) if is_ipv6(ip) else ip
    return "%s://%s:%s/v2.0" % (proto, host, port)
|
2014-07-04 12:43:53 +01:00
|
|
|
|
2014-09-21 18:57:48 +01:00
|
|
|
|
|
|
|
def create_keystone_endpoint(public_ip, service_port,
                             internal_ip, admin_ip, auth_port, region):
    # Build the public/admin/internal URLs and register (or refresh)
    # keystone's own endpoint template for this region.
    create_endpoint_template(region, "keystone",
                             endpoint_url(public_ip, service_port),
                             endpoint_url(admin_ip, auth_port),
                             endpoint_url(internal_ip, service_port))
|
2012-08-08 16:17:52 -07:00
|
|
|
|
2012-12-18 12:00:48 +00:00
|
|
|
|
2012-08-08 16:17:52 -07:00
|
|
|
def update_user_password(username, password):
    """Reset an existing keystone user's password via the admin API."""
    import manager
    manager = manager.KeystoneManager(endpoint=get_local_endpoint(),
                                      token=get_admin_token())
    log("Updating password for user '%s'" % username)

    user_id = manager.resolve_user_id(username)
    if user_id is None:
        error_out("Could not resolve user id for '%s'" % username)

    manager.api.users.update_password(user=user_id, password=password)
    log("Successfully updated password for user '%s'" %
        username)
|
2013-03-18 12:56:57 +00:00
|
|
|
|
2012-10-02 17:36:25 -07:00
|
|
|
|
2013-01-30 16:48:51 -08:00
|
|
|
def load_stored_passwords(path=SERVICE_PASSWD_PATH):
    """Load 'user:password' lines from path into a dict.

    :param path: credentials file; a missing file yields an empty dict.
    :returns: dict mapping username -> password.
    """
    creds = {}
    if not os.path.isfile(path):
        return creds

    # Context manager fixes the leaked file handle in the original.
    with open(path, 'r') as stored_passwd:
        for l in stored_passwd.readlines():
            user, passwd = l.strip().split(':')
            creds[user] = passwd
    return creds
|
|
|
|
|
2013-03-18 12:56:57 +00:00
|
|
|
|
2014-03-28 10:39:49 +00:00
|
|
|
def _migrate_service_passwords():
    ''' Migrate on-disk service passwords to peer storage '''
    if os.path.exists(SERVICE_PASSWD_PATH):
        log('Migrating on-disk stored passwords to peer storage')
        creds = load_stored_passwords()
        for k, v in creds.iteritems():
            peer_store(key="{}_passwd".format(k), value=v)
        # Remove the legacy file so the migration runs at most once.
        os.unlink(SERVICE_PASSWD_PATH)
|
2013-01-30 16:48:51 -08:00
|
|
|
|
2013-03-18 12:56:57 +00:00
|
|
|
|
2013-01-30 16:48:51 -08:00
|
|
|
def get_service_password(service_username):
    # Fetch the password for a service user from peer storage, generating
    # and sharing a new one on first request.  Any legacy on-disk store is
    # migrated first.
    _migrate_service_passwords()
    peer_key = "{}_passwd".format(service_username)
    passwd = peer_retrieve(peer_key)
    if passwd is None:
        passwd = pwgen(length=64)
        peer_store(key=peer_key,
                   value=passwd)
    return passwd
|
2012-10-02 17:36:25 -07:00
|
|
|
|
2013-03-18 12:56:57 +00:00
|
|
|
|
2014-03-28 10:39:49 +00:00
|
|
|
def synchronize_ca():
    '''
    Push the CA/SSL material in SSL_DIR from the eligible leader to all
    cluster peers via unison.  No-op on non-leaders, when not clustered,
    or when https service endpoints are disabled.
    '''
    if not eligible_leader(CLUSTER_RES):
        return
    log('Synchronizing CA to all peers.')
    if is_clustered():
        # Charm config values may arrive as the strings 'True'/'true'.
        if config('https-service-endpoints') in ['True', 'true']:
            unison.sync_to_peers(peer_interface='cluster',
                                 paths=[SSL_DIR], user=SSH_USER, verbose=True)
|
2013-02-12 21:56:39 -08:00
|
|
|
|
2013-02-07 21:03:44 -08:00
|
|
|
# Module-level cache holding at most one initialized CA object (see get_ca).
CA = []
|
2013-03-18 12:56:57 +00:00
|
|
|
|
|
|
|
|
2013-02-12 21:56:39 -08:00
|
|
|
def get_ca(user='keystone', group='keystone'):
    """
    Initialize a new CA object if one hasn't already been loaded.
    This will create a new CA or load an existing one.
    """
    if not CA:
        if not os.path.isdir(SSL_DIR):
            os.mkdir(SSL_DIR)
        # e.g. 'Ubuntu Cloud' -> 'ubuntu_cloud' for directory names.
        d_name = '_'.join(SSL_CA_NAME.lower().split(' '))
        ca = ssl.JujuCA(name=SSL_CA_NAME, user=user, group=group,
                        ca_dir=os.path.join(SSL_DIR,
                                            '%s_intermediate_ca' % d_name),
                        root_ca_dir=os.path.join(SSL_DIR,
                                                 '%s_root_ca' % d_name))
        # SSL_DIR is synchronized via all peers over unison+ssh, need
        # to ensure permissions.
        subprocess.check_output(['chown', '-R', '%s.%s' % (user, group),
                                 '%s' % SSL_DIR])
        subprocess.check_output(['chmod', '-R', 'g+rwx', '%s' % SSL_DIR])
        CA.append(ca)
    return CA[0]
|
2013-03-19 12:35:21 +00:00
|
|
|
|
|
|
|
|
2014-02-25 12:34:13 +01:00
|
|
|
def relation_list(rid):
    """Return the list of remote units on relation 'rid', or None when the
    relation has no units.

    Shells out to the 'relation-list' juju hook tool.
    """
    cmd = [
        'relation-list',
        '-r', rid,
    ]
    result = str(subprocess.check_output(cmd)).split()
    # Bug fix: the original compared the *list* against "" (always False),
    # so the None branch was unreachable and empty output returned [].
    if not result:
        return None
    else:
        return result
|
|
|
|
|
2014-02-26 16:54:26 +00:00
|
|
|
|
|
|
|
def add_service_to_keystone(relation_id=None, remote_unit=None):
    """Process an identity-service relation change from a remote service.

    Registers the remote service's endpoint(s) in the keystone catalog,
    creates service credentials in the service tenant, and publishes the
    resulting auth details (plus SSL certs/keys when the charm manages
    endpoints' certificates) back onto the relation.

    :param relation_id: relation id to read settings from and publish to.
    :param remote_unit: remote unit whose relation settings are read.
    """
    import manager
    manager = manager.KeystoneManager(endpoint=get_local_endpoint(),
                                      token=get_admin_token())
    settings = relation_get(rid=relation_id, unit=remote_unit)
    # the minimum settings needed per endpoint
    single = set(['service', 'region', 'public_url', 'admin_url',
                  'internal_url'])
    # Common names that need SSL certificates if https endpoints are enabled.
    https_cns = []
    if single.issubset(settings):
        # other end of relation advertised only one endpoint
        # NOTE: relation values are strings, so the literal 'None' acts as
        # a sentinel for "no endpoint advertised".
        if 'None' in [v for k, v in settings.iteritems()]:
            # Some backend services advertise no endpoint but require a
            # hook execution to update auth strategy.
            relation_data = {}
            # Check if clustered and use vip + haproxy ports if so
            relation_data["auth_host"] = resolve_address(ADMIN)
            relation_data["service_host"] = resolve_address(PUBLIC)
            if https():
                relation_data["auth_protocol"] = "https"
                relation_data["service_protocol"] = "https"
            else:
                relation_data["auth_protocol"] = "http"
                relation_data["service_protocol"] = "http"
            relation_data["auth_port"] = config('admin-port')
            relation_data["service_port"] = config('service-port')
            if config('https-service-endpoints') in ['True', 'true']:
                # Pass CA cert as client will need it to
                # verify https connections
                ca = get_ca(user=SSH_USER)
                ca_bundle = ca.get_ca_bundle()
                relation_data['https_keystone'] = 'True'
                relation_data['ca_cert'] = b64encode(ca_bundle)
            # Allow the remote service to request creation of any additional
            # roles. Currently used by Horizon
            for role in get_requested_roles(settings):
                log("Creating requested role: %s" % role)
                create_role(role)
            peer_store_and_set(relation_id=relation_id,
                               **relation_data)
            # Nothing more to do for endpoint-less services.
            return
        else:
            ensure_valid_service(settings['service'])
            add_endpoint(region=settings['region'],
                         service=settings['service'],
                         publicurl=settings['public_url'],
                         adminurl=settings['admin_url'],
                         internalurl=settings['internal_url'])

            # If an admin username prefix is provided, ensure all services use
            # it.
            service_username = settings['service']
            prefix = config('service-admin-prefix')
            if prefix:
                service_username = "%s%s" % (prefix, service_username)

            # NOTE(jamespage) internal IP for backwards compat for SSL certs
            internal_cn = urlparse.urlparse(settings['internal_url']).hostname
            https_cns.append(internal_cn)
            https_cns.append(
                urlparse.urlparse(settings['public_url']).hostname)
            https_cns.append(urlparse.urlparse(settings['admin_url']).hostname)
    else:
        # assemble multiple endpoints from relation data. service name
        # should be prepended to setting name, ie:
        #  realtion-set ec2_service=$foo ec2_region=$foo ec2_public_url=$foo
        #  relation-set nova_service=$foo nova_region=$foo nova_public_url=$foo
        # Results in a dict that looks like:
        # { 'ec2': {
        #       'service': $foo
        #       'region': $foo
        #       'public_url': $foo
        #   }
        #   'nova': {
        #       'service': $foo
        #       'region': $foo
        #       'public_url': $foo
        #   }
        # }
        endpoints = {}
        for k, v in settings.iteritems():
            # Split 'ec2_public_url' into service prefix and setting name.
            ep = k.split('_')[0]
            x = '_'.join(k.split('_')[1:])
            if ep not in endpoints:
                endpoints[ep] = {}
            endpoints[ep][x] = v
        services = []
        # NOTE(review): https_cn appears unused in this branch — candidate
        # for removal; kept as-is here.
        https_cn = None
        for ep in endpoints:
            # weed out any unrelated relation stuff Juju might have added
            # by ensuring each possible endpiont has appropriate fields
            #  ['service', 'region', 'public_url', 'admin_url', 'internal_url']
            if single.issubset(endpoints[ep]):
                ep = endpoints[ep]
                ensure_valid_service(ep['service'])
                add_endpoint(region=ep['region'], service=ep['service'],
                             publicurl=ep['public_url'],
                             adminurl=ep['admin_url'],
                             internalurl=ep['internal_url'])
                services.append(ep['service'])
                # NOTE(jamespage) internal IP for backwards compat for
                # SSL certs
                internal_cn = urlparse.urlparse(ep['internal_url']).hostname
                https_cns.append(internal_cn)
                https_cns.append(urlparse.urlparse(ep['public_url']).hostname)
                https_cns.append(urlparse.urlparse(ep['admin_url']).hostname)
        # One shared set of credentials for all the advertised services,
        # e.g. 'ec2_nova'.
        service_username = '_'.join(services)

        # If an admin username prefix is provided, ensure all services use it.
        prefix = config('service-admin-prefix')
        if prefix:
            service_username = "%s%s" % (prefix, service_username)

    # Bail out if any value is the 'None' sentinel (incomplete relation data).
    if 'None' in [v for k, v in settings.iteritems()]:
        return

    if not service_username:
        return

    token = get_admin_token()
    log("Creating service credentials for '%s'" % service_username)

    service_password = get_service_password(service_username)
    create_user(service_username, service_password, config('service-tenant'))
    grant_role(service_username, config('admin-role'),
               config('service-tenant'))

    # Allow the remote service to request creation of any additional roles.
    # Currently used by Swift and Ceilometer.
    for role in get_requested_roles(settings):
        log("Creating requested role: %s" % role)
        create_role(role, service_username,
                    config('service-tenant'))

    # As of https://review.openstack.org/#change,4675, all nodes hosting
    # an endpoint(s) needs a service username and password assigned to
    # the service tenant and granted admin role.
    # note: config('service-tenant') is created in utils.ensure_initial_admin()
    # we return a token, information about our API endpoints, and the generated
    # service credentials
    service_tenant = config('service-tenant')
    relation_data = {
        "admin_token": token,
        "service_host": resolve_address(PUBLIC),
        "service_port": config("service-port"),
        "auth_host": resolve_address(ADMIN),
        "auth_port": config("admin-port"),
        "service_username": service_username,
        "service_password": service_password,
        "service_tenant": service_tenant,
        "service_tenant_id": manager.resolve_tenant_id(service_tenant),
        "https_keystone": "False",
        "ssl_cert": "",
        "ssl_key": "",
        "ca_cert": ""
    }

    # Check if https is enabled
    if https():
        relation_data["auth_protocol"] = "https"
        relation_data["service_protocol"] = "https"
    else:
        relation_data["auth_protocol"] = "http"
        relation_data["service_protocol"] = "http"
    # generate or get a new cert/key for service if set to manage certs.
    if config('https-service-endpoints') in ['True', 'true']:
        ca = get_ca(user=SSH_USER)
        # NOTE(jamespage) may have multiple cns to deal with to iterate
        https_cns = set(https_cns)
        for https_cn in https_cns:
            cert, key = ca.get_cert_and_key(common_name=https_cn)
            relation_data['ssl_cert_{}'.format(https_cn)] = b64encode(cert)
            relation_data['ssl_key_{}'.format(https_cn)] = b64encode(key)
        # NOTE(jamespage) for backwards compatibility
        cert, key = ca.get_cert_and_key(common_name=internal_cn)
        relation_data['ssl_cert'] = b64encode(cert)
        relation_data['ssl_key'] = b64encode(key)
        ca_bundle = ca.get_ca_bundle()
        relation_data['ca_cert'] = b64encode(ca_bundle)
        relation_data['https_keystone'] = 'True'
    peer_store_and_set(relation_id=relation_id,
                       **relation_data)
|
2014-02-25 12:34:13 +01:00
|
|
|
|
|
|
|
|
|
|
|
def ensure_valid_service(service):
    """Validate a requested service name against the known service catalog.

    If the service is unknown, the request is rejected by setting
    admin_token to -1 on the relation (rather than raising), which tells
    the remote unit that its registration was denied.

    :param service: name of the service requested by the remote unit.
    """
    # Membership test against the dict itself; .keys() is redundant.
    if service not in valid_services:
        log("Invalid service requested: '%s'" % service)
        relation_set(admin_token=-1)
        return
|
|
|
|
|
|
|
|
|
|
|
|
def add_endpoint(region, service, publicurl, adminurl, internalurl):
    """Create a keystone service entry and its endpoint template.

    :param region: keystone region for the endpoint.
    :param service: service name (must exist in valid_services).
    :param publicurl: public endpoint URL.
    :param adminurl: admin endpoint URL.
    :param internalurl: internal endpoint URL.
    """
    svc_info = valid_services[service]
    create_service_entry(service, svc_info["type"], svc_info["desc"])
    create_endpoint_template(region=region, service=service,
                             publicurl=publicurl,
                             adminurl=adminurl,
                             internalurl=internalurl)
|
|
|
|
|
|
|
|
|
|
|
|
def get_requested_roles(settings):
    ''' Retrieve any valid requested_roles from dict settings '''
    requested = settings.get('requested_roles')
    # Absent key, literal 'None' string or real None all mean "no roles".
    if requested in (None, 'None'):
        return []
    return requested.split(',')
|
2014-09-18 19:23:52 +08:00
|
|
|
|
|
|
|
|
|
|
|
def setup_ipv6():
    """Validate IPv6 support for this Ubuntu release and install deps.

    Raises if the release predates Trusty; on Trusty itself, pulls in
    haproxy from trusty-backports for IPv6 support.
    """
    codename = lsb_release()['DISTRIB_CODENAME'].lower()
    # Codename strings happen to sort alphabetically in release order here,
    # so a plain string comparison identifies pre-trusty releases.
    if codename < "trusty":
        raise Exception("IPv6 is not supported in the charms for Ubuntu "
                        "versions less than Trusty 14.04")

    # NOTE(xianghui): Need to install haproxy(1.5.3) from trusty-backports
    # to support ipv6 address, so check is required to make sure not
    # breaking other versions, IPv6 only support for >= Trusty
    if codename == 'trusty':
        add_source('deb http://archive.ubuntu.com/ubuntu trusty-backports'
                   ' main')
        apt_update()
        apt_install('haproxy/trusty-backports', fatal=True)
|
2014-12-12 15:21:32 +00:00
|
|
|
|
|
|
|
|
2014-12-12 20:21:44 +00:00
|
|
|
def send_identity_service_notifications(notifications, use_trigger=False):
    """Send notifications to all units listening on the
    identity-service-notifications interface.

    Only the elected leader broadcasts, and only when the new values
    actually differ from what was previously sent.

    Units are expected to ignore notifications that they don't expect.

    NOTE: settings that are not required/inuse must always be set to None
          so that they are removed from the relation.

    :param notifications: dict of notification key/value pairs.
    :param use_trigger: determines whether a trigger value is set to ensure the
                        remote hook is fired.
    """
    if not notifications or not is_elected_leader(CLUSTER_RES):
        log("Not sending notifications", level=DEBUG)
        return

    rel_ids = []
    keys = []
    diff = False

    # Get all settings previously sent
    for rid in relation_ids('identity-service-notifications'):
        rel_ids.append(rid)
        # Read back what this (local) unit last published on the relation.
        # NOTE(review): relation_get may return None on error — the
        # rs.get() below assumes a dict; confirm against charmhelpers.
        rs = relation_get(unit=local_unit(), rid=rid)
        if rs:
            keys += rs.keys()

        # Work out if this notification changes anything
        for k, v in notifications.iteritems():
            if rs.get(k, None) != v:
                diff = True

    if not diff:
        log("Notifications unchanged by new values so skipping broadcast",
            level=DEBUG)
        return

    # Set all to None
    # Keys previously published but absent from the new notifications are
    # explicitly nulled so juju removes them from the relation.
    _notifications = {k: None for k in set(keys)}

    # Set new values
    for k, v in notifications.iteritems():
        _notifications[k] = v

    if use_trigger:
        # A fresh uuid guarantees the relation data changes, forcing the
        # remote -changed hook to fire even if all other values are equal.
        _notifications['trigger'] = str(uuid.uuid4())

    # Broadcast
    log("Sending identity-service notifications (trigger=%s)" %
        (use_trigger), level=DEBUG)
    for rid in rel_ids:
        relation_set(relation_id=rid, relation_settings=_notifications)
|