[james-page,r=james-page,t=*]
Support for Icehouse on 12.04 and 14.04. Support for Active/Active and SSL RabbitMQ. Support for SSL MySQL.
commit 28731bd8c9
@@ -14,14 +14,6 @@ options:
       Note that updating this setting to a source that is known to
       provide a later version of OpenStack will trigger a software
       upgrade.
-  rabbit-user:
-    default: heat
-    type: string
-    description: Username used to access rabbitmq queue
-  rabbit-vhost:
-    default: openstack
-    type: string
-    decsription: Rabbitmq vhost
   database-user:
     default: heat
     type: string
@@ -39,14 +39,15 @@ def get_cert():


 def get_ca_cert():
-    ca_cert = None
-    log("Inspecting identity-service relations for CA SSL certificate.",
-        level=INFO)
-    for r_id in relation_ids('identity-service'):
-        for unit in relation_list(r_id):
-            if not ca_cert:
-                ca_cert = relation_get('ca_cert',
-                                       rid=r_id, unit=unit)
+    ca_cert = config_get('ssl_ca')
+    if ca_cert is None:
+        log("Inspecting identity-service relations for CA SSL certificate.",
+            level=INFO)
+        for r_id in relation_ids('identity-service'):
+            for unit in relation_list(r_id):
+                if ca_cert is None:
+                    ca_cert = relation_get('ca_cert',
+                                           rid=r_id, unit=unit)
     return ca_cert


@@ -126,17 +126,17 @@ def determine_api_port(public_port):
     return public_port - (i * 10)


-def determine_haproxy_port(public_port):
+def determine_apache_port(public_port):
     '''
-    Description: Determine correct proxy listening port based on public IP +
-    existence of HTTPS reverse proxy.
+    Description: Determine correct apache listening port based on public IP +
+    state of the cluster.

     public_port: int: standard public port for given service

     returns: int: the correct listening port for the HAProxy service
     '''
     i = 0
-    if https():
+    if len(peer_units()) > 0 or is_clustered():
         i += 1
     return public_port - (i * 10)

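Note (illustration, not part of the diff): a rough sketch of the port arithmetic the two helpers above imply for a service such as heat-api on 8004; the numbers and helper names below are illustrative only, assuming both clustering and HTTPS are enabled.

    # Illustrative only: mirrors the arithmetic of determine_apache_port /
    # determine_api_port shown above, without importing charmhelpers.
    PUBLIC_PORT = 8004          # e.g. heat-api behind haproxy

    def apache_port(public, clustered):
        # apache steps back one slot when haproxy owns the public port
        return public - (10 if clustered else 0)

    def api_port(public, clustered, https):
        # the backing API service steps back one slot per frontend in use
        i = (1 if clustered else 0) + (1 if https else 0)
        return public - (i * 10)

    print(apache_port(PUBLIC_PORT, True))      # 7994 -> apache SSL frontend
    print(api_port(PUBLIC_PORT, True, True))   # 7984 -> heat-api itself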
@@ -1,5 +1,6 @@
 import json
 import os
+import time

 from base64 import b64decode

@@ -26,11 +27,10 @@ from charmhelpers.core.hookenv import (
 )

 from charmhelpers.contrib.hahelpers.cluster import (
+    determine_apache_port,
     determine_api_port,
-    determine_haproxy_port,
     https,
-    is_clustered,
-    peer_units,
+    is_clustered
 )

 from charmhelpers.contrib.hahelpers.apache import (
@@ -114,7 +114,8 @@ class OSContextGenerator(object):
 class SharedDBContext(OSContextGenerator):
     interfaces = ['shared-db']

-    def __init__(self, database=None, user=None, relation_prefix=None):
+    def __init__(self,
+                 database=None, user=None, relation_prefix=None, ssl_dir=None):
         '''
         Allows inspecting relation for settings prefixed with relation_prefix.
         This is useful for parsing access for multiple databases returned via
@@ -123,6 +124,7 @@ class SharedDBContext(OSContextGenerator):
         self.relation_prefix = relation_prefix
         self.database = database
         self.user = user
+        self.ssl_dir = ssl_dir

     def __call__(self):
         self.database = self.database or config('database')
@@ -140,19 +142,74 @@ class SharedDBContext(OSContextGenerator):

         for rid in relation_ids('shared-db'):
             for unit in related_units(rid):
-                passwd = relation_get(password_setting, rid=rid, unit=unit)
+                rdata = relation_get(rid=rid, unit=unit)
                 ctxt = {
-                    'database_host': relation_get('db_host', rid=rid,
-                                                  unit=unit),
+                    'database_host': rdata.get('db_host'),
                     'database': self.database,
                     'database_user': self.user,
-                    'database_password': passwd,
+                    'database_password': rdata.get(password_setting),
+                    'database_type': 'mysql'
+                }
+                if context_complete(ctxt):
+                    db_ssl(rdata, ctxt, self.ssl_dir)
+                    return ctxt
+        return {}
+
+
+class PostgresqlDBContext(OSContextGenerator):
+    interfaces = ['pgsql-db']
+
+    def __init__(self, database=None):
+        self.database = database
+
+    def __call__(self):
+        self.database = self.database or config('database')
+        if self.database is None:
+            log('Could not generate postgresql_db context. '
+                'Missing required charm config options. '
+                '(database name)')
+            raise OSContextError
+        ctxt = {}
+
+        for rid in relation_ids(self.interfaces[0]):
+            for unit in related_units(rid):
+                ctxt = {
+                    'database_host': relation_get('host', rid=rid, unit=unit),
+                    'database': self.database,
+                    'database_user': relation_get('user', rid=rid, unit=unit),
+                    'database_password': relation_get('password', rid=rid, unit=unit),
+                    'database_type': 'postgresql',
                 }
                 if context_complete(ctxt):
                     return ctxt
         return {}


+def db_ssl(rdata, ctxt, ssl_dir):
+    if 'ssl_ca' in rdata and ssl_dir:
+        ca_path = os.path.join(ssl_dir, 'db-client.ca')
+        with open(ca_path, 'w') as fh:
+            fh.write(b64decode(rdata['ssl_ca']))
+        ctxt['database_ssl_ca'] = ca_path
+    elif 'ssl_ca' in rdata:
+        log("Charm not setup for ssl support but ssl ca found")
+        return ctxt
+    if 'ssl_cert' in rdata:
+        cert_path = os.path.join(
+            ssl_dir, 'db-client.cert')
+        if not os.path.exists(cert_path):
+            log("Waiting 1m for ssl client cert validity")
+            time.sleep(60)
+        with open(cert_path, 'w') as fh:
+            fh.write(b64decode(rdata['ssl_cert']))
+        ctxt['database_ssl_cert'] = cert_path
+        key_path = os.path.join(ssl_dir, 'db-client.key')
+        with open(key_path, 'w') as fh:
+            fh.write(b64decode(rdata['ssl_key']))
+        ctxt['database_ssl_key'] = key_path
+    return ctxt
+
+
 class IdentityServiceContext(OSContextGenerator):
     interfaces = ['identity-service']

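Note (illustration, not part of the diff): `db_ssl()` above writes any base64-encoded CA/cert/key published on the shared-db relation into `ssl_dir` and records the file paths in the context, which the templates later append to the connection string. A minimal sketch of that behaviour, using an invented relation dict and a temporary directory, and assuming the function above is importable:

    # Hypothetical usage sketch of db_ssl(); the relation data here is invented.
    import base64
    import tempfile

    rdata = {'ssl_ca': base64.b64encode(b'---fake ca---')}
    ctxt = {'database_host': '10.0.0.1', 'database_type': 'mysql'}

    ssl_dir = tempfile.mkdtemp()
    ctxt = db_ssl(rdata, ctxt, ssl_dir)
    # ctxt now carries 'database_ssl_ca' -> <ssl_dir>/db-client.ca, which the
    # heat.conf template renders as ?ssl_ca=... on the sql_connection URL.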
@@ -162,24 +219,25 @@ class IdentityServiceContext(OSContextGenerator):

         for rid in relation_ids('identity-service'):
             for unit in related_units(rid):
+                rdata = relation_get(rid=rid, unit=unit)
                 ctxt = {
-                    'service_port': relation_get('service_port', rid=rid,
-                                                 unit=unit),
-                    'service_host': relation_get('service_host', rid=rid,
-                                                 unit=unit),
-                    'auth_host': relation_get('auth_host', rid=rid, unit=unit),
-                    'auth_port': relation_get('auth_port', rid=rid, unit=unit),
-                    'admin_tenant_name': relation_get('service_tenant',
-                                                      rid=rid, unit=unit),
-                    'admin_user': relation_get('service_username', rid=rid,
-                                               unit=unit),
-                    'admin_password': relation_get('service_password', rid=rid,
-                                                   unit=unit),
-                    # XXX: Hard-coded http.
-                    'service_protocol': 'http',
-                    'auth_protocol': 'http',
+                    'service_port': rdata.get('service_port'),
+                    'service_host': rdata.get('service_host'),
+                    'auth_host': rdata.get('auth_host'),
+                    'auth_port': rdata.get('auth_port'),
+                    'admin_tenant_name': rdata.get('service_tenant'),
+                    'admin_user': rdata.get('service_username'),
+                    'admin_password': rdata.get('service_password'),
+                    'service_protocol':
+                    rdata.get('service_protocol') or 'http',
+                    'auth_protocol':
+                    rdata.get('auth_protocol') or 'http',
                 }
                 if context_complete(ctxt):
+                    # NOTE(jamespage) this is required for >= icehouse
+                    # so a missing value just indicates keystone needs
+                    # upgrading
+                    ctxt['admin_tenant_id'] = rdata.get('service_tenant_id')
                     return ctxt
         return {}

@@ -187,6 +245,9 @@ class IdentityServiceContext(OSContextGenerator):
 class AMQPContext(OSContextGenerator):
     interfaces = ['amqp']

+    def __init__(self, ssl_dir=None):
+        self.ssl_dir = ssl_dir
+
     def __call__(self):
         log('Generating template context for amqp')
         conf = config()
@@ -197,9 +258,9 @@ class AMQPContext(OSContextGenerator):
             log('Could not generate shared_db context. '
                 'Missing required charm config options: %s.' % e)
             raise OSContextError

         ctxt = {}
         for rid in relation_ids('amqp'):
+            ha_vip_only = False
             for unit in related_units(rid):
                 if relation_get('clustered', rid=rid, unit=unit):
                     ctxt['clustered'] = True
@@ -214,11 +275,36 @@ class AMQPContext(OSContextGenerator):
                                                       unit=unit),
                     'rabbitmq_virtual_host': vhost,
                 })
+
+                ssl_port = relation_get('ssl_port', rid=rid, unit=unit)
+                if ssl_port:
+                    ctxt['rabbit_ssl_port'] = ssl_port
+                ssl_ca = relation_get('ssl_ca', rid=rid, unit=unit)
+                if ssl_ca:
+                    ctxt['rabbit_ssl_ca'] = ssl_ca
+
+                if relation_get('ha_queues', rid=rid, unit=unit) is not None:
+                    ctxt['rabbitmq_ha_queues'] = True
+
+                ha_vip_only = relation_get('ha-vip-only',
+                                           rid=rid, unit=unit) is not None
+
                 if context_complete(ctxt):
+                    if 'rabbit_ssl_ca' in ctxt:
+                        if not self.ssl_dir:
+                            log(("Charm not setup for ssl support "
+                                 "but ssl ca found"))
+                            break
+                        ca_path = os.path.join(
+                            self.ssl_dir, 'rabbit-client-ca.pem')
+                        with open(ca_path, 'w') as fh:
+                            fh.write(b64decode(ctxt['rabbit_ssl_ca']))
+                        ctxt['rabbit_ssl_ca'] = ca_path
                     # Sufficient information found = break out!
                     break
             # Used for active/active rabbitmq >= grizzly
-            if 'clustered' not in ctxt and len(related_units(rid)) > 1:
+            if ('clustered' not in ctxt or ha_vip_only) \
+                    and len(related_units(rid)) > 1:
                 rabbitmq_hosts = []
                 for unit in related_units(rid):
                     rabbitmq_hosts.append(relation_get('private-address',
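Note (illustration, not part of the diff): the extra keys gathered above feed the rabbit section of the heat.conf template further down. A hedged sketch of the relation data a clustered, SSL-enabled rabbitmq-server might publish and the context keys that result; all values here are invented:

    # Invented example data; real values come from the amqp relation.
    relation_data = {
        'private-address': '10.0.0.10',
        'password': 'secret',
        'ssl_port': '5671',        # -> ctxt['rabbit_ssl_port']
        'ssl_ca': '<base64 CA>',   # -> written to <ssl_dir>/rabbit-client-ca.pem
        'ha_queues': 'True',       # -> ctxt['rabbitmq_ha_queues'] = True
        'ha-vip-only': 'True',     # active/active without a clustered VIP
    }
    # With more than one related unit and no 'clustered' flag (or with
    # ha-vip-only set), the context also gains 'rabbitmq_hosts', which the
    # template renders as rabbit_hosts for active/active rabbitmq.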
@@ -237,10 +323,13 @@ class CephContext(OSContextGenerator):
         '''This generates context for /etc/ceph/ceph.conf templates'''
         if not relation_ids('ceph'):
             return {}

         log('Generating template context for ceph')

         mon_hosts = []
         auth = None
         key = None
+        use_syslog = str(config('use-syslog')).lower()
         for rid in relation_ids('ceph'):
             for unit in related_units(rid):
                 mon_hosts.append(relation_get('private-address', rid=rid,
@@ -252,6 +341,7 @@ class CephContext(OSContextGenerator):
             'mon_hosts': ' '.join(mon_hosts),
             'auth': auth,
             'key': key,
+            'use_syslog': use_syslog
         }

         if not os.path.isdir('/etc/ceph'):
@@ -380,17 +470,17 @@ class ApacheSSLContext(OSContextGenerator):
             'private_address': unit_get('private-address'),
             'endpoints': []
         }
-        for ext_port in self.external_ports:
-            if peer_units() or is_clustered():
-                int_port = determine_haproxy_port(ext_port)
-            else:
-                int_port = determine_api_port(ext_port)
+        if is_clustered():
+            ctxt['private_address'] = config('vip')
+        for api_port in self.external_ports:
+            ext_port = determine_apache_port(api_port)
+            int_port = determine_api_port(api_port)
             portmap = (int(ext_port), int(int_port))
             ctxt['endpoints'].append(portmap)
         return ctxt


-class NeutronContext(object):
+class NeutronContext(OSContextGenerator):
     interfaces = []

     @property
@@ -451,6 +541,22 @@ class NeutronContext(object):

         return nvp_ctxt

+    def neutron_ctxt(self):
+        if https():
+            proto = 'https'
+        else:
+            proto = 'http'
+        if is_clustered():
+            host = config('vip')
+        else:
+            host = unit_get('private-address')
+        url = '%s://%s:%s' % (proto, host, '9696')
+        ctxt = {
+            'network_manager': self.network_manager,
+            'neutron_url': url,
+        }
+        return ctxt
+
     def __call__(self):
         self._ensure_packages()

@@ -460,7 +566,7 @@ class NeutronContext(object):
         if not self.plugin:
             return {}

-        ctxt = {'network_manager': self.network_manager}
+        ctxt = self.neutron_ctxt()

         if self.plugin == 'ovs':
             ctxt.update(self.ovs_ctxt())
@@ -586,6 +692,7 @@ class SubordinateConfigContext(OSContextGenerator):


 class SyslogContext(OSContextGenerator):

     def __call__(self):
         ctxt = {
             'use_syslog': config('use-syslog')
@@ -17,8 +17,28 @@ def headers_package():
     kver = check_output(['uname', '-r']).strip()
     return 'linux-headers-%s' % kver

+QUANTUM_CONF_DIR = '/etc/quantum'
+
+
+def kernel_version():
+    """ Retrieve the current major kernel version as a tuple e.g. (3, 13) """
+    kver = check_output(['uname', '-r']).strip()
+    kver = kver.split('.')
+    return (int(kver[0]), int(kver[1]))
+
+
+def determine_dkms_package():
+    """ Determine which DKMS package should be used based on kernel version """
+    # NOTE: 3.13 kernels have support for GRE and VXLAN native
+    if kernel_version() >= (3, 13):
+        return []
+    else:
+        return ['openvswitch-datapath-dkms']
+
+
 # legacy
+
+
 def quantum_plugins():
     from charmhelpers.contrib.openstack import context
     return {
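Note (illustration, not part of the diff): kernel_version() above parses `uname -r`, so on a 3.13-series kernel determine_dkms_package() returns an empty list and the DKMS module is skipped. A quick sketch of the same check with a hard-coded version string:

    # Rough equivalent of the version check above; the string stands in for
    # check_output(['uname', '-r']).
    kver = '3.13.0-24-generic'
    major, minor = kver.split('.')[:2]
    if (int(major), int(minor)) >= (3, 13):
        packages = []                          # GRE/VXLAN support is in-tree
    else:
        packages = ['openvswitch-datapath-dkms']
    print(packages)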
@@ -30,9 +50,10 @@ def quantum_plugins():
             'contexts': [
                 context.SharedDBContext(user=config('neutron-database-user'),
                                         database=config('neutron-database'),
-                                        relation_prefix='neutron')],
+                                        relation_prefix='neutron',
+                                        ssl_dir=QUANTUM_CONF_DIR)],
             'services': ['quantum-plugin-openvswitch-agent'],
-            'packages': [[headers_package(), 'openvswitch-datapath-dkms'],
+            'packages': [[headers_package()] + determine_dkms_package(),
                          ['quantum-plugin-openvswitch-agent']],
             'server_packages': ['quantum-server',
                                 'quantum-plugin-openvswitch'],
@@ -45,7 +66,8 @@ def quantum_plugins():
             'contexts': [
                 context.SharedDBContext(user=config('neutron-database-user'),
                                         database=config('neutron-database'),
-                                        relation_prefix='neutron')],
+                                        relation_prefix='neutron',
+                                        ssl_dir=QUANTUM_CONF_DIR)],
             'services': [],
             'packages': [],
             'server_packages': ['quantum-server',
@@ -54,10 +76,13 @@ def quantum_plugins():
         }
     }

+NEUTRON_CONF_DIR = '/etc/neutron'
+
+
 def neutron_plugins():
     from charmhelpers.contrib.openstack import context
-    return {
+    release = os_release('nova-common')
+    plugins = {
         'ovs': {
             'config': '/etc/neutron/plugins/openvswitch/'
                       'ovs_neutron_plugin.ini',
@@ -66,10 +91,11 @@ def neutron_plugins():
             'contexts': [
                 context.SharedDBContext(user=config('neutron-database-user'),
                                         database=config('neutron-database'),
-                                        relation_prefix='neutron')],
+                                        relation_prefix='neutron',
+                                        ssl_dir=NEUTRON_CONF_DIR)],
             'services': ['neutron-plugin-openvswitch-agent'],
-            'packages': [[headers_package(), 'openvswitch-datapath-dkms'],
-                         ['quantum-plugin-openvswitch-agent']],
+            'packages': [[headers_package()] + determine_dkms_package(),
+                         ['neutron-plugin-openvswitch-agent']],
             'server_packages': ['neutron-server',
                                 'neutron-plugin-openvswitch'],
             'server_services': ['neutron-server']
@@ -81,7 +107,8 @@ def neutron_plugins():
             'contexts': [
                 context.SharedDBContext(user=config('neutron-database-user'),
                                         database=config('neutron-database'),
-                                        relation_prefix='neutron')],
+                                        relation_prefix='neutron',
+                                        ssl_dir=NEUTRON_CONF_DIR)],
             'services': [],
             'packages': [],
             'server_packages': ['neutron-server',
@@ -89,6 +116,13 @@ def neutron_plugins():
             'server_services': ['neutron-server']
         }
     }
+    # NOTE: patch in ml2 plugin for icehouse onwards
+    if release >= 'icehouse':
+        plugins['ovs']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini'
+        plugins['ovs']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin'
+        plugins['ovs']['server_packages'] = ['neutron-server',
+                                             'neutron-plugin-ml2']
+    return plugins


 def neutron_plugin_attribute(plugin, attr, net_manager=None):
@@ -3,9 +3,13 @@
 # cinder configuration file maintained by Juju
 # local changes may be overwritten.
 ###############################################################################
-{% if auth -%}
 [global]
+{% if auth -%}
 auth_supported = {{ auth }}
 keyring = /etc/ceph/$cluster.$name.keyring
 mon host = {{ mon_hosts }}
 {% endif -%}
+log to syslog = {{ use_syslog }}
+err to syslog = {{ use_syslog }}
+clog to syslog = {{ use_syslog }}

@@ -8,8 +8,8 @@ global

 defaults
     log global
-    mode http
-    option httplog
+    mode tcp
+    option tcplog
     option dontlognull
     retries 3
     timeout queue 1000
@@ -29,7 +29,6 @@ listen stats :8888
{% for service, ports in service_ports.iteritems() -%}
listen {{ service }} 0.0.0.0:{{ ports[0] }}
     balance roundrobin
-    option tcplog
     {% for unit, address in units.iteritems() -%}
     server {{ unit }} {{ address }}:{{ ports[1] }} check
     {% endfor %}
@@ -65,6 +65,10 @@ SWIFT_CODENAMES = OrderedDict([
     ('1.10.0', 'havana'),
     ('1.9.1', 'havana'),
     ('1.9.0', 'havana'),
+    ('1.13.1', 'icehouse'),
+    ('1.13.0', 'icehouse'),
+    ('1.12.0', 'icehouse'),
+    ('1.11.0', 'icehouse'),
 ])

 DEFAULT_LOOPBACK_SIZE = '5G'
@@ -420,19 +424,19 @@ def get_hostname(address, fqdn=True):
     Resolves hostname for given IP, or returns the input
     if it is already a hostname.
     """
-    if not is_ip(address):
-        return address
-
-    try:
-        import dns.reversename
-    except ImportError:
-        apt_install('python-dnspython')
-        import dns.reversename
-
-    rev = dns.reversename.from_address(address)
-    result = ns_query(rev)
-    if not result:
-        return None
+    if is_ip(address):
+        try:
+            import dns.reversename
+        except ImportError:
+            apt_install('python-dnspython')
+            import dns.reversename
+
+        rev = dns.reversename.from_address(address)
+        result = ns_query(rev)
+        if not result:
+            return None
+    else:
+        result = address

     if fqdn:
         # strip trailing .
@@ -49,6 +49,9 @@ CEPH_CONF = """[global]
 auth supported = {auth}
 keyring = {keyring}
 mon host = {mon_hosts}
+log to syslog = {use_syslog}
+err to syslog = {use_syslog}
+clog to syslog = {use_syslog}
 """

@@ -194,7 +197,7 @@ def get_ceph_nodes():
     return hosts


-def configure(service, key, auth):
+def configure(service, key, auth, use_syslog):
     ''' Perform basic configuration of Ceph '''
     create_keyring(service, key)
     create_key_file(service, key)
@@ -202,7 +205,8 @@ def configure(service, key, auth):
     with open('/etc/ceph/ceph.conf', 'w') as ceph_conf:
         ceph_conf.write(CEPH_CONF.format(auth=auth,
                                          keyring=_keyring_path(service),
-                                         mon_hosts=",".join(map(str, hosts))))
+                                         mon_hosts=",".join(map(str, hosts)),
+                                         use_syslog=use_syslog))
     modprobe('rbd')

@@ -22,4 +22,5 @@ def zap_disk(block_device):

     :param block_device: str: Full path of block device to clean.
     '''
-    check_call(['sgdisk', '--zap-all', '--mbrtogpt', block_device])
+    check_call(['sgdisk', '--zap-all', '--clear',
+                '--mbrtogpt', block_device])
@@ -194,7 +194,7 @@ def file_hash(path):
     return None


-def restart_on_change(restart_map):
+def restart_on_change(restart_map, stopstart=False):
     """Restart services based on configuration files changing

     This function is used a decorator, for example
@@ -219,8 +219,14 @@ def restart_on_change(restart_map):
             for path in restart_map:
                 if checksums[path] != file_hash(path):
                     restarts += restart_map[path]
-            for service_name in list(OrderedDict.fromkeys(restarts)):
-                service('restart', service_name)
+            services_list = list(OrderedDict.fromkeys(restarts))
+            if not stopstart:
+                for service_name in services_list:
+                    service('restart', service_name)
+            else:
+                for action in ['stop', 'start']:
+                    for service_name in services_list:
+                        service(action, service_name)
         return wrapped_f
     return wrap

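Note (illustration, not part of the diff): with the new stopstart flag a hook can ask for a full stop/start cycle instead of an in-place restart. A hedged usage sketch, with an invented config file and service name:

    # Hypothetical charm hook using the decorator above.
    from charmhelpers.core.host import restart_on_change

    @restart_on_change({'/etc/heat/heat.conf': ['heat-api']}, stopstart=True)
    def config_changed():
        # render templates here; if heat.conf's hash changes, heat-api is
        # stopped and then started rather than restarted in place.
        pass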
@@ -97,6 +97,29 @@ def apt_install(packages, options=None, fatal=False):
         subprocess.call(cmd, env=env)


+def apt_upgrade(options=None, fatal=False, dist=False):
+    """Upgrade all packages"""
+    if options is None:
+        options = ['--option=Dpkg::Options::=--force-confold']
+
+    cmd = ['apt-get', '--assume-yes']
+    cmd.extend(options)
+    if dist:
+        cmd.append('dist-upgrade')
+    else:
+        cmd.append('upgrade')
+    log("Upgrading with options: {}".format(options))
+
+    env = os.environ.copy()
+    if 'DEBIAN_FRONTEND' not in env:
+        env['DEBIAN_FRONTEND'] = 'noninteractive'
+
+    if fatal:
+        subprocess.check_call(cmd, env=env)
+    else:
+        subprocess.call(cmd, env=env)
+
+
 def apt_update(fatal=False):
     """Update local apt cache"""
     cmd = ['apt-get', 'update']
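Note (illustration, not part of the diff): apt_upgrade() is the helper that lets a charm hook drive the Icehouse series upgrade mentioned in the config.yaml text above. A hedged usage sketch; the option value shown is only an example:

    # Hypothetical upgrade step; source/pocket handling happens elsewhere.
    from charmhelpers.fetch import apt_update, apt_upgrade

    apt_update(fatal=True)
    apt_upgrade(options=['--option=Dpkg::Options::=--force-confnew'],
                fatal=True, dist=True)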
@@ -135,6 +158,10 @@ def apt_hold(packages, fatal=False):


 def add_source(source, key=None):
+    if source is None:
+        log('Source is not present. Skipping')
+        return
+
     if (source.startswith('ppa:') or
         source.startswith('http') or
         source.startswith('deb ') or
@@ -1,5 +1,7 @@
 import os
 import urllib2
+import urlparse
+
 from charmhelpers.fetch import (
     BaseFetchHandler,
     UnhandledSource
@@ -24,6 +26,19 @@ class ArchiveUrlFetchHandler(BaseFetchHandler):
     def download(self, source, dest):
         # propogate all exceptions
         # URLError, OSError, etc
+        proto, netloc, path, params, query, fragment = urlparse.urlparse(source)
+        if proto in ('http', 'https'):
+            auth, barehost = urllib2.splituser(netloc)
+            if auth is not None:
+                source = urlparse.urlunparse((proto, barehost, path, params, query, fragment))
+                username, password = urllib2.splitpasswd(auth)
+                passman = urllib2.HTTPPasswordMgrWithDefaultRealm()
+                # Realm is set to None in add_password to force the username and password
+                # to be used whatever the realm
+                passman.add_password(None, source, username, password)
+                authhandler = urllib2.HTTPBasicAuthHandler(passman)
+                opener = urllib2.build_opener(authhandler)
+                urllib2.install_opener(opener)
         response = urllib2.urlopen(source)
         try:
             with open(dest, 'w') as dest_file:
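Note (illustration, not part of the diff): the urlparse/splituser handling above lets a charm resource URL carry basic-auth credentials; the credentials are stripped from the netloc and installed via an HTTPBasicAuthHandler before urlopen() runs. An invented example URL:

    # Invented example; any user:password pair embedded in the URL is used
    # for HTTP basic auth against the stripped-down source URL.
    source = 'https://user:secret@example.com/archives/heat.tar.gz'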
@@ -8,8 +8,8 @@ from charmhelpers.core.host import pwgen
 HEAT_PATH = '/var/lib/heat/'


-def generate_ec2_tokens(host, port):
-    ec2_tokens = 'http://%s:%s/v2.0/ec2tokens' % (host, port)
+def generate_ec2_tokens(protocol, host, port):
+    ec2_tokens = '%s://%s:%s/v2.0/ec2tokens' % (protocol, host, port)
     return ec2_tokens

@@ -21,7 +21,8 @@ class HeatIdentityServiceContext(context.IdentityServiceContext):

         # the ec2 api needs to know the location of the keystone ec2
         # tokens endpoint, set in nova.conf
-        ec2_tokens = generate_ec2_tokens(ctxt['service_host'],
+        ec2_tokens = generate_ec2_tokens(ctxt['service_protocol'] or 'http',
+                                         ctxt['service_host'],
                                          ctxt['service_port'])
         ctxt['keystone_ec2_url'] = ec2_tokens
         return ctxt
@@ -72,7 +72,7 @@ def install():
         log('Installing %s to /usr/bin' % f)
         shutil.copy2(f, '/usr/bin')

-    for key, port in API_PORTS.iteritems():
+    for port in API_PORTS.values():
         open_port(port)

@@ -44,14 +44,16 @@ API_PORTS = {
     'heat-api': 8004
 }

+HEAT_DIR = '/etc/heat'
 HEAT_CONF = '/etc/heat/heat.conf'
 HEAT_API_PASTE = '/etc/heat/api-paste.ini'

 CONFIG_FILES = OrderedDict([
     (HEAT_CONF, {
         'services': BASE_SERVICES,
-        'contexts': [context.AMQPContext(),
-                     context.SharedDBContext(relation_prefix='heat'),
+        'contexts': [context.AMQPContext(ssl_dir=HEAT_DIR),
+                     context.SharedDBContext(relation_prefix='heat',
+                                             ssl_dir=HEAT_DIR),
                      context.OSConfigFlagContext(),
                      heat_context.HeatIdentityServiceContext(),
                      heat_context.EncryptionContext(),
@@ -7,9 +7,6 @@ description: |
   templates in the form of text files that can be treated like code.
 categories:
   - openstack
-provides:
-  heat:
-    interface: heat
 requires:
   shared-db:
     interface: mysql-shared
@@ -1,6 +1,7 @@

 # heat-api pipeline
 [pipeline:heat-api]
-pipeline = faultwrap versionnegotiation authtoken context apiv1app
+pipeline = faultwrap ssl versionnegotiation authurl authtoken context apiv1app

 # heat-api pipeline for standalone heat
 # ie. uses alternative auth backend that authenticates users against keystone
@@ -11,7 +12,7 @@ pipeline = faultwrap versionnegotiation authtoken context apiv1app
 # flavor = standalone
 #
 [pipeline:heat-api-standalone]
-pipeline = faultwrap versionnegotiation authpassword context apiv1app
+pipeline = faultwrap ssl versionnegotiation authurl authpassword context apiv1app

 # heat-api pipeline for custom cloud backends
 # i.e. in heat.conf:
@@ -73,16 +74,17 @@ paste.filter_factory = heat.common.context:ContextMiddleware_filter_factory
 [filter:ec2authtoken]
 paste.filter_factory = heat.api.aws.ec2token:EC2Token_filter_factory

+[filter:ssl]
+paste.filter_factory = heat.common.wsgi:filter_factory
+heat.filter_factory = heat.api.openstack:sslmiddleware_filter
+
+# Middleware to set auth_url header appropriately
+[filter:authurl]
+paste.filter_factory = heat.common.auth_url:filter_factory
+
 # Auth middleware that validates token against keystone
 [filter:authtoken]
-paste.filter_factory = heat.common.auth_token:filter_factory
-auth_host = {{ auth_host }}
-auth_port = {{ auth_port }}
-auth_protocol = {{ auth_protocol }}
-auth_uri = http://{{ service_host }}:{{ service_port }}/v2.0
-admin_tenant_name = {{ admin_tenant_name }}
-admin_user = {{ admin_user }}
-admin_password = {{ admin_password }}
+paste.filter_factory = keystoneclient.middleware.auth_token:filter_factory

 # Auth middleware that validates username/password against keystone
 [filter:authpassword]
@@ -1,81 +1,68 @@
 [DEFAULT]
-# The default user for new instances (string value)
+use_syslog = {{ use_syslog }}
+debug = False
+verbose = False
+log_dir = /var/log/heat
 instance_user=ec2-user

-# Driver to use for controlling instances (string value)
 instance_driver=heat.engine.nova

-# List of directories to search for Plugins (list value)
 plugin_dirs=/usr/lib64/heat,/usr/lib/heat

-# The directory to search for environment files (string value)
 environment_dir=/etc/heat/environment.d

-# Select deferred auth method, stored password or trusts
-# (string value)
 deferred_auth_method=password

-# Name of the engine node. This can be an opaque identifier.It
-# is not necessarily a hostname, FQDN, or IP address. (string
-# value)
 host=heat

-#
-# Options defined in heat.common.crypt
-#

-# Encryption key used for authentication info in database
-# (string value)
 auth_encryption_key={{ encryption_key }}

-{% if rabbitmq_host -%}
-rabbit_host = {{ rabbitmq_host }}
-rabbit_userid = {{ rabbitmq_user }}
-rabbit_password = {{ rabbitmq_password }}
-rabbit_virtual_host = {{ rabbitmq_virtual_host }}
-rabbit_use_ssl = false
-{% endif -%}

 {% if database_host -%}
-sql_connection = mysql://{{ database_user }}:{{ database_password }}@{{ database_host }}/{{ database }}
+# < Icehouse db config
+sql_connection = {{ database_type }}://{{ database_user }}:{{ database_password }}@{{ database_host }}/{{ database }}{% if database_ssl_ca %}?ssl_ca={{ database_ssl_ca }}{% if database_ssl_cert %}&ssl_cert={{ database_ssl_cert }}&ssl_key={{ database_ssl_key }}{% endif %}{% endif %}
 {% endif -%}

-verbose = True
-log_dir = /var/log/heat
-use_syslog = {{ use_syslog }}
+{% if rabbitmq_host or rabbitmq_hosts -%}
+rabbit_userid = {{ rabbitmq_user }}
+rabbit_virtual_host = {{ rabbitmq_virtual_host }}
+rabbit_password = {{ rabbitmq_password }}
+{% if rabbitmq_hosts -%}
+rabbit_hosts = {{ rabbitmq_hosts }}
+{% if rabbitmq_ha_queues -%}
+rabbit_ha_queues = True
+rabbit_durable_queues = False
+{% endif -%}
+{% else -%}
+rabbit_host = {{ rabbitmq_host }}
+{% endif -%}
+{% if rabbit_ssl_port -%}
+rabbit_use_ssl = True
+rabbit_port = {{ rabbit_ssl_port }}
+{% if rabbit_ssl_ca -%}
+kombu_ssl_ca_certs = {{ rabbit_ssl_ca }}
+{% endif -%}
+{% endif -%}
+{% endif -%}

+{% if auth_host -%}
 [keystone_authtoken]
-auth_host = {{ service_host }}
-auth_port = 35357
-auth_protocol = {{ service_protocol }}
-admin_tenant_name = %SERVICE_TENANT_NAME%
-admin_user = %SERVICE_USER%
-admin_password = %SERVICE_PASSWORD%
+auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/
+auth_host = {{ auth_host }}
+auth_port = {{ auth_port }}
+auth_protocol = {{ auth_protocol }}
+admin_tenant_name = {{ admin_tenant_name }}
+admin_user = {{ admin_user }}
+admin_password = {{ admin_password }}
+{% endif -%}

 [ec2_authtoken]
 auth_uri = {{service_protocol }}://{{ service_host }}:{{ service_port }}/v2.0
 keystone_ec2_uri = {{service_protocol }}://{{ service_host }}:{{ service_port }}/v2.0/ec2tokens

-[database]
-
-# sql
 {% if database_host -%}
-connection = mysql://{{ database_user }}:{{ database_password }}@{{ database_host }}/{{ database }}
+[database]
+connection = {{ database_type }}://{{ database_user }}:{{ database_password }}@{{ database_host }}/{{ database }}{% if database_ssl_ca %}?ssl_ca={{ database_ssl_ca }}{% if database_ssl_cert %}&ssl_cert={{ database_ssl_cert }}&ssl_key={{ database_ssl_key }}{% endif %}{% endif %}
 {% endif -%}

 [paste_deploy]

-# The API paste config file to use (string value)
 api_paste_config=/etc/heat/api-paste.ini


 [heat_api]

-# The port on which the server will listen. (integer value)
 bind_port=8004

 [heat_api_cfn]

-# The port on which the server will listen. (integer value)
 bind_port=8000

@@ -19,7 +19,8 @@ class TestHeatContext(CharmTestCase):
             heat_context.EncryptionContext()(),
             {'encryption_key': 'key'})

-    @patch('charmhelpers.contrib.openstack.context.IdentityServiceContext.__call__')
+    @patch('charmhelpers.contrib.openstack.'
+           'context.IdentityServiceContext.__call__')
     def test_identity_configuration(self, __call__):
         __call__.return_value = {
             'service_port': 'port',