[ivoks] Re-order port-pipeline to ensure end-to-end SSL
commit 400604db75
@@ -126,17 +126,17 @@ def determine_api_port(public_port):
return public_port - (i * 10)


def determine_haproxy_port(public_port):
def determine_apache_port(public_port):
'''
Description: Determine correct proxy listening port based on public IP +
existence of HTTPS reverse proxy.
Description: Determine correct apache listening port based on public IP +
state of the cluster.

public_port: int: standard public port for given service

returns: int: the correct listening port for the HAProxy service
'''
i = 0
if https():
if len(peer_units()) > 0 or is_clustered():
i += 1
return public_port - (i * 10)
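For orientation, here is a rough sketch of the port arithmetic behind determine_api_port and determine_apache_port above. The public port and the per-layer offsets are illustrative only; they are not a substitute for calling the helpers.

# Minimal sketch, not the charm-helpers implementation: each listener placed
# in front of the service shifts a port down by 10.
PUBLIC_PORT = 8774  # e.g. nova-api-os-compute

def shifted(port, steps):
    return port - (steps * 10)

# With HTTPS termination and a clustered deployment, haproxy keeps the public
# port, apache listens one step below it, and the API itself two steps below.
print(shifted(PUBLIC_PORT, 1))  # 8764
print(shifted(PUBLIC_PORT, 2))  # 8754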
@@ -23,15 +23,12 @@ from charmhelpers.core.hookenv import (
unit_get,
unit_private_ip,
ERROR,
WARNING,
)

from charmhelpers.contrib.hahelpers.cluster import (
determine_apache_port,
determine_api_port,
determine_haproxy_port,
https,
is_clustered,
peer_units,
)

from charmhelpers.contrib.hahelpers.apache import (
@@ -68,6 +65,43 @@ def context_complete(ctxt):
return True


def config_flags_parser(config_flags):
if config_flags.find('==') >= 0:
log("config_flags is not in expected format (key=value)",
level=ERROR)
raise OSContextError
# strip the following from each value.
post_strippers = ' ,'
# we strip any leading/trailing '=' or ' ' from the string then
# split on '='.
split = config_flags.strip(' =').split('=')
limit = len(split)
flags = {}
for i in xrange(0, limit - 1):
current = split[i]
next = split[i + 1]
vindex = next.rfind(',')
if (i == limit - 2) or (vindex < 0):
value = next
else:
value = next[:vindex]

if i == 0:
key = current
else:
# if this not the first entry, expect an embedded key.
index = current.rfind(',')
if index < 0:
log("invalid config value(s) at index %s" % (i),
level=ERROR)
raise OSContextError
key = current[index + 1:]

# Add to collection.
flags[key.strip(post_strippers)] = value.rstrip(post_strippers)
return flags


class OSContextGenerator(object):
interfaces = []
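As a quick sanity check on what the config_flags_parser helper in the hunk above is meant to produce, here is a minimal sketch with a made-up flag string; the simplified stand-in below ignores the embedded-comma handling the real helper provides.

sample = 'api_rate_limit=False,debug=True'  # hypothetical config-flags value

def naive_parse(flags):
    # Simplified stand-in for illustration only; unlike config_flags_parser
    # it cannot cope with commas inside a value.
    return dict(kv.split('=', 1) for kv in flags.split(','))

# config_flags_parser(sample) and this stand-in should both yield:
assert naive_parse(sample) == {'api_rate_limit': 'False', 'debug': 'True'}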
@@ -182,10 +216,17 @@ class AMQPContext(OSContextGenerator):
# Sufficient information found = break out!
break
# Used for active/active rabbitmq >= grizzly
ctxt['rabbitmq_hosts'] = []
for unit in related_units(rid):
ctxt['rabbitmq_hosts'].append(relation_get('private-address',
rid=rid, unit=unit))
if ('clustered' not in ctxt or relation_get('ha-vip-only') == 'True') and \
len(related_units(rid)) > 1:
if relation_get('ha_queues'):
ctxt['rabbitmq_ha_queues'] = relation_get('ha_queues')
else:
ctxt['rabbitmq_ha_queues'] = False
rabbitmq_hosts = []
for unit in related_units(rid):
rabbitmq_hosts.append(relation_get('private-address',
rid=rid, unit=unit))
ctxt['rabbitmq_hosts'] = ','.join(rabbitmq_hosts)
if not context_complete(ctxt):
return {}
else:
@@ -286,6 +327,7 @@ class ImageServiceContext(OSContextGenerator):


class ApacheSSLContext(OSContextGenerator):

"""
Generates a context for an apache vhost configuration that configures
HTTPS reverse proxying for one or many endpoints. Generated context
@@ -341,11 +383,9 @@ class ApacheSSLContext(OSContextGenerator):
'private_address': unit_get('private-address'),
'endpoints': []
}
for ext_port in self.external_ports:
if peer_units() or is_clustered():
int_port = determine_haproxy_port(ext_port)
else:
int_port = determine_api_port(ext_port)
for api_port in self.external_ports:
ext_port = determine_apache_port(api_port)
int_port = determine_api_port(api_port)
portmap = (int(ext_port), int(int_port))
ctxt['endpoints'].append(portmap)
return ctxt
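A rough sketch of what the rewritten endpoint loop above puts into the context; the port number is illustrative and the two helpers are replaced by stand-ins.

external_ports = [8774]  # hypothetical public API port
endpoints = []
for api_port in external_ports:
    ext_port = api_port - 10  # stand-in for determine_apache_port(api_port)
    int_port = api_port - 20  # stand-in for determine_api_port(api_port)
    endpoints.append((int(ext_port), int(int_port)))
print(endpoints)  # [(8764, 8754)]: apache listen port -> backend API port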
@@ -428,11 +468,17 @@ class NeutronContext(object):
elif self.plugin == 'nvp':
ctxt.update(self.nvp_ctxt())

alchemy_flags = config('neutron-alchemy-flags')
if alchemy_flags:
flags = config_flags_parser(alchemy_flags)
ctxt['neutron_alchemy_flags'] = flags

self._save_flag_file()
return ctxt


class OSConfigFlagContext(OSContextGenerator):

"""
Responsible for adding user-defined config-flags in charm config to a
template context.
@@ -441,50 +487,18 @@ class OSConfigFlagContext(OSContextGenerator):
key=value pairs and some Openstack config files support
comma-separated lists as values.
"""

def __call__(self):
config_flags = config('config-flags')
if not config_flags:
return {}

if config_flags.find('==') >= 0:
log("config_flags is not in expected format (key=value)",
level=ERROR)
raise OSContextError

# strip the following from each value.
post_strippers = ' ,'
# we strip any leading/trailing '=' or ' ' from the string then
# split on '='.
split = config_flags.strip(' =').split('=')
limit = len(split)
flags = {}
for i in xrange(0, limit - 1):
current = split[i]
next = split[i + 1]
vindex = next.rfind(',')
if (i == limit - 2) or (vindex < 0):
value = next
else:
value = next[:vindex]

if i == 0:
key = current
else:
# if this not the first entry, expect an embedded key.
index = current.rfind(',')
if index < 0:
log("invalid config value(s) at index %s" % (i),
level=ERROR)
raise OSContextError
key = current[index + 1:]

# Add to collection.
flags[key.strip(post_strippers)] = value.rstrip(post_strippers)

flags = config_flags_parser(config_flags)
return {'user_config_flags': flags}


class SubordinateConfigContext(OSContextGenerator):

"""
Responsible for inspecting relations to subordinates that
may be exporting required config via a json blob.
@@ -525,6 +539,7 @@ class SubordinateConfigContext(OSContextGenerator):
}

"""

def __init__(self, service, config_file, interface):
"""
:param service : Service name key to query in any subordinate
@@ -569,3 +584,12 @@ class SubordinateConfigContext(OSContextGenerator):
ctxt['sections'] = {}

return ctxt


class SyslogContext(OSContextGenerator):

def __call__(self):
ctxt = {
'use_syslog': config('use-syslog')
}
return ctxt
@@ -8,8 +8,8 @@ global

defaults
log global
mode http
option httplog
mode tcp
option tcplog
option dontlognull
retries 3
timeout queue 1000
@@ -29,7 +29,6 @@ listen stats :8888
{% for service, ports in service_ports.iteritems() -%}
listen {{ service }} 0.0.0.0:{{ ports[0] }}
balance roundrobin
option tcplog
{% for unit, address in units.iteritems() -%}
server {{ unit }} {{ address }}:{{ ports[1] }} check
{% endfor %}
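For reference, a stanza rendered from the template above might look roughly like this; the service name, ports and unit addresses are made up:

listen nova-api-os-compute 0.0.0.0:8774
    balance roundrobin
    server nova-cloud-controller-0 10.0.0.10:8764 check
    server nova-cloud-controller-1 10.0.0.11:8764 check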
@@ -41,6 +41,7 @@ UBUNTU_OPENSTACK_RELEASE = OrderedDict([
('quantal', 'folsom'),
('raring', 'grizzly'),
('saucy', 'havana'),
('trusty', 'icehouse')
])


@@ -201,7 +202,7 @@ def os_release(package, base='essex'):


def import_key(keyid):
cmd = "apt-key adv --keyserver keyserver.ubuntu.com " \
cmd = "apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 " \
"--recv-keys %s" % keyid
try:
subprocess.check_call(cmd.split(' '))
@@ -260,6 +261,9 @@ def configure_installation_source(rel):
'havana': 'precise-updates/havana',
'havana/updates': 'precise-updates/havana',
'havana/proposed': 'precise-proposed/havana',
'icehouse': 'precise-updates/icehouse',
'icehouse/updates': 'precise-updates/icehouse',
'icehouse/proposed': 'precise-proposed/icehouse',
}

try:
@@ -411,7 +415,7 @@ def get_host_ip(hostname):
return ns_query(hostname)


def get_hostname(address):
def get_hostname(address, fqdn=True):
"""
Resolves hostname for given IP, or returns the input
if it is already a hostname.
@@ -430,7 +434,11 @@ def get_hostname(address):
if not result:
return None

# strip trailing .
if result.endswith('.'):
return result[:-1]
return result
if fqdn:
# strip trailing .
if result.endswith('.'):
return result[:-1]
else:
return result
else:
return result.split('.')[0]
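A small sketch of the new fqdn switch on get_hostname above; the helper below only mirrors the tail of the function for illustration, with made-up host names.

def _format_result(result, fqdn=True):
    # Stand-in for the final branch of get_hostname() above.
    if fqdn:
        # strip trailing '.'
        return result[:-1] if result.endswith('.') else result
    return result.split('.')[0]

print(_format_result('controller-0.maas.'))              # controller-0.maas
print(_format_result('controller-0.maas.', fqdn=False))  # controller-0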
@@ -22,4 +22,4 @@ def zap_disk(block_device):

:param block_device: str: Full path of block device to clean.
'''
check_call(['sgdisk', '--zap-all', block_device])
check_call(['sgdisk', '--zap-all', '--mbrtogpt', block_device])
@@ -8,6 +8,7 @@ import os
import json
import yaml
import subprocess
import sys
import UserDict
from subprocess import CalledProcessError

@@ -149,6 +150,11 @@ def service_name():
return local_unit().split('/')[0]


def hook_name():
"""The name of the currently executing hook"""
return os.path.basename(sys.argv[0])


@cached
def config(scope=None):
"""Juju charm configuration"""
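To illustrate the new hook_name helper above: Juju runs each hook as an executable under the charm's hooks/ directory, so the basename of sys.argv[0] is the hook name. The path below is hypothetical.

import os.path

argv0 = '/var/lib/juju/agents/unit-nova-cloud-controller-0/charm/hooks/config-changed'
print(os.path.basename(argv0))  # config-changed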
@@ -194,7 +194,7 @@ def file_hash(path):
return None


def restart_on_change(restart_map):
def restart_on_change(restart_map, stopstart=False):
"""Restart services based on configuration files changing

This function is used a decorator, for example
@@ -219,8 +219,14 @@ def restart_on_change(restart_map):
for path in restart_map:
if checksums[path] != file_hash(path):
restarts += restart_map[path]
for service_name in list(OrderedDict.fromkeys(restarts)):
service('restart', service_name)
services_list = list(OrderedDict.fromkeys(restarts))
if not stopstart:
for service_name in services_list:
service('restart', service_name)
else:
for action in ['stop', 'start']:
for service_name in services_list:
service(action, service_name)
return wrapped_f
return wrap
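A minimal usage sketch of the stopstart flag added above; the config path and service name are hypothetical, while the decorator form itself is the one the hooks later in this diff use.

from charmhelpers.core.host import restart_on_change

@restart_on_change({'/etc/nova/nova.conf': ['nova-api-os-compute']},
                   stopstart=True)
def config_changed():
    # Rewrite configuration here; if nova.conf's hash changes, the decorator
    # stops and then starts nova-api-os-compute instead of restarting it.
    pass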
@@ -245,3 +251,47 @@ def pwgen(length=None):
random_chars = [
random.choice(alphanumeric_chars) for _ in range(length)]
return(''.join(random_chars))


def list_nics(nic_type):
'''Return a list of nics of given type(s)'''
if isinstance(nic_type, basestring):
int_types = [nic_type]
else:
int_types = nic_type
interfaces = []
for int_type in int_types:
cmd = ['ip', 'addr', 'show', 'label', int_type + '*']
ip_output = subprocess.check_output(cmd).split('\n')
ip_output = (line for line in ip_output if line)
for line in ip_output:
if line.split()[1].startswith(int_type):
interfaces.append(line.split()[1].replace(":", ""))
return interfaces


def set_nic_mtu(nic, mtu):
'''Set MTU on a network interface'''
cmd = ['ip', 'link', 'set', nic, 'mtu', mtu]
subprocess.check_call(cmd)


def get_nic_mtu(nic):
cmd = ['ip', 'addr', 'show', nic]
ip_output = subprocess.check_output(cmd).split('\n')
mtu = ""
for line in ip_output:
words = line.split()
if 'mtu' in words:
mtu = words[words.index("mtu") + 1]
return mtu


def get_nic_hwaddr(nic):
cmd = ['ip', '-o', '-0', 'addr', 'show', nic]
ip_output = subprocess.check_output(cmd)
hwaddr = ""
words = ip_output.split()
if 'link/ether' in words:
hwaddr = words[words.index('link/ether') + 1]
return hwaddr
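Hypothetical usage of the new nic helpers above, assuming they land in charmhelpers.core.host alongside pwgen; actual values depend on the host's interfaces.

from charmhelpers.core.host import get_nic_hwaddr, get_nic_mtu, list_nics

for nic in list_nics('eth'):  # e.g. ['eth0', 'eth1']
    # e.g. "eth0 1500 fa:16:3e:12:34:56"
    print("{} {} {}".format(nic, get_nic_mtu(nic), get_nic_hwaddr(nic)))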
@@ -13,6 +13,7 @@ from charmhelpers.core.hookenv import (
log,
)
import apt_pkg
import os

CLOUD_ARCHIVE = """# Ubuntu Cloud Archive
deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
@@ -43,8 +44,16 @@ CLOUD_ARCHIVE_POCKETS = {
'precise-havana/updates': 'precise-updates/havana',
'precise-updates/havana': 'precise-updates/havana',
'havana/proposed': 'precise-proposed/havana',
'precies-havana/proposed': 'precise-proposed/havana',
'precise-havana/proposed': 'precise-proposed/havana',
'precise-proposed/havana': 'precise-proposed/havana',
# Icehouse
'icehouse': 'precise-updates/icehouse',
'precise-icehouse': 'precise-updates/icehouse',
'precise-icehouse/updates': 'precise-updates/icehouse',
'precise-updates/icehouse': 'precise-updates/icehouse',
'icehouse/proposed': 'precise-proposed/icehouse',
'precise-icehouse/proposed': 'precise-proposed/icehouse',
'precise-proposed/icehouse': 'precise-proposed/icehouse',
}

@@ -66,8 +75,10 @@ def filter_installed_packages(packages):

def apt_install(packages, options=None, fatal=False):
"""Install one or more packages"""
options = options or []
cmd = ['apt-get', '-y']
if options is None:
options = ['--option=Dpkg::Options::=--force-confold']

cmd = ['apt-get', '--assume-yes']
cmd.extend(options)
cmd.append('install')
if isinstance(packages, basestring):
@@ -76,10 +87,14 @@ def apt_install(packages, options=None, fatal=False):
cmd.extend(packages)
log("Installing {} with options: {}".format(packages,
options))
env = os.environ.copy()
if 'DEBIAN_FRONTEND' not in env:
env['DEBIAN_FRONTEND'] = 'noninteractive'

if fatal:
subprocess.check_call(cmd)
subprocess.check_call(cmd, env=env)
else:
subprocess.call(cmd)
subprocess.call(cmd, env=env)


def apt_update(fatal=False):
@@ -93,7 +108,7 @@ def apt_update(fatal=False):

def apt_purge(packages, fatal=False):
"""Purge one or more packages"""
cmd = ['apt-get', '-y', 'purge']
cmd = ['apt-get', '--assume-yes', 'purge']
if isinstance(packages, basestring):
cmd.append(packages)
else:
@@ -121,16 +136,18 @@ def apt_hold(packages, fatal=False):

def add_source(source, key=None):
if (source.startswith('ppa:') or
source.startswith('http:') or
source.startswith('http') or
source.startswith('deb ') or
source.startswith('cloud-archive:')):
source.startswith('cloud-archive:')):
subprocess.check_call(['add-apt-repository', '--yes', source])
elif source.startswith('cloud:'):
apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
fatal=True)
pocket = source.split(':')[-1]
if pocket not in CLOUD_ARCHIVE_POCKETS:
raise SourceConfigError('Unsupported cloud: source option %s' % pocket)
raise SourceConfigError(
'Unsupported cloud: source option %s' %
pocket)
actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
apt.write(CLOUD_ARCHIVE.format(actual_pocket))
@@ -139,7 +156,9 @@ def add_source(source, key=None):
with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:
apt.write(PROPOSED_POCKET.format(release))
if key:
subprocess.check_call(['apt-key', 'import', key])
subprocess.check_call(['apt-key', 'adv', '--keyserver',
'keyserver.ubuntu.com', '--recv',
key])


class SourceConfigError(Exception):
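Hypothetical examples of source strings the updated add_source above accepts; the repository names and key ID are made up, while the prefixes match the startswith() checks in the code.

from charmhelpers.fetch import add_source

add_source('cloud:precise-icehouse')   # resolved via CLOUD_ARCHIVE_POCKETS
add_source('ppa:example-team/example-archive')
add_source('deb http://repo.example.com/ubuntu precise main',
           key='A1B2C3D4')             # key pulled from keyserver.ubuntu.com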
@@ -220,7 +239,9 @@ def install_from_config(config_var_name):


class BaseFetchHandler(object):

"""Base class for FetchHandler implementations in fetch plugins"""

def can_handle(self, source):
"""Returns True if the source can be handled. Otherwise returns
a string explaining why it cannot"""
@@ -248,10 +269,13 @@ def plugins(fetch_handlers=None):
for handler_name in fetch_handlers:
package, classname = handler_name.rsplit('.', 1)
try:
handler_class = getattr(importlib.import_module(package), classname)
handler_class = getattr(
importlib.import_module(package),
classname)
plugin_list.append(handler_class())
except (ImportError, AttributeError):
# Skip missing plugins so that they can be ommitted from
# installation if desired
log("FetchHandler {} not found, skipping plugin".format(handler_name))
log("FetchHandler {} not found, skipping plugin".format(
handler_name))
return plugin_list
@@ -6,7 +6,7 @@ from charmhelpers.fetch import apt_install, filter_installed_packages
from charmhelpers.contrib.openstack import context, neutron, utils

from charmhelpers.contrib.hahelpers.cluster import (
determine_api_port, determine_haproxy_port)
determine_apache_port, determine_api_port)


class ApacheSSLContext(context.ApacheSSLContext):
@@ -67,6 +67,13 @@ class HAProxyContext(context.HAProxyContext):
nvol_api = determine_api_port(api_port('nova-api-os-volume'))
neutron_api = determine_api_port(api_port('neutron-server'))

# Apache ports
a_compute_api = determine_apache_port(api_port('nova-api-os-compute'))
a_ec2_api = determine_apache_port(api_port('nova-api-ec2'))
a_s3_api = determine_apache_port(api_port('nova-objectstore'))
a_nvol_api = determine_apache_port(api_port('nova-api-os-volume'))
a_neutron_api = determine_apache_port(api_port('neutron-server'))

# to be set in nova.conf accordingly.
listen_ports = {
'osapi_compute_listen_port': compute_api,
@@ -76,32 +83,24 @@ class HAProxyContext(context.HAProxyContext):

port_mapping = {
'nova-api-os-compute': [
determine_haproxy_port(api_port('nova-api-os-compute')),
compute_api,
],
api_port('nova-api-os-compute'), a_compute_api],
'nova-api-ec2': [
determine_haproxy_port(api_port('nova-api-ec2')),
ec2_api,
],
api_port('nova-api-ec2'), a_ec2_api],
'nova-objectstore': [
determine_haproxy_port(api_port('nova-objectstore')),
s3_api,
],
api_port('nova-objectstore'), a_s3_api],
}

if relation_ids('nova-volume-service'):
port_mapping.update({
'nova-api-ec2': [
determine_haproxy_port(api_port('nova-api-ec2')),
nvol_api],
api_port('nova-api-ec2'), a_nvol_api],
})
listen_ports['osapi_volume_listen_port'] = nvol_api

if neutron.network_manager() in ['neutron', 'quantum']:
port_mapping.update({
'neutron-server': [
determine_haproxy_port(api_port('neutron-server')),
neutron_api]
api_port('neutron-server'), a_neutron_api]
})
# quantum/neutron.conf listening port, set separte from nova's.
ctxt['neutron_bind_port'] = neutron_api
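A sketch of the shape the rewritten port_mapping above ends up with; standard nova API ports are assumed here, and the real values come from api_port() and determine_apache_port().

def apache_port(public):
    # Stand-in for determine_apache_port(): one 10-port step behind haproxy.
    return public - 10

port_mapping = {
    'nova-api-os-compute': [8774, apache_port(8774)],  # haproxy -> apache
    'nova-api-ec2': [8773, apache_port(8773)],
    'nova-objectstore': [3333, apache_port(3333)],
}
print(port_mapping)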
@@ -94,7 +94,7 @@ def install():


@hooks.hook('config-changed')
@restart_on_change(restart_map())
@restart_on_change(restart_map(), stopstart=True)
def config_changed():
if openstack_upgrade_available('nova-common'):
do_openstack_upgrade(configs=CONFIGS)
@@ -333,7 +333,7 @@ def quantum_joined(rid=None):

@hooks.hook('cluster-relation-changed',
'cluster-relation-departed')
@restart_on_change(restart_map())
@restart_on_change(restart_map(), stopstart=True)
def cluster_changed():
CONFIGS.write_all()
revision
@@ -1 +1 @@
312
313
@@ -73,6 +73,7 @@ default_floating_pool = {{ external_network }}

{% if network_manager and network_manager == 'quantum' -%}
network_api_class = nova.network.quantumv2.api.API
quantum_url = {{ neutron_url }}
{% if auth_host -%}
quantum_auth_strategy = keystone
quantum_admin_tenant_name = {{ admin_tenant_name }}
@@ -82,6 +83,7 @@ quantum_admin_auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/v
{% endif -%}
{% elif network_manager and network_manager == 'neutron' -%}
network_api_class = nova.network.neutronv2.api.API
neutron_url = {{ neutron_url }}
{% if auth_host -%}
neutron_auth_strategy = keystone
neutron_admin_tenant_name = {{ admin_tenant_name }}