[james-page, r=gnuoy] Add 0mq support
commit 70d4f45e2e
@@ -15,6 +15,7 @@
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.

import six
from collections import OrderedDict
from charmhelpers.contrib.amulet.deployment import (
    AmuletDeployment
)
@@ -100,12 +101,34 @@ class OpenStackAmuletDeployment(AmuletDeployment):
        """
        (self.precise_essex, self.precise_folsom, self.precise_grizzly,
         self.precise_havana, self.precise_icehouse,
         self.trusty_icehouse) = range(6)
         self.trusty_icehouse, self.trusty_juno, self.trusty_kilo) = range(8)
        releases = {
            ('precise', None): self.precise_essex,
            ('precise', 'cloud:precise-folsom'): self.precise_folsom,
            ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
            ('precise', 'cloud:precise-havana'): self.precise_havana,
            ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
            ('trusty', None): self.trusty_icehouse}
            ('trusty', None): self.trusty_icehouse,
            ('trusty', 'cloud:trusty-juno'): self.trusty_juno,
            ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo}
        return releases[(self.series, self.openstack)]

    def _get_openstack_release_string(self):
        """Get openstack release string.

        Return a string representing the openstack release.
        """
        releases = OrderedDict([
            ('precise', 'essex'),
            ('quantal', 'folsom'),
            ('raring', 'grizzly'),
            ('saucy', 'havana'),
            ('trusty', 'icehouse'),
            ('utopic', 'juno'),
            ('vivid', 'kilo'),
        ])
        if self.openstack:
            os_origin = self.openstack.split(':')[1]
            return os_origin.split('%s-' % self.series)[1].split('/')[0]
        else:
            return releases[self.series]
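For reference, a minimal standalone sketch (not part of this diff) of how the new cloud-archive pockets reduce to a release name in _get_openstack_release_string(); the series and origin values below are example inputs only.

# Illustration of the origin-string parsing above; example values only.
series = 'trusty'
openstack_origin = 'cloud:trusty-juno'
os_origin = openstack_origin.split(':')[1]                  # 'trusty-juno'
release = os_origin.split('%s-' % series)[1].split('/')[0]  # 'juno'
assert release == 'juno'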
@@ -47,6 +47,7 @@ from charmhelpers.core.hookenv import (
)

from charmhelpers.core.sysctl import create as sysctl_create
from charmhelpers.core.strutils import bool_from_string

from charmhelpers.core.host import (
    list_nics,
@@ -67,6 +68,7 @@ from charmhelpers.contrib.hahelpers.apache import (
)
from charmhelpers.contrib.openstack.neutron import (
    neutron_plugin_attribute,
    parse_data_port_mappings,
)
from charmhelpers.contrib.openstack.ip import (
    resolve_address,
@@ -82,7 +84,6 @@ from charmhelpers.contrib.network.ip import (
    is_bridge_member,
)
from charmhelpers.contrib.openstack.utils import get_host_ip

CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
ADDRESS_TYPES = ['admin', 'internal', 'public']
@@ -1162,3 +1163,145 @@ class SysctlContext(OSContextGenerator):
        sysctl_create(sysctl_dict,
                      '/etc/sysctl.d/50-{0}.conf'.format(charm_name()))
        return {'sysctl': sysctl_dict}


class NeutronAPIContext(OSContextGenerator):
    '''
    Inspects current neutron-plugin-api relation for neutron settings. Return
    defaults if it is not present.
    '''
    interfaces = ['neutron-plugin-api']

    def __call__(self):
        self.neutron_defaults = {
            'l2_population': {
                'rel_key': 'l2-population',
                'default': False,
            },
            'overlay_network_type': {
                'rel_key': 'overlay-network-type',
                'default': 'gre',
            },
            'neutron_security_groups': {
                'rel_key': 'neutron-security-groups',
                'default': False,
            },
            'network_device_mtu': {
                'rel_key': 'network-device-mtu',
                'default': None,
            },
            'enable_dvr': {
                'rel_key': 'enable-dvr',
                'default': False,
            },
            'enable_l3ha': {
                'rel_key': 'enable-l3ha',
                'default': False,
            },
        }
        ctxt = self.get_neutron_options({})
        for rid in relation_ids('neutron-plugin-api'):
            for unit in related_units(rid):
                rdata = relation_get(rid=rid, unit=unit)
                if 'l2-population' in rdata:
                    ctxt.update(self.get_neutron_options(rdata))

        return ctxt

    def get_neutron_options(self, rdata):
        settings = {}
        for nkey in self.neutron_defaults.keys():
            defv = self.neutron_defaults[nkey]['default']
            rkey = self.neutron_defaults[nkey]['rel_key']
            if rkey in rdata.keys():
                if type(defv) is bool:
                    settings[nkey] = bool_from_string(rdata[rkey])
                else:
                    settings[nkey] = rdata[rkey]
            else:
                settings[nkey] = defv
        return settings
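As an illustration of the defaulting behaviour above (not part of this diff), a hedged sketch of get_neutron_options() driven by a hand-written relation-data dict; boolean defaults are coerced with bool_from_string() while other values pass through unchanged.

# Example only: mimics get_neutron_options() for two keys with illustrative
# relation data instead of a live neutron-plugin-api relation.
from charmhelpers.core.strutils import bool_from_string

defaults = {'l2_population': ('l2-population', False),
            'network_device_mtu': ('network-device-mtu', None)}
rdata = {'l2-population': 'True', 'network-device-mtu': '1546'}

settings = {}
for nkey, (rkey, defv) in defaults.items():
    if rkey in rdata:
        settings[nkey] = (bool_from_string(rdata[rkey])
                          if isinstance(defv, bool) else rdata[rkey])
    else:
        settings[nkey] = defv
# settings -> {'l2_population': True, 'network_device_mtu': '1546'}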
class ExternalPortContext(NeutronPortContext):

    def __call__(self):
        ctxt = {}
        ports = config('ext-port')
        if ports:
            ports = [p.strip() for p in ports.split()]
            ports = self.resolve_ports(ports)
            if ports:
                ctxt = {"ext_port": ports[0]}
                napi_settings = NeutronAPIContext()()
                mtu = napi_settings.get('network_device_mtu')
                if mtu:
                    ctxt['ext_port_mtu'] = mtu

        return ctxt


class DataPortContext(NeutronPortContext):

    def __call__(self):
        ports = config('data-port')
        if ports:
            portmap = parse_data_port_mappings(ports)
            ports = portmap.values()
            resolved = self.resolve_ports(ports)
            normalized = {get_nic_hwaddr(port): port for port in resolved
                          if port not in ports}
            normalized.update({port: port for port in resolved
                               if port in ports})
            if resolved:
                return {bridge: normalized[port] for bridge, port in
                        six.iteritems(portmap) if port in normalized.keys()}

        return None
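A hedged walk-through of what DataPortContext produces (not executable outside a hook environment, hence shown as comments); the data-port value and NIC name are illustrative.

# config('data-port')                      -> 'br-data:eth1'   (example)
# parse_data_port_mappings('br-data:eth1') -> {'br-data': 'eth1'}
# self.resolve_ports(['eth1'])             -> ['eth1']
# DataPortContext()()                      -> {'br-data': 'eth1'}
# Had the port been given as a MAC address, resolve_ports() would return the
# matching NIC name and the bridge would be keyed to it via get_nic_hwaddr().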
class PhyNICMTUContext(DataPortContext):

    def __call__(self):
        ctxt = {}
        mappings = super(PhyNICMTUContext, self).__call__()
        if mappings and mappings.values():
            ports = mappings.values()
            napi_settings = NeutronAPIContext()()
            mtu = napi_settings.get('network_device_mtu')
            if mtu:
                ctxt["devs"] = '\\n'.join(ports)
                ctxt['mtu'] = mtu

        return ctxt


class NetworkServiceContext(OSContextGenerator):

    def __init__(self, rel_name='quantum-network-service'):
        self.rel_name = rel_name
        self.interfaces = [rel_name]

    def __call__(self):
        for rid in relation_ids(self.rel_name):
            for unit in related_units(rid):
                rdata = relation_get(rid=rid, unit=unit)
                ctxt = {
                    'keystone_host': rdata.get('keystone_host'),
                    'service_port': rdata.get('service_port'),
                    'auth_port': rdata.get('auth_port'),
                    'service_tenant': rdata.get('service_tenant'),
                    'service_username': rdata.get('service_username'),
                    'service_password': rdata.get('service_password'),
                    'quantum_host': rdata.get('quantum_host'),
                    'quantum_port': rdata.get('quantum_port'),
                    'quantum_url': rdata.get('quantum_url'),
                    'region': rdata.get('region'),
                    'service_protocol':
                    rdata.get('service_protocol') or 'http',
                    'auth_protocol':
                    rdata.get('auth_protocol') or 'http',
                }
                if context_complete(ctxt):
                    return ctxt
        return {}
hooks/charmhelpers/contrib/openstack/templates/git.upstart (new file, 13 lines)
@@ -0,0 +1,13 @@
description "{{ service_description }}"
author "Juju {{ service_name }} Charm <juju@localhost>"

start on runlevel [2345]
stop on runlevel [!2345]

respawn

exec start-stop-daemon --start --chuid {{ user_name }} \
    --chdir {{ start_dir }} --name {{ process_name }} \
    --exec {{ executable_name }} -- \
    --config-file={{ config_file }} \
    --log-file={{ log_file }}
@@ -0,0 +1,14 @@
{% if zmq_host -%}
# ZeroMQ configuration (restart-nonce: {{ zmq_nonce }})
rpc_backend = zmq
rpc_zmq_host = {{ zmq_host }}
{% if zmq_redis_address -%}
rpc_zmq_matchmaker = redis
matchmaker_heartbeat_freq = 15
matchmaker_heartbeat_ttl = 30
[matchmaker_redis]
host = {{ zmq_redis_address }}
{% else -%}
rpc_zmq_matchmaker = ring
{% endif -%}
{% endif -%}
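To show what the new template renders to, a small sketch using jinja2 directly (not part of this diff); the template text is inlined and trimmed for brevity, and the host and redis address are example values.

from jinja2 import Template

zmq_tmpl = Template(
    "{% if zmq_host -%}\n"
    "rpc_backend = zmq\n"
    "rpc_zmq_host = {{ zmq_host }}\n"
    "{% if zmq_redis_address -%}\n"
    "rpc_zmq_matchmaker = redis\n"
    "[matchmaker_redis]\n"
    "host = {{ zmq_redis_address }}\n"
    "{% else -%}\n"
    "rpc_zmq_matchmaker = ring\n"
    "{% endif -%}\n"
    "{% endif -%}\n")

# Without a redis address the ring matchmaker is selected ...
print(zmq_tmpl.render(zmq_host='juju-devel-machine-3'))
# ... with one, the redis matchmaker and its config section are emitted.
print(zmq_tmpl.render(zmq_host='juju-devel-machine-3',
                      zmq_redis_address='10.5.0.10'))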
@@ -30,6 +30,10 @@ import yaml

from charmhelpers.contrib.network import ip

from charmhelpers.core import (
    unitdata,
)

from charmhelpers.core.hookenv import (
    config,
    log as juju_log,
@@ -330,6 +334,21 @@ def configure_installation_source(rel):
        error_out("Invalid openstack-release specified: %s" % rel)


def config_value_changed(option):
    """
    Determine if config value changed since last call to this function.
    """
    hook_data = unitdata.HookData()
    with hook_data():
        db = unitdata.kv()
        current = config(option)
        saved = db.get(option)
        db.set(option, current)
        if saved is None:
            return False
        return current != saved


def save_script_rc(script_path="scripts/scriptrc", **env_vars):
    """
    Write an rc file in the charm-delivered directory containing
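A hedged usage sketch for config_value_changed() above; 'openstack-origin-git' is just an example option name and the body of the if block is a placeholder for whatever expensive action a charm wants to gate on an actual change.

# Example only: rerun a costly install step only when the option really
# changed since the last hook execution (state lives in the unit's kv store).
if config_value_changed('openstack-origin-git'):
    projects_yaml = config('openstack-origin-git')
    # ... re-clone / reinstall from git here (placeholder) ...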
@@ -469,82 +488,95 @@ def os_requires_version(ostack_release, pkg):


def git_install_requested():
    """Returns true if openstack-origin-git is specified."""
    return config('openstack-origin-git') != "None"
    """
    Returns true if openstack-origin-git is specified.
    """
    return config('openstack-origin-git') is not None


requirements_dir = None


def git_clone_and_install(file_name, core_project):
    """Clone/install all OpenStack repos specified in yaml config file."""
    global requirements_dir
def git_clone_and_install(projects_yaml, core_project):
    """
    Clone/install all specified OpenStack repositories.

    if file_name == "None":
    The expected format of projects_yaml is:
        repositories:
            - {name: keystone,
               repository: 'git://git.openstack.org/openstack/keystone.git',
               branch: 'stable/icehouse'}
            - {name: requirements,
               repository: 'git://git.openstack.org/openstack/requirements.git',
               branch: 'stable/icehouse'}
        directory: /mnt/openstack-git

    The directory key is optional.
    """
    global requirements_dir
    parent_dir = '/mnt/openstack-git'

    if not projects_yaml:
        return

    yaml_file = os.path.join(charm_dir(), file_name)
    projects = yaml.load(projects_yaml)
    _git_validate_projects_yaml(projects, core_project)

    # clone/install the requirements project first
    installed = _git_clone_and_install_subset(yaml_file,
                                              whitelist=['requirements'])
    if 'requirements' not in installed:
        error_out('requirements git repository must be specified')
    if 'directory' in projects.keys():
        parent_dir = projects['directory']

    # clone/install all other projects except requirements and the core project
    blacklist = ['requirements', core_project]
    _git_clone_and_install_subset(yaml_file, blacklist=blacklist,
                                  update_requirements=True)

    # clone/install the core project
    whitelist = [core_project]
    installed = _git_clone_and_install_subset(yaml_file, whitelist=whitelist,
                                              update_requirements=True)
    if core_project not in installed:
        error_out('{} git repository must be specified'.format(core_project))
    for p in projects['repositories']:
        repo = p['repository']
        branch = p['branch']
        if p['name'] == 'requirements':
            repo_dir = _git_clone_and_install_single(repo, branch, parent_dir,
                                                     update_requirements=False)
            requirements_dir = repo_dir
        else:
            repo_dir = _git_clone_and_install_single(repo, branch, parent_dir,
                                                     update_requirements=True)


def _git_clone_and_install_subset(yaml_file, whitelist=[], blacklist=[],
                                  update_requirements=False):
    """Clone/install subset of OpenStack repos specified in yaml config file."""
    global requirements_dir
    installed = []
def _git_validate_projects_yaml(projects, core_project):
    """
    Validate the projects yaml.
    """
    _git_ensure_key_exists('repositories', projects)

    with open(yaml_file, 'r') as fd:
        projects = yaml.load(fd)
        for proj, val in projects.items():
            # The project subset is chosen based on the following 3 rules:
            # 1) If project is in blacklist, we don't clone/install it, period.
            # 2) If whitelist is empty, we clone/install everything else.
            # 3) If whitelist is not empty, we clone/install everything in the
            #    whitelist.
            if proj in blacklist:
                continue
            if whitelist and proj not in whitelist:
                continue
            repo = val['repository']
            branch = val['branch']
            repo_dir = _git_clone_and_install_single(repo, branch,
                                                     update_requirements)
            if proj == 'requirements':
                requirements_dir = repo_dir
            installed.append(proj)
    return installed
    for project in projects['repositories']:
        _git_ensure_key_exists('name', project.keys())
        _git_ensure_key_exists('repository', project.keys())
        _git_ensure_key_exists('branch', project.keys())

    if projects['repositories'][0]['name'] != 'requirements':
        error_out('{} git repo must be specified first'.format('requirements'))

    if projects['repositories'][-1]['name'] != core_project:
        error_out('{} git repo must be specified last'.format(core_project))


def _git_clone_and_install_single(repo, branch, update_requirements=False):
    """Clone and install a single git repository."""
    dest_parent_dir = "/mnt/openstack-git/"
    dest_dir = os.path.join(dest_parent_dir, os.path.basename(repo))
def _git_ensure_key_exists(key, keys):
    """
    Ensure that key exists in keys.
    """
    if key not in keys:
        error_out('openstack-origin-git key \'{}\' is missing'.format(key))

    if not os.path.exists(dest_parent_dir):
        juju_log('Host dir not mounted at {}. '
                 'Creating directory there instead.'.format(dest_parent_dir))
        os.mkdir(dest_parent_dir)

def _git_clone_and_install_single(repo, branch, parent_dir, update_requirements):
    """
    Clone and install a single git repository.
    """
    dest_dir = os.path.join(parent_dir, os.path.basename(repo))

    if not os.path.exists(parent_dir):
        juju_log('Directory already exists at {}. '
                 'No need to create directory.'.format(parent_dir))
        os.mkdir(parent_dir)

    if not os.path.exists(dest_dir):
        juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
        repo_dir = install_remote(repo, dest=dest_parent_dir, branch=branch)
        repo_dir = install_remote(repo, dest=parent_dir, branch=branch)
    else:
        repo_dir = dest_dir

@@ -561,16 +593,39 @@ def _git_clone_and_install_single(repo, branch, update_requirements=False):


def _git_update_requirements(package_dir, reqs_dir):
    """Update from global requirements.
    """
    Update from global requirements.

    Update an OpenStack git directory's requirements.txt and
    test-requirements.txt from global-requirements.txt."""
    Update an OpenStack git directory's requirements.txt and
    test-requirements.txt from global-requirements.txt.
    """
    orig_dir = os.getcwd()
    os.chdir(reqs_dir)
    cmd = "python update.py {}".format(package_dir)
    cmd = ['python', 'update.py', package_dir]
    try:
        subprocess.check_call(cmd.split(' '))
        subprocess.check_call(cmd)
    except subprocess.CalledProcessError:
        package = os.path.basename(package_dir)
        error_out("Error updating {} from global-requirements.txt".format(package))
    os.chdir(orig_dir)


def git_src_dir(projects_yaml, project):
    """
    Return the directory where the specified project's source is located.
    """
    parent_dir = '/mnt/openstack-git'

    if not projects_yaml:
        return

    projects = yaml.load(projects_yaml)

    if 'directory' in projects.keys():
        parent_dir = projects['directory']

    for p in projects['repositories']:
        if p['name'] == project:
            return os.path.join(parent_dir, os.path.basename(p['repository']))

    return None
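Putting the refactored helpers together, an illustrative call sequence as a charm might use it (not taken verbatim from any charm); 'nova' is an example core project and the YAML comes from the charm's openstack-origin-git option.

projects_yaml = config('openstack-origin-git')
if git_install_requested():
    # expects 'requirements' listed first and the core project listed last
    git_clone_and_install(projects_yaml, core_project='nova')
    nova_src = git_src_dir(projects_yaml, 'nova')  # e.g. /mnt/openstack-git/nova.git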
@@ -443,7 +443,7 @@ class HookData(object):
        data = hookenv.execution_environment()
        self.conf = conf_delta = self.kv.delta(data['conf'], 'config')
        self.rels = rels_delta = self.kv.delta(data['rels'], 'rels')
        self.kv.set('env', data['env'])
        self.kv.set('env', dict(data['env']))
        self.kv.set('unit', data['unit'])
        self.kv.set('relid', data.get('relid'))
        return conf_delta, rels_delta
@@ -46,6 +46,7 @@ from charmhelpers.contrib.openstack.utils import (
    configure_installation_source,
    openstack_upgrade_available,
    os_release,
    os_requires_version,
    sync_db_with_multi_ipv6_addresses
)

@@ -79,6 +80,7 @@ from nova_cc_utils import (
    migrate_nova_database,
    neutron_plugin,
    save_script_rc,
    services,
    ssh_compute_add,
    ssh_compute_remove,
    ssh_known_hosts_lines,
@@ -94,8 +96,8 @@ from nova_cc_utils import (
    console_attributes,
    service_guard,
    guard_map,
    services,
    setup_ipv6
    get_topics,
    setup_ipv6,
)

from charmhelpers.contrib.hahelpers.cluster import (
@@ -172,6 +174,8 @@ def config_changed():
        for rid in relation_ids('cloud-compute')]
    for r_id in relation_ids('identity-service'):
        identity_joined(rid=r_id)
    for rid in relation_ids('zeromq-configuration'):
        zeromq_configuration_relation_joined(rid)
    [cluster_joined(rid) for rid in relation_ids('cluster')]
    update_nrpe_config()

@@ -866,6 +870,14 @@ def neutron_api_relation_broken():
        quantum_joined(rid=rid)


@hooks.hook('zeromq-configuration-relation-joined')
@os_requires_version('kilo', 'nova-common')
def zeromq_configuration_relation_joined(relid=None):
    relation_set(relation_id=relid,
                 topics=" ".join(get_topics()),
                 users="nova")


@hooks.hook('nrpe-external-master-relation-joined',
            'nrpe-external-master-relation-changed')
def update_nrpe_config():
@@ -889,6 +901,12 @@ def memcached_joined():
    CONFIGS.write(NOVA_CONF)


@hooks.hook('zeromq-configuration-relation-changed')
@restart_on_change(restart_map(), stopstart=True)
def zeromq_configuration_relation_changed():
    CONFIGS.write(NOVA_CONF)


def main():
    try:
        hooks.execute(sys.argv)
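For context, a hedged illustration of the relation data the zeromq-configuration-relation-joined hook above publishes when nova-consoleauth is among the managed services; the values are examples.

# Equivalent payload built by the hook above (illustrative values):
topics = ['scheduler', 'conductor', 'consoleauth']        # from get_topics()
settings = {'topics': ' '.join(topics), 'users': 'nova'}
# settings -> {'topics': 'scheduler conductor consoleauth', 'users': 'nova'}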
@@ -128,6 +128,8 @@ BASE_RESOURCE_MAP = OrderedDict([
                     service='nova',
                     service_user='nova'),
                 nova_cc_context.VolumeServiceContext(),
                 context.ZeroMQContext(),
                 context.NotificationDriverContext(),
                 nova_cc_context.NovaIPv6Context(),
                 nova_cc_context.NeutronCCContext(),
                 nova_cc_context.NovaConfigContext(),
@@ -928,6 +930,13 @@ def service_guard(guard_map, contexts, active=False):
        return wrap


def get_topics():
    topics = ['scheduler', 'conductor']
    if 'nova-consoleauth' in services():
        topics.append('consoleauth')
    return topics


def cmd_all_services(cmd):
    if cmd == 'start':
        for svc in services():
hooks/zeromq-configuration-relation-changed (new symbolic link)
@@ -0,0 +1 @@
nova_cc_hooks.py

hooks/zeromq-configuration-relation-joined (new symbolic link)
@@ -0,0 +1 @@
nova_cc_hooks.py
@@ -45,6 +45,9 @@ requires:
    scope: container
  memcache:
    interface: memcache
  zeromq-configuration:
    interface: zeromq-configuration
    scope: container
peers:
  cluster:
    interface: nova-ha
@@ -7,7 +7,9 @@ state_path = /var/lib/neutron
lock_path = $state_path/lock
bind_host = {{ bind_host }}
auth_strategy = keystone
{% if notifications == 'True' -%}
notification_driver = neutron.openstack.common.notifier.rpc_notifier
{% endif -%}
api_workers = {{ workers }}
use_syslog = {{ use_syslog }}
templates/juno/nova.conf (new file, 156 lines)
@@ -0,0 +1,156 @@
# juno
###############################################################################
# [ WARNING ]
# Configuration file maintained by Juju. Local changes may be overwritten.
###############################################################################
[DEFAULT]
verbose={{ verbose }}
debug={{ debug }}
dhcpbridge_flagfile=/etc/nova/nova.conf
dhcpbridge=/usr/bin/nova-dhcpbridge
logdir=/var/log/nova
state_path=/var/lib/nova
lock_path=/var/lock/nova
force_dhcp_release=True
iscsi_helper=tgtadm
libvirt_use_virtio_for_bridges=True
connection_type=libvirt
root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
ec2_private_dns_show_ip=True
api_paste_config=/etc/nova/api-paste.ini
volumes_path=/var/lib/nova/volumes
enabled_apis=ec2,osapi_compute,metadata
auth_strategy=keystone
compute_driver=libvirt.LibvirtDriver
use_ipv6 = {{ use_ipv6 }}
osapi_compute_listen = {{ bind_host }}
metadata_host = {{ bind_host }}
s3_listen = {{ bind_host }}
ec2_listen = {{ bind_host }}

osapi_compute_workers = {{ workers }}
ec2_workers = {{ workers }}

scheduler_default_filters = RetryFilter,AvailabilityZoneFilter,CoreFilter,RamFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter
cpu_allocation_ratio = {{ cpu_allocation_ratio }}
ram_allocation_ratio = {{ ram_allocation_ratio }}

use_syslog={{ use_syslog }}
my_ip = {{ host_ip }}

{% if memcached_servers %}
memcached_servers = {{ memcached_servers }}
{% endif %}

{% if keystone_ec2_url -%}
keystone_ec2_url = {{ keystone_ec2_url }}
{% endif -%}

{% include "parts/rabbitmq" %}

{% if glance_api_servers -%}
glance_api_servers = {{ glance_api_servers }}
{% endif -%}

{% if rbd_pool -%}
rbd_pool = {{ rbd_pool }}
rbd_user = {{ rbd_user }}
rbd_secret_uuid = {{ rbd_secret_uuid }}
{% endif -%}

{% if neutron_plugin and neutron_plugin == 'ovs' -%}
libvirt_vif_driver = nova.virt.libvirt.vif.LibvirtGenericVIFDriver
libvirt_user_virtio_for_bridges = True
{% if neutron_security_groups -%}
security_group_api = {{ network_manager }}
nova_firewall_driver = nova.virt.firewall.NoopFirewallDriver
{% endif -%}
{% if external_network -%}
default_floating_pool = {{ external_network }}
{% endif -%}
{% endif -%}

{% if neutron_plugin and neutron_plugin == 'nvp' -%}
security_group_api = neutron
nova_firewall_driver = nova.virt.firewall.NoopFirewallDriver
{% if external_network -%}
default_floating_pool = {{ external_network }}
{% endif -%}
{% endif -%}

{% if network_manager_config -%}
{% for key, value in network_manager_config.iteritems() -%}
{{ key }} = {{ value }}
{% endfor -%}
{% endif -%}

{% if network_manager and network_manager == 'quantum' -%}
network_api_class = nova.network.quantumv2.api.API
quantum_url = {{ neutron_url }}
{% if auth_host -%}
quantum_auth_strategy = keystone
quantum_admin_tenant_name = {{ admin_tenant_name }}
quantum_admin_username = {{ admin_user }}
quantum_admin_password = {{ admin_password }}
quantum_admin_auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/v2.0
{% endif -%}
{% elif network_manager and network_manager == 'neutron' -%}
network_api_class = nova.network.neutronv2.api.API
neutron_url = {{ neutron_url }}
{% if auth_host -%}
neutron_auth_strategy = keystone
neutron_admin_tenant_name = {{ admin_tenant_name }}
neutron_admin_username = {{ admin_user }}
neutron_admin_password = {{ admin_password }}
neutron_admin_auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/v2.0
{% endif -%}
{% else -%}
network_manager = nova.network.manager.FlatDHCPManager
{% endif -%}

{% if default_floating_pool -%}
default_floating_pool = {{ default_floating_pool }}
{% endif -%}

{% if volume_service -%}
volume_api_class=nova.volume.cinder.API
{% endif -%}

{% if user_config_flags -%}
{% for key, value in user_config_flags.iteritems() -%}
{{ key }} = {{ value }}
{% endfor -%}
{% endif -%}

{% if listen_ports -%}
{% for key, value in listen_ports.iteritems() -%}
{{ key }} = {{ value }}
{% endfor -%}
{% endif -%}

{% if sections and 'DEFAULT' in sections -%}
{% for key, value in sections['DEFAULT'] -%}
{{ key }} = {{ value }}
{% endfor -%}
{% endif %}

{% include "parts/database-v2" %}

{% if auth_host -%}
[keystone_authtoken]
auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/
auth_host = {{ auth_host }}
auth_port = {{ auth_port }}
auth_protocol = {{ auth_protocol }}
admin_tenant_name = {{ admin_tenant_name }}
admin_user = {{ admin_user }}
admin_password = {{ admin_password }}
{% endif -%}

[osapi_v3]
enabled=True

{% include "parts/cell" %}

[conductor]
workers = {{ workers }}
@@ -114,6 +114,8 @@ volume_api_class=nova.volume.cinder.API
{% endfor -%}
{% endif %}

{% include "section-zeromq" %}

{% include "parts/database-v2" %}

{% if glance_api_servers -%}
@@ -3,12 +3,12 @@
rpc_backend = zmq
rpc_zmq_host = {{ zmq_host }}
{% if zmq_redis_address -%}
rpc_zmq_matchmaker = oslo.messaging._drivers.matchmaker_redis.MatchMakerRedis
rpc_zmq_matchmaker = oslo_messaging._drivers.matchmaker_redis.MatchMakerRedis
matchmaker_heartbeat_freq = 15
matchmaker_heartbeat_ttl = 30
[matchmaker_redis]
host = {{ zmq_redis_address }}
{% else -%}
rpc_zmq_matchmaker = oslo.messaging._drivers.matchmaker_ring.MatchMakerRing
rpc_zmq_matchmaker = oslo_messaging._drivers.matchmaker_ring.MatchMakerRing
{% endif -%}
{% endif -%}
@@ -15,6 +15,7 @@
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.

import six
from collections import OrderedDict
from charmhelpers.contrib.amulet.deployment import (
    AmuletDeployment
)
@@ -100,12 +101,34 @@ class OpenStackAmuletDeployment(AmuletDeployment):
        """
        (self.precise_essex, self.precise_folsom, self.precise_grizzly,
         self.precise_havana, self.precise_icehouse,
         self.trusty_icehouse) = range(6)
         self.trusty_icehouse, self.trusty_juno, self.trusty_kilo) = range(8)
        releases = {
            ('precise', None): self.precise_essex,
            ('precise', 'cloud:precise-folsom'): self.precise_folsom,
            ('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
            ('precise', 'cloud:precise-havana'): self.precise_havana,
            ('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
            ('trusty', None): self.trusty_icehouse}
            ('trusty', None): self.trusty_icehouse,
            ('trusty', 'cloud:trusty-juno'): self.trusty_juno,
            ('trusty', 'cloud:trusty-kilo'): self.trusty_kilo}
        return releases[(self.series, self.openstack)]

    def _get_openstack_release_string(self):
        """Get openstack release string.

        Return a string representing the openstack release.
        """
        releases = OrderedDict([
            ('precise', 'essex'),
            ('quantal', 'folsom'),
            ('raring', 'grizzly'),
            ('saucy', 'havana'),
            ('trusty', 'icehouse'),
            ('utopic', 'juno'),
            ('vivid', 'kilo'),
        ])
        if self.openstack:
            os_origin = self.openstack.split(':')[1]
            return os_origin.split('%s-' % self.series)[1].split('/')[0]
        else:
            return releases[self.series]
@@ -120,10 +120,12 @@ class NovaCCHooksTests(CharmTestCase):
                                identity_joined, cluster_joined):
        self.openstack_upgrade_available.return_value = True
        self.relation_ids.return_value = ['generic_rid']
        _zmq_joined = self.patch('zeromq_configuration_relation_joined')
        hooks.config_changed()
        self.assertTrue(self.do_openstack_upgrade.called)
        self.assertTrue(neutron_api_joined.called)
        self.assertTrue(identity_joined.called)
        self.assertTrue(_zmq_joined.called)
        self.assertTrue(cluster_joined.called)
        self.assertTrue(self.save_script_rc.called)