Merge from lp:~openstack-charmers/.../next

Commit e4d73595e6 by Subbarayudu Mukkamala, 2015-04-09 19:39:10 -07:00
29 changed files with 971 additions and 95 deletions

@ -15,6 +15,7 @@
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import six
from collections import OrderedDict
from charmhelpers.contrib.amulet.deployment import (
AmuletDeployment
)
@ -100,12 +101,34 @@ class OpenStackAmuletDeployment(AmuletDeployment):
"""
(self.precise_essex, self.precise_folsom, self.precise_grizzly,
self.precise_havana, self.precise_icehouse,
self.trusty_icehouse) = range(6)
self.trusty_icehouse, self.trusty_juno, self.trusty_kilo) = range(8)
releases = {
('precise', None): self.precise_essex,
('precise', 'cloud:precise-folsom'): self.precise_folsom,
('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
('precise', 'cloud:precise-havana'): self.precise_havana,
('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
('trusty', None): self.trusty_icehouse}
('trusty', None): self.trusty_icehouse,
('trusty', 'cloud:trusty-juno'): self.trusty_juno,
('trusty', 'cloud:trusty-kilo'): self.trusty_kilo}
return releases[(self.series, self.openstack)]
def _get_openstack_release_string(self):
"""Get openstack release string.
Return a string representing the openstack release.
"""
releases = OrderedDict([
('precise', 'essex'),
('quantal', 'folsom'),
('raring', 'grizzly'),
('saucy', 'havana'),
('trusty', 'icehouse'),
('utopic', 'juno'),
('vivid', 'kilo'),
])
if self.openstack:
os_origin = self.openstack.split(':')[1]
return os_origin.split('%s-' % self.series)[1].split('/')[0]
else:
return releases[self.series]

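Note on the hunk above: _get_openstack_release_string() derives the release name from the configured origin by splitting on the series prefix. A minimal standalone sketch of that parsing, with a hypothetical origin value:

def release_from_origin(series, openstack_origin):
    # e.g. series='trusty', openstack_origin='cloud:trusty-juno/updates'
    os_origin = openstack_origin.split(':')[1]               # 'trusty-juno/updates'
    return os_origin.split('%s-' % series)[1].split('/')[0]  # 'juno'

print(release_from_origin('trusty', 'cloud:trusty-juno/updates'))  # juno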
@ -16,6 +16,7 @@
import json
import os
import re
import time
from base64 import b64decode
from subprocess import check_call
@ -46,8 +47,11 @@ from charmhelpers.core.hookenv import (
)
from charmhelpers.core.sysctl import create as sysctl_create
from charmhelpers.core.strutils import bool_from_string
from charmhelpers.core.host import (
list_nics,
get_nic_hwaddr,
mkdir,
write_file,
)
@ -64,16 +68,22 @@ from charmhelpers.contrib.hahelpers.apache import (
)
from charmhelpers.contrib.openstack.neutron import (
neutron_plugin_attribute,
parse_data_port_mappings,
)
from charmhelpers.contrib.openstack.ip import (
resolve_address,
INTERNAL,
)
from charmhelpers.contrib.network.ip import (
get_address_in_network,
get_ipv4_addr,
get_ipv6_addr,
get_netmask_for_address,
format_ipv6_addr,
is_address_in_network,
is_bridge_member,
)
from charmhelpers.contrib.openstack.utils import get_host_ip
CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
ADDRESS_TYPES = ['admin', 'internal', 'public']
@ -727,7 +737,14 @@ class ApacheSSLContext(OSContextGenerator):
'endpoints': [],
'ext_ports': []}
for cn in self.canonical_names():
cns = self.canonical_names()
if cns:
for cn in cns:
self.configure_cert(cn)
else:
# Expect cert/key provided in config (currently assumed that ca
# uses ip for cn)
cn = resolve_address(endpoint_type=INTERNAL)
self.configure_cert(cn)
addresses = self.get_network_addresses()
@ -899,6 +916,48 @@ class NeutronContext(OSContextGenerator):
return ctxt
class NeutronPortContext(OSContextGenerator):
NIC_PREFIXES = ['eth', 'bond']
def resolve_ports(self, ports):
"""Resolve NICs not yet bound to bridge(s)
If hwaddress provided then returns resolved hwaddress otherwise NIC.
"""
if not ports:
return None
hwaddr_to_nic = {}
hwaddr_to_ip = {}
for nic in list_nics(self.NIC_PREFIXES):
hwaddr = get_nic_hwaddr(nic)
hwaddr_to_nic[hwaddr] = nic
addresses = get_ipv4_addr(nic, fatal=False)
addresses += get_ipv6_addr(iface=nic, fatal=False)
hwaddr_to_ip[hwaddr] = addresses
resolved = []
mac_regex = re.compile(r'([0-9A-F]{2}[:-]){5}([0-9A-F]{2})', re.I)
for entry in ports:
if re.match(mac_regex, entry):
# NIC is in known NICs and does NOT have an IP address
if entry in hwaddr_to_nic and not hwaddr_to_ip[entry]:
# If the nic is part of a bridge then don't use it
if is_bridge_member(hwaddr_to_nic[entry]):
continue
# Entry is a MAC address for a valid interface that doesn't
# have an IP address assigned yet.
resolved.append(hwaddr_to_nic[entry])
else:
# If the passed entry is not a MAC address, assume it's a valid
# interface, and that the user put it there on purpose (we can
# trust it to be the real external network).
resolved.append(entry)
return resolved
class OSConfigFlagContext(OSContextGenerator):
"""Provides support for user-defined config flags.
@ -1120,3 +1179,145 @@ class SysctlContext(OSContextGenerator):
sysctl_create(sysctl_dict,
'/etc/sysctl.d/50-{0}.conf'.format(charm_name()))
return {'sysctl': sysctl_dict}
class NeutronAPIContext(OSContextGenerator):
'''
Inspects current neutron-plugin-api relation for neutron settings. Returns
defaults if it is not present.
'''
interfaces = ['neutron-plugin-api']
def __call__(self):
self.neutron_defaults = {
'l2_population': {
'rel_key': 'l2-population',
'default': False,
},
'overlay_network_type': {
'rel_key': 'overlay-network-type',
'default': 'gre',
},
'neutron_security_groups': {
'rel_key': 'neutron-security-groups',
'default': False,
},
'network_device_mtu': {
'rel_key': 'network-device-mtu',
'default': None,
},
'enable_dvr': {
'rel_key': 'enable-dvr',
'default': False,
},
'enable_l3ha': {
'rel_key': 'enable-l3ha',
'default': False,
},
}
ctxt = self.get_neutron_options({})
for rid in relation_ids('neutron-plugin-api'):
for unit in related_units(rid):
rdata = relation_get(rid=rid, unit=unit)
if 'l2-population' in rdata:
ctxt.update(self.get_neutron_options(rdata))
return ctxt
def get_neutron_options(self, rdata):
settings = {}
for nkey in self.neutron_defaults.keys():
defv = self.neutron_defaults[nkey]['default']
rkey = self.neutron_defaults[nkey]['rel_key']
if rkey in rdata.keys():
if type(defv) is bool:
settings[nkey] = bool_from_string(rdata[rkey])
else:
settings[nkey] = rdata[rkey]
else:
settings[nkey] = defv
return settings
class ExternalPortContext(NeutronPortContext):
def __call__(self):
ctxt = {}
ports = config('ext-port')
if ports:
ports = [p.strip() for p in ports.split()]
ports = self.resolve_ports(ports)
if ports:
ctxt = {"ext_port": ports[0]}
napi_settings = NeutronAPIContext()()
mtu = napi_settings.get('network_device_mtu')
if mtu:
ctxt['ext_port_mtu'] = mtu
return ctxt
class DataPortContext(NeutronPortContext):
def __call__(self):
ports = config('data-port')
if ports:
portmap = parse_data_port_mappings(ports)
ports = portmap.values()
resolved = self.resolve_ports(ports)
normalized = {get_nic_hwaddr(port): port for port in resolved
if port not in ports}
normalized.update({port: port for port in resolved
if port in ports})
if resolved:
return {bridge: normalized[port] for bridge, port in
six.iteritems(portmap) if port in normalized.keys()}
return None
class PhyNICMTUContext(DataPortContext):
def __call__(self):
ctxt = {}
mappings = super(PhyNICMTUContext, self).__call__()
if mappings and mappings.values():
ports = mappings.values()
napi_settings = NeutronAPIContext()()
mtu = napi_settings.get('network_device_mtu')
if mtu:
ctxt["devs"] = '\\n'.join(ports)
ctxt['mtu'] = mtu
return ctxt
class NetworkServiceContext(OSContextGenerator):
def __init__(self, rel_name='quantum-network-service'):
self.rel_name = rel_name
self.interfaces = [rel_name]
def __call__(self):
for rid in relation_ids(self.rel_name):
for unit in related_units(rid):
rdata = relation_get(rid=rid, unit=unit)
ctxt = {
'keystone_host': rdata.get('keystone_host'),
'service_port': rdata.get('service_port'),
'auth_port': rdata.get('auth_port'),
'service_tenant': rdata.get('service_tenant'),
'service_username': rdata.get('service_username'),
'service_password': rdata.get('service_password'),
'quantum_host': rdata.get('quantum_host'),
'quantum_port': rdata.get('quantum_port'),
'quantum_url': rdata.get('quantum_url'),
'region': rdata.get('region'),
'service_protocol':
rdata.get('service_protocol') or 'http',
'auth_protocol':
rdata.get('auth_protocol') or 'http',
}
if context_complete(ctxt):
return ctxt
return {}

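Note on the NeutronAPIContext hunk above: get_neutron_options() falls back to per-setting defaults whenever the neutron-plugin-api relation has not supplied a value. A self-contained sketch of that defaulting, with a simplified helper standing in for bool_from_string and hypothetical relation data:

DEFAULTS = {
    'l2_population': {'rel_key': 'l2-population', 'default': False},
    'overlay_network_type': {'rel_key': 'overlay-network-type', 'default': 'gre'},
    'network_device_mtu': {'rel_key': 'network-device-mtu', 'default': None},
}

def _bool(value):
    # Simplified stand-in for charmhelpers.core.strutils.bool_from_string.
    return str(value).strip().lower() in ('y', 'yes', 'true', 't', 'on')

def get_neutron_options(rdata):
    settings = {}
    for nkey, meta in DEFAULTS.items():
        rkey, defv = meta['rel_key'], meta['default']
        if rkey in rdata:
            settings[nkey] = _bool(rdata[rkey]) if isinstance(defv, bool) else rdata[rkey]
        else:
            settings[nkey] = defv
    return settings

print(get_neutron_options({}))                    # all defaults
print(get_neutron_options({'l2-population': 'True', 'network-device-mtu': '1500'}))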
@ -16,6 +16,7 @@
# Various utilities for dealing with Neutron and the renaming from Quantum.
import six
from subprocess import check_output
from charmhelpers.core.hookenv import (
@ -251,3 +252,72 @@ def network_manager():
else:
# ensure accurate naming for all releases post-H
return 'neutron'
def parse_mappings(mappings):
parsed = {}
if mappings:
mappings = mappings.split(' ')
for m in mappings:
p = m.partition(':')
if p[1] == ':':
parsed[p[0].strip()] = p[2].strip()
return parsed
def parse_bridge_mappings(mappings):
"""Parse bridge mappings.
Mappings must be a space-delimited list of provider:bridge mappings.
Returns dict of the form {provider:bridge}.
"""
return parse_mappings(mappings)
def parse_data_port_mappings(mappings, default_bridge='br-data'):
"""Parse data port mappings.
Mappings must be a space-delimited list of bridge:port mappings.
Returns dict of the form {bridge:port}.
"""
_mappings = parse_mappings(mappings)
if not _mappings:
if not mappings:
return {}
# For backwards-compatibility we need to support port-only provided in
# config.
_mappings = {default_bridge: mappings.split(' ')[0]}
bridges = _mappings.keys()
ports = _mappings.values()
if len(set(bridges)) != len(bridges):
raise Exception("It is not allowed to have more than one port "
"configured on the same bridge")
if len(set(ports)) != len(ports):
raise Exception("It is not allowed to have the same port configured "
"on more than one bridge")
return _mappings
def parse_vlan_range_mappings(mappings):
"""Parse vlan range mappings.
Mappings must be a space-delimited list of provider:start:end mappings.
Returns dict of the form {provider: (start, end)}.
"""
_mappings = parse_mappings(mappings)
if not _mappings:
return {}
mappings = {}
for p, r in six.iteritems(_mappings):
mappings[p] = tuple(r.split(':'))
return mappings

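Note on the parsing helpers above: all three build on parse_mappings(), which splits a space-delimited list of key:value pairs. Example inputs and outputs (values are illustrative):

def parse_mappings(mappings):
    parsed = {}
    if mappings:
        for m in mappings.split(' '):
            p = m.partition(':')
            if p[1] == ':':
                parsed[p[0].strip()] = p[2].strip()
    return parsed

print(parse_mappings('physnet1:br-ex physnet2:br-data'))
# {'physnet1': 'br-ex', 'physnet2': 'br-data'}

# parse_data_port_mappings() accepts the same form and, for backwards
# compatibility, a bare port name which maps to the default bridge:
#   'br-ex:eth1'          -> {'br-ex': 'eth1'}
#   'eth1'                -> {'br-data': 'eth1'}
# parse_vlan_range_mappings() splits each value again on ':':
#   'physnet1:1000:2000'  -> {'physnet1': ('1000', '2000')}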
@ -0,0 +1,13 @@
description "{{ service_description }}"
author "Juju {{ service_name }} Charm <juju@localhost>"
start on runlevel [2345]
stop on runlevel [!2345]
respawn
exec start-stop-daemon --start --chuid {{ user_name }} \
--chdir {{ start_dir }} --name {{ process_name }} \
--exec {{ executable_name }} -- \
--config-file={{ config_file }} \
--log-file={{ log_file }}

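Note on the new upstart template above: it is rendered by the charm's templating layer with service-specific values. A hedged sketch of rendering it directly with Jinja2; all variable values below are illustrative, not the charm's actual defaults:

from jinja2 import Template

UPSTART_TEMPLATE = """\
description "{{ service_description }}"
author "Juju {{ service_name }} Charm <juju@localhost>"

start on runlevel [2345]
stop on runlevel [!2345]

respawn

exec start-stop-daemon --start --chuid {{ user_name }} \\
    --chdir {{ start_dir }} --name {{ process_name }} \\
    --exec {{ executable_name }} -- \\
        --config-file={{ config_file }} \\
        --log-file={{ log_file }}
"""

print(Template(UPSTART_TEMPLATE).render(
    service_description='Nova API server',
    service_name='nova-cloud-controller',
    user_name='nova',
    start_dir='/var/lib/nova',
    process_name='nova-api',
    executable_name='/usr/bin/nova-api',
    config_file='/etc/nova/nova.conf',
    log_file='/var/log/nova/nova-api.log'))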
@ -0,0 +1,9 @@
{% if auth_host -%}
[keystone_authtoken]
identity_uri = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/{{ auth_admin_prefix }}
auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/{{ service_admin_prefix }}
admin_tenant_name = {{ admin_tenant_name }}
admin_user = {{ admin_user }}
admin_password = {{ admin_password }}
signing_dir = {{ signing_dir }}
{% endif -%}

@ -0,0 +1,22 @@
{% if rabbitmq_host or rabbitmq_hosts -%}
[oslo_messaging_rabbit]
rabbit_userid = {{ rabbitmq_user }}
rabbit_virtual_host = {{ rabbitmq_virtual_host }}
rabbit_password = {{ rabbitmq_password }}
{% if rabbitmq_hosts -%}
rabbit_hosts = {{ rabbitmq_hosts }}
{% if rabbitmq_ha_queues -%}
rabbit_ha_queues = True
rabbit_durable_queues = False
{% endif -%}
{% else -%}
rabbit_host = {{ rabbitmq_host }}
{% endif -%}
{% if rabbit_ssl_port -%}
rabbit_use_ssl = True
rabbit_port = {{ rabbit_ssl_port }}
{% if rabbit_ssl_ca -%}
kombu_ssl_ca_certs = {{ rabbit_ssl_ca }}
{% endif -%}
{% endif -%}
{% endif -%}

@ -0,0 +1,14 @@
{% if zmq_host -%}
# ZeroMQ configuration (restart-nonce: {{ zmq_nonce }})
rpc_backend = zmq
rpc_zmq_host = {{ zmq_host }}
{% if zmq_redis_address -%}
rpc_zmq_matchmaker = redis
matchmaker_heartbeat_freq = 15
matchmaker_heartbeat_ttl = 30
[matchmaker_redis]
host = {{ zmq_redis_address }}
{% else -%}
rpc_zmq_matchmaker = ring
{% endif -%}
{% endif -%}

@ -30,6 +30,10 @@ import yaml
from charmhelpers.contrib.network import ip
from charmhelpers.core import (
unitdata,
)
from charmhelpers.core.hookenv import (
config,
log as juju_log,
@ -330,6 +334,21 @@ def configure_installation_source(rel):
error_out("Invalid openstack-release specified: %s" % rel)
def config_value_changed(option):
"""
Determine if config value changed since last call to this function.
"""
hook_data = unitdata.HookData()
with hook_data():
db = unitdata.kv()
current = config(option)
saved = db.get(option)
db.set(option, current)
if saved is None:
return False
return current != saved
def save_script_rc(script_path="scripts/scriptrc", **env_vars):
"""
Write an rc file in the charm-delivered directory containing
@ -469,82 +488,95 @@ def os_requires_version(ostack_release, pkg):
def git_install_requested():
"""Returns true if openstack-origin-git is specified."""
return config('openstack-origin-git') != "None"
"""
Returns true if openstack-origin-git is specified.
"""
return config('openstack-origin-git') is not None
requirements_dir = None
def git_clone_and_install(file_name, core_project):
"""Clone/install all OpenStack repos specified in yaml config file."""
global requirements_dir
def git_clone_and_install(projects_yaml, core_project):
"""
Clone/install all specified OpenStack repositories.
if file_name == "None":
The expected format of projects_yaml is:
repositories:
- {name: keystone,
repository: 'git://git.openstack.org/openstack/keystone.git',
branch: 'stable/icehouse'}
- {name: requirements,
repository: 'git://git.openstack.org/openstack/requirements.git',
branch: 'stable/icehouse'}
directory: /mnt/openstack-git
The directory key is optional.
"""
global requirements_dir
parent_dir = '/mnt/openstack-git'
if not projects_yaml:
return
yaml_file = os.path.join(charm_dir(), file_name)
projects = yaml.load(projects_yaml)
_git_validate_projects_yaml(projects, core_project)
# clone/install the requirements project first
installed = _git_clone_and_install_subset(yaml_file,
whitelist=['requirements'])
if 'requirements' not in installed:
error_out('requirements git repository must be specified')
if 'directory' in projects.keys():
parent_dir = projects['directory']
# clone/install all other projects except requirements and the core project
blacklist = ['requirements', core_project]
_git_clone_and_install_subset(yaml_file, blacklist=blacklist,
update_requirements=True)
# clone/install the core project
whitelist = [core_project]
installed = _git_clone_and_install_subset(yaml_file, whitelist=whitelist,
update_requirements=True)
if core_project not in installed:
error_out('{} git repository must be specified'.format(core_project))
for p in projects['repositories']:
repo = p['repository']
branch = p['branch']
if p['name'] == 'requirements':
repo_dir = _git_clone_and_install_single(repo, branch, parent_dir,
update_requirements=False)
requirements_dir = repo_dir
else:
repo_dir = _git_clone_and_install_single(repo, branch, parent_dir,
update_requirements=True)
def _git_clone_and_install_subset(yaml_file, whitelist=[], blacklist=[],
update_requirements=False):
"""Clone/install subset of OpenStack repos specified in yaml config file."""
global requirements_dir
installed = []
def _git_validate_projects_yaml(projects, core_project):
"""
Validate the projects yaml.
"""
_git_ensure_key_exists('repositories', projects)
with open(yaml_file, 'r') as fd:
projects = yaml.load(fd)
for proj, val in projects.items():
# The project subset is chosen based on the following 3 rules:
# 1) If project is in blacklist, we don't clone/install it, period.
# 2) If whitelist is empty, we clone/install everything else.
# 3) If whitelist is not empty, we clone/install everything in the
# whitelist.
if proj in blacklist:
continue
if whitelist and proj not in whitelist:
continue
repo = val['repository']
branch = val['branch']
repo_dir = _git_clone_and_install_single(repo, branch,
update_requirements)
if proj == 'requirements':
requirements_dir = repo_dir
installed.append(proj)
return installed
for project in projects['repositories']:
_git_ensure_key_exists('name', project.keys())
_git_ensure_key_exists('repository', project.keys())
_git_ensure_key_exists('branch', project.keys())
if projects['repositories'][0]['name'] != 'requirements':
error_out('{} git repo must be specified first'.format('requirements'))
if projects['repositories'][-1]['name'] != core_project:
error_out('{} git repo must be specified last'.format(core_project))
def _git_clone_and_install_single(repo, branch, update_requirements=False):
"""Clone and install a single git repository."""
dest_parent_dir = "/mnt/openstack-git/"
dest_dir = os.path.join(dest_parent_dir, os.path.basename(repo))
def _git_ensure_key_exists(key, keys):
"""
Ensure that key exists in keys.
"""
if key not in keys:
error_out('openstack-origin-git key \'{}\' is missing'.format(key))
if not os.path.exists(dest_parent_dir):
juju_log('Host dir not mounted at {}. '
'Creating directory there instead.'.format(dest_parent_dir))
os.mkdir(dest_parent_dir)
def _git_clone_and_install_single(repo, branch, parent_dir, update_requirements):
"""
Clone and install a single git repository.
"""
dest_dir = os.path.join(parent_dir, os.path.basename(repo))
if not os.path.exists(parent_dir):
juju_log('Directory already exists at {}. '
'No need to create directory.'.format(parent_dir))
os.mkdir(parent_dir)
if not os.path.exists(dest_dir):
juju_log('Cloning git repo: {}, branch: {}'.format(repo, branch))
repo_dir = install_remote(repo, dest=dest_parent_dir, branch=branch)
repo_dir = install_remote(repo, dest=parent_dir, branch=branch)
else:
repo_dir = dest_dir
@ -561,16 +593,39 @@ def _git_clone_and_install_single(repo, branch, update_requirements=False):
def _git_update_requirements(package_dir, reqs_dir):
"""Update from global requirements.
"""
Update from global requirements.
Update an OpenStack git directory's requirements.txt and
test-requirements.txt from global-requirements.txt."""
Update an OpenStack git directory's requirements.txt and
test-requirements.txt from global-requirements.txt.
"""
orig_dir = os.getcwd()
os.chdir(reqs_dir)
cmd = "python update.py {}".format(package_dir)
cmd = ['python', 'update.py', package_dir]
try:
subprocess.check_call(cmd.split(' '))
subprocess.check_call(cmd)
except subprocess.CalledProcessError:
package = os.path.basename(package_dir)
error_out("Error updating {} from global-requirements.txt".format(package))
os.chdir(orig_dir)
def git_src_dir(projects_yaml, project):
"""
Return the directory where the specified project's source is located.
"""
parent_dir = '/mnt/openstack-git'
if not projects_yaml:
return
projects = yaml.load(projects_yaml)
if 'directory' in projects.keys():
parent_dir = projects['directory']
for p in projects['repositories']:
if p['name'] == project:
return os.path.join(parent_dir, os.path.basename(p['repository']))
return None

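Note on the git-install refactor above: git_clone_and_install() now takes the YAML text itself (projects_yaml) rather than a file name, and the validation requires the requirements repository first and the core project last. An illustrative example of the expected input and a typical call from a charm hook; the repositories, branches and directory are example values only:

projects_yaml = """
repositories:
  - {name: requirements,
     repository: 'git://git.openstack.org/openstack/requirements.git',
     branch: 'stable/icehouse'}
  - {name: nova,
     repository: 'git://git.openstack.org/openstack/nova.git',
     branch: 'stable/icehouse'}
directory: /mnt/openstack-git
"""

# Typical use inside a hook, assuming this version of charmhelpers is bundled:
# from charmhelpers.contrib.openstack.utils import (
#     git_install_requested, git_clone_and_install, git_src_dir)
# if git_install_requested():
#     git_clone_and_install(config('openstack-origin-git'), core_project='nova')
#     nova_dir = git_src_dir(config('openstack-origin-git'), 'nova')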
@ -566,3 +566,29 @@ class Hooks(object):
def charm_dir():
"""Return the root directory of the current charm"""
return os.environ.get('CHARM_DIR')
@cached
def action_get(key=None):
"""Gets the value of an action parameter, or all key/value param pairs"""
cmd = ['action-get']
if key is not None:
cmd.append(key)
cmd.append('--format=json')
action_data = json.loads(subprocess.check_output(cmd).decode('UTF-8'))
return action_data
def action_set(values):
"""Sets the values to be returned after the action finishes"""
cmd = ['action-set']
for k, v in list(values.items()):
cmd.append('{}={}'.format(k, v))
subprocess.check_call(cmd)
def action_fail(message):
"""Sets the action status to failed and sets the error message.
The results set by action_set are preserved."""
subprocess.check_call(['action-fail', message])

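Note on the new action helpers above: they wrap Juju's action-get, action-set and action-fail tools and are meant to be called from an action script running inside a unit. A minimal sketch of such a script; the action name and parameter are hypothetical:

#!/usr/bin/env python
# Hypothetical action script using the helpers added to hookenv above.
import sys
import traceback

from charmhelpers.core.hookenv import action_get, action_set, action_fail

def main():
    try:
        flavor = action_get('flavor')   # one parameter; action_get() returns all
        # ... do the actual work here ...
        action_set({'result': 'done', 'flavor': flavor})
    except Exception:
        action_fail('action failed; see unit log for details')
        traceback.print_exc(file=sys.stderr)

if __name__ == '__main__':
    main()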
@ -339,12 +339,16 @@ def lsb_release():
def pwgen(length=None):
"""Generate a random pasword."""
if length is None:
# A weak PRNG is acceptable for choosing the (non-secret) password length
length = random.choice(range(35, 45))
alphanumeric_chars = [
l for l in (string.ascii_letters + string.digits)
if l not in 'l0QD1vAEIOUaeiou']
# Use a crypto-friendly PRNG (e.g. /dev/urandom) for making the
# actual password
random_generator = random.SystemRandom()
random_chars = [
random.choice(alphanumeric_chars) for _ in range(length)]
random_generator.choice(alphanumeric_chars) for _ in range(length)]
return(''.join(random_chars))

@ -139,7 +139,7 @@ class MysqlRelation(RelationContext):
def __init__(self, *args, **kwargs):
self.required_keys = ['host', 'user', 'password', 'database']
super(HttpRelation).__init__(self, *args, **kwargs)
RelationContext.__init__(self, *args, **kwargs)
class HttpRelation(RelationContext):
@ -154,7 +154,7 @@ class HttpRelation(RelationContext):
def __init__(self, *args, **kwargs):
self.required_keys = ['host', 'port']
super(HttpRelation).__init__(self, *args, **kwargs)
RelationContext.__init__(self, *args, **kwargs)
def provide_data(self):
return {

@ -443,7 +443,7 @@ class HookData(object):
data = hookenv.execution_environment()
self.conf = conf_delta = self.kv.delta(data['conf'], 'config')
self.rels = rels_delta = self.kv.delta(data['rels'], 'rels')
self.kv.set('env', data['env'])
self.kv.set('env', dict(data['env']))
self.kv.set('unit', data['unit'])
self.kv.set('relid', data.get('relid'))
return conf_delta, rels_delta

@ -1,3 +1,5 @@
import os
from charmhelpers.core.hookenv import (
config,
relation_ids,
@ -329,4 +331,22 @@ class InstanceConsoleContext(context.OSContextGenerator):
servers = []
ctxt['memcached_servers'] = ','.join(servers)
# Configure nova-novncproxy https if nova-api is using https.
if https():
cn = resolve_address(endpoint_type=INTERNAL)
if cn:
cert_filename = 'cert_{}'.format(cn)
key_filename = 'key_{}'.format(cn)
else:
cert_filename = 'cert'
key_filename = 'key'
ssl_dir = '/etc/apache2/ssl/nova'
cert = os.path.join(ssl_dir, cert_filename)
key = os.path.join(ssl_dir, key_filename)
if os.path.exists(cert) and os.path.exists(key):
ctxt['ssl_cert'] = cert
ctxt['ssl_key'] = key
return ctxt

@ -5,7 +5,10 @@ import shutil
import sys
import uuid
from subprocess import check_call
from subprocess import (
check_call,
)
from urlparse import urlparse
from charmhelpers.core.hookenv import (
@ -29,6 +32,7 @@ from charmhelpers.core.host import (
restart_on_change,
service_running,
service_stop,
service_reload,
service_restart,
)
@ -42,6 +46,7 @@ from charmhelpers.contrib.openstack.utils import (
configure_installation_source,
openstack_upgrade_available,
os_release,
os_requires_version,
sync_db_with_multi_ipv6_addresses
)
@ -75,6 +80,7 @@ from nova_cc_utils import (
migrate_nova_database,
neutron_plugin,
save_script_rc,
services,
ssh_compute_add,
ssh_compute_remove,
ssh_known_hosts_lines,
@ -90,8 +96,8 @@ from nova_cc_utils import (
console_attributes,
service_guard,
guard_map,
services,
setup_ipv6
get_topics,
setup_ipv6,
)
from charmhelpers.contrib.hahelpers.cluster import (
@ -168,6 +174,8 @@ def config_changed():
for rid in relation_ids('cloud-compute')]
for r_id in relation_ids('identity-service'):
identity_joined(rid=r_id)
for rid in relation_ids('zeromq-configuration'):
zeromq_configuration_relation_joined(rid)
[cluster_joined(rid) for rid in relation_ids('cluster')]
update_nrpe_config()
@ -744,6 +752,10 @@ def configure_https():
cmd = ['a2dissite', 'openstack_https_frontend']
check_call(cmd)
# TODO: improve this by checking if local CN certs are available
# first then checking reload status (see LP #1433114).
service_reload('apache2', restart_on_failure=True)
for rid in relation_ids('identity-service'):
identity_joined(rid=rid)
@ -858,6 +870,14 @@ def neutron_api_relation_broken():
quantum_joined(rid=rid)
@hooks.hook('zeromq-configuration-relation-joined')
@os_requires_version('kilo', 'nova-common')
def zeromq_configuration_relation_joined(relid=None):
relation_set(relation_id=relid,
topics=" ".join(get_topics()),
users="nova")
@hooks.hook('nrpe-external-master-relation-joined',
'nrpe-external-master-relation-changed')
def update_nrpe_config():
@ -881,6 +901,12 @@ def memcached_joined():
CONFIGS.write(NOVA_CONF)
@hooks.hook('zeromq-configuration-relation-changed')
@restart_on_change(restart_map(), stopstart=True)
def zeromq_configuration_relation_changed():
CONFIGS.write(NOVA_CONF)
def main():
try:
hooks.execute(sys.argv)

@ -124,8 +124,12 @@ BASE_RESOURCE_MAP = OrderedDict([
context.SyslogContext(),
context.LogLevelContext(),
nova_cc_context.HAProxyContext(),
nova_cc_context.IdentityServiceContext(),
nova_cc_context.IdentityServiceContext(
service='nova',
service_user='nova'),
nova_cc_context.VolumeServiceContext(),
context.ZeroMQContext(),
context.NotificationDriverContext(),
nova_cc_context.NovaIPv6Context(),
nova_cc_context.NeutronCCContext(),
nova_cc_context.NovaConfigContext(),
@ -145,7 +149,9 @@ BASE_RESOURCE_MAP = OrderedDict([
ssl_dir=QUANTUM_CONF_DIR),
nova_cc_context.NeutronPostgresqlDBContext(),
nova_cc_context.HAProxyContext(),
nova_cc_context.IdentityServiceContext(),
nova_cc_context.IdentityServiceContext(
service='neutron',
service_user='neutron'),
nova_cc_context.NeutronCCContext(),
context.SyslogContext()],
}),
@ -166,7 +172,9 @@ BASE_RESOURCE_MAP = OrderedDict([
relation_prefix='neutron',
ssl_dir=NEUTRON_CONF_DIR),
nova_cc_context.NeutronPostgresqlDBContext(),
nova_cc_context.IdentityServiceContext(),
nova_cc_context.IdentityServiceContext(
service='neutron',
service_user='neutron'),
nova_cc_context.NeutronCCContext(),
nova_cc_context.HAProxyContext(),
context.SyslogContext(),
@ -840,6 +848,23 @@ def determine_endpoints(public_url, internal_url, admin_url):
'quantum_internal_url': neutron_internal_url,
})
if os_rel >= 'kilo':
# NOTE(jamespage) drop endpoints for ec2 and s3
# ec2 is deprecated
# s3 is insecure and should die in flames
endpoints.update({
'ec2_service': None,
'ec2_region': None,
'ec2_public_url': None,
'ec2_admin_url': None,
'ec2_internal_url': None,
's3_service': None,
's3_region': None,
's3_public_url': None,
's3_admin_url': None,
's3_internal_url': None,
})
return endpoints
@ -905,6 +930,13 @@ def service_guard(guard_map, contexts, active=False):
return wrap
def get_topics():
topics = ['scheduler', 'conductor']
if 'nova-consoleauth' in services():
topics.append('consoleauth')
return topics
def cmd_all_services(cmd):
if cmd == 'start':
for svc in services():

@ -0,0 +1 @@
nova_cc_hooks.py

@ -0,0 +1 @@
nova_cc_hooks.py

@ -45,6 +45,9 @@ requires:
scope: container
memcache:
interface: memcache
zeromq-configuration:
interface: zeromq-configuration
scope: container
peers:
cluster:
interface: nova-ha

@ -7,7 +7,9 @@ state_path = /var/lib/neutron
lock_path = $state_path/lock
bind_host = {{ bind_host }}
auth_strategy = keystone
{% if notifications == 'True' -%}
notification_driver = neutron.openstack.common.notifier.rpc_notifier
{% endif -%}
api_workers = {{ workers }}
use_syslog = {{ use_syslog }}
@ -52,7 +54,7 @@ quota_items = network,subnet,port,security_group,security_group_rule
root_helper = sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf
[keystone_authtoken]
signing_dir = $state_path/keystone-signing
signing_dir = {{ signing_dir }}
{% if service_host -%}
service_protocol = {{ service_protocol }}
service_host = {{ service_host }}

@ -42,6 +42,8 @@ my_ip = {{ host_ip }}
memcached_servers = {{ memcached_servers }}
{% endif %}
{% include "parts/novnc" %}
{% if keystone_ec2_url -%}
keystone_ec2_url = {{ keystone_ec2_url }}
{% endif -%}
@ -82,6 +84,11 @@ default_floating_pool = {{ external_network }}
{% endif -%}
{% endif -%}
{% if neutron_plugin and neutron_plugin == 'Calico' -%}
security_group_api = neutron
nova_firewall_driver = nova.virt.firewall.NoopFirewallDriver
{% endif -%}
{% if network_manager_config -%}
{% for key, value in network_manager_config.iteritems() -%}
{{ key }} = {{ value }}

templates/juno/nova.conf (new file, 158 lines)

@ -0,0 +1,158 @@
# juno
###############################################################################
# [ WARNING ]
# Configuration file maintained by Juju. Local changes may be overwritten.
###############################################################################
[DEFAULT]
verbose={{ verbose }}
debug={{ debug }}
dhcpbridge_flagfile=/etc/nova/nova.conf
dhcpbridge=/usr/bin/nova-dhcpbridge
logdir=/var/log/nova
state_path=/var/lib/nova
lock_path=/var/lock/nova
force_dhcp_release=True
iscsi_helper=tgtadm
libvirt_use_virtio_for_bridges=True
connection_type=libvirt
root_helper=sudo nova-rootwrap /etc/nova/rootwrap.conf
ec2_private_dns_show_ip=True
api_paste_config=/etc/nova/api-paste.ini
volumes_path=/var/lib/nova/volumes
enabled_apis=ec2,osapi_compute,metadata
auth_strategy=keystone
compute_driver=libvirt.LibvirtDriver
use_ipv6 = {{ use_ipv6 }}
osapi_compute_listen = {{ bind_host }}
metadata_host = {{ bind_host }}
s3_listen = {{ bind_host }}
ec2_listen = {{ bind_host }}
osapi_compute_workers = {{ workers }}
ec2_workers = {{ workers }}
scheduler_default_filters = RetryFilter,AvailabilityZoneFilter,CoreFilter,RamFilter,ComputeFilter,ComputeCapabilitiesFilter,ImagePropertiesFilter,ServerGroupAntiAffinityFilter,ServerGroupAffinityFilter
cpu_allocation_ratio = {{ cpu_allocation_ratio }}
ram_allocation_ratio = {{ ram_allocation_ratio }}
use_syslog={{ use_syslog }}
my_ip = {{ host_ip }}
{% if memcached_servers %}
memcached_servers = {{ memcached_servers }}
{% endif %}
{% include "parts/novnc" %}
{% if keystone_ec2_url -%}
keystone_ec2_url = {{ keystone_ec2_url }}
{% endif -%}
{% include "parts/rabbitmq" %}
{% if glance_api_servers -%}
glance_api_servers = {{ glance_api_servers }}
{% endif -%}
{% if rbd_pool -%}
rbd_pool = {{ rbd_pool }}
rbd_user = {{ rbd_user }}
rbd_secret_uuid = {{ rbd_secret_uuid }}
{% endif -%}
{% if neutron_plugin and neutron_plugin == 'ovs' -%}
libvirt_vif_driver = nova.virt.libvirt.vif.LibvirtGenericVIFDriver
libvirt_use_virtio_for_bridges = True
{% if neutron_security_groups -%}
security_group_api = {{ network_manager }}
nova_firewall_driver = nova.virt.firewall.NoopFirewallDriver
{% endif -%}
{% if external_network -%}
default_floating_pool = {{ external_network }}
{% endif -%}
{% endif -%}
{% if neutron_plugin and neutron_plugin == 'nvp' -%}
security_group_api = neutron
nova_firewall_driver = nova.virt.firewall.NoopFirewallDriver
{% if external_network -%}
default_floating_pool = {{ external_network }}
{% endif -%}
{% endif -%}
{% if network_manager_config -%}
{% for key, value in network_manager_config.iteritems() -%}
{{ key }} = {{ value }}
{% endfor -%}
{% endif -%}
{% if network_manager and network_manager == 'quantum' -%}
network_api_class = nova.network.quantumv2.api.API
quantum_url = {{ neutron_url }}
{% if auth_host -%}
quantum_auth_strategy = keystone
quantum_admin_tenant_name = {{ admin_tenant_name }}
quantum_admin_username = {{ admin_user }}
quantum_admin_password = {{ admin_password }}
quantum_admin_auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/v2.0
{% endif -%}
{% elif network_manager and network_manager == 'neutron' -%}
network_api_class = nova.network.neutronv2.api.API
neutron_url = {{ neutron_url }}
{% if auth_host -%}
neutron_auth_strategy = keystone
neutron_admin_tenant_name = {{ admin_tenant_name }}
neutron_admin_username = {{ admin_user }}
neutron_admin_password = {{ admin_password }}
neutron_admin_auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/v2.0
{% endif -%}
{% else -%}
network_manager = nova.network.manager.FlatDHCPManager
{% endif -%}
{% if default_floating_pool -%}
default_floating_pool = {{ default_floating_pool }}
{% endif -%}
{% if volume_service -%}
volume_api_class=nova.volume.cinder.API
{% endif -%}
{% if user_config_flags -%}
{% for key, value in user_config_flags.iteritems() -%}
{{ key }} = {{ value }}
{% endfor -%}
{% endif -%}
{% if listen_ports -%}
{% for key, value in listen_ports.iteritems() -%}
{{ key }} = {{ value }}
{% endfor -%}
{% endif -%}
{% if sections and 'DEFAULT' in sections -%}
{% for key, value in sections['DEFAULT'] -%}
{{ key }} = {{ value }}
{% endfor -%}
{% endif %}
{% include "parts/database-v2" %}
{% if auth_host -%}
[keystone_authtoken]
auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/
auth_host = {{ auth_host }}
auth_port = {{ auth_port }}
auth_protocol = {{ auth_protocol }}
admin_tenant_name = {{ admin_tenant_name }}
admin_user = {{ admin_user }}
admin_password = {{ admin_password }}
{% endif -%}
[osapi_v3]
enabled=True
{% include "parts/cell" %}
[conductor]
workers = {{ workers }}

@ -0,0 +1,147 @@
############
# Metadata #
############
[composite:metadata]
use = egg:Paste#urlmap
/: meta
[pipeline:meta]
pipeline = ec2faultwrap logrequest metaapp
[app:metaapp]
paste.app_factory = nova.api.metadata.handler:MetadataRequestHandler.factory
#######
# EC2 #
#######
[composite:ec2]
use = egg:Paste#urlmap
/: ec2cloud
[composite:ec2cloud]
use = call:nova.api.auth:pipeline_factory
noauth = ec2faultwrap logrequest ec2noauth cloudrequest validator ec2executor
noauth2 = ec2faultwrap logrequest ec2noauth cloudrequest validator ec2executor
keystone = ec2faultwrap logrequest ec2keystoneauth cloudrequest validator ec2executor
[filter:ec2faultwrap]
paste.filter_factory = nova.api.ec2:FaultWrapper.factory
[filter:logrequest]
paste.filter_factory = nova.api.ec2:RequestLogging.factory
[filter:ec2lockout]
paste.filter_factory = nova.api.ec2:Lockout.factory
[filter:ec2keystoneauth]
paste.filter_factory = nova.api.ec2:EC2KeystoneAuth.factory
[filter:ec2noauth]
paste.filter_factory = nova.api.ec2:NoAuth.factory
[filter:cloudrequest]
controller = nova.api.ec2.cloud.CloudController
paste.filter_factory = nova.api.ec2:Requestify.factory
[filter:authorizer]
paste.filter_factory = nova.api.ec2:Authorizer.factory
[filter:validator]
paste.filter_factory = nova.api.ec2:Validator.factory
[app:ec2executor]
paste.app_factory = nova.api.ec2:Executor.factory
#############
# OpenStack #
#############
[composite:osapi_compute]
use = call:nova.api.openstack.urlmap:urlmap_factory
/: oscomputeversions
/v1.1: openstack_compute_api_v2
/v2: openstack_compute_api_v2
/v2.1: openstack_compute_api_v21
/v3: openstack_compute_api_v3
[composite:openstack_compute_api_v2]
use = call:nova.api.auth:pipeline_factory
noauth = compute_req_id faultwrap sizelimit noauth ratelimit osapi_compute_app_v2
noauth2 = compute_req_id faultwrap sizelimit noauth2 ratelimit osapi_compute_app_v2
keystone = compute_req_id faultwrap sizelimit authtoken keystonecontext ratelimit osapi_compute_app_v2
keystone_nolimit = compute_req_id faultwrap sizelimit authtoken keystonecontext osapi_compute_app_v2
[composite:openstack_compute_api_v21]
use = call:nova.api.auth:pipeline_factory_v21
noauth = compute_req_id faultwrap sizelimit noauth osapi_compute_app_v21
noauth2 = compute_req_id faultwrap sizelimit noauth2 osapi_compute_app_v21
keystone = compute_req_id faultwrap sizelimit authtoken keystonecontext osapi_compute_app_v21
[composite:openstack_compute_api_v3]
use = call:nova.api.auth:pipeline_factory_v21
noauth = request_id faultwrap sizelimit noauth_v3 osapi_compute_app_v3
noauth2 = request_id faultwrap sizelimit noauth_v3 osapi_compute_app_v3
keystone = request_id faultwrap sizelimit authtoken keystonecontext osapi_compute_app_v3
[filter:request_id]
paste.filter_factory = oslo.middleware:RequestId.factory
[filter:compute_req_id]
paste.filter_factory = nova.api.compute_req_id:ComputeReqIdMiddleware.factory
[filter:faultwrap]
paste.filter_factory = nova.api.openstack:FaultWrapper.factory
[filter:noauth]
paste.filter_factory = nova.api.openstack.auth:NoAuthMiddlewareOld.factory
[filter:noauth2]
paste.filter_factory = nova.api.openstack.auth:NoAuthMiddleware.factory
[filter:noauth_v3]
paste.filter_factory = nova.api.openstack.auth:NoAuthMiddlewareV3.factory
[filter:ratelimit]
paste.filter_factory = nova.api.openstack.compute.limits:RateLimitingMiddleware.factory
[filter:sizelimit]
paste.filter_factory = oslo.middleware:RequestBodySizeLimiter.factory
[app:osapi_compute_app_v2]
paste.app_factory = nova.api.openstack.compute:APIRouter.factory
[app:osapi_compute_app_v21]
paste.app_factory = nova.api.openstack.compute:APIRouterV21.factory
[app:osapi_compute_app_v3]
paste.app_factory = nova.api.openstack.compute:APIRouterV3.factory
[pipeline:oscomputeversions]
pipeline = faultwrap oscomputeversionapp
[app:oscomputeversionapp]
paste.app_factory = nova.api.openstack.compute.versions:Versions.factory
##########
# Shared #
##########
[filter:keystonecontext]
paste.filter_factory = nova.api.auth:NovaKeystoneContext.factory
[filter:authtoken]
paste.filter_factory = keystonemiddleware.auth_token:filter_factory
{% if service_host -%}
# NOTE(jamespage) - not used - but required for relation to nova-compute
service_protocol = {{ service_protocol }}
service_host = {{ service_host }}
service_port = {{ service_port }}
auth_host = {{ auth_host }}
auth_port = {{ auth_port }}
auth_protocol = {{ auth_protocol }}
admin_tenant_name = {{ admin_tenant_name }}
admin_user = {{ admin_user }}
admin_password = {{ admin_password }}
{% endif -%}

@ -10,7 +10,6 @@ dhcpbridge_flagfile=/etc/nova/nova.conf
dhcpbridge=/usr/bin/nova-dhcpbridge
logdir=/var/log/nova
state_path=/var/lib/nova
lock_path=/var/lock/nova
force_dhcp_release=True
iscsi_helper=tgtadm
libvirt_use_virtio_for_bridges=True
@ -42,12 +41,12 @@ my_ip = {{ host_ip }}
memcached_servers = {{ memcached_servers }}
{% endif %}
{% include "parts/novnc" %}
{% if keystone_ec2_url -%}
keystone_ec2_url = {{ keystone_ec2_url }}
{% endif -%}
{% include "parts/rabbitmq" %}
{% if rbd_pool -%}
rbd_pool = {{ rbd_pool }}
rbd_user = {{ rbd_user }}
@ -74,6 +73,11 @@ default_floating_pool = {{ external_network }}
{% endif -%}
{% endif -%}
{% if neutron_plugin and neutron_plugin == 'Calico' -%}
security_group_api = neutron
nova_firewall_driver = nova.virt.firewall.NoopFirewallDriver
{% endif -%}
{% if network_manager_config -%}
{% for key, value in network_manager_config.iteritems() -%}
{{ key }} = {{ value }}
@ -112,6 +116,8 @@ volume_api_class=nova.volume.cinder.API
{% endfor -%}
{% endif %}
{% include "section-zeromq" %}
{% include "parts/database-v2" %}
{% if glance_api_servers -%}
@ -131,16 +137,7 @@ admin_auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }}/v2.0
{% endif -%}
{% endif -%}
{% if auth_host -%}
[keystone_authtoken]
auth_uri = {{ service_protocol }}://{{ service_host }}:{{ service_port }}/
auth_host = {{ auth_host }}
auth_port = {{ auth_port }}
auth_protocol = {{ auth_protocol }}
admin_tenant_name = {{ admin_tenant_name }}
admin_user = {{ admin_user }}
admin_password = {{ admin_password }}
{% endif -%}
{% include "section-keystone-authtoken" %}
[osapi_v3]
enabled=True
@ -149,3 +146,8 @@ enabled=True
[conductor]
workers = {{ workers }}
{% include "section-rabbitmq-oslo" %}
[oslo_concurrency]
lock_path=/var/lock/nova

@ -3,12 +3,12 @@
rpc_backend = zmq
rpc_zmq_host = {{ zmq_host }}
{% if zmq_redis_address -%}
rpc_zmq_matchmaker = oslo.messaging._drivers.matchmaker_redis.MatchMakerRedis
rpc_zmq_matchmaker = oslo_messaging._drivers.matchmaker_redis.MatchMakerRedis
matchmaker_heartbeat_freq = 15
matchmaker_heartbeat_ttl = 30
[matchmaker_redis]
host = {{ zmq_redis_address }}
{% else -%}
rpc_zmq_matchmaker = oslo.messaging._drivers.matchmaker_ring.MatchMakerRing
rpc_zmq_matchmaker = oslo_messaging._drivers.matchmaker_ring.MatchMakerRing
{% endif -%}
{% endif -%}

templates/parts/novnc (new file, 9 lines)

@ -0,0 +1,9 @@
{%- if ssl_only -%}
ssl_only=true
{% endif -%}
{% if ssl_cert -%}
cert={{ ssl_cert }}
{% endif -%}
{% if ssl_key -%}
key={{ ssl_key }}
{% endif %}

@ -281,7 +281,6 @@ class NovaCCBasicDeployment(OpenStackAmuletDeployment):
'auth_port': '35357',
'auth_protocol': 'http',
'private-address': u.valid_ip,
'https_keystone': 'False',
'auth_host': u.valid_ip,
'service_username': 's3_ec2_nova',
'service_tenant_id': u.not_null,

@ -15,6 +15,7 @@
# along with charm-helpers. If not, see <http://www.gnu.org/licenses/>.
import six
from collections import OrderedDict
from charmhelpers.contrib.amulet.deployment import (
AmuletDeployment
)
@ -100,12 +101,34 @@ class OpenStackAmuletDeployment(AmuletDeployment):
"""
(self.precise_essex, self.precise_folsom, self.precise_grizzly,
self.precise_havana, self.precise_icehouse,
self.trusty_icehouse) = range(6)
self.trusty_icehouse, self.trusty_juno, self.trusty_kilo) = range(8)
releases = {
('precise', None): self.precise_essex,
('precise', 'cloud:precise-folsom'): self.precise_folsom,
('precise', 'cloud:precise-grizzly'): self.precise_grizzly,
('precise', 'cloud:precise-havana'): self.precise_havana,
('precise', 'cloud:precise-icehouse'): self.precise_icehouse,
('trusty', None): self.trusty_icehouse}
('trusty', None): self.trusty_icehouse,
('trusty', 'cloud:trusty-juno'): self.trusty_juno,
('trusty', 'cloud:trusty-kilo'): self.trusty_kilo}
return releases[(self.series, self.openstack)]
def _get_openstack_release_string(self):
"""Get openstack release string.
Return a string representing the openstack release.
"""
releases = OrderedDict([
('precise', 'essex'),
('quantal', 'folsom'),
('raring', 'grizzly'),
('saucy', 'havana'),
('trusty', 'icehouse'),
('utopic', 'juno'),
('vivid', 'kilo'),
])
if self.openstack:
os_origin = self.openstack.split(':')[1]
return os_origin.split('%s-' % self.series)[1].split('/')[0]
else:
return releases[self.series]

@ -47,6 +47,8 @@ class NovaComputeContextTests(CharmTestCase):
self.config.side_effect = self.test_config.get
self.log.side_effect = fake_log
@mock.patch.object(context, 'resolve_address',
lambda *args, **kwargs: None)
@mock.patch.object(utils, 'os_release')
@mock.patch('charmhelpers.contrib.network.ip.log')
def test_instance_console_context_without_memcache(self, os_release, log_):
@ -57,6 +59,8 @@ class NovaComputeContextTests(CharmTestCase):
self.assertEqual({'memcached_servers': ''},
instance_console())
@mock.patch.object(context, 'resolve_address',
lambda *args, **kwargs: None)
@mock.patch.object(utils, 'os_release')
@mock.patch('charmhelpers.contrib.network.ip.log')
def test_instance_console_context_with_memcache(self, os_release, log_):
@ -64,6 +68,8 @@ class NovaComputeContextTests(CharmTestCase):
'127.0.1.1',
'127.0.1.1')
@mock.patch.object(context, 'resolve_address',
lambda *args, **kwargs: None)
@mock.patch.object(utils, 'os_release')
@mock.patch('charmhelpers.contrib.network.ip.log')
def test_instance_console_context_with_memcache_ipv6(self, os_release,

@ -49,6 +49,7 @@ TO_PATCH = [
'ssh_known_hosts_lines',
'ssh_authorized_keys_lines',
'save_script_rc',
'service_reload',
'service_restart',
'service_running',
'service_stop',
@ -119,10 +120,12 @@ class NovaCCHooksTests(CharmTestCase):
identity_joined, cluster_joined):
self.openstack_upgrade_available.return_value = True
self.relation_ids.return_value = ['generic_rid']
_zmq_joined = self.patch('zeromq_configuration_relation_joined')
hooks.config_changed()
self.assertTrue(self.do_openstack_upgrade.called)
self.assertTrue(neutron_api_joined.called)
self.assertTrue(identity_joined.called)
self.assertTrue(_zmq_joined.called)
self.assertTrue(cluster_joined.called)
self.assertTrue(self.save_script_rc.called)