[gandelman-a] NVP + VMware vSphere support
commit dc28cedb16
Files changed: charm-helpers.yaml, config.yaml, nova-vmware-relation-changed, nova-vmware-relation-joined, nova_cc_context.py, nova_cc_hooks.py, nova_cc_utils.py, metadata.yaml, revision
Directories touched: hooks/charmhelpers (contrib, core, fetch), templates, unit_tests
@@ -7,5 +7,4 @@ include:
    - contrib.storage
    - contrib.hahelpers:
        - apache
        - ceph
    - payload.execd
config.yaml (29 lines changed)
@@ -69,6 +69,7 @@ options:
      Quantum plugin to use for network management; supports
      .
        ovs - OpenvSwitch Plugin
        nvp - Nicira Network Virtualization Platform
      .
      This configuration only has context when used with
      network-manager Quantum.
@@ -125,3 +126,31 @@ options:
  ssl_key:
    type: string
    description: SSL key to use with certificate specified as ssl_cert.
  # Neutron NVP Plugin configuration
  nvp-controllers:
    type: string
    description: Space delimited addresses of NVP controllers
  nvp-username:
    type: string
    default: admin
    description: Username to connect to NVP controllers with
  nvp-password:
    type: string
    default: admin
    description: Password to connect to NVP controllers with
  nvp-cluster-name:
    type: string
    default: example
    description: Name of the NVP cluster configuration to create (grizzly only)
  nvp-tz-uuid:
    type: string
    description: |
      This is uuid of the default NVP Transport zone that will be used for
      creating tunneled isolated Quantum networks. It needs to be created
      in NVP before starting Quantum with the nvp plugin.
  nvp-l3-uuid:
    type: string
    description: |
      This is uuid of the default NVP L3 Gateway Service.
  # end of NVP configuration
hooks/charmhelpers/contrib/openstack/alternatives.py (new file, 17 lines)
@@ -0,0 +1,17 @@
''' Helper for managing alternatives for file conflict resolution '''

import subprocess
import shutil
import os


def install_alternative(name, target, source, priority=50):
    ''' Install alternative configuration '''
    if (os.path.exists(target) and not os.path.islink(target)):
        # Move existing file/directory away before installing
        shutil.move(target, '{}.bak'.format(target))
    cmd = [
        'update-alternatives', '--force', '--install',
        target, name, source, str(priority)
    ]
    subprocess.check_call(cmd)
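For reference, a minimal usage sketch of the new helper (the file paths and priority shown here are illustrative, not part of this commit):

    from charmhelpers.contrib.openstack.alternatives import install_alternative

    # Register a charm-maintained copy of nova.conf as an alternative for
    # /etc/nova/nova.conf; an existing non-symlink file is moved to *.bak first.
    install_alternative(name='nova.conf',
                        target='/etc/nova/nova.conf',
                        source='/var/lib/charm/nova/nova.conf',
                        priority=60)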
@@ -385,16 +385,33 @@ class NeutronContext(object):
    def ovs_ctxt(self):
        driver = neutron_plugin_attribute(self.plugin, 'driver',
                                          self.network_manager)

        config = neutron_plugin_attribute(self.plugin, 'config',
                                          self.network_manager)
        ovs_ctxt = {
            'core_plugin': driver,
            'neutron_plugin': 'ovs',
            'neutron_security_groups': self.neutron_security_groups,
            'local_ip': unit_private_ip(),
            'config': config
        }

        return ovs_ctxt

    def nvp_ctxt(self):
        driver = neutron_plugin_attribute(self.plugin, 'driver',
                                          self.network_manager)
        config = neutron_plugin_attribute(self.plugin, 'config',
                                          self.network_manager)
        nvp_ctxt = {
            'core_plugin': driver,
            'neutron_plugin': 'nvp',
            'neutron_security_groups': self.neutron_security_groups,
            'local_ip': unit_private_ip(),
            'config': config
        }

        return nvp_ctxt

    def __call__(self):
        self._ensure_packages()

@@ -408,6 +425,8 @@ class NeutronContext(object):

        if self.plugin == 'ovs':
            ctxt.update(self.ovs_ctxt())
        elif self.plugin == 'nvp':
            ctxt.update(self.nvp_ctxt())

        self._save_flag_file()
        return ctxt
@@ -34,13 +34,23 @@ def quantum_plugins():
            'services': ['quantum-plugin-openvswitch-agent'],
            'packages': [[headers_package(), 'openvswitch-datapath-dkms'],
                         ['quantum-plugin-openvswitch-agent']],
            'server_packages': ['quantum-server',
                                'quantum-plugin-openvswitch'],
            'server_services': ['quantum-server']
        },
        'nvp': {
            'config': '/etc/quantum/plugins/nicira/nvp.ini',
            'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.'
                      'QuantumPlugin.NvpPluginV2',
            'contexts': [
                context.SharedDBContext(user=config('neutron-database-user'),
                                        database=config('neutron-database'),
                                        relation_prefix='neutron')],
            'services': [],
            'packages': [],
            'server_packages': ['quantum-server',
                                'quantum-plugin-nicira'],
            'server_services': ['quantum-server']
        }
    }

@@ -60,13 +70,23 @@ def neutron_plugins():
            'services': ['neutron-plugin-openvswitch-agent'],
            'packages': [[headers_package(), 'openvswitch-datapath-dkms'],
                         ['quantum-plugin-openvswitch-agent']],
            'server_packages': ['neutron-server',
                                'neutron-plugin-openvswitch'],
            'server_services': ['neutron-server']
        },
        'nvp': {
            'config': '/etc/neutron/plugins/nicira/nvp.ini',
            'driver': 'neutron.plugins.nicira.nicira_nvp_plugin.'
                      'NeutronPlugin.NvpPluginV2',
            'contexts': [
                context.SharedDBContext(user=config('neutron-database-user'),
                                        database=config('neutron-database'),
                                        relation_prefix='neutron')],
            'services': [],
            'packages': [],
            'server_packages': ['neutron-server',
                                'neutron-plugin-nicira'],
            'server_services': ['neutron-server']
        }
    }
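These plugin tables are consumed through the existing neutron_plugin_attribute helper; a short sketch of how the new nvp entries resolve (return values taken from the tables above):

    from charmhelpers.contrib.openstack.neutron import neutron_plugin_attribute

    # '/etc/quantum/plugins/nicira/nvp.ini' under the quantum manager,
    # '/etc/neutron/plugins/nicira/nvp.ini' under neutron.
    conf = neutron_plugin_attribute('nvp', 'config', 'quantum')
    # ['quantum-server', 'quantum-plugin-nicira']
    pkgs = neutron_plugin_attribute('nvp', 'server_packages', 'quantum')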
@@ -13,19 +13,28 @@ from charmhelpers.core.hookenv import (
    config,
    log as juju_log,
    charm_dir,
    ERROR,
    INFO
)

from charmhelpers.core.host import (
    lsb_release,
from charmhelpers.contrib.storage.linux.lvm import (
    deactivate_lvm_volume_group,
    is_lvm_physical_volume,
    remove_lvm_physical_volume,
)

from charmhelpers.fetch import (
    apt_install,
)
from charmhelpers.core.host import lsb_release, mounts, umount
from charmhelpers.fetch import apt_install
from charmhelpers.contrib.storage.linux.utils import is_block_device, zap_disk
from charmhelpers.contrib.storage.linux.loopback import ensure_loopback_device

CLOUD_ARCHIVE_URL = "http://ubuntu-cloud.archive.canonical.com/ubuntu"
CLOUD_ARCHIVE_KEY_ID = '5EDB1B62EC4926EA'

DISTRO_PROPOSED = ('deb http://archive.ubuntu.com/ubuntu/ %s-proposed '
                   'restricted main multiverse universe')


UBUNTU_OPENSTACK_RELEASE = OrderedDict([
    ('oneiric', 'diablo'),
    ('precise', 'essex'),

@@ -57,6 +66,8 @@ SWIFT_CODENAMES = OrderedDict([
    ('1.9.0', 'havana'),
])

DEFAULT_LOOPBACK_SIZE = '5G'


def error_out(msg):
    juju_log("FATAL ERROR: %s" % msg, level='ERROR')

@@ -67,7 +78,7 @@ def get_os_codename_install_source(src):
    '''Derive OpenStack release codename from a given installation source.'''
    ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
    rel = ''
    if src == 'distro':
    if src in ['distro', 'distro-proposed']:
        try:
            rel = UBUNTU_OPENSTACK_RELEASE[ubuntu_rel]
        except KeyError:

@@ -202,6 +213,10 @@ def configure_installation_source(rel):
    '''Configure apt installation source.'''
    if rel == 'distro':
        return
    elif rel == 'distro-proposed':
        ubuntu_rel = lsb_release()['DISTRIB_CODENAME']
        with open('/etc/apt/sources.list.d/juju_deb.list', 'w') as f:
            f.write(DISTRO_PROPOSED % ubuntu_rel)
    elif rel[:4] == "ppa:":
        src = rel
        subprocess.check_call(["add-apt-repository", "-y", src])

@@ -299,6 +314,62 @@ def openstack_upgrade_available(package):
    return apt.version_compare(available_vers, cur_vers) == 1


def ensure_block_device(block_device):
    '''
    Confirm block_device, create as loopback if necessary.

    :param block_device: str: Full path of block device to ensure.

    :returns: str: Full path of ensured block device.
    '''
    _none = ['None', 'none', None]
    if (block_device in _none):
        error_out('prepare_storage(): Missing required input: '
                  'block_device=%s.' % block_device, level=ERROR)

    if block_device.startswith('/dev/'):
        bdev = block_device
    elif block_device.startswith('/'):
        _bd = block_device.split('|')
        if len(_bd) == 2:
            bdev, size = _bd
        else:
            bdev = block_device
            size = DEFAULT_LOOPBACK_SIZE
        bdev = ensure_loopback_device(bdev, size)
    else:
        bdev = '/dev/%s' % block_device

    if not is_block_device(bdev):
        error_out('Failed to locate valid block device at %s' % bdev,
                  level=ERROR)

    return bdev


def clean_storage(block_device):
    '''
    Ensures a block device is clean.  That is:
        - unmounted
        - any lvm volume groups are deactivated
        - any lvm physical device signatures removed
        - partition table wiped

    :param block_device: str: Full path to block device to clean.
    '''
    for mp, d in mounts():
        if d == block_device:
            juju_log('clean_storage(): %s is mounted @ %s, unmounting.' %
                     (d, mp), level=INFO)
            umount(mp, persist=True)

    if is_lvm_physical_volume(block_device):
        deactivate_lvm_volume_group(block_device)
        remove_lvm_physical_volume(block_device)
    else:
        zap_disk(block_device)


def is_ip(address):
    """
    Returns True if address is a valid IP address.
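A usage sketch for the two new storage helpers (the backing-file path and size are illustrative):

    # A non-/dev/ path is treated as 'path|size' and backed by a loopback
    # device; an existing /dev/ path is validated as-is.
    bdev = ensure_block_device('/srv/cinder/backing.img|5G')
    # Unmount it, strip any LVM signatures, or wipe the partition table.
    clean_storage(bdev)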
@@ -102,8 +102,12 @@ def get_osds(service):
    Return a list of all Ceph Object Storage Daemons
    currently in the cluster
    '''
    return json.loads(check_output(['ceph', '--id', service,
                                    'osd', 'ls', '--format=json']))
    version = ceph_version()
    if version and version >= '0.56':
        return json.loads(check_output(['ceph', '--id', service,
                                        'osd', 'ls', '--format=json']))
    else:
        return None


def create_pool(service, name, replicas=2):

@@ -114,7 +118,13 @@ def create_pool(service, name, replicas=2):
        return
    # Calculate the number of placement groups based
    # on upstream recommended best practices.
    pgnum = (len(get_osds(service)) * 100 / replicas)
    osds = get_osds(service)
    if osds:
        pgnum = (len(osds) * 100 / replicas)
    else:
        # NOTE(james-page): Default to 200 for older ceph versions
        # which don't support OSD query from cli
        pgnum = 200
    cmd = [
        'ceph', '--id', service,
        'osd', 'pool', 'create',

@@ -357,3 +367,17 @@ def ensure_ceph_keyring(service, user=None, group=None):
    if user and group:
        check_call(['chown', '%s.%s' % (user, group), keyring])
    return True


def ceph_version():
    ''' Retrieve the local version of ceph '''
    if os.path.exists('/usr/bin/ceph'):
        cmd = ['ceph', '-v']
        output = check_output(cmd)
        output = output.split()
        if len(output) > 3:
            return output[2]
        else:
            return None
    else:
        return None
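Illustration of the new placement-group fallback (the numbers are only an example): with 6 OSDs and 2 replicas, create_pool requests 6 * 100 / 2 = 300 placement groups; on ceph releases older than 0.56, where get_osds() returns None, it falls back to 200:

    replicas = 2
    osds = get_osds('admin')          # None on ceph < 0.56
    pgnum = (len(osds) * 100 / replicas) if osds else 200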
@@ -9,6 +9,7 @@ import json
import yaml
import subprocess
import UserDict
from subprocess import CalledProcessError

CRITICAL = "CRITICAL"
ERROR = "ERROR"

@@ -21,7 +22,7 @@ cache = {}


def cached(func):
    ''' Cache return values for multiple executions of func + args
    """Cache return values for multiple executions of func + args

    For example:

@@ -32,7 +33,7 @@ def cached(func):
        unit_get('test')

    will cache the result of unit_get + 'test' for future calls.
    '''
    """
    def wrapper(*args, **kwargs):
        global cache
        key = str((func, args, kwargs))

@@ -46,8 +47,8 @@ def cached(func):


def flush(key):
    ''' Flushes any entries from function cache where the
    key is found in the function+args '''
    """Flushes any entries from function cache where the
    key is found in the function+args """
    flush_list = []
    for item in cache:
        if key in item:

@@ -57,7 +58,7 @@ def flush(key):


def log(message, level=None):
    "Write a message to the juju log"
    """Write a message to the juju log"""
    command = ['juju-log']
    if level:
        command += ['-l', level]

@@ -66,7 +67,7 @@ def log(message, level=None):


class Serializable(UserDict.IterableUserDict):
    "Wrapper, an object that can be serialized to yaml or json"
    """Wrapper, an object that can be serialized to yaml or json"""

    def __init__(self, obj):
        # wrap the object

@@ -96,11 +97,11 @@ class Serializable(UserDict.IterableUserDict):
        self.data = state

    def json(self):
        "Serialize the object to json"
        """Serialize the object to json"""
        return json.dumps(self.data)

    def yaml(self):
        "Serialize the object to yaml"
        """Serialize the object to yaml"""
        return yaml.dump(self.data)


@@ -119,38 +120,38 @@ def execution_environment():


def in_relation_hook():
    "Determine whether we're running in a relation hook"
    """Determine whether we're running in a relation hook"""
    return 'JUJU_RELATION' in os.environ


def relation_type():
    "The scope for the current relation hook"
    """The scope for the current relation hook"""
    return os.environ.get('JUJU_RELATION', None)


def relation_id():
    "The relation ID for the current relation hook"
    """The relation ID for the current relation hook"""
    return os.environ.get('JUJU_RELATION_ID', None)


def local_unit():
    "Local unit ID"
    """Local unit ID"""
    return os.environ['JUJU_UNIT_NAME']


def remote_unit():
    "The remote unit for the current relation hook"
    """The remote unit for the current relation hook"""
    return os.environ['JUJU_REMOTE_UNIT']


def service_name():
    "The name service group this unit belongs to"
    """The name service group this unit belongs to"""
    return local_unit().split('/')[0]


@cached
def config(scope=None):
    "Juju charm configuration"
    """Juju charm configuration"""
    config_cmd_line = ['config-get']
    if scope is not None:
        config_cmd_line.append(scope)

@@ -163,6 +164,7 @@ def config(scope=None):

@cached
def relation_get(attribute=None, unit=None, rid=None):
    """Get relation information"""
    _args = ['relation-get', '--format=json']
    if rid:
        _args.append('-r')

@@ -174,9 +176,14 @@ def relation_get(attribute=None, unit=None, rid=None):
        return json.loads(subprocess.check_output(_args))
    except ValueError:
        return None
    except CalledProcessError, e:
        if e.returncode == 2:
            return None
        raise


def relation_set(relation_id=None, relation_settings={}, **kwargs):
    """Set relation information for the current unit"""
    relation_cmd_line = ['relation-set']
    if relation_id is not None:
        relation_cmd_line.extend(('-r', relation_id))

@@ -192,7 +199,7 @@ def relation_set(relation_id=None, relation_settings={}, **kwargs):

@cached
def relation_ids(reltype=None):
    "A list of relation_ids"
    """A list of relation_ids"""
    reltype = reltype or relation_type()
    relid_cmd_line = ['relation-ids', '--format=json']
    if reltype is not None:

@@ -203,7 +210,7 @@ def relation_ids(reltype=None):

@cached
def related_units(relid=None):
    "A list of related units"
    """A list of related units"""
    relid = relid or relation_id()
    units_cmd_line = ['relation-list', '--format=json']
    if relid is not None:

@@ -213,7 +220,7 @@ def related_units(relid=None):

@cached
def relation_for_unit(unit=None, rid=None):
    "Get the json represenation of a unit's relation"
    """Get the json represenation of a unit's relation"""
    unit = unit or remote_unit()
    relation = relation_get(unit=unit, rid=rid)
    for key in relation:

@@ -225,7 +232,7 @@ def relation_for_unit(unit=None, rid=None):

@cached
def relations_for_id(relid=None):
    "Get relations of a specific relation ID"
    """Get relations of a specific relation ID"""
    relation_data = []
    relid = relid or relation_ids()
    for unit in related_units(relid):

@@ -237,7 +244,7 @@ def relations_for_id(relid=None):

@cached
def relations_of_type(reltype=None):
    "Get relations of a specific type"
    """Get relations of a specific type"""
    relation_data = []
    reltype = reltype or relation_type()
    for relid in relation_ids(reltype):

@@ -249,7 +256,7 @@ def relations_of_type(reltype=None):

@cached
def relation_types():
    "Get a list of relation types supported by this charm"
    """Get a list of relation types supported by this charm"""
    charmdir = os.environ.get('CHARM_DIR', '')
    mdf = open(os.path.join(charmdir, 'metadata.yaml'))
    md = yaml.safe_load(mdf)

@@ -264,6 +271,7 @@ def relation_types():

@cached
def relations():
    """Get a nested dictionary of relation data for all related units"""
    rels = {}
    for reltype in relation_types():
        relids = {}

@@ -277,15 +285,35 @@ def relations():
    return rels


@cached
def is_relation_made(relation, keys='private-address'):
    '''
    Determine whether a relation is established by checking for
    presence of key(s). If a list of keys is provided, they
    must all be present for the relation to be identified as made
    '''
    if isinstance(keys, str):
        keys = [keys]
    for r_id in relation_ids(relation):
        for unit in related_units(r_id):
            context = {}
            for k in keys:
                context[k] = relation_get(k, rid=r_id,
                                          unit=unit)
            if None not in context.values():
                return True
    return False


def open_port(port, protocol="TCP"):
    "Open a service network port"
    """Open a service network port"""
    _args = ['open-port']
    _args.append('{}/{}'.format(port, protocol))
    subprocess.check_call(_args)


def close_port(port, protocol="TCP"):
    "Close a service network port"
    """Close a service network port"""
    _args = ['close-port']
    _args.append('{}/{}'.format(port, protocol))
    subprocess.check_call(_args)

@@ -293,6 +321,7 @@ def close_port(port, protocol="TCP"):

@cached
def unit_get(attribute):
    """Get the unit ID for the remote unit"""
    _args = ['unit-get', '--format=json', attribute]
    try:
        return json.loads(subprocess.check_output(_args))

@@ -301,22 +330,46 @@ def unit_get(attribute):


def unit_private_ip():
    """Get this unit's private IP address"""
    return unit_get('private-address')


class UnregisteredHookError(Exception):
    """Raised when an undefined hook is called"""
    pass


class Hooks(object):
    """A convenient handler for hook functions.

    Example:
        hooks = Hooks()

        # register a hook, taking its name from the function name
        @hooks.hook()
        def install():
            ...

        # register a hook, providing a custom hook name
        @hooks.hook("config-changed")
        def config_changed():
            ...

        if __name__ == "__main__":
            # execute a hook based on the name the program is called by
            hooks.execute(sys.argv)
    """

    def __init__(self):
        super(Hooks, self).__init__()
        self._hooks = {}

    def register(self, name, function):
        """Register a hook"""
        self._hooks[name] = function

    def execute(self, args):
        """Execute a registered hook based on args[0]"""
        hook_name = os.path.basename(args[0])
        if hook_name in self._hooks:
            self._hooks[hook_name]()

@@ -324,6 +377,7 @@ class Hooks(object):
            raise UnregisteredHookError(hook_name)

    def hook(self, *hook_names):
        """Decorator, registering them as hooks"""
        def wrapper(decorated):
            for hook_name in hook_names:
                self.register(hook_name, decorated)

@@ -337,4 +391,5 @@ class Hooks(object):


def charm_dir():
    """Return the root directory of the current charm"""
    return os.environ.get('CHARM_DIR')
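A sketch of how a charm might guard on the new is_relation_made helper (the relation name, keys and the configure_database() call are illustrative):

    # Only proceed once the shared-db relation has provided both keys.
    if is_relation_made('shared-db', keys=['db_host', 'password']):
        configure_database()   # hypothetical charm-side function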
@@ -19,18 +19,22 @@ from hookenv import log


def service_start(service_name):
    """Start a system service"""
    return service('start', service_name)


def service_stop(service_name):
    """Stop a system service"""
    return service('stop', service_name)


def service_restart(service_name):
    """Restart a system service"""
    return service('restart', service_name)


def service_reload(service_name, restart_on_failure=False):
    """Reload a system service, optionally falling back to restart if reload fails"""
    service_result = service('reload', service_name)
    if not service_result and restart_on_failure:
        service_result = service('restart', service_name)

@@ -38,11 +42,13 @@ def service_reload(service_name, restart_on_failure=False):


def service(action, service_name):
    """Control a system service"""
    cmd = ['service', service_name, action]
    return subprocess.call(cmd) == 0


def service_running(service):
    """Determine whether a system service is running"""
    try:
        output = subprocess.check_output(['service', service, 'status'])
    except subprocess.CalledProcessError:

@@ -55,7 +61,7 @@ def service_running(service):


def adduser(username, password=None, shell='/bin/bash', system_user=False):
    """Add a user"""
    """Add a user to the system"""
    try:
        user_info = pwd.getpwnam(username)
        log('user {0} already exists!'.format(username))

@@ -138,7 +144,7 @@ def write_file(path, content, owner='root', group='root', perms=0444):


def mount(device, mountpoint, options=None, persist=False):
    '''Mount a filesystem'''
    """Mount a filesystem at a particular mountpoint"""
    cmd_args = ['mount']
    if options is not None:
        cmd_args.extend(['-o', options])

@@ -155,7 +161,7 @@ def mount(device, mountpoint, options=None, persist=False):


def umount(mountpoint, persist=False):
    '''Unmount a filesystem'''
    """Unmount a filesystem"""
    cmd_args = ['umount', mountpoint]
    try:
        subprocess.check_output(cmd_args)

@@ -169,7 +175,7 @@ def umount(mountpoint, persist=False):


def mounts():
    '''List of all mounted volumes as [[mountpoint,device],[...]]'''
    """Get a list of all mounted volumes as [[mountpoint,device],[...]]"""
    with open('/proc/mounts') as f:
        # [['/mount/point','/dev/path'],[...]]
        system_mounts = [m[1::-1] for m in [l.strip().split()

@@ -178,7 +184,7 @@ def mounts():


def file_hash(path):
    ''' Generate a md5 hash of the contents of 'path' or None if not found '''
    """Generate a md5 hash of the contents of 'path' or None if not found """
    if os.path.exists(path):
        h = hashlib.md5()
        with open(path, 'r') as source:

@@ -189,7 +195,7 @@ def file_hash(path):


def restart_on_change(restart_map):
    ''' Restart services based on configuration files changing
    """Restart services based on configuration files changing

    This function is used a decorator, for example

@@ -202,7 +208,7 @@ def restart_on_change(restart_map):
    In this example, the cinder-api and cinder-volume services
    would be restarted if /etc/ceph/ceph.conf is changed by the
    ceph_client_changed function.
    '''
    """
    def wrap(f):
        def wrapped_f(*args):
            checksums = {}

@@ -220,7 +226,7 @@ def restart_on_change(restart_map):


def lsb_release():
    '''Return /etc/lsb-release in a dict'''
    """Return /etc/lsb-release in a dict"""
    d = {}
    with open('/etc/lsb-release', 'r') as lsb:
        for l in lsb:

@@ -230,7 +236,7 @@ def lsb_release():


def pwgen(length=None):
    '''Generate a random pasword.'''
    """Generate a random pasword."""
    if length is None:
        length = random.choice(range(35, 45))
    alphanumeric_chars = [
@@ -20,6 +20,32 @@ deb http://ubuntu-cloud.archive.canonical.com/ubuntu {} main
PROPOSED_POCKET = """# Proposed
deb http://archive.ubuntu.com/ubuntu {}-proposed main universe multiverse restricted
"""
CLOUD_ARCHIVE_POCKETS = {
    # Folsom
    'folsom': 'precise-updates/folsom',
    'precise-folsom': 'precise-updates/folsom',
    'precise-folsom/updates': 'precise-updates/folsom',
    'precise-updates/folsom': 'precise-updates/folsom',
    'folsom/proposed': 'precise-proposed/folsom',
    'precise-folsom/proposed': 'precise-proposed/folsom',
    'precise-proposed/folsom': 'precise-proposed/folsom',
    # Grizzly
    'grizzly': 'precise-updates/grizzly',
    'precise-grizzly': 'precise-updates/grizzly',
    'precise-grizzly/updates': 'precise-updates/grizzly',
    'precise-updates/grizzly': 'precise-updates/grizzly',
    'grizzly/proposed': 'precise-proposed/grizzly',
    'precise-grizzly/proposed': 'precise-proposed/grizzly',
    'precise-proposed/grizzly': 'precise-proposed/grizzly',
    # Havana
    'havana': 'precise-updates/havana',
    'precise-havana': 'precise-updates/havana',
    'precise-havana/updates': 'precise-updates/havana',
    'precise-updates/havana': 'precise-updates/havana',
    'havana/proposed': 'precise-proposed/havana',
    'precies-havana/proposed': 'precise-proposed/havana',
    'precise-proposed/havana': 'precise-proposed/havana',
}


def filter_installed_packages(packages):

@@ -79,16 +105,35 @@ def apt_purge(packages, fatal=False):
    subprocess.call(cmd)


def apt_hold(packages, fatal=False):
    cmd = ['apt-mark', 'hold']
    if isinstance(packages, basestring):
        cmd.append(packages)
    else:
        cmd.extend(packages)
    log("Holding {}".format(packages))
    if fatal:
        subprocess.check_call(cmd)
    else:
        subprocess.call(cmd)


def add_source(source, key=None):
    if ((source.startswith('ppa:') or
         source.startswith('http:'))):
    if (source.startswith('ppa:') or
            source.startswith('http:') or
            source.startswith('deb ') or
            source.startswith('cloud-archive:')):
        subprocess.check_call(['add-apt-repository', '--yes', source])
    elif source.startswith('cloud:'):
        apt_install(filter_installed_packages(['ubuntu-cloud-keyring']),
                    fatal=True)
        pocket = source.split(':')[-1]
        if pocket not in CLOUD_ARCHIVE_POCKETS:
            raise SourceConfigError('Unsupported cloud: source option %s' % pocket)
        actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
        with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
            apt.write(CLOUD_ARCHIVE.format(pocket))
            apt.write(CLOUD_ARCHIVE.format(actual_pocket))
    elif source == 'proposed':
        release = lsb_release()['DISTRIB_CODENAME']
        with open('/etc/apt/sources.list.d/proposed.list', 'w') as apt:

@@ -118,8 +163,11 @@ def configure_sources(update=False,
    Note that 'null' (a.k.a. None) should not be quoted.
    """
    sources = safe_load(config(sources_var))
    keys = safe_load(config(keys_var))
    if isinstance(sources, basestring) and isinstance(keys, basestring):
    keys = config(keys_var)
    if keys is not None:
        keys = safe_load(keys)
    if isinstance(sources, basestring) and (
            keys is None or isinstance(keys, basestring)):
        add_source(sources, keys)
    else:
        if not len(sources) == len(keys):
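Example of the new cloud archive handling in add_source (the pocket name is taken from the CLOUD_ARCHIVE_POCKETS table above):

    # 'cloud:precise-grizzly' maps to 'precise-updates/grizzly' and is written
    # to /etc/apt/sources.list.d/cloud-archive.list after installing
    # ubuntu-cloud-keyring.
    add_source('cloud:precise-grizzly')
    apt_update(fatal=True)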
@@ -12,6 +12,7 @@ except ImportError:
    apt_install("python-bzrlib")
    from bzrlib.branch import Branch


class BzrUrlFetchHandler(BaseFetchHandler):
    """Handler for bazaar branches via generic and lp URLs"""
    def can_handle(self, source):

@@ -46,4 +47,3 @@ class BzrUrlFetchHandler(BaseFetchHandler):
        except OSError as e:
            raise UnhandledSource(e.strerror)
        return dest_dir
hooks/nova-vmware-relation-changed (new symbolic link)
@@ -0,0 +1 @@
nova_cc_hooks.py
hooks/nova-vmware-relation-joined (new symbolic link)
@@ -0,0 +1 @@
nova_cc_hooks.py
@@ -139,6 +139,16 @@ class NeutronCCContext(context.NeutronContext):
    def __call__(self):
        ctxt = super(NeutronCCContext, self).__call__()
        ctxt['external_network'] = config('neutron-external-network')
        if 'nvp' in [config('quantum-plugin'), config('neutron-plugin')]:
            _config = config()
            for k, v in _config.iteritems():
                if k.startswith('nvp'):
                    ctxt[k.replace('-', '_')] = v
            if 'nvp-controllers' in _config:
                ctxt['nvp_controllers'] = \
                    ','.join(_config['nvp-controllers'].split())
                ctxt['nvp_controllers_list'] = \
                    _config['nvp-controllers'].split()
        return ctxt
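With, for example, nvp-controllers set to "10.0.0.10 10.0.0.11" (addresses are illustrative), the extra keys produced by this block would look like:

    {
        'nvp_controllers': '10.0.0.10,10.0.0.11',        # comma-joined for nvp.ini
        'nvp_controllers_list': ['10.0.0.10', '10.0.0.11'],
        'nvp_username': 'admin',
        'nvp_password': 'admin',    # plus the remaining nvp-* options, dashes mapped to underscores
    }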
@@ -3,6 +3,7 @@
import os
import shutil
import sys
import uuid

from subprocess import check_call
from urlparse import urlparse

@@ -25,7 +26,7 @@ from charmhelpers.core.host import (
)

from charmhelpers.fetch import (
    apt_install, apt_update, filter_installed_packages
    apt_install, apt_update
)

from charmhelpers.contrib.openstack.utils import (

@@ -148,6 +149,9 @@ def db_changed():

    if eligible_leader(CLUSTER_RES):
        migrate_database()
        log('Triggering remote cloud-compute restarts.')
        [compute_joined(rid=rid, remote_restart=True)
            for rid in relation_ids('cloud-compute')]


@hooks.hook('image-service-relation-changed')

@@ -184,6 +188,7 @@ def identity_changed():
    CONFIGS.write(NEUTRON_CONF)
    [compute_joined(rid) for rid in relation_ids('cloud-compute')]
    [quantum_joined(rid) for rid in relation_ids('quantum-network-service')]
    [nova_vmware_relation_joined(rid) for rid in relation_ids('nova-vmware')]
    configure_https()


@@ -230,19 +235,9 @@ def save_novarc():
        out.write('export OS_REGION_NAME=%s\n' % config('region'))


@hooks.hook('cloud-compute-relation-joined')
def compute_joined(rid=None):
    if not eligible_leader(CLUSTER_RES):
        return
    rel_settings = {
        'network_manager': network_manager(),
        'volume_service': volume_service(),
        # (comment from bash vers) XXX Should point to VIP if clustered, or
        # this may not even be needed.
        'ec2_host': unit_get('private-address'),
    }

def keystone_compute_settings():
    ks_auth_config = _auth_config()
    rel_settings = {}

    if network_manager() in ['quantum', 'neutron']:
        if ks_auth_config:

@@ -260,6 +255,28 @@ def compute_joined(rid=None):
    ks_ca = keystone_ca_cert_b64()
    if ks_auth_config and ks_ca:
        rel_settings['ca_cert'] = ks_ca

    return rel_settings


@hooks.hook('cloud-compute-relation-joined')
def compute_joined(rid=None, remote_restart=False):
    if not eligible_leader(CLUSTER_RES):
        return
    rel_settings = {
        'network_manager': network_manager(),
        'volume_service': volume_service(),
        # (comment from bash vers) XXX Should point to VIP if clustered, or
        # this may not even be needed.
        'ec2_host': unit_get('private-address'),
    }

    # update relation setting if we're attempting to restart remote
    # services
    if remote_restart:
        rel_settings['restart_trigger'] = str(uuid.uuid4())

    rel_settings.update(keystone_compute_settings())
    relation_set(relation_id=rid, **rel_settings)


@@ -287,15 +304,6 @@ def quantum_joined(rid=None):
    if not eligible_leader(CLUSTER_RES):
        return

    if network_manager() == 'quantum':
        pkg = 'quantum-server'
    else:
        pkg = 'neutron-server'

    required_pkg = filter_installed_packages([pkg])
    if required_pkg:
        apt_install(required_pkg)

    url = canonical_url(CONFIGS) + ':9696'
    # XXX: Can we rename to neutron_*?
    rel_settings = {

@@ -397,6 +405,28 @@ def configure_https():
        identity_joined(rid=rid)


@hooks.hook()
def nova_vmware_relation_joined(rid=None):
    rel_settings = {'network_manager': network_manager()}

    ks_auth = _auth_config()
    if ks_auth:
        rel_settings.update(ks_auth)
        rel_settings.update({
            'quantum_plugin': neutron_plugin(),
            'quantum_security_groups': config('quantum-security-groups'),
            'quantum_url': (canonical_url(CONFIGS) + ':' +
                            str(api_port('neutron-server')))})

    relation_set(relation_id=rid, **rel_settings)


@hooks.hook('nova-vmware-relation-changed')
@restart_on_change(restart_map())
def nova_vmware_relation_changed():
    CONFIGS.write('/etc/nova/nova.conf')


@hooks.hook('upgrade-charm')
def upgrade_charm():
    for r_id in relation_ids('amqp'):
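A sketch of how the consuming compute charm could react to the new restart_trigger setting (hypothetical hook code, not part of this commit):

    @hooks.hook('cloud-compute-relation-changed')
    def cloud_compute_changed():
        # compute_joined() writes a fresh uuid whenever remote_restart=True,
        # so any change in the value signals that services should restart.
        if relation_get('restart_trigger'):
            service_restart('nova-compute')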
@@ -76,6 +76,8 @@ NEUTRON_CONF = '/etc/neutron/neutron.conf'
HAPROXY_CONF = '/etc/haproxy/haproxy.cfg'
APACHE_CONF = '/etc/apache2/sites-available/openstack_https_frontend'
APACHE_24_CONF = '/etc/apache2/sites-available/openstack_https_frontend.conf'
NEUTRON_DEFAULT = '/etc/default/neutron-server'
QUANTUM_DEFAULT = '/etc/default/quantum-server'

BASE_RESOURCE_MAP = OrderedDict([
    (NOVA_CONF, {

@@ -84,6 +86,11 @@ BASE_RESOURCE_MAP = OrderedDict([
        context.SharedDBContext(relation_prefix='nova'),
        context.ImageServiceContext(),
        context.OSConfigFlagContext(),
        context.SubordinateConfigContext(
            interface='nova-vmware',
            service='nova',
            config_file=NOVA_CONF,
        ),
        nova_cc_context.HAProxyContext(),
        nova_cc_context.IdentityServiceContext(),
        nova_cc_context.VolumeServiceContext(),

@@ -100,6 +107,10 @@ BASE_RESOURCE_MAP = OrderedDict([
        nova_cc_context.IdentityServiceContext(),
        nova_cc_context.NeutronCCContext()],
    }),
    (QUANTUM_DEFAULT, {
        'services': ['quantum-server'],
        'contexts': [nova_cc_context.NeutronCCContext()],
    }),
    (QUANTUM_API_PASTE, {
        'services': ['quantum-server'],
        'contexts': [nova_cc_context.IdentityServiceContext()],

@@ -111,6 +122,10 @@ BASE_RESOURCE_MAP = OrderedDict([
        nova_cc_context.NeutronCCContext(),
        nova_cc_context.HAProxyContext()],
    }),
    (NEUTRON_DEFAULT, {
        'services': ['neutron-server'],
        'contexts': [nova_cc_context.NeutronCCContext()],
    }),
    (HAPROXY_CONF, {
        'contexts': [context.HAProxyContext(),
                     nova_cc_context.HAProxyContext()],

@@ -166,11 +181,12 @@ def resource_map():
        plugin = neutron_plugin()
        if plugin:
            conf = neutron_plugin_attribute(plugin, 'config', net_manager)
            service = '%s-server' % net_manager
            ctxts = (neutron_plugin_attribute(plugin, 'contexts', net_manager)
                     or [])
            services = neutron_plugin_attribute(plugin, 'server_services',
                                                net_manager)
            resource_map[conf] = {}
            resource_map[conf]['services'] = [service]
            resource_map[conf]['services'] = services
            resource_map[conf]['contexts'] = ctxts
            resource_map[conf]['contexts'].append(
                nova_cc_context.NeutronCCContext())

@@ -178,6 +194,16 @@ def resource_map():
    # nova-conductor for releases >= G.
    if os_release('nova-common') not in ['essex', 'folsom']:
        resource_map['/etc/nova/nova.conf']['services'] += ['nova-conductor']

    # also manage any configs that are being updated by subordinates.
    vmware_ctxt = context.SubordinateConfigContext(interface='nova-vmware',
                                                   service='nova',
                                                   config_file=NOVA_CONF)
    vmware_ctxt = vmware_ctxt()
    if vmware_ctxt and 'services' in vmware_ctxt:
        for s in vmware_ctxt['services']:
            if s not in resource_map[NOVA_CONF]['services']:
                resource_map[NOVA_CONF]['services'].append(s)
    return resource_map


@@ -217,6 +243,10 @@ def determine_packages():
    packages = [] + BASE_PACKAGES
    for k, v in resource_map().iteritems():
        packages.extend(v['services'])
    if network_manager() in ['neutron', 'quantum']:
        pkgs = neutron_plugin_attribute(neutron_plugin(), 'server_packages',
                                        network_manager())
        packages.extend(pkgs)
    return list(set(packages))
@@ -29,6 +29,9 @@ requires:
  ha:
    interface: hacluster
    scope: container
  nova-vmware:
    interface: nova-vmware
    scope: container
peers:
  cluster:
    interface: nova-ha
revision (2 lines changed)
@@ -1 +1 @@
307
311
@@ -57,6 +57,14 @@ default_floating_pool = {{ external_network }}
{% endif -%}
{% endif -%}

{% if neutron_plugin and neutron_plugin == 'nvp' -%}
security_group_api = neutron
nova_firewall_driver = nova.virt.firewall.NoopFirewallDriver
{% if external_network -%}
default_floating_pool = {{ external_network }}
{% endif -%}
{% endif -%}

{% if network_manager_config -%}
{% for key, value in network_manager_config.iteritems() -%}
{{ key }} = {{ value }}

@@ -90,3 +98,9 @@ volume_api_class=nova.volume.cinder.API
{{ key }} = {{ value }}
{% endfor -%}
{% endif -%}

{% if sections and 'DEFAULT' in sections -%}
{% for key, value in sections['DEFAULT'] -%}
{{ key }} = {{ value }}
{% endfor -%}
{% endif -%}
templates/folsom/quantum-server (new file, 6 lines)
@@ -0,0 +1,6 @@
# quantum
###############################################################################
# [ WARNING ]
# Configuration file maintained by Juju. Local changes may be overwritten.
###############################################################################
QUANTUM_PLUGIN_CONFIG="{{ config }}"
templates/havana/neutron-server (new file, 6 lines)
@@ -0,0 +1,6 @@
# havana
###############################################################################
# [ WARNING ]
# Configuration file maintained by Juju. Local changes may be overwritten.
###############################################################################
NEUTRON_PLUGIN_CONFIG="{{ config }}"
templates/havana/nvp.ini (new file, 11 lines)
@@ -0,0 +1,11 @@
# havana
###############################################################################
# [ WARNING ]
# Configuration file maintained by Juju. Local changes may be overwritten.
###############################################################################
[DEFAULT]
nvp_user = {{ nvp_username }}
nvp_password = {{ nvp_password }}
nvp_controllers = {{ nvp_controllers }}
default_tz_uuid = {{ nvp_tz_uuid }}
default_l3_gw_service_uuid = {{ nvp_l3_uuid }}
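A minimal sketch of rendering this template outside the charm using plain Jinja2 (the charm itself renders it through its OpenStack templating helpers; all values below are illustrative):

    from jinja2 import Template

    ctxt = {'nvp_username': 'admin', 'nvp_password': 'secret',
            'nvp_controllers': '10.0.0.10,10.0.0.11',
            'nvp_tz_uuid': '11111111-1111-1111-1111-111111111111',
            'nvp_l3_uuid': '22222222-2222-2222-2222-222222222222'}
    with open('templates/havana/nvp.ini') as f:
        print(Template(f.read()).render(ctxt))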
@@ -17,8 +17,10 @@ utils.restart_map = _map


TO_PATCH = [
    'api_port',
    'apt_update',
    'apt_install',
    'canonical_url',
    'configure_installation_source',
    'charm_dir',
    'do_openstack_upgrade',

@@ -33,11 +35,32 @@ TO_PATCH = [
    'ssh_known_hosts_b64',
    'ssh_authorized_keys_b64',
    'save_script_rc',
    'execd_preinstall'
    'execd_preinstall',
    'network_manager',
    'volume_service',
    'unit_get',
    'eligible_leader',
    'keystone_ca_cert_b64',
    'neutron_plugin',
]


FAKE_KS_AUTH_CFG = {
    'auth_host': 'kshost',
    'auth_port': '5000',
    'service_port': 'token',
    'service_username': 'admin_user',
    'service_password': 'admin_passwd',
    'service_tenant_name': 'admin_tenant',
    'auth_uri': 'http://kshost:5000/v2',
    # quantum-gateway interface deviates a bit.
    'keystone_host': 'kshost',
    'service_tenant': 'service_tenant',
}


class NovaCCHooksTests(CharmTestCase):

    def setUp(self):
        super(NovaCCHooksTests, self).setUp(hooks, TO_PATCH)
        self.config.side_effect = self.test_config.get

@@ -76,3 +99,42 @@ class NovaCCHooksTests(CharmTestCase):
        self.ssh_compute_add.assert_called_with('fookey')
        self.relation_set.assert_called_with(known_hosts='hosts',
                                             authorized_keys='keys')

    @patch.object(hooks, '_auth_config')
    def test_compute_joined_neutron(self, auth_config):
        self.network_manager.return_value = 'neutron'
        self.eligible_leader = True
        self.keystone_ca_cert_b64.return_value = 'foocert64'
        self.volume_service.return_value = 'cinder'
        self.unit_get.return_value = 'nova-cc-host1'
        self.canonical_url.return_value = 'http://nova-cc-host1'
        self.api_port.return_value = '9696'
        self.neutron_plugin.return_value = 'nvp'
        auth_config.return_value = FAKE_KS_AUTH_CFG
        hooks.compute_joined()

        self.relation_set.assert_called_with(
            relation_id=None,
            quantum_url='http://nova-cc-host1:9696',
            ca_cert='foocert64',
            quantum_security_groups='no',
            region='RegionOne',
            volume_service='cinder',
            ec2_host='nova-cc-host1',
            quantum_plugin='nvp',
            network_manager='neutron', **FAKE_KS_AUTH_CFG)

    @patch.object(hooks, '_auth_config')
    def test_nova_vmware_joined(self, auth_config):
        auth_config.return_value = FAKE_KS_AUTH_CFG
        # quantum-security-groups, plugin
        self.neutron_plugin.return_value = 'nvp'
        self.network_manager.return_value = 'neutron'
        self.canonical_url.return_value = 'http://nova-cc-host1'
        self.api_port.return_value = '9696'
        hooks.nova_vmware_relation_joined()
        self.relation_set.assert_called_with(
            network_manager='neutron', quantum_security_groups='no',
            quantum_url='http://nova-cc-host1:9696', quantum_plugin='nvp',
            relation_id=None,
            **FAKE_KS_AUTH_CFG)
@@ -70,10 +70,11 @@ RESTART_MAP = OrderedDict([
        'nova-api-ec2', 'nova-api-os-compute'
    ]),
    ('/etc/neutron/neutron.conf', ['neutron-server']),
    ('/etc/default/neutron-server', ['neutron-server']),
    ('/etc/haproxy/haproxy.cfg', ['haproxy']),
    ('/etc/apache2/sites-available/openstack_https_frontend', ['apache2']),
    ('/etc/quantum/plugins/openvswitch/ovs_quantum_plugin.ini',
     ['neutron-server'])
     ['quantum-server'])
])


@@ -87,13 +88,17 @@ PLUGIN_ATTRIBUTES = {
        'services': ['quantum-plugin-openvswitch-agent'],
        'packages': ['quantum-plugin-openvswitch-agent',
                     'openvswitch-datapath-dkms'],
        'server_packages': ['quantum-server', 'quantum-plugin-openvswitch'],
        'server_services': ['quantum-server'],
    },
    'nvp': {
        'config': '/etc/quantum/plugins/nicira/nvp.ini',
        'driver': 'quantum.plugins.nicira.nicira_nvp_plugin.'
                  'QuantumPlugin.NvpPluginV2',
        'services': [],
        'packages': ['quantum-plugin-nicira'],
        'packages': [],
        'server_packages': ['quantum-server', 'quantum-plugin-nicria'],
        'server_services': ['quantum-server'],
    }
}

@@ -107,6 +112,7 @@ def fake_plugin_attribute(plugin, attr, net_manager):


class NovaCCUtilsTests(CharmTestCase):

    def setUp(self):
        super(NovaCCUtilsTests, self).setUp(utils, TO_PATCH)
        self.config.side_effect = self.test_config.get

@@ -119,9 +125,13 @@ class NovaCCUtilsTests(CharmTestCase):
        self.neutron_plugin_attribute.side_effect = fake_plugin_attribute
        if volume_manager == 'nova-volume':
            self.relation_ids.return_value = 'nova-volume-service:0'
        return utils.resource_map()
        with patch('charmhelpers.contrib.openstack.context.'
                   'SubordinateConfigContext'):
            _map = utils.resource_map()
            return _map

    def test_resource_map_quantum(self):
    @patch('charmhelpers.contrib.openstack.context.SubordinateConfigContext')
    def test_resource_map_quantum(self, subcontext):
        self._resource_map(network_manager='quantum')
        _map = utils.resource_map()
        confs = [

@@ -131,7 +141,8 @@ class NovaCCUtilsTests(CharmTestCase):
        ]
        [self.assertIn(q_conf, _map.keys()) for q_conf in confs]

    def test_resource_map_neutron(self):
    @patch('charmhelpers.contrib.openstack.context.SubordinateConfigContext')
    def test_resource_map_neutron(self, subcontext):
        self._resource_map(network_manager='neutron')
        _map = utils.resource_map()
        confs = [

@@ -139,7 +150,21 @@ class NovaCCUtilsTests(CharmTestCase):
        ]
        [self.assertIn(q_conf, _map.keys()) for q_conf in confs]

    def test_resource_map_neutron_no_agent_installed(self):
    @patch('charmhelpers.contrib.openstack.context.SubordinateConfigContext')
    def test_resource_map_vmware(self, subcontext):
        fake_context = MagicMock()
        fake_context.return_value = {
            'sections': [],
            'services': ['nova-compute', 'nova-network'],

        }
        subcontext.return_value = fake_context
        _map = utils.resource_map()
        for s in ['nova-compute', 'nova-network']:
            self.assertIn(s, _map['/etc/nova/nova.conf']['services'])

    @patch('charmhelpers.contrib.openstack.context.SubordinateConfigContext')
    def test_resource_map_neutron_no_agent_installed(self, subcontext):
        self._resource_map(network_manager='neutron')
        _map = utils.resource_map()
        services = []

@@ -147,22 +172,25 @@ class NovaCCUtilsTests(CharmTestCase):
        for svc in services:
            self.assertNotIn('agent', svc)

    def test_resource_map_nova_volume(self):
    @patch('charmhelpers.contrib.openstack.context.SubordinateConfigContext')
    def test_resource_map_nova_volume(self, subcontext):
        self.relation_ids.return_value = ['nova-volume-service:0']
        _map = utils.resource_map()
        self.assertIn('nova-api-os-volume',
                      _map['/etc/nova/nova.conf']['services'])

    @patch('os.path.exists')
    def test_restart_map_api_before_frontends(self, _exists):
    @patch('charmhelpers.contrib.openstack.context.SubordinateConfigContext')
    def test_restart_map_api_before_frontends(self, subcontext, _exists):
        _exists.return_value = False
        self._resource_map(network_manager='neutron')
        _map = utils.restart_map()
        self.assertTrue(isinstance(_map, OrderedDict))
        self.assertEquals(_map, RESTART_MAP)

    @patch('charmhelpers.contrib.openstack.context.SubordinateConfigContext')
    @patch('os.path.exists')
    def test_restart_map_apache24(self, _exists):
    def test_restart_map_apache24(self, _exists, subcontext):
        _exists.return_Value = True
        self._resource_map(network_manager='neutron')
        _map = utils.restart_map()

@@ -171,29 +199,34 @@ class NovaCCUtilsTests(CharmTestCase):
        self.assertTrue('/etc/apache2/sites-available/'
                        'openstack_https_frontend' not in _map)

    def test_determine_packages_quantum(self):
    @patch('charmhelpers.contrib.openstack.context.SubordinateConfigContext')
    def test_determine_packages_quantum(self, subcontext):
        self._resource_map(network_manager='quantum')
        pkgs = utils.determine_packages()
        self.assertIn('quantum-server', pkgs)

    def test_determine_packages_neutron(self):
    @patch('charmhelpers.contrib.openstack.context.SubordinateConfigContext')
    def test_determine_packages_neutron(self, subcontext):
        self._resource_map(network_manager='neutron')
        pkgs = utils.determine_packages()
        self.assertIn('neutron-server', pkgs)

    def test_determine_packages_nova_volume(self):
    @patch('charmhelpers.contrib.openstack.context.SubordinateConfigContext')
    def test_determine_packages_nova_volume(self, subcontext):
        self.relation_ids.return_value = ['nova-volume-service:0']
        pkgs = utils.determine_packages()
        self.assertIn('nova-api-os-volume', pkgs)

    def test_determine_packages_base(self):
    @patch('charmhelpers.contrib.openstack.context.SubordinateConfigContext')
    def test_determine_packages_base(self, subcontext):
        self.relation_ids.return_value = []
        self.os_release.return_value = 'folsom'
        pkgs = utils.determine_packages()
        ex = list(set(utils.BASE_PACKAGES + utils.BASE_SERVICES))
        self.assertEquals(ex, pkgs)

    def test_determine_packages_base_grizzly_beyond(self):
    @patch('charmhelpers.contrib.openstack.context.SubordinateConfigContext')
    def test_determine_packages_base_grizzly_beyond(self, subcontext):
        self.relation_ids.return_value = []
        self.os_release.return_value = 'grizzly'
        pkgs = utils.determine_packages()
@@ -45,6 +45,7 @@ def get_default_config():


class CharmTestCase(unittest.TestCase):

    def setUp(self, obj, patches):
        super(CharmTestCase, self).setUp()
        self.patches = patches

@@ -65,6 +66,7 @@ class CharmTestCase(unittest.TestCase):


class TestConfig(object):

    def __init__(self):
        self.config = get_default_config()

@@ -86,6 +88,7 @@ class TestConfig(object):


class TestRelation(object):

    def __init__(self, relation_data={}):
        self.relation_data = relation_data