Switch to using charm-store for amulet tests
All OpenStack charms are now published directly to the charm store on landing. Switch the Amulet helper to resolve charms via the charm store rather than bzr branches, removing the lag between charm changes landing and becoming available for other charms to use in testing. This is also important for new layered charms, where the charm must be built and published before it can be consumed.

Change-Id: I1b601d88e32368b0f0705795ba03272a13816bb2
parent 85cf60219b
commit 6ee7c2fc17
@@ -41,10 +41,11 @@ from charmhelpers.core.hookenv import (
     relation_get,
     config as config_get,
     INFO,
     ERROR,
     DEBUG,
     WARNING,
     unit_get,
-    is_leader as juju_is_leader
+    is_leader as juju_is_leader,
+    status_set,
 )
 from charmhelpers.core.decorators import (
     retry_on_exception,
@@ -60,6 +61,10 @@ class HAIncompleteConfig(Exception):
     pass


+class HAIncorrectConfig(Exception):
+    pass
+
+
 class CRMResourceNotFound(Exception):
     pass

@@ -274,27 +279,71 @@ def get_hacluster_config(exclude_keys=None):
     Obtains all relevant configuration from charm configuration required
     for initiating a relation to hacluster:

-        ha-bindiface, ha-mcastport, vip
+        ha-bindiface, ha-mcastport, vip, os-internal-hostname,
+        os-admin-hostname, os-public-hostname

     param: exclude_keys: list of setting key(s) to be excluded.
     returns: dict: A dict containing settings keyed by setting name.
-    raises: HAIncompleteConfig if settings are missing.
+    raises: HAIncompleteConfig if settings are missing or incorrect.
     '''
-    settings = ['ha-bindiface', 'ha-mcastport', 'vip']
+    settings = ['ha-bindiface', 'ha-mcastport', 'vip', 'os-internal-hostname',
+                'os-admin-hostname', 'os-public-hostname']
     conf = {}
     for setting in settings:
         if exclude_keys and setting in exclude_keys:
             continue

         conf[setting] = config_get(setting)
-    missing = []
-    [missing.append(s) for s, v in six.iteritems(conf) if v is None]
-    if missing:
-        log('Insufficient config data to configure hacluster.', level=ERROR)
-        raise HAIncompleteConfig
+
+    if not valid_hacluster_config():
+        raise HAIncorrectConfig('Insufficient or incorrect config data to '
+                                'configure hacluster.')
     return conf


+def valid_hacluster_config():
+    '''
+    Check that either vip or dns-ha is set. If dns-ha then one of os-*-hostname
+    must be set.
+
+    Note: ha-bindiface and ha-mcastport both have defaults and will always
+    be set. We only care that either vip or dns-ha is set.
+
+    :returns: boolean: valid config returns true.
+    raises: HAIncorrectConfig if settings conflict.
+    raises: HAIncompleteConfig if settings are missing.
+    '''
+    vip = config_get('vip')
+    dns = config_get('dns-ha')
+    if not(bool(vip) ^ bool(dns)):
+        msg = ('HA: Either vip or dns-ha must be set but not both in order to '
+               'use high availability')
+        status_set('blocked', msg)
+        raise HAIncorrectConfig(msg)
+
+    # If dns-ha then one of os-*-hostname must be set
+    if dns:
+        dns_settings = ['os-internal-hostname', 'os-admin-hostname',
+                        'os-public-hostname']
+        # At this point it is unknown if one or all of the possible
+        # network spaces are in HA. Validate at least one is set which is
+        # the minimum required.
+        for setting in dns_settings:
+            if config_get(setting):
+                log('DNS HA: At least one hostname is set {}: {}'
+                    ''.format(setting, config_get(setting)),
+                    level=DEBUG)
+                return True
+
+        msg = ('DNS HA: At least one os-*-hostname(s) must be set to use '
+               'DNS HA')
+        status_set('blocked', msg)
+        raise HAIncompleteConfig(msg)
+
+    log('VIP HA: VIP is set {}'.format(vip), level=DEBUG)
+    return True


 def canonical_url(configs, vip_setting='vip'):
     '''
     Returns the correct HTTP URL to this host given the state of HTTPS
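Taken together, the hunk above replaces the old missing-key check with an exclusive-or rule: exactly one of vip or dns-ha must be set, and DNS HA additionally needs at least one os-*-hostname. A minimal standalone sketch of that rule (values are hypothetical; the real code reads them via config_get):

    # Standalone rendering of the new validation logic, for illustration only.
    def check_ha_config(vip, dns_ha, hostnames):
        if not (bool(vip) ^ bool(dns_ha)):     # exactly one of vip / dns-ha
            raise ValueError('Either vip or dns-ha must be set, but not both')
        if dns_ha and not any(hostnames):      # DNS HA needs >= 1 os-*-hostname
            raise ValueError('At least one os-*-hostname must be set for DNS HA')
        return True

    check_ha_config('10.0.0.5', None, [])                   # VIP HA -> True
    check_ha_config(None, True, ['ks.example.com'])         # DNS HA -> True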
@@ -43,9 +43,6 @@ class OpenStackAmuletDeployment(AmuletDeployment):
         self.openstack = openstack
         self.source = source
         self.stable = stable
-        # Note(coreycb): this needs to be changed when new next branches come
-        # out.
-        self.current_next = "trusty"

     def get_logger(self, name="deployment-logger", level=logging.DEBUG):
         """Get a logger object that will log to stdout."""
@@ -72,38 +69,34 @@ class OpenStackAmuletDeployment(AmuletDeployment):

         self.log.info('OpenStackAmuletDeployment: determine branch locations')

-        # Charms outside the lp:~openstack-charmers namespace
-        base_charms = ['mysql', 'mongodb', 'nrpe']
-
-        # Force these charms to current series even when using an older series.
-        # ie. Use trusty/nrpe even when series is precise, as the P charm
-        # does not possess the necessary external master config and hooks.
-        force_series_current = ['nrpe']
-
-        if self.series in ['precise', 'trusty']:
-            base_series = self.series
-        else:
-            base_series = self.current_next
+        # Charms outside the ~openstack-charmers
+        base_charms = {
+            'mysql': ['precise', 'trusty'],
+            'mongodb': ['precise', 'trusty'],
+            'nrpe': ['precise', 'trusty'],
+        }

         for svc in other_services:
-            if svc['name'] in force_series_current:
-                base_series = self.current_next
             # If a location has been explicitly set, use it
             if svc.get('location'):
                 continue
-            if self.stable:
-                temp = 'lp:charms/{}/{}'
-                svc['location'] = temp.format(base_series,
-                                              svc['name'])
+            if svc['name'] in base_charms:
+                # NOTE: not all charms have support for all series we
+                # want/need to test against, so fix to most recent
+                # that each base charm supports
+                target_series = self.series
+                if self.series not in base_charms[svc['name']]:
+                    target_series = base_charms[svc['name']][-1]
+                svc['location'] = 'cs:{}/{}'.format(target_series,
+                                                    svc['name'])
+            elif self.stable:
+                svc['location'] = 'cs:{}/{}'.format(self.series,
+                                                    svc['name'])
             else:
-                if svc['name'] in base_charms:
-                    temp = 'lp:charms/{}/{}'
-                    svc['location'] = temp.format(base_series,
-                                                  svc['name'])
-                else:
-                    temp = 'lp:~openstack-charmers/charms/{}/{}/next'
-                    svc['location'] = temp.format(self.current_next,
-                                                  svc['name'])
+                svc['location'] = 'cs:~openstack-charmers-next/{}/{}'.format(
+                    self.series,
+                    svc['name']
+                )

         return other_services
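The net effect is that every charm now resolves to a charm store (cs:) URL instead of a Launchpad branch. An illustrative resolution for series 'xenial' with stable=False (the service names here are examples, not part of the change):

    # Hypothetical inputs/outputs for _determine_branch_locations:
    other_services = [
        {'name': 'keystone'},                # OpenStack charm
        {'name': 'mysql'},                   # base charm, no xenial entry
        {'name': 'rabbitmq-server', 'location': 'cs:~me/rabbitmq-server'},
    ]
    # After resolution:
    #   keystone        -> 'cs:~openstack-charmers-next/xenial/keystone'
    #   mysql           -> 'cs:trusty/mysql' (newest series the base charm supports)
    #   rabbitmq-server -> unchanged; an explicit location always wins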
hooks/charmhelpers/contrib/openstack/ha/__init__.py (new file, 0 lines)
hooks/charmhelpers/contrib/openstack/ha/utils.py (new file, 111 lines)
@@ -0,0 +1,111 @@
+# Copyright 2014-2016 Canonical Limited.
+#
+# This file is part of charm-helpers.
+#
+# charm-helpers is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License version 3 as
+# published by the Free Software Foundation.
+#
+# charm-helpers is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with charm-helpers.  If not, see <http://www.gnu.org/licenses/>.
+
+#
+# Copyright 2016 Canonical Ltd.
+#
+# Authors:
+#   Openstack Charmers <
+#
+
+"""
+Helpers for high availability.
+"""
+
+import re
+
+from charmhelpers.core.hookenv import (
+    log,
+    relation_set,
+    charm_name,
+    config,
+    status_set,
+    DEBUG,
+)
+
+from charmhelpers.contrib.openstack.ip import (
+    resolve_address,
+)
+
+
+class DNSHAException(Exception):
+    """Raised when an error occurs setting up DNS HA
+    """
+
+    pass
+
+
+def update_dns_ha_resource_params(resources, resource_params,
+                                  relation_id=None,
+                                  crm_ocf='ocf:maas:dns'):
+    """ Check for os-*-hostname settings and update resource dictionaries for
+    the HA relation.
+
+    @param resources: Pointer to dictionary of resources.
+                      Usually instantiated in ha_joined().
+    @param resource_params: Pointer to dictionary of resource parameters.
+                            Usually instantiated in ha_joined()
+    @param relation_id: Relation ID of the ha relation
+    @param crm_ocf: Corosync Open Cluster Framework resource agent to use for
+                    DNS HA
+    """
+
+    settings = ['os-admin-hostname', 'os-internal-hostname',
+                'os-public-hostname']
+
+    # Check which DNS settings are set and update dictionaries
+    hostname_group = []
+    for setting in settings:
+        hostname = config(setting)
+        if hostname is None:
+            log('DNS HA: Hostname setting {} is None. Ignoring.'
+                ''.format(setting),
+                DEBUG)
+            continue
+        m = re.search('os-(.+?)-hostname', setting)
+        if m:
+            networkspace = m.group(1)
+        else:
+            msg = ('Unexpected DNS hostname setting: {}. '
+                   'Cannot determine network space name'
+                   ''.format(setting))
+            status_set('blocked', msg)
+            raise DNSHAException(msg)
+
+        hostname_key = 'res_{}_{}_hostname'.format(charm_name(), networkspace)
+        if hostname_key in hostname_group:
+            log('DNS HA: Resource {}: {} already exists in '
+                'hostname group - skipping'.format(hostname_key, hostname),
+                DEBUG)
+            continue
+
+        hostname_group.append(hostname_key)
+        resources[hostname_key] = crm_ocf
+        resource_params[hostname_key] = (
+            'params fqdn="{}" ip_address="{}" '
+            ''.format(hostname, resolve_address(endpoint_type=networkspace,
+                                                override=False)))
+
+    if len(hostname_group) >= 1:
+        log('DNS HA: Hostname group is set with {} as members. '
+            'Informing the ha relation'.format(' '.join(hostname_group)),
+            DEBUG)
+        relation_set(relation_id=relation_id, groups={
+            'grp_{}_hostnames'.format(charm_name()): ' '.join(hostname_group)})
+    else:
+        msg = 'DNS HA: Hostname group has no members.'
+        status_set('blocked', msg)
+        raise DNSHAException(msg)
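This new helper turns any configured os-*-hostname values into ocf:maas:dns resources for the hacluster relation. A sketch of how a charm's ha-relation-joined hook might call it (the hook scaffolding and values below are hypothetical, not taken from this change):

    # Hypothetical ha_joined() fragment:
    resources = {}
    resource_params = {}
    update_dns_ha_resource_params(resources, resource_params)
    # resources now maps e.g. 'res_keystone_public_hostname' -> 'ocf:maas:dns'
    # resource_params holds e.g. 'params fqdn="ks.example.com" ip_address="10.5.0.10" '
    relation_set(relation_id=None, resources=resources,
                 resource_params=resource_params)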
@@ -109,7 +109,7 @@ def _get_address_override(endpoint_type=PUBLIC):
         return addr_override.format(service_name=service_name())


-def resolve_address(endpoint_type=PUBLIC):
+def resolve_address(endpoint_type=PUBLIC, override=True):
     """Return unit address depending on net config.

     If unit is clustered with vip(s) and has net splits defined, return vip on
@@ -119,10 +119,13 @@ def resolve_address(endpoint_type=PUBLIC):
     split if one is configured, or a Juju 2.0 extra-binding has been used.

     :param endpoint_type: Network endpoint type
+    :param override: Accept hostname overrides or not
     """
-    resolved_address = _get_address_override(endpoint_type)
-    if resolved_address:
-        return resolved_address
+    resolved_address = None
+    if override:
+        resolved_address = _get_address_override(endpoint_type)
+        if resolved_address:
+            return resolved_address

     vips = config('vip')
     if vips:
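The new override flag lets DNS HA fetch the underlying unit address instead of a configured hostname override; update_dns_ha_resource_params above depends on it. Illustratively (assuming the 'public' endpoint type):

    # Default call keeps the old behaviour and may return os-public-hostname:
    public_name = resolve_address(endpoint_type='public')
    # DNS HA needs the address the hostname should map onto:
    public_addr = resolve_address(endpoint_type='public', override=False)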
@@ -51,6 +51,7 @@ from charmhelpers.core.hookenv import (
     related_units,
     relation_ids,
     relation_set,
+    service_name,
     status_set,
     hook_name
 )
@@ -207,6 +208,27 @@ PACKAGE_CODENAMES = {
     ]),
 }

+GIT_DEFAULT_REPOS = {
+    'requirements': 'git://github.com/openstack/requirements',
+    'cinder': 'git://github.com/openstack/cinder',
+    'glance': 'git://github.com/openstack/glance',
+    'horizon': 'git://github.com/openstack/horizon',
+    'keystone': 'git://github.com/openstack/keystone',
+    'neutron': 'git://github.com/openstack/neutron',
+    'neutron-fwaas': 'git://github.com/openstack/neutron-fwaas',
+    'neutron-lbaas': 'git://github.com/openstack/neutron-lbaas',
+    'neutron-vpnaas': 'git://github.com/openstack/neutron-vpnaas',
+    'nova': 'git://github.com/openstack/nova',
+}
+
+GIT_DEFAULT_BRANCHES = {
+    'icehouse': 'icehouse-eol',
+    'kilo': 'stable/kilo',
+    'liberty': 'stable/liberty',
+    'mitaka': 'stable/mitaka',
+    'master': 'master',
+}
+
 DEFAULT_LOOPBACK_SIZE = '5G'
@@ -703,6 +725,53 @@ def git_install_requested():
 requirements_dir = None


+def git_default_repos(projects):
+    """
+    Returns default repos if a default openstack-origin-git value is specified.
+    """
+    service = service_name()
+
+    for default, branch in GIT_DEFAULT_BRANCHES.iteritems():
+        if projects == default:
+
+            # add the requirements repo first
+            repo = {
+                'name': 'requirements',
+                'repository': GIT_DEFAULT_REPOS['requirements'],
+                'branch': branch,
+            }
+            repos = [repo]
+
+            # neutron and nova charms require some additional repos
+            if service == 'neutron':
+                for svc in ['neutron-fwaas', 'neutron-lbaas', 'neutron-vpnaas']:
+                    repo = {
+                        'name': svc,
+                        'repository': GIT_DEFAULT_REPOS[svc],
+                        'branch': branch,
+                    }
+                    repos.append(repo)
+            elif service == 'nova':
+                repo = {
+                    'name': 'neutron',
+                    'repository': GIT_DEFAULT_REPOS['neutron'],
+                    'branch': branch,
+                }
+                repos.append(repo)
+
+            # finally add the current service's repo
+            repo = {
+                'name': service,
+                'repository': GIT_DEFAULT_REPOS[service],
+                'branch': branch,
+            }
+            repos.append(repo)
+
+            return yaml.dump(dict(repositories=repos))
+
+    return projects
+
+
 def _git_yaml_load(projects_yaml):
     """
     Load the specified yaml into a dictionary.
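For a charm whose service name is glance (a hypothetical example), passing the release shorthand 'mitaka' expands to YAML of roughly this shape; any other value is passed through untouched:

    # Sketch of the expansion performed by git_default_repos('mitaka'):
    # git_default_repos('mitaka') == yaml.dump({'repositories': [
    #     {'name': 'requirements',
    #      'repository': 'git://github.com/openstack/requirements',
    #      'branch': 'stable/mitaka'},
    #     {'name': 'glance',
    #      'repository': 'git://github.com/openstack/glance',
    #      'branch': 'stable/mitaka'},
    # ]})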
@@ -1231,7 +1231,7 @@ class CephConfContext(object):

         permitted = self.permitted_sections
         if permitted:
-            diff = set(conf.keys()).symmetric_difference(set(permitted))
+            diff = set(conf.keys()).difference(set(permitted))
             if diff:
                 log("Config-flags contains invalid keys '%s' - they will be "
                     "ignored" % (', '.join(diff)), level=WARNING)
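The fix matters because symmetric_difference also reports permitted sections the user never supplied, producing false warnings. A quick comparison:

    conf = {'osd'}                          # sections the user supplied
    permitted = {'global', 'mon', 'osd'}    # sections the charm allows
    conf.symmetric_difference(permitted)    # {'global', 'mon'} - false positives
    conf.difference(permitted)              # set() - nothing wrongly flagged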
@@ -176,7 +176,7 @@ def init_is_systemd():


 def adduser(username, password=None, shell='/bin/bash', system_user=False,
-            primary_group=None, secondary_groups=None):
+            primary_group=None, secondary_groups=None, uid=None):
     """Add a user to the system.

     Will log but otherwise succeed if the user already exists.
@@ -187,15 +187,21 @@ def adduser(username, password=None, shell='/bin/bash', system_user=False,
     :param bool system_user: Whether to create a login or system user
     :param str primary_group: Primary group for user; defaults to username
     :param list secondary_groups: Optional list of additional groups
+    :param int uid: UID for user being created

     :returns: The password database entry struct, as returned by `pwd.getpwnam`
     """
     try:
         user_info = pwd.getpwnam(username)
         log('user {0} already exists!'.format(username))
+        if uid:
+            user_info = pwd.getpwuid(int(uid))
+            log('user with uid {0} already exists!'.format(uid))
     except KeyError:
         log('creating user {0}'.format(username))
         cmd = ['useradd']
+        if uid:
+            cmd.extend(['--uid', str(uid)])
         if system_user or password is None:
             cmd.append('--system')
         else:
@@ -230,14 +236,58 @@ def user_exists(username):
     return user_exists


-def add_group(group_name, system_group=False):
-    """Add a group to the system"""
+def uid_exists(uid):
+    """Check if a uid exists"""
+    try:
+        pwd.getpwuid(uid)
+        uid_exists = True
+    except KeyError:
+        uid_exists = False
+    return uid_exists
+
+
+def group_exists(groupname):
+    """Check if a group exists"""
+    try:
+        grp.getgrnam(groupname)
+        group_exists = True
+    except KeyError:
+        group_exists = False
+    return group_exists
+
+
+def gid_exists(gid):
+    """Check if a gid exists"""
+    try:
+        grp.getgrgid(gid)
+        gid_exists = True
+    except KeyError:
+        gid_exists = False
+    return gid_exists
+
+
+def add_group(group_name, system_group=False, gid=None):
+    """Add a group to the system
+
+    Will log but otherwise succeed if the group already exists.
+
+    :param str group_name: group to create
+    :param bool system_group: Create system group
+    :param int gid: GID for user being created
+
+    :returns: The password database entry struct, as returned by `grp.getgrnam`
+    """
     try:
         group_info = grp.getgrnam(group_name)
         log('group {0} already exists!'.format(group_name))
+        if gid:
+            group_info = grp.getgrgid(gid)
+            log('group with gid {0} already exists!'.format(gid))
     except KeyError:
         log('creating group {0}'.format(group_name))
         cmd = ['addgroup']
+        if gid:
+            cmd.extend(['--gid', str(gid)])
         if system_group:
             cmd.append('--system')
         else:
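A usage sketch for the new fixed-id support, useful when a uid/gid must match across units (the names and ids here are illustrative):

    from charmhelpers.core.host import add_group, adduser, gid_exists, uid_exists

    # Create a group and user with pinned ids only if those ids are free:
    if not gid_exists(1642):
        add_group('ceph', system_group=True, gid=1642)
    if not uid_exists(1642):
        adduser('ceph', system_user=True, uid=1642, primary_group='ceph')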
@@ -398,16 +398,13 @@ def install_remote(source, *args, **kwargs):
     # We ONLY check for True here because can_handle may return a string
     # explaining why it can't handle a given source.
     handlers = [h for h in plugins() if h.can_handle(source) is True]
-    installed_to = None
     for handler in handlers:
         try:
-            installed_to = handler.install(source, *args, **kwargs)
+            return handler.install(source, *args, **kwargs)
         except UnhandledSource as e:
             log('Install source attempt unsuccessful: {}'.format(e),
                 level='WARNING')
-    if not installed_to:
-        raise UnhandledSource("No handler found for source {}".format(source))
-    return installed_to
+    raise UnhandledSource("No handler found for source {}".format(source))


 def install_from_config(config_var_name):
@@ -42,15 +42,23 @@ class BzrUrlFetchHandler(BaseFetchHandler):
         else:
             return True

-    def branch(self, source, dest):
+    def branch(self, source, dest, revno=None):
         if not self.can_handle(source):
             raise UnhandledSource("Cannot handle {}".format(source))
+        cmd_opts = []
+        if revno:
+            cmd_opts += ['-r', str(revno)]
         if os.path.exists(dest):
-            check_call(['bzr', 'pull', '--overwrite', '-d', dest, source])
+            cmd = ['bzr', 'pull']
+            cmd += cmd_opts
+            cmd += ['--overwrite', '-d', dest, source]
         else:
-            check_call(['bzr', 'branch', source, dest])
+            cmd = ['bzr', 'branch']
+            cmd += cmd_opts
+            cmd += [source, dest]
+        check_call(cmd)

-    def install(self, source, dest=None):
+    def install(self, source, dest=None, revno=None):
         url_parts = self.parse_url(source)
         branch_name = url_parts.path.strip("/").split("/")[-1]
         if dest:
@@ -59,10 +67,11 @@ class BzrUrlFetchHandler(BaseFetchHandler):
             dest_dir = os.path.join(os.environ.get('CHARM_DIR'), "fetched",
                                     branch_name)

-        if not os.path.exists(dest_dir):
-            mkdir(dest_dir, perms=0o755)
+        if dest and not os.path.exists(dest):
+            mkdir(dest, perms=0o755)

         try:
-            self.branch(source, dest_dir)
+            self.branch(source, dest_dir, revno)
         except OSError as e:
             raise UnhandledSource(e.strerror)
         return dest_dir
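A sketch of the new pinned-revision fetch (the source URL and revno are made up):

    handler = BzrUrlFetchHandler()
    handler.install('lp:~someone/some/branch', revno=42)
    # Fresh fetch runs roughly:   bzr branch -r 42 <source> <dest_dir>
    # Repeat fetch runs roughly:  bzr pull -r 42 --overwrite -d <dest_dir> <source>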