Sync charm-helpers

Change-Id: Iaf76a655e96386608f01d7eba2281ac94d1e85b8
Chris MacNaughton 2019-07-12 15:01:55 +02:00 committed by Liam Young
parent 691bb92310
commit b0c758a466
18 changed files with 359 additions and 36 deletions

View File

@@ -33,6 +33,7 @@ from charmhelpers.core.hookenv import (
     hook_name,
     local_unit,
     log,
+    relation_get,
     relation_ids,
     relation_set,
     relations_of_type,
@@ -260,11 +261,23 @@ class NRPE(object):
         relation = relation_ids('nrpe-external-master')
         if relation:
             log("Setting charm primary status {}".format(primary))
-            for rid in relation_ids('nrpe-external-master'):
+            for rid in relation:
                 relation_set(relation_id=rid, relation_settings={'primary': self.primary})
+        self.remove_check_queue = set()

     def add_check(self, *args, **kwargs):
+        shortname = None
+        if kwargs.get('shortname') is None:
+            if len(args) > 0:
+                shortname = args[0]
+        else:
+            shortname = kwargs['shortname']
+
         self.checks.append(Check(*args, **kwargs))
+
+        try:
+            self.remove_check_queue.remove(shortname)
+        except KeyError:
+            pass

     def remove_check(self, *args, **kwargs):
         if kwargs.get('shortname') is None:
@@ -281,6 +294,7 @@ class NRPE(object):
         check = Check(*args, **kwargs)
         check.remove(self.hostname)
+        self.remove_check_queue.add(kwargs['shortname'])

     def write(self):
         try:
@@ -313,7 +327,24 @@ class NRPE(object):
         monitor_ids = relation_ids("local-monitors") + \
             relation_ids("nrpe-external-master")
         for rid in monitor_ids:
-            relation_set(relation_id=rid, monitors=yaml.dump(monitors))
+            reldata = relation_get(unit=local_unit(), rid=rid)
+            if 'monitors' in reldata:
+                # update the existing set of monitors with the new data
+                old_monitors = yaml.safe_load(reldata['monitors'])
+                old_nrpe_monitors = old_monitors['monitors']['remote']['nrpe']
+                # remove keys that are in the remove_check_queue
+                old_nrpe_monitors = {k: v for k, v in old_nrpe_monitors.items()
+                                     if k not in self.remove_check_queue}
+                # update/add nrpe_monitors
+                old_nrpe_monitors.update(nrpe_monitors)
+                old_monitors['monitors']['remote']['nrpe'] = old_nrpe_monitors
+                # write back to the relation
+                relation_set(relation_id=rid, monitors=yaml.dump(old_monitors))
+            else:
+                # write a brand new set of monitors, as no existing ones.
+                relation_set(relation_id=rid, monitors=yaml.dump(monitors))
+        self.remove_check_queue.clear()


 def get_nagios_hostcontext(relation_name='nrpe-external-master'):
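
For orientation, a minimal usage sketch of the new removal queue; it is not part of the sync, it assumes a normal charm hook environment with the nrpe-external-master relation joined, and the shortnames and check command are illustrative:

    from charmhelpers.contrib.charmsupport import nrpe

    hostname = nrpe.get_nagios_hostname()
    checker = nrpe.NRPE(hostname=hostname)

    # Queue a stale check for removal; its shortname is remembered in
    # remove_check_queue and pruned from the relation's existing monitors.
    checker.remove_check(shortname='old_check')

    # Adding a check with a queued shortname takes it back off the queue.
    checker.add_check(
        shortname='cinder_api',
        description='process check for cinder-api',
        check_cmd='check_procs -c 1: -C cinder-api',
    )

    # write() now merges with monitors already on the relation, drops the
    # queued removals, then clears the queue.
    checker.write()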

View File

@@ -294,8 +294,10 @@ class OpenStackAmuletDeployment(AmuletDeployment):
             ('bionic', None): self.bionic_queens,
             ('bionic', 'cloud:bionic-rocky'): self.bionic_rocky,
             ('bionic', 'cloud:bionic-stein'): self.bionic_stein,
+            ('bionic', 'cloud:bionic-train'): self.bionic_train,
             ('cosmic', None): self.cosmic_rocky,
             ('disco', None): self.disco_stein,
         }
         return releases[(self.series, self.openstack)]
+            ('eoan', None): self.eoan_train,
@@ -313,6 +315,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
             ('bionic', 'queens'),
             ('cosmic', 'rocky'),
             ('disco', 'stein'),
+            ('eoan', 'train'),
         ])
         if self.openstack:
             os_origin = self.openstack.split(':')[1]
@@ -320,6 +323,23 @@ class OpenStackAmuletDeployment(AmuletDeployment):
         else:
             return releases[self.series]

+    def get_percona_service_entry(self, memory_constraint=None):
+        """Return a amulet service entry for percona cluster.
+
+        :param memory_constraint: Override the default memory constraint
+                                  in the service entry.
+        :type memory_constraint: str
+        :returns: Amulet service entry.
+        :rtype: dict
+        """
+        memory_constraint = memory_constraint or '3072M'
+        svc_entry = {
+            'name': 'percona-cluster',
+            'constraints': {'mem': memory_constraint}}
+        if self._get_openstack_release() <= self.trusty_mitaka:
+            svc_entry['location'] = 'cs:trusty/percona-cluster'
+        return svc_entry
+
     def get_ceph_expected_pools(self, radosgw=False):
         """Return a list of expected ceph pools in a ceph + cinder + glance
         test scenario, based on OpenStack release and whether ceph radosgw
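
A hedged sketch of how a charm's amulet test might consume the new helper when building its service list; the surrounding services and charm name are illustrative:

    # Inside an OpenStackAmuletDeployment subclass; _add_services() is the
    # existing amulet helper that this entry feeds into.
    other_services = [
        {'name': 'rabbitmq-server'},
        self.get_percona_service_entry(memory_constraint='4096M'),
        {'name': 'keystone'},
    ]
    self._add_services({'name': 'cinder'}, other_services)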

View File

@@ -54,11 +54,15 @@ NOVA_CLIENT_VERSION = "2"

 OPENSTACK_RELEASES_PAIRS = [
     'trusty_icehouse', 'trusty_kilo', 'trusty_liberty',
-    'trusty_mitaka', 'xenial_mitaka', 'xenial_newton',
-    'yakkety_newton', 'xenial_ocata', 'zesty_ocata',
-    'xenial_pike', 'artful_pike', 'xenial_queens',
-    'bionic_queens', 'bionic_rocky', 'cosmic_rocky',
-    'bionic_stein', 'disco_stein']
+    'trusty_mitaka', 'xenial_mitaka',
+    'xenial_newton', 'yakkety_newton',
+    'xenial_ocata', 'zesty_ocata',
+    'xenial_pike', 'artful_pike',
+    'xenial_queens', 'bionic_queens',
+    'bionic_rocky', 'cosmic_rocky',
+    'bionic_stein', 'disco_stein',
+    'bionic_train', 'eoan_train',
+]


 class OpenStackAmuletUtils(AmuletUtils):

View File

@@ -126,7 +126,11 @@ def _config_ini(path):
     :returns: Configuration contained in path
     :rtype: Dict
     """
-    conf = configparser.ConfigParser()
+    # When strict is enabled, duplicate options are not allowed in the
+    # parsed INI; however, Oslo allows duplicate values. This change
+    # causes us to ignore the duplicate values which is acceptable as
+    # long as we don't validate any multi-value options
+    conf = configparser.ConfigParser(strict=False)
     conf.read(path)
     return dict(conf)
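
The behaviour difference is easy to demonstrate with a self-contained sketch (the sample options are made up):

    import configparser

    SAMPLE = "[DEFAULT]\ndriver = messagingv2\ndriver = log\n"

    # strict (the default) refuses the duplicate option outright
    try:
        configparser.ConfigParser(strict=True).read_string(SAMPLE)
    except configparser.DuplicateOptionError as exc:
        print("strict parser refuses:", exc)

    # strict=False keeps parsing; the last occurrence wins
    conf = configparser.ConfigParser(strict=False)
    conf.read_string(SAMPLE)
    print(conf["DEFAULT"]["driver"])  # -> log
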
@@ -204,7 +208,7 @@ def validate_file_ownership(config):
                 "Invalid ownership configuration: {}".format(key))
         owner = options.get('owner', config.get('owner', 'root'))
         group = options.get('group', config.get('group', 'root'))
-        optional = options.get('optional', config.get('optional', 'False'))
+        optional = options.get('optional', config.get('optional', False))
         if '*' in file_name:
             for file in glob.glob(file_name):
                 if file not in files.keys():
@@ -226,7 +230,7 @@ def validate_file_permissions(config):
             raise RuntimeError(
                 "Invalid ownership configuration: {}".format(key))
         mode = options.get('mode', config.get('permissions', '600'))
-        optional = options.get('optional', config.get('optional', 'False'))
+        optional = options.get('optional', config.get('optional', False))
         if '*' in file_name:
             for file in glob.glob(file_name):
                 if file not in files.keys():
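
The new default matters because the old string default is truthy, so a later truthiness test (e.g. 'if optional:') would have treated every unlisted file as optional:

    # bool('False') is True; only the bare boolean gives the intended default
    assert bool('False') is True
    assert bool(False) is False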

View File

@@ -106,9 +106,11 @@ class CertRequest(object):
             sans = sorted(list(set(entry['addresses'])))
             request[entry['cn']] = {'sans': sans}
         if self.json_encode:
-            return {'cert_requests': json.dumps(request, sort_keys=True)}
+            req = {'cert_requests': json.dumps(request, sort_keys=True)}
         else:
-            return {'cert_requests': request}
+            req = {'cert_requests': request}
+        req['unit_name'] = local_unit().replace('/', '_')
+        return req


 def get_certificate_request(json_encode=True):
def get_certificate_request(json_encode=True): def get_certificate_request(json_encode=True):
@@ -220,6 +222,8 @@ def process_certificates(service_name, relation_id, unit,
     :type user: str
     :param group: (Optional) Group of certificate files. Defaults to 'root'
     :type group: str
+    :returns: True if certificates processed for local unit or False
+    :rtype: bool
     """
     data = relation_get(rid=relation_id, unit=unit)
     ssl_dir = os.path.join('/etc/apache2/ssl/', service_name)
@@ -235,6 +239,8 @@ def process_certificates(service_name, relation_id, unit,
         create_ip_cert_links(
             ssl_dir,
             custom_hostname_link=custom_hostname_link)
+        return True
+    return False


 def get_requests_for_local_unit(relation_name=None):
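
A hedged sketch of a certificates-relation-changed handler that uses the new return value; restart_services is a hypothetical charm-specific helper, not part of charm-helpers:

    from charmhelpers.contrib.openstack.cert_utils import process_certificates
    from charmhelpers.core.hookenv import relation_id, remote_unit

    def certs_changed():
        # Only restart when certificates for this unit were actually written.
        if process_certificates('cinder', relation_id(), remote_unit()):
            restart_services()  # hypothetical helper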

View File

@@ -117,6 +117,7 @@ except ImportError:
 CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
 ADDRESS_TYPES = ['admin', 'internal', 'public']
 HAPROXY_RUN_DIR = '/var/run/haproxy/'
+DEFAULT_OSLO_MESSAGING_DRIVER = "messagingv2"


 def ensure_packages(packages):
@@ -257,7 +258,7 @@ class SharedDBContext(OSContextGenerator):
                     'database_password': rdata.get(password_setting),
                     'database_type': 'mysql+pymysql'
                 }
-                if CompareOpenStackReleases(rel) < 'stein':
+                if CompareOpenStackReleases(rel) < 'queens':
                     ctxt['database_type'] = 'mysql'
                 if self.context_complete(ctxt):
                     db_ssl(rdata, ctxt, self.ssl_dir)
@@ -351,10 +352,70 @@ class IdentityServiceContext(OSContextGenerator):
                 return cachedir
         return None

+    def _get_pkg_name(self, python_name='keystonemiddleware'):
+        """Get corresponding distro installed package for python
+        package name.
+
+        :param python_name: nameof the python package
+        :type: string
+        """
+        pkg_names = map(lambda x: x + python_name, ('python3-', 'python-'))
+
+        for pkg in pkg_names:
+            if not filter_installed_packages((pkg,)):
+                return pkg
+
+        return None
+
+    def _get_keystone_authtoken_ctxt(self, ctxt, keystonemiddleware_os_rel):
+        """Build Jinja2 context for full rendering of [keystone_authtoken]
+        section with variable names included. Re-constructed from former
+        template 'section-keystone-auth-mitaka'.
+
+        :param ctxt: Jinja2 context returned from self.__call__()
+        :type: dict
+        :param keystonemiddleware_os_rel: OpenStack release name of
+            keystonemiddleware package installed
+        """
+        c = collections.OrderedDict((('auth_type', 'password'),))

+        # 'www_authenticate_uri' replaced 'auth_uri' since Stein,
+        # see keystonemiddleware upstream sources for more info
+        if CompareOpenStackReleases(keystonemiddleware_os_rel) >= 'stein':
+            c.update((
+                ('www_authenticate_uri', "{}://{}:{}/v3".format(
+                    ctxt.get('service_protocol', ''),
+                    ctxt.get('service_host', ''),
+                    ctxt.get('service_port', ''))),))
+        else:
+            c.update((
+                ('auth_uri', "{}://{}:{}/v3".format(
+                    ctxt.get('service_protocol', ''),
+                    ctxt.get('service_host', ''),
+                    ctxt.get('service_port', ''))),))
+
+        c.update((
+            ('auth_url', "{}://{}:{}/v3".format(
+                ctxt.get('auth_protocol', ''),
+                ctxt.get('auth_host', ''),
+                ctxt.get('auth_port', ''))),
+            ('project_domain_name', ctxt.get('admin_domain_name', '')),
+            ('user_domain_name', ctxt.get('admin_domain_name', '')),
+            ('project_name', ctxt.get('admin_tenant_name', '')),
+            ('username', ctxt.get('admin_user', '')),
+            ('password', ctxt.get('admin_password', '')),
+            ('signing_dir', ctxt.get('signing_dir', '')),))
+
+        return c
+
     def __call__(self):
         log('Generating template context for ' + self.rel_name, level=DEBUG)
         ctxt = {}
+        keystonemiddleware_os_release = None
+        if self._get_pkg_name():
+            keystonemiddleware_os_release = os_release(self._get_pkg_name())
+
         cachedir = self._setup_pki_cache()
         if cachedir:
             ctxt['signing_dir'] = cachedir
@@ -382,8 +443,18 @@ class IdentityServiceContext(OSContextGenerator):
                              'api_version': api_version})

                 if float(api_version) > 2:
-                    ctxt.update({'admin_domain_name':
-                                 rdata.get('service_domain')})
+                    ctxt.update({
+                        'admin_domain_name': rdata.get('service_domain'),
+                        'service_project_id': rdata.get('service_tenant_id'),
+                        'service_domain_id': rdata.get('service_domain_id')})
+
+                # we keep all veriables in ctxt for compatibility and
+                # add nested dictionary for keystone_authtoken generic
+                # templating
+                if keystonemiddleware_os_release:
+                    ctxt['keystone_authtoken'] = \
+                        self._get_keystone_authtoken_ctxt(
+                            ctxt, keystonemiddleware_os_release)

                 if self.context_complete(ctxt):
                     # NOTE(jamespage) this is required for >= icehouse
@@ -452,6 +523,86 @@ class IdentityCredentialsContext(IdentityServiceContext):
         return {}


+class NovaVendorMetadataContext(OSContextGenerator):
+    """Context used for configuring nova vendor metadata on nova.conf file."""
+
+    def __init__(self, os_release_pkg, interfaces=None):
+        """Initialize the NovaVendorMetadataContext object.
+
+        :param os_release_pkg: the package name to extract the OpenStack
+            release codename from.
+        :type os_release_pkg: str
+        :param interfaces: list of string values to be used as the Context's
+            relation interfaces.
+        :type interfaces: List[str]
+        """
+        self.os_release_pkg = os_release_pkg
+        if interfaces is not None:
+            self.interfaces = interfaces
+
+    def __call__(self):
+        cmp_os_release = CompareOpenStackReleases(
+            os_release(self.os_release_pkg))
+        ctxt = {'vendor_data': False}
+
+        vdata_providers = []
+        vdata = config('vendor-data')
+        vdata_url = config('vendor-data-url')
+
+        if vdata:
+            try:
+                # validate the JSON. If invalid, we do not set anything here
+                json.loads(vdata)
+            except (TypeError, ValueError) as e:
+                log('Error decoding vendor-data. {}'.format(e), level=ERROR)
+            else:
+                ctxt['vendor_data'] = True
+                # Mitaka does not support DynamicJSON
+                # so vendordata_providers is not needed
+                if cmp_os_release > 'mitaka':
+                    vdata_providers.append('StaticJSON')
+
+        if vdata_url:
+            if cmp_os_release > 'mitaka':
+                ctxt['vendor_data_url'] = vdata_url
+                vdata_providers.append('DynamicJSON')
+            else:
+                log('Dynamic vendor data unsupported'
+                    ' for {}.'.format(cmp_os_release), level=ERROR)
+        if vdata_providers:
+            ctxt['vendordata_providers'] = ','.join(vdata_providers)
+
+        return ctxt
+
+
+class NovaVendorMetadataJSONContext(OSContextGenerator):
+    """Context used for writing nova vendor metadata json file."""
+
+    def __init__(self, os_release_pkg):
+        """Initialize the NovaVendorMetadataJSONContext object.
+
+        :param os_release_pkg: the package name to extract the OpenStack
+            release codename from.
+        :type os_release_pkg: str
+        """
+        self.os_release_pkg = os_release_pkg
+
+    def __call__(self):
+        ctxt = {'vendor_data_json': '{}'}
+
+        vdata = config('vendor-data')
+        if vdata:
+            try:
+                # validate the JSON. If invalid, we return empty.
+                json.loads(vdata)
+            except (TypeError, ValueError) as e:
+                log('Error decoding vendor-data. {}'.format(e), level=ERROR)
+            else:
+                ctxt['vendor_data_json'] = vdata
+
+        return ctxt
+
+
 class AMQPContext(OSContextGenerator):

     def __init__(self, ssl_dir=None, rel_name='amqp', relation_prefix=None,
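
To make the vendor metadata contexts concrete, a hedged sketch of what they might yield; the config value, package name, and release are illustrative, and config() only works inside a hook environment:

    from charmhelpers.contrib.openstack.context import (
        NovaVendorMetadataContext,
        NovaVendorMetadataJSONContext,
    )

    # With charm config vendor-data='{"availability_zone": "az1"}' on a
    # post-Mitaka cloud:
    ctxt = NovaVendorMetadataContext('nova-common')()
    # -> {'vendor_data': True, 'vendordata_providers': 'StaticJSON'}
    json_ctxt = NovaVendorMetadataJSONContext('nova-common')()
    # -> {'vendor_data_json': '{"availability_zone": "az1"}'}
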
@@ -569,6 +720,19 @@ class AMQPContext(OSContextGenerator):
                     ctxt['oslo_messaging_flags'] = config_flags_parser(
                         oslo_messaging_flags)

+                oslo_messaging_driver = conf.get(
+                    'oslo-messaging-driver', DEFAULT_OSLO_MESSAGING_DRIVER)
+                if oslo_messaging_driver:
+                    ctxt['oslo_messaging_driver'] = oslo_messaging_driver
+
+                notification_format = conf.get('notification-format', None)
+                if notification_format:
+                    ctxt['notification_format'] = notification_format
+
+                send_notifications_to_logs = conf.get('send-notifications-to-logs', None)
+                if send_notifications_to_logs:
+                    ctxt['send_notifications_to_logs'] = send_notifications_to_logs
+
         if not self.complete:
             return {}
@@ -620,6 +784,25 @@ class CephContext(OSContextGenerator):
             ensure_packages(['ceph-common'])

         return ctxt

+    def context_complete(self, ctxt):
+        """Overridden here to ensure the context is actually complete.
+
+        We set `key` and `auth` to None here, by default, to ensure
+        that the context will always evaluate to incomplete until the
+        Ceph relation has actually sent these details; otherwise,
+        there is a potential race condition between the relation
+        appearing and the first unit actually setting this data on the
+        relation.
+
+        :param ctxt: The current context members
+        :type ctxt: Dict[str, ANY]
+        :returns: True if the context is complete
+        :rtype: bool
+        """
+        if 'auth' not in ctxt or 'key' not in ctxt:
+            return False
+        return super(CephContext, self).context_complete(ctxt)
+

 class HAProxyContext(OSContextGenerator):
     """Provides half a context for the haproxy template, which describes
@@ -1110,7 +1293,9 @@ class NeutronPortContext(OSContextGenerator):
         hwaddr_to_nic = {}
         hwaddr_to_ip = {}
-        for nic in list_nics():
+        extant_nics = list_nics()
+
+        for nic in extant_nics:
             # Ignore virtual interfaces (bond masters will be identified from
             # their slaves)
             if not is_phy_iface(nic):
@@ -1141,10 +1326,11 @@ class NeutronPortContext(OSContextGenerator):
                 # Entry is a MAC address for a valid interface that doesn't
                 # have an IP address assigned yet.
                 resolved.append(hwaddr_to_nic[entry])
-            else:
-                # If the passed entry is not a MAC address, assume it's a valid
-                # interface, and that the user put it there on purpose (we can
-                # trust it to be the real external network).
+            elif entry in extant_nics:
+                # If the passed entry is not a MAC address and the interface
+                # exists, assume it's a valid interface, and that the user put
+                # it there on purpose (we can trust it to be the real external
+                # network).
                 resolved.append(entry)

         # Ensure no duplicates
@@ -1526,6 +1712,18 @@ class NeutronAPIContext(OSContextGenerator):
                 'rel_key': 'enable-nsg-logging',
                 'default': False,
             },
+            'enable_nfg_logging': {
+                'rel_key': 'enable-nfg-logging',
+                'default': False,
+            },
+            'global_physnet_mtu': {
+                'rel_key': 'global-physnet-mtu',
+                'default': 1500,
+            },
+            'physical_network_mtus': {
+                'rel_key': 'physical-network-mtus',
+                'default': None,
+            },
         }
         ctxt = self.get_neutron_options({})
         for rid in relation_ids('neutron-plugin-api'):
@@ -1587,13 +1785,13 @@ class DataPortContext(NeutronPortContext):
     def __call__(self):
         ports = config('data-port')
         if ports:
-            # Map of {port/mac:bridge}
+            # Map of {bridge:port/mac}
             portmap = parse_data_port_mappings(ports)
             ports = portmap.keys()
             # Resolve provided ports or mac addresses and filter out those
             # already attached to a bridge.
             resolved = self.resolve_ports(ports)
-            # FIXME: is this necessary?
+            # Rebuild port index using resolved and filtered ports.
             normalized = {get_nic_hwaddr(port): port for port in resolved
                           if port not in ports}
             normalized.update({port: port for port in resolved

View File

@@ -217,6 +217,11 @@ def neutron_plugins():
         plugins['nsx']['config'] = '/etc/neutron/nsx.ini'
         plugins['vsp']['driver'] = (
             'nuage_neutron.plugins.nuage.plugin.NuagePlugin')
+        if CompareOpenStackReleases(release) >= 'newton':
+            plugins['vsp']['config'] = '/etc/neutron/plugins/ml2/ml2_conf.ini'
+            plugins['vsp']['driver'] = 'neutron.plugins.ml2.plugin.Ml2Plugin'
+            plugins['vsp']['server_packages'] = ['neutron-server',
+                                                 'neutron-plugin-ml2']
     return plugins

View File

@@ -0,0 +1,9 @@
+{% if auth_host -%}
+[keystone_authtoken]
+{% for option_name, option_value in keystone_authtoken.items() -%}
+{{ option_name }} = {{ option_value }}
+{% endfor -%}
+{% if use_memcache == true %}
+memcached_servers = {{ memcache_url }}
+{% endif -%}
+{% endif -%}
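
A hedged, self-contained sketch of what the new generic section renders to, driving the template above with jinja2 (assumed installed) and a context shaped like _get_keystone_authtoken_ctxt()'s return value; all values are made up:

    from collections import OrderedDict
    from jinja2 import Template

    TEMPLATE = (
        "{% if auth_host -%}\n"
        "[keystone_authtoken]\n"
        "{% for option_name, option_value in keystone_authtoken.items() -%}\n"
        "{{ option_name }} = {{ option_value }}\n"
        "{% endfor -%}\n"
        "{% endif -%}\n"
    )

    ctxt = {
        'auth_host': '10.0.0.10',
        'keystone_authtoken': OrderedDict([
            ('auth_type', 'password'),
            ('www_authenticate_uri', 'https://10.0.0.10:5000/v3'),
            ('auth_url', 'https://10.0.0.10:35357/v3'),
            ('project_domain_name', 'service_domain'),
            ('user_domain_name', 'service_domain'),
            ('project_name', 'services'),
            ('username', 'cinder'),
            ('password', 'secret'),
        ]),
    }
    # Renders an INI fragment: one 'option = value' line per entry.
    print(Template(TEMPLATE).render(**ctxt))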

View File

@@ -1,11 +1,15 @@
 {% if transport_url -%}
 [oslo_messaging_notifications]
-driver = messagingv2
+driver = {{ oslo_messaging_driver }}
 transport_url = {{ transport_url }}
+{% if send_notifications_to_logs %}
+driver = log
+{% endif %}
 {% if notification_topics -%}
 topics = {{ notification_topics }}
 {% endif -%}
 {% if notification_format -%}
+[notifications]
 notification_format = {{ notification_format }}
 {% endif -%}
 {% endif -%}

View File

@@ -0,0 +1 @@
+{{ vendor_data_json }}

View File

@@ -120,6 +120,7 @@ OPENSTACK_RELEASES = (
     'queens',
     'rocky',
     'stein',
+    'train',
 )

 UBUNTU_OPENSTACK_RELEASE = OrderedDict([
@@ -139,6 +140,7 @@ UBUNTU_OPENSTACK_RELEASE = OrderedDict([
     ('bionic', 'queens'),
     ('cosmic', 'rocky'),
     ('disco', 'stein'),
+    ('eoan', 'train'),
 ])
@@ -159,6 +161,7 @@ OPENSTACK_CODENAMES = OrderedDict([
     ('2018.1', 'queens'),
     ('2018.2', 'rocky'),
     ('2019.1', 'stein'),
+    ('2019.2', 'train'),
 ])

 # The ugly duckling - must list releases oldest to newest
@@ -194,7 +197,9 @@ SWIFT_CODENAMES = OrderedDict([
     ('rocky',
      ['2.18.0', '2.19.0']),
     ('stein',
-     ['2.20.0']),
+     ['2.20.0', '2.21.0']),
+    ('train',
+     ['2.22.0']),
 ])

 # >= Liberty version->codename mapping
@@ -208,6 +213,7 @@ PACKAGE_CODENAMES = {
         ('17', 'queens'),
         ('18', 'rocky'),
         ('19', 'stein'),
+        ('20', 'train'),
     ]),
     'neutron-common': OrderedDict([
         ('7', 'liberty'),
@@ -218,6 +224,7 @@ PACKAGE_CODENAMES = {
         ('12', 'queens'),
         ('13', 'rocky'),
         ('14', 'stein'),
+        ('15', 'train'),
     ]),
     'cinder-common': OrderedDict([
         ('7', 'liberty'),
@@ -228,6 +235,7 @@ PACKAGE_CODENAMES = {
         ('12', 'queens'),
         ('13', 'rocky'),
         ('14', 'stein'),
+        ('15', 'train'),
     ]),
     'keystone': OrderedDict([
         ('8', 'liberty'),
@@ -238,6 +246,7 @@ PACKAGE_CODENAMES = {
         ('13', 'queens'),
         ('14', 'rocky'),
         ('15', 'stein'),
+        ('16', 'train'),
     ]),
     'horizon-common': OrderedDict([
         ('8', 'liberty'),
@@ -248,6 +257,7 @@ PACKAGE_CODENAMES = {
         ('13', 'queens'),
         ('14', 'rocky'),
         ('15', 'stein'),
+        ('16', 'train'),
     ]),
     'ceilometer-common': OrderedDict([
         ('5', 'liberty'),
@@ -258,6 +268,7 @@ PACKAGE_CODENAMES = {
         ('10', 'queens'),
         ('11', 'rocky'),
         ('12', 'stein'),
+        ('13', 'train'),
     ]),
     'heat-common': OrderedDict([
         ('5', 'liberty'),
@@ -268,6 +279,7 @@ PACKAGE_CODENAMES = {
         ('10', 'queens'),
         ('11', 'rocky'),
         ('12', 'stein'),
+        ('13', 'train'),
     ]),
     'glance-common': OrderedDict([
         ('11', 'liberty'),
@@ -278,6 +290,7 @@ PACKAGE_CODENAMES = {
         ('16', 'queens'),
         ('17', 'rocky'),
         ('18', 'stein'),
+        ('19', 'train'),
     ]),
     'openstack-dashboard': OrderedDict([
         ('8', 'liberty'),
@@ -288,6 +301,7 @@ PACKAGE_CODENAMES = {
         ('13', 'queens'),
         ('14', 'rocky'),
         ('15', 'stein'),
+        ('16', 'train'),
     ]),
 }
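
These maps feed helpers such as os_release(); a hedged sketch of how a charm picks up the new codename (the package choice is illustrative):

    from charmhelpers.contrib.openstack.utils import (
        CompareOpenStackReleases,
        os_release,
    )

    release = os_release('cinder-common')  # e.g. 'train' for cinder 15.x
    if CompareOpenStackReleases(release) >= 'train':
        pass  # Train-specific behaviour would go here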

View File

@@ -1482,13 +1482,28 @@ def send_request_if_needed(request, relation='ceph'):
             relation_set(relation_id=rid, broker_req=request.request)


+def has_broker_rsp(rid=None, unit=None):
+    """Return True if the broker_rsp key is 'truthy' (i.e. set to something) in the relation data.
+
+    :param rid: The relation to check (default of None means current relation)
+    :type rid: Union[str, None]
+    :param unit: The remote unit to check (default of None means current unit)
+    :type unit: Union[str, None]
+    :returns: True if broker key exists and is set to something 'truthy'
+    :rtype: bool
+    """
+    rdata = relation_get(rid=rid, unit=unit) or {}
+    broker_rsp = rdata.get(get_broker_rsp_key())
+    return True if broker_rsp else False
+
+
 def is_broker_action_done(action, rid=None, unit=None):
     """Check whether broker action has completed yet.

     @param action: name of action to be performed
     @returns True if action complete otherwise False
     """
-    rdata = relation_get(rid, unit) or {}
+    rdata = relation_get(rid=rid, unit=unit) or {}
     broker_rsp = rdata.get(get_broker_rsp_key())
     if not broker_rsp:
         return False
@@ -1510,7 +1525,7 @@ def mark_broker_action_done(action, rid=None, unit=None):
     @param action: name of action to be performed
     @returns None
     """
-    rdata = relation_get(rid, unit) or {}
+    rdata = relation_get(rid=rid, unit=unit) or {}
     broker_rsp = rdata.get(get_broker_rsp_key())
     if not broker_rsp:
         return
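
The switch to keyword arguments also fixes a latent bug: relation_get()'s first positional parameter is attribute, so relation_get(rid, unit) passed the relation id as an attribute name. A hedged sketch of combining the new helper with the existing broker-action helpers (restart_services is hypothetical):

    from charmhelpers.contrib.storage.linux.ceph import (
        has_broker_rsp,
        is_broker_action_done,
        mark_broker_action_done,
    )

    def ceph_changed(rid=None, unit=None):
        if not has_broker_rsp(rid=rid, unit=unit):
            return  # the broker has not replied yet
        if not is_broker_action_done('restart_services', rid=rid, unit=unit):
            restart_services()  # hypothetical charm-specific helper
            mark_broker_action_done('restart_services', rid=rid, unit=unit)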

View File

@@ -110,17 +110,19 @@ def is_device_mounted(device):
     return bool(re.search(r'MOUNTPOINT=".+"', out))


-def mkfs_xfs(device, force=False):
+def mkfs_xfs(device, force=False, inode_size=1024):
     """Format device with XFS filesystem.

     By default this should fail if the device already has a filesystem on it.
     :param device: Full path to device to format
     :ptype device: tr
     :param force: Force operation
-    :ptype: force: boolean"""
+    :ptype: force: boolean
+    :param inode_size: XFS inode size in bytes
+    :ptype inode_size: int"""
     cmd = ['mkfs.xfs']
     if force:
         cmd.append("-f")

-    cmd += ['-i', 'size=1024', device]
+    cmd += ['-i', "size={}".format(inode_size), device]
     check_call(cmd)
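
A hedged usage sketch of the new parameter; the device paths are illustrative, and the 1024 default keeps existing callers unchanged:

    from charmhelpers.contrib.storage.linux.utils import mkfs_xfs

    mkfs_xfs('/dev/vdb', force=True)      # unchanged callers: -i size=1024
    mkfs_xfs('/dev/vdc', inode_size=512)  # opt in to smaller inodes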

View File

@@ -173,6 +173,14 @@ CLOUD_ARCHIVE_POCKETS = {
     'stein/proposed': 'bionic-proposed/stein',
     'bionic-stein/proposed': 'bionic-proposed/stein',
     'bionic-proposed/stein': 'bionic-proposed/stein',
+    # Train
+    'train': 'bionic-updates/train',
+    'bionic-train': 'bionic-updates/train',
+    'bionic-train/updates': 'bionic-updates/train',
+    'bionic-updates/train': 'bionic-updates/train',
+    'train/proposed': 'bionic-proposed/train',
+    'bionic-train/proposed': 'bionic-proposed/train',
+    'bionic-proposed/train': 'bionic-proposed/train',
 }
@@ -522,14 +530,16 @@ def add_source(source, key=None, fail_invalid=False):
     for r, fn in six.iteritems(_mapping):
         m = re.match(r, source)
         if m:
-            # call the assoicated function with the captured groups
-            # raises SourceConfigError on error.
-            fn(*m.groups())
             if key:
+                # Import key before adding the source which depends on it,
+                # as refreshing packages could fail otherwise.
                 try:
                     import_key(key)
                 except GPGKeyError as e:
                     raise SourceConfigError(str(e))
+            # call the associated function with the captured groups
+            # raises SourceConfigError on error.
+            fn(*m.groups())
             break
     else:
         # nothing matched. log an error and maybe sys.exit
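
A hedged sketch of add_source() usage after the reordering; the repository URL is illustrative and 'KEY' stands in for an ASCII-armored public key:

    from charmhelpers.fetch import add_source

    # Cloud archive pocket from the new Train entries; no key required.
    add_source('cloud:bionic-train')

    # For a keyed source the key is now imported first, so the subsequent
    # index refresh can verify the repository.
    add_source('deb http://archive.example.com/ubuntu bionic main', key='KEY')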

View File

@@ -27,5 +27,5 @@ git+https://github.com/juju/charm-helpers.git#egg=charmhelpers
 # NOTE: workaround for 14.04 pip/tox
 pytz
 pyudev  # for ceph-* charm unit tests (not mocked?)
-git+https://github.com/openstack-charmers/zaza.git@remove-namespaced-tests#egg=zaza
+git+https://github.com/openstack-charmers/zaza.git#egg=zaza;python_version>='3.0'
 git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack

View File

@@ -43,7 +43,7 @@ applications:
     charm: cs:~openstack-charmers-next/rabbitmq-server
     num_units: 1
   percona-cluster:
-    charm: cs:~openstack-charmers-next/percona-cluster
+    charm: cs:trusty/percona-cluster
     num_units: 1
     options:
       max-connections: 1000

View File

@@ -51,7 +51,7 @@ applications:
     charm: cs:~openstack-charmers-next/rabbitmq-server
     num_units: 1
   percona-cluster:
-    charm: cs:~openstack-charmers-next/percona-cluster
+    charm: cs:trusty/percona-cluster
     num_units: 1
     options:
       max-connections: 1000

View File

@@ -1,6 +1,6 @@
 charm_name: cinder
 smoke_bundles:
-  - bionic-rocky
+  - bionic-stein
 gate_bundles:
   - bionic-stein
   - bionic-rocky