Sync charm-helpers for Ussuri/Focal release and version details

Also drop Disco bundle now that it is EOL.

Also add machine constraints to ensure stacks create
properly

Change-Id: Ic1301c6fc052a4ace4b6f6a817a5ac72fd303732
This commit is contained in:
Corey Bryant 2020-01-17 14:22:07 -05:00 committed by Liam Young
parent 5712e6aece
commit 1b923c5731
20 changed files with 839 additions and 292 deletions

View File

@ -295,9 +295,11 @@ class OpenStackAmuletDeployment(AmuletDeployment):
('bionic', 'cloud:bionic-rocky'): self.bionic_rocky, ('bionic', 'cloud:bionic-rocky'): self.bionic_rocky,
('bionic', 'cloud:bionic-stein'): self.bionic_stein, ('bionic', 'cloud:bionic-stein'): self.bionic_stein,
('bionic', 'cloud:bionic-train'): self.bionic_train, ('bionic', 'cloud:bionic-train'): self.bionic_train,
('bionic', 'cloud:bionic-ussuri'): self.bionic_ussuri,
('cosmic', None): self.cosmic_rocky, ('cosmic', None): self.cosmic_rocky,
('disco', None): self.disco_stein, ('disco', None): self.disco_stein,
('eoan', None): self.eoan_train, ('eoan', None): self.eoan_train,
('focal', None): self.focal_ussuri,
} }
return releases[(self.series, self.openstack)] return releases[(self.series, self.openstack)]
@ -316,6 +318,7 @@ class OpenStackAmuletDeployment(AmuletDeployment):
('cosmic', 'rocky'), ('cosmic', 'rocky'),
('disco', 'stein'), ('disco', 'stein'),
('eoan', 'train'), ('eoan', 'train'),
('focal', 'ussuri'),
]) ])
if self.openstack: if self.openstack:
os_origin = self.openstack.split(':')[1] os_origin = self.openstack.split(':')[1]

View File

@ -62,6 +62,7 @@ OPENSTACK_RELEASES_PAIRS = [
'bionic_rocky', 'cosmic_rocky', 'bionic_rocky', 'cosmic_rocky',
'bionic_stein', 'disco_stein', 'bionic_stein', 'disco_stein',
'bionic_train', 'eoan_train', 'bionic_train', 'eoan_train',
'bionic_ussuri', 'focal_ussuri',
] ]

View File

@ -244,8 +244,8 @@ def validate_file_permissions(config):
@audit(is_audit_type(AuditType.OpenStackSecurityGuide)) @audit(is_audit_type(AuditType.OpenStackSecurityGuide))
def validate_uses_keystone(audit_options): def validate_uses_keystone(audit_options):
"""Validate that the service uses Keystone for authentication.""" """Validate that the service uses Keystone for authentication."""
section = _config_section(audit_options, 'DEFAULT') section = _config_section(audit_options, 'api') or _config_section(audit_options, 'DEFAULT')
assert section is not None, "Missing section 'DEFAULT'" assert section is not None, "Missing section 'api / DEFAULT'"
assert section.get('auth_strategy') == "keystone", \ assert section.get('auth_strategy') == "keystone", \
"Application is not using Keystone" "Application is not using Keystone"

View File

@ -730,6 +730,10 @@ class AMQPContext(OSContextGenerator):
if notification_format: if notification_format:
ctxt['notification_format'] = notification_format ctxt['notification_format'] = notification_format
notification_topics = conf.get('notification-topics', None)
if notification_topics:
ctxt['notification_topics'] = notification_topics
send_notifications_to_logs = conf.get('send-notifications-to-logs', None) send_notifications_to_logs = conf.get('send-notifications-to-logs', None)
if send_notifications_to_logs: if send_notifications_to_logs:
ctxt['send_notifications_to_logs'] = send_notifications_to_logs ctxt['send_notifications_to_logs'] = send_notifications_to_logs
@ -2177,9 +2181,66 @@ class LogrotateContext(OSContextGenerator):
class HostInfoContext(OSContextGenerator): class HostInfoContext(OSContextGenerator):
"""Context to provide host information.""" """Context to provide host information."""
def __init__(self, use_fqdn_hint_cb=None):
    """Initialize HostInfoContext

    :param use_fqdn_hint_cb: Callback whose return value is used to
        populate `use_fqdn_hint`
    :type use_fqdn_hint_cb: Callable[[], bool]
    """
    # Whether to use a FQDN or a shortname can be a deploy-time choice
    # for the workload a charm manages, i.e. behaviour must not change
    # on charm upgrade or post-deployment configuration change.  The
    # callback's result is forwarded as a flag in the context so that
    # the decision can be made in the Jinja2 configuration template.
    self.use_fqdn_hint_cb = use_fqdn_hint_cb
def _get_canonical_name(self, name=None):
    """Return the official FQDN of the host, or '' when unavailable.

    The implementation of ``socket.getfqdn()`` in the standard Python
    library does not exhaust all methods of getting the official name
    of a host (see Python issue https://bugs.python.org/issue5004).
    This mimics the behaviour of a ``hostname -f`` call instead,
    returning an empty string when it is unsuccessful.

    :param name: Shortname to get FQDN on
    :type name: Optional[str]
    :returns: The official FQDN for host or empty string ('')
    :rtype: str
    """
    host = name or socket.gethostname()
    # address lookups raise socket.error on Python 2, OSError on Python 3
    lookup_error = socket.error if six.PY2 else OSError
    try:
        results = socket.getaddrinfo(
            host, None, 0, socket.SOCK_DGRAM, 0, socket.AI_CANONNAME)
    except lookup_error:
        return ''
    for result in results:
        canonical = result[3]
        # only accept a dotted canonical name as an official FQDN
        if canonical and '.' in canonical:
            return canonical
    return ''
def __call__(self): def __call__(self):
name = socket.gethostname()
ctxt = { ctxt = {
'host_fqdn': socket.getfqdn(), 'host_fqdn': self._get_canonical_name(name) or name,
'host': socket.gethostname(), 'host': name,
'use_fqdn_hint': (
self.use_fqdn_hint_cb() if self.use_fqdn_hint_cb else False)
} }
return ctxt return ctxt

View File

@ -21,6 +21,7 @@ import sys
import yaml import yaml
import zipfile import zipfile
import charmhelpers
import charmhelpers.core.hookenv as hookenv import charmhelpers.core.hookenv as hookenv
import charmhelpers.core.host as ch_host import charmhelpers.core.host as ch_host
@ -234,7 +235,10 @@ def maybe_do_policyd_overrides(openstack_release,
blacklist_paths=None, blacklist_paths=None,
blacklist_keys=None, blacklist_keys=None,
template_function=None, template_function=None,
restart_handler=None): restart_handler=None,
user=None,
group=None,
config_changed=False):
"""If the config option is set, get the resource file and process it to """If the config option is set, get the resource file and process it to
enable the policy.d overrides for the service passed. enable the policy.d overrides for the service passed.
@ -263,6 +267,11 @@ def maybe_do_policyd_overrides(openstack_release,
directory. However, for any services where this is buggy then a directory. However, for any services where this is buggy then a
restart_handler can be used to force the policy.d files to be read. restart_handler can be used to force the policy.d files to be read.
If the config_changed param is True, then the handling is slightly
different: It will only perform the policyd overrides if the config is True
and the success file doesn't exist. Otherwise, it does nothing as the
resource file has already been processed.
:param openstack_release: The openstack release that is installed. :param openstack_release: The openstack release that is installed.
:type openstack_release: str :type openstack_release: str
:param service: the service name to construct the policy.d directory for. :param service: the service name to construct the policy.d directory for.
@ -278,18 +287,26 @@ def maybe_do_policyd_overrides(openstack_release,
:param restart_handler: The function to call if the service should be :param restart_handler: The function to call if the service should be
restarted. restarted.
:type restart_handler: Union[None, Callable[]] :type restart_handler: Union[None, Callable[]]
:param user: The user to create/write files/directories as
:type user: Union[None, str]
:param group: the group to create/write files/directories as
:type group: Union[None, str]
:param config_changed: Set to True for config_changed hook.
:type config_changed: bool
""" """
_user = service if user is None else user
_group = service if group is None else group
if not is_policyd_override_valid_on_this_release(openstack_release):
return
hookenv.log("Running maybe_do_policyd_overrides", hookenv.log("Running maybe_do_policyd_overrides",
level=POLICYD_LOG_LEVEL_DEFAULT) level=POLICYD_LOG_LEVEL_DEFAULT)
if not is_policyd_override_valid_on_this_release(openstack_release):
hookenv.log("... policy overrides not valid on this release: {}"
.format(openstack_release),
level=POLICYD_LOG_LEVEL_DEFAULT)
return
config = hookenv.config() config = hookenv.config()
try: try:
if not config.get(POLICYD_CONFIG_NAME, False): if not config.get(POLICYD_CONFIG_NAME, False):
clean_policyd_dir_for(service, blacklist_paths) clean_policyd_dir_for(service,
blacklist_paths,
user=_user,
group=_group)
if (os.path.isfile(_policy_success_file()) and if (os.path.isfile(_policy_success_file()) and
restart_handler is not None and restart_handler is not None and
callable(restart_handler)): callable(restart_handler)):
@ -302,6 +319,12 @@ def maybe_do_policyd_overrides(openstack_release,
import traceback import traceback
hookenv.log(traceback.format_exc(), level=POLICYD_LOG_LEVEL_DEFAULT) hookenv.log(traceback.format_exc(), level=POLICYD_LOG_LEVEL_DEFAULT)
return return
# if the policyd overrides have been performed when doing config_changed
# just return
if config_changed and is_policy_success_file_set():
hookenv.log("... already setup, so skipping.",
level=POLICYD_LOG_LEVEL_DEFAULT)
return
# from now on it should succeed; if it doesn't then status line will show # from now on it should succeed; if it doesn't then status line will show
# broken. # broken.
resource_filename = get_policy_resource_filename() resource_filename = get_policy_resource_filename()
@ -312,63 +335,18 @@ def maybe_do_policyd_overrides(openstack_release,
restart_handler() restart_handler()
def maybe_do_policyd_overrides(openstack_release, @charmhelpers.deprecate("Use maybe_do_policyd_overrides instead")
service, def maybe_do_policyd_overrides_on_config_changed(*args, **kwargs):
blacklist_paths=None, """This function is designed to be called from the config changed hook.
blacklist_keys=None,
template_function=None, DEPRECATED: please use maybe_do_policyd_overrides() with the param
restart_handler=None): `config_changed` as `True`.
"""This function is designed to be called from the config changed hook
handler. It will only perform the policyd overrides if the config is True
and the success file doesn't exist. Otherwise, it does nothing as the
resource file has already been processed.
See maybe_do_policyd_overrides() for more details on the params. See maybe_do_policyd_overrides() for more details on the params.
:param openstack_release: The openstack release that is installed.
:type openstack_release: str
:param service: the service name to construct the policy.d directory for.
:type service: str
:param blacklist_paths: optional list of paths to leave alone
:type blacklist_paths: Union[None, List[str]]
:param blacklist_keys: optional list of keys that mustn't appear in the
yaml file's
:type blacklist_keys: Union[None, List[str]]
:param template_function: Optional function that can modify the string
prior to being processed as a Yaml document.
:type template_function: Union[None, Callable[[str], str]]
:param restart_handler: The function to call if the service should be
restarted.
:type restart_handler: Union[None, Callable[]]
""" """
if not is_policyd_override_valid_on_this_release(openstack_release): if 'config_changed' not in kwargs.keys():
return kwargs['config_changed'] = True
hookenv.log("Running maybe_do_policyd_overrides_on_config_changed", return maybe_do_policyd_overrides(*args, **kwargs)
level=POLICYD_LOG_LEVEL_DEFAULT)
config = hookenv.config()
try:
if not config.get(POLICYD_CONFIG_NAME, False):
clean_policyd_dir_for(service, blacklist_paths)
if (os.path.isfile(_policy_success_file()) and
restart_handler is not None and
callable(restart_handler)):
restart_handler()
remove_policy_success_file()
return
except Exception as e:
hookenv.log("... ERROR: Exception is: {}".format(str(e)),
level=POLICYD_CONFIG_NAME)
import traceback
hookenv.log(traceback.format_exc(), level=POLICYD_LOG_LEVEL_DEFAULT)
return
# if the policyd overrides have been performed just return
if os.path.isfile(_policy_success_file()):
hookenv.log("... already setup, so skipping.",
level=POLICYD_LOG_LEVEL_DEFAULT)
return
maybe_do_policyd_overrides(
openstack_release, service, blacklist_paths, blacklist_keys,
template_function, restart_handler)
def get_policy_resource_filename(): def get_policy_resource_filename():
@ -385,13 +363,16 @@ def get_policy_resource_filename():
@contextlib.contextmanager @contextlib.contextmanager
def open_and_filter_yaml_files(filepath): def open_and_filter_yaml_files(filepath, has_subdirs=False):
"""Validate that the filepath provided is a zip file and contains at least """Validate that the filepath provided is a zip file and contains at least
one (.yaml|.yml) file, and that the files are not duplicated when the zip one (.yaml|.yml) file, and that the files are not duplicated when the zip
file is flattened. Note that the yaml files are not checked. This is the file is flattened. Note that the yaml files are not checked. This is the
first stage in validating the policy zipfile; individual yaml files are not first stage in validating the policy zipfile; individual yaml files are not
checked for validity or black listed keys. checked for validity or black listed keys.
If the has_subdirs param is True, then the files are flattened to the first
directory, and the files in the root are ignored.
An example of use is: An example of use is:
with open_and_filter_yaml_files(some_path) as zfp, g: with open_and_filter_yaml_files(some_path) as zfp, g:
@ -400,6 +381,8 @@ def open_and_filter_yaml_files(filepath):
:param filepath: a filepath object that can be opened by zipfile :param filepath: a filepath object that can be opened by zipfile
:type filepath: Union[AnyStr, os.PathLike[AnyStr]] :type filepath: Union[AnyStr, os.PathLike[AnyStr]]
:param has_subdirs: Keep first level of subdirectories in yaml file.
:type has_subdirs: bool
:returns: (zfp handle, :returns: (zfp handle,
a generator of the (name, filename, ZipInfo object) tuples) as a a generator of the (name, filename, ZipInfo object) tuples) as a
tuple. tuple.
@ -412,7 +395,7 @@ def open_and_filter_yaml_files(filepath):
with zipfile.ZipFile(filepath, 'r') as zfp: with zipfile.ZipFile(filepath, 'r') as zfp:
# first pass through; check for duplicates and at least one yaml file. # first pass through; check for duplicates and at least one yaml file.
names = collections.defaultdict(int) names = collections.defaultdict(int)
yamlfiles = _yamlfiles(zfp) yamlfiles = _yamlfiles(zfp, has_subdirs)
for name, _, _, _ in yamlfiles: for name, _, _, _ in yamlfiles:
names[name] += 1 names[name] += 1
# There must be at least 1 yaml file. # There must be at least 1 yaml file.
@ -428,17 +411,33 @@ def open_and_filter_yaml_files(filepath):
yield (zfp, yamlfiles) yield (zfp, yamlfiles)
def _yamlfiles(zipfile): def _yamlfiles(zipfile, has_subdirs=False):
"""Helper to get a yaml file (according to POLICYD_VALID_EXTS extensions) """Helper to get a yaml file (according to POLICYD_VALID_EXTS extensions)
and the infolist item from a zipfile. and the infolist item from a zipfile.
If the `has_subdirs` param is True, then the only yaml files that have a
directory component are read, and then first part of the directory
component is kept, along with the filename in the name. e.g. an entry with
a filename of:
compute/someotherdir/override.yaml
is returned as:
compute/override, yaml, override.yaml, <ZipInfo object>
This is to help with the special, additional, processing that the dashboard
charm requires.
:param zipfile: the zipfile to read zipinfo items from :param zipfile: the zipfile to read zipinfo items from
:type zipfile: zipfile.ZipFile :type zipfile: zipfile.ZipFile
:returns: generator of (name, ext, filename, info item) for each self-identified :param has_subdirs: Keep first level of subdirectories in yaml file.
yaml file. :type has_subdirs: bool
:returns: generator of (name, ext, filename, info item) for each
self-identified yaml file.
:rtype: List[(str, str, str, zipfile.ZipInfo)] :rtype: List[(str, str, str, zipfile.ZipInfo)]
""" """
l = [] files = []
for infolist_item in zipfile.infolist(): for infolist_item in zipfile.infolist():
try: try:
if infolist_item.is_dir(): if infolist_item.is_dir():
@ -447,12 +446,14 @@ def _yamlfiles(zipfile):
# fallback to "old" way to determine dir entry for pre-py36 # fallback to "old" way to determine dir entry for pre-py36
if infolist_item.filename.endswith('/'): if infolist_item.filename.endswith('/'):
continue continue
_, name_ext = os.path.split(infolist_item.filename) _dir, name_ext = os.path.split(infolist_item.filename)
name, ext = os.path.splitext(name_ext) name, ext = os.path.splitext(name_ext)
if has_subdirs and _dir != "":
name = os.path.join(_dir.split(os.path.sep)[0], name)
ext = ext.lower() ext = ext.lower()
if ext and ext in POLICYD_VALID_EXTS: if ext and ext in POLICYD_VALID_EXTS:
l.append((name, ext, name_ext, infolist_item)) files.append((name, ext, name_ext, infolist_item))
return l return files
def read_and_validate_yaml(stream_or_doc, blacklist_keys=None): def read_and_validate_yaml(stream_or_doc, blacklist_keys=None):
@ -498,9 +499,6 @@ def read_and_validate_yaml(stream_or_doc, blacklist_keys=None):
def policyd_dir_for(service): def policyd_dir_for(service):
"""Return the policy directory for the named service. """Return the policy directory for the named service.
This assumes the default name of "policy.d" which is kept across all
charms.
:param service: str :param service: str
:returns: the policy.d override directory. :returns: the policy.d override directory.
:rtype: os.PathLike[str] :rtype: os.PathLike[str]
@ -508,7 +506,7 @@ def policyd_dir_for(service):
return os.path.join("/", "etc", service, "policy.d") return os.path.join("/", "etc", service, "policy.d")
def clean_policyd_dir_for(service, keep_paths=None): def clean_policyd_dir_for(service, keep_paths=None, user=None, group=None):
"""Clean out the policyd directory except for items that should be kept. """Clean out the policyd directory except for items that should be kept.
The keep_paths, if used, should be set to the full path of the files that The keep_paths, if used, should be set to the full path of the files that
@ -521,11 +519,18 @@ def clean_policyd_dir_for(service, keep_paths=None):
:type service: str :type service: str
:param keep_paths: optional list of paths to not delete. :param keep_paths: optional list of paths to not delete.
:type keep_paths: Union[None, List[str]] :type keep_paths: Union[None, List[str]]
:param user: The user to create/write files/directories as
:type user: Union[None, str]
:param group: the group to create/write files/directories as
:type group: Union[None, str]
""" """
_user = service if user is None else user
_group = service if group is None else group
keep_paths = keep_paths or [] keep_paths = keep_paths or []
path = policyd_dir_for(service) path = policyd_dir_for(service)
hookenv.log("Cleaning path: {}".format(path), level=hookenv.DEBUG)
if not os.path.exists(path): if not os.path.exists(path):
ch_host.mkdir(path, owner=service, group=service, perms=0o775) ch_host.mkdir(path, owner=_user, group=_group, perms=0o775)
_scanner = os.scandir if sys.version_info > (3, 4) else _py2_scandir _scanner = os.scandir if sys.version_info > (3, 4) else _py2_scandir
for direntry in _scanner(path): for direntry in _scanner(path):
# see if the path should be kept. # see if the path should be kept.
@ -538,6 +543,22 @@ def clean_policyd_dir_for(service, keep_paths=None):
os.remove(direntry.path) os.remove(direntry.path)
def maybe_create_directory_for(path, user, group):
    """Ensure that the directory for the filename 'path' exists.

    The directory is only created (perms 0o775, owned by user:group)
    when it is missing; an existing directory's permissions are NOT
    changed.

    :param path: the filename including the path to it.
    :type path: str
    :param user: the user to create the directory as
    :param group: the group to create the directory as
    """
    parent = os.path.dirname(path)
    if os.path.exists(parent):
        return
    ch_host.mkdir(parent, owner=user, group=group, perms=0o775)
@contextlib.contextmanager @contextlib.contextmanager
def _py2_scandir(path): def _py2_scandir(path):
"""provide a py2 implementation of os.scandir if this module ever gets used """provide a py2 implementation of os.scandir if this module ever gets used
@ -573,6 +594,11 @@ def path_for_policy_file(service, name):
It is constructed using policyd_dir_for(), the name and the ".yaml" It is constructed using policyd_dir_for(), the name and the ".yaml"
extension. extension.
For horizon, for example, it's a bit more complicated. The name param is
actually "override_service_dir/a_name", where target_service needs to be
one of the allowed horizon override services. This translation and check is
one the allowed horizon override services. This translation and check is
done in the _yamlfiles() function.
:param service: the service name :param service: the service name
:type service: str :type service: str
:param name: the name for the policy override :param name: the name for the policy override
@ -600,6 +626,22 @@ def remove_policy_success_file():
pass pass
def set_policy_success_file():
    """Set the file that indicates successful policyd override."""
    # create (or truncate) the marker file; its content is irrelevant
    with open(_policy_success_file(), "w"):
        pass
def is_policy_success_file_set():
    """Returns True if the policy success file has been set.

    This indicates that policies are overridden and working properly.

    :returns: True if the policy file is set
    :rtype: bool
    """
    marker = _policy_success_file()
    return os.path.isfile(marker)
def policyd_status_message_prefix(): def policyd_status_message_prefix():
"""Return the prefix str for the status line. """Return the prefix str for the status line.
@ -609,7 +651,7 @@ def policyd_status_message_prefix():
:returns: the prefix :returns: the prefix
:rtype: str :rtype: str
""" """
if os.path.isfile(_policy_success_file()): if is_policy_success_file_set():
return "PO:" return "PO:"
return "PO (broken):" return "PO (broken):"
@ -618,7 +660,11 @@ def process_policy_resource_file(resource_file,
service, service,
blacklist_paths=None, blacklist_paths=None,
blacklist_keys=None, blacklist_keys=None,
template_function=None): template_function=None,
preserve_topdir=False,
preprocess_filename=None,
user=None,
group=None):
"""Process the resource file (which should contain at least one yaml file) """Process the resource file (which should contain at least one yaml file)
and write those files to the service's policy.d directory. and write those files to the service's policy.d directory.
@ -638,6 +684,16 @@ def process_policy_resource_file(resource_file,
its file path reconstructed. This, also, must not match any path in the its file path reconstructed. This, also, must not match any path in the
black list. black list.
The yaml filename can be modified in two ways. If the `preserve_topdir`
param is True, then files will be flattened to the top dir. This allows
for creating sets of files that can be grouped into a single level tree
structure.
Secondly, if the `preprocess_filename` param is not None and callable()
then the name is passed to that function for preprocessing before being
converted to the end location. This is to allow munging of the filename
prior to being tested for a blacklist path.
If any error occurs, then the policy.d directory is cleared, the error is If any error occurs, then the policy.d directory is cleared, the error is
written to the log, and the status line will eventually show as failed. written to the log, and the status line will eventually show as failed.
@ -653,18 +709,39 @@ def process_policy_resource_file(resource_file,
:param template_function: Optional function that can modify the yaml :param template_function: Optional function that can modify the yaml
document. document.
:type template_function: Union[None, Callable[[AnyStr], AnyStr]] :type template_function: Union[None, Callable[[AnyStr], AnyStr]]
:param preserve_topdir: Keep the toplevel subdir
:type preserve_topdir: bool
:param preprocess_filename: Optional function to use to process filenames
extracted from the resource file.
:type preprocess_filename: Union[None, Callable[[AnyStr], AnyStr]]
:param user: The user to create/write files/directories as
:type user: Union[None, str]
:param group: the group to create/write files/directories as
:type group: Union[None, str]
:returns: True if the processing was successful, False if not. :returns: True if the processing was successful, False if not.
:rtype: boolean :rtype: boolean
""" """
hookenv.log("Running process_policy_resource_file", level=hookenv.DEBUG) hookenv.log("Running process_policy_resource_file", level=hookenv.DEBUG)
blacklist_paths = blacklist_paths or [] blacklist_paths = blacklist_paths or []
completed = False completed = False
_preprocess = None
if preprocess_filename is not None and callable(preprocess_filename):
_preprocess = preprocess_filename
_user = service if user is None else user
_group = service if group is None else group
try: try:
with open_and_filter_yaml_files(resource_file) as (zfp, gen): with open_and_filter_yaml_files(
resource_file, preserve_topdir) as (zfp, gen):
# first clear out the policy.d directory and clear success # first clear out the policy.d directory and clear success
remove_policy_success_file() remove_policy_success_file()
clean_policyd_dir_for(service, blacklist_paths) clean_policyd_dir_for(service,
blacklist_paths,
user=_user,
group=_group)
for name, ext, filename, zipinfo in gen: for name, ext, filename, zipinfo in gen:
# See if the name should be preprocessed.
if _preprocess is not None:
name = _preprocess(name)
# construct a name for the output file. # construct a name for the output file.
yaml_filename = path_for_policy_file(service, name) yaml_filename = path_for_policy_file(service, name)
if yaml_filename in blacklist_paths: if yaml_filename in blacklist_paths:
@ -682,8 +759,12 @@ def process_policy_resource_file(resource_file,
"available".format(filename)) "available".format(filename))
doc = template_function(doc) doc = template_function(doc)
yaml_doc = read_and_validate_yaml(doc, blacklist_keys) yaml_doc = read_and_validate_yaml(doc, blacklist_keys)
with open(yaml_filename, "wt") as f: # we may have to create the directory
yaml.dump(yaml_doc, f) maybe_create_directory_for(yaml_filename, _user, _group)
ch_host.write_file(yaml_filename,
yaml.dump(yaml_doc).encode('utf-8'),
_user,
_group)
# Every thing worked, so we mark up a success. # Every thing worked, so we mark up a success.
completed = True completed = True
except (BadZipFile, BadPolicyZipFile, BadPolicyYamlFile) as e: except (BadZipFile, BadPolicyZipFile, BadPolicyYamlFile) as e:
@ -707,10 +788,13 @@ def process_policy_resource_file(resource_file,
hookenv.log("Processing {} failed: cleaning policy.d directory" hookenv.log("Processing {} failed: cleaning policy.d directory"
.format(resource_file), .format(resource_file),
level=POLICYD_LOG_LEVEL_DEFAULT) level=POLICYD_LOG_LEVEL_DEFAULT)
clean_policyd_dir_for(service, blacklist_paths) clean_policyd_dir_for(service,
blacklist_paths,
user=_user,
group=_group)
else: else:
# touch the success filename # touch the success filename
hookenv.log("policy.d overrides installed.", hookenv.log("policy.d overrides installed.",
level=POLICYD_LOG_LEVEL_DEFAULT) level=POLICYD_LOG_LEVEL_DEFAULT)
open(_policy_success_file(), "w").close() set_policy_success_file()
return completed return completed

View File

@ -50,9 +50,14 @@ from charmhelpers.core.hookenv import (
hook_name, hook_name,
application_version_set, application_version_set,
cached, cached,
leader_set,
leader_get,
) )
from charmhelpers.core.strutils import BasicStringComparator from charmhelpers.core.strutils import (
BasicStringComparator,
bool_from_string,
)
from charmhelpers.contrib.storage.linux.lvm import ( from charmhelpers.contrib.storage.linux.lvm import (
deactivate_lvm_volume_group, deactivate_lvm_volume_group,
@ -126,6 +131,7 @@ OPENSTACK_RELEASES = (
'rocky', 'rocky',
'stein', 'stein',
'train', 'train',
'ussuri',
) )
UBUNTU_OPENSTACK_RELEASE = OrderedDict([ UBUNTU_OPENSTACK_RELEASE = OrderedDict([
@ -146,6 +152,7 @@ UBUNTU_OPENSTACK_RELEASE = OrderedDict([
('cosmic', 'rocky'), ('cosmic', 'rocky'),
('disco', 'stein'), ('disco', 'stein'),
('eoan', 'train'), ('eoan', 'train'),
('focal', 'ussuri'),
]) ])
@ -167,6 +174,7 @@ OPENSTACK_CODENAMES = OrderedDict([
('2018.2', 'rocky'), ('2018.2', 'rocky'),
('2019.1', 'stein'), ('2019.1', 'stein'),
('2019.2', 'train'), ('2019.2', 'train'),
('2020.1', 'ussuri'),
]) ])
# The ugly duckling - must list releases oldest to newest # The ugly duckling - must list releases oldest to newest
@ -205,6 +213,8 @@ SWIFT_CODENAMES = OrderedDict([
['2.20.0', '2.21.0']), ['2.20.0', '2.21.0']),
('train', ('train',
['2.22.0', '2.23.0']), ['2.22.0', '2.23.0']),
('ussuri',
['2.24.0']),
]) ])
# >= Liberty version->codename mapping # >= Liberty version->codename mapping
@ -219,6 +229,7 @@ PACKAGE_CODENAMES = {
('18', 'rocky'), ('18', 'rocky'),
('19', 'stein'), ('19', 'stein'),
('20', 'train'), ('20', 'train'),
('21', 'ussuri'),
]), ]),
'neutron-common': OrderedDict([ 'neutron-common': OrderedDict([
('7', 'liberty'), ('7', 'liberty'),
@ -230,6 +241,7 @@ PACKAGE_CODENAMES = {
('13', 'rocky'), ('13', 'rocky'),
('14', 'stein'), ('14', 'stein'),
('15', 'train'), ('15', 'train'),
('16', 'ussuri'),
]), ]),
'cinder-common': OrderedDict([ 'cinder-common': OrderedDict([
('7', 'liberty'), ('7', 'liberty'),
@ -241,6 +253,7 @@ PACKAGE_CODENAMES = {
('13', 'rocky'), ('13', 'rocky'),
('14', 'stein'), ('14', 'stein'),
('15', 'train'), ('15', 'train'),
('16', 'ussuri'),
]), ]),
'keystone': OrderedDict([ 'keystone': OrderedDict([
('8', 'liberty'), ('8', 'liberty'),
@ -252,6 +265,7 @@ PACKAGE_CODENAMES = {
('14', 'rocky'), ('14', 'rocky'),
('15', 'stein'), ('15', 'stein'),
('16', 'train'), ('16', 'train'),
('17', 'ussuri'),
]), ]),
'horizon-common': OrderedDict([ 'horizon-common': OrderedDict([
('8', 'liberty'), ('8', 'liberty'),
@ -263,6 +277,7 @@ PACKAGE_CODENAMES = {
('14', 'rocky'), ('14', 'rocky'),
('15', 'stein'), ('15', 'stein'),
('16', 'train'), ('16', 'train'),
('17', 'ussuri'),
]), ]),
'ceilometer-common': OrderedDict([ 'ceilometer-common': OrderedDict([
('5', 'liberty'), ('5', 'liberty'),
@ -274,6 +289,7 @@ PACKAGE_CODENAMES = {
('11', 'rocky'), ('11', 'rocky'),
('12', 'stein'), ('12', 'stein'),
('13', 'train'), ('13', 'train'),
('14', 'ussuri'),
]), ]),
'heat-common': OrderedDict([ 'heat-common': OrderedDict([
('5', 'liberty'), ('5', 'liberty'),
@ -285,6 +301,7 @@ PACKAGE_CODENAMES = {
('11', 'rocky'), ('11', 'rocky'),
('12', 'stein'), ('12', 'stein'),
('13', 'train'), ('13', 'train'),
('14', 'ussuri'),
]), ]),
'glance-common': OrderedDict([ 'glance-common': OrderedDict([
('11', 'liberty'), ('11', 'liberty'),
@ -296,6 +313,7 @@ PACKAGE_CODENAMES = {
('17', 'rocky'), ('17', 'rocky'),
('18', 'stein'), ('18', 'stein'),
('19', 'train'), ('19', 'train'),
('20', 'ussuri'),
]), ]),
'openstack-dashboard': OrderedDict([ 'openstack-dashboard': OrderedDict([
('8', 'liberty'), ('8', 'liberty'),
@ -307,6 +325,7 @@ PACKAGE_CODENAMES = {
('14', 'rocky'), ('14', 'rocky'),
('15', 'stein'), ('15', 'stein'),
('16', 'train'), ('16', 'train'),
('17', 'ussuri'),
]), ]),
} }
@ -674,7 +693,7 @@ def openstack_upgrade_available(package):
else: else:
try: try:
avail_vers = get_os_version_install_source(src) avail_vers = get_os_version_install_source(src)
except: except Exception:
avail_vers = cur_vers avail_vers = cur_vers
apt.init() apt.init()
return apt.version_compare(avail_vers, cur_vers) >= 1 return apt.version_compare(avail_vers, cur_vers) >= 1
@ -1868,3 +1887,28 @@ def series_upgrade_complete(resume_unit_helper=None, configs=None):
configs.write_all() configs.write_all()
if resume_unit_helper: if resume_unit_helper:
resume_unit_helper(configs) resume_unit_helper(configs)
def is_db_initialised():
"""Check leader storage to see if database has been initialised.
:returns: Whether DB has been initialised
:rtype: bool
"""
db_initialised = None
if leader_get('db-initialised') is None:
juju_log(
'db-initialised key missing, assuming db is not initialised',
'DEBUG')
db_initialised = False
else:
db_initialised = bool_from_string(leader_get('db-initialised'))
juju_log('Database initialised: {}'.format(db_initialised), 'DEBUG')
return db_initialised
def set_db_initialised():
"""Add flag to leader storage to indicate database has been initialised.
"""
juju_log('Setting db-initialised to True', 'DEBUG')
leader_set({'db-initialised': True})

View File

@ -37,9 +37,13 @@ class VaultKVContext(context.OSContextGenerator):
) )
def __call__(self): def __call__(self):
import hvac
ctxt = {}
# NOTE(hopem): see https://bugs.launchpad.net/charm-helpers/+bug/1849323
db = unitdata.kv() db = unitdata.kv()
last_token = db.get('last-token') # currently known-good secret-id
secret_id = db.get('secret-id') secret_id = db.get('secret-id')
for relation_id in hookenv.relation_ids(self.interfaces[0]): for relation_id in hookenv.relation_ids(self.interfaces[0]):
for unit in hookenv.related_units(relation_id): for unit in hookenv.related_units(relation_id):
data = hookenv.relation_get(unit=unit, data = hookenv.relation_get(unit=unit,
@ -54,27 +58,48 @@ class VaultKVContext(context.OSContextGenerator):
# Tokens may change when secret_id's are being # Tokens may change when secret_id's are being
# reissued - if so use token to get new secret_id # reissued - if so use token to get new secret_id
if token != last_token: token_success = False
try:
secret_id = retrieve_secret_id( secret_id = retrieve_secret_id(
url=vault_url, url=vault_url,
token=token token=token
) )
token_success = True
except hvac.exceptions.InvalidRequest:
# Try next
pass
if token_success:
db.set('secret-id', secret_id) db.set('secret-id', secret_id)
db.set('last-token', token)
db.flush() db.flush()
ctxt = { ctxt['vault_url'] = vault_url
'vault_url': vault_url, ctxt['role_id'] = json.loads(role_id)
'role_id': json.loads(role_id), ctxt['secret_id'] = secret_id
'secret_id': secret_id, ctxt['secret_backend'] = self.secret_backend
'secret_backend': self.secret_backend, vault_ca = data.get('vault_ca')
} if vault_ca:
vault_ca = data.get('vault_ca') ctxt['vault_ca'] = json.loads(vault_ca)
if vault_ca:
ctxt['vault_ca'] = json.loads(vault_ca) self.complete = True
self.complete = True break
return ctxt else:
return {} if secret_id:
ctxt['vault_url'] = vault_url
ctxt['role_id'] = json.loads(role_id)
ctxt['secret_id'] = secret_id
ctxt['secret_backend'] = self.secret_backend
vault_ca = data.get('vault_ca')
if vault_ca:
ctxt['vault_ca'] = json.loads(vault_ca)
if self.complete:
break
if ctxt:
self.complete = True
return ctxt
def write_vaultlocker_conf(context, priority=100): def write_vaultlocker_conf(context, priority=100):

View File

@ -34,6 +34,8 @@ import errno
import tempfile import tempfile
from subprocess import CalledProcessError from subprocess import CalledProcessError
from charmhelpers import deprecate
import six import six
if not six.PY3: if not six.PY3:
from UserDict import UserDict from UserDict import UserDict
@ -119,19 +121,19 @@ def log(message, level=None):
raise raise
def action_log(message): def function_log(message):
"""Write an action progress message""" """Write a function progress message"""
command = ['action-log'] command = ['function-log']
if not isinstance(message, six.string_types): if not isinstance(message, six.string_types):
message = repr(message) message = repr(message)
command += [message[:SH_MAX_ARG]] command += [message[:SH_MAX_ARG]]
# Missing action-log should not cause failures in unit tests # Missing function-log should not cause failures in unit tests
# Send action_log output to stderr # Send function_log output to stderr
try: try:
subprocess.call(command) subprocess.call(command)
except OSError as e: except OSError as e:
if e.errno == errno.ENOENT: if e.errno == errno.ENOENT:
message = "action-log: {}".format(message) message = "function-log: {}".format(message)
print(message, file=sys.stderr) print(message, file=sys.stderr)
else: else:
raise raise
@ -964,9 +966,23 @@ def charm_dir():
return os.environ.get('CHARM_DIR') return os.environ.get('CHARM_DIR')
def cmd_exists(cmd):
"""Return True if the specified cmd exists in the path"""
return any(
os.access(os.path.join(path, cmd), os.X_OK)
for path in os.environ["PATH"].split(os.pathsep)
)
@cached @cached
@deprecate("moved to function_get()", log=log)
def action_get(key=None): def action_get(key=None):
"""Gets the value of an action parameter, or all key/value param pairs""" """
.. deprecated:: 0.20.7
Alias for :func:`function_get`.
Gets the value of an action parameter, or all key/value param pairs.
"""
cmd = ['action-get'] cmd = ['action-get']
if key is not None: if key is not None:
cmd.append(key) cmd.append(key)
@ -975,36 +991,103 @@ def action_get(key=None):
return action_data return action_data
@cached
def function_get(key=None):
"""Gets the value of an action parameter, or all key/value param pairs"""
cmd = ['function-get']
# Fallback for older charms.
if not cmd_exists('function-get'):
cmd = ['action-get']
if key is not None:
cmd.append(key)
cmd.append('--format=json')
function_data = json.loads(subprocess.check_output(cmd).decode('UTF-8'))
return function_data
@deprecate("moved to function_set()", log=log)
def action_set(values): def action_set(values):
"""Sets the values to be returned after the action finishes""" """
.. deprecated:: 0.20.7
Alias for :func:`function_set`.
Sets the values to be returned after the action finishes.
"""
cmd = ['action-set'] cmd = ['action-set']
for k, v in list(values.items()): for k, v in list(values.items()):
cmd.append('{}={}'.format(k, v)) cmd.append('{}={}'.format(k, v))
subprocess.check_call(cmd) subprocess.check_call(cmd)
def action_fail(message): def function_set(values):
"""Sets the action status to failed and sets the error message. """Sets the values to be returned after the function finishes"""
cmd = ['function-set']
# Fallback for older charms.
if not cmd_exists('function-get'):
cmd = ['action-set']
The results set by action_set are preserved.""" for k, v in list(values.items()):
cmd.append('{}={}'.format(k, v))
subprocess.check_call(cmd)
@deprecate("moved to function_fail()", log=log)
def action_fail(message):
"""
.. deprecated:: 0.20.7
Alias for :func:`function_fail`.
Sets the action status to failed and sets the error message.
The results set by action_set are preserved.
"""
subprocess.check_call(['action-fail', message]) subprocess.check_call(['action-fail', message])
def function_fail(message):
"""Sets the function status to failed and sets the error message.
The results set by function_set are preserved."""
cmd = ['function-fail']
# Fallback for older charms.
if not cmd_exists('function-fail'):
cmd = ['action-fail']
cmd.append(message)
subprocess.check_call(cmd)
def action_name(): def action_name():
"""Get the name of the currently executing action.""" """Get the name of the currently executing action."""
return os.environ.get('JUJU_ACTION_NAME') return os.environ.get('JUJU_ACTION_NAME')
def function_name():
"""Get the name of the currently executing function."""
return os.environ.get('JUJU_FUNCTION_NAME') or action_name()
def action_uuid(): def action_uuid():
"""Get the UUID of the currently executing action.""" """Get the UUID of the currently executing action."""
return os.environ.get('JUJU_ACTION_UUID') return os.environ.get('JUJU_ACTION_UUID')
def function_id():
"""Get the ID of the currently executing function."""
return os.environ.get('JUJU_FUNCTION_ID') or action_uuid()
def action_tag(): def action_tag():
"""Get the tag for the currently executing action.""" """Get the tag for the currently executing action."""
return os.environ.get('JUJU_ACTION_TAG') return os.environ.get('JUJU_ACTION_TAG')
def function_tag():
"""Get the tag for the currently executing function."""
return os.environ.get('JUJU_FUNCTION_TAG') or action_tag()
def status_set(workload_state, message): def status_set(workload_state, message):
"""Set the workload state with a message """Set the workload state with a message

View File

@ -182,6 +182,14 @@ CLOUD_ARCHIVE_POCKETS = {
'train/proposed': 'bionic-proposed/train', 'train/proposed': 'bionic-proposed/train',
'bionic-train/proposed': 'bionic-proposed/train', 'bionic-train/proposed': 'bionic-proposed/train',
'bionic-proposed/train': 'bionic-proposed/train', 'bionic-proposed/train': 'bionic-proposed/train',
# Ussuri
'ussuri': 'bionic-updates/ussuri',
'bionic-ussuri': 'bionic-updates/ussuri',
'bionic-ussuri/updates': 'bionic-updates/ussuri',
'bionic-updates/ussuri': 'bionic-updates/ussuri',
'ussuri/proposed': 'bionic-proposed/ussuri',
'bionic-ussuri/proposed': 'bionic-proposed/ussuri',
'bionic-proposed/ussuri': 'bionic-proposed/ussuri',
} }

View File

@ -1,4 +1,23 @@
series: bionic variables:
openstack-origin: &openstack-origin distro
series: &series bionic
machines:
0:
constraints: "mem=3072M"
1: {}
2: {}
3: {}
4: {}
5:
constraints: "root-disk=20G mem=4G"
6: {}
7: {}
8: {}
# We specify machine placements for these to improve iteration
# time, given that machine "0" comes up way before machine "7"
applications: applications:
percona-cluster: percona-cluster:
charm: cs:~openstack-charmers-next/percona-cluster charm: cs:~openstack-charmers-next/percona-cluster
@ -7,33 +26,60 @@ applications:
innodb-buffer-pool-size: 256M innodb-buffer-pool-size: 256M
dataset-size: 25% dataset-size: 25%
max-connections: 1000 max-connections: 1000
source: *openstack-origin
to:
- '0'
keystone: keystone:
charm: cs:~openstack-charmers-next/keystone charm: cs:~openstack-charmers-next/keystone
num_units: 1 num_units: 1
options:
openstack-origin: *openstack-origin
to:
- '1'
rabbitmq-server: rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server charm: cs:~openstack-charmers-next/rabbitmq-server
num_units: 1 num_units: 1
options:
source: *openstack-origin
to:
- '2'
glance: glance:
charm: cs:~openstack-charmers-next/glance charm: cs:~openstack-charmers-next/glance
num_units: 1 num_units: 1
options:
openstack-origin: *openstack-origin
to:
- '3'
nova-cloud-controller: nova-cloud-controller:
charm: cs:~openstack-charmers-next/nova-cloud-controller charm: cs:~openstack-charmers-next/nova-cloud-controller
num_units: 1 num_units: 1
options: options:
openstack-origin: *openstack-origin
api-rate-limit-rules: "( POST, '*', .*, 9999, MINUTE );" api-rate-limit-rules: "( POST, '*', .*, 9999, MINUTE );"
network-manager: Neutron network-manager: Neutron
to:
- '4'
nova-compute: nova-compute:
charm: cs:~openstack-charmers-next/nova-compute charm: cs:~openstack-charmers-next/nova-compute
num_units: 1 num_units: 1
options: options:
openstack-origin: *openstack-origin
config-flags: 'auto_assign_floating_ip=False' config-flags: 'auto_assign_floating_ip=False'
enable-live-migration: "False" enable-live-migration: "False"
to:
- '5'
neutron-gateway: neutron-gateway:
charm: cs:~openstack-charmers-next/neutron-gateway charm: cs:~openstack-charmers-next/neutron-gateway
num_units: 1 num_units: 1
options:
openstack-origin: *openstack-origin
neutron-api: neutron-api:
charm: cs:~openstack-charmers-next/neutron-api charm: cs:~openstack-charmers-next/neutron-api
num_units: 1 num_units: 1
options:
openstack-origin: *openstack-origin
to:
- '6'
neutron-openvswitch: neutron-openvswitch:
charm: cs:~openstack-charmers-next/neutron-openvswitch charm: cs:~openstack-charmers-next/neutron-openvswitch
heat: heat:
@ -42,8 +88,12 @@ applications:
series: bionic series: bionic
constraints: mem=2048 constraints: mem=2048
options: options:
openstack-origin: *openstack-origin
debug: "True" debug: "True"
verbose: "True" verbose: "True"
to:
- '7'
- '8'
relations: relations:
- - heat:amqp - - heat:amqp
- rabbitmq-server:amqp - rabbitmq-server:amqp

View File

@ -1,4 +1,23 @@
series: bionic variables:
openstack-origin: &openstack-origin cloud:bionic-rocky
series: &series bionic
machines:
0:
constraints: "mem=3072M"
1: {}
2: {}
3: {}
4: {}
5:
constraints: "root-disk=20G mem=4G"
6: {}
7: {}
8: {}
# We specify machine placements for these to improve iteration
# time, given that machine "0" comes up way before machine "7"
applications: applications:
percona-cluster: percona-cluster:
charm: cs:~openstack-charmers-next/percona-cluster charm: cs:~openstack-charmers-next/percona-cluster
@ -7,46 +26,60 @@ applications:
innodb-buffer-pool-size: 256M innodb-buffer-pool-size: 256M
dataset-size: 25% dataset-size: 25%
max-connections: 1000 max-connections: 1000
source: cloud:bionic-rocky source: *openstack-origin
to:
- '0'
keystone: keystone:
charm: cs:~openstack-charmers-next/keystone charm: cs:~openstack-charmers-next/keystone
num_units: 1 num_units: 1
options: options:
openstack-origin: cloud:bionic-rocky openstack-origin: *openstack-origin
to:
- '1'
rabbitmq-server: rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server charm: cs:~openstack-charmers-next/rabbitmq-server
num_units: 1 num_units: 1
options: options:
source: cloud:bionic-rocky source: *openstack-origin
to:
- '2'
glance: glance:
charm: cs:~openstack-charmers-next/glance charm: cs:~openstack-charmers-next/glance
num_units: 1 num_units: 1
options: options:
openstack-origin: cloud:bionic-rocky openstack-origin: *openstack-origin
to:
- '3'
nova-cloud-controller: nova-cloud-controller:
charm: cs:~openstack-charmers-next/nova-cloud-controller charm: cs:~openstack-charmers-next/nova-cloud-controller
num_units: 1 num_units: 1
options: options:
openstack-origin: cloud:bionic-rocky openstack-origin: *openstack-origin
api-rate-limit-rules: "( POST, '*', .*, 9999, MINUTE );" api-rate-limit-rules: "( POST, '*', .*, 9999, MINUTE );"
network-manager: Neutron network-manager: Neutron
to:
- '4'
nova-compute: nova-compute:
charm: cs:~openstack-charmers-next/nova-compute charm: cs:~openstack-charmers-next/nova-compute
num_units: 1 num_units: 1
options: options:
openstack-origin: cloud:bionic-rocky openstack-origin: *openstack-origin
config-flags: 'auto_assign_floating_ip=False' config-flags: 'auto_assign_floating_ip=False'
enable-live-migration: "False" enable-live-migration: "False"
to:
- '5'
neutron-gateway: neutron-gateway:
charm: cs:~openstack-charmers-next/neutron-gateway charm: cs:~openstack-charmers-next/neutron-gateway
num_units: 1 num_units: 1
options: options:
openstack-origin: cloud:bionic-rocky openstack-origin: *openstack-origin
neutron-api: neutron-api:
charm: cs:~openstack-charmers-next/neutron-api charm: cs:~openstack-charmers-next/neutron-api
num_units: 1 num_units: 1
options: options:
openstack-origin: cloud:bionic-rocky openstack-origin: *openstack-origin
to:
- '6'
neutron-openvswitch: neutron-openvswitch:
charm: cs:~openstack-charmers-next/neutron-openvswitch charm: cs:~openstack-charmers-next/neutron-openvswitch
heat: heat:
@ -55,9 +88,12 @@ applications:
series: bionic series: bionic
constraints: mem=2048 constraints: mem=2048
options: options:
openstack-origin: cloud:bionic-rocky openstack-origin: *openstack-origin
debug: "True" debug: "True"
verbose: "True" verbose: "True"
to:
- '7'
- '8'
relations: relations:
- - heat:amqp - - heat:amqp
- rabbitmq-server:amqp - rabbitmq-server:amqp

View File

@ -1,4 +1,23 @@
series: bionic variables:
openstack-origin: &openstack-origin cloud:bionic-stein
series: &series bionic
machines:
0:
constraints: "mem=3072M"
1: {}
2: {}
3: {}
4: {}
5:
constraints: "root-disk=20G mem=4G"
6: {}
7: {}
8: {}
# We specify machine placements for these to improve iteration
# time, given that machine "0" comes up way before machine "7"
applications: applications:
percona-cluster: percona-cluster:
charm: cs:~openstack-charmers-next/percona-cluster charm: cs:~openstack-charmers-next/percona-cluster
@ -7,46 +26,60 @@ applications:
innodb-buffer-pool-size: 256M innodb-buffer-pool-size: 256M
dataset-size: 25% dataset-size: 25%
max-connections: 1000 max-connections: 1000
source: cloud:bionic-stein source: *openstack-origin
to:
- '0'
keystone: keystone:
charm: cs:~openstack-charmers-next/keystone charm: cs:~openstack-charmers-next/keystone
num_units: 1 num_units: 1
options: options:
openstack-origin: cloud:bionic-stein openstack-origin: *openstack-origin
to:
- '1'
rabbitmq-server: rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server charm: cs:~openstack-charmers-next/rabbitmq-server
num_units: 1 num_units: 1
options: options:
source: cloud:bionic-stein source: *openstack-origin
to:
- '2'
glance: glance:
charm: cs:~openstack-charmers-next/glance charm: cs:~openstack-charmers-next/glance
num_units: 1 num_units: 1
options: options:
openstack-origin: cloud:bionic-stein openstack-origin: *openstack-origin
to:
- '3'
nova-cloud-controller: nova-cloud-controller:
charm: cs:~openstack-charmers-next/nova-cloud-controller charm: cs:~openstack-charmers-next/nova-cloud-controller
num_units: 1 num_units: 1
options: options:
openstack-origin: cloud:bionic-stein openstack-origin: *openstack-origin
api-rate-limit-rules: "( POST, '*', .*, 9999, MINUTE );" api-rate-limit-rules: "( POST, '*', .*, 9999, MINUTE );"
network-manager: Neutron network-manager: Neutron
to:
- '4'
nova-compute: nova-compute:
charm: cs:~openstack-charmers-next/nova-compute charm: cs:~openstack-charmers-next/nova-compute
num_units: 1 num_units: 1
options: options:
openstack-origin: cloud:bionic-stein openstack-origin: *openstack-origin
config-flags: 'auto_assign_floating_ip=False' config-flags: 'auto_assign_floating_ip=False'
enable-live-migration: "False" enable-live-migration: "False"
to:
- '5'
neutron-gateway: neutron-gateway:
charm: cs:~openstack-charmers-next/neutron-gateway charm: cs:~openstack-charmers-next/neutron-gateway
num_units: 1 num_units: 1
options: options:
openstack-origin: cloud:bionic-stein openstack-origin: *openstack-origin
neutron-api: neutron-api:
charm: cs:~openstack-charmers-next/neutron-api charm: cs:~openstack-charmers-next/neutron-api
num_units: 1 num_units: 1
options: options:
openstack-origin: cloud:bionic-stein openstack-origin: *openstack-origin
to:
- '6'
neutron-openvswitch: neutron-openvswitch:
charm: cs:~openstack-charmers-next/neutron-openvswitch charm: cs:~openstack-charmers-next/neutron-openvswitch
heat: heat:
@ -55,9 +88,12 @@ applications:
series: bionic series: bionic
constraints: mem=2048 constraints: mem=2048
options: options:
openstack-origin: cloud:bionic-stein openstack-origin: *openstack-origin
debug: "True" debug: "True"
verbose: "True" verbose: "True"
to:
- '7'
- '8'
relations: relations:
- - heat:amqp - - heat:amqp
- rabbitmq-server:amqp - rabbitmq-server:amqp

View File

@ -1,4 +1,24 @@
series: bionic variables:
openstack-origin: &openstack-origin cloud:bionic-train
series: &series bionic
machines:
0:
constraints: "mem=3072M"
1: {}
2: {}
3: {}
4: {}
5:
constraints: "root-disk=20G mem=4G"
6: {}
7: {}
8: {}
9: {}
# We specify machine placements for these to improve iteration
# time, given that machine "0" comes up way before machine "7"
applications: applications:
percona-cluster: percona-cluster:
charm: cs:~openstack-charmers-next/percona-cluster charm: cs:~openstack-charmers-next/percona-cluster
@ -7,36 +27,48 @@ applications:
innodb-buffer-pool-size: 256M innodb-buffer-pool-size: 256M
dataset-size: 25% dataset-size: 25%
max-connections: 1000 max-connections: 1000
source: cloud:bionic-train source: *openstack-origin
to:
- '0'
keystone: keystone:
charm: cs:~openstack-charmers-next/keystone charm: cs:~openstack-charmers-next/keystone
num_units: 1 num_units: 1
options: options:
openstack-origin: cloud:bionic-train openstack-origin: *openstack-origin
to:
- '1'
rabbitmq-server: rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server charm: cs:~openstack-charmers-next/rabbitmq-server
num_units: 1 num_units: 1
options: options:
source: cloud:bionic-train source: *openstack-origin
to:
- '2'
glance: glance:
charm: cs:~openstack-charmers-next/glance charm: cs:~openstack-charmers-next/glance
num_units: 1 num_units: 1
options: options:
openstack-origin: cloud:bionic-train openstack-origin: *openstack-origin
to:
- '3'
nova-cloud-controller: nova-cloud-controller:
charm: cs:~openstack-charmers-next/nova-cloud-controller charm: cs:~openstack-charmers-next/nova-cloud-controller
num_units: 1 num_units: 1
options: options:
openstack-origin: cloud:bionic-train openstack-origin: *openstack-origin
api-rate-limit-rules: "( POST, '*', .*, 9999, MINUTE );" api-rate-limit-rules: "( POST, '*', .*, 9999, MINUTE );"
network-manager: Neutron network-manager: Neutron
to:
- '4'
nova-compute: nova-compute:
charm: cs:~openstack-charmers-next/nova-compute charm: cs:~openstack-charmers-next/nova-compute
num_units: 1 num_units: 1
options: options:
openstack-origin: cloud:bionic-train openstack-origin: *openstack-origin
config-flags: 'auto_assign_floating_ip=False' config-flags: 'auto_assign_floating_ip=False'
enable-live-migration: "False" enable-live-migration: "False"
to:
- '5'
placement: placement:
charm: cs:~openstack-charmers-next/placement charm: cs:~openstack-charmers-next/placement
num_units: 1 num_units: 1
@ -44,27 +76,34 @@ applications:
options: options:
openstack-origin: cloud:bionic-train openstack-origin: cloud:bionic-train
debug: "True" debug: "True"
to:
- '6'
neutron-gateway: neutron-gateway:
charm: cs:~openstack-charmers-next/neutron-gateway charm: cs:~openstack-charmers-next/neutron-gateway
num_units: 1 num_units: 1
options: options:
openstack-origin: cloud:bionic-train openstack-origin: *openstack-origin
neutron-api: neutron-api:
charm: cs:~openstack-charmers-next/neutron-api charm: cs:~openstack-charmers-next/neutron-api
num_units: 1 num_units: 1
options: options:
openstack-origin: cloud:bionic-train openstack-origin: *openstack-origin
to:
- '7'
neutron-openvswitch: neutron-openvswitch:
charm: cs:~openstack-charmers-next/neutron-openvswitch charm: cs:~openstack-charmers-next/neutron-openvswitch
heat: heat:
charm: ../../../heat charm: ../../../heat
num_units: 1 num_units: 2
series: bionic series: bionic
constraints: mem=2048 constraints: mem=2048
options: options:
openstack-origin: cloud:bionic-train openstack-origin: *openstack-origin
debug: "True" debug: "True"
verbose: "True" verbose: "True"
to:
- '8'
- '9'
relations: relations:
- - heat:amqp - - heat:amqp
- rabbitmq-server:amqp - rabbitmq-server:amqp

View File

@ -1,105 +0,0 @@
series: disco
applications:
percona-cluster:
charm: cs:~openstack-charmers-next/percona-cluster
num_units: 1
options:
innodb-buffer-pool-size: 256M
dataset-size: 25%
max-connections: 1000
source: distro
keystone:
charm: cs:~openstack-charmers-next/keystone
num_units: 1
options:
openstack-origin: distro
rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server
num_units: 1
options:
source: distro
glance:
charm: cs:~openstack-charmers-next/glance
num_units: 1
options:
openstack-origin: distro
nova-cloud-controller:
charm: cs:~openstack-charmers-next/nova-cloud-controller
num_units: 1
options:
openstack-origin: distro
api-rate-limit-rules: "( POST, '*', .*, 9999, MINUTE );"
network-manager: Neutron
nova-compute:
charm: cs:~openstack-charmers-next/nova-compute
num_units: 1
options:
openstack-origin: distro
config-flags: 'auto_assign_floating_ip=False'
enable-live-migration: "False"
neutron-gateway:
charm: cs:~openstack-charmers-next/neutron-gateway
num_units: 1
options:
openstack-origin: distro
neutron-api:
charm: cs:~openstack-charmers-next/neutron-api
num_units: 1
options:
openstack-origin: distro
neutron-openvswitch:
charm: cs:~openstack-charmers-next/neutron-openvswitch
heat:
charm: ../../../heat
num_units: 2
series: disco
constraints: mem=2048
options:
openstack-origin: distro
debug: "True"
verbose: "True"
relations:
- - heat:amqp
- rabbitmq-server:amqp
- - heat:identity-service
- keystone:identity-service
- - heat:shared-db
- percona-cluster:shared-db
- - nova-compute:image-service
- glance:image-service
- - nova-compute:amqp
- rabbitmq-server:amqp
- - nova-cloud-controller:shared-db
- percona-cluster:shared-db
- - nova-cloud-controller:identity-service
- keystone:identity-service
- - nova-cloud-controller:amqp
- rabbitmq-server:amqp
- - nova-cloud-controller:cloud-compute
- nova-compute:cloud-compute
- - nova-cloud-controller:image-service
- glance:image-service
- - keystone:shared-db
- percona-cluster:shared-db
- - glance:identity-service
- keystone:identity-service
- - glance:shared-db
- percona-cluster:shared-db
- - glance:amqp
- rabbitmq-server:amqp
- - neutron-gateway:amqp
- rabbitmq-server:amqp
- - nova-cloud-controller:quantum-network-service
- neutron-gateway:quantum-network-service
- - neutron-api:shared-db
- percona-cluster:shared-db
- - neutron-api:amqp
- rabbitmq-server:amqp
- - neutron-api:neutron-api
- nova-cloud-controller:neutron-api
- - neutron-api:identity-service
- keystone:identity-service
- - nova-compute:neutron-plugin
- neutron-openvswitch:neutron-plugin
- - rabbitmq-server:amqp
- neutron-openvswitch:amqp

View File

@ -1,4 +1,22 @@
series: trusty variables:
openstack-origin: &openstack-origin cloud:trusty-mitaka
series: &series trusty
machines:
0:
constraints: "mem=3072M"
1: {}
2: {}
3: {}
4: {}
5:
constraints: "root-disk=20G mem=4G"
6: {}
7: {}
# We specify machine placements for these to improve iteration
# time, given that machine "0" comes up way before machine "7"
applications: applications:
percona-cluster: percona-cluster:
charm: cs:trusty/percona-cluster charm: cs:trusty/percona-cluster
@ -6,44 +24,59 @@ applications:
options: options:
innodb-buffer-pool-size: 256M innodb-buffer-pool-size: 256M
max-connections: 1000 max-connections: 1000
source: cloud:trusty-mitaka source: *openstack-origin
to:
- '0'
keystone: keystone:
charm: cs:~openstack-charmers-next/keystone charm: cs:~openstack-charmers-next/keystone
num_units: 1 num_units: 1
options: options:
openstack-origin: cloud:trusty-mitaka openstack-origin: *openstack-origin
to:
- '1'
rabbitmq-server: rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server charm: cs:~openstack-charmers-next/rabbitmq-server
num_units: 1 num_units: 1
options: options:
source: cloud:trusty-mitaka source: *openstack-origin
to:
- '2'
glance: glance:
charm: cs:~openstack-charmers-next/glance charm: cs:~openstack-charmers-next/glance
num_units: 1 num_units: 1
options: options:
openstack-origin: cloud:trusty-mitaka openstack-origin: *openstack-origin
to:
- '3'
nova-cloud-controller: nova-cloud-controller:
charm: cs:~openstack-charmers-next/nova-cloud-controller charm: cs:~openstack-charmers-next/nova-cloud-controller
num_units: 1 num_units: 1
options: options:
openstack-origin: cloud:trusty-mitaka openstack-origin: *openstack-origin
api-rate-limit-rules: "( POST, '*', .*, 9999, MINUTE );" api-rate-limit-rules: "( POST, '*', .*, 9999, MINUTE );"
to:
- '4'
nova-compute: nova-compute:
charm: cs:~openstack-charmers-next/nova-compute charm: cs:~openstack-charmers-next/nova-compute
num_units: 1 num_units: 1
options: options:
openstack-origin: cloud:trusty-mitaka openstack-origin: *openstack-origin
config-flags: 'auto_assign_floating_ip=False' config-flags: 'auto_assign_floating_ip=False'
enable-live-migration: "False" enable-live-migration: "False"
to:
- '5'
heat: heat:
charm: ../../../heat charm: ../../../heat
num_units: 2 num_units: 2
series: trusty series: trusty
constraints: mem=2048 constraints: mem=2048
options: options:
openstack-origin: cloud:trusty-mitaka openstack-origin: *openstack-origin
debug: "True" debug: "True"
verbose: "True" verbose: "True"
to:
- '6'
- '7'
relations: relations:
- - heat:amqp - - heat:amqp
- rabbitmq-server:amqp - rabbitmq-server:amqp

View File

@ -1,4 +1,22 @@
series: xenial variables:
openstack-origin: &openstack-origin distro
series: &series xenial
machines:
0:
constraints: "mem=3072M"
1: {}
2: {}
3: {}
4: {}
5:
constraints: "root-disk=20G mem=4G"
6: {}
7: {}
# We specify machine placements for these to improve iteration
# time, given that machine "0" comes up way before machine "7"
applications: applications:
percona-cluster: percona-cluster:
charm: cs:~openstack-charmers-next/percona-cluster charm: cs:~openstack-charmers-next/percona-cluster
@ -7,26 +25,47 @@ applications:
innodb-buffer-pool-size: 256M innodb-buffer-pool-size: 256M
dataset-size: 25% dataset-size: 25%
max-connections: 1000 max-connections: 1000
source: *openstack-origin
to:
- '0'
keystone: keystone:
charm: cs:~openstack-charmers-next/keystone charm: cs:~openstack-charmers-next/keystone
num_units: 1 num_units: 1
options:
openstack-origin: *openstack-origin
to:
- '1'
rabbitmq-server: rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server charm: cs:~openstack-charmers-next/rabbitmq-server
num_units: 1 num_units: 1
options:
source: *openstack-origin
to:
- '2'
glance: glance:
charm: cs:~openstack-charmers-next/glance charm: cs:~openstack-charmers-next/glance
num_units: 1 num_units: 1
options:
openstack-origin: *openstack-origin
to:
- '3'
nova-cloud-controller: nova-cloud-controller:
charm: cs:~openstack-charmers-next/nova-cloud-controller charm: cs:~openstack-charmers-next/nova-cloud-controller
num_units: 1 num_units: 1
options: options:
openstack-origin: *openstack-origin
api-rate-limit-rules: "( POST, '*', .*, 9999, MINUTE );" api-rate-limit-rules: "( POST, '*', .*, 9999, MINUTE );"
to:
- '4'
nova-compute: nova-compute:
charm: cs:~openstack-charmers-next/nova-compute charm: cs:~openstack-charmers-next/nova-compute
num_units: 1 num_units: 1
options: options:
openstack-origin: *openstack-origin
config-flags: 'auto_assign_floating_ip=False' config-flags: 'auto_assign_floating_ip=False'
enable-live-migration: "False" enable-live-migration: "False"
to:
- '5'
heat: heat:
charm: ../../../heat charm: ../../../heat
num_units: 2 num_units: 2
@ -35,6 +74,9 @@ applications:
options: options:
debug: "True" debug: "True"
verbose: "True" verbose: "True"
to:
- '6'
- '7'
relations: relations:
- - heat:amqp - - heat:amqp
- rabbitmq-server:amqp - rabbitmq-server:amqp

View File

@ -1,4 +1,23 @@
series: xenial variables:
openstack-origin: &openstack-origin cloud:xenial-ocata
series: &series xenial
machines:
0:
constraints: "mem=3072M"
1: {}
2: {}
3: {}
4: {}
5:
constraints: "root-disk=20G mem=4G"
6: {}
7: {}
8: {}
# We specify machine placements for these to improve iteration
# time, given that machine "0" comes up way before machine "7"
applications: applications:
percona-cluster: percona-cluster:
charm: cs:~openstack-charmers-next/percona-cluster charm: cs:~openstack-charmers-next/percona-cluster
@ -7,46 +26,60 @@ applications:
innodb-buffer-pool-size: 256M innodb-buffer-pool-size: 256M
dataset-size: 25% dataset-size: 25%
max-connections: 1000 max-connections: 1000
source: cloud:xenial-ocata source: *openstack-origin
to:
- '0'
keystone: keystone:
charm: cs:~openstack-charmers-next/keystone charm: cs:~openstack-charmers-next/keystone
num_units: 1 num_units: 1
options: options:
openstack-origin: cloud:xenial-ocata openstack-origin: *openstack-origin
to:
- '1'
rabbitmq-server: rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server charm: cs:~openstack-charmers-next/rabbitmq-server
num_units: 1 num_units: 1
options: options:
source: cloud:xenial-ocata source: *openstack-origin
to:
- '2'
glance: glance:
charm: cs:~openstack-charmers-next/glance charm: cs:~openstack-charmers-next/glance
num_units: 1 num_units: 1
options: options:
openstack-origin: cloud:xenial-ocata openstack-origin: *openstack-origin
to:
- '3'
nova-cloud-controller: nova-cloud-controller:
charm: cs:~openstack-charmers-next/nova-cloud-controller charm: cs:~openstack-charmers-next/nova-cloud-controller
num_units: 1 num_units: 1
options: options:
openstack-origin: cloud:xenial-ocata openstack-origin: *openstack-origin
api-rate-limit-rules: "( POST, '*', .*, 9999, MINUTE );" api-rate-limit-rules: "( POST, '*', .*, 9999, MINUTE );"
network-manager: Neutron network-manager: Neutron
to:
- '4'
nova-compute: nova-compute:
charm: cs:~openstack-charmers-next/nova-compute charm: cs:~openstack-charmers-next/nova-compute
num_units: 1 num_units: 1
options: options:
openstack-origin: cloud:xenial-ocata openstack-origin: *openstack-origin
config-flags: 'auto_assign_floating_ip=False' config-flags: 'auto_assign_floating_ip=False'
enable-live-migration: "False" enable-live-migration: "False"
to:
- '5'
neutron-gateway: neutron-gateway:
charm: cs:~openstack-charmers-next/neutron-gateway charm: cs:~openstack-charmers-next/neutron-gateway
num_units: 1 num_units: 1
options: options:
openstack-origin: cloud:xenial-ocata openstack-origin: *openstack-origin
neutron-api: neutron-api:
charm: cs:~openstack-charmers-next/neutron-api charm: cs:~openstack-charmers-next/neutron-api
num_units: 1 num_units: 1
options: options:
openstack-origin: cloud:xenial-ocata openstack-origin: *openstack-origin
to:
- '6'
neutron-openvswitch: neutron-openvswitch:
charm: cs:~openstack-charmers-next/neutron-openvswitch charm: cs:~openstack-charmers-next/neutron-openvswitch
heat: heat:
@ -55,9 +88,12 @@ applications:
series: xenial series: xenial
constraints: mem=2048 constraints: mem=2048
options: options:
openstack-origin: cloud:xenial-ocata openstack-origin: *openstack-origin
debug: "True" debug: "True"
verbose: "True" verbose: "True"
to:
- '7'
- '8'
relations: relations:
- - heat:amqp - - heat:amqp
- rabbitmq-server:amqp - rabbitmq-server:amqp

View File

@ -1,4 +1,23 @@
series: xenial variables:
openstack-origin: &openstack-origin cloud:xenial-pike
series: &series xenial
machines:
0:
constraints: "mem=3072M"
1: {}
2: {}
3: {}
4: {}
5:
constraints: "root-disk=20G mem=4G"
6: {}
7: {}
8: {}
# We specify machine placements for these to improve iteration
# time, given that machine "0" comes up way before machine "7"
applications: applications:
percona-cluster: percona-cluster:
charm: cs:~openstack-charmers-next/percona-cluster charm: cs:~openstack-charmers-next/percona-cluster
@ -7,46 +26,60 @@ applications:
innodb-buffer-pool-size: 256M innodb-buffer-pool-size: 256M
dataset-size: 25% dataset-size: 25%
max-connections: 1000 max-connections: 1000
source: cloud:xenial-pike source: *openstack-origin
to:
- '0'
keystone: keystone:
charm: cs:~openstack-charmers-next/keystone charm: cs:~openstack-charmers-next/keystone
num_units: 1 num_units: 1
options: options:
openstack-origin: cloud:xenial-pike openstack-origin: *openstack-origin
to:
- '1'
rabbitmq-server: rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server charm: cs:~openstack-charmers-next/rabbitmq-server
num_units: 1 num_units: 1
options: options:
source: cloud:xenial-pike source: *openstack-origin
to:
- '2'
glance: glance:
charm: cs:~openstack-charmers-next/glance charm: cs:~openstack-charmers-next/glance
num_units: 1 num_units: 1
options: options:
openstack-origin: cloud:xenial-pike openstack-origin: *openstack-origin
to:
- '3'
nova-cloud-controller: nova-cloud-controller:
charm: cs:~openstack-charmers-next/nova-cloud-controller charm: cs:~openstack-charmers-next/nova-cloud-controller
num_units: 1 num_units: 1
options: options:
openstack-origin: cloud:xenial-pike openstack-origin: *openstack-origin
api-rate-limit-rules: "( POST, '*', .*, 9999, MINUTE );" api-rate-limit-rules: "( POST, '*', .*, 9999, MINUTE );"
network-manager: Neutron network-manager: Neutron
to:
- '4'
nova-compute: nova-compute:
charm: cs:~openstack-charmers-next/nova-compute charm: cs:~openstack-charmers-next/nova-compute
num_units: 1 num_units: 1
options: options:
openstack-origin: cloud:xenial-pike openstack-origin: *openstack-origin
config-flags: 'auto_assign_floating_ip=False' config-flags: 'auto_assign_floating_ip=False'
enable-live-migration: "False" enable-live-migration: "False"
to:
- '5'
neutron-gateway: neutron-gateway:
charm: cs:~openstack-charmers-next/neutron-gateway charm: cs:~openstack-charmers-next/neutron-gateway
num_units: 1 num_units: 1
options: options:
openstack-origin: cloud:xenial-pike openstack-origin: *openstack-origin
neutron-api: neutron-api:
charm: cs:~openstack-charmers-next/neutron-api charm: cs:~openstack-charmers-next/neutron-api
num_units: 1 num_units: 1
options: options:
openstack-origin: cloud:xenial-pike openstack-origin: *openstack-origin
to:
- '6'
neutron-openvswitch: neutron-openvswitch:
charm: cs:~openstack-charmers-next/neutron-openvswitch charm: cs:~openstack-charmers-next/neutron-openvswitch
heat: heat:
@ -55,9 +88,12 @@ applications:
series: xenial series: xenial
constraints: mem=2048 constraints: mem=2048
options: options:
openstack-origin: cloud:xenial-pike openstack-origin: *openstack-origin
debug: "True" debug: "True"
verbose: "True" verbose: "True"
to:
- '7'
- '8'
relations: relations:
- - heat:amqp - - heat:amqp
- rabbitmq-server:amqp - rabbitmq-server:amqp

View File

@ -1,4 +1,23 @@
series: xenial variables:
openstack-origin: &openstack-origin cloud:xenial-queens
series: &series xenial
machines:
0:
constraints: "mem=3072M"
1: {}
2: {}
3: {}
4: {}
5:
constraints: "root-disk=20G mem=4G"
6: {}
7: {}
8: {}
# We specify machine placements for these to improve iteration
# time, given that machine "0" comes up way before machine "7"
applications: applications:
percona-cluster: percona-cluster:
charm: cs:~openstack-charmers-next/percona-cluster charm: cs:~openstack-charmers-next/percona-cluster
@ -7,46 +26,60 @@ applications:
innodb-buffer-pool-size: 256M innodb-buffer-pool-size: 256M
dataset-size: 25% dataset-size: 25%
max-connections: 1000 max-connections: 1000
source: cloud:xenial-queens source: *openstack-origin
to:
- '0'
keystone: keystone:
charm: cs:~openstack-charmers-next/keystone charm: cs:~openstack-charmers-next/keystone
num_units: 1 num_units: 1
options: options:
openstack-origin: cloud:xenial-queens openstack-origin: *openstack-origin
to:
- '1'
rabbitmq-server: rabbitmq-server:
charm: cs:~openstack-charmers-next/rabbitmq-server charm: cs:~openstack-charmers-next/rabbitmq-server
num_units: 1 num_units: 1
options: options:
source: cloud:xenial-queens source: *openstack-origin
to:
- '2'
glance: glance:
charm: cs:~openstack-charmers-next/glance charm: cs:~openstack-charmers-next/glance
num_units: 1 num_units: 1
options: options:
openstack-origin: cloud:xenial-queens openstack-origin: *openstack-origin
to:
- '3'
nova-cloud-controller: nova-cloud-controller:
charm: cs:~openstack-charmers-next/nova-cloud-controller charm: cs:~openstack-charmers-next/nova-cloud-controller
num_units: 1 num_units: 1
options: options:
openstack-origin: cloud:xenial-queens openstack-origin: *openstack-origin
api-rate-limit-rules: "( POST, '*', .*, 9999, MINUTE );" api-rate-limit-rules: "( POST, '*', .*, 9999, MINUTE );"
network-manager: Neutron network-manager: Neutron
to:
- '4'
nova-compute: nova-compute:
charm: cs:~openstack-charmers-next/nova-compute charm: cs:~openstack-charmers-next/nova-compute
num_units: 1 num_units: 1
options: options:
openstack-origin: cloud:xenial-queens openstack-origin: *openstack-origin
config-flags: 'auto_assign_floating_ip=False' config-flags: 'auto_assign_floating_ip=False'
enable-live-migration: "False" enable-live-migration: "False"
to:
- '5'
neutron-gateway: neutron-gateway:
charm: cs:~openstack-charmers-next/neutron-gateway charm: cs:~openstack-charmers-next/neutron-gateway
num_units: 1 num_units: 1
options: options:
openstack-origin: cloud:xenial-queens openstack-origin: *openstack-origin
neutron-api: neutron-api:
charm: cs:~openstack-charmers-next/neutron-api charm: cs:~openstack-charmers-next/neutron-api
num_units: 1 num_units: 1
options: options:
openstack-origin: cloud:xenial-queens openstack-origin: *openstack-origin
to:
- '6'
neutron-openvswitch: neutron-openvswitch:
charm: cs:~openstack-charmers-next/neutron-openvswitch charm: cs:~openstack-charmers-next/neutron-openvswitch
heat: heat:
@ -55,9 +88,12 @@ applications:
series: xenial series: xenial
constraints: mem=2048 constraints: mem=2048
options: options:
openstack-origin: cloud:xenial-queens openstack-origin: *openstack-origin
debug: "True" debug: "True"
verbose: "True" verbose: "True"
to:
- '7'
- '8'
relations: relations:
- - heat:amqp - - heat:amqp
- rabbitmq-server:amqp - rabbitmq-server:amqp

View File

@ -11,7 +11,6 @@ gate_bundles:
- bionic-rocky - bionic-rocky
- bionic-stein - bionic-stein
- bionic-train - bionic-train
- disco-stein
dev_bundles: dev_bundles:
- bionic-train - bionic-train
configure: configure: