From f3aafbb1829f4e9a3052a5104f1824ecacebafd7 Mon Sep 17 00:00:00 2001 From: Liam Young Date: Thu, 16 Jan 2020 15:01:25 +0000 Subject: [PATCH] Sync charmhelpers Change-Id: I9544ecb2ecb5306be30d915835f60e48315858d3 --- charmhelpers/contrib/charmsupport/nrpe.py | 8 +- .../audits/openstack_security_guide.py | 4 +- charmhelpers/contrib/openstack/context.py | 69 +++- charmhelpers/contrib/openstack/ha/utils.py | 3 +- charmhelpers/contrib/openstack/policyd.py | 298 +++++++++++------- .../openstack/templates/section-placement | 19 ++ charmhelpers/contrib/openstack/utils.py | 45 ++- charmhelpers/contrib/openstack/vaultlocker.py | 55 +++- charmhelpers/contrib/storage/linux/ceph.py | 35 +- charmhelpers/core/hookenv.py | 111 ++++++- 10 files changed, 494 insertions(+), 153 deletions(-) create mode 100644 charmhelpers/contrib/openstack/templates/section-placement diff --git a/charmhelpers/contrib/charmsupport/nrpe.py b/charmhelpers/contrib/charmsupport/nrpe.py index 2a3ef459..d775861b 100644 --- a/charmhelpers/contrib/charmsupport/nrpe.py +++ b/charmhelpers/contrib/charmsupport/nrpe.py @@ -485,12 +485,16 @@ def add_haproxy_checks(nrpe, unit_name): description='Check HAProxy queue depth {%s}' % unit_name, check_cmd='check_haproxy_queue_depth.sh') + def remove_deprecated_check(nrpe, deprecated_services): """ Remove checks fro deprecated services in list - :param NRPE nrpe: NRPE object to remove check from + :param nrpe: NRPE object to remove check from + :type nrpe: NRPE + :param deprecated_services: List of deprecated services that are removed + :type deprecated_services: list """ for dep_svc in deprecated_services: log('Deprecated service: {}'.format(dep_svc)) - nrpe.remove_check(shortname=dep_svc) \ No newline at end of file + nrpe.remove_check(shortname=dep_svc) diff --git a/charmhelpers/contrib/openstack/audits/openstack_security_guide.py b/charmhelpers/contrib/openstack/audits/openstack_security_guide.py index b7b8a60f..79740ed0 100644 --- a/charmhelpers/contrib/openstack/audits/openstack_security_guide.py +++ b/charmhelpers/contrib/openstack/audits/openstack_security_guide.py @@ -244,8 +244,8 @@ def validate_file_permissions(config): @audit(is_audit_type(AuditType.OpenStackSecurityGuide)) def validate_uses_keystone(audit_options): """Validate that the service uses Keystone for authentication.""" - section = _config_section(audit_options, 'DEFAULT') - assert section is not None, "Missing section 'DEFAULT'" + section = _config_section(audit_options, 'api') or _config_section(audit_options, 'DEFAULT') + assert section is not None, "Missing section 'api / DEFAULT'" assert section.get('auth_strategy') == "keystone", \ "Application is not using Keystone" diff --git a/charmhelpers/contrib/openstack/context.py b/charmhelpers/contrib/openstack/context.py index a3d48c41..e99aba47 100644 --- a/charmhelpers/contrib/openstack/context.py +++ b/charmhelpers/contrib/openstack/context.py @@ -730,6 +730,10 @@ class AMQPContext(OSContextGenerator): if notification_format: ctxt['notification_format'] = notification_format + notification_topics = conf.get('notification-topics', None) + if notification_topics: + ctxt['notification_topics'] = notification_topics + send_notifications_to_logs = conf.get('send-notifications-to-logs', None) if send_notifications_to_logs: ctxt['send_notifications_to_logs'] = send_notifications_to_logs @@ -1940,7 +1944,7 @@ class VolumeAPIContext(InternalEndpointContext): as well as the catalog_info string that would be supplied. 
Returns a dict containing the volume_api_version and the volume_catalog_info. """ - rel = os_release(self.pkg, base='icehouse') + rel = os_release(self.pkg) version = '2' if CompareOpenStackReleases(rel) >= 'pike': version = '3' @@ -2140,7 +2144,7 @@ class VersionsContext(OSContextGenerator): self.pkg = pkg def __call__(self): - ostack = os_release(self.pkg, base='icehouse') + ostack = os_release(self.pkg) osystem = lsb_release()['DISTRIB_CODENAME'].lower() return { 'openstack_release': ostack, @@ -2177,9 +2181,66 @@ class LogrotateContext(OSContextGenerator): class HostInfoContext(OSContextGenerator): """Context to provide host information.""" + def __init__(self, use_fqdn_hint_cb=None): + """Initialize HostInfoContext + + :param use_fqdn_hint_cb: Callback whose return value used to populate + `use_fqdn_hint` + :type use_fqdn_hint_cb: Callable[[], bool] + """ + # Store callback used to get hint for whether FQDN should be used + + # Depending on the workload a charm manages, the use of FQDN vs. + # shortname may be a deploy-time decision, i.e. behaviour can not + # change on charm upgrade or post-deployment configuration change. + + # The hint is passed on as a flag in the context to allow the decision + # to be made in the Jinja2 configuration template. + self.use_fqdn_hint_cb = use_fqdn_hint_cb + + def _get_canonical_name(self, name=None): + """Get the official FQDN of the host + + The implementation of ``socket.getfqdn()`` in the standard Python + library does not exhaust all methods of getting the official name + of a host ref Python issue https://bugs.python.org/issue5004 + + This function mimics the behaviour of a call to ``hostname -f`` to + get the official FQDN but returns an empty string if it is + unsuccessful. + + :param name: Shortname to get FQDN on + :type name: Optional[str] + :returns: The official FQDN for host or empty string ('') + :rtype: str + """ + name = name or socket.gethostname() + fqdn = '' + + if six.PY2: + exc = socket.error + else: + exc = OSError + + try: + addrs = socket.getaddrinfo( + name, None, 0, socket.SOCK_DGRAM, 0, socket.AI_CANONNAME) + except exc: + pass + else: + for addr in addrs: + if addr[3]: + if '.' 
in addr[3]: + fqdn = addr[3] + break + return fqdn + def __call__(self): + name = socket.gethostname() ctxt = { - 'host_fqdn': socket.getfqdn(), - 'host': socket.gethostname(), + 'host_fqdn': self._get_canonical_name(name) or name, + 'host': name, + 'use_fqdn_hint': ( + self.use_fqdn_hint_cb() if self.use_fqdn_hint_cb else False) } return ctxt diff --git a/charmhelpers/contrib/openstack/ha/utils.py b/charmhelpers/contrib/openstack/ha/utils.py index e017bc20..a5cbdf53 100644 --- a/charmhelpers/contrib/openstack/ha/utils.py +++ b/charmhelpers/contrib/openstack/ha/utils.py @@ -157,10 +157,11 @@ def generate_ha_relation_data(service, _relation_data = {'resources': {}, 'resource_params': {}} if haproxy_enabled: + _meta = 'meta migration-threshold="INFINITY" failure-timeout="5s"' _haproxy_res = 'res_{}_haproxy'.format(service) _relation_data['resources'] = {_haproxy_res: 'lsb:haproxy'} _relation_data['resource_params'] = { - _haproxy_res: 'op monitor interval="5s"' + _haproxy_res: '{} op monitor interval="5s"'.format(_meta) } _relation_data['init_services'] = {_haproxy_res: 'haproxy'} _relation_data['clones'] = { diff --git a/charmhelpers/contrib/openstack/policyd.py b/charmhelpers/contrib/openstack/policyd.py index 6541146f..d89d2cca 100644 --- a/charmhelpers/contrib/openstack/policyd.py +++ b/charmhelpers/contrib/openstack/policyd.py @@ -17,9 +17,11 @@ import contextlib import os import six import shutil +import sys import yaml import zipfile +import charmhelpers import charmhelpers.core.hookenv as hookenv import charmhelpers.core.host as ch_host @@ -115,8 +117,8 @@ library for further details). default: False description: | If True then use the resource file named 'policyd-override' to install - override yaml files in the service's policy.d directory. The resource - file should be a zip file containing at least one yaml file with a .yaml + override YAML files in the service's policy.d directory. The resource + file should be a ZIP file containing at least one yaml file with a .yaml or .yml extension. If False then remove the overrides. """ @@ -134,14 +136,14 @@ resources: Policy Overrides ---------------- -This service allows for policy overrides using the `policy.d` directory. This -is an **advanced** feature and the policies that the service supports should be -clearly and unambiguously understood before trying to override, or add to, the -default policies that the service uses. +This feature allows for policy overrides using the `policy.d` directory. This +is an **advanced** feature and the policies that the OpenStack service supports +should be clearly and unambiguously understood before trying to override, or +add to, the default policies that the service uses. The charm also has some +policy defaults. They should also be understood before being overridden. -The charm also has some policy defaults. They should also be understood before -being overridden. It is possible to break the system (for tenants and other -services) if policies are incorrectly applied to the service. +> **Caution**: It is possible to break the system (for tenants and other + services) if policies are incorrectly applied to the service. Policy overrides are YAML files that contain rules that will add to, or override, existing policy rules in the service. The `policy.d` directory is @@ -149,30 +151,16 @@ a place to put the YAML override files. This charm owns the `/etc/keystone/policy.d` directory, and as such, any manual changes to it will be overwritten on charm upgrades. 
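As a rough sketch of how a charm might wire the override handling described above into its hooks: the release name, service name and hook function below are illustrative assumptions, not part of this sync; only maybe_do_policyd_overrides() and its config_changed flag come from the module itself.

    # Illustrative sketch only. maybe_do_policyd_overrides() is provided by
    # this module; the release, service name and hook wiring are assumed.
    from charmhelpers.contrib.openstack import policyd

    def config_changed():
        policyd.maybe_do_policyd_overrides(
            'train',              # installed OpenStack release (assumed)
            'keystone',           # service owning /etc/keystone/policy.d (assumed)
            config_changed=True,  # replaces the deprecated
        )                         # maybe_do_policyd_overrides_on_config_changed()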
-Policy overrides are provided to the charm using a resource file called -`policyd-override`. This is attached to the charm using (for example): +Overrides are provided to the charm using a Juju resource called +`policyd-override`. The resource is a ZIP file. This file, say +`overrides.zip`, is attached to the charm by: - juju attach-resource policyd-override= -The `` is the name that this charm is deployed as, with -`` being the resource file containing the policy overrides. + juju attach-resource policyd-override=overrides.zip -The format of the resource file is a ZIP file (.zip extension) containing at -least one YAML file with an extension of `.yaml` or `.yml`. Note that any -directories in the ZIP file are ignored; all of the files are flattened into a -single directory. There must not be any duplicated filenames; this will cause -an error and nothing in the resource file will be applied. +The policy override is enabled in the charm using: -(ed. next part is optional is the charm supports some form of -template/substitution on a read file) - -If a (ed. "one or more of") [`.j2`, `.tmpl`, `.tpl`] file is found in the -resource file then the charm will perform a substitution with charm variables -taken from the config or relations. (ed. edit as appropriate to include the -variable). - -To enable the policy overrides the config option `use-policyd-override` must be -set to `True`. + juju config use-policyd-override=true When `use-policyd-override` is `True` the status line of the charm will be prefixed with `PO:` indicating that policies have been overridden. If the @@ -180,12 +168,8 @@ installation of the policy override YAML files failed for any reason then the status line will be prefixed with `PO (broken):`. The log file for the charm will indicate the reason. No policy override files are installed if the `PO (broken):` is shown. The status line indicates that the overrides are broken, -not that the policy for the service has failed - they will be the defaults for -the charm and service. - -If the policy overrides did not install then *either* attach a new, corrected, -resource file *or* disable the policy overrides by setting -`use-policyd-override` to False. +not that the policy for the service has failed. The policy will be the defaults +for the charm and service. Policy overrides on one service may affect the functionality of another service. Therefore, it may be necessary to provide policy overrides for @@ -251,7 +235,10 @@ def maybe_do_policyd_overrides(openstack_release, blacklist_paths=None, blacklist_keys=None, template_function=None, - restart_handler=None): + restart_handler=None, + user=None, + group=None, + config_changed=False): """If the config option is set, get the resource file and process it to enable the policy.d overrides for the service passed. @@ -280,6 +267,11 @@ def maybe_do_policyd_overrides(openstack_release, directory. However, for any services where this is buggy then a restart_handler can be used to force the policy.d files to be read. + If the config_changed param is True, then the handling is slightly + different: It will only perform the policyd overrides if the config is True + and the success file doesn't exist. Otherwise, it does nothing as the + resource file has already been processed. + :param openstack_release: The openstack release that is installed. :type openstack_release: str :param service: the service name to construct the policy.d directory for. 
@@ -295,11 +287,26 @@ def maybe_do_policyd_overrides(openstack_release, :param restart_handler: The function to call if the service should be restarted. :type restart_handler: Union[None, Callable[]] + :param user: The user to create/write files/directories as + :type user: Union[None, str] + :param group: the group to create/write files/directories as + :type group: Union[None, str] + :param config_changed: Set to True for config_changed hook. + :type config_changed: bool """ + _user = service if user is None else user + _group = service if group is None else group + if not is_policyd_override_valid_on_this_release(openstack_release): + return + hookenv.log("Running maybe_do_policyd_overrides", + level=POLICYD_LOG_LEVEL_DEFAULT) config = hookenv.config() try: if not config.get(POLICYD_CONFIG_NAME, False): - clean_policyd_dir_for(service, blacklist_paths) + clean_policyd_dir_for(service, + blacklist_paths, + user=_user, + group=_group) if (os.path.isfile(_policy_success_file()) and restart_handler is not None and callable(restart_handler)): @@ -307,11 +314,16 @@ def maybe_do_policyd_overrides(openstack_release, remove_policy_success_file() return except Exception as e: - print("Exception is: ", str(e)) + hookenv.log("... ERROR: Exception is: {}".format(str(e)), + level=POLICYD_CONFIG_NAME) import traceback - traceback.print_exc() + hookenv.log(traceback.format_exc(), level=POLICYD_LOG_LEVEL_DEFAULT) return - if not is_policyd_override_valid_on_this_release(openstack_release): + # if the policyd overrides have been performed when doing config_changed + # just return + if config_changed and is_policy_success_file_set(): + hookenv.log("... already setup, so skipping.", + level=POLICYD_LOG_LEVEL_DEFAULT) return # from now on it should succeed; if it doesn't then status line will show # broken. @@ -323,53 +335,18 @@ def maybe_do_policyd_overrides(openstack_release, restart_handler() -def maybe_do_policyd_overrides_on_config_changed(openstack_release, - service, - blacklist_paths=None, - blacklist_keys=None, - template_function=None, - restart_handler=None): - """This function is designed to be called from the config changed hook - handler. It will only perform the policyd overrides if the config is True - and the success file doesn't exist. Otherwise, it does nothing as the - resource file has already been processed. +@charmhelpers.deprecate("Use maybe_do_poliyd_overrrides instead") +def maybe_do_policyd_overrides_on_config_changed(*args, **kwargs): + """This function is designed to be called from the config changed hook. + + DEPRECATED: please use maybe_do_policyd_overrides() with the param + `config_changed` as `True`. See maybe_do_policyd_overrides() for more details on the params. - - :param openstack_release: The openstack release that is installed. - :type openstack_release: str - :param service: the service name to construct the policy.d directory for. - :type service: str - :param blacklist_paths: optional list of paths to leave alone - :type blacklist_paths: Union[None, List[str]] - :param blacklist_keys: optional list of keys that mustn't appear in the - yaml file's - :type blacklist_keys: Union[None, List[str]] - :param template_function: Optional function that can modify the string - prior to being processed as a Yaml document. - :type template_function: Union[None, Callable[[str], str]] - :param restart_handler: The function to call if the service should be - restarted. 
- :type restart_handler: Union[None, Callable[]] """ - config = hookenv.config() - try: - if not config.get(POLICYD_CONFIG_NAME, False): - clean_policyd_dir_for(service, blacklist_paths) - if (os.path.isfile(_policy_success_file()) and - restart_handler is not None and - callable(restart_handler)): - restart_handler() - remove_policy_success_file() - return - except Exception: - return - # if the policyd overrides have been performed just return - if os.path.isfile(_policy_success_file()): - return - maybe_do_policyd_overrides( - openstack_release, service, blacklist_paths, blacklist_keys, - template_function, restart_handler) + if 'config_changed' not in kwargs.keys(): + kwargs['config_changed'] = True + return maybe_do_policyd_overrides(*args, **kwargs) def get_policy_resource_filename(): @@ -386,13 +363,16 @@ def get_policy_resource_filename(): @contextlib.contextmanager -def open_and_filter_yaml_files(filepath): +def open_and_filter_yaml_files(filepath, has_subdirs=False): """Validate that the filepath provided is a zip file and contains at least one (.yaml|.yml) file, and that the files are not duplicated when the zip file is flattened. Note that the yaml files are not checked. This is the first stage in validating the policy zipfile; individual yaml files are not checked for validity or black listed keys. + If the has_subdirs param is True, then the files are flattened to the first + directory, and the files in the root are ignored. + An example of use is: with open_and_filter_yaml_files(some_path) as zfp, g: @@ -401,6 +381,8 @@ def open_and_filter_yaml_files(filepath): :param filepath: a filepath object that can be opened by zipfile :type filepath: Union[AnyStr, os.PathLike[AntStr]] + :param has_subdirs: Keep first level of subdirectories in yaml file. + :type has_subdirs: bool :returns: (zfp handle, a generator of the (name, filename, ZipInfo object) tuples) as a tuple. @@ -413,7 +395,7 @@ def open_and_filter_yaml_files(filepath): with zipfile.ZipFile(filepath, 'r') as zfp: # first pass through; check for duplicates and at least one yaml file. names = collections.defaultdict(int) - yamlfiles = _yamlfiles(zfp) + yamlfiles = _yamlfiles(zfp, has_subdirs) for name, _, _, _ in yamlfiles: names[name] += 1 # There must be at least 1 yaml file. @@ -429,17 +411,33 @@ def open_and_filter_yaml_files(filepath): yield (zfp, yamlfiles) -def _yamlfiles(zipfile): +def _yamlfiles(zipfile, has_subdirs=False): """Helper to get a yaml file (according to POLICYD_VALID_EXTS extensions) and the infolist item from a zipfile. + If the `has_subdirs` param is True, the the only yaml files that have a + directory component are read, and then first part of the directory + component is kept, along with the filename in the name. e.g. an entry with + a filename of: + + compute/someotherdir/override.yaml + + is returned as: + + compute/override, yaml, override.yaml, + + This is to help with the special, additional, processing that the dashboard + charm requires. + :param zipfile: the zipfile to read zipinfo items from :type zipfile: zipfile.ZipFile - :returns: generator of (name, ext, filename, info item) for each self-identified - yaml file. + :param has_subdirs: Keep first level of subdirectories in yaml file. + :type has_subdirs: bool + :returns: generator of (name, ext, filename, info item) for each + self-identified yaml file. 
:rtype: List[(str, str, str, zipfile.ZipInfo)] """ - l = [] + files = [] for infolist_item in zipfile.infolist(): try: if infolist_item.is_dir(): @@ -448,12 +446,14 @@ def _yamlfiles(zipfile): # fallback to "old" way to determine dir entry for pre-py36 if infolist_item.filename.endswith('/'): continue - _, name_ext = os.path.split(infolist_item.filename) + _dir, name_ext = os.path.split(infolist_item.filename) name, ext = os.path.splitext(name_ext) + if has_subdirs and _dir != "": + name = os.path.join(_dir.split(os.path.sep)[0], name) ext = ext.lower() if ext and ext in POLICYD_VALID_EXTS: - l.append((name, ext, name_ext, infolist_item)) - return l + files.append((name, ext, name_ext, infolist_item)) + return files def read_and_validate_yaml(stream_or_doc, blacklist_keys=None): @@ -499,9 +499,6 @@ def read_and_validate_yaml(stream_or_doc, blacklist_keys=None): def policyd_dir_for(service): """Return the policy directory for the named service. - This assumes the default name of "policy.d" which is kept across all - charms. - :param service: str :returns: the policy.d override directory. :rtype: os.PathLike[str] @@ -509,7 +506,7 @@ def policyd_dir_for(service): return os.path.join("/", "etc", service, "policy.d") -def clean_policyd_dir_for(service, keep_paths=None): +def clean_policyd_dir_for(service, keep_paths=None, user=None, group=None): """Clean out the policyd directory except for items that should be kept. The keep_paths, if used, should be set to the full path of the files that @@ -522,12 +519,19 @@ def clean_policyd_dir_for(service, keep_paths=None): :type service: str :param keep_paths: optional list of paths to not delete. :type keep_paths: Union[None, List[str]] + :param user: The user to create/write files/directories as + :type user: Union[None, str] + :param group: the group to create/write files/directories as + :type group: Union[None, str] """ + _user = service if user is None else user + _group = service if group is None else group keep_paths = keep_paths or [] path = policyd_dir_for(service) + hookenv.log("Cleaning path: {}".format(path), level=hookenv.DEBUG) if not os.path.exists(path): - ch_host.mkdir(path, owner=service, group=service, perms=0o775) - _scanner = os.scandir if six.PY3 else _py2_scandir + ch_host.mkdir(path, owner=_user, group=_group, perms=0o775) + _scanner = os.scandir if sys.version_info > (3, 4) else _py2_scandir for direntry in _scanner(path): # see if the path should be kept. if direntry.path in keep_paths: @@ -539,6 +543,22 @@ def clean_policyd_dir_for(service, keep_paths=None): os.remove(direntry.path) +def maybe_create_directory_for(path, user, group): + """For the filename 'path', ensure that the directory for that path exists. + + Note that if the directory already exists then the permissions are NOT + changed. + + :param path: the filename including the path to it. + :type path: str + :param user: the user to create the directory as + :param group: the group to create the directory as + """ + _dir, _ = os.path.split(path) + if not os.path.exists(_dir): + ch_host.mkdir(_dir, owner=user, group=group, perms=0o775) + + @contextlib.contextmanager def _py2_scandir(path): """provide a py2 implementation of os.scandir if this module ever gets used @@ -574,6 +594,11 @@ def path_for_policy_file(service, name): It is constructed using policyd_dir_for(), the name and the ".yaml" extension. + For horizon, for example, it's a bit more complicated. 
The name param is + actually "override_service_dir/a_name", where target_service needs to be + one the allowed horizon override services. This translation and check is + done in the _yamlfiles() function. + :param service: the service name :type service: str :param name: the name for the policy override @@ -601,6 +626,22 @@ def remove_policy_success_file(): pass +def set_policy_success_file(): + """Set the file that indicates successful policyd override.""" + open(_policy_success_file(), "w").close() + + +def is_policy_success_file_set(): + """Returns True if the policy success file has been set. + + This indicates that policies are overridden and working properly. + + :returns: True if the policy file is set + :rtype: bool + """ + return os.path.isfile(_policy_success_file()) + + def policyd_status_message_prefix(): """Return the prefix str for the status line. @@ -610,7 +651,7 @@ def policyd_status_message_prefix(): :returns: the prefix :rtype: str """ - if os.path.isfile(_policy_success_file()): + if is_policy_success_file_set(): return "PO:" return "PO (broken):" @@ -619,7 +660,11 @@ def process_policy_resource_file(resource_file, service, blacklist_paths=None, blacklist_keys=None, - template_function=None): + template_function=None, + preserve_topdir=False, + preprocess_filename=None, + user=None, + group=None): """Process the resource file (which should contain at least one yaml file) and write those files to the service's policy.d directory. @@ -639,6 +684,16 @@ def process_policy_resource_file(resource_file, its file path reconstructed. This, also, must not match any path in the black list. + The yaml filename can be modified in two ways. If the `preserve_topdir` + param is True, then files will be flattened to the top dir. This allows + for creating sets of files that can be grouped into a single level tree + structure. + + Secondly, if the `preprocess_filename` param is not None and callable() + then the name is passed to that function for preprocessing before being + converted to the end location. This is to allow munging of the filename + prior to being tested for a blacklist path. + If any error occurs, then the policy.d directory is cleared, the error is written to the log, and the status line will eventually show as failed. @@ -654,17 +709,39 @@ def process_policy_resource_file(resource_file, :param template_function: Optional function that can modify the yaml document. :type template_function: Union[None, Callable[[AnyStr], AnyStr]] + :param preserve_topdir: Keep the toplevel subdir + :type preserve_topdir: bool + :param preprocess_filename: Optional function to use to process filenames + extracted from the resource file. + :type preprocess_filename: Union[None, Callable[[AnyStr]. AnyStr]] + :param user: The user to create/write files/directories as + :type user: Union[None, str] + :param group: the group to create/write files/directories as + :type group: Union[None, str] :returns: True if the processing was successful, False if not. 
:rtype: boolean """ + hookenv.log("Running process_policy_resource_file", level=hookenv.DEBUG) blacklist_paths = blacklist_paths or [] completed = False + _preprocess = None + if preprocess_filename is not None and callable(preprocess_filename): + _preprocess = preprocess_filename + _user = service if user is None else user + _group = service if group is None else group try: - with open_and_filter_yaml_files(resource_file) as (zfp, gen): + with open_and_filter_yaml_files( + resource_file, preserve_topdir) as (zfp, gen): # first clear out the policy.d directory and clear success remove_policy_success_file() - clean_policyd_dir_for(service, blacklist_paths) + clean_policyd_dir_for(service, + blacklist_paths, + user=_user, + group=_group) for name, ext, filename, zipinfo in gen: + # See if the name should be preprocessed. + if _preprocess is not None: + name = _preprocess(name) # construct a name for the output file. yaml_filename = path_for_policy_file(service, name) if yaml_filename in blacklist_paths: @@ -682,8 +759,12 @@ def process_policy_resource_file(resource_file, "available".format(filename)) doc = template_function(doc) yaml_doc = read_and_validate_yaml(doc, blacklist_keys) - with open(yaml_filename, "wt") as f: - yaml.dump(yaml_doc, f) + # we may have to create the directory + maybe_create_directory_for(yaml_filename, _user, _group) + ch_host.write_file(yaml_filename, + yaml.dump(yaml_doc).encode('utf-8'), + _user, + _group) # Every thing worked, so we mark up a success. completed = True except (BadZipFile, BadPolicyZipFile, BadPolicyYamlFile) as e: @@ -707,10 +788,13 @@ def process_policy_resource_file(resource_file, hookenv.log("Processing {} failed: cleaning policy.d directory" .format(resource_file), level=POLICYD_LOG_LEVEL_DEFAULT) - clean_policyd_dir_for(service, blacklist_paths) + clean_policyd_dir_for(service, + blacklist_paths, + user=_user, + group=_group) else: # touch the success filename hookenv.log("policy.d overrides installed.", level=POLICYD_LOG_LEVEL_DEFAULT) - open(_policy_success_file(), "w").close() + set_policy_success_file() return completed diff --git a/charmhelpers/contrib/openstack/templates/section-placement b/charmhelpers/contrib/openstack/templates/section-placement new file mode 100644 index 00000000..97724bdb --- /dev/null +++ b/charmhelpers/contrib/openstack/templates/section-placement @@ -0,0 +1,19 @@ +[placement] +{% if auth_host -%} +auth_url = {{ auth_protocol }}://{{ auth_host }}:{{ auth_port }} +auth_type = password +{% if api_version == "3" -%} +project_domain_name = {{ admin_domain_name }} +user_domain_name = {{ admin_domain_name }} +{% else -%} +project_domain_name = default +user_domain_name = default +{% endif -%} +project_name = {{ admin_tenant_name }} +username = {{ admin_user }} +password = {{ admin_password }} +{% endif -%} +{% if region -%} +os_region_name = {{ region }} +{% endif -%} +randomize_allocation_candidates = true diff --git a/charmhelpers/contrib/openstack/utils.py b/charmhelpers/contrib/openstack/utils.py index ac96f844..971c9e10 100644 --- a/charmhelpers/contrib/openstack/utils.py +++ b/charmhelpers/contrib/openstack/utils.py @@ -50,9 +50,14 @@ from charmhelpers.core.hookenv import ( hook_name, application_version_set, cached, + leader_set, + leader_get, ) -from charmhelpers.core.strutils import BasicStringComparator +from charmhelpers.core.strutils import ( + BasicStringComparator, + bool_from_string, +) from charmhelpers.contrib.storage.linux.lvm import ( deactivate_lvm_volume_group, @@ -204,7 +209,7 @@ SWIFT_CODENAMES 
= OrderedDict([ ('stein', ['2.20.0', '2.21.0']), ('train', - ['2.22.0']), + ['2.22.0', '2.23.0']), ]) # >= Liberty version->codename mapping @@ -531,7 +536,7 @@ def reset_os_release(): _os_rel = None -def os_release(package, base='essex', reset_cache=False): +def os_release(package, base=None, reset_cache=False): ''' Returns OpenStack release codename from a cached global. @@ -542,6 +547,8 @@ def os_release(package, base='essex', reset_cache=False): the installation source, the earliest release supported by the charm should be returned. ''' + if not base: + base = UBUNTU_OPENSTACK_RELEASE[lsb_release()['DISTRIB_CODENAME']] global _os_rel if reset_cache: reset_os_release() @@ -670,7 +677,10 @@ def openstack_upgrade_available(package): codename = get_os_codename_install_source(src) avail_vers = get_os_version_codename_swift(codename) else: - avail_vers = get_os_version_install_source(src) + try: + avail_vers = get_os_version_install_source(src) + except Exception: + avail_vers = cur_vers apt.init() return apt.version_compare(avail_vers, cur_vers) >= 1 @@ -1693,7 +1703,7 @@ def enable_memcache(source=None, release=None, package=None): if release: _release = release else: - _release = os_release(package, base='icehouse') + _release = os_release(package) if not _release: _release = get_os_codename_install_source(source) @@ -1863,3 +1873,28 @@ def series_upgrade_complete(resume_unit_helper=None, configs=None): configs.write_all() if resume_unit_helper: resume_unit_helper(configs) + + +def is_db_initialised(): + """Check leader storage to see if database has been initialised. + + :returns: Whether DB has been initialised + :rtype: bool + """ + db_initialised = None + if leader_get('db-initialised') is None: + juju_log( + 'db-initialised key missing, assuming db is not initialised', + 'DEBUG') + db_initialised = False + else: + db_initialised = bool_from_string(leader_get('db-initialised')) + juju_log('Database initialised: {}'.format(db_initialised), 'DEBUG') + return db_initialised + + +def set_db_initialised(): + """Add flag to leader storage to indicate database has been initialised. 
+ """ + juju_log('Setting db-initialised to True', 'DEBUG') + leader_set({'db-initialised': True}) diff --git a/charmhelpers/contrib/openstack/vaultlocker.py b/charmhelpers/contrib/openstack/vaultlocker.py index a8e4bf88..c162de27 100644 --- a/charmhelpers/contrib/openstack/vaultlocker.py +++ b/charmhelpers/contrib/openstack/vaultlocker.py @@ -37,9 +37,13 @@ class VaultKVContext(context.OSContextGenerator): ) def __call__(self): + import hvac + ctxt = {} + # NOTE(hopem): see https://bugs.launchpad.net/charm-helpers/+bug/1849323 db = unitdata.kv() - last_token = db.get('last-token') + # currently known-good secret-id secret_id = db.get('secret-id') + for relation_id in hookenv.relation_ids(self.interfaces[0]): for unit in hookenv.related_units(relation_id): data = hookenv.relation_get(unit=unit, @@ -54,27 +58,48 @@ class VaultKVContext(context.OSContextGenerator): # Tokens may change when secret_id's are being # reissued - if so use token to get new secret_id - if token != last_token: + token_success = False + try: secret_id = retrieve_secret_id( url=vault_url, token=token ) + token_success = True + except hvac.exceptions.InvalidRequest: + # Try next + pass + + if token_success: db.set('secret-id', secret_id) - db.set('last-token', token) db.flush() - ctxt = { - 'vault_url': vault_url, - 'role_id': json.loads(role_id), - 'secret_id': secret_id, - 'secret_backend': self.secret_backend, - } - vault_ca = data.get('vault_ca') - if vault_ca: - ctxt['vault_ca'] = json.loads(vault_ca) - self.complete = True - return ctxt - return {} + ctxt['vault_url'] = vault_url + ctxt['role_id'] = json.loads(role_id) + ctxt['secret_id'] = secret_id + ctxt['secret_backend'] = self.secret_backend + vault_ca = data.get('vault_ca') + if vault_ca: + ctxt['vault_ca'] = json.loads(vault_ca) + + self.complete = True + break + else: + if secret_id: + ctxt['vault_url'] = vault_url + ctxt['role_id'] = json.loads(role_id) + ctxt['secret_id'] = secret_id + ctxt['secret_backend'] = self.secret_backend + vault_ca = data.get('vault_ca') + if vault_ca: + ctxt['vault_ca'] = json.loads(vault_ca) + + if self.complete: + break + + if ctxt: + self.complete = True + + return ctxt def write_vaultlocker_conf(context, priority=100): diff --git a/charmhelpers/contrib/storage/linux/ceph.py b/charmhelpers/contrib/storage/linux/ceph.py index e13dfa8b..104977af 100644 --- a/charmhelpers/contrib/storage/linux/ceph.py +++ b/charmhelpers/contrib/storage/linux/ceph.py @@ -422,6 +422,8 @@ def enabled_manager_modules(): cmd = ['ceph', 'mgr', 'module', 'ls'] try: modules = check_output(cmd) + if six.PY3: + modules = modules.decode('UTF-8') except CalledProcessError as e: log("Failed to list ceph modules: {}".format(e), WARNING) return [] @@ -1185,6 +1187,15 @@ class CephBrokerRq(object): self.request_id = str(uuid.uuid1()) self.ops = [] + def add_op(self, op): + """Add an op if it is not already in the list. + + :param op: Operation to add. 
+ :type op: dict + """ + if op not in self.ops: + self.ops.append(op) + def add_op_request_access_to_group(self, name, namespace=None, permission=None, key_name=None, object_prefix_permissions=None): @@ -1198,7 +1209,7 @@ class CephBrokerRq(object): 'rwx': ['prefix1', 'prefix2'], 'class-read': ['prefix3']} """ - self.ops.append({ + self.add_op({ 'op': 'add-permissions-to-key', 'group': name, 'namespace': namespace, 'name': key_name or service_name(), @@ -1251,11 +1262,11 @@ class CephBrokerRq(object): if pg_num and weight: raise ValueError('pg_num and weight are mutually exclusive') - self.ops.append({'op': 'create-pool', 'name': name, - 'replicas': replica_count, 'pg_num': pg_num, - 'weight': weight, 'group': group, - 'group-namespace': namespace, 'app-name': app_name, - 'max-bytes': max_bytes, 'max-objects': max_objects}) + self.add_op({'op': 'create-pool', 'name': name, + 'replicas': replica_count, 'pg_num': pg_num, + 'weight': weight, 'group': group, + 'group-namespace': namespace, 'app-name': app_name, + 'max-bytes': max_bytes, 'max-objects': max_objects}) def add_op_create_erasure_pool(self, name, erasure_profile=None, weight=None, group=None, app_name=None, @@ -1283,12 +1294,12 @@ class CephBrokerRq(object): :param max_objects: Maximum objects quota to apply :type max_objects: int """ - self.ops.append({'op': 'create-pool', 'name': name, - 'pool-type': 'erasure', - 'erasure-profile': erasure_profile, - 'weight': weight, - 'group': group, 'app-name': app_name, - 'max-bytes': max_bytes, 'max-objects': max_objects}) + self.add_op({'op': 'create-pool', 'name': name, + 'pool-type': 'erasure', + 'erasure-profile': erasure_profile, + 'weight': weight, + 'group': group, 'app-name': app_name, + 'max-bytes': max_bytes, 'max-objects': max_objects}) def set_ops(self, ops): """Set request ops to provided value. diff --git a/charmhelpers/core/hookenv.py b/charmhelpers/core/hookenv.py index 4744eb43..647f6e4b 100644 --- a/charmhelpers/core/hookenv.py +++ b/charmhelpers/core/hookenv.py @@ -34,6 +34,8 @@ import errno import tempfile from subprocess import CalledProcessError +from charmhelpers import deprecate + import six if not six.PY3: from UserDict import UserDict @@ -119,6 +121,24 @@ def log(message, level=None): raise +def function_log(message): + """Write a function progress message""" + command = ['function-log'] + if not isinstance(message, six.string_types): + message = repr(message) + command += [message[:SH_MAX_ARG]] + # Missing function-log should not cause failures in unit tests + # Send function_log output to stderr + try: + subprocess.call(command) + except OSError as e: + if e.errno == errno.ENOENT: + message = "function-log: {}".format(message) + print(message, file=sys.stderr) + else: + raise + + class Serializable(UserDict): """Wrapper, an object that can be serialized to yaml or json""" @@ -946,9 +966,23 @@ def charm_dir(): return os.environ.get('CHARM_DIR') +def cmd_exists(cmd): + """Return True if the specified cmd exists in the path""" + return any( + os.access(os.path.join(path, cmd), os.X_OK) + for path in os.environ["PATH"].split(os.pathsep) + ) + + @cached +@deprecate("moved to function_get()", log=log) def action_get(key=None): - """Gets the value of an action parameter, or all key/value param pairs""" + """ + .. deprecated:: 0.20.7 + Alias for :func:`function_get`. + + Gets the value of an action parameter, or all key/value param pairs. 
+ """ cmd = ['action-get'] if key is not None: cmd.append(key) @@ -957,36 +991,103 @@ def action_get(key=None): return action_data +@cached +def function_get(key=None): + """Gets the value of an action parameter, or all key/value param pairs""" + cmd = ['function-get'] + # Fallback for older charms. + if not cmd_exists('function-get'): + cmd = ['action-get'] + + if key is not None: + cmd.append(key) + cmd.append('--format=json') + function_data = json.loads(subprocess.check_output(cmd).decode('UTF-8')) + return function_data + + +@deprecate("moved to function_set()", log=log) def action_set(values): - """Sets the values to be returned after the action finishes""" + """ + .. deprecated:: 0.20.7 + Alias for :func:`function_set`. + + Sets the values to be returned after the action finishes. + """ cmd = ['action-set'] for k, v in list(values.items()): cmd.append('{}={}'.format(k, v)) subprocess.check_call(cmd) -def action_fail(message): - """Sets the action status to failed and sets the error message. +def function_set(values): + """Sets the values to be returned after the function finishes""" + cmd = ['function-set'] + # Fallback for older charms. + if not cmd_exists('function-get'): + cmd = ['action-set'] - The results set by action_set are preserved.""" + for k, v in list(values.items()): + cmd.append('{}={}'.format(k, v)) + subprocess.check_call(cmd) + + +@deprecate("moved to function_fail()", log=log) +def action_fail(message): + """ + .. deprecated:: 0.20.7 + Alias for :func:`function_fail`. + + Sets the action status to failed and sets the error message. + + The results set by action_set are preserved. + """ subprocess.check_call(['action-fail', message]) +def function_fail(message): + """Sets the function status to failed and sets the error message. + + The results set by function_set are preserved.""" + cmd = ['function-fail'] + # Fallback for older charms. + if not cmd_exists('function-fail'): + cmd = ['action-fail'] + cmd.append(message) + + subprocess.check_call(cmd) + + def action_name(): """Get the name of the currently executing action.""" return os.environ.get('JUJU_ACTION_NAME') +def function_name(): + """Get the name of the currently executing function.""" + return os.environ.get('JUJU_FUNCTION_NAME') or action_name() + + def action_uuid(): """Get the UUID of the currently executing action.""" return os.environ.get('JUJU_ACTION_UUID') +def function_id(): + """Get the ID of the currently executing function.""" + return os.environ.get('JUJU_FUNCTION_ID') or action_uuid() + + def action_tag(): """Get the tag for the currently executing action.""" return os.environ.get('JUJU_ACTION_TAG') +def function_tag(): + """Get the tag for the currently executing function.""" + return os.environ.get('JUJU_FUNCTION_TAG') or action_tag() + + def status_set(workload_state, message): """Set the workload state with a message