commit 2e48c9a41fc3624eddd0021023a1d6555c28adfe Author: Billy Olsen Date: Tue Jul 6 11:49:17 2021 -0700 Initial commit just to share Initial commit of keystone work just to share. diff --git a/charms/keystone-k8s/.flake8 b/charms/keystone-k8s/.flake8 new file mode 100644 index 00000000..c0a92a06 --- /dev/null +++ b/charms/keystone-k8s/.flake8 @@ -0,0 +1,9 @@ +[flake8] +max-line-length = 80 +select: E,W,F,C,N +exclude: + venv + .git + build + dist + *.egg_info diff --git a/charms/keystone-k8s/.gitignore b/charms/keystone-k8s/.gitignore new file mode 100644 index 00000000..08f07be2 --- /dev/null +++ b/charms/keystone-k8s/.gitignore @@ -0,0 +1,10 @@ +venv/ +build/ +.idea/ +*.charm +.tox +venv +.coverage +__pycache__/ +*.py[cod] + diff --git a/charms/keystone-k8s/.jujuignore b/charms/keystone-k8s/.jujuignore new file mode 100644 index 00000000..06a6ded7 --- /dev/null +++ b/charms/keystone-k8s/.jujuignore @@ -0,0 +1,4 @@ +/venv +*.py[cod] +*.charm +/.venv diff --git a/charms/keystone-k8s/LICENSE b/charms/keystone-k8s/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/charms/keystone-k8s/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/charms/keystone-k8s/README.md b/charms/keystone-k8s/README.md new file mode 100644 index 00000000..04a474c7 --- /dev/null +++ b/charms/keystone-k8s/README.md @@ -0,0 +1,45 @@ +# keystone-operator + +## Description + +The keystone operator is an operator to manage the keystone identity +service. + +## Usage + +TODO: Provide high-level usage, such as required config or relations + + +## Developing + +This project uses tox for building and managing. To build the charm +run: + + tox -e build + +To deploy the local test instance: + + tox -e build + juju add-model keystone + juju deploy ./keystone-operator.charm --resource keystone-image=kolla/ubuntu-binary-keystone:victoria + + +## Status + +This charm is currently in basic dev/exploratory state. This charm will deploy a keystone instance which uses local sqlite database. 
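As a quick smoke test once the deploy settles, the identity API can be queried directly. The sketch below is illustrative only: it reuses the development placeholder credentials from `src/charm.py` and the default hostname/port from `config.yaml` (adjust the address to your unit or ingress), using python-keystoneclient from `requirements.txt`:

```python
# Minimal sketch only: 'admin'/'abc123' are the charm's development
# placeholders, and keystone.juju:5000 is the config.yaml default endpoint.
from keystoneauth1 import session
from keystoneauth1.identity import v3
from keystoneclient.v3 import client

auth = v3.Password(
    auth_url='http://keystone.juju:5000/v3',  # os-public-hostname:service-port
    username='admin',                         # admin-user default
    password='abc123',                        # placeholder from src/charm.py
    system_scope='all',
    project_domain_name='Default',
    user_domain_name='Default',
)
keystone = client.Client(session=session.Session(auth=auth))
for svc in keystone.services.list():
    print(svc.name, svc.type)
```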
+ +TODOs + +- [X] Basic bootstrap of keystone service +- [ ] Handle shared db relation +- [ ] Provide identity-service relation +- [ ] Handle config changed events +- [ ] Unit tests +- [ ] Functional tests + +## Testing + +The Python operator framework includes a very nice harness for testing +operator behaviour without full deployment. Just `run_tests`: + + ./run_tests diff --git a/charms/keystone-k8s/actions.yaml b/charms/keystone-k8s/actions.yaml new file mode 100644 index 00000000..cae6f88e --- /dev/null +++ b/charms/keystone-k8s/actions.yaml @@ -0,0 +1,10 @@ +# Copyright 2021 Billy Olsen +# See LICENSE file for licensing details. +# +# TEMPLATE-TODO: change this example to suit your needs. +# If you don't need actions, you can remove the file entirely. +# It ties in to the example _on_fortune_action handler in src/charm.py +# +# Learn more about actions at: https://juju.is/docs/sdk/actions + +{ } \ No newline at end of file diff --git a/charms/keystone-k8s/config.yaml b/charms/keystone-k8s/config.yaml new file mode 100644 index 00000000..1b510b4c --- /dev/null +++ b/charms/keystone-k8s/config.yaml @@ -0,0 +1,53 @@ +# Copyright 2021 Canonical Ltd. +# See LICENSE file for licensing details. +# +options: + debug: + default: False + description: Enable debug logging. + type: boolean + + admin-user: + default: admin + description: Default admin user for keystone + type: string + admin-role: + default: Admin + description: Admin role to be associated with admin and service users + type: string + service-tenant: + default: services + description: Name of tenant to associate with service credentials + type: string + + service-port: + default: 5000 + description: Port the public and internal endpoints will listen on + type: int + admin-port: + default: 35357 + description: Port the admin endpoint will listen on + type: int + + os-admin-hostname: + default: keystone.juju + description: | + The hostname or address of the admin endpoints that should be advertised + in the keystone identity provider. + type: string + os-internal-hostname: + default: keystone.juju + description: | + The hostname or address of the internal endpoints that should be advertised + in the keystone identity provider. + type: string + os-public-hostname: + default: keystone.juju + description: | + The hostname or address of the public endpoints that should be advertised + in the keystone identity provider. + type: string + region: + default: RegionOne + description: Space delimited list of OpenStack regions + type: string diff --git a/charms/keystone-k8s/lib/charms/nginx_ingress_integrator/v0/ingress.py b/charms/keystone-k8s/lib/charms/nginx_ingress_integrator/v0/ingress.py new file mode 100644 index 00000000..c8d2e0b1 --- /dev/null +++ b/charms/keystone-k8s/lib/charms/nginx_ingress_integrator/v0/ingress.py @@ -0,0 +1,211 @@ +"""Library for the ingress relation. + +This library contains the Requires and Provides classes for handling +the ingress interface.
+ +Import `IngressRequires` in your charm, with two required options: + - "self" (the charm itself) + - config_dict + +`config_dict` accepts the following keys: + - service-hostname (required) + - service-name (required) + - service-port (required) + - additional-hostnames + - limit-rps + - limit-whitelist + - max-body-size + - path-routes + - retry-errors + - rewrite-enabled + - rewrite-target + - service-namespace + - session-cookie-max-age + - tls-secret-name + +See [the config section](https://charmhub.io/nginx-ingress-integrator/configure) for descriptions +of each, along with the required type. + +As an example, add the following to `src/charm.py`: +``` +from charms.nginx_ingress_integrator.v0.ingress import IngressRequires + +# In your charm's `__init__` method. +self.ingress = IngressRequires(self, {"service-hostname": self.config["external_hostname"], + "service-name": self.app.name, + "service-port": 80}) + +# In your charm's `config-changed` handler. +self.ingress.update_config({"service-hostname": self.config["external_hostname"]}) +``` +And then add the following to `metadata.yaml`: +``` +requires: + ingress: + interface: ingress +``` +You _must_ register the IngressRequires class as part of the `__init__` method +rather than, for instance, a config-changed event handler. This is because +doing so won't get the current relation changed event, because it wasn't +registered to handle the event (because it wasn't created in `__init__` when +the event was fired). +""" + +import logging + +from ops.charm import CharmEvents +from ops.framework import EventBase, EventSource, Object +from ops.model import BlockedStatus + +# The unique Charmhub library identifier, never change it +LIBID = "db0af4367506491c91663468fb5caa4c" + +# Increment this major API version when introducing breaking changes +LIBAPI = 0 + +# Increment this PATCH version before using `charmcraft publish-lib` or reset +# to 0 if you are raising the major API version +LIBPATCH = 9 + +logger = logging.getLogger(__name__) + +REQUIRED_INGRESS_RELATION_FIELDS = { + "service-hostname", + "service-name", + "service-port", +} + +OPTIONAL_INGRESS_RELATION_FIELDS = { + "additional-hostnames", + "limit-rps", + "limit-whitelist", + "max-body-size", + "retry-errors", + "rewrite-target", + "rewrite-enabled", + "service-namespace", + "session-cookie-max-age", + "tls-secret-name", + "path-routes", +} + + +class IngressAvailableEvent(EventBase): + pass + + +class IngressCharmEvents(CharmEvents): + """Custom charm events.""" + + ingress_available = EventSource(IngressAvailableEvent) + + +class IngressRequires(Object): + """This class defines the functionality for the 'requires' side of the 'ingress' relation. 
+ + Hook events observed: + - relation-changed + """ + + def __init__(self, charm, config_dict): + super().__init__(charm, "ingress") + + self.framework.observe(charm.on["ingress"].relation_changed, self._on_relation_changed) + + self.config_dict = config_dict + + def _config_dict_errors(self, update_only=False): + """Check our config dict for errors.""" + blocked_message = "Error in ingress relation, check `juju debug-log`" + unknown = [ + x + for x in self.config_dict + if x not in REQUIRED_INGRESS_RELATION_FIELDS | OPTIONAL_INGRESS_RELATION_FIELDS + ] + if unknown: + logger.error( + "Ingress relation error, unknown key(s) in config dictionary found: %s", + ", ".join(unknown), + ) + self.model.unit.status = BlockedStatus(blocked_message) + return True + if not update_only: + missing = [x for x in REQUIRED_INGRESS_RELATION_FIELDS if x not in self.config_dict] + if missing: + logger.error( + "Ingress relation error, missing required key(s) in config dictionary: %s", + ", ".join(missing), + ) + self.model.unit.status = BlockedStatus(blocked_message) + return True + return False + + def _on_relation_changed(self, event): + """Handle the relation-changed event.""" + # `self.unit` isn't available here, so use `self.model.unit`. + if self.model.unit.is_leader(): + if self._config_dict_errors(): + return + for key in self.config_dict: + event.relation.data[self.model.app][key] = str(self.config_dict[key]) + + def update_config(self, config_dict): + """Allow for updates to relation.""" + if self.model.unit.is_leader(): + self.config_dict = config_dict + if self._config_dict_errors(update_only=True): + return + relation = self.model.get_relation("ingress") + if relation: + for key in self.config_dict: + relation.data[self.model.app][key] = str(self.config_dict[key]) + + +class IngressProvides(Object): + """This class defines the functionality for the 'provides' side of the 'ingress' relation. + + Hook events observed: + - relation-changed + """ + + def __init__(self, charm): + super().__init__(charm, "ingress") + # Observe the relation-changed hook event and bind + # self.on_relation_changed() to handle the event. + self.framework.observe(charm.on["ingress"].relation_changed, self._on_relation_changed) + self.charm = charm + + def _on_relation_changed(self, event): + """Handle a change to the ingress relation. + + Confirm we have the fields we expect to receive.""" + # `self.unit` isn't available here, so use `self.model.unit`. + if not self.model.unit.is_leader(): + return + + ingress_data = { + field: event.relation.data[event.app].get(field) + for field in REQUIRED_INGRESS_RELATION_FIELDS | OPTIONAL_INGRESS_RELATION_FIELDS + } + + missing_fields = sorted( + [ + field + for field in REQUIRED_INGRESS_RELATION_FIELDS + if ingress_data.get(field) is None + ] + ) + + if missing_fields: + logger.error( + "Missing required data fields for ingress relation: {}".format( + ", ".join(missing_fields) + ) + ) + self.model.unit.status = BlockedStatus( + "Missing fields for ingress: {}".format(", ".join(missing_fields)) + ) + + # Create an event that our charm can use to decide it's okay to + # configure the ingress. + self.charm.on.ingress_available.emit() diff --git a/charms/keystone-k8s/metadata.yaml b/charms/keystone-k8s/metadata.yaml new file mode 100644 index 00000000..31e8e41d --- /dev/null +++ b/charms/keystone-k8s/metadata.yaml @@ -0,0 +1,51 @@ +# Copyright 2021 Billy Olsen +# See LICENSE file for licensing details. 
+name: keystone-operator +summary: OpenStack identity service +maintainer: Openstack Charmers +description: | + Keystone is an OpenStack project that provides Identity, Token, Catalog and + Policy services for use specifically by projects in the OpenStack family. It + implements OpenStack's Identity API. +version: 3 +bases: + - name: ubuntu + channel: 20.04/stable +tags: + - openstack + - identity + - misc + +subordinate: false + +requires: + db: + interface: mysql + limit: 1 + ingress: + interface: ingress + + +peers: + peers: + interface: keystone-peer + +containers: + keystone: + resource: keystone-image + mounts: + - storage: logs + location: /var/log/kolla/keystone + - storage: db + location: /var/lib/keystone + +storage: + logs: + type: filesystem + db: + type: filesystem + +resources: + keystone-image: + type: oci-image + description: Kolla based docker image for keystone diff --git a/charms/keystone-k8s/requirements-dev.txt b/charms/keystone-k8s/requirements-dev.txt new file mode 100644 index 00000000..4f2a3f5b --- /dev/null +++ b/charms/keystone-k8s/requirements-dev.txt @@ -0,0 +1,3 @@ +-r requirements.txt +coverage +flake8 diff --git a/charms/keystone-k8s/requirements.txt b/charms/keystone-k8s/requirements.txt new file mode 100644 index 00000000..18c7dead --- /dev/null +++ b/charms/keystone-k8s/requirements.txt @@ -0,0 +1,7 @@ +charmhelpers +jinja2 +kubernetes +ops +ops-lib-mysql +python-keystoneclient +git+https://opendev.org/openstack/charm-ops-openstack#egg=ops_openstack diff --git a/charms/keystone-k8s/run_tests b/charms/keystone-k8s/run_tests new file mode 100755 index 00000000..a66cc8c0 --- /dev/null +++ b/charms/keystone-k8s/run_tests @@ -0,0 +1,17 @@ +#!/bin/sh -e +# Copyright 2021 Billy Olsen +# See LICENSE file for licensing details. + +if [ -z "$VIRTUAL_ENV" -a -d venv/ ]; then + . venv/bin/activate +fi + +if [ -z "$PYTHONPATH" ]; then + export PYTHONPATH="lib:src" +else + export PYTHONPATH="lib:src:$PYTHONPATH" +fi + +flake8 +coverage run --source=src -m unittest -v "$@" +coverage report -m diff --git a/charms/keystone-k8s/src/__init__.py b/charms/keystone-k8s/src/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/charms/keystone-k8s/src/charm.py b/charms/keystone-k8s/src/charm.py new file mode 100755 index 00000000..351a8647 --- /dev/null +++ b/charms/keystone-k8s/src/charm.py @@ -0,0 +1,256 @@ +#!/usr/bin/env python3 +# Copyright 2021 Billy Olsen +# See LICENSE file for licensing details. +# +# Learn more at: https://juju.is/docs/sdk + +"""Charm the service. 
+ +Refer to the following post for a quick-start guide that will help you +develop a new k8s charm using the Operator Framework: + + https://discourse.charmhub.io/t/4208 +""" + +import logging +import os + +from charms.nginx_ingress_integrator.v0.ingress import IngressRequires + +from ops.charm import CharmBase +from ops.charm import PebbleReadyEvent + +from opslib.mysql import MySQLClient +from opslib.mysql import MySQLRelationEvent + +from ops.main import main +from ops.framework import StoredState +from ops import model + +from utils import contexts +from utils import manager +from utils.cprocess import check_output +from utils.cprocess import ContainerProcessError +from utils.templating import SidecarConfigRenderer + +logger = logging.getLogger(__name__) + +KEYSTONE_CONTAINER = "keystone" + + +KEYSTONE_CONF = '/etc/keystone/keystone.conf' +DATABASE_CONF = '/etc/keystone/database.conf' +KEYSTONE_WSGI_CONF = '/etc/apache2/sites-available/wsgi-keystone.conf' + + +class KeystoneOperatorCharm(CharmBase): + """Charm the service.""" + + _state = StoredState() + _authed = False + + def __init__(self, *args): + super().__init__(*args) + + logger.warning(f'Current working directory is: {os.getcwd()}') + + self.framework.observe(self.on.keystone_pebble_ready, + self._on_keystone_pebble_ready) + self.framework.observe(self.on.config_changed, + self._on_config_changed) + + self.db = MySQLClient(self, 'db') + self.framework.observe(self.db.on.database_changed, + self._on_database_changed) + + self.ingress_public = IngressRequires(self, { + 'service-hostname': self.model.config['os-public-hostname'], + 'service-name': self.app.name, + 'service-port': self.model.config['service-port'], + }) + self.keystone_manager = manager.KeystoneManager(self) + + # TODO(wolsen) how to determine the current release? + self.os_config_renderer = SidecarConfigRenderer('src/templates', + 'victoria') + self._register_configs(self.os_config_renderer) + + self._state.set_default(bootstrapped=False) + + def _register_configs(self, renderer: SidecarConfigRenderer) -> None: + """ + + """ + # renderer.register(KEYSTONE_CONF, contexts.KeystoneContext(self), + # containers=[KEYSTONE_CONTAINER]) + renderer.register(DATABASE_CONF, contexts.DatabaseContext(self, 'db'), + containers=[KEYSTONE_CONTAINER]) + renderer.register(KEYSTONE_WSGI_CONF, + contexts.WSGIWorkerConfigContext(self), + containers=[KEYSTONE_CONTAINER], + user='root', group='root') + + def _on_database_changed(self, event: MySQLRelationEvent) -> None: + """Handles database change events.""" + self.unit.status = model.MaintenanceStatus('Updating database ' + 'configuration') + self._do_bootstrap() + + @property + def admin_domain_name(self): + return self._state.admin_domain_name + + @property + def admin_domain_id(self): + return self._state.admin_domain_id + + @property + def admin_password(self): + # TODO(wolsen) password stuff + return 'abc123' + + @property + def admin_user(self): + return self.model.config['admin-user'] + + @property + def admin_role(self): + return self.model.config['admin-role'] + + @property + def charm_user(self): + """The admin user specific to the charm. + + This is a special admin user reserved for the charm to interact with + keystone. 
+ """ + return '_charm-keystone-admin' + + @property + def charm_password(self): + # TODO + return 'abc123' + + @property + def service_tenant(self): + return self.model.config['service-tenant'] + + @property + def db_ready(self): + """Returns True if the remote database has been configured and is + ready for access from the local service. + + :returns: True if the database is ready to be accessed, False otherwise + :rtype: bool + """ + return self._state.db_available + + def _do_bootstrap(self): + """Checks the services to see which services need to run depending + on the current state. + + Starts the appropriate services in the order they are needed. + If the service has not yet been bootstrapped, then this will + 1. Create the keystone database + 2. Bootstrap the keystone users service + 3. Setup the fernet tokens + """ + # if not self.db_ready: + # logger.debug('Database not ready, not bootstrapping') + # self.unit.status = model.BlockedStatus('Waiting for database') + # return + + if not self.unit.is_leader(): + logger.debug('Deferring bootstrap to leader unit') + self.unit.status = model.BlockedStatus('Waiting for leader to ' + 'bootstrap keystone') + return + + container = self.unit.get_container('keystone') + if not container: + logger.debug('Keystone container is not ready. Deferring bootstrap') + return + + # Write the config files to the container + self.os_config_renderer.write_all(container) + + try: + check_output(container, 'a2ensite wsgi-keystone && sleep 1') + except ContainerProcessError: + logger.exception('Failed to enable wsgi-keystone site in apache') + # ignore for now - pebble is raising an exited too quickly, but it + # appears to work properly. + + try: + self.keystone_manager.setup_keystone(container) + except ContainerProcessError: + logger.exception('Failed to bootstrap') + self._state.bootstrapped = False + return + + self.unit.status = model.MaintenanceStatus('Starting Keystone') + service = container.get_service('keystone-wsgi') + if service.is_running(): + container.stop('keystone-wsgi') + + container.start('keystone-wsgi') + + self.keystone_manager.setup_initial_projects_and_users() + + self.unit.status = model.ActiveStatus() + self._state.bootstrapped = True + + def _on_keystone_pebble_ready(self, event: PebbleReadyEvent) -> None: + """Invoked when the keystone bootstrap init container is ready. + + When invoked, the Pebble service is running in the container and ready + for bootstrap. The bootstrap sequence consists of creating the initial + keystone database and performing initial setup of the admin + credentials. + """ + container = event.workload + logger.debug('Updating keystone bootstrap layer to create the ' + 'keystone database') + + container.add_layer('keystone', self._keystone_layer(), combine=True) + logger.debug(f'Plan: {container.get_plan()}') + self._do_bootstrap() + + def _keystone_layer(self) -> dict: + """Keystone layer definition. + + :returns: pebble layer configuration for keystone services + :rtype: dict + """ + return { + 'summary': 'keystone layer', + 'description': 'pebble config layer for keystone', + 'services': { + 'keystone-wsgi': { + 'override': 'replace', + 'summary': 'Keystone Identity', + 'command': '/usr/sbin/apache2ctl -DFOREGROUND', + 'startup': 'disabled', + }, + }, + } + + def _on_config_changed(self, _): + """Just an example to show how to deal with changed configuration. + + TEMPLATE-TODO: change this example to suit your needs. 
+ If you don't need to handle config, you can remove this method, + the hook created in __init__.py for it, the corresponding test, + and the config.py file. + + Learn more about config at https://juju.is/docs/sdk/config + """ + logger.debug('config changed event') + if self._state.bootstrapped: + self.keystone_manager.update_service_catalog_for_keystone() + + +if __name__ == "__main__": + # Note: use_juju_for_storage=True required per + # https://github.com/canonical/operator/issues/506 + main(KeystoneOperatorCharm, use_juju_for_storage=True) diff --git a/charms/keystone-k8s/src/templates/database.conf.j2 b/charms/keystone-k8s/src/templates/database.conf.j2 new file mode 100644 index 00000000..dad2118f --- /dev/null +++ b/charms/keystone-k8s/src/templates/database.conf.j2 @@ -0,0 +1,7 @@ +[database] +{% if database_host -%} +connection = {{ database_type }}://{{ database_user }}:{{ database_password }}@{{ database_host }}/{{ database }}{% if database_ssl_ca %}?ssl_ca={{ database_ssl_ca }}{% if database_ssl_cert %}&ssl_cert={{ database_ssl_cert }}&ssl_key={{ database_ssl_key }}{% endif %}{% endif %} +{% else -%} +connection = sqlite:////var/lib/keystone/keystone.db +{% endif -%} +connection_recycle_time = 200 diff --git a/charms/keystone-k8s/src/templates/keystone.conf.j2 b/charms/keystone-k8s/src/templates/keystone.conf.j2 new file mode 100644 index 00000000..74b35e1a --- /dev/null +++ b/charms/keystone-k8s/src/templates/keystone.conf.j2 @@ -0,0 +1,114 @@ +# Victoria +############################################################################### +# [ WARNING ] +# Configuration file maintained by Juju. Local changes may be overwritten. +############################################################################### +[DEFAULT] +use_syslog = {{ use_syslog }} +log_config_append = /etc/keystone/logging.conf +debug = {{ debug }} + +public_endpoint = {{ public_endpoint }} + +[identity] +driver = {{ identity_backend }} +{% if default_domain_id -%} +default_domain_id = {{ default_domain_id }} +{% endif -%} + +{% if api_version == 3 -%} +domain_specific_drivers_enabled = True +domain_config_dir = {{ domain_config_dir }} +{% endif -%} + +[credential] +driver = sql + +[trust] +driver = sql + +[catalog] +cache_time = {{ catalog_cache_expiration }} +driver = sql + +[endpoint_filter] + +[token] +expiration = {{ token_expiration }} + +[fernet_tokens] +max_active_keys = {{ fernet_max_active_keys }} + +{% include "parts/section-signing" %} + +{% include "section-oslo-cache" %} +# This goes in the section above, selectively +# Bug #1899117 +expiration_time = {{ dogpile_cache_expiration }} + +[policy] +driver = sql + +[assignment] +driver = {{ assignment_backend }} + +[auth] +methods = {{ auth_methods }} + +[paste_deploy] +config_file = {{ paste_config_file }} + +[extra_headers] +Distribution = Ubuntu + +[ldap] +{% if identity_backend == 'ldap' -%} +url = {{ ldap_server }} +user = {{ ldap_user }} +password = {{ ldap_password }} +suffix = {{ ldap_suffix }} + +{% if ldap_config_flags -%} +{% for key, value in ldap_config_flags.iteritems() -%} +{{ key }} = {{ value }} +{% endfor -%} +{% endif -%} + +{% if ldap_readonly -%} +user_allow_create = False +user_allow_update = False +user_allow_delete = False + +tenant_allow_create = False +tenant_allow_update = False +tenant_allow_delete = False + +role_allow_create = False +role_allow_update = False +role_allow_delete = False + +group_allow_create = False +group_allow_update = False +group_allow_delete = False +{% endif -%} +{% endif -%} + +{% if api_version == 3 
%} +[resource] +admin_project_domain_name = {{ admin_domain_name }} +admin_project_name = admin +{% endif -%} + +{% if password_security_compliance %} +[security_compliance] +{% for k, v in password_security_compliance.items() -%} +{{ k }} = {{ v }} +{% endfor -%} +{% endif -%} + +{% include "parts/section-federation" %} + +{% include "section-oslo-middleware" %} +# This goes in the section above, selectively +# Bug #1819134 +max_request_body_size = 114688 diff --git a/charms/keystone-k8s/src/templates/wsgi-keystone.conf.j2 b/charms/keystone-k8s/src/templates/wsgi-keystone.conf.j2 new file mode 100644 index 00000000..4529c796 --- /dev/null +++ b/charms/keystone-k8s/src/templates/wsgi-keystone.conf.j2 @@ -0,0 +1,50 @@ +Listen 0.0.0.0:5000 +Listen 0.0.0.0:35357 + +<VirtualHost *:5000> + WSGIDaemonProcess keystone-public processes=1 threads=1 user=keystone group=keystone display-name=%{GROUP} python-path=/usr/lib/python3/site-packages + WSGIProcessGroup keystone-public + WSGIScriptAlias / /usr/bin/keystone-wsgi-public + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + <IfVersion >= 2.4> + ErrorLogFormat "%{cu}t %M" + </IfVersion> + ErrorLog "/var/log/keystone/keystone-apache-public-error.log" + LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"" logformat + CustomLog "/var/log/keystone/keystone-apache-public-access.log" logformat + + <Directory /usr/bin> + <IfVersion >= 2.4> + Require all granted + </IfVersion> + <IfVersion < 2.4> + Order allow,deny + Allow from all + </IfVersion> + </Directory> +</VirtualHost> + +<VirtualHost *:35357> + WSGIDaemonProcess keystone-admin processes=1 threads=1 user=keystone group=keystone display-name=%{GROUP} python-path=/usr/lib/python3/site-packages + WSGIProcessGroup keystone-admin + WSGIScriptAlias / /usr/bin/keystone-wsgi-admin + WSGIApplicationGroup %{GLOBAL} + WSGIPassAuthorization On + <IfVersion >= 2.4> + ErrorLogFormat "%{cu}t %M" + </IfVersion> + ErrorLog "/var/log/keystone/keystone-apache-admin-error.log" + LogFormat "%{X-Forwarded-For}i %l %u %t \"%r\" %>s %b %D \"%{Referer}i\" \"%{User-Agent}i\"" logformat + CustomLog "/var/log/keystone/keystone-apache-admin-access.log" logformat + + <Directory /usr/bin> + <IfVersion >= 2.4> + Require all granted + </IfVersion> + <IfVersion < 2.4> + Order allow,deny + Allow from all + </IfVersion> + </Directory> +</VirtualHost> diff --git a/charms/keystone-k8s/src/utils/__init__.py b/charms/keystone-k8s/src/utils/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/charms/keystone-k8s/src/utils/contexts.py b/charms/keystone-k8s/src/utils/contexts.py new file mode 100644 index 00000000..df975abf --- /dev/null +++ b/charms/keystone-k8s/src/utils/contexts.py @@ -0,0 +1,92 @@ +# Copyright 2021, Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
+ +from ops import framework +import logging + +log = logging.getLogger(__name__) + + +class ContextGenerator(framework.Object): + """Base class for all context generators""" + interfaces = [] + related = False + complete = False + missing_data = [] + + def __init__(self, charm, context_name): + super().__init__(charm, context_name) + self.charm = charm + + def __call__(self): + raise NotImplementedError + + def context_complete(self, ctxt): + """Check for missing data for the required context data. + Set self.missing_data if it exists and return False. + Set self.complete if no missing data and return True. + """ + # Fresh start + self.complete = False + self.missing_data = [] + for k, v in ctxt.items(): + if v is None or v == '': + if k not in self.missing_data: + self.missing_data.append(k) + + if self.missing_data: + self.complete = False + log.debug(f"Missing required data: {' '.join(self.missing_data)}") + else: + self.complete = True + return self.complete + + +class DatabaseContext(ContextGenerator): + + def __init__(self, charm, relation_name): + super().__init__(charm, 'database_context') + self.relation_name = relation_name + + def __call__(self): + relation = self.charm.model.get_relation(self.relation_name) + + return {} + + +class WSGIWorkerConfigContext(ContextGenerator): + + def __init__(self, charm): + super().__init__(charm, 'WSGIWorkerConfigContext') + + def __call__(self, *args, **kwargs): + return { + 'name': 'keystone', + 'admin_script': '/usr/bin/keystone-wsgi-admin', + 'public_script': '/usr/bin/keystone-wsgi-public', + } + + +class KeystoneContext(ContextGenerator): + + def __init__(self, charm): + super().__init__(charm, 'KeystoneContext') + + def __call__(self, *args, **kwargs): + ctxt = { + 'api_version': 3, + 'admin_role': self.charm.model.config['admin-role'], + } + + return ctxt diff --git a/charms/keystone-k8s/src/utils/cprocess.py b/charms/keystone-k8s/src/utils/cprocess.py new file mode 100644 index 00000000..6e3f405a --- /dev/null +++ b/charms/keystone-k8s/src/utils/cprocess.py @@ -0,0 +1,365 @@ +# Copyright 2021, Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import subprocess +import textwrap +import time +import typing +import weakref + +from ops import model +from ops import pebble +import uuid + +import logging + + +logger = logging.getLogger(__name__) + +# Unknown return code is a large negative number outside the usual range of a +# process exit code +RETURN_CODE_UNKNOWN = -1000 + + +class ContainerProcess(object): + """A process that has finished running.
+ + This is returned by an invocation to run() + + :param container: the container the process was running in + :type container: model.Container + :param process_name: the name of the process the container is running as + :type process_name: str + :param tmp_dir: the dir containing the location of process files + :type tmp_dir: str + """ + def __init__(self, container: model.Container, process_name: str, + tmp_dir: str): + self.container = weakref.proxy(container) + self.process_name = process_name + self._returncode = RETURN_CODE_UNKNOWN + self.tmp_dir = tmp_dir + self.stdout_file = f'{tmp_dir}/{process_name}.stdout' + self.stderr_file = f'{tmp_dir}/{process_name}.stderr' + self._env = dict() + self.env_file = f'{tmp_dir}/{process_name}.env' + self.rc_file = f'{tmp_dir}/{process_name}.rc' + self._cleaned = False + + @property + def stdout(self) -> typing.Union[typing.BinaryIO, typing.TextIO]: + return self.container.pull(f'{self.stdout_file}') + + @property + def stderr(self) -> typing.Union[typing.BinaryIO, typing.TextIO]: + return self.container.pull(f'{self.stderr_file}') + + @property + def env(self) -> typing.Dict[str, str]: + if self._env: + return self._env + + with self.container.pull(f'{self.env_file}') as f: + for env_vars in f.read().split(b'\n'): + key_values = env_vars.split(b'=', 1) + self._env[key_values[0]] = key_values[1] + + return self._env + + @property + def returncode(self) -> int: + if self._returncode == RETURN_CODE_UNKNOWN: + self._returncode = self._get_returncode() + return self._returncode + + def _get_returncode(self): + """Reads the contents of the returncode file""" + try: + with self.container.pull(f'{self.rc_file}') as text: + return int(text.read()) + except pebble.PathError: + # If the rc file doesn't exist within the container, then the + # process is either running or failed without capturing output + return RETURN_CODE_UNKNOWN + + @property + def completed(self) -> bool: + return self._returncode != RETURN_CODE_UNKNOWN + + def check_returncode(self): + """Raise CalledProcessError if the exit code is non-zero.""" + if self.returncode: + stdout = None + stderr = None + try: + stdout = self.stdout.read() + except pebble.PathError: + pass + try: + stderr = self.stderr.read() + except pebble.PathError: + pass + raise CalledProcessError(self.returncode, self.process_name, + stdout, stderr) + + def wait(self, timeout: int = 30) -> None: + """Waits for the process to complete. + + Waits for the process to complete. If the process has not completed + within the timeout specified, this method will raise a TimeoutExpired + exception. + + :param timeout: the number of seconds to wait before timing out + :type timeout: int + """ + timeout_at = time.time() + timeout + while not self.completed and time.time() < timeout_at: + try: + self._returncode = self._get_returncode() + if self.completed: + return + else: + time.sleep(0.2) + except pebble.PathError: + # This happens while the process is still running + # Sleep a moment and try again + time.sleep(0.2) + + raise TimeoutExpired(self.process_name, timeout) + + def cleanup(self) -> None: + """Clean up process files left on the container. + + Attempts to cleanup the process artifacts left on the container. This + will remove the directory containing the stdout, stderr, rc and env + files generated. + + :raises pebble.PathError: when the path has already been cleand up. 
+ """ + if self._cleaned: + return + + self.container.remove_path(f'{self.tmp_dir}', recursive=True) + + def __del__(self): + """On destruction of this process, we'll attempt to clean up left over + process files. + """ + try: + self.cleanup() + except pebble.PathError: + pass + + +class ContainerProcessError(Exception): + """Base class for exceptions raised within this module.""" + pass + + +class CalledProcessError(ContainerProcessError): + """Raised when an error occurs running a process in a container and + the check=True has been passed to raise an error on failure. + + :param returncode: the exit code from the program + :type returncode: int + :param cmd: the command that was run + :type cmd: str or list + :param stdout: the output of the command on standard out + :type stdout: str + :param stderr: the output of the command on standard err + :type stderr: str + """ + def __init__(self, returncode: int, cmd: typing.Union[str, list], + stdout: str = None, stderr: str = None): + self.returncode = returncode + self.cmd = cmd + self.stdout = stdout + self.stderr = stderr + + +class TimeoutExpired(ContainerProcessError): + """This exception is raised when the timeout expires while waiting for a + container process. + + :param cmd: the command that was run + :type cmd: list + :param timeout: the configured timeout for the command + :type timeout: int + """ + def __init__(self, cmd: typing.Union[str, list], timeout: int): + self.cmd = cmd + self.timeout = timeout + + def __str__(self): + return f"Command '{self.cmd}' timed out after {self.timeout} seconds" + + +def run(container: model.Container, args: typing.List[str], + timeout: int = 30, check: bool = False, + env: dict = None, service_name: str = None) -> ContainerProcess: + """Run command with arguments in the specified container. + + Run a command in the specified container and returns a + subprocess.CompletedProcess instance containing the command which + was run (args), returncode, and stdout and stderr. When the check + option is True and the process exits with a non-zero exit code, a + CalledProcessError will be raised containing the cmd, returncode, + stdout and stderr. + + :param container: the container to run the command in + :type container: model.Container + :param args: the command to run in the container + :type args: str or list + :param timeout: the timeout of the process in seconds + :type timeout: int + :param check: when True, raise an exception on a non-zero exit code + :type check: bool + :param env: environment variables to pass to the process + :type env: dict + :param service_name: name of the service + :type service_name: str + :returns: CompletedProcess the completed process + :rtype: ContainerProcess + """ + if not container: + raise ValueError('container cannot be None') + if not isinstance(container, model.Container): + raise ValueError('container must be of type ops.model.Container, ' + f'not of type {type(container)}') + + if isinstance(args, str): + if service_name is None: + service_name = args.split(' ')[0] + service_name = service_name.split('/')[-1] + cmdline = args + elif isinstance(args, list): + if service_name is None: + service_name = args[0].split('/')[-1] + cmdline = subprocess.list2cmdline(args) + else: + raise ValueError('args are expected to be a str or a list of str.' 
+ f' Provided {type(args)}') + + tmp_dir = f'/tmp/{service_name}-{str(uuid.uuid4()).split("-")[0]}' + process = ContainerProcess(container, service_name, tmp_dir) + + command = f"""\ + #!/bin/bash + mkdir -p {tmp_dir} + echo $(env) > {process.env_file} + {cmdline} 2> {process.stderr_file} 1> {process.stdout_file} + rc=$? + echo $rc > {process.rc_file} + exit $rc + """ + command = textwrap.dedent(command) + + container.push(path=f'/tmp/{service_name}.sh', source=command, + encoding='utf-8', permissions=0o755) + + logger.debug(f'Adding layer for {service_name} to run command ' + f'{cmdline}') + container.add_layer('process_layer', { + 'summary': 'container process runner', + 'description': 'layer for running single-shot commands (kinda)', + 'services': { + service_name: { + 'override': 'replace', + 'summary': cmdline, + 'command': f'/tmp/{service_name}.sh', + 'startup': 'disabled', + 'environment': env or {}, + } + } + }, combine=True) + + timeout_at = time.time() + timeout + try: + # Start the service which will run the command. + logger.debug(f'Starting {service_name} via pebble') + + # TODO(wolsen) this is quite naughty, but the container object + # doesn't provide us access to the pebble layer to specify + # timeouts and such. Some commands may need a longer time to + # start, and as such I'm using the private internal reference + # in order to be able to specify the timeout itself. + container._pebble.start_services([service_name], # noqa + timeout=float(timeout)) + except pebble.ChangeError: + # Check to see if the command has timed out and if so, raise + # the TimeoutExpired. + if time.time() >= timeout_at: + logger.error(f'Command {cmdline} could not start out after ' + f'{timeout} seconds in container ' + f'{container.name}') + raise TimeoutExpired(args, timeout) + + # Note, this could be expected. + logger.exception(f'Error running {service_name}') + + logger.debug('Waiting for process completion...') + process.wait(timeout) + + # It appears that pebble services are still active after the command + # has finished. Feels like a bug, but let's stop it. + try: + service = container.get_service(service_name) + if service.is_running(): + container.stop(service_name) + except pebble.ChangeError as e: + # Eat the change error that might occur. This was a best effort + # attempt to ensure the process is stopped + logger.exception(f'Failed to stop service {service_name}', e) + + if check: + process.check_returncode() + return process + + +def call(container: model.Container, args: typing.Union[str, list], + env: dict = None, timeout: int = 30) -> int: + """Runs a command in the container. + + The command will run until the process completes (either normally or + abnormally) or until the timeout expires. 
+ + :param container: the container to run the command in + :type container: model.Container + :param args: arguments to pass to the commandline + :type args: str or list of strings + :param env: environment variables for the process + :type env: dictionary of environment variables + :param timeout: number of seconds the command should complete in before + timing out + :type timeout: int + :returns: the exit code of the process + :rtype: int + """ + return run(container, args, env=env, timeout=timeout).returncode + + +def check_call(container: model.Container, args: typing.Union[str, list], + env: dict = None, timeout: int = 30, + service_name: str = None) -> None: + run(container, args, env=env, check=True, timeout=timeout, + service_name=service_name) + + +def check_output(container: model.Container, args: typing.Union[str, list], + env: dict = None, timeout: int = 30, + service_name: str = None) -> str: + process = run(container, args, env=env, check=True, timeout=timeout, + service_name=service_name) + with process.stdout as stdout: + return stdout.read() diff --git a/charms/keystone-k8s/src/utils/guard.py b/charms/keystone-k8s/src/utils/guard.py new file mode 100644 index 00000000..259cb827 --- /dev/null +++ b/charms/keystone-k8s/src/utils/guard.py @@ -0,0 +1,73 @@ +# Copyright 2021, Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +from contextlib import contextmanager + +from ops.model import BlockedStatus + +logger = logging.getLogger(__name__) + + +class GuardException(Exception): + pass + + +class BlockedException(Exception): + pass + + +@contextmanager +def guard(charm: 'CharmBase', + section: str, + handle_exception: bool = True, + log_traceback: bool = True, + **__): + """Context manager to handle errors and bailing out of an event/hook. + The nature of Juju is that all the information may not be available to run + a set of actions. This context manager allows a section of code to be + 'guarded' so that it can be bailed at any time. + + It also handles errors which can be interpreted as a Block rather than the + charm going into error. + + :param charm: the charm class (so that unit status can be set) + :param section: the name of the section (for debugging/info purposes) + :handle_exception: whether to handle the exception to a BlockedStatus() + :log_traceback: whether to log the traceback for debugging purposes. 
:raises: Exception if handle_exception is False + """ + logger.info("Entering guarded section: '%s'", section) + try: + yield + logging.info("Completed guarded section fully: '%s'", section) + except GuardException as e: + logger.info("Guarded Section: Early exit from '%s' due to '%s'.", + section, str(e)) + except BlockedException as e: + logger.warning( + "Charm is blocked in section '%s' due to '%s'", section, str(e)) + charm.unit.status = BlockedStatus(str(e)) + except Exception as e: + # something else went wrong + if handle_exception: + logging.error("Exception raised in section '%s': %s", + section, str(e)) + if log_traceback: + import traceback + logging.error(traceback.format_exc()) + charm.unit.status = BlockedStatus( + "Error in charm (see logs): {}".format(str(e))) + return + raise diff --git a/charms/keystone-k8s/src/utils/manager.py b/charms/keystone-k8s/src/utils/manager.py new file mode 100644 index 00000000..171ca667 --- /dev/null +++ b/charms/keystone-k8s/src/utils/manager.py @@ -0,0 +1,528 @@ +# Copyright 2021, Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from ops import framework +from ops.model import Container +from ops.model import MaintenanceStatus + +from keystoneauth1 import session +from keystoneauth1.identity import v3 +from keystoneclient.v3 import client + +from utils.cprocess import check_output +from utils.cprocess import check_call +from utils.cprocess import ContainerProcessError + +from utils.guard import guard + +import logging +import typing + +logger = logging.getLogger(__name__) + + +class KeystoneException(Exception): + pass + + +class KeystoneManager(framework.Object): + """ + Helper for managing and interacting with the keystone service. + """ + def __init__(self, charm): + super().__init__(charm, 'keystone-manager') + self.charm = charm + self._api = None + + @property + def api(self): + """ + Returns the current api reference or creates a new one. + + TODO(wolsen): All of the direct interaction with keystone belongs in + an Adapter class which can handle v3 as well as future versions.
+ """ + if self._api: + return self._api + + # TODO(wolsen) use appropriate values rather than these + auth = v3.Password( + auth_url="http://localhost:5000/v3", + username=self.charm.admin_user, + password='abc123', + system_scope='all', + project_domain_name='Default', + user_domain_name='Default', + ) + keystone_session = session.Session(auth=auth) + self._api = client.Client(session=keystone_session, + endpoint_override='http://localhost:5000/v3') + return self._api + + @property + def admin_endpoint(self): + admin_hostname = self.charm.model.config['os-admin-hostname'] + admin_port = self.charm.model.config['admin-port'] + return f'http://{admin_hostname}:{admin_port}/v3' + + @property + def internal_endpoint(self): + internal_hostname = self.charm.model.config['os-internal-hostname'] + service_port = self.charm.model.config['service-port'] + return f'http://{internal_hostname}:{service_port}/v3' + + @property + def public_endpoint(self): + public_hostname = self.charm.model.config['os-public-hostname'] + return f'http://{public_hostname}:5000/v3' + + @property + def regions(self): + # split regions and strip out empty regions + regions = [r for r in self.charm.model.config['region'].split() if r] + return regions + + def setup_keystone(self, container: Container): + """Runs the keystone setup process for first time configuration. + + Runs through the keystone setup process for initial installation and + configuration. This involves creating the database, setting up fernet + repositories for tokens and credentials, and bootstrapping the initial + keystone service. + + :param container: the container to set keystone up in. + :type container: Container + """ + with guard(self.charm, 'Initializing Keystone'): + self._sync_database(container) + self._fernet_setup(container) + self._credential_setup(container) + self._bootstrap(container) + + def _set_status(self, status: str, app: bool = False) -> None: + """Sets the status to the specified status string. + By default, the status is set on the individual unit but can be set + for the whole application if app is set to True. + + :param status: the status to set + :type status: str + :param app: whether to set the status for the application or the unit + :type app: bool + :return: None + """ + if app: + target = self.charm.app + else: + target = self.charm.unit + + target.status = MaintenanceStatus(status) + + def _sync_database(self, container: Container): + """Syncs the database using the keystone-manage db_sync + + The database is synchronized using the keystone-manage db_sync command. + Database configuration information is retrieved from configuration + files. + + :param container: the container to sync the database in + :type container: ops.model.Container + :raises: KeystoneException when the database sync fails. + """ + try: + self._set_status('Syncing database') + logger.info("Syncing database...") + out = check_output(container, + ['sudo', '-u', 'keystone', + 'keystone-manage', 'db_sync'], + service_name='keystone-db-sync') + logging.debug(f'Output from database sync: \n{out}') + except ContainerProcessError: + logger.exception('Error occurred synchronizing the database.') + raise KeystoneException('Database sync failed') + + def _fernet_setup(self, container): + """Sets up the fernet token store in the specified container. 
+
+        :param container: the container to set up the fernet token store in
+        :type container: ops.model.Container
+        :raises: KeystoneException when a failure occurs setting up the fernet
+                 token store
+        """
+        try:
+            self._set_status('Setting up fernet tokens')
+            logger.info("Setting up fernet tokens...")
+            out = check_output(container,
+                               ['sudo', '-u', 'keystone',
+                                'keystone-manage', 'fernet_setup'],
+                               service_name='keystone-fernet-setup')
+            logger.debug(f'Output from keystone fernet setup: \n{out}')
+        except ContainerProcessError:
+            logger.exception('Error occurred setting up fernet tokens')
+            raise KeystoneException('Fernet setup failed.')
+
+    def _credential_setup(self, container):
+        """Sets up credential keys using keystone-manage credential_setup.
+
+        :param container: the container to set the credential keys up in
+        :type container: ops.model.Container
+        :raises: KeystoneException when the credential setup fails
+        """
+        try:
+            self._set_status('Setting up credentials')
+            logger.info("Setting up credentials...")
+            check_output(container,
+                         ['sudo', '-u', 'keystone',
+                          'keystone-manage', 'credential_setup'],
+                         service_name='keystone-credential-setup')
+        except ContainerProcessError:
+            logger.exception('Error occurred during credential setup')
+            raise KeystoneException('Credential setup failed.')
+
+    def _bootstrap(self, container):
+        """Bootstraps the keystone service using keystone-manage bootstrap.
+
+        :param container: the container to bootstrap keystone in
+        :type container: ops.model.Container
+        :raises: KeystoneException when the bootstrap fails
+        """
+        try:
+            self._set_status('Bootstrapping Keystone')
+            logger.info('Bootstrapping keystone service')
+
+            # NOTE(wolsen) in classic keystone charm, there's a comment about
+            # enabling immutable roles for this. This is unnecessary as it is
+            # now the default behavior for keystone-manage bootstrap.
+            check_call(container,
+                       ['keystone-manage', 'bootstrap',
+                        '--bootstrap-username', self.charm.charm_user,
+                        '--bootstrap-password', self.charm.charm_password,
+                        '--bootstrap-project-name', 'admin',
+                        '--bootstrap-role-name', self.charm.admin_role,
+                        '--bootstrap-service-name', 'keystone',
+                        '--bootstrap-admin-url', self.admin_endpoint,
+                        '--bootstrap-public-url', self.public_endpoint,
+                        '--bootstrap-internal-url', self.internal_endpoint,
+                        '--bootstrap-region-id', self.regions[0]],
+                       service_name='keystone-manage-bootstrap')
+        except ContainerProcessError:
+            logger.exception('Error occurred bootstrapping keystone service')
+            raise KeystoneException('Bootstrap failed')
+
+    def setup_initial_projects_and_users(self):
+        """Creates the initial projects and users after bootstrap.
+
+        Ensures the admin and service accounts exist and that keystone's
+        own entries in the service catalog are up to date.
+        """
+        with guard(self.charm, 'Setting up initial projects and users'):
+            self._setup_admin_accounts()
+            self._setup_service_accounts()
+            self.update_service_catalog_for_keystone()
+
+    def _setup_admin_accounts(self):
+        """Ensures the admin domain, project, user and roles exist."""
+        # Get the default domain id
+        default_domain = self.get_domain('default')
+        logger.debug(f'Default domain id: {default_domain.id}')
+
+        # Get the admin domain id
+        admin_domain = self.create_domain(name='admin_domain',
+                                          may_exist=True)
+        logger.debug(f'Admin domain id: {admin_domain.id}')
+
+        # Ensure that we have the necessary projects: admin and service
+        admin_project = self.create_project(name='admin', domain=admin_domain,
+                                            may_exist=True)
+
+        logger.debug('Ensuring admin user exists')
+        admin_user = self.create_user(name=self.charm.admin_user,
+                                      password=self.charm.admin_password,
+                                      domain=admin_domain, may_exist=True)
+
+        logger.debug('Ensuring roles exist for admin')
+        # I seem to recall all kinds of grief between Member and member and
+        # _member_ and inconsistencies in what other projects expect.
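+        # NOTE: may_exist=True keeps the calls below idempotent, so running
+        # this setup again after a partial failure will not fail on roles or
+        # grants that were already created.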
+        member_role = self.create_role(name='Member', may_exist=True)
+        admin_role = self.create_role(name=self.charm.admin_role,
+                                      may_exist=True)
+
+        logger.debug('Granting roles to admin user')
+        # Make the admin a member of the admin project
+        self.grant_role(role=member_role, user=admin_user,
+                        project=admin_project, may_exist=True)
+        # Make the admin an admin of the admin project
+        self.grant_role(role=admin_role, user=admin_user,
+                        project=admin_project, may_exist=True)
+        # Make the admin a domain-level admin
+        self.grant_role(role=admin_role, user=admin_user,
+                        domain=admin_domain, may_exist=True)
+
+    def _setup_service_accounts(self):
+        """Ensures the service domain and the service project exist."""
+        # Get the service domain id
+        service_domain = self.create_domain(name='service_domain',
+                                            may_exist=True)
+        logger.debug(f'Service domain id: {service_domain.id}.')
+
+        service_project = self.create_project(name=self.charm.service_tenant,
+                                              domain=service_domain,
+                                              may_exist=True)
+        logger.debug(f'Service project id: {service_project.id}.')
+
+    def update_service_catalog_for_keystone(self):
+        """Ensures the keystone identity service and its endpoints are
+        registered in the service catalog for each configured region.
+        """
+        service = self.create_service(name='keystone', service_type='identity',
+                                      description='Keystone Identity Service',
+                                      may_exist=True)
+
+        endpoints = {
+            'admin': self.admin_endpoint,
+            'internal': self.internal_endpoint,
+            'public': self.public_endpoint,
+        }
+
+        for region in self.charm.model.config['region'].split():
+            if not region:
+                continue
+
+            for interface, url in endpoints.items():
+                self.create_endpoint(service=service, interface=interface,
+                                     url=url, region=region, may_exist=True)
+
+    def get_domain(self, name: str) -> 'Domain':
+        """Returns the domain specified by the name, or None if a matching
+        domain could not be found.
+
+        :param name: the name of the domain
+        :type name: str
+        :rtype: 'Domain' or None
+        """
+        for domain in self.api.domains.list():
+            if domain.name.lower() == name.lower():
+                return domain
+
+        return None
+
+    def create_domain(self, name: str, description: str = 'Created by Juju',
+                      may_exist: bool = False) -> 'Domain':
+        """Creates a domain, returning the existing domain if may_exist is
+        True and a domain with the same name already exists.
+        """
+        if may_exist:
+            domain = self.get_domain(name)
+            if domain:
+                logger.debug(f'Domain {name} already exists with domain '
+                             f'id {domain.id}.')
+                return domain
+
+        domain = self.api.domains.create(name=name, description=description)
+        logger.debug(f'Created domain {name} with id {domain.id}')
+        return domain
+
+    def create_project(self, name: str, domain: 'Domain',
+                       description: str = 'Created by Juju',
+                       may_exist: bool = False) -> 'Project':
+        """Creates a project in the given domain, returning the existing
+        project if may_exist is True and one with the same name exists.
+        """
+        if may_exist:
+            for project in self.api.projects.list(domain=domain):
+                if project.name.lower() == name.lower():
+                    logger.debug(f'Project {name} already exists with project '
+                                 f'id {project.id}.')
+                    return project
+
+        project = self.api.projects.create(name=name, description=description,
+                                           domain=domain)
+        logger.debug(f'Created project {name} with id {project.id}')
+        return project
+
+    def create_user(self, name: str, password: str, email: str = None,
+                    project: 'Project' = None,
+                    domain: 'Domain' = None,
+                    may_exist: bool = False) -> 'User':
+        """Creates a user, returning the existing user if may_exist is True
+        and a user with the same name already exists.
+        """
+        if may_exist:
+            user = self.get_user(name, project=project, domain=domain)
+            if user:
+                logger.debug(f'User {name} already exists with user '
+                             f'id {user.id}.')
+                return user
+
+        user = self.api.users.create(name=name, default_project=project,
+                                     domain=domain, password=password,
+                                     email=email)
+        logger.debug(f'Created user {user.name} with id {user.id}.')
+        return user
+
+    def get_user(self, name: str, project: 'Project' = None,
+                 domain: typing.Union[str, 'Domain'] = None) -> 'User':
+        """Returns the user matching the name in the given project or
+        domain, or None if no matching user is found.
+        """
+        users = self.api.users.list(default_project=project, domain=domain)
+        for user in users:
+            if user.name.lower() == name.lower():
+                return user
+
+        return None
+
+    def create_role(self, name: str,
+                    domain: typing.Union['Domain', str] = None,
+                    may_exist: bool = False) -> 'Role':
+        """Creates a role, returning the existing role if may_exist is True
+        and a role with the same name already exists.
+        """
+        if may_exist:
+            role = self.get_role(name=name, domain=domain)
+            if role:
+                logger.debug(f'Role {name} already exists with role '
+                             f'id {role.id}')
+                return role
+
+        role = self.api.roles.create(name=name, domain=domain)
+        logger.debug(f'Created role {name} with id {role.id}.')
+        return role
+
+    def get_role(self, name: str,
+                 domain: 'Domain' = None) -> 'Role':
+        """Returns the role matching the name, or None if it is not found."""
+        for role in self.api.roles.list(domain=domain):
+            if role.name == name:
+                return role
+
+        return None
+
+    def get_roles(self, user: 'User',
+                  project: 'Project' = None,
+                  domain: 'Domain' = None) \
+            -> typing.List['Role']:
+        """Returns the roles the user has on the given project or domain.
+
+        Exactly one of project or domain must be specified.
+        """
+        if project and domain:
+            raise ValueError('Project and domain are mutually exclusive')
+        if not project and not domain:
+            raise ValueError('Project or domain must be specified')
+
+        if project:
+            roles = self.api.roles.list(user=user, project=project)
+        else:
+            roles = self.api.roles.list(user=user, domain=domain)
+
+        return roles
+
+    def grant_role(self, role: typing.Union['Role', str],
+                   user: 'User',
+                   project: typing.Union['Project', str] = None,
+                   domain: typing.Union['Domain', str] = None,
+                   may_exist: bool = False) -> 'Role':
+        """Grants the user the role on the given project or domain.
+
+        Exactly one of project or domain must be specified. If may_exist
+        is True and the user already holds the role, the existing grant
+        is returned.
+        """
+        if project and domain:
+            raise ValueError('Project and domain are mutually exclusive')
+        if not project and not domain:
+            raise ValueError('Project or domain must be specified')
+
+        if domain:
+            ctxt_str = f'domain {domain.name}'
+        else:
+            ctxt_str = f'project {project.name}'
+
+        if may_exist:
+            roles = self.get_roles(user=user, project=project, domain=domain)
+            for r in roles:
+                if role.id == r.id:
+                    logger.debug(f'User {user.name} already has role '
+                                 f'{role.name} for {ctxt_str}')
+                    return r
+
+        role = self.api.roles.grant(role=role, user=user, project=project,
+                                    domain=domain)
+        logger.debug(f'Granted user {user} role {role} for '
+                     f'{ctxt_str}.')
+        return role
+
+    def create_region(self, name: str, description: str = None,
+                      may_exist: bool = False) -> 'Region':
+        """Creates a region, returning the existing region if may_exist is
+        True and a region with the same name already exists.
+        """
+        if may_exist:
+            for region in self.api.regions.list():
+                if region.id == name:
+                    logger.debug(f'Region {name} already exists.')
+                    return region
+
+        region = self.api.regions.create(id=name, description=description)
+        logger.debug(f'Created region {name}.')
+        return region
+
+    def create_service(self, name: str, service_type: str,
+                       description: str, owner: str = None,
+                       may_exist: bool = False) -> 'Service':
+        """Creates a service, returning the existing service if may_exist
+        is True and a service with the same name already exists.
+        """
+        if may_exist:
+            services = self.api.services.list(name=name, type=service_type)
+            # TODO(wolsen) can we have more than one service with the same
+            # service name? I don't think so, so we'll just handle the first
+            # one for now.
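+            # The loop below returns on its first iteration, so only the
+            # first matching service is ever reused.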
+ for service in services: + logger.debug(f'Service {name} already exists with ' + f'service id {service.id}.') + return service + + service = self.api.services.create(name=name, type=service_type, + description=description) + logger.debug(f'Created service {service.name} with id {service.id}') + return service + + def create_endpoint(self, service: 'Service', url: str, interface: str, + region: str, may_exist: bool = False) \ + -> 'Endpoint': + """ + + """ + ep_string = (f'{interface} endpoint for service {service} in ' + f'region {region}') + if may_exist: + endpoints = self.api.endpoints.list(service=service, + interface=interface, + region=region) + if endpoints: + # NOTE(wolsen) if we have endpoints found, there should be only + # one endpoint; but assert it to make sure + assert len(endpoints) == 1 + endpoint = endpoints[0] + if endpoint.url != url: + logger.debug(f'{ep_string} ({endpoint.url}) does ' + f'not match requested url ({url}). Updating.') + endpoint = self.api.endpoints.update(endpoint=endpoint, + url=url) + logger.debug(f'Endpoint updated to use {url}') + else: + logger.debug(f'Endpoint {ep_string} already exists with ' + f'id {endpoint.id}') + return endpoint + + endpoint = self.api.endpoints.create(service=service, url=url, + interface=interface, + region=region) + logger.debug(f'Created endpoint {ep_string} with id {endpoint.id}') + return endpoint diff --git a/charms/keystone-k8s/src/utils/templating.py b/charms/keystone-k8s/src/utils/templating.py new file mode 100644 index 00000000..291b69da --- /dev/null +++ b/charms/keystone-k8s/src/utils/templating.py @@ -0,0 +1,101 @@ +# Copyright 2021, Canonical Ltd. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging + +from collections import defaultdict + +from charmhelpers.contrib.openstack.templating import OSConfigException +from charmhelpers.contrib.openstack.templating import OSConfigRenderer + +log = logging.getLogger(__name__) + + +class SidecarConfigRenderer(OSConfigRenderer): + + """ + This class provides a common templating system to be used by OpenStack + sidecar charms. + """ + def __init__(self, templates_dir, openstack_release): + super(SidecarConfigRenderer, self).__init__(templates_dir, + openstack_release) + self.config_to_containers = defaultdict(set) + self.owner_info = defaultdict(set) + + def _get_template(self, template): + """ + + """ + self._get_tmpl_env() + if not template.endswith('.j2'): + template += '.j2' + template = self._tmpl_env.get_template(template) + log.debug(f'Loaded template from {template.filename}') + return template + + def register(self, config_file, contexts, config_template=None, + containers=None, user=None, group=None): + """ + + """ + # NOTE(wolsen): Intentionally overriding base class to raise an error + # if this is accidentally used instead. + if containers is None: + raise ValueError('One or more containers must be provided') + + super().register(config_file, contexts, config_template) + + # Register user/group info. 
There's a better way to do this for sure + if user or group: + self.owner_info[config_file] = (user, group) + + for container in containers: + self.config_to_containers[config_file].add(container) + log.debug(f'Registered config file "{config_file}" for container ' + f'{container}') + + def write(self, config_file, container): + """ + + """ + containers = self.config_to_containers.get(config_file) + if not containers or container.name not in containers: + log.error(f'Config file {config_file} not registered for ' + f'container {container.name}') + raise OSConfigException + + contents = self.render(config_file) + owner_info = self.owner_info.get(config_file) + kwargs = {} + log.debug(f'Got owner_info of {owner_info}') + if owner_info: + user, group = owner_info + kwargs['user'] = user + kwargs['group'] = group + container.push(config_file, contents, **kwargs) + + log.debug(f'Wrote template {config_file} in container ' + f'{container.name}.') + + def write_all(self, container=None): + for config_file, containers in self.config_to_containers.items(): + if container: + if container.name not in containers: + continue + + self.write(config_file, container) + else: + for c in containers: + self.write(config_file, c) diff --git a/charms/keystone-k8s/tests/__init__.py b/charms/keystone-k8s/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/charms/keystone-k8s/tests/test_charm.py b/charms/keystone-k8s/tests/test_charm.py new file mode 100644 index 00000000..f0df4ec2 --- /dev/null +++ b/charms/keystone-k8s/tests/test_charm.py @@ -0,0 +1,37 @@ +# Copyright 2021 Billy Olsen +# See LICENSE file for licensing details. +# +# Learn more about testing at: https://juju.is/docs/sdk/testing + +import unittest +from unittest.mock import Mock + +from ops.testing import Harness +from charm import KeystoneOperatorCharm + + +class TestCharm(unittest.TestCase): + def test_config_changed(self): + harness = Harness(KeystoneOperatorCharm) + self.addCleanup(harness.cleanup) + harness.begin() + self.assertEqual(list(harness.charm._stored.things), []) + harness.update_config({"thing": "foo"}) + self.assertEqual(list(harness.charm._stored.things), ["foo"]) + + def test_action(self): + harness = Harness(KeystoneOperatorCharm) + harness.begin() + # the harness doesn't (yet!) help much with actions themselves + action_event = Mock(params={"fail": ""}) + harness.charm._on_fortune_action(action_event) + + self.assertTrue(action_event.set_results.called) + + def test_action_fail(self): + harness = Harness(KeystoneOperatorCharm) + harness.begin() + action_event = Mock(params={"fail": "fail this"}) + harness.charm._on_fortune_action(action_event) + + self.assertEqual(action_event.fail.call_args, [("fail this",)]) diff --git a/charms/keystone-k8s/tox.ini b/charms/keystone-k8s/tox.ini new file mode 100644 index 00000000..ac4f2aac --- /dev/null +++ b/charms/keystone-k8s/tox.ini @@ -0,0 +1,73 @@ +# Operator charm (with zaza): tox.ini + +[tox] +envlist = pep8,py3 +skipsdist = True +# NOTE: Avoid build/test env pollution by not enabling sitepackages. +sitepackages = False +# NOTE: Avoid false positives by not skipping missing interpreters. 
+skip_missing_interpreters = False + +[testenv] +setenv = VIRTUAL_ENV={envdir} + PYTHONHASHSEED=0 + CHARM_DIR={envdir} +install_command = + pip install {opts} {packages} +whitelist_externals = + charmcraft + git + add-to-archive.py + bash + juju + ln +passenv = HOME TERM CS_* OS_* TEST_* + +[testenv:py3] +basepython = python3 +deps = -r{toxinidir}/requirements-dev.txt +commands = flake8 src tests + coverage run --source=src -m unittest -v "$@" + coverage report -m + + +[testenv:pep8] +basepython = python3 +deps = -r{toxinidir}/requirements-dev.txt +commands = flake8 {posargs} src tests lib + +[testenv:venv] +basepython = python3 +deps = -r{toxinidir}/requirements-dev.txt +commands = ln -s -f .tox/venv . + ./run_tests + +[testenv:build] +basepython = python3 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/requirements-dev.txt +commands = + flake8 {posargs} src tests + charmcraft build + +[testenv:refresh] +basepython = python3 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/requirements-dev.txt +commands = + juju refresh keystone-operator --path ./keystone-operator.charm + +[testenv:deploy] +basepython = python3 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/requirements-dev.txt +commands = + juju deploy ./thedac-rabbitmq-operator --resource thedac-rabbitmq-image=rabbitmq + + + +[flake8] +# Ignore E902 because the unit_tests directory is missing in the built charm. +# Ignore F821 due to typing not importing objects +ignore = E402,E226,E902,W504,F821 +
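+
+# Illustrative usage (assuming tox is installed on the development host):
+#   tox -e pep8    # run the linters
+#   tox -e py3     # run the unit tests with coverage
+#   tox -e build   # lint the source and build the charm with charmcraft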