commit be567fb0b8d10200b362ba53a96144940882f59b Author: Liam Young Date: Thu Jan 13 13:38:03 2022 +0000 Initial Cut diff --git a/charms/neutron-k8s/.flake8 b/charms/neutron-k8s/.flake8 new file mode 100644 index 00000000..8ef84fcd --- /dev/null +++ b/charms/neutron-k8s/.flake8 @@ -0,0 +1,9 @@ +[flake8] +max-line-length = 99 +select: E,W,F,C,N +exclude: + venv + .git + build + dist + *.egg_info diff --git a/charms/neutron-k8s/.gitignore b/charms/neutron-k8s/.gitignore new file mode 100644 index 00000000..de9170b0 --- /dev/null +++ b/charms/neutron-k8s/.gitignore @@ -0,0 +1,10 @@ +venv/ +build/ +*.charm +*.swp + +.coverage +__pycache__/ +*.py[cod] +.tox +.stestr/ diff --git a/charms/neutron-k8s/.jujuignore b/charms/neutron-k8s/.jujuignore new file mode 100644 index 00000000..6ccd559e --- /dev/null +++ b/charms/neutron-k8s/.jujuignore @@ -0,0 +1,3 @@ +/venv +*.py[cod] +*.charm diff --git a/charms/neutron-k8s/.stestr.conf b/charms/neutron-k8s/.stestr.conf new file mode 100644 index 00000000..5fcccaca --- /dev/null +++ b/charms/neutron-k8s/.stestr.conf @@ -0,0 +1,3 @@ +[DEFAULT] +test_path=./unit_tests +top_dir=./ diff --git a/charms/neutron-k8s/CONTRIBUTING.md b/charms/neutron-k8s/CONTRIBUTING.md new file mode 100644 index 00000000..b304f62f --- /dev/null +++ b/charms/neutron-k8s/CONTRIBUTING.md @@ -0,0 +1,34 @@ +# sunbeam-neutron-operator + +## Developing + +Create and activate a virtualenv with the development requirements: + + virtualenv -p python3 venv + source venv/bin/activate + pip install -r requirements-dev.txt + +## Code overview + +TEMPLATE-TODO: +One of the most important things a consumer of your charm (or library) +needs to know is what set of functionality it provides. Which categories +does it fit into? Which events do you listen to? Which libraries do you +consume? Which ones do you export and how are they used? + +## Intended use case + +TEMPLATE-TODO: +Why were these decisions made? What's the scope of your charm? + +## Roadmap + +If this Charm doesn't fulfill all of the initial functionality you were +hoping for or planning on, please add a Roadmap or TODO here + +## Testing + +The Python operator framework includes a very nice harness for testing +operator behaviour without full deployment. Just `run_tests`: + + ./run_tests diff --git a/charms/neutron-k8s/LICENSE b/charms/neutron-k8s/LICENSE new file mode 100644 index 00000000..d6456956 --- /dev/null +++ b/charms/neutron-k8s/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/charms/neutron-k8s/README.md b/charms/neutron-k8s/README.md
new file mode 100644
index 00000000..4c7e9ece
--- /dev/null
+++ b/charms/neutron-k8s/README.md
@@ -0,0 +1,24 @@
+# sunbeam-neutron-operator
+
+## Description
+
+TODO: Describe your charm in a few paragraphs of Markdown
+
+## Usage
+
+TODO: Provide high-level usage, such as required config or relations
+
+
+## Relations
+
+TODO: Provide any relations which are provided or required by your charm
+
+## OCI Images
+
+TODO: Include a link to the default image your charm uses
+
+## Contributing
+
+Please see the [Juju SDK docs](https://juju.is/docs/sdk) for guidelines
+on enhancements to this charm following best practice guidelines, and
+`CONTRIBUTING.md` for developer guidance.
diff --git a/charms/neutron-k8s/actions.yaml b/charms/neutron-k8s/actions.yaml
new file mode 100644
index 00000000..88e6195d
--- /dev/null
+++ b/charms/neutron-k8s/actions.yaml
@@ -0,0 +1,2 @@
+# NOTE: no actions yet!
+{ }
diff --git a/charms/neutron-k8s/charmcraft.yaml b/charms/neutron-k8s/charmcraft.yaml
new file mode 100644
index 00000000..be706d54
--- /dev/null
+++ b/charms/neutron-k8s/charmcraft.yaml
@@ -0,0 +1,14 @@
+# Learn more about charmcraft.yaml configuration at:
+# https://juju.is/docs/sdk/charmcraft-config
+type: "charm"
+bases:
+  - build-on:
+      - name: "ubuntu"
+        channel: "20.04"
+    run-on:
+      - name: "ubuntu"
+        channel: "20.04"
+parts:
+  charm:
+    build-packages:
+      - git
diff --git a/charms/neutron-k8s/config.yaml b/charms/neutron-k8s/config.yaml
new file mode 100644
index 00000000..9a1602fb
--- /dev/null
+++ b/charms/neutron-k8s/config.yaml
@@ -0,0 +1,27 @@
+options:
+  debug:
+    default: False
+    description: Enable debug logging.
+    type: boolean
+  os-admin-hostname:
+    default: neutron.juju
+    description: |
+      The hostname or address of the admin endpoints that should be advertised
+      for the Neutron API service.
+    type: string
+  os-internal-hostname:
+    default: neutron.juju
+    description: |
+      The hostname or address of the internal endpoints that should be advertised
+      for the Neutron API service.
+    type: string
+  os-public-hostname:
+    default: neutron.juju
+    description: |
+      The hostname or address of the public endpoints that should be advertised
+      for the Neutron API service.
+    type: string
+  region:
+    default: RegionOne
+    description: Space-delimited list of OpenStack regions.
+    type: string
diff --git a/charms/neutron-k8s/lib/charms/nginx_ingress_integrator/v0/ingress.py b/charms/neutron-k8s/lib/charms/nginx_ingress_integrator/v0/ingress.py
new file mode 100644
index 00000000..c8d2e0b1
--- /dev/null
+++ b/charms/neutron-k8s/lib/charms/nginx_ingress_integrator/v0/ingress.py
@@ -0,0 +1,211 @@
+"""Library for the ingress relation.
+
+This library contains the Requires and Provides classes for handling
+the ingress interface.
+
+Import `IngressRequires` in your charm, with two required options:
+  - "self" (the charm itself)
+  - config_dict
+
+`config_dict` accepts the following keys:
+  - service-hostname (required)
+  - service-name (required)
+  - service-port (required)
+  - additional-hostnames
+  - limit-rps
+  - limit-whitelist
+  - max-body-size
+  - path-routes
+  - retry-errors
+  - rewrite-enabled
+  - rewrite-target
+  - service-namespace
+  - session-cookie-max-age
+  - tls-secret-name
+
+See [the config section](https://charmhub.io/nginx-ingress-integrator/configure) for descriptions
+of each, along with the required type.
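+
+For illustration, a more complete `config_dict` using several of the optional
+keys might look like the following (the hostname, service name, and values
+here are examples only, not defaults):
+```
+{
+    "service-hostname": "myservice.example.com",
+    "service-name": "myservice",
+    "service-port": 8080,
+    "max-body-size": "20m",
+    "path-routes": "/myservice",
+}
+```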
+ +As an example, add the following to `src/charm.py`: +``` +from charms.nginx_ingress_integrator.v0.ingress import IngressRequires + +# In your charm's `__init__` method. +self.ingress = IngressRequires(self, {"service-hostname": self.config["external_hostname"], + "service-name": self.app.name, + "service-port": 80}) + +# In your charm's `config-changed` handler. +self.ingress.update_config({"service-hostname": self.config["external_hostname"]}) +``` +And then add the following to `metadata.yaml`: +``` +requires: + ingress: + interface: ingress +``` +You _must_ register the IngressRequires class as part of the `__init__` method +rather than, for instance, a config-changed event handler. This is because +doing so won't get the current relation changed event, because it wasn't +registered to handle the event (because it wasn't created in `__init__` when +the event was fired). +""" + +import logging + +from ops.charm import CharmEvents +from ops.framework import EventBase, EventSource, Object +from ops.model import BlockedStatus + +# The unique Charmhub library identifier, never change it +LIBID = "db0af4367506491c91663468fb5caa4c" + +# Increment this major API version when introducing breaking changes +LIBAPI = 0 + +# Increment this PATCH version before using `charmcraft publish-lib` or reset +# to 0 if you are raising the major API version +LIBPATCH = 9 + +logger = logging.getLogger(__name__) + +REQUIRED_INGRESS_RELATION_FIELDS = { + "service-hostname", + "service-name", + "service-port", +} + +OPTIONAL_INGRESS_RELATION_FIELDS = { + "additional-hostnames", + "limit-rps", + "limit-whitelist", + "max-body-size", + "retry-errors", + "rewrite-target", + "rewrite-enabled", + "service-namespace", + "session-cookie-max-age", + "tls-secret-name", + "path-routes", +} + + +class IngressAvailableEvent(EventBase): + pass + + +class IngressCharmEvents(CharmEvents): + """Custom charm events.""" + + ingress_available = EventSource(IngressAvailableEvent) + + +class IngressRequires(Object): + """This class defines the functionality for the 'requires' side of the 'ingress' relation. + + Hook events observed: + - relation-changed + """ + + def __init__(self, charm, config_dict): + super().__init__(charm, "ingress") + + self.framework.observe(charm.on["ingress"].relation_changed, self._on_relation_changed) + + self.config_dict = config_dict + + def _config_dict_errors(self, update_only=False): + """Check our config dict for errors.""" + blocked_message = "Error in ingress relation, check `juju debug-log`" + unknown = [ + x + for x in self.config_dict + if x not in REQUIRED_INGRESS_RELATION_FIELDS | OPTIONAL_INGRESS_RELATION_FIELDS + ] + if unknown: + logger.error( + "Ingress relation error, unknown key(s) in config dictionary found: %s", + ", ".join(unknown), + ) + self.model.unit.status = BlockedStatus(blocked_message) + return True + if not update_only: + missing = [x for x in REQUIRED_INGRESS_RELATION_FIELDS if x not in self.config_dict] + if missing: + logger.error( + "Ingress relation error, missing required key(s) in config dictionary: %s", + ", ".join(missing), + ) + self.model.unit.status = BlockedStatus(blocked_message) + return True + return False + + def _on_relation_changed(self, event): + """Handle the relation-changed event.""" + # `self.unit` isn't available here, so use `self.model.unit`. 
if self.model.unit.is_leader():
+            if self._config_dict_errors():
+                return
+            for key in self.config_dict:
+                event.relation.data[self.model.app][key] = str(self.config_dict[key])
+
+    def update_config(self, config_dict):
+        """Allow for updates to relation."""
+        if self.model.unit.is_leader():
+            self.config_dict = config_dict
+            if self._config_dict_errors(update_only=True):
+                return
+            relation = self.model.get_relation("ingress")
+            if relation:
+                for key in self.config_dict:
+                    relation.data[self.model.app][key] = str(self.config_dict[key])
+
+
+class IngressProvides(Object):
+    """This class defines the functionality for the 'provides' side of the 'ingress' relation.
+
+    Hook events observed:
+        - relation-changed
+    """
+
+    def __init__(self, charm):
+        super().__init__(charm, "ingress")
+        # Observe the relation-changed hook event and bind
+        # self.on_relation_changed() to handle the event.
+        self.framework.observe(charm.on["ingress"].relation_changed, self._on_relation_changed)
+        self.charm = charm
+
+    def _on_relation_changed(self, event):
+        """Handle a change to the ingress relation.
+
+        Confirm we have the fields we expect to receive."""
+        # `self.unit` isn't available here, so use `self.model.unit`.
+        if not self.model.unit.is_leader():
+            return
+
+        ingress_data = {
+            field: event.relation.data[event.app].get(field)
+            for field in REQUIRED_INGRESS_RELATION_FIELDS | OPTIONAL_INGRESS_RELATION_FIELDS
+        }
+
+        missing_fields = sorted(
+            [
+                field
+                for field in REQUIRED_INGRESS_RELATION_FIELDS
+                if ingress_data.get(field) is None
+            ]
+        )
+
+        if missing_fields:
+            logger.error(
+                "Missing required data fields for ingress relation: {}".format(
+                    ", ".join(missing_fields)
+                )
+            )
+            self.model.unit.status = BlockedStatus(
+                "Missing fields for ingress: {}".format(", ".join(missing_fields))
+            )
+
+        # Create an event that our charm can use to decide it's okay to
+        # configure the ingress.
+        self.charm.on.ingress_available.emit()
diff --git a/charms/neutron-k8s/lib/charms/observability_libs/v0/kubernetes_service_patch.py b/charms/neutron-k8s/lib/charms/observability_libs/v0/kubernetes_service_patch.py
new file mode 100644
index 00000000..3f609912
--- /dev/null
+++ b/charms/neutron-k8s/lib/charms/observability_libs/v0/kubernetes_service_patch.py
@@ -0,0 +1,241 @@
+# Copyright 2021 Canonical Ltd.
+# See LICENSE file for licensing details.
+
+"""# KubernetesServicePatch Library.
+
+This library is designed to enable developers to more simply patch the Kubernetes Service created
+by Juju during the deployment of a sidecar charm. When sidecar charms are deployed, Juju creates a
+service named after the application in the namespace (named after the Juju model). This service by
+default contains a "placeholder" port, which is 65536/TCP.
+
+When modifying the default set of resources managed by Juju, one must consider the lifecycle of the
+charm. In this case, any modifications to the default service (created during deployment), will
+be overwritten during a charm upgrade.
+
+When initialised, this library binds a handler to the parent charm's `install` and `upgrade_charm`
+events which applies the patch to the cluster. This should ensure that the service ports are
+correct throughout the charm's life.
+
+The constructor simply takes a reference to the parent charm, and a list of tuples that each define
+a port for the service, where each tuple contains:
+
+- a name for the port
+- port for the service to listen on
+- optionally: a targetPort for the service (the port in the container!)
+- optionally: a nodePort for the service (for NodePort or LoadBalancer services only!) +- optionally: a name of the service (in case service name needs to be patched as well) + +## Getting Started + +To get started using the library, you just need to fetch the library using `charmcraft`. **Note +that you also need to add `lightkube` and `lightkube-models` to your charm's `requirements.txt`.** + +```shell +cd some-charm +charmcraft fetch-lib charms.observability_libs.v0.kubernetes_service_patch +echo <<-EOF >> requirements.txt +lightkube +lightkube-models +EOF +``` + +Then, to initialise the library: + +For ClusterIP services: +```python +# ... +from charms.observability_libs.v0.kubernetes_service_patch import KubernetesServicePatch + +class SomeCharm(CharmBase): + def __init__(self, *args): + # ... + self.service_patcher = KubernetesServicePatch(self, [(f"{self.app.name}", 8080)]) + # ... +``` + +For LoadBalancer/NodePort services: +```python +# ... +from charms.observability_libs.v0.kubernetes_service_patch import KubernetesServicePatch + +class SomeCharm(CharmBase): + def __init__(self, *args): + # ... + self.service_patcher = KubernetesServicePatch( + self, [(f"{self.app.name}", 443, 443, 30666)], "LoadBalancer" + ) + # ... +``` + +Additionally, you may wish to use mocks in your charm's unit testing to ensure that the library +does not try to make any API calls, or open any files during testing that are unlikely to be +present, and could break your tests. The easiest way to do this is during your test `setUp`: + +```python +# ... + +@patch("charm.KubernetesServicePatch", lambda x, y: None) +def setUp(self, *unused): + self.harness = Harness(SomeCharm) + # ... +``` +""" + +import logging +from types import MethodType +from typing import Literal, Sequence, Tuple, Union + +from lightkube import ApiError, Client +from lightkube.models.core_v1 import ServicePort, ServiceSpec +from lightkube.models.meta_v1 import ObjectMeta +from lightkube.resources.core_v1 import Service +from lightkube.types import PatchType +from ops.charm import CharmBase +from ops.framework import Object + +logger = logging.getLogger(__name__) + +# The unique Charmhub library identifier, never change it +LIBID = "0042f86d0a874435adef581806cddbbb" + +# Increment this major API version when introducing breaking changes +LIBAPI = 0 + +# Increment this PATCH version before using `charmcraft publish-lib` or reset +# to 0 if you are raising the major API version +LIBPATCH = 5 + +PortDefinition = Union[Tuple[str, int], Tuple[str, int, int], Tuple[str, int, int, int]] +ServiceType = Literal["ClusterIP", "LoadBalancer"] + + +class KubernetesServicePatch(Object): + """A utility for patching the Kubernetes service set up by Juju.""" + + def __init__( + self, + charm: CharmBase, + ports: Sequence[PortDefinition], + service_name: str = None, + service_type: ServiceType = "ClusterIP", + ): + """Constructor for KubernetesServicePatch. + + Args: + charm: the charm that is instantiating the library. + ports: a list of tuples (name, port, targetPort, nodePort) for every service port. + service_name: allows setting custom name to the patched service. If none given, + application name will be used. + service_type: desired type of K8s service. Default value is in line with ServiceSpec's + default value. 
+ """ + super().__init__(charm, "kubernetes-service-patch") + self.charm = charm + self.service_name = service_name if service_name else self._app + self.service = self._service_object(ports, service_name, service_type) + + # Make mypy type checking happy that self._patch is a method + assert isinstance(self._patch, MethodType) + # Ensure this patch is applied during the 'install' and 'upgrade-charm' events + self.framework.observe(charm.on.install, self._patch) + self.framework.observe(charm.on.upgrade_charm, self._patch) + + def _service_object( + self, + ports: Sequence[PortDefinition], + service_name: str = None, + service_type: ServiceType = "ClusterIP", + ) -> Service: + """Creates a valid Service representation for Alertmanager. + + Args: + ports: a list of tuples of the form (name, port) or (name, port, targetPort) + or (name, port, targetPort, nodePort) for every service port. If the 'targetPort' + is omitted, it is assumed to be equal to 'port', with the exception of NodePort + and LoadBalancer services, where all port numbers have to be specified. + service_name: allows setting custom name to the patched service. If none given, + application name will be used. + service_type: desired type of K8s service. Default value is in line with ServiceSpec's + default value. + + Returns: + Service: A valid representation of a Kubernetes Service with the correct ports. + """ + if not service_name: + service_name = self._app + return Service( + apiVersion="v1", + kind="Service", + metadata=ObjectMeta( + namespace=self._namespace, + name=service_name, + labels={"app.kubernetes.io/name": service_name}, + ), + spec=ServiceSpec( + selector={"app.kubernetes.io/name": service_name}, + ports=[ + ServicePort( + name=p[0], + port=p[1], + targetPort=p[2] if len(p) > 2 else p[1], # type: ignore[misc] + nodePort=p[3] if len(p) > 3 else None, # type: ignore[arg-type, misc] + ) + for p in ports + ], + type=service_type, + ), + ) + + def _patch(self, _) -> None: + """Patch the Kubernetes service created by Juju to map the correct port. + + Raises: + PatchFailed: if patching fails due to lack of permissions, or otherwise. + """ + if not self.charm.unit.is_leader(): + return + + client = Client() + try: + client.patch(Service, self._app, self.service, patch_type=PatchType.MERGE) + except ApiError as e: + if e.status.code == 403: + logger.error("Kubernetes service patch failed: `juju trust` this application.") + else: + logger.error("Kubernetes service patch failed: %s", str(e)) + else: + logger.info("Kubernetes service '%s' patched successfully", self._app) + + def is_patched(self) -> bool: + """Reports if the service patch has been applied. + + Returns: + bool: A boolean indicating if the service patch has been applied. + """ + client = Client() + # Get the relevant service from the cluster + service = client.get(Service, name=self.service_name, namespace=self._namespace) + # Construct a list of expected ports, should the patch be applied + expected_ports = [(p.port, p.targetPort) for p in self.service.spec.ports] + # Construct a list in the same manner, using the fetched service + fetched_ports = [(p.port, p.targetPort) for p in service.spec.ports] # type: ignore[attr-defined] # noqa: E501 + return expected_ports == fetched_ports + + @property + def _app(self) -> str: + """Name of the current Juju application. + + Returns: + str: A string containing the name of the current Juju application. 
+ """ + return self.charm.app.name + + @property + def _namespace(self) -> str: + """The Kubernetes namespace we're running in. + + Returns: + str: A string containing the name of the current Kubernetes namespace. + """ + with open("/var/run/secrets/kubernetes.io/serviceaccount/namespace", "r") as f: + return f.read().strip() diff --git a/charms/neutron-k8s/lib/charms/sunbeam_keystone_operator/v0/identity_service.py b/charms/neutron-k8s/lib/charms/sunbeam_keystone_operator/v0/identity_service.py new file mode 100644 index 00000000..7a7f4e43 --- /dev/null +++ b/charms/neutron-k8s/lib/charms/sunbeam_keystone_operator/v0/identity_service.py @@ -0,0 +1,470 @@ +"""IdentityServiceProvides and Requires module. + + +This library contains the Requires and Provides classes for handling +the identity_service interface. + +Import `IdentityServiceRequires` in your charm, with the charm object and the +relation name: + - self + - "identity_service" + +Also provide additional parameters to the charm object: + - service + - internal_url + - public_url + - admin_url + - region + - username + - vhost + +Two events are also available to respond to: + - connected + - ready + - goneaway + +A basic example showing the usage of this relation follows: + +``` +from charms.sunbeam_sunbeam_identity_service_operator.v0.identity_service import IdentityServiceRequires + +class IdentityServiceClientCharm(CharmBase): + def __init__(self, *args): + super().__init__(*args) + # IdentityService Requires + self.identity_service = IdentityServiceRequires( + self, "identity_service", + service = "my-service" + internal_url = "http://internal-url" + public_url = "http://public-url" + admin_url = "http://admin-url" + region = "region" + ) + self.framework.observe( + self.identity_service.on.connected, self._on_identity_service_connected) + self.framework.observe( + self.identity_service.on.ready, self._on_identity_service_ready) + self.framework.observe( + self.identity_service.on.goneaway, self._on_identity_service_goneaway) + + def _on_identity_service_connected(self, event): + '''React to the IdentityService connected event. + + This event happens when n IdentityService relation is added to the + model before credentials etc have been provided. + ''' + # Do something before the relation is complete + pass + + def _on_identity_service_ready(self, event): + '''React to the IdentityService ready event. + + The IdentityService interface will use the provided config for the + request to the identity server. + ''' + # IdentityService Relation is ready. Do something with the completed relation. + pass + + def _on_identity_service_goneaway(self, event): + '''React to the IdentityService goneaway event. + + This event happens when an IdentityService relation is removed. + ''' + # IdentityService Relation has goneaway. 
shutdown services or suchlike + pass +``` +""" + +# The unique Charmhub library identifier, never change it +LIBID = "6a7cb19b98314ecf916e3fcb02708608" + +# Increment this major API version when introducing breaking changes +LIBAPI = 0 + +# Increment this PATCH version before using `charmcraft publish-lib` or reset +# to 0 if you are raising the major API version +LIBPATCH = 1 + +import json +import logging +import requests + +from ops.framework import ( + StoredState, + EventBase, + ObjectEvents, + EventSource, + Object, +) + +from ops.model import Relation + +from typing import List + +logger = logging.getLogger(__name__) + + +class IdentityServiceConnectedEvent(EventBase): + """IdentityService connected Event.""" + + pass + + +class IdentityServiceReadyEvent(EventBase): + """IdentityService ready for use Event.""" + + pass + + +class IdentityServiceGoneAwayEvent(EventBase): + """IdentityService relation has gone-away Event""" + + pass + + +class IdentityServiceServerEvents(ObjectEvents): + """Events class for `on`""" + + connected = EventSource(IdentityServiceConnectedEvent) + ready = EventSource(IdentityServiceReadyEvent) + goneaway = EventSource(IdentityServiceGoneAwayEvent) + + +class IdentityServiceRequires(Object): + """ + IdentityServiceRequires class + """ + + on = IdentityServiceServerEvents() + _stored = StoredState() + + def __init__(self, charm, relation_name: str, service_endpoints: dict, + region: str): + super().__init__(charm, relation_name) + self.charm = charm + self.relation_name = relation_name + self.service_endpoints = service_endpoints + self.region = region + self.framework.observe( + self.charm.on[relation_name].relation_joined, + self._on_identity_service_relation_joined, + ) + self.framework.observe( + self.charm.on[relation_name].relation_changed, + self._on_identity_service_relation_changed, + ) + self.framework.observe( + self.charm.on[relation_name].relation_departed, + self._on_identity_service_relation_changed, + ) + self.framework.observe( + self.charm.on[relation_name].relation_broken, + self._on_identity_service_relation_broken, + ) + + def _on_identity_service_relation_joined(self, event): + """IdentityService relation joined.""" + logging.debug("IdentityService on_joined") + self.on.connected.emit() + self.register_services( + self.service_endpoints, + self.region) + + def _on_identity_service_relation_changed(self, event): + """IdentityService relation changed.""" + logging.debug("IdentityService on_changed") + try: + self.service_password + self.on.ready.emit() + except AttributeError: + pass + + def _on_identity_service_relation_broken(self, event): + """IdentityService relation broken.""" + logging.debug("IdentityService on_broken") + self.on.goneaway.emit() + + @property + def _identity_service_rel(self) -> Relation: + """The IdentityService relation.""" + return self.framework.model.get_relation(self.relation_name) + + def get_remote_app_data(self, key: str) -> str: + """Return the value for the given key from remote app data.""" + data = self._identity_service_rel.data[self._identity_service_rel.app] + return data.get(key) + + @property + def api_version(self) -> str: + """Return the api_version.""" + return self.get_remote_app_data('api-version') + + @property + def auth_host(self) -> str: + """Return the auth_host.""" + return self.get_remote_app_data('auth-host') + + @property + def auth_port(self) -> str: + """Return the auth_port.""" + return self.get_remote_app_data('auth-port') + + @property + def auth_protocol(self) -> str: + """Return 
the auth_protocol.""" + return self.get_remote_app_data('auth-protocol') + + @property + def internal_host(self) -> str: + """Return the internal_host.""" + return self.get_remote_app_data('internal-host') + + @property + def internal_port(self) -> str: + """Return the internal_port.""" + return self.get_remote_app_data('internal-port') + + @property + def internal_protocol(self) -> str: + """Return the internal_protocol.""" + return self.get_remote_app_data('internal-protocol') + + @property + def admin_domain_name(self) -> str: + """Return the admin_domain_name.""" + return self.get_remote_app_data('admin-domain-name') + + @property + def admin_domain_id(self) -> str: + """Return the admin_domain_id.""" + return self.get_remote_app_data('admin-domain-id') + + @property + def admin_project_name(self) -> str: + """Return the admin_project_name.""" + return self.get_remote_app_data('admin-project-name') + + @property + def admin_project_id(self) -> str: + """Return the admin_project_id.""" + return self.get_remote_app_data('admin-project-id') + + @property + def admin_user_name(self) -> str: + """Return the admin_user_name.""" + return self.get_remote_app_data('admin-user-name') + + @property + def admin_user_id(self) -> str: + """Return the admin_user_id.""" + return self.get_remote_app_data('admin-user-id') + + @property + def service_domain_name(self) -> str: + """Return the service_domain_name.""" + return self.get_remote_app_data('service-domain-name') + + @property + def service_domain_id(self) -> str: + """Return the service_domain_id.""" + return self.get_remote_app_data('service-domain-id') + + @property + def service_host(self) -> str: + """Return the service_host.""" + return self.get_remote_app_data('service-host') + + @property + def service_password(self) -> str: + """Return the service_password.""" + return self.get_remote_app_data('service-password') + + @property + def service_port(self) -> str: + """Return the service_port.""" + return self.get_remote_app_data('service-port') + + @property + def service_protocol(self) -> str: + """Return the service_protocol.""" + return self.get_remote_app_data('service-protocol') + + @property + def service_project_name(self) -> str: + """Return the service_project_name.""" + return self.get_remote_app_data('service-project-name') + + @property + def service_project_id(self) -> str: + """Return the service_project_id.""" + return self.get_remote_app_data('service-project-id') + + @property + def service_user_name(self) -> str: + """Return the service_user_name.""" + return self.get_remote_app_data('service-user-name') + + @property + def service_user_id(self) -> str: + """Return the service_user_id.""" + return self.get_remote_app_data('service-user-id') + + + def register_services(self, service_endpoints: dict, + region: str) -> None: + """Request access to the IdentityService server.""" + if self.model.unit.is_leader(): + logging.debug("Requesting service registration") + app_data = self._identity_service_rel.data[self.charm.app] + app_data["service-endpoints"] = json.dumps(service_endpoints) + app_data["region"] = region + + +class HasIdentityServiceClientsEvent(EventBase): + """Has IdentityServiceClients Event.""" + + pass + + +class ReadyIdentityServiceClientsEvent(EventBase): + """IdentityServiceClients Ready Event.""" + + def __init__(self, handle, relation_id, relation_name, service_endpoints, + region, client_app_name): + super().__init__(handle) + self.relation_id = relation_id + self.relation_name = relation_name + 
self.service_endpoints = service_endpoints
+        self.region = region
+        self.client_app_name = client_app_name
+
+    def snapshot(self):
+        return {
+            "relation_id": self.relation_id,
+            "relation_name": self.relation_name,
+            "service_endpoints": self.service_endpoints,
+            "client_app_name": self.client_app_name,
+            "region": self.region}
+
+    def restore(self, snapshot):
+        super().restore(snapshot)
+        self.relation_id = snapshot["relation_id"]
+        self.relation_name = snapshot["relation_name"]
+        self.service_endpoints = snapshot["service_endpoints"]
+        self.region = snapshot["region"]
+        self.client_app_name = snapshot["client_app_name"]
+
+
+class IdentityServiceClientEvents(ObjectEvents):
+    """Events class for `on`"""
+
+    has_identity_service_clients = EventSource(HasIdentityServiceClientsEvent)
+    ready_identity_service_clients = EventSource(ReadyIdentityServiceClientsEvent)
+
+
+class IdentityServiceProvides(Object):
+    """
+    IdentityServiceProvides class
+    """
+
+    on = IdentityServiceClientEvents()
+    _stored = StoredState()
+
+    def __init__(self, charm, relation_name):
+        super().__init__(charm, relation_name)
+        self.charm = charm
+        self.relation_name = relation_name
+        self.framework.observe(
+            self.charm.on[relation_name].relation_joined,
+            self._on_identity_service_relation_joined,
+        )
+        self.framework.observe(
+            self.charm.on[relation_name].relation_changed,
+            self._on_identity_service_relation_changed,
+        )
+        self.framework.observe(
+            self.charm.on[relation_name].relation_broken,
+            self._on_identity_service_relation_broken,
+        )
+
+    def _on_identity_service_relation_joined(self, event):
+        """Handle IdentityService joined."""
+        logging.debug("IdentityService on_joined")
+        self.on.has_identity_service_clients.emit()
+
+    def _on_identity_service_relation_changed(self, event):
+        """Handle IdentityService changed."""
+        logging.debug("IdentityService on_changed")
+        REQUIRED_KEYS = [
+            'service-endpoints',
+            'region']
+
+        values = [
+            event.relation.data[event.relation.app].get(k)
+            for k in REQUIRED_KEYS]
+        # Validate data on the relation
+        if all(values):
+            logging.debug(
+                "Identity service client ready on relation %s (id %s)",
+                event.relation.name, event.relation.id)
+            service_eps = json.loads(
+                event.relation.data[event.relation.app]['service-endpoints'])
+            self.on.ready_identity_service_clients.emit(
+                event.relation.id,
+                event.relation.name,
+                service_eps,
+                event.relation.data[event.relation.app]['region'],
+                event.relation.app.name)
+
+    def _on_identity_service_relation_broken(self, event):
+        """Handle IdentityService broken."""
+        logging.debug("IdentityServiceProvides on_broken")
+        # TODO clear data on the relation
+
+    def set_identity_service_credentials(self, relation_name: str,
+                                         relation_id: int,
+                                         api_version: str,
+                                         auth_host: str,
+                                         auth_port: str,
+                                         auth_protocol: str,
+                                         internal_host: str,
+                                         internal_port: str,
+                                         internal_protocol: str,
+                                         service_host: str,
+                                         service_port: str,
+                                         service_protocol: str,
+                                         admin_domain: str,
+                                         admin_project: str,
+                                         admin_user: str,
+                                         service_domain: str,
+                                         service_password: str,
+                                         service_project: str,
+                                         service_user: str):
+        """Set identity service credentials on the matching relation."""
+        logging.debug("Setting identity_service connection information.")
+        for relation in self.framework.model.relations[relation_name]:
+            if relation.id == relation_id:
+                _identity_service_rel = relation
+            app_data = _identity_service_rel.data[self.charm.app]
+            app_data["api-version"] = api_version
+            app_data["auth-host"] = auth_host
+            app_data["auth-port"] = str(auth_port)
+            app_data["auth-protocol"] = auth_protocol
+            app_data["internal-host"] = internal_host
+            app_data["internal-port"] = str(internal_port)
+            app_data["internal-protocol"] = internal_protocol
+            app_data["service-host"] = service_host
+            app_data["service-port"] = str(service_port)
+            app_data["service-protocol"] = service_protocol
+            app_data["admin-domain-name"] = admin_domain.name
+            app_data["admin-domain-id"] = admin_domain.id
+            app_data["admin-project-name"] = admin_project.name
+            app_data["admin-project-id"] = admin_project.id
+            app_data["admin-user-name"] = admin_user.name
+            app_data["admin-user-id"] = admin_user.id
+            app_data["service-domain-name"] = service_domain.name
+            app_data["service-domain-id"] = service_domain.id
+            app_data["service-project-name"] = service_project.name
+            app_data["service-project-id"] = service_project.id
+            app_data["service-user-name"] = service_user.name
+            app_data["service-user-id"] = service_user.id
+            app_data["service-password"] = service_password
diff --git a/charms/neutron-k8s/lib/charms/sunbeam_mysql_k8s/v0/mysql.py b/charms/neutron-k8s/lib/charms/sunbeam_mysql_k8s/v0/mysql.py
new file mode 100644
index 00000000..69d4833f
--- /dev/null
+++ b/charms/neutron-k8s/lib/charms/sunbeam_mysql_k8s/v0/mysql.py
@@ -0,0 +1,165 @@
+"""
+## Overview
+
+This document explains how to integrate with the MySQL charm for the purposes of consuming a mysql database. It also explains how alternative implementations of the MySQL charm may maintain the same interface and be backward compatible with all currently integrated charms. Finally this document is the authoritative reference on the structure of relation data that is shared between MySQL charms and any other charm that intends to use the database.
+
+
+## Consumer Library Usage
+
+The MySQL charm library uses the [Provider and Consumer](https://ops.readthedocs.io/en/latest/#module-ops.relation) objects from the Operator Framework. Charms that would like to use a MySQL database must use the `MySQLConsumer` object from the charm library. Using the `MySQLConsumer` object requires instantiating it, typically in the constructor of your charm. The `MySQLConsumer` constructor requires the name of the relation over which a database will be used. This relation must use the `mysql_datastore` interface. In addition, the constructor requires a `consumes` specification, which is a dictionary with key `mysql` (also see Provider Library Usage below) and a value that represents the minimum acceptable version of MySQL. This version string can be in any format that is compatible with the Python [Semantic Version module](https://pypi.org/project/semantic-version/). For example, assuming your charm consumes a database over a relation named "monitoring", you may instantiate `MySQLConsumer` as follows:
+
+    from charms.mysql_k8s.v0.mysql import MySQLConsumer
+    def __init__(self, *args):
+        super().__init__(*args)
+        ...
+        self.mysql_consumer = MySQLConsumer(
+            self, "monitoring", {"mysql": ">=8"}
+        )
+        ...
+
+This example hard-codes the consumes dictionary argument containing the minimal MySQL version required; however, you may want to consider generating this dictionary by some other means, such as a `self.consumes` property in your charm. This is because the minimum required MySQL version may change when you upgrade your charm. Of course it is expected that you will keep this version string updated as you develop newer releases of your charm. If the version string can be determined at run time by inspecting the actual deployed version of your charmed application, this would be ideal.
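+
+As a sketch of that approach (the property name and the version string below
+are illustrative assumptions, not part of this library):
+
+    @property
+    def consumes(self):
+        # Minimum MySQL version supported by this release of the charm.
+        return {"mysql": ">=8"}
+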
+An instantiated `MySQLConsumer` object may be used to request new databases using the `new_database()` method. This method requires no arguments unless you require multiple databases. If multiple databases are requested, you must provide a unique `name_suffix` argument. For example:
+
+    def _on_database_relation_joined(self, event):
+        self.mysql_consumer.new_database(name_suffix="db1")
+        self.mysql_consumer.new_database(name_suffix="db2")
+
+The `databases()` and `credentials()` methods can be called
+to get the relevant information from the relation data.
+"""
+
+# !/usr/bin/env python3
+# Copyright 2021 Canonical Ltd.
+# See LICENSE file for licensing details.
+
+import json
+import uuid
+import logging
+
+from ops.framework import (
+    StoredState,
+    EventBase,
+    ObjectEvents,
+    EventSource,
+    Object,
+)
+
+# The unique Charmhub library identifier, never change it
+LIBID = "1fdc567d7095465990dc1f9be80461fd"
+
+# Increment this major API version when introducing breaking changes
+LIBAPI = 0
+
+# Increment this PATCH version before using `charmcraft publish-lib` or reset
+# to 0 if you are raising the major API version
+LIBPATCH = 1
+
+logger = logging.getLogger(__name__)
+
+
+class DatabaseConnectedEvent(EventBase):
+    """Database connected Event."""
+
+    pass
+
+
+class DatabaseReadyEvent(EventBase):
+    """Database ready for use Event."""
+
+    pass
+
+
+class DatabaseGoneAwayEvent(EventBase):
+    """Database relation has gone-away Event"""
+
+    pass
+
+
+class DatabaseServerEvents(ObjectEvents):
+    """Events class for `on`"""
+
+    connected = EventSource(DatabaseConnectedEvent)
+    ready = EventSource(DatabaseReadyEvent)
+    goneaway = EventSource(DatabaseGoneAwayEvent)
+
+
+class MySQLConsumer(Object):
+    """
+    MySQLConsumer lib class
+    """
+
+    on = DatabaseServerEvents()
+
+    def __init__(self, charm, relation_name: str, databases: list):
+        super().__init__(charm, relation_name)
+        self.charm = charm
+        self.relation_name = relation_name
+        self.request_databases = databases
+        self.framework.observe(
+            self.charm.on[relation_name].relation_joined,
+            self._on_database_relation_joined,
+        )
+
+    def _on_database_relation_joined(self, event):
+        """Database relation joined."""
+        logging.debug("DatabaseRequires on_joined")
+        self.on.connected.emit()
+        self.request_access(self.request_databases)
+
+    def databases(self, rel_id=None) -> list:
+        """
+        List of currently available databases
+        Returns:
+            list: list of database names
+        """
+
+        rel = self.framework.model.get_relation(self.relation_name, rel_id)
+        relation_data = rel.data[rel.app]
+        dbs = relation_data.get("databases")
+        databases = json.loads(dbs) if dbs else []
+
+        return databases
+
+    def credentials(self, rel_id=None) -> dict:
+        """
+        Dictionary of credential information to access databases
+        Returns:
+            dict: dictionary of credential information including username,
+            password and address
+        """
+        rel = self.framework.model.get_relation(self.relation_name, rel_id)
+        relation_data = rel.data[rel.app]
+        data = relation_data.get("data")
+        data = json.loads(data) if data else {}
+        credentials = data.get("credentials")
+
+        return credentials
+
+    def new_database(self, rel_id=None, name_suffix=""):
+        """
+        Request creation of an additional database
+        """
+        if not self.charm.unit.is_leader():
+            return
+
+        rel = self.framework.model.get_relation(self.relation_name, rel_id)
+
+        if name_suffix:
+            name_suffix = "_{}".format(name_suffix)
+
+        rid = str(uuid.uuid4()).split("-")[-1]
+        # NOTE: name_suffix already carries a leading underscore when set.
+        db_name = "db_{}_{}{}".format(rel.id, rid, name_suffix)
+        logger.debug("CLIENT REQUEST %s", db_name)
+        rel_data = rel.data[self.charm.app]
+        dbs = rel_data.get("databases")
+        dbs = json.loads(dbs) if dbs else []
+        dbs.append(db_name)
+        rel.data[self.charm.app]["databases"] = json.dumps(dbs)
+
+    def request_access(self, databases: list) -> None:
+        """Request creation of the configured databases."""
+        if self.model.unit.is_leader():
+            logging.debug("Requesting databases: %s", databases)
+            if databases:
+                rel = self.framework.model.get_relation(self.relation_name)
+                rel.data[self.charm.app]["databases"] = json.dumps(databases)
diff --git a/charms/neutron-k8s/lib/charms/sunbeam_rabbitmq_operator/v0/amqp.py b/charms/neutron-k8s/lib/charms/sunbeam_rabbitmq_operator/v0/amqp.py
new file mode 100644
index 00000000..4c1540de
--- /dev/null
+++ b/charms/neutron-k8s/lib/charms/sunbeam_rabbitmq_operator/v0/amqp.py
@@ -0,0 +1,314 @@
+"""AMQPProvides and Requires module.
+
+
+This library contains the Requires and Provides classes for handling
+the amqp interface.
+
+Import `AMQPRequires` in your charm, with the charm object and the
+relation name:
+  - self
+  - "amqp"
+
+Also provide two additional parameters to the charm object:
+  - username
+  - vhost
+
+Three events are also available to respond to:
+  - connected
+  - ready
+  - goneaway
+
+A basic example showing the usage of this relation follows:
+
+```
+from charms.sunbeam_rabbitmq_operator.v0.amqp import AMQPRequires
+
+class AMQPClientCharm(CharmBase):
+    def __init__(self, *args):
+        super().__init__(*args)
+        # AMQP Requires
+        self.amqp = AMQPRequires(
+            self, "amqp",
+            username="myusername",
+            vhost="vhostname"
+        )
+        self.framework.observe(
+            self.amqp.on.connected, self._on_amqp_connected)
+        self.framework.observe(
+            self.amqp.on.ready, self._on_amqp_ready)
+        self.framework.observe(
+            self.amqp.on.goneaway, self._on_amqp_goneaway)
+
+    def _on_amqp_connected(self, event):
+        '''React to the AMQP connected event.
+
+        This event happens when an AMQP relation is added to the
+        model before credentials etc have been provided.
+        '''
+        # Do something before the relation is complete
+        pass
+
+    def _on_amqp_ready(self, event):
+        '''React to the AMQP ready event.
+
+        The AMQP interface will use the provided username and vhost for the
+        request to the rabbitmq server.
+        '''
+        # AMQP Relation is ready. Do something with the completed relation.
+        pass
+
+    def _on_amqp_goneaway(self, event):
+        '''React to the AMQP goneaway event.
+
+        This event happens when an AMQP relation is removed.
+        '''
+        # AMQP Relation has goneaway.
shutdown services or suchlike + pass +``` +""" + +# The unique Charmhub library identifier, never change it +LIBID = "ab1414b6baf044f099caf9c117f1a101" + +# Increment this major API version when introducing breaking changes +LIBAPI = 0 + +# Increment this PATCH version before using `charmcraft publish-lib` or reset +# to 0 if you are raising the major API version +LIBPATCH = 3 + +import logging +import requests + +from ops.framework import ( + StoredState, + EventBase, + ObjectEvents, + EventSource, + Object, +) + +from ops.model import Relation + +from typing import List + +logger = logging.getLogger(__name__) + + +class AMQPConnectedEvent(EventBase): + """AMQP connected Event.""" + + pass + + +class AMQPReadyEvent(EventBase): + """AMQP ready for use Event.""" + + pass + + +class AMQPGoneAwayEvent(EventBase): + """AMQP relation has gone-away Event""" + + pass + + +class AMQPServerEvents(ObjectEvents): + """Events class for `on`""" + + connected = EventSource(AMQPConnectedEvent) + ready = EventSource(AMQPReadyEvent) + goneaway = EventSource(AMQPGoneAwayEvent) + + +class AMQPRequires(Object): + """ + AMQPRequires class + """ + + on = AMQPServerEvents() + _stored = StoredState() + + def __init__(self, charm, relation_name: str, username: str, vhost: str): + super().__init__(charm, relation_name) + self.charm = charm + self.relation_name = relation_name + self.username = username + self.vhost = vhost + self.framework.observe( + self.charm.on[relation_name].relation_joined, + self._on_amqp_relation_joined, + ) + self.framework.observe( + self.charm.on[relation_name].relation_changed, + self._on_amqp_relation_changed, + ) + self.framework.observe( + self.charm.on[relation_name].relation_departed, + self._on_amqp_relation_changed, + ) + self.framework.observe( + self.charm.on[relation_name].relation_broken, + self._on_amqp_relation_broken, + ) + + def _on_amqp_relation_joined(self, event): + """AMQP relation joined.""" + logging.debug("RabbitMQAMQPRequires on_joined") + self.on.connected.emit() + self.request_access(self.username, self.vhost) + + def _on_amqp_relation_changed(self, event): + """AMQP relation changed.""" + logging.debug("RabbitMQAMQPRequires on_changed") + if self.password: + self.on.ready.emit() + + def _on_amqp_relation_broken(self, event): + """AMQP relation broken.""" + logging.debug("RabbitMQAMQPRequires on_broken") + self.on.goneaway.emit() + + @property + def _amqp_rel(self) -> Relation: + """The AMQP relation.""" + return self.framework.model.get_relation(self.relation_name) + + @property + def password(self) -> str: + """Return the AMQP password from the server side of the relation.""" + return self._amqp_rel.data[self._amqp_rel.app].get("password") + + @property + def hostname(self) -> str: + """Return the hostname from the AMQP relation""" + return self._amqp_rel.data[self._amqp_rel.app].get("hostname") + + @property + def ssl_port(self) -> str: + """Return the SSL port from the AMQP relation""" + return self._amqp_rel.data[self._amqp_rel.app].get("ssl_port") + + @property + def ssl_ca(self) -> str: + """Return the SSL port from the AMQP relation""" + return self._amqp_rel.data[self._amqp_rel.app].get("ssl_ca") + + @property + def hostnames(self) -> List[str]: + """Return a list of remote RMQ hosts from the AMQP relation""" + _hosts = [] + for unit in self._amqp_rel.units: + _hosts.append(self._amqp_rel.data[unit].get("ingress-address")) + return _hosts + + def request_access(self, username: str, vhost: str) -> None: + """Request access to the AMQP server.""" + if 
self.model.unit.is_leader():
+            logging.debug("Requesting AMQP user and vhost")
+            self._amqp_rel.data[self.charm.app]["username"] = username
+            self._amqp_rel.data[self.charm.app]["vhost"] = vhost
+
+
+class HasAMQPClientsEvent(EventBase):
+    """Has AMQPClients Event."""
+
+    pass
+
+
+class ReadyAMQPClientsEvent(EventBase):
+    """AMQPClients Ready Event."""
+
+    pass
+
+
+class AMQPClientEvents(ObjectEvents):
+    """Events class for `on`"""
+
+    has_amqp_clients = EventSource(HasAMQPClientsEvent)
+    ready_amqp_clients = EventSource(ReadyAMQPClientsEvent)
+
+
+class AMQPProvides(Object):
+    """
+    AMQPProvides class
+    """
+
+    on = AMQPClientEvents()
+    _stored = StoredState()
+
+    def __init__(self, charm, relation_name):
+        super().__init__(charm, relation_name)
+        self.charm = charm
+        self.relation_name = relation_name
+        self.framework.observe(
+            self.charm.on[relation_name].relation_joined,
+            self._on_amqp_relation_joined,
+        )
+        self.framework.observe(
+            self.charm.on[relation_name].relation_changed,
+            self._on_amqp_relation_changed,
+        )
+        self.framework.observe(
+            self.charm.on[relation_name].relation_broken,
+            self._on_amqp_relation_broken,
+        )
+
+    def _on_amqp_relation_joined(self, event):
+        """Handle AMQP joined."""
+        logging.debug("RabbitMQAMQPProvides on_joined")
+        self.on.has_amqp_clients.emit()
+
+    def _on_amqp_relation_changed(self, event):
+        """Handle AMQP changed."""
+        logging.debug("RabbitMQAMQPProvides on_changed")
+        # Validate data on the relation
+        if self.username(event) and self.vhost(event):
+            self.on.ready_amqp_clients.emit()
+            if self.charm.unit.is_leader():
+                self.set_amqp_credentials(
+                    event, self.username(event), self.vhost(event)
+                )
+
+    def _on_amqp_relation_broken(self, event):
+        """Handle AMQP broken."""
+        logging.debug("RabbitMQAMQPProvides on_broken")
+        # TODO clear data on the relation
+
+    def username(self, event):
+        """Return the AMQP username from the client side of the relation."""
+        return event.relation.data[event.relation.app].get("username")
+
+    def vhost(self, event):
+        """Return the AMQP vhost from the client side of the relation."""
+        return event.relation.data[event.relation.app].get("vhost")
+
+    def set_amqp_credentials(self, event, username, vhost):
+        """Set AMQP Credentials.
+
+        :param event: The current event
+        :type event: EventBase
+        :param username: The requested username
+        :type username: str
+        :param vhost: The requested vhost
+        :type vhost: str
+        :returns: None
+        :rtype: None
+        """
+        # TODO: Can we move this into the charm code?
+        # TODO TLS Support. Existing interfaces set ssl_port and ssl_ca
+        logging.debug("Setting amqp connection information.")
+        try:
+            if not self.charm.does_vhost_exist(vhost):
+                self.charm.create_vhost(vhost)
+            password = self.charm.create_user(username)
+            self.charm.set_user_permissions(username, vhost)
+            event.relation.data[self.charm.app]["password"] = password
+            event.relation.data[self.charm.app][
+                "hostname"
+            ] = self.charm.hostname
+        except requests.exceptions.ConnectionError as e:
+            logging.warning(
+                "Rabbitmq is not ready. Deferring. Errno: {}".format(e.errno)
+            )
+            event.defer()
diff --git a/charms/neutron-k8s/metadata.yaml b/charms/neutron-k8s/metadata.yaml
new file mode 100644
index 00000000..32463da8
--- /dev/null
+++ b/charms/neutron-k8s/metadata.yaml
@@ -0,0 +1,44 @@
+name: sunbeam-neutron-operator
+summary: OpenStack Networking API service
+maintainer: OpenStack Charmers
+description: |
+  Neutron is a virtual network service for OpenStack, and a part of
+  Netstack.
diff --git a/charms/neutron-k8s/metadata.yaml b/charms/neutron-k8s/metadata.yaml
new file mode 100644
index 00000000..32463da8
--- /dev/null
+++ b/charms/neutron-k8s/metadata.yaml
@@ -0,0 +1,44 @@
+name: sunbeam-neutron-operator
+summary: OpenStack Networking API service
+maintainer: OpenStack Charmers
+description: |
+  Neutron is a virtual network service for OpenStack, and a part of
+  Netstack. Just like OpenStack Nova provides an API to dynamically
+  request and configure virtual servers, Neutron provides an API to
+  dynamically request and configure virtual networks. These networks
+  connect "interfaces" from other OpenStack services (e.g., virtual NICs
+  from Nova VMs). The Neutron API supports extensions to provide
+  advanced network capabilities (e.g., QoS, ACLs, network monitoring,
+  etc.)
+  .
+  This charm provides the OpenStack Neutron API service.
+tags:
+  - openstack
+
+containers:
+  neutron-server:
+    resource: neutron-server-image
+
+resources:
+  neutron-server-image:
+    type: oci-image
+    description: OCI image for OpenStack Neutron API (kolla/neutron-server)
+
+provides:
+  neutron-api:
+    interface: neutron-api
+
+requires:
+  ingress:
+    interface: ingress
+  shared-db:
+    interface: mysql_datastore
+    limit: 1
+  amqp:
+    interface: rabbitmq
+  identity-service:
+    interface: keystone
+
+peers:
+  peers:
+    interface: neutron-peer
diff --git a/charms/neutron-k8s/requirements-dev.txt b/charms/neutron-k8s/requirements-dev.txt
new file mode 100644
index 00000000..4f2a3f5b
--- /dev/null
+++ b/charms/neutron-k8s/requirements-dev.txt
@@ -0,0 +1,3 @@
+-r requirements.txt
+coverage
+flake8
diff --git a/charms/neutron-k8s/requirements.txt b/charms/neutron-k8s/requirements.txt
new file mode 100644
index 00000000..32a3d3b9
--- /dev/null
+++ b/charms/neutron-k8s/requirements.txt
@@ -0,0 +1,10 @@
+# ops >= 1.2.0
+jinja2
+git+https://github.com/canonical/operator@2875e73e#egg=ops
+git+https://opendev.org/openstack/charm-ops-openstack#egg=ops_openstack
+git+https://github.com/openstack-charmers/advanced-sunbeam-openstack#egg=advanced_sunbeam_openstack
+lightkube
+# These are only needed if the charm relates to ceph
+git+https://github.com/openstack/charm-ops-interface-ceph-client#egg=interface_ceph_client
+# Charmhelpers is only present as interface_ceph_client uses it.
+git+https://github.com/juju/charm-helpers.git#egg=charmhelpers
diff --git a/charms/neutron-k8s/run_tests b/charms/neutron-k8s/run_tests
new file mode 100755
index 00000000..90db638b
--- /dev/null
+++ b/charms/neutron-k8s/run_tests
@@ -0,0 +1,17 @@
+#!/bin/sh -e
+# Copyright 2022 liam
+# See LICENSE file for licensing details.
+
+if [ -z "$VIRTUAL_ENV" -a -d venv/ ]; then
+    . venv/bin/activate
+fi
+
+if [ -z "$PYTHONPATH" ]; then
+    export PYTHONPATH="lib:src"
+else
+    export PYTHONPATH="lib:src:$PYTHONPATH"
+fi
+
+flake8
+coverage run --branch --source=src -m unittest -v "$@"
+coverage report -m
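run_tests discovers tests under unit_tests/ (the test path configured in .stestr.conf). As a sketch of the kind of test it would pick up for the AMQP library above — illustrative only; the library import path is again an assumption, and Harness ships with the ops framework:

```python
# unit_tests/test_amqp.py (sketch): exercising AMQPRequires with the ops
# test harness. The library import path is hypothetical.
import unittest

from ops.charm import CharmBase
from ops.testing import Harness

from charms.sunbeam_rabbitmq_operator.v0.amqp import AMQPRequires

META = """
name: client
requires:
  amqp:
    interface: rabbitmq
"""


class TestAMQPRequires(unittest.TestCase):
    def test_password_read_from_remote_app_data(self):
        harness = Harness(CharmBase, meta=META)
        self.addCleanup(harness.cleanup)
        rel_id = harness.add_relation("amqp", "rabbitmq")
        harness.begin()
        requires = AMQPRequires(harness.charm, "amqp", "client", "openstack")
        # Simulate the server side publishing credentials.
        harness.update_relation_data(rel_id, "rabbitmq", {"password": "s3cr3t"})
        self.assertEqual(requires.password, "s3cr3t")
```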
diff --git a/charms/neutron-k8s/src/charm.py b/charms/neutron-k8s/src/charm.py
new file mode 100755
index 00000000..4b349fad
--- /dev/null
+++ b/charms/neutron-k8s/src/charm.py
@@ -0,0 +1,128 @@
+#!/usr/bin/env python3
+"""Neutron Operator Charm.
+
+This charm provides Neutron services as part of an OpenStack deployment.
+"""
+
+import logging
+from typing import List
+
+from ops.framework import StoredState
+from ops.main import main
+
+import advanced_sunbeam_openstack.cprocess as sunbeam_cprocess
+import advanced_sunbeam_openstack.charm as sunbeam_charm
+import advanced_sunbeam_openstack.core as sunbeam_core
+import advanced_sunbeam_openstack.container_handlers as sunbeam_chandlers
+import advanced_sunbeam_openstack.relation_handlers as sunbeam_rhandlers
+import advanced_sunbeam_openstack.config_contexts as sunbeam_ctxts
+
+from charms.observability_libs.v0.kubernetes_service_patch \
+    import KubernetesServicePatch
+
+logger = logging.getLogger(__name__)
+
+
+class NeutronServerPebbleHandler(sunbeam_chandlers.ServicePebbleHandler):
+    """Pebble handler for the neutron-server container."""
+
+    def get_layer(self):
+        """Pebble layer for the neutron server service.
+
+        :returns: pebble layer configuration for neutron server service
+        :rtype: dict
+        """
+        return {
+            "summary": "neutron server layer",
+            "description": "pebble configuration for neutron server",
+            "services": {
+                "neutron-server": {
+                    "override": "replace",
+                    "summary": "Neutron Server",
+                    "command": "neutron-server",
+                    "startup": "enabled"
+                }
+            }
+        }
+
+    def default_container_configs(self):
+        """Container configuration files for the service."""
+        return [
+            sunbeam_core.ContainerConfigFile(
+                [self.container_name],
+                '/etc/neutron/neutron.conf',
+                'neutron',
+                'neutron')]
+
+
+class NeutronOperatorCharm(sunbeam_charm.OSBaseOperatorAPICharm):
+    """Charm the service."""
+
+    _state = StoredState()
+    service_name = "neutron-server"
+
+    db_sync_cmds = [
+        ['sudo', '-u', 'neutron', 'neutron-db-manage', '--config-file',
+         '/etc/neutron/neutron.conf', '--config-file',
+         '/etc/neutron/plugins/ml2/ml2_conf.ini', 'upgrade', 'head']]
+
+    def __init__(self, framework):
+        super().__init__(framework)
+        self.service_patcher = KubernetesServicePatch(
+            self,
+            [
+                ('public', self.default_public_ingress_port),
+            ]
+        )
+
+    def get_pebble_handlers(self) -> List[sunbeam_chandlers.PebbleHandler]:
+        """Pebble handlers for the service."""
+        return [
+            NeutronServerPebbleHandler(
+                self,
+                'neutron-server',
+                self.service_name,
+                self.container_configs,
+                self.template_dir,
+                self.openstack_release,
+                self.configure_charm,
+            )
+        ]
+
+    @property
+    def service_endpoints(self):
+        """Endpoints to register in the Keystone catalogue."""
+        return [
+            {
+                'service_name': 'neutron',
+                'type': 'network',
+                'description': "OpenStack Networking",
+                'internal_url': f'{self.internal_url}',
+                'public_url': f'{self.public_url}',
+                'admin_url': f'{self.admin_url}'}]
+
+    @property
+    def default_public_ingress_port(self):
+        """Default port for the public ingress."""
+        return 9696
+
+    @property
+    def service_user(self) -> str:
+        """Service user file and directory ownership."""
+        return 'neutron'
+
+    @property
+    def service_group(self) -> str:
+        """Service group file and directory ownership."""
+        return 'neutron'
+
+    @property
+    def service_conf(self) -> str:
+        """Service default configuration file."""
+        return "/etc/neutron/neutron.conf"
+
+
+class NeutronWallabyOperatorCharm(NeutronOperatorCharm):
+    """Neutron operator charm for the Wallaby OpenStack release."""
+
+    openstack_release = 'wallaby'
+
+
+if __name__ == "__main__":
+    # Note: use_juju_for_storage=True required per
+    # https://github.com/canonical/operator/issues/506
+    main(NeutronWallabyOperatorCharm, use_juju_for_storage=True)
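The two diffs that follow add the Jinja2 templates rendered into the container. In the charm, the template contexts come from advanced_sunbeam_openstack's relation handlers and config contexts; the sketch below hand-builds a context with placeholder values purely to show the shape the templates expect. (neutron.conf additionally includes "parts/section-database" from the shared template library, so rendering that file also needs the part on the loader path.)

```python
# Illustrative only: rendering ceph.conf.j2 with a hand-built context.
# The values are placeholders, not real deployment data.
from jinja2 import Environment, FileSystemLoader

env = Environment(loader=FileSystemLoader("src/templates"))
template = env.get_template("ceph.conf.j2")

context = {
    "ceph": {
        "auth": "cephx",
        "mon_hosts": "10.0.0.1 10.0.0.2",
        "rbd_features": None,  # falsy, so the rbd block is skipped
    },
    "ceph_config": {"rbd_default_data_pool": None},
}
print(template.render(**context))
```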
diff --git a/charms/neutron-k8s/src/templates/ceph.conf.j2 b/charms/neutron-k8s/src/templates/ceph.conf.j2
new file mode 100644
index 00000000..c293ae90
--- /dev/null
+++ b/charms/neutron-k8s/src/templates/ceph.conf.j2
@@ -0,0 +1,22 @@
+###############################################################################
+# [ WARNING ]
+# ceph configuration file maintained by advanced-sunbeam-openstack (aso)
+# local changes may be overwritten.
+###############################################################################
+[global]
+{% if ceph.auth -%}
+auth_supported = {{ ceph.auth }}
+mon host = {{ ceph.mon_hosts }}
+{% endif -%}
+keyring = /etc/ceph/$cluster.$name.keyring
+log to syslog = false
+err to syslog = false
+clog to syslog = false
+{% if ceph.rbd_features %}
+rbd default features = {{ ceph.rbd_features }}
+{% endif %}
+
+[client]
+{% if ceph_config.rbd_default_data_pool -%}
+rbd default data pool = {{ ceph_config.rbd_default_data_pool }}
+{% endif %}
diff --git a/charms/neutron-k8s/src/templates/neutron.conf b/charms/neutron-k8s/src/templates/neutron.conf
new file mode 100644
index 00000000..9ecac829
--- /dev/null
+++ b/charms/neutron-k8s/src/templates/neutron.conf
@@ -0,0 +1,1868 @@
+[DEFAULT]
+core_plugin = ml2
+debug = {{ options.debug }}
+
+#
+# From neutron
+#
+
+# Where to store Neutron state files. This directory must be writable by the
+# agent. (string value)
+#state_path = /var/lib/neutron
+
+# The host IP to bind to. (host address value)
+#bind_host = 0.0.0.0
+
+# The port to bind to (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#bind_port = 9696
+
+# The path for API extensions. Note that this can be a colon-separated list of
+# paths. For example: api_extensions_path =
+# extensions:/path/to/more/exts:/even/more/exts. The __path__ of
+# neutron.extensions is appended to this, so if your extensions are in there
+# you don't need to specify them here. (string value)
+#api_extensions_path =
+
+# The type of authentication to use (string value)
+#auth_strategy = keystone
+
+# The core plugin Neutron will use (string value)
+#core_plugin =
+
+# The service plugins Neutron will use (list value)
+#service_plugins =
+
+# The base MAC address Neutron will use for VIFs. The first 3 octets will
+# remain unchanged. If the 4th octet is not 00, it will also be used. The
+# others will be randomly generated. (string value)
+#base_mac = fa:16:3e:00:00:00
+
+# Allow the usage of the bulk API (boolean value)
+#allow_bulk = true
+
+# The maximum number of items returned in a single response, value was
+# 'infinite' or negative integer means no limit (string value)
+#pagination_max_limit = -1
+
+# Default value of availability zone hints. The availability zone aware
+# schedulers use this when the resources availability_zone_hints is empty.
+# Multiple availability zones can be specified by a comma separated string.
+# This value can be empty. In this case, even if availability_zone_hints for a
+# resource is empty, availability zone is considered for high availability
+# while scheduling the resource. (list value)
+#default_availability_zones =
+
+# Maximum number of DNS nameservers per subnet (integer value)
+#max_dns_nameservers = 5
+
+# Maximum number of host routes per subnet (integer value)
+#max_subnet_host_routes = 20
+
+# Enables IPv6 Prefix Delegation for automatic subnet CIDR allocation. Set to
+# True to enable IPv6 Prefix Delegation for subnet allocation in a PD-capable
+# environment. Users making subnet creation requests for IPv6 subnets without
+# providing a CIDR or subnetpool ID will be given a CIDR via the Prefix
+# Delegation mechanism. Note that enabling PD will override the behavior of the
+# default IPv6 subnetpool. (boolean value)
+#ipv6_pd_enabled = false
+
+# DHCP lease duration (in seconds). Use -1 to tell dnsmasq to use infinite
+# lease times.
(integer value) +#dhcp_lease_duration = 86400 + +# Domain to use for building the hostnames (string value) +#dns_domain = openstacklocal + +# Driver for external DNS integration. (string value) +#external_dns_driver = + +# Allow sending resource operation notification to DHCP agent (boolean value) +#dhcp_agent_notification = true + +# Allow overlapping IP support in Neutron. Attention: the following parameter +# MUST be set to False if Neutron is being used in conjunction with Nova +# security groups. (boolean value) +#allow_overlapping_ips = false + +# Hostname to be used by the Neutron server, agents and services running on +# this machine. All the agents and services running on this machine must use +# the same host value. (host address value) +# +# This option has a sample default set, which means that +# its actual default value may vary from the one documented +# below. +#host = example.domain + +# This string is prepended to the normal URL that is returned in links to the +# OpenStack Network API. If it is empty (the default), the URLs are returned +# unchanged. (string value) +#network_link_prefix = + +# Send notification to nova when port status changes (boolean value) +#notify_nova_on_port_status_changes = true + +# Send notification to nova when port data (fixed_ips/floatingip) changes so +# nova can update its cache. (boolean value) +#notify_nova_on_port_data_changes = true + +# Number of seconds between sending events to nova if there are any events to +# send. (integer value) +#send_events_interval = 2 + +# Set process name to match child worker role. Available options are: 'off' - +# retains the previous behavior; 'on' - renames processes to 'neutron-server: +# role (original string)'; 'brief' - renames the same as 'on', but without the +# original string, such as 'neutron-server: role'. (string value) +#setproctitle = on + +# Neutron IPAM (IP address management) driver to use. By default, the reference +# implementation of the Neutron IPAM driver is used. (string value) +#ipam_driver = internal + +# If True, then allow plugins that support it to create VLAN transparent +# networks. (boolean value) +#vlan_transparent = false + +# If True, then allow plugins to decide whether to perform validations on +# filter parameters. Filter validation is enabled if this config is turned on +# and it is supported by all plugins (boolean value) +#filter_validation = true + +# MTU of the underlying physical network. Neutron uses this value to calculate +# MTU for all virtual network components. For flat and VLAN networks, neutron +# uses this value without modification. For overlay networks such as VXLAN, +# neutron automatically subtracts the overlay protocol overhead from this +# value. Defaults to 1500, the standard value for Ethernet. (integer value) +# Deprecated group/name - [ml2]/segment_mtu +#global_physnet_mtu = 1500 + +# Number of times client connections (nova, ironic) should be retried on a +# failed HTTP call. 0 (zero) means connection is attempted only once (not +# retried). Setting to any positive integer means that on failure the +# connection is retried that many times. For example, setting to 3 means total +# attempts to connect will be 4. (integer value) +# Minimum value: 0 +#http_retries = 3 + +# If False, neutron-server will disable the following DHCP-agent related +# functions:1. DHCP provisioning block 2. DHCP scheduler API extension 3. +# Network scheduling mechanism 4. 
DHCP RPC/notification (boolean value) +#enable_traditional_dhcp = true + +# Number of backlog requests to configure the socket with (integer value) +#backlog = 4096 + +# Number of seconds to keep retrying to listen (integer value) +#retry_until_window = 30 + +# Enable SSL on the API server (boolean value) +#use_ssl = false + +# Seconds between running periodic tasks. (integer value) +#periodic_interval = 40 + +# Number of separate API worker processes for service. If not specified, the +# default is equal to the number of CPUs available for best performance, capped +# by potential RAM usage. (integer value) +#api_workers = + +# Number of RPC worker processes for service. If not specified, the default is +# equal to half the number of API workers. (integer value) +#rpc_workers = + +# Number of RPC worker processes dedicated to state reports queue. (integer +# value) +#rpc_state_report_workers = 1 + +# Range of seconds to randomly delay when starting the periodic task scheduler +# to reduce stampeding. (Disable by setting to 0) (integer value) +#periodic_fuzzy_delay = 5 + +# Maximum seconds to wait for a response from an RPC call. (integer value) +#rpc_response_max_timeout = 600 + +# +# From neutron.agent +# + +# The driver used to manage the virtual interface. (string value) +#interface_driver = + +# Location for Metadata Proxy UNIX domain socket. (string value) +#metadata_proxy_socket = $state_path/metadata_proxy + +# User (uid or name) running metadata proxy after its initialization (if empty: +# agent effective user). (string value) +#metadata_proxy_user = + +# Group (gid or name) running metadata proxy after its initialization (if +# empty: agent effective group). (string value) +#metadata_proxy_group = + +# +# From neutron.db +# + +# Seconds to regard the agent is down; should be at least twice +# report_interval, to be sure the agent is down for good. (integer value) +#agent_down_time = 75 + +# Representing the resource type whose load is being reported by the agent. +# This can be "networks", "subnets" or "ports". When specified (Default is +# networks), the server will extract particular load sent as part of its agent +# configuration object from the agent report state, which is the number of +# resources being consumed, at every report_interval.dhcp_load_type can be used +# in combination with network_scheduler_driver = +# neutron.scheduler.dhcp_agent_scheduler.WeightScheduler When the +# network_scheduler_driver is WeightScheduler, dhcp_load_type can be configured +# to represent the choice for the resource being balanced. Example: +# dhcp_load_type=networks (string value) +# Possible values: +# networks - +# subnets - +# ports - +#dhcp_load_type = networks + +# Agent starts with admin_state_up=False when enable_new_agents=False. In the +# case, user's resources will not be scheduled automatically to the agent until +# admin changes admin_state_up to True. (boolean value) +#enable_new_agents = true + +# Maximum number of routes per router (integer value) +#max_routes = 30 + +# Define the default value of enable_snat if not provided in +# external_gateway_info. (boolean value) +#enable_snat_by_default = true + +# Driver to use for scheduling network to DHCP agent (string value) +#network_scheduler_driver = neutron.scheduler.dhcp_agent_scheduler.WeightScheduler + +# Allow auto scheduling networks to DHCP agent. (boolean value) +#network_auto_schedule = true + +# Automatically remove networks from offline DHCP agents. 
(boolean value) +#allow_automatic_dhcp_failover = true + +# Number of DHCP agents scheduled to host a tenant network. If this number is +# greater than 1, the scheduler automatically assigns multiple DHCP agents for +# a given tenant network, providing high availability for DHCP service. +# (integer value) +# Minimum value: 1 +#dhcp_agents_per_network = 1 + +# Enable services on an agent with admin_state_up False. If this option is +# False, when admin_state_up of an agent is turned False, services on it will +# be disabled. Agents with admin_state_up False are not selected for automatic +# scheduling regardless of this option. But manual scheduling to such agents is +# available if this option is True. (boolean value) +#enable_services_on_agents_with_admin_state_down = false + +# The base mac address used for unique DVR instances by Neutron. The first 3 +# octets will remain unchanged. If the 4th octet is not 00, it will also be +# used. The others will be randomly generated. The 'dvr_base_mac' *must* be +# different from 'base_mac' to avoid mixing them up with MAC's allocated for +# tenant ports. A 4 octet example would be dvr_base_mac = fa:16:3f:4f:00:00. +# The default is 3 octet (string value) +#dvr_base_mac = fa:16:3f:00:00:00 + +# System-wide flag to determine the type of router that tenants can create. +# Only admin can override. (boolean value) +#router_distributed = false + +# Determine if setup is configured for DVR. If False, DVR API extension will be +# disabled. (boolean value) +#enable_dvr = true + +# Flag to determine if hosting a DVR local router to the DHCP agent is desired. +# If False, any L3 function supported by the DHCP agent instance will not be +# possible, for instance: DNS. (boolean value) +#host_dvr_for_dhcp = true + +# Driver to use for scheduling router to a default L3 agent (string value) +#router_scheduler_driver = neutron.scheduler.l3_agent_scheduler.LeastRoutersScheduler + +# Allow auto scheduling of routers to L3 agent. (boolean value) +#router_auto_schedule = true + +# Automatically reschedule routers from offline L3 agents to online L3 agents. +# (boolean value) +#allow_automatic_l3agent_failover = false + +# Enable HA mode for virtual routers. (boolean value) +#l3_ha = false + +# Maximum number of L3 agents which a HA router will be scheduled on. If it is +# set to 0 then the router will be scheduled on every agent. (integer value) +#max_l3_agents_per_router = 3 + +# Subnet used for the l3 HA admin network. (string value) +#l3_ha_net_cidr = 169.254.192.0/18 + +# The network type to use when creating the HA network for an HA router. By +# default or if empty, the first 'tenant_network_types' is used. This is +# helpful when the VRRP traffic should use a specific network which is not the +# default one. (string value) +#l3_ha_network_type = + +# The physical network name with which the HA network can be created. (string +# value) +#l3_ha_network_physical_name = + +# +# From neutron.extensions +# + +# Maximum number of allowed address pairs (integer value) +#max_allowed_address_pair = 10 + +# Defines the allowed conntrack helpers, and conntack helper module protocol +# constraints. (list value) +# +# This option has a sample default set, which means that +# its actual default value may vary from the one documented +# below. +#allowed_conntrack_helpers = tftp:udp,ftp:tcp,sip:tcp,sip:udp + +# +# From oslo.log +# + +# If set to true, the logging level will be set to DEBUG instead of the default +# INFO level. 
(boolean value) +# Note: This option can be changed without restarting. +#debug = false + +# The name of a logging configuration file. This file is appended to any +# existing logging configuration files. For details about logging configuration +# files, see the Python logging module documentation. Note that when logging +# configuration files are used then all logging configuration is set in the +# configuration file and other logging configuration options are ignored (for +# example, log-date-format). (string value) +# Note: This option can be changed without restarting. +# Deprecated group/name - [DEFAULT]/log_config +#log_config_append = + +# Defines the format string for %%(asctime)s in log records. Default: +# %(default)s . This option is ignored if log_config_append is set. (string +# value) +#log_date_format = %Y-%m-%d %H:%M:%S + +# (Optional) Name of log file to send logging output to. If no default is set, +# logging will go to stderr as defined by use_stderr. This option is ignored if +# log_config_append is set. (string value) +# Deprecated group/name - [DEFAULT]/logfile +#log_file = + +# (Optional) The base directory used for relative log_file paths. This option +# is ignored if log_config_append is set. (string value) +# Deprecated group/name - [DEFAULT]/logdir +#log_dir = + +# Uses logging handler designed to watch file system. When log file is moved or +# removed this handler will open a new log file with specified path +# instantaneously. It makes sense only if log_file option is specified and +# Linux platform is used. This option is ignored if log_config_append is set. +# (boolean value) +#watch_log_file = false + +# Use syslog for logging. Existing syslog format is DEPRECATED and will be +# changed later to honor RFC5424. This option is ignored if log_config_append +# is set. (boolean value) +#use_syslog = false + +# Enable journald for logging. If running in a systemd environment you may wish +# to enable journal support. Doing so will use the journal native protocol +# which includes structured metadata in addition to log messages.This option is +# ignored if log_config_append is set. (boolean value) +#use_journal = false + +# Syslog facility to receive log lines. This option is ignored if +# log_config_append is set. (string value) +#syslog_log_facility = LOG_USER + +# Use JSON formatting for logging. This option is ignored if log_config_append +# is set. (boolean value) +#use_json = false + +# Log output to standard error. This option is ignored if log_config_append is +# set. (boolean value) +#use_stderr = false + +# Log output to Windows Event Log. (boolean value) +#use_eventlog = false + +# The amount of time before the log files are rotated. This option is ignored +# unless log_rotation_type is setto "interval". (integer value) +#log_rotate_interval = 1 + +# Rotation interval type. The time of the last file change (or the time when +# the service was started) is used when scheduling the next rotation. (string +# value) +# Possible values: +# Seconds - +# Minutes - +# Hours - +# Days - +# Weekday - +# Midnight - +#log_rotate_interval_type = days + +# Maximum number of rotated log files. (integer value) +#max_logfile_count = 30 + +# Log file maximum size in MB. This option is ignored if "log_rotation_type" is +# not set to "size". (integer value) +#max_logfile_size_mb = 200 + +# Log rotation type. (string value) +# Possible values: +# interval - Rotate logs at predefined time intervals. +# size - Rotate logs once they reach a predefined size. 
+# none - Do not rotate log files. +#log_rotation_type = none + +# Format string to use for log messages with context. Used by +# oslo_log.formatters.ContextFormatter (string value) +#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s + +# Format string to use for log messages when context is undefined. Used by +# oslo_log.formatters.ContextFormatter (string value) +#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s + +# Additional data to append to log message when logging level for the message +# is DEBUG. Used by oslo_log.formatters.ContextFormatter (string value) +#logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d + +# Prefix each line of exception output with this format. Used by +# oslo_log.formatters.ContextFormatter (string value) +#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s + +# Defines the format string for %(user_identity)s that is used in +# logging_context_format_string. Used by oslo_log.formatters.ContextFormatter +# (string value) +#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s + +# List of package logging levels in logger=LEVEL pairs. This option is ignored +# if log_config_append is set. (list value) +#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,oslo_messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,oslo_policy=INFO,dogpile.core.dogpile=INFO + +# Enables or disables publication of error events. (boolean value) +#publish_errors = false + +# The format for an instance that is passed with the log message. (string +# value) +#instance_format = "[instance: %(uuid)s] " + +# The format for an instance UUID that is passed with the log message. (string +# value) +#instance_uuid_format = "[instance: %(uuid)s] " + +# Interval, number of seconds, of log rate limiting. (integer value) +#rate_limit_interval = 0 + +# Maximum number of logged messages per rate_limit_interval. (integer value) +#rate_limit_burst = 0 + +# Log level name used by rate limiting: CRITICAL, ERROR, INFO, WARNING, DEBUG +# or empty string. Logs with level greater or equal to rate_limit_except_level +# are not filtered. An empty string means that all levels are filtered. (string +# value) +#rate_limit_except_level = CRITICAL + +# Enables or disables fatal status of deprecations. (boolean value) +#fatal_deprecations = false + +# +# From oslo.messaging +# + +# Size of RPC connection pool. (integer value) +# Minimum value: 1 +#rpc_conn_pool_size = 30 + +# The pool size limit for connections expiration policy (integer value) +#conn_pool_min_size = 2 + +# The time-to-live in sec of idle connections in the pool (integer value) +#conn_pool_ttl = 1200 + +# Size of executor thread pool when executor is threading or eventlet. (integer +# value) +# Deprecated group/name - [DEFAULT]/rpc_thread_pool_size +#executor_thread_pool_size = 64 + +# Seconds to wait for a response from a call. (integer value) +#rpc_response_timeout = 60 + +# The network address and optional user credentials for connecting to the +# messaging backend, in URL format. 
The expected format is:
+#
+# driver://[user:pass@]host:port[,[userN:passN@]hostN:portN]/virtual_host?query
+#
+# Example: rabbit://rabbitmq:password@127.0.0.1:5672//
+#
+# For full details on the fields in the URL see the documentation of
+# oslo_messaging.TransportURL at
+# https://docs.openstack.org/oslo.messaging/latest/reference/transport.html
+# (string value)
+transport_url = {{ amqp.transport_url }}
+
+# The default exchange under which topics are scoped. May be overridden by an
+# exchange name specified in the transport_url option. (string value)
+#control_exchange = neutron
+
+# Add an endpoint to answer to ping calls. Endpoint is named
+# oslo_rpc_server_ping (boolean value)
+#rpc_ping_enabled = false
+
+#
+# From oslo.service.periodic_task
+#
+
+# Some periodic tasks can be run in a separate process. Should we run them
+# here? (boolean value)
+#run_external_periodic_tasks = true
+
+#
+# From oslo.service.service
+#
+
+# Enable eventlet backdoor. Acceptable values are 0, <port>, and
+# <start>:<end>, where 0 results in listening on a random tcp port number;
+# <port> results in listening on the specified port number (and not enabling
+# backdoor if that port is in use); and <start>:<end> results in listening on
+# the smallest unused port number within the specified range of port numbers.
+# The chosen port is displayed in the service's log file. (string value)
+#backdoor_port =
+
+# Enable eventlet backdoor, using the provided path as a unix socket that can
+# receive connections. This option is mutually exclusive with 'backdoor_port'
+# in that only one should be provided. If both are provided then the existence
+# of this option overrides the usage of that option. Inside the path {pid} will
+# be replaced with the PID of the current process. (string value)
+#backdoor_socket =
+
+# Enables or disables logging values of all registered options when starting a
+# service (at DEBUG level). (boolean value)
+#log_options = true
+
+# Specify a timeout after which a gracefully shutdown server will exit. Zero
+# value means endless wait. (integer value)
+#graceful_shutdown_timeout = 60
+
+#
+# From oslo.service.wsgi
+#
+
+# File name for the paste.deploy config for api service (string value)
+#api_paste_config = api-paste.ini
+
+# A python format string that is used as the template to generate log lines.
+# The following values can be formatted into it: client_ip, date_time,
+# request_line, status_code, body_length, wall_seconds. (string value)
+#wsgi_log_format = %(client_ip)s "%(request_line)s" status: %(status_code)s len: %(body_length)s time: %(wall_seconds).7f
+
+# Sets the value of TCP_KEEPIDLE in seconds for each server socket. Not
+# supported on OS X. (integer value)
+#tcp_keepidle = 600
+
+# Size of the pool of greenthreads used by wsgi (integer value)
+#wsgi_default_pool_size = 100
+
+# Maximum line size of message headers to be accepted. max_header_line may need
+# to be increased when using large tokens (typically those generated when
+# keystone is configured to use PKI tokens with big service catalogs). (integer
+# value)
+#max_header_line = 16384
+
+# If False, closes the client socket connection explicitly. (boolean value)
+#wsgi_keep_alive = true
+
+# Timeout for client connections' socket operations. If an incoming connection
+# is idle for this number of seconds it will be closed. A value of '0' means
+# wait forever. (integer value)
+#client_socket_timeout = 900
+
+
+[agent]
+root_helper = "sudo /usr/bin/neutron-rootwrap /etc/neutron/rootwrap.conf"
+
+#
+# From neutron.agent
+#
+
+# Root helper application. Use 'sudo neutron-rootwrap
+# /etc/neutron/rootwrap.conf' to use the real root filter facility. Change to
+# 'sudo' to skip the filtering and just run the command directly. (string
+# value)
+#root_helper = sudo
+
+# Use the root helper when listing the namespaces on a system. This may not be
+# required depending on the security configuration. If the root helper is not
+# required, set this to False for a performance improvement. (boolean value)
+#use_helper_for_ns_read = true
+
+#
+# Root helper daemon application to use when possible.
+#
+# Use 'sudo neutron-rootwrap-daemon /etc/neutron/rootwrap.conf' to run rootwrap
+# in "daemon mode" which has been reported to improve performance at scale. For
+# more information on running rootwrap in "daemon mode", see:
+#
+# https://docs.openstack.org/oslo.rootwrap/latest/user/usage.html#daemon-mode
+# (string value)
+#root_helper_daemon =
+
+# Seconds between nodes reporting state to server; should be less than
+# agent_down_time, best if it is half or less than agent_down_time. (floating
+# point value)
+#report_interval = 30
+
+# Log agent heartbeats (boolean value)
+#log_agent_heartbeats = false
+
+# Add comments to iptables rules. Set to false to disallow the addition of
+# comments to generated iptables rules that describe each rule's purpose.
+# System must support the iptables comments module for addition of comments.
+# (boolean value)
+#comment_iptables_rules = true
+
+# Duplicate every iptables difference calculation to ensure the format being
+# generated matches the format of iptables-save. This option should not be
+# turned on for production systems because it imposes a performance penalty.
+# (boolean value)
+#debug_iptables_rules = false
+
+# Action to be executed when a child process dies (string value)
+# Possible values:
+# respawn -
+# exit -
+#check_child_processes_action = respawn
+
+# Interval between checks of child process liveness (seconds), use 0 to disable
+# (integer value)
+#check_child_processes_interval = 60
+
+# Location of scripts used to kill external processes. Names of scripts here
+# must follow the pattern: "<process-name>-kill" where <process-name> is name
+# of the process which should be killed using this script. For example, kill
+# script for dnsmasq process should be named "dnsmasq-kill". If path is set to
+# None, then default "kill" command will be used to stop processes. (string
+# value)
+#kill_scripts_path = /etc/neutron/kill_scripts/
+
+# Availability zone of this node (string value)
+#availability_zone = nova
+
+
+[cors]
+
+#
+# From oslo.middleware.cors
+#
+
+# Indicate whether this resource may be shared with the domain received in the
+# requests "origin" header. Format: "<protocol>://<host>[:<port>]", no trailing
+# slash. Example: https://horizon.example.com (list value)
+#allowed_origin =
+
+# Indicate that the actual request can include user credentials (boolean value)
+#allow_credentials = true
+
+# Indicate which headers are safe to expose to the API. Defaults to HTTP Simple
+# Headers. (list value)
+#expose_headers = X-Auth-Token,X-Subject-Token,X-Service-Token,X-OpenStack-Request-ID,OpenStack-Volume-microversion
+
+# Maximum cache age of CORS preflight requests. (integer value)
+#max_age = 3600
+
+# Indicate which methods can be used during the actual request.
(list value) +#allow_methods = GET,PUT,POST,DELETE,PATCH + +# Indicate which header field names may be used during the actual request. +# (list value) +#allow_headers = X-Auth-Token,X-Identity-Status,X-Roles,X-Service-Catalog,X-User-Id,X-Tenant-Id,X-OpenStack-Request-ID + + +{% include "parts/section-database" %} + +# +# From neutron.db +# + +# Database engine for which script will be generated when using offline +# migration. (string value) +#engine = + +# +# From oslo.db +# + +# If True, SQLite uses synchronous mode. (boolean value) +#sqlite_synchronous = true + +# The back end to use for the database. (string value) +# Deprecated group/name - [DEFAULT]/db_backend +#backend = sqlalchemy + +# The SQLAlchemy connection string to use to connect to the database. (string +# value) +# Deprecated group/name - [DEFAULT]/sql_connection +# Deprecated group/name - [DATABASE]/sql_connection +# Deprecated group/name - [sql]/connection +#connection = + +# The SQLAlchemy connection string to use to connect to the slave database. +# (string value) +#slave_connection = + +# The SQL mode to be used for MySQL sessions. This option, including the +# default, overrides any server-set SQL mode. To use whatever SQL mode is set +# by the server configuration, set this to no value. Example: mysql_sql_mode= +# (string value) +#mysql_sql_mode = TRADITIONAL + +# If True, transparently enables support for handling MySQL Cluster (NDB). +# (boolean value) +#mysql_enable_ndb = false + +# Connections which have been present in the connection pool longer than this +# number of seconds will be replaced with a new one the next time they are +# checked out from the pool. (integer value) +# Deprecated group/name - [DATABASE]/idle_timeout +# Deprecated group/name - [database]/idle_timeout +# Deprecated group/name - [DEFAULT]/sql_idle_timeout +# Deprecated group/name - [DATABASE]/sql_idle_timeout +# Deprecated group/name - [sql]/idle_timeout +#connection_recycle_time = 3600 + +# Maximum number of SQL connections to keep open in a pool. Setting a value of +# 0 indicates no limit. (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_pool_size +# Deprecated group/name - [DATABASE]/sql_max_pool_size +#max_pool_size = 5 + +# Maximum number of database connection retries during startup. Set to -1 to +# specify an infinite retry count. (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_retries +# Deprecated group/name - [DATABASE]/sql_max_retries +#max_retries = 10 + +# Interval between retries of opening a SQL connection. (integer value) +# Deprecated group/name - [DEFAULT]/sql_retry_interval +# Deprecated group/name - [DATABASE]/reconnect_interval +#retry_interval = 10 + +# If set, use this value for max_overflow with SQLAlchemy. (integer value) +# Deprecated group/name - [DEFAULT]/sql_max_overflow +# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow +#max_overflow = 50 + +# Verbosity of SQL debugging information: 0=None, 100=Everything. (integer +# value) +# Minimum value: 0 +# Maximum value: 100 +# Deprecated group/name - [DEFAULT]/sql_connection_debug +#connection_debug = 0 + +# Add Python stack traces to SQL as comment strings. (boolean value) +# Deprecated group/name - [DEFAULT]/sql_connection_trace +#connection_trace = false + +# If set, use this value for pool_timeout with SQLAlchemy. (integer value) +# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout +#pool_timeout = + +# Enable the experimental use of database reconnect on connection lost. 
+# (boolean value)
+#use_db_reconnect = false
+
+# Seconds between retries of a database transaction. (integer value)
+#db_retry_interval = 1
+
+# If True, increases the interval between retries of a database operation up to
+# db_max_retry_interval. (boolean value)
+#db_inc_retry_interval = true
+
+# If db_inc_retry_interval is set, the maximum seconds between retries of a
+# database operation. (integer value)
+#db_max_retry_interval = 10
+
+# Maximum retries in case of connection error or deadlock error before error is
+# raised. Set to -1 to specify an infinite retry count. (integer value)
+#db_max_retries = 20
+
+# Optional URL parameters to append onto the connection URL at connect time;
+# specify as param1=value1&param2=value2&... (string value)
+#connection_parameters =
+
+
+[healthcheck]
+
+#
+# From oslo.middleware.healthcheck
+#
+
+# DEPRECATED: The path to respond to healthcheck requests on. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#path = /healthcheck
+
+# Show more detailed information as part of the response. Security note:
+# Enabling this option may expose sensitive details about the service being
+# monitored. Be sure to verify that it will not violate your security policies.
+# (boolean value)
+#detailed = false
+
+# Additional backends that can perform health checks and report that
+# information back as part of a request. (list value)
+#backends =
+
+# Check the presence of a file to determine if an application is running on a
+# port. Used by DisableByFileHealthcheck plugin. (string value)
+#disable_by_file_path =
+
+# Check the presence of a file based on a port to determine if an application
+# is running on a port. Expects a "port:path" list of strings. Used by
+# DisableByFilesPortsHealthcheck plugin. (list value)
+#disable_by_file_paths =
+
+
+[ironic]
+
+#
+# From ironic.auth
+#
+
+# Authentication URL (string value)
+#auth_url =
+
+# Authentication type to load (string value)
+# Deprecated group/name - [ironic]/auth_plugin
+#auth_type =
+
+# PEM encoded Certificate Authority to use when verifying HTTPs connections.
+# (string value)
+#cafile =
+
+# PEM encoded client certificate cert file (string value)
+#certfile =
+
+# Collect per-API call timing information. (boolean value)
+#collect_timing = false
+
+# Optional domain ID to use with v3 and v2 parameters. It will be used for both
+# the user and project domain in v3 and ignored in v2 authentication. (string
+# value)
+#default_domain_id =
+
+# Optional domain name to use with v3 API and v2 parameters. It will be used
+# for both the user and project domain in v3 and ignored in v2 authentication.
+# (string value)
+#default_domain_name =
+
+# Domain ID to scope to (string value)
+#domain_id =
+
+# Domain name to scope to (string value)
+#domain_name =
+
+# Verify HTTPS connections. (boolean value)
+#insecure = false
+
+# PEM encoded client certificate key file (string value)
+#keyfile =
+
+# User's password (string value)
+#password =
+
+# Domain ID containing project (string value)
+#project_domain_id =
+
+# Domain name containing project (string value)
+#project_domain_name =
+
+# Project ID to scope to (string value)
+# Deprecated group/name - [ironic]/tenant_id
+#project_id =
+
+# Project name to scope to (string value)
+# Deprecated group/name - [ironic]/tenant_name
+#project_name =
+
+# Log requests to multiple loggers.
(boolean value) +#split_loggers = false + +# Scope for system operations (string value) +#system_scope = + +# Tenant ID (string value) +#tenant_id = + +# Tenant Name (string value) +#tenant_name = + +# Timeout value for http requests (integer value) +#timeout = + +# Trust ID (string value) +#trust_id = + +# User's domain id (string value) +#user_domain_id = + +# User's domain name (string value) +#user_domain_name = + +# User id (string value) +#user_id = + +# Username (string value) +# Deprecated group/name - [ironic]/user_name +#username = + +# +# From neutron +# + +# Send notification events to ironic. (For example on relevant port status +# changes.) (boolean value) +#enable_notifications = false + + +[keystone_authtoken] +www_authenticate_uri = {{ identity_service.internal_protocol }}://{{ identity_service.internal_host }}:{{ identity_service.internal_port }} +auth_url = {{ identity_service.internal_protocol }}://{{ identity_service.internal_host }}:{{ identity_service.internal_port }} +auth_type = password +project_domain_name = {{ identity_service.service_domain_name }} +user_domain_name = {{ identity_service.service_domain_name }} +project_name = {{ identity_service.service_project_name }} +username = {{ identity_service.service_user_name }} +password = {{ identity_service.service_password }} + +# +# From keystonemiddleware.auth_token +# + +# Complete "public" Identity API endpoint. This endpoint should not be an +# "admin" endpoint, as it should be accessible by all end users. +# Unauthenticated clients are redirected to this endpoint to authenticate. +# Although this endpoint should ideally be unversioned, client support in the +# wild varies. If you're using a versioned v2 endpoint here, then this should +# *not* be the same endpoint the service user utilizes for validating tokens, +# because normal end users may not be able to reach that endpoint. (string +# value) +# Deprecated group/name - [keystone_authtoken]/auth_uri +#www_authenticate_uri = + +# DEPRECATED: Complete "public" Identity API endpoint. This endpoint should not +# be an "admin" endpoint, as it should be accessible by all end users. +# Unauthenticated clients are redirected to this endpoint to authenticate. +# Although this endpoint should ideally be unversioned, client support in the +# wild varies. If you're using a versioned v2 endpoint here, then this should +# *not* be the same endpoint the service user utilizes for validating tokens, +# because normal end users may not be able to reach that endpoint. This option +# is deprecated in favor of www_authenticate_uri and will be removed in the S +# release. (string value) +# This option is deprecated for removal since Queens. +# Its value may be silently ignored in the future. +# Reason: The auth_uri option is deprecated in favor of www_authenticate_uri +# and will be removed in the S release. +#auth_uri = + +# API version of the Identity API endpoint. (string value) +#auth_version = + +# Interface to use for the Identity API endpoint. Valid values are "public", +# "internal" (default) or "admin". (string value) +#interface = internal + +# Do not handle authorization requests within the middleware, but delegate the +# authorization decision to downstream WSGI components. (boolean value) +#delay_auth_decision = false + +# Request timeout value for communicating with Identity API server. (integer +# value) +#http_connect_timeout = + +# How many times are we trying to reconnect when communicating with Identity +# API Server. 
(integer value) +#http_request_max_retries = 3 + +# Request environment key where the Swift cache object is stored. When +# auth_token middleware is deployed with a Swift cache, use this option to have +# the middleware share a caching backend with swift. Otherwise, use the +# ``memcached_servers`` option instead. (string value) +#cache = + +# Required if identity server requires client certificate (string value) +#certfile = + +# Required if identity server requires client certificate (string value) +#keyfile = + +# A PEM encoded Certificate Authority to use when verifying HTTPs connections. +# Defaults to system CAs. (string value) +#cafile = + +# Verify HTTPS connections. (boolean value) +#insecure = false + +# The region in which the identity server can be found. (string value) +#region_name = + +# Optionally specify a list of memcached server(s) to use for caching. If left +# undefined, tokens will instead be cached in-process. (list value) +# Deprecated group/name - [keystone_authtoken]/memcache_servers +#memcached_servers = + +# In order to prevent excessive effort spent validating tokens, the middleware +# caches previously-seen tokens for a configurable duration (in seconds). Set +# to -1 to disable caching completely. (integer value) +#token_cache_time = 300 + +# (Optional) If defined, indicate whether token data should be authenticated or +# authenticated and encrypted. If MAC, token data is authenticated (with HMAC) +# in the cache. If ENCRYPT, token data is encrypted and authenticated in the +# cache. If the value is not one of these options or empty, auth_token will +# raise an exception on initialization. (string value) +# Possible values: +# None - +# MAC - +# ENCRYPT - +#memcache_security_strategy = None + +# (Optional, mandatory if memcache_security_strategy is defined) This string is +# used for key derivation. (string value) +#memcache_secret_key = + +# (Optional) Number of seconds memcached server is considered dead before it is +# tried again. (integer value) +#memcache_pool_dead_retry = 300 + +# (Optional) Maximum total number of open connections to every memcached +# server. (integer value) +#memcache_pool_maxsize = 10 + +# (Optional) Socket timeout in seconds for communicating with a memcached +# server. (integer value) +#memcache_pool_socket_timeout = 3 + +# (Optional) Number of seconds a connection to memcached is held unused in the +# pool before it is closed. (integer value) +#memcache_pool_unused_timeout = 60 + +# (Optional) Number of seconds that an operation will wait to get a memcached +# client connection from the pool. (integer value) +#memcache_pool_conn_get_timeout = 10 + +# (Optional) Use the advanced (eventlet safe) memcached client pool. The +# advanced pool will only work under python 2.x. (boolean value) +#memcache_use_advanced_pool = false + +# (Optional) Indicate whether to set the X-Service-Catalog header. If False, +# middleware will not ask for service catalog on token validation and will not +# set the X-Service-Catalog header. (boolean value) +#include_service_catalog = true + +# Used to control the use and type of token binding. Can be set to: "disabled" +# to not check token binding. "permissive" (default) to validate binding +# information if the bind type is of a form known to the server and ignore it +# if not. "strict" like "permissive" but if the bind type is unknown the token +# will be rejected. "required" any form of token binding is needed to be +# allowed. Finally the name of a binding method that must be present in tokens. 
+# (string value) +#enforce_token_bind = permissive + +# A choice of roles that must be present in a service token. Service tokens are +# allowed to request that an expired token can be used and so this check should +# tightly control that only actual services should be sending this token. Roles +# here are applied as an ANY check so any role in this list must be present. +# For backwards compatibility reasons this currently only affects the +# allow_expired check. (list value) +#service_token_roles = service + +# For backwards compatibility reasons we must let valid service tokens pass +# that don't pass the service_token_roles check as valid. Setting this true +# will become the default in a future release and should be enabled if +# possible. (boolean value) +#service_token_roles_required = false + +# The name or type of the service as it appears in the service catalog. This is +# used to validate tokens that have restricted access rules. (string value) +#service_type = + +# Authentication type to load (string value) +# Deprecated group/name - [keystone_authtoken]/auth_plugin +#auth_type = + +# Config Section from which to load plugin specific options (string value) +#auth_section = + + +[nova] + +# +# From neutron +# + +# Name of nova region to use. Useful if keystone manages more than one region. +# (string value) +#region_name = + +# Type of the nova endpoint to use. This endpoint will be looked up in the +# keystone catalog and should be one of public, internal or admin. (string +# value) +# Possible values: +# public - +# admin - +# internal - +#endpoint_type = public + +# DEPRECATED: When this option is enabled, during the live migration, the OVS +# agent will only send the "vif-plugged-event" when the destination host +# interface is bound. This option also disables any other agent (like DHCP) to +# send to Nova this event when the port is provisioned.This option can be +# enabled if Nova patch https://review.opendev.org/c/openstack/nova/+/767368 is +# in place.This option is temporary and will be removed in Y and the behavior +# will be "True". (boolean value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: In Y the Nova patch +# https://review.opendev.org/c/openstack/nova/+/767368 will be in the code even +# when running a Nova server in X. +#live_migration_events = false + +# +# From nova.auth +# + +# Authentication URL (string value) +#auth_url = + +# Authentication type to load (string value) +# Deprecated group/name - [nova]/auth_plugin +#auth_type = + +# PEM encoded Certificate Authority to use when verifying HTTPs connections. +# (string value) +#cafile = + +# PEM encoded client certificate cert file (string value) +#certfile = + +# Collect per-API call timing information. (boolean value) +#collect_timing = false + +# Optional domain ID to use with v3 and v2 parameters. It will be used for both +# the user and project domain in v3 and ignored in v2 authentication. (string +# value) +#default_domain_id = + +# Optional domain name to use with v3 API and v2 parameters. It will be used +# for both the user and project domain in v3 and ignored in v2 authentication. +# (string value) +#default_domain_name = + +# Domain ID to scope to (string value) +#domain_id = + +# Domain name to scope to (string value) +#domain_name = + +# Verify HTTPS connections. 
(boolean value) +#insecure = false + +# PEM encoded client certificate key file (string value) +#keyfile = + +# User's password (string value) +#password = + +# Domain ID containing project (string value) +#project_domain_id = + +# Domain name containing project (string value) +#project_domain_name = + +# Project ID to scope to (string value) +# Deprecated group/name - [nova]/tenant_id +#project_id = + +# Project name to scope to (string value) +# Deprecated group/name - [nova]/tenant_name +#project_name = + +# Log requests to multiple loggers. (boolean value) +#split_loggers = false + +# Scope for system operations (string value) +#system_scope = + +# Tenant ID (string value) +#tenant_id = + +# Tenant Name (string value) +#tenant_name = + +# Timeout value for http requests (integer value) +#timeout = + +# Trust ID (string value) +#trust_id = + +# User's domain id (string value) +#user_domain_id = + +# User's domain name (string value) +#user_domain_name = + +# User id (string value) +#user_id = + +# Username (string value) +# Deprecated group/name - [nova]/user_name +#username = + + +[oslo_concurrency] + +# +# From oslo.concurrency +# + +# Enables or disables inter-process locks. (boolean value) +#disable_process_locking = false + +# Directory to use for lock files. For security, the specified directory +# should only be writable by the user running the processes that need locking. +# Defaults to environment variable OSLO_LOCK_PATH. If external locks are used, +# a lock path must be set. (string value) +#lock_path = + + +[oslo_messaging_amqp] + +# +# From oslo.messaging +# + +# Name for the AMQP container. must be globally unique. Defaults to a generated +# UUID (string value) +#container_name = + +# Timeout for inactive connections (in seconds) (integer value) +#idle_timeout = 0 + +# Debug: dump AMQP frames to stdout (boolean value) +#trace = false + +# Attempt to connect via SSL. If no other ssl-related parameters are given, it +# will use the system's CA-bundle to verify the server's certificate. (boolean +# value) +#ssl = false + +# CA certificate PEM file used to verify the server's certificate (string +# value) +#ssl_ca_file = + +# Self-identifying certificate PEM file for client authentication (string +# value) +#ssl_cert_file = + +# Private key PEM file used to sign ssl_cert_file certificate (optional) +# (string value) +#ssl_key_file = + +# Password for decrypting ssl_key_file (if encrypted) (string value) +#ssl_key_password = + +# By default SSL checks that the name in the server's certificate matches the +# hostname in the transport_url. In some configurations it may be preferable to +# use the virtual hostname instead, for example if the server uses the Server +# Name Indication TLS extension (rfc6066) to provide a certificate per virtual +# host. Set ssl_verify_vhost to True if the server's SSL certificate uses the +# virtual host name instead of the DNS name. (boolean value) +#ssl_verify_vhost = false + +# Space separated list of acceptable SASL mechanisms (string value) +#sasl_mechanisms = + +# Path to directory that contains the SASL configuration (string value) +#sasl_config_dir = + +# Name of configuration file (without .conf suffix) (string value) +#sasl_config_name = + +# SASL realm to use if no realm present in username (string value) +#sasl_default_realm = + +# Seconds to pause before attempting to re-connect. 
(integer value) +# Minimum value: 1 +#connection_retry_interval = 1 + +# Increase the connection_retry_interval by this many seconds after each +# unsuccessful failover attempt. (integer value) +# Minimum value: 0 +#connection_retry_backoff = 2 + +# Maximum limit for connection_retry_interval + connection_retry_backoff +# (integer value) +# Minimum value: 1 +#connection_retry_interval_max = 30 + +# Time to pause between re-connecting an AMQP 1.0 link that failed due to a +# recoverable error. (integer value) +# Minimum value: 1 +#link_retry_delay = 10 + +# The maximum number of attempts to re-send a reply message which failed due to +# a recoverable error. (integer value) +# Minimum value: -1 +#default_reply_retry = 0 + +# The deadline for an rpc reply message delivery. (integer value) +# Minimum value: 5 +#default_reply_timeout = 30 + +# The deadline for an rpc cast or call message delivery. Only used when caller +# does not provide a timeout expiry. (integer value) +# Minimum value: 5 +#default_send_timeout = 30 + +# The deadline for a sent notification message delivery. Only used when caller +# does not provide a timeout expiry. (integer value) +# Minimum value: 5 +#default_notify_timeout = 30 + +# The duration to schedule a purge of idle sender links. Detach link after +# expiry. (integer value) +# Minimum value: 1 +#default_sender_link_timeout = 600 + +# Indicates the addressing mode used by the driver. +# Permitted values: +# 'legacy' - use legacy non-routable addressing +# 'routable' - use routable addresses +# 'dynamic' - use legacy addresses if the message bus does not support routing +# otherwise use routable addressing (string value) +#addressing_mode = dynamic + +# Enable virtual host support for those message buses that do not natively +# support virtual hosting (such as qpidd). When set to true the virtual host +# name will be added to all message bus addresses, effectively creating a +# private 'subnet' per virtual host. Set to False if the message bus supports +# virtual hosting using the 'hostname' field in the AMQP 1.0 Open performative +# as the name of the virtual host. (boolean value) +#pseudo_vhost = true + +# address prefix used when sending to a specific server (string value) +#server_request_prefix = exclusive + +# address prefix used when broadcasting to all servers (string value) +#broadcast_prefix = broadcast + +# address prefix when sending to any server in group (string value) +#group_request_prefix = unicast + +# Address prefix for all generated RPC addresses (string value) +#rpc_address_prefix = openstack.org/om/rpc + +# Address prefix for all generated Notification addresses (string value) +#notify_address_prefix = openstack.org/om/notify + +# Appended to the address prefix when sending a fanout message. Used by the +# message bus to identify fanout messages. (string value) +#multicast_address = multicast + +# Appended to the address prefix when sending to a particular RPC/Notification +# server. Used by the message bus to identify messages sent to a single +# destination. (string value) +#unicast_address = unicast + +# Appended to the address prefix when sending to a group of consumers. Used by +# the message bus to identify messages that should be delivered in a round- +# robin fashion across consumers. (string value) +#anycast_address = anycast + +# Exchange name used in notification addresses. 
+# Exchange name resolution precedence: +# Target.exchange if set +# else default_notification_exchange if set +# else control_exchange if set +# else 'notify' (string value) +#default_notification_exchange = + +# Exchange name used in RPC addresses. +# Exchange name resolution precedence: +# Target.exchange if set +# else default_rpc_exchange if set +# else control_exchange if set +# else 'rpc' (string value) +#default_rpc_exchange = + +# Window size for incoming RPC Reply messages. (integer value) +# Minimum value: 1 +#reply_link_credit = 200 + +# Window size for incoming RPC Request messages (integer value) +# Minimum value: 1 +#rpc_server_credit = 100 + +# Window size for incoming Notification messages (integer value) +# Minimum value: 1 +#notify_server_credit = 100 + +# Send messages of this type pre-settled. +# Pre-settled messages will not receive acknowledgement +# from the peer. Note well: pre-settled messages may be +# silently discarded if the delivery fails. +# Permitted values: +# 'rpc-call' - send RPC Calls pre-settled +# 'rpc-reply'- send RPC Replies pre-settled +# 'rpc-cast' - Send RPC Casts pre-settled +# 'notify' - Send Notifications pre-settled +# (multi valued) +#pre_settled = rpc-cast +#pre_settled = rpc-reply + + +[oslo_messaging_kafka] + +# +# From oslo.messaging +# + +# Max fetch bytes of Kafka consumer (integer value) +#kafka_max_fetch_bytes = 1048576 + +# Default timeout(s) for Kafka consumers (floating point value) +#kafka_consumer_timeout = 1.0 + +# DEPRECATED: Pool Size for Kafka Consumers (integer value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Driver no longer uses connection pool. +#pool_size = 10 + +# DEPRECATED: The pool size limit for connections expiration policy (integer +# value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Driver no longer uses connection pool. +#conn_pool_min_size = 2 + +# DEPRECATED: The time-to-live in sec of idle connections in the pool (integer +# value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Driver no longer uses connection pool. +#conn_pool_ttl = 1200 + +# Group id for Kafka consumer. Consumers in one group will coordinate message +# consumption (string value) +#consumer_group = oslo_messaging_consumer + +# Upper bound on the delay for KafkaProducer batching in seconds (floating +# point value) +#producer_batch_timeout = 0.0 + +# Size of batch for the producer async send (integer value) +#producer_batch_size = 16384 + +# The compression codec for all data generated by the producer. If not set, +# compression will not be used. Note that the allowed values of this depend on +# the kafka version (string value) +# Possible values: +# none - +# gzip - +# snappy - +# lz4 - +# zstd - +#compression_codec = none + +# Enable asynchronous consumer commits (boolean value) +#enable_auto_commit = false + +# The maximum number of records returned in a poll call (integer value) +#max_poll_records = 500 + +# Protocol used to communicate with brokers (string value) +# Possible values: +# PLAINTEXT - +# SASL_PLAINTEXT - +# SSL - +# SASL_SSL - +#security_protocol = PLAINTEXT + +# Mechanism when security protocol is SASL (string value) +#sasl_mechanism = PLAIN + +# CA certificate PEM file used to verify the server certificate (string value) +#ssl_cafile = + +# Client certificate PEM file used for authentication. 
(string value)
+#ssl_client_cert_file =
+
+# Client key PEM file used for authentication. (string value)
+#ssl_client_key_file =
+
+# Client key password file used for authentication. (string value)
+#ssl_client_key_password =
+
+
+[oslo_messaging_notifications]
+
+#
+# From oslo.messaging
+#
+
+# The driver(s) to handle sending notifications. Possible values are
+# messaging, messagingv2, routing, log, test, noop (multi valued)
+# Deprecated group/name - [DEFAULT]/notification_driver
+#driver =
+
+# A URL representing the messaging driver to use for notifications. If not set,
+# we fall back to the same configuration used for RPC. (string value)
+# Deprecated group/name - [DEFAULT]/notification_transport_url
+#transport_url =
+
+# AMQP topic used for OpenStack notifications. (list value)
+# Deprecated group/name - [rpc_notifier2]/topics
+# Deprecated group/name - [DEFAULT]/notification_topics
+#topics = notifications
+
+# The maximum number of attempts to re-send a notification message which failed
+# to be delivered due to a recoverable error. 0 - No retry, -1 - indefinite
+# (integer value)
+#retry = -1
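As an illustration of how a service consumes the [oslo_messaging_notifications]
options above, the following sketch (not part of this charm; the publisher_id
and payload are invented for the example) emits a single notification over the
configured transport:

    from oslo_config import cfg
    import oslo_messaging

    # Reads transport_url/driver/topics from the loaded configuration;
    # the values here mirror the commented defaults above.
    transport = oslo_messaging.get_notification_transport(cfg.CONF)
    notifier = oslo_messaging.Notifier(
        transport,
        publisher_id='neutron.example',  # invented for the example
        driver='messagingv2',
        topics=['notifications'],
    )
    notifier.info({}, 'network.create.end', {'network_id': 'example-id'})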
(string value) +# Possible values: +# round-robin - +# shuffle - +#kombu_failover_strategy = round-robin + +# The RabbitMQ login method. (string value) +# Possible values: +# PLAIN - +# AMQPLAIN - +# RABBIT-CR-DEMO - +#rabbit_login_method = AMQPLAIN + +# How frequently to retry connecting with RabbitMQ. (integer value) +#rabbit_retry_interval = 1 + +# How long to backoff for between retries when connecting to RabbitMQ. (integer +# value) +#rabbit_retry_backoff = 2 + +# Maximum interval of RabbitMQ connection retries. Default is 30 seconds. +# (integer value) +#rabbit_interval_max = 30 + +# Try to use HA queues in RabbitMQ (x-ha-policy: all). If you change this +# option, you must wipe the RabbitMQ database. In RabbitMQ 3.0, queue mirroring +# is no longer controlled by the x-ha-policy argument when declaring a queue. +# If you just want to make sure that all queues (except those with auto- +# generated names) are mirrored across all nodes, run: "rabbitmqctl set_policy +# HA '^(?!amq\.).*' '{"ha-mode": "all"}' " (boolean value) +#rabbit_ha_queues = false + +# Positive integer representing duration in seconds for queue TTL (x-expires). +# Queues which are unused for the duration of the TTL are automatically +# deleted. The parameter affects only reply and fanout queues. (integer value) +# Minimum value: 1 +#rabbit_transient_queues_ttl = 1800 + +# Specifies the number of messages to prefetch. Setting to zero allows +# unlimited messages. (integer value) +#rabbit_qos_prefetch_count = 0 + +# Number of seconds after which the Rabbit broker is considered down if +# heartbeat's keep-alive fails (0 disables heartbeat). (integer value) +#heartbeat_timeout_threshold = 60 + +# How often times during the heartbeat_timeout_threshold we check the +# heartbeat. (integer value) +#heartbeat_rate = 2 + +# DEPRECATED: (DEPRECATED) Enable/Disable the RabbitMQ mandatory flag for +# direct send. The direct send is used as reply, so the MessageUndeliverable +# exception is raised in case the client queue does not +# exist.MessageUndeliverable exception will be used to loop for a timeout to +# lets a chance to sender to recover.This flag is deprecated and it will not be +# possible to deactivate this functionality anymore (boolean value) +# This option is deprecated for removal. +# Its value may be silently ignored in the future. +# Reason: Mandatory flag no longer deactivable. +#direct_mandatory_flag = true + +# Enable x-cancel-on-ha-failover flag so that rabbitmq server will cancel and +# notify consumerswhen queue is down (boolean value) +#enable_cancel_on_failover = false + + +[oslo_middleware] + +# +# From oslo.middleware.http_proxy_to_wsgi +# + +# Whether the application is behind a proxy or not. This determines if the +# middleware should parse the headers or not. (boolean value) +#enable_proxy_headers_parsing = false + + +[oslo_policy] + +# +# From oslo.policy +# + +# This option controls whether or not to enforce scope when evaluating +# policies. If ``True``, the scope of the token used in the request is compared +# to the ``scope_types`` of the policy being enforced. If the scopes do not +# match, an ``InvalidScope`` exception will be raised. If ``False``, a message +# will be logged informing operators that policies are being invoked with +# mismatching scope. (boolean value) +#enforce_scope = false + +# This option controls whether or not to use old deprecated defaults when +# evaluating policies. If ``True``, the old deprecated defaults are not going +# to be evaluated. 
+
+
+[oslo_middleware]
+
+#
+# From oslo.middleware.http_proxy_to_wsgi
+#
+
+# Whether the application is behind a proxy or not. This determines if the
+# middleware should parse the headers or not. (boolean value)
+#enable_proxy_headers_parsing = false
+
+
+[oslo_policy]
+
+#
+# From oslo.policy
+#
+
+# This option controls whether or not to enforce scope when evaluating
+# policies. If ``True``, the scope of the token used in the request is compared
+# to the ``scope_types`` of the policy being enforced. If the scopes do not
+# match, an ``InvalidScope`` exception will be raised. If ``False``, a message
+# will be logged informing operators that policies are being invoked with
+# mismatching scope. (boolean value)
+#enforce_scope = false
+
+# This option controls whether or not to use old deprecated defaults when
+# evaluating policies. If ``True``, the old deprecated defaults are not going
+# to be evaluated. This means if any existing token is allowed for old defaults
+# but is disallowed for new defaults, it will be disallowed. You are encouraged
+# to enable this flag along with the ``enforce_scope`` flag so that you can get
+# the benefits of the new defaults and ``scope_type`` together (boolean value)
+#enforce_new_defaults = false
+
+# The relative or absolute path of a file that maps roles to permissions for a
+# given service. Relative paths must be specified in relation to the
+# configuration file setting this option. (string value)
+#policy_file = policy.yaml
+
+# Default rule. Enforced when a requested rule is not found. (string value)
+#policy_default_rule = default
+
+# Directories where policy configuration files are stored. They can be relative
+# to any directory in the search path defined by the config_dir option, or
+# absolute paths. The file defined by policy_file must exist for these
+# directories to be searched. Missing or empty directories are ignored. (multi
+# valued)
+#policy_dirs = policy.d
+
+# Content Type to send and receive data for REST based policy check (string
+# value)
+# Possible values:
+# application/x-www-form-urlencoded -
+# application/json -
+#remote_content_type = application/x-www-form-urlencoded
+
+# Server identity verification for REST based policy check (boolean value)
+#remote_ssl_verify_server_crt = false
+
+# Absolute path to CA cert file for REST based policy check (string value)
+#remote_ssl_ca_crt_file =
+
+# Absolute path to client cert for REST based policy check (string value)
+#remote_ssl_client_crt_file =
+
+# Absolute path to client key file for REST based policy check (string value)
+#remote_ssl_client_key_file =
+
+
+[oslo_reports]
+
+#
+# From oslo.reports
+#
+
+# Path to a log directory in which to create the report file (string value)
+#log_dir =
+
+# The path to a file to watch for changes to trigger the reports, instead of
+# signals. Setting this option disables the signal trigger for the reports. If
+# application is running as a WSGI application it is recommended to use this
+# instead of signals. (string value)
+#file_event_handler =
+
+# How many seconds to wait between polls when file_event_handler is set
+# (integer value)
+#file_event_handler_interval = 1
+
+
+[privsep]
+# Configuration options for the oslo.privsep daemon. Note that this group name
+# can be changed by the consuming service. Check the service's docs to see if
+# this is the case.
+
+#
+# From oslo.privsep
+#
+
+# User that the privsep daemon should run as. (string value)
+#user =
+
+# Group that the privsep daemon should run as. (string value)
+#group =
+
+# List of Linux capabilities retained by the privsep daemon. (list value)
+#capabilities =
+
+# The number of threads available for privsep to concurrently run processes.
+# Defaults to the number of CPU cores in the system. (integer value)
+# Minimum value: 1
+#
+# This option has a sample default set, which means that
+# its actual default value may vary from the one documented
+# below.
+#thread_pool_size = multiprocessing.cpu_count()
+
+# Command to invoke to start the privsep daemon if not using the "fork" method.
+# If not specified, a default is generated using "sudo privsep-helper" and
+# arguments designed to recreate the current configuration. This command must
+# accept suitable --privsep_context and --privsep_sock_path arguments. (string
+# value)
+#helper_command =
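The [privsep] options above configure a daemon that the consuming service
defines in code. A sketch of the usual pattern (the module layout and the
set_link_up function are invented; the API follows oslo.privsep's documented
usage):

    from oslo_privsep import capabilities, priv_context

    # cfg_section='privsep' ties this context to the [privsep] group above.
    default = priv_context.PrivContext(
        __name__,
        cfg_section='privsep',
        pypath=__name__ + '.default',
        capabilities=[capabilities.CAP_NET_ADMIN],
    )

    @default.entrypoint
    def set_link_up(ifname):
        # The body runs inside the privileged daemon, not the caller's
        # process; only illustrative here.
        ...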
+
+
+[quotas]
+
+#
+# From neutron
+#
+
+# Default number of resources allowed per tenant. A negative value means
+# unlimited. (integer value)
+#default_quota = -1
+
+# Number of networks allowed per tenant. A negative value means unlimited.
+# (integer value)
+#quota_network = 100
+
+# Number of subnets allowed per tenant. A negative value means unlimited.
+# (integer value)
+#quota_subnet = 100
+
+# Number of ports allowed per tenant. A negative value means unlimited.
+# (integer value)
+#quota_port = 500
+
+# Default driver to use for quota checks. (string value)
+#quota_driver = neutron.db.quota.driver.DbQuotaDriver
+
+# Keep track of current resource quota usage in the database. Plugins which
+# do not leverage the neutron database should set this flag to False. (boolean
+# value)
+#track_quota_usage = true
+
+#
+# From neutron.extensions
+#
+
+# Number of routers allowed per tenant. A negative value means unlimited.
+# (integer value)
+#quota_router = 10
+
+# Number of floating IPs allowed per tenant. A negative value means unlimited.
+# (integer value)
+#quota_floatingip = 50
+
+# Number of security groups allowed per tenant. A negative value means
+# unlimited. (integer value)
+#quota_security_group = 10
+
+# Number of security rules allowed per tenant. A negative value means
+# unlimited. (integer value)
+#quota_security_group_rule = 100
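The quota options above are absolute per-tenant caps, with any negative value
meaning unlimited and default_quota covering resources without an explicit
option. The arithmetic is plain subtraction; an illustrative sketch (not
neutron's quota driver):

    QUOTAS = {"network": 100, "subnet": 100, "port": 500,
              "router": 10, "floatingip": 50, "default": -1}

    def remaining(resource, used):
        # A negative limit means unlimited, mirroring the comments above.
        limit = QUOTAS.get(resource, QUOTAS["default"])
        return None if limit < 0 else max(limit - used, 0)

    print(remaining("port", 42))         # 458
    print(remaining("loadbalancer", 3))  # None (falls back to default_quota)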
+
+
+[ssl]
+
+#
+# From oslo.service.sslutils
+#
+
+# CA certificate file to use to verify connecting clients. (string value)
+# Deprecated group/name - [DEFAULT]/ssl_ca_file
+#ca_file =
+
+# Certificate file to use when starting the server securely. (string value)
+# Deprecated group/name - [DEFAULT]/ssl_cert_file
+#cert_file =
+
+# Private key file to use when starting the server securely. (string value)
+# Deprecated group/name - [DEFAULT]/ssl_key_file
+#key_file =
+
+# SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and
+# SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some
+# distributions. (string value)
+#version =
+
+# Sets the list of available ciphers. The value should be a string in the
+# OpenSSL cipher list format. (string value)
+#ciphers =
diff --git a/charms/neutron-k8s/src/templates/parts/section-database b/charms/neutron-k8s/src/templates/parts/section-database
new file mode 100644
index 00000000..e9062346
--- /dev/null
+++ b/charms/neutron-k8s/src/templates/parts/section-database
@@ -0,0 +1,7 @@
+[database]
+{% if shared_db.database_host -%}
+connection = {{ shared_db.database_type }}://{{ shared_db.database_user }}:{{ shared_db.database_password }}@{{ shared_db.database_host }}/{{ shared_db.database }}{% if shared_db.database_ssl_ca %}?ssl_ca={{ shared_db.database_ssl_ca }}{% if shared_db.database_ssl_cert %}&ssl_cert={{ shared_db.database_ssl_cert }}&ssl_key={{ shared_db.database_ssl_key }}{% endif %}{% endif %}
+{% else -%}
+connection = sqlite:////var/lib/neutron/neutron.db
+{% endif -%}
+connection_recycle_time = 200
diff --git a/charms/neutron-k8s/src/templates/parts/section-federation b/charms/neutron-k8s/src/templates/parts/section-federation
new file mode 100644
index 00000000..65ee99ed
--- /dev/null
+++ b/charms/neutron-k8s/src/templates/parts/section-federation
@@ -0,0 +1,10 @@
+{% if trusted_dashboards %}
+[federation]
+{% for dashboard_url in trusted_dashboards -%}
+trusted_dashboard = {{ dashboard_url }}
+{% endfor -%}
+{% endif %}
+{% for sp in fid_sps -%}
+[{{ sp['protocol-name'] }}]
+remote_id_attribute = {{ sp['remote-id-attribute'] }}
+{% endfor -%}
diff --git a/charms/neutron-k8s/src/templates/parts/section-identity b/charms/neutron-k8s/src/templates/parts/section-identity
new file mode 100644
index 00000000..cda360f7
--- /dev/null
+++ b/charms/neutron-k8s/src/templates/parts/section-identity
@@ -0,0 +1,11 @@
+[keystone_authtoken]
+{% if identity_service.internal_host -%}
+www_authenticate_uri = {{ identity_service.internal_protocol }}://{{ identity_service.internal_host }}:{{ identity_service.internal_port }}
+auth_url = {{ identity_service.internal_protocol }}://{{ identity_service.internal_host }}:{{ identity_service.internal_port }}
+auth_type = password
+project_domain_name = {{ identity_service.service_domain_name }}
+user_domain_name = {{ identity_service.service_domain_name }}
+project_name = {{ identity_service.service_project_name }}
+username = {{ identity_service.service_user_name }}
+password = {{ identity_service.service_password }}
+{% endif -%}
diff --git a/charms/neutron-k8s/src/templates/parts/section-middleware b/charms/neutron-k8s/src/templates/parts/section-middleware
new file mode 100644
index 00000000..e65f1d98
--- /dev/null
+++ b/charms/neutron-k8s/src/templates/parts/section-middleware
@@ -0,0 +1,6 @@
+{% for section in sections -%}
+[{{section}}]
+{% for key, value in sections[section].items() -%}
+{{ key }} = {{ value }}
+{% endfor %}
+{%- endfor %}
diff --git a/charms/neutron-k8s/src/templates/parts/section-signing b/charms/neutron-k8s/src/templates/parts/section-signing
new file mode 100644
index 00000000..cb7d69ae
--- /dev/null
+++ b/charms/neutron-k8s/src/templates/parts/section-signing
@@ -0,0 +1,15 @@
+{% if enable_signing -%}
+[signing]
+{% if certfile -%}
+certfile = {{ certfile }}
+{% endif -%}
+{% if keyfile -%}
+keyfile = {{ keyfile }}
+{% endif -%}
+{% if ca_certs -%}
+ca_certs = {{ ca_certs }}
+{% endif -%}
+{% if ca_key -%}
+ca_key = {{ ca_key }}
+{% endif -%}
+{% endif -%}
\ No newline at end of file
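To see how these template parts expand, the snippet below renders
section-identity with a hand-rolled context (SimpleNamespace stands in for the
charm's real relation adapters; all of the values are invented):

    from types import SimpleNamespace

    from jinja2 import Template

    identity_service = SimpleNamespace(
        internal_protocol="http",
        internal_host="keystone.example",
        internal_port=5000,
        service_domain_name="service_domain",
        service_project_name="services",
        service_user_name="neutron",
        service_password="example-password",
    )

    with open("src/templates/parts/section-identity") as f:
        print(Template(f.read()).render(identity_service=identity_service))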
diff --git a/charms/neutron-k8s/src/templates/wsgi-neutron-api.conf.j2 b/charms/neutron-k8s/src/templates/wsgi-neutron-api.conf.j2
new file mode 100644
index 00000000..981809a9
--- /dev/null
+++ b/charms/neutron-k8s/src/templates/wsgi-neutron-api.conf.j2
@@ -0,0 +1,24 @@
+Listen {{ wsgi_config.public_port }}
+<VirtualHost *:{{ wsgi_config.public_port }}>
+    WSGIDaemonProcess neutron-api processes=3 threads=1 user={{ wsgi_config.user }} group={{ wsgi_config.group }} \
+                      display-name=%{GROUP}
+    WSGIProcessGroup neutron-api
+    WSGIScriptAlias / {{ wsgi_config.wsgi_public_script }}
+    WSGIApplicationGroup %{GLOBAL}
+    WSGIPassAuthorization On
+    <IfVersion >= 2.4>
+      ErrorLogFormat "%{cu}t %M"
+    </IfVersion>
+    ErrorLog {{ wsgi_config.error_log }}
+    CustomLog {{ wsgi_config.custom_log }} combined
+
+    <Directory /usr/bin>
+        <IfVersion >= 2.4>
+            Require all granted
+        </IfVersion>
+        <IfVersion < 2.4>
+            Order allow,deny
+            Allow from all
+        </IfVersion>
+    </Directory>
+</VirtualHost>
diff --git a/charms/neutron-k8s/test-requirements.txt b/charms/neutron-k8s/test-requirements.txt
new file mode 100644
index 00000000..8057d2c6
--- /dev/null
+++ b/charms/neutron-k8s/test-requirements.txt
@@ -0,0 +1,17 @@
+# This file is managed centrally. If you find the need to modify this as a
+# one-off, please don't. Instead, consult #openstack-charms and ask about
+# requirements management in charms via bot-control. Thank you.
+charm-tools>=2.4.4
+coverage>=3.6
+mock>=1.2
+flake8>=2.2.4,<=2.4.1
+pyflakes==2.1.1
+stestr>=2.2.0
+requests>=2.18.4
+psutil
+# oslo.i18n dropped py35 support
+oslo.i18n<4.0.0
+git+https://github.com/openstack-charmers/zaza.git#egg=zaza
+git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack
+pytz # workaround for 14.04 pip/tox
+pyudev # for ceph-* charm unit tests (not mocked?)
diff --git a/charms/neutron-k8s/tests/__init__.py b/charms/neutron-k8s/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
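The test module below still exercises the httpbin example charm from the
operator template. A minimal harness smoke test that matches this charm (a
sketch, assuming only that charm.py exports SunbeamNeutronOperatorCharm as the
tests below already do) might look like:

    import unittest

    from ops.testing import Harness

    from charm import SunbeamNeutronOperatorCharm


    class TestCharmSmoke(unittest.TestCase):
        def test_begin(self):
            # Instantiate the charm in the test harness, without a real model.
            harness = Harness(SunbeamNeutronOperatorCharm)
            self.addCleanup(harness.cleanup)
            harness.begin()
            self.assertIsNotNone(harness.charm)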
diff --git a/charms/neutron-k8s/tests/test_charm.py b/charms/neutron-k8s/tests/test_charm.py
new file mode 100644
index 00000000..d0262094
--- /dev/null
+++ b/charms/neutron-k8s/tests/test_charm.py
@@ -0,0 +1,66 @@
+# Copyright 2022 liam
+# See LICENSE file for licensing details.
+#
+# Learn more about testing at: https://juju.is/docs/sdk/testing
+
+import unittest
+from unittest.mock import Mock
+
+from charm import SunbeamNeutronOperatorCharm
+from ops.model import ActiveStatus
+from ops.testing import Harness
+
+
+class TestCharm(unittest.TestCase):
+    def setUp(self):
+        self.harness = Harness(SunbeamNeutronOperatorCharm)
+        self.addCleanup(self.harness.cleanup)
+        self.harness.begin()
+
+    def test_config_changed(self):
+        self.assertEqual(list(self.harness.charm._stored.things), [])
+        self.harness.update_config({"thing": "foo"})
+        self.assertEqual(list(self.harness.charm._stored.things), ["foo"])
+
+    def test_action(self):
+        # the harness doesn't (yet!) help much with actions themselves
+        action_event = Mock(params={"fail": ""})
+        self.harness.charm._on_fortune_action(action_event)
+
+        self.assertTrue(action_event.set_results.called)
+
+    def test_action_fail(self):
+        action_event = Mock(params={"fail": "fail this"})
+        self.harness.charm._on_fortune_action(action_event)
+
+        self.assertEqual(action_event.fail.call_args, [("fail this",)])
+
+    def test_httpbin_pebble_ready(self):
+        # Check the initial Pebble plan is empty
+        initial_plan = self.harness.get_container_pebble_plan("httpbin")
+        self.assertEqual(initial_plan.to_yaml(), "{}\n")
+        # Expected plan after Pebble ready with default config
+        expected_plan = {
+            "services": {
+                "httpbin": {
+                    "override": "replace",
+                    "summary": "httpbin",
+                    "command": "gunicorn -b 0.0.0.0:80 httpbin:app -k gevent",
+                    "startup": "enabled",
+                    "environment": {"thing": "🎁"},
+                }
+            },
+        }
+        # Get the httpbin container from the model
+        container = self.harness.model.unit.get_container("httpbin")
+        # Emit the PebbleReadyEvent carrying the httpbin container
+        self.harness.charm.on.httpbin_pebble_ready.emit(container)
+        # Get the plan now we've run PebbleReady
+        updated_plan = self.harness.get_container_pebble_plan("httpbin").to_dict()
+        # Check we've got the plan we expected
+        self.assertEqual(expected_plan, updated_plan)
+        # Check the service was started
+        service = self.harness.model.unit.get_container("httpbin").get_service("httpbin")
+        self.assertTrue(service.is_running())
+        # Ensure we set an ActiveStatus with no message
+        self.assertEqual(self.harness.model.unit.status, ActiveStatus())
diff --git a/charms/neutron-k8s/tox.ini b/charms/neutron-k8s/tox.ini
new file mode 100644
index 00000000..31301b80
--- /dev/null
+++ b/charms/neutron-k8s/tox.ini
@@ -0,0 +1,134 @@
+# Operator charm (with zaza): tox.ini
+
+[tox]
+envlist = pep8,py3
+skipsdist = True
+# NOTE: Avoid build/test env pollution by not enabling sitepackages.
+sitepackages = False
+# NOTE: Avoid false positives by not skipping missing interpreters.
+skip_missing_interpreters = False
+# NOTES:
+# * We avoid the new dependency resolver by pinning pip < 20.3, see
+#   https://github.com/pypa/pip/issues/9187
+# * Pinning dependencies requires tox >= 3.2.0, see
+#   https://tox.readthedocs.io/en/latest/config.html#conf-requires
+# * It is also necessary to pin virtualenv as a newer virtualenv would still
+#   lead to fetching the latest pip in the func* tox targets, see
+#   https://stackoverflow.com/a/38133283
+requires = pip < 20.3
+           virtualenv < 20.0
+# NOTE: https://wiki.canonical.com/engineering/OpenStack/InstallLatestToxOnOsci
+minversion = 3.2.0
+
+[testenv]
+setenv = VIRTUAL_ENV={envdir}
+         PYTHONHASHSEED=0
+         CHARM_DIR={envdir}
+install_command =
+  pip install {opts} {packages}
+commands = stestr run --slowest {posargs}
+whitelist_externals =
+  git
+  add-to-archive.py
+  bash
+  charmcraft
+passenv = HOME TERM CS_* OS_* TEST_*
+deps = -r{toxinidir}/test-requirements.txt
+
+[testenv:py35]
+basepython = python3.5
+# python3.5 is irrelevant on a focal+ charm.
+commands = /bin/true + +[testenv:py36] +basepython = python3.6 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + +[testenv:py37] +basepython = python3.7 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + +[testenv:py38] +basepython = python3.8 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + +[testenv:py3] +basepython = python3 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt + +[testenv:pep8] +basepython = python3 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt +commands = flake8 {posargs} src unit_tests tests + +[testenv:cover] +# Technique based heavily upon +# https://github.com/openstack/nova/blob/master/tox.ini +basepython = python3 +deps = -r{toxinidir}/requirements.txt + -r{toxinidir}/test-requirements.txt +setenv = + {[testenv]setenv} + PYTHON=coverage run +commands = + coverage erase + stestr run --slowest {posargs} + coverage combine + coverage html -d cover + coverage xml -o cover/coverage.xml + coverage report + +[coverage:run] +branch = True +concurrency = multiprocessing +parallel = True +source = + . +omit = + .tox/* + */charmhelpers/* + unit_tests/* + +[testenv:venv] +basepython = python3 +commands = {posargs} + +[testenv:build] +basepython = python3 +deps = -r{toxinidir}/build-requirements.txt +commands = + charmcraft build + +[testenv:func-noop] +basepython = python3 +commands = + functest-run-suite --help + +[testenv:func] +basepython = python3 +commands = + functest-run-suite --keep-model + +[testenv:func-smoke] +basepython = python3 +commands = + functest-run-suite --keep-model --smoke + +[testenv:func-dev] +basepython = python3 +commands = + functest-run-suite --keep-model --dev + +[testenv:func-target] +basepython = python3 +commands = + functest-run-suite --keep-model --bundle {posargs} + +[flake8] +# Ignore E902 because the unit_tests directory is missing in the built charm. +ignore = E402,E226,E902
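The cover environment above chains coverage and stestr. A rough Python
equivalent for running the same flow outside tox (a sketch: it assumes both
tools are on PATH and exports the same PYTHON=coverage run variable that the
setenv block provides):

    import os
    import subprocess

    env = dict(os.environ, PYTHON="coverage run")
    for cmd in (
        ["coverage", "erase"],
        ["stestr", "run", "--slowest"],
        ["coverage", "combine"],
        ["coverage", "html", "-d", "cover"],
        ["coverage", "xml", "-o", "cover/coverage.xml"],
        ["coverage", "report"],
    ):
        # Stop at the first failing step, mirroring tox's behaviour.
        subprocess.run(cmd, check=True, env=env)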