Misc fixes and testing

* Add functional tests
* Add service user / group
* Remove observability_libs
* Upgrade keystone identity_service lib
* Use admin URL
* Use service token

Change-Id: Id52a92eccead21a9a354dd2f9c26ab8271a60568
Guillaume Boutry 2023-07-24 14:50:06 +02:00
parent 9bc0f8717d
commit e21ab648ee
16 changed files with 149 additions and 408 deletions

View File

@@ -0,0 +1,5 @@
[gerrit]
host=review.opendev.org
port=29418
project=openstack/charm-barbican-k8s.git
defaultbranch=main

View File

@@ -0,0 +1,11 @@
- project:
    templates:
      - openstack-python3-charm-yoga-jobs
      - openstack-cover-jobs
      - microk8s-func-test
    vars:
      charm_build_name: barbican-k8s
      juju_channel: 3.2/stable
      juju_classic_mode: false
      microk8s_channel: 1.26-strict/stable
      microk8s_classic_mode: false

View File

@@ -4,5 +4,4 @@ echo "INFO: Fetching libs from charmhub."
 charmcraft fetch-lib charms.data_platform_libs.v0.database_requires
 charmcraft fetch-lib charms.keystone_k8s.v1.identity_service
 charmcraft fetch-lib charms.rabbitmq_k8s.v0.rabbitmq
-charmcraft fetch-lib charms.observability_libs.v1.kubernetes_service_patch
 charmcraft fetch-lib charms.traefik_k8s.v1.ingress

View File

@@ -100,7 +100,7 @@ LIBAPI = 1
 # Increment this PATCH version before using `charmcraft publish-lib` or reset
 # to 0 if you are raising the major API version
-LIBPATCH = 0
+LIBPATCH = 1

 logger = logging.getLogger(__name__)
@@ -349,6 +349,11 @@ class IdentityServiceRequires(Object):
         """Return the public_auth_url."""
         return self.get_remote_app_data('public-auth-url')

+    @property
+    def admin_role(self) -> str:
+        """Return the admin_role."""
+        return self.get_remote_app_data('admin-role')
+
     def register_services(self, service_endpoints: dict,
                           region: str) -> None:
         """Request access to the IdentityService server."""
@@ -481,7 +486,8 @@ class IdentityServiceProvides(Object):
             internal_auth_url: str,
             admin_auth_url: str,
             public_auth_url: str,
-            service_credentials: str):
+            service_credentials: str,
+            admin_role: str):
         logging.debug("Setting identity_service connection information.")
         _identity_service_rel = None
         for relation in self.framework.model.relations[relation_name]:
@@ -516,3 +522,4 @@ class IdentityServiceProvides(Object):
         app_data["admin-auth-url"] = admin_auth_url
         app_data["public-auth-url"] = public_auth_url
         app_data["service-credentials"] = service_credentials
+        app_data["admin-role"] = admin_role

View File

@ -1,341 +0,0 @@
# Copyright 2021 Canonical Ltd.
# See LICENSE file for licensing details.
"""# KubernetesServicePatch Library.
This library is designed to enable developers to more simply patch the Kubernetes Service created
by Juju during the deployment of a sidecar charm. When sidecar charms are deployed, Juju creates a
service named after the application in the namespace (named after the Juju model). This service by
default contains a "placeholder" port, which is 65535/TCP.
When modifying the default set of resources managed by Juju, one must consider the lifecycle of the
charm. In this case, any modifications to the default service (created during deployment), will be
overwritten during a charm upgrade.
When initialised, this library binds a handler to the parent charm's `install` and `upgrade_charm`
events which applies the patch to the cluster. This should ensure that the service ports are
correct throughout the charm's life.
The constructor simply takes a reference to the parent charm, and a list of
[`lightkube`](https://github.com/gtsystem/lightkube) ServicePorts that each define a port for the
service. For information regarding the `lightkube` `ServicePort` model, please visit the
`lightkube` [docs](https://gtsystem.github.io/lightkube-models/1.23/models/core_v1/#serviceport).
Optionally, a name of the service (in case service name needs to be patched as well), labels,
selectors, and annotations can be provided as keyword arguments.
## Getting Started
To get started using the library, you just need to fetch the library using `charmcraft`. **Note
that you also need to add `lightkube` and `lightkube-models` to your charm's `requirements.txt`.**
```shell
cd some-charm
charmcraft fetch-lib charms.observability_libs.v1.kubernetes_service_patch
cat << EOF >> requirements.txt
lightkube
lightkube-models
EOF
```
Then, to initialise the library:
For `ClusterIP` services:
```python
# ...
from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch
from lightkube.models.core_v1 import ServicePort
class SomeCharm(CharmBase):
def __init__(self, *args):
# ...
port = ServicePort(443, name=f"{self.app.name}")
self.service_patcher = KubernetesServicePatch(self, [port])
# ...
```
For `LoadBalancer`/`NodePort` services:
```python
# ...
from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch
from lightkube.models.core_v1 import ServicePort
class SomeCharm(CharmBase):
def __init__(self, *args):
# ...
port = ServicePort(443, name=f"{self.app.name}", targetPort=443, nodePort=30666)
self.service_patcher = KubernetesServicePatch(
self, [port], "LoadBalancer"
)
# ...
```
Port protocols can also be specified. Valid protocols are `"TCP"`, `"UDP"`, and `"SCTP"`
```python
# ...
from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch
from lightkube.models.core_v1 import ServicePort
class SomeCharm(CharmBase):
def __init__(self, *args):
# ...
tcp = ServicePort(443, name=f"{self.app.name}-tcp", protocol="TCP")
udp = ServicePort(443, name=f"{self.app.name}-udp", protocol="UDP")
sctp = ServicePort(443, name=f"{self.app.name}-sctp", protocol="SCTP")
self.service_patcher = KubernetesServicePatch(self, [tcp, udp, sctp])
# ...
```
Bound with custom events by providing `refresh_event` argument:
For example, you would like to have a configurable port in your charm and want to apply
service patch every time charm config is changed.
```python
from charms.observability_libs.v1.kubernetes_service_patch import KubernetesServicePatch
from lightkube.models.core_v1 import ServicePort
class SomeCharm(CharmBase):
def __init__(self, *args):
# ...
port = ServicePort(int(self.config["charm-config-port"]), name=f"{self.app.name}")
self.service_patcher = KubernetesServicePatch(
self,
[port],
refresh_event=self.on.config_changed
)
# ...
```
Additionally, you may wish to use mocks in your charm's unit testing to ensure that the library
does not try to make any API calls, or open any files during testing that are unlikely to be
present, and could break your tests. The easiest way to do this is during your test `setUp`:
```python
# ...
@patch("charm.KubernetesServicePatch", lambda x, y: None)
def setUp(self, *unused):
self.harness = Harness(SomeCharm)
# ...
```
"""
import logging
from types import MethodType
from typing import List, Literal, Optional, Union
from lightkube import ApiError, Client
from lightkube.core import exceptions
from lightkube.models.core_v1 import ServicePort, ServiceSpec
from lightkube.models.meta_v1 import ObjectMeta
from lightkube.resources.core_v1 import Service
from lightkube.types import PatchType
from ops.charm import CharmBase
from ops.framework import BoundEvent, Object
logger = logging.getLogger(__name__)
# The unique Charmhub library identifier, never change it
LIBID = "0042f86d0a874435adef581806cddbbb"
# Increment this major API version when introducing breaking changes
LIBAPI = 1
# Increment this PATCH version before using `charmcraft publish-lib` or reset
# to 0 if you are raising the major API version
LIBPATCH = 7
ServiceType = Literal["ClusterIP", "LoadBalancer"]
class KubernetesServicePatch(Object):
"""A utility for patching the Kubernetes service set up by Juju."""
def __init__(
self,
charm: CharmBase,
ports: List[ServicePort],
service_name: Optional[str] = None,
service_type: ServiceType = "ClusterIP",
additional_labels: Optional[dict] = None,
additional_selectors: Optional[dict] = None,
additional_annotations: Optional[dict] = None,
*,
refresh_event: Optional[Union[BoundEvent, List[BoundEvent]]] = None,
):
"""Constructor for KubernetesServicePatch.
Args:
charm: the charm that is instantiating the library.
ports: a list of ServicePorts
service_name: allows setting custom name to the patched service. If none given,
application name will be used.
service_type: desired type of K8s service. Default value is in line with ServiceSpec's
default value.
additional_labels: Labels to be added to the kubernetes service (by default only
"app.kubernetes.io/name" is set to the service name)
additional_selectors: Selectors to be added to the kubernetes service (by default only
"app.kubernetes.io/name" is set to the service name)
additional_annotations: Annotations to be added to the kubernetes service.
refresh_event: an optional bound event or list of bound events which
will be observed to re-apply the patch (e.g. on port change).
The `install` and `upgrade-charm` events would be observed regardless.
"""
super().__init__(charm, "kubernetes-service-patch")
self.charm = charm
self.service_name = service_name if service_name else self._app
self.service = self._service_object(
ports,
service_name,
service_type,
additional_labels,
additional_selectors,
additional_annotations,
)
# Make mypy type checking happy that self._patch is a method
assert isinstance(self._patch, MethodType)
# Ensure this patch is applied during the 'install' and 'upgrade-charm' events
self.framework.observe(charm.on.install, self._patch)
self.framework.observe(charm.on.upgrade_charm, self._patch)
self.framework.observe(charm.on.update_status, self._patch)
# apply user defined events
if refresh_event:
if not isinstance(refresh_event, list):
refresh_event = [refresh_event]
for evt in refresh_event:
self.framework.observe(evt, self._patch)
def _service_object(
self,
ports: List[ServicePort],
service_name: Optional[str] = None,
service_type: ServiceType = "ClusterIP",
additional_labels: Optional[dict] = None,
additional_selectors: Optional[dict] = None,
additional_annotations: Optional[dict] = None,
) -> Service:
"""Creates a valid Service representation.
Args:
ports: a list of ServicePorts
service_name: allows setting custom name to the patched service. If none given,
application name will be used.
service_type: desired type of K8s service. Default value is in line with ServiceSpec's
default value.
additional_labels: Labels to be added to the kubernetes service (by default only
"app.kubernetes.io/name" is set to the service name)
additional_selectors: Selectors to be added to the kubernetes service (by default only
"app.kubernetes.io/name" is set to the service name)
additional_annotations: Annotations to be added to the kubernetes service.
Returns:
Service: A valid representation of a Kubernetes Service with the correct ports.
"""
if not service_name:
service_name = self._app
labels = {"app.kubernetes.io/name": self._app}
if additional_labels:
labels.update(additional_labels)
selector = {"app.kubernetes.io/name": self._app}
if additional_selectors:
selector.update(additional_selectors)
return Service(
apiVersion="v1",
kind="Service",
metadata=ObjectMeta(
namespace=self._namespace,
name=service_name,
labels=labels,
annotations=additional_annotations, # type: ignore[arg-type]
),
spec=ServiceSpec(
selector=selector,
ports=ports,
type=service_type,
),
)
def _patch(self, _) -> None:
"""Patch the Kubernetes service created by Juju to map the correct port.
Raises:
PatchFailed: if patching fails due to lack of permissions, or otherwise.
"""
try:
client = Client()
except exceptions.ConfigError as e:
logger.warning("Error creating k8s client: %s", e)
return
try:
if self._is_patched(client):
return
if self.service_name != self._app:
self._delete_and_create_service(client)
client.patch(Service, self.service_name, self.service, patch_type=PatchType.MERGE)
except ApiError as e:
if e.status.code == 403:
logger.error("Kubernetes service patch failed: `juju trust` this application.")
else:
logger.error("Kubernetes service patch failed: %s", str(e))
else:
logger.info("Kubernetes service '%s' patched successfully", self._app)
def _delete_and_create_service(self, client: Client):
service = client.get(Service, self._app, namespace=self._namespace)
service.metadata.name = self.service_name # type: ignore[attr-defined]
service.metadata.resourceVersion = service.metadata.uid = None # type: ignore[attr-defined] # noqa: E501
client.delete(Service, self._app, namespace=self._namespace)
client.create(service)
def is_patched(self) -> bool:
"""Reports if the service patch has been applied.
Returns:
bool: A boolean indicating if the service patch has been applied.
"""
client = Client()
return self._is_patched(client)
def _is_patched(self, client: Client) -> bool:
# Get the relevant service from the cluster
try:
service = client.get(Service, name=self.service_name, namespace=self._namespace)
except ApiError as e:
if e.status.code == 404 and self.service_name != self._app:
return False
logger.error("Kubernetes service get failed: %s", str(e))
raise
# Construct a list of expected ports, should the patch be applied
expected_ports = [(p.port, p.targetPort) for p in self.service.spec.ports]
# Construct a list in the same manner, using the fetched service
fetched_ports = [
(p.port, p.targetPort) for p in service.spec.ports # type: ignore[attr-defined]
] # noqa: E501
return expected_ports == fetched_ports
@property
def _app(self) -> str:
"""Name of the current Juju application.
Returns:
str: A string containing the name of the current Juju application.
"""
return self.charm.app.name
@property
def _namespace(self) -> str:
"""The Kubernetes namespace we're running in.
Returns:
str: A string containing the name of the current Kubernetes namespace.
"""
with open("/var/run/secrets/kubernetes.io/serviceaccount/namespace", "r") as f:
return f.read().strip()

View File

@@ -1,11 +1,10 @@
-name: charm-barbican-k8s
+name: barbican-k8s
 display-name: Barbican
-summary: A very short one-line summary of the charm.
+summary: Openstack Key Manager service
 description: |
-  A single sentence that says what the charm is, concisely and memorably.
-  A paragraph of one to three short sentences, that describe what the charm does.
-  A third paragraph that explains what need the charm meets.
-  Finally, a paragraph that describes whom the charm is useful for.
+  Barbican is the OpenStack Key Manager service.
+  It provides secure storage, provisioning and management of secret data.
+  This includes keying material such as Symmetric Keys, Asymmetric Keys, Certificates and raw binary data.
 maintainer: Openstack Charmers <openstack-charmers@lists.ubuntu.com>
 source: https://opendev.org/openstack/charm-barbican-k8s
 issues: https://bugs.launchpad.net/charm-barbican-k8s
@@ -44,10 +43,8 @@ resources:
   barbican-api-image:
     type: oci-image
     description: OCI image for OpenStack Barbican API
-    # kolla/ubuntu-binary-barbican-api:yoga
-    upstream-source: docker.io/kolla/ubuntu-binary-barbican-api@sha256:8428bb4b6289075832286e098febcc2ff9954df97c279cf8f18c798d188c8e3e
+    upstream-source: ghcr.io/openstack-snaps/barbican-consolidated:2023.1
   barbican-worker-image:
     type: oci-image
     description: OCI image for OpenStack Barbican worker
-    # kolla/ubuntu-binary-barbican-worker:yoga
-    upstream-source: docker.io/kolla/ubuntu-binary-barbican-worker@sha256:4c9d8252bbb8b99d729b28eb586d9c3c08c3a882b076682bae83a0e01600b5f8
+    upstream-source: ghcr.io/openstack-snaps/barbican-consolidated:2023.1

View File

@@ -0,0 +1,10 @@
- project:
    templates:
      - charm-publish-jobs
    vars:
      needs_charm_build: true
      charm_build_name: barbican-k8s
      build_type: charmcraft
      publish_charm: true
      charmcraft_channel: 2.0/stable
      publish_channel: 2023.1/edge

charms/barbican-k8s/rename.sh (new executable file)
View File

@@ -0,0 +1,13 @@
#!/bin/bash
charm=$(grep "charm_build_name" osci.yaml | awk '{print $2}')
echo "renaming ${charm}_*.charm to ${charm}.charm"
echo -n "pwd: "
pwd
ls -al
echo "Removing bad downloaded charm maybe?"
if [[ -e "${charm}.charm" ]];
then
rm "${charm}.charm"
fi
echo "Renaming charm here."
mv ${charm}_*.charm ${charm}.charm

View File

@@ -47,7 +47,6 @@ class WSGIBarbicanAdminConfigContext(sunbeam_ctxts.ConfigContext):
     def context(self) -> dict:
         """WSGI configuration options."""
-        log_svc_name = self.charm.service_name.replace("-", "_")
         return {
             "name": self.charm.service_name,
             "public_port": 9312,
@@ -55,8 +54,8 @@ class WSGIBarbicanAdminConfigContext(sunbeam_ctxts.ConfigContext):
             "group": self.charm.service_group,
             "wsgi_admin_script": "/usr/bin/barbican-wsgi-api",
             "wsgi_public_script": "/usr/bin/barbican-wsgi-api",
-            "error_log": f"/var/log/apache2/{log_svc_name}_error.log",
-            "custom_log": f"/var/log/apache2/{log_svc_name}_access.log",
+            "error_log": "/dev/stdout",
+            "custom_log": "/dev/stdout",
         }
@@ -81,27 +80,12 @@ class BarbicanWorkerPebbleHandler(sunbeam_chandlers.ServicePebbleHandler):
                     "override": "replace",
                     "summary": "Barbican Worker",
                     "command": "barbican-worker",
-                    "startup": "enabled",
+                    "user": "barbican",
+                    "group": "barbican",
                 }
             },
         }

-    def get_healthcheck_layer(self) -> dict:
-        """Health check pebble layer.
-
-        :returns: pebble health check layer configuration for scheduler service
-        :rtype: dict
-        """
-        return {
-            "checks": {
-                "online": {
-                    "override": "replace",
-                    "level": "ready",
-                    "exec": {"command": "service barbican-worker status"},
-                },
            }
-        }
-
     def default_container_configs(
         self,
     ) -> List[sunbeam_core.ContainerConfigFile]:
@@ -144,12 +128,7 @@ class BarbicanOperatorCharm(sunbeam_charm.OSBaseOperatorAPICharm):
     def configure_unit(self, event: framework.EventBase) -> None:
         """Run configuration on this unit."""
         self.disable_barbican_config()
-        self.check_leader_ready()
-        self.check_relation_handlers_ready()
-        self.init_container_services()
-        self.check_pebble_handlers_ready()
-        self.run_db_sync()
-        self._state.unit_bootstrapped = True
+        super().configure_unit(event)

     @property
     def config_contexts(self) -> List[sunbeam_ctxts.ConfigContext]:
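Two effects of these hunks are worth spelling out: Apache error/access logs now stream to the container's stdout instead of files under /var/log/apache2, and the worker daemon runs as the `barbican` user and group through its Pebble service definition, while the bootstrap calls dropped from `configure_unit` (leader/relation/pebble checks and `run_db_sync`) are presumably performed by `super().configure_unit(event)` in ops_sunbeam. A sketch of the resulting Pebble services block (the top-level layer keys and the service name are not shown in the diff and are assumed):

```python
# Illustration only: the worker's Pebble services entry after this change.
# The "barbican-worker" service key is assumed; the field values come from the hunk.
worker_services = {
    "barbican-worker": {
        "override": "replace",
        "summary": "Barbican Worker",
        "command": "barbican-worker",
        "user": "barbican",   # run as the barbican user/group rather than the
        "group": "barbican",  # container default, replacing "startup": "enabled"
    }
}
```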

View File

@@ -1,5 +1,4 @@
 [DEFAULT]
-log_dir = /var/log/barbican
 lock_path = /var/lock/barbican
 state_path = /var/lib/barbican
@@ -9,11 +8,11 @@ transport_url = {{ amqp.transport_url }}
 sql_connection = {{ database.connection }}
 db_auto_create = false

-[keystone_authtoken]
 {% include "parts/section-identity" %}
+{% include "parts/section-service-user" %}

 [secretstore]
-namespace = barbican.crypto.plugin
 enabled_secretstore_plugins = store_crypto

 [crypto]

View File

@@ -1,9 +1,18 @@
-{% if identity_service.internal_auth_url -%}
-www_authenticate_uri = {{ identity_service.internal_auth_url }}
+[keystone_authtoken]
+{% if identity_service.admin_auth_url -%}
+auth_url = {{ identity_service.admin_auth_url }}
+interface = admin
+{% elif identity_service.internal_auth_url -%}
 auth_url = {{ identity_service.internal_auth_url }}
+interface = internal
+{% elif identity_service.internal_host -%}
+auth_url = {{ identity_service.internal_protocol }}://{{ identity_service.internal_host }}:{{ identity_service.internal_port }}
+interface = internal
+{% endif -%}
+{% if identity_service.public_auth_url -%}
+www_authenticate_uri = {{ identity_service.public_auth_url }}
 {% elif identity_service.internal_host -%}
 www_authenticate_uri = {{ identity_service.internal_protocol }}://{{ identity_service.internal_host }}:{{ identity_service.internal_port }}
-auth_url = {{ identity_service.internal_protocol }}://{{ identity_service.internal_host }}:{{ identity_service.internal_port }}
 {% endif -%}
 auth_type = password
 project_domain_name = {{ identity_service.service_domain_name }}
@@ -11,6 +20,8 @@ user_domain_name = {{ identity_service.service_domain_name }}
 project_name = {{ identity_service.service_project_name }}
 username = {{ identity_service.service_user_name }}
 password = {{ identity_service.service_password }}
+service_token_roles = {{ identity_service.admin_role }}
+service_token_roles_required = True
 # XXX Region should come from the id relation here
 region_name = {{ options.region }}
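The reworked fragment prefers the admin endpoint for token validation (`interface = admin`), falls back to the internal endpoint, and advertises the public endpoint as `www_authenticate_uri`; the new `service_token_roles` lines require incoming service tokens to carry the role published over the relation. A short rendering sketch of a simplified version of the fragment (sample URLs and values are placeholders; jinja2 is used directly here rather than the charm's template machinery):

```python
# Illustration only: render a trimmed-down parts/section-identity to show the
# endpoint selection order and the service-token settings.
from jinja2 import Template

fragment = """\
[keystone_authtoken]
{% if identity_service.admin_auth_url -%}
auth_url = {{ identity_service.admin_auth_url }}
interface = admin
{% elif identity_service.internal_auth_url -%}
auth_url = {{ identity_service.internal_auth_url }}
interface = internal
{% endif -%}
service_token_roles = {{ identity_service.admin_role }}
service_token_roles_required = True
"""

# Sample relation data; none of these values come from the diff.
identity_service = {
    "admin_auth_url": "http://keystone-admin.example:5000/v3",
    "internal_auth_url": "http://keystone-internal.example:5000/v3",
    "admin_role": "admin",
}

# With admin_auth_url set, the rendered section targets the admin endpoint and
# requires service tokens to carry the admin role.
print(Template(fragment).render(identity_service=identity_service))
```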

View File

@@ -0,0 +1,17 @@
{% if identity_service.service_domain_id -%}
[service_user]
{% if identity_service.admin_auth_url -%}
auth_url = {{ identity_service.admin_auth_url }}
{% elif identity_service.internal_auth_url -%}
auth_url = {{ identity_service.internal_auth_url }}
{% elif identity_service.internal_host -%}
auth_url = {{ identity_service.internal_protocol }}://{{ identity_service.internal_host }}:{{ identity_service.internal_port }}
{% endif -%}
send_service_user_token = true
auth_type = password
project_domain_id = {{ identity_service.service_domain_id }}
user_domain_id = {{ identity_service.service_domain_id }}
project_name = {{ identity_service.service_project_name }}
username = {{ identity_service.service_user_name }}
password = {{ identity_service.service_password }}
{% endif -%}
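With `send_service_user_token = true`, the `[service_user]` credentials let barbican's auth middleware obtain a token for its own service account and send it as `X-Service-Token` alongside the user token on calls to other OpenStack services; on the receiving side, the `service_token_roles` / `service_token_roles_required` options in `[keystone_authtoken]` decide which roles such a service token must carry. A hedged sketch of the token fetch this configuration implies, using keystoneauth1 directly (all endpoint and credential values are placeholders, not values from this change):

```python
# Illustration only: obtain a service token with the kind of credentials the
# [service_user] section describes. Real deployments let keystonemiddleware
# handle this; values below are placeholders.
from keystoneauth1 import session
from keystoneauth1.identity import v3

service_auth = v3.Password(
    auth_url="http://keystone.example:5000/v3",  # auth_url
    username="barbican",                         # service_user_name
    password="secret",                           # service_password
    project_name="services",                     # service_project_name
    user_domain_id="default",                    # service_domain_id
    project_domain_id="default",
)
sess = session.Session(auth=service_auth)
# The resulting token is what gets forwarded as the X-Service-Token header.
print(sess.get_token())
```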

View File

@@ -1,11 +1,10 @@
 bundle: kubernetes
 applications:
   mysql:
     charm: ch:mysql-k8s
     channel: 8.0/stable
     scale: 1
-    trust: false
+    trust: true

   # Currently traefik is required for networking things.
   # If this isn't present, the units will hang at "installing agent".
@@ -14,12 +13,6 @@ applications:
     channel: 1.0/stable
     scale: 1
     trust: true
-  traefik-public:
-    charm: ch:traefik-k8s
-    channel: 1.0/stable
-    scale: 1
-    trust: true
     options:
       kubernetes-service-annotations: metallb.universe.tf/address-pool=public
@@ -32,9 +25,9 @@ applications:
   keystone:
     charm: ch:keystone-k8s
-    channel: yoga/edge
+    channel: 2023.1/edge
     scale: 1
-    trust: true
+    trust: false
     options:
       admin-role: admin
     storage:
@@ -44,15 +37,15 @@ applications:
   barbican:
     charm: ../../barbican-k8s.charm
     scale: 1
-    trust: true
+    trust: false
     resources:
-      barbican-api-image: kolla/ubuntu-binary-barbican-api:yoga
-      barbican-worker-image: kolla/ubuntu-binary-barbican-worker:yoga
+      barbican-api-image: ghcr.io/openstack-snaps/barbican-consolidated:2023.1
+      barbican-worker-image: ghcr.io/openstack-snaps/barbican-consolidated:2023.1

 relations:
 - - traefik:ingress
   - keystone:ingress-internal
-- - traefik-public:ingress
+- - traefik:ingress
   - keystone:ingress-public
 - - mysql:database
@@ -66,5 +59,5 @@ relations:
   - barbican:identity-service
 - - traefik:ingress
   - barbican:ingress-internal
-- - traefik-public:ingress
+- - traefik:ingress
   - barbican:ingress-public

View File

@@ -0,0 +1,38 @@
gate_bundles:
  - smoke
smoke_bundles:
  - smoke
# There is no storage provider at the moment so cannot run tests.
configure:
  - zaza.charm_tests.noop.setup.basic_setup
tests:
  - zaza.openstack.charm_tests.barbican.tests.BarbicanTempestTestK8S
tests_options:
  trust:
    - smoke
  ignore_hard_deploy_errors:
    - smoke
  tempest:
    default:
      smoke: True
target_deploy_status:
  traefik:
    workload-status: active
    workload-status-message-regex: '^$'
  traefik-public:
    workload-status: active
    workload-status-message-regex: '^$'
  rabbitmq:
    workload-status: active
    workload-status-message-regex: '^$'
  keystone:
    workload-status: active
    workload-status-message-regex: '^$'
  mysql:
    workload-status: active
    workload-status-message-regex: '^.*$'
  barbican:
    workload-status: active
    workload-status-message-regex: '^.*$'

View File

@@ -16,7 +16,6 @@
 """Unit tests for Barbican operator."""

-import mock
 import ops_sunbeam.test_utils as test_utils

 import charm
@@ -46,11 +45,7 @@ class TestBarbicanOperatorCharm(test_utils.CharmTestCase):
     PATCHES = []

-    @mock.patch(
-        "charms.observability_libs.v1.kubernetes_service_patch."
-        "KubernetesServicePatch"
-    )
-    def setUp(self, mock_patch):
+    def setUp(self):
         """Set up environment for unit test."""
         super().setUp(charm, self.PATCHES)
         self.harness = test_utils.get_harness(

View File

@@ -99,7 +99,7 @@ commands = {[testenv:lint]commands}
 description = Check code against coding style standards
 deps =
     black
-    flake8<6
+    flake8<6 # Pin version until https://github.com/savoirfairelinux/flake8-copyright/issues/19 is merged
     flake8-docstrings
     flake8-copyright
     flake8-builtins
@@ -116,16 +116,22 @@ commands =
 [testenv:func-noop]
 basepython = python3
+deps =
+    git+https://github.com/openstack-charmers/zaza.git@libjuju-3.1#egg=zaza
+    git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack
+    git+https://opendev.org/openstack/tempest.git#egg=tempest
 commands =
     functest-run-suite --help

 [testenv:func]
 basepython = python3
+deps = {[testenv:func-noop]deps}
 commands =
     functest-run-suite --keep-model

 [testenv:func-smoke]
 basepython = python3
+deps = {[testenv:func-noop]deps}
 setenv =
     TEST_MODEL_SETTINGS = automatically-retry-hooks=true
     TEST_MAX_RESOLVE_COUNT = 5
@@ -134,11 +140,13 @@ commands =
 [testenv:func-dev]
 basepython = python3
+deps = {[testenv:func-noop]deps}
 commands =
     functest-run-suite --keep-model --dev

 [testenv:func-target]
 basepython = python3
+deps = {[testenv:func-noop]deps}
 commands =
     functest-run-suite --keep-model --bundle {posargs}