Update all external libs
Update all external libs to their latest minor patches.

Fix py3 failures in nova-k8s, heat-k8s
Fix tempest tests bundle

Change-Id: I080b8d9d5d026fa69934132b93e26c6a3a1d90aa
This commit is contained in:
parent fe0fcaf659
commit bd1d5b0350
@@ -418,7 +418,10 @@ class HeatOperatorCharm(sunbeam_charm.OSBaseOperatorAPICharm):
)
return

if self.traefik_route_public:
if (
self.traefik_route_public
and self.traefik_route_public.interface.is_ready()
):
logger.debug("Sending traefik config for public interface")
self.traefik_route_public.interface.submit_to_traefik(
config=self.traefik_config

@@ -427,7 +430,10 @@ class HeatOperatorCharm(sunbeam_charm.OSBaseOperatorAPICharm):
if self.traefik_route_public.ready:
self._update_service_endpoints()

if self.traefik_route_internal:
if (
self.traefik_route_internal
and self.traefik_route_internal.interface.is_ready()
):
logger.debug("Sending traefik config for internal interface")
self.traefik_route_internal.interface.submit_to_traefik(
config=self.traefik_config

@@ -713,7 +713,10 @@ class NovaOperatorCharm(sunbeam_charm.OSBaseOperatorAPICharm):
)
return

if self.traefik_route_public:
if (
self.traefik_route_public
and self.traefik_route_public.interface.is_ready()
):
logger.debug("Sending traefik config for public interface")
self.traefik_route_public.interface.submit_to_traefik(
config=self.traefik_config

@@ -723,7 +726,10 @@ class NovaOperatorCharm(sunbeam_charm.OSBaseOperatorAPICharm):
# Any http/https changes are detected here
self.set_config_on_update()

if self.traefik_route_internal:
if (
self.traefik_route_internal
and self.traefik_route_internal.interface.is_ready()
):
logger.debug("Sending traefik config for internal interface")
self.traefik_route_internal.interface.submit_to_traefik(
config=self.traefik_config

@@ -3,11 +3,14 @@
pushd libs/external

echo "INFO: Fetching libs from charmhub."
charmcraft fetch-lib charms.certificate_transfer_interface.v0.certificate_transfer
charmcraft fetch-lib charms.data_platform_libs.v0.data_interfaces
charmcraft fetch-lib charms.grafana_k8s.v0.grafana_auth
charmcraft fetch-lib charms.grafana_k8s.v0.grafana_dashboard
charmcraft fetch-lib charms.grafana_agent.v0.cos_agent
charmcraft fetch-lib charms.loki_k8s.v1.loki_push_api
charmcraft fetch-lib charms.observability_libs.v0.juju_topology
charmcraft fetch-lib charms.observability_libs.v1.kubernetes_service_patch
charmcraft fetch-lib charms.operator_libs_linux.v0.sysctl
charmcraft fetch-lib charms.operator_libs_linux.v2.snap
charmcraft fetch-lib charms.prometheus_k8s.v0.prometheus_scrape
charmcraft fetch-lib charms.rabbitmq_k8s.v0.rabbitmq
@@ -21,7 +21,9 @@ Example:
from ops.charm import CharmBase, RelationJoinedEvent
from ops.main import main

from lib.charms.certificate_transfer_interface.v0.certificate_transfer import CertificateTransferProvides # noqa: E501 W505
from lib.charms.certificate_transfer_interface.v0.certificate_transfer import (
CertificateTransferProvides,
)


class DummyCertificateTransferProviderCharm(CharmBase):

@@ -36,7 +38,9 @@ class DummyCertificateTransferProviderCharm(CharmBase):
certificate = "my certificate"
ca = "my CA certificate"
chain = ["certificate 1", "certificate 2"]
self.certificate_transfer.set_certificate(certificate=certificate, ca=ca, chain=chain, relation_id=event.relation.id)
self.certificate_transfer.set_certificate(
certificate=certificate, ca=ca, chain=chain, relation_id=event.relation.id
)


if __name__ == "__main__":

@@ -95,7 +99,7 @@ juju relate <certificate_transfer provider charm> <certificate_transfer requirer

import json
import logging
from typing import List
from typing import List, Mapping

from jsonschema import exceptions, validate  # type: ignore[import-untyped]
from ops.charm import CharmBase, CharmEvents, RelationBrokenEvent, RelationChangedEvent

@@ -109,7 +113,7 @@ LIBAPI = 0

# Increment this PATCH version before using `charmcraft publish-lib` or reset
# to 0 if you are raising the major API version
LIBPATCH = 5
LIBPATCH = 7

PYDEPS = ["jsonschema"]

@@ -210,7 +214,7 @@ class CertificateRemovedEvent(EventBase):
self.relation_id = snapshot["relation_id"]


def _load_relation_data(raw_relation_data: dict) -> dict:
def _load_relation_data(raw_relation_data: Mapping[str, str]) -> dict:
"""Load relation data from the relation data bag.

Args:
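The widened signature in this hunk reflects that ops hands relation data to charms as a read-only `RelationDataContent` mapping rather than a plain `dict`, so `Mapping[str, str]` is the honest type. A minimal sketch of the decode step such a loader typically performs (the key names are illustrative, and the JSON-per-key convention is an assumption here, not lifted from this library's body):

```python
import json
from typing import Mapping


def load_relation_data(raw: Mapping[str, str]) -> dict:
    # Databag values are strings; decode the JSON-encoded ones, keep the rest verbatim.
    loaded = {}
    for key, value in raw.items():
        try:
            loaded[key] = json.loads(value)
        except json.JSONDecodeError:
            loaded[key] = value
    return loaded


print(load_relation_data({"chain": '["certificate 1", "certificate 2"]', "ca": "my CA"}))
```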
@@ -313,7 +317,7 @@ class CertificateTransferProvides(Object):
class CertificateTransferRequires(Object):
"""TLS certificates requirer class to be instantiated by TLS certificates requirers."""

on = CertificateTransferRequirerCharmEvents()
on = CertificateTransferRequirerCharmEvents()  # type: ignore

def __init__(
self,

@@ -379,7 +383,7 @@ class CertificateTransferRequires(Object):
)

def _on_relation_broken(self, event: RelationBrokenEvent) -> None:
"""Handler triggered on relation broken event.
"""Handle relation broken event.

Args:
event: Juju event

File diff suppressed because it is too large

@@ -206,21 +206,19 @@ class GrafanaAgentMachineCharm(GrafanaAgentCharm)
```
"""

import base64
import json
import logging
import lzma
from collections import namedtuple
from itertools import chain
from pathlib import Path
from typing import TYPE_CHECKING, Any, Callable, ClassVar, Dict, List, Optional, Set, Union
from typing import TYPE_CHECKING, Any, Callable, ClassVar, Dict, List, Optional, Set, Tuple, Union

import pydantic
from cosl import JujuTopology
from cosl import GrafanaDashboard, JujuTopology
from cosl.rules import AlertRules
from ops.charm import RelationChangedEvent
from ops.framework import EventBase, EventSource, Object, ObjectEvents
from ops.model import Relation, Unit
from ops.model import Relation
from ops.testing import CharmType

if TYPE_CHECKING:

@@ -236,9 +234,9 @@ if TYPE_CHECKING:

LIBID = "dc15fa84cef84ce58155fb84f6c6213a"
LIBAPI = 0
LIBPATCH = 5
LIBPATCH = 8

PYDEPS = ["cosl", "pydantic<2"]
PYDEPS = ["cosl", "pydantic < 2"]

DEFAULT_RELATION_NAME = "cos-agent"
DEFAULT_PEER_RELATION_NAME = "peers"

@@ -251,31 +249,6 @@ logger = logging.getLogger(__name__)
SnapEndpoint = namedtuple("SnapEndpoint", "owner, name")


class GrafanaDashboard(str):
"""Grafana Dashboard encoded json; lzma-compressed."""

# TODO Replace this with a custom type when pydantic v2 released (end of 2023 Q1?)
# https://github.com/pydantic/pydantic/issues/4887
@staticmethod
def _serialize(raw_json: Union[str, bytes]) -> "GrafanaDashboard":
if not isinstance(raw_json, bytes):
raw_json = raw_json.encode("utf-8")
encoded = base64.b64encode(lzma.compress(raw_json)).decode("utf-8")
return GrafanaDashboard(encoded)

def _deserialize(self) -> Dict:
try:
raw = lzma.decompress(base64.b64decode(self.encode("utf-8"))).decode()
return json.loads(raw)
except json.decoder.JSONDecodeError as e:
logger.error("Invalid Dashboard format: %s", e)
return {}

def __repr__(self):
"""Return string representation of self."""
return "<GrafanaDashboard>"


class CosAgentProviderUnitData(pydantic.BaseModel):
"""Unit databag model for `cos-agent` relation."""

@@ -285,6 +258,9 @@ class CosAgentProviderUnitData(pydantic.BaseModel):
metrics_alert_rules: dict
log_alert_rules: dict
dashboards: List[GrafanaDashboard]
# subordinate is no longer used but we should keep it until we bump the library to ensure
# we don't break compatibility.
subordinate: Optional[bool] = None

# The following entries may vary across units of the same principal app.
# this data does not need to be forwarded to the gagent leader

@@ -303,9 +279,9 @@ class CosAgentPeersUnitData(pydantic.BaseModel):
# We need the principal unit name and relation metadata to be able to render identifiers
# (e.g. topology) on the leader side, after all the data moves into peer data (the grafana
# agent leader can only see its own principal, because it is a subordinate charm).
principal_unit_name: str
principal_relation_id: str
principal_relation_name: str
unit_name: str
relation_id: str
relation_name: str

# The only data that is forwarded to the leader is data that needs to go into the app databags
# of the outgoing o11y relations.

@@ -325,7 +301,7 @@ class CosAgentPeersUnitData(pydantic.BaseModel):
TODO: Switch to using `model_post_init` when pydantic v2 is released?
https://github.com/pydantic/pydantic/issues/1729#issuecomment-1300576214
"""
return self.principal_unit_name.split("/")[0]
return self.unit_name.split("/")[0]


class COSAgentProvider(Object):

@@ -578,18 +554,20 @@ class COSAgentRequirer(Object):
if not (provider_data := self._validated_provider_data(raw)):
return

# Copy data from the principal relation to the peer relation, so the leader could
# Copy data from the cos_agent relation to the peer relation, so the leader could
# follow up.
# Save the originating unit name, so it could be used for topology later on by the leader.
data = CosAgentPeersUnitData(  # peer relation databag model
principal_unit_name=event.unit.name,
principal_relation_id=str(event.relation.id),
principal_relation_name=event.relation.name,
unit_name=event.unit.name,
relation_id=str(event.relation.id),
relation_name=event.relation.name,
metrics_alert_rules=provider_data.metrics_alert_rules,
log_alert_rules=provider_data.log_alert_rules,
dashboards=provider_data.dashboards,
)
self.peer_relation.data[self._charm.unit][data.KEY] = data.json()
self.peer_relation.data[self._charm.unit][
f"{CosAgentPeersUnitData.KEY}-{event.unit.name}"
] = data.json()

# We can't easily tell if the data that was changed is limited to only the data
# that goes into peer relation (in which case, if this is not a leader unit, we wouldn't

@@ -609,53 +587,34 @@ class COSAgentRequirer(Object):
self.on.data_changed.emit()  # pyright: ignore

@property
def _principal_unit(self) -> Optional[Unit]:
"""Return the principal unit for a relation.
def _remote_data(self) -> List[Tuple[CosAgentProviderUnitData, JujuTopology]]:
"""Return a list of remote data from each of the related units.

Assumes that the relation is of type subordinate.
Relies on the fact that, for subordinate relations, the only remote unit visible to
*this unit* is the principal unit that this unit is attached to.
"""
if relations := self._principal_relations:
# Technically it's a list, but for subordinates there can only be one relation
principal_relation = next(iter(relations))
if units := principal_relation.units:
# Technically it's a list, but for subordinates there can only be one
return next(iter(units))
all_data = []

return None
for relation in self._charm.model.relations[self._relation_name]:
if not relation.units:
continue
unit = next(iter(relation.units))
if not (raw := relation.data[unit].get(CosAgentProviderUnitData.KEY)):
continue
if not (provider_data := self._validated_provider_data(raw)):
continue

@property
def _principal_relations(self):
# Technically it's a list, but for subordinates there can only be one.
return self._charm.model.relations[self._relation_name]
topology = JujuTopology(
model=self._charm.model.name,
model_uuid=self._charm.model.uuid,
application=unit.app.name,
unit=unit.name,
)

@property
def _principal_unit_data(self) -> Optional[CosAgentProviderUnitData]:
"""Return the principal unit's data.
all_data.append((provider_data, topology))

Assumes that the relation is of type subordinate.
Relies on the fact that, for subordinate relations, the only remote unit visible to
*this unit* is the principal unit that this unit is attached to.
"""
if not (relations := self._principal_relations):
return None

# Technically it's a list, but for subordinates there can only be one relation
principal_relation = next(iter(relations))

if not (units := principal_relation.units):
return None

# Technically it's a list, but for subordinates there can only be one
unit = next(iter(units))
if not (raw := principal_relation.data[unit].get(CosAgentProviderUnitData.KEY)):
return None

if not (provider_data := self._validated_provider_data(raw)):
return None

return provider_data
return all_data

def _gather_peer_data(self) -> List[CosAgentPeersUnitData]:
"""Collect data from the peers.

@@ -673,18 +632,21 @@ class COSAgentRequirer(Object):
app_names: Set[str] = set()

for unit in chain((self._charm.unit,), relation.units):
if not relation.data.get(unit) or not (
raw := relation.data[unit].get(CosAgentPeersUnitData.KEY)
):
logger.info(f"peer {unit} has not set its primary data yet; skipping for now...")
if not relation.data.get(unit):
continue

data = CosAgentPeersUnitData(**json.loads(raw))
app_name = data.app_name
# Have we already seen this principal app?
if app_name in app_names:
continue
peer_data.append(data)
for unit_name in relation.data.get(unit):  # pyright: ignore
if not unit_name.startswith(CosAgentPeersUnitData.KEY):
continue
raw = relation.data[unit].get(unit_name)
if raw is None:
continue
data = CosAgentPeersUnitData(**json.loads(raw))
# Have we already seen this principal app?
if (app_name := data.app_name) in app_names:
continue
peer_data.append(data)
app_names.add(app_name)

return peer_data

@@ -720,7 +682,7 @@ class COSAgentRequirer(Object):
def metrics_jobs(self) -> List[Dict]:
"""Parse the relation data contents and extract the metrics jobs."""
scrape_jobs = []
if data := self._principal_unit_data:
for data, topology in self._remote_data:
for job in data.metrics_scrape_jobs:
# In #220, relation schema changed from a simplified dict to the standard
# `scrape_configs`.

@@ -730,6 +692,26 @@ class COSAgentRequirer(Object):
"job_name": job["job_name"],
"metrics_path": job["path"],
"static_configs": [{"targets": [f"localhost:{job['port']}"]}],
# We include insecure_skip_verify because we are always scraping localhost.
# Even if we have the certs for the scrape targets, we'd rather specify the scrape
# jobs with localhost rather than the SAN DNS the cert was issued for.
"tls_config": {"insecure_skip_verify": True},
}

# Apply labels to the scrape jobs
for static_config in job.get("static_configs", []):
topo_as_dict = topology.as_dict(excluded_keys=["charm_name"])
static_config["labels"] = {
# Be sure to keep labels from static_config
**static_config.get("labels", {}),
# TODO: We should add a new method in juju_topology.py
# that like `as_dict` method, returns the keys with juju_ prefix
# https://github.com/canonical/cos-lib/issues/18
**{
"juju_{}".format(key): value
for key, value in topo_as_dict.items()
if value
},
}

scrape_jobs.append(job)

@@ -740,7 +722,7 @@ class COSAgentRequirer(Object):
def snap_log_endpoints(self) -> List[SnapEndpoint]:
"""Fetch logging endpoints exposed by related snaps."""
plugs = []
if data := self._principal_unit_data:
for data, _ in self._remote_data:
targets = data.log_slots
if targets:
for target in targets:

@@ -780,7 +762,7 @@ class COSAgentRequirer(Object):
model=self._charm.model.name,
model_uuid=self._charm.model.uuid,
application=app_name,
# For the topology unit, we could use `data.principal_unit_name`, but that unit
# For the topology unit, we could use `data.unit_name`, but that unit
# name may not be very stable: `_gather_peer_data` de-duplicates by app name so
# the exact unit name that turns up first in the iterator may vary from time to
# time. So using the grafana-agent unit name instead.

@@ -813,9 +795,9 @@ class COSAgentRequirer(Object):

dashboards.append(
{
"relation_id": data.principal_relation_id,
"relation_id": data.relation_id,
# We have the remote charm name - use it for the identifier
"charm": f"{data.principal_relation_name}-{app_name}",
"charm": f"{data.relation_name}-{app_name}",
"content": content,
"title": title,
}

@@ -20,6 +20,10 @@ For instance, a Promtail or Grafana agent charm which needs to send logs to Loki
send telemetry, such as logs, to Loki through a Log Proxy by implementing the consumer side of the
`loki_push_api` relation interface.

- `LogForwarder`: This object can be used by any Charmed Operator which needs to send the workload
standard output (stdout) through Pebble's log forwarding mechanism, to Loki endpoints through the
`loki_push_api` relation interface.

Filtering logs in Loki is largely performed on the basis of labels. In the Juju ecosystem, Juju
topology labels are used to uniquely identify the workload which generates telemetry like logs.

@@ -57,7 +61,7 @@ and three optional arguments.
Subsequently, a Loki charm may instantiate the `LokiPushApiProvider` in its constructor as
follows:

from charms.loki_k8s.v0.loki_push_api import LokiPushApiProvider
from charms.loki_k8s.v1.loki_push_api import LokiPushApiProvider
from loki_server import LokiServer
...

@@ -163,7 +167,7 @@ instantiating it, typically in the constructor of your charm (the one which
sends logs).

```python
from charms.loki_k8s.v0.loki_push_api import LokiPushApiConsumer
from charms.loki_k8s.v1.loki_push_api import LokiPushApiConsumer

class LokiClientCharm(CharmBase):

@@ -349,6 +353,45 @@ These can be monitored via the PromtailDigestError events via:
)
```

## LogForwarder class Usage

Let's say that we have a charm's workload that writes logs to the standard output (stdout),
and we need to send those logs to a workload implementing the `loki_push_api` interface,
such as `Loki` or `Grafana Agent`. To know how to reach a Loki instance, a charm would
typically use the `loki_push_api` interface.

Use the `LogForwarder` class by instantiating it in the `__init__` method of the charm:

```python
from charms.loki_k8s.v1.loki_push_api import LogForwarder

...

def __init__(self, *args):
...
self._log_forwarder = LogForwarder(
self,
relation_name="logging"  # optional, defaults to `logging`
)
```

The `LogForwarder` by default will observe relation events on the `logging` endpoint and
enable/disable log forwarding automatically.
Next, modify the `metadata.yaml` file to add:

The `log-forwarding` relation in the `requires` section:
```yaml
requires:
logging:
interface: loki_push_api
optional: true
```

Once the LogForwarder class is implemented in your charm and the relation (implementing the
`loki_push_api` interface) is active and healthy, the library will inject a Pebble layer in
each workload container the charm has access to, to configure Pebble's log forwarding
feature and start sending logs to Loki.

## Alerting Rules

This charm library also supports gathering alerting rules from all related Loki client

@@ -447,13 +490,14 @@ from io import BytesIO
from pathlib import Path
from typing import Any, Dict, List, Optional, Tuple, Union
from urllib import request
from urllib.error import HTTPError
from urllib.error import URLError

import yaml
from charms.observability_libs.v0.juju_topology import JujuTopology
from cosl import JujuTopology
from ops.charm import (
CharmBase,
HookEvent,
PebbleReadyEvent,
RelationBrokenEvent,
RelationCreatedEvent,
RelationDepartedEvent,

@@ -463,6 +507,7 @@ from ops.charm import (
WorkloadEvent,
)
from ops.framework import EventBase, EventSource, Object, ObjectEvents
from ops.jujuversion import JujuVersion
from ops.model import Container, ModelError, Relation
from ops.pebble import APIError, ChangeError, Layer, PathError, ProtocolError

@@ -474,7 +519,9 @@ LIBAPI = 1

# Increment this PATCH version before using `charmcraft publish-lib` or reset
# to 0 if you are raising the major API version
LIBPATCH = 0
LIBPATCH = 9

PYDEPS = ["cosl"]

logger = logging.getLogger(__name__)

@@ -487,12 +534,17 @@ PROMTAIL_BASE_URL = "https://github.com/canonical/loki-k8s-operator/releases/dow
# To update Promtail version you only need to change the PROMTAIL_VERSION and
# update all sha256 sums in PROMTAIL_BINARIES. To support a new architecture
# you only need to add a new key value pair for the architecture in PROMTAIL_BINARIES.
PROMTAIL_VERSION = "v2.5.0"
PROMTAIL_VERSION = "v2.9.7"
PROMTAIL_BINARIES = {
"amd64": {
"filename": "promtail-static-amd64",
"zipsha": "543e333b0184e14015a42c3c9e9e66d2464aaa66eca48b29e185a6a18f67ab6d",
"binsha": "17e2e271e65f793a9fbe81eab887b941e9d680abe82d5a0602888c50f5e0cac9",
"zipsha": "6873cbdabf23062aeefed6de5f00ff382710332af3ab90a48c253ea17e08f465",
"binsha": "28da9b99f81296fe297831f3bc9d92aea43b4a92826b8ff04ba433b8cb92fb50",
},
"arm64": {
"filename": "promtail-static-arm64",
"zipsha": "c083fdb45e5c794103f974eeb426489b4142438d9e10d0ae272b2aff886e249b",
"binsha": "4cd055c477a301c0bdfdbcea514e6e93f6df5d57425ce10ffc77f3e16fec1ddf",
},
}

@@ -755,7 +807,11 @@ class AlertRules:
alert_rule["labels"] = {}

if self.topology:
alert_rule["labels"].update(self.topology.label_matcher_dict)
# only insert labels that do not already exist
for label, val in self.topology.label_matcher_dict.items():
if label not in alert_rule["labels"]:
alert_rule["labels"][label] = val

# insert juju topology filters into a prometheus alert rule
# logql doesn't like empty matchers, so add a job matcher which hits
# any string as a "wildcard" which the topology labels will

@@ -1780,7 +1836,12 @@ class LogProxyConsumer(ConsumerBase):

# architecture used for promtail binary
arch = platform.processor()
self._arch = "amd64" if arch == "x86_64" else arch
if arch in ["x86_64", "amd64"]:
self._arch = "amd64"
elif arch in ["aarch64", "arm64", "armv8b", "armv8l"]:
self._arch = "arm64"
else:
self._arch = arch

events = self._charm.on[relation_name]
self.framework.observe(events.relation_created, self._on_relation_created)

@@ -2045,7 +2106,24 @@ class LogProxyConsumer(ConsumerBase):
- "binsha": sha256 sum of unpacked promtail binary
container: container into which promtail is to be uploaded.
"""
with request.urlopen(promtail_info["url"]) as r:
# Check for Juju proxy variables and fall back to standard ones if not set
# If no Juju proxy variable was set, we set proxies to None to let the ProxyHandler get
# the proxy env variables from the environment
proxies = {
# The ProxyHandler uses only the protocol names as keys
# https://docs.python.org/3/library/urllib.request.html#urllib.request.ProxyHandler
"https": os.environ.get("JUJU_CHARM_HTTPS_PROXY", ""),
"http": os.environ.get("JUJU_CHARM_HTTP_PROXY", ""),
# The ProxyHandler uses `no` for the no_proxy key
# https://github.com/python/cpython/blob/3.12/Lib/urllib/request.py#L2553
"no": os.environ.get("JUJU_CHARM_NO_PROXY", ""),
}
proxies = {k: v for k, v in proxies.items() if v != ""} or None

proxy_handler = request.ProxyHandler(proxies)
opener = request.build_opener(proxy_handler)

with opener.open(promtail_info["url"]) as r:
file_bytes = r.read()
file_path = os.path.join(BINARY_DIR, promtail_info["filename"] + ".gz")
with open(file_path, "wb") as f:
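For context, the hunk above replaces a bare `request.urlopen` with an opener that prefers Juju's charm proxy settings and otherwise defers to the standard `*_proxy` environment variables. A standalone sketch of that fallback (the download URL is illustrative):

```python
import os
from urllib import request

# Juju charm proxy settings take precedence; empty values are dropped.
proxies = {
    "https": os.environ.get("JUJU_CHARM_HTTPS_PROXY", ""),
    "http": os.environ.get("JUJU_CHARM_HTTP_PROXY", ""),
    "no": os.environ.get("JUJU_CHARM_NO_PROXY", ""),
}
# With proxies=None, ProxyHandler reads http_proxy/https_proxy/no_proxy itself.
proxies = {k: v for k, v in proxies.items() if v != ""} or None

opener = request.build_opener(request.ProxyHandler(proxies))
with opener.open("https://example.com/promtail-static-amd64.gz") as resp:  # illustrative URL
    payload = resp.read()
```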
@@ -2260,7 +2338,7 @@ class LogProxyConsumer(ConsumerBase):

try:
self._obtain_promtail(promtail_binaries[self._arch], container)
except HTTPError as e:
except URLError as e:
msg = f"Promtail binary couldn't be downloaded - {str(e)}"
logger.warning(msg)
self.on.promtail_digest_error.emit(msg)

@@ -2313,6 +2391,233 @@ class LogProxyConsumer(ConsumerBase):
return {cont: self._charm.unit.get_container(cont) for cont in self._logs_scheme.keys()}


class _PebbleLogClient:
@staticmethod
def check_juju_version() -> bool:
"""Make sure the Juju version supports Log Forwarding."""
juju_version = JujuVersion.from_environ()
if not juju_version > JujuVersion(version=str("3.3")):
msg = f"Juju version {juju_version} does not support Pebble log forwarding. Juju >= 3.4 is needed."
logger.warning(msg)
return False
return True

@staticmethod
def _build_log_target(
unit_name: str, loki_endpoint: str, topology: JujuTopology, enable: bool
) -> Dict:
"""Build a log target for the log forwarding Pebble layer.

Log target's syntax for enabling/disabling forwarding is explained here:
https://github.com/canonical/pebble?tab=readme-ov-file#log-forwarding
"""
services_value = ["all"] if enable else ["-all"]

log_target = {
"override": "replace",
"services": services_value,
"type": "loki",
"location": loki_endpoint,
}
if enable:
log_target.update(
{
"labels": {
"product": "Juju",
"charm": topology._charm_name,
"juju_model": topology._model,
"juju_model_uuid": topology._model_uuid,
"juju_application": topology._application,
"juju_unit": topology._unit,
},
}
)

return {unit_name: log_target}

@staticmethod
def _build_log_targets(
loki_endpoints: Optional[Dict[str, str]], topology: JujuTopology, enable: bool
):
"""Build all the targets for the log forwarding Pebble layer."""
targets = {}
if not loki_endpoints:
return targets

for unit_name, endpoint in loki_endpoints.items():
targets.update(
_PebbleLogClient._build_log_target(
unit_name=unit_name,
loki_endpoint=endpoint,
topology=topology,
enable=enable,
)
)
return targets

@staticmethod
def disable_inactive_endpoints(
container: Container, active_endpoints: Dict[str, str], topology: JujuTopology
):
"""Disable forwarding for inactive endpoints by checking against the Pebble plan."""
pebble_layer = container.get_plan().to_dict().get("log-targets", None)
if not pebble_layer:
return

for unit_name, target in pebble_layer.items():
# If the layer is a disabled log forwarding endpoint, skip it
if "-all" in target["services"]:  # pyright: ignore
continue

if unit_name not in active_endpoints:
layer = Layer(
{  # pyright: ignore
"log-targets": _PebbleLogClient._build_log_targets(
loki_endpoints={unit_name: "(removed)"},
topology=topology,
enable=False,
)
}
)
container.add_layer(f"{container.name}-log-forwarding", layer=layer, combine=True)

@staticmethod
def enable_endpoints(
container: Container, active_endpoints: Dict[str, str], topology: JujuTopology
):
"""Enable forwarding for the specified Loki endpoints."""
layer = Layer(
{  # pyright: ignore
"log-targets": _PebbleLogClient._build_log_targets(
loki_endpoints=active_endpoints,
topology=topology,
enable=True,
)
}
)
container.add_layer(f"{container.name}-log-forwarding", layer, combine=True)


class LogForwarder(ConsumerBase):
"""Forward the standard outputs of all workloads operated by a charm to one or multiple Loki endpoints.

This class implements Pebble log forwarding. Juju >= 3.4 is needed.
"""

def __init__(
self,
charm: CharmBase,
*,
relation_name: str = DEFAULT_RELATION_NAME,
alert_rules_path: str = DEFAULT_ALERT_RULES_RELATIVE_PATH,
recursive: bool = True,
skip_alert_topology_labeling: bool = False,
):
_PebbleLogClient.check_juju_version()
super().__init__(
charm, relation_name, alert_rules_path, recursive, skip_alert_topology_labeling
)
self._charm = charm
self._relation_name = relation_name

on = self._charm.on[self._relation_name]
self.framework.observe(on.relation_joined, self._update_logging)
self.framework.observe(on.relation_changed, self._update_logging)
self.framework.observe(on.relation_departed, self._update_logging)
self.framework.observe(on.relation_broken, self._update_logging)

for container_name in self._charm.meta.containers.keys():
snake_case_container_name = container_name.replace("-", "_")
self.framework.observe(
getattr(self._charm.on, f"{snake_case_container_name}_pebble_ready"),
self._on_pebble_ready,
)

def _on_pebble_ready(self, event: PebbleReadyEvent):
if not (loki_endpoints := self._retrieve_endpoints_from_relation()):
logger.warning("No Loki endpoints available")
return

self._update_endpoints(event.workload, loki_endpoints)

def _update_logging(self, _):
"""Update the log forwarding to match the active Loki endpoints."""
if not (loki_endpoints := self._retrieve_endpoints_from_relation()):
logger.warning("No Loki endpoints available")
return

for container in self._charm.unit.containers.values():
self._update_endpoints(container, loki_endpoints)

def _retrieve_endpoints_from_relation(self) -> dict:
loki_endpoints = {}

# Get the endpoints from relation data
for relation in self._charm.model.relations[self._relation_name]:
loki_endpoints.update(self._fetch_endpoints(relation))

return loki_endpoints

def _update_endpoints(self, container: Container, loki_endpoints: dict):
_PebbleLogClient.disable_inactive_endpoints(
container=container,
active_endpoints=loki_endpoints,
topology=self.topology,
)
_PebbleLogClient.enable_endpoints(
container=container, active_endpoints=loki_endpoints, topology=self.topology
)

def is_ready(self, relation: Optional[Relation] = None):
"""Check if the relation is active and healthy."""
if not relation:
relations = self._charm.model.relations[self._relation_name]
if not relations:
return False
return all(self.is_ready(relation) for relation in relations)

try:
if self._extract_urls(relation):
return True
return False
except (KeyError, json.JSONDecodeError):
return False

def _extract_urls(self, relation: Relation) -> Dict[str, str]:
"""Default getter function to extract Loki endpoints from a relation.

Returns:
A dictionary of remote units and the respective Loki endpoint.
{
"loki/0": "http://loki:3100/loki/api/v1/push",
"another-loki/0": "http://another-loki:3100/loki/api/v1/push",
}
"""
endpoints: Dict = {}

for unit in relation.units:
endpoint = relation.data[unit]["endpoint"]
deserialized_endpoint = json.loads(endpoint)
url = deserialized_endpoint["url"]
endpoints[unit.name] = url

return endpoints

def _fetch_endpoints(self, relation: Relation) -> Dict[str, str]:
"""Fetch Loki Push API endpoints from relation data using the endpoints getter."""
endpoints: Dict = {}

if not self.is_ready(relation):
logger.warning(f"The relation '{relation.name}' is not ready yet.")
return endpoints

# if the code gets here, the function won't raise anymore because it's
# also called in is_ready()
endpoints = self._extract_urls(relation)

return endpoints


class CosTool:
"""Uses cos-tool to inject label matchers into alert rule expressions and validate rules."""
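One subtlety in `check_juju_version` above: `juju_version > JujuVersion(version=str("3.3"))` is how the library expresses "3.4 or newer", since any 3.4+ release compares greater than the bare "3.3". A small illustration (the version string is an example; the library itself reads it from the environment via `JujuVersion.from_environ()`):

```python
from ops.jujuversion import JujuVersion

juju_version = JujuVersion("3.4.0")  # example value; real code uses JujuVersion.from_environ()
supports_log_forwarding = juju_version > JujuVersion(version="3.3")
print(supports_log_forwarding)  # True for 3.4 and later
```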
@@ -127,7 +127,7 @@ import logging
from types import MethodType
from typing import List, Literal, Optional, Union

from lightkube import ApiError, Client
from lightkube import ApiError, Client  # pyright: ignore
from lightkube.core import exceptions
from lightkube.models.core_v1 import ServicePort, ServiceSpec
from lightkube.models.meta_v1 import ObjectMeta

@@ -146,7 +146,7 @@ LIBAPI = 1

# Increment this PATCH version before using `charmcraft publish-lib` or reset
# to 0 if you are raising the major API version
LIBPATCH = 7
LIBPATCH = 9

ServiceType = Literal["ClusterIP", "LoadBalancer"]

@@ -268,7 +268,7 @@ class KubernetesServicePatch(Object):
PatchFailed: if patching fails due to lack of permissions, or otherwise.
"""
try:
client = Client()
client = Client()  # pyright: ignore
except exceptions.ConfigError as e:
logger.warning("Error creating k8s client: %s", e)
return

@@ -300,7 +300,7 @@ class KubernetesServicePatch(Object):
Returns:
bool: A boolean indicating if the service patch has been applied.
"""
client = Client()
client = Client()  # pyright: ignore
return self._is_patched(client)

def _is_patched(self, client: Client) -> bool:

@@ -314,7 +314,7 @@ class KubernetesServicePatch(Object):
raise

# Construct a list of expected ports, should the patch be applied
expected_ports = [(p.port, p.targetPort) for p in self.service.spec.ports]
expected_ports = [(p.port, p.targetPort) for p in self.service.spec.ports]  # type: ignore[attr-defined]
# Construct a list in the same manner, using the fetched service
fetched_ports = [
(p.port, p.targetPort) for p in service.spec.ports  # type: ignore[attr-defined]

@@ -83,7 +83,7 @@ LIBAPI = 2

# Increment this PATCH version before using `charmcraft publish-lib` or reset
# to 0 if you are raising the major API version
LIBPATCH = 2
LIBPATCH = 5


# Regex to locate 7-bit C1 ANSI sequences

@@ -214,7 +214,7 @@ class Snap(object):
- state: a `SnapState` representation of its install status
- channel: "stable", "candidate", "beta", and "edge" are common
- revision: a string representing the snap's revision
- confinement: "classic" or "strict"
- confinement: "classic", "strict", or "devmode"
"""

def __init__(

@@ -475,6 +475,8 @@ class Snap(object):
args = []
if self.confinement == "classic":
args.append("--classic")
if self.confinement == "devmode":
args.append("--devmode")
if channel:
args.append('--channel="{}"'.format(channel))
if revision:

@@ -489,6 +491,7 @@ class Snap(object):
channel: Optional[str] = "",
cohort: Optional[str] = "",
revision: Optional[str] = None,
devmode: bool = False,
leave_cohort: Optional[bool] = False,
) -> None:
"""Refresh a snap.

@@ -497,6 +500,7 @@ class Snap(object):
channel: the channel to install from
cohort: optionally, specify a cohort.
revision: optionally, specify the revision of the snap to refresh
devmode: optionally, specify devmode confinement
leave_cohort: leave the current cohort.
"""
args = []

@@ -506,6 +510,9 @@ class Snap(object):
if revision:
args.append('--revision="{}"'.format(revision))

if devmode:
args.append("--devmode")

if not cohort:
cohort = self._cohort

@@ -530,6 +537,7 @@ class Snap(object):
self,
state: SnapState,
classic: Optional[bool] = False,
devmode: bool = False,
channel: Optional[str] = "",
cohort: Optional[str] = "",
revision: Optional[str] = None,

@@ -539,6 +547,7 @@ class Snap(object):
Args:
state: a `SnapState` to reconcile to.
classic: an (Optional) boolean indicating whether classic confinement should be used
devmode: an (Optional) boolean indicating whether devmode confinement should be used
channel: the channel to install from
cohort: optional. Specify the key of a snap cohort.
revision: optional. the revision of the snap to install/refresh

@@ -549,7 +558,15 @@ class Snap(object):
Raises:
SnapError if an error is encountered
"""
self._confinement = "classic" if classic or self._confinement == "classic" else ""
if classic and devmode:
raise ValueError("Cannot set both classic and devmode confinement")

if classic or self._confinement == "classic":
self._confinement = "classic"
elif devmode or self._confinement == "devmode":
self._confinement = "devmode"
else:
self._confinement = ""

if state not in (SnapState.Present, SnapState.Latest):
# We are attempting to remove this snap.

@@ -563,10 +580,17 @@ class Snap(object):
# We are installing or refreshing a snap.
if self._state not in (SnapState.Present, SnapState.Latest):
# The snap is not installed, so we install it.
logger.info(
"Installing snap %s, revision %s, tracking %s", self._name, revision, channel
)
self._install(channel, cohort, revision)
else:
# The snap is installed, but we are changing it (e.g., switching channels).
self._refresh(channel, cohort, revision)
logger.info(
"Refreshing snap %s, revision %s, tracking %s", self._name, revision, channel
)
self._refresh(channel=channel, cohort=cohort, revision=revision, devmode=devmode)
logger.info("The snap installation completed successfully")

self._update_snap_apps()
self._state = state

@@ -692,7 +716,7 @@ class SnapClient:
socket_path: str = "/run/snapd.socket",
opener: Optional[urllib.request.OpenerDirector] = None,
base_url: str = "http://localhost/v2/",
timeout: float = 5.0,
timeout: float = 30.0,
):
"""Initialize a client instance.

@@ -701,7 +725,7 @@ class SnapClient:
opener: specifies an opener for unix socket, if unspecified a default is used
base_url: base url for making requests to the snap client. Defaults to
http://localhost/v2/
timeout: timeout in seconds to use when making requests to the API. Default is 5.0s.
timeout: timeout in seconds to use when making requests to the API. Default is 30.0s.
"""
if opener is None:
opener = self._get_default_opener(socket_path)

@@ -892,6 +916,7 @@ def add(
state: Union[str, SnapState] = SnapState.Latest,
channel: Optional[str] = "",
classic: Optional[bool] = False,
devmode: bool = False,
cohort: Optional[str] = "",
revision: Optional[str] = None,
) -> Union[Snap, List[Snap]]:

@@ -904,6 +929,8 @@ def add(
channel: an (Optional) channel as a string. Defaults to 'latest'
classic: an (Optional) boolean specifying whether it should be added with classic
confinement. Default `False`
devmode: an (Optional) boolean specifying whether it should be added with devmode
confinement. Default `False`
cohort: an (Optional) string specifying the snap cohort to use
revision: an (Optional) string specifying the snap revision to use

@@ -920,7 +947,7 @@ def add(
if isinstance(state, str):
state = SnapState(state)

return _wrap_snap_operations(snap_names, state, channel, classic, cohort, revision)
return _wrap_snap_operations(snap_names, state, channel, classic, devmode, cohort, revision)


@_cache_init

@@ -936,8 +963,13 @@ def remove(snap_names: Union[str, List[str]]) -> Union[Snap, List[Snap]]:
snap_names = [snap_names] if isinstance(snap_names, str) else snap_names
if not snap_names:
raise TypeError("Expected at least one snap to add, received zero!")

return _wrap_snap_operations(snap_names, SnapState.Absent, "", False)
return _wrap_snap_operations(
snap_names=snap_names,
state=SnapState.Absent,
channel="",
classic=False,
devmode=False,
)


@_cache_init

@@ -946,6 +978,7 @@ def ensure(
state: str,
channel: Optional[str] = "",
classic: Optional[bool] = False,
devmode: bool = False,
cohort: Optional[str] = "",
revision: Optional[int] = None,
) -> Union[Snap, List[Snap]]:

@@ -957,6 +990,8 @@ def ensure(
channel: an (Optional) channel as a string. Defaults to 'latest'
classic: an (Optional) boolean specifying whether it should be added with classic
confinement. Default `False`
devmode: an (Optional) boolean specifying whether it should be added with devmode
confinement. Default `False`
cohort: an (Optional) string specifying the snap cohort to use
revision: an (Optional) integer specifying the snap revision to use

@@ -970,7 +1005,15 @@ def ensure(
channel = "latest"

if state in ("present", "latest") or revision:
return add(snap_names, SnapState(state), channel, classic, cohort, revision)
return add(
snap_names=snap_names,
state=SnapState(state),
channel=channel,
classic=classic,
devmode=devmode,
cohort=cohort,
revision=revision,
)
else:
return remove(snap_names)

@@ -980,6 +1023,7 @@ def _wrap_snap_operations(
state: SnapState,
channel: str,
classic: bool,
devmode: bool,
cohort: Optional[str] = "",
revision: Optional[str] = None,
) -> Union[Snap, List[Snap]]:

@@ -995,7 +1039,12 @@ def _wrap_snap_operations(
snap.ensure(state=SnapState.Absent)
else:
snap.ensure(
state=state, classic=classic, channel=channel, cohort=cohort, revision=revision
state=state,
classic=classic,
devmode=devmode,
channel=channel,
cohort=cohort,
revision=revision,
)
snaps["success"].append(snap)
except SnapError as e:

@@ -1014,13 +1063,17 @@ def _wrap_snap_operations(


def install_local(
filename: str, classic: Optional[bool] = False, dangerous: Optional[bool] = False
filename: str,
classic: Optional[bool] = False,
devmode: Optional[bool] = False,
dangerous: Optional[bool] = False,
) -> Snap:
"""Perform a snap operation.

Args:
filename: the path to a local .snap file to install
classic: whether to use classic confinement
devmode: whether to use devmode confinement
dangerous: whether --dangerous should be passed to install snaps without a signature

Raises:

@@ -1033,6 +1086,8 @@ def install_local(
]
if classic:
args.append("--classic")
if devmode:
args.append("--devmode")
if dangerous:
args.append("--dangerous")
try:
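A hedged usage sketch of the `devmode` flag threaded through `Snap.ensure`, `add`, `ensure`, and `install_local` above (the snap name and channel are made up for the example):

```python
from charms.operator_libs_linux.v2 import snap

# Install a snap under devmode confinement (name/channel illustrative).
snap.add("my-snap", channel="edge", devmode=True)

# Or reconcile an existing Snap object; classic and devmode are mutually exclusive.
my_snap = snap.SnapCache()["my-snap"]
my_snap.ensure(snap.SnapState.Latest, devmode=True, channel="edge")
```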
@@ -362,7 +362,7 @@ LIBAPI = 0

# Increment this PATCH version before using `charmcraft publish-lib` or reset
# to 0 if you are raising the major API version
LIBPATCH = 42
LIBPATCH = 46

PYDEPS = ["cosl"]

@@ -386,6 +386,7 @@ ALLOWED_KEYS = {
"basic_auth",
"tls_config",
"authorization",
"params",
}
DEFAULT_JOB = {
"metrics_path": "/metrics",

@@ -520,8 +521,8 @@ class PrometheusConfig:
# for such a target. Therefore labeling with Juju topology, excluding the
# unit name.
non_wildcard_static_config["labels"] = {
**non_wildcard_static_config.get("labels", {}),
**topology.label_matcher_dict,
**non_wildcard_static_config.get("labels", {}),
}

non_wildcard_static_configs.append(non_wildcard_static_config)

@@ -546,9 +547,9 @@ class PrometheusConfig:
if topology:
# Add topology labels
modified_static_config["labels"] = {
**modified_static_config.get("labels", {}),
**topology.label_matcher_dict,
**{"juju_unit": unit_name},
**modified_static_config.get("labels", {}),
}

# Instance relabeling for topology should be last in order.
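The reordering in the two hunks above leans on Python's dict-unpacking precedence: when the same key appears in several `**` expansions, the last occurrence wins, so listing the job's own labels after the topology labels lets a scrape job override the injected Juju topology labels. A minimal illustration (label values are made up):

```python
# Later ** expansions win on key collisions.
topology_labels = {"juju_application": "from-topology", "juju_model": "mymodel"}
job_labels = {"juju_application": "from-job"}

merged = {**topology_labels, **job_labels}
assert merged == {"juju_application": "from-job", "juju_model": "mymodel"}
```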
@ -764,7 +765,7 @@ def _validate_relation_by_interface_and_direction(
|
||||
actual_relation_interface = relation.interface_name
|
||||
if actual_relation_interface != expected_relation_interface:
|
||||
raise RelationInterfaceMismatchError(
|
||||
relation_name, expected_relation_interface, actual_relation_interface
|
||||
relation_name, expected_relation_interface, actual_relation_interface or "None"
|
||||
)
|
||||
|
||||
if expected_relation_role == RelationRole.provides:
|
||||
@ -857,7 +858,7 @@ class MonitoringEvents(ObjectEvents):
|
||||
class MetricsEndpointConsumer(Object):
|
||||
"""A Prometheus based Monitoring service."""
|
||||
|
||||
on = MonitoringEvents()
|
||||
on = MonitoringEvents() # pyright: ignore
|
||||
|
||||
def __init__(self, charm: CharmBase, relation_name: str = DEFAULT_RELATION_NAME):
|
||||
"""A Prometheus based Monitoring service.
|
||||
@ -1014,7 +1015,6 @@ class MetricsEndpointConsumer(Object):
|
||||
try:
|
||||
scrape_metadata = json.loads(relation.data[relation.app]["scrape_metadata"])
|
||||
identifier = JujuTopology.from_dict(scrape_metadata).identifier
|
||||
alerts[identifier] = self._tool.apply_label_matchers(alert_rules) # type: ignore
|
||||
|
||||
except KeyError as e:
|
||||
logger.debug(
|
||||
@ -1029,6 +1029,10 @@ class MetricsEndpointConsumer(Object):
|
||||
)
|
||||
continue
|
||||
|
||||
# We need to append the relation info to the identifier. This is to allow for cases for there are two
|
||||
# relations which eventually scrape the same application. Issue #551.
|
||||
identifier = f"{identifier}_{relation.name}_{relation.id}"
|
||||
|
||||
alerts[identifier] = alert_rules
|
||||
|
||||
_, errmsg = self._tool.validate_alert_rules(alert_rules)
|
||||
@ -1294,7 +1298,7 @@ def _resolve_dir_against_charm_path(charm: CharmBase, *path_elements: str) -> st
|
||||
class MetricsEndpointProvider(Object):
|
||||
"""A metrics endpoint for Prometheus."""
|
||||
|
||||
on = MetricsEndpointProviderEvents()
|
||||
on = MetricsEndpointProviderEvents() # pyright: ignore
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
@ -1533,12 +1537,11 @@ class MetricsEndpointProvider(Object):
|
||||
relation.data[self._charm.app]["scrape_metadata"] = json.dumps(self._scrape_metadata)
|
||||
relation.data[self._charm.app]["scrape_jobs"] = json.dumps(self._scrape_jobs)
|
||||
|
||||
if alert_rules_as_dict:
|
||||
# Update relation data with the string representation of the rule file.
|
||||
# Juju topology is already included in the "scrape_metadata" field above.
|
||||
# The consumer side of the relation uses this information to name the rules file
|
||||
# that is written to the filesystem.
|
||||
relation.data[self._charm.app]["alert_rules"] = json.dumps(alert_rules_as_dict)
|
||||
# Update relation data with the string representation of the rule file.
|
||||
# Juju topology is already included in the "scrape_metadata" field above.
|
||||
# The consumer side of the relation uses this information to name the rules file
|
||||
# that is written to the filesystem.
|
||||
relation.data[self._charm.app]["alert_rules"] = json.dumps(alert_rules_as_dict)
|
||||
|
||||
def _set_unit_ip(self, _=None):
|
||||
"""Set unit host address.
|
||||
@ -1836,14 +1839,16 @@ class MetricsEndpointAggregator(Object):
|
||||
return
|
||||
|
||||
jobs = [] + _type_convert_stored(
|
||||
self._stored.jobs
|
||||
self._stored.jobs # pyright: ignore
|
||||
) # list of scrape jobs, one per relation
|
||||
for relation in self.model.relations[self._target_relation]:
|
||||
targets = self._get_targets(relation)
|
||||
if targets and relation.app:
|
||||
jobs.append(self._static_scrape_job(targets, relation.app.name))
|
||||
|
||||
groups = [] + _type_convert_stored(self._stored.alert_rules) # list of alert rule groups
|
||||
groups = [] + _type_convert_stored(
|
||||
self._stored.alert_rules # pyright: ignore
|
||||
) # list of alert rule groups
|
||||
for relation in self.model.relations[self._alert_rules_relation]:
|
||||
unit_rules = self._get_alert_rules(relation)
|
||||
if unit_rules and relation.app:
|
||||
@ -1895,7 +1900,7 @@ class MetricsEndpointAggregator(Object):
|
||||
jobs.append(updated_job)
|
||||
relation.data[self._charm.app]["scrape_jobs"] = json.dumps(jobs)
|
||||
|
||||
if not _type_convert_stored(self._stored.jobs) == jobs:
|
||||
if not _type_convert_stored(self._stored.jobs) == jobs: # pyright: ignore
|
||||
self._stored.jobs = jobs
|
||||
|
||||
def _on_prometheus_targets_departed(self, event):
|
||||
@ -1947,7 +1952,7 @@ class MetricsEndpointAggregator(Object):
|
||||
|
||||
relation.data[self._charm.app]["scrape_jobs"] = json.dumps(jobs)
|
||||
|
||||
if not _type_convert_stored(self._stored.jobs) == jobs:
|
||||
if not _type_convert_stored(self._stored.jobs) == jobs: # pyright: ignore
|
||||
self._stored.jobs = jobs
|
||||
|
||||
def _job_name(self, appname) -> str:
|
||||
@ -2126,7 +2131,7 @@ class MetricsEndpointAggregator(Object):
|
||||
groups.append(updated_group)
|
||||
relation.data[self._charm.app]["alert_rules"] = json.dumps({"groups": groups})
|
||||
|
||||
if not _type_convert_stored(self._stored.alert_rules) == groups:
|
||||
if not _type_convert_stored(self._stored.alert_rules) == groups: # pyright: ignore
|
||||
self._stored.alert_rules = groups
|
||||
|
||||
def _on_alert_rules_departed(self, event):
|
||||
@ -2176,7 +2181,7 @@ class MetricsEndpointAggregator(Object):
|
||||
json.dumps({"groups": groups}) if groups else "{}"
|
||||
)
|
||||
|
||||
if not _type_convert_stored(self._stored.alert_rules) == groups:
|
||||
if not _type_convert_stored(self._stored.alert_rules) == groups: # pyright: ignore
|
||||
self._stored.alert_rules = groups
|
||||
|
||||
def _get_alert_rules(self, relation) -> dict:
|
||||
|
@ -1,6 +1,7 @@
|
||||
# Copyright 2021 Canonical Ltd.
|
||||
# See LICENSE file for licensing details.
|
||||
|
||||
|
||||
"""Library for the tls-certificates relation.
|
||||
|
||||
This library contains the Requires and Provides classes for handling the tls-certificates
|
||||
@ -126,6 +127,7 @@ Example:
|
||||
from charms.tls_certificates_interface.v1.tls_certificates import (
|
||||
CertificateAvailableEvent,
|
||||
CertificateExpiringEvent,
|
||||
CertificateRevokedEvent,
|
||||
TLSCertificatesRequiresV1,
|
||||
generate_csr,
|
||||
generate_private_key,
|
||||
@ -151,6 +153,9 @@ class ExampleRequirerCharm(CharmBase):
|
||||
self.framework.observe(
|
||||
self.certificates.on.certificate_expiring, self._on_certificate_expiring
|
||||
)
|
||||
self.framework.observe(
|
||||
self.certificates.on.certificate_revoked, self._on_certificate_revoked
|
||||
)
|
||||
|
||||
def _on_install(self, event) -> None:
|
||||
private_key_password = b"banana"
|
||||
@ -211,6 +216,30 @@ class ExampleRequirerCharm(CharmBase):
|
||||
)
|
||||
replicas_relation.data[self.app].update({"csr": new_csr.decode()})
|
||||
|
||||
def _on_certificate_revoked(self, event: CertificateRevokedEvent) -> None:
|
||||
replicas_relation = self.model.get_relation("replicas")
|
||||
if not replicas_relation:
|
||||
self.unit.status = WaitingStatus("Waiting for peer relation to be created")
|
||||
event.defer()
|
||||
return
|
||||
old_csr = replicas_relation.data[self.app].get("csr")
|
||||
private_key_password = replicas_relation.data[self.app].get("private_key_password")
|
||||
private_key = replicas_relation.data[self.app].get("private_key")
|
||||
new_csr = generate_csr(
|
||||
private_key=private_key.encode(),
|
||||
private_key_password=private_key_password.encode(),
|
||||
subject=self.cert_subject,
|
||||
)
|
||||
self.certificates.request_certificate_renewal(
|
||||
old_certificate_signing_request=old_csr,
|
||||
new_certificate_signing_request=new_csr,
|
||||
)
|
||||
replicas_relation.data[self.app].update({"csr": new_csr.decode()})
|
||||
replicas_relation.data[self.app].pop("certificate")
|
||||
replicas_relation.data[self.app].pop("ca")
|
||||
replicas_relation.data[self.app].pop("chain")
|
||||
self.unit.status = WaitingStatus("Waiting for new certificate")
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main(ExampleRequirerCharm)
|
||||
@ -243,7 +272,8 @@ LIBAPI = 1

# Increment this PATCH version before using `charmcraft publish-lib` or reset
# to 0 if you are raising the major API version
LIBPATCH = 10
LIBPATCH = 12


REQUIRER_JSON_SCHEMA = {
    "$schema": "http://json-schema.org/draft-04/schema#",
@ -283,7 +313,7 @@ PROVIDER_JSON_SCHEMA = {
    "type": "object",
    "title": "`tls_certificates` provider root schema",
    "description": "The `tls_certificates` root schema comprises the entire provider databag for this interface.",  # noqa: E501
    "example": [
    "examples": [
        {
            "certificates": [
                {
@ -295,7 +325,20 @@ PROVIDER_JSON_SCHEMA = {
                    "certificate": "-----BEGIN CERTIFICATE-----\nMIICvDCCAaQCFFPAOD7utDTsgFrm0vS4We18OcnKMA0GCSqGSIb3DQEBCwUAMCAx\nCzAJBgNVBAYTAlVTMREwDwYDVQQDDAh3aGF0ZXZlcjAeFw0yMjA3MjkyMTE5Mzha\nFw0yMzA3MjkyMTE5MzhaMBUxEzARBgNVBAMMCmJhbmFuYS5jb20wggEiMA0GCSqG\nSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDVpcfcBOnFuyZG+A2WQzmaBI5NXgwTCfvE\neKciqRQXhzJdUkEg7eqwFrK3y9yjhoiB6q0WNAeR+nOdS/Cw7layRtGz5skOq7Aa\nN4FZHg0or30i7Rrx7afJcGJyLpxfK/OfLmJm5QEdLXV0DZp0L5vuhhEb1EUOrMaY\nGe4iwqTyg6D7fuBili9dBVn9IvNhYMVgtiqkWVLTW4ChE0LgES4oO3rQZgp4dtM5\nsp6KwHGO766UzwGnkKRizaqmLylfVusllWNPFfp6gEaxa45N70oqGUrvGSVHWeHf\nfvkhpWx+wOnu+2A5F/Yv3UNz2v4g7Vjt7V0tjL4KMV9YklpRjTh3AgMBAAEwDQYJ\nKoZIhvcNAQELBQADggEBAChjRzuba8zjQ7NYBVas89Oy7u++MlS8xWxh++yiUsV6\nWMk3ZemsPtXc1YmXorIQohtxLxzUPm2JhyzFzU/sOLmJQ1E/l+gtZHyRCwsb20fX\nmphuJsMVd7qv/GwEk9PBsk2uDqg4/Wix0Rx5lf95juJP7CPXQJl5FQauf3+LSz0y\nwF/j+4GqvrwsWr9hKOLmPdkyKkR6bHKtzzsxL9PM8GnElk2OpaPMMnzbL/vt2IAt\nxK01ZzPxCQCzVwHo5IJO5NR/fIyFbEPhxzG17QsRDOBR9fl9cOIvDeSO04vyZ+nz\n+kA2c3fNrZFAtpIlOOmFh8Q12rVL4sAjI5mVWnNEgvI=\n-----END CERTIFICATE-----\n",  # noqa: E501
                }
            ]
        }
        },
        {
            "certificates": [
                {
                    "ca": "-----BEGIN CERTIFICATE-----\\nMIIDJTCCAg2gAwIBAgIUMsSK+4FGCjW6sL/EXMSxColmKw8wDQYJKoZIhvcNAQEL\\nBQAwIDELMAkGA1UEBhMCVVMxETAPBgNVBAMMCHdoYXRldmVyMB4XDTIyMDcyOTIx\\nMTgyN1oXDTIzMDcyOTIxMTgyN1owIDELMAkGA1UEBhMCVVMxETAPBgNVBAMMCHdo\\nYXRldmVyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA55N9DkgFWbJ/\\naqcdQhso7n1kFvt6j/fL1tJBvRubkiFMQJnZFtekfalN6FfRtA3jq+nx8o49e+7t\\nLCKT0xQ+wufXfOnxv6/if6HMhHTiCNPOCeztUgQ2+dfNwRhYYgB1P93wkUVjwudK\\n13qHTTZ6NtEF6EzOqhOCe6zxq6wrr422+ZqCvcggeQ5tW9xSd/8O1vNID/0MTKpy\\nET3drDtBfHmiUEIBR3T3tcy6QsIe4Rz/2sDinAcM3j7sG8uY6drh8jY3PWar9til\\nv2l4qDYSU8Qm5856AB1FVZRLRJkLxZYZNgreShAIYgEd0mcyI2EO/UvKxsIcxsXc\\nd45GhGpKkwIDAQABo1cwVTAfBgNVHQ4EGAQWBBRXBrXKh3p/aFdQjUcT/UcvICBL\\nODAhBgNVHSMEGjAYgBYEFFcGtcqHen9oV1CNRxP9Ry8gIEs4MA8GA1UdEwEB/wQF\\nMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAGmCEvcoFUrT9e133SHkgF/ZAgzeIziO\\nBjfAdU4fvAVTVfzaPm0yBnGqzcHyacCzbZjKQpaKVgc5e6IaqAQtf6cZJSCiJGhS\\nJYeosWrj3dahLOUAMrXRr8G/Ybcacoqc+osKaRa2p71cC3V6u2VvcHRV7HDFGJU7\\noijbdB+WhqET6Txe67rxZCJG9Ez3EOejBJBl2PJPpy7m1Ml4RR+E8YHNzB0lcBzc\\nEoiJKlDfKSO14E2CPDonnUoWBJWjEvJys3tbvKzsRj2fnLilytPFU0gH3cEjCopi\\nzFoWRdaRuNHYCqlBmso1JFDl8h4fMmglxGNKnKRar0WeGyxb4xXBGpI=\\n-----END CERTIFICATE-----\\n",  # noqa: E501
                    "chain": [
                        "-----BEGIN CERTIFICATE-----\\nMIIDJTCCAg2gAwIBAgIUMsSK+4FGCjW6sL/EXMSxColmKw8wDQYJKoZIhvcNAQEL\\nBQAwIDELMAkGA1UEBhMCVVMxETAPBgNVBAMMCHdoYXRldmVyMB4XDTIyMDcyOTIx\\nMTgyN1oXDTIzMDcyOTIxMTgyN1owIDELMAkGA1UEBhMCVVMxETAPBgNVBAMMCHdo\\nYXRldmVyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA55N9DkgFWbJ/\\naqcdQhso7n1kFvt6j/fL1tJBvRubkiFMQJnZFtekfalN6FfRtA3jq+nx8o49e+7t\\nLCKT0xQ+wufXfOnxv6/if6HMhHTiCNPOCeztUgQ2+dfNwRhYYgB1P93wkUVjwudK\\n13qHTTZ6NtEF6EzOqhOCe6zxq6wrr422+ZqCvcggeQ5tW9xSd/8O1vNID/0MTKpy\\nET3drDtBfHmiUEIBR3T3tcy6QsIe4Rz/2sDinAcM3j7sG8uY6drh8jY3PWar9til\\nv2l4qDYSU8Qm5856AB1FVZRLRJkLxZYZNgreShAIYgEd0mcyI2EO/UvKxsIcxsXc\\nd45GhGpKkwIDAQABo1cwVTAfBgNVHQ4EGAQWBBRXBrXKh3p/aFdQjUcT/UcvICBL\\nODAhBgNVHSMEGjAYgBYEFFcGtcqHen9oV1CNRxP9Ry8gIEs4MA8GA1UdEwEB/wQF\\nMAMBAf8wDQYJKoZIhvcNAQELBQADggEBAGmCEvcoFUrT9e133SHkgF/ZAgzeIziO\\nBjfAdU4fvAVTVfzaPm0yBnGqzcHyacCzbZjKQpaKVgc5e6IaqAQtf6cZJSCiJGhS\\nJYeosWrj3dahLOUAMrXRr8G/Ybcacoqc+osKaRa2p71cC3V6u2VvcHRV7HDFGJU7\\noijbdB+WhqET6Txe67rxZCJG9Ez3EOejBJBl2PJPpy7m1Ml4RR+E8YHNzB0lcBzc\\nEoiJKlDfKSO14E2CPDonnUoWBJWjEvJys3tbvKzsRj2fnLilytPFU0gH3cEjCopi\\nzFoWRdaRuNHYCqlBmso1JFDl8h4fMmglxGNKnKRar0WeGyxb4xXBGpI=\\n-----END CERTIFICATE-----\\n"  # noqa: E501, W505
                    ],
                    "certificate_signing_request": "-----BEGIN CERTIFICATE REQUEST-----\nMIICWjCCAUICAQAwFTETMBEGA1UEAwwKYmFuYW5hLmNvbTCCASIwDQYJKoZIhvcN\nAQEBBQADggEPADCCAQoCggEBANWlx9wE6cW7Jkb4DZZDOZoEjk1eDBMJ+8R4pyKp\nFBeHMl1SQSDt6rAWsrfL3KOGiIHqrRY0B5H6c51L8LDuVrJG0bPmyQ6rsBo3gVke\nDSivfSLtGvHtp8lwYnIunF8r858uYmblAR0tdXQNmnQvm+6GERvURQ6sxpgZ7iLC\npPKDoPt+4GKWL10FWf0i82FgxWC2KqRZUtNbgKETQuARLig7etBmCnh20zmynorA\ncY7vrpTPAaeQpGLNqqYvKV9W6yWVY08V+nqARrFrjk3vSioZSu8ZJUdZ4d9++SGl\nbH7A6e77YDkX9i/dQ3Pa/iDtWO3tXS2MvgoxX1iSWlGNOHcCAwEAAaAAMA0GCSqG\nSIb3DQEBCwUAA4IBAQCW1fKcHessy/ZhnIwAtSLznZeZNH8LTVOzkhVd4HA7EJW+\nKVLBx8DnN7L3V2/uPJfHiOg4Rx7fi7LkJPegl3SCqJZ0N5bQS/KvDTCyLG+9E8Y+\n7wqCmWiXaH1devimXZvazilu4IC2dSks2D8DPWHgsOdVks9bme8J3KjdNMQudegc\newWZZ1Dtbd+Rn7cpKU3jURMwm4fRwGxbJ7iT5fkLlPBlyM/yFEik4SmQxFYrZCQg\n0f3v4kBefTh5yclPy5tEH+8G0LMsbbo3dJ5mPKpAShi0QEKDLd7eR1R/712lYTK4\ndi4XaEfqERgy68O4rvb4PGlJeRGS7AmL7Ss8wfAq\n-----END CERTIFICATE REQUEST-----\n",  # noqa: E501
                    "certificate": "-----BEGIN CERTIFICATE-----\nMIICvDCCAaQCFFPAOD7utDTsgFrm0vS4We18OcnKMA0GCSqGSIb3DQEBCwUAMCAx\nCzAJBgNVBAYTAlVTMREwDwYDVQQDDAh3aGF0ZXZlcjAeFw0yMjA3MjkyMTE5Mzha\nFw0yMzA3MjkyMTE5MzhaMBUxEzARBgNVBAMMCmJhbmFuYS5jb20wggEiMA0GCSqG\nSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDVpcfcBOnFuyZG+A2WQzmaBI5NXgwTCfvE\neKciqRQXhzJdUkEg7eqwFrK3y9yjhoiB6q0WNAeR+nOdS/Cw7layRtGz5skOq7Aa\nN4FZHg0or30i7Rrx7afJcGJyLpxfK/OfLmJm5QEdLXV0DZp0L5vuhhEb1EUOrMaY\nGe4iwqTyg6D7fuBili9dBVn9IvNhYMVgtiqkWVLTW4ChE0LgES4oO3rQZgp4dtM5\nsp6KwHGO766UzwGnkKRizaqmLylfVusllWNPFfp6gEaxa45N70oqGUrvGSVHWeHf\nfvkhpWx+wOnu+2A5F/Yv3UNz2v4g7Vjt7V0tjL4KMV9YklpRjTh3AgMBAAEwDQYJ\nKoZIhvcNAQELBQADggEBAChjRzuba8zjQ7NYBVas89Oy7u++MlS8xWxh++yiUsV6\nWMk3ZemsPtXc1YmXorIQohtxLxzUPm2JhyzFzU/sOLmJQ1E/l+gtZHyRCwsb20fX\nmphuJsMVd7qv/GwEk9PBsk2uDqg4/Wix0Rx5lf95juJP7CPXQJl5FQauf3+LSz0y\nwF/j+4GqvrwsWr9hKOLmPdkyKkR6bHKtzzsxL9PM8GnElk2OpaPMMnzbL/vt2IAt\nxK01ZzPxCQCzVwHo5IJO5NR/fIyFbEPhxzG17QsRDOBR9fl9cOIvDeSO04vyZ+nz\n+kA2c3fNrZFAtpIlOOmFh8Q12rVL4sAjI5mVWnNEgvI=\n-----END CERTIFICATE-----\n",  # noqa: E501
                    "revoked": True,
                }
            ]
        },
    ],
    "properties": {
        "certificates": {
@ -323,6 +366,10 @@ PROVIDER_JSON_SCHEMA = {
                "$id": "#/properties/certificates/items/chain/items",
            },
        },
        "revoked": {
            "$id": "#/properties/certificates/items/revoked",
            "type": "boolean",
        },
    },
    "additionalProperties": True,
},
@ -412,6 +459,44 @@ class CertificateExpiredEvent(EventBase):
        self.certificate = snapshot["certificate"]


class CertificateRevokedEvent(EventBase):
    """Charm Event triggered when a TLS certificate is revoked."""

    def __init__(
        self,
        handle: Handle,
        certificate: str,
        certificate_signing_request: str,
        ca: str,
        chain: List[str],
        revoked: bool,
    ):
        super().__init__(handle)
        self.certificate = certificate
        self.certificate_signing_request = certificate_signing_request
        self.ca = ca
        self.chain = chain
        self.revoked = revoked

    def snapshot(self) -> dict:
        """Returns snapshot."""
        return {
            "certificate": self.certificate,
            "certificate_signing_request": self.certificate_signing_request,
            "ca": self.ca,
            "chain": self.chain,
            "revoked": self.revoked,
        }

    def restore(self, snapshot: dict):
        """Restores snapshot."""
        self.certificate = snapshot["certificate"]
        self.certificate_signing_request = snapshot["certificate_signing_request"]
        self.ca = snapshot["ca"]
        self.chain = snapshot["chain"]
        self.revoked = snapshot["revoked"]


class CertificateCreationRequestEvent(EventBase):
    """Charm Event triggered when a TLS certificate is required."""

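The `snapshot`/`restore` pair is what lets a deferred `CertificateRevokedEvent` survive a charm re-invocation: ops serializes the event with `snapshot()` and rebuilds it with `restore()`. A minimal sketch of that round trip — the `Handle` construction and the `__new__` shortcut are illustrative only, not how ops wires events internally:

```python
# Illustrative only: ops normally drives snapshot()/restore() itself when an
# event is deferred; this sketch just shows the round trip is lossless.
from ops.framework import Handle

original = CertificateRevokedEvent(
    handle=Handle(None, "CertificateRevokedEvent", "0"),  # hypothetical handle
    certificate="<pem>",
    certificate_signing_request="<csr pem>",
    ca="<ca pem>",
    chain=["<ca pem>"],
    revoked=True,
)
state = original.snapshot()

restored = CertificateRevokedEvent.__new__(CertificateRevokedEvent)
restored.restore(state)
assert restored.revoked and restored.ca == original.ca
```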
@ -551,7 +636,7 @@ def generate_certificate(
    ca_key: bytes,
    ca_key_password: Optional[bytes] = None,
    validity: int = 365,
    alt_names: List[str] = None,
    alt_names: Optional[List[str]] = None,
) -> bytes:
    """Generates a TLS certificate based on a CSR.

@ -679,9 +764,9 @@ def generate_csr(
    private_key: bytes,
    subject: str,
    add_unique_id_to_subject_name: bool = True,
    organization: str = None,
    email_address: str = None,
    country_name: str = None,
    organization: Optional[str] = None,
    email_address: Optional[str] = None,
    country_name: Optional[str] = None,
    private_key_password: Optional[bytes] = None,
    sans: Optional[List[str]] = None,
    sans_oid: Optional[List[str]] = None,
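These signature changes only tighten the annotations — implicit-`Optional` defaults such as `organization: str = None` are rejected by strict type checkers — so call sites are unchanged. A minimal call, assuming the library's own `generate_private_key` helper from the same module:

```python
# Sketch, assuming generate_private_key/generate_csr from this tls_certificates
# library; values are illustrative.
private_key = generate_private_key(password=b"banana")
csr = generate_csr(
    private_key=private_key,
    private_key_password=b"banana",
    subject="banana.com",
    sans=["banana.com", "www.banana.com"],  # optional, may be omitted
)
```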
@ -760,6 +845,7 @@ class CertificatesRequirerCharmEvents(CharmEvents):
    certificate_available = EventSource(CertificateAvailableEvent)
    certificate_expiring = EventSource(CertificateExpiringEvent)
    certificate_expired = EventSource(CertificateExpiredEvent)
    certificate_revoked = EventSource(CertificateRevokedEvent)


class TLSCertificatesProvidesV1(Object):
@ -821,8 +907,8 @@ class TLSCertificatesProvidesV1(Object):
    def _remove_certificate(
        self,
        relation_id: int,
        certificate: str = None,
        certificate_signing_request: str = None,
        certificate: Optional[str] = None,
        certificate_signing_request: Optional[str] = None,
    ) -> None:
        """Removes certificate from a given relation based on user provided certificate or csr.

@ -877,7 +963,11 @@ class TLSCertificatesProvidesV1(Object):
        This method is meant to be used when the Root CA has changed.
        """
        for relation in self.model.relations[self.relationship_name]:
            relation.data[self.model.app]["certificates"] = json.dumps([])
            provider_relation_data = _load_relation_data(relation.data[self.charm.app])
            provider_certificates = copy.deepcopy(provider_relation_data.get("certificates", []))
            for certificate in provider_certificates:
                certificate["revoked"] = True
            relation.data[self.model.app]["certificates"] = json.dumps(provider_certificates)

    def set_relation_certificate(
        self,
@ -1179,7 +1269,7 @@ class TLSCertificatesRequiresV1(Object):
        return False

    def _on_relation_changed(self, event: RelationChangedEvent) -> None:
        """Handler triggerred on relation changed events.
        """Handler triggered on relation changed events.

        Args:
            event: Juju event
@ -1207,12 +1297,21 @@ class TLSCertificatesRequiresV1(Object):
        ]
        for certificate in self._provider_certificates:
            if certificate["certificate_signing_request"] in requirer_csrs:
                self.on.certificate_available.emit(
                    certificate_signing_request=certificate["certificate_signing_request"],
                    certificate=certificate["certificate"],
                    ca=certificate["ca"],
                    chain=certificate["chain"],
                )
                if certificate.get("revoked", False):
                    self.on.certificate_revoked.emit(
                        certificate_signing_request=certificate["certificate_signing_request"],
                        certificate=certificate["certificate"],
                        ca=certificate["ca"],
                        chain=certificate["chain"],
                        revoked=True,
                    )
                else:
                    self.on.certificate_available.emit(
                        certificate_signing_request=certificate["certificate_signing_request"],
                        certificate=certificate["certificate"],
                        ca=certificate["ca"],
                        chain=certificate["chain"],
                    )

    def _on_update_status(self, event: UpdateStatusEvent) -> None:
        """Triggered on update status event.
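The two halves of the new revocation flow meet in the relation databag: the provider now flips `revoked` to `True` on every published certificate instead of wiping the list, and the requirer's changed-handler emits `certificate_revoked` rather than `certificate_available` for those entries. A hedged sketch of the databag transition the provider performs (the `relation` and `app` objects are assumed; field names are as in the hunks above):

```python
import copy
import json

# Sketch: what revoking all certificates writes into the provider databag.
before = json.loads(relation.data[app].get("certificates", "[]"))
after = copy.deepcopy(before)
for cert in after:
    cert["revoked"] = True  # requirers will emit certificate_revoked for these
relation.data[app]["certificates"] = json.dumps(after)
```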
268  libs/external/lib/charms/traefik_k8s/v2/ingress.py  (vendored)
@ -1,4 +1,4 @@
# Copyright 2023 Canonical Ltd.
# Copyright 2024 Canonical Ltd.
# See LICENSE file for licensing details.

r"""# Interface Library for ingress.
@ -13,7 +13,7 @@ To get started using the library, you just need to fetch the library using `char

```shell
cd some-charm
charmcraft fetch-lib charms.traefik_k8s.v1.ingress
charmcraft fetch-lib charms.traefik_k8s.v2.ingress
```

In the `metadata.yaml` of the charm, add the following:
@ -50,20 +50,13 @@ class SomeCharm(CharmBase):
    def _on_ingress_revoked(self, event: IngressPerAppRevokedEvent):
        logger.info("This app no longer has ingress")
"""
import ipaddress
import json
import logging
import socket
import typing
from dataclasses import dataclass
from typing import (
    Any,
    Dict,
    List,
    MutableMapping,
    Optional,
    Sequence,
    Tuple,
)
from typing import Any, Callable, Dict, List, MutableMapping, Optional, Sequence, Tuple, Union

import pydantic
from ops.charm import CharmBase, RelationBrokenEvent, RelationEvent
@ -79,9 +72,9 @@ LIBAPI = 2

# Increment this PATCH version before using `charmcraft publish-lib` or reset
# to 0 if you are raising the major API version
LIBPATCH = 6
LIBPATCH = 12

PYDEPS = ["pydantic<2.0"]
PYDEPS = ["pydantic"]

DEFAULT_RELATION_NAME = "ingress"
RELATION_INTERFACE = "ingress"
@ -89,59 +82,132 @@ RELATION_INTERFACE = "ingress"
log = logging.getLogger(__name__)
BUILTIN_JUJU_KEYS = {"ingress-address", "private-address", "egress-subnets"}

class DatabagModel(BaseModel):
    """Base databag model."""

    class Config:
        """Pydantic config."""

        allow_population_by_field_name = True
        """Allow instantiating this class by field name (instead of forcing alias)."""

    _NEST_UNDER = None

    @classmethod
    def load(cls, databag: MutableMapping):
        """Load this model from a Juju databag."""
        if cls._NEST_UNDER:
            return cls.parse_obj(json.loads(databag[cls._NEST_UNDER]))

        try:
            data = {k: json.loads(v) for k, v in databag.items() if k not in BUILTIN_JUJU_KEYS}
        except json.JSONDecodeError as e:
            msg = f"invalid databag contents: expecting json. {databag}"
            log.error(msg)
            raise DataValidationError(msg) from e

        try:
            return cls.parse_raw(json.dumps(data))  # type: ignore
        except pydantic.ValidationError as e:
            msg = f"failed to validate databag: {databag}"
            log.error(msg, exc_info=True)
            raise DataValidationError(msg) from e

    def dump(self, databag: Optional[MutableMapping] = None, clear: bool = True):
        """Write the contents of this model to Juju databag.

        :param databag: the databag to write the data to.
        :param clear: ensure the databag is cleared before writing it.
        """
        if clear and databag:
            databag.clear()

        if databag is None:
            databag = {}

        if self._NEST_UNDER:
            databag[self._NEST_UNDER] = self.json()

        dct = self.dict()
        for key, field in self.__fields__.items():  # type: ignore
            value = dct[key]
            databag[field.alias or key] = json.dumps(value)

        return databag
PYDANTIC_IS_V1 = int(pydantic.version.VERSION.split(".")[0]) < 2
if PYDANTIC_IS_V1:

    class DatabagModel(BaseModel):  # type: ignore
        """Base databag model."""

        class Config:
            """Pydantic config."""

            allow_population_by_field_name = True
            """Allow instantiating this class by field name (instead of forcing alias)."""

        _NEST_UNDER = None

        @classmethod
        def load(cls, databag: MutableMapping):
            """Load this model from a Juju databag."""
            if cls._NEST_UNDER:
                return cls.parse_obj(json.loads(databag[cls._NEST_UNDER]))

            try:
                data = {
                    k: json.loads(v)
                    for k, v in databag.items()
                    # Don't attempt to parse model-external values
                    if k in {f.alias for f in cls.__fields__.values()}  # type: ignore
                }
            except json.JSONDecodeError as e:
                msg = f"invalid databag contents: expecting json. {databag}"
                log.error(msg)
                raise DataValidationError(msg) from e

            try:
                return cls.parse_raw(json.dumps(data))  # type: ignore
            except pydantic.ValidationError as e:
                msg = f"failed to validate databag: {databag}"
                log.debug(msg, exc_info=True)
                raise DataValidationError(msg) from e

        def dump(self, databag: Optional[MutableMapping] = None, clear: bool = True):
            """Write the contents of this model to Juju databag.

            :param databag: the databag to write the data to.
            :param clear: ensure the databag is cleared before writing it.
            """
            if clear and databag:
                databag.clear()

            if databag is None:
                databag = {}

            if self._NEST_UNDER:
                databag[self._NEST_UNDER] = self.json(by_alias=True, exclude_defaults=True)
                return databag

            for key, value in self.dict(by_alias=True, exclude_defaults=True).items():  # type: ignore
                databag[key] = json.dumps(value)

            return databag

else:
    from pydantic import ConfigDict

    class DatabagModel(BaseModel):
        """Base databag model."""

        model_config = ConfigDict(
            # tolerate additional keys in databag
            extra="ignore",
            # Allow instantiating this class by field name (instead of forcing alias).
            populate_by_name=True,
            # Custom config key: whether to nest the whole datastructure (as json)
            # under a field or spread it out at the toplevel.
            _NEST_UNDER=None,
        )  # type: ignore
        """Pydantic config."""

        @classmethod
        def load(cls, databag: MutableMapping):
            """Load this model from a Juju databag."""
            nest_under = cls.model_config.get("_NEST_UNDER")
            if nest_under:
                return cls.model_validate(json.loads(databag[nest_under]))  # type: ignore

            try:
                data = {
                    k: json.loads(v)
                    for k, v in databag.items()
                    # Don't attempt to parse model-external values
                    if k in {(f.alias or n) for n, f in cls.__fields__.items()}  # type: ignore
                }
            except json.JSONDecodeError as e:
                msg = f"invalid databag contents: expecting json. {databag}"
                log.error(msg)
                raise DataValidationError(msg) from e

            try:
                return cls.model_validate_json(json.dumps(data))  # type: ignore
            except pydantic.ValidationError as e:
                msg = f"failed to validate databag: {databag}"
                log.debug(msg, exc_info=True)
                raise DataValidationError(msg) from e

        def dump(self, databag: Optional[MutableMapping] = None, clear: bool = True):
            """Write the contents of this model to Juju databag.

            :param databag: the databag to write the data to.
            :param clear: ensure the databag is cleared before writing it.
            """
            if clear and databag:
                databag.clear()

            if databag is None:
                databag = {}

            nest_under = self.model_config.get("_NEST_UNDER")
            if nest_under:
                databag[nest_under] = self.model_dump_json(  # type: ignore
                    by_alias=True,
                    # skip keys whose values are default
                    exclude_defaults=True,
                )
                return databag

            dct = self.model_dump(mode="json", by_alias=True, exclude_defaults=True)  # type: ignore
            databag.update({k: json.dumps(v) for k, v in dct.items()})
            return databag

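Both branches expose the same `load`/`dump` contract, so charm code stays pydantic-version-agnostic. A minimal round trip with a hypothetical subclass (field names invented for illustration; `Field` is already imported by the module):

```python
# Hypothetical model; "unit-name" exercises the alias round trip.
class ExampleData(DatabagModel):
    port: int
    unit_name: str = Field(alias="unit-name")

databag: dict = {}
ExampleData(port=8080, unit_name="app/0").dump(databag)
# databag == {"port": "8080", "unit-name": '"app/0"'}  (values are JSON-encoded)
loaded = ExampleData.load(databag)
assert loaded.port == 8080 and loaded.unit_name == "app/0"
```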
# todo: import these models from charm-relation-interfaces/ingress/v2 instead of redeclaring them
@ -172,10 +238,14 @@ class IngressRequirerAppData(DatabagModel):

    # fields on top of vanilla 'ingress' interface:
    strip_prefix: Optional[bool] = Field(
        description="Whether to strip the prefix from the ingress url.", alias="strip-prefix"
        default=False,
        description="Whether to strip the prefix from the ingress url.",
        alias="strip-prefix",
    )
    redirect_https: Optional[bool] = Field(
        description="Whether to redirect http traffic to https.", alias="redirect-https"
        default=False,
        description="Whether to redirect http traffic to https.",
        alias="redirect-https",
    )

    scheme: Optional[str] = Field(
@ -200,7 +270,12 @@ class IngressRequirerAppData(DatabagModel):
class IngressRequirerUnitData(DatabagModel):
    """Ingress requirer unit databag model."""

    host: str = Field(description="Hostname the unit wishes to be exposed.")
    host: str = Field(description="Hostname at which the unit is reachable.")
    ip: Optional[str] = Field(
        None,
        description="IP at which the unit is reachable, "
        "IP can only be None if the IP information can't be retrieved from juju.",
    )

    @validator("host", pre=True)
    def validate_host(cls, host):  # noqa: N805  # pydantic wants 'cls' as first arg
@ -208,6 +283,24 @@ class IngressRequirerUnitData(DatabagModel):
        assert isinstance(host, str), type(host)
        return host

    @validator("ip", pre=True)
    def validate_ip(cls, ip):  # noqa: N805  # pydantic wants 'cls' as first arg
        """Validate ip."""
        if ip is None:
            return None
        if not isinstance(ip, str):
            raise TypeError(f"got ip of type {type(ip)} instead of expected str")
        try:
            ipaddress.IPv4Address(ip)
            return ip
        except ipaddress.AddressValueError:
            pass
        try:
            ipaddress.IPv6Address(ip)
            return ip
        except ipaddress.AddressValueError:
            raise ValueError(f"{ip!r} is not a valid ip address")

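The two-step IPv4-then-IPv6 probe is why `import ipaddress` appears in the new import block above. The same acceptance rule can be exercised in isolation, e.g. in a unit test (helper name invented here):

```python
import ipaddress

def is_valid_ip(ip: str) -> bool:
    """Same acceptance rule as validate_ip: an IPv4 or IPv6 literal."""
    for family in (ipaddress.IPv4Address, ipaddress.IPv6Address):
        try:
            family(ip)
            return True
        except ipaddress.AddressValueError:
            continue
    return False

assert is_valid_ip("10.0.0.7")
assert is_valid_ip("2001:db8::1")
assert not is_valid_ip("not-an-ip")
```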
class RequirerSchema(BaseModel):
    """Requirer schema for Ingress."""
@ -244,6 +337,7 @@ class _IngressPerAppBase(Object):
        observe(rel_events.relation_created, self._handle_relation)
        observe(rel_events.relation_joined, self._handle_relation)
        observe(rel_events.relation_changed, self._handle_relation)
        observe(rel_events.relation_departed, self._handle_relation)
        observe(rel_events.relation_broken, self._handle_relation_broken)
        observe(charm.on.leader_elected, self._handle_upgrade_or_leader)  # type: ignore
        observe(charm.on.upgrade_charm, self._handle_upgrade_or_leader)  # type: ignore
@ -340,14 +434,6 @@ class IngressRequirerData:
    units: List["IngressRequirerUnitData"]


class TlsProviderType(typing.Protocol):
    """Placeholder."""

    @property
    def enabled(self) -> bool:  # type: ignore
        """Placeholder."""


class IngressPerAppProvider(_IngressPerAppBase):
    """Implementation of the provider of ingress."""

@ -463,10 +549,10 @@ class IngressPerAppProvider(_IngressPerAppBase):
    def publish_url(self, relation: Relation, url: str):
        """Publish to the app databag the ingress url."""
        ingress_url = {"url": url}
        IngressProviderAppData.parse_obj({"ingress": ingress_url}).dump(relation.data[self.app])
        IngressProviderAppData(ingress=ingress_url).dump(relation.data[self.app])  # type: ignore

    @property
    def proxied_endpoints(self) -> Dict[str, str]:
    def proxied_endpoints(self) -> Dict[str, Dict[str, str]]:
        """Returns the ingress settings provided to applications by this IngressPerAppProvider.

        For example, when this IngressPerAppProvider has provided the
@ -481,7 +567,7 @@ class IngressPerAppProvider(_IngressPerAppBase):
        }
        ```
        """
        results = {}
        results: Dict[str, Dict[str, str]] = {}

        for ingress_relation in self.relations:
            if not ingress_relation.app:
@ -501,8 +587,10 @@ class IngressPerAppProvider(_IngressPerAppBase):
            if not ingress_data:
                log.warning(f"relation {ingress_relation} not ready yet: try again in some time.")
                continue

            results[ingress_relation.app.name] = ingress_data.ingress.dict()
            if PYDANTIC_IS_V1:
                results[ingress_relation.app.name] = ingress_data.ingress.dict()
            else:
                results[ingress_relation.app.name] = ingress_data.ingress.model_dump(mode="json")  # type: ignore
        return results

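With the widened return type, each app now maps to the full ingress sub-dict rather than a bare URL string. A hedged sketch of consuming it from a provider-side charm (`self.ingress_per_app` is an assumed `IngressPerAppProvider` attribute):

```python
# Sketch: iterate the per-app ingress settings published by this provider.
for app_name, ingress in self.ingress_per_app.proxied_endpoints.items():
    # `ingress` is the model's dump, e.g. {"url": "http://traefik/model-app"}
    log.info("app %s is proxied at %s", app_name, ingress.get("url"))
```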
@ -540,12 +628,13 @@ class IngressPerAppRequirer(_IngressPerAppBase):
        relation_name: str = DEFAULT_RELATION_NAME,
        *,
        host: Optional[str] = None,
        ip: Optional[str] = None,
        port: Optional[int] = None,
        strip_prefix: bool = False,
        redirect_https: bool = False,
        # fixme: this is horrible UX.
        # shall we switch to manually calling provide_ingress_requirements with all args when ready?
        scheme: typing.Callable[[], str] = lambda: "http",
        scheme: Union[Callable[[], str], str] = lambda: "http",
    ):
        """Constructor for IngressRequirer.

@ -560,9 +649,12 @@ class IngressPerAppRequirer(_IngressPerAppBase):
                relation must be of interface type `ingress` and have "limit: 1")
            host: Hostname to be used by the ingress provider to address the requiring
                application; if unspecified, the default Kubernetes service name will be used.
            ip: Alternative addressing method other than host to be used by the ingress provider;
                if unspecified, binding address from juju network API will be used.
            strip_prefix: configure Traefik to strip the path prefix.
            redirect_https: redirect incoming requests to HTTPS.
            scheme: callable returning the scheme to use when constructing the ingress url.
                Or a string, if the scheme is known and stable at charm-init-time.

        Request Args:
            port: the port of the service
@ -572,21 +664,20 @@ class IngressPerAppRequirer(_IngressPerAppBase):
        self.relation_name = relation_name
        self._strip_prefix = strip_prefix
        self._redirect_https = redirect_https
        self._get_scheme = scheme
        self._get_scheme = scheme if callable(scheme) else lambda: scheme

        self._stored.set_default(current_url=None)  # type: ignore

        # if instantiated with a port, and we are related, then
        # we immediately publish our ingress data to speed up the process.
        if port:
            self._auto_data = host, port
            self._auto_data = host, ip, port
        else:
            self._auto_data = None

    def _handle_relation(self, event):
        # created, joined or changed: if we have auto data: publish it
        self._publish_auto_data()

        if self.is_ready():
            # Avoid spurious events, emit only when there is a NEW URL available
            new_url = (
@ -616,14 +707,15 @@ class IngressPerAppRequirer(_IngressPerAppBase):

    def _publish_auto_data(self):
        if self._auto_data:
            host, port = self._auto_data
            self.provide_ingress_requirements(host=host, port=port)
            host, ip, port = self._auto_data
            self.provide_ingress_requirements(host=host, ip=ip, port=port)

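Accepting either a string or a zero-arg callable for `scheme` and normalizing to a callable in the constructor keeps the rest of the class on a single code path. The same normalization in isolation, as a minimal sketch:

```python
from typing import Callable, Union

def normalize_scheme(scheme: Union[Callable[[], str], str]) -> Callable[[], str]:
    """Mirror of the constructor's normalization: always return a callable."""
    return scheme if callable(scheme) else (lambda: scheme)

assert normalize_scheme("https")() == "https"
assert normalize_scheme(lambda: "http")() == "http"
```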
    def provide_ingress_requirements(
        self,
        *,
        scheme: Optional[str] = None,
        host: Optional[str] = None,
        ip: Optional[str] = None,
        port: int,
    ):
        """Publishes the data that Traefik needs to provide ingress.
@ -632,34 +724,48 @@ class IngressPerAppRequirer(_IngressPerAppBase):
            scheme: Scheme to be used; if unspecified, use the one used by __init__.
            host: Hostname to be used by the ingress provider to address the
                requirer unit; if unspecified, FQDN will be used instead
            ip: Alternative addressing method other than host to be used by the ingress provider.
                if unspecified, binding address from juju network API will be used.
            port: the port of the service (required)
        """
        for relation in self.relations:
            self._provide_ingress_requirements(scheme, host, port, relation)
            self._provide_ingress_requirements(scheme, host, ip, port, relation)

    def _provide_ingress_requirements(
        self,
        scheme: Optional[str],
        host: Optional[str],
        ip: Optional[str],
        port: int,
        relation: Relation,
    ):
        if self.unit.is_leader():
            self._publish_app_data(scheme, port, relation)

        self._publish_unit_data(host, relation)
        self._publish_unit_data(host, ip, relation)

    def _publish_unit_data(
        self,
        host: Optional[str],
        ip: Optional[str],
        relation: Relation,
    ):
        if not host:
            host = socket.getfqdn()

        if ip is None:
            network_binding = self.charm.model.get_binding(relation)
            if (
                network_binding is not None
                and (bind_address := network_binding.network.bind_address) is not None
            ):
                ip = str(bind_address)
            else:
                log.error("failed to retrieve ip information from juju")

        unit_databag = relation.data[self.unit]
        try:
            IngressRequirerUnitData(host=host).dump(unit_databag)
            IngressRequirerUnitData(host=host, ip=ip).dump(unit_databag)
        except pydantic.ValidationError as e:
            msg = "failed to validate unit data"
            log.info(msg, exc_info=True)  # log to INFO because this might be expected
@ -88,7 +88,7 @@ LIBAPI = 0

# Increment this PATCH version before using `charmcraft publish-lib` or reset
# to 0 if you are raising the major API version
LIBPATCH = 8
LIBPATCH = 9

log = logging.getLogger(__name__)

@ -137,7 +137,7 @@ class TraefikRouteProvider(Object):
    The TraefikRouteProvider provides api to do this easily.
    """

    on = TraefikRouteProviderEvents()
    on = TraefikRouteProviderEvents()  # pyright: ignore
    _stored = StoredState()

    def __init__(
@ -163,7 +163,10 @@ class TraefikRouteProvider(Object):
        self._charm = charm
        self._relation_name = relation_name

        if self._stored.external_host != external_host or self._stored.scheme != scheme:
        if (
            self._stored.external_host != external_host  # pyright: ignore
            or self._stored.scheme != scheme  # pyright: ignore
        ):
            # If traefik endpoint details changed, update
            self.update_traefik_address(external_host=external_host, scheme=scheme)

@ -197,7 +200,7 @@ class TraefikRouteProvider(Object):
        This is split out into a separate method since, in the case of multi-unit deployments,
        removal of a `TraefikRouteRequirer` will not cause a `RelationEvent`, but the guard on
        app data ensures that only the previous leader will know what it is. Separating it
        allows for re-use both when the property is called and if the relation changes, so a
        allows for reuse both when the property is called and if the relation changes, so a
        leader change where the new leader checks the property will do the right thing.
        """
        if not self._charm.unit.is_leader():
@ -209,9 +212,11 @@ class TraefikRouteProvider(Object):
            self._stored.scheme = ""
            return
        external_host = relation.data[relation.app].get("external_host", "")
        self._stored.external_host = external_host or self._stored.external_host
        self._stored.external_host = (
            external_host or self._stored.external_host  # pyright: ignore
        )
        scheme = relation.data[relation.app].get("scheme", "")
        self._stored.scheme = scheme or self._stored.scheme
        self._stored.scheme = scheme or self._stored.scheme  # pyright: ignore

    def _on_relation_changed(self, event: RelationEvent):
        if self.is_ready(event.relation):
@ -269,7 +274,7 @@ class TraefikRouteRequirer(Object):
    application databag.
    """

    on = TraefikRouteRequirerEvents()
    on = TraefikRouteRequirerEvents()  # pyright: ignore
    _stored = StoredState()

    def __init__(self, charm: CharmBase, relation: Relation, relation_name: str = "traefik-route"):
@ -304,7 +309,7 @@ class TraefikRouteRequirer(Object):
        This is split out into a separate method since, in the case of multi-unit deployments,
        removal of a `TraefikRouteRequirer` will not cause a `RelationEvent`, but the guard on
        app data ensures that only the previous leader will know what it is. Separating it
        allows for re-use both when the property is called and if the relation changes, so a
        allows for reuse both when the property is called and if the relation changes, so a
        leader change where the new leader checks the property will do the right thing.
        """
        if not self._charm.unit.is_leader():
@ -317,9 +322,11 @@ class TraefikRouteRequirer(Object):
            self._stored.scheme = ""
            return
        external_host = relation.data[relation.app].get("external_host", "")
        self._stored.external_host = external_host or self._stored.external_host
        self._stored.external_host = (
            external_host or self._stored.external_host  # pyright: ignore
        )
        scheme = relation.data[relation.app].get("scheme", "")
        self._stored.scheme = scheme or self._stored.scheme
        self._stored.scheme = scheme or self._stored.scheme  # pyright: ignore

    def _on_relation_changed(self, event: RelationEvent) -> None:
        """Update StoredState with external_host and other information from Traefik."""
@ -335,11 +342,7 @@ class TraefikRouteRequirer(Object):

    def is_ready(self) -> bool:
        """Is the TraefikRouteRequirer ready to submit data to Traefik?"""
        if self._relation:
            return True

        return False
        return self._relation is not None

    def submit_to_traefik(self, config):
        """Relay an ingress configuration data structure to traefik.
@ -350,9 +353,6 @@ class TraefikRouteRequirer(Object):
        if not self._charm.unit.is_leader():
            raise UnauthorizedError()

        if not self._relation:
            return

        app_databag = self._relation.data[self._charm.app]

        # Traefik thrives on yaml, feels pointless to talk json to Route
156  libs/external/lib/charms/vault_k8s/v0/vault_kv.py  (vendored)
@ -116,15 +116,14 @@ juju integrate <vault provider charm> <vault requirer charm>

import json
import logging
from typing import Any, Dict, Iterable, Mapping, Optional, Union
from collections.abc import Iterable, Mapping
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Union

import ops
from interface_tester.schema_base import DataBagSchema  # type: ignore[import]
from interface_tester.schema_base import DataBagSchema  # type: ignore[import-untyped]
from pydantic import BaseModel, Field, Json, ValidationError

logger = logging.getLogger(__name__)


# The unique Charmhub library identifier, never change it
LIBID = "591d6d2fb6a54853b4bb53ef16ef603a"

@ -133,11 +132,24 @@ LIBAPI = 0

# Increment this PATCH version before using `charmcraft publish-lib` or reset
# to 0 if you are raising the major API version
LIBPATCH = 1
LIBPATCH = 5

PYDEPS = ["pydantic", "pytest-interface-tester"]


class LogAdapter(logging.LoggerAdapter):
    """Adapter for the logger to prepend a prefix to all log lines."""

    prefix = "vault_kv"

    def process(self, msg, kwargs):
        """Decides the format for the prepended text."""
        return f"[{self.prefix}] {msg}", kwargs


logger = LogAdapter(logging.getLogger(__name__), {})


class VaultKvProviderSchema(BaseModel):
    """Provider side of the vault-kv interface."""

@ -179,17 +191,29 @@ class UnitVaultKvRequirerSchema(BaseModel):
class ProviderSchema(DataBagSchema):
    """The schema for the provider side of this interface."""

    app: VaultKvProviderSchema
    app: VaultKvProviderSchema  # type: ignore


class RequirerSchema(DataBagSchema):
    """The schema for the requirer side of this interface."""

    app: AppVaultKvRequirerSchema
    unit: UnitVaultKvRequirerSchema
    app: AppVaultKvRequirerSchema  # type: ignore
    unit: UnitVaultKvRequirerSchema  # type: ignore


def is_requirer_data_valid(app_data: dict, unit_data: dict) -> bool:
@dataclass
class KVRequest:
    """This class represents a kv request from an interface Requirer."""

    relation_id: int
    app_name: str
    unit_name: str
    mount_suffix: str
    egress_subnet: str
    nonce: str


def is_requirer_data_valid(app_data: Mapping[str, str], unit_data: Mapping[str, str]) -> bool:
    """Return whether the requirer data is valid."""
    try:
        RequirerSchema(
@ -202,10 +226,10 @@ def is_requirer_data_valid(app_data: dict, unit_data: dict) -> bool:
        return False


def is_provider_data_valid(data: dict) -> bool:
def is_provider_data_valid(data: Mapping[str, str]) -> bool:
    """Return whether the provider data is valid."""
    try:
        ProviderSchema(app=VaultKvProviderSchema(**data))
        ProviderSchema(app=VaultKvProviderSchema(**data))  # type: ignore https://github.com/pydantic/pydantic/issues/8616
        return True
    except ValidationError as e:
        logger.debug("Invalid data: %s", e)
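`KVRequest` is a plain dataclass snapshot of one requirer unit's databag entry, so it can be built and inspected without a live relation. A hedged sketch with invented sample values:

```python
# Sample values are invented for illustration.
request = KVRequest(
    relation_id=7,
    app_name="barbican",
    unit_name="barbican/0",
    mount_suffix="secrets",
    egress_subnet="10.1.0.42/32",
    nonce="abc123",
)
# The provider can derive a per-app Vault mount from these fields,
# e.g. something like f"charm-{request.app_name}-{request.mount_suffix}".
```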
@ -219,28 +243,40 @@ class NewVaultKvClientAttachedEvent(ops.EventBase):
        self,
        handle: ops.Handle,
        relation_id: int,
        relation_name: str,
        app_name: str,
        unit_name: str,
        mount_suffix: str,
        egress_subnet: str,
        nonce: str,
    ):
        super().__init__(handle)
        self.relation_id = relation_id
        self.relation_name = relation_name
        self.app_name = app_name
        self.unit_name = unit_name
        self.mount_suffix = mount_suffix
        self.egress_subnet = egress_subnet
        self.nonce = nonce

    def snapshot(self) -> dict:
        """Return snapshot data that should be persisted."""
        return {
            "relation_id": self.relation_id,
            "relation_name": self.relation_name,
            "app_name": self.app_name,
            "unit_name": self.unit_name,
            "mount_suffix": self.mount_suffix,
            "egress_subnet": self.egress_subnet,
            "nonce": self.nonce,
        }

    def restore(self, snapshot: Dict[str, Any]):
        """Restore the value state from a given snapshot."""
        super().restore(snapshot)
        self.relation_id = snapshot["relation_id"]
        self.relation_name = snapshot["relation_name"]
        self.app_name = snapshot["app_name"]
        self.unit_name = snapshot["unit_name"]
        self.mount_suffix = snapshot["mount_suffix"]
        self.egress_subnet = snapshot["egress_subnet"]
        self.nonce = snapshot["nonce"]


class VaultKvProviderEvents(ops.ObjectEvents):
@ -252,7 +288,7 @@ class VaultKvProviderEvents(ops.ObjectEvents):
class VaultKvProvides(ops.Object):
    """Class to be instantiated by the providing side of the relation."""

    on = VaultKvProviderEvents()
    on = VaultKvProviderEvents()  # type: ignore

    def __init__(
        self,
@ -270,27 +306,24 @@ class VaultKvProvides(ops.Object):
    def _on_relation_changed(self, event: ops.RelationChangedEvent):
        """Handle client changed relation.

        This handler will emit a new_vault_kv_client_attached event if at least one unit data is
        valid.
        This handler will emit a new_vault_kv_client_attached event for each requiring unit
        with valid relation data.
        """
        if event.app is None:
            logger.debug("No remote application yet")
            return

        app_data = dict(event.relation.data[event.app])

        any_valid = False
        app_data = event.relation.data[event.app]
        for unit in event.relation.units:
            if not is_requirer_data_valid(app_data, dict(event.relation.data[unit])):
            if not is_requirer_data_valid(app_data, event.relation.data[unit]):
                logger.debug("Invalid data from unit %r", unit.name)
                continue
            any_valid = True

        if any_valid:
            self.on.new_vault_kv_client_attached.emit(
                event.relation.id,
                event.relation.name,
                event.relation.data[event.app]["mount_suffix"],
                relation_id=event.relation.id,
                app_name=event.app.name,
                unit_name=unit.name,
                mount_suffix=event.relation.data[event.app]["mount_suffix"],
                egress_subnet=event.relation.data[unit]["egress_subnet"],
                nonce=event.relation.data[unit]["nonce"],
            )

    def set_vault_url(self, relation: ops.Relation, vault_url: str):
@ -304,7 +337,12 @@ class VaultKvProvides(ops.Object):
        """Set the ca_certificate on the relation."""
        if not self.charm.unit.is_leader():
            return

        if not relation:
            logger.warning("Relation is None")
            return
        if not relation.active:
            logger.warning("Relation is not active")
            return
        relation.data[self.charm.app]["ca_certificate"] = ca_certificate

    def set_mount(self, relation: ops.Relation, mount: str):
@ -350,6 +388,58 @@ class VaultKvProvides(ops.Object):
        """Get the unit credentials from the relation."""
        return json.loads(relation.data[self.charm.app].get("credentials", "{}"))

    def get_outstanding_kv_requests(self, relation_id: Optional[int] = None) -> List[KVRequest]:
        """Get the outstanding requests for the relation."""
        outstanding_requests: List[KVRequest] = []
        kv_requests = self.get_kv_requests(relation_id=relation_id)
        for request in kv_requests:
            if not self._credentials_issued_for_request(
                nonce=request.nonce, relation_id=relation_id
            ):
                outstanding_requests.append(request)
        return outstanding_requests

    def get_kv_requests(self, relation_id: Optional[int] = None) -> List[KVRequest]:
        """Get all KV requests for the relation."""
        kv_requests: List[KVRequest] = []
        relations = (
            [
                relation
                for relation in self.model.relations[self.relation_name]
                if relation.id == relation_id
            ]
            if relation_id is not None
            else self.model.relations.get(self.relation_name, [])
        )
        for relation in relations:
            assert isinstance(relation.app, ops.Application)
            if not relation.active:
                continue
            app_data = relation.data[relation.app]
            for unit in relation.units:
                unit_data = relation.data[unit]
                if not is_requirer_data_valid(app_data=app_data, unit_data=unit_data):
                    continue
                kv_requests.append(
                    KVRequest(
                        relation_id=relation.id,
                        app_name=relation.app.name,
                        unit_name=unit.name,
                        mount_suffix=app_data["mount_suffix"],
                        egress_subnet=unit_data["egress_subnet"],
                        nonce=unit_data["nonce"],
                    )
                )
        return kv_requests

    def _credentials_issued_for_request(self, nonce: str, relation_id: Optional[int]) -> bool:
        """Return whether credentials have been issued for the request."""
        relation = self.model.get_relation(self.relation_name, relation_id)
        if not relation:
            return False
        credentials = self.get_credentials(relation)
        return credentials.get(nonce) is not None


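A request counts as outstanding until the provider publishes a credentials entry keyed by the request's nonce, so a provider charm can reconcile idempotently on any event. A hedged sketch of such a loop (`self.vault_kv` is an assumed `VaultKvProvides` attribute; `_issue_credentials` is a hypothetical charm helper):

```python
# Sketch of a provider-side reconcile loop; helper names are hypothetical.
def _on_config_changed(self, event) -> None:
    for request in self.vault_kv.get_outstanding_kv_requests():
        # Publishing credentials under request.nonce marks the request as
        # served, because _credentials_issued_for_request looks the nonce up
        # in the published credentials mapping.
        self._issue_credentials(
            relation_id=request.relation_id,
            nonce=request.nonce,
            egress_subnet=request.egress_subnet,
        )
```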
class VaultKvConnectedEvent(ops.EventBase):
    """VaultKvConnectedEvent Event."""
@ -422,7 +512,7 @@ class VaultKvRequireEvents(ops.ObjectEvents):
class VaultKvRequires(ops.Object):
    """Class to be instantiated by the requiring side of the relation."""

    on = VaultKvRequireEvents()
    on = VaultKvRequireEvents()  # type: ignore

    def __init__(
        self,
@ -475,7 +565,7 @@ class VaultKvRequires(ops.Object):
            return

        if (
            is_provider_data_valid(dict(event.relation.data[event.app]))
            is_provider_data_valid(event.relation.data[event.app])
            and self.get_unit_credentials(event.relation) is not None
        ):
            self.on.ready.emit(

2  rebuild
@ -1,3 +1,3 @@
# This file is used to trigger a build.
# Change uuid to trigger a new build on every charm.
b61d2bad-c892-41b2-b41c-6dd9c3401cd4
e2e6fe8e-0445-11ef-a2f8-fbd98528559f
@ -102,6 +102,7 @@ applications:
      nova-api-image: ghcr.io/canonical/nova-consolidated:2024.1
      nova-scheduler-image: ghcr.io/canonical/nova-consolidated:2024.1
      nova-conductor-image: ghcr.io/canonical/nova-consolidated:2024.1
      nova-spiceproxy-image: ghcr.io/canonical/nova-consolidated:2024.1
  placement:
{% if placement_k8s is defined and placement_k8s is sameas true -%}
    charm: ../../../placement-k8s.charm
@ -390,6 +390,7 @@
      - ops-sunbeam/ops_sunbeam/*
      - charms/tempest-k8s/*
      - tests/local/zaza/sunbeam/charm_tests/tempest_k8s/*
      - rebuild
    vars:
      # Artifacts will be downloaded from below charm jobs
      charm_jobs: