Migrate to unified charmcraft.yaml

Charmcraft 3 moves towards a single charmcraft.yaml, this is needed for
24.04 migration.

Change-Id: I743712752aaf37bf68730b64bd6c147dfad370e2
Signed-off-by: Guillaume Boutry <guillaume.boutry@canonical.com>
This commit is contained in:
Guillaume Boutry 2024-10-07 16:29:59 +02:00
parent f700846623
commit e911599abe
No known key found for this signature in database
GPG Key ID: E95E3326872E55DE
128 changed files with 3249 additions and 3527 deletions

View File

@@ -1,2 +0,0 @@
# NOTE: no actions yet!
{ }

View File

@@ -1,11 +1,115 @@
type: "charm"
bases:
- build-on:
- name: "ubuntu"
channel: "22.04"
run-on:
- name: "ubuntu"
channel: "22.04"
type: charm
name: aodh-k8s
summary: OpenStack aodh service
description: |
OpenStack Aodh provides the alarming service for the OpenStack
Telemetry stack, triggering actions when collected metering or
event data breaks defined rules.
.
assumes:
- k8s-api
- juju >= 3.1
links:
source:
- https://opendev.org/openstack/charm-aodh-k8s
issues:
- https://bugs.launchpad.net/charm-aodh-k8s
base: ubuntu@22.04
platforms:
amd64:
config:
options:
debug:
default: false
description: Enable debug logging.
type: boolean
region:
default: RegionOne
description: Name of the OpenStack region
type: string
alarm-history-time-to-live:
default: -1
description: |
Number of seconds that alarm histories are kept in the database for (<= 0
means forever)
type: int
alarm-histories-delete-batch-size:
default: 0
description: |
Number of alarm histories to be deleted in one iteration from the database (0
means all). (integer value)
type: int
containers:
aodh-api:
resource: aodh-api-image
aodh-evaluator:
resource: aodh-evaluator-image
aodh-notifier:
resource: aodh-notifier-image
aodh-listener:
resource: aodh-listener-image
aodh-expirer:
resource: aodh-expirer-image
resources:
aodh-api-image:
type: oci-image
description: OCI image for OpenStack aodh api service
upstream-source: ghcr.io/canonical/aodh-consolidated:2024.1
aodh-evaluator-image:
type: oci-image
description: OCI image for OpenStack aodh evaluator service
upstream-source: ghcr.io/canonical/aodh-consolidated:2024.1
aodh-notifier-image:
type: oci-image
description: OCI image for OpenStack aodh notifier service
upstream-source: ghcr.io/canonical/aodh-consolidated:2024.1
aodh-listener-image:
type: oci-image
description: OCI image for OpenStack aodh listener service
upstream-source: ghcr.io/canonical/aodh-consolidated:2024.1
aodh-expirer-image:
type: oci-image
description: OCI image for OpenStack aodh expirer service
upstream-source: ghcr.io/canonical/aodh-consolidated:2024.1
requires:
database:
interface: mysql_client
limit: 1
identity-service:
interface: keystone
ingress-internal:
interface: ingress
optional: true
limit: 1
ingress-public:
interface: ingress
limit: 1
amqp:
interface: rabbitmq
receive-ca-cert:
interface: certificate_transfer
optional: true
logging:
interface: loki_push_api
optional: true
tracing:
interface: tracing
optional: true
limit: 1
provides:
aodh:
interface: aodh
peers:
peers:
interface: aodh-peer
parts:
update-certificates:
plugin: nil
@@ -13,9 +117,9 @@ parts:
apt update
apt install -y ca-certificates
update-ca-certificates
charm:
after: [update-certificates]
after:
- update-certificates
build-packages:
- git
- libffi-dev

View File

@@ -1,21 +0,0 @@
options:
debug:
default: False
description: Enable debug logging.
type: boolean
region:
default: RegionOne
description: Name of the OpenStack region
type: string
alarm-history-time-to-live:
default: -1
description: |
Number of seconds that alarm histories are kept in the database for (<= 0
means forever)
type: int
alarm-histories-delete-batch-size:
default: 0
description: |
Number of alarm histories to be deleted in one iteration from the database (0
means all). (integer value)
type: int

View File

@@ -1,92 +0,0 @@
name: aodh-k8s
summary: OpenStack aodh service
maintainer: OpenStack Charmers <openstack-charmers@lists.ubuntu.com>
description: |
OpenStack aodh provides an HTTP service for managing, selecting,
and claiming providers of classes of inventory representing available
resources in a cloud.
.
version: 3
bases:
- name: ubuntu
channel: 22.04/stable
assumes:
- k8s-api
- juju >= 3.1
tags:
- openstack
source: https://opendev.org/openstack/charm-aodh-k8s
issues: https://bugs.launchpad.net/charm-aodh-k8s
containers:
aodh-api:
resource: aodh-api-image
aodh-evaluator:
resource: aodh-evaluator-image
aodh-notifier:
resource: aodh-notifier-image
aodh-listener:
resource: aodh-listener-image
aodh-expirer:
resource: aodh-expirer-image
resources:
aodh-api-image:
type: oci-image
description: OCI image for OpenStack aodh api service
# ghcr.io/canonical/aodh-consolidated:2024.1
upstream-source: ghcr.io/canonical/aodh-consolidated:2024.1
aodh-evaluator-image:
type: oci-image
description: OCI image for OpenStack aodh evaluator service
# ghcr.io/canonical/aodh-consolidated:2024.1
upstream-source: ghcr.io/canonical/aodh-consolidated:2024.1
aodh-notifier-image:
type: oci-image
description: OCI image for OpenStack aodh notifier service
# ghcr.io/canonical/aodh-consolidated:2024.1
upstream-source: ghcr.io/canonical/aodh-consolidated:2024.1
aodh-listener-image:
type: oci-image
description: OCI image for OpenStack aodh listener service
# ghcr.io/canonical/aodh-consolidated:2024.1
upstream-source: ghcr.io/canonical/aodh-consolidated:2024.1
aodh-expirer-image:
type: oci-image
description: OCI image for OpenStack aodh expirer service
# ghcr.io/canonical/aodh-consolidated:2024.1
upstream-source: ghcr.io/canonical/aodh-consolidated:2024.1
requires:
database:
interface: mysql_client
limit: 1
identity-service:
interface: keystone
ingress-internal:
interface: ingress
optional: true
limit: 1
ingress-public:
interface: ingress
limit: 1
amqp:
interface: rabbitmq
receive-ca-cert:
interface: certificate_transfer
optional: true
logging:
interface: loki_push_api
optional: true
tracing:
interface: tracing
optional: true
limit: 1
provides:
aodh:
interface: aodh
peers:
peers:
interface: aodh-peer

View File

@@ -1 +0,0 @@
../config.yaml

View File

@@ -1,14 +1,87 @@
# This file configures Charmcraft.
# See https://juju.is/docs/sdk/charmcraft-config for guidance.
type: charm
bases:
- build-on:
- name: ubuntu
channel: "22.04"
run-on:
- name: ubuntu
channel: "22.04"
title: Barbican
name: barbican-k8s
summary: OpenStack Key Manager service
description: |
Barbican is the OpenStack Key Manager service.
It provides secure storage, provisioning and management of secret data.
This includes keying material such as Symmetric Keys, Asymmetric Keys, Certificates and raw binary data.
assumes:
- k8s-api
- juju >= 3.1
links:
source:
- https://opendev.org/openstack/charm-barbican-k8s
issues:
- https://bugs.launchpad.net/charm-barbican-k8s
base: ubuntu@22.04
platforms:
amd64:
config:
options:
debug:
default: false
description: Enable debug logging.
type: boolean
region:
default: RegionOne
description: Name of the OpenStack region
type: string
containers:
barbican-api:
resource: barbican-api-image
barbican-worker:
resource: barbican-worker-image
resources:
barbican-api-image:
type: oci-image
description: OCI image for OpenStack Barbican API
upstream-source: ghcr.io/canonical/barbican-consolidated:2024.1
barbican-worker-image:
type: oci-image
description: OCI image for OpenStack Barbican worker
upstream-source: ghcr.io/canonical/barbican-consolidated:2024.1
requires:
ingress-internal:
interface: ingress
limit: 1
optional: true
ingress-public:
interface: ingress
limit: 1
database:
interface: mysql_client
limit: 1
identity-service:
interface: keystone
identity-ops:
interface: keystone-resources
optional: true
amqp:
interface: rabbitmq
vault-kv:
interface: vault-kv
limit: 1
receive-ca-cert:
interface: certificate_transfer
optional: true
logging:
interface: loki_push_api
optional: true
tracing:
interface: tracing
optional: true
limit: 1
peers:
peers:
interface: barbican-peer
parts:
charm:
build-packages:

View File

@@ -1,9 +0,0 @@
options:
debug:
default: False
description: Enable debug logging.
type: boolean
region:
default: RegionOne
description: Name of the OpenStack region
type: string

View File

@@ -1,74 +0,0 @@
name: barbican-k8s
display-name: Barbican
summary: Openstack Key Manager service
description: |
Barbican is the OpenStack Key Manager service.
It provides secure storage, provisioning and management of secret data.
This includes keying material such as Symmetric Keys, Asymmetric Keys, Certificates and raw binary data.
maintainer: Openstack Charmers <openstack-charmers@lists.ubuntu.com>
source: https://opendev.org/openstack/charm-barbican-k8s
issues: https://bugs.launchpad.net/charm-barbican-k8s
bases:
- name: ubuntu
channel: 22.04/stable
assumes:
- k8s-api
- juju >= 3.1
tags:
- openstack
- secrets
- misc
requires:
ingress-internal:
interface: ingress
limit: 1
optional: true
ingress-public:
interface: ingress
limit: 1
database:
interface: mysql_client
limit: 1
identity-service:
interface: keystone
identity-ops:
interface: keystone-resources
optional: true
amqp:
interface: rabbitmq
vault-kv:
interface: vault-kv
limit: 1
receive-ca-cert:
interface: certificate_transfer
optional: true
logging:
interface: loki_push_api
optional: true
tracing:
interface: tracing
optional: true
limit: 1
peers:
peers:
interface: barbican-peer
containers:
barbican-api:
resource: barbican-api-image
barbican-worker:
resource: barbican-worker-image
resources:
barbican-api-image:
type: oci-image
description: OCI image for OpenStack Barbican API
upstream-source: ghcr.io/canonical/barbican-consolidated:2024.1
barbican-worker-image:
type: oci-image
description: OCI image for OpenStack Barbican worker
upstream-source: ghcr.io/canonical/barbican-consolidated:2024.1

View File

@@ -1 +0,0 @@
../config.yaml

View File

@@ -1,2 +0,0 @@
# NOTE: no actions yet!
{ }

View File

@@ -1,11 +1,78 @@
type: "charm"
bases:
- build-on:
- name: "ubuntu"
channel: "22.04"
run-on:
- name: "ubuntu"
channel: "22.04"
type: charm
name: ceilometer-k8s
summary: OpenStack ceilometer service
description: |
OpenStack Ceilometer is a data collection service that provides the
ability to normalise and transform telemetry data across the
OpenStack components it monitors.
.
assumes:
- k8s-api
- juju >= 3.1
links:
source:
- https://opendev.org/openstack/charm-ceilometer-k8s
issues:
- https://bugs.launchpad.net/charm-ceilometer-k8s
base: ubuntu@22.04
platforms:
amd64:
config:
options:
debug:
default: false
description: Enable debug logging.
type: boolean
region:
default: RegionOne
description: Name of the OpenStack region
type: string
containers:
ceilometer-central:
resource: ceilometer-central-image
ceilometer-notification:
resource: ceilometer-notification-image
resources:
ceilometer-central-image:
type: oci-image
description: OCI image for OpenStack ceilometer central
upstream-source: ghcr.io/canonical/ceilometer-consolidated:2024.1
ceilometer-notification-image:
type: oci-image
description: OCI image for OpenStack ceilometer notification
upstream-source: ghcr.io/canonical/ceilometer-consolidated:2024.1
requires:
amqp:
interface: rabbitmq
identity-credentials:
interface: keystone-credentials
limit: 1
gnocchi-db:
interface: gnocchi
receive-ca-cert:
interface: certificate_transfer
optional: true
logging:
interface: loki_push_api
optional: true
tracing:
interface: tracing
optional: true
limit: 1
provides:
ceilometer-service:
interface: ceilometer
peers:
peers:
interface: ceilometer-peer
parts:
update-certificates:
plugin: nil
@@ -13,9 +80,9 @@ parts:
apt update
apt install -y ca-certificates
update-ca-certificates
charm:
after: [update-certificates]
after:
- update-certificates
build-packages:
- git
- libffi-dev

View File

@@ -1,9 +0,0 @@
options:
debug:
default: False
description: Enable debug logging.
type: boolean
region:
default: RegionOne
description: Name of the OpenStack region
type: string

View File

@@ -1,65 +0,0 @@
name: ceilometer-k8s
summary: OpenStack ceilometer service
maintainer: OpenStack Charmers <openstack-charmers@lists.ubuntu.com>
description: |
OpenStack ceilometer provides an HTTP service for managing, selecting,
and claiming providers of classes of inventory representing available
resources in a cloud.
.
version: 3
bases:
- name: ubuntu
channel: 22.04/stable
assumes:
- k8s-api
- juju >= 3.1
tags:
- openstack
source: https://opendev.org/openstack/charm-ceilometer-k8s
issues: https://bugs.launchpad.net/charm-ceilometer-k8s
containers:
ceilometer-central:
resource: ceilometer-central-image
ceilometer-notification:
resource: ceilometer-notification-image
resources:
ceilometer-central-image:
type: oci-image
description: OCI image for OpenStack ceilometer central
# ghcr.io/canonical/ceilometer-central:2024.1
upstream-source: ghcr.io/canonical/ceilometer-consolidated:2024.1
ceilometer-notification-image:
type: oci-image
description: OCI image for OpenStack ceilometer notification
# ghcr.io/canonical/ceilometer-notification:2024.1
description: OCI image for OpenStack ceilometer
upstream-source: ghcr.io/canonical/ceilometer-consolidated:2024.1
provides:
ceilometer-service:
interface: ceilometer
requires:
amqp:
interface: rabbitmq
identity-credentials:
interface: keystone-credentials
limit: 1
gnocchi-db:
interface: gnocchi
receive-ca-cert:
interface: certificate_transfer
optional: true
logging:
interface: loki_push_api
optional: true
tracing:
interface: tracing
optional: true
limit: 1
peers:
peers:
interface: ceilometer-peer

View File

@@ -1 +0,0 @@
../actions.yaml

View File

@@ -1 +0,0 @@
../config.yaml

View File

@@ -1,2 +0,0 @@
# NOTE: no actions yet!
{ }

View File

@@ -1,11 +1,331 @@
type: "charm"
bases:
- build-on:
- name: "ubuntu"
channel: "22.04"
run-on:
- name: "ubuntu"
channel: "22.04"
type: charm
name: cinder-ceph-k8s
summary: OpenStack volume service - Ceph backend
description: |
Cinder is the OpenStack project that provides volume management for
instances. This charm provides integration with Ceph storage
backends.
assumes:
- k8s-api
- juju >= 3.1
links:
source:
- https://opendev.org/openstack/charm-cinder-ceph-k8s
issues:
- https://bugs.launchpad.net/charm-cinder-ceph-k8s
base: ubuntu@22.04
platforms:
amd64:
config:
options:
debug:
type: boolean
default: false
description: Enable debug logging.
use-syslog:
type: boolean
default: true
description: |
Setting this to False will disable logging to syslog (the default).
ceph-osd-replication-count:
default: 3
type: int
description: |
This value dictates the number of replicas ceph must make of any
object it stores within the cinder rbd pool. Of course, this only
applies if using Ceph as a backend store. Note that once the cinder
rbd pool has been created, changing this value will not have any
effect (although it can be changed in ceph by manually configuring
your ceph cluster).
ceph-pool-weight:
type: int
default: 40
description: |
Defines a relative weighting of the pool as a percentage of the total
amount of data in the Ceph cluster. This effectively weights the number
of placement groups for the pool created to be appropriately portioned
to the amount of data expected. For example, if the ephemeral volumes
for the OpenStack compute instances are expected to take up 20% of the
overall configuration then this value would be specified as 20. Note -
it is important to choose an appropriate value for the pool weight as
this directly affects the number of placement groups which will be
created for the pool. The number of placement groups for a pool can
only be increased, never decreased - so it is important to identify the
percent of data that will likely reside in the pool.
volume-backend-name:
default: null
type: string
description: |
Volume backend name for the backend. The default value is the
application name in the Juju model, e.g. "cinder-ceph-mybackend"
if it's deployed as `juju deploy cinder-ceph cinder-ceph-mybackend`.
A common backend name can be set to multiple backends with the
same characters so that those can be treated as a single virtual
backend associated with a single volume type.
backend-availability-zone:
default: null
type: string
description: |
Availability zone name of this volume backend. If set, it will
override the default availability zone. Supported for Pike or
newer releases.
restrict-ceph-pools:
default: false
type: boolean
description: |
Optionally restrict Ceph key permissions to access pools as required.
rbd-pool-name:
default: null
type: string
description: |
Optionally specify an existing rbd pool that cinder should map to.
rbd-flatten-volume-from-snapshot:
default: false
type: boolean
description: |
Flatten volumes created from snapshots to remove dependency from
volume to snapshot.
rbd-mirroring-mode:
type: string
default: pool
description: |
The RBD mirroring mode used for the Ceph pool. This option is only used
with 'replicated' pool type, as it's not supported for 'erasure-coded'
pool type - valid values: 'pool' and 'image'
pool-type:
type: string
default: replicated
description: |
Ceph pool type to use for storage - valid values include `replicated`
and `erasure-coded`.
ec-profile-name:
type: string
default: null
description: |
Name for the EC profile to be created for the EC pools. If not defined
a profile name will be generated based on the name of the pool used by
the application.
ec-rbd-metadata-pool:
type: string
default: null
description: |
Name of the metadata pool to be created (for RBD use-cases). If not
defined a metadata pool name will be generated based on the name of
the data pool used by the application. The metadata pool is always
replicated, not erasure coded.
ec-profile-k:
type: int
default: 1
description: |
Number of data chunks that will be used for EC data pool. K+M factors
should never be greater than the number of available zones (or hosts)
for balancing.
ec-profile-m:
type: int
default: 2
description: |
Number of coding chunks that will be used for EC data pool. K+M factors
should never be greater than the number of available zones (or hosts)
for balancing.
ec-profile-locality:
type: int
default: null
description: |
(lrc plugin - l) Group the coding and data chunks into sets of size l.
For instance, for k=4 and m=2, when l=3 two groups of three are created.
Each set can be recovered without reading chunks from another set. Note
that using the lrc plugin does incur more raw storage usage than isa or
jerasure in order to reduce the cost of recovery operations.
ec-profile-crush-locality:
type: string
default: null
description: |
(lrc plugin) The type of the crush bucket in which each set of chunks
defined by l will be stored. For instance, if it is set to rack, each
group of l chunks will be placed in a different rack. It is used to
create a CRUSH rule step such as step choose rack. If it is not set,
no such grouping is done.
ec-profile-durability-estimator:
type: int
default: null
description: |
(shec plugin - c) The number of parity chunks each of which includes
each data chunk in its calculation range. The number is used as a
durability estimator. For instance, if c=2, 2 OSDs can be down
without losing data.
ec-profile-helper-chunks:
type: int
default: null
description: |
(clay plugin - d) Number of OSDs requested to send data during
recovery of a single chunk. d needs to be chosen such that
k+1 <= d <= k+m-1. Larger the d, the better the savings.
ec-profile-scalar-mds:
type: string
default: null
description: |
(clay plugin) specifies the plugin that is used as a building
block in the layered construction. It can be one of jerasure,
isa, shec (defaults to jerasure).
ec-profile-plugin:
type: string
default: jerasure
description: |
EC plugin to use for this applications pool. The following list of
plugins acceptable - jerasure, lrc, isa, shec, clay.
ec-profile-technique:
type: string
default: null
description: |
EC profile technique used for this applications pool - will be
validated based on the plugin configured via ec-profile-plugin.
Supported techniques are `reed_sol_van`, `reed_sol_r6_op`,
`cauchy_orig`, `cauchy_good`, `liber8tion` for jerasure,
`reed_sol_van`, `cauchy` for isa and `single`, `multiple`
for shec.
ec-profile-device-class:
type: string
default: null
description: |
Device class from CRUSH map to use for placement groups for
erasure profile - valid values: ssd, hdd or nvme (or leave
unset to not use a device class).
bluestore-compression-algorithm:
type: string
default: null
description: |
Compressor to use (if any) for pools requested by this charm.
.
NOTE: The ceph-osd charm sets a global default for this value (defaults
to 'lz4' unless configured by the end user) which will be used unless
specified for individual pools.
bluestore-compression-mode:
type: string
default: null
description: |
Policy for using compression on pools requested by this charm.
.
'none' means never use compression.
'passive' means use compression when clients hint that data is
compressible.
'aggressive' means use compression unless clients hint that
data is not compressible.
'force' means use compression under all circumstances even if the clients
hint that the data is not compressible.
bluestore-compression-required-ratio:
type: float
default: null
description: |
The ratio of the size of the data chunk after compression relative to the
original size must be at least this small in order to store the
compressed version on pools requested by this charm.
bluestore-compression-min-blob-size:
type: int
default: null
description: |
Chunks smaller than this are never compressed on pools requested by
this charm.
bluestore-compression-min-blob-size-hdd:
type: int
default: null
description: |
Value of bluestore compression min blob size for rotational media on
pools requested by this charm.
bluestore-compression-min-blob-size-ssd:
type: int
default: null
description: |
Value of bluestore compression min blob size for solid state media on
pools requested by this charm.
bluestore-compression-max-blob-size:
type: int
default: null
description: |
Chunks larger than this are broken into smaller blobs sizing bluestore
compression max blob size before being compressed on pools requested by
this charm.
bluestore-compression-max-blob-size-hdd:
type: int
default: null
description: |
Value of bluestore compression max blob size for rotational media on
pools requested by this charm.
bluestore-compression-max-blob-size-ssd:
type: int
default: null
description: |
Value of bluestore compression max blob size for solid state media on
pools requested by this charm.
rabbit-user:
type: string
default: null
description: Username to request access on rabbitmq-server.
rabbit-vhost:
type: string
default: null
description: RabbitMQ virtual host to request access on rabbitmq-server.
enable-telemetry-notifications:
type: boolean
default: false
description: Enable notifications to send to telemetry.
image-volume-cache-enabled:
type: boolean
default: false
description: |
Enable the image volume cache.
image-volume-cache-max-size-gb:
type: int
default: 0
description: |
Max size of the image volume cache in GB. 0 means unlimited.
image-volume-cache-max-count:
type: int
default: 0
description: |
Max number of entries allowed in the image volume cache. 0 means
unlimited.
containers:
cinder-volume:
resource: cinder-volume-image
resources:
cinder-volume-image:
type: oci-image
description: OCI image for OpenStack Cinder Volume
upstream-source: ghcr.io/canonical/cinder-consolidated:2024.1
requires:
amqp:
interface: rabbitmq
ceph:
interface: ceph-client
database:
interface: mysql_client
limit: 1
identity-credentials:
interface: keystone-credentials
optional: true
logging:
interface: loki_push_api
optional: true
tracing:
interface: tracing
optional: true
limit: 1
provides:
ceph-access:
interface: cinder-ceph-key
storage-backend:
interface: cinder-backend
peers:
peers:
interface: cinder-peer
parts:
update-certificates:
plugin: nil
@@ -13,9 +333,9 @@ parts:
apt update
apt install -y ca-certificates
update-ca-certificates
charm:
after: [update-certificates]
after:
- update-certificates
build-packages:
- git
- libffi-dev

View File

@@ -1,268 +0,0 @@
options:
debug:
type: boolean
default: False
description: Enable debug logging.
use-syslog:
type: boolean
default: True
description: |
Setting this to False will disable logging to syslog (the default).
ceph-osd-replication-count:
default: 3
type: int
description: |
This value dictates the number of replicas ceph must make of any
object it stores within the cinder rbd pool. Of course, this only
applies if using Ceph as a backend store. Note that once the cinder
rbd pool has been created, changing this value will not have any
effect (although it can be changed in ceph by manually configuring
your ceph cluster).
ceph-pool-weight:
type: int
default: 40
description: |
Defines a relative weighting of the pool as a percentage of the total
amount of data in the Ceph cluster. This effectively weights the number
of placement groups for the pool created to be appropriately portioned
to the amount of data expected. For example, if the ephemeral volumes
for the OpenStack compute instances are expected to take up 20% of the
overall configuration then this value would be specified as 20. Note -
it is important to choose an appropriate value for the pool weight as
this directly affects the number of placement groups which will be
created for the pool. The number of placement groups for a pool can
only be increased, never decreased - so it is important to identify the
percent of data that will likely reside in the pool.
volume-backend-name:
default:
type: string
description: |
Volume backend name for the backend. The default value is the
application name in the Juju model, e.g. "cinder-ceph-mybackend"
if it's deployed as `juju deploy cinder-ceph cinder-ceph-mybackend`.
A common backend name can be set to multiple backends with the
same characters so that those can be treated as a single virtual
backend associated with a single volume type.
backend-availability-zone:
default:
type: string
description: |
Availability zone name of this volume backend. If set, it will
override the default availability zone. Supported for Pike or
newer releases.
restrict-ceph-pools:
default: False
type: boolean
description: |
Optionally restrict Ceph key permissions to access pools as required.
rbd-pool-name:
default:
type: string
description: |
Optionally specify an existing rbd pool that cinder should map to.
rbd-flatten-volume-from-snapshot:
default:
type: boolean
default: False
description: |
Flatten volumes created from snapshots to remove dependency from
volume to snapshot.
rbd-mirroring-mode:
type: string
default: pool
description: |
The RBD mirroring mode used for the Ceph pool. This option is only used
with 'replicated' pool type, as it's not supported for 'erasure-coded'
pool type - valid values: 'pool' and 'image'
pool-type:
type: string
default: replicated
description: |
Ceph pool type to use for storage - valid values include replicated
and erasure-coded.
ec-profile-name:
type: string
default:
description: |
Name for the EC profile to be created for the EC pools. If not defined
a profile name will be generated based on the name of the pool used by
the application.
ec-rbd-metadata-pool:
type: string
default:
description: |
Name of the metadata pool to be created (for RBD use-cases). If not
defined a metadata pool name will be generated based on the name of
the data pool used by the application. The metadata pool is always
replicated, not erasure coded.
ec-profile-k:
type: int
default: 1
description: |
Number of data chunks that will be used for EC data pool. K+M factors
should never be greater than the number of available zones (or hosts)
for balancing.
ec-profile-m:
type: int
default: 2
description: |
Number of coding chunks that will be used for EC data pool. K+M factors
should never be greater than the number of available zones (or hosts)
for balancing.
ec-profile-locality:
type: int
default:
description: |
(lrc plugin - l) Group the coding and data chunks into sets of size l.
For instance, for k=4 and m=2, when l=3 two groups of three are created.
Each set can be recovered without reading chunks from another set. Note
that using the lrc plugin does incur more raw storage usage than isa or
jerasure in order to reduce the cost of recovery operations.
ec-profile-crush-locality:
type: string
default:
description: |
(lrc plugin) The type of the crush bucket in which each set of chunks
defined by l will be stored. For instance, if it is set to rack, each
group of l chunks will be placed in a different rack. It is used to
create a CRUSH rule step such as step choose rack. If it is not set,
no such grouping is done.
ec-profile-durability-estimator:
type: int
default:
description: |
(shec plugin - c) The number of parity chunks each of which includes
each data chunk in its calculation range. The number is used as a
durability estimator. For instance, if c=2, 2 OSDs can be down
without losing data.
ec-profile-helper-chunks:
type: int
default:
description: |
(clay plugin - d) Number of OSDs requested to send data during
recovery of a single chunk. d needs to be chosen such that
k+1 <= d <= k+m-1. Larger the d, the better the savings.
ec-profile-scalar-mds:
type: string
default:
description: |
(clay plugin) specifies the plugin that is used as a building
block in the layered construction. It can be one of jerasure,
isa, shec (defaults to jerasure).
ec-profile-plugin:
type: string
default: jerasure
description: |
EC plugin to use for this applications pool. The following list of
plugins acceptable - jerasure, lrc, isa, shec, clay.
ec-profile-technique:
type: string
default:
description: |
EC profile technique used for this applications pool - will be
validated based on the plugin configured via ec-profile-plugin.
Supported techniques are reed_sol_van, reed_sol_r6_op,
cauchy_orig, cauchy_good, liber8tion for jerasure,
reed_sol_van, cauchy for isa and single, multiple
for shec.
ec-profile-device-class:
type: string
default:
description: |
Device class from CRUSH map to use for placement groups for
erasure profile - valid values: ssd, hdd or nvme (or leave
unset to not use a device class).
bluestore-compression-algorithm:
type: string
default:
description: |
Compressor to use (if any) for pools requested by this charm.
.
NOTE: The ceph-osd charm sets a global default for this value (defaults
to 'lz4' unless configured by the end user) which will be used unless
specified for individual pools.
bluestore-compression-mode:
type: string
default:
description: |
Policy for using compression on pools requested by this charm.
.
'none' means never use compression.
'passive' means use compression when clients hint that data is
compressible.
'aggressive' means use compression unless clients hint that
data is not compressible.
'force' means use compression under all circumstances even if the clients
hint that the data is not compressible.
bluestore-compression-required-ratio:
type: float
default:
description: |
The ratio of the size of the data chunk after compression relative to the
original size must be at least this small in order to store the
compressed version on pools requested by this charm.
bluestore-compression-min-blob-size:
type: int
default:
description: |
Chunks smaller than this are never compressed on pools requested by
this charm.
bluestore-compression-min-blob-size-hdd:
type: int
default:
description: |
Value of bluestore compression min blob size for rotational media on
pools requested by this charm.
bluestore-compression-min-blob-size-ssd:
type: int
default:
description: |
Value of bluestore compression min blob size for solid state media on
pools requested by this charm.
bluestore-compression-max-blob-size:
type: int
default:
description: |
Chunks larger than this are broken into smaller blobs sizing bluestore
compression max blob size before being compressed on pools requested by
this charm.
bluestore-compression-max-blob-size-hdd:
type: int
default:
description: |
Value of bluestore compression max blob size for rotational media on
pools requested by this charm.
bluestore-compression-max-blob-size-ssd:
type: int
default:
description: |
Value of bluestore compression max blob size for solid state media on
pools requested by this charm.
rabbit-user:
type: string
default:
description: Username to request access on rabbitmq-server.
rabbit-vhost:
type: string
default:
description: RabbitMQ virtual host to request access on rabbitmq-server.
enable-telemetry-notifications:
type: boolean
default: False
description: Enable notifications to send to telemetry.
image-volume-cache-enabled:
type: boolean
default: False
description: |
Enable the image volume cache.
image-volume-cache-max-size-gb:
type: int
default: 0
description: |
Max size of the image volume cache in GB. 0 means unlimited.
image-volume-cache-max-count:
type: int
default: 0
description: |
Max number of entries allowed in the image volume cache. 0 means
unlimited.

View File

@ -1,61 +0,0 @@
# Copyright 2021 Canonical Ltd
# See LICENSE file for licensing details.
name: cinder-ceph-k8s
summary: OpenStack volume service - Ceph backend
maintainer: Openstack Charmers <openstack-charmers@lists.ubuntu.com>
description: |
Cinder is the OpenStack project that provides volume management for
instances. This charm provides integration with Ceph storage
backends.
version: 3
bases:
- name: ubuntu
channel: 22.04/stable
assumes:
- k8s-api
- juju >= 3.1
tags:
- openstack
- storage
- misc
source: https://opendev.org/openstack/charm-cinder-ceph-k8s
issues: https://bugs.launchpad.net/charm-cinder-ceph-k8s
containers:
cinder-volume:
resource: cinder-volume-image
resources:
cinder-volume-image:
type: oci-image
description: OCI image for OpenStack Cinder Volume
upstream-source: ghcr.io/canonical/cinder-consolidated:2024.1
requires:
amqp:
interface: rabbitmq
ceph:
interface: ceph-client
database:
interface: mysql_client
limit: 1
identity-credentials:
interface: keystone-credentials
optional: true
logging:
interface: loki_push_api
optional: true
tracing:
interface: tracing
optional: true
limit: 1
provides:
ceph-access:
interface: cinder-ceph-key
storage-backend:
interface: cinder-backend
peers:
peers:
interface: cinder-peer

View File

@ -83,12 +83,9 @@ class TestCinderCephOperatorCharm(test_utils.CharmTestCase):
"""Setup fixtures ready for testing."""
super().setUp(charm, self.PATCHES)
self.mock_event = MagicMock()
with open("config.yaml", "r") as f:
config_data = f.read()
self.harness = test_utils.get_harness(
_CinderCephOperatorCharm,
container_calls=self.container_calls,
charm_config=config_data,
)
mock_get_platform = patch(
"charmhelpers.osplatform.get_platform", return_value="ubuntu"

View File

@ -1,2 +0,0 @@
# NOTE: no actions yet!
{ }

View File

@ -1,11 +1,85 @@
type: "charm"
bases:
- build-on:
- name: "ubuntu"
channel: "22.04"
run-on:
- name: "ubuntu"
channel: "22.04"
type: charm
name: cinder-k8s
summary: OpenStack volume service
description: |
Cinder is the OpenStack project that provides volume management for
instances.
assumes:
- k8s-api
- juju >= 3.1
links:
source:
- https://opendev.org/openstack/charm-cinder-k8s
issues:
- https://bugs.launchpad.net/charm-cinder-k8s
base: ubuntu@22.04
platforms:
amd64:
config:
options:
debug:
default: false
description: Enable debug logging.
type: boolean
region:
default: RegionOne
description: Name of the OpenStack region
type: string
containers:
cinder-api:
resource: cinder-api-image
cinder-scheduler:
resource: cinder-scheduler-image
resources:
cinder-api-image:
type: oci-image
description: OCI image for OpenStack Cinder API
upstream-source: ghcr.io/canonical/cinder-consolidated:2024.1
cinder-scheduler-image:
type: oci-image
description: OCI image for OpenStack Cinder Scheduler
upstream-source: ghcr.io/canonical/cinder-consolidated:2024.1
requires:
database:
interface: mysql_client
limit: 1
ingress-internal:
interface: ingress
optional: true
limit: 1
ingress-public:
interface: ingress
limit: 1
identity-service:
interface: keystone
limit: 1
amqp:
interface: rabbitmq
storage-backend:
interface: cinder-backend
image-service:
interface: glance
optional: true
receive-ca-cert:
interface: certificate_transfer
optional: true
logging:
interface: loki_push_api
optional: true
tracing:
interface: tracing
optional: true
limit: 1
peers:
peers:
interface: cinder-peer
parts:
update-certificates:
plugin: nil
@ -13,9 +87,9 @@ parts:
apt update
apt install -y ca-certificates
update-ca-certificates
charm:
after: [update-certificates]
after:
- update-certificates
build-packages:
- git
- libffi-dev

View File

@ -1,9 +0,0 @@
options:
debug:
default: False
description: Enable debug logging.
type: boolean
region:
default: RegionOne
description: Name of the OpenStack region
type: string

View File

@ -1,73 +0,0 @@
# Copyright 2021 Canonical Ltd
# See LICENSE file for licensing details.
name: cinder-k8s
summary: OpenStack volume service
maintainer: Openstack Charmers <openstack-charmers@lists.ubuntu.com>
description: |
Cinder is the OpenStack project that provides volume management for
instances.
version: 3
bases:
- name: ubuntu
channel: 22.04/stable
assumes:
- k8s-api
- juju >= 3.1
tags:
- openstack
- storage
- misc
source: https://opendev.org/openstack/charm-cinder-k8s
issues: https://bugs.launchpad.net/charm-cinder-k8s
containers:
cinder-api:
resource: cinder-api-image
cinder-scheduler:
resource: cinder-scheduler-image
resources:
cinder-api-image:
type: oci-image
description: OCI image for OpenStack Cinder API
upstream-source: ghcr.io/canonical/cinder-consolidated:2024.1
cinder-scheduler-image:
type: oci-image
description: OCI image for OpenStack Cinder Scheduler
upstream-source: ghcr.io/canonical/cinder-consolidated:2024.1
requires:
database:
interface: mysql_client
limit: 1
ingress-internal:
interface: ingress
optional: true
limit: 1
ingress-public:
interface: ingress
limit: 1
identity-service:
interface: keystone
limit: 1
amqp:
interface: rabbitmq
storage-backend:
interface: cinder-backend
image-service:
interface: glance
optional: true
receive-ca-cert:
interface: certificate_transfer
optional: true
logging:
interface: loki_push_api
optional: true
tracing:
interface: tracing
optional: true
limit: 1
peers:
peers:
interface: cinder-peer

View File

@ -1 +0,0 @@
../config.yaml

View File

@ -1,2 +0,0 @@
# NOTE: no actions yet!
{ }

View File

@ -1,11 +1,59 @@
type: charm
bases:
- build-on:
- name: ubuntu
channel: "22.04"
run-on:
- name: ubuntu
channel: "22.04"
name: designate-bind-k8s
summary: OpenStack designate-bind service
description: |
  Domain Name Service (DNS) is an Internet service that maps IP addresses and fully qualified domain names (FQDN) to one another.
  In this way, DNS alleviates the need to remember IP addresses. Computers that run DNS are called name servers.
  Ubuntu ships with BIND (Berkeley Internet Naming Daemon), the most common program used for maintaining a name server on Linux.
assumes:
- k8s-api
- juju >= 3.1
links:
source:
- https://opendev.org/openstack/charm-designate-bind-k8s
issues:
- https://bugs.launchpad.net/charm-designate-bind-k8s
base: ubuntu@22.04
platforms:
amd64:
config:
options:
debug:
default: false
description: Enable debug logging.
type: boolean
containers:
designate-bind:
resource: designate-bind-image
resources:
designate-bind-image:
type: oci-image
description: OCI image for bind9
upstream-source: ubuntu/bind9:9.18-22.04_beta
requires:
logging:
interface: loki_push_api
optional: true
tracing:
interface: tracing
optional: true
limit: 1
provides:
dns-backend:
interface: bind-rndc
peers:
peers:
interface: bind-peer
parts:
update-certificates:
plugin: nil
@ -13,9 +61,9 @@ parts:
apt update
apt install -y ca-certificates
update-ca-certificates
charm:
after: [update-certificates]
after:
- update-certificates
build-packages:
- git
- libffi-dev

View File

@ -1,5 +0,0 @@
options:
debug:
default: False
description: Enable debug logging.
type: boolean

View File

@ -1,45 +0,0 @@
name: designate-bind-k8s
summary: OpenStack designate-bind service
maintainer: OpenStack Charmers <openstack-charmers@lists.ubuntu.com>
description: |
Domain Name Service (DNS) is an Internet service that maps IP addresses and fully qualified domain names (FQDN) to one another.
In this way, DNS alleviates the need to remember IP addresses. Computers that run DNS are called name servers.
Ubuntu ships with BIND (Berkley Internet Naming Daemon), the most common program used for maintaining a name server on Linux.
version: 3
bases:
- name: ubuntu
channel: 22.04/stable
assumes:
- k8s-api
- juju >= 3.1
tags:
- openstack
source: https://opendev.org/openstack/charm-designate-bind-k8s
issues: https://bugs.launchpad.net/charm-designate-bind-k8s
containers:
designate-bind:
resource: designate-bind-image
resources:
designate-bind-image:
type: oci-image
description: OCI image for bind9
upstream-source: ubuntu/bind9:9.18-22.04_beta
provides:
dns-backend:
interface: bind-rndc
requires:
logging:
interface: loki_push_api
optional: true
tracing:
interface: tracing
optional: true
limit: 1
peers:
peers:
interface: bind-peer

View File

@ -1,2 +0,0 @@
# NOTE: no actions yet!
{ }

View File

@ -1,11 +1,88 @@
type: "charm"
bases:
- build-on:
- name: "ubuntu"
channel: "22.04"
run-on:
- name: "ubuntu"
channel: "22.04"
type: charm
name: designate-k8s
summary: OpenStack designate service
description: |
Designate is a multi-tenant DNSaaS service for OpenStack. It provides a REST API with integrated Keystone authentication.
It can be configured to auto-generate records based on Nova and Neutron actions.
Designate supports a variety of DNS servers including Bind9 and PowerDNS 4.
assumes:
- k8s-api
- juju >= 3.1
links:
source:
- https://opendev.org/openstack/charm-designate-k8s
issues:
- https://bugs.launchpad.net/charm-designate-k8s
base: ubuntu@22.04
platforms:
amd64:
config:
options:
debug:
default: false
description: Enable debug logging.
type: boolean
region:
default: RegionOne
description: Name of the OpenStack region
type: string
nameservers:
type: string
default: null
description: |
Space delimited list of nameservers. These are the nameservers that have
been provided to the domain registrar in order to delegate the domain to
Designate. e.g. "ns1.example.com. ns2.example.com."
containers:
designate:
resource: designate-image
resources:
designate-image:
type: oci-image
description: OCI image for OpenStack designate
upstream-source: ghcr.io/canonical/designate-consolidated:2024.1
requires:
database:
interface: mysql_client
limit: 1
identity-service:
interface: keystone
ingress-internal:
interface: ingress
optional: true
limit: 1
ingress-public:
interface: ingress
limit: 1
amqp:
interface: rabbitmq
dns-backend:
interface: bind-rndc
limit: 1
receive-ca-cert:
interface: certificate_transfer
optional: true
logging:
interface: loki_push_api
optional: true
tracing:
interface: tracing
optional: true
limit: 1
provides:
dnsaas:
interface: designate
peers:
peers:
interface: designate-peer
parts:
update-certificates:
plugin: nil
@ -13,9 +90,9 @@ parts:
apt update
apt install -y ca-certificates
update-ca-certificates
charm:
after: [update-certificates]
after:
- update-certificates
build-packages:
- git
- libffi-dev

View File

@ -1,16 +0,0 @@
options:
debug:
default: False
description: Enable debug logging.
type: boolean
region:
default: RegionOne
description: Name of the OpenStack region
type: string
nameservers:
type: string
default:
description: |
Space delimited list of nameservers. These are the nameservers that have
been provided to the domain registrar in order to delegate the domain to
Designate. e.g. "ns1.example.com. ns2.example.com."

View File

@ -1,65 +0,0 @@
name: designate-k8s
summary: OpenStack designate service
maintainer: OpenStack Charmers <openstack-charmers@lists.ubuntu.com>
description: |
Designate is a multi-tenant DNSaaS service for OpenStack. It provides a REST API with integrated Keystone authentication.
It can be configured to auto-generate records based on Nova and Neutron actions.
Designate supports a variety of DNS servers including Bind9 and PowerDNS 4.
version: 3
bases:
- name: ubuntu
channel: 22.04/stable
assumes:
- k8s-api
- juju >= 3.1
tags:
- openstack
source: https://opendev.org/openstack/charm-designate-k8s
issues: https://bugs.launchpad.net/charm-designate-k8s
containers:
designate:
resource: designate-image
resources:
designate-image:
type: oci-image
description: OCI image for OpenStack designate
upstream-source: ghcr.io/canonical/designate-consolidated:2024.1
provides:
dnsaas:
interface: designate
requires:
database:
interface: mysql_client
limit: 1
identity-service:
interface: keystone
ingress-internal:
interface: ingress
optional: true
limit: 1
ingress-public:
interface: ingress
limit: 1
amqp:
interface: rabbitmq
dns-backend:
interface: bind-rndc
limit: 1
receive-ca-cert:
interface: certificate_transfer
optional: true
logging:
interface: loki_push_api
optional: true
tracing:
interface: tracing
optional: true
limit: 1
peers:
peers:
interface: designate-peer

View File

@ -1 +0,0 @@
../config.yaml

View File

@ -1,8 +0,0 @@
# NOTE: no actions yet!
describe-status:
description: |
See an expanded view of the compound status.
For a neat human readable summary:
juju run-action --wait glance/0 describe-status --format=json | jq -r '.[].results.output'

View File

@ -1,11 +1,355 @@
type: "charm"
bases:
- build-on:
- name: "ubuntu"
channel: "22.04"
run-on:
- name: "ubuntu"
channel: "22.04"
type: charm
name: glance-k8s
summary: OpenStack Image Registry and Delivery Service
description: |
The Glance project provides an image registration and discovery service
and an image delivery service. These services are used in conjunction
by Nova to deliver images from object stores, such as OpenStack's Swift
service, to Nova's compute nodes.
assumes:
- k8s-api
- juju >= 3.1
links:
source:
- https://opendev.org/openstack/charm-glance-k8s
issues:
- https://bugs.launchpad.net/charm-glance-k8s
base: ubuntu@22.04
platforms:
amd64:
config:
options:
debug:
default: false
description: Enable debug logging.
type: boolean
region:
default: RegionOne
description: Name of the OpenStack region
type: string
ceph-osd-replication-count:
default: 3
type: int
description: |
This value dictates the number of replicas ceph must make of any
object it stores within the cinder rbd pool. Of course, this only
applies if using Ceph as a backend store. Note that once the cinder
rbd pool has been created, changing this value will not have any
effect (although it can be changed in ceph by manually configuring
your ceph cluster).
ceph-pool-weight:
type: int
default: 40
description: |
Defines a relative weighting of the pool as a percentage of the total
amount of data in the Ceph cluster. This effectively weights the number
of placement groups for the pool created to be appropriately portioned
to the amount of data expected. For example, if the ephemeral volumes
for the OpenStack compute instances are expected to take up 20% of the
overall configuration then this value would be specified as 20. Note -
it is important to choose an appropriate value for the pool weight as
this directly affects the number of placement groups which will be
created for the pool. The number of placement groups for a pool can
only be increased, never decreased - so it is important to identify the
percent of data that will likely reside in the pool.
volume-backend-name:
default: null
type: string
description: |
Volume backend name for the backend. The default value is the
application name in the Juju model, e.g. "cinder-ceph-mybackend"
if it's deployed as `juju deploy cinder-ceph cinder-ceph-mybackend`.
A common backend name can be set to multiple backends with the
same characters so that those can be treated as a single virtual
backend associated with a single volume type.
backend-availability-zone:
default: null
type: string
description: |
Availability zone name of this volume backend. If set, it will
override the default availability zone. Supported for Pike or
newer releases.
restrict-ceph-pools:
default: false
type: boolean
description: |
Optionally restrict Ceph key permissions to access pools as required.
rbd-pool-name:
default: null
type: string
description: |
Optionally specify an existing rbd pool that cinder should map to.
rbd-flatten-volume-from-snapshot:
default: false
type: boolean
description: |
Flatten volumes created from snapshots to remove dependency from
volume to snapshot. Supported on Queens+
rbd-mirroring-mode:
type: string
default: pool
description: |
The RBD mirroring mode used for the Ceph pool. This option is only used
with 'replicated' pool type, as it's not supported for 'erasure-coded'
pool type - valid values: 'pool' and 'image'
pool-type:
type: string
default: replicated
description: |
Ceph pool type to use for storage - valid values include `replicated`
and `erasure-coded`.
ec-profile-name:
type: string
default: null
description: |
Name for the EC profile to be created for the EC pools. If not defined
a profile name will be generated based on the name of the pool used by
the application.
ec-rbd-metadata-pool:
type: string
default: null
description: |
Name of the metadata pool to be created (for RBD use-cases). If not
defined a metadata pool name will be generated based on the name of
the data pool used by the application. The metadata pool is always
replicated, not erasure coded.
ec-profile-k:
type: int
default: 1
description: |
Number of data chunks that will be used for EC data pool. K+M factors
should never be greater than the number of available zones (or hosts)
for balancing.
ec-profile-m:
type: int
default: 2
description: |
Number of coding chunks that will be used for EC data pool. K+M factors
should never be greater than the number of available zones (or hosts)
for balancing.
ec-profile-locality:
type: int
default: null
description: |
(lrc plugin - l) Group the coding and data chunks into sets of size l.
For instance, for k=4 and m=2, when l=3 two groups of three are created.
Each set can be recovered without reading chunks from another set. Note
that using the lrc plugin does incur more raw storage usage than isa or
jerasure in order to reduce the cost of recovery operations.
ec-profile-crush-locality:
type: string
default: null
description: |
(lrc plugin) The type of the crush bucket in which each set of chunks
defined by l will be stored. For instance, if it is set to rack, each
group of l chunks will be placed in a different rack. It is used to
create a CRUSH rule step such as step choose rack. If it is not set,
no such grouping is done.
ec-profile-durability-estimator:
type: int
default: null
description: |
(shec plugin - c) The number of parity chunks each of which includes
each data chunk in its calculation range. The number is used as a
durability estimator. For instance, if c=2, 2 OSDs can be down
without losing data.
ec-profile-helper-chunks:
type: int
default: null
description: |
(clay plugin - d) Number of OSDs requested to send data during
recovery of a single chunk. d needs to be chosen such that
k+1 <= d <= k+m-1. Larger the d, the better the savings.
ec-profile-scalar-mds:
type: string
default: null
description: |
(clay plugin) specifies the plugin that is used as a building
block in the layered construction. It can be one of jerasure,
isa, shec (defaults to jerasure).
ec-profile-plugin:
type: string
default: jerasure
description: |
EC plugin to use for this applications pool. The following list of
plugins acceptable - jerasure, lrc, isa, shec, clay.
ec-profile-technique:
type: string
default: null
description: |
EC profile technique used for this applications pool - will be
validated based on the plugin configured via ec-profile-plugin.
Supported techniques are `reed_sol_van`, `reed_sol_r6_op`,
`cauchy_orig`, `cauchy_good`, `liber8tion` for jerasure,
`reed_sol_van`, `cauchy` for isa and `single`, `multiple`
for shec.
ec-profile-device-class:
type: string
default: null
description: |
Device class from CRUSH map to use for placement groups for
erasure profile - valid values: ssd, hdd or nvme (or leave
unset to not use a device class).
bluestore-compression-algorithm:
type: string
default: null
description: |
Compressor to use (if any) for pools requested by this charm.
.
NOTE: The ceph-osd charm sets a global default for this value (defaults
to 'lz4' unless configured by the end user) which will be used unless
specified for individual pools.
bluestore-compression-mode:
type: string
default: null
description: |
Policy for using compression on pools requested by this charm.
.
'none' means never use compression.
'passive' means use compression when clients hint that data is
compressible.
'aggressive' means use compression unless clients hint that
data is not compressible.
'force' means use compression under all circumstances even if the clients
hint that the data is not compressible.
bluestore-compression-required-ratio:
type: float
default: null
description: |
The ratio of the size of the data chunk after compression relative to the
original size must be at least this small in order to store the
compressed version on pools requested by this charm.
bluestore-compression-min-blob-size:
type: int
default: null
description: |
Chunks smaller than this are never compressed on pools requested by
this charm.
bluestore-compression-min-blob-size-hdd:
type: int
default: null
description: |
Value of bluestore compression min blob size for rotational media on
pools requested by this charm.
bluestore-compression-min-blob-size-ssd:
type: int
default: null
description: |
Value of bluestore compression min blob size for solid state media on
pools requested by this charm.
bluestore-compression-max-blob-size:
type: int
default: null
description: |
Chunks larger than this are broken into smaller blobs sizing bluestore
compression max blob size before being compressed on pools requested by
this charm.
bluestore-compression-max-blob-size-hdd:
type: int
default: null
description: |
Value of bluestore compression max blob size for rotational media on
pools requested by this charm.
bluestore-compression-max-blob-size-ssd:
type: int
default: null
description: |
Value of bluestore compression max blob size for solid state media on
pools requested by this charm.
enable-telemetry-notifications:
type: boolean
default: false
description: Enable notifications to send to telemetry.
image-size-cap:
type: string
default: 5GB
description: |
Maximum size of image a user can upload. Defaults to 5GB
(5368709120 bytes). Example values: 500M, 500MB, 5G, 5TB.
Valid units: K, KB, M, MB, G, GB, T, TB, P, PB. If no units provided,
bytes are assumed.
.
WARNING: this value should only be increased after careful consideration
and must be set to a value under 8EB (9223372036854775808 bytes).
image-conversion:
type: boolean
default: false
description: |
Enable conversion of all images to raw format during image import.
This only works on imported images (for example using 'openstack image create --import').
Does not work on regular image uploads (like 'openstack image create')
actions:
describe-status:
description: |
See an expanded view of the compound status.
For a neat human readable summary:
      juju run glance/0 describe-status --format=json | jq -r '.[].results.output'
containers:
glance-api:
resource: glance-api-image
mounts:
- storage: local-repository
location: /var/lib/glance/images
resources:
glance-api-image:
type: oci-image
description: OCI image for OpenStack Glance
upstream-source: ghcr.io/canonical/glance-api:2024.1
storage:
local-repository:
type: filesystem
minimum-size: 10GiB
description: |
A local filesystem storage repository for glance images to be saved to.
Note, this must be shared storage in order to support a highly
available glance image registry.
requires:
database:
interface: mysql_client
limit: 1
ingress-internal:
interface: ingress
optional: true
limit: 1
ingress-public:
interface: ingress
limit: 1
identity-service:
interface: keystone
limit: 1
amqp:
interface: rabbitmq
optional: true
ceph:
interface: ceph-client
optional: true
receive-ca-cert:
interface: certificate_transfer
optional: true
logging:
interface: loki_push_api
optional: true
tracing:
interface: tracing
optional: true
limit: 1
provides:
image-service:
interface: glance
peers:
peers:
interface: glance-peer
parts:
update-certificates:
plugin: nil
@ -13,9 +357,9 @@ parts:
apt update
apt install -y ca-certificates
update-ca-certificates
charm:
after: [update-certificates]
after:
- update-certificates
build-packages:
- git
- libffi-dev

View File

@ -1,261 +0,0 @@
options:
debug:
default: False
description: Enable debug logging.
type: boolean
region:
default: RegionOne
description: Name of the OpenStack region
type: string
ceph-osd-replication-count:
default: 3
type: int
description: |
This value dictates the number of replicas ceph must make of any
object it stores within the cinder rbd pool. Of course, this only
applies if using Ceph as a backend store. Note that once the cinder
rbd pool has been created, changing this value will not have any
effect (although it can be changed in ceph by manually configuring
your ceph cluster).
ceph-pool-weight:
type: int
default: 40
description: |
Defines a relative weighting of the pool as a percentage of the total
amount of data in the Ceph cluster. This effectively weights the number
of placement groups for the pool created to be appropriately portioned
to the amount of data expected. For example, if the ephemeral volumes
for the OpenStack compute instances are expected to take up 20% of the
overall configuration then this value would be specified as 20. Note -
it is important to choose an appropriate value for the pool weight as
this directly affects the number of placement groups which will be
created for the pool. The number of placement groups for a pool can
only be increased, never decreased - so it is important to identify the
percent of data that will likely reside in the pool.
volume-backend-name:
default:
type: string
description: |
Volume backend name for the backend. The default value is the
application name in the Juju model, e.g. "cinder-ceph-mybackend"
if it's deployed as `juju deploy cinder-ceph cinder-ceph-mybackend`.
A common backend name can be set to multiple backends with the
same characters so that those can be treated as a single virtual
backend associated with a single volume type.
backend-availability-zone:
default:
type: string
description: |
Availability zone name of this volume backend. If set, it will
override the default availability zone. Supported for Pike or
newer releases.
restrict-ceph-pools:
default: False
type: boolean
description: |
Optionally restrict Ceph key permissions to access pools as required.
rbd-pool-name:
default:
type: string
description: |
Optionally specify an existing rbd pool that cinder should map to.
rbd-flatten-volume-from-snapshot:
default:
type: boolean
default: False
description: |
Flatten volumes created from snapshots to remove dependency from
volume to snapshot. Supported on Queens+
rbd-mirroring-mode:
type: string
default: pool
description: |
The RBD mirroring mode used for the Ceph pool. This option is only used
with 'replicated' pool type, as it's not supported for 'erasure-coded'
pool type - valid values: 'pool' and 'image'
pool-type:
type: string
default: replicated
description: |
Ceph pool type to use for storage - valid values include replicated
and erasure-coded.
ec-profile-name:
type: string
default:
description: |
Name for the EC profile to be created for the EC pools. If not defined
a profile name will be generated based on the name of the pool used by
the application.
ec-rbd-metadata-pool:
type: string
default:
description: |
Name of the metadata pool to be created (for RBD use-cases). If not
defined a metadata pool name will be generated based on the name of
the data pool used by the application. The metadata pool is always
replicated, not erasure coded.
ec-profile-k:
type: int
default: 1
description: |
Number of data chunks that will be used for EC data pool. K+M factors
should never be greater than the number of available zones (or hosts)
for balancing.
ec-profile-m:
type: int
default: 2
description: |
Number of coding chunks that will be used for EC data pool. K+M factors
should never be greater than the number of available zones (or hosts)
for balancing.
ec-profile-locality:
type: int
default:
description: |
(lrc plugin - l) Group the coding and data chunks into sets of size l.
For instance, for k=4 and m=2, when l=3 two groups of three are created.
Each set can be recovered without reading chunks from another set. Note
that using the lrc plugin does incur more raw storage usage than isa or
jerasure in order to reduce the cost of recovery operations.
ec-profile-crush-locality:
type: string
default:
description: |
(lrc plugin) The type of the crush bucket in which each set of chunks
defined by l will be stored. For instance, if it is set to rack, each
group of l chunks will be placed in a different rack. It is used to
create a CRUSH rule step such as step choose rack. If it is not set,
no such grouping is done.
ec-profile-durability-estimator:
type: int
default:
description: |
(shec plugin - c) The number of parity chunks each of which includes
each data chunk in its calculation range. The number is used as a
durability estimator. For instance, if c=2, 2 OSDs can be down
without losing data.
ec-profile-helper-chunks:
type: int
default:
description: |
(clay plugin - d) Number of OSDs requested to send data during
recovery of a single chunk. d needs to be chosen such that
k+1 <= d <= k+m-1. Larger the d, the better the savings.
ec-profile-scalar-mds:
type: string
default:
description: |
(clay plugin) specifies the plugin that is used as a building
block in the layered construction. It can be one of jerasure,
isa, shec (defaults to jerasure).
ec-profile-plugin:
type: string
default: jerasure
description: |
EC plugin to use for this applications pool. The following list of
plugins acceptable - jerasure, lrc, isa, shec, clay.
ec-profile-technique:
type: string
default:
description: |
EC profile technique used for this applications pool - will be
validated based on the plugin configured via ec-profile-plugin.
Supported techniques are reed_sol_van, reed_sol_r6_op,
cauchy_orig, cauchy_good, liber8tion for jerasure,
reed_sol_van, cauchy for isa and single, multiple
for shec.
ec-profile-device-class:
type: string
default:
description: |
Device class from CRUSH map to use for placement groups for
erasure profile - valid values: ssd, hdd or nvme (or leave
unset to not use a device class).
bluestore-compression-algorithm:
type: string
default:
description: |
Compressor to use (if any) for pools requested by this charm.
.
NOTE: The ceph-osd charm sets a global default for this value (defaults
to 'lz4' unless configured by the end user) which will be used unless
specified for individual pools.
bluestore-compression-mode:
type: string
default:
description: |
Policy for using compression on pools requested by this charm.
.
'none' means never use compression.
'passive' means use compression when clients hint that data is
compressible.
'aggressive' means use compression unless clients hint that
data is not compressible.
'force' means use compression under all circumstances even if the clients
hint that the data is not compressible.
bluestore-compression-required-ratio:
type: float
default:
description: |
The ratio of the size of the data chunk after compression relative to the
original size must be at least this small in order to store the
compressed version on pools requested by this charm.
bluestore-compression-min-blob-size:
type: int
default:
description: |
Chunks smaller than this are never compressed on pools requested by
this charm.
bluestore-compression-min-blob-size-hdd:
type: int
default:
description: |
Value of bluestore compression min blob size for rotational media on
pools requested by this charm.
bluestore-compression-min-blob-size-ssd:
type: int
default:
description: |
Value of bluestore compression min blob size for solid state media on
pools requested by this charm.
bluestore-compression-max-blob-size:
type: int
default:
description: |
Chunks larger than this are broken into smaller blobs sizing bluestore
compression max blob size before being compressed on pools requested by
this charm.
bluestore-compression-max-blob-size-hdd:
type: int
default:
description: |
Value of bluestore compression max blob size for rotational media on
pools requested by this charm.
bluestore-compression-max-blob-size-ssd:
type: int
default:
description: |
Value of bluestore compression max blob size for solid state media on
pools requested by this charm.
enable-telemetry-notifications:
type: boolean
default: False
description: Enable notifications to send to telemetry.
image-size-cap:
type: string
default: 5GB
description: |
Maximum size of image a user can upload. Defaults to 5GB
(5368709120 bytes). Example values: 500M, 500MB, 5G, 5TB.
Valid units: K, KB, M, MB, G, GB, T, TB, P, PB. If no units provided,
bytes are assumed.
.
WARNING: this value should only be increased after careful consideration
and must be set to a value under 8EB (9223372036854775808 bytes).
image-conversion:
type: boolean
default: False
description: |
Enable conversion of all images to raw format during image import.
This only works on imported images (for example using 'openstack image create --import').
Does not work on regular image uploads (like 'openstack image create')

View File

@ -1,88 +0,0 @@
# Copyright 2021 Canonical Ltd
# See LICENSE file for licensing details.
name: glance-k8s
maintainer: OpenStack Charmers <openstack-charmers@lists.ubuntu.com>
summary: OpenStack Image Registry and Delivery Service
description: |
The Glance project provides an image registration and discovery service
and an image delivery service. These services are used in conjunction
by Nova to deliver images from object stores, such as OpenStack's Swift
service, to Nova's compute nodes.
version: 3
bases:
- name: ubuntu
channel: 22.04/stable
assumes:
- k8s-api
- juju >= 3.1
tags:
- openstack
- storage
- misc
source: https://opendev.org/openstack/charm-glance-k8s
issues: https://bugs.launchpad.net/charm-glance-k8s
containers:
glance-api:
resource: glance-api-image
mounts:
- storage: local-repository
# A persistent storage place to store glance images in a local file
# based repository. This must be shared storage in order to support a
# highly available glance registry.
location: /var/lib/glance/images
resources:
glance-api-image:
type: oci-image
description: OCI image for OpenStack Glance
# ghcr.io/canonical/glance-api:2024.1
upstream-source: ghcr.io/canonical/glance-api:2024.1
storage:
local-repository:
type: filesystem
minimum-size: 10GiB
description: |
A local filesystem storage repository for glance images to be saved to.
Note, this must be shared storage in order to support a highly
available glance image registry.
requires:
database:
interface: mysql_client
limit: 1
ingress-internal:
interface: ingress
optional: true
limit: 1
ingress-public:
interface: ingress
limit: 1
identity-service:
interface: keystone
limit: 1
amqp:
interface: rabbitmq
optional: true
ceph:
interface: ceph-client
optional: true
receive-ca-cert:
interface: certificate_transfer
optional: true
logging:
interface: loki_push_api
optional: true
tracing:
interface: tracing
optional: true
limit: 1
provides:
image-service:
interface: glance
peers:
peers:
interface: glance-peer

View File

@ -1 +0,0 @@
../actions.yaml

View File

@ -1 +0,0 @@
../config.yaml

View File

@ -1,2 +0,0 @@
# NOTE: no actions yet!
{ }

View File

@ -1,11 +1,314 @@
type: "charm"
bases:
- build-on:
- name: "ubuntu"
channel: "22.04"
run-on:
- name: "ubuntu"
channel: "22.04"
type: charm
name: gnocchi-k8s
summary: OpenStack gnocchi service
description: |
OpenStack Gnocchi provides a multi-tenant, HTTP-accessible time series
database and resource indexing service, optimized for storing and
querying aggregated metric measures at scale.
.
assumes:
- k8s-api
- juju >= 3.1
links:
source:
- https://opendev.org/openstack/charm-gnocchi-k8s
issues:
- https://bugs.launchpad.net/charm-gnocchi-k8s
base: ubuntu@22.04
platforms:
amd64:
config:
options:
debug:
default: false
description: Enable debug logging.
type: boolean
region:
default: RegionOne
description: Name of the OpenStack region
type: string
ceph-osd-replication-count:
default: 3
type: int
description: |
This value dictates the number of replicas ceph must make of any
object it stores within the cinder rbd pool. Of course, this only
applies if using Ceph as a backend store. Note that once the cinder
rbd pool has been created, changing this value will not have any
effect (although it can be changed in ceph by manually configuring
your ceph cluster).
ceph-pool-weight:
type: int
default: 40
description: |
Defines a relative weighting of the pool as a percentage of the total
amount of data in the Ceph cluster. This effectively weights the number
of placement groups for the pool created to be appropriately portioned
to the amount of data expected. For example, if the ephemeral volumes
for the OpenStack compute instances are expected to take up 20% of the
overall configuration then this value would be specified as 20. Note -
it is important to choose an appropriate value for the pool weight as
this directly affects the number of placement groups which will be
created for the pool. The number of placement groups for a pool can
only be increased, never decreased - so it is important to identify the
percent of data that will likely reside in the pool.
volume-backend-name:
default: null
type: string
description: |
Volume backend name for the backend. The default value is the
application name in the Juju model, e.g. "cinder-ceph-mybackend"
if it's deployed as `juju deploy cinder-ceph cinder-ceph-mybackend`.
A common backend name can be set to multiple backends with the
same characters so that those can be treated as a single virtual
backend associated with a single volume type.
backend-availability-zone:
default: null
type: string
description: |
Availability zone name of this volume backend. If set, it will
override the default availability zone. Supported for Pike or
newer releases.
restrict-ceph-pools:
default: false
type: boolean
description: |
Optionally restrict Ceph key permissions to access pools as required.
rbd-pool-name:
default: null
type: string
description: |
Optionally specify an existing rbd pool that cinder should map to.
rbd-flatten-volume-from-snapshot:
default: false
type: boolean
description: |
Flatten volumes created from snapshots to remove dependency from
volume to snapshot. Supported on Queens+
rbd-mirroring-mode:
type: string
default: pool
description: |
The RBD mirroring mode used for the Ceph pool. This option is only used
with 'replicated' pool type, as it's not supported for 'erasure-coded'
pool type - valid values: 'pool' and 'image'
pool-type:
type: string
default: replicated
description: |
Ceph pool type to use for storage - valid values include `replicated`
and `erasure-coded`.
ec-profile-name:
type: string
default: null
description: |
Name for the EC profile to be created for the EC pools. If not defined
a profile name will be generated based on the name of the pool used by
the application.
ec-rbd-metadata-pool:
type: string
default: null
description: |
Name of the metadata pool to be created (for RBD use-cases). If not
defined a metadata pool name will be generated based on the name of
the data pool used by the application. The metadata pool is always
replicated, not erasure coded.
ec-profile-k:
type: int
default: 1
description: |
Number of data chunks that will be used for EC data pool. K+M factors
should never be greater than the number of available zones (or hosts)
for balancing.
ec-profile-m:
type: int
default: 2
description: |
Number of coding chunks that will be used for EC data pool. K+M factors
should never be greater than the number of available zones (or hosts)
for balancing.
ec-profile-locality:
type: int
default: null
description: |
(lrc plugin - l) Group the coding and data chunks into sets of size l.
For instance, for k=4 and m=2, when l=3 two groups of three are created.
Each set can be recovered without reading chunks from another set. Note
that using the lrc plugin does incur more raw storage usage than isa or
jerasure in order to reduce the cost of recovery operations.
ec-profile-crush-locality:
type: string
default: null
description: |
(lrc plugin) The type of the crush bucket in which each set of chunks
defined by l will be stored. For instance, if it is set to rack, each
group of l chunks will be placed in a different rack. It is used to
create a CRUSH rule step such as step choose rack. If it is not set,
no such grouping is done.
ec-profile-durability-estimator:
type: int
default: null
description: |
(shec plugin - c) The number of parity chunks each of which includes
each data chunk in its calculation range. The number is used as a
durability estimator. For instance, if c=2, 2 OSDs can be down
without losing data.
ec-profile-helper-chunks:
type: int
default: null
description: |
(clay plugin - d) Number of OSDs requested to send data during
recovery of a single chunk. d needs to be chosen such that
k+1 <= d <= k+m-1. Larger the d, the better the savings.
ec-profile-scalar-mds:
type: string
default: null
description: |
(clay plugin) specifies the plugin that is used as a building
block in the layered construction. It can be one of jerasure,
isa, shec (defaults to jerasure).
ec-profile-plugin:
type: string
default: jerasure
description: |
EC plugin to use for this applications pool. The following list of
plugins acceptable - jerasure, lrc, isa, shec, clay.
ec-profile-technique:
type: string
default: null
description: |
EC profile technique used for this applications pool - will be
validated based on the plugin configured via ec-profile-plugin.
Supported techniques are `reed_sol_van`, `reed_sol_r6_op`,
`cauchy_orig`, `cauchy_good`, `liber8tion` for jerasure,
`reed_sol_van`, `cauchy` for isa and `single`, `multiple`
for shec.
ec-profile-device-class:
type: string
default: null
description: |
Device class from CRUSH map to use for placement groups for
erasure profile - valid values: ssd, hdd or nvme (or leave
unset to not use a device class).
bluestore-compression-algorithm:
type: string
default: null
description: |
Compressor to use (if any) for pools requested by this charm.
.
NOTE: The ceph-osd charm sets a global default for this value (defaults
to 'lz4' unless configured by the end user) which will be used unless
specified for individual pools.
bluestore-compression-mode:
type: string
default: null
description: |
Policy for using compression on pools requested by this charm.
.
'none' means never use compression.
'passive' means use compression when clients hint that data is
compressible.
'aggressive' means use compression unless clients hint that
data is not compressible.
'force' means use compression under all circumstances even if the clients
hint that the data is not compressible.
bluestore-compression-required-ratio:
type: float
default: null
description: |
The ratio of the size of the data chunk after compression relative to the
original size must be at least this small in order to store the
compressed version on pools requested by this charm.
bluestore-compression-min-blob-size:
type: int
default: null
description: |
Chunks smaller than this are never compressed on pools requested by
this charm.
bluestore-compression-min-blob-size-hdd:
type: int
default: null
description: |
Value of bluestore compression min blob size for rotational media on
pools requested by this charm.
bluestore-compression-min-blob-size-ssd:
type: int
default: null
description: |
Value of bluestore compression min blob size for solid state media on
pools requested by this charm.
bluestore-compression-max-blob-size:
type: int
default: null
description: |
Chunks larger than this are broken into smaller blobs sizing bluestore
compression max blob size before being compressed on pools requested by
this charm.
bluestore-compression-max-blob-size-hdd:
type: int
default: null
description: |
Value of bluestore compression max blob size for rotational media on
pools requested by this charm.
bluestore-compression-max-blob-size-ssd:
type: int
default: null
description: |
Value of bluestore compression max blob size for solid state media on
pools requested by this charm.
containers:
gnocchi-api:
resource: gnocchi-api-image
gnocchi-metricd:
resource: gnocchi-metricd-image
resources:
gnocchi-api-image:
type: oci-image
description: OCI image for OpenStack Gnocchi api service
upstream-source: ghcr.io/canonical/gnocchi-consolidated:2024.1
gnocchi-metricd-image:
type: oci-image
description: OCI image for OpenStack Gnocchi Metric service
upstream-source: ghcr.io/canonical/gnocchi-consolidated:2024.1
requires:
database:
interface: mysql_client
limit: 1
identity-service:
interface: keystone
ingress-internal:
interface: ingress
optional: true
limit: 1
ingress-public:
interface: ingress
limit: 1
ceph:
interface: ceph-client
receive-ca-cert:
interface: certificate_transfer
optional: true
logging:
interface: loki_push_api
optional: true
tracing:
interface: tracing
optional: true
limit: 1
provides:
gnocchi-service:
interface: gnocchi
peers:
peers:
interface: gnocchi-peer
parts:
update-certificates:
plugin: nil
@ -13,9 +316,9 @@ parts:
apt update
apt install -y ca-certificates
update-ca-certificates
charm:
after: [update-certificates]
after:
- update-certificates
build-packages:
- git
- libffi-dev

View File

@ -1,239 +0,0 @@
options:
debug:
default: False
description: Enable debug logging.
type: boolean
region:
default: RegionOne
description: Name of the OpenStack region
type: string
ceph-osd-replication-count:
default: 3
type: int
description: |
This value dictates the number of replicas ceph must make of any
object it stores within the cinder rbd pool. Of course, this only
applies if using Ceph as a backend store. Note that once the cinder
rbd pool has been created, changing this value will not have any
effect (although it can be changed in ceph by manually configuring
your ceph cluster).
ceph-pool-weight:
type: int
default: 40
description: |
Defines a relative weighting of the pool as a percentage of the total
amount of data in the Ceph cluster. This effectively weights the number
of placement groups for the pool created to be appropriately portioned
to the amount of data expected. For example, if the ephemeral volumes
for the OpenStack compute instances are expected to take up 20% of the
overall configuration then this value would be specified as 20. Note -
it is important to choose an appropriate value for the pool weight as
this directly affects the number of placement groups which will be
created for the pool. The number of placement groups for a pool can
only be increased, never decreased - so it is important to identify the
percent of data that will likely reside in the pool.
volume-backend-name:
default:
type: string
description: |
Volume backend name for the backend. The default value is the
application name in the Juju model, e.g. "cinder-ceph-mybackend"
if it's deployed as `juju deploy cinder-ceph cinder-ceph-mybackend`.
A common backend name can be set to multiple backends with the
same characters so that those can be treated as a single virtual
backend associated with a single volume type.
backend-availability-zone:
default:
type: string
description: |
Availability zone name of this volume backend. If set, it will
override the default availability zone. Supported for Pike or
newer releases.
restrict-ceph-pools:
default: False
type: boolean
description: |
Optionally restrict Ceph key permissions to access pools as required.
rbd-pool-name:
default:
type: string
description: |
Optionally specify an existing rbd pool that cinder should map to.
rbd-flatten-volume-from-snapshot:
type: boolean
default: False
description: |
Flatten volumes created from snapshots to remove dependency from
volume to snapshot. Supported on Queens+
rbd-mirroring-mode:
type: string
default: pool
description: |
The RBD mirroring mode used for the Ceph pool. This option is only used
with 'replicated' pool type, as it's not supported for 'erasure-coded'
pool type - valid values: 'pool' and 'image'
pool-type:
type: string
default: replicated
description: |
Ceph pool type to use for storage - valid values include replicated
and erasure-coded.
ec-profile-name:
type: string
default:
description: |
Name for the EC profile to be created for the EC pools. If not defined
a profile name will be generated based on the name of the pool used by
the application.
ec-rbd-metadata-pool:
type: string
default:
description: |
Name of the metadata pool to be created (for RBD use-cases). If not
defined a metadata pool name will be generated based on the name of
the data pool used by the application. The metadata pool is always
replicated, not erasure coded.
ec-profile-k:
type: int
default: 1
description: |
Number of data chunks that will be used for EC data pool. K+M factors
should never be greater than the number of available zones (or hosts)
for balancing.
ec-profile-m:
type: int
default: 2
description: |
Number of coding chunks that will be used for EC data pool. K+M factors
should never be greater than the number of available zones (or hosts)
for balancing.
ec-profile-locality:
type: int
default:
description: |
(lrc plugin - l) Group the coding and data chunks into sets of size l.
For instance, for k=4 and m=2, when l=3 two groups of three are created.
Each set can be recovered without reading chunks from another set. Note
that using the lrc plugin does incur more raw storage usage than isa or
jerasure in order to reduce the cost of recovery operations.
ec-profile-crush-locality:
type: string
default:
description: |
(lrc plugin) The type of the crush bucket in which each set of chunks
defined by l will be stored. For instance, if it is set to rack, each
group of l chunks will be placed in a different rack. It is used to
create a CRUSH rule step such as step choose rack. If it is not set,
no such grouping is done.
ec-profile-durability-estimator:
type: int
default:
description: |
(shec plugin - c) The number of parity chunks each of which includes
each data chunk in its calculation range. The number is used as a
durability estimator. For instance, if c=2, 2 OSDs can be down
without losing data.
ec-profile-helper-chunks:
type: int
default:
description: |
(clay plugin - d) Number of OSDs requested to send data during
recovery of a single chunk. d needs to be chosen such that
k+1 <= d <= k+m-1. Larger the d, the better the savings.
ec-profile-scalar-mds:
type: string
default:
description: |
(clay plugin) specifies the plugin that is used as a building
block in the layered construction. It can be one of jerasure,
isa, shec (defaults to jerasure).
ec-profile-plugin:
type: string
default: jerasure
description: |
EC plugin to use for this applications pool. The following list of
plugins acceptable - jerasure, lrc, isa, shec, clay.
ec-profile-technique:
type: string
default:
description: |
EC profile technique used for this applications pool - will be
validated based on the plugin configured via ec-profile-plugin.
Supported techniques are reed_sol_van, reed_sol_r6_op,
cauchy_orig, cauchy_good, liber8tion for jerasure,
reed_sol_van, cauchy for isa and single, multiple
for shec.
ec-profile-device-class:
type: string
default:
description: |
Device class from CRUSH map to use for placement groups for
erasure profile - valid values: ssd, hdd or nvme (or leave
unset to not use a device class).
bluestore-compression-algorithm:
type: string
default:
description: |
Compressor to use (if any) for pools requested by this charm.
.
NOTE: The ceph-osd charm sets a global default for this value (defaults
to 'lz4' unless configured by the end user) which will be used unless
specified for individual pools.
bluestore-compression-mode:
type: string
default:
description: |
Policy for using compression on pools requested by this charm.
.
'none' means never use compression.
'passive' means use compression when clients hint that data is
compressible.
'aggressive' means use compression unless clients hint that
data is not compressible.
'force' means use compression under all circumstances even if the clients
hint that the data is not compressible.
bluestore-compression-required-ratio:
type: float
default:
description: |
The ratio of the size of the data chunk after compression relative to the
original size must be at least this small in order to store the
compressed version on pools requested by this charm.
bluestore-compression-min-blob-size:
type: int
default:
description: |
Chunks smaller than this are never compressed on pools requested by
this charm.
bluestore-compression-min-blob-size-hdd:
type: int
default:
description: |
Value of bluestore compression min blob size for rotational media on
pools requested by this charm.
bluestore-compression-min-blob-size-ssd:
type: int
default:
description: |
Value of bluestore compression min blob size for solid state media on
pools requested by this charm.
bluestore-compression-max-blob-size:
type: int
default:
description: |
Chunks larger than this are broken into smaller blobs sizing bluestore
compression max blob size before being compressed on pools requested by
this charm.
bluestore-compression-max-blob-size-hdd:
type: int
default:
description: |
Value of bluestore compression max blob size for rotational media on
pools requested by this charm.
bluestore-compression-max-blob-size-ssd:
type: int
default:
description: |
Value of bluestore compression max blob size for solid state media on
pools requested by this charm.

View File

@ -1,71 +0,0 @@
name: gnocchi-k8s
summary: OpenStack gnocchi service
maintainer: OpenStack Charmers <openstack-charmers@lists.ubuntu.com>
description: |
OpenStack gnocchi provides an HTTP service for managing, selecting,
and claiming providers of classes of inventory representing available
resources in a cloud.
.
version: 3
bases:
- name: ubuntu
channel: 22.04/stable
assumes:
- k8s-api
- juju >= 3.1
tags:
- openstack
source: https://opendev.org/openstack/charm-gnocchi-k8s
issues: https://bugs.launchpad.net/charm-gnocchi-k8s
containers:
gnocchi-api:
resource: gnocchi-api-image
gnocchi-metricd:
resource: gnocchi-metricd-image
resources:
gnocchi-api-image:
type: oci-image
description: OCI image for OpenStack Gnocchi api service
# ghcr.io/canonical/gnocchi-api:2024.1
upstream-source: ghcr.io/canonical/gnocchi-consolidated:2024.1
gnocchi-metricd-image:
type: oci-image
description: OCI image for OpenStack Gnocchi Metric service
# ghcr.io/canonical/gnocchi-metricd:2024.1
upstream-source: ghcr.io/canonical/gnocchi-consolidated:2024.1
requires:
database:
interface: mysql_client
limit: 1
identity-service:
interface: keystone
ingress-internal:
interface: ingress
optional: true
limit: 1
ingress-public:
interface: ingress
limit: 1
ceph:
interface: ceph-client
receive-ca-cert:
interface: certificate_transfer
optional: true
logging:
interface: loki_push_api
optional: true
tracing:
interface: tracing
optional: true
limit: 1
provides:
gnocchi-service:
interface: gnocchi
peers:
peers:
interface: gnocchi-peer

View File

@ -1 +0,0 @@
../config.yaml

View File

@ -1,2 +0,0 @@
# NOTE: no actions yet!
{ }

View File

@ -1,11 +1,84 @@
type: "charm"
bases:
- build-on:
- name: "ubuntu"
channel: "22.04"
run-on:
- name: "ubuntu"
channel: "22.04"
type: charm
name: heat-k8s
summary: OpenStack heat service
description: |
Heat is the main project in the OpenStack Orchestration program. It implements an
orchestration engine to launch multiple composite cloud applications based on
templates in the form of text files that can be treated like code.
assumes:
- k8s-api
- juju >= 3.1
links:
source:
- https://opendev.org/openstack/charm-heat-k8s
issues:
- https://bugs.launchpad.net/charm-heat-k8s
base: ubuntu@22.04
platforms:
amd64:
config:
options:
debug:
default: false
description: Enable debug logging.
type: boolean
region:
default: RegionOne
description: Name of the OpenStack region
type: string
containers:
heat-api:
resource: heat-api-image
heat-api-cfn:
resource: heat-api-image
heat-engine:
resource: heat-engine-image
resources:
heat-api-image:
type: oci-image
description: OCI image for OpenStack Heat
upstream-source: ghcr.io/canonical/heat-consolidated:2024.1
heat-engine-image:
type: oci-image
description: OCI image for OpenStack Heat Engine
upstream-source: ghcr.io/canonical/heat-consolidated:2024.1
requires:
database:
interface: mysql_client
limit: 1
identity-service:
interface: keystone
traefik-route-internal:
interface: traefik_route
optional: true
limit: 1
traefik-route-public:
interface: traefik_route
limit: 1
amqp:
interface: rabbitmq
identity-ops:
interface: keystone-resources
receive-ca-cert:
interface: certificate_transfer
optional: true
logging:
interface: loki_push_api
optional: true
tracing:
interface: tracing
optional: true
limit: 1
peers:
peers:
interface: heat-peer
parts:
update-certificates:
plugin: nil
@ -13,9 +86,9 @@ parts:
apt update
apt install -y ca-certificates
update-ca-certificates
charm:
after: [update-certificates]
after:
- update-certificates
build-packages:
- git
- libffi-dev

View File

@ -1,9 +0,0 @@
options:
debug:
default: False
description: Enable debug logging.
type: boolean
region:
default: RegionOne
description: Name of the OpenStack region
type: string

View File

@ -1,70 +0,0 @@
name: heat-k8s
summary: OpenStack heat service
maintainer: OpenStack Charmers <openstack-charmers@lists.ubuntu.com>
description: |
Heat is the main project in the OpenStack Orchestration program. It implements an
orchestration engine to launch multiple composite cloud applications based on
templates in the form of text files that can be treated like code.
version: 3
bases:
- name: ubuntu
channel: 22.04/stable
assumes:
- k8s-api
- juju >= 3.1
tags:
- openstack
source: https://opendev.org/openstack/charm-heat-k8s
issues: https://bugs.launchpad.net/charm-heat-k8s
containers:
heat-api:
resource: heat-api-image
heat-api-cfn:
resource: heat-api-image
heat-engine:
resource: heat-engine-image
resources:
heat-api-image:
type: oci-image
description: OCI image for OpenStack Heat
# ghcr.io/canonical/heat-api:2024.1
upstream-source: ghcr.io/canonical/heat-consolidated:2024.1
heat-engine-image:
type: oci-image
description: OCI image for OpenStack Heat Engine
# ghcr.io/canonical/heat-engine:2024.1
upstream-source: ghcr.io/canonical/heat-consolidated:2024.1
requires:
database:
interface: mysql_client
limit: 1
identity-service:
interface: keystone
traefik-route-internal:
interface: traefik_route
optional: true
limit: 1
traefik-route-public:
interface: traefik_route
limit: 1
amqp:
interface: rabbitmq
identity-ops:
interface: keystone-resources
receive-ca-cert:
interface: certificate_transfer
optional: true
logging:
interface: loki_push_api
optional: true
tracing:
interface: tracing
optional: true
limit: 1
peers:
peers:
interface: heat-peer

View File

@ -1 +0,0 @@
../config.yaml

View File

@ -1,2 +0,0 @@
get-dashboard-url:
description: URL for access to the Horizon OpenStack Dashboard.

View File

@ -1,11 +1,242 @@
type: "charm"
bases:
- build-on:
- name: "ubuntu"
channel: "22.04"
run-on:
- name: "ubuntu"
channel: "22.04"
type: charm
name: horizon-k8s
summary: OpenStack Horizon service
description: |
OpenStack Horizon is the canonical web-based dashboard for OpenStack,
providing a graphical user interface for managing and monitoring
cloud resources and services.
.
assumes:
- k8s-api
- juju >= 3.1
links:
source:
- https://opendev.org/openstack/charm-horizon-k8s
issues:
- https://bugs.launchpad.net/charm-horizon-k8s
base: ubuntu@22.04
platforms:
amd64:
config:
options:
debug:
default: false
description: Enable debug logging.
type: boolean
session-timeout:
type: int
default: 3600
description: >-
A method to supersede the token timeout with a shorter dashboard
session timeout in seconds. For example, if your token expires in 60 minutes,
a value of 1800 will log users out after 30 minutes.
default-role:
type: string
default: member
description: |
Default role for Horizon operations that will be created in
Keystone upon introduction of an identity-service relation.
default-domain:
type: string
default: null
description: |
Default domain when authenticating with Horizon. Disables the domain
field in the login page.
secret:
type: string
default: null
description: |
Secret for Horizon to use when securing internal data; set this when
using multiple dashboard units.
dropdown-max-items:
type: int
default: 30
description: |
Max dropdown items to show in dropdown controls.
NOTE: This setting is supported >= OpenStack Liberty.
profile:
type: string
default: null
description: Default profile for the dashboard. Eg. cisco.
disable-instance-snapshot:
type: boolean
default: false
description: |
This setting disables Snapshots as a valid boot source for launching
instances. Snapshots sources won't show up in the Launch Instance modal
dialogue box. This option works from the Newton release, and has no
effect on earlier OpenStack releases.
cinder-backup:
type: boolean
default: false
description: Enable cinder backup panel.
password-retrieve:
type: boolean
default: false
description: Enable "Retrieve password" instance action.
customization-module:
type: string
default: ""
description: |
This option provides a means to enable customisation modules to modify
existing dashboards and panels.
allow-password-autocompletion:
type: boolean
default: false
description: |
Setting this to True will allow password form autocompletion by browser.
default-create-volume:
type: boolean
default: true
description: |
The default value for the option of creating a new volume in the
workflow for image and instance snapshot sources when launching an
instance. This option has an effect only to Ocata or newer
releases.
hide-create-volume:
type: boolean
default: false
description: |
Hide the "Create New Volume" option and rely on the
default-create-volume value during instance creation.
image-formats:
type: string
default: ""
description: |
The image-formats setting can be used to alter the default list of
advertised image formats. Many installations cannot use all the formats
that Glance recognizes, restricting the list here prevents unwanted
formats from being listed in Horizon which can lead to confusion.
.
This setting takes a space separated list, for example: iso qcow2 raw
.
Supported formats are: aki, ami, ari, docker, iso, ova, qcow2, raw, vdi,
vhd, vmdk.
.
If not provided, leave the option unconfigured which enables all of the
above.
api-result-limit:
type: int
default: 1000
description: |
The maximum number of objects (e.g. Swift objects or Glance images) to
display on a single page before providing a paging element (a "more" link)
to paginate results.
enable-fip-topology-check:
type: boolean
default: true
description: >-
By default Horizon checks that a project has a router attached
to an external network before allowing FIPs to be attached to a VM. Some use
cases will not meet this constraint, e.g. if the router is owned by a different
project. Setting this to False removes this check from Horizon.
disable-password-reveal:
type: boolean
default: false
description: |
If enabled, the reveal button for passwords is removed.
enforce-password-check:
type: boolean
default: true
description: |
If True, displays an `Admin Password` field on the Change Password form
to verify that it is indeed the admin logged-in who wants to change the password.
site-name:
type: string
default: ""
description: |
An unique site name for OpenStack deployment to be passed via the
application-dashboard relation
site-branding:
type: string
default: null
description: |
A brand name to be shown in the HTML title. The default value is
"OpenStack Dashboard", e.g. "Instance Overview - OpenStack Dashboard"
site-branding-link:
type: string
default: null
description: |
A custom hyperlink when the logo in the dashboard is clicked, e.g.
https://mycloud.example.com/. The default value is
"horizon:user_home" to open the top level of the dashboard.
help-url:
type: string
default: null
description: |
A custom hyperlink for the "Help" menu, e.g.
https://mycloud.example.com/help. The default value is
https://docs.openstack.org/
create-instance-flavor-sort-key:
type: string
default: null
description: |
This option can be used to customise the order instances are sorted in.
Support values include: id, name, ram, disk, and vcpus.
See https://docs.openstack.org/horizon/latest/configuration/settings.html#create-instance-flavor-sort
for more details.
create-instance-flavor-sort-reverse:
type: boolean
default: false
description: |
This option can be used to set the instance sorting to either ascending or descending.
Set True to sort in ascending order or False for descending order.
plugins:
type: string
default: "[]"
description: |
This option can be used to enable plugins for Horizon. The value should be a
JSON formatted list of plugin names.
actions:
get-dashboard-url:
description: URL for access to the Horizon OpenStack Dashboard.
containers:
horizon:
resource: horizon-image
resources:
horizon-image:
type: oci-image
description: OCI image for Horizon
upstream-source: ghcr.io/canonical/horizon:2024.1
requires:
database:
interface: mysql_client
limit: 1
ingress-internal:
interface: ingress
limit: 1
ingress-public:
interface: ingress
optional: true
limit: 1
identity-credentials:
interface: keystone-credentials
limit: 1
receive-ca-cert:
interface: certificate_transfer
optional: true
logging:
interface: loki_push_api
optional: true
tracing:
interface: tracing
optional: true
limit: 1
provides:
horizon:
interface: horizon
peers:
peers:
interface: horizon-peer
parts:
update-certificates:
plugin: nil
@ -13,9 +244,9 @@ parts:
apt update
apt install -y ca-certificates
update-ca-certificates
charm:
after: [update-certificates]
after:
- update-certificates
build-packages:
- git
- libffi-dev

View File

@ -1,169 +0,0 @@
options:
debug:
default: False
description: Enable debug logging.
type: boolean
session-timeout:
type: int
default: 3600
description:
A method to supersede the token timeout with a shorter dashboard session
timeout in seconds. For example, if your token expires in 60 minutes, a
value of 1800 will log users out after 30 minutes.
default-role:
type: string
default: "member"
description: |
Default role for Horizon operations that will be created in
Keystone upon introduction of an identity-service relation.
default-domain:
type: string
default:
description: |
Default domain when authenticating with Horizon. Disables the domain
field in the login page.
secret:
type: string
default:
description: |
Secret for Horizon to use when securing internal data; set this when
using multiple dashboard units.
dropdown-max-items:
type: int
default: 30
description: |
Max dropdown items to show in dropdown controls.
NOTE: This setting is supported >= OpenStack Liberty.
profile:
type: string
default:
    description: Default profile for the dashboard. E.g. cisco.
disable-instance-snapshot:
type: boolean
default: False
description: |
This setting disables Snapshots as a valid boot source for launching
      instances. Snapshot sources won't show up in the Launch Instance modal
dialogue box. This option works from the Newton release, and has no
effect on earlier OpenStack releases.
cinder-backup:
type: boolean
default: False
description: Enable cinder backup panel.
password-retrieve:
type: boolean
default: False
description: Enable "Retrieve password" instance action.
customization-module:
type: string
default: ""
description: |
This option provides a means to enable customisation modules to modify
existing dashboards and panels.
allow-password-autocompletion:
type: boolean
default: False
description: |
Setting this to True will allow password form autocompletion by browser.
default-create-volume:
type: boolean
default: True
description: |
The default value for the option of creating a new volume in the
workflow for image and instance snapshot sources when launching an
instance. This option has an effect only to Ocata or newer
releases.
hide-create-volume:
type: boolean
default: False
description: |
Hide the "Create New Volume" option and rely on the
default-create-volume value during instance creation.
image-formats:
type: string
default: ""
description: |
The image-formats setting can be used to alter the default list of
advertised image formats. Many installations cannot use all the formats
that Glance recognizes, restricting the list here prevents unwanted
formats from being listed in Horizon which can lead to confusion.
.
This setting takes a space separated list, for example: iso qcow2 raw
.
Supported formats are: aki, ami, ari, docker, iso, ova, qcow2, raw, vdi,
vhd, vmdk.
.
If not provided, leave the option unconfigured which enables all of the
above.
api-result-limit:
type: int
default: 1000
description: |
The maximum number of objects (e.g. Swift objects or Glance images) to
display on a single page before providing a paging element (a "more" link)
to paginate results.
enable-fip-topology-check:
type: boolean
default: true
description:
By default Horizon checks that a project has a router attached to an
external network before allowing FIPs to be attached to a VM. Some use
cases will not meet this constraint, e.g. if the router is owned by a
different project. Setting this to False removes this check from Horizon.
disable-password-reveal:
type: boolean
default: false
description: |
If enabled, the reveal button for passwords is removed.
enforce-password-check:
type: boolean
default: True
description: |
If True, displays an Admin Password field on the Change Password form
to verify that it is indeed the admin logged-in who wants to change the password.
site-name:
type: string
default: ''
description: |
      A unique site name for OpenStack deployment to be passed via the
application-dashboard relation
site-branding:
type: string
default:
description: |
A brand name to be shown in the HTML title. The default value is
"OpenStack Dashboard", e.g. "Instance Overview - OpenStack Dashboard"
site-branding-link:
type: string
default:
description: |
A custom hyperlink when the logo in the dashboard is clicked, e.g.
https://mycloud.example.com/. The default value is
"horizon:user_home" to open the top level of the dashboard.
help-url:
type: string
default:
description: |
A custom hyperlink for the "Help" menu, e.g.
https://mycloud.example.com/help. The default value is
https://docs.openstack.org/
create-instance-flavor-sort-key:
type: string
default:
description: |
This option can be used to customise the order instances are sorted in.
      Support values include: id, name, ram, disk, and vcpus.
      Supported values include: id, name, ram, disk, and vcpus.
See https://docs.openstack.org/horizon/latest/configuration/settings.html#create-instance-flavor-sort
for more details.
create-instance-flavor-sort-reverse:
type: boolean
default: False
description: |
This option can be used to set the instance sorting to either ascending or descending.
Set True to sort in ascending order or False for descending order.
plugins:
type: string
default: '[]'
description: |
This option can be used to enable plugins for Horizon. The value should be a
JSON formatted list of plugin names.

View File

@ -1,63 +0,0 @@
name: horizon-k8s
summary: OpenStack Horizon service
maintainer: OpenStack Charmers <openstack-charmers@lists.ubuntu.com>
description: |
OpenStack Horizon provides an HTTP service for managing, selecting,
and claiming providers of classes of inventory representing available
resources in a cloud.
.
version: 3
bases:
- name: ubuntu
channel: 22.04/stable
assumes:
- k8s-api
- juju >= 3.1
tags:
- openstack
source: https://opendev.org/openstack/charm-horizon-k8s
issues: https://bugs.launchpad.net/charm-horizon-k8s
containers:
horizon:
resource: horizon-image
resources:
horizon-image:
type: oci-image
description: OCI image for Horizon
# ghcr.io/canonical/horizon:2024.1
upstream-source: ghcr.io/canonical/horizon:2024.1
requires:
database:
interface: mysql_client
limit: 1
ingress-internal:
interface: ingress
limit: 1
ingress-public:
interface: ingress
optional: true
limit: 1
identity-credentials:
interface: keystone-credentials
limit: 1
receive-ca-cert:
interface: certificate_transfer
optional: true
logging:
interface: loki_push_api
optional: true
tracing:
interface: tracing
optional: true
limit: 1
provides:
horizon:
interface: horizon
peers:
peers:
interface: horizon-peer

View File

@ -1 +0,0 @@
../actions.yaml

View File

@ -1,60 +0,0 @@
# Copyright 2021 Canonical Ltd.
# See LICENSE file for licensing details.
get-admin-password:
description: Get the password for the Keystone Admin user
get-admin-account:
description: Get full access details for the Keystone Admin user
get-service-account:
description: Create/get details for a new/existing service account.
params:
username:
type: string
description: The username for the service account.
required:
- username
additionalProperties: False
regenerate-password:
description: |
Regenerate password for the given user.
params:
username:
type: string
description: The username for the account.
required:
- username
additionalProperties: False
add-ca-certs:
description: |
Add CA certs for transfer
params:
name:
type: string
description: Name of CA certs bundle
ca:
type: string
description: Base64 encoded CA certificate
chain:
type: string
description: Base64 encoded CA Chain
required:
- name
- ca
additionalProperties: False
remove-ca-certs:
description: |
Remove CA certs
params:
name:
type: string
description: Name of CA certs bundle
required:
- name
additionalProperties: False
list-ca-certs:
description: |
List CA certs uploaded for transfer

View File

@ -1,11 +1,177 @@
type: "charm"
bases:
- build-on:
- name: "ubuntu"
channel: "22.04"
run-on:
- name: "ubuntu"
channel: "22.04"
type: charm
name: keystone-k8s
summary: OpenStack identity service
description: |
Keystone is an OpenStack project that provides Identity, Token, Catalog and
Policy services for use specifically by projects in the OpenStack family. It
implements OpenStack's Identity API.
assumes:
- k8s-api
- juju >= 3.1
links:
source:
- https://opendev.org/openstack/charm-keystone-k8s
issues:
- https://bugs.launchpad.net/charm-keystone-k8s
base: ubuntu@22.04
platforms:
amd64:
config:
options:
debug:
default: false
description: Enable debug logging.
type: boolean
log-level:
default: WARNING
type: string
description: Log level (WARNING, INFO, DEBUG, ERROR)
region:
default: RegionOne
description: Name of the OpenStack region
type: string
catalog-cache-expiration:
type: int
default: 60
description: Amount of time (in seconds) the catalog should be cached for.
dogpile-cache-expiration:
type: int
default: 60
description: |
Amount of time (in seconds) to cache items in the dogpile.cache. This only applies
to cached methods that do not have an explicitly defined cache expiration time.
identity-backend:
type: string
default: sql
description: |
Keystone identity backend, valid options are sql and pam
enable-telemetry-notifications:
type: boolean
default: false
description: Enable notifications to send to telemetry.
actions:
get-admin-password:
description: Get the password for the Keystone Admin user
get-admin-account:
description: Get full access details for the Keystone Admin user
get-service-account:
description: Create/get details for a new/existing service account.
params:
username:
type: string
description: The username for the service account.
required:
- username
additionalProperties: false
regenerate-password:
description: |
Regenerate password for the given user.
params:
username:
type: string
description: The username for the account.
required:
- username
additionalProperties: false
add-ca-certs:
description: |
Add CA certs for transfer
params:
name:
type: string
description: Name of CA certs bundle
ca:
type: string
description: Base64 encoded CA certificate
chain:
type: string
description: Base64 encoded CA Chain
required:
- name
- ca
additionalProperties: false
remove-ca-certs:
description: |
Remove CA certs
params:
name:
type: string
description: Name of CA certs bundle
required:
- name
additionalProperties: false
list-ca-certs:
description: |
List CA certs uploaded for transfer
containers:
keystone:
resource: keystone-image
mounts:
- storage: fernet-keys
location: /etc/keystone/fernet-keys/
- storage: credential-keys
location: /etc/keystone/credential-keys/
resources:
keystone-image:
type: oci-image
description: OCI image for OpenStack Keystone
upstream-source: ghcr.io/canonical/keystone:2024.1
storage:
fernet-keys:
type: filesystem
description: |
Persistent storage for the location of fernet keys
minimum-size: 5M
credential-keys:
type: filesystem
description: |
Persistent storage for the location of credential keys
minimum-size: 5M
requires:
database:
interface: mysql_client
limit: 1
ingress-internal:
interface: ingress
limit: 1
optional: true
ingress-public:
interface: ingress
limit: 1
amqp:
interface: rabbitmq
optional: true
domain-config:
interface: keystone-domain-config
logging:
interface: loki_push_api
optional: true
tracing:
interface: tracing
optional: true
limit: 1
provides:
identity-service:
interface: keystone
identity-credentials:
interface: keystone-credentials
identity-ops:
interface: keystone-resources
send-ca-cert:
interface: certificate_transfer
peers:
peers:
interface: keystone-peer
parts:
update-certificates:
plugin: nil
@ -13,9 +179,9 @@ parts:
apt update
apt install -y ca-certificates
update-ca-certificates
charm:
after: [update-certificates]
after:
- update-certificates
build-packages:
- git
- libffi-dev

View File

@ -1,39 +0,0 @@
# Copyright 2021 Canonical Ltd.
# See LICENSE file for licensing details.
#
options:
debug:
default: False
description: Enable debug logging.
type: boolean
log-level:
default: WARNING
type: string
description: Log level (WARNING, INFO, DEBUG, ERROR)
region:
default: RegionOne
description: Name of the OpenStack region
type: string
catalog-cache-expiration:
type: int
default: 60
description: Amount of time (in seconds) the catalog should be cached for.
dogpile-cache-expiration:
type: int
default: 60
description: |
Amount of time (in seconds) to cache items in the dogpile.cache. This only applies
to cached methods that do not have an explicitly defined cache expiration time.
identity-backend:
type: string
default: "sql"
description: |
Keystone identity backend, valid options are sql and pam
enable-telemetry-notifications:
type: boolean
default: False
description: Enable notifications to send to telemetry.

View File

@ -1,98 +0,0 @@
# Copyright 2021 Canonical Ltd.
# See LICENSE file for licensing details.
name: keystone-k8s
summary: OpenStack identity service
maintainer: Openstack Charmers <openstack-charmers@lists.ubuntu.com>
description: |
Keystone is an OpenStack project that provides Identity, Token, Catalog and
Policy services for use specifically by projects in the OpenStack family. It
implements OpenStack's Identity API.
version: 3
bases:
- name: ubuntu
channel: 22.04/stable
assumes:
- k8s-api
- juju >= 3.1
tags:
- openstack
- identity
- misc
source: https://opendev.org/openstack/charm-keystone-k8s
issues: https://bugs.launchpad.net/charm-keystone-k8s
provides:
identity-service:
interface: keystone
identity-credentials:
interface: keystone-credentials
identity-ops:
interface: keystone-resources
send-ca-cert:
interface: certificate_transfer
requires:
database:
interface: mysql_client
limit: 1
ingress-internal:
interface: ingress
limit: 1
optional: true
ingress-public:
interface: ingress
limit: 1
amqp:
interface: rabbitmq
optional: true
domain-config:
interface: keystone-domain-config
logging:
interface: loki_push_api
optional: true
tracing:
interface: tracing
optional: true
limit: 1
peers:
peers:
interface: keystone-peer
storage:
fernet-keys:
type: filesystem
description: |
Persistent storage for the location of fernet keys
minimum-size: 5M
credential-keys:
type: filesystem
description: |
Persistent storage for the location of credential keys
minimum-size: 5M
containers:
# The primary container that runs the keystone services
keystone:
resource: keystone-image
mounts:
- storage: fernet-keys
# The fernet keys used for generated tokens are stored here. With a
# mounted storage option, the fernet keys are persisted across
# container restarts.
location: /etc/keystone/fernet-keys/
- storage: credential-keys
# The credential keys used for generated credentials are stored here.
# With a mounted storage option, the credential keys are persisted
# across container restarts.
location: /etc/keystone/credential-keys/
resources:
keystone-image:
type: oci-image
description: OCI image for OpenStack Keystone
# ghcr.io/canonical/keystone:2024.1
upstream-source: ghcr.io/canonical/keystone:2024.1

View File

@ -1 +0,0 @@
../actions.yaml

View File

@ -1 +0,0 @@
../config.yaml

View File

@ -1,11 +1,64 @@
type: "charm"
bases:
- build-on:
- name: "ubuntu"
channel: "22.04"
run-on:
- name: "ubuntu"
channel: "22.04"
type: charm
title: Keystone LDAP integration
name: keystone-ldap-k8s
summary: Keystone Domain backend for LDAP or Active Directory
description: |
Keystone support the use of domain specific identity drivers,
allowing different types of authentication backend to be deployed in a single Keystone
deployment. This charm supports use of LDAP or Active Directory domain backends,
with configuration details provided by charm configuration options.
base: ubuntu@22.04
platforms:
amd64:
config:
options:
domain-name:
type: string
default: null
description: |
Name of the keystone domain to configure; defaults to the deployed
application name.
ldap-config-flags:
type: string
default: null
description: |
        There are ~50 LDAP configuration options supported by keystone.
Use a json like string with double quotes
and braces around all the options and single quotes around complex values.
"{user_tree_dn: 'DC=dc1,DC=ad,DC=example,DC=com',
user_allow_create: False,
user_allow_delete: False}"
See the README for more details.
tls-ca-ldap:
type: string
default: null
description: |
This option controls which certificate (or a chain) will be used to connect
to an ldap server(s) over TLS. Certificate contents should be either used
directly or included via include-file://
An LDAP url should also be considered as ldaps and StartTLS are both valid
methods of using TLS (see RFC 4513) with StartTLS using a non-ldaps url which,
of course, still requires a CA certificate.
requires:
logging:
interface: loki_push_api
optional: true
tracing:
interface: tracing
optional: true
limit: 1
provides:
domain-config:
interface: keystone-domain-config
peers:
peers:
interface: keystone-dc-peer
parts:
update-certificates:
plugin: nil
@ -13,9 +66,9 @@ parts:
apt update
apt install -y ca-certificates
update-ca-certificates
charm:
after: [update-certificates]
after:
- update-certificates
build-packages:
- git
- libffi-dev

View File

@ -1,28 +0,0 @@
options:
domain-name:
type: string
default:
description: |
Name of the keystone domain to configure; defaults to the deployed
application name.
ldap-config-flags:
type: string
default:
description: |
      There are ~50 LDAP configuration options supported by keystone.
Use a json like string with double quotes
and braces around all the options and single quotes around complex values.
"{user_tree_dn: 'DC=dc1,DC=ad,DC=example,DC=com',
user_allow_create: False,
user_allow_delete: False}"
See the README for more details.
tls-ca-ldap:
type: string
default: null
description: |
This option controls which certificate (or a chain) will be used to connect
to an ldap server(s) over TLS. Certificate contents should be either used
directly or included via include-file://
An LDAP url should also be considered as ldaps and StartTLS are both valid
methods of using TLS (see RFC 4513) with StartTLS using a non-ldaps url which,
of course, still requires a CA certificate.

View File

@ -1,22 +0,0 @@
name: keystone-ldap-k8s
display-name: Keystone LDAP integration
summary: Keystone Domain backend for LDAP or Active Directory
description: |
Keystone support the use of domain specific identity drivers,
allowing different types of authentication backend to be deployed in a single Keystone
deployment. This charm supports use of LDAP or Active Directory domain backends,
with configuration details provided by charm configuration options.
peers:
peers:
interface: keystone-dc-peer
provides:
domain-config:
interface: keystone-domain-config
requires:
logging:
interface: loki_push_api
optional: true
tracing:
interface: tracing
optional: true
limit: 1

View File

@ -1 +0,0 @@
../config.yaml

View File

@ -1,2 +0,0 @@
# NOTE: no actions yet!
{ }

View File

@ -1,11 +1,86 @@
type: "charm"
bases:
- build-on:
- name: "ubuntu"
channel: "22.04"
run-on:
- name: "ubuntu"
channel: "22.04"
type: charm
name: magnum-k8s
summary: OpenStack magnum service
description: |
Magnum is an OpenStack project which offers container orchestration engines
for deploying and managing containers as first class resources in OpenStack.
assumes:
- k8s-api
- juju >= 3.1
base: ubuntu@22.04
platforms:
amd64:
config:
options:
cluster-user-trust:
type: boolean
default: false
description: |
Controls whether to assign a trust to the cluster user or not. You will
need to set it to True for clusters with volume_driver=cinder or
registry_enabled=true in the underlying cluster template to work. This is
a potential security risk since the trust gives instances OpenStack API
access to the cluster's project. Note that this setting does not affect
per-cluster trusts assigned to the Magnum service user.
debug:
default: false
description: Enable debug logging.
type: boolean
region:
default: RegionOne
description: Name of the OpenStack region
type: string
containers:
magnum-api:
resource: magnum-api-image
magnum-conductor:
resource: magnum-conductor-image
resources:
magnum-api-image:
type: oci-image
description: OCI image for OpenStack magnum
upstream-source: ghcr.io/canonical/magnum-consolidated:2024.1
magnum-conductor-image:
type: oci-image
description: OCI image for OpenStack magnum
upstream-source: ghcr.io/canonical/magnum-consolidated:2024.1
requires:
database:
interface: mysql_client
limit: 1
identity-service:
interface: keystone
identity-ops:
interface: keystone-resources
ingress-internal:
interface: ingress
optional: true
limit: 1
ingress-public:
interface: ingress
limit: 1
amqp:
interface: rabbitmq
receive-ca-cert:
interface: certificate_transfer
optional: true
logging:
interface: loki_push_api
optional: true
tracing:
interface: tracing
optional: true
limit: 1
peers:
peers:
interface: magnum-peer
parts:
charm:
build-packages:

View File

@ -1,19 +0,0 @@
options:
cluster-user-trust:
type: boolean
default: False
description: |
Controls whether to assign a trust to the cluster user or not. You will
need to set it to True for clusters with volume_driver=cinder or
registry_enabled=true in the underlying cluster template to work. This is
a potential security risk since the trust gives instances OpenStack API
access to the cluster's project. Note that this setting does not affect
per-cluster trusts assigned to the Magnum service user.
debug:
default: False
description: Enable debug logging.
type: boolean
region:
default: RegionOne
description: Name of the OpenStack region
type: string

View File

@ -1,67 +0,0 @@
name: magnum-k8s
summary: OpenStack magnum service
maintainer: OpenStack Charmers <openstack-charmers@lists.ubuntu.com>
description: |
Magnum is an OpenStack project which offers container orchestration engines
for deploying and managing containers as first class resources in OpenStack.
version: 3
bases:
- name: ubuntu
channel: 22.04/stable
assumes:
- k8s-api
- juju >= 3.1
tags:
- openstack
- container-orchestration
- misc
containers:
magnum-api:
resource: magnum-api-image
magnum-conductor:
resource: magnum-conductor-image
resources:
magnum-api-image:
type: oci-image
description: OCI image for OpenStack magnum
# ghcr.io/canonical/magnum-consolidated:2024.1
upstream-source: ghcr.io/canonical/magnum-consolidated:2024.1
magnum-conductor-image:
type: oci-image
description: OCI image for OpenStack magnum
# ghcr.io/canonical/magnum-consolidated:2024.1
upstream-source: ghcr.io/canonical/magnum-consolidated:2024.1
requires:
database:
interface: mysql_client
limit: 1
identity-service:
interface: keystone
identity-ops:
interface: keystone-resources
ingress-internal:
interface: ingress
optional: true
limit: 1
ingress-public:
interface: ingress
limit: 1
amqp:
interface: rabbitmq
receive-ca-cert:
interface: certificate_transfer
optional: true
logging:
interface: loki_push_api
optional: true
tracing:
interface: tracing
optional: true
limit: 1
peers:
peers:
interface: magnum-peer

View File

@ -1 +0,0 @@
../config.yaml

View File

@ -1,34 +1,6 @@
type: "charm"
bases:
- build-on:
- name: "ubuntu"
channel: "22.04"
run-on:
- name: "ubuntu"
channel: "22.04"
parts:
update-certificates:
plugin: nil
override-build: |
apt update
apt install -y ca-certificates
update-ca-certificates
charm:
after: [update-certificates]
build-packages:
- git
- libffi-dev
- libssl-dev
- rustc
- cargo
- pkg-config
charm-binary-python-packages:
- cryptography
- jsonschema
- pydantic
- jinja2
name: masakari-k8s
type: charm
title: OpenStack masakari service
name: masakari-k8s
summary: Masakari - Instances High Availability Service
description: |
Masakari provides Instances High Availability Service for OpenStack clouds by
@ -36,25 +8,52 @@ description: |
KVM-based Virtual Machine(VM)s from failure events such as VM process down,
provisioning process down, and nova-compute host failure. Masakari also
provides an API service to manage and control the automated rescue mechanism.
links:
source: https://opendev.org/openstack/sunbeam-charms
issues: https://bugs.launchpad.net/sunbeam-charms
assumes:
- k8s-api
- juju >= 3.5
links:
source: https://opendev.org/openstack/sunbeam-charms
issues: https://bugs.launchpad.net/sunbeam-charms
base: ubuntu@22.04
platforms:
amd64:
config:
options:
debug:
default: false
description: Enable debug logging.
type: boolean
evacuation-delay:
type: int
default: 60
description: |
Number of seconds to wait before evacuation after a service is
enabled or disabled.
evacuate-all-instances:
type: boolean
default: true
description: |
Whether to restrict instance evacuation to instances with ha enabled
in their metadata
region:
type: string
default: RegionOne
description: Name of the OpenStack region
containers:
masakari-api:
resource: masakari-image
masakari-engine:
resource: masakari-image
# Note(mylesjp): disabled until implemented
# host-monitor:
# resource: masakari-image
resources:
masakari-image:
description: OCI image for OpenStack Masakari services
type: oci-image
upstream-source: ghcr.io/canonical/masakari-consolidated:2024.1
requires:
amqp:
interface: rabbitmq
@ -84,41 +83,42 @@ requires:
interface: tracing
limit: 1
optional: true
# Note(mylesjp): consul disabled until charm is published
# consul-management:
# interface: consul-client
# limit: 1
# consul-tenant: # Name TBD
# interface: consul-client
# limit: 1
# optional: true
# consul-storage:
# interface: consul-client
# limit: 1
# optional: true
# Note(mylesjp): consul disabled until charm is published
# consul-management:
# interface: consul-client
# limit: 1
# consul-tenant: # Name TBD
# interface: consul-client
# limit: 1
# optional: true
# consul-storage:
# interface: consul-client
# limit: 1
# optional: true
peers:
peers:
interface: masakari-peer
config:
options:
debug:
default: False
description: Enable debug logging.
type: boolean
evacuation-delay:
type: int
default: 60
description: |
Number of seconds to wait before evacuation after a service is
enabled or disabled.
evacuate-all-instances:
type: boolean
default: True
description: |
Whether to restrict instance evacuation to instances with ha enabled
in their metadata
region:
type: string
default: RegionOne
description: Name of the OpenStack region
actions: {}
parts:
update-certificates:
plugin: nil
override-build: |
apt update
apt install -y ca-certificates
update-ca-certificates
charm:
after:
- update-certificates
build-packages:
- git
- libffi-dev
- libssl-dev
- rustc
- cargo
- pkg-config
charm-binary-python-packages:
- cryptography
- jsonschema
- pydantic
- jinja2

View File

@ -16,17 +16,8 @@
"""Tests for masakari-k8s charm."""
from pathlib import (
Path,
)
import charm
import ops_sunbeam.test_utils as test_utils
import yaml
charmcraft = (Path(__file__).parents[2] / "charmcraft.yaml").read_text()
config = yaml.dump(yaml.safe_load(charmcraft)["config"])
actions = yaml.dump(yaml.safe_load(charmcraft)["actions"])
class _MasakariOperatorCharm(charm.MasakariOperatorCharm):
@ -57,9 +48,6 @@ class TestMasakariOperatorCharm(test_utils.CharmTestCase):
self.harness = test_utils.get_harness(
_MasakariOperatorCharm,
container_calls=self.container_calls,
charm_metadata=charmcraft,
charm_config=config,
charm_actions=actions,
)
from charms.data_platform_libs.v0.data_interfaces import (

View File

@ -1,2 +0,0 @@
# NOTE: no actions yet!
{ }

View File

@ -1,11 +1,167 @@
type: "charm"
bases:
- build-on:
- name: "ubuntu"
channel: "22.04"
run-on:
- name: "ubuntu"
channel: "22.04"
type: charm
name: neutron-k8s
summary: OpenStack Networking API service
description: |
Neutron is a virtual network service for OpenStack, and a part of
Netstack. Just like OpenStack Nova provides an API to dynamically
request and configure virtual servers, Neutron provides an API to
dynamically request and configure virtual networks. These networks
connect "interfaces" from other OpenStack services (e.g., virtual NICs
from Nova VMs). The Neutron API supports extensions to provide
advanced network capabilities (e.g., QoS, ACLs, network monitoring,
etc.)
.
This charm provides the OpenStack Neutron API service.
assumes:
- k8s-api
- juju >= 3.1
links:
source:
- https://opendev.org/openstack/charm-neutron-k8s
issues:
- https://bugs.launchpad.net/charm-neutron-k8s
base: ubuntu@22.04
platforms:
amd64:
config:
options:
debug:
default: false
description: Enable debug logging.
type: boolean
dns-domain:
default: cloud.sunbeam.internal.
description: |
Specifies the dns domain name that should be used for building instance
hostnames. The value of 'openstacklocal' will cause
the dhcp agents to broadcast the default domain of openstacklocal and
will not enable internal cloud dns resolution. This value should end
with a '.', e.g. 'cloud.example.org.'.
type: string
region:
default: RegionOne
description: Name of the OpenStack region
type: string
vlan-ranges:
default: physnet1:1:4094
description: |
Space-delimited list of <physical_network>:<vlan_min>:<vlan_max> or
<physical_network> specifying physical_network names usable for VLAN
provider and tenant networks, as well as ranges of VLAN tags on each
available for allocation to tenant networks.
type: string
enable-igmp-snooping:
default: false
description: Enable IGMP snooping for integration bridge.
type: boolean
global-physnet-mtu:
type: int
default: 1500
description: |
MTU of the underlying physical network. Neutron uses this value to
calculate MTU for all virtual network components. For flat and
VLAN networks, neutron uses this value without modification. For
overlay networks such as Geneve, neutron automatically subtracts
the overlay protocol overhead from this value.
path-mtu:
type: int
default: 1500
description: |
Maximum size of an IP packet (MTU) that can traverse the
underlying physical network infrastructure without fragmentation
when using an overlay/tunnel protocol. This option allows
specifying a physical network MTU value that differs from the
default global-physnet-mtu value.
physical-network-mtus:
type: string
default: null
description: |
Space-delimited list of <physical_network>:<mtu> pairs specifying MTU for
individual physical networks.
.
Use this if a subset of your flat or VLAN provider networks have a MTU
that differ with what is set in global-physnet-mtu.
reverse-dns-lookup:
default: false
description: |
A boolean value specifying whether to enable or not the creation of
reverse lookup (PTR) records.
.
NOTE: Use only when integrating neutron-k8s charm to designate charm.
type: boolean
ipv4-ptr-zone-prefix-size:
default: 24
description: |
The size in bits of the prefix for the IPv4 reverse lookup (PTR) zones.
Valid size has to be multiple of 8, with maximum value of 24 and minimum
value of 8.
.
NOTE: Use only when "reverse-dns-lookup" option is set to "True".
type: int
ipv6-ptr-zone-prefix-size:
default: 64
description: |
The size in bits of the prefix for the IPv6 reverse lookup (PTR) zones.
Valid size has to be multiple of 4, with maximum value of 124 and minimum
value of 4.
.
NOTE: Use only when "reverse-dns-lookup" option is set to "True".
type: int
containers:
neutron-server:
resource: neutron-server-image
resources:
neutron-server-image:
type: oci-image
description: OCI image for OpenStack Neutron API
upstream-source: ghcr.io/canonical/neutron-server:2024.1
requires:
ingress-internal:
interface: ingress
optional: true
limit: 1
ingress-public:
interface: ingress
limit: 1
database:
interface: mysql_client
limit: 1
amqp:
interface: rabbitmq
identity-service:
interface: keystone
ovsdb-cms:
interface: ovsdb-cms
certificates:
interface: tls-certificates
optional: true
receive-ca-cert:
interface: certificate_transfer
optional: true
external-dns:
interface: designate
optional: true
logging:
interface: loki_push_api
optional: true
tracing:
interface: tracing
optional: true
limit: 1
provides:
neutron-api:
interface: neutron-api
peers:
peers:
interface: neutron-peer
parts:
update-certificates:
plugin: nil
@ -13,9 +169,9 @@ parts:
apt update
apt install -y ca-certificates
update-ca-certificates
charm:
after: [update-certificates]
after:
- update-certificates
build-packages:
- git
- libffi-dev

View File

@ -1,83 +0,0 @@
options:
debug:
default: False
description: Enable debug logging.
type: boolean
dns-domain:
default: cloud.sunbeam.internal.
description: |
Specifies the dns domain name that should be used for building instance
hostnames. The value of 'openstacklocal' will cause
the dhcp agents to broadcast the default domain of openstacklocal and
will not enable internal cloud dns resolution. This value should end
with a '.', e.g. 'cloud.example.org.'.
type: string
region:
default: RegionOne
description: Name of the OpenStack region
type: string
vlan-ranges:
default: "physnet1:1:4094"
description: |
Space-delimited list of <physical_network>:<vlan_min>:<vlan_max> or
<physical_network> specifying physical_network names usable for VLAN
provider and tenant networks, as well as ranges of VLAN tags on each
available for allocation to tenant networks.
type: string
enable-igmp-snooping:
default: False
description: Enable IGMP snooping for integration bridge.
type: boolean
global-physnet-mtu:
type: int
default: 1500
description: |
MTU of the underlying physical network. Neutron uses this value to
calculate MTU for all virtual network components. For flat and
VLAN networks, neutron uses this value without modification. For
overlay networks such as Geneve, neutron automatically subtracts
the overlay protocol overhead from this value.
path-mtu:
type: int
default: 1500
description: |
Maximum size of an IP packet (MTU) that can traverse the
underlying physical network infrastructure without fragmentation
when using an overlay/tunnel protocol. This option allows
specifying a physical network MTU value that differs from the
default global-physnet-mtu value.
physical-network-mtus:
type: string
default:
description: |
Space-delimited list of <physical_network>:<mtu> pairs specifying MTU for
individual physical networks.
.
Use this if a subset of your flat or VLAN provider networks have an MTU
that differs from what is set in global-physnet-mtu.
reverse-dns-lookup:
default: False
description: |
A boolean value specifying whether to enable or not the creation of
reverse lookup (PTR) records.
.
NOTE: Use only when integrating neutron-k8s charm to designate charm.
type: boolean
ipv4-ptr-zone-prefix-size:
default: 24
description: |
The size in bits of the prefix for the IPv4 reverse lookup (PTR) zones.
Valid size has to be multiple of 8, with maximum value of 24 and minimum
value of 8.
.
NOTE: Use only when "reverse-dns-lookup" option is set to "True".
type: int
ipv6-ptr-zone-prefix-size:
default: 64
description: |
The size in bits of the prefix for the IPv6 reverse lookup (PTR) zones.
Valid size has to be multiple of 4, with maximum value of 124 and minimum
value of 4.
.
NOTE: Use only when "reverse-dns-lookup" option is set to "True".
type: int

View File

@ -1,76 +0,0 @@
name: neutron-k8s
summary: OpenStack Networking API service
maintainer: OpenStack Charmers <openstack-charmers@lists.ubuntu.com>
description: |
Neutron is a virtual network service for OpenStack, and a part of
Netstack. Just like OpenStack Nova provides an API to dynamically
request and configure virtual servers, Neutron provides an API to
dynamically request and configure virtual networks. These networks
connect "interfaces" from other OpenStack services (e.g., virtual NICs
from Nova VMs). The Neutron API supports extensions to provide
advanced network capabilities (e.g., QoS, ACLs, network monitoring,
etc.)
.
This charm provides the OpenStack Neutron API service.
tags:
- openstack
source: https://opendev.org/openstack/charm-neutron-k8s
issues: https://bugs.launchpad.net/charm-neutron-k8s
bases:
- name: ubuntu
channel: 22.04/stable
assumes:
- k8s-api
- juju >= 3.1
containers:
neutron-server:
resource: neutron-server-image
resources:
neutron-server-image:
type: oci-image
description: OCI image for OpenStack Neutron API
# ghcr.io/canonical/neutron-server:2024.1
upstream-source: ghcr.io/canonical/neutron-server:2024.1
provides:
neutron-api:
interface: neutron-api
requires:
ingress-internal:
interface: ingress
optional: true
limit: 1
ingress-public:
interface: ingress
limit: 1
database:
interface: mysql_client
limit: 1
amqp:
interface: rabbitmq
identity-service:
interface: keystone
ovsdb-cms:
interface: ovsdb-cms
certificates:
interface: tls-certificates
optional: true
receive-ca-cert:
interface: certificate_transfer
optional: true
external-dns:
interface: designate
optional: true
logging:
interface: loki_push_api
optional: true
tracing:
interface: tracing
optional: true
limit: 1
peers:
peers:
interface: neutron-peer

View File

@ -1 +0,0 @@
../config.yaml

View File

@ -1,2 +0,0 @@
# NOTE: no actions yet!
{ }

View File

@ -1,11 +1,129 @@
type: "charm"
bases:
- build-on:
- name: "ubuntu"
channel: "22.04"
run-on:
- name: "ubuntu"
channel: "22.04"
type: charm
name: nova-k8s
summary: OpenStack Compute - Nova cloud controller service
description: |
OpenStack is a reliable cloud infrastructure. Its mission is to produce
the ubiquitous cloud computing platform that will meet the needs of public
and private cloud providers regardless of size, by being simple to implement
and massively scalable.
.
OpenStack Compute, codenamed Nova, is a cloud computing fabric controller. In
addition to its "native" API (the OpenStack API), it also supports the Amazon
EC2 API.
.
This charm provides the cloud controller service for OpenStack Nova and includes
nova-scheduler, nova-api and nova-conductor services.
assumes:
- k8s-api
- juju >= 3.1
links:
source:
- https://opendev.org/openstack/charm-nova-k8s
issues:
- https://bugs.launchpad.net/charm-nova-k8s
base: ubuntu@22.04
platforms:
amd64:
config:
options:
debug:
default: false
description: Enable debug logging.
type: boolean
region:
default: RegionOne
description: Name of the OpenStack region
type: string
containers:
nova-api:
resource: nova-api-image
nova-scheduler:
resource: nova-scheduler-image
nova-conductor:
resource: nova-conductor-image
nova-spiceproxy:
resource: nova-spiceproxy-image
resources:
nova-api-image:
type: oci-image
description: OCI image for OpenStack Nova API
upstream-source: ghcr.io/canonical/nova-consolidated:2024.1
nova-scheduler-image:
type: oci-image
description: OCI image for OpenStack Nova Scheduler
upstream-source: ghcr.io/canonical/nova-consolidated:2024.1
nova-conductor-image:
type: oci-image
description: OCI image for OpenStack Nova Conductor
upstream-source: ghcr.io/canonical/nova-consolidated:2024.1
nova-spiceproxy-image:
type: oci-image
description: OCI image for OpenStack Nova Spice proxy
upstream-source: ghcr.io/canonical/nova-consolidated:2024.1
requires:
ingress-internal:
interface: ingress
optional: true
limit: 1
ingress-public:
interface: ingress
limit: 1
traefik-route-internal:
interface: traefik_route
optional: true
limit: 1
traefik-route-public:
interface: traefik_route
limit: 1
database:
interface: mysql_client
limit: 1
api-database:
interface: mysql_client
limit: 1
cell-database:
interface: mysql_client
limit: 1
amqp:
interface: rabbitmq
image-service:
interface: glance
identity-service:
interface: keystone
cloud-compute:
interface: nova-compute
cinder-volume-service:
interface: cinder
neutron-network-service:
interface: neutron
neutron-api:
interface: neutron-api
placement:
interface: placement
receive-ca-cert:
interface: certificate_transfer
optional: true
logging:
interface: loki_push_api
optional: true
tracing:
interface: tracing
optional: true
limit: 1
provides:
nova-service:
interface: nova
peers:
peers:
interface: nova-peer
parts:
update-certificates:
plugin: nil
@ -13,9 +131,9 @@ parts:
apt update
apt install -y ca-certificates
update-ca-certificates
charm:
after: [update-certificates]
after:
- update-certificates
build-packages:
- git
- libffi-dev

View File

@ -1,9 +0,0 @@
options:
debug:
default: False
description: Enable debug logging.
type: boolean
region:
default: RegionOne
description: Name of the OpenStack region
type: string

View File

@ -1,113 +0,0 @@
name: nova-k8s
summary: OpenStack Compute - Nova cloud controller service
maintainer: OpenStack Charmers <openstack-charmers@lists.ubuntu.com>
description: |
OpenStack is a reliable cloud infrastructure. Its mission is to produce
the ubiquitous cloud computing platform that will meet the needs of public
and private cloud providers regardless of size, by being simple to implement
and massively scalable.
.
OpenStack Compute, codenamed Nova, is a cloud computing fabric controller. In
addition to its "native" API (the OpenStack API), it also supports the Amazon
EC2 API.
.
This charm provides the cloud controller service for OpenStack Nova and includes
nova-scheduler, nova-api and nova-conductor services.
version: 3
bases:
- name: ubuntu
channel: 22.04/stable
assumes:
- k8s-api
- juju >= 3.1
tags:
- openstack
source: https://opendev.org/openstack/charm-nova-k8s
issues: https://bugs.launchpad.net/charm-nova-k8s
containers:
nova-api:
resource: nova-api-image
nova-scheduler:
resource: nova-scheduler-image
nova-conductor:
resource: nova-conductor-image
nova-spiceproxy:
resource: nova-spiceproxy-image
resources:
nova-api-image:
type: oci-image
description: OCI image for OpenStack Nova API
upstream-source: ghcr.io/canonical/nova-consolidated:2024.1
nova-scheduler-image:
type: oci-image
description: OCI image for OpenStack Nova Scheduler
upstream-source: ghcr.io/canonical/nova-consolidated:2024.1
nova-conductor-image:
type: oci-image
description: OCI image for OpenStack Nova Conductor
upstream-source: ghcr.io/canonical/nova-consolidated:2024.1
nova-spiceproxy-image:
type: oci-image
description: OCI image for OpenStack Nova Spice proxy
upstream-source: ghcr.io/canonical/nova-consolidated:2024.1
requires:
ingress-internal:
interface: ingress
optional: true
limit: 1
ingress-public:
interface: ingress
limit: 1
traefik-route-internal:
interface: traefik_route
optional: true
limit: 1
traefik-route-public:
interface: traefik_route
limit: 1
database:
interface: mysql_client
limit: 1
api-database:
interface: mysql_client
limit: 1
cell-database:
interface: mysql_client
limit: 1
amqp:
interface: rabbitmq
image-service:
interface: glance
identity-service:
interface: keystone
cloud-compute:
interface: nova-compute
cinder-volume-service:
interface: cinder
neutron-network-service:
interface: neutron
neutron-api:
interface: neutron-api
placement:
interface: placement
receive-ca-cert:
interface: certificate_transfer
optional: true
logging:
interface: loki_push_api
optional: true
tracing:
interface: tracing
optional: true
limit: 1
provides:
nova-service:
interface: nova
peers:
peers:
interface: nova-peer

View File

@ -1 +0,0 @@
../config.yaml

View File

@ -1,2 +0,0 @@
# NOTE: no actions yet!
{ }

View File

@ -1,11 +1,105 @@
type: "charm"
bases:
- build-on:
- name: "ubuntu"
channel: "22.04"
run-on:
- name: "ubuntu"
channel: "22.04"
type: charm
name: octavia-k8s
summary: OpenStack Octavia service
description: |
OpenStack Octavia provides loadbalancing service for an OpenStack cloud.
Currently OVN Octavia provider driver is supported.
assumes:
- k8s-api
- juju >= 3.1
links:
source:
- https://opendev.org/openstack/charm-octavia-k8s
issues:
- https://bugs.launchpad.net/charm-octavia-k8s
base: ubuntu@22.04
platforms:
amd64:
config:
options:
debug:
default: false
description: Enable debug logging.
type: boolean
region:
default: RegionOne
description: Name of the OpenStack region
type: string
containers:
octavia-api:
resource: octavia-api-image
mounts:
- storage: agent-sockets
location: /var/run/octavia/
octavia-driver-agent:
resource: octavia-driver-agent-image
mounts:
- storage: agent-sockets
location: /var/run/octavia/
octavia-housekeeping:
resource: octavia-housekeeping-image
resources:
octavia-api-image:
type: oci-image
description: OCI image for OpenStack octavia
upstream-source: ghcr.io/canonical/octavia-consolidated:2024.1
octavia-driver-agent-image:
type: oci-image
description: OCI image for OpenStack Octavia Driver Agent
upstream-source: ghcr.io/canonical/octavia-consolidated:2024.1
octavia-housekeeping-image:
type: oci-image
description: OCI image for OpenStack Octavia Housekeeping
upstream-source: ghcr.io/canonical/octavia-consolidated:2024.1
storage:
agent-sockets:
type: filesystem
description: |
Storage for the location of agent sockets shared between octavia-api
and octavia-driver-agent
minimum-size: 100M
requires:
database:
interface: mysql_client
limit: 1
identity-service:
interface: keystone
ingress-internal:
interface: ingress
optional: true
limit: 1
ingress-public:
interface: ingress
limit: 1
ovsdb-cms:
interface: ovsdb-cms
certificates:
interface: tls-certificates
optional: true
identity-ops:
interface: keystone-resources
optional: true
receive-ca-cert:
interface: certificate_transfer
optional: true
logging:
interface: loki_push_api
optional: true
tracing:
interface: tracing
optional: true
limit: 1
peers:
peers:
interface: octavia-peer
parts:
update-certificates:
plugin: nil
@ -13,9 +107,9 @@ parts:
apt update
apt install -y ca-certificates
update-ca-certificates
charm:
after: [update-certificates]
after:
- update-certificates
build-packages:
- git
- libffi-dev

View File

@ -1,9 +0,0 @@
options:
debug:
default: False
description: Enable debug logging.
type: boolean
region:
default: RegionOne
description: Name of the OpenStack region
type: string

View File

@ -1,92 +0,0 @@
name: octavia-k8s
summary: OpenStack Octavia service
maintainer: OpenStack Charmers <openstack-charmers@lists.ubuntu.com>
description: |
OpenStack Octavia provides loadbalancing service for an OpenStack cloud.
Currently OVN Octavia provider driver is supported.
version: 3
bases:
- name: ubuntu
channel: 22.04/stable
assumes:
- k8s-api
- juju >= 3.1
tags:
- openstack
source: https://opendev.org/openstack/charm-octavia-k8s
issues: https://bugs.launchpad.net/charm-octavia-k8s
storage:
agent-sockets:
type: filesystem
description: |
Storage for the location of agent sockets shared between octavia-api
and octavia-driver-agent
minimum-size: 100M
containers:
octavia-api:
resource: octavia-api-image
mounts:
- storage: agent-sockets
location: /var/run/octavia/
octavia-driver-agent:
resource: octavia-driver-agent-image
mounts:
- storage: agent-sockets
location: /var/run/octavia/
octavia-housekeeping:
resource: octavia-housekeeping-image
resources:
octavia-api-image:
type: oci-image
description: OCI image for OpenStack octavia
# ghcr.io/canonical/octavia-api:2024.1
upstream-source: ghcr.io/canonical/octavia-consolidated:2024.1
octavia-driver-agent-image:
type: oci-image
description: OCI image for OpenStack Octavia Driver Agent
# ghcr.io/canonical/octavia-driver-agent:2024.1
upstream-source: ghcr.io/canonical/octavia-consolidated:2024.1
octavia-housekeeping-image:
type: oci-image
description: OCI image for OpenStack Octavia Housekeeping
# ghcr.io/canonical/octavia-housekeeping:2024.1
upstream-source: ghcr.io/canonical/octavia-consolidated:2024.1
requires:
database:
interface: mysql_client
limit: 1
identity-service:
interface: keystone
ingress-internal:
interface: ingress
optional: true
limit: 1
ingress-public:
interface: ingress
limit: 1
ovsdb-cms:
interface: ovsdb-cms
certificates:
interface: tls-certificates
optional: true
identity-ops:
interface: keystone-resources
optional: true
receive-ca-cert:
interface: certificate_transfer
optional: true
logging:
interface: loki_push_api
optional: true
tracing:
interface: tracing
optional: true
limit: 1
peers:
peers:
interface: octavia-peer

View File

@ -1 +0,0 @@
../config.yaml

View File

@ -1,2 +0,0 @@
# NOTE: no actions yet!
{ }

View File

@ -1,11 +1,62 @@
type: "charm"
bases:
- build-on:
- name: "ubuntu"
channel: "22.04"
run-on:
- name: "ubuntu"
channel: "22.04"
type: charm
name: openstack-exporter-k8s
summary: OpenStack openstack-exporter service
description: |
OpenStack openstack-exporter provides endpoint metrics for OpenStack services.
assumes:
- k8s-api
- juju >= 3.1
links:
source:
- https://opendev.org/openstack/charm-openstack-exporter-k8s
issues:
- https://bugs.launchpad.net/charm-openstack-exporter-k8s
base: ubuntu@22.04
platforms:
amd64:
config:
options:
region:
default: RegionOne
description: Name of the OpenStack region
type: string
containers:
openstack-exporter:
resource: openstack-exporter-image
resources:
openstack-exporter-image:
type: oci-image
description: OCI image for OpenStack openstack-exporter
upstream-source: ghcr.io/canonical/openstack-exporter:1.7.0
requires:
identity-ops:
interface: keystone-resources
receive-ca-cert:
interface: certificate_transfer
optional: true
logging:
interface: loki_push_api
optional: true
tracing:
interface: tracing
optional: true
limit: 1
provides:
metrics-endpoint:
interface: prometheus_scrape
grafana-dashboard:
interface: grafana_dashboard
peers:
peers:
interface: openstack-exporter-peer
parts:
update-certificates:
plugin: nil
@ -13,9 +64,9 @@ parts:
apt update
apt install -y ca-certificates
update-ca-certificates
charm:
after: [update-certificates]
after:
- update-certificates
build-packages:
- git
- libffi-dev

View File

@ -1,5 +0,0 @@
options:
region:
default: RegionOne
description: Name of the OpenStack region
type: string

View File

@ -1,50 +0,0 @@
name: openstack-exporter-k8s
summary: OpenStack openstack-exporter service
maintainer: OpenStack Charmers <openstack-charmers@lists.ubuntu.com>
description: |
OpenStack openstack-exporter provides endpoint metrics for OpenStack services.
version: 3
bases:
- name: ubuntu
channel: 22.04/stable
assumes:
- k8s-api
- juju >= 3.1
tags:
- openstack
source: https://opendev.org/openstack/charm-openstack-exporter-k8s
issues: https://bugs.launchpad.net/charm-openstack-exporter-k8s
containers:
openstack-exporter:
resource: openstack-exporter-image
resources:
openstack-exporter-image:
type: oci-image
description: OCI image for OpenStack openstack-exporter
upstream-source: ghcr.io/canonical/openstack-exporter:1.7.0
requires:
identity-ops:
interface: keystone-resources
receive-ca-cert:
interface: certificate_transfer
optional: true
logging:
interface: loki_push_api
optional: true
tracing:
interface: tracing
optional: true
limit: 1
provides:
metrics-endpoint:
interface: prometheus_scrape
grafana-dashboard:
interface: grafana_dashboard
peers:
peers:
interface: openstack-exporter-peer

View File

@ -1 +0,0 @@
../config.yaml

View File

@ -1,15 +0,0 @@
set-hypervisor-local-settings:
description: |
Apply settings specific to this hypervisor unit
params:
external-nic:
type: string
description: NIC that hypervisor will configure for North/South traffic
spice-proxy-address:
type: string
description: IP address to use for configuration of SPICE consoles in instances.
ip-address:
type: string
description: IP address to use for service configuration
additionalProperties: false

View File

@ -1,14 +1,97 @@
# This file configures Charmcraft.
# See https://juju.is/docs/sdk/charmcraft-config for guidance.
type: charm
bases:
- build-on:
- name: ubuntu
channel: "22.04"
run-on:
- name: ubuntu
channel: "22.04"
title: OpenStack Hypervisor
name: openstack-hypervisor
summary: Deploy the OpenStack hypervisor
description: |
Configure machine to run VMs as part of an OpenStack cloud.
assumes:
- juju >= 3.1
base: ubuntu@22.04
platforms:
amd64:
config:
options:
snap-channel:
default: 2024.1/edge
type: string
debug:
default: false
type: boolean
resume-on-boot:
default: true
description: |
Whether to resume the guest VMs when the host boots.
type: boolean
dns-servers:
default: 8.8.8.8
type: string
external-bridge:
default: br-ex
type: string
external-bridge-address:
default: 10.20.20.1/24
type: string
physnet-name:
default: physnet1
type: string
use-migration-binding:
default: false
type: boolean
use-data-binding:
default: false
type: boolean
actions:
set-hypervisor-local-settings:
description: |
Apply settings specific to this hypervisor unit
params:
external-nic:
type: string
description: NIC that hypervisor will configure for North/South traffic
spice-proxy-address:
type: string
description: IP address to use for configuration of SPICE consoles in instances.
ip-address:
type: string
description: IP address to use for service configuration
additionalProperties: false
requires:
amqp:
interface: rabbitmq
identity-credentials:
interface: keystone-credentials
ovsdb-cms:
interface: ovsdb-cms
certificates:
interface: tls-certificates
optional: true
ceilometer-service:
interface: ceilometer
optional: true
ceph-access:
interface: cinder-ceph-key
optional: true
receive-ca-cert:
interface: certificate_transfer
optional: true
nova-service:
interface: nova
tracing:
interface: tracing
optional: true
limit: 1
provides:
cos-agent:
interface: cos_agent
extra-bindings:
migration: null
data: null
parts:
charm:

View File

@ -1,30 +0,0 @@
options:
snap-channel:
default: "2024.1/edge"
type: string
debug:
default: False
type: boolean
resume-on-boot:
default: True
description: |
Whether to resume the guest VMs when the host boots.
type: boolean
dns-servers:
default: "8.8.8.8"
type: string
external-bridge:
default: "br-ex"
type: string
external-bridge-address:
default: "10.20.20.1/24"
type: string
physnet-name:
default: "physnet1"
type: string
use-migration-binding:
default: False
type: boolean
use-data-binding:
default: False
type: boolean

View File

@ -1,48 +0,0 @@
name: openstack-hypervisor
display-name: OpenStack Hypervisor
summary: Deploy the OpenStack hypervisor
description: |
Configure machine to run VMs as part of an OpenStack cloud.
requires:
amqp:
interface: rabbitmq
identity-credentials:
interface: keystone-credentials
ovsdb-cms:
interface: ovsdb-cms
certificates:
interface: tls-certificates
optional: true
ceilometer-service:
interface: ceilometer
optional: true
ceph-access:
interface: cinder-ceph-key
optional: true
receive-ca-cert:
interface: certificate_transfer
optional: true
nova-service:
interface: nova
tracing:
interface: tracing
optional: true
limit: 1
provides:
cos-agent:
interface: cos_agent
extra-bindings:
migration:
data: # this binding will be used to support project networking between hypervisors
# This charm has no peer relation by design. This charm needs to scale to
# hundreds of units and this is limited by the peer relation.
assumes:
- juju >= 3.1

View File

@ -1 +0,0 @@
../actions.yaml

View File

@ -1 +0,0 @@
../../config.yaml

View File

@ -40,12 +40,9 @@ class TestCharm(test_utils.CharmTestCase):
def setUp(self):
"""Setup OpenStack Hypervisor tests."""
super().setUp(charm, self.PATCHES)
with open("config.yaml", "r") as f:
config_data = f.read()
self.harness = test_utils.get_harness(
_HypervisorOperatorCharm,
container_calls=self.container_calls,
charm_config=config_data,
)
self.addCleanup(self.harness.cleanup)

View File

@ -1,40 +1,18 @@
name: openstack-images-sync-k8s
type: charm
title: OpenStack Images Sync K8S
name: openstack-images-sync-k8s
summary: Keep OpenStack images in sync with the latest versions
description: |
OpenStack Images Sync operator allows synchronization from a SimpleStreams source to an OpenStack cloud.
bases:
- build-on:
- name: ubuntu
channel: "22.04"
run-on:
- name: ubuntu
channel: "22.04"
parts:
charm:
build-packages:
- git
- libffi-dev
- libssl-dev
- pkg-config
- rustc
- cargo
charm-binary-python-packages:
- cryptography
- jsonschema
- jinja2
base: ubuntu@22.04
platforms:
amd64:
config:
options:
debug:
default: False
default: false
type: boolean
frequency:
default: hourly
@ -94,3 +72,17 @@ requires:
interface: tracing
optional: true
limit: 1
parts:
charm:
build-packages:
- git
- libffi-dev
- libssl-dev
- pkg-config
- rustc
- cargo
charm-binary-python-packages:
- cryptography
- jsonschema
- jinja2

Some files were not shown because too many files have changed in this diff Show More