4931ad53b9
Update charm base to ubuntu@24.04 for all k8s based charms.

Deployment of a k8s charm is abstracted from the underlying host base,
so we can already deploy charms built on the next-generation base.

Change-Id: If8e8960a58a39f9978aaeec26ed4df4a2b690396
Signed-off-by: Guillaume Boutry <guillaume.boutry@canonical.com>
351 lines | 12 KiB | YAML
type: charm
name: cinder-ceph-k8s
summary: OpenStack volume service - Ceph backend
description: |
  Cinder is the OpenStack project that provides volume management for
  instances. This charm provides integration with Ceph storage
  backends.
assumes:
  - k8s-api
  - juju >= 3.1
links:
  source:
    - https://opendev.org/openstack/charm-cinder-ceph-k8s
  issues:
    - https://bugs.launchpad.net/charm-cinder-ceph-k8s

base: ubuntu@24.04
platforms:
  amd64:
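# Because the charm targets Kubernetes, the host base is abstracted away and
# ubuntu@24.04 above only selects the charm's own build/runtime environment.
# A minimal deploy sketch (the channel name is illustrative, not taken from
# this file):
#   juju deploy cinder-ceph-k8s --channel 2024.1/edge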
config:
  options:
    debug:
      type: boolean
      default: false
      description: Enable debug logging.
    use-syslog:
      type: boolean
      default: true
      description: |
        Setting this to false will disable logging to syslog (enabled by
        default).
    ceph-osd-replication-count:
      default: 3
      type: int
      description: |
        This value dictates the number of replicas ceph must make of any
        object it stores within the cinder rbd pool. Of course, this only
        applies if using Ceph as a backend store. Note that once the cinder
        rbd pool has been created, changing this value will not have any
        effect (although it can be changed in ceph by manually configuring
        your ceph cluster).
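    # Config values can be changed at runtime (sketch; per the description
    # above, a new replica count only applies to pools created afterwards):
    #   juju config cinder-ceph-k8s ceph-osd-replication-count=3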
    ceph-pool-weight:
      type: int
      default: 40
      description: |
        Defines a relative weighting of the pool as a percentage of the total
        amount of data in the Ceph cluster. This effectively weights the number
        of placement groups for the pool created to be appropriately portioned
        to the amount of data expected. For example, if the ephemeral volumes
        for the OpenStack compute instances are expected to take up 20% of the
        overall configuration then this value would be specified as 20. Note -
        it is important to choose an appropriate value for the pool weight as
        this directly affects the number of placement groups which will be
        created for the pool. The number of placement groups for a pool can
        only be increased, never decreased - so it is important to identify the
        percent of data that will likely reside in the pool.
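    # Rough placement-group sizing behind this weight (standard Ceph
    # guidance, assumed here rather than taken from this charm's code):
    #   target_pgs ~= (num_osds * 100 * weight / 100) / replication_count
    # e.g. 30 OSDs, weight 40, 3 replicas -> ~400, rounded up to 512 PGs.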
    volume-backend-name:
      default: null
      type: string
      description: |
        Volume backend name for the backend. The default value is the
        application name in the Juju model, e.g. "cinder-ceph-mybackend"
        if it's deployed as `juju deploy cinder-ceph cinder-ceph-mybackend`.
        The same backend name can be set on multiple backends so that they
        are treated as a single virtual backend associated with a single
        volume type.
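    # Shared virtual backend sketch (application names are hypothetical):
    # both applications report the same backend name, so a single volume
    # type can schedule across them:
    #   juju deploy cinder-ceph-k8s ceph-az1 --config volume-backend-name=ceph
    #   juju deploy cinder-ceph-k8s ceph-az2 --config volume-backend-name=ceph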
    backend-availability-zone:
      default: null
      type: string
      description: |
        Availability zone name of this volume backend. If set, it will
        override the default availability zone. Supported for Pike or
        newer releases.
    restrict-ceph-pools:
      default: false
      type: boolean
      description: |
        Optionally restrict Ceph key permissions to access pools as required.
    rbd-pool-name:
      default: null
      type: string
      description: |
        Optionally specify an existing rbd pool that cinder should map to.
    rbd-flatten-volume-from-snapshot:
      default: false
      type: boolean
      description: |
        Flatten volumes created from snapshots to remove dependency from
        volume to snapshot.
    rbd-mirroring-mode:
      type: string
      default: pool
      description: |
        The RBD mirroring mode used for the Ceph pool. This option is only used
        with 'replicated' pool type, as it's not supported for 'erasure-coded'
        pool type - valid values: 'pool' and 'image'
    pool-type:
      type: string
      default: replicated
      description: |
        Ceph pool type to use for storage - valid values include `replicated`
        and `erasure-coded`.
    ec-profile-name:
      type: string
      default: null
      description: |
        Name for the EC profile to be created for the EC pools. If not defined
        a profile name will be generated based on the name of the pool used by
        the application.
    ec-rbd-metadata-pool:
      type: string
      default: null
      description: |
        Name of the metadata pool to be created (for RBD use-cases). If not
        defined a metadata pool name will be generated based on the name of
        the data pool used by the application. The metadata pool is always
        replicated, not erasure coded.
    ec-profile-k:
      type: int
      default: 1
      description: |
        Number of data chunks that will be used for EC data pool. K+M factors
        should never be greater than the number of available zones (or hosts)
        for balancing.
    ec-profile-m:
      type: int
      default: 2
      description: |
        Number of coding chunks that will be used for EC data pool. K+M factors
        should never be greater than the number of available zones (or hosts)
        for balancing.
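    # Erasure-coding sketch (values are illustrative): k=4, m=2 tolerates
    # the loss of any 2 failure domains at ~1.5x raw-space overhead, versus
    # 3x for 3-way replication, and needs at least k+m (6) zones or hosts:
    #   juju config cinder-ceph-k8s pool-type=erasure-coded \
    #     ec-profile-k=4 ec-profile-m=2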
    ec-profile-locality:
      type: int
      default: null
      description: |
        (lrc plugin - l) Group the coding and data chunks into sets of size l.
        For instance, for k=4 and m=2, when l=3 two groups of three are created.
        Each set can be recovered without reading chunks from another set. Note
        that using the lrc plugin does incur more raw storage usage than isa or
        jerasure in order to reduce the cost of recovery operations.
    ec-profile-crush-locality:
      type: string
      default: null
      description: |
        (lrc plugin) The type of the crush bucket in which each set of chunks
        defined by l will be stored. For instance, if it is set to rack, each
        group of l chunks will be placed in a different rack. It is used to
        create a CRUSH rule step such as step choose rack. If it is not set,
        no such grouping is done.
    ec-profile-durability-estimator:
      type: int
      default: null
      description: |
        (shec plugin - c) The number of parity chunks each of which includes
        each data chunk in its calculation range. The number is used as a
        durability estimator. For instance, if c=2, 2 OSDs can be down
        without losing data.
    ec-profile-helper-chunks:
      type: int
      default: null
      description: |
        (clay plugin - d) Number of OSDs requested to send data during
        recovery of a single chunk. d needs to be chosen such that
        k+1 <= d <= k+m-1. The larger the d, the better the savings.
    ec-profile-scalar-mds:
      type: string
      default: null
      description: |
        (clay plugin) Specifies the plugin that is used as a building
        block in the layered construction. It can be one of jerasure,
        isa or shec (defaults to jerasure).
    ec-profile-plugin:
      type: string
      default: jerasure
      description: |
        EC plugin to use for this application's pool. The following plugins
        are acceptable - jerasure, lrc, isa, shec, clay.
    ec-profile-technique:
      type: string
      default: null
      description: |
        EC profile technique used for this application's pool - will be
        validated based on the plugin configured via ec-profile-plugin.
        Supported techniques are `reed_sol_van`, `reed_sol_r6_op`,
        `cauchy_orig`, `cauchy_good`, `liber8tion` for jerasure,
        `reed_sol_van`, `cauchy` for isa and `single`, `multiple`
        for shec.
    ec-profile-device-class:
      type: string
      default: null
      description: |
        Device class from CRUSH map to use for placement groups for
        erasure profile - valid values: ssd, hdd or nvme (or leave
        unset to not use a device class).
    bluestore-compression-algorithm:
      type: string
      default: null
      description: |
        Compressor to use (if any) for pools requested by this charm.
        .
        NOTE: The ceph-osd charm sets a global default for this value (defaults
        to 'lz4' unless configured by the end user) which will be used unless
        specified for individual pools.
    bluestore-compression-mode:
      type: string
      default: null
      description: |
        Policy for using compression on pools requested by this charm.
        .
        'none' means never use compression.
        'passive' means use compression when clients hint that data is
        compressible.
        'aggressive' means use compression unless clients hint that
        data is not compressible.
        'force' means use compression under all circumstances even if the
        clients hint that the data is not compressible.
    bluestore-compression-required-ratio:
      type: float
      default: null
      description: |
        The ratio of the size of the data chunk after compression relative to
        the original size must be at least this small in order to store the
        compressed version on pools requested by this charm.
    bluestore-compression-min-blob-size:
      type: int
      default: null
      description: |
        Chunks smaller than this are never compressed on pools requested by
        this charm.
    bluestore-compression-min-blob-size-hdd:
      type: int
      default: null
      description: |
        Value of bluestore compression min blob size for rotational media on
        pools requested by this charm.
    bluestore-compression-min-blob-size-ssd:
      type: int
      default: null
      description: |
        Value of bluestore compression min blob size for solid state media on
        pools requested by this charm.
    bluestore-compression-max-blob-size:
      type: int
      default: null
      description: |
        Chunks larger than this are broken into smaller blobs of at most
        bluestore compression max blob size before being compressed on pools
        requested by this charm.
    bluestore-compression-max-blob-size-hdd:
      type: int
      default: null
      description: |
        Value of bluestore compression max blob size for rotational media on
        pools requested by this charm.
    bluestore-compression-max-blob-size-ssd:
      type: int
      default: null
      description: |
        Value of bluestore compression max blob size for solid state media on
        pools requested by this charm.
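    # Compression sketch (illustrative values): compress aggressively, but
    # only keep the compressed copy when it is at most 70% of the original
    # size:
    #   juju config cinder-ceph-k8s bluestore-compression-mode=aggressive \
    #     bluestore-compression-required-ratio=0.7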
    rabbit-user:
      type: string
      default: null
      description: Username to request access on rabbitmq-server.
    rabbit-vhost:
      type: string
      default: null
      description: RabbitMQ virtual host to request access on rabbitmq-server.
    enable-telemetry-notifications:
      type: boolean
      default: false
      description: Enable notifications to send to telemetry.
    image-volume-cache-enabled:
      type: boolean
      default: false
      description: |
        Enable the image volume cache.
    image-volume-cache-max-size-gb:
      type: int
      default: 0
      description: |
        Max size of the image volume cache in GB. 0 means unlimited.
    image-volume-cache-max-count:
      type: int
      default: 0
      description: |
        Max number of entries allowed in the image volume cache. 0 means
        unlimited.
containers:
  cinder-volume:
    resource: cinder-volume-image

resources:
  cinder-volume-image:
    type: oci-image
    description: OCI image for OpenStack Cinder Volume
    upstream-source: ghcr.io/canonical/cinder-consolidated:2024.1
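# The image resource can also be pinned at deploy time instead of relying on
# the upstream source above (sketch):
#   juju deploy cinder-ceph-k8s \
#     --resource cinder-volume-image=ghcr.io/canonical/cinder-consolidated:2024.1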
requires:
  amqp:
    interface: rabbitmq
  ceph:
    interface: ceph-client
  database:
    interface: mysql_client
    limit: 1
  identity-credentials:
    interface: keystone-credentials
    optional: true
  logging:
    interface: loki_push_api
    optional: true
  tracing:
    interface: tracing
    optional: true
    limit: 1
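# Typical wiring for the required endpoints (provider application names and
# their endpoint names are illustrative, assuming Juju 3.x `integrate`):
#   juju integrate cinder-ceph-k8s:amqp rabbitmq-k8s:amqp
#   juju integrate cinder-ceph-k8s:database mysql-k8s:database
#   juju integrate cinder-ceph-k8s:ceph microceph:ceph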
provides:
  ceph-access:
    interface: cinder-ceph-key
  storage-backend:
    interface: cinder-backend

peers:
  peers:
    interface: cinder-peer

parts:
  update-certificates:
    plugin: nil
    override-build: |
      apt update
      apt install -y ca-certificates
      update-ca-certificates
  charm:
    after:
      - update-certificates
    build-packages:
      - git
      - libffi-dev
      - libssl-dev
      - pkg-config
      - rustc
      - cargo
    charm-binary-python-packages:
      - cryptography
      - jsonschema
      - pydantic
      - jinja2
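# Build sketch: with charmcraft installed, running `charmcraft pack` from the
# charm root executes the parts above (certificate refresh first, then the
# charm build) and emits a .charm artifact for the declared base/platform.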