Add juju storage support to the charm
This feature adds Juju storage support to the charm. This is largely to deal
with the related bugs, but also makes the charm more useful.

Release-Note: https://review.opendev.org/#/c/718410/
Related-Bug: #1801349
Change-Id: I056967f4e83f4c99a4aac1090056dd4f320c03f5
commit 7a98ea3daa (parent 01be476c20)
README.md (20 lines added)

@@ -70,6 +70,26 @@ Then add a relation from this charm to both Cinder and Ceph:

    juju add-relation cinder-ceph:storage-backend cinder:storage-backend
    juju add-relation cinder-ceph:ceph ceph-mon:client

### Juju storage

Juju storage can also be used to add block devices to cinder. This uses LVM on
a block device that is presented to the cinder unit(s) locally, but can be from
any storage pool that Juju supports. This is only recommended for small scale
deployments. Storage backed by Ceph should be used for larger deployments.

The cinder.yaml can be configured as:

```yaml
cinder:
  options:
    block-device: None
  storage:
    block-devices: 'cinder,40G'
```

Setting `cinder.options.block-device = None` disables the local block device so
that Cinder will only be configured with the Juju storage device.

## High availability

This charm supports high availability. There are two mutually exclusive
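For context, the same configuration as the new README section above can be reached without a bundle. The following is only an illustrative sketch using the standard Juju CLI; it is not part of this change, and the `cinder` storage pool name is an assumption that must already exist in the model:

```sh
# Deploy cinder with a 40G Juju storage volume instead of a local block device,
# mirroring the bundle snippet in the README above.
juju deploy cinder --storage block-devices=cinder,40G

# Disable the charm's local block-device handling so only Juju storage is used.
juju config cinder block-device=None
```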
hooks/block-devices-storage-attached (new symbolic link)

@@ -0,0 +1 @@
storage.bootstrap
hooks/block-devices-storage-detached (new symbolic link)

@@ -0,0 +1 @@
storage.bootstrap
hooks/cinder_hooks.py — the hookenv import list is reordered and gains `storage_get` and `storage_list`; the resulting block:

@@ -63,19 +63,21 @@ from cinder_utils import (
from cinder_contexts import ceph_config_file

from charmhelpers.core.hookenv import (
    config,
    DEBUG,
    Hooks,
    local_unit,
    log,
    open_port,
    related_units,
    relation_get,
    relation_ids,
    relation_set,
    service_name,
    status_set,
    storage_get,
    storage_list,
    UnregisteredHookError,
)

from charmhelpers.fetch import (
@@ -197,15 +199,8 @@ def config_changed():
     if e_mountpoint and filesystem_mounted(e_mountpoint):
         umount(e_mountpoint)

-    if (service_enabled('volume') and
-            conf['block-device'] not in [None, 'None', 'none']):
-        status_set('maintenance', 'Configuring lvm storage')
-        block_devices = conf['block-device'].split()
-        configure_lvm_storage(block_devices,
-                              conf['volume-group'],
-                              conf['overwrite'] in ['true', 'True', True],
-                              conf['remove-missing'],
-                              conf['remove-missing-force'])
+    # configure block devices either local or from juju storage
+    _configure_block_devices()

     if not config('action-managed-upgrade'):
         if openstack_upgrade_available('cinder-common'):
@@ -248,6 +243,40 @@ def config_changed():
        restart_handler=lambda: service_restart('cinder-api'))


@hooks.hook('storage.real')
@restart_on_change(restart_map(), stopstart=True)
def storage_changed():
    _configure_block_devices()
    CONFIGS.write_all()


def _configure_block_devices():
    """Configure block devices, either from Juju storage or as a local block
    device configured in the config.
    """
    if service_enabled('volume'):
        block_devices = []
        # first see if a specified block device is configured
        conf = config()
        if conf['block-device'] not in [None, 'None', 'none']:
            block_devices.extend(conf['block-device'].split())
        # now see if there are any Juju storage devices configured
        storage_ids = storage_list('block-devices')
        storage_devs = [storage_get('location', s) for s in storage_ids]
        # add them into the block_devices:
        block_devices.extend(storage_devs)
        if block_devices:
            status_set('maintenance', 'Checking configuration of lvm storage')
        # Note that block_devices may be empty now and, if remove-missing is
        # set to true, configure_lvm_storage() still has to run regardless of
        # whether block_devices is an empty list or not.
        configure_lvm_storage(block_devices,
                              conf['volume-group'],
                              conf['overwrite'] in ['true', 'True', True],
                              conf['remove-missing'],
                              conf['remove-missing-force'])


@hooks.hook('shared-db-relation-joined')
def db_joined():
    if config('prefer-ipv6'):
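A rough way to sanity-check the new storage path on a running deployment is sketched below. This is illustrative only: the commands are standard Juju and LVM tooling rather than part of this change, the unit name is an example, and `cinder-volumes` assumes the charm's default `volume-group` setting.

```sh
# List attached Juju storage and the block-device locations it reports.
juju storage --format=yaml

# The Juju-provided device should now be an LVM physical volume in the
# volume group used by cinder's LVM backend.
juju run --unit cinder/0 -- pvs
juju run --unit cinder/0 -- vgs cinder-volumes
```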
hooks/cinder_utils.py

@@ -596,8 +596,10 @@ def configure_lvm_storage(block_devices, volume_group, overwrite=False,


def prepare_volume(device):
    juju_log("prepare_volume: {}".format(device))
    clean_storage(device)
    create_lvm_physical_volume(device)
    juju_log("prepared volume: {}".format(device))


def has_partition_table(block_device):
hooks/storage.bootstrap (new executable file)

@@ -0,0 +1,8 @@
#!/bin/sh

if ! dpkg -s cinder-common > /dev/null 2>&1; then
    juju-log "cinder not yet installed."
    exit 0
fi

./hooks/storage.real
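The `dpkg -s` guard appears to cover the case where Juju runs a storage-attached hook before the charm has finished installing (which happens for storage provided at deploy time): `storage.bootstrap` then exits cleanly instead of calling into `cinder_hooks.py` too early. Nothing should be lost by skipping the hook here, since `config-changed` also calls `_configure_block_devices()` and `storage_list('block-devices')` returns every attached device, not just the one that triggered the hook.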
hooks/storage.real (new symbolic link)

@@ -0,0 +1 @@
cinder_hooks.py
metadata.yaml

@@ -52,3 +52,9 @@ resources:
    type: file
    filename: policyd-override.zip
    description: The policy.d overrides file
storage:
  block-devices:
    type: block
    multiple:
      range: 0-
    minimum-size: 10G
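The `range: 0-` multiplicity means the charm still deploys with no Juju storage attached, while any number of devices of at least 10G can be added. As an illustration only (standard Juju CLI, not part of this change; the unit name is an example):

```sh
# Attach an extra 20G block device to an existing unit; the storage-attached
# hook added by this change (block-devices-storage-attached) picks it up.
juju add-storage cinder/0 block-devices=20G
```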
@@ -115,9 +115,11 @@ applications:
   cinder:
     series: bionic
     num_units: 1
+    storage:
+      block-devices: '40G'
     options:
       openstack-origin: cloud:bionic-train
-      block-device: /dev/vdb
+      block-device: None
       glance-api-version: 2
       overwrite: "true"
-      ephemeral-unmount: /mnt
+      #ephemeral-unmount: /mnt