Updates for zed stable branch creation

- Set default branch for git review/gerrit to stable/zed
- Switch tests to stable.
- Switch to using stable charm-helpers branch.
- Switch to using stable charm.openstack branch.
- Switch to using stable zaza, zaza-openstack-tests
  branch
- (reactive charms) Add build.lock file
- (classic charms) make sync
- (reactive: not reactive plugin): lock charm-tools < 3.1
- (reactive: with reactive plugin): lock charm snap to 3.x/stable

Change-Id: I757bf16d2604dd9ffda2ec45c3b98fe09f583d1c
This commit is contained in:
Alex Kavanagh 2022-10-14 17:03:09 +01:00 committed by Billy Olsen
parent 6ae6c1beef
commit 9221ef5674
18 changed files with 257 additions and 84 deletions

View File

@ -2,3 +2,5 @@
host=review.opendev.org
port=29418
project=openstack/charm-cinder.git
defaultbranch=stable/zed

View File

@ -1,4 +1,4 @@
repo: https://github.com/juju/charm-helpers
repo: https://github.com/juju/charm-helpers@stable/zed
destination: charmhelpers
include:
- core

View File

@ -19,6 +19,7 @@
import glob
import grp
import json
import os
import pwd
import re
@ -30,6 +31,7 @@ import yaml
from charmhelpers.core.hookenv import (
application_name,
config,
ERROR,
hook_name,
local_unit,
log,
@ -416,6 +418,20 @@ def add_init_service_checks(nrpe, services, unit_name, immediate_check=True):
:param str unit_name: Unit name to use in check description
:param bool immediate_check: For sysv init, run the service check immediately
"""
# check_haproxy is redundant in the presence of check_crm. See LP Bug#1880601 for details.
# just remove check_haproxy if haproxy is added as a lsb resource in hacluster.
for rid in relation_ids("ha"):
ha_resources = relation_get("json_resources", rid=rid, unit=local_unit())
if ha_resources:
try:
ha_resources_parsed = json.loads(ha_resources)
except ValueError as e:
log('Could not parse JSON from ha resources. {}'.format(e), level=ERROR)
raise
if "lsb:haproxy" in ha_resources_parsed.values():
if "haproxy" in services:
log("removed check_haproxy. This service will be monitored by check_crm")
services.remove("haproxy")
for svc in services:
# Don't add a check for these services from neutron-gateway
if svc in ['ext-port', 'os-charm-phy-nic-mtu']:

View File

@ -25,6 +25,7 @@ import socket
import time
from base64 import b64decode
from distutils.version import LooseVersion
from subprocess import (
check_call,
check_output,
@ -39,6 +40,7 @@ from charmhelpers.contrib.openstack.audits.openstack_security_guide import (
from charmhelpers.fetch import (
apt_install,
filter_installed_packages,
get_installed_version,
)
from charmhelpers.core.hookenv import (
NoNetworkBinding,
@ -59,6 +61,7 @@ from charmhelpers.core.hookenv import (
network_get_primary_address,
WARNING,
service_name,
remote_service_name,
)
from charmhelpers.core.sysctl import create as sysctl_create
@ -130,6 +133,7 @@ CA_CERT_PATH = '/usr/local/share/ca-certificates/keystone_juju_ca_cert.crt'
ADDRESS_TYPES = ['admin', 'internal', 'public']
HAPROXY_RUN_DIR = '/var/run/haproxy/'
DEFAULT_OSLO_MESSAGING_DRIVER = "messagingv2"
DEFAULT_HAPROXY_EXPORTER_STATS_PORT = 8404
def ensure_packages(packages):
@ -345,6 +349,14 @@ def db_ssl(rdata, ctxt, ssl_dir):
class IdentityServiceContext(OSContextGenerator):
_forward_compat_remaps = {
'admin_user': 'admin-user-name',
'service_username': 'service-user-name',
'service_tenant': 'service-project-name',
'service_tenant_id': 'service-project-id',
'service_domain': 'service-domain-name',
}
def __init__(self,
service=None,
service_user=None,
@ -397,6 +409,11 @@ class IdentityServiceContext(OSContextGenerator):
# 'www_authenticate_uri' replaced 'auth_uri' since Stein,
# see keystonemiddleware upstream sources for more info
if CompareOpenStackReleases(keystonemiddleware_os_rel) >= 'stein':
if 'public_auth_url' in ctxt:
c.update((
('www_authenticate_uri', '{}/v3'.format(
ctxt.get('public_auth_url'))),))
else:
c.update((
('www_authenticate_uri', "{}://{}:{}/v3".format(
ctxt.get('service_protocol', ''),
@ -409,11 +426,17 @@ class IdentityServiceContext(OSContextGenerator):
ctxt.get('service_host', ''),
ctxt.get('service_port', ''))),))
if 'internal_auth_url' in ctxt:
c.update((
('auth_url', ctxt.get('internal_auth_url')),))
else:
c.update((
('auth_url', "{}://{}:{}/v3".format(
ctxt.get('auth_protocol', ''),
ctxt.get('auth_host', ''),
ctxt.get('auth_port', ''))),
ctxt.get('auth_port', ''))),))
c.update((
('project_domain_name', ctxt.get('admin_domain_name', '')),
('user_domain_name', ctxt.get('admin_domain_name', '')),
('project_name', ctxt.get('admin_tenant_name', '')),
@ -441,6 +464,26 @@ class IdentityServiceContext(OSContextGenerator):
for rid in relation_ids(self.rel_name):
self.related = True
for unit in related_units(rid):
rdata = {}
# NOTE(jamespage):
# forwards compat with application data
# bag driven approach to relation.
_adata = relation_get(rid=rid, app=remote_service_name(rid))
if _adata:
# New app data bag uses - instead of _
# in key names - remap for compat with
# existing relation data keys
for key, value in _adata.items():
if key == 'api-version':
rdata[key.replace('-', '_')] = value.strip('v')
else:
rdata[key.replace('-', '_')] = value
# Re-map some keys for backwards compatibility
for target, source in self._forward_compat_remaps.items():
rdata[target] = _adata.get(source)
else:
# No app data bag presented - fallback
# to legacy unit based relation data
rdata = relation_get(rid=rid, unit=unit)
serv_host = rdata.get('service_host')
serv_host = format_ipv6_addr(serv_host) or serv_host
@ -475,6 +518,19 @@ class IdentityServiceContext(OSContextGenerator):
'service_project_id': rdata.get('service_tenant_id'),
'service_domain_id': rdata.get('service_domain_id')})
# NOTE:
# keystone-k8s operator presents full URLS
# for all three endpoints - public and internal are
# externally addressable for machine based charm
if 'public_auth_url' in rdata:
ctxt.update({
'public_auth_url': rdata.get('public_auth_url'),
})
if 'internal_auth_url' in rdata:
ctxt.update({
'internal_auth_url': rdata.get('internal_auth_url'),
})
# we keep all variables in ctxt for compatibility and
# add nested dictionary for keystone_authtoken generic
# templating
@ -860,9 +916,14 @@ class HAProxyContext(OSContextGenerator):
interfaces = ['cluster']
def __init__(self, singlenode_mode=False,
address_types=ADDRESS_TYPES):
address_types=None,
exporter_stats_port=DEFAULT_HAPROXY_EXPORTER_STATS_PORT):
if address_types is None:
address_types = ADDRESS_TYPES[:]
self.address_types = address_types
self.singlenode_mode = singlenode_mode
self.exporter_stats_port = exporter_stats_port
def __call__(self):
if not os.path.isdir(HAPROXY_RUN_DIR):
@ -957,10 +1018,20 @@ class HAProxyContext(OSContextGenerator):
db = kv()
ctxt['stat_password'] = db.get('stat-password')
if not ctxt['stat_password']:
ctxt['stat_password'] = db.set('stat-password',
pwgen(32))
ctxt['stat_password'] = db.set('stat-password', pwgen(32))
db.flush()
# NOTE(rgildein): configure prometheus exporter for haproxy > 2.0.0
# New bind will be created and a prometheus-exporter
# will be used for path /metrics. At the same time,
# prometheus-exporter avoids using auth.
haproxy_version = get_installed_version("haproxy")
if (haproxy_version and
haproxy_version.ver_str >= LooseVersion("2.0.0") and
is_relation_made("haproxy-exporter")):
ctxt["stats_exporter_host"] = get_relation_ip("haproxy-exporter")
ctxt["stats_exporter_port"] = self.exporter_stats_port
for frontend in cluster_hosts:
if (len(cluster_hosts[frontend]['backends']) > 1 or
self.singlenode_mode):

View File

@ -25,6 +25,7 @@ Helpers for high availability.
import hashlib
import json
import os
import re
@ -36,6 +37,7 @@ from charmhelpers.core.hookenv import (
config,
status_set,
DEBUG,
application_name,
)
from charmhelpers.core.host import (
@ -65,6 +67,7 @@ JSON_ENCODE_OPTIONS = dict(
VIP_GROUP_NAME = 'grp_{service}_vips'
DNSHA_GROUP_NAME = 'grp_{service}_hostnames'
HAPROXY_DASHBOARD_RESOURCE = "haproxy-dashboard"
class DNSHAException(Exception):
@ -346,3 +349,29 @@ def update_hacluster_vip(service, relation_data):
relation_data['groups'] = {
key: ' '.join(vip_group)
}
def render_grafana_dashboard(prometheus_app_name, haproxy_dashboard):
    """Load grafana dashboard json model and insert prometheus datasource.

    :param prometheus_app_name: name of the 'prometheus' application that will
                                be used as datasource in grafana dashboard
    :type prometheus_app_name: str
    :param haproxy_dashboard: path to haproxy dashboard
    :type haproxy_dashboard: str
    :return: Grafana dashboard json model as a str.
    :rtype: str
    """
    # Imported lazily so importing this module does not pull in the jinja
    # templating dependency for charms that never render dashboards.
    from charmhelpers.contrib.templating import jinja

    template_name = os.path.basename(haproxy_dashboard)
    template_dir = os.path.dirname(haproxy_dashboard)
    context = {
        "datasource": "{} - Juju generated source".format(prometheus_app_name),
        "app_name": application_name(),
        "prometheus_app_name": prometheus_app_name,
    }
    # Custom '<< ' / ' >>' delimiters keep jinja from clashing with the
    # '{{ }}' placeholders grafana itself uses inside the dashboard json.
    return jinja.render(
        template_name,
        context,
        template_dir=template_dir,
        jinja_env_args={"variable_start_string": "<< ",
                       "variable_end_string": " >>"})

View File

@ -25,6 +25,7 @@ from charmhelpers.contrib.network.ip import (
is_ipv6,
get_ipv6_addr,
resolve_network_cidr,
get_iface_for_address
)
from charmhelpers.contrib.hahelpers.cluster import is_clustered
@ -145,6 +146,30 @@ def local_address(unit_get_fallback='public-address'):
return unit_get(unit_get_fallback)
def get_invalid_vips():
    """Check if any of the provided vips are invalid.

    A vip is invalid if it doesn't belong to the subnet in any interface.
    If all vips are valid, this returns an empty list.

    :returns: A list of strings, where each string is an invalid vip address.
    :rtype: list
    """
    clustered = is_clustered()
    vip_config = config('vip')
    # Nothing to validate unless the unit is clustered and vips are
    # actually configured.
    if not clustered or not vip_config:
        return []
    # A vip with no matching local interface subnet cannot be brought up.
    return [vip for vip in vip_config.split()
            if get_iface_for_address(vip) is None]
def resolve_address(endpoint_type=PUBLIC, override=True):
"""Return unit address depending on net config.

View File

@ -310,7 +310,7 @@ def ssh_known_hosts_lines(application_name, user=None):
for hosts_line in hosts:
if hosts_line.rstrip():
known_hosts_list.append(hosts_line.rstrip())
return(known_hosts_list)
return known_hosts_list
def ssh_authorized_keys_lines(application_name, user=None):
@ -327,7 +327,7 @@ def ssh_authorized_keys_lines(application_name, user=None):
for authkey_line in keys:
if authkey_line.rstrip():
authorized_keys_list.append(authkey_line.rstrip())
return(authorized_keys_list)
return authorized_keys_list
def ssh_compute_remove(public_key, application_name, user=None):

View File

@ -49,6 +49,11 @@ defaults
listen stats
bind {{ local_host }}:{{ stat_port }}
{%- if stats_exporter_host and stats_exporter_port %}
bind {{ stats_exporter_host }}:{{ stats_exporter_port }}
option http-use-htx
http-request use-service prometheus-exporter if { path /metrics }
{%- endif %}
mode http
stats enable
stats hide-version

View File

@ -23,6 +23,12 @@ from subprocess import (
call
)
from charmhelpers.core.hookenv import (
log,
WARNING,
INFO
)
def _luks_uuid(dev):
"""
@ -110,7 +116,7 @@ def is_device_mounted(device):
return bool(re.search(r'MOUNTPOINT=".+"', out))
def mkfs_xfs(device, force=False, inode_size=1024):
def mkfs_xfs(device, force=False, inode_size=None):
    """Format device with XFS filesystem.

    By default this should fail if the device already has a filesystem on it.

    :param device: Full path to device to format
    :ptype device: str
    :param force: Force operation
    :ptype force: boolean
    :param inode_size: XFS inode size in bytes; if set to 0 or None,
        the value used will be the XFS system default
    :ptype inode_size: int
    """
    args = ['mkfs.xfs']
    if force:
        # -f overwrites any existing filesystem signature on the device.
        args.append("-f")
    if not inode_size:
        log("Using XFS filesystem with system default inode size.",
            level=INFO)
    elif 256 <= inode_size <= 2048:
        # mkfs.xfs only accepts inode sizes between 256 and 2048 bytes.
        args += ['-i', "size={}".format(inode_size)]
    else:
        log("Config value xfs-inode-size={} is invalid. Using system default.".format(inode_size), level=WARNING)
    args.append(device)
    check_call(args)

View File

@ -954,7 +954,7 @@ def pwgen(length=None):
random_generator = random.SystemRandom()
random_chars = [
random_generator.choice(alphanumeric_chars) for _ in range(length)]
return(''.join(random_chars))
return ''.join(random_chars)
def is_phy_iface(interface):

View File

@ -230,6 +230,10 @@ CLOUD_ARCHIVE_POCKETS = {
'zed/proposed': 'jammy-proposed/zed',
'jammy-zed/proposed': 'jammy-proposed/zed',
'jammy-proposed/zed': 'jammy-proposed/zed',
# OVN
'focal-ovn-22.03': 'focal-updates/ovn-22.03',
'focal-ovn-22.03/proposed': 'focal-proposed/ovn-22.03',
}
@ -363,6 +367,9 @@ def apt_install(packages, options=None, fatal=False, quiet=False):
:type quiet: bool
:raises: subprocess.CalledProcessError
"""
if not packages:
log("Nothing to install", level=DEBUG)
return
if options is None:
options = ['--option=Dpkg::Options::=--force-confold']
@ -687,6 +694,7 @@ def add_source(source, key=None, fail_invalid=False):
(r"^cloud-archive:(.*)$", _add_apt_repository),
(r"^((?:deb |http:|https:|ppa:).*)$", _add_apt_repository),
(r"^cloud:(.*)-(.*)\/staging$", _add_cloud_staging),
(r"^cloud:(.*)-(ovn-.*)$", _add_cloud_distro_check),
(r"^cloud:(.*)-(.*)$", _add_cloud_distro_check),
(r"^cloud:(.*)$", _add_cloud_pocket),
(r"^snap:.*-(.*)-(.*)$", _add_cloud_distro_check),
@ -750,6 +758,11 @@ def _add_apt_repository(spec):
)
def __write_sources_list_d_actual_pocket(file, actual_pocket):
    """Write an apt source entry for a cloud-archive pocket.

    Creates (or overwrites) the named file under /etc/apt/sources.list.d/
    with the CLOUD_ARCHIVE template rendered for the given pocket.

    :param file: basename of the sources.list.d file to write
    :ptype file: str
    :param actual_pocket: resolved pocket string to substitute into the
        CLOUD_ARCHIVE template
    :ptype actual_pocket: str
    """
    target = '/etc/apt/sources.list.d/{}'.format(file)
    with open(target, 'w') as apt:
        apt.write(CLOUD_ARCHIVE.format(actual_pocket))
def _add_cloud_pocket(pocket):
"""Add a cloud pocket as /etc/apt/sources.d/cloud-archive.list
@ -769,8 +782,9 @@ def _add_cloud_pocket(pocket):
'Unsupported cloud: source option %s' %
pocket)
actual_pocket = CLOUD_ARCHIVE_POCKETS[pocket]
with open('/etc/apt/sources.list.d/cloud-archive.list', 'w') as apt:
apt.write(CLOUD_ARCHIVE.format(actual_pocket))
__write_sources_list_d_actual_pocket(
'cloud-archive{}.list'.format('' if 'ovn' not in pocket else '-ovn'),
actual_pocket)
def _add_cloud_staging(cloud_archive_release, openstack_release):

View File

@ -20,8 +20,8 @@ cliff<3.0.0
coverage>=4.5.2
pyudev # for ceph-* charm unit tests (need to fix the ceph-* charm unit tests/mocking)
git+https://github.com/openstack-charmers/zaza.git#egg=zaza
git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack
git+https://github.com/openstack-charmers/zaza.git@stable/zed#egg=zaza
git+https://github.com/openstack-charmers/zaza-openstack-tests.git@stable/zed#egg=zaza.openstack
# Needed for charm-glance:
git+https://opendev.org/openstack/tempest.git#egg=tempest

View File

@ -27,22 +27,22 @@ applications:
nova-cloud-controller-mysql-router:
charm: ch:mysql-router
channel: latest/edge
channel: 8.0/edge
placement-mysql-router:
charm: ch:mysql-router
channel: latest/edge
channel: 8.0/edge
keystone-mysql-router:
charm: ch:mysql-router
channel: latest/edge
channel: 8.0/edge
glance-mysql-router:
charm: ch:mysql-router
channel: latest/edge
channel: 8.0/edge
neutron-api-mysql-router:
charm: ch:mysql-router
channel: latest/edge
channel: 8.0/edge
cinder-mysql-router:
charm: ch:mysql-router
channel: latest/edge
channel: 8.0/edge
mysql-innodb-cluster:
charm: ch:mysql-innodb-cluster
@ -53,7 +53,7 @@ applications:
- '0'
- '1'
- '2'
channel: latest/edge
channel: 8.0/edge
rabbitmq-server:
charm: ch:rabbitmq-server
@ -62,7 +62,7 @@ applications:
source: *openstack-origin
to:
- '3'
channel: latest/edge
channel: 3.9/edge
nova-cloud-controller:
charm: ch:nova-cloud-controller
@ -73,7 +73,7 @@ applications:
openstack-origin: *openstack-origin
to:
- '4'
channel: latest/edge
channel: zed/edge
placement:
charm: ch:placement
@ -83,7 +83,7 @@ applications:
openstack-origin: *openstack-origin
to:
- '5'
channel: latest/edge
channel: zed/edge
neutron-api:
charm: ch:neutron-api
@ -95,7 +95,7 @@ applications:
openstack-origin: *openstack-origin
to:
- '6'
channel: latest/edge
channel: zed/edge
keystone:
charm: ch:keystone
@ -104,7 +104,7 @@ applications:
openstack-origin: *openstack-origin
to:
- '7'
channel: latest/edge
channel: zed/edge
neutron-gateway:
charm: ch:neutron-gateway
@ -114,7 +114,7 @@ applications:
openstack-origin: *openstack-origin
to:
- '8'
channel: latest/edge
channel: zed/edge
glance:
charm: ch:glance
@ -123,11 +123,11 @@ applications:
openstack-origin: *openstack-origin
to:
- '9'
channel: latest/edge
channel: zed/edge
neutron-openvswitch:
charm: ch:neutron-openvswitch
channel: latest/edge
channel: zed/edge
nova-compute:
charm: ch:nova-compute
@ -145,7 +145,7 @@ applications:
openstack-origin: *openstack-origin
to:
- '10'
channel: latest/edge
channel: zed/edge
cinder:
charm: ../../cinder.charm
@ -165,7 +165,7 @@ applications:
options:
block-device: "/mnt/cinder-lvm-block|20G"
config-flags: "target_helper=lioadm"
channel: latest/edge
channel: zed/edge
relations:

View File

@ -27,42 +27,38 @@ applications:
nova-cloud-controller-mysql-router:
charm: ch:mysql-router
channel: latest/edge
channel: 8.0/edge
placement-mysql-router:
charm: ch:mysql-router
channel: latest/edge
channel: 8.0/edge
keystone-mysql-router:
charm: ch:mysql-router
channel: latest/edge
channel: 8.0/edge
glance-mysql-router:
charm: ch:mysql-router
channel: latest/edge
channel: 8.0/edge
neutron-api-mysql-router:
charm: ch:mysql-router
channel: latest/edge
channel: 8.0/edge
cinder-mysql-router:
charm: ch:mysql-router
channel: latest/edge
channel: 8.0/edge
mysql-innodb-cluster:
charm: ch:mysql-innodb-cluster
num_units: 3
options:
source: *openstack-origin
to:
- '0'
- '1'
- '2'
channel: latest/edge
channel: 8.0/edge
rabbitmq-server:
charm: ch:rabbitmq-server
num_units: 1
options:
source: *openstack-origin
to:
- '3'
channel: latest/edge
channel: 3.9/edge
nova-cloud-controller:
charm: ch:nova-cloud-controller
@ -73,7 +69,7 @@ applications:
openstack-origin: *openstack-origin
to:
- '4'
channel: latest/edge
channel: zed/edge
placement:
charm: ch:placement
@ -83,7 +79,7 @@ applications:
openstack-origin: *openstack-origin
to:
- '5'
channel: latest/edge
channel: zed/edge
neutron-api:
charm: ch:neutron-api
@ -95,7 +91,7 @@ applications:
openstack-origin: *openstack-origin
to:
- '6'
channel: latest/edge
channel: zed/edge
keystone:
charm: ch:keystone
@ -104,7 +100,7 @@ applications:
openstack-origin: *openstack-origin
to:
- '7'
channel: latest/edge
channel: zed/edge
neutron-gateway:
charm: ch:neutron-gateway
@ -114,7 +110,7 @@ applications:
openstack-origin: *openstack-origin
to:
- '8'
channel: latest/edge
channel: zed/edge
glance:
charm: ch:glance
@ -123,11 +119,11 @@ applications:
openstack-origin: *openstack-origin
to:
- '9'
channel: latest/edge
channel: zed/edge
neutron-openvswitch:
charm: ch:neutron-openvswitch
channel: latest/edge
channel: zed/edge
nova-compute:
charm: ch:nova-compute
@ -145,7 +141,7 @@ applications:
openstack-origin: *openstack-origin
to:
- '10'
channel: latest/edge
channel: zed/edge
cinder:
charm: ../../cinder.charm
@ -165,7 +161,7 @@ applications:
options:
block-device: "/mnt/cinder-lvm-block|20G"
config-flags: "target_helper=lioadm"
channel: latest/edge
channel: zed/edge
relations:

View File

@ -27,22 +27,22 @@ applications:
nova-cloud-controller-mysql-router:
charm: ch:mysql-router
channel: latest/edge
channel: 8.0/edge
placement-mysql-router:
charm: ch:mysql-router
channel: latest/edge
channel: 8.0/edge
keystone-mysql-router:
charm: ch:mysql-router
channel: latest/edge
channel: 8.0/edge
glance-mysql-router:
charm: ch:mysql-router
channel: latest/edge
channel: 8.0/edge
neutron-api-mysql-router:
charm: ch:mysql-router
channel: latest/edge
channel: 8.0/edge
cinder-mysql-router:
charm: ch:mysql-router
channel: latest/edge
channel: 8.0/edge
mysql-innodb-cluster:
charm: ch:mysql-innodb-cluster
@ -53,7 +53,7 @@ applications:
- '0'
- '1'
- '2'
channel: latest/edge
channel: 8.0/edge
rabbitmq-server:
charm: ch:rabbitmq-server
@ -62,7 +62,7 @@ applications:
source: *openstack-origin
to:
- '3'
channel: latest/edge
channel: 3.9/edge
nova-cloud-controller:
charm: ch:nova-cloud-controller
@ -73,7 +73,7 @@ applications:
openstack-origin: *openstack-origin
to:
- '4'
channel: latest/edge
channel: zed/edge
placement:
charm: ch:placement
@ -83,7 +83,7 @@ applications:
openstack-origin: *openstack-origin
to:
- '5'
channel: latest/edge
channel: zed/edge
neutron-api:
charm: ch:neutron-api
@ -95,7 +95,7 @@ applications:
openstack-origin: *openstack-origin
to:
- '6'
channel: latest/edge
channel: zed/edge
keystone:
charm: ch:keystone
@ -104,7 +104,7 @@ applications:
openstack-origin: *openstack-origin
to:
- '7'
channel: latest/edge
channel: zed/edge
neutron-gateway:
charm: ch:neutron-gateway
@ -114,7 +114,7 @@ applications:
openstack-origin: *openstack-origin
to:
- '8'
channel: latest/edge
channel: zed/edge
glance:
charm: ch:glance
@ -123,11 +123,11 @@ applications:
openstack-origin: *openstack-origin
to:
- '9'
channel: latest/edge
channel: zed/edge
neutron-openvswitch:
charm: ch:neutron-openvswitch
channel: latest/edge
channel: zed/edge
nova-compute:
charm: ch:nova-compute
@ -145,7 +145,7 @@ applications:
openstack-origin: *openstack-origin
to:
- '10'
channel: latest/edge
channel: zed/edge
cinder:
charm: ../../cinder.charm
@ -165,7 +165,7 @@ applications:
options:
block-device: "/mnt/cinder-lvm-block|20G"
config-flags: "target_helper=lioadm"
channel: latest/edge
channel: zed/edge
relations: