Add support for the openstack-loadbalancer charm
When ceph-nfs is related to openstack-loadbalancer, all NFS connections are load-balanced through a TCP load balancer listening on a VIP and passed back, round robin, to the nfs-ganesha servers providing the service.
parent f345dd3dbf
commit bfeee44206
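
For context, the requiring side of the openstack-loadbalancer interface follows a simple request/consume pattern, and the src/charm.py changes below implement exactly that. A minimal sketch of the pattern (OSLoadbalancerRequires, the lb_relation_ready and lb_configured events, request_loadbalancer() and get_frontend_data() are the calls used in this commit; the surrounding charm scaffolding and the logging are illustrative only):

#!/usr/bin/env python3
# Minimal sketch of a charm consuming the openstack-loadbalancer interface.
import logging

import interface_openstack_loadbalancer.loadbalancer as ops_lb_interface
from ops.charm import CharmBase
from ops.main import main

logger = logging.getLogger(__name__)


class ExampleCharm(CharmBase):

    LB_SERVICE_NAME = "nfs-ganesha"
    NFS_PORT = 2049

    def __init__(self, *args):
        super().__init__(*args)
        # 'loadbalancer' must match the relation name declared in metadata.yaml.
        self.ingress = ops_lb_interface.OSLoadbalancerRequires(
            self, 'loadbalancer')
        self.framework.observe(
            self.ingress.on.lb_relation_ready, self._request_loadbalancer)
        self.framework.observe(
            self.ingress.on.lb_configured, self._on_lb_configured)

    def _request_loadbalancer(self, _event) -> None:
        """Ask for a TCP frontend on the NFS port, backed by this unit."""
        binding = self.model.get_binding('public')
        self.ingress.request_loadbalancer(
            self.LB_SERVICE_NAME,
            self.NFS_PORT,                          # frontend port
            self.NFS_PORT,                          # backend port
            str(binding.network.ingress_address),   # backend member IP
            'tcp')

    def _on_lb_configured(self, _event) -> None:
        """Log the frontend address once the loadbalancer reports it."""
        lb_response = self.ingress.get_frontend_data()
        if lb_response:
            lb_config = lb_response[self.LB_SERVICE_NAME]
            ips = [i for d in lb_config.values() for i in d['ip']]
            logger.info("NFS is reachable through %s", ips[0])


if __name__ == '__main__':
    main(ExampleCharm)

In the real charm the frontend address obtained this way is what access_address() advertises to NFS clients, as the charm.py diff below shows.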

@@ -19,9 +19,8 @@ extra-bindings:
 requires:
   ceph-client:
     interface: ceph-client
-  hacluster:
-    interface: hacluster
-    scope: container
+  loadbalancer:
+    interface: openstack-loadbalancer
 peers:
   cluster:
     interface: ceph-nfs-peer

osci.yaml (14 lines changed)
@@ -3,14 +3,13 @@
       - charm-unit-jobs
     check:
       jobs:
-        - octopus
-        - pacific
+        - focal-pacific
     vars:
       needs_charm_build: true
-      charm_build_name: ceph-iscsi
+      charm_build_name: ceph-nfs
       build_type: charmcraft
 - job:
-    name: focal-octopus
+    name: focal-pacific
     parent: func-target
     dependencies:
       - osci-lint
@@ -18,12 +17,5 @@
       - tox-py36
       - tox-py37
       - tox-py38
-    vars:
-      tox_extra_args: focal-octopus
-- job:
-    name: focal-pacific
-    parent: func-target
-    dependencies: &smoke-jobs
-      - focal-octopus
     vars:
       tox_extra_args: focal-pacific

@@ -3,3 +3,4 @@ git+https://github.com/juju/charm-helpers.git#egg=charmhelpers
 git+https://github.com/canonical/operator.git#egg=ops
 git+https://opendev.org/openstack/charm-ops-interface-ceph-client#egg=interface_ceph_client
 git+https://github.com/ChrisMacNaughton/charm-ops-openstack.git@feature/charm-instance-to-relation-adapter#egg=ops_openstack
+git+https://github.com/openstack-charmers/ops-interface-openstack-loadbalancer#egg=interface_openstack_loadbalancer

src/charm.py (41 lines changed)
@@ -27,6 +27,9 @@ import charmhelpers.core.host as ch_host
 import charmhelpers.core.templating as ch_templating
 import interface_ceph_client.ceph_client as ceph_client
 import interface_ceph_nfs_peer
+
+import interface_openstack_loadbalancer.loadbalancer as ops_lb_interface
+
 # TODO: Add the below class functionaity to action / relations
 from ganesha import GaneshaNfs

@@ -132,6 +135,9 @@ class CephNfsCharm(

     SERVICES = ['nfs-ganesha']

+    LB_SERVICE_NAME = "nfs-ganesha"
+    NFS_PORT = 2049
+
     RESTART_MAP = {
         str(GANESHA_CONF): SERVICES,
         str(CEPH_CONF): SERVICES,
@@ -153,6 +159,9 @@ class CephNfsCharm(
         self.peers = interface_ceph_nfs_peer.CephNfsPeers(
             self,
             'cluster')
+        self.ingress = ops_lb_interface.OSLoadbalancerRequires(
+            self,
+            'loadbalancer')
         self.adapters = CephNFSAdapters(
             (self.ceph_client, self.peers),
             contexts=(CephNFSContext(self),),
@@ -181,6 +190,12 @@ class CephNfsCharm(
         self.framework.observe(
             self.peers.on.reload_nonce,
             self.on_reload_nonce)
+        self.framework.observe(
+            self.ingress.on.lb_relation_ready,
+            self._request_loadbalancer)
+        self.framework.observe(
+            self.ingress.on.lb_configured,
+            self.render_config)
         # Actions
         self.framework.observe(
             self.on.create_share_action,
@@ -201,6 +216,20 @@ class CephNfsCharm(
             self.revoke_access_action
         )

+    def _request_loadbalancer(self, _) -> None:
+        """Send request to create loadbalancer"""
+        self.ingress.request_loadbalancer(
+            self.LB_SERVICE_NAME,
+            self.NFS_PORT,
+            self.NFS_PORT,
+            self._get_bind_ip(),
+            'tcp')
+
+    def _get_bind_ip(self) -> str:
+        """Return the IP to bind the dashboard to"""
+        binding = self.model.get_binding('public')
+        return str(binding.network.ingress_address)
+
     def config_get(self, key, default=None):
         """Retrieve config option.

@@ -347,10 +376,16 @@ class CephNfsCharm(
     def access_address(self) -> str:
         """Return the IP to advertise Ganesha on"""
         binding = self.model.get_binding('public')
-        if self.model.get_relation('hacluster'):
-            return self.config_get('vip')
+        ingress_address = str(binding.network.ingress_address)
+        if self.ingress.relations:
+            lb_response = self.ingress.get_frontend_data()
+            if lb_response:
+                lb_config = lb_response[self.LB_SERVICE_NAME]
+                return [i for d in lb_config.values() for i in d['ip']][0]
+            else:
+                return ingress_address
         else:
-            return str(binding.network.ingress_address)
+            return ingress_address

     def create_share_action(self, event):
         if not self.model.unit.is_leader():
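
The list comprehension in access_address() above flattens whatever get_frontend_data() returns. The commit itself only implies the payload shape: a dict keyed by service name whose per-frontend entries carry a list of addresses under 'ip'. A hypothetical illustration of that lookup (the keys and addresses below are invented):

# Hypothetical get_frontend_data() payload; only the nesting implied by the
# access_address() lookup is assumed, the frontend names and IPs are made up.
lb_response = {
    'nfs-ganesha': {
        'admin': {'ip': ['10.20.0.100'], 'port': 2049, 'protocol': 'tcp'},
        'public': {'ip': ['10.20.0.101'], 'port': 2049, 'protocol': 'tcp'},
    },
}

lb_config = lb_response['nfs-ganesha']
# Flatten every advertised address and advertise the first one.
ips = [i for d in lb_config.values() for i in d['ip']]
assert ips == ['10.20.0.100', '10.20.0.101']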

@@ -13,4 +13,3 @@ oslo.i18n<4.0.0
git+https://github.com/openstack-charmers/zaza.git#egg=zaza
git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack
pytz # workaround for 14.04 pip/tox
pyudev # for ceph-* charm unit tests (not mocked?)

@@ -8,22 +8,29 @@ applications:
     charm: ../../ceph-nfs.charm
     num_units: 2
   ceph-osd:
-    charm: cs:~openstack-charmers-next/ceph-osd
+    charm: ch:ceph-osd
     num_units: 3
     storage:
       osd-devices: '2,10G'
     options:
       osd-devices: '/dev/test-non-existent'
       source: cloud:focal-wallaby
   ceph-mon:
-    charm: cs:~openstack-charmers-next/ceph-mon
+    charm: ch:ceph-mon
     num_units: 3
     options:
       monitor-count: '3'
       expected-osd-count: 6
       source: cloud:focal-wallaby
   ceph-fs:
-    charm: cs:~openstack-charmers-next/ceph-fs
+    charm: ch:ceph-fs
     num_units: 1

+  loadbalancer:
+    charm: ch:openstack-loadbalancer
+    num_units: 3
+  hacluster:
+    charm: ch:hacluster
+    options:
+      cluster_count: 3
 relations:
   - - 'ceph-mon:client'
     - 'ceph-nfs:ceph-client'
@@ -31,3 +38,7 @@ relations:
     - 'ceph-mon:osd'
   - - 'ceph-fs'
     - 'ceph-mon'
+  - - ceph-nfs
+    - loadbalancer
+  - - 'loadbalancer:ha'
+    - 'hacluster:ha'

tests/bundles/overlays/focal-pacific.yaml.j2 (new file, 4 lines)
@@ -0,0 +1,4 @@
+applications:
+  loadbalancer:
+    options:
+      vip: '{{ TEST_VIP00 }}'
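
The overlay above is a Jinja2 template; the functional test tooling is expected to substitute a real VIP for TEST_VIP00 when the bundle is deployed. A rough illustration of that substitution (rendering directly with jinja2 and the address used here are illustrative only):

# Illustrative only: render the overlay template with a made-up VIP.
import jinja2

overlay = (
    "applications:\n"
    "  loadbalancer:\n"
    "    options:\n"
    "      vip: '{{ TEST_VIP00 }}'\n"
)
print(jinja2.Template(overlay).render(TEST_VIP00='10.20.0.100'))
# applications:
#   loadbalancer:
#     options:
#       vip: '10.20.0.100'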