Add support for the openstack-loadbalancer charm

When ceph-nfs is related to the openstack-loadbalancer charm, all
NFS connections are load-balanced through a TCP load balancer
listening on a VIP and forwarded, round-robin, to the
nfs-ganesha servers providing the service.
Chris MacNaughton 2022-02-10 09:31:53 +01:00 committed by Chris MacNaughton
parent f345dd3dbf
commit bfeee44206
7 changed files with 64 additions and 23 deletions
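
At a high level, the charm-side flow this change introduces is sketched below. The interface module, the event names, and the request_loadbalancer() call are taken from the diff that follows; the surrounding scaffolding (a bare CharmBase subclass and the placeholder _on_lb_configured handler) is illustrative only and is not the actual ceph-nfs charm code.

from ops.charm import CharmBase
from ops.main import main

import interface_openstack_loadbalancer.loadbalancer as ops_lb_interface


class CephNfsSketch(CharmBase):
    """Illustrative sketch only; the real charm extends its OpenStack base class."""

    LB_SERVICE_NAME = 'nfs-ganesha'
    NFS_PORT = 2049

    def __init__(self, *args):
        super().__init__(*args)
        # Consume the new 'loadbalancer' endpoint declared in metadata.yaml.
        self.ingress = ops_lb_interface.OSLoadbalancerRequires(
            self, 'loadbalancer')
        self.framework.observe(self.ingress.on.lb_relation_ready,
                               self._request_loadbalancer)
        self.framework.observe(self.ingress.on.lb_configured,
                               self._on_lb_configured)

    def _request_loadbalancer(self, _event):
        # Ask for a TCP frontend on the NFS port, backed by this unit's
        # public-space address; clients then reach Ganesha through the VIP.
        self.ingress.request_loadbalancer(
            self.LB_SERVICE_NAME,
            self.NFS_PORT,
            self.NFS_PORT,
            str(self.model.get_binding('public').network.ingress_address),
            'tcp')

    def _on_lb_configured(self, _event):
        # Placeholder: the real charm re-renders its configuration here so
        # the advertised access address becomes the load balancer VIP.
        pass


if __name__ == '__main__':
    main(CephNfsSketch)

In the actual charm, lb_configured is wired to the existing render_config handler so ganesha.conf is re-rendered once the VIP is known.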

View File

@@ -19,9 +19,8 @@ extra-bindings:
 requires:
   ceph-client:
     interface: ceph-client
-  hacluster:
-    interface: hacluster
-    scope: container
+  loadbalancer:
+    interface: openstack-loadbalancer
 peers:
   cluster:
     interface: ceph-nfs-peer

View File

@@ -3,14 +3,13 @@
       - charm-unit-jobs
     check:
       jobs:
-        - octopus
-        - pacific
+        - focal-pacific
     vars:
       needs_charm_build: true
-      charm_build_name: ceph-iscsi
+      charm_build_name: ceph-nfs
       build_type: charmcraft
 - job:
-    name: focal-octopus
+    name: focal-pacific
     parent: func-target
     dependencies:
       - osci-lint
@@ -18,12 +17,5 @@
       - tox-py36
       - tox-py37
       - tox-py38
-    vars:
-      tox_extra_args: focal-octopus
-- job:
-    name: focal-pacific
-    parent: func-target
-    dependencies: &smoke-jobs
-      - focal-octopus
     vars:
       tox_extra_args: focal-pacific

View File

@@ -3,3 +3,4 @@ git+https://github.com/juju/charm-helpers.git#egg=charmhelpers
 git+https://github.com/canonical/operator.git#egg=ops
 git+https://opendev.org/openstack/charm-ops-interface-ceph-client#egg=interface_ceph_client
 git+https://github.com/ChrisMacNaughton/charm-ops-openstack.git@feature/charm-instance-to-relation-adapter#egg=ops_openstack
+git+https://github.com/openstack-charmers/ops-interface-openstack-loadbalancer#egg=interface_openstack_loadbalancer

View File

@@ -27,6 +27,9 @@ import charmhelpers.core.host as ch_host
 import charmhelpers.core.templating as ch_templating
 import interface_ceph_client.ceph_client as ceph_client
 import interface_ceph_nfs_peer
+import interface_openstack_loadbalancer.loadbalancer as ops_lb_interface

 # TODO: Add the below class functionaity to action / relations
 from ganesha import GaneshaNfs
@@ -132,6 +135,9 @@ class CephNfsCharm(
     SERVICES = ['nfs-ganesha']

+    LB_SERVICE_NAME = "nfs-ganesha"
+    NFS_PORT = 2049
+
     RESTART_MAP = {
         str(GANESHA_CONF): SERVICES,
         str(CEPH_CONF): SERVICES,
@@ -153,6 +159,9 @@ class CephNfsCharm(
         self.peers = interface_ceph_nfs_peer.CephNfsPeers(
             self,
             'cluster')
+        self.ingress = ops_lb_interface.OSLoadbalancerRequires(
+            self,
+            'loadbalancer')
         self.adapters = CephNFSAdapters(
             (self.ceph_client, self.peers),
             contexts=(CephNFSContext(self),),
@@ -181,6 +190,12 @@ class CephNfsCharm(
         self.framework.observe(
             self.peers.on.reload_nonce,
             self.on_reload_nonce)
+        self.framework.observe(
+            self.ingress.on.lb_relation_ready,
+            self._request_loadbalancer)
+        self.framework.observe(
+            self.ingress.on.lb_configured,
+            self.render_config)
         # Actions
         self.framework.observe(
             self.on.create_share_action,
@@ -201,6 +216,20 @@ class CephNfsCharm(
             self.revoke_access_action
         )

+    def _request_loadbalancer(self, _) -> None:
+        """Send request to create loadbalancer"""
+        self.ingress.request_loadbalancer(
+            self.LB_SERVICE_NAME,
+            self.NFS_PORT,
+            self.NFS_PORT,
+            self._get_bind_ip(),
+            'tcp')
+
+    def _get_bind_ip(self) -> str:
+        """Return the IP to bind the dashboard to"""
+        binding = self.model.get_binding('public')
+        return str(binding.network.ingress_address)
+
     def config_get(self, key, default=None):
         """Retrieve config option.
@@ -347,10 +376,16 @@ class CephNfsCharm(
     def access_address(self) -> str:
         """Return the IP to advertise Ganesha on"""
         binding = self.model.get_binding('public')
-        if self.model.get_relation('hacluster'):
-            return self.config_get('vip')
+        ingress_address = str(binding.network.ingress_address)
+        if self.ingress.relations:
+            lb_response = self.ingress.get_frontend_data()
+            if lb_response:
+                lb_config = lb_response[self.LB_SERVICE_NAME]
+                return [i for d in lb_config.values() for i in d['ip']][0]
+            else:
+                return ingress_address
         else:
-            return str(binding.network.ingress_address)
+            return ingress_address

     def create_share_action(self, event):
         if not self.model.unit.is_leader():
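
The rewritten access_address() flattens whatever get_frontend_data() returns and advertises the first IP it finds; if the loadbalancer relation exists but no frontend data has arrived yet, it falls back to the unit's own ingress address. The exact payload layout is defined by the openstack-loadbalancer interface and is not shown in this commit, so the snippet below assumes a plausible shape (service name, then listener, then an 'ip' list) purely to illustrate what the list comprehension extracts.

# Hypothetical frontend data keyed by LB_SERVICE_NAME; the real structure
# comes from interface_openstack_loadbalancer and may differ.
lb_response = {
    'nfs-ganesha': {
        'admin': {'ip': ['10.5.100.10'], 'port': 2049, 'protocol': 'tcp'},
        'public': {'ip': ['10.5.100.10'], 'port': 2049, 'protocol': 'tcp'},
    },
}

lb_config = lb_response['nfs-ganesha']
# Flatten every advertised IP across all listeners and take the first one,
# exactly as the comprehension in access_address() does.
vip = [i for d in lb_config.values() for i in d['ip']][0]
print(vip)  # 10.5.100.10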

View File

@@ -13,4 +13,3 @@ oslo.i18n<4.0.0
 git+https://github.com/openstack-charmers/zaza.git#egg=zaza
 git+https://github.com/openstack-charmers/zaza-openstack-tests.git#egg=zaza.openstack
 pytz # workaround for 14.04 pip/tox
-pyudev # for ceph-* charm unit tests (not mocked?)

View File

@@ -8,22 +8,29 @@ applications:
     charm: ../../ceph-nfs.charm
     num_units: 2
   ceph-osd:
-    charm: cs:~openstack-charmers-next/ceph-osd
+    charm: ch:ceph-osd
     num_units: 3
     storage:
       osd-devices: '2,10G'
     options:
-      osd-devices: '/dev/test-non-existent'
+      source: cloud:focal-wallaby
   ceph-mon:
-    charm: cs:~openstack-charmers-next/ceph-mon
+    charm: ch:ceph-mon
     num_units: 3
     options:
       monitor-count: '3'
       expected-osd-count: 6
+      source: cloud:focal-wallaby
   ceph-fs:
-    charm: cs:~openstack-charmers-next/ceph-fs
+    charm: ch:ceph-fs
     num_units: 1
+  loadbalancer:
+    charm: ch:openstack-loadbalancer
+    num_units: 3
+  hacluster:
+    charm: ch:hacluster
+    options:
+      cluster_count: 3
 relations:
   - - 'ceph-mon:client'
     - 'ceph-nfs:ceph-client'
@@ -31,3 +38,7 @@ relations:
     - 'ceph-mon:osd'
   - - 'ceph-fs'
     - 'ceph-mon'
+  - - ceph-nfs
+    - loadbalancer
+  - - 'loadbalancer:ha'
+    - 'hacluster:ha'

View File

@@ -0,0 +1,4 @@
+applications:
+  loadbalancer:
+    options:
+      vip: '{{ TEST_VIP00 }}'