add reload nonce, and fix up some setup bits
parent d968d6ec88
commit c9817a4b17

actions.yaml (22 changed lines)

@@ -1,16 +1,10 @@
-# Copyright 2021 OpenStack Charmers
+# Copyright 2022 Canonical
 # See LICENSE file for licensing details.
-#
-# TEMPLATE-TODO: change this example to suit your needs.
-# If you don't need actions, you can remove the file entirely.
-# It ties in to the example _on_fortune_action handler in src/charm.py
-#
-# Learn more about actions at: https://juju.is/docs/sdk/actions
 
-# fortune:
-#   description: Returns a pithy phrase.
-#   params:
-#     fail:
-#       description: "Fail with this message"
-#       type: string
-#       default: ""
+create-share:
+  description: Create a new CephFS Backed NFS export
+  params:
+    allowed-ips:
+      description: IP Addresses to grant Read/Write access to
+      type: string
+      default: "0.0.0.0"
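
The reworked action maps onto a charm-side handler that is not part of this diff. A minimal sketch of consuming it, assuming a hypothetical _on_create_share_action method wired up with framework.observe, is below; note that create_share() in this commit still hardcodes clients='0.0.0.0', so the allowed-ips parameter is presumably plumbed through in a later change.

    # Hypothetical handler, for illustration only; it is not in this commit.
    # Wiring (in __init__):
    #   self.framework.observe(self.on.create_share_action, self._on_create_share_action)
    def _on_create_share_action(self, event):
        allowed_ips = event.params['allowed-ips']  # "0.0.0.0" unless overridden
        export_path = self.ganesha_client.create_share()  # returns the path as of this commit
        event.set_results({'path': export_path, 'allowed-ips': allowed_ips})
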
src/charm.py (22 changed lines)

@@ -17,6 +17,7 @@ import os
 from pathlib import Path
 import socket
 import subprocess
+import tempfile
 
 from ops.framework import StoredState
 from ops.main import main
@@ -174,6 +175,9 @@ class CephNfsCharm(
         self.framework.observe(
             self.peers.on.pool_initialised,
             self.on_pool_initialised)
+        self.framework.observe(
+            self.peers.on.reload_nonce,
+            self.on_reload_nonce)
 
     def config_get(self, key, default=None):
         """Retrieve config option.
@@ -264,7 +268,7 @@ class CephNfsCharm(
         if not self._stored.is_cluster_setup:
             subprocess.check_call([
                 'ganesha-rados-grace', '--userid', self.client_name,
-                '--cephconf', '/etc/ceph/ganesha/ceph.conf', '--pool', self.pool_name,
+                '--cephconf', self.CEPH_CONF, '--pool', self.pool_name,
                 'add', socket.gethostname()])
             self._stored.is_cluster_setup = True
 
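
Both hard-coded conf paths in this file now go through self.CEPH_CONF. Its definition sits outside the captured hunks; presumably it is a class attribute along these lines (the exact value is an assumption, not visible in this diff):

    class CephNfsCharm(ops_openstack.core.OSBaseCharm):  # base class assumed
        # Assumed definition: one canonical home for the path that the two
        # subprocess calls above previously spelled out inline.
        CEPH_CONF = '/etc/ceph/ganesha/ceph.conf'
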
@@ -273,11 +277,21 @@ class CephNfsCharm(
             return
         cmd = [
             'rados', '-p', self.pool_name,
-            '-c', '/etc/ceph/ganesha/ceph.conf',
+            '-c', self.CEPH_CONF,
             '--id', self.client_name,
             'put', 'ganesha-export-index', '/dev/null'
         ]
         try:
+            subprocess.check_call(cmd)
+            counter = tempfile.NamedTemporaryFile('w+')
+            counter.write('1000')
+            counter.seek(0)
+            cmd = [
+                'rados', '-p', self.pool_name,
+                '-c', self.CEPH_CONF,
+                '--id', self.client_name,
+                'put', 'ganesha-export-counter', counter.name
+            ]
             subprocess.check_call(cmd)
             self.peers.pool_initialised()
         except subprocess.CalledProcessError:
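
The counter seeding leans on two tempfile.NamedTemporaryFile behaviours: seeking flushes the buffered write, so the external rados process can actually see the '1000', and the file vanishes as soon as the handle is closed or garbage-collected, so counter must stay referenced until the put has run. A standalone sketch of the same pattern:

    import subprocess
    import tempfile

    counter = tempfile.NamedTemporaryFile('w+')
    counter.write('1000')
    counter.seek(0)  # rewind; the seek also flushes the pending write to disk
    subprocess.check_call(['cat', counter.name])  # stand-in for the `rados put` above
    # `counter` must still be referenced here: closing it deletes the file
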
@@ -291,6 +305,10 @@ class CephNfsCharm(
             logging.error("Failed to restart nfs-ganesha")
             event.defer()
 
+    def on_reload_nonce(self, _event):
+        logging.info("Reloading Ganesha after nonce triggered reload")
+        subprocess.call(['killall', '-HUP', 'ganesha.nfsd'])
+
 
 @ops_openstack.core.charm_class
 class CephNFSCharmOcto(CephNfsCharm):
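
on_reload_nonce relies on nfs-ganesha's signal handling: the daemon re-reads its export configuration on SIGHUP, so HUP-ing ganesha.nfsd picks up newly written export objects without a full restart. killall is presumably used because the charm does not track the daemon's pid; an equivalent sketch with os.kill and an assumed pidfile path would be:

    import os
    import signal

    # Pidfile path is an assumption; the charm itself shells out to killall instead.
    with open('/var/run/ganesha/ganesha.pid') as f:
        os.kill(int(f.read().strip()), signal.SIGHUP)

The remaining hunks touch the Ganesha helper module (GaneshaNfs), the peer-relation interface, and the Ganesha daemon configuration template; their file headers are not shown.
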
@@ -34,7 +34,7 @@ GANESHA_EXPORT_TEMPLATE = """EXPORT {{
     SecType = "sys";
     CLIENT {{
         Access_Type = "rw";
-        Clients = {clients}
+        Clients = {clients};
     }}
     # User id squashing, one of None, Root, All
     Squash = "None";
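
The one-character change above matters because Ganesha's configuration grammar terminates every statement with a semicolon, and a missing one is liable to make the daemon reject the generated export block. With the fix, and the clients='0.0.0.0' default used by create_share() below, the rendered section comes out as:

    CLIENT {
        Access_Type = "rw";
        Clients = 0.0.0.0;
    }
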
@@ -64,10 +64,12 @@ class GaneshaNfs(object):
             secret_key=self._ceph_auth_key(),
             clients='0.0.0.0'
         )
-        logging.debug("Export template:: \n{}".format(export_template))
+        logging.debug("Export template::\n{}".format(export_template))
         tmp_file = self._tmpfile(export_template)
-        self.rados_put('ganesha-export-{}'.format(export_id), tmp_file.name)
+        self._rados_put('ganesha-export-{}'.format(export_id), tmp_file.name)
         self._ganesha_add_export(self.export_path, tmp_file.name)
+        self._add_share_to_index(export_id)
+        return self.export_path
 
     def _ganesha_add_export(self, export_path, tmp_path):
         """Add a configured NFS export to Ganesha"""
@@ -131,7 +133,7 @@ class GaneshaNfs(object):
 
     def _ceph_command(self, *cmd):
         """Run a ceph command"""
-        cmd = ["ceph", "--id", self.client_name, "--conf=/etc/ceph/ganesha/ceph.conf"] + [*cmd]
+        cmd = ["ceph", "--id", self.client_name, "--conf=/etc/ceph/ceph.conf"] + [*cmd]
         return subprocess.check_output(cmd)
 
     def _get_next_export_id(self):
@@ -140,9 +142,9 @@ class GaneshaNfs(object):
         :returns: The export ID
         :rtype: str
         """
-        next_id = int(self.rados_get(self.export_counter))
+        next_id = int(self._rados_get(self.export_counter))
         file = self._tmpfile(next_id + 1)
-        self.rados_put(self.export_counter, file.name)
+        self._rados_put(self.export_counter, file.name)
         return next_id
 
     def _tmpfile(self, value):
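
Combined with the '1000' seed written at pool-initialisation time, the counter round-trip is: read the text integer out of ganesha-export-counter, write the incremented value back, and return the value that was read. So the first share gets id 1000 (stored as ganesha-export-1000), the next 1001, and so on:

    next_id = int('1000')   # read back from ganesha-export-counter
    # '1001' is then written back for the next caller
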
@@ -151,7 +153,7 @@ class GaneshaNfs(object):
         file.seek(0)
         return file
 
-    def rados_get(self, name):
+    def _rados_get(self, name):
         """Retrieve the content of the RADOS object with a given name
 
         :param name: Name of the RADOS object to retrieve
@@ -167,7 +169,7 @@ class GaneshaNfs(object):
         output = subprocess.check_output(cmd)
         return output.decode('utf-8')
 
-    def rados_put(self, name, source):
+    def _rados_put(self, name, source):
         """Store the contents of the source file in a named RADOS object.
 
         :param name: Name of the RADOS object to retrieve
@@ -181,3 +183,9 @@ class GaneshaNfs(object):
         ]
         logging.debug("About to call: {}".format(cmd))
         subprocess.check_call(cmd)
+
+    def _add_share_to_index(self, export_id):
+        index = self._rados_get(self.export_index)
+        index += '%url rados://{}/ganesha-export-{}'.format(self.ceph_pool, export_id)
+        tmpfile = self._tmpfile(index)
+        self._rados_put(self.export_index, tmpfile.name)
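
_add_share_to_index appends a Ganesha %url directive to the ganesha-export-index object, the same object the RADOS_URLS block in the configuration template (final hunks below) points the daemon at. For a pool hypothetically named ceph-nfs, the first share's entry would read:

    %url rados://ceph-nfs/ganesha-export-1000

One caveat: the append includes no newline, so as written a second entry would run onto the same line; a trailing '\n' in the format string would likely be needed once multiple shares exist.
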
@@ -3,6 +3,7 @@
 # import json
 import logging
 # import socket
+import uuid
 
 from ops.framework import (
     StoredState,
@@ -15,9 +16,12 @@ from ops.framework import (
 class PoolInitialisedEvent(EventBase):
     pass
 
+class ReloadNonceEvent(EventBase):
+    pass
 
 class CephNfsPeerEvents(ObjectEvents):
     pool_initialised = EventSource(PoolInitialisedEvent)
+    reload_nonce = EventSource(ReloadNonceEvent)
 
 
 class CephNfsPeers(Object):
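
The event plumbing above follows the standard ops pattern: an EventBase subclass per event type, an EventSource slot on an ObjectEvents container, and emit() to invoke every registered observer. A self-contained sketch of the mechanism, with illustrative names rather than the charm's:

    from ops.framework import EventBase, EventSource, Object, ObjectEvents

    class PingEvent(EventBase):
        pass

    class PingerEvents(ObjectEvents):
        ping = EventSource(PingEvent)

    class Pinger(Object):
        on = PingerEvents()

        def poke(self):
            # Runs every handler registered via framework.observe(pinger.on.ping, ...)
            self.on.ping.emit()
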
@@ -30,7 +34,8 @@ class CephNfsPeers(Object):
         self.relation_name = relation_name
         self.this_unit = self.framework.model.unit
         self._stored.set_default(
-            pool_initialised=False)
+            pool_initialised=False,
+            reload_nonce=None)
         self.framework.observe(
             charm.on[relation_name].relation_changed,
             self.on_changed)
@@ -40,12 +45,19 @@ class CephNfsPeers(Object):
         if self.pool_initialised == 'True' and not self._stored.pool_initialised:
             self.on.pool_initialised.emit()
             self._stored.pool_initialised = True
+        if self._stored.reload_nonce != self.reload_nonce():
+            self.on.reload_nonce.emit()
+            self._stored.reload_nonce = self.reload_nonce()
 
     def pool_initialised(self):
         logging.info("Setting pool initialised")
         self.peer_rel.data[self.peer_rel.app]['pool_initialised'] = 'True'
         self.on.pool_initialised.emit()
 
+    def trigger_reload(self):
+        self.peer_rel.data[self.peer_rel.app]['reload_nonce'] = uuid.uuid4()
+        self.on.reload_nonce.emit()
+
     @property
     def peer_rel(self):
         return self.framework.model.get_relation(self.relation_name)
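
The nonce round-trip: trigger_reload() publishes a fresh uuid into the app-level peer relation data, each unit's relation-changed hook then lands in on_changed(), and any unit whose StoredState holds a different nonce emits reload_nonce (which HUPs Ganesha via the charm's on_reload_nonce) exactly once before recording the new value. Two caveats worth flagging: Juju relation data values must be strings, so the bare uuid.uuid4() above likely wants a str() around it, and the reload_nonce() accessor that on_changed() calls is not visible in this diff. Presumably it is a simple reader, along these lines:

    def reload_nonce(self):
        # Assumed accessor for the value trigger_reload() publishes.
        return self.peer_rel.data[self.peer_rel.app].get('reload_nonce')
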
@@ -68,7 +68,7 @@ MDCACHE {
 
 # To read exports from RADOS objects
 RADOS_URLS {
-    ceph_conf = "/etc/ceph/ganesha/ceph.conf";
+    ceph_conf = "/etc/ceph/ceph.conf";
     userid = "{{ ceph_nfs.client_name }}";
 }
 
@@ -76,7 +76,7 @@ RADOS_URLS {
 # To store client recovery data in the same RADOS pool
 
 RADOS_KV {
-    ceph_conf = "/etc/ceph/ganesha/ceph.conf";
+    ceph_conf = "/etc/ceph/ceph.conf";
     userid = "{{ ceph_nfs.client_name }}";
     pool = "{{ ceph_nfs.pool_name }}";
     nodeid = "{{ ceph_nfs.hostname }}";
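
Both template blocks now point Ganesha at the stock /etc/ceph/ceph.conf. RADOS_URLS is the consumer of the %url index maintained by _add_share_to_index() above, while RADOS_KV keeps per-node client recovery state in the same pool. Rendered with illustrative values (client_name=ganesha, pool_name=ceph-nfs, hostname=nfs-0), the second block would come out as:

    RADOS_KV {
        ceph_conf = "/etc/ceph/ceph.conf";
        userid = "ganesha";
        pool = "ceph-nfs";
        nodeid = "nfs-0";
    }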