# charm-ceph-nfs/templates/ganesha.conf

# The following is copied from the Ganesha source examples:
# https://github.com/nfs-ganesha/nfs-ganesha/blob/576e3bafccb6da5c7ea18d7099013f7494ce8d2c/src/config_samples/ceph.conf
#
# It is possible to use FSAL_CEPH to provide an NFS gateway to CephFS. The
# following sample config should be useful as a starting point for
# configuration. This basic configuration is suitable for a standalone NFS
# server, or an active/passive configuration managed by some sort of clustering
# software (e.g. pacemaker, docker, etc.).
#
# Note that it is also possible to put a config file in RADOS, and give
# ganesha a rados URL from which to fetch it. For instance, if the config
# file is stored in a RADOS pool called "nfs-ganesha", in a namespace called
# "ganesha-namespace" with an object name of "ganesha-config":
#
# %url rados://nfs-ganesha/ganesha-namespace/ganesha-config
#
# If we only export cephfs (or RGW), store the configs and recovery data in
# RADOS, and mandate NFSv4.1+ for access, we can avoid any sort of local
# storage, and ganesha can run as an unprivileged user (even inside a
# locked-down container).
#
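# As an illustrative sketch (the pool, namespace, and object names below are
# the ones from the example URL above; the local file path is hypothetical),
# such an object could be uploaded with the rados CLI:
#
#   rados -p nfs-ganesha -N ganesha-namespace put ganesha-config /tmp/ganesha.conf
#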
NFS_CORE_PARAM
{
    # Ganesha can lift the NFS grace period early if NLM is disabled.
    Enable_NLM = false;

    # rquotad doesn't add any value here. CephFS doesn't support per-uid
    # quotas anyway.
    Enable_RQUOTA = false;

    # In this configuration, we're just exporting NFSv4. In practice, it's
    # best to use NFSv4.1+ to get the benefit of sessions.
    Protocols = 4;
}
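# As a hedged example of mounting such an export with NFSv4.1 (the host and
# paths here are illustrative, not something this charm configures):
#
#   mount -t nfs -o vers=4.1 ganesha-host:/cephfs /mnt/cephfs
#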
NFSv4
{
    # Modern versions of libcephfs have delegation support, though they
    # are not currently recommended in clustered configurations. They are
    # disabled by default but can be re-enabled for singleton or
    # active/passive configurations.
    # Delegations = false;

    # One can use any recovery backend with this configuration, but being
    # able to store it in RADOS is a nice feature that makes it easy to
    # migrate the daemon to another host.
    #
    # For a single-node or active/passive configuration, the rados_ng
    # driver is preferred. For active/active clustered configurations, the
    # rados_cluster backend can be used instead. See the
    # ganesha-rados-grace manpage for more information.
    RecoveryBackend = rados_cluster;

    # NFSv4.0 clients do not send a RECLAIM_COMPLETE, so we end up having
    # to wait out the entire grace period if there are any. Avoid them.
    Minor_Versions = 1,2;
}
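# The shared grace database used by the rados_cluster backend can be
# inspected with the ganesha-rados-grace tool; as an illustrative example
# (reusing this template's pool variable):
#
#   ganesha-rados-grace --pool {{ ceph_nfs.pool_name }} dump
#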
# The libcephfs client will aggressively cache information while it
# can, so there is little benefit to ganesha actively caching the same
# objects. Doing so can also hurt cache coherency. Here, we disable
# as much attribute and directory caching as we can.
MDCACHE {
    # Size the dirent cache down as small as possible.
    Dir_Chunk = 0;
}
# To read exports from RADOS objects
RADOS_URLS {
    ceph_conf = "/etc/ceph/ceph.conf";
    userid = "{{ ceph_nfs.client_name }}";
}
%url rados://{{ ceph_nfs.pool_name }}/ganesha-export-index
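# The export index object referenced above is expected to hold EXPORT blocks
# (or further %url directives). As a rough illustration only, with
# hypothetical ID and paths, a minimal FSAL_CEPH export might look like:
#
#   EXPORT
#   {
#       Export_ID = 100;
#       Protocols = 4;
#       Transports = TCP;
#       Path = /;
#       Pseudo = /cephfs;
#       Access_Type = RW;
#       FSAL {
#           Name = CEPH;
#       }
#   }
#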
# To store client recovery data in the same RADOS pool
RADOS_KV {
    ceph_conf = "/etc/ceph/ceph.conf";
    userid = "{{ ceph_nfs.client_name }}";
    pool = "{{ ceph_nfs.pool_name }}";
    nodeid = "{{ ceph_nfs.hostname }}";
}
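# The recovery objects written to this pool can be listed, illustratively,
# with:
#
#   rados -p {{ ceph_nfs.pool_name }} ls
#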
# Config block for FSAL_CEPH
CEPH
{
    # Path to a ceph.conf file for this ceph cluster.
    # Ceph_Conf = /etc/ceph/ceph.conf;

    # User file-creation mask. These bits will be masked off from the unix
    # permissions on newly-created inodes.
    # umask = 0;
}
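# As a worked example of the umask semantics above (the value 0022 is
# illustrative, not a recommendation): with umask = 0022, a file created
# with mode 0666 would end up as 0644, since the group and other write
# bits are masked off.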