#
# It is possible to use FSAL_CEPH to provide an NFS gateway to CephFS. The
# following sample config should be useful as a starting point for
# configuration. This basic configuration is suitable for a standalone NFS
# server, or an active/passive configuration managed by clustering
# software such as pacemaker or docker.
#
# Note that it is also possible to put a config file in RADOS, and give
# ganesha a rados URL from which to fetch it. For instance, if the config
# file is stored in a RADOS pool called "nfs-ganesha" with an object name of
# "ganesha-config":
#
# %url rados://nfs-ganesha/ganesha-config
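#
# The object can be created with the rados CLI tool; for example
# (assuming the pool/object names above and a local config at
# /etc/ganesha/ganesha.conf):
#
# rados -p nfs-ganesha put ganesha-config /etc/ganesha/ganesha.conf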
#
# If we only export cephfs (or RGW), store the configs and recovery data in
# RADOS, and mandate NFSv4.1+ for access, we can avoid any sort of local
# storage, and ganesha can run as an unprivileged user (even inside a
# locked-down container).
#
NFS_CORE_PARAM
{
# Ganesha can lift the NFS grace period early if NLM is disabled.
Enable_NLM = false;
# rquotad doesn't add any value here. CephFS doesn't support per-uid
# quotas anyway.
Enable_RQUOTA = false;
# In this configuration, we're just exporting NFSv4. In practice, it's
# best to use NFSv4.1+ to get the benefit of sessions.
Protocols = 4;
}
NFSv4
{
# Modern versions of libcephfs have delegation support, though they
# are not currently recommended in clustered configurations. They are
# disabled by default but can be reenabled for singleton or
# active/passive configurations.
# Delegations = false;
# Any recovery backend can be used with this configuration, but
# storing the recovery data in RADOS makes it easy to migrate the
# daemon to another host.
#
# For a single-node or active/passive configuration, the rados_ng
# driver is preferred. For active/active clustered configurations, the
# rados_cluster backend can be used instead. See the
# ganesha-rados-grace manpage for more information.
RecoveryBackend = rados_ng;
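# The state of the grace database can be examined with the
# ganesha-rados-grace tool; a sketch, assuming the default
# "nfs-ganesha" pool:
#
# ganesha-rados-grace --pool nfs-ganesha dump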
# NFSv4.0 clients do not send a RECLAIM_COMPLETE, so the server must
# wait out the entire grace period if any are connected. Avoid them.
Minor_Versions = 1,2;
}
# The libcephfs client will aggressively cache information while it
# can, so there is little benefit to ganesha actively caching the same
# objects. Doing so can also hurt cache coherency. Here, we disable
# as much attribute and directory caching as we can.
CACHEINODE
{
# Size the dirent cache down as small as possible.
Dir_Chunk = 0;
# Size the inode cache as small as possible.
NParts = 1;
Cache_Size = 1;
}
EXPORT
{
# Unique export ID number for this export
Export_ID=100;
# We're only interested in NFSv4 in this configuration
Protocols = 4;
# NFSv4 does not allow UDP transport
Transports = TCP;
#
# Path into the cephfs tree. For now, FSAL_CEPH doesn't support
# having more than one filesystem per running ganesha daemon.
#
# Note that FSAL_CEPH does not support subtree checking, so there is
# no way to validate that a filehandle presented by a client is
# reachable via an exported subtree.
#
# For that reason, we just export "/" here.
Path = /;
#
# The pseudoroot path. This is where the export will appear in the
# NFS pseudoroot namespace.
#
Pseudo = /ceph/;
# We want to be able to read and write
Access_Type = RW;
# Time out attribute cache entries immediately
Attr_Expiration_Time = 0;
# Enable read delegations? libcephfs v13.0.1 and later allow the
# ceph client to set a delegation. While it's possible to allow RW
# delegations, it's not recommended to enable them until ganesha
# acquires CB_GETATTR support.
#
# Note too that delegations may not be safe in clustered
# configurations, so it's probably best to just disable them until
# this problem is resolved:
#
# http://tracker.ceph.com/issues/24802
#
# Delegations = R;
# NFS servers usually decide to "squash" incoming requests from the
# root user to a "nobody" user. It's possible to disable that, but for
# now, we leave it enabled.
# Squash = root;
FSAL
{
# FSAL_CEPH export
Name = CEPH;
# Ceph clusters have their own authentication scheme (cephx).
# Ganesha acts as a cephfs client. This is the client username
# to use. Note that this user will need to be created before
# running ganesha. See:
#
# http://docs.ceph.com/docs/jewel/rados/operations/user-management/
#
# The default is to send a NULL here, which means that the
# userid is auto-generated by libcephfs.
#
# User_Id = "ganesha";
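#
# A cephx user might be created with something like the following
# sketch (the caps shown here are assumptions; adjust them for your
# cluster):
#
# ceph auth get-or-create client.ganesha \
#     mon 'allow r' osd 'allow rw' mds 'allow rw'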
#
# Key to use for the session (if any). If not set, it uses the
# normal search path for cephx keyring files to find a key:
# Secret_Access_Key = "YOUR SECRET KEY HERE";
}
}
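#
# With the export above, a client could then mount the pseudoroot
# path. A hypothetical example (the hostname is an assumption):
#
# mount -t nfs -o vers=4.1 ganesha.example.com:/ceph /mnt
#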
# Config block for FSAL_CEPH
CEPH
{
# Path to a ceph.conf file for this cluster.
# Ceph_Conf = /etc/ceph/ceph.conf;
# User file-creation mask. These bits will be masked off from the unix
# permissions on newly-created inodes.
# umask = 0;
}
#
# This is the config block for the RADOS RecoveryBackend. This is only
# used if you're storing the client recovery records in a RADOS object.
#
RADOS_KV
{
# Path to a ceph.conf file for this cluster.
# Ceph_Conf = /etc/ceph/ceph.conf;
# The recovery backend has its own ceph client. The default is to
# let libcephfs autogenerate the userid. Note that the RADOS_KV block
# does not have a setting for Secret_Access_Key. A cephx keyring file
# must be used for authenticated access.
# UserId = "ganesharecov";
# Name of the ceph storage pool that contains the recovery objects.
# The default is "nfs-ganesha".
# pool = "nfs-ganesha";
# If using the rados_cluster backend, then consider setting a unique
# nodeid for each running daemon here, particularly if this daemon
# could end up migrating to a host with a different hostname. The
# default is to use the hostname of the node where ganesha is running.
# nodeid = hostname.example.com;
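#
# For rados_cluster, each node must also be added to the shared grace
# database before its first start; a sketch, assuming the default
# pool and the nodeid above:
#
# ganesha-rados-grace --pool nfs-ganesha add hostname.example.com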
}
# Config block for rados:// URL access. It too uses its own client to access
# the object, separate from the FSAL_CEPH and RADOS_KV clients.
RADOS_URLS
{
# Path to a ceph.conf file for this cluster.
# Ceph_Conf = /etc/ceph/ceph.conf;
# Like RADOS_KV, authenticated access requires a cephx keyring file.
# UserId = "ganeshaurls";
}