NFS: Convert buffered read paths to use netfs when fscache is enabled
Convert the NFS buffered read code paths to corresponding netfs APIs,
but only when fscache is configured and enabled.

The netfs API defines struct netfs_request_ops which must be filled
in by the network filesystem.  For NFS, we only need to define 5 of
the functions, the main one being the issue_read() function.
The issue_read() function is called by the netfs layer when a read
cannot be fulfilled locally, and must be sent to the server (either
the cache is not active, or it is active but the data is not available).
Once the read from the server is complete, netfs requires a call to
netfs_subreq_terminated() which conveys either how many bytes were read
successfully, or an error.  Note that issue_read() is called with a
structure, netfs_io_subrequest, which defines the IO requested, and
contains a start and a length (both in bytes), and assumes the underlying
netfs will return either an error for the whole region, or the number
of bytes successfully read.
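
As a reference point for that contract, here is a minimal sketch of a
hypothetical issue_read handler; it is not the NFS handler (which appears
in the fscache.c hunk further down), and my_fs_read_from_server() is an
assumed placeholder for the real transport:

/*
 * Hypothetical filesystem that can satisfy the whole subrequest
 * synchronously; my_fs_read_from_server() is an assumed stand-in helper.
 */
static void my_fs_issue_read(struct netfs_io_subrequest *sreq)
{
        ssize_t ret;

        /* Read sreq->len bytes starting at sreq->start for this inode. */
        ret = my_fs_read_from_server(sreq->rreq->inode, sreq->start, sreq->len);

        /* Exactly one call per subrequest: bytes transferred or an error. */
        netfs_subreq_terminated(sreq, ret, false);
}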

The NFS IO path is page based and the main APIs are the pgio APIs defined
in pagelist.c.  For the pgio APIs, there is no way for the caller to
know how many RPCs will be sent and how the pages will be broken up
into underlying RPCs, each of which will have its own completion and
return code.  In contrast, netfs is subrequest based: a single
subrequest may contain multiple pages, and a single subrequest is
initiated with issue_read() and terminated with netfs_subreq_terminated().
Thus, to utilize the netfs APIs, NFS needs some way to accommodate
the netfs requirement of a single response to the whole
subrequest, while also minimizing disruptive changes to the NFS
pgio layer.
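
The mechanism NFS uses for that accommodation is a small per-subrequest
tracking structure, described in the next paragraph.  It is defined in
fs/nfs/fscache.h, which is not part of the fscache.c hunk shown below, so
the following is only a sketch reconstructed from how its fields are used
in that hunk; the field order and comments are assumptions:

struct nfs_netfs_io_data {
        refcount_t                      refcount;    /* one ref per in-flight READ RPC, plus the initial ref */
        struct netfs_io_subrequest      *sreq;       /* the netfs subrequest being served */
        atomic64_t                      transferred; /* bytes successfully read across all RPCs */
        int                             error;       /* final error, 0 if all READs succeeded */
};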

The approach taken with this patch is to allocate a small structure
for each nfs_netfs_issue_read() call, store the final error and number
of bytes successfully transferred in the structure, and update these values
as each RPC completes.  The refcount on the structure is used as a marker
for the last RPC completion: it is incremented in nfs_netfs_initiate_read()
and decremented in nfs_netfs_read_completion() whenever an nfs_pgio_header
contains a valid pointer to the data.  On the final put (which signals
that the last outstanding RPC is complete) in nfs_netfs_read_completion(),
netfs_subreq_terminated() is called with either the final error value (if
one or more READs completed with an error) or the number of bytes
successfully transferred (if all RPCs completed successfully).  Note
that when all RPCs complete successfully, the number of bytes transferred
is capped to the length of the subrequest.  Capping the transferred length
to the subrequest length prevents "Subreq overread" warnings from netfs.
This is due to the "aligned_len" in nfs_pageio_add_page(), and the
corner case where NFS requests a full page at the end of the file,
even when i_size reflects only a partial page (NFS overread).
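
The get/put helpers implementing this also live in fs/nfs/fscache.h,
outside the hunk shown below.  The sketch here simply follows the behavior
described above, so the comment wording and the min_t() form are
assumptions:

static inline void nfs_netfs_get(struct nfs_netfs_io_data *netfs)
{
        refcount_inc(&netfs->refcount);
}

static inline void nfs_netfs_put(struct nfs_netfs_io_data *netfs)
{
        ssize_t final_len;

        /* Only the last completing READ RPC terminates the subrequest. */
        if (!refcount_dec_and_test(&netfs->refcount))
                return;

        /* Cap to the subrequest length so an NFS overread of the final
         * partial page does not trigger netfs "Subreq overread" warnings. */
        final_len = min_t(s64, netfs->sreq->len, atomic64_read(&netfs->transferred));
        netfs_subreq_terminated(netfs->sreq, netfs->error ?: final_len, false);
        kfree(netfs);
}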

Signed-off-by: Dave Wysochanski <dwysocha@redhat.com>
Tested-by: Daire Byrne <daire@dneg.com>
Signed-off-by: Anna Schumaker <Anna.Schumaker@Netapp.com>
DaveWysochanskiRH authored and amschuma-ntap committed Apr 11, 2023
1 parent 88a4d7b commit 000dbe0
Showing 8 changed files with 274 additions and 152 deletions.
fs/nfs/fscache.c: 135 additions & 87 deletions (222 changes)
@@ -15,6 +15,9 @@
 #include <linux/seq_file.h>
 #include <linux/slab.h>
 #include <linux/iversion.h>
+#include <linux/xarray.h>
+#include <linux/fscache.h>
+#include <linux/netfs.h>
 
 #include "internal.h"
 #include "iostat.h"
@@ -235,108 +238,153 @@ void nfs_fscache_release_file(struct inode *inode, struct file *filp)
 	fscache_unuse_cookie(cookie, &auxdata, &i_size);
 }
 
-/*
- * Fallback page reading interface.
- */
-static int fscache_fallback_read_page(struct inode *inode, struct page *page)
+int nfs_netfs_read_folio(struct file *file, struct folio *folio)
 {
-        struct netfs_cache_resources cres;
-        struct fscache_cookie *cookie = netfs_i_cookie(&NFS_I(inode)->netfs);
-        struct iov_iter iter;
-        struct bio_vec bvec;
-        int ret;
-
-        memset(&cres, 0, sizeof(cres));
-        bvec_set_page(&bvec, page, PAGE_SIZE, 0);
-        iov_iter_bvec(&iter, ITER_DEST, &bvec, 1, PAGE_SIZE);
-
-        ret = fscache_begin_read_operation(&cres, cookie);
-        if (ret < 0)
-                return ret;
-
-        ret = fscache_read(&cres, page_offset(page), &iter, NETFS_READ_HOLE_FAIL,
-                           NULL, NULL);
-        fscache_end_operation(&cres);
-        return ret;
+        if (!netfs_inode(folio_inode(folio))->cache)
+                return -ENOBUFS;
+
+        return netfs_read_folio(file, folio);
 }
 
-/*
- * Fallback page writing interface.
- */
-static int fscache_fallback_write_page(struct inode *inode, struct page *page,
-                                       bool no_space_allocated_yet)
+int nfs_netfs_readahead(struct readahead_control *ractl)
 {
-        struct netfs_cache_resources cres;
-        struct fscache_cookie *cookie = netfs_i_cookie(&NFS_I(inode)->netfs);
-        struct iov_iter iter;
-        struct bio_vec bvec;
-        loff_t start = page_offset(page);
-        size_t len = PAGE_SIZE;
-        int ret;
-
-        memset(&cres, 0, sizeof(cres));
-        bvec_set_page(&bvec, page, PAGE_SIZE, 0);
-        iov_iter_bvec(&iter, ITER_SOURCE, &bvec, 1, PAGE_SIZE);
-
-        ret = fscache_begin_write_operation(&cres, cookie);
-        if (ret < 0)
-                return ret;
-
-        ret = cres.ops->prepare_write(&cres, &start, &len, i_size_read(inode),
-                                      no_space_allocated_yet);
-        if (ret == 0)
-                ret = fscache_write(&cres, page_offset(page), &iter, NULL, NULL);
-        fscache_end_operation(&cres);
-        return ret;
+        struct inode *inode = ractl->mapping->host;
+
+        if (!netfs_inode(inode)->cache)
+                return -ENOBUFS;
+
+        netfs_readahead(ractl);
+        return 0;
 }
 
-/*
- * Retrieve a page from fscache
- */
-int __nfs_fscache_read_page(struct inode *inode, struct page *page)
+atomic_t nfs_netfs_debug_id;
+static int nfs_netfs_init_request(struct netfs_io_request *rreq, struct file *file)
 {
-        int ret;
+        rreq->netfs_priv = get_nfs_open_context(nfs_file_open_context(file));
+        rreq->debug_id = atomic_inc_return(&nfs_netfs_debug_id);
 
-        trace_nfs_fscache_read_page(inode, page);
-        if (PageChecked(page)) {
-                ClearPageChecked(page);
-                ret = 1;
-                goto out;
-        }
+        return 0;
+}
 
-        ret = fscache_fallback_read_page(inode, page);
-        if (ret < 0) {
-                nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_FAIL);
-                SetPageChecked(page);
-                goto out;
-        }
+static void nfs_netfs_free_request(struct netfs_io_request *rreq)
+{
+        put_nfs_open_context(rreq->netfs_priv);
+}
 
-        /* Read completed synchronously */
-        nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_OK);
-        SetPageUptodate(page);
-        ret = 0;
-out:
-        trace_nfs_fscache_read_page_exit(inode, page, ret);
-        return ret;
+static inline int nfs_netfs_begin_cache_operation(struct netfs_io_request *rreq)
+{
+        return fscache_begin_read_operation(&rreq->cache_resources,
+                                            netfs_i_cookie(netfs_inode(rreq->inode)));
 }
 
-/*
- * Store a newly fetched page in fscache. We can be certain there's no page
- * stored in the cache as yet otherwise we would've read it from there.
- */
-void __nfs_fscache_write_page(struct inode *inode, struct page *page)
+static struct nfs_netfs_io_data *nfs_netfs_alloc(struct netfs_io_subrequest *sreq)
 {
-        int ret;
+        struct nfs_netfs_io_data *netfs;
 
+        netfs = kzalloc(sizeof(*netfs), GFP_KERNEL_ACCOUNT);
+        if (!netfs)
+                return NULL;
+        netfs->sreq = sreq;
+        refcount_set(&netfs->refcount, 1);
+        return netfs;
+}
 
-        trace_nfs_fscache_write_page(inode, page);
+static bool nfs_netfs_clamp_length(struct netfs_io_subrequest *sreq)
+{
+        size_t rsize = NFS_SB(sreq->rreq->inode->i_sb)->rsize;
 
-        ret = fscache_fallback_write_page(inode, page, true);
+        sreq->len = min(sreq->len, rsize);
+        return true;
+}
 
-        if (ret != 0) {
-                nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_WRITTEN_FAIL);
-                nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_UNCACHED);
-        } else {
-                nfs_inc_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_WRITTEN_OK);
+static void nfs_netfs_issue_read(struct netfs_io_subrequest *sreq)
+{
+        struct nfs_netfs_io_data *netfs;
+        struct nfs_pageio_descriptor pgio;
+        struct inode *inode = sreq->rreq->inode;
+        struct nfs_open_context *ctx = sreq->rreq->netfs_priv;
+        struct page *page;
+        int err;
+        pgoff_t start = (sreq->start + sreq->transferred) >> PAGE_SHIFT;
+        pgoff_t last = ((sreq->start + sreq->len -
+                         sreq->transferred - 1) >> PAGE_SHIFT);
+        XA_STATE(xas, &sreq->rreq->mapping->i_pages, start);
+
+        nfs_pageio_init_read(&pgio, inode, false,
+                             &nfs_async_read_completion_ops);
+
+        netfs = nfs_netfs_alloc(sreq);
+        if (!netfs)
+                return netfs_subreq_terminated(sreq, -ENOMEM, false);
+
+        pgio.pg_netfs = netfs; /* used in completion */
+
+        xas_lock(&xas);
+        xas_for_each(&xas, page, last) {
+                /* nfs_read_add_folio() may schedule() due to pNFS layout and other RPCs */
+                xas_pause(&xas);
+                xas_unlock(&xas);
+                err = nfs_read_add_folio(&pgio, ctx, page_folio(page));
+                if (err < 0) {
+                        netfs->error = err;
+                        goto out;
+                }
+                xas_lock(&xas);
         }
-        trace_nfs_fscache_write_page_exit(inode, page, ret);
+        xas_unlock(&xas);
+out:
+        nfs_pageio_complete_read(&pgio);
+        nfs_netfs_put(netfs);
 }
+
+void nfs_netfs_initiate_read(struct nfs_pgio_header *hdr)
+{
+        struct nfs_netfs_io_data *netfs = hdr->netfs;
+
+        if (!netfs)
+                return;
+
+        nfs_netfs_get(netfs);
+}
+
+int nfs_netfs_folio_unlock(struct folio *folio)
+{
+        struct inode *inode = folio_file_mapping(folio)->host;
+
+        /*
+         * If fscache is enabled, netfs will unlock pages.
+         */
+        if (netfs_inode(inode)->cache)
+                return 0;
+
+        return 1;
+}
+
+void nfs_netfs_read_completion(struct nfs_pgio_header *hdr)
+{
+        struct nfs_netfs_io_data *netfs = hdr->netfs;
+        struct netfs_io_subrequest *sreq;
+
+        if (!netfs)
+                return;
+
+        sreq = netfs->sreq;
+        if (test_bit(NFS_IOHDR_EOF, &hdr->flags))
+                __set_bit(NETFS_SREQ_CLEAR_TAIL, &sreq->flags);
+
+        if (hdr->error)
+                netfs->error = hdr->error;
+        else
+                atomic64_add(hdr->res.count, &netfs->transferred);
+
+        nfs_netfs_put(netfs);
+        hdr->netfs = NULL;
+}
+
+const struct netfs_request_ops nfs_netfs_ops = {
+        .init_request = nfs_netfs_init_request,
+        .free_request = nfs_netfs_free_request,
+        .begin_cache_operation = nfs_netfs_begin_cache_operation,
+        .issue_read = nfs_netfs_issue_read,
+        .clamp_length = nfs_netfs_clamp_length
+};
