netfs: Provide invalidatepage and releasepage calls
Provide default invalidatepage and releasepage calls.  These will need to
interact with invalidation correctly at some point.  They don't, however,
need to deal with the page fscache or page private marks as those are no
longer being used.

Signed-off-by: David Howells <dhowells@redhat.com>
dhowells committed Jan 19, 2022
1 parent 14f96dd · commit 7deb1d0
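
To show how a filesystem opts in, here is a minimal sketch of the wiring for a hypothetical netfs-backed filesystem ("myfs" and its aops table are invented for illustration; the 9p, afs, ceph and cifs hunks below show the real conversions):

        #include <linux/netfs.h>

        /* Hypothetical example: hand page release and invalidation to the
         * library defaults added by this patch, alongside the existing
         * netfs read helpers.
         */
        static const struct address_space_operations myfs_aops = {
                .readpage       = netfs_readpage,
                .readahead      = netfs_readahead,
                .releasepage    = netfs_releasepage,
                .invalidatepage = netfs_invalidatepage,
        };
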
Showing 6 changed files with 53 additions and 207 deletions.
45 changes: 2 additions & 43 deletions fs/9p/vfs_addr.c
@@ -93,47 +93,6 @@ const struct netfs_request_ops v9fs_req_ops = {
         .cleanup = v9fs_req_cleanup,
 };
 
-/**
- * v9fs_release_page - release the private state associated with a page
- * @page: The page to be released
- * @gfp: The caller's allocation restrictions
- *
- * Returns 1 if the page can be released, false otherwise.
- */
-
-static int v9fs_release_page(struct page *page, gfp_t gfp)
-{
-        struct folio *folio = page_folio(page);
-        struct inode *inode = folio_inode(folio);
-
-        if (folio_test_private(folio))
-                return 0;
-#ifdef CONFIG_9P_FSCACHE
-        if (folio_test_fscache(folio)) {
-                if (current_is_kswapd() || !(gfp & __GFP_FS))
-                        return 0;
-                folio_wait_fscache(folio);
-        }
-#endif
-        fscache_note_page_release(v9fs_inode_cookie(V9FS_I(inode)));
-        return 1;
-}
-
-/**
- * v9fs_invalidate_page - Invalidate a page completely or partially
- * @page: The page to be invalidated
- * @offset: offset of the invalidated region
- * @length: length of the invalidated region
- */
-
-static void v9fs_invalidate_page(struct page *page, unsigned int offset,
-                                 unsigned int length)
-{
-        struct folio *folio = page_folio(page);
-
-        folio_wait_fscache(folio);
-}
-
 static void v9fs_write_to_cache_done(void *priv, ssize_t transferred_or_error,
                                      bool was_async)
 {
@@ -353,8 +312,8 @@ const struct address_space_operations v9fs_addr_operations = {
         .writepage = v9fs_vfs_writepage,
         .write_begin = v9fs_write_begin,
         .write_end = v9fs_write_end,
-        .releasepage = v9fs_release_page,
-        .invalidatepage = v9fs_invalidate_page,
+        .releasepage = netfs_releasepage,
+        .invalidatepage = netfs_invalidatepage,
         .launder_page = v9fs_launder_page,
         .direct_IO = v9fs_direct_IO,
 };
123 changes: 4 additions & 119 deletions fs/afs/file.c
@@ -20,9 +20,6 @@
 
 static int afs_file_mmap(struct file *file, struct vm_area_struct *vma);
 static int afs_symlink_readpage(struct file *file, struct page *page);
-static void afs_invalidatepage(struct page *page, unsigned int offset,
-                               unsigned int length);
-static int afs_releasepage(struct page *page, gfp_t gfp_flags);
 
 static ssize_t afs_file_read_iter(struct kiocb *iocb, struct iov_iter *iter);
 static void afs_vm_open(struct vm_area_struct *area);
@@ -54,8 +51,8 @@ const struct address_space_operations afs_file_aops = {
         .readahead = netfs_readahead,
         .set_page_dirty = afs_set_page_dirty,
         .launder_page = afs_launder_page,
-        .releasepage = afs_releasepage,
-        .invalidatepage = afs_invalidatepage,
+        .releasepage = netfs_releasepage,
+        .invalidatepage = netfs_invalidatepage,
         .write_begin = afs_write_begin,
         .write_end = afs_write_end,
         .writepage = afs_writepage,
@@ -64,8 +61,8 @@ const struct address_space_operations afs_file_aops = {
 
 const struct address_space_operations afs_symlink_aops = {
         .readpage = afs_symlink_readpage,
-        .releasepage = afs_releasepage,
-        .invalidatepage = afs_invalidatepage,
+        .releasepage = netfs_releasepage,
+        .invalidatepage = netfs_invalidatepage,
 };
 
 static const struct vm_operations_struct afs_vm_ops = {
@@ -398,118 +395,6 @@ int afs_write_inode(struct inode *inode, struct writeback_control *wbc)
         return 0;
 }
 
-/*
- * Adjust the dirty region of the page on truncation or full invalidation,
- * getting rid of the markers altogether if the region is entirely invalidated.
- */
-static void afs_invalidate_dirty(struct folio *folio, unsigned int offset,
-                                 unsigned int length)
-{
-        struct afs_vnode *vnode = AFS_FS_I(folio_inode(folio));
-        unsigned long priv;
-        unsigned int f, t, end = offset + length;
-
-        priv = (unsigned long)folio_get_private(folio);
-
-        /* we clean up only if the entire page is being invalidated */
-        if (offset == 0 && length == folio_size(folio))
-                goto full_invalidate;
-
-        /* If the page was dirtied by page_mkwrite(), the PTE stays writable
-         * and we don't get another notification to tell us to expand it
-         * again.
-         */
-        if (afs_is_folio_dirty_mmapped(priv))
-                return;
-
-        /* We may need to shorten the dirty region */
-        f = afs_folio_dirty_from(folio, priv);
-        t = afs_folio_dirty_to(folio, priv);
-
-        if (t <= offset || f >= end)
-                return; /* Doesn't overlap */
-
-        if (f < offset && t > end)
-                return; /* Splits the dirty region - just absorb it */
-
-        if (f >= offset && t <= end)
-                goto undirty;
-
-        if (f < offset)
-                t = offset;
-        else
-                f = end;
-        if (f == t)
-                goto undirty;
-
-        priv = afs_folio_dirty(folio, f, t);
-        folio_change_private(folio, (void *)priv);
-        trace_afs_folio_dirty(vnode, tracepoint_string("trunc"), folio);
-        return;
-
-undirty:
-        trace_afs_folio_dirty(vnode, tracepoint_string("undirty"), folio);
-        folio_clear_dirty_for_io(folio);
-full_invalidate:
-        trace_afs_folio_dirty(vnode, tracepoint_string("inval"), folio);
-        folio_detach_private(folio);
-}
-
-/*
- * invalidate part or all of a page
- * - release a page and clean up its private data if offset is 0 (indicating
- *   the entire page)
- */
-static void afs_invalidatepage(struct page *page, unsigned int offset,
-                               unsigned int length)
-{
-        struct folio *folio = page_folio(page);
-
-        _enter("{%lu},%u,%u", folio_index(folio), offset, length);
-
-        BUG_ON(!PageLocked(page));
-
-        if (PagePrivate(page))
-                afs_invalidate_dirty(folio, offset, length);
-
-        folio_wait_fscache(folio);
-        _leave("");
-}
-
-/*
- * release a page and clean up its private state if it's not busy
- * - return true if the page can now be released, false if not
- */
-static int afs_releasepage(struct page *page, gfp_t gfp)
-{
-        struct folio *folio = page_folio(page);
-        struct afs_vnode *vnode = AFS_FS_I(folio_inode(folio));
-
-        _enter("{{%llx:%llu}[%lu],%lx},%x",
-               vnode->fid.vid, vnode->fid.vnode, folio_index(folio), folio->flags,
-               gfp);
-
-        /* deny if page is being written to the cache and the caller hasn't
-         * elected to wait */
-#ifdef CONFIG_AFS_FSCACHE
-        if (folio_test_fscache(folio)) {
-                if (current_is_kswapd() || !(gfp & __GFP_FS))
-                        return false;
-                folio_wait_fscache(folio);
-        }
-        fscache_note_page_release(afs_vnode_cache(vnode));
-#endif
-
-        if (folio_test_private(folio)) {
-                trace_afs_folio_dirty(vnode, tracepoint_string("rel"), folio);
-                folio_detach_private(folio);
-        }
-
-        /* Indicate that the folio can be released */
-        _leave(" = T");
-        return true;
-}
-
 static void afs_add_open_mmap(struct afs_vnode *vnode)
 {
         if (atomic_inc_return(&vnode->cb_nr_mmap) == 1) {
24 changes: 2 additions & 22 deletions fs/ceph/addr.c
@@ -160,27 +160,7 @@ static void ceph_invalidatepage(struct page *page, unsigned int offset,
                 ceph_put_snap_context(snapc);
         }
 
-        wait_on_page_fscache(page);
-}
-
-static int ceph_releasepage(struct page *page, gfp_t gfp)
-{
-        struct inode *inode = page->mapping->host;
-
-        dout("%llx:%llx releasepage %p idx %lu (%sdirty)\n",
-             ceph_vinop(inode), page,
-             page->index, PageDirty(page) ? "" : "not ");
-
-        if (PagePrivate(page))
-                return 0;
-
-        if (PageFsCache(page)) {
-                if (current_is_kswapd() || !(gfp & __GFP_FS))
-                        return 0;
-                wait_on_page_fscache(page);
-        }
-        ceph_fscache_note_page_release(inode);
-        return 1;
+        netfs_invalidatepage(page, offset, length);
 }
 
 static void ceph_netfs_expand_readahead(struct netfs_read_request *rreq)
@@ -1366,7 +1346,7 @@ const struct address_space_operations ceph_aops = {
         .write_end = ceph_write_end,
         .set_page_dirty = ceph_set_page_dirty,
         .invalidatepage = ceph_invalidatepage,
-        .releasepage = ceph_releasepage,
+        .releasepage = netfs_releasepage,
         .direct_IO = noop_direct_IO,
 };
 
27 changes: 4 additions & 23 deletions fs/cifs/file.c
@@ -4798,25 +4798,6 @@ static int cifs_write_begin(struct file *file, struct address_space *mapping,
         return rc;
 }
 
-static int cifs_release_page(struct page *page, gfp_t gfp)
-{
-        if (PagePrivate(page))
-                return 0;
-        if (PageFsCache(page)) {
-                if (current_is_kswapd() || !(gfp & __GFP_FS))
-                        return false;
-                wait_on_page_fscache(page);
-        }
-        fscache_note_page_release(cifs_inode_cookie(page->mapping->host));
-        return true;
-}
-
-static void cifs_invalidate_page(struct page *page, unsigned int offset,
-                                 unsigned int length)
-{
-        wait_on_page_fscache(page);
-}
-
 static int cifs_launder_page(struct page *page)
 {
         int rc = 0;
@@ -5012,9 +4993,9 @@ const struct address_space_operations cifs_addr_ops = {
         .write_begin = cifs_write_begin,
         .write_end = cifs_write_end,
         .set_page_dirty = cifs_set_page_dirty,
-        .releasepage = cifs_release_page,
+        .releasepage = netfs_releasepage,
         .direct_IO = cifs_direct_io,
-        .invalidatepage = cifs_invalidate_page,
+        .invalidatepage = netfs_invalidatepage,
         .launder_page = cifs_launder_page,
         /*
          * TODO: investigate and if useful we could add an cifs_migratePage
@@ -5037,7 +5018,7 @@ const struct address_space_operations cifs_addr_ops_smallbuf = {
         .write_begin = cifs_write_begin,
         .write_end = cifs_write_end,
         .set_page_dirty = cifs_set_page_dirty,
-        .releasepage = cifs_release_page,
-        .invalidatepage = cifs_invalidate_page,
+        .releasepage = netfs_releasepage,
+        .invalidatepage = netfs_invalidatepage,
         .launder_page = cifs_launder_page,
 };
39 changes: 39 additions & 0 deletions fs/netfs/read_helper.c
@@ -9,6 +9,7 @@
 #include <linux/export.h>
 #include <linux/fs.h>
 #include <linux/mm.h>
+#include <linux/swap.h>
 #include <linux/pagemap.h>
 #include <linux/slab.h>
 #include <linux/uio.h>
@@ -1347,3 +1348,41 @@ int netfs_write_begin(struct file *file, struct address_space *mapping,
         return ret;
 }
 EXPORT_SYMBOL(netfs_write_begin);
+
+/*
+ * Invalidate part or all of a folio
+ * - release a folio and clean up its private data if offset is 0 (indicating
+ *   the entire folio)
+ */
+void netfs_invalidatepage(struct page *page, unsigned int offset, unsigned int length)
+{
+        struct folio *folio = page_folio(page);
+
+        _enter("{%lu},%u,%u", folio_index(folio), offset, length);
+
+        folio_wait_fscache(folio);
+}
+EXPORT_SYMBOL(netfs_invalidatepage);
+
+/*
+ * Release a folio and clean up its private state if it's not busy
+ * - return true if the folio can now be released, false if not
+ */
+int netfs_releasepage(struct page *page, gfp_t gfp)
+{
+        struct folio *folio = page_folio(page);
+
+        _enter("");
+
+        if (PagePrivate(page))
+                return 0;
+        if (folio_test_fscache(folio)) {
+                if (current_is_kswapd() || !(gfp & __GFP_FS))
+                        return false;
+                folio_wait_fscache(folio);
+        }
+
+        fscache_note_page_release(netfs_i_cookie(folio_inode(folio)));
+        return true;
+}
+EXPORT_SYMBOL(netfs_releasepage);
2 changes: 2 additions & 0 deletions include/linux/netfs.h
@@ -274,6 +274,8 @@ extern int netfs_readpage(struct file *, struct page *);
 extern int netfs_write_begin(struct file *, struct address_space *,
                              loff_t, unsigned int, unsigned int, struct folio **,
                              void **);
+extern void netfs_invalidatepage(struct page *page, unsigned int offset, unsigned int length);
+extern int netfs_releasepage(struct page *page, gfp_t gfp_flags);
 
 extern void netfs_subreq_terminated(struct netfs_read_subrequest *, ssize_t, bool);
 extern void netfs_stats_show(struct seq_file *);
