Skip to content
Permalink
Browse files
netfs: Allow buffered shared-writeable mmap through netfs_page_mkwrite()
Provide an entry point to delegate a filesystem's ->page_mkwrite() to.
This checks for conflicting writes, then creates, continues or merges a
dirty region in the maple tree to represent a whole folio change.

Signed-off-by: David Howells <dhowells@redhat.com>
  • Loading branch information
dhowells committed Feb 16, 2022
1 parent fa57b80 commit eefdd180445e96088f4a69f0e733643e9a794b1f
Show file tree
Hide file tree
Showing 3 changed files with 71 additions and 0 deletions.
@@ -716,3 +716,72 @@ ssize_t netfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
return ret;
}
EXPORT_SYMBOL(netfs_file_write_iter);

/*
* Notification that a previously read-only page is about to become writable.
* Note that the caller indicates a single page of a multipage folio.
*/
vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf)
{
struct netfs_dirty_region *spare_region;
struct folio *folio = page_folio(vmf->page);
struct file *file = vmf->vma->vm_file;
struct inode *inode = file_inode(file);
struct netfs_i_context *ctx = netfs_i_context(inode);
vm_fault_t ret = VM_FAULT_RETRY;
int err;

MA_STATE(mas, &ctx->dirty_regions, vmf->page->index, PAGE_SIZE);

_enter("%lx", folio->index);

if (ctx->ops->validate_for_write(inode, file) < 0)
return VM_FAULT_SIGBUS;

sb_start_pagefault(inode->i_sb);

if (folio_wait_writeback_killable(folio))
goto out;

if (folio_lock_killable(folio) < 0)
goto out;

if (mas_expected_entries(&mas, 2) < 0) {
ret = VM_FAULT_OOM;
goto out;
}

spare_region = netfs_alloc_dirty_region();
if (IS_ERR(spare_region)) {
ret = VM_FAULT_OOM;
goto out;
}

err = netfs_flush_conflicting_writes(ctx, file, folio_pos(folio),
folio_size(folio), folio);
switch (err) {
case 0:
break;
case -EAGAIN:
ret = VM_FAULT_RETRY;
goto out;
case -ENOMEM:
ret = VM_FAULT_OOM;
goto out;
default:
ret = VM_FAULT_SIGBUS;
goto out;
}

netfs_commit_folio(ctx, file, &spare_region, &mas,
folio, 0, folio_size(folio));
netfs_commit_region(ctx, &mas, folio_pos(folio), folio_size(folio));
file_update_time(file);

ret = VM_FAULT_LOCKED;
out:
sb_end_pagefault(inode->i_sb);
mas_destroy(&mas);
netfs_put_dirty_region(ctx, spare_region, netfs_region_trace_put_discard);
return ret;
}
@@ -331,6 +331,7 @@ extern int netfs_write_begin(struct file *, struct address_space *,
loff_t, unsigned int, unsigned int, struct folio **,
void **);
/* Buffered write entry point for ->write_iter(). */
extern ssize_t netfs_file_write_iter(struct kiocb *iocb, struct iov_iter *from);
/* ->page_mkwrite() helper: transitions a read-only folio to writable and
 * records the dirty range in the inode's dirty-region tree.
 */
extern vm_fault_t netfs_page_mkwrite(struct vm_fault *vmf);
/* Address-space op helpers for page invalidation and release. */
extern void netfs_invalidatepage(struct page *page, unsigned int offset, unsigned int length);
extern int netfs_releasepage(struct page *page, gfp_t gfp_flags);

@@ -74,6 +74,7 @@
E_(netfs_dirty_trace_wait_active, "WAIT ACTV ")

/* Lifecycle events for netfs_dirty_region tracepoints: each EM()/E_() pair
 * maps an enum symbol to its human-readable label; the list is expanded
 * twice (enum definition and print-symbol table) by the tracing headers.
 */
#define netfs_region_traces \
EM(netfs_region_trace_put_discard, "PUT DISCARD") \
EM(netfs_region_trace_put_merged, "PUT MERGED ") \
EM(netfs_region_trace_free, "FREE ") \
E_(netfs_region_trace_new, "NEW ")

0 comments on commit eefdd18

Please sign in to comment.