netfs: Implement truncation
Implement truncation of the pagecache, calling out to the netfs to perform
the actual truncation on the server if need be.  We can skip talking to the
server if the permissions and ownership aren't changing and the new EOF is
above the EOF marker on the server - i.e. we're only truncating locally
buffered writes.

However, we have to be careful: if content encryption is in play, we may
have to rewrite the block that contains the new EOF marker.  This can be
necessary when increasing the size of the file as well as when decreasing
it.
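
As an illustrative sketch of that decision (hypothetical code, not part of
this patch - the helper name is invented, while remote_i_size is the netfs
library's record of the server's EOF):

static bool truncation_needs_server_op(struct netfs_inode *ctx,
				       const struct iattr *attr)
{
	/* Mode or ownership changes always require a server operation. */
	if (attr->ia_valid & (ATTR_MODE | ATTR_UID | ATTR_GID))
		return true;

	/* Cutting below the server's EOF marker discards data held there. */
	if (attr->ia_size < ctx->remote_i_size)
		return true;

	/* Otherwise only locally buffered writes are being truncated. */
	return false;
}

Note that with content encryption enabled, even a size change that needs no
server operation may still require the block containing the new EOF to be
re-encrypted and rewritten.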

Signed-off-by: David Howells <dhowells@redhat.com>
dhowells committed Jun 30, 2022
1 parent cff1881 commit e0aed6d
Showing 10 changed files with 602 additions and 30 deletions.
45 changes: 19 additions & 26 deletions fs/afs/inode.c
@@ -24,6 +24,7 @@
 #include <linux/iversion.h>
 #include "internal.h"
 #include "afs_fs.h"
+#include <trace/events/netfs.h>
 
 static const struct inode_operations afs_symlink_inode_operations = {
 	.get_link	= page_get_link,
@@ -839,15 +840,15 @@ void afs_evict_inode(struct inode *inode)
 
 static void afs_setattr_success(struct afs_operation *op)
 {
+	struct netfs_io_request *treq = op->setattr.treq;
 	struct afs_vnode_param *vp = &op->file[0];
 	struct inode *inode = &vp->vnode->netfs.inode;
-	loff_t old_i_size = i_size_read(inode);
 
-	op->setattr.old_i_size = old_i_size;
 	afs_vnode_commit_status(op, vp);
 	/* inode->i_size has now been changed. */
 
 	if (op->setattr.attr->ia_valid & ATTR_SIZE) {
+		loff_t old_i_size = treq->i_size;
 		loff_t size = op->setattr.attr->ia_size;
 		if (size > old_i_size)
 			pagecache_isize_extended(inode, old_i_size, size);
@@ -856,26 +857,23 @@ static void afs_setattr_success(struct afs_operation *op)
 
 static void afs_setattr_edit_file(struct afs_operation *op)
 {
-	struct afs_vnode_param *vp = &op->file[0];
-	struct afs_vnode *vnode = vp->vnode;
+	struct netfs_io_request *treq = op->setattr.treq;
 
-	if (op->setattr.attr->ia_valid & ATTR_SIZE) {
-		loff_t size = op->setattr.attr->ia_size;
-		loff_t i_size = op->setattr.old_i_size;
-
-		if (size != i_size) {
-			truncate_pagecache(&vnode->netfs.inode, size);
-			netfs_resize_file(&vnode->netfs, size);
-			fscache_resize_cookie(afs_vnode_cache(vnode), size);
-		}
-	}
+	if (op->setattr.attr->ia_valid & ATTR_SIZE)
+		netfs_truncate(treq);
+}
+
+static void afs_setattr_put(struct afs_operation *op)
+{
+	netfs_put_request(op->setattr.treq, false, netfs_rreq_trace_put_discard);
 }
 
 static const struct afs_operation_ops afs_setattr_operation = {
 	.issue_afs_rpc	= afs_fs_setattr,
 	.issue_yfs_rpc	= yfs_fs_setattr,
 	.success	= afs_setattr_success,
 	.edit_dir	= afs_setattr_edit_file,
+	.put		= afs_setattr_put,
 };
 
 /*
@@ -887,6 +885,7 @@ int afs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
 	const unsigned int supported =
 		ATTR_SIZE | ATTR_MODE | ATTR_UID | ATTR_GID |
 		ATTR_MTIME | ATTR_MTIME_SET | ATTR_TIMES_SET | ATTR_TOUCH;
+	struct netfs_io_request *treq = NULL;
 	struct afs_operation *op;
 	struct afs_vnode *vnode = AFS_FS_I(d_inode(dentry));
 	struct inode *inode = &vnode->netfs.inode;
@@ -902,21 +901,12 @@ int afs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
 		return 0;
 	}
 
-	i_size = i_size_read(inode);
-	if (attr->ia_valid & ATTR_SIZE) {
-		if (!S_ISREG(inode->i_mode))
-			return -EISDIR;
-
-		ret = inode_newsize_ok(inode, attr->ia_size);
-		if (ret)
-			return ret;
-
-		if (attr->ia_size == i_size)
-			attr->ia_valid &= ~ATTR_SIZE;
-	}
-
+	treq = netfs_prepare_to_truncate(dentry, attr);
+	if (IS_ERR(treq))
+		return PTR_ERR(treq);
+
 	fscache_use_cookie(afs_vnode_cache(vnode), true);
 
 	/* Prevent any new writebacks from starting whilst we do this. */
 	down_write(&vnode->validate_lock);
 
@@ -955,6 +945,7 @@ int afs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
 
 	afs_op_set_vnode(op, 0, vnode);
 	op->setattr.attr = attr;
+	op->setattr.treq = treq;
 
 	if (attr->ia_valid & ATTR_SIZE) {
 		op->file[0].dv_delta = 1;
@@ -965,10 +956,12 @@ int afs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
 	op->file[0].modification = true;
 
 	op->ops = &afs_setattr_operation;
+	treq = NULL;
 	ret = afs_do_sync_operation(op);
 
 out_unlock:
 	up_write(&vnode->validate_lock);
+	netfs_put_request(treq, false, netfs_rreq_trace_put_discard);
 	fscache_unuse_cookie(afs_vnode_cache(vnode), NULL, NULL);
 	_leave(" = %d", ret);
 	return ret;
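In outline, the AFS conversion drives the new truncation API in three steps
(a condensed sketch of the code above, with AFS-specific locking and error
handling elided):

	treq = netfs_prepare_to_truncate(dentry, attr);
	if (IS_ERR(treq))
		return PTR_ERR(treq);

	/* ... issue the setattr RPC if the server needs to be told ... */

	netfs_truncate(treq);
	netfs_put_request(treq, false, netfs_rreq_trace_put_discard);

On the successful path, AFS hands treq over to the operation (op->setattr.treq)
and NULLs the local pointer, so the request is instead released by the
operation's .put handler, afs_setattr_put().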
2 changes: 1 addition & 1 deletion fs/afs/internal.h
@@ -835,7 +835,7 @@ struct afs_operation {
 		} store;
 		struct {
 			struct iattr	*attr;
-			loff_t		old_i_size;
+			struct netfs_io_request *treq;
 		} setattr;
 		struct afs_acl	*acl;
 		struct yfs_acl	*yacl;
3 changes: 2 additions & 1 deletion fs/netfs/Makefile
@@ -11,7 +11,8 @@ netfs-y := \
 	main.o \
 	misc.o \
 	objects.o \
-	output.o
+	output.o \
+	truncate.o
 
 netfs-$(CONFIG_NETFS_STATS) += stats.o
 
117 changes: 117 additions & 0 deletions fs/netfs/buffered_flush.c
@@ -108,7 +108,124 @@ static void netfs_clean_dirty_range(struct netfs_io_request *wreq)
  */
 static void netfs_redirty_range(struct netfs_io_request *wreq)
 {
+	struct netfs_io_chain *chain;
+	struct netfs_dirty_region *d, *d2, *w, *tmp;
+	struct netfs_inode *ctx = netfs_inode(wreq->inode);
+	unsigned int c;
+	bool upload_failed = false;
+	LIST_HEAD(discards);
+
+	trace_netfs_rreq(wreq, netfs_rreq_trace_redirty);
+
+	if (list_empty(&wreq->regions))
+		return netfs_clean_dirty_range(wreq);
+
+	/* Ask the filesystem how it wants to handle things if an upload
+	 * failed.  It has two choices: redirty everything or leave everything
+	 * clean.
+	 */
+	for (c = 0; c < wreq->nr_chains; c++) {
+		chain = &wreq->chains[c];
+		if (chain->source != NETFS_WRITE_TO_CACHE && chain->error)
+			upload_failed = true;
+	}
+
+	if (upload_failed &&
+	    wreq->netfs_ops->redirty_on_failure &&
+	    !wreq->netfs_ops->redirty_on_failure(wreq))
+		return netfs_clean_dirty_range(wreq);
+
+	/* First of all, we step through the list of regions that were to be
+	 * written back and see if we can discard/shorten anything that got
+	 * partially stored.
+	 *
+	 * Don't retry write failures to the cache.  If the cache got a fatal
+	 * error, it will have gone offline and retrying is pointless; if it
+	 * ran out of space, it probably won't be able to supply us with space
+	 * on the second attempt.
+	 */
+	list_for_each_entry_safe(w, tmp, &wreq->regions, dirty_link) {
+		if (w->type == NETFS_COPY_TO_CACHE) {
+			list_del_init(&w->dirty_link);
+			netfs_put_dirty_region(ctx, w, netfs_region_trace_put_clear);
+		}
+	}
+
+	w = list_first_entry_or_null(&wreq->regions,
+				     struct netfs_dirty_region, dirty_link);
+	if (!w)
+		return;
+
+	/* Step through the uncompleted regions and reintegrate them into
+	 * the dirty list.
+	 */
+	spin_lock(&ctx->dirty_lock);
+
+	d = list_first_entry_or_null(&ctx->dirty_regions,
+				     struct netfs_dirty_region, dirty_link);
+	if (!d) {
+		list_splice_tail_init(&wreq->regions, &ctx->dirty_regions);
+		goto out;
+	}
+
+	while (d && w) {
+		/* Dirty region before writeback region and not touching. */
+		if (d->last < w->first && d->last != w->first - 1) {
+			d = netfs_next_region(ctx, d);
+			if (!d)
+				goto splice;
+			continue;
+		}
+
+		/* Dirty region overlaps with writeback region. */
+		if (d->first <= w->last) {
+			if (d->last == w->first - 1 &&
+			    !netfs_are_regions_mergeable(ctx, d, w)) {
+				d = netfs_next_region(ctx, d);
+				if (!d)
+					goto splice;
+				continue;
+			}
+
+			d->first = min(d->first, w->first);
+			d->last = max(d->last, w->last);
+			d->from = min(d->from, w->from);
+			d->to = max(d->to, w->to);
+			trace_netfs_dirty(ctx, d, w, netfs_dirty_trace_redirty_merge);
+
+			d2 = netfs_next_region(ctx, d);
+			if (d2 && d->last >= d2->first - 1 &&
+			    netfs_are_regions_mergeable(ctx, d, d2)) {
+				d->last = d2->last;
+				d->to = d2->to;
+				list_move(&d2->dirty_link, &discards);
+				trace_netfs_dirty(ctx, d, d2, netfs_dirty_trace_bridged);
+			}
+
+			d2 = w;
+			w = netfs_rreq_next_region(wreq, w);
+			list_move(&d2->dirty_link, &discards);
+			continue;
+		}
+
+		/* Dirty region after writeback region and touching. */
+		if (d->first == w->last + 1 &&
+		    netfs_are_regions_mergeable(ctx, w, d)) {
+			d->first = min(d->first, w->first);
+			d->from = min(d->from, w->from);
+			trace_netfs_dirty(ctx, d, w, netfs_dirty_trace_redirty_merge);
+			d2 = w;
+			w = netfs_rreq_next_region(wreq, w);
+			list_move(&d2->dirty_link, &discards);
+			continue;
+		}
+
+		/* Dirty region after writeback region and not touching: put
+		 * the writeback region back on the dirty list ahead of it.
+		 */
+		d2 = w;
+		w = netfs_rreq_next_region(wreq, w);
+		list_move_tail(&d2->dirty_link, &d->dirty_link);
+	}
+
+splice:
+	/* Anything still left on the request goes on the tail. */
+	list_splice_tail_init(&wreq->regions, &ctx->dirty_regions);
+out:
+	spin_unlock(&ctx->dirty_lock);
+
+	/* Dispose of the regions that were merged into others. */
+	list_for_each_entry_safe(d, tmp, &discards, dirty_link) {
+		list_del_init(&d->dirty_link);
+		netfs_put_dirty_region(ctx, d, netfs_region_trace_put_clear);
+	}
 }
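
To make the merge rules concrete, here is a hypothetical walk-through of the
loop above, with invented folio bounds:

	dirty list:        d1 = [0..3]    d2 = [8..9]
	failed writeback:   w = [2..5]

d1 overlaps w, so d1 is extended to cover [0..5] and w's region is absorbed
and discarded.  d2 begins at folio 8, which is not adjacent to folio 5, so no
bridging merge occurs.  Had d2 instead covered [6..9] and been mergeable, d1
would have absorbed it as well, yielding [0..9], and d2 would have been moved
to the discards list for disposal.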

11 changes: 9 additions & 2 deletions fs/netfs/internal.h
@@ -148,8 +148,6 @@ struct netfs_io_request *netfs_alloc_request(struct address_space *mapping,
 					     enum netfs_io_origin origin);
 void netfs_get_request(struct netfs_io_request *rreq, enum netfs_rreq_ref_trace what);
 void netfs_clear_subrequests(struct netfs_io_request *rreq, bool was_async);
-void netfs_put_request(struct netfs_io_request *rreq, bool was_async,
-		       enum netfs_rreq_ref_trace what);
 struct netfs_io_subrequest *netfs_alloc_subrequest(struct netfs_io_request *rreq);
 struct netfs_dirty_region *netfs_alloc_dirty_region(gfp_t gfp);
 struct netfs_dirty_region *netfs_get_dirty_region(struct netfs_inode *ctx,
@@ -263,6 +261,15 @@ static inline struct netfs_dirty_region *netfs_next_region(struct netfs_inode *c
 	return list_next_entry(region, dirty_link);
 }
 
+static inline
+struct netfs_dirty_region *netfs_rreq_next_region(struct netfs_io_request *rreq,
+						  struct netfs_dirty_region *region)
+{
+	if (list_is_last(&region->dirty_link, &rreq->regions))
+		return NULL;
+	return list_next_entry(region, dirty_link);
+}
+
 /*****************************************************************************/
 /*
  * debug tracing
1 change: 1 addition & 0 deletions fs/netfs/main.c
@@ -36,6 +36,7 @@ static const char *netfs_origins[nr__netfs_io_origin] = {
 	[NETFS_WRITEBACK]	= "WB",
 	[NETFS_DIO_READ]	= "DR",
 	[NETFS_DIO_WRITE]	= "DW",
+	[NETFS_TRUNCATE]	= "TR",
 };
 
 /*
1 change: 1 addition & 0 deletions fs/netfs/objects.c
@@ -162,6 +162,7 @@ void netfs_put_request(struct netfs_io_request *rreq, bool was_async,
 		}
 	}
 }
+EXPORT_SYMBOL(netfs_put_request);
 
 /*
  * Allocate and partially initialise an I/O request structure.
