Skip to content
Permalink
Browse files
netfs: Perform content encryption
When dealing with an encrypted file, we gather together sufficient pages
from the pagecache to constitute a logical crypto block, allocate a bounce
buffer and then ask the filesystem to encrypt between the buffers.  The
bounce buffer is then passed to the filesystem to upload.

The network filesystem must set a flag to indicate what service is desired
and what the logical blocksize will be.

The netfs library iterates through each block to be processed, providing a
pair of scatterlists to describe the source and destination buffers.

Note that it should be possible in future to encrypt DIO writes also by
this same mechanism.

A mock-up block-encryption function for afs is included for illustration.

Signed-off-by: David Howells <dhowells@redhat.com>
  • Loading branch information
dhowells committed Feb 14, 2022
1 parent b3280ef commit 8573fb679d751564f10601acf56465d777132b03
Show file tree
Hide file tree
Showing 11 changed files with 238 additions and 2 deletions.
@@ -421,6 +421,7 @@ const struct netfs_request_ops afs_req_ops = {
.validate_for_write = afs_validate_for_write,
.init_writeback = afs_init_writeback,
.create_write_requests = afs_create_write_requests,
.encrypt_block = afs_encrypt_block,
};

int afs_write_inode(struct inode *inode, struct writeback_control *wbc)
@@ -59,12 +59,18 @@ static noinline void dump_vnode(struct afs_vnode *vnode, struct afs_vnode *paren
/*
 * Set up the netfs library context on an afs inode: install the afs request
 * ops, set the maximum read/write sizes and, for regular files on a
 * superblock mounted with content encryption, enable encryption with a 4KiB
 * crypto block size.
 */
static void afs_set_netfs_context(struct afs_vnode *vnode)
{
	struct netfs_i_context *ctx = netfs_i_context(&vnode->vfs_inode);
	struct afs_super_info *as = AFS_FS_S(vnode->vfs_inode.i_sb);

	netfs_i_context_init(&vnode->vfs_inode, &afs_req_ops);
	ctx->rsize = 4 * 1024 * 1024;	/* Maximum read size */
	ctx->wsize = 16 * 1024 * 1024;	/* Maximum write size */

	if (vnode->status.type == AFS_FTYPE_FILE && as->fscrypt) {
		/* Encryption is performed in whole crypto blocks, so I/O must
		 * be bounded to at least that granularity.
		 */
		ctx->crypto_bshift = ilog2(4096);
		ctx->min_bshift = ctx->crypto_bshift;
		__set_bit(NETFS_ICTX_ENCRYPTED, &ctx->flags);
	}
}

/*
@@ -50,6 +50,7 @@ struct afs_fs_context {
bool autocell; /* T if set auto mount operation */
bool dyn_root; /* T if dynamic root */
bool no_cell; /* T if the source is "none" (for dynroot) */
bool fscrypt; /* T if content encryption is engaged */
enum afs_flock_mode flock_mode; /* Partial file-locking emulation mode */
afs_voltype_t type; /* type of volume requested */
unsigned int volnamesz; /* size of volume name */
@@ -229,6 +230,7 @@ struct afs_super_info {
struct afs_volume *volume; /* volume record */
enum afs_flock_mode flock_mode:8; /* File locking emulation mode */
bool dyn_root; /* True if dynamic root */
bool fscrypt; /* T if content encryption is engaged */
};

static inline struct afs_super_info *AFS_FS_S(struct super_block *sb)
@@ -1539,6 +1541,9 @@ extern vm_fault_t afs_page_mkwrite(struct vm_fault *vmf);
extern void afs_prune_wb_keys(struct afs_vnode *);
extern int afs_launder_page(struct page *);
extern void afs_create_write_requests(struct netfs_writeback *);
extern bool afs_encrypt_block(struct netfs_writeback *, loff_t, size_t,
struct scatterlist *, unsigned int,
struct scatterlist *, unsigned int);

/*
* xattr.c
@@ -71,6 +71,7 @@ enum afs_param {
Opt_autocell,
Opt_dyn,
Opt_flock,
Opt_fscrypt,
Opt_source,
};

@@ -86,6 +87,7 @@ static const struct fs_parameter_spec afs_fs_parameters[] = {
fsparam_flag ("autocell", Opt_autocell),
fsparam_flag ("dyn", Opt_dyn),
fsparam_enum ("flock", Opt_flock, afs_param_flock),
fsparam_flag ("fscrypt", Opt_fscrypt),
fsparam_string("source", Opt_source),
{}
};
@@ -342,6 +344,10 @@ static int afs_parse_param(struct fs_context *fc, struct fs_parameter *param)
ctx->flock_mode = result.uint_32;
break;

case Opt_fscrypt:
ctx->fscrypt = true;
break;

default:
return -EINVAL;
}
@@ -516,6 +522,7 @@ static struct afs_super_info *afs_alloc_sbi(struct fs_context *fc)
as->cell = afs_use_cell(ctx->cell, afs_cell_trace_use_sbi);
as->volume = afs_get_volume(ctx->volume,
afs_volume_trace_get_alloc_sbi);
as->fscrypt = ctx->fscrypt;
}
}
return as;
@@ -12,6 +12,7 @@
#include <linux/writeback.h>
#include <linux/pagevec.h>
#include <linux/netfs.h>
#include <crypto/skcipher.h>
#include <trace/events/netfs.h>
#include "internal.h"

@@ -291,6 +292,53 @@ void afs_create_write_requests(struct netfs_writeback *wback)
}
}

/*
 * Encrypt one logical crypto block of a write for fscrypt.
 *
 * This is a mock-up for illustration: it encrypts with pcbc(fcrypt) using a
 * fixed all-zero session key and IV rather than deriving per-file key
 * material.
 *
 * The data described by source_sg is encrypted into the buffer described by
 * dest_sg.  Returns true on success; on failure, the error is recorded in
 * wback->error and false is returned.
 */
bool afs_encrypt_block(struct netfs_writeback *wback, loff_t pos, size_t len,
		       struct scatterlist *source_sg, unsigned int n_source,
		       struct scatterlist *dest_sg, unsigned int n_dest)
{
	struct crypto_sync_skcipher *ci;
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	u8 session_key[8], iv[8];
	int ret;

	ci = crypto_alloc_sync_skcipher("pcbc(fcrypt)", 0, 0);
	if (IS_ERR(ci)) {
		ret = PTR_ERR(ci);
		goto error;
	}
	tfm = &ci->base;

	/* Mock-up only: a real implementation would derive the key and IV
	 * from per-inode fscrypt key material rather than using zeroes.
	 */
	memset(session_key, 0, sizeof(session_key));
	memset(iv, 0, sizeof(iv));

	ret = crypto_sync_skcipher_setkey(ci, session_key, sizeof(session_key));
	if (ret < 0)
		goto error_ci;

	ret = -ENOMEM;
	req = skcipher_request_alloc(tfm, GFP_NOFS);
	if (!req)
		goto error_ci;

	skcipher_request_set_sync_tfm(req, ci);
	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, source_sg, dest_sg, len, iv);
	ret = crypto_skcipher_encrypt(req);

	skcipher_request_free(req);
error_ci:
	crypto_free_sync_skcipher(ci);
error:
	if (ret < 0)
		wback->error = ret;
	return ret == 0;
}

/*
* Extend the region to be written back to include subsequent contiguously
* dirty pages if possible, but don't sleep while doing so.
@@ -1,6 +1,7 @@
# SPDX-License-Identifier: GPL-2.0

netfs-y := \
crypto.o \
direct.o \
flush.o \
main.o \
@@ -0,0 +1,150 @@
// SPDX-License-Identifier: GPL-2.0-only
/* Network filesystem content encryption support.
*
* Copyright (C) 2022 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*/

#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/scatterlist.h>
#include "internal.h"

/*
 * Allocate a bunch of pages and add them into the xarray buffer starting at
 * the given index.
 *
 * Returns 0 on success or a negative error code.  On failure, pages not yet
 * inserted are freed here; pages already inserted into the xarray are left
 * for the caller to clean up.
 *
 * Note: the previous version clobbered the -ENOMEM from a short bulk
 * allocation with the 0 returned by a successful xa_insert(), and returned
 * an uninitialised value when nr_pages was 0; both are fixed by initialising
 * ret and by not inserting anything after a short allocation.
 */
static int netfs_alloc_buffer(struct xarray *xa, pgoff_t index, unsigned int nr_pages)
{
	struct page *page;
	unsigned int n;
	int ret = 0;
	LIST_HEAD(list);

	/* The bulk allocator may return fewer pages than asked for. */
	n = alloc_pages_bulk_list(GFP_NOIO, nr_pages, &list);
	if (n < nr_pages)
		ret = -ENOMEM;

	while (ret == 0 &&
	       (page = list_first_entry_or_null(&list, struct page, lru))) {
		list_del(&page->lru);
		page->index = index;
		ret = xa_insert(xa, index++, page, GFP_NOIO);
	}

	/* Discard anything left over after a short allocation or an
	 * insertion failure.
	 */
	while ((page = list_first_entry_or_null(&list, struct page, lru))) {
		list_del(&page->lru);
		__free_page(page);
	}
	return ret;
}

/*
 * Populate a scatterlist from folios in an xarray.
 *
 * Build scatterlist entries in sg describing the len bytes of buffer
 * starting at file position pos, walking the folios stored in xa under the
 * RCU read lock.
 *
 * Returns the number of scatterlist entries used (with the last one marked
 * as the end of the list), or -EIO if the xarray did not contain enough
 * folio data to cover the whole of the requested range.
 *
 * NOTE(review): nothing here bounds p against sg + n_sg - presumably callers
 * size the array so len bytes can never need more than n_sg entries; verify.
 */
static int netfs_xarray_to_sglist(struct xarray *xa, loff_t pos, size_t len,
				  struct scatterlist *sg, unsigned int n_sg)
{
	struct scatterlist *p = sg;
	struct folio *folio = NULL;
	size_t seg, offset, skip = 0;
	loff_t start = pos;
	pgoff_t index = start >> PAGE_SHIFT;
	int j;

	XA_STATE(xas, xa, index);

	sg_init_table(sg, n_sg);

	rcu_read_lock();

	xas_for_each(&xas, folio, ULONG_MAX) {
		if (xas_retry(&xas, folio))
			continue;
		/* Shadow/value entries and hugetlb folios aren't expected in
		 * these buffers.
		 */
		if (WARN_ON(xa_is_value(folio)) || WARN_ON(folio_test_hugetlb(folio)))
			break;
		/* The first folio found may begin before index; skip the
		 * leading subpages that precede the start of the range.
		 */
		for (j = (folio_index(folio) < index) ? index - folio_index(folio) : 0;
		     j < folio_nr_pages(folio); j++
		     ) {
			struct page *subpage = folio_file_page(folio, j);

			/* Each entry covers at most the remainder of one
			 * page, starting at the intra-page offset.
			 */
			offset = (pos + skip) & ~PAGE_MASK;
			seg = min(len, PAGE_SIZE - offset);

			sg_set_page(p++, subpage, seg, offset);

			len -= seg;
			skip += seg;
			if (len == 0)
				break;
		}
		if (len == 0)
			break;
	}

	rcu_read_unlock();
	if (len > 0) {
		/* Ran out of folios before covering the range. */
		kdebug("*** Insufficient source (%zx)", len);
		//WARN_ON(len > 0);
		return -EIO;
	}

	sg_mark_end(p - 1);
	return p - sg;
}

/*
* Prepare a write request for writing. We encrypt from wback->buffer to
* wback->buffer2.
*/
bool netfs_wback_encrypt(struct netfs_writeback *wback)
{
struct netfs_i_context *ctx = netfs_i_context(wback->inode);
struct scatterlist source_sg[16], dest_sg[16];
unsigned int n_source, n_dest;
size_t n, chunk, bsize = 1UL << ctx->crypto_bshift;
loff_t pos;
int ret;

_enter("");

ret = netfs_alloc_buffer(&wback->buffer2, wback->first,
wback->last - wback->first + 1);
if (ret < 0)
goto error;

pos = wback->first * PAGE_SIZE;
n = (wback->last - wback->first + 1) * PAGE_SIZE;
_debug("ENCRYPT %llx-%llx", pos, pos + n - 1);

for (; n > 0; n -= chunk, pos += chunk) {
chunk = min(n, bsize);
ret = netfs_xarray_to_sglist(&wback->buffer, pos, chunk,
source_sg, ARRAY_SIZE(source_sg));
if (ret < 0)
goto error;
n_source = ret;

ret = netfs_xarray_to_sglist(&wback->buffer2, pos, chunk,
dest_sg, ARRAY_SIZE(dest_sg));
if (ret < 0)
goto error;
n_dest = ret;

ret = ctx->ops->encrypt_block(wback, pos, chunk,
source_sg, n_source, dest_sg, n_dest);
if (ret < 0)
goto error;
}

__set_bit(NETFS_WBACK_BUFFERED, &wback->flags);
return true;

error:
wback->error = ret;
return false;
}
@@ -35,6 +35,12 @@ static inline bool netfs_mas_is_flushing(const void *mas_entry)
return (unsigned long)mas_entry & 2UL;
}

/*
* crypto.c
*/
bool netfs_wback_encrypt(struct netfs_writeback *wback);
void netfs_rreq_decrypt(struct netfs_read_request *rreq);

/*
* Return true if the pointer is a valid region pointer - ie. not
* NULL, XA_ZERO_ENTRY, NETFS_*_TO_CACHE or a flushing entry.
@@ -381,11 +381,13 @@ static void netfs_writeback(struct netfs_writeback *wback)

_enter("");

/* TODO: Encrypt or compress the region as appropriate */

/* ->outstanding > 0 carries a ref */
netfs_get_writeback(wback, netfs_wback_trace_get_for_outstanding);

if (test_bit(NETFS_ICTX_ENCRYPTED, &ctx->flags) &&
!netfs_wback_encrypt(wback))
goto out;

/* We need to write all of the region to the cache */
if (test_bit(NETFS_WBACK_WRITE_TO_CACHE, &wback->flags))
netfs_set_up_write_to_cache(wback);
@@ -396,6 +398,7 @@ static void netfs_writeback(struct netfs_writeback *wback)
if (!list_empty(&wback->regions))
ctx->ops->create_write_requests(wback);

out:
if (atomic_dec_and_test(&wback->outstanding))
netfs_write_completed(wback, false);
}
@@ -480,6 +480,7 @@ static enum netfs_handle_nonuptodate netfs_handle_nonuptodate_folio(struct netfs
return NETFS_WHOLE_FOLIO_MODIFY;

if (file->f_mode & FMODE_READ ||
test_bit(NETFS_ICTX_ENCRYPTED, &ctx->flags) ||
test_bit(NETFS_ICTX_DO_RMW, &ctx->flags))
return NETFS_JUST_PREFETCH;

@@ -20,6 +20,7 @@
#include <linux/uio.h>
#include <linux/maple_tree.h>

struct scatterlist;
enum netfs_wback_trace;

/*
@@ -142,11 +143,13 @@ struct netfs_i_context {
#define NETFS_ICTX_NEW_CONTENT 0 /* Set if file has new content (create/trunc-0) */
#define NETFS_ICTX_GOT_CACHED_ZP 1 /* We read zero_point from the cache */
#define NETFS_ICTX_DO_RMW 2 /* Set if RMW required (no write streaming) */
#define NETFS_ICTX_ENCRYPTED 3 /* The file contents are encrypted */
unsigned int rsize; /* Maximum read size */
unsigned int wsize; /* Maximum write size */
unsigned char min_bshift; /* log2 min block size for bounding box or 0 */
unsigned char obj_bshift; /* log2 storage object shift (ceph/pnfs) or 0 */
unsigned char cache_order; /* Log2 of cache's required page alignment */
unsigned char crypto_bshift; /* log2 of crypto block size */
};

/*
@@ -370,6 +373,11 @@ struct netfs_request_ops {
void (*create_write_requests)(struct netfs_writeback *wback);
void (*free_write_request)(struct netfs_write_request *wreq);
void (*invalidate_cache)(struct netfs_writeback *wback);

/* Content encryption */
bool (*encrypt_block)(struct netfs_writeback *wback, loff_t pos, size_t len,
struct scatterlist *source_sg, unsigned int n_source,
struct scatterlist *dest_sg, unsigned int n_dest);
};

/*

0 comments on commit 8573fb6

Please sign in to comment.