Skip to content

Commit

Permalink
*stash (upm_base_support): mm: restrictedmem: Kirill's pinning implementation
Browse files Browse the repository at this point in the history

Originally-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Signed-off-by: Michael Roth <michael.roth@amd.com>
  • Loading branch information
mdroth committed Feb 12, 2023
1 parent 881eddb commit f780033
Show file tree
Hide file tree
Showing 4 changed files with 51 additions and 36 deletions.
4 changes: 2 additions & 2 deletions include/linux/shmem_fs.h
Expand Up @@ -9,6 +9,7 @@
#include <linux/percpu_counter.h>
#include <linux/xattr.h>
#include <linux/fs_parser.h>
#include <linux/magic.h>

/* inode in-kernel data */

Expand Down Expand Up @@ -75,10 +76,9 @@ extern unsigned long shmem_get_unmapped_area(struct file *, unsigned long addr,
unsigned long len, unsigned long pgoff, unsigned long flags);
extern int shmem_lock(struct file *file, int lock, struct ucounts *ucounts);
#ifdef CONFIG_SHMEM
extern const struct address_space_operations shmem_aops;
static inline bool shmem_mapping(struct address_space *mapping)
{
return mapping->a_ops == &shmem_aops;
return mapping->host->i_sb->s_magic == TMPFS_MAGIC;
}
#else
static inline bool shmem_mapping(struct address_space *mapping)
Expand Down
2 changes: 0 additions & 2 deletions mm/memory-failure.c
Expand Up @@ -941,8 +941,6 @@ static int me_pagecache_clean(struct page_state *ps, struct page *p)
goto out;
}

restrictedmem_error_page(p, mapping);

/*
* The shmem page is kept in page cache instead of truncating
* so is expected to have an extra refcount after error-handling.
Expand Down
76 changes: 47 additions & 29 deletions mm/restrictedmem.c
Expand Up @@ -190,6 +190,51 @@ static struct file *restrictedmem_file_create(struct file *memfd)
return file;
}

/*
 * address_space_operations ->error_remove_page callback for restrictedmem
 * mappings: when memory-failure hits a page, find the restrictedmem inode
 * whose backing memfd owns @mapping and fire the ->error() callback of every
 * notifier bound over the poisoned page range.
 *
 * Returns 0 unconditionally (the error is reported via the notifiers, not
 * the return value).
 */
static int restricted_error_remove_page(struct address_space *mapping,
					struct page *page)
{
	struct super_block *sb = restrictedmem_mnt->mnt_sb;
	struct inode *inode, *next;
	pgoff_t start, end;

	/* Page-offset range covered by the (possibly compound) page. */
	start = page->index;
	end = start + thp_nr_pages(page);

	/*
	 * Walk all inodes on the restrictedmem mount looking for the one
	 * whose memfd backs this mapping.  s_inode_list_lock protects the
	 * s_inodes list during the walk.
	 */
	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		struct restrictedmem *rm = inode->i_mapping->private_data;
		struct restrictedmem_notifier *notifier;
		struct file *memfd = rm->memfd;
		unsigned long index;

		if (memfd->f_mapping != mapping)
			continue;

		/*
		 * Notify each binding overlapping [start, end].
		 * NOTE(review): xa_for_each_range() treats 'end' as
		 * inclusive, so with end = start + thp_nr_pages() this scans
		 * one index past the poisoned range — confirm whether that
		 * is intended or an off-by-one.
		 */
		xa_for_each_range(&rm->bindings, index, notifier, start, end)
			notifier->ops->error(notifier, start, end);
		break;
	}
	spin_unlock(&sb->s_inode_list_lock);

	return 0;
}

#ifdef CONFIG_MIGRATION
/*
 * ->migrate_folio callback: restrictedmem pages must stay where they are
 * (they are effectively pinned), so refuse every migration request.
 * Returning -EBUSY tells the migration core the folio cannot be moved.
 */
static int restricted_folio(struct address_space *mapping, struct folio *dst,
			    struct folio *src, enum migrate_mode mode)
{
	return -EBUSY;
}
#endif

static struct address_space_operations restricted_aops = {
.dirty_folio = noop_dirty_folio,
.error_remove_page = restricted_error_remove_page,
#ifdef CONFIG_MIGRATION
.migrate_folio = restricted_folio,
#endif
};

SYSCALL_DEFINE1(memfd_restricted, unsigned int, flags)
{
struct file *file, *restricted_file;
Expand All @@ -210,6 +255,8 @@ SYSCALL_DEFINE1(memfd_restricted, unsigned int, flags)
file->f_mode |= FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE;
file->f_flags |= O_LARGEFILE;

file->f_mapping->a_ops = &restricted_aops;

restricted_file = restrictedmem_file_create(file);
if (IS_ERR(restricted_file)) {
err = PTR_ERR(restricted_file);
Expand Down Expand Up @@ -294,32 +341,3 @@ int restrictedmem_get_page(struct file *file, pgoff_t offset,
return 0;
}
EXPORT_SYMBOL_GPL(restrictedmem_get_page);

/*
 * Hook called from the memory-failure path (mm/memory-failure.c): if
 * @mapping belongs to a memfd backing a restrictedmem file, notify every
 * binding overlapping the poisoned page so consumers can react to the
 * hardware error.  Non-shmem mappings are ignored.
 */
void restrictedmem_error_page(struct page *page, struct address_space *mapping)
{
	struct super_block *sb = restrictedmem_mnt->mnt_sb;
	struct inode *inode, *next;
	pgoff_t start, end;

	/* restrictedmem memfds are shmem-backed; anything else cannot match. */
	if (!shmem_mapping(mapping))
		return;

	/* Page-offset range covered by the (possibly compound) page. */
	start = page->index;
	end = start + thp_nr_pages(page);

	/*
	 * Walk all inodes on the restrictedmem mount to find the one whose
	 * memfd owns this mapping, under s_inode_list_lock.
	 */
	spin_lock(&sb->s_inode_list_lock);
	list_for_each_entry_safe(inode, next, &sb->s_inodes, i_sb_list) {
		struct restrictedmem *rm = inode->i_mapping->private_data;
		struct restrictedmem_notifier *notifier;
		struct file *memfd = rm->memfd;
		unsigned long index;

		if (memfd->f_mapping != mapping)
			continue;

		/*
		 * Fire ->error() for each binding in [start, end].
		 * NOTE(review): xa_for_each_range()'s last index is
		 * inclusive, so this covers one index past the poisoned
		 * range — confirm intent.
		 */
		xa_for_each_range(&rm->bindings, index, notifier, start, end)
			notifier->ops->error(notifier, start, end);
		break;
	}
	spin_unlock(&sb->s_inode_list_lock);
}
5 changes: 2 additions & 3 deletions mm/shmem.c
Expand Up @@ -231,7 +231,7 @@ static inline void shmem_inode_unacct_blocks(struct inode *inode, long pages)
}

static const struct super_operations shmem_ops;
const struct address_space_operations shmem_aops;
static const struct address_space_operations shmem_aops;
static const struct file_operations shmem_file_operations;
static const struct inode_operations shmem_inode_operations;
static const struct inode_operations shmem_dir_inode_operations;
Expand Down Expand Up @@ -3894,7 +3894,7 @@ static int shmem_error_remove_page(struct address_space *mapping,
return 0;
}

const struct address_space_operations shmem_aops = {
static const struct address_space_operations shmem_aops = {
.writepage = shmem_writepage,
.dirty_folio = noop_dirty_folio,
#ifdef CONFIG_TMPFS
Expand All @@ -3906,7 +3906,6 @@ const struct address_space_operations shmem_aops = {
#endif
.error_remove_page = shmem_error_remove_page,
};
EXPORT_SYMBOL(shmem_aops);

static const struct file_operations shmem_file_operations = {
.mmap = shmem_mmap,
Expand Down

0 comments on commit f780033

Please sign in to comment.