
UBUNTU: SAUCE: (no-up) add tracing for user initiated readahead requests

Track pages which undergo readahead and, for each, record whether it was
actually consumed, either via a read or by being faulted into a mapping.
This allows userspace readahead applications (such as ureadahead) to track
which pages in core at the end of a boot are actually required and to
generate an optimal readahead pack.  It also allows pack adjustment and
optimisation in parallel with readahead, allowing the pack to evolve and
remain accurate as userspace paths change.  The status of the pages is
reported back via the mincore() call using a newly allocated bit.

Signed-off-by: Andy Whitcroft <apw@canonical.com>
Acked-by: Stefan Bader <stefan.bader@canonical.com>
Signed-off-by: Leann Ogasawara <leann.ogasawara@canonical.com>
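
For illustration only (not part of this commit): given the mincore.c hunk
below, which ORs the new page flag into bit 7 of each mincore() status byte,
a userspace tool might scan a mapped file and report readahead pages that
were never consumed.  This is a hedged sketch; the 0x80 mask is an assumption
derived from the hunk, not a documented ABI, and the program itself is
hypothetical.

/*
 * Hypothetical sketch: report pages of a file that were read ahead
 * but never consumed, assuming a kernel with this patch applied
 * (bit 7 of the mincore() status byte marks readahead-unused pages).
 */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	if (argc != 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}

	int fd = open(argv[1], O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}

	struct stat st;
	if (fstat(fd, &st) < 0 || st.st_size == 0) {
		close(fd);
		return 1;
	}

	void *map = mmap(NULL, st.st_size, PROT_READ, MAP_SHARED, fd, 0);
	if (map == MAP_FAILED) {
		perror("mmap");
		close(fd);
		return 1;
	}

	long page_size = sysconf(_SC_PAGESIZE);
	size_t pages = (st.st_size + page_size - 1) / page_size;
	unsigned char *vec = malloc(pages);
	if (!vec) {
		perror("malloc");
		return 1;
	}
	if (mincore(map, st.st_size, vec) < 0) {
		perror("mincore");
		return 1;
	}

	for (size_t i = 0; i < pages; i++) {
		/* Bit 0: page is resident; bit 7 (patched kernels only,
		 * per the mincore.c hunk): read ahead but not yet used. */
		if ((vec[i] & 1) && (vec[i] & 0x80))
			printf("page %zu: readahead, still unused\n", i);
	}

	free(vec);
	munmap(map, st.st_size);
	close(fd);
	return 0;
}

On an unpatched kernel bit 7 is never set, so the sketch simply reports
nothing; a pack generator in the spirit of ureadahead could use the same
information to drop pages that were read ahead but never needed.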
Commit b6a6bfabf89f06cf2ece5fa06f909a4012d3e9c0 (1 parent: 19f949f), Andy Whitcroft committed with heftig, Jul 29, 2010
Showing with 16 additions and 1 deletion.
  1. +3 −0 include/linux/page-flags.h
  2. +3 −0 mm/filemap.c
  3. +6 −1 mm/memory.c
  4. +2 −0 mm/mincore.c
  5. +1 −0 mm/page_alloc.c
  6. +1 −0 mm/readahead.c
@@ -109,6 +109,7 @@ enum pageflags {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
PG_compound_lock,
#endif
+ PG_readaheadunused, /* user oriented readahead as yet unused*/
__NR_PAGEFLAGS,
/* Filesystems */
@@ -232,6 +233,8 @@ PAGEFLAG(MappedToDisk, mappedtodisk)
PAGEFLAG(Reclaim, reclaim) TESTCLEARFLAG(Reclaim, reclaim)
PAGEFLAG(Readahead, reclaim) /* Reminder to do async read-ahead */
+PAGEFLAG(ReadaheadUnused, readaheadunused)
+
#ifdef CONFIG_HIGHMEM
/*
* Must use a macro here due to header dependency issues. page_zone() is not
@@ -1304,6 +1304,9 @@ int file_read_actor(read_descriptor_t *desc, struct page *page,
if (size > count)
size = count;
+ if (PageReadaheadUnused(page))
+ ClearPageReadaheadUnused(page);
+
/*
* Faults on the destination of a read are common, so do it before
* taking the kmap.
@@ -3268,10 +3268,15 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
else
VM_BUG_ON(!PageLocked(vmf.page));
+ page = vmf.page;
+
+ /* Mark the page as used on fault. */
+ if (PageReadaheadUnused(page))
+ ClearPageReadaheadUnused(page);
+
/*
* Should we do an early C-O-W break?
*/
- page = vmf.page;
if (flags & FAULT_FLAG_WRITE) {
if (!(vma->vm_flags & VM_SHARED)) {
page = cow_page;
@@ -80,6 +80,8 @@ static unsigned char mincore_page(struct address_space *mapping, pgoff_t pgoff)
#endif
if (page) {
present = PageUptodate(page);
+ if (present)
+ present |= (PageReadaheadUnused(page) << 7);
page_cache_release(page);
}
@@ -6114,6 +6114,7 @@ static const struct trace_print_flags pageflag_names[] = {
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
{1UL << PG_compound_lock, "compound_lock" },
#endif
+ {1UL << PG_readaheadunused, "readaheadunused"},
};
static void dump_page_flags(unsigned long flags)
@@ -189,6 +189,7 @@ __do_page_cache_readahead(struct address_space *mapping, struct file *filp,
list_add(&page->lru, &page_pool);
if (page_idx == nr_to_read - lookahead_size)
SetPageReadahead(page);
+ SetPageReadaheadUnused(page);
ret++;
}
