Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

introduce mcache, move lwip allocations to mcache #69

Merged
merged 8 commits into from
Sep 7, 2018
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion runtime/Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ $(ROOT)/runtime/closure_templates.h: $(ROOT)/runtime/contgen

RUNTIME = table.o buffer.o format.o id.o symbol.o rolling.o freelist.o sha256.o \
timer.o pqueue.o tuple.o random.o signature.o rtrie.o tuple_parser.o runtime_init.o \
extra_prints.o debug_heap.o objcache.o merge.o bitmap.o
extra_prints.o debug_heap.o objcache.o merge.o bitmap.o mcache.o

INCLUDES += -I$(ROOT)/runtime

Expand Down
3 changes: 3 additions & 0 deletions runtime/heap/heap.h
Original file line number Diff line number Diff line change
Expand Up @@ -18,6 +18,9 @@ heap allocate_id_heap(heap h, u64 pagesize); /* id heap with no ranges */
boolean id_heap_add_range(heap h, u64 base, u64 length);
heap wrap_freelist(heap meta, heap parent, bytes size);
heap allocate_objcache(heap meta, heap parent, bytes objsize);
boolean objcache_validate(heap h);
heap objcache_from_object(u64 obj, u64 parent_pagesize);
heap allocate_mcache(heap meta, heap parent, int min_order, int max_order);

// really internals

Expand Down
134 changes: 134 additions & 0 deletions runtime/heap/mcache.c
Original file line number Diff line number Diff line change
@@ -0,0 +1,134 @@
/* multi-cache heap
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

is there something specific to the leaves that requires them to be caches? Or is it any set of heaps where the objects fall into the same size bucket (2^n)?

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

The only requirements are that:

  • the leaf heap can be found by object reference
  • can free without size information (to satisfy lwip and anything else that has a malloc/free type interface)

So the only thing that really makes this specific to the objcache is the call to objcache_from_object(). I suppose this could be made into a call in the abstract heap type (heap_from_pointer()?), but I don't know what other heaps would support this.

Also, at this point, objcache (and thus mcache) can accept any size object, not just 2^n.


This is essentially a wrapper heap for a set of caches of varying
object sizes. Object sizes are specified on heap creation. Allocations
are made from the cache of the smallest object size equal to or greater
than the alloc size.
*/

//#define MCACHE_DEBUG

#include <runtime.h>

/* State for a multi-cache heap: a heap interface fronting a set of
   fixed-size object caches. */
typedef struct mcache {
    struct heap h;   /* heap interface; must be first so (mcache) <-> (heap) casts work */
    heap parent;     /* backing heap from which the leaf objcaches draw pages */
    heap meta;       /* heap used to allocate this struct and the caches vector */
    vector caches;   /* leaf objcache heaps, ordered by increasing object size */
} *mcache;

/* Allocate b bytes by drawing one object from the smallest cache whose
   object size is >= b. Returns INVALID_PHYSICAL if no cache is large
   enough or the chosen cache fails to allocate (no fallback to a larger
   cache is attempted). */
u64 mcache_alloc(heap h, bytes b)
{
    mcache m = (mcache)h;
    heap cache;

#ifdef MCACHE_DEBUG
    console("mcache_alloc: heap ");
    print_u64(u64_from_pointer(h));
    console(", size ");
    print_u64(b);
    console(": ");
#endif

    /* Linear scan from smallest to largest object size.
       Could become a binary search if search set is large... */
    vector_foreach(m->caches, cache) {
        if (cache && b <= cache->pagesize) {
            u64 addr = allocate_u64(cache, cache->pagesize);
            /* account for the full object size, not the requested size */
            if (addr != INVALID_PHYSICAL)
                h->allocated += cache->pagesize;
#ifdef MCACHE_DEBUG
            console("obj size ");
            print_u64(cache->pagesize);
            console(", addr ");
            print_u64(addr);
            console("\n");
#endif
            return addr;
        }
    }

#ifdef MCACHE_DEBUG
    console("no matching cache; fail\n");
#endif
    return INVALID_PHYSICAL;
}

/* Return object a to the leaf cache that owns it. The owning cache is
   recovered from the object address alone, which is what lets mcache
   present a malloc/free-style interface (size may be passed as -1ull to
   mean "unknown"). On any inconsistency the object is leaked rather
   than corrupting a cache. */
void mcache_dealloc(heap h, u64 a, bytes b)
{
    mcache m = (mcache)h;

#ifdef MCACHE_DEBUG
    console("mcache_dealloc: heap ");
    print_u64(u64_from_pointer(h));
    console(", addr ");
    print_u64(a);
    console(", size ");
    print_u64(b);
    console("\n");
#endif

    /* locate the owning leaf cache via the page footer */
    heap cache = objcache_from_object(a, m->parent->pagesize);
    if (cache == INVALID_ADDRESS) {
        msg_err("mcache %p: can't find cache for object %P, size %d; leaking\n",
                m, a, b);
        return;
    }

    /* We don't really need the size, but if we're given a valid one,
       make some attempt to verify it. */
    if (b != -1ull && b > cache->pagesize) {
        msg_err("dealloc size (%d) exceeds found cache size (%d); leaking\n",
                b, cache->pagesize);
        return;
    }

    /* bookkeeping mirrors mcache_alloc: full object size, not request size */
    assert(h->allocated >= cache->pagesize);
    h->allocated -= cache->pagesize;
    deallocate(cache, a, cache->pagesize);
}

/* Tear down the mcache: destroy each leaf cache (returning its pages to
   the parent heap), release the cache vector, then free the mcache
   struct itself from the meta heap. */
void destroy_mcache(heap h)
{
    mcache m = (mcache)h;
    heap o;
    vector_foreach(m->caches, o) {
        if (o)
            o->destroy(o);
    }
    /* fix: the caches vector is allocated from meta in allocate_mcache()
       and was previously leaked here */
    deallocate_vector(m->caches);
    deallocate(m->meta, m, sizeof(struct mcache));
}

/* Create a multi-cache heap with one objcache per power-of-2 object
   size from 2^min_order through 2^max_order inclusive.

   meta   - heap for metadata (mcache struct, vector, objcache structs)
   parent - heap from which the leaf caches draw whole pages

   Returns the new heap, or INVALID_ADDRESS on allocation failure (any
   partially-constructed state is released before returning). */
heap allocate_mcache(heap meta, heap parent, int min_order, int max_order)
{
    mcache m = allocate(meta, sizeof(struct mcache));
    if (m == INVALID_ADDRESS)
        return INVALID_ADDRESS;

#ifdef MCACHE_DEBUG
    console("allocate_mcache: heap at ");
    print_u64(u64_from_pointer(m));
    console("\n");
#endif

    m->h.alloc = mcache_alloc;
    m->h.dealloc = mcache_dealloc;
    m->h.destroy = destroy_mcache;
    /* fix: use a u64 shift; 1 << order is a 32-bit int shift and
       overflows (UB) for order >= 31 */
    m->h.pagesize = 1ull << min_order; /* default to smallest obj size */
    m->h.allocated = 0;
    m->meta = meta;
    m->parent = parent;
    m->caches = allocate_vector(meta, 1);
    /* fix: the vector allocation was previously unchecked */
    if (m->caches == INVALID_ADDRESS) {
        deallocate(meta, m, sizeof(struct mcache));
        return INVALID_ADDRESS;
    }

    for (int i = 0, order = min_order; order <= max_order; i++, order++) {
        u64 obj_size = 1ull << order;
        heap h = allocate_objcache(meta, parent, obj_size);
#ifdef MCACHE_DEBUG
        console(" - cache size ");
        print_u64(obj_size);
        console(": ");
        print_u64(u64_from_pointer(h));
        console("\n");
#endif
        if (h == INVALID_ADDRESS) {
            console("failed to allocate mcache\n");
            /* destroys any caches created so far and frees m */
            destroy_mcache((heap)m);
            return INVALID_ADDRESS;
        }
        vector_set(m->caches, i, h);
    }
    return (heap)m;
}
86 changes: 49 additions & 37 deletions runtime/heap/objcache.c
Original file line number Diff line number Diff line change
Expand Up @@ -21,11 +21,14 @@
#include <runtime.h>

#define FOOTER_MAGIC (u16)(0xcafe)

typedef struct objcache *objcache;

/* Per-page footer, placed at the tail of each page drawn from the parent
   heap; all object bookkeeping for the page lives here. Indices are u16,
   which bounds the number of objects per page. The cache back-pointer is
   what allows objcache_from_object() to recover the owning heap from an
   object address alone. */
typedef struct footer {
    u16 magic;        /* try to detect corruption by overruns */
    u16 free;         /* next free (recycled) object in page */
    u16 head;         /* next uninitialized object in page */
    u16 avail;        /* # of free and uninit. objects in page */
    objcache cache;   /* objcache to which this page belongs */
    struct list list; /* full list if avail == 0, free otherwise */
} *footer;

Expand Down Expand Up @@ -97,17 +100,32 @@ static footer objcache_addpage(objcache o)
assert ((p & (page_size(o) - 1)) == 0);

footer f = footer_from_page(o, p);
f->magic = FOOTER_MAGIC;
f->free = invalid_index;
f->head = 0;
f->avail = o->objs_per_page;
f->magic = FOOTER_MAGIC;

f->cache = o;
list_insert_after(&o->free, &f->list);
o->total_objs += o->objs_per_page;

return f;
}

/* A page is considered sane when its footer magic is intact and the
   footer points back at the owning cache; reports and returns false
   otherwise. */
static inline boolean validate_page(objcache o, footer f)
{
    boolean sane = true;

    if (f->magic != FOOTER_MAGIC) {
        msg_err("objcache %p, footer %p, bad magic! (%P)\n", o, f, f->magic);
        sane = false;
    } else if (f->cache != o) {
        msg_err("objcache %p, footer %p, f->cache mismatch (%p)\n", o, f, f->cache);
        sane = false;
    }

    return sane;
}

static void objcache_deallocate(heap h, u64 x, bytes size)
{
objcache o = (objcache)h;
Expand All @@ -126,8 +144,9 @@ static void objcache_deallocate(heap h, u64 x, bytes size)
msg_debug(" - obj %P, page %p, footer: free %d, head %d, avail %d\n",
x, p, f->free, f->head, f->avail);

if (f->magic != FOOTER_MAGIC) {
halt("heap %p, object %P, size %d: bad magic!\n", h, x, size);
if (!validate_page(o, f)) {
msg_err("leaking object\n");
return;
}

if (f->avail == 0) {
Expand Down Expand Up @@ -168,22 +187,21 @@ static u64 objcache_allocate(heap h, bytes size)
f = footer_from_list(next_free);
} else {
msg_debug("empty; calling objcache_addpage()\n", o->free);

if (!(f = objcache_addpage(o)))
return INVALID_PHYSICAL;
}

page p = page_from_footer(o, f);
u64 obj;

if (f->magic != FOOTER_MAGIC) {
msg_err("heap %p, page %P, size %d: bad magic!\n", h, p, size);
if (!validate_page(o, f)) {
msg_err("alloc failed\n");
return INVALID_PHYSICAL;
}

page p = page_from_footer(o, f);

msg_debug("allocating from page %P\n", p);

/* first check page's free list */
u64 obj;
if (is_valid_index(f->free)) {
msg_debug("f->free %d\n", f->free);
obj = obj_from_index(o, p, f->free);
Expand All @@ -192,9 +210,7 @@ static u64 objcache_allocate(heap h, bytes size)
/* we must have an uninitialized object */
assert(is_valid_index(f->head));
assert(f->head < o->objs_per_page);

msg_debug("f->head %d\n", f->head);

obj = obj_from_index(o, p, f->head);
f->head++;
}
Expand All @@ -205,39 +221,39 @@ static u64 objcache_allocate(heap h, bytes size)
list_delete(&f->list);
list_insert_before(&o->full, &f->list);
}

assert(o->alloced_objs <= o->total_objs);
o->alloced_objs++;
h->allocated += size;

msg_debug("returning obj %P\n", obj);

return obj;
}

static void objcache_destroy(heap h)
{
objcache o = (objcache)h;

/* Check and report if there are unreturned objects, but proceed
to release pages to parent heap anyway. */

if (o->alloced_objs > 0) {
msg_debug("%d objects still allocated in objcache %p; releasing "
"pages anyway\n", o->alloced_objs, o);
}

footer f;
foreach_page_footer(&o->free, f)
deallocate_u64(o->parent, page_from_footer(o, f), page_size(o));
foreach_page_footer(&o->full, f)
deallocate_u64(o->parent, page_from_footer(o, f), page_size(o));
}

foreach_page_footer(&o->free, f) {
page p = page_from_footer(o, f);
deallocate_u64(o->parent, p, page_size(o));
}

foreach_page_footer(&o->full, f) {
page p = page_from_footer(o, f);
deallocate_u64(o->parent, p, page_size(o));
}
/* Recover the objcache heap owning obj, assuming the footer sits at the
   tail of the parent page containing it. Returns INVALID_ADDRESS when
   the footer magic doesn't check out. (Magic alone is the only guard
   here; a stale page could still match — NOTE(review): callers should
   treat the result as best-effort.) */
heap objcache_from_object(u64 obj, u64 parent_pagesize)
{
    u64 page_base = obj & ~(parent_pagesize - 1);
    footer f = pointer_from_u64(page_base + parent_pagesize - sizeof(struct footer));
    return f->magic == FOOTER_MAGIC ? (heap)f->cache : INVALID_ADDRESS;
}

/* Sanity-checks the object cache, returning true if no discrepancies
Expand All @@ -262,8 +278,9 @@ boolean objcache_validate(heap h)
/* check free list */
foreach_page_footer(&o->free, f) {
page p = page_from_footer(o, f);
if (f->magic != FOOTER_MAGIC) {
msg_err("page %P has wrong magic\n", p);

if (!validate_page(o, f)) {
msg_err("page %P on free list failed validate\n", p);
return false;
}

Expand Down Expand Up @@ -320,8 +337,9 @@ boolean objcache_validate(heap h)
/* check full list */
foreach_page_footer(&o->full, f) {
page p = page_from_footer(o, f);
if (f->magic != FOOTER_MAGIC) {
msg_err("page %P has wrong magic\n", p);

if (!validate_page(o, f)) {
msg_err("page %P on full list failed validate\n", p);
return false;
}

Expand Down Expand Up @@ -426,11 +444,5 @@ heap allocate_objcache(heap meta, heap parent, bytes objsize)
o->total_objs = 0;
o->alloced_objs = 0;

if (!objcache_addpage(o)) {
msg_err("failed to add initial page to objcache %p\n", o);
deallocate(meta, o, sizeof(struct objcache));
return INVALID_ADDRESS;
}

return (heap)o;
}
19 changes: 16 additions & 3 deletions test/objcache_test.c
Original file line number Diff line number Diff line change
Expand Up @@ -8,8 +8,6 @@
#include <errno.h>
#include <string.h>

boolean objcache_validate(heap h);

static inline boolean validate(heap h)
{
if (!objcache_validate(h)) {
Expand All @@ -20,6 +18,16 @@ static inline boolean validate(heap h)
return true;
}

/* Check that the cache recovered from obj's address is the very heap it
   was allocated from; reports and returns false on mismatch. */
static inline boolean validate_obj(heap h, void * obj)
{
    heap found = objcache_from_object(u64_from_pointer(obj), PAGESIZE);
    if (found == h)
        return true;
    msg_err("objcache_from_object returned %p, doesn't match heap %p\n", found, h);
    return false;
}

static boolean alloc_vec(heap h, int n, int s, vector v)
{
for (int i=0; i < n; i++) {
Expand All @@ -31,6 +39,8 @@ static boolean alloc_vec(heap h, int n, int s, vector v)
msg_err("tb: failed to allocate object\n");
return false;
}
if (!validate_obj(h, p))
return false;

vector_set(v, i, p);
}
Expand All @@ -48,7 +58,10 @@ static boolean dealloc_vec(heap h, int s, vector v)

if (!validate(h))
return false;


if (!validate_obj(h, p))
return false;

deallocate(h, p, s);
}

Expand Down
Loading