@@ -47,220 +47,6 @@ diff -rupN _source/newlib-1.19.0/newlib/libc/include/sys/utime.h newlib-1.19.0/n
#ifdef __cplusplus
};
#endif
diff -rupN _source/newlib-1.19.0/newlib/libc/stdio/fseek.c newlib-1.19.0/newlib/libc/stdio/fseek.c
--- _source/newlib-1.19.0/newlib/libc/stdio/fseek.c 2009-12-17 13:43:43.000000000 -0600
+++ newlib-1.19.0/newlib/libc/stdio/fseek.c 2011-04-29 19:33:10.000000000 -0500
@@ -160,210 +160,6 @@ _DEFUN(_fseek_r, (ptr, fp, offset, whenc
return EOF;
}
- /*
- * Change any SEEK_CUR to SEEK_SET, and check `whence' argument.
- * After this, whence is either SEEK_SET or SEEK_END.
- */
-
- switch (whence)
- {
- case SEEK_CUR:
- /*
- * In order to seek relative to the current stream offset,
- * we have to first find the current stream offset a la
- * ftell (see ftell for details).
- */
- _fflush_r (ptr, fp); /* may adjust seek offset on append stream */
- if (fp->_flags & __SOFF)
- curoff = fp->_offset;
- else
- {
- curoff = seekfn (ptr, fp->_cookie, (_fpos_t) 0, SEEK_CUR);
- if (curoff == -1L)
- {
- _funlockfile (fp);
- __sfp_lock_release ();
- return EOF;
- }
- }
- if (fp->_flags & __SRD)
- {
- curoff -= fp->_r;
- if (HASUB (fp))
- curoff -= fp->_ur;
- }
- else if (fp->_flags & __SWR && fp->_p != NULL)
- curoff += fp->_p - fp->_bf._base;
-
- offset += curoff;
- whence = SEEK_SET;
- havepos = 1;
- break;
-
- case SEEK_SET:
- case SEEK_END:
- havepos = 0;
- break;
-
- default:
- ptr->_errno = EINVAL;
- _funlockfile (fp);
- __sfp_lock_release ();
- return (EOF);
- }
-
- /*
- * Can only optimise if:
- * reading (and not reading-and-writing);
- * not unbuffered; and
- * this is a `regular' Unix file (and hence seekfn==__sseek).
- * We must check __NBF first, because it is possible to have __NBF
- * and __SOPT both set.
- */
-
- if (fp->_bf._base == NULL)
- __smakebuf_r (ptr, fp);
- if (fp->_flags & (__SWR | __SRW | __SNBF | __SNPT))
- goto dumb;
- if ((fp->_flags & __SOPT) == 0)
- {
- if (seekfn != __sseek
- || fp->_file < 0
- #ifdef __USE_INTERNAL_STAT64
- || _fstat64_r (ptr, fp->_file, &st)
- #else
- || _fstat_r (ptr, fp->_file, &st)
- #endif
- || (st.st_mode & S_IFMT) != S_IFREG)
- {
- fp->_flags |= __SNPT;
- goto dumb;
- }
- #ifdef HAVE_BLKSIZE
- fp->_blksize = st.st_blksize;
- #else
- fp->_blksize = 1024;
- #endif
- fp->_flags |= __SOPT;
- }
-
- /*
- * We are reading; we can try to optimise.
- * Figure out where we are going and where we are now.
- */
-
- if (whence == SEEK_SET)
- target = offset;
- else
- {
- #ifdef __USE_INTERNAL_STAT64
- if (_fstat64_r (ptr, fp->_file, &st))
- #else
- if (_fstat_r (ptr, fp->_file, &st))
- #endif
- goto dumb;
- target = st.st_size + offset;
- }
- if ((long)target != target)
- {
- ptr->_errno = EOVERFLOW;
- _funlockfile (fp);
- __sfp_lock_release ();
- return EOF;
- }
-
- if (!havepos)
- {
- if (fp->_flags & __SOFF)
- curoff = fp->_offset;
- else
- {
- curoff = seekfn (ptr, fp->_cookie, 0L, SEEK_CUR);
- if (curoff == POS_ERR)
- goto dumb;
- }
- curoff -= fp->_r;
- if (HASUB (fp))
- curoff -= fp->_ur;
- }
-
- /*
- * Compute the number of bytes in the input buffer (pretending
- * that any ungetc() input has been discarded). Adjust current
- * offset backwards by this count so that it represents the
- * file offset for the first byte in the current input buffer.
- */
-
- if (HASUB (fp))
- {
- curoff += fp->_r; /* kill off ungetc */
- n = fp->_up - fp->_bf._base;
- curoff -= n;
- n += fp->_ur;
- }
- else
- {
- n = fp->_p - fp->_bf._base;
- curoff -= n;
- n += fp->_r;
- }
-
- /*
- * If the target offset is within the current buffer,
- * simply adjust the pointers, clear EOF, undo ungetc(),
- * and return.
- */
-
- if (target >= curoff && target < curoff + n)
- {
- register int o = target - curoff;
-
- fp->_p = fp->_bf._base + o;
- fp->_r = n - o;
- if (HASUB (fp))
- FREEUB (ptr, fp);
- fp->_flags &= ~__SEOF;
- memset (&fp->_mbstate, 0, sizeof (_mbstate_t));
- _funlockfile (fp);
- __sfp_lock_release ();
- return 0;
- }
-
- /*
- * The place we want to get to is not within the current buffer,
- * but we can still be kind to the kernel copyout mechanism.
- * By aligning the file offset to a block boundary, we can let
- * the kernel use the VM hardware to map pages instead of
- * copying bytes laboriously. Using a block boundary also
- * ensures that we only read one block, rather than two.
- */
-
- curoff = target & ~(fp->_blksize - 1);
- if (seekfn (ptr, fp->_cookie, curoff, SEEK_SET) == POS_ERR)
- goto dumb;
- fp->_r = 0;
- fp->_p = fp->_bf._base;
- if (HASUB (fp))
- FREEUB (ptr, fp);
- fp->_flags &= ~__SEOF;
- n = target - curoff;
- if (n)
- {
- if (__srefill_r (ptr, fp) || fp->_r < n)
- goto dumb;
- fp->_p += n;
- fp->_r -= n;
- }
- memset (&fp->_mbstate, 0, sizeof (_mbstate_t));
- _funlockfile (fp);
- __sfp_lock_release ();
- return 0;
-
- /*
- * We get here if we cannot optimise the seek ... just
- * do it. Allow the seek function to change fp->_bf._base.
- */
-
dumb:
if (_fflush_r (ptr, fp)
|| seekfn (ptr, fp->_cookie, offset, whence) == POS_ERR)
diff -rupN _source/newlib-1.19.0/newlib/libc/stdlib/calloc.c newlib-1.19.0/newlib/libc/stdlib/calloc.c
--- _source/newlib-1.19.0/newlib/libc/stdlib/calloc.c 2008-10-31 16:08:03.000000000 -0500
+++ newlib-1.19.0/newlib/libc/stdlib/calloc.c 2012-04-23 15:45:46.000000000 -0500
@@ -280,1192 +66,6 @@ diff -rupN _source/newlib-1.19.0/newlib/libc/stdlib/calloc.c newlib-1.19.0/newli
#endif
#endif /* MALLOC_PROVIDED */
diff -rupN _source/newlib-1.19.0/newlib/libc/stdlib/malloc.c newlib-1.19.0/newlib/libc/stdlib/malloc.c
--- _source/newlib-1.19.0/newlib/libc/stdlib/malloc.c 2008-11-19 14:55:52.000000000 -0600
+++ newlib-1.19.0/newlib/libc/stdlib/malloc.c 2012-04-24 16:52:45.000000000 -0500
@@ -1,227 +1,955 @@
- /* VxWorks provides its own version of malloc, and we can't use this
- one because VxWorks does not provide sbrk. So we have a hook to
- not compile this code. */
-
- /* The routines here are simple cover fns to the routines that do the real
- work (the reentrant versions). */
- /* FIXME: Does the warning below (see WARNINGS) about non-reentrancy still
- apply? A first guess would be "no", but how about reentrancy in the *same*
- thread? */
-
- #ifdef MALLOC_PROVIDED
-
- int _dummy_malloc = 1;
-
- #else
-
- /*
- FUNCTION
- <<malloc>>, <<realloc>>, <<free>>---manage memory
-
- INDEX
- malloc
- INDEX
- realloc
- INDEX
- reallocf
- INDEX
- free
- INDEX
- memalign
- INDEX
- malloc_usable_size
- INDEX
- _malloc_r
- INDEX
- _realloc_r
- INDEX
- _reallocf_r
- INDEX
- _free_r
- INDEX
- _memalign_r
- INDEX
- _malloc_usable_size_r
-
- ANSI_SYNOPSIS
- #include <stdlib.h>
- void *malloc(size_t <[nbytes]>);
- void *realloc(void *<[aptr]>, size_t <[nbytes]>);
- void *reallocf(void *<[aptr]>, size_t <[nbytes]>);
- void free(void *<[aptr]>);
-
- void *memalign(size_t <[align]>, size_t <[nbytes]>);
-
- size_t malloc_usable_size(void *<[aptr]>);
-
- void *_malloc_r(void *<[reent]>, size_t <[nbytes]>);
- void *_realloc_r(void *<[reent]>,
- void *<[aptr]>, size_t <[nbytes]>);
- void *_reallocf_r(void *<[reent]>,
- void *<[aptr]>, size_t <[nbytes]>);
- void _free_r(void *<[reent]>, void *<[aptr]>);
-
- void *_memalign_r(void *<[reent]>,
- size_t <[align]>, size_t <[nbytes]>);
-
- size_t _malloc_usable_size_r(void *<[reent]>, void *<[aptr]>);
-
- TRAD_SYNOPSIS
- #include <stdlib.h>
- char *malloc(<[nbytes]>)
- size_t <[nbytes]>;
-
- char *realloc(<[aptr]>, <[nbytes]>)
- char *<[aptr]>;
- size_t <[nbytes]>;
-
- char *reallocf(<[aptr]>, <[nbytes]>)
- char *<[aptr]>;
- size_t <[nbytes]>;
-
- void free(<[aptr]>)
- char *<[aptr]>;
-
- char *memalign(<[align]>, <[nbytes]>)
- size_t <[align]>;
- size_t <[nbytes]>;
-
- size_t malloc_usable_size(<[aptr]>)
- char *<[aptr]>;
-
- char *_malloc_r(<[reent]>,<[nbytes]>)
- char *<[reent]>;
- size_t <[nbytes]>;
-
- char *_realloc_r(<[reent]>, <[aptr]>, <[nbytes]>)
- char *<[reent]>;
- char *<[aptr]>;
- size_t <[nbytes]>;
-
- char *_reallocf_r(<[reent]>, <[aptr]>, <[nbytes]>)
- char *<[reent]>;
- char *<[aptr]>;
- size_t <[nbytes]>;
-
- void _free_r(<[reent]>, <[aptr]>)
- char *<[reent]>;
- char *<[aptr]>;
-
- char *_memalign_r(<[reent]>, <[align]>, <[nbytes]>)
- char *<[reent]>;
- size_t <[align]>;
- size_t <[nbytes]>;
-
- size_t malloc_usable_size(<[reent]>, <[aptr]>)
- char *<[reent]>;
- char *<[aptr]>;
-
- DESCRIPTION
- These functions manage a pool of system memory.
-
- Use <<malloc>> to request allocation of an object with at least
- <[nbytes]> bytes of storage available. If the space is available,
- <<malloc>> returns a pointer to a newly allocated block as its result.
-
- If you already have a block of storage allocated by <<malloc>>, but
- you no longer need all the space allocated to it, you can make it
- smaller by calling <<realloc>> with both the object pointer and the
- new desired size as arguments. <<realloc>> guarantees that the
- contents of the smaller object match the beginning of the original object.
-
- Similarly, if you need more space for an object, use <<realloc>> to
- request the larger size; again, <<realloc>> guarantees that the
- beginning of the new, larger object matches the contents of the
- original object.
-
- When you no longer need an object originally allocated by <<malloc>>
- or <<realloc>> (or the related function <<calloc>>), return it to the
- memory storage pool by calling <<free>> with the address of the object
- as the argument. You can also use <<realloc>> for this purpose by
- calling it with <<0>> as the <[nbytes]> argument.
-
- The <<reallocf>> function behaves just like <<realloc>> except if the
- function is required to allocate new storage and this fails. In this
- case <<reallocf>> will free the original object passed in whereas
- <<realloc>> will not.
-
- The <<memalign>> function returns a block of size <[nbytes]> aligned
- to a <[align]> boundary. The <[align]> argument must be a power of
- two.
-
- The <<malloc_usable_size>> function takes a pointer to a block
- allocated by <<malloc>>. It returns the amount of space that is
- available in the block. This may or may not be more than the size
- requested from <<malloc>>, due to alignment or minimum size
- constraints.
-
- The alternate functions <<_malloc_r>>, <<_realloc_r>>, <<_reallocf_r>>,
- <<_free_r>>, <<_memalign_r>>, and <<_malloc_usable_size_r>> are reentrant
- versions. The extra argument <[reent]> is a pointer to a reentrancy structure.
-
- If you have multiple threads of execution which may call any of these
- routines, or if any of these routines may be called reentrantly, then
- you must provide implementations of the <<__malloc_lock>> and
- <<__malloc_unlock>> functions for your system. See the documentation
- for those functions.
-
- These functions operate by calling the function <<_sbrk_r>> or
- <<sbrk>>, which allocates space. You may need to provide one of these
- functions for your system. <<_sbrk_r>> is called with a positive
- value to allocate more space, and with a negative value to release
- previously allocated space if it is no longer required.
- @xref{Stubs}.
-
- RETURNS
- <<malloc>> returns a pointer to the newly allocated space, if
- successful; otherwise it returns <<NULL>>. If your application needs
- to generate empty objects, you may use <<malloc(0)>> for this purpose.
-
- <<realloc>> returns a pointer to the new block of memory, or <<NULL>>
- if a new block could not be allocated. <<NULL>> is also the result
- when you use `<<realloc(<[aptr]>,0)>>' (which has the same effect as
- `<<free(<[aptr]>)>>'). You should always check the result of
- <<realloc>>; successful reallocation is not guaranteed even when
- you request a smaller object.
-
- <<free>> does not return a result.
-
- <<memalign>> returns a pointer to the newly allocated space.
-
- <<malloc_usable_size>> returns the usable size.
-
- PORTABILITY
- <<malloc>>, <<realloc>>, and <<free>> are specified by the ANSI C
- standard, but other conforming implementations of <<malloc>> may
- behave differently when <[nbytes]> is zero.
-
- <<memalign>> is part of SVR4.
-
- <<malloc_usable_size>> is not portable.
-
- Supporting OS subroutines required: <<sbrk>>. */
-
- #include <_ansi.h>
- #include <reent.h>
- #include <stdlib.h>
- #include <malloc.h>
-
- #ifndef _REENT_ONLY
-
- _PTR
- _DEFUN (malloc, (nbytes),
- size_t nbytes) /* get a block */
- {
- return _malloc_r (_REENT, nbytes);
- }
-
- void
- _DEFUN (free, (aptr),
- _PTR aptr)
- {
- _free_r (_REENT, aptr);
- }
-
- #endif
-
- #endif /* ! defined (MALLOC_PROVIDED) */
+ /*
+ * Kevin Lange's Slab Allocator
+ *
+ * Implemented for CS241, Fall 2010, machine problem 7
+ * at the University of Illinois, Urbana-Champaign.
+ *
+ * Overall competition winner for speed.
+ * Well ranked in memory usage.
+ *
+ * XXX: Modified to work with the ToAru kernel.
+ *
+ * Copyright (c) 2010 Kevin Lange. All rights reserved.
+ *
+ * Developed by: Kevin Lange <lange7@acm.uiuc.edu>
+ * Dave Majnemer <dmajnem2@acm.uiuc.edu>
+ * Association for Computing Machinery
+ * University of Illinois, Urbana-Champaign
+ * http://acm.uiuc.edu
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal with the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimers.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimers in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the names of the Association for Computing Machinery, the
+ * University of Illinois, nor the names of its contributors may be used
+ * to endorse or promote products derived from this Software without
+ * specific prior written permission.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * WITH THE SOFTWARE.
+ *
+ * ##########
+ * # README #
+ * ##########
+ *
+ * About the slab allocator
+ * """"""""""""""""""""""""
+ *
+ * This is a simple implementation of a "slab" allocator. It works by operating
+ * on "bins" of items of predefined sizes and a set of pseudo-bins of any size.
+ * When a new allocation request is made, the allocator determines if it will
+ * fit in an existing bin. If there are no bins of the correct size for a given
+ * allocation request, the allocator will make a bin and add it to a(n empty)
+ * list of available bins of that size. In this implementation, we use sizes
+ * from 4 bytes (32-bit) or 8 bytes (64-bit) to 2KB for bins, fitting a 4K page
+ * size. The implementation allows the number of pages in a single bin to be
+ * increased, as well as allowing for changing the size of page (though this
+ * should, for the most part, remain 4KB under any modern system).
+ *
+ * Special thanks
+ * """"""""""""""
+ *
+ * I would like to thank Dave Majnemer, who I have credited above as a
+ * contributor, for his assistance. Without Dave, klmalloc would be a mash
+ * up of bits of forward movement in no discernible pattern. Dave helped
+ * me ensure that I could build a proper slab allocator and has constantly
+ * derided me for not fixing the bugs and to-do items listed in the last
+ * section of this readme.
+ *
+ * GCC Function Attributes
+ * """""""""""""""""""""""
+ *
+ * A couple of GCC function attributes, designated by the __attribute__
+ * directive, are used in this code to streamline optimization.
+ * I've chosen to include a brief overview of the particular attributes
+ * I am making use of:
+ *
+ * - malloc:
+ * Tells gcc that a given function is a memory allocator
+ * and that non-NULL values it returns should never be
+ * associated with other chunks of memory. We use this for
+ * alloc, realloc and calloc, as is requested in the gcc
+ * documentation for the attribute.
+ *
+ * - always_inline:
+ * Tells gcc to always inline the given code, regardless of the
+ * optimization level. Small functions that would be noticeably
+ * slower with the overhead of parameter handling are given
+ * this attribute.
+ *
+ * - pure:
+ * Tells gcc that a function only uses inputs and its output.
+ *
+ * Things to work on
+ * """""""""""""""""
+ *
+ * TODO: Try to be more consistent on comment widths...
+ * FIXME: Make thread safe! Not necessary for competition, but would be nice.
+ * FIXME: Splitting/coalescing is broken. Fix this ASAP!
+ *
+ **/
+
+ #define _XOPEN_SOURCE 700
+
+ /* Includes {{{ */
+ #include <unistd.h>
+ #include <string.h>
+ #include <stdint.h>
+ #include <limits.h>
+ #include <assert.h>
+ #include <stdio.h>
+ /* }}} */
+ /* Definitions {{{ */
+
+ /*
+ * Defines for often-used integral values
+ * related to our binning and paging strategy.
+ */
+ #define NUM_BINS 1U /* Number of bins, total, under 32-bit. */
+ #define SMALLEST_BIN_LOG 2U /* Logarithm base two of the smallest bin: log_2(sizeof(int32)). */
+ #define BIG_BIN (NUM_BINS - 1) /* Index for the big bin, (NUM_BINS - 1) */
+ #define SMALLEST_BIN (1UL << SMALLEST_BIN_LOG) /* Size of the smallest bin. */
+
+ #define PAGE_SIZE 0x1000 /* Size of a page (in bytes), should be 4KB */
+ #define PAGE_MASK (PAGE_SIZE - 1) /* Block mask, size of a page * number of pages - 1. */
+ #define SKIP_P INT32_MAX /* INT32_MAX is half of UINT32_MAX; this gives us a 50% marker for skip lists. */
+ #define SKIP_MAX_LEVEL 6 /* We have a maximum of 6 levels in our skip lists. */
+
+ /* }}} */
+
+ /*
+ * Internal functions.
+ */
+ static void * __attribute__ ((malloc)) klmalloc(size_t size);
+ static void * __attribute__ ((malloc)) klrealloc(void * ptr, size_t size);
+ static void * __attribute__ ((malloc)) klcalloc(size_t nmemb, size_t size);
+ static void * __attribute__ ((malloc)) klvalloc(size_t size);
+ static void klfree(void * ptr);
+
+ void * __attribute__ ((malloc)) malloc(size_t size) {
+ return klmalloc(size);
+ }
+
+ void * __attribute__ ((malloc)) realloc(void * ptr, size_t size) {
+ return klrealloc(ptr, size);
+ }
+
+ void * __attribute__ ((malloc)) calloc(size_t nmemb, size_t size) {
+ return klcalloc(nmemb, size);
+ }
+
+ void * __attribute__ ((malloc)) valloc(size_t size) {
+ return klvalloc(size);
+ }
+
+ void free(void * ptr) {
+ klfree(ptr);
+ }
+
+
+ /* Bin management {{{ */
+
+ /*
+ * Adjust bin size in bin_size call to proper bounds.
+ */
+ static size_t __attribute__ ((always_inline, pure)) klmalloc_adjust_bin(size_t bin)
+ {
+ if (bin <= (size_t)SMALLEST_BIN_LOG)
+ {
+ return 0;
+ }
+ bin -= SMALLEST_BIN_LOG + 1;
+ if (bin > (size_t)BIG_BIN) {
+ return BIG_BIN;
+ }
+ return bin;
+ }
+
+ /*
+ * Given a size value, find the correct bin
+ * to place the requested allocation in.
+ */
+ static size_t __attribute__ ((always_inline, pure)) klmalloc_bin_size(size_t size) {
+ size_t bin = sizeof(size) * CHAR_BIT - __builtin_clzl(size);
+ bin += !!(size & (size - 1));
+ return klmalloc_adjust_bin(bin);
+ }
+
+ /*
+ * Bin header - One page of memory.
+ * Appears at the front of a bin to point to the
+ * previous bin (or NULL if the first), the next bin
+ * (or NULL if the last) and the head of the bin, which
+ * is a stack of cells of data.
+ */
+ typedef struct _klmalloc_bin_header {
+ struct _klmalloc_bin_header * next; /* Pointer to the next node. */
+ void * head; /* Head of this bin. */
+ size_t size; /* Size of this bin, if big; otherwise bin index. */
+ } klmalloc_bin_header;
+
+ /*
+ * A big bin header is basically the same as a regular bin header
+ * only with a pointer to the previous (physically) instead of
+ * a "next" and with a list of forward headers.
+ */
+ typedef struct _klmalloc_big_bin_header {
+ struct _klmalloc_big_bin_header * next;
+ void * head;
+ size_t size;
+ struct _klmalloc_big_bin_header * prev;
+ struct _klmalloc_big_bin_header * forward[SKIP_MAX_LEVEL+1];
+ } klmalloc_big_bin_header;
+
+
+ /*
+ * List of pages in a bin.
+ */
+ typedef struct _klmalloc_bin_header_head {
+ klmalloc_bin_header * first;
+ } klmalloc_bin_header_head;
+
+ /*
+ * Array of available bins.
+ */
+ static klmalloc_bin_header_head klmalloc_bin_head[NUM_BINS - 1]; /* Small bins */
+ static struct _klmalloc_big_bins {
+ klmalloc_big_bin_header head;
+ int level;
+ } klmalloc_big_bins;
+ static klmalloc_big_bin_header * klmalloc_newest_big = NULL; /* Newest big bin */
+
+ /* }}} Bin management */
+ /* Doubly-Linked List {{{ */
+
+ /*
+ * Remove an entry from a page list.
+ * Decouples the element from its
+ * position in the list by linking
+ * its neighbors to each other.
+ */
+ static void __attribute__ ((always_inline)) klmalloc_list_decouple(klmalloc_bin_header_head *head, klmalloc_bin_header *node) {
+ klmalloc_bin_header *next = node->next;
+ head->first = next;
+ node->next = NULL;
+ }
+
+ /*
+ * Insert an entry into a page list.
+ * The new entry is placed at the front
+ * of the list and becomes the new
+ * head; the old head is linked in
+ * as its successor (singly linked).
+ */
+ static void __attribute__ ((always_inline)) klmalloc_list_insert(klmalloc_bin_header_head *head, klmalloc_bin_header *node) {
+ node->next = head->first;
+ head->first = node;
+ }
+
+ /*
+ * Get the head of a page list.
+ * Because redundant function calls
+ * are really great, and just in case
+ * we change the list implementation.
+ */
+ static klmalloc_bin_header * __attribute__ ((always_inline)) klmalloc_list_head(klmalloc_bin_header_head *head) {
+ return head->first;
+ }
+
+ /* }}} Lists */
+ /* Skip List {{{ */
+
+ /*
+ * Skip lists are efficient
+ * data structures for storing
+ * and searching ordered data.
+ *
+ * Here, the skip lists are used
+ * to keep track of big bins.
+ */
+
+ /*
+ * Generate a random value in an appropriate range.
+ * This is a xor-shift RNG.
+ */
+ static uint32_t __attribute__ ((pure)) klmalloc_skip_rand() {
+ static uint32_t x = 123456789;
+ static uint32_t y = 362436069;
+ static uint32_t z = 521288629;
+ static uint32_t w = 88675123;
+
+ uint32_t t;
+
+ t = x ^ (x << 11);
+ x = y; y = z; z = w;
+ return w = w ^ (w >> 19) ^ t ^ (t >> 8);
+ }
+
+ /*
+ * Generate a random level for a skip node
+ */
+ static int __attribute__ ((pure, always_inline)) klmalloc_random_level() {
+ int level = 0;
+ /*
+ * Keep trying to check rand() against 50% of its maximum.
+ * This provides 50%, 25%, 12.5%, etc. chance for each level.
+ */
+ while (klmalloc_skip_rand() < SKIP_P && level < SKIP_MAX_LEVEL) {
+ ++level;
+ }
+ return level;
+ }
+
+ /*
+ * Find best fit for a given value.
+ */
+ static klmalloc_big_bin_header * klmalloc_skip_list_findbest(size_t search_size) {
+ klmalloc_big_bin_header * node = &klmalloc_big_bins.head;
+ /*
+ * Loop through the skip list until we hit something > our search value.
+ */
+ int i;
+ for (i = klmalloc_big_bins.level; i >= 0; --i) {
+ while (node->forward[i] && (node->forward[i]->size < search_size)) {
+ node = node->forward[i];
+ if (node)
+ assert((node->size + sizeof(klmalloc_big_bin_header)) % PAGE_SIZE == 0);
+ }
+ }
+ /*
+ * This value will either be NULL (we found nothing)
+ * or a node (we found a minimum fit).
+ */
+ node = node->forward[0];
+ if (node) {
+ assert((uintptr_t)node % PAGE_SIZE == 0);
+ assert((node->size + sizeof(klmalloc_big_bin_header)) % PAGE_SIZE == 0);
+ }
+ return node;
+ }
+
+ /*
+ * Insert a header into the skip list.
+ */
+ static void klmalloc_skip_list_insert(klmalloc_big_bin_header * value) {
+ /*
+ * You better be giving me something valid to insert,
+ * or I will slit your ****ing throat.
+ */
+ assert(value != NULL);
+ assert(value->head != NULL);
+ assert((uintptr_t)value->head > (uintptr_t)value);
+ if (value->size > NUM_BINS) {
+ assert((uintptr_t)value->head < (uintptr_t)value + value->size);
+ } else {
+ assert((uintptr_t)value->head < (uintptr_t)value + PAGE_SIZE);
+ }
+ assert((uintptr_t)value % PAGE_SIZE == 0);
+ assert((value->size + sizeof(klmalloc_big_bin_header)) % PAGE_SIZE == 0);
+ assert(value->size != 0);
+
+ /*
+ * Starting from the head node of the bin locator...
+ */
+ klmalloc_big_bin_header * node = &klmalloc_big_bins.head;
+ klmalloc_big_bin_header * update[SKIP_MAX_LEVEL + 1];
+
+ /*
+ * Loop through the skiplist to find the right place
+ * to insert the node (where ->forward[] > value)
+ */
+ int i;
+ for (i = klmalloc_big_bins.level; i >= 0; --i) {
+ while (node->forward[i] && node->forward[i]->size < value->size) {
+ node = node->forward[i];
+ if (node)
+ assert((node->size + sizeof(klmalloc_big_bin_header)) % PAGE_SIZE == 0);
+ }
+ update[i] = node;
+ }
+ node = node->forward[0];
+
+ /*
+ * Make the new skip node and update
+ * the forward values.
+ */
+ if (node != value) {
+ int level = klmalloc_random_level();
+ /*
+ * Get all of the nodes before this.
+ */
+ if (level > klmalloc_big_bins.level) {
+ for (i = klmalloc_big_bins.level + 1; i <= level; ++i) {
+ update[i] = &klmalloc_big_bins.head;
+ }
+ klmalloc_big_bins.level = level;
+ }
+
+ /*
+ * Make the new node.
+ */
+ node = value;
+
+ /*
+ * Run through and point the preceding nodes
+ * for each level to the new node.
+ */
+ for (i = 0; i <= level; ++i) {
+ node->forward[i] = update[i]->forward[i];
+ if (node->forward[i])
+ assert((node->forward[i]->size + sizeof(klmalloc_big_bin_header)) % PAGE_SIZE == 0);
+ update[i]->forward[i] = node;
+ }
+ }
+ }
+
+ /*
+ * Delete a header from the skip list.
+ * Be sure you didn't change the size, or we won't be able to find it.
+ */
+ static void klmalloc_skip_list_delete(klmalloc_big_bin_header * value) {
+ /*
+ * Debug assertions
+ */
+ assert(value != NULL);
+ assert(value->head);
+ assert((uintptr_t)value->head > (uintptr_t)value);
+ if (value->size > NUM_BINS) {
+ assert((uintptr_t)value->head < (uintptr_t)value + value->size);
+ } else {
+ assert((uintptr_t)value->head < (uintptr_t)value + PAGE_SIZE);
+ }
+
+ /*
+ * Starting from the bin header, again...
+ */
+ klmalloc_big_bin_header * node = &klmalloc_big_bins.head;
+ klmalloc_big_bin_header * update[SKIP_MAX_LEVEL + 1];
+
+ /*
+ * Find the node.
+ */
+ int i;
+ for (i = klmalloc_big_bins.level; i >= 0; --i) {
+ while (node->forward[i] && node->forward[i]->size < value->size) {
+ node = node->forward[i];
+ if (node)
+ assert((node->size + sizeof(klmalloc_big_bin_header)) % PAGE_SIZE == 0);
+ }
+ update[i] = node;
+ }
+ node = node->forward[0];
+ while (node != value) {
+ node = node->forward[0];
+ }
+
+ if (node != value) {
+ node = klmalloc_big_bins.head.forward[0];
+ while (node->forward[0] && node->forward[0] != value) {
+ node = node->forward[0];
+ }
+ node = node->forward[0];
+ }
+ /*
+ * If we found the node, delete it;
+ * otherwise, we do nothing.
+ */
+ if (node == value) {
+ for (i = 0; i <= klmalloc_big_bins.level; ++i) {
+ if (update[i]->forward[i] != node) {
+ break;
+ }
+ update[i]->forward[i] = node->forward[i];
+ if (update[i]->forward[i]) {
+ assert((uintptr_t)(update[i]->forward[i]) % PAGE_SIZE == 0);
+ assert((update[i]->forward[i]->size + sizeof(klmalloc_big_bin_header)) % PAGE_SIZE == 0);
+ }
+ }
+
+ while (klmalloc_big_bins.level > 0 && klmalloc_big_bins.head.forward[klmalloc_big_bins.level] == NULL) {
+ --klmalloc_big_bins.level;
+ }
+ }
+ }
+
+ /* }}} */
+ /* Stack {{{ */
+ /*
+ * Pop an item from a block.
+ * Free space is stored as a stack,
+ * so we get a free space for a bin
+ * by popping a free node from the
+ * top of the stack.
+ */
+ static void * klmalloc_stack_pop(klmalloc_bin_header *header) {
+ assert(header);
+ assert(header->head != NULL);
+ assert((uintptr_t)header->head > (uintptr_t)header);
+ if (header->size > NUM_BINS) {
+ assert((uintptr_t)header->head < (uintptr_t)header + header->size);
+ } else {
+ assert((uintptr_t)header->head < (uintptr_t)header + PAGE_SIZE);
+ }
+
+ /*
+ * Remove the current head and point
+ * the head to where the old head pointed.
+ */
+ void *item = header->head;
+ size_t **head = header->head;
+ size_t *next = *head;
+ header->head = next;
+ return item;
+ }
+
+ /*
+ * Push an item into a block.
+ * When we free memory, we need
+ * to add the freed cell back
+ * into the stack of free spaces
+ * for the block.
+ */
+ static void klmalloc_stack_push(klmalloc_bin_header *header, void *ptr) {
+ assert(ptr != NULL);
+ assert((uintptr_t)ptr > (uintptr_t)header);
+ if (header->size > NUM_BINS) {
+ assert((uintptr_t)ptr < (uintptr_t)header + header->size);
+ } else {
+ assert((uintptr_t)ptr < (uintptr_t)header + PAGE_SIZE);
+ }
+ size_t **item = (size_t **)ptr;
+ *item = (size_t *)header->head;
+ header->head = item;
+ }
+
+ /*
+ * Is this cell stack empty?
+ * If the head of the stack points
+ * to NULL, we have exhausted the
+ * stack, so there is no more free
+ * space available in the block.
+ */
+ static int __attribute__ ((always_inline)) klmalloc_stack_empty(klmalloc_bin_header *header) {
+ return header->head == NULL;
+ }
+
+ /* }}} Stack */
+
+ /* malloc() {{{ */
+ static void * __attribute__ ((malloc)) klmalloc(size_t size) {
+ /*
+ * C standard implementation:
+ * If size is zero, we can choose to do a number of things.
+ * This implementation will return a NULL pointer.
+ */
+ if (__builtin_expect(size == 0, 0))
+ return NULL;
+
+ /*
+ * Find the appropriate bin for the requested
+ * allocation and start looking through that list.
+ */
+ unsigned int bucket_id = klmalloc_bin_size(size);
+
+ if (bucket_id < BIG_BIN) {
+ /*
+ * Small bins.
+ */
+ klmalloc_bin_header * bin_header = klmalloc_list_head(&klmalloc_bin_head[bucket_id]);
+ if (!bin_header) {
+ /*
+ * Grow the heap for the new bin.
+ */
+ bin_header = (klmalloc_bin_header*)sbrk(PAGE_SIZE);
+ assert(bin_header != NULL);
+ assert((uintptr_t)bin_header % PAGE_SIZE == 0);
+
+ /*
+ * Set the head of the stack.
+ */
+ bin_header->head = (void*)((uintptr_t)bin_header + sizeof(klmalloc_bin_header));
+ /*
+ * Insert the new bin at the front of
+ * the list of bins for this size.
+ */
+ klmalloc_list_insert(&klmalloc_bin_head[bucket_id], bin_header);
+ /*
+ * Initialize the stack inside the bin.
+ * The stack is initially full, with each
+ * entry pointing to the next until the end
+ * which points to NULL.
+ */
+ size_t adj = SMALLEST_BIN_LOG + bucket_id;
+ size_t i, available = ((PAGE_SIZE - sizeof(klmalloc_bin_header)) >> adj) - 1;
+
+ size_t **base = bin_header->head;
+ for (i = 0; i < available; ++i) {
+ /*
+ * Our available memory is made into a stack, with each
+ * piece of memory turned into a pointer to the next
+ * available piece. When we want to get a new piece
+ * of memory from this block, we just pop off a free
+ * spot and give its address.
+ */
+ base[i << bucket_id] = (size_t *)&base[(i + 1) << bucket_id];
+ }
+ base[available << bucket_id] = NULL;
+ bin_header->size = bucket_id;
+ }
+ size_t ** item = klmalloc_stack_pop(bin_header);
+ if (klmalloc_stack_empty(bin_header)) {
+ klmalloc_list_decouple(&(klmalloc_bin_head[bucket_id]),bin_header);
+ }
+ return item;
+ } else {
+ /*
+ * Big bins.
+ */
+ klmalloc_big_bin_header * bin_header = klmalloc_skip_list_findbest(size);
+ if (bin_header) {
+ assert(bin_header->size >= size);
+ /*
+ * If we found one, delete it from the skip list
+ */
+ klmalloc_skip_list_delete(bin_header);
+ /*
+ * Retrieve the head of the block.
+ */
+ size_t ** item = klmalloc_stack_pop((klmalloc_bin_header *)bin_header);
+ #if 0
+ /*
+ * Resize block, if necessary
+ */
+ assert(bin_header->head == NULL);
+ size_t old_size = bin_header->size;
+ //size_t rsize = size;
+ /*
+ * Round the requested size to our full required size.
+ */
+ size = ((size + sizeof(klmalloc_big_bin_header)) / PAGE_SIZE + 1) * PAGE_SIZE - sizeof(klmalloc_big_bin_header);
+ assert((size + sizeof(klmalloc_big_bin_header)) % PAGE_SIZE == 0);
+ if (bin_header->size > size * 2) {
+ assert(old_size != size);
+ /*
+ * If we have extra space, start splitting.
+ */
+ bin_header->size = size;
+ assert(sbrk(0) >= bin_header->size + (uintptr_t)bin_header);
+ /*
+ * Make a new block at the end of the needed space.
+ */
+ klmalloc_big_bin_header * header_new = (klmalloc_big_bin_header *)((uintptr_t)bin_header + sizeof(klmalloc_big_bin_header) + size);
+ assert((uintptr_t)header_new % PAGE_SIZE == 0);
+ memset(header_new, 0, sizeof(klmalloc_big_bin_header) + sizeof(void *));
+ header_new->prev = bin_header;
+ if (bin_header->next) {
+ bin_header->next->prev = header_new;
+ }
+ header_new->next = bin_header->next;
+ bin_header->next = header_new;
+ if (klmalloc_newest_big == bin_header) {
+ klmalloc_newest_big = header_new;
+ }
+ header_new->size = old_size - (size + sizeof(klmalloc_big_bin_header));
+ assert(((uintptr_t)header_new->size + sizeof(klmalloc_big_bin_header)) % PAGE_SIZE == 0);
+ fprintf(stderr, "Splitting %p [now %zx] at %p [%zx] from [%zx,%zx].\n", (void*)bin_header, bin_header->size, (void*)header_new, header_new->size, old_size, size);
+ /*
+ * Free the new block.
+ */
+ klfree((void *)((uintptr_t)header_new + sizeof(klmalloc_big_bin_header)));
+ }
+ #endif
+ return item;
+ } else {
+ /*
+ * Round requested size to a set of pages, plus the header size.
+ */
+ size_t pages = (size + sizeof(klmalloc_big_bin_header)) / PAGE_SIZE + 1;
+ bin_header = (klmalloc_big_bin_header*)sbrk(PAGE_SIZE * pages);
+ assert((uintptr_t)bin_header % PAGE_SIZE == 0);
+ /*
+ * Give the header the remaining space.
+ */
+ bin_header->size = pages * PAGE_SIZE - sizeof(klmalloc_big_bin_header);
+ assert((bin_header->size + sizeof(klmalloc_big_bin_header)) % PAGE_SIZE == 0);
+ /*
+ * Link the block in physical memory.
+ */
+ bin_header->prev = klmalloc_newest_big;
+ if (bin_header->prev) {
+ bin_header->prev->next = bin_header;
+ }
+ klmalloc_newest_big = bin_header;
+ bin_header->next = NULL;
+ /*
+ * Return the head of the block.
+ */
+ bin_header->head = NULL;
+ return (void*)((uintptr_t)bin_header + sizeof(klmalloc_big_bin_header));
+ }
+ }
+ }
+ /* }}} */
+ /* free() {{{ */
+ static void klfree(void *ptr) {
+ /*
+ * C standard implementation: Do nothing when NULL is passed to free.
+ */
+ if (__builtin_expect(ptr == NULL, 0)) {
+ return;
+ }
+
+ /*
+ * Woah, woah, hold on, was this a page-aligned block?
+ */
+ if ((uintptr_t)ptr % PAGE_SIZE == 0) {
+ /*
+ * Well howdy-do, it was.
+ */
+ ptr = (void *)((uintptr_t)ptr - 1);
+ }
+
+ /*
+ * Get our pointer to the head of this block by
+ * page aligning it.
+ */
+ klmalloc_bin_header * header = (klmalloc_bin_header *)((uintptr_t)ptr & (size_t)~PAGE_MASK);
+ assert((uintptr_t)header % PAGE_SIZE == 0);
+
+ /*
+ * For small bins, the bin number is stored in the size
+ * field of the header. For large bins, the actual size
+ * available in the bin is stored in this field. It's
+ * easy to tell which is which, though.
+ */
+ size_t bucket_id = header->size;
+ if (bucket_id > (size_t)NUM_BINS) {
+ bucket_id = BIG_BIN;
+ klmalloc_big_bin_header *bheader = (klmalloc_big_bin_header*)header;
+
+ assert(bheader);
+ assert(bheader->head == NULL);
+ assert((bheader->size + sizeof(klmalloc_big_bin_header)) % PAGE_SIZE == 0);
+ /*
+ * Coalesce forward blocks into us.
+ */
+ #if 0
+ if (bheader != klmalloc_newest_big) {
+ /*
+ * If we are not the newest big bin, there is most definitely
+ * something in front of us that we can read.
+ */
+ assert((bheader->size + sizeof(klmalloc_big_bin_header)) % PAGE_SIZE == 0);
+ klmalloc_big_bin_header * next = (void *)((uintptr_t)bheader + sizeof(klmalloc_big_bin_header) + bheader->size);
+ assert((uintptr_t)next % PAGE_SIZE == 0);
+ if (next == bheader->next && next->head) { //next->size > NUM_BINS && next->head) {
+ /*
+ * If that something is an available big bin, we can
+ * coalesce it into us to form one larger bin.
+ */
+
+ // XXX
+ size_t old_size = bheader->size;
+ // XXX
+
+ klmalloc_skip_list_delete(next);
+ bheader->size = (size_t)bheader->size + (size_t)sizeof(klmalloc_big_bin_header) + next->size;
+ assert((bheader->size + sizeof(klmalloc_big_bin_header)) % PAGE_SIZE == 0);
+
+ if (next == klmalloc_newest_big) {
+ /*
+ * If the guy in front of us was the newest,
+ * we are now the newest (as we are him).
+ */
+ klmalloc_newest_big = bheader;
+ } else {
+ if (next->next) {
+ next->next->prev = bheader;
+ }
+ }
+ fprintf(stderr,"Coelesced (forwards) %p [%zx] <- %p [%zx] = %zx\n", (void*)bheader, old_size, (void*)next, next->size, bheader->size);
+ }
+ }
+ #endif
+ /*
+ * Coalesce backwards
+ */
+ #if 0
+ if (bheader->prev && bheader->prev->head) {
+ /*
+ * If there is something behind us, it is available, and there is nothing between
+ * it and us, we can coalesce ourselves into it to form a big block.
+ */
+ if ((uintptr_t)bheader->prev + (bheader->prev->size + sizeof(klmalloc_big_bin_header)) == (uintptr_t)bheader) {
+
+ size_t old_size = bheader->prev->size;
+
+ klmalloc_skip_list_delete(bheader->prev);
+ bheader->prev->size = (size_t)bheader->prev->size + (size_t)bheader->size + sizeof(klmalloc_big_bin_header);
+ assert((bheader->prev->size + sizeof(klmalloc_big_bin_header)) % PAGE_SIZE == 0);
+ klmalloc_skip_list_insert(bheader->prev);
+ if (klmalloc_newest_big == bheader) {
+ klmalloc_newest_big = bheader->prev;
+ } else {
+ if (bheader->next) {
+ bheader->next->prev = bheader->prev;
+ }
+ }
+ fprintf(stderr,"Coelesced (backwards) %p [%zx] <- %p [%zx] = %zx\n", (void*)bheader->prev, old_size, (void*)bheader, bheader->size, bheader->size);
+ /*
+ * If we coalesced backwards, we are done.
+ */
+ return;
+ }
+ }
+ #endif
+ /*
+ * Push new space back into the stack.
+ */
+ klmalloc_stack_push((klmalloc_bin_header *)bheader, (void *)((uintptr_t)bheader + sizeof(klmalloc_big_bin_header)));
+ assert(bheader->head != NULL);
+ /*
+ * Insert the block into list of available slabs.
+ */
+ klmalloc_skip_list_insert(bheader);
+ } else {
+ /*
+ * If the stack is empty, we are freeing
+ * a block from a previously full bin.
+ * Return it to the busy bins list.
+ */
+ if (klmalloc_stack_empty(header)) {
+ klmalloc_list_insert(&klmalloc_bin_head[bucket_id], header);
+ }
+ /*
+ * Push new space back into the stack.
+ */
+ klmalloc_stack_push(header, ptr);
+ }
+ }
+ /* }}} */
+ /* valloc() {{{ */
+ /*
+  * klvalloc: allocate a block whose returned address is page-aligned.
+  *
+  * XXX: THIS IS HORRIBLY, HORRIBLY WASTEFUL!! ONLY USE THIS
+  * IF YOU KNOW WHAT YOU ARE DOING!
+  *
+  * Over-allocates so the address just past the big-bin header can be
+  * advanced to the next page boundary; klfree() recognizes page-aligned
+  * pointers and steps back a byte to find the header on the previous page.
+  *
+  * Returns NULL if the underlying allocation fails.
+  */
+ static void * __attribute__ ((malloc)) klvalloc(size_t size) {
+ 	size_t true_size = size + PAGE_SIZE - sizeof(klmalloc_big_bin_header); /* Here we go... */
+ 	void * result = klmalloc(true_size);
+ 	if (__builtin_expect(result == NULL, 0)) {
+ 		/*
+ 		 * Propagate failure: offsetting a NULL pointer is undefined
+ 		 * behavior and would hand the caller a bogus non-NULL address.
+ 		 */
+ 		return NULL;
+ 	}
+ 	return (void *)((uintptr_t)result + (PAGE_SIZE - sizeof(klmalloc_big_bin_header)));
+ }
+ /* }}} */
+ /* realloc() {{{ */
+ /*
+  * klrealloc: resize the allocation at `ptr` to at least `size` bytes.
+  *
+  * Follows the C standard contract: realloc(NULL, n) behaves like
+  * malloc(n); realloc(p, 0) frees p and returns NULL. On failure the
+  * original block is left intact and NULL is returned.
+  *
+  * NOTE(review): the `malloc` attribute is dubious on a realloc-style
+  * function — the early-return path below can return `ptr` itself,
+  * which aliases existing pointers. Kept to preserve the declared
+  * interface; confirm against any forward declaration.
+  */
+ static void * __attribute__ ((malloc)) klrealloc(void *ptr, size_t size) {
+ 	/*
+ 	 * C standard implementation: When NULL is passed to realloc,
+ 	 * simply malloc the requested size and return a pointer to that.
+ 	 * (Use klmalloc directly for consistency with the main path below.)
+ 	 */
+ 	if (__builtin_expect(ptr == NULL, 0))
+ 		return klmalloc(size);
+
+ 	/*
+ 	 * C standard implementation: For a size of zero, free the
+ 	 * pointer and return NULL, allocating no new memory.
+ 	 */
+ 	if (__builtin_expect(size == 0, 0))
+ 	{
+ 		klfree(ptr);
+ 		return NULL;
+ 	}
+
+ 	/*
+ 	 * Find the bin for the given pointer
+ 	 * by aligning it to a page.
+ 	 */
+ 	klmalloc_bin_header * header_old = (void *)((uintptr_t)ptr & (size_t)~PAGE_MASK);
+
+ 	/*
+ 	 * (This will only happen for a big bin, mathematically speaking:
+ 	 * a small bin stores its bucket id here, and any id >= size implies
+ 	 * the bin's real capacity also covers the request.)
+ 	 * If we still have room in our bin for the additional space,
+ 	 * we don't need to do anything.
+ 	 */
+ 	if (header_old->size >= size) {
+ 		/*
+ 		 * TODO: Break apart blocks here, which is far more important
+ 		 * than breaking them up on allocations.
+ 		 */
+ 		return ptr;
+ 	}
+
+ 	/*
+ 	 * Reallocate more memory.
+ 	 */
+ 	void * newptr = klmalloc(size);
+ 	if (__builtin_expect(newptr != NULL, 1)) {
+ 		size_t old_size = header_old->size;
+ 		if (old_size < (size_t)BIG_BIN) {
+ 			/*
+ 			 * If we are copying from a small bin,
+ 			 * we need to get the size of the bin
+ 			 * from its id.
+ 			 */
+ 			old_size = (1UL << (SMALLEST_BIN_LOG + old_size));
+ 		}
+
+ 		/*
+ 		 * Copy the old value into the new value.
+ 		 * A small bin's byte capacity can exceed the requested size,
+ 		 * so clamp the copy length to `size` as well — copying the
+ 		 * full capacity could overrun the new (smaller) block.
+ 		 */
+ 		memcpy(newptr, ptr, old_size < size ? old_size : size);
+ 		klfree(ptr);
+ 		return newptr;
+ 	}
+
+ 	/*
+ 	 * We failed to allocate more memory,
+ 	 * which means we're probably out.
+ 	 *
+ 	 * Bail and return NULL; the caller's block is untouched.
+ 	 */
+ 	return NULL;
+ }
+ /* }}} */
+ /* calloc() {{{ */
+ /*
+  * klcalloc: allocate zero-initialized storage for an array of `nmemb`
+  * elements of `size` bytes each.
+  *
+  * Implemented as klmalloc followed by memset(0). Returns NULL if the
+  * total byte count would overflow size_t or the allocation fails.
+  */
+ static void * __attribute__ ((malloc)) klcalloc(size_t nmemb, size_t size) {
+ 	/*
+ 	 * Guard against nmemb * size wrapping around: a wrapped product
+ 	 * would allocate (and zero) a short block while the caller
+ 	 * believes the full array fits (CERT MEM07-C).
+ 	 */
+ 	if (size != 0 && nmemb > ((size_t)-1) / size) {
+ 		return NULL;
+ 	}
+
+ 	void *ptr = klmalloc(nmemb * size);
+ 	if (__builtin_expect(ptr != NULL, 1))
+ 		memset(ptr,0x00,nmemb * size);
+ 	return ptr;
+ }
+ /* }}} */
+
+
+ /*
+ * vim:noexpandtab
+ * vim:tabstop=4
+ * vim:shiftwidth=4
+ */
diff -rupN _source/newlib-1.19.0/newlib/libc/stdlib/mallocr.c newlib-1.19.0/newlib/libc/stdlib/mallocr.c
--- _source/newlib-1.19.0/newlib/libc/stdlib/mallocr.c 2010-05-31 14:15:41.000000000 -0500
+++ newlib-1.19.0/newlib/libc/stdlib/mallocr.c 2011-04-30 21:28:46.000000000 -0500