Skip to content
Browse files

[sgen] Remove the copying major collector.

This probably isn't even working anymore, and it's no longer useful.
  • Loading branch information...
1 parent b69f908 commit db374b70a8b866d60e087b0d9f1c1378709877fa @schani schani committed Jan 16, 2013
View
5 man/mono.1
@@ -1047,9 +1047,8 @@ program but will obviously use more memory. The default nursery size
Specifies which major collector to use. Options are `marksweep' for
the Mark&Sweep collector, `marksweep-conc' for concurrent Mark&Sweep,
`marksweep-par' for parallel Mark&Sweep, `marksweep-fixed' for
-Mark&Sweep with a fixed heap, `marksweep-fixed-par' for parallel
-Mark&Sweep with a fixed heap and `copying' for the copying
-collector. The Mark&Sweep collector is the default.
+Mark&Sweep with a fixed heap, and `marksweep-fixed-par' for parallel
+Mark&Sweep with a fixed heap. The Mark&Sweep collector is the default.
.TP
\fBmajor-heap-size=\fIsize\fR
Sets the size of the major heap (not including the large object space)
View
2 mono/metadata/Makefile.am.in
@@ -218,13 +218,11 @@ sgen_sources = \
sgen-os-win32.c \
sgen-gc.c \
sgen-internal.c \
- sgen-pinned-allocator.c \
sgen-marksweep.c \
sgen-marksweep-fixed.c \
sgen-marksweep-par.c \
sgen-marksweep-fixed-par.c \
sgen-marksweep-conc.c \
- sgen-major-copying.c \
sgen-los.c \
sgen-protocol.c \
sgen-bridge.c \
View
6 mono/metadata/sgen-gc.c
@@ -1678,8 +1678,6 @@ alloc_nursery (void)
scan_starts = (alloc_size + SCAN_START_SIZE - 1) / SCAN_START_SIZE;
section->scan_starts = sgen_alloc_internal_dynamic (sizeof (char*) * scan_starts, INTERNAL_MEM_SCAN_STARTS, TRUE);
section->num_scan_start = scan_starts;
- section->block.role = MEMORY_ROLE_GEN0;
- section->block.next = NULL;
nursery_section = section;
@@ -4813,8 +4811,6 @@ mono_gc_base_init (void)
exit (1);
}
sgen_marksweep_conc_init (&major_collector);
- } else if (!strcmp (major_collector_opt, "copying")) {
- sgen_copying_init (&major_collector);
} else {
fprintf (stderr, "Unknown major collector `%s'.\n", major_collector_opt);
exit (1);
@@ -4997,7 +4993,7 @@ mono_gc_base_init (void)
fprintf (stderr, " max-heap-size=N (where N is an integer, possibly with a k, m or a g suffix)\n");
fprintf (stderr, " soft-heap-limit=n (where N is an integer, possibly with a k, m or a g suffix)\n");
fprintf (stderr, " nursery-size=N (where N is an integer, possibly with a k, m or a g suffix)\n");
- fprintf (stderr, " major=COLLECTOR (where COLLECTOR is `marksweep', `marksweep-par', 'marksweep-fixed', 'marksweep-fixed-par' or `copying')\n");
+ fprintf (stderr, " major=COLLECTOR (where COLLECTOR is `marksweep', `marksweep-par', 'marksweep-fixed' or 'marksweep-fixed-par')\n");
fprintf (stderr, " minor=COLLECTOR (where COLLECTOR is `simple' or `split')\n");
fprintf (stderr, " wbarrier=WBARRIER (where WBARRIER is `remset' or `cardtable')\n");
fprintf (stderr, " stack-mark=MARK-METHOD (where MARK-METHOD is 'precise' or 'conservative')\n");
View
40 mono/metadata/sgen-gc.h
@@ -146,25 +146,11 @@ struct _SgenThreadInfo {
#endif
};
-enum {
- MEMORY_ROLE_GEN0,
- MEMORY_ROLE_GEN1,
- MEMORY_ROLE_PINNED
-};
-
-typedef struct _SgenBlock SgenBlock;
-struct _SgenBlock {
- void *next;
- unsigned char role;
-};
-
/*
- * The nursery section and the major copying collector's sections use
- * this struct.
+ * The nursery section uses this struct.
*/
typedef struct _GCMemSection GCMemSection;
struct _GCMemSection {
- SgenBlock block;
char *data;
mword size;
/* pointer where more data could be allocated if it fits */
@@ -179,13 +165,8 @@ struct _GCMemSection {
void **pin_queue_start;
int pin_queue_num_entries;
unsigned short num_scan_start;
- gboolean is_to_space;
};
-#define SGEN_PINNED_CHUNK_FOR_PTR(o) ((SgenBlock*)(((mword)(o)) & ~(SGEN_PINNED_CHUNK_SIZE - 1)))
-
-typedef struct _SgenPinnedChunk SgenPinnedChunk;
-
/*
* Recursion is not allowed for the thread lock.
*/
@@ -479,22 +460,13 @@ enum {
INTERNAL_MEM_MAX
};
-#define SGEN_PINNED_FREELIST_NUM_SLOTS 30
-
-typedef struct {
- SgenPinnedChunk *chunk_list;
- SgenPinnedChunk *free_lists [SGEN_PINNED_FREELIST_NUM_SLOTS];
- void *delayed_free_lists [SGEN_PINNED_FREELIST_NUM_SLOTS];
-} SgenPinnedAllocator;
-
enum {
GENERATION_NURSERY,
GENERATION_OLD,
GENERATION_MAX
};
void sgen_init_internal_allocator (void) MONO_INTERNAL;
-void sgen_init_pinned_allocator (void) MONO_INTERNAL;
typedef struct _ObjectList ObjectList;
struct _ObjectList {
@@ -514,7 +486,6 @@ typedef struct
} ScanCopyContext;
void sgen_report_internal_mem_usage (void) MONO_INTERNAL;
-void sgen_report_pinned_mem_usage (SgenPinnedAllocator *alc) MONO_INTERNAL;
void sgen_dump_internal_mem_usage (FILE *heap_dump_file) MONO_INTERNAL;
void sgen_dump_section (GCMemSection *section, const char *type) MONO_INTERNAL;
void sgen_dump_occupied (char *start, char *end, char *section_start) MONO_INTERNAL;
@@ -529,16 +500,8 @@ void sgen_free_internal (void *addr, int type) MONO_INTERNAL;
void* sgen_alloc_internal_dynamic (size_t size, int type, gboolean assert_on_failure) MONO_INTERNAL;
void sgen_free_internal_dynamic (void *addr, size_t size, int type) MONO_INTERNAL;
-void* sgen_alloc_pinned (SgenPinnedAllocator *allocator, size_t size) MONO_INTERNAL;
-void sgen_free_pinned (SgenPinnedAllocator *allocator, void *addr, size_t size) MONO_INTERNAL;
-
gboolean sgen_parse_environment_string_extract_number (const char *str, glong *out) MONO_INTERNAL;
-void sgen_pinned_scan_objects (SgenPinnedAllocator *alc, IterateObjectCallbackFunc callback, void *callback_data) MONO_INTERNAL;
-void sgen_pinned_scan_pinned_objects (SgenPinnedAllocator *alc, IterateObjectCallbackFunc callback, void *callback_data) MONO_INTERNAL;
-
-void sgen_pinned_update_heap_boundaries (SgenPinnedAllocator *alc) MONO_INTERNAL;
-
void** sgen_find_optimized_pin_queue_area (void *start, void *end, int *num) MONO_INTERNAL;
void sgen_find_section_pin_queue_start_end (GCMemSection *section) MONO_INTERNAL;
void sgen_pin_objects_in_section (GCMemSection *section, ScanCopyContext ctx) MONO_INTERNAL;
@@ -739,7 +702,6 @@ void sgen_marksweep_fixed_init (SgenMajorCollector *collector) MONO_INTERNAL;
void sgen_marksweep_par_init (SgenMajorCollector *collector) MONO_INTERNAL;
void sgen_marksweep_fixed_par_init (SgenMajorCollector *collector) MONO_INTERNAL;
void sgen_marksweep_conc_init (SgenMajorCollector *collector) MONO_INTERNAL;
-void sgen_copying_init (SgenMajorCollector *collector) MONO_INTERNAL;
SgenMajorCollector* sgen_get_major_collector (void) MONO_INTERNAL;
View
716 mono/metadata/sgen-major-copying.c
@@ -1,716 +0,0 @@
-/*
- * sgen-major-copying.c: The copying major collector.
- *
- * Author:
- * Paolo Molaro (lupus@ximian.com)
- *
- * Copyright 2005-2010 Novell, Inc (http://www.novell.com)
- * Copyright 2001-2003 Ximian, Inc
- * Copyright 2003-2010 Novell, Inc.
- * Copyright (C) 2012 Xamarin Inc
- *
- * Thread start/stop adapted from Boehm's GC:
- * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
- * Copyright (c) 1996 by Silicon Graphics. All rights reserved.
- * Copyright (c) 1998 by Fergus Henderson. All rights reserved.
- * Copyright (c) 2000-2004 by Hewlett-Packard Company. All rights reserved.
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License 2.0 as published by the Free Software Foundation;
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public
- * License 2.0 along with this library; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include "config.h"
-
-#ifdef HAVE_SGEN_GC
-
-#include "utils/mono-counters.h"
-
-#include "metadata/gc-internal.h"
-#include "metadata/sgen-gc.h"
-#include "metadata/sgen-protocol.h"
-#include "metadata/mono-gc.h"
-#include "metadata/object-internals.h"
-#include "metadata/profiler-private.h"
-#include "metadata/sgen-memory-governor.h"
-
-#ifndef DISABLE_SGEN_MAJOR_COPYING
-
-#define MAJOR_SECTION_SIZE SGEN_PINNED_CHUNK_SIZE
-#define BLOCK_FOR_OBJECT(o) SGEN_PINNED_CHUNK_FOR_PTR ((o))
-#define MAJOR_SECTION_FOR_OBJECT(o) ((GCMemSection*)BLOCK_FOR_OBJECT ((o)))
-
-#define MAJOR_OBJ_IS_IN_TO_SPACE(o) (MAJOR_SECTION_FOR_OBJECT ((o))->is_to_space)
-
-static int num_major_sections = 0;
-
-static GCMemSection *section_list = NULL;
-
-static SgenPinnedAllocator pinned_allocator;
-
-static gboolean have_swept;
-
-/*
- * used when moving the objects
- */
-static char *to_space_bumper = NULL;
-static char *to_space_top = NULL;
-static GCMemSection *to_space_section = NULL;
-
-/* we get this at init */
-static int nursery_bits;
-static char *nursery_start;
-static char *nursery_end;
-
-#define ptr_in_nursery(p) (SGEN_PTR_IN_NURSERY ((p), nursery_bits, nursery_start, nursery_end))
-
-#ifdef HEAVY_STATISTICS
-static long stat_major_copy_object_failed_forwarded = 0;
-static long stat_major_copy_object_failed_pinned = 0;
-static long stat_major_copy_object_failed_large_pinned = 0;
-static long stat_major_copy_object_failed_to_space = 0;
-#endif
-
-static void*
-major_alloc_heap (mword nursery_size, mword nursery_align, int the_nursery_bits)
-{
- if (nursery_align)
- nursery_start = sgen_alloc_os_memory_aligned (nursery_size, nursery_align, SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE, "nursery");
- else
- nursery_start = sgen_alloc_os_memory (nursery_size, SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE, "nursery");
-
- nursery_end = nursery_start + nursery_size;
- nursery_bits = the_nursery_bits;
-
- return nursery_start;
-}
-
-static gboolean
-obj_is_from_pinned_alloc (char *p)
-{
- return BLOCK_FOR_OBJECT (p)->role == MEMORY_ROLE_PINNED;
-}
-
-static void
-free_pinned_object (char *obj, size_t size)
-{
- sgen_free_pinned (&pinned_allocator, obj, size);
-}
-
-/*
- * Allocate a new section of memory to be used as old generation.
- */
-static GCMemSection*
-alloc_major_section (void)
-{
- GCMemSection *section;
- int scan_starts;
-
- section = sgen_alloc_os_memory_aligned (MAJOR_SECTION_SIZE, MAJOR_SECTION_SIZE, SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE, "major heap section");
- section->next_data = section->data = (char*)section + SGEN_SIZEOF_GC_MEM_SECTION;
- g_assert (!((mword)section->data & 7));
- section->size = MAJOR_SECTION_SIZE - SGEN_SIZEOF_GC_MEM_SECTION;
- section->end_data = section->data + section->size;
- sgen_update_heap_boundaries ((mword)section->data, (mword)section->end_data);
- SGEN_LOG (3, "New major heap section: (%p-%p), total: %lld", section->data, section->end_data, (long long int)mono_gc_get_heap_size ());
- scan_starts = (section->size + SGEN_SCAN_START_SIZE - 1) / SGEN_SCAN_START_SIZE;
- section->scan_starts = sgen_alloc_internal_dynamic (sizeof (char*) * scan_starts, INTERNAL_MEM_SCAN_STARTS, TRUE);
- section->num_scan_start = scan_starts;
- section->block.role = MEMORY_ROLE_GEN1;
- section->is_to_space = TRUE;
-
- /* add to the section list */
- section->block.next = section_list;
- section_list = section;
-
- ++num_major_sections;
-
- return section;
-}
-
-static void
-free_major_section (GCMemSection *section)
-{
- SGEN_LOG (3, "Freed major section %p (%p-%p)", section, section->data, section->end_data);
- sgen_free_internal_dynamic (section->scan_starts,
- (section->size + SGEN_SCAN_START_SIZE - 1) / SGEN_SCAN_START_SIZE * sizeof (char*), INTERNAL_MEM_SCAN_STARTS);
- sgen_free_os_memory (section, MAJOR_SECTION_SIZE, SGEN_ALLOC_HEAP);
-
- --num_major_sections;
-}
-
-static void
-new_to_space_section (void)
-{
- /* FIXME: if the current to_space_section is empty, we don't
- have to allocate a new one */
-
- to_space_section = alloc_major_section ();
- to_space_bumper = to_space_section->next_data;
- to_space_top = to_space_section->end_data;
-}
-
-static void
-to_space_set_next_data (void)
-{
- g_assert (to_space_bumper >= to_space_section->next_data && to_space_bumper <= to_space_section->end_data);
- to_space_section->next_data = to_space_bumper;
-}
-
-static void
-to_space_expand (void)
-{
- if (to_space_section) {
- g_assert (to_space_top == to_space_section->end_data);
- to_space_set_next_data ();
- }
-
- new_to_space_section ();
-}
-
-static void*
-major_alloc_object (MonoVTable *vtable, int size, gboolean has_references)
-{
- char *dest = to_space_bumper;
- /* Make sure we have enough space available */
- if (dest + size > to_space_top) {
- to_space_expand ();
- (dest) = to_space_bumper;
- SGEN_ASSERT (8, dest + size <= to_space_top, "space allocation overflow dest %p size %d to-space-top %p", dest, size, to_space_top);
- }
- to_space_bumper += size;
- SGEN_ASSERT (8, to_space_bumper <= to_space_top, "to-space-bumper %p overflow to-space-top %p", to_space_bumper, to_space_top);
- to_space_section->scan_starts [(dest - (char*)to_space_section->data)/SGEN_SCAN_START_SIZE] = dest;
- /* FIXME: write vtable */
- g_assert_not_reached ();
- return dest;
-}
-
-static void
-unset_to_space (void)
-{
- /* between collections the to_space_bumper is invalidated
- because degraded allocations might occur, so we set it to
- NULL, just to make it explicit */
- to_space_bumper = NULL;
-
- /* don't unset to_space_section if we implement the FIXME in
- new_to_space_section */
- to_space_section = NULL;
-}
-
-static gboolean
-major_is_object_live (char *obj)
-{
- mword objsize;
-
- /* nursery */
- if (ptr_in_nursery (obj))
- return FALSE;
-
- objsize = SGEN_ALIGN_UP (sgen_safe_object_get_size ((MonoObject*)obj));
-
- /* LOS */
- if (objsize > SGEN_MAX_SMALL_OBJ_SIZE)
- return FALSE;
-
- /* pinned chunk */
- if (obj_is_from_pinned_alloc (obj))
- return FALSE;
-
- /* now we know it's in a major heap section */
- return MAJOR_SECTION_FOR_OBJECT (obj)->is_to_space;
-}
-
-/* size is a multiple of ALLOC_ALIGN */
-static void*
-major_alloc_small_pinned_obj (MonoVTable *vtable, size_t size, gboolean has_references)
-{
- /* FIXME: write vtable */
- g_assert_not_reached ();
- return sgen_alloc_pinned (&pinned_allocator, size);
-}
-
-/*
- * size is already rounded up and we hold the GC lock.
- */
-static void*
-major_alloc_degraded (MonoVTable *vtable, size_t size)
-{
- GCMemSection *section;
- void **p = NULL;
- g_assert (size <= SGEN_MAX_SMALL_OBJ_SIZE);
- HEAVY_STAT (++stat_objects_alloced_degraded);
- HEAVY_STAT (stat_bytes_alloced_degraded += size);
- for (section = section_list; section; section = section->block.next) {
- if ((section->end_data - section->next_data) >= size) {
- p = (void**)section->next_data;
- break;
- }
- }
- if (!p) {
- section = alloc_major_section ();
- section->is_to_space = FALSE;
- /* FIXME: handle OOM */
- p = (void**)section->next_data;
- sgen_register_major_sections_alloced (1);
- }
- section->next_data += size;
- SGEN_LOG (3, "Allocated (degraded) object %p, vtable: %p (%s), size: %zd in section %p", p, vtable, vtable->klass->name, size, section);
- *p = vtable;
- return p;
-}
-
-static inline void
-pin_major_object (char *obj, SgenGrayQueue *queue)
-{
- sgen_pin_object (obj, queue);
-}
-
-#include "sgen-major-copy-object.h"
-
-static void
-major_copy_or_mark_object (void **obj_slot, void *obj_void, SgenGrayQueue *queue)
-{
- char *forwarded;
- char *obj = obj_void;
- mword objsize;
-
- SGEN_ASSERT (9, current_collection_generation == GENERATION_OLD, "old gen parallel allocator called from a %d collection", current_collection_generation);
-
- HEAVY_STAT (++stat_copy_object_called_major);
-
- SGEN_LOG (9, "Precise copy of %p from %p", obj, obj_slot);
-
- /*
- * obj must belong to one of:
- *
- * 1. the nursery
- * 2. the LOS
- * 3. a pinned chunk
- * 4. a non-to-space section of the major heap
- * 5. a to-space section of the major heap
- *
- * In addition, objects in 1, 2 and 4 might also be pinned.
- * Objects in 1 and 4 might be forwarded.
- *
- * Before we can copy the object we must make sure that we are
- * allowed to, i.e. that the object not pinned, not already
- * forwarded, not in the nursery To Space and doesn't belong
- * to the LOS, a pinned chunk, or a to-space section.
- *
- * We are usually called for to-space objects (5) when we have
- * two remset entries for the same reference. The first entry
- * copies the object and updates the reference and the second
- * calls us with the updated reference that points into
- * to-space. There might also be other circumstances where we
- * get to-space objects.
- */
-
- if ((forwarded = SGEN_OBJECT_IS_FORWARDED (obj))) {
- SGEN_ASSERT (9, (*(MonoVTable**)SGEN_LOAD_VTABLE (obj))->gc_descr, "forwarded object %p has no gc descriptor", forwarded);
- SGEN_LOG (9, " (already forwarded to %p)", forwarded);
- HEAVY_STAT (++stat_major_copy_object_failed_forwarded);
- *obj_slot = forwarded;
- return;
- }
- if (SGEN_OBJECT_IS_PINNED (obj)) {
- SGEN_ASSERT (9, ((MonoVTable*)SGEN_LOAD_VTABLE(obj))->gc_descr, "pinned object %p has no gc descriptor", obj);
- SGEN_LOG (9, " (pinned, no change)");
- HEAVY_STAT (++stat_major_copy_object_failed_pinned);
- return;
- }
-
- if (ptr_in_nursery (obj)) {
- /* A To Space object is already on its final destination for the current collection. */
- if (sgen_nursery_is_to_space (obj)) {
- SGEN_ASSERT (9, ((MonoVTable*)SGEN_LOAD_VTABLE(obj))->gc_descr, "to space object %p has no gc descriptor", obj);
- SGEN_LOG (9, " (tospace, no change)");
- return;
- }
- goto copy;
- }
-
- /*
- * At this point we know obj is not pinned, not forwarded and
- * belongs to 2, 3, 4, or 5.
- *
- * LOS object (2) are simple, at least until we always follow
- * the rule: if objsize > SGEN_MAX_SMALL_OBJ_SIZE, pin the
- * object and return it. At the end of major collections, we
- * walk the los list and if the object is pinned, it is
- * marked, otherwise it can be freed.
- *
- * Pinned chunks (3) and major heap sections (4, 5) both
- * reside in blocks, which are always aligned, so once we've
- * eliminated LOS objects, we can just access the block and
- * see whether it's a pinned chunk or a major heap section.
- */
-
- objsize = SGEN_ALIGN_UP (sgen_safe_object_get_size ((MonoObject*)obj));
-
- if (G_UNLIKELY (objsize > SGEN_MAX_SMALL_OBJ_SIZE || obj_is_from_pinned_alloc (obj))) {
- if (SGEN_OBJECT_IS_PINNED (obj))
- return;
- SGEN_LOG (9, " (marked LOS/Pinned %p (%s), size: %td)", obj, sgen_safe_name (obj), objsize);
- binary_protocol_pin (obj, (gpointer)SGEN_LOAD_VTABLE (obj), sgen_safe_object_get_size ((MonoObject*)obj));
- SGEN_PIN_OBJECT (obj);
- GRAY_OBJECT_ENQUEUE (queue, obj);
- HEAVY_STAT (++stat_major_copy_object_failed_large_pinned);
- return;
- }
-
- /*
- * Now we know the object is in a major heap section. All we
- * need to do is check whether it's already in to-space (5) or
- * not (4).
- */
- if (MAJOR_OBJ_IS_IN_TO_SPACE (obj)) {
- SGEN_ASSERT (9, objsize <= SGEN_MAX_SMALL_OBJ_SIZE, "object %p in to space is too big, size %d", objsize);
- SGEN_LOG (9, " (already copied)");
- HEAVY_STAT (++stat_major_copy_object_failed_to_space);
- return;
- }
-
- copy:
- HEAVY_STAT (++stat_objects_copied_major);
-
- *obj_slot = copy_object_no_checks (obj, queue);
-}
-
-static void
-major_copy_or_mark_object_canonical (void **ptr, SgenGrayQueue *queue)
-{
- major_copy_or_mark_object (ptr, *ptr, queue);
-}
-
-#include "sgen-major-scan-object.h"
-
-/* FIXME: later reduce code duplication here with build_nursery_fragments().
- * We don't keep track of section fragments for non-nursery sections yet, so
- * just memset to 0.
- */
-static void
-build_section_fragments (GCMemSection *section)
-{
- int i;
- char *frag_start, *frag_end;
- size_t frag_size;
-
- /* clear scan starts */
- memset (section->scan_starts, 0, section->num_scan_start * sizeof (gpointer));
- frag_start = section->data;
- section->next_data = section->data;
- for (i = 0; i < section->pin_queue_num_entries; ++i) {
- frag_end = section->pin_queue_start [i];
- /* remove the pin bit from pinned objects */
- SGEN_UNPIN_OBJECT (frag_end);
- if (frag_end >= section->data + section->size) {
- frag_end = section->data + section->size;
- } else {
- section->scan_starts [((char*)frag_end - (char*)section->data)/SGEN_SCAN_START_SIZE] = frag_end;
- }
- frag_size = frag_end - frag_start;
- if (frag_size) {
- binary_protocol_empty (frag_start, frag_size);
- memset (frag_start, 0, frag_size);
- }
- frag_size = SGEN_ALIGN_UP (sgen_safe_object_get_size ((MonoObject*)section->pin_queue_start [i]));
- frag_start = (char*)section->pin_queue_start [i] + frag_size;
- section->next_data = MAX (section->next_data, frag_start);
- }
- frag_end = section->end_data;
- frag_size = frag_end - frag_start;
- if (frag_size) {
- binary_protocol_empty (frag_start, frag_size);
- memset (frag_start, 0, frag_size);
- }
-}
-
-static void
-sweep_pinned_objects_callback (char *ptr, size_t size, void *data)
-{
- if (SGEN_OBJECT_IS_PINNED (ptr)) {
- SGEN_UNPIN_OBJECT (ptr);
- SGEN_LOG (6, "Unmarked pinned object %p (%s)", ptr, sgen_safe_name (ptr));
- } else {
- SGEN_LOG (6, "Freeing unmarked pinned object %p (%s)", ptr, sgen_safe_name (ptr));
- free_pinned_object (ptr, size);
- }
-}
-
-static void
-sweep_pinned_objects (void)
-{
- sgen_pinned_scan_objects (&pinned_allocator, sweep_pinned_objects_callback, NULL);
-}
-
-static void
-major_iterate_objects (gboolean non_pinned, gboolean pinned, IterateObjectCallbackFunc callback, void *data)
-{
- if (non_pinned) {
- GCMemSection *section;
- for (section = section_list; section; section = section->block.next)
- sgen_scan_area_with_callback (section->data, section->end_data, callback, data, FALSE);
- }
- if (pinned)
- sgen_pinned_scan_objects (&pinned_allocator, callback, data);
-}
-
-static void
-major_free_non_pinned_object (char *obj, size_t size)
-{
- memset (obj, 0, size);
-}
-
-static void
-pin_pinned_object_callback (void *addr, size_t slot_size, SgenGrayQueue *queue)
-{
- binary_protocol_pin (addr, (gpointer)SGEN_LOAD_VTABLE (addr), sgen_safe_object_get_size ((MonoObject*)addr));
- if (!SGEN_OBJECT_IS_PINNED (addr))
- sgen_pin_stats_register_object ((char*) addr, sgen_safe_object_get_size ((MonoObject*) addr));
- SGEN_PIN_OBJECT (addr);
- GRAY_OBJECT_ENQUEUE (queue, addr);
- SGEN_LOG (6, "Marked pinned object %p (%s) from roots", addr, sgen_safe_name (addr));
-}
-
-static void
-major_find_pin_queue_start_ends (SgenGrayQueue *queue)
-{
- GCMemSection *section;
-
- for (section = section_list; section; section = section->block.next)
- sgen_find_section_pin_queue_start_end (section);
- sgen_pinned_scan_pinned_objects (&pinned_allocator, (IterateObjectCallbackFunc)pin_pinned_object_callback, queue);
-}
-
-static void
-major_pin_objects (SgenGrayQueue *queue)
-{
- GCMemSection *section;
- ScanCopyContext ctx = { NULL, NULL, queue };
-
- for (section = section_list; section; section = section->block.next)
- sgen_pin_objects_in_section (section, ctx);
-}
-
-static void
-major_init_to_space (void)
-{
- new_to_space_section ();
-}
-
-static void
-major_sweep (void)
-{
- GCMemSection *section, *prev_section;
-
- to_space_set_next_data ();
- unset_to_space ();
-
- /* unpin objects from the pinned chunks and free the unmarked ones */
- sweep_pinned_objects ();
-
- sgen_pinned_update_heap_boundaries (&pinned_allocator);
-
- /* free the unused sections */
- prev_section = NULL;
- for (section = section_list; section;) {
- GCMemSection *this_section = section;
-
- /* to_space doesn't need handling here */
- if (section->is_to_space) {
- section->is_to_space = FALSE;
- prev_section = section;
- section = section->block.next;
- goto update;
- }
- /* no pinning object, so the section is free */
- if (!section->pin_queue_num_entries) {
- GCMemSection *to_free;
- g_assert (!section->pin_queue_start);
- if (prev_section)
- prev_section->block.next = section->block.next;
- else
- section_list = section->block.next;
- to_free = section;
- section = section->block.next;
- free_major_section (to_free);
- continue;
- } else {
- SGEN_LOG (6, "Section %p has still pinned objects (%d)", section, section->pin_queue_num_entries);
- build_section_fragments (section);
- }
- prev_section = section;
- section = section->block.next;
-
- update:
- sgen_update_heap_boundaries ((mword)this_section->data, (mword)this_section->data + this_section->size);
- }
-
- have_swept = TRUE;
-}
-
-static void
-major_check_scan_starts (void)
-{
- GCMemSection *section;
- for (section = section_list; section; section = section->block.next)
- sgen_check_section_scan_starts (section);
-}
-
-static void
-major_dump_heap (FILE *heap_dump_file)
-{
- GCMemSection *section;
- for (section = section_list; section; section = section->block.next)
- sgen_dump_section (section, "old");
- /* FIXME: dump pinned sections, too */
-}
-
-static gint64
-major_get_used_size (void)
-{
- gint64 tot = 0;
- GCMemSection *section;
- for (section = section_list; section; section = section->block.next) {
- /* this is approximate... */
- tot += section->next_data - section->data;
- }
- return tot;
-}
-
-/* only valid during minor collections */
-static int old_num_major_sections;
-
-static void
-major_start_nursery_collection (void)
-{
- old_num_major_sections = num_major_sections;
-
- if (!to_space_section) {
- new_to_space_section ();
- } else {
- /* we might have done degraded allocation since the
- last collection */
- g_assert (to_space_bumper <= to_space_section->next_data);
- to_space_bumper = to_space_section->next_data;
-
- to_space_section->is_to_space = TRUE;
- }
-}
-
-static void
-major_finish_nursery_collection (void)
-{
- GCMemSection *section;
- int sections_alloced;
-
- to_space_set_next_data ();
-
- for (section = section_list; section; section = section->block.next)
- section->is_to_space = FALSE;
-
- sections_alloced = num_major_sections - old_num_major_sections;
- sgen_register_major_sections_alloced (sections_alloced);
-}
-
-static void
-major_finish_major_collection (void)
-{
-}
-
-static gboolean
-major_ptr_is_in_non_pinned_space (char *ptr, char **start)
-{
- GCMemSection *section;
-
- // FIXME:
- *start = NULL;
- for (section = section_list; section;) {
- if (ptr >= section->data && ptr < section->data + section->size)
- return TRUE;
- section = section->block.next;
- }
- return FALSE;
-}
-
-static void
-major_report_pinned_memory_usage (void)
-{
- sgen_report_pinned_mem_usage (&pinned_allocator);
-}
-
-static int
-get_num_major_sections (void)
-{
- return num_major_sections;
-}
-
-void
-sgen_copying_init (SgenMajorCollector *collector)
-{
-#ifdef HEAVY_STATISTICS
- mono_counters_register ("# major copy_object() failed forwarded", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_copy_object_failed_forwarded);
- mono_counters_register ("# major copy_object() failed pinned", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_copy_object_failed_pinned);
- mono_counters_register ("# major copy_object() failed large or pinned chunk", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_copy_object_failed_large_pinned);
- mono_counters_register ("# major copy_object() failed to space", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_major_copy_object_failed_to_space);
-#endif
-
- collector->section_size = MAJOR_SECTION_SIZE;
- collector->supports_cardtable = FALSE;
- collector->is_parallel = FALSE;
-
- collector->have_swept = &have_swept;
-
- collector->alloc_heap = major_alloc_heap;
- collector->is_object_live = major_is_object_live;
- collector->alloc_small_pinned_obj = major_alloc_small_pinned_obj;
- collector->alloc_degraded = major_alloc_degraded;
- collector->alloc_object = major_alloc_object;
- collector->free_pinned_object = free_pinned_object;
- collector->iterate_objects = major_iterate_objects;
- collector->free_non_pinned_object = major_free_non_pinned_object;
- collector->find_pin_queue_start_ends = major_find_pin_queue_start_ends;
- collector->pin_objects = major_pin_objects;
- collector->pin_major_object = pin_major_object;
- collector->init_to_space = major_init_to_space;
- collector->sweep = major_sweep;
- collector->check_scan_starts = major_check_scan_starts;
- collector->dump_heap = major_dump_heap;
- collector->get_used_size = major_get_used_size;
- collector->start_nursery_collection = major_start_nursery_collection;
- collector->finish_nursery_collection = major_finish_nursery_collection;
- collector->finish_major_collection = major_finish_major_collection;
- collector->ptr_is_in_non_pinned_space = major_ptr_is_in_non_pinned_space;
- collector->obj_is_from_pinned_alloc = obj_is_from_pinned_alloc;
- collector->report_pinned_memory_usage = major_report_pinned_memory_usage;
- collector->get_num_major_sections = get_num_major_sections;
- collector->handle_gc_param = NULL;
- collector->print_gc_param_usage = NULL;
-
- collector->major_ops.copy_or_mark_object = major_copy_or_mark_object_canonical;
- collector->major_ops.scan_object = major_scan_object;
-}
-
-#else /* DISABLE_SGEN_MAJOR_COPYING */
-
-void
-sgen_copying_init (SgenMajorCollector *collector)
-{
- fprintf (stderr, "Error: Mono was configured using --enable-minimal=sgen_copying.\n");
- exit (1);
-}
-
-#endif /* DISABLE_SGEN_MAJOR_COPYING */
-
-#endif
View
483 mono/metadata/sgen-pinned-allocator.c
@@ -1,483 +0,0 @@
-/*
- * sgen-pinned-allocator.c: Allocator for small pinned objects.
- * Only used in the copying major collector.
- *
- * Author:
- * Paolo Molaro (lupus@ximian.com)
- *
- * Copyright 2005-2010 Novell, Inc (http://www.novell.com)
- *
- * Thread start/stop adapted from Boehm's GC:
- * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
- * Copyright (c) 1996 by Silicon Graphics. All rights reserved.
- * Copyright (c) 1998 by Fergus Henderson. All rights reserved.
- * Copyright (c) 2000-2004 by Hewlett-Packard Company. All rights reserved.
- * Copyright 2001-2003 Ximian, Inc
- * Copyright 2003-2010 Novell, Inc.
- * Copyright (C) 2012 Xamarin Inc
- *
- * This library is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Library General Public
- * License 2.0 as published by the Free Software Foundation;
- *
- * This library is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Library General Public License for more details.
- *
- * You should have received a copy of the GNU Library General Public
- * License 2.0 along with this library; if not, write to the Free
- * Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include "config.h"
-
-#ifdef HAVE_SGEN_GC
-
-#include "utils/mono-counters.h"
-#include "metadata/sgen-gc.h"
-#include "metadata/sgen-memory-governor.h"
-
-/* Pinned objects are allocated in the LOS space if bigger than half a page
- * or from freelists otherwise. We assume that pinned objects are relatively few
- * and they have a slow dying speed (like interned strings, thread objects).
- * As such they will be collected only at major collections.
- * free lists are not global: when we need memory we allocate a PinnedChunk.
- * Each pinned chunk is made of several pages, the first of which is used
- * internally for bookkeeping (here think of a page as 4KB). The bookkeeping
- * includes the freelists vectors and info about the object size of each page
- * in the pinned chunk. So, when needed, a free page is found in a pinned chunk,
- * a size is assigned to it, the page is divided in the proper chunks and each
- * chunk is added to the freelist. To not waste space, the remaining space in the
- * first page is used as objects of size 16 or 32 (need to measure which are more
- * common).
- * We use this same structure to allocate memory used internally by the GC, so
- * we never use malloc/free if we need to alloc during collection: the world is stopped
- * and malloc/free will deadlock.
- * When we want to iterate over pinned objects, we just scan a page at a time
- * linearly according to the size of objects in the page: the next pointer used to link
- * the items in the freelist uses the same word as the vtable. Since we keep freelists
- * for each pinned chunk, if the word points outside the pinned chunk it means
- * it is an object.
- * We could avoid this expensive scanning in creative ways. We could have a policy
- * of putting in the pinned space only objects we know about that have no struct fields
- * with references and we can easily use even an expensive write barrier for them,
- * since pointer writes on such objects should be rare.
- * The best compromise is to just alloc interned strings and System.MonoType in them.
- * It would be nice to allocate MonoThread in it, too: must check that we properly
- * use write barriers so we don't have to do any expensive scanning of the whole pinned
- * chunk list during minor collections. We can avoid it now because we alloc in it only
- * reference-free objects.
- */
-struct _SgenPinnedChunk {
- SgenBlock block;
- int num_pages;
- SgenPinnedAllocator *allocator;
- int *page_sizes; /* a 0 means the page is still unused */
- void **free_list;
- SgenPinnedChunk *free_list_nexts [SGEN_PINNED_FREELIST_NUM_SLOTS];
- void *start_data;
- void *data [1]; /* page sizes and free lists are stored here */
-};
-
-#define PINNED_FIRST_SLOT_SIZE (sizeof (gpointer) * 4)
-#define MAX_FREELIST_SIZE 8192
-
-/* This is a fixed value used for pinned chunks, not the system pagesize */
-#define FREELIST_PAGESIZE (16*1024)
-
-/* keep each size a multiple of ALLOC_ALIGN */
-/* on 64 bit systems 8 is likely completely unused. */
-static const int freelist_sizes [] = {
- 8, 16, 24, 32, 40, 48, 64, 80,
- 96, 128, 160, 192, 224, 256, 320, 384,
- 448, 512, 584, 680, 816, 1024, 1360, 2048,
- 2336, 2728, 3272, 4096, 5456, 8192 };
-
-#define LARGE_PINNED_MEM_HEADER_MAGIC 0x7d289f3a
-
-/* FIXME: Do we even need these anymore? Large objects are always
- allocated in the LOS. */
-typedef struct _LargePinnedMemHeader LargePinnedMemHeader;
-struct _LargePinnedMemHeader {
- guint32 magic;
- size_t size;
- double data[0];
-};
-
-static long long pinned_chunk_bytes_alloced = 0;
-static long long large_pinned_bytes_alloced = 0;
-
-#ifdef HEAVY_STATISTICS
-static long long stat_pinned_alloc = 0;
-#endif
-
-/*
- * Debug reporting.
- */
-static void
-report_pinned_chunk (SgenPinnedChunk *chunk, int seq) {
- void **p;
- int i, free_pages, num_free, free_mem;
- free_pages = 0;
- for (i = 0; i < chunk->num_pages; ++i) {
- if (!chunk->page_sizes [i])
- free_pages++;
- }
- printf ("Pinned chunk %d at %p, size: %d, pages: %d, free: %d\n", seq, chunk, chunk->num_pages * FREELIST_PAGESIZE, chunk->num_pages, free_pages);
- free_mem = FREELIST_PAGESIZE * free_pages;
- for (i = 0; i < SGEN_PINNED_FREELIST_NUM_SLOTS; ++i) {
- if (!chunk->free_list [i])
- continue;
- num_free = 0;
- p = chunk->free_list [i];
- while (p) {
- num_free++;
- p = *p;
- }
- printf ("\tfree list of size %d, %d items\n", freelist_sizes [i], num_free);
- free_mem += freelist_sizes [i] * num_free;
- }
- printf ("\tfree memory in chunk: %d\n", free_mem);
-}
-
-/*
- * Debug reporting.
- */
-void
-sgen_report_pinned_mem_usage (SgenPinnedAllocator *alc)
-{
- SgenPinnedChunk *chunk;
- int i = 0;
- for (chunk = alc->chunk_list; chunk; chunk = chunk->block.next)
- report_pinned_chunk (chunk, i++);
-}
-
-/*
- * Find the slot number in the freelist for memory chunks that
- * can contain @size objects.
- */
-static int
-slot_for_size (size_t size)
-{
- int slot;
- /* do a binary search or lookup table later. */
- for (slot = 0; slot < SGEN_PINNED_FREELIST_NUM_SLOTS; ++slot) {
- if (freelist_sizes [slot] >= size)
- return slot;
- }
- g_assert_not_reached ();
- return -1;
-}
-
-/*
- * Build a free list for @size memory chunks from the memory area between
- * start_page and end_page.
- */
-static void
-build_freelist (SgenPinnedAllocator *alc, SgenPinnedChunk *chunk, int slot, int size, char *start_page, char *end_page)
-{
- void **p, **end;
- int count = 0;
- /*g_print ("building freelist for slot %d, size %d in %p\n", slot, size, chunk);*/
- p = (void**)start_page;
- end = (void**)(end_page - size);
- g_assert (!chunk->free_list [slot]);
- chunk->free_list [slot] = p;
- while ((char*)p + size <= (char*)end) {
- count++;
- *p = (void*)((char*)p + size);
- p = *p;
- }
- *p = NULL;
- /*g_print ("%d items created, max: %d\n", count, (end_page - start_page) / size);*/
-
- g_assert (!chunk->free_list_nexts [slot]);
- chunk->free_list_nexts [slot] = alc->free_lists [slot];
- alc->free_lists [slot] = chunk;
-}
-
-static SgenPinnedChunk*
-alloc_pinned_chunk (SgenPinnedAllocator *alc)
-{
- SgenPinnedChunk *chunk;
- int offset;
- int size = SGEN_PINNED_CHUNK_SIZE;
-
- chunk = sgen_alloc_os_memory_aligned (size, size, SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE, "pinned chunk");
- chunk->block.role = MEMORY_ROLE_PINNED;
-
- sgen_update_heap_boundaries ((mword)chunk, ((mword)chunk + size));
-
- pinned_chunk_bytes_alloced += size;
-
- /* setup the bookkeeping fields */
- chunk->num_pages = size / FREELIST_PAGESIZE;
- offset = G_STRUCT_OFFSET (SgenPinnedChunk, data);
- chunk->page_sizes = (void*)((char*)chunk + offset);
- offset += sizeof (int) * chunk->num_pages;
- offset = SGEN_ALIGN_UP (offset);
- chunk->free_list = (void*)((char*)chunk + offset);
- offset += sizeof (void*) * SGEN_PINNED_FREELIST_NUM_SLOTS;
- offset = SGEN_ALIGN_UP (offset);
- chunk->start_data = (void*)((char*)chunk + offset);
-
- /* allocate the first page to the freelist */
- chunk->page_sizes [0] = PINNED_FIRST_SLOT_SIZE;
- build_freelist (alc, chunk, slot_for_size (PINNED_FIRST_SLOT_SIZE), PINNED_FIRST_SLOT_SIZE,
- chunk->start_data, ((char*)chunk + FREELIST_PAGESIZE));
- SGEN_LOG (4, "Allocated pinned chunk %p, size: %d", chunk, size);
-
- chunk->block.next = alc->chunk_list;
- alc->chunk_list = chunk;
-
- chunk->allocator = alc;
-
- return chunk;
-}
-
-/* Must be called with an empty freelist for the given slot. */
-static gboolean
-populate_chunk_page (SgenPinnedAllocator *alc, SgenPinnedChunk *chunk, int slot)
-{
- int size = freelist_sizes [slot];
- int i;
- g_assert (!chunk->free_list [slot]);
- g_assert (!chunk->free_list_nexts [slot]);
- for (i = 0; i < chunk->num_pages; ++i) {
- if (chunk->page_sizes [i])
- continue;
- chunk->page_sizes [i] = size;
- build_freelist (alc, chunk, slot, size, (char*)chunk + FREELIST_PAGESIZE * i, (char*)chunk + FREELIST_PAGESIZE * (i + 1));
- return TRUE;
- }
- return FALSE;
-}
-
-static void*
-alloc_from_slot (SgenPinnedAllocator *alc, int slot)
-{
- SgenPinnedChunk *pchunk;
- size_t size = freelist_sizes [slot];
-
- if (alc->delayed_free_lists [slot]) {
- void **p;
- do {
- p = alc->delayed_free_lists [slot];
- } while (SGEN_CAS_PTR (&alc->delayed_free_lists [slot], *p, p) != p);
- memset (p, 0, size);
- return p;
- }
-
- restart:
- pchunk = alc->free_lists [slot];
- if (pchunk) {
- void **p = pchunk->free_list [slot];
- void *next;
-
- g_assert (p);
-
- next = *p;
- pchunk->free_list [slot] = next;
-
- if (!next) {
- alc->free_lists [slot] = pchunk->free_list_nexts [slot];
- pchunk->free_list_nexts [slot] = NULL;
- }
-
- memset (p, 0, size);
- return p;
- }
-
- for (pchunk = alc->chunk_list; pchunk; pchunk = pchunk->block.next) {
- if (populate_chunk_page (alc, pchunk, slot))
- goto restart;
- }
-
- pchunk = alloc_pinned_chunk (alc);
- /* FIXME: handle OOM */
- if (pchunk->free_list [slot])
- goto restart;
- if (!populate_chunk_page (alc, pchunk, slot))
- g_assert_not_reached ();
- goto restart;
-}
-
-/* used for the GC-internal data structures */
-void*
-sgen_alloc_pinned (SgenPinnedAllocator *alc, size_t size)
-{
- int slot;
- void *res = NULL;
-
- HEAVY_STAT (++stat_pinned_alloc);
-
- if (size > freelist_sizes [SGEN_PINNED_FREELIST_NUM_SLOTS - 1]) {
- LargePinnedMemHeader *mh;
-
- size += sizeof (LargePinnedMemHeader);
- mh = sgen_alloc_os_memory (size, SGEN_ALLOC_HEAP | SGEN_ALLOC_ACTIVATE, "large pinned object");
- mh->magic = LARGE_PINNED_MEM_HEADER_MAGIC;
- mh->size = size;
- /* FIXME: do a CAS here */
- large_pinned_bytes_alloced += size;
- return mh->data;
- }
-
- slot = slot_for_size (size);
- g_assert (size <= freelist_sizes [slot]);
- res = alloc_from_slot (alc, slot);
-
- return res;
-}
-
-static void
-free_from_slot (SgenPinnedAllocator *alc, void *addr, int slot)
-{
- SgenPinnedChunk *pchunk = (SgenPinnedChunk*)SGEN_PINNED_CHUNK_FOR_PTR (addr);
- void **p = addr;
- void *next;
-
- g_assert (addr >= (void*)pchunk && (char*)addr < (char*)pchunk + pchunk->num_pages * FREELIST_PAGESIZE);
-
- next = pchunk->free_list [slot];
- *p = next;
- pchunk->free_list [slot] = p;
-
- if (!next) {
- g_assert (!pchunk->free_list_nexts [slot]);
- pchunk->free_list_nexts [slot] = alc->free_lists [slot];
- alc->free_lists [slot] = pchunk;
- }
-}
-
-void
-sgen_free_pinned (SgenPinnedAllocator *alc, void *addr, size_t size)
-{
- LargePinnedMemHeader *mh;
-
- if (!addr)
- return;
-
- if (size <= freelist_sizes [SGEN_PINNED_FREELIST_NUM_SLOTS - 1]) {
- int slot = slot_for_size (size);
- free_from_slot (alc, addr, slot);
- return;
- }
-
- mh = (LargePinnedMemHeader*)((char*)addr - G_STRUCT_OFFSET (LargePinnedMemHeader, data));
- g_assert (mh->magic == LARGE_PINNED_MEM_HEADER_MAGIC);
- g_assert (mh->size == size + sizeof (LargePinnedMemHeader));
- /* FIXME: do a CAS */
- large_pinned_bytes_alloced -= mh->size;
- sgen_free_os_memory (mh, mh->size, SGEN_ALLOC_HEAP);
-}
-
-void
-sgen_init_pinned_allocator (void)
-{
- g_assert (SGEN_PINNED_FREELIST_NUM_SLOTS == sizeof (freelist_sizes) / sizeof (freelist_sizes [0]));
-
-#ifdef HEAVY_STATISTICS
- mono_counters_register ("Pinned allocs", MONO_COUNTER_GC | MONO_COUNTER_LONG, &stat_pinned_alloc);
-#endif
-}
-
-void
-sgen_pinned_scan_objects (SgenPinnedAllocator *alc, IterateObjectCallbackFunc callback, void *callback_data)
-{
- SgenPinnedChunk *chunk;
- int i, obj_size;
- char *p, *endp;
- void **ptr;
- void *end_chunk;
- for (chunk = alc->chunk_list; chunk; chunk = chunk->block.next) {
- end_chunk = (char*)chunk + chunk->num_pages * FREELIST_PAGESIZE;
- SGEN_LOG (6, "Scanning pinned chunk %p (range: %p-%p)", chunk, chunk->start_data, end_chunk);
- for (i = 0; i < chunk->num_pages; ++i) {
- obj_size = chunk->page_sizes [i];
- if (!obj_size)
- continue;
- p = i? (char*)chunk + i * FREELIST_PAGESIZE: chunk->start_data;
- endp = i? p + FREELIST_PAGESIZE: (char*)chunk + FREELIST_PAGESIZE;
- SGEN_LOG (6, "Page %d (size: %d, range: %p-%p)", i, obj_size, p, endp);
- while (p + obj_size <= endp) {
- ptr = (void**)p;
- /* if the first word (the vtable) is outside the chunk we have an object */
- if (*ptr && (*ptr < (void*)chunk || *ptr >= end_chunk))
- callback ((char*)ptr, obj_size, callback_data);
- p += obj_size;
- }
- }
- }
-}
-
-void
-sgen_pinned_update_heap_boundaries (SgenPinnedAllocator *alc)
-{
- SgenPinnedChunk *chunk;
- for (chunk = alc->chunk_list; chunk; chunk = chunk->block.next) {
- char *end_chunk = (char*)chunk + chunk->num_pages * FREELIST_PAGESIZE;
- sgen_update_heap_boundaries ((mword)chunk, (mword)end_chunk);
- }
-}
-
-/*
- * the array of pointers from @start to @end contains conservative
- * pointers to objects inside @chunk: mark each referenced object
- * with the PIN bit.
- */
-static void
-mark_pinned_from_addresses (SgenPinnedChunk *chunk, void **start, void **end, IterateObjectCallbackFunc callback, void *callback_data)
-{
- for (; start < end; start++) {
- char *addr = *start;
- int offset = (char*)addr - (char*)chunk;
- int page = offset / FREELIST_PAGESIZE;
- int obj_offset = page == 0? offset - ((char*)chunk->start_data - (char*)chunk): offset % FREELIST_PAGESIZE;
- int slot_size = chunk->page_sizes [page];
- void **ptr;
- /* the page is not allocated */
- if (!slot_size)
- continue;
- /* would be faster if we restrict the sizes to power of two,
- * but that's a waste of memory: need to measure. it could reduce
- * fragmentation since there are less pages needed, if for example
- * someone interns strings of each size we end up with one page per
- * interned string (still this is just ~40 KB): with more fine-grained sizes
- * this increases the number of used pages.
- */
- if (page == 0) {
- obj_offset /= slot_size;
- obj_offset *= slot_size;
- addr = (char*)chunk->start_data + obj_offset;
- } else {
- obj_offset /= slot_size;
- obj_offset *= slot_size;
- addr = (char*)chunk + page * FREELIST_PAGESIZE + obj_offset;
- }
- ptr = (void**)addr;
- /* if the vtable is inside the chunk it's on the freelist, so skip */
- /* FIXME: is it possible that we're pinning objects more than once here? */
- if (*ptr && (*ptr < (void*)chunk->start_data || *ptr > (void*)((char*)chunk + chunk->num_pages * FREELIST_PAGESIZE)))
- callback (addr, slot_size, callback_data);
- }
-}
-
-void
-sgen_pinned_scan_pinned_objects (SgenPinnedAllocator *alc, IterateObjectCallbackFunc callback, void *callback_data)
-{
- SgenPinnedChunk *chunk;
-
- /* look for pinned addresses for pinned-alloc objects */
- SGEN_LOG (6, "Pinning from pinned-alloc objects");
- for (chunk = alc->chunk_list; chunk; chunk = chunk->block.next) {
- int num_pinned;
- void **pinned = sgen_find_optimized_pin_queue_area (chunk->start_data,
- (char*)chunk + chunk->num_pages * FREELIST_PAGESIZE, &num_pinned);
- if (num_pinned)
- mark_pinned_from_addresses (chunk, pinned, pinned + num_pinned, callback, callback_data);
- }
-}
-
-#endif

0 comments on commit db374b7

Please sign in to comment.
Something went wrong with that request. Please try again.