From 9b968f4c639439d1f36d33851ee2870cab241225 Mon Sep 17 00:00:00 2001 From: Ivan Maidanski Date: Sun, 11 Mar 2012 00:04:34 +0400 Subject: [PATCH] include/boehmgc: Update from BDWGC v7.3alpha1-20120308 snapshot --- include/boehmgc/gc.h | 274 +++++++++++++++++++++---------------- include/boehmgc/gc_confi.h | 34 ++++- include/boehmgc/gc_gcj.h | 21 +-- 3 files changed, 197 insertions(+), 132 deletions(-) diff --git a/include/boehmgc/gc.h b/include/boehmgc/gc.h index 87e2bb0..180d3c7 100644 --- a/include/boehmgc/gc.h +++ b/include/boehmgc/gc.h @@ -72,7 +72,8 @@ GC_API unsigned GC_CALL GC_get_version(void); /* Public read-only variables */ /* The supplied getter functions are preferred for new code. */ -GC_API GC_word GC_gc_no;/* Counter incremented per collection. */ +GC_API GC_ATTR_DEPRECATED GC_word GC_gc_no; + /* Counter incremented per collection. */ /* Includes empty GCs at startup. */ GC_API GC_word GC_CALL GC_get_gc_no(void); /* GC_get_gc_no() is unsynchronized, so */ @@ -80,18 +81,22 @@ GC_API GC_word GC_CALL GC_get_gc_no(void); /* avoid data races on multiprocessors. */ #ifdef GC_THREADS - GC_API int GC_parallel; + GC_API GC_ATTR_DEPRECATED int GC_parallel; /* GC is parallelized for performance on */ /* multiprocessors. Currently set only */ /* implicitly if collector is built with */ /* PARALLEL_MARK defined and if either: */ /* Env variable GC_NPROC is set to > 1, or */ /* GC_NPROC is not set and this is an MP. */ - /* If GC_parallel is set, incremental */ + /* If GC_parallel is on (non-zero), incremental */ /* collection is only partially functional, */ - /* and may not be desirable. This getter does */ + /* and may not be desirable. The getter does */ /* not use or need synchronization (i.e. */ - /* acquiring the GC lock). */ + /* acquiring the GC lock). Starting from */ + /* GC v7.3, GC_parallel value is equal to the */ + /* number of marker threads minus one (i.e. */ + /* number of existing parallel marker threads */ + /* excluding the initiating one). */ GC_API int GC_CALL GC_get_parallel(void); #endif @@ -100,7 +105,7 @@ GC_API GC_word GC_CALL GC_get_gc_no(void); /* The supplied setter and getter functions are preferred for new code. */ typedef void * (GC_CALLBACK * GC_oom_func)(size_t /* bytes_requested */); -GC_API GC_oom_func GC_oom_fn; +GC_API GC_ATTR_DEPRECATED GC_oom_func GC_oom_fn; /* When there is insufficient memory to satisfy */ /* an allocation request, we return */ /* (*GC_oom_fn)(size). By default this just */ @@ -110,11 +115,11 @@ GC_API GC_oom_func GC_oom_fn; /* object. GC_oom_fn must not be 0. */ /* Both the supplied setter and the getter */ /* acquire the GC lock (to avoid data races). */ -GC_API void GC_CALL GC_set_oom_fn(GC_oom_func); +GC_API void GC_CALL GC_set_oom_fn(GC_oom_func) GC_ATTR_NONNULL(1); GC_API GC_oom_func GC_CALL GC_get_oom_fn(void); typedef void (GC_CALLBACK * GC_on_heap_resize_proc)(GC_word /* new_size */); -GC_API GC_on_heap_resize_proc GC_on_heap_resize; +GC_API GC_ATTR_DEPRECATED GC_on_heap_resize_proc GC_on_heap_resize; /* Invoked when the heap grows or shrinks. */ /* Called with the world stopped (and the */ /* allocation lock held). May be 0. */ @@ -123,7 +128,7 @@ GC_API GC_on_heap_resize_proc GC_CALL GC_get_on_heap_resize(void); /* Both the supplied setter and the getter */ /* acquire the GC lock (to avoid data races). */ -GC_API int GC_find_leak; +GC_API GC_ATTR_DEPRECATED int GC_find_leak; /* Do not actually garbage collect, but simply */ /* report inaccessible memory that was not */ /* deallocated with GC_free. 
Initial value */ @@ -134,7 +139,7 @@ GC_API int GC_find_leak; GC_API void GC_CALL GC_set_find_leak(int); GC_API int GC_CALL GC_get_find_leak(void); -GC_API int GC_all_interior_pointers; +GC_API GC_ATTR_DEPRECATED int GC_all_interior_pointers; /* Arrange for pointers to object interiors to */ /* be recognized as valid. Typically should */ /* not be changed after GC initialization (in */ @@ -150,7 +155,7 @@ GC_API int GC_all_interior_pointers; GC_API void GC_CALL GC_set_all_interior_pointers(int); GC_API int GC_CALL GC_get_all_interior_pointers(void); -GC_API int GC_finalize_on_demand; +GC_API GC_ATTR_DEPRECATED int GC_finalize_on_demand; /* If nonzero, finalizers will only be run in */ /* response to an explicit GC_invoke_finalizers */ /* call. The default is determined by whether */ @@ -160,7 +165,7 @@ GC_API int GC_finalize_on_demand; GC_API void GC_CALL GC_set_finalize_on_demand(int); GC_API int GC_CALL GC_get_finalize_on_demand(void); -GC_API int GC_java_finalization; +GC_API GC_ATTR_DEPRECATED int GC_java_finalization; /* Mark objects reachable from finalizable */ /* objects in a separate post-pass. This makes */ /* it a bit safer to use non-topologically- */ @@ -173,7 +178,7 @@ GC_API void GC_CALL GC_set_java_finalization(int); GC_API int GC_CALL GC_get_java_finalization(void); typedef void (GC_CALLBACK * GC_finalizer_notifier_proc)(void); -GC_API GC_finalizer_notifier_proc GC_finalizer_notifier; +GC_API GC_ATTR_DEPRECATED GC_finalizer_notifier_proc GC_finalizer_notifier; /* Invoked by the collector when there are */ /* objects to be finalized. Invoked at most */ /* once per GC cycle. Never invoked unless */ @@ -186,23 +191,25 @@ GC_API GC_finalizer_notifier_proc GC_finalizer_notifier; GC_API void GC_CALL GC_set_finalizer_notifier(GC_finalizer_notifier_proc); GC_API GC_finalizer_notifier_proc GC_CALL GC_get_finalizer_notifier(void); -GC_API int GC_dont_gc; /* != 0 ==> Don't collect. In versions 6.2a1+, */ +GC_API GC_ATTR_DEPRECATED int GC_dont_gc; + /* != 0 ==> Don't collect. In versions 6.2a1+, */ /* this overrides explicit GC_gcollect() calls. */ /* Used as a counter, so that nested enabling */ /* and disabling work correctly. Should */ /* normally be updated with GC_enable() and */ - /* GC_disable() calls. */ - /* Direct assignment to GC_dont_gc is */ - /* deprecated. */ + /* GC_disable() calls. Direct assignment to */ + /* GC_dont_gc is deprecated. To check whether */ + /* GC is disabled, GC_is_disabled() is */ + /* preferred for new code. */ -GC_API int GC_dont_expand; - /* Don't expand the heap unless explicitly */ +GC_API GC_ATTR_DEPRECATED int GC_dont_expand; + /* Do not expand the heap unless explicitly */ /* requested or forced to. The setter and */ /* getter are unsynchronized. */ GC_API void GC_CALL GC_set_dont_expand(int); GC_API int GC_CALL GC_get_dont_expand(void); -GC_API int GC_use_entire_heap; +GC_API GC_ATTR_DEPRECATED int GC_use_entire_heap; /* Causes the non-incremental collector to use the */ /* entire heap before collecting. This was the only */ /* option for GC versions < 5.0. This sometimes */ @@ -213,7 +220,8 @@ GC_API int GC_use_entire_heap; /* frequencies, and hence fewer instructions executed */ /* in the collector. */ -GC_API int GC_full_freq; /* Number of partial collections between */ +GC_API GC_ATTR_DEPRECATED int GC_full_freq; + /* Number of partial collections between */ /* full collections. Matters only if */ /* GC_incremental is set. 
*/ /* Full collections are also triggered if */ @@ -229,7 +237,7 @@ GC_API int GC_full_freq; /* Number of partial collections between */ GC_API void GC_CALL GC_set_full_freq(int); GC_API int GC_CALL GC_get_full_freq(void); -GC_API GC_word GC_non_gc_bytes; +GC_API GC_ATTR_DEPRECATED GC_word GC_non_gc_bytes; /* Bytes not considered candidates for */ /* collection. Used only to control scheduling */ /* of collections. Updated by */ @@ -242,7 +250,7 @@ GC_API GC_word GC_non_gc_bytes; GC_API void GC_CALL GC_set_non_gc_bytes(GC_word); GC_API GC_word GC_CALL GC_get_non_gc_bytes(void); -GC_API int GC_no_dls; +GC_API GC_ATTR_DEPRECATED int GC_no_dls; /* Don't register dynamic library data segments. */ /* Wizards only. Should be used only if the */ /* application explicitly registers all roots. */ @@ -254,7 +262,7 @@ GC_API int GC_no_dls; GC_API void GC_CALL GC_set_no_dls(int); GC_API int GC_CALL GC_get_no_dls(void); -GC_API GC_word GC_free_space_divisor; +GC_API GC_ATTR_DEPRECATED GC_word GC_free_space_divisor; /* We try to make sure that we allocate at */ /* least N/GC_free_space_divisor bytes between */ /* collections, where N is twice the number */ @@ -274,7 +282,7 @@ GC_API GC_word GC_free_space_divisor; GC_API void GC_CALL GC_set_free_space_divisor(GC_word); GC_API GC_word GC_CALL GC_get_free_space_divisor(void); -GC_API GC_word GC_max_retries; +GC_API GC_ATTR_DEPRECATED GC_word GC_max_retries; /* The maximum number of GCs attempted before */ /* reporting out of memory after heap */ /* expansion fails. Initially 0. */ @@ -286,7 +294,8 @@ GC_API void GC_CALL GC_set_max_retries(GC_word); GC_API GC_word GC_CALL GC_get_max_retries(void); -GC_API char *GC_stackbottom; /* Cool end of user stack. */ +GC_API GC_ATTR_DEPRECATED char *GC_stackbottom; + /* Cool end of user stack. */ /* May be set in the client prior to */ /* calling any GC_ routines. This */ /* avoids some overhead, and */ @@ -296,9 +305,13 @@ GC_API char *GC_stackbottom; /* Cool end of user stack. */ /* automatically. */ /* For multi-threaded code, this is the */ /* cold end of the stack for the */ - /* primordial thread. */ + /* primordial thread. Portable clients */ + /* should use GC_get_stack_base(), */ + /* GC_call_with_gc_active() and */ + /* GC_register_my_thread() instead. */ -GC_API int GC_dont_precollect; /* Don't collect as part of GC */ +GC_API GC_ATTR_DEPRECATED int GC_dont_precollect; + /* Do not collect as part of GC */ /* initialization. Should be set only */ /* if the client wants a chance to */ /* manually initialize the root set */ @@ -311,7 +324,7 @@ GC_API int GC_dont_precollect; /* Don't collect as part of GC */ GC_API void GC_CALL GC_set_dont_precollect(int); GC_API int GC_CALL GC_get_dont_precollect(void); -GC_API unsigned long GC_time_limit; +GC_API GC_ATTR_DEPRECATED unsigned long GC_time_limit; /* If incremental collection is enabled, */ /* We try to terminate collections */ /* after this many milliseconds. Not a */ @@ -366,22 +379,23 @@ GC_API void GC_CALL GC_init(void); /* GC_non_gc_bytes appropriately. */ /* Note that the GC_malloc_stubborn support doesn't really exist */ /* anymore. MANUAL_VDB provides comparable functionality. 
*/ -GC_API void * GC_CALL GC_malloc(size_t /* size_in_bytes */) - GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1); -GC_API void * GC_CALL GC_malloc_atomic(size_t /* size_in_bytes */) - GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1); -GC_API char * GC_CALL GC_strdup(const char *) GC_ATTR_MALLOC; -GC_API char * GC_CALL GC_strndup(const char *, size_t) GC_ATTR_MALLOC; -GC_API void * GC_CALL GC_malloc_uncollectable(size_t /* size_in_bytes */) - GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1); -GC_API void * GC_CALL GC_malloc_stubborn(size_t /* size_in_bytes */) - GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1); +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL + GC_malloc(size_t /* size_in_bytes */); +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL + GC_malloc_atomic(size_t /* size_in_bytes */); +GC_API GC_ATTR_MALLOC char * GC_CALL GC_strdup(const char *); +GC_API GC_ATTR_MALLOC char * GC_CALL + GC_strndup(const char *, size_t) GC_ATTR_NONNULL(1); +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL + GC_malloc_uncollectable(size_t /* size_in_bytes */); +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL + GC_malloc_stubborn(size_t /* size_in_bytes */); /* GC_memalign() is not well tested. */ -GC_API void * GC_CALL GC_memalign(size_t /* align */, size_t /* lb */) - GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(2); +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(2) void * GC_CALL + GC_memalign(size_t /* align */, size_t /* lb */); GC_API int GC_CALL GC_posix_memalign(void ** /* memptr */, size_t /* align */, - size_t /* lb */); + size_t /* lb */) GC_ATTR_NONNULL(1); /* Explicitly deallocate an object. Dangerous if used incorrectly. */ /* Requires a pointer to the base of an object. */ @@ -404,12 +418,12 @@ GC_API void GC_CALL GC_free(void *); /* allowing more than one stubborn object to be changed at once, but it */ /* is acceptable to do so. The same applies to dropping stubborn */ /* objects that are still changeable. */ -GC_API void GC_CALL GC_change_stubborn(const void *); -GC_API void GC_CALL GC_end_stubborn_change(const void *); +GC_API void GC_CALL GC_change_stubborn(const void *) GC_ATTR_NONNULL(1); +GC_API void GC_CALL GC_end_stubborn_change(const void *) GC_ATTR_NONNULL(1); /* Return a pointer to the base (lowest address) of an object given */ /* a pointer to a location within the object. */ -/* I.e. map an interior pointer to the corresponding bas pointer. */ +/* I.e., map an interior pointer to the corresponding base pointer. */ /* Note that with debugging allocation, this returns a pointer to the */ /* actual base of the object, i.e. the debug information, not to */ /* the base of the user object. */ @@ -429,7 +443,7 @@ GC_API int GC_CALL GC_is_heap_ptr(const void *); /* Given a pointer to the base of an object, return its size in bytes. */ /* The returned size may be slightly larger than what was originally */ /* requested. */ -GC_API size_t GC_CALL GC_size(const void * /* object_addr */); +GC_API size_t GC_CALL GC_size(const void * /* obj_addr */) GC_ATTR_NONNULL(1); /* For compatibility with C library. This is occasionally faster than */ /* a malloc followed by a bcopy. But if you rely on that, either here */ @@ -459,16 +473,15 @@ GC_API void GC_CALL GC_set_max_heap_size(GC_word /* n */); /* need not be scanned. This is sometimes important if the application */ /* maps large read/write files into the address space, which could be */ /* mistaken for dynamic library data segments on some systems. */ -/* The section (referred to by low_address) must be pointer-aligned. 
*/ -/* low_address must not be greater than high_address_plus_1. */ +/* Both section start and end are not needed to be pointer-aligned. */ GC_API void GC_CALL GC_exclude_static_roots(void * /* low_address */, - void * /* high_address_plus_1 */); + void * /* high_address_plus_1 */); /* Clear the set of root segments. Wizards only. */ GC_API void GC_CALL GC_clear_roots(void); /* Add a root segment. Wizards only. */ -/* The segment (referred to by low_address) must be pointer-aligned. */ +/* Both segment start and end are not needed to be pointer-aligned. */ /* low_address must not be greater than high_address_plus_1. */ GC_API void GC_CALL GC_add_roots(void * /* low_address */, void * /* high_address_plus_1 */); @@ -520,13 +533,15 @@ GC_API void GC_CALL GC_gcollect_and_unmap(void); /* GC_try_to_collect() returns 0 if the collection was aborted (or the */ /* collections are disabled), 1 if it succeeded. */ typedef int (GC_CALLBACK * GC_stop_func)(void); -GC_API int GC_CALL GC_try_to_collect(GC_stop_func /* stop_func */); +GC_API int GC_CALL GC_try_to_collect(GC_stop_func /* stop_func */) + GC_ATTR_NONNULL(1); /* Set and get the default stop_func. The default stop_func is used by */ /* GC_gcollect() and by implicitly trigged collections (except for the */ /* case when handling out of memory). Must not be 0. */ /* Both the setter and getter acquire the GC lock to avoid data races. */ -GC_API void GC_CALL GC_set_stop_func(GC_stop_func /* stop_func */); +GC_API void GC_CALL GC_set_stop_func(GC_stop_func /* stop_func */) + GC_ATTR_NONNULL(1); GC_API GC_stop_func GC_CALL GC_get_stop_func(void); /* Return the number of bytes in the heap. Excludes collector private */ @@ -583,6 +598,10 @@ GC_API void GC_CALL GC_get_heap_usage_safe(GC_word * /* pheap_size */, /* ineffective. */ GC_API void GC_CALL GC_disable(void); +/* Return non-zero (TRUE) if and only if garbage collection is disabled */ +/* (i.e., GC_dont_gc value is non-zero). Does not acquire the lock. */ +GC_API int GC_CALL GC_is_disabled(void); + /* Re-enable garbage collection. GC_disable() and GC_enable() calls */ /* nest. Garbage collection is enabled if the number of calls to both */ /* both functions is equal. */ @@ -592,7 +611,7 @@ GC_API void GC_CALL GC_enable(void); /* dirty bits are available or most heap objects are pointer-free */ /* (atomic) or immutable. Don't use in leak finding mode. Ignored if */ /* GC_dont_gc is non-zero. Only the generational piece of this is */ -/* functional if GC_parallel is TRUE or if GC_time_limit is */ +/* functional if GC_parallel is non-zero or if GC_time_limit is */ /* GC_TIME_UNLIMITED. Causes thread-local variant of GC_gcj_malloc() */ /* to revert to locked allocation. Must be called before any such */ /* GC_gcj_malloc() calls. For best performance, should be called as */ @@ -609,6 +628,7 @@ GC_API void GC_CALL GC_enable_incremental(void); #define GC_PROTECTS_STACK 8 /* Probably impractical. */ #define GC_PROTECTS_NONE 0 +/* The collector is assumed to be initialized before this call. */ GC_API int GC_CALL GC_incremental_protection_needs(void); /* Perform some garbage collection work, if appropriate. */ @@ -634,10 +654,10 @@ GC_API int GC_CALL GC_collect_a_little(void); /* for arrays likely to be larger than 100K or so. For other systems, */ /* or if the collector is not configured to recognize all interior */ /* pointers, the threshold is normally much higher. 
*/ -GC_API void * GC_CALL GC_malloc_ignore_off_page(size_t /* lb */) - GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1); -GC_API void * GC_CALL GC_malloc_atomic_ignore_off_page(size_t /* lb */) - GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1); +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL + GC_malloc_ignore_off_page(size_t /* lb */); +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL + GC_malloc_atomic_ignore_off_page(size_t /* lb */); #ifdef GC_ADD_CALLER # define GC_EXTRAS GC_RETURN_ADDR, __FILE__, __LINE__ @@ -649,43 +669,40 @@ GC_API void * GC_CALL GC_malloc_atomic_ignore_off_page(size_t /* lb */) /* The following is only defined if the library has been suitably */ /* compiled: */ -GC_API void * GC_CALL GC_malloc_atomic_uncollectable( - size_t /* size_in_bytes */) - GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1); -GC_API void * GC_CALL GC_debug_malloc_atomic_uncollectable(size_t, - GC_EXTRA_PARAMS) - GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1); +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL + GC_malloc_atomic_uncollectable(size_t /* size_in_bytes */); +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL + GC_debug_malloc_atomic_uncollectable(size_t, GC_EXTRA_PARAMS); /* Debugging (annotated) allocation. GC_gcollect will check */ /* objects allocated in this way for overwrites, etc. */ -GC_API void * GC_CALL GC_debug_malloc(size_t /* size_in_bytes */, - GC_EXTRA_PARAMS) - GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1); -GC_API void * GC_CALL GC_debug_malloc_atomic(size_t /* size_in_bytes */, - GC_EXTRA_PARAMS) - GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1); -GC_API char * GC_CALL GC_debug_strdup(const char *, - GC_EXTRA_PARAMS) GC_ATTR_MALLOC; -GC_API char * GC_CALL GC_debug_strndup(const char *, size_t, - GC_EXTRA_PARAMS) GC_ATTR_MALLOC; -GC_API void * GC_CALL GC_debug_malloc_uncollectable( - size_t /* size_in_bytes */, GC_EXTRA_PARAMS) - GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1); -GC_API void * GC_CALL GC_debug_malloc_stubborn(size_t /* size_in_bytes */, - GC_EXTRA_PARAMS) - GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1); -GC_API void * GC_CALL GC_debug_malloc_ignore_off_page( - size_t /* size_in_bytes */, GC_EXTRA_PARAMS) - GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1); -GC_API void * GC_CALL GC_debug_malloc_atomic_ignore_off_page( - size_t /* size_in_bytes */, GC_EXTRA_PARAMS) - GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1); +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL + GC_debug_malloc(size_t /* size_in_bytes */, GC_EXTRA_PARAMS); +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL + GC_debug_malloc_atomic(size_t /* size_in_bytes */, GC_EXTRA_PARAMS); +GC_API GC_ATTR_MALLOC char * GC_CALL + GC_debug_strdup(const char *, GC_EXTRA_PARAMS); +GC_API GC_ATTR_MALLOC char * GC_CALL + GC_debug_strndup(const char *, size_t, GC_EXTRA_PARAMS) + GC_ATTR_NONNULL(1); +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL + GC_debug_malloc_uncollectable(size_t /* size_in_bytes */, + GC_EXTRA_PARAMS); +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL + GC_debug_malloc_stubborn(size_t /* size_in_bytes */, GC_EXTRA_PARAMS); +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL + GC_debug_malloc_ignore_off_page(size_t /* size_in_bytes */, + GC_EXTRA_PARAMS); +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL + GC_debug_malloc_atomic_ignore_off_page(size_t /* size_in_bytes */, + GC_EXTRA_PARAMS); GC_API void GC_CALL GC_debug_free(void *); GC_API void * GC_CALL GC_debug_realloc(void * /* old_object */, size_t /* new_size_in_bytes */, GC_EXTRA_PARAMS) /* 'realloc' attr */ GC_ATTR_ALLOC_SIZE(2); 
-GC_API void GC_CALL GC_debug_change_stubborn(const void *); -GC_API void GC_CALL GC_debug_end_stubborn_change(const void *); +GC_API void GC_CALL GC_debug_change_stubborn(const void *) GC_ATTR_NONNULL(1); +GC_API void GC_CALL GC_debug_end_stubborn_change(const void *) + GC_ATTR_NONNULL(1); /* Routines that allocate objects with debug information (like the */ /* above), but just fill in dummy file and line number information. */ @@ -699,11 +716,11 @@ GC_API void GC_CALL GC_debug_end_stubborn_change(const void *); /* platforms it may be more convenient not to recompile, e.g. for */ /* leak detection. This can be accomplished by instructing the */ /* linker to replace malloc/realloc with these. */ -GC_API void * GC_CALL GC_debug_malloc_replacement(size_t /* size_in_bytes */) - GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1); -GC_API void * GC_CALL GC_debug_realloc_replacement(void * /* object_addr */, - size_t /* size_in_bytes */) - /* 'realloc' attr */ GC_ATTR_ALLOC_SIZE(2); +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL + GC_debug_malloc_replacement(size_t /* size_in_bytes */); +GC_API /* 'realloc' attr */ GC_ATTR_ALLOC_SIZE(2) void * GC_CALL + GC_debug_realloc_replacement(void * /* object_addr */, + size_t /* size_in_bytes */); #ifdef GC_DEBUG_REPLACEMENT # define GC_MALLOC(sz) GC_debug_malloc_replacement(sz) @@ -784,9 +801,10 @@ GC_API void * GC_CALL GC_debug_realloc_replacement(void * /* object_addr */, #ifdef GC_REQUIRE_WCSDUP /* This might be unavailable on some targets (or not needed). */ /* wchar_t should be defined in stddef.h */ - GC_API wchar_t * GC_CALL GC_wcsdup(const wchar_t *) GC_ATTR_MALLOC; - GC_API wchar_t * GC_CALL GC_debug_wcsdup(const wchar_t *, - GC_EXTRA_PARAMS) GC_ATTR_MALLOC; + GC_API GC_ATTR_MALLOC wchar_t * GC_CALL + GC_wcsdup(const wchar_t *) GC_ATTR_NONNULL(1); + GC_API GC_ATTR_MALLOC wchar_t * GC_CALL + GC_debug_wcsdup(const wchar_t *, GC_EXTRA_PARAMS) GC_ATTR_NONNULL(1); # ifdef GC_DEBUG # define GC_WCSDUP(s) GC_debug_wcsdup(s, GC_EXTRAS) # else @@ -806,10 +824,12 @@ typedef void (GC_CALLBACK * GC_finalization_proc)(void * /* obj */, GC_API void GC_CALL GC_register_finalizer(void * /* obj */, GC_finalization_proc /* fn */, void * /* cd */, - GC_finalization_proc * /* ofn */, void ** /* ocd */); + GC_finalization_proc * /* ofn */, void ** /* ocd */) + GC_ATTR_NONNULL(1); GC_API void GC_CALL GC_debug_register_finalizer(void * /* obj */, GC_finalization_proc /* fn */, void * /* cd */, - GC_finalization_proc * /* ofn */, void ** /* ocd */); + GC_finalization_proc * /* ofn */, void ** /* ocd */) + GC_ATTR_NONNULL(1); /* When obj is no longer accessible, invoke */ /* (*fn)(obj, cd). If a and b are inaccessible, and */ /* a points to b (after disappearing links have been */ @@ -860,10 +880,12 @@ GC_API void GC_CALL GC_debug_register_finalizer(void * /* obj */, /* refers to the object itself. */ GC_API void GC_CALL GC_register_finalizer_ignore_self(void * /* obj */, GC_finalization_proc /* fn */, void * /* cd */, - GC_finalization_proc * /* ofn */, void ** /* ocd */); + GC_finalization_proc * /* ofn */, void ** /* ocd */) + GC_ATTR_NONNULL(1); GC_API void GC_CALL GC_debug_register_finalizer_ignore_self(void * /* obj */, GC_finalization_proc /* fn */, void * /* cd */, - GC_finalization_proc * /* ofn */, void ** /* ocd */); + GC_finalization_proc * /* ofn */, void ** /* ocd */) + GC_ATTR_NONNULL(1); /* Another version of the above. It ignores all cycles. */ /* It should probably only be used by Java implementations. 
*/ @@ -871,10 +893,12 @@ GC_API void GC_CALL GC_debug_register_finalizer_ignore_self(void * /* obj */, /* refers to the object itself. */ GC_API void GC_CALL GC_register_finalizer_no_order(void * /* obj */, GC_finalization_proc /* fn */, void * /* cd */, - GC_finalization_proc * /* ofn */, void ** /* ocd */); + GC_finalization_proc * /* ofn */, void ** /* ocd */) + GC_ATTR_NONNULL(1); GC_API void GC_CALL GC_debug_register_finalizer_no_order(void * /* obj */, GC_finalization_proc /* fn */, void * /* cd */, - GC_finalization_proc * /* ofn */, void ** /* ocd */); + GC_finalization_proc * /* ofn */, void ** /* ocd */) + GC_ATTR_NONNULL(1); /* This is a special finalizer that is useful when an object's */ /* finalizer must be run when the object is known to be no */ @@ -894,10 +918,12 @@ GC_API void GC_CALL GC_debug_register_finalizer_no_order(void * /* obj */, /* unordered finalization (e.g. Java, C#). */ GC_API void GC_CALL GC_register_finalizer_unreachable(void * /* obj */, GC_finalization_proc /* fn */, void * /* cd */, - GC_finalization_proc * /* ofn */, void ** /* ocd */); + GC_finalization_proc * /* ofn */, void ** /* ocd */) + GC_ATTR_NONNULL(1); GC_API void GC_CALL GC_debug_register_finalizer_unreachable(void * /* obj */, GC_finalization_proc /* fn */, void * /* cd */, - GC_finalization_proc * /* ofn */, void ** /* ocd */); + GC_finalization_proc * /* ofn */, void ** /* ocd */) + GC_ATTR_NONNULL(1); #define GC_NO_MEMORY 2 /* Failure due to lack of memory. */ @@ -908,7 +934,8 @@ GC_API void GC_CALL GC_debug_register_finalizer_unreachable(void * /* obj */, /* where p is a pointer that is not followed by finalization */ /* code, and should not be considered in determining */ /* finalization order. */ -GC_API int GC_CALL GC_register_disappearing_link(void ** /* link */); +GC_API int GC_CALL GC_register_disappearing_link(void ** /* link */) + GC_ATTR_NONNULL(1); /* Link should point to a field of a heap allocated */ /* object obj. *link will be cleared when obj is */ /* found to be inaccessible. This happens BEFORE any */ @@ -930,7 +957,8 @@ GC_API int GC_CALL GC_register_disappearing_link(void ** /* link */); /* Only exists for backward compatibility. See below: */ GC_API int GC_CALL GC_general_register_disappearing_link(void ** /* link */, - const void * /* obj */); + const void * /* obj */) + GC_ATTR_NONNULL(1) GC_ATTR_NONNULL(2); /* A slight generalization of the above. *link is */ /* cleared when obj first becomes inaccessible. This */ /* can be used to implement weak pointers easily and */ @@ -963,7 +991,8 @@ GC_API int GC_CALL GC_general_register_disappearing_link(void ** /* link */, /* memory (and GC_oom_fn did not handle the problem). */ GC_API int GC_CALL GC_move_disappearing_link(void ** /* link */, - void ** /* new_link */); + void ** /* new_link */) + GC_ATTR_NONNULL(2); /* Moves a link previously registered via */ /* GC_general_register_disappearing_link (or */ /* GC_register_disappearing_link). Does not change the */ @@ -1014,7 +1043,7 @@ GC_API int GC_CALL GC_invoke_finalizers(void); /* the GC lock (to avoid data races). */ typedef void (GC_CALLBACK * GC_warn_proc)(char * /* msg */, GC_word /* arg */); -GC_API void GC_CALL GC_set_warn_proc(GC_warn_proc /* p */); +GC_API void GC_CALL GC_set_warn_proc(GC_warn_proc /* p */) GC_ATTR_NONNULL(1); /* GC_get_warn_proc returns the current warn_proc. 
*/ GC_API GC_warn_proc GC_CALL GC_get_warn_proc(void); @@ -1046,7 +1075,7 @@ typedef GC_word GC_hidden_pointer; typedef void * (GC_CALLBACK * GC_fn_type)(void * /* client_data */); GC_API void * GC_CALL GC_call_with_alloc_lock(GC_fn_type /* fn */, - void * /* client_data */); + void * /* client_data */) GC_ATTR_NONNULL(1); /* These routines are intended to explicitly notify the collector */ /* of new threads. Often this is unnecessary because thread creation */ @@ -1075,7 +1104,7 @@ typedef void * (GC_CALLBACK * GC_stack_base_func)( /* be used to provide a sufficiently accurate stack base. And we */ /* implement it everywhere. */ GC_API void * GC_CALL GC_call_with_stack_base(GC_stack_base_func /* fn */, - void * /* arg */); + void * /* arg */) GC_ATTR_NONNULL(1); #define GC_SUCCESS 0 #define GC_DUPLICATE 1 /* Was already registered. */ @@ -1131,7 +1160,8 @@ GC_API void * GC_CALL GC_call_with_stack_base(GC_stack_base_func /* fn */, /* latter case, the explicit call is normally required for threads */ /* created by third-party libraries. */ /* A manually registered thread requires manual unregistering. */ - GC_API int GC_CALL GC_register_my_thread(const struct GC_stack_base *); + GC_API int GC_CALL GC_register_my_thread(const struct GC_stack_base *) + GC_ATTR_NONNULL(1); /* Return TRUE if and only if the calling thread is registered with */ /* the garbage collector. */ @@ -1162,7 +1192,7 @@ GC_API void * GC_CALL GC_call_with_stack_base(GC_stack_base_func /* fn */, /* allowed for fn to call GC_call_with_gc_active() (even recursively), */ /* thus temporarily toggling the collector's state back to "active". */ GC_API void * GC_CALL GC_do_blocking(GC_fn_type /* fn */, - void * /* client_data */); + void * /* client_data */) GC_ATTR_NONNULL(1); /* Call a function switching to the "active" state of the collector for */ /* the current thread (i.e. the user function is allowed to call any */ @@ -1174,7 +1204,7 @@ GC_API void * GC_CALL GC_do_blocking(GC_fn_type /* fn */, /* GC_do_blocking. GC_call_with_gc_active() often can be used to */ /* provide a sufficiently accurate stack base. */ GC_API void * GC_CALL GC_call_with_gc_active(GC_fn_type /* fn */, - void * /* client_data */); + void * /* client_data */) GC_ATTR_NONNULL(1); /* Attempt to fill in the GC_stack_base structure with the stack base */ /* for this thread. This appears to be required to implement anything */ @@ -1183,7 +1213,8 @@ GC_API void * GC_CALL GC_call_with_gc_active(GC_fn_type /* fn */, /* It is also unfortunately hard to implement well on many platforms. */ /* Returns GC_SUCCESS or GC_UNIMPLEMENTED. This function acquires the */ /* GC lock on some platforms. */ -GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *); +GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *) + GC_ATTR_NONNULL(1); /* The following routines are primarily intended for use with a */ /* preprocessor which inserts calls to check C pointer arithmetic. */ @@ -1200,8 +1231,10 @@ GC_API void * GC_CALL GC_same_obj(void * /* p */, void * /* q */); /* the second argument is in units of bytes, not multiples of the */ /* object size. This should either be invoked from a macro, or the */ /* call should be automatically generated. 
*/ -GC_API void * GC_CALL GC_pre_incr(void **, ptrdiff_t /* how_much */); -GC_API void * GC_CALL GC_post_incr(void **, ptrdiff_t /* how_much */); +GC_API void * GC_CALL GC_pre_incr(void **, ptrdiff_t /* how_much */) + GC_ATTR_NONNULL(1); +GC_API void * GC_CALL GC_post_incr(void **, ptrdiff_t /* how_much */) + GC_ATTR_NONNULL(1); /* Check that p is visible */ /* to the collector as a possibly pointer containing location. */ @@ -1278,7 +1311,7 @@ GC_API void (GC_CALLBACK * GC_is_visible_print_proc)(void *); /* This returns a list of objects, linked through their first word. */ /* Its use can greatly reduce lock contention problems, since the */ /* allocation lock can be acquired and released many fewer times. */ -GC_API void * GC_CALL GC_malloc_many(size_t /* lb */); +GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_many(size_t /* lb */); #define GC_NEXT(p) (*(void * *)(p)) /* Retrieve the next element */ /* in returned list. */ @@ -1427,9 +1460,9 @@ GC_API int GC_CALL GC_get_force_unmap_on_gcollect(void); /* Similarly gnu-win32 DLLs need explicit initialization from the */ /* main program, as does AIX. */ extern int _data_start__[], _data_end__[], _bss_start__[], _bss_end__[]; -# define GC_DATASTART (_data_start__ < _bss_start__ ? \ +# define GC_DATASTART ((GC_word)_data_start__ < (GC_word)_bss_start__ ? \ (void *)_data_start__ : (void *)_bss_start__) -# define GC_DATAEND (_data_end__ > _bss_end__ ? \ +# define GC_DATAEND ((GC_word)_data_end__ > (GC_word)_bss_end__ ? \ (void *)_data_end__ : (void *)_bss_end__) # define GC_INIT_CONF_ROOTS GC_add_roots(GC_DATASTART, GC_DATAEND); \ GC_gcollect() /* For blacklisting. */ @@ -1439,6 +1472,11 @@ GC_API int GC_CALL GC_get_force_unmap_on_gcollect(void); # define GC_DATASTART ((void *)((ulong)_data)) # define GC_DATAEND ((void *)((ulong)_end)) # define GC_INIT_CONF_ROOTS GC_add_roots(GC_DATASTART, GC_DATAEND) +#elif (defined(PLATFORM_ANDROID) || defined(__ANDROID__)) \ + && !defined(GC_NOT_DLL) + /* Required if GC is built as shared library. */ + extern int __data_start[], _end[]; +# define GC_INIT_CONF_ROOTS GC_add_roots(__data_start, _end) #else # define GC_INIT_CONF_ROOTS /* empty */ #endif diff --git a/include/boehmgc/gc_confi.h b/include/boehmgc/gc_confi.h index d3b5f6a..d4b7231 100644 --- a/include/boehmgc/gc_confi.h +++ b/include/boehmgc/gc_confi.h @@ -164,8 +164,9 @@ # endif #endif /* _WIN32_WCE */ -#if defined(_DLL) && !defined(GC_NOT_DLL) && !defined(GC_DLL) \ - && !defined(__GNUC__) +#if !defined(GC_NOT_DLL) && !defined(GC_DLL) \ + && ((defined(_DLL) && !defined(__GNUC__)) \ + || (defined(DLL_EXPORT) && defined(GC_BUILD))) # define GC_DLL #endif @@ -220,9 +221,13 @@ /* non-NULL pointer it returns cannot alias any other pointer valid */ /* when the function returns). If the client code violates this rule */ /* by using custom GC_oom_func then define GC_OOM_FUNC_RETURNS_ALIAS. 
*/ -# if !defined(GC_OOM_FUNC_RETURNS_ALIAS) && defined(__GNUC__) \ - && (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1)) +# ifdef GC_OOM_FUNC_RETURNS_ALIAS +# define GC_ATTR_MALLOC /* empty */ +# elif defined(__GNUC__) && (__GNUC__ > 3 \ + || (__GNUC__ == 3 && __GNUC_MINOR__ >= 1)) # define GC_ATTR_MALLOC __attribute__((__malloc__)) +# elif defined(_MSC_VER) && _MSC_VER >= 14 +# define GC_ATTR_MALLOC __declspec(noalias) __declspec(restrict) # else # define GC_ATTR_MALLOC # endif @@ -239,6 +244,27 @@ # endif #endif +#ifndef GC_ATTR_NONNULL +# if defined(__GNUC__) && __GNUC__ >= 4 +# define GC_ATTR_NONNULL(argnum) __attribute__((__nonnull__(argnum))) +# else +# define GC_ATTR_NONNULL(argnum) /* empty */ +# endif +#endif + +#ifndef GC_ATTR_DEPRECATED +# ifdef GC_BUILD +# undef GC_ATTR_DEPRECATED +# define GC_ATTR_DEPRECATED /* empty */ +# elif defined(__GNUC__) && __GNUC__ >= 4 +# define GC_ATTR_DEPRECATED __attribute__((__deprecated__)) +# elif defined(_MSC_VER) && _MSC_VER >= 12 +# define GC_ATTR_DEPRECATED __declspec(deprecated) +# else +# define GC_ATTR_DEPRECATED /* empty */ +# endif +#endif + #if defined(__sgi) && !defined(__GNUC__) && _COMPILER_VERSION >= 720 # define GC_ADD_CALLER # define GC_RETURN_ADDR (GC_word)__return_address diff --git a/include/boehmgc/gc_gcj.h b/include/boehmgc/gc_gcj.h index 7865878..eba714c 100644 --- a/include/boehmgc/gc_gcj.h +++ b/include/boehmgc/gc_gcj.h @@ -72,21 +72,22 @@ GC_API void GC_CALL GC_init_gcj_malloc(int /* mp_index */, /* Allocate an object, clear it, and store the pointer to the */ /* type structure (vtable in gcj). */ /* This adds a byte at the end of the object if GC_malloc would.*/ -GC_API void * GC_CALL GC_gcj_malloc(size_t /* lb */, - void * /* ptr_to_struct_containing_descr */) - GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1); +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL + GC_gcj_malloc(size_t /* lb */, + void * /* ptr_to_struct_containing_descr */); + /* The debug versions allocate such that the specified mark_proc */ /* is always invoked. */ -GC_API void * GC_CALL GC_debug_gcj_malloc(size_t /* lb */, - void * /* ptr_to_struct_containing_descr */, - GC_EXTRA_PARAMS) - GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1); +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL + GC_debug_gcj_malloc(size_t /* lb */, + void * /* ptr_to_struct_containing_descr */, + GC_EXTRA_PARAMS); /* Similar to GC_gcj_malloc, but assumes that a pointer to near the */ /* beginning of the resulting object is always maintained. */ -GC_API void * GC_CALL GC_gcj_malloc_ignore_off_page(size_t /* lb */, - void * /* ptr_to_struct_containing_descr */) - GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1); +GC_API GC_ATTR_MALLOC GC_ATTR_ALLOC_SIZE(1) void * GC_CALL + GC_gcj_malloc_ignore_off_page(size_t /* lb */, + void * /* ptr_to_struct_containing_descr */); /* The kind numbers of normal and debug gcj objects. */ /* Useful only for debug support, we hope. */
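Notes after the patch (not part of the diff): since the main theme of this update is marking the public variables GC_ATTR_DEPRECATED in favor of the getter/setter API, the following is a minimal client-code sketch written against the updated gc.h. The handler name and allocation size are illustrative only.

    /* Prefer the setters/getters over the deprecated public variables. */
    #include <stdio.h>
    #include "gc.h"

    /* Custom out-of-memory handler; the registered GC_oom_fn must not be 0. */
    static void * GC_CALLBACK oom_handler(size_t bytes_requested)
    {
        fprintf(stderr, "GC: could not allocate %lu bytes\n",
                (unsigned long)bytes_requested);
        return NULL;                    /* report the failure to the caller */
    }

    int main(void)
    {
        GC_INIT();
        GC_set_oom_fn(oom_handler);     /* instead of: GC_oom_fn = oom_handler; */

        void *p = GC_MALLOC(1024);
        (void)p;

        printf("collections so far: %lu\n",
               (unsigned long)GC_get_gc_no());  /* instead of reading GC_gc_no */
    #ifdef GC_THREADS
        /* As of v7.3 this is the number of marker threads minus one. */
        printf("extra parallel markers: %d\n", GC_get_parallel());
    #endif
        return 0;
    }

As the header comments state, GC_set_oom_fn() acquires the GC lock, while GC_get_gc_no() and GC_get_parallel() are unsynchronized.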
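Also not from the patch: a short sketch of on-demand finalization using the documented GC_REGISTER_FINALIZER_NO_ORDER macro together with GC_set_finalize_on_demand() and GC_invoke_finalizers(). The struct and sizes are made up for illustration.

    #include <stdio.h>
    #include "gc.h"

    struct buffer { char *data; size_t len; };

    static void GC_CALLBACK buffer_finalizer(void *obj, void *client_data)
    {
        (void)client_data;
        printf("finalizing buffer of %lu bytes\n",
               (unsigned long)((struct buffer *)obj)->len);
    }

    int main(void)
    {
        GC_INIT();
        GC_set_finalize_on_demand(1);   /* run finalizers only when asked to */

        struct buffer *b = GC_NEW(struct buffer);
        b->len = 256;
        b->data = (char *)GC_MALLOC_ATOMIC(b->len);

        GC_REGISTER_FINALIZER_NO_ORDER(b, buffer_finalizer, NULL, NULL, NULL);

        b = NULL;                       /* drop the last reference */
        GC_gcollect();                  /* b becomes eligible for finalization */
        /* NB: a stale copy of b left in a register or stack slot can keep it
         * alive; a real test would be more careful about that. */
        printf("%d finalizer(s) run\n", GC_invoke_finalizers());
        return 0;
    }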
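The gc.h comments above note that GC_malloc_many() returns a list of objects linked through their first word, mainly to cut allocation-lock contention. Below is a sketch (object size illustrative) of walking such a list with GC_NEXT().

    #include "gc.h"

    #define CHUNK 64    /* per-object size; must be able to hold a pointer */

    int main(void)
    {
        GC_INIT();

        void *batch = GC_malloc_many(CHUNK);  /* a list of CHUNK-byte objects */
        int count = 0;

        while (batch != NULL) {
            void *obj = batch;
            batch = GC_NEXT(obj);   /* unlink the next element first ...      */
            GC_NEXT(obj) = NULL;    /* ... then the object is free to be used */
            ++count;
        }
        return count > 0 ? 0 : 1;
    }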
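Finally, the comments on GC_get_stack_base()/GC_register_my_thread() above recommend explicit registration for threads created by third-party libraries. The glue sketch below assumes a GC built with thread support; the function names are invented for illustration and error handling is abbreviated.

    #define GC_THREADS              /* assumed; must precede the gc.h include */
    #include "gc.h"

    /* Call at the start of a foreign (library-created) thread, before it
     * touches the collector or any GC-visible memory. */
    int attach_current_thread(void)
    {
        struct GC_stack_base sb;

        if (GC_get_stack_base(&sb) != GC_SUCCESS)
            return 0;               /* GC_UNIMPLEMENTED on some targets */
        /* GC_DUPLICATE only means the thread was already registered. */
        return GC_register_my_thread(&sb) != GC_UNIMPLEMENTED;
    }

    /* Call just before the foreign thread exits; a manually registered
     * thread requires manual unregistering. */
    void detach_current_thread(void)
    {
        (void)GC_unregister_my_thread();
    }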