/
sgc.c
5090 lines (4235 loc) · 122 KB
/
sgc.c
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
/*
SenoraGC, a relatively portable conservative GC for a slightly
cooperative environment
Copyright (c) 2004-2013 PLT Design Inc.
Copyright (c) 1996-98 Matthew Flatt
All rights reserved.
This library is free software; you can redistribute it and/or
modify it under the terms of the GNU Library General Public
License as published by the Free Software Foundation; either
version 2 of the License, or (at your option) any later version.
This library is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
Library General Public License for more details.
You should have received a copy of the GNU Library General Public
License along with this library; if not, write to the Free
Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
Boston, MA 02110-1301 USA.
After Boehm et al.
Note: this implementation is probably still a little hardwired for
32-bit addresses.
*/
#include <stdlib.h>
#include <setjmp.h>
#include <stdio.h>
#include <string.h>
#include "../sconfig.h"
#include "mzconfig.h"
#include "sgc.h"
#ifdef _WIN64
# define SIZEOF_LONG 8
# ifdef _MSC_VER
# define inline _inline
# endif
#endif
#ifdef SIZEOF_LONG
# if SIZEOF_LONG == 8
# define SIXTY_FOUR_BIT_INTEGERS
# endif
#endif
/****************************************************************************/
/* Option bundles */
/****************************************************************************/
#ifndef SGC_STD_DEBUGGING
# define SGC_STD_DEBUGGING 0
#endif
#ifdef WIN32
# define SGC_STD_DEBUGGING_UNIX 0
# define SGC_STD_DEBUGGING_WINDOWS SGC_STD_DEBUGGING
#else
# define SGC_STD_DEBUGGING_UNIX SGC_STD_DEBUGGING
# define SGC_STD_DEBUGGING_WINDOWS 0
#endif
#ifndef SGC_AUTO_ROOTS
# define SGC_AUTO_ROOTS 0
#endif
/****************************************************************************/
/* Options and debugging flags */
/****************************************************************************/
#define NO_COLLECTIONS 0
/* Disable all collections */
#define NO_DISAPPEARING 0
/* Never perform disappearing link */
#define NO_FINALIZING 0
/* Never invoke queued finalizers */
#define NO_ATOMIC 0
/* Never treat allocated data as atomic */
#define KEEP_BLOCKS_FOREVER 0
/* Number of block sector assignments to keep indefinitely, rather
than recycling them immediately. This speeds up collection
and allocation, but it also costs memory via fragmentation. */
#define NO_STACK_OFFBYONE 0
/* Don't notice a stack-based pointer just past the end of an object */
#define WATCH_FOR_FINALIZATION_CYCLES 0
/* Report cycles in finalizable chunks */
#define USE_GC_FREE_SPACE_DIVISOR 1
/* Grow stack in a way similar to Boehm's GC using the global
GC_free_space_divisor. 0 => GC after allocating twice the
active size from the last GC. */
#define PROVIDE_GC_FREE 0
/* Provide GC_free; automatically implies DISTINGUISH_FREE_FROM_UNMARKED */
#define PROVIDE_CHUNK_GC_FREE 1
/* Provide GC_free that only works on chunks (i.e., large allocation
blocks); frees on small blocks are ignored */
#define PROVIDE_MALLOC_AND_FREE 0
/* Defines malloc(), realloc(), calloc(), and free() in terms of
the collector. Platform-specific allocation routines (e.g., sbrk())
must then be used for low-level allocations by the collector;
turn on one of: GET_MEM_VIA_SBRK or GET_MEM_VIA_MMAP.
This will not work if fprintf uses malloc or free. Turning on
FPRINTF_USE_PRIM_STRINGOUT can solve this problem.
Automatically implies PROVIDE_GC_FREE, but adds extra checks to
CHECK_FREES if PROVIDE_GC_FREE was not otherwise on */
#define GET_MEM_VIA_SBRK 0
/* Instead of calling malloc() to get low-level memory, use
sbrk() directly. (Unix) */
#define GET_MEM_VIA_MMAP SGC_STD_DEBUGGING_UNIX
/* Instead of calling malloc() to get low-level memory, use
mmap() directly. (Unix) */
#define GET_MEM_VIA_VIRTUAL_ALLOC SGC_STD_DEBUGGING_WINDOWS
/* Instead of calling malloc() to get low-level memory, use
VirtualAlloc() directly. (Win32) */
#define RELEASE_UNUSED_SECTORS 1
/* Instead of managing a list of unused sectors, they are
given back to the OS. This only works with mmap(). */
#define DISTINGUISH_FREE_FROM_UNMARKED 0
/* Don't let conservatism resurrect a previously-collected block */
#define TERSE_MEMORY_TRACING 0
/* Automatically implies ALLOW_TRACE_COUNT, ALLOW_TRACE_PATH,
ALLOW_SET_FINALIZER, CHECK_SKIP_MARK_AT_FIRST, and CHECK_WATCH_FOR_PTR_ALLOC */
#define STD_MEMORY_TRACING SGC_STD_DEBUGGING
/* Automatically implies TERSE_MEMORY_TRACING, DUMP_BLOCK_COUNTS,
and DUMP_SECTOR_MAP */
#define DETAIL_MEMORY_TRACING 0
/* Automatically implies STD_MEMORY_TRACING, DUMP_BLOCK_MAPS,
STAMP_AND_REMEMBER_SOURCE, and KEEP_DETAIL_PATH */
#define STAMP_AND_REMEMBER_SOURCE 0
/* Keep timestamps and source of marking from collection */
#define ALLOW_TRACE_COUNT 0
/* Support collection-based trace count callbacks */
#define ALLOW_TRACE_PATH 0
/* Support collection-based trace path callbacks */
#define KEEP_DETAIL_PATH SGC_STD_DEBUGGING
/* Keep source offsets for path traces */
#define CHECK_SKIP_MARK_AT_FIRST 0
/* Enables skipping certain marks during collection from the initial
root supplied as GC_initial_trace_root */
#define ALLOW_SET_LOCKING 0
/* Enables locking out collections for a specific set. */
#define ALLOW_SET_FINALIZER 0
/* Support the per-set custom "de-allocation" callback. */
#define CHECK_WATCH_FOR_PTR_ALLOC SGC_STD_DEBUGGING
/* Set GC_watch_for_ptr to be ~(ptr value);
there are 3 places where the ptr is checked,
unless USE_WATCH_FOUND_FUNC is on */
#define USE_WATCH_FOUND_FUNC SGC_STD_DEBUGGING
/* Calls GC_found_watch when the watch-for ptr is found. */
#define PAD_BOUNDARY_BYTES SGC_STD_DEBUGGING
/* Put a known padding pattern around every allocated
block to test for array overflow/underflow.
Pad-testing is performed at the beginning of every GC.
Automatically implies CHECK_SIMPLE_INTERIOR_POINTERS */
#define CHECK_SIMPLE_INTERIOR_POINTERS 0
/* Recognize pointers into the middle of an allocated
block, (but do not recognize pointers just past the
end of an allocated block, as is generally performed
for stack-based pointers). */
#define DUMP_BLOCK_COUNTS 1
/* GC_dump prints detail information about block and
set size contents. */
#define DUMP_SECTOR_MAP 1
/* GC_dump prints detail information about existing
sectors. */
#ifdef SIXTY_FOUR_BIT_INTEGERS
# undef DUMP_SECTOR_MAP
# define DUMP_SECTOR_MAP 0
#endif
#define DUMP_BLOCK_MAPS 1 /* 0 */
/* GC_dump prints detail information about block and
set address contents. Automatically implies
DUMP_BLOCK_COUNTS. */
#define CHECK_FREES SGC_STD_DEBUGGING
/* Print an error for redundant frees by calling free_error */
#define FPRINTF_USE_PRIM_STRINGOUT SGC_STD_DEBUGGING_WINDOWS
/* Avoids using fprintf while the GC is running, printing
messages instead as pure strings passed to
void GC_prim_stringout(char *s, int len);
which must be provided at link time, or one of
PRIM_STRINGOUT_AS_FWRITE or PRIM_STRINGOUT_AS_WINDOWS_CONSOLE */
#define PRIM_STRINGOUT_AS_FWRITE 0
/* Implements GC_prim_stringout using fwrite. Not likely to
solve any problems, but useful for debugging FPRINTF. */
#define PRIM_STRINGOUT_AS_WINDOWS_CONSOLE SGC_STD_DEBUGGING_WINDOWS
/* Implements GC_prim_stringout using Windows console
functions. */
#define AUTO_STATIC_ROOTS_IF_POSSIBLE SGC_AUTO_ROOTS
/* Automatically registers static C variables as roots if
platform-specific code is provided */
#define PRINT_INFO_PER_GC SGC_STD_DEBUGGING
/* Writes to stderr before and after each GC, summarizing
the state of memory and current system time at each point. */
#define SHOW_SECTOR_MAPS_AT_GC 0
/* Write a sector map before and after each GC. This is helpful for
noticing unusual memory patterns, such as allocations of large
blocks or unusually repetitive allocations. Only works with
PRINT_INFO_PER_GC. */
/****************************************************************************/
/* Parameters and platform-specific settings */
/****************************************************************************/
/* GC frequency: MEM_USE_FACTOR is max factor between current
allocated bytes and allocated bytes after last GC. */
#ifdef SMALL_HASH_TABLES
# define FIRST_GC_LIMIT 20000
# define MEM_USE_FACTOR 1.40
#else
# define FIRST_GC_LIMIT 100000
# define MEM_USE_FACTOR 3
#endif
#ifdef DOS_FAR_POINTERS
# include <dos.h>
# include <alloc.h>
# define PTR_TO_INT(v) (((((intptr_t)FP_SEG(v)) & 0xFFFF) << 4) + FP_OFF(v))
# define INT_TO_PTR(v) ((void *)((((v) >> 4) << 16) + ((v) & 0xF)))
# if !PROVIDE_MALLOC_AND_FREE
# define MALLOC farmalloc
# define FREE farfree
# endif
#else
# define PTR_TO_INT(v) ((uintptr_t)(v))
# define INT_TO_PTR(v) ((void *)(v))
# if !PROVIDE_MALLOC_AND_FREE
# define MALLOC malloc
# define FREE free
# endif
#endif
#if GET_MEM_VIA_SBRK
# include <unistd.h>
#endif
#if GET_MEM_VIA_MMAP
# include <unistd.h>
# include <sys/mman.h>
# include <sys/types.h>
# include <sys/stat.h>
# include <fcntl.h>
#endif
#ifdef WIN32
# include <windows.h>
#endif
#if !GET_MEM_VIA_MMAP
# undef RELEASE_UNUSED_SECTORS
# define RELEASE_UNUSED_SECTORS 0
#endif
/* System-specific alignment of pointers. */
#ifdef SIXTY_FOUR_BIT_INTEGERS
# define PTR_ALIGNMENT 8
# define LOG_PTR_SIZE 3
# define LOW_32_BITS(x) (x & 0xFFFFFFFF)
#else
# define PTR_ALIGNMENT 4
# define LOG_PTR_SIZE 2
# define LOW_32_BITS(x) x
#endif
#define PTR_SIZE (1 << LOG_PTR_SIZE)
#ifdef _WIN64
# define ALLOC_ALIGNMENT 16
#else
# define ALLOC_ALIGNMENT PTR_SIZE
#endif
#define DOUBLE_SIZE sizeof(double)
/* SECTOR_SEGMENT_SIZE determines the alignment of collector blocks.
Since it should be a power of 2, LOG_SECTOR_SEGMENT_SIZE is
specified directly. A larger block size speeds up GC, but wastes
more unallocated bytes in same-size buckets. */
#define LOG_SECTOR_SEGMENT_SIZE 12
#define SECTOR_SEGMENT_SIZE (1 << LOG_SECTOR_SEGMENT_SIZE)
#define SECTOR_SEGMENT_MASK (~(SECTOR_SEGMENT_SIZE-1))
/* MAX_COMMON_SIZE is maximum size of allocated blocks
using relatively efficient memory layouts. */
#define MAX_COMMON_SIZE (SECTOR_SEGMENT_SIZE >> 2)
#define NUM_COMMON_SIZE ((2 * LOG_SECTOR_SEGMENT_SIZE) + 8)
/* Number of sector segments to be allocated at once with
malloc() to avoid waste when obtaining the proper alignment. */
#define SECTOR_SEGMENT_GROUP_SIZE 32
/* Number of bits used in 32-bit level table for checking existence of
a sector. Creates a table of (1 << SECTOR_LOOKUP_SHIFT) pointers
to individual page tables of size SECTOR_LOOKUP_PAGESIZE. */
#define SECTOR_LOOKUP_PAGESETBITS 12
#define LOG_MAP_PTR_SIZE 2
#define MAP_PTR_SIZE 4
#define SECTOR_LOOKUP_SHIFT ((MAP_PTR_SIZE*8) - SECTOR_LOOKUP_PAGESETBITS)
#define LOG_SECTOR_LOOKUP_PAGESIZE ((MAP_PTR_SIZE*8) - SECTOR_LOOKUP_PAGESETBITS - LOG_SECTOR_SEGMENT_SIZE)
#define SECTOR_LOOKUP_PAGESIZE (1 << LOG_SECTOR_LOOKUP_PAGESIZE)
#define SECTOR_LOOKUP_PAGEMASK (SECTOR_LOOKUP_PAGESIZE - 1)
#define SECTOR_LOOKUP_PAGETABLE(x) (LOW_32_BITS(x) >> SECTOR_LOOKUP_SHIFT)
#define SECTOR_LOOKUP_PAGEPOS(x) ((LOW_32_BITS(x) >> LOG_SECTOR_SEGMENT_SIZE) & SECTOR_LOOKUP_PAGEMASK)
#define LOG_SECTOR_PAGEREC_SIZE (LOG_PTR_SIZE + 1)
/***************************************************************************/
/* Implementation Terminology:
"sector" - A low-level block of memory. Given an arbitrary
pointer value, whether it is contained in a sector and
the starting point of the sector can be determined in
constant time.
"segment" - A portion of a sector, aligned on SECTOR_SEGMENT_SIZE
boundaries.
"page" - part of a table for a (partial) mapping from addresses to
segments
"block" or "common block" - A block for small memory allocations. Blocks
provide allocation by partitioning a sector.
"chunk" - A block of memory too large to be a common block. Each chunk
is allocated in its own sector.
"set" - A collection of blocks & chunks asscoaited with a particular
name, "deallocation" function, trace function, etc.
*/
/***************************************************************************/
/* Debugging the collector: */
#define CHECK 0
#define PRINT 0
#define TIME 0
#define ALWAYS_TRACE 0
#define CHECK_COLLECTING 0
#define MARK_STATS 0
#define ALLOC_STATS 0
#define FINISH_STATS 0
#if DETAIL_MEMORY_TRACING
# undef STD_MEMORY_TRACING
# undef STAMP_AND_REMEMBER_SOURCE
# undef DUMP_BLOCK_MAPS
# undef KEEP_DETAIL_PATH
# define STD_MEMORY_TRACING 1
# define STAMP_AND_REMEMBER_SOURCE 1
# define DUMP_BLOCK_MAPS 1
# define KEEP_DETAIL_PATH 1
#endif
#if STD_MEMORY_TRACING
# undef TERSE_MEMORY_TRACING
# undef DUMP_BLOCK_COUNTS
# define TERSE_MEMORY_TRACING 1
# define DUMP_BLOCK_COUNTS 1
#endif
#if TERSE_MEMORY_TRACING
# undef ALLOW_TRACE_COUNT
# undef ALLOW_TRACE_PATH
# undef ALLOW_SET_FINALIZER
# undef CHECK_WATCH_FOR_PTR_ALLOC
# undef CHECK_SKIP_MARK_AT_FIRST
# define ALLOW_TRACE_COUNT 1
# define ALLOW_TRACE_PATH 1
# define ALLOW_SET_FINALIZER 1
# define CHECK_WATCH_FOR_PTR_ALLOC 1
# define CHECK_SKIP_MARK_AT_FIRST 1
#endif
#if PAD_BOUNDARY_BYTES
# undef CHECK_SIMPLE_INTERIOR_POINTERS
# define CHECK_SIMPLE_INTERIOR_POINTERS 1
#endif
#if DUMP_BLOCK_MAPS
# undef DUMP_BLOCK_COUNTS
# define DUMP_BLOCK_COUNTS 1
#endif
#if PROVIDE_MALLOC_AND_FREE
# if !PROVIDE_GC_FREE
# define EXTRA_FREE_CHECKS 1
# endif
# undef PROVIDE_GC_FREE
# define PROVIDE_GC_FREE 1
#else
# define EXTRA_FREE_CHECKS 0
#endif
#if PROVIDE_GC_FREE
# undef DISTINGUISH_FREE_FROM_UNMARKED
# define DISTINGUISH_FREE_FROM_UNMARKED 1
# undef PROVIDE_CHUNK_GC_FREE
# define PROVIDE_CHUNK_GC_FREE 1
#endif
#if ALLOW_TRACE_COUNT || ALLOW_TRACE_PATH || PROVIDE_GC_FREE
# define KEEP_SET_NO 1
# define KEEP_CHUNK_SET_NO 1
#else
# define KEEP_SET_NO 0
# if PROVIDE_CHUNK_GC_FREE
# define KEEP_CHUNK_SET_NO 1
# endif
#endif
#if PROVIDE_CHUNK_GC_FREE
# define KEEP_PREV_PTR 1
#else
# define KEEP_PREV_PTR 0
#endif
#ifndef NULL
# define NULL 0L
#endif
#if PAD_BOUNDARY_BYTES
/* Pad start & end must be a multiple of DOUBLE_SIZE */
# define PAD_START_SIZE (2 * sizeof(intptr_t))
# define PAD_END_SIZE PAD_START_SIZE
# define PAD_PATTERN 0x7c8c9cAc
# define PAD_FILL_PATTERN 0xc7
# define SET_PAD(p, s, os) set_pad(p, s, os)
# define PAD_FORWARD(p) ((void *)(((char *)p) + PAD_START_SIZE))
# define PAD_BACKWARD(p) ((void *)(((char *)p) - PAD_START_SIZE))
#else
# define PAD_FORWARD(p) (p)
# define PAD_BACKWARD(p) (p)
#endif
/* A root to start with, when non-zero. Useful for tracing from a
particular object. The skip function is used when
CHECK_SKIP_MARK_AT_FIRST to skip certain objects when marking from
this root (when the function return 1) */
void *GC_initial_trace_root;
int (*GC_inital_root_skip)(void *, size_t);
void (*GC_out_of_memory)(void);
/* Sector types: */
enum {
sector_kind_block,
sector_kind_free,
#if !RELEASE_UNUSED_SECTORS
sector_kind_freed,
#else
# define sector_kind_freed sector_kind_free
#endif
sector_kind_chunk,
sector_kind_managed,
sector_kind_other
};
/* Header for a "common block": a sector subdivided into equal-size
   small objects.  The `free` bitmap at the end is allocated with
   extra space past the struct, hence the [1] placeholder (pre-C99
   flexible-array idiom). */
typedef struct BlockOfMemory {
  struct Finalizer *finalizers; /* finalizers for objects in this block */
  uintptr_t start;              /* presumably first object address -- confirm in allocator */
  uintptr_t end;
  uintptr_t top;
  short size;                   /* element size for this block */
  short atomic;                 /* non-zero => contents not scanned for pointers */
  short elem_per_block;         /* element count; see ELEM_PER_BLOCK() */
  short free_search_start, free_search_bit, free_search_offset; /* free-slot search cursor */
#if KEEP_SET_NO
  short set_no;                 /* owning set's index */
#endif
  int *positions; /* maps displacement in ptrs => position in objects */
  struct BlockOfMemory *next;   /* next block in the owning chain */
#if STAMP_AND_REMEMBER_SOURCE
  intptr_t make_time;           /* stamp_clock value at creation (debugging) */
  intptr_t use_time;            /* stamp_clock value at last use (debugging) */
  uintptr_t low_marker;         /* NOTE(review): marking-source bounds? confirm */
  uintptr_t high_marker;
#endif
  unsigned char free[1];        /* per-object free/unmark bits (see POS_TO_FREE_BIT et al.) */
} BlockOfMemory;
#if DISTINGUISH_FREE_FROM_UNMARKED
# define FREE_BIT_PER_ELEM 4
# define LOG_FREE_BIT_PER_ELEM 2
# define FREE_BIT_SIZE (8 >> LOG_FREE_BIT_PER_ELEM)
# define FREE_BIT_START 0x2
# define UNMARK_BIT_START 0x1
# define POS_TO_FREE_INDEX(p) (p >> LOG_FREE_BIT_PER_ELEM)
# define POS_TO_UNMARK_INDEX(p) (p >> LOG_FREE_BIT_PER_ELEM)
# define POS_TO_FREE_BIT(p) (FREE_BIT_START << ((p & (FREE_BIT_PER_ELEM - 1)) << 1))
# define POS_TO_UNMARK_BIT(p) (UNMARK_BIT_START << ((p & (FREE_BIT_PER_ELEM - 1)) << 1))
# define ALL_UNMARKED 0x55
# define ALL_FREE 0xAA
# define _NOT_FREE(x) NOT_FREE(x)
# define SHIFT_UNMARK_TO_FREE(x) ((x & ALL_UNMARKED) << 1)
# define SHIFT_COPY_FREE_TO_UNMARKED(x) ((x & ALL_FREE) | ((x & ALL_FREE) >> 1))
#else /* !DISTINGUISH_FREE_FROM_UNMARKED */
# define FREE_BIT_PER_ELEM 8
# define LOG_FREE_BIT_PER_ELEM 3
# define FREE_BIT_SIZE (8 >> LOG_FREE_BIT_PER_ELEM)
# define FREE_BIT_START 0x1
# define UNMARK_BIT_START 0x1
# define POS_TO_FREE_INDEX(p) (p >> LOG_FREE_BIT_PER_ELEM)
# define POS_TO_UNMARK_INDEX(p) (p >> LOG_FREE_BIT_PER_ELEM)
# define POS_TO_FREE_BIT(p) (FREE_BIT_START << (p & (FREE_BIT_PER_ELEM - 1)))
# define POS_TO_UNMARK_BIT(p) (UNMARK_BIT_START << (p & (FREE_BIT_PER_ELEM - 1)))
# define ALL_UNMARKED 0xFF
# define _NOT_FREE(x) 1
#endif /* DISTINGUISH_FREE_FROM_UNMARKED */
#define NOT_FREE(x) (!(x))
#define IS_FREE(x) (x)
#define NOT_MARKED(x) (x)
#define IS_MARKED(x) (!(x))
#define ELEM_PER_BLOCK(b) b->elem_per_block
/* A "chunk": a single allocation too large for a common block,
   occupying its own sector (see the terminology comment above).
   The object data follows the header, hence the data[1] placeholder. */
typedef struct MemoryChunk {
  struct Finalizer *finalizers; /* finalizers registered for this chunk */
  uintptr_t start;              /* start address of the object data */
  uintptr_t end;                /* address just past the object data */
  struct MemoryChunk *next;     /* next chunk in the owning set's list */
#if KEEP_PREV_PTR
  struct MemoryChunk **prev_ptr; /* address of the pointer that points here;
                                    kept so a chunk can be unlinked in O(1)
                                    (enabled with PROVIDE_CHUNK_GC_FREE) */
#endif
  short atomic;                 /* non-zero => not scanned for pointers */
  short marked;                 /* mark flag used during collection */
#if STAMP_AND_REMEMBER_SOURCE
  intptr_t make_time;           /* stamp_clock value at creation (debugging) */
  uintptr_t marker;             /* NOTE(review): source of the mark? confirm */
#endif
#if KEEP_CHUNK_SET_NO
  int set_no;                   /* owning set's index */
#endif
  char data[1];                 /* object data follows header (variable length) */
} MemoryChunk;
/* If this changes size from 2 ptrs, change LOG_SECTOR_PAGEREC_SIZE */
typedef struct {
intptr_t kind; /* sector_kind_other, etc. */
uintptr_t start; /* Sector start; may not be accurate if the segment
is deallocated, but 0 => not in any sector */
} SectorPage;
#if !RELEASE_UNUSED_SECTORS
# include "../utils/splay.c"
typedef struct SectorFreepage {
intptr_t size;
uintptr_t start; /* Sector start */
uintptr_t end; /* start of next */
Tree by_start;
Tree by_end;
Tree by_start_per_size;
Tree by_size;
Tree *same_size;
} SectorFreepage;
static Tree *sector_freepage_by_start;
static Tree *sector_freepage_by_end;
static Tree *sector_freepage_by_size;
#define TREE_FP(t) ((SectorFreepage *)(t->data))
/* In-order successor of `node` within its splay tree: the leftmost
   node of the right subtree, or NULL when there is no right subtree. */
static Tree *next(Tree *node)
{
  Tree *t;
  for (t = node->right; t && t->left; t = t->left) {
    /* descend to the leftmost node */
  }
  return t;
}
/* Remove `fp` from all three freepage indexes (by-start, by-end,
   by-size).  The by-size tree keeps exactly one representative node
   per distinct size; other freepages of that size live in the
   representative's `same_size` tree (keyed by start address), so
   removal must handle both the representative and the
   non-representative cases. */
static void remove_freepage(SectorFreepage *fp)
{
  /* Remove fp from freelists: */
  sector_freepage_by_start = splay_delete(fp->start, sector_freepage_by_start);
  sector_freepage_by_end = splay_delete(fp->end, sector_freepage_by_end);
  /* splay() (not splay_delete()) brings the representative for
     fp->size to the root so we can test whether fp is it: */
  sector_freepage_by_size = splay(fp->size, sector_freepage_by_size);
  if (TREE_FP(sector_freepage_by_size) == fp) {
    /* This was the representative for its size; remove it. */
    sector_freepage_by_size = splay_delete(fp->size, sector_freepage_by_size);
    if (fp->same_size) {
      /* Promote one of the remaining same-size freepages to be the
         new representative, carrying over the rest of the tree: */
      SectorFreepage *same;
      same = TREE_FP(fp->same_size);
      same->same_size = splay_delete(same->start, fp->same_size);
      sector_freepage_by_size = splay_insert(same->size, &same->by_size, sector_freepage_by_size);
    }
  } else {
    /* Not the top-level representative; remove it from the representative's
       same_size tree */
    SectorFreepage *same;
    same = TREE_FP(sector_freepage_by_size);
    same->same_size = splay_delete(fp->start, same->same_size);
  }
}
/* Insert `naya` into all three freepage indexes.  If some freepage
   of the same size is already the by-size representative, `naya` is
   instead hung off that representative's `same_size` tree (keyed by
   start address); otherwise `naya` becomes the representative with
   an empty `same_size` tree. */
static void add_freepage(SectorFreepage *naya)
{
  naya->by_start.data = (void *)naya;
  sector_freepage_by_start = splay_insert(naya->start, &naya->by_start, sector_freepage_by_start);
  naya->by_end.data = (void *)naya;
  sector_freepage_by_end = splay_insert(naya->end, &naya->by_end, sector_freepage_by_end);
  naya->by_size.data = (void *)naya;
  sector_freepage_by_size = splay_insert(naya->size, &naya->by_size, sector_freepage_by_size);
  /* splay_insert leaves the node for this key at the root; if it is
     not `naya`, the size was already present: */
  if (TREE_FP(sector_freepage_by_size) != naya) {
    /* This size was already in the tree; add it to the next_size list, instead */
    SectorFreepage *already = TREE_FP(sector_freepage_by_size);
    naya->by_start_per_size.data = (void *)naya;
    already->same_size = splay_insert(naya->start, &naya->by_start_per_size, already->same_size);
  } else
    naya->same_size = NULL;
}
#endif /* !RELEASE_UNUSED_SECTORS */
#define TABLE_HI_SHIFT LOG_SECTOR_SEGMENT_SIZE
#define TABLE_LO_MASK (SECTOR_SEGMENT_SIZE-1)
#define EACH_TABLE_COUNT (1 << (LOG_SECTOR_SEGMENT_SIZE - LOG_PTR_SIZE))
/* A named allocation set: a group of common blocks and chunks that
   share atomicity/collectability flags and trace/finalize callbacks
   (see the terminology comment above). */
typedef struct GC_Set {
  short atomic, uncollectable;  /* object-kind flags for this set */
#if ALLOW_SET_LOCKING
  short locked;                 /* non-zero => collections locked out for this set */
#endif
  char *name;                   /* set name (used in debugging dumps) */
  BlockOfMemory **blocks;       /* block chains (first half of a common[] array) */
  BlockOfMemory **block_ends;   /* chain tails (second half of the same array) */
  MemoryChunk **othersptr;      /* points to this set's chunk list */
#if DUMP_BLOCK_COUNTS
  uintptr_t total;              /* byte total reported by GC_dump */
#endif
#if ALLOW_TRACE_COUNT
  GC_count_tracer count_tracer; /* collection-based trace-count callback */
#endif
#if ALLOW_TRACE_PATH
  GC_path_tracer path_tracer;   /* collection-based trace-path callback */
#endif
#if ALLOW_TRACE_COUNT || ALLOW_TRACE_PATH
  GC_trace_init trace_init;     /* tracing setup hook */
  GC_trace_done trace_done;     /* tracing teardown hook */
#endif
#if KEEP_SET_NO || KEEP_CHUNK_SET_NO
  int no;                       /* this set's index (see common_sets) */
#endif
#if ALLOW_SET_FINALIZER
  GC_set_elem_finalizer finalizer; /* per-set custom "de-allocation" callback */
#endif
} GC_Set;
typedef struct GC_SetWithOthers {
GC_Set c;
MemoryChunk *others;
} GC_SetWithOthers;
static GC_Set **common_sets;
static int num_common_sets;
static BlockOfMemory *common[2 * NUM_COMMON_SIZE]; /* second half is `ends' array */
static BlockOfMemory *atomic_common[2 * NUM_COMMON_SIZE];
static BlockOfMemory *uncollectable_common[2 * NUM_COMMON_SIZE];
static BlockOfMemory *uncollectable_atomic_common[2 * NUM_COMMON_SIZE];
static MemoryChunk *others, *atomic_others;
static MemoryChunk *uncollectable_others, *uncollectable_atomic_others;
static int *common_positionses[NUM_COMMON_SIZE];
#if PROVIDE_MALLOC_AND_FREE
static BlockOfMemory *sys_malloc[2 * NUM_COMMON_SIZE];
static MemoryChunk *sys_malloc_others;
#endif
#define do_malloc_ATOMIC 0x1
#define do_malloc_UNCOLLECTABLE 0x2
#if NO_ATOMIC
# define do_malloc_ATOMIC_UNLESS_DISABLED 0
#else
# define do_malloc_ATOMIC_UNLESS_DISABLED do_malloc_ATOMIC
#endif
int GC_dl_entries;
int GC_fo_entries;
int GC_dont_gc;
void (*GC_push_last_roots)(void);
void (*GC_push_last_roots_again)(void);
enum {
dl_normal,
dl_restored,
dl_late
};
typedef struct DisappearingLink {
short kind;
void *watch;
void **disappear;
void *saved_value;
struct DisappearingLink *prev, *next;
} DisappearingLink;
typedef struct Finalizer {
union {
void *watch; /* pointer to finalized block; used after queued */
int pos; /* position within common block; used before queued */
} u;
short eager_level;
short ignore_self;
void *data;
void (*f)(void *p, void *data);
struct Finalizer *prev, *next; /* also not needed for chunks */
} Finalizer;
static DisappearingLink *disappearing, *late_disappearing;
static Finalizer *queued_finalizers, *last_queued_finalizer;
static int num_queued_finalizers;
static uintptr_t sector_low_plausible, sector_high_plausible;
static uintptr_t low_plausible, high_plausible;
void *GC_stackbottom;
static intptr_t mem_use, mem_limit = FIRST_GC_LIMIT;
#if USE_GC_FREE_SPACE_DIVISOR
int GC_free_space_divisor = 4;
#endif
static intptr_t mem_real_use, mem_uncollectable_use;
static intptr_t sector_mem_use, sector_admin_mem_use, sector_free_mem_use;
static intptr_t manage_mem_use, manage_real_mem_use;
static intptr_t collect_mem_use;
static intptr_t num_sector_allocs, num_sector_frees;
#if ALLOW_TRACE_COUNT
static intptr_t mem_traced;
#endif
static intptr_t num_chunks;
static intptr_t num_blocks;
GC_collect_start_callback_Proc GC_collect_start_callback;
GC_collect_end_callback_Proc GC_collect_end_callback;
void (*GC_custom_finalize)(void);
/* Install `func` as the callback invoked when a collection starts,
   returning the previously installed callback. */
GC_collect_start_callback_Proc GC_set_collect_start_callback(GC_collect_start_callback_Proc func) {
  GC_collect_start_callback_Proc prev = GC_collect_start_callback;
  GC_collect_start_callback = func;
  return prev;
}
/* Install `func` as the callback invoked when a collection finishes,
   returning the previously installed callback. */
GC_collect_end_callback_Proc GC_set_collect_end_callback(GC_collect_end_callback_Proc func) {
  GC_collect_end_callback_Proc prev = GC_collect_end_callback;
  GC_collect_end_callback = func;
  return prev;
}
static intptr_t roots_count;
static intptr_t roots_size;
static uintptr_t *roots;
#if STAMP_AND_REMEMBER_SOURCE
static intptr_t stamp_clock = 0;
#endif
static intptr_t *size_index_map; /* (1/PTR_SIZE)th of requested size to alloc index */
static intptr_t *size_map; /* alloc index to alloc size */
#if CHECK_COLLECTING
static int collecting_now;
#endif
#if FPRINTF_USE_PRIM_STRINGOUT
static void sgc_fprintf(int, const char *, ...);
# define FPRINTF sgc_fprintf
# define STDERR 0
#else
# define FPRINTF fprintf
# define STDERR stderr
#endif
#if CHECK_FREES
/* Report a bad or redundant free.  `msg` is passed as an argument to
   a "%s" format rather than as the format string itself: the original
   FPRINTF(STDERR, msg) would misinterpret any '%' in the message as a
   conversion specification (undefined behavior / format-string bug). */
static void free_error(const char *msg)
{
  FPRINTF(STDERR, "%s", msg);
}
#endif
/*************************************************************/
/* Page mapping: */
#ifdef SIXTY_FOUR_BIT_INTEGERS
static SectorPage ***sector_pagetablesss[1 << 16];
# define DECL_SECTOR_PAGETABLES SectorPage **sector_pagetables;
# define GET_SECTOR_PAGETABLES(p) sector_pagetables = create_sector_pagetables(p)
# define FIND_SECTOR_PAGETABLES(p) sector_pagetables = get_sector_pagetables(p)
static void *malloc_plain_sector(int count);
/* 64-bit only: find -- creating levels on demand -- the SectorPage
   page-table array covering address `p`.  Bits 63..48 of `p` index
   the top-level sector_pagetablesss array; bits 47..32 index the
   mid-level array; the low 32 bits are later decoded with
   SECTOR_LOOKUP_PAGETABLE/SECTOR_LOOKUP_PAGEPOS.  New levels are
   carved out of fresh sectors and zero-filled. */
inline static SectorPage **create_sector_pagetables(uintptr_t p) {
  uintptr_t pos;
  SectorPage ***sector_pagetabless, **sector_pagetables;
  pos = ((uintptr_t)p >> 48) & ((1 << 16) - 1);
  sector_pagetabless = sector_pagetablesss[pos];
  if (!sector_pagetabless) {
    /* Sectors needed to hold 2^16 mid-level pointers: */
    int c = (sizeof(SectorPage **) << 16) >> LOG_SECTOR_SEGMENT_SIZE;
    sector_pagetabless = (SectorPage ***)malloc_plain_sector(c);
    memset(sector_pagetabless, 0, c << LOG_SECTOR_SEGMENT_SIZE);
    sector_pagetablesss[pos] = sector_pagetabless;
    sector_admin_mem_use += (c << LOG_SECTOR_SEGMENT_SIZE);
  }
  pos = ((uintptr_t)p >> 32) & ((1 << 16) - 1);
  sector_pagetables = sector_pagetabless[pos];
  if (!sector_pagetables) {
    /* log2 of sectors needed for (1 << SECTOR_LOOKUP_PAGESETBITS)
       pointers, clamped to at least one whole sector: */
    int c = (SECTOR_LOOKUP_PAGESETBITS + LOG_PTR_SIZE) - LOG_SECTOR_SEGMENT_SIZE;
    if (c < 0)
      c = 0;
    c = 1 << c;
    sector_pagetables = (SectorPage **)malloc_plain_sector(c);
    memset(sector_pagetables, 0, c << LOG_SECTOR_SEGMENT_SIZE);
    sector_admin_mem_use += (c << LOG_SECTOR_SEGMENT_SIZE);
    sector_pagetabless[pos] = sector_pagetables;
  }
  return sector_pagetables;
}
/* 64-bit only: look up (without creating) the SectorPage page-table
   array covering address `p`; NULL when none exists for the high
   bits of `p`. */
inline static SectorPage **get_sector_pagetables(uintptr_t p) {
  SectorPage ***mid;
  mid = sector_pagetablesss[((uintptr_t)p >> 48) & ((1 << 16) - 1)];
  if (!mid)
    return NULL;
  return mid[((uintptr_t)p >> 32) & ((1 << 16) - 1)];
}
#else
static SectorPage **sector_pagetables;
# define DECL_SECTOR_PAGETABLES /**/
# define GET_SECTOR_PAGETABLES(p) /**/
# define FIND_SECTOR_PAGETABLES(p) /**/
#endif
/*************************************************************/
/*
The kinds of allocation:
malloc_sector = returns new SECTOR_SEGMENT_SIZE-aligned memory;
relies on nothing else; the memory blocks must
be explicitly freed with free_sector; all GC
allocation is performed via sectors
malloc_managed = malloc "atomic" block used by GC implementation
itself; no GCing should occur during the malloc;
the block is freed with free_managed
realloc_collect_temp = temporary structures used during gc;
no other allocation can take place
during gc, and all memory will be freed
when GC is done with free_collect_temp
*/
#if GET_MEM_VIA_SBRK
/* Allocate `count` SECTOR_SEGMENT_SIZE-aligned segments by extending
   the program break with sbrk().  Returns 0 on failure. */
static void *platform_plain_sector(int count)
{
  caddr_t cur_brk = (caddr_t)sbrk(0);
  intptr_t lsbs = (uintptr_t)cur_brk & TABLE_LO_MASK;
  void *result;
  /* If the current break is not segment-aligned, first push it up to
     the next segment boundary so the real allocation is aligned: */
  if (lsbs != 0) {
    if ((caddr_t)sbrk(SECTOR_SEGMENT_SIZE - lsbs) == (caddr_t)(-1))
      return 0;
  }
  result = (caddr_t)sbrk((count << LOG_SECTOR_SEGMENT_SIZE));
  if (result == (caddr_t)(-1))
    return 0;
  return result;
}
#endif
#if GET_MEM_VIA_MMAP
/* Allocate `count` SECTOR_SEGMENT_SIZE-aligned segments via mmap()
   (page alignment from mmap() satisfies the 4K segment alignment,
   since LOG_SECTOR_SEGMENT_SIZE is 12).  Returns NULL on failure.
   Note: mmap() reports failure as MAP_FAILED ((void *)-1), not NULL;
   that is normalized here so callers can use the same zero test as
   with the sbrk-based allocator. */
static void *platform_plain_sector(int count)
{
  void *p;
#ifdef MAP_ANON
  p = mmap(NULL, count << LOG_SECTOR_SEGMENT_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
#else
  /* No anonymous mappings: map /dev/zero instead.  The descriptor is
     opened once and cached; 0 serves as the "not yet opened"
     sentinel (safe as long as fd 0 is already taken by stdin). */
  static int fd;
  if (!fd) {
    fd = open("/dev/zero", O_RDWR);
  }
  p = mmap(0, count << LOG_SECTOR_SEGMENT_SIZE, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0);
#endif
  return (p == MAP_FAILED) ? NULL : p;
}
/* Return `count` segments starting at `p` to the OS. */
static void free_plain_sector(void *p, int count)
{
  size_t len = (size_t)count << LOG_SECTOR_SEGMENT_SIZE;
  munmap(p, len);
}
#endif
#if GET_MEM_VIA_VIRTUAL_ALLOC
/* Allocate `count` SECTOR_SEGMENT_SIZE-aligned segments with
   VirtualAlloc().  Small requests are carved out of a preallocated
   SECTOR_SEGMENT_GROUP_SIZE-segment region so that each request does
   not burn a whole allocation granule of address space. */
static void *platform_plain_sector(int count)
{
  /* Since 64k blocks are used up by each call to VirtualAlloc,
     use roughly the same trick as in the malloc-based alloc to
     avoid wasting the address space. */
  static int prealloced;
  static void *preallocptr;
  if (!prealloced && (count < SECTOR_SEGMENT_GROUP_SIZE)) {
    prealloced = SECTOR_SEGMENT_GROUP_SIZE;
    preallocptr = VirtualAlloc(NULL, prealloced << LOG_SECTOR_SEGMENT_SIZE,
                               MEM_COMMIT | MEM_RESERVE,
                               PAGE_READWRITE);
    /* NOTE(review): if this VirtualAlloc fails, preallocptr is NULL
       while prealloced stays non-zero, so later small requests would
       hand out offsets from NULL — confirm intended failure handling. */
  }
  /* Serve small requests from the preallocated region: */
  if (count <= prealloced) {
    void *result = preallocptr;
    preallocptr = ((char *)preallocptr) + (count << LOG_SECTOR_SEGMENT_SIZE);
    prealloced -= count;
    return result;
  }
  /* Large (or post-preallocation) requests go straight to the OS: */
  return VirtualAlloc(NULL, count << LOG_SECTOR_SEGMENT_SIZE,
                      MEM_COMMIT | MEM_RESERVE,
                      PAGE_READWRITE);
}
#endif
#if !GET_MEM_VIA_SBRK && !GET_MEM_VIA_MMAP && !GET_MEM_VIA_VIRTUAL_ALLOC
# if PROVIDE_MALLOC_AND_FREE
>>
Error: you must pick a platform-specific allocation mechanism
to support malloc() and free()
<<
# endif
static void *platform_plain_sector(int count)
{
static int prealloced;
static void *preallocptr;
if (!prealloced) {
uintptr_t d;