/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1996 by Xerox Corporation. All rights reserved.
 * Copyright (c) 1998 by Silicon Graphics. All rights reserved.
 * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 */


# include "private/gc_priv.h"

# include <stdio.h>
# if !defined(MACOS) && !defined(MSWINCE)
# include <signal.h>
# include <sys/types.h>
# endif

/*
 * Separate free lists are maintained for different sized objects
 * up to MAXOBJSZ.
 * The call GC_allocobj(i,k) ensures that the freelist for
 * kind k objects of size i points to a non-empty
 * free list. It returns a pointer to the first entry on the free list.
 * In a single-threaded world, GC_allocobj may be called to allocate
 * an object of (small) size i as follows:
 *
 *            opp = &(GC_objfreelist[i]);
 *            if (*opp == 0) GC_allocobj(i, NORMAL);
 *            ptr = *opp;
 *            *opp = obj_link(ptr);
 *
 * Note that this is very fast if the free list is non-empty; it should
 * only involve the execution of 4 or 5 simple instructions.
 * All composite objects on freelists are cleared, except for
 * their first word.
 */
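
/*
 * A minimal sketch of the above fast path, packaged as a helper. This is
 * illustrative only and not part of the collector: the function name is
 * hypothetical, and it assumes a single-threaded build in which the lock
 * conventions described for GC_allocobj are satisfied.
 */
#if 0
  ptr_t GC_fast_alloc_example(word i)    /* hypothetical helper */
  {
      ptr_t * opp = &(GC_objfreelist[i]);
      ptr_t ptr;

      if (*opp == 0 && GC_allocobj(i, NORMAL) == 0) {
          return(0);            /* heap exhausted */
      }
      ptr = *opp;               /* first free-list entry */
      *opp = obj_link(ptr);     /* unlink it */
      return(ptr);
  }
#endif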

/*
 * The allocator uses GC_allochblk to allocate large chunks of objects.
 * These chunks all start on addresses which are multiples of
 * HBLKSZ. Each allocated chunk has an associated header,
 * which can be located quickly based on the address of the chunk.
 * (See headers.c for details.)
 * This makes it possible to check quickly whether an
 * arbitrary address corresponds to an object administered by the
 * allocator.
 */
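
/*
 * A sketch of that address-to-header mapping, under the assumption (true
 * in practice) that the block size is a power of two: rounding an address
 * down to its chunk start is a single mask, and the header is then found
 * via the lookup tables maintained in headers.c. Illustrative only; the
 * helper name is hypothetical.
 */
#if 0
  hdr * GC_lookup_hdr_example(ptr_t p)   /* hypothetical helper */
  {
      struct hblk * h = (struct hblk *)((word)p & ~(word)(HBLKSIZE - 1));
      /* On typical configurations HDR yields 0 (no header) for an     */
      /* address the allocator does not administer; see headers.c.     */
      return HDR(h);
  }
#endif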

word GC_non_gc_bytes = 0; /* Number of bytes not intended to be collected */

word GC_gc_no = 0;

#ifndef SMALL_CONFIG
  int GC_incremental = 0; /* By default, stop the world. */
#endif

int GC_parallel = FALSE; /* By default, parallel GC is off. */

int GC_full_freq = 19;     /* Every 20th collection is a full      */
                           /* collection, whether we need it       */
                           /* or not.                              */

GC_bool GC_need_full_gc = FALSE;
                           /* Need full GC due to heap growth.     */

#ifdef THREADS
  GC_bool GC_world_stopped = FALSE;
# define IF_THREADS(x) x
#else
# define IF_THREADS(x)
#endif

word GC_used_heap_size_after_full = 0;

char * GC_copyright[] =
{"Copyright 1988,1989 Hans-J. Boehm and Alan J. Demers ",
"Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved. ",
"Copyright (c) 1996-1998 by Silicon Graphics. All rights reserved. ",
"Copyright (c) 1999-2001 by Hewlett-Packard Company. All rights reserved. ",
"THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY",
" EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK.",
"See source code for details." };

# include "version.h"

#if defined(SAVE_CALL_CHAIN) && \
!(defined(REDIRECT_MALLOC) && defined(GC_HAVE_BUILTIN_BACKTRACE))
# define SAVE_CALL_CHAIN_IN_GC
    /* This is only safe if the call chain save mechanism won't end up */
    /* calling GC_malloc. The GNU C library documentation suggests */
    /* that backtrace doesn't use malloc, but at least the initial */
    /* call in some versions does seem to invoke the dynamic linker, */
    /* which uses malloc. */
#endif

/* some more variables */

extern signed_word GC_mem_found;   /* Number of reclaimed longwords   */
                                   /* after garbage collection.       */

GC_bool GC_dont_expand = 0;

word GC_free_space_divisor = 3;

extern GC_bool GC_collection_in_progress();
/* Collection is in progress, or was abandoned. */

int GC_never_stop_func GC_PROTO((void)) { return(0); }

unsigned long GC_time_limit = TIME_LIMIT;

CLOCK_TYPE GC_start_time;  /* Time at which we stopped world.     */
                           /* Used only in GC_timeout_stop_func.  */

int GC_n_attempts = 0;     /* Number of attempts at finishing     */
                           /* collection within GC_time_limit.    */

#if defined(SMALL_CONFIG) || defined(NO_CLOCK)
# define GC_timeout_stop_func GC_never_stop_func
#else
  int GC_timeout_stop_func GC_PROTO((void))
  {
    CLOCK_TYPE current_time;
    static unsigned count = 0;
    unsigned long time_diff;

    if ((count++ & 3) != 0) return(0);
    GET_TIME(current_time);
    time_diff = MS_TIME_DIFF(current_time,GC_start_time);
    if (time_diff >= GC_time_limit) {
#       ifdef CONDPRINT
          if (GC_print_stats) {
            GC_printf0("Abandoning stopped marking after ");
            GC_printf1("%lu msecs", (unsigned long)time_diff);
            GC_printf1(" (attempt %ld)\n", (unsigned long) GC_n_attempts);
          }
#       endif
        return(1);
    }
    return(0);
  }
#endif /* !SMALL_CONFIG */

/* Return the minimum number of words that must be allocated between */
/* collections to amortize the collection cost.                      */
static word min_words_allocd()
{
#   ifdef THREADS
      /* We punt, for now. */
      register signed_word stack_size = 10000;
#   else
      int dummy;
      register signed_word stack_size = (ptr_t)(&dummy) - GC_stackbottom;
#   endif
    word total_root_size;  /* includes double stack size,  */
                           /* since the stack is expensive */
                           /* to scan.                     */
    word scan_size;        /* Estimate of memory to be scanned */
                           /* during normal GC.                */

    if (stack_size < 0) stack_size = -stack_size;
    total_root_size = 2 * stack_size + GC_root_size;
    scan_size = BYTES_TO_WORDS(GC_heapsize - GC_large_free_bytes
                               + (GC_large_free_bytes >> 2)
                                 /* use a bit more of large empty heap */
                               + total_root_size);
    if (TRUE_INCREMENTAL) {
        return scan_size / (2 * GC_free_space_divisor);
    } else {
        return scan_size / GC_free_space_divisor;
    }
}
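
/* A worked example with round numbers (illustrative only): a 16 MB heap  */
/* with 4 MB of large free blocks, a 64 KB stack, and 1 MB of other roots */
/* gives total_root_size = 2*64K + 1M ~= 1.1 MB, so scan_size covers      */
/* 16M - 4M + 1M + 1.1M ~= 14.1 MB worth of words. With the default       */
/* GC_free_space_divisor of 3, words amounting to roughly 4.7 MB must be  */
/* allocated between collections; under true incremental collection,      */
/* half that.                                                             */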

/* Return the number of words allocated, adjusted for explicit storage */
/* management, etc.  This number is used in deciding when to trigger   */
/* collections.                                                        */
word GC_adj_words_allocd()
{
    register signed_word result;
    register signed_word expl_managed =
        BYTES_TO_WORDS((long)GC_non_gc_bytes
                       - (long)GC_non_gc_bytes_at_gc);

    /* Don't count what was explicitly freed, or newly allocated for */
    /* explicit management.  Note that deallocating an explicitly    */
    /* managed object should not alter result, assuming the client   */
    /* is playing by the rules.                                      */
    result = (signed_word)GC_words_allocd
             - (signed_word)GC_mem_freed
             + (signed_word)GC_finalizer_mem_freed - expl_managed;
    if (result > (signed_word)GC_words_allocd) {
        result = GC_words_allocd;
        /* probably client bug or unfortunate scheduling */
    }
    result += GC_words_finalized;
        /* We count objects enqueued for finalization as though they  */
        /* had been reallocated this round.  Finalization is user     */
        /* visible progress.  And if we don't count this, we have     */
        /* stability problems for programs that finalize all objects. */
    if ((GC_words_wasted >> 3) < result)
        result += GC_words_wasted;
        /* This doesn't reflect useful work.  But if there is lots of */
        /* new fragmentation, the same is probably true of the heap,  */
        /* and the collection will be correspondingly cheaper.        */
    if (result < (signed_word)(GC_words_allocd >> 3)) {
        /* Always count at least 1/8 of the allocations.  We don't want */
        /* to collect too infrequently, since that would inhibit        */
        /* coalescing of free storage blocks.                           */
        /* This also makes us partially robust against client bugs.     */
        return(GC_words_allocd >> 3);
    } else {
        return(result);
    }
}
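
/* For instance (illustrative numbers): with GC_words_allocd = 1000,     */
/* GC_mem_freed = 300, and no finalization or explicit-management        */
/* activity, result starts at 700; that exceeds GC_words_allocd >> 3     */
/* = 125, so 700 (plus any small GC_words_wasted term) is what           */
/* GC_should_collect() compares against min_words_allocd().              */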


/* Clear up a few frames worth of garbage left at the top of the stack. */
/* This is used to prevent us from accidentally treating garbage left   */
/* on the stack by other parts of the collector as roots.  This         */
/* differs from the code in misc.c, which actually tries to keep the    */
/* stack clear of long-lived, client-generated garbage.                 */
void GC_clear_a_few_frames()
{
# define NWORDS 64
    word frames[NWORDS];
    /* Some compilers will warn that frames was set but never used. */
    /* That's the whole idea ... */
    register int i;
    
    for (i = 0; i < NWORDS; i++) frames[i] = 0;
}

/* Heap size at which we need a collection to avoid expanding past */
/* limits used by blacklisting. */
static word GC_collect_at_heapsize = (word)(-1);

/* Have we allocated enough to amortize a collection? */
GC_bool GC_should_collect()
{
    return(GC_adj_words_allocd() >= min_words_allocd()
           || GC_heapsize >= GC_collect_at_heapsize);
}


void GC_notify_full_gc()
{
    if (GC_start_call_back != (void (*) GC_PROTO((void)))0) {
        (*GC_start_call_back)();
    }
}

GC_bool GC_is_full_gc = FALSE;

/*
 * Initiate a garbage collection if appropriate.
 * Choose judiciously between partial, full, and stop-world collections.
 * Assumes lock held, signals disabled.
 */
void GC_maybe_gc()
{
    static int n_partial_gcs = 0;

    if (GC_should_collect()) {
        if (!GC_incremental) {
            GC_gcollect_inner();
            n_partial_gcs = 0;
            return;
        } else {
#         ifdef PARALLEL_MARK
            GC_wait_for_reclaim();
#         endif
          if (GC_need_full_gc || n_partial_gcs >= GC_full_freq) {
#           ifdef CONDPRINT
              if (GC_print_stats) {
                GC_printf2(
                  "***>Full mark for collection %lu after %ld allocd bytes\n",
                  (unsigned long) GC_gc_no+1,
                  (long)WORDS_TO_BYTES(GC_words_allocd));
              }
#           endif
            GC_promote_black_lists();
            (void)GC_reclaim_all((GC_stop_func)0, TRUE);
            GC_clear_marks();
            n_partial_gcs = 0;
            GC_notify_full_gc();
            GC_is_full_gc = TRUE;
          } else {
            n_partial_gcs++;
          }
        }
        /* We try to mark with the world stopped. */
        /* If we run out of time, this turns into */
        /* incremental marking.                   */
#       ifndef NO_CLOCK
          if (GC_time_limit != GC_TIME_UNLIMITED) { GET_TIME(GC_start_time); }
#       endif
        if (GC_stopped_mark(GC_time_limit == GC_TIME_UNLIMITED?
                            GC_never_stop_func : GC_timeout_stop_func)) {
#           ifdef SAVE_CALL_CHAIN_IN_GC
              GC_save_callers(GC_last_stack);
#           endif
            GC_finish_collection();
        } else {
            if (!GC_is_full_gc) {
                /* Count this as the first attempt */
                GC_n_attempts++;
            }
        }
    }
}
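
/* With the default GC_full_freq of 19, an incremental configuration thus */
/* runs 19 partial collections and then a full one, unless heap growth    */
/* sets GC_need_full_gc and forces the full collection earlier.           */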


/*
 * Stop-the-world garbage collection.  Assumes lock held, signals disabled.
 * If stop_func is not GC_never_stop_func, then abort if stop_func returns TRUE.
 * Return TRUE if we successfully completed the collection.
 */
GC_bool GC_try_to_collect_inner(stop_func)
GC_stop_func stop_func;
{
#   ifdef CONDPRINT
      CLOCK_TYPE start_time, current_time;
#   endif
    if (GC_dont_gc) return FALSE;

    if (GC_notify_event)
        GC_notify_event(GC_EVENT_START);

    if (GC_incremental && GC_collection_in_progress()) {
#     ifdef CONDPRINT
        if (GC_print_stats) {
            GC_printf0(
                "GC_try_to_collect_inner: finishing collection in progress\n");
        }
#     endif /* CONDPRINT */
      /* Just finish collection already in progress. */
        while(GC_collection_in_progress()) {
            if (stop_func()) return(FALSE);
            GC_collect_a_little_inner(1);
        }
    }
    if (stop_func == GC_never_stop_func) GC_notify_full_gc();
#   ifdef CONDPRINT
      if (GC_print_stats) {
        GET_TIME(start_time);
        GC_printf2(
          "Initiating full world-stop collection %lu after %ld allocd bytes\n",
          (unsigned long) GC_gc_no+1,
          (long)WORDS_TO_BYTES(GC_words_allocd));
      }
#   endif
    GC_promote_black_lists();
    /* Make sure all blocks have been reclaimed, so sweep routines */
    /* don't see cleared mark bits.                                */
    /* If we're guaranteed to finish, then this is unnecessary.    */
    /* In the find_leak case, we have to finish to guarantee that  */
    /* previously unmarked objects are not reported as leaks.      */
#   ifdef PARALLEL_MARK
      GC_wait_for_reclaim();
#   endif
    if ((GC_find_leak || stop_func != GC_never_stop_func)
        && !GC_reclaim_all(stop_func, FALSE)) {
        /* Aborted.  So far everything is still consistent. */
        return(FALSE);
    }
    GC_invalidate_mark_state();  /* Flush mark stack. */
    GC_clear_marks();
#   ifdef SAVE_CALL_CHAIN_IN_GC
      GC_save_callers(GC_last_stack);
#   endif
    GC_is_full_gc = TRUE;
    if (!GC_stopped_mark(stop_func)) {
      if (!GC_incremental) {
        /* We're partially done and have no way to complete or use */
        /* current work.  Reestablish invariants as cheaply as     */
        /* possible.                                               */
        GC_invalidate_mark_state();
        GC_unpromote_black_lists();
      } /* else we claim the world is already still consistent.  We'll */
        /* finish incrementally.                                       */
      return(FALSE);
    }
    GC_finish_collection();
#   if defined(CONDPRINT)
      if (GC_print_stats) {
        GET_TIME(current_time);
        GC_printf1("Complete collection took %lu msecs\n",
                   MS_TIME_DIFF(current_time,start_time));
      }
#   endif
    if (GC_notify_event)
        GC_notify_event(GC_EVENT_END);

    return(TRUE);
}



/*
 * Perform n units of garbage collection work.  A unit is intended to touch
 * roughly GC_RATE pages.  Every once in a while, we do more than that.
 * This needs to be a fairly large number with our current incremental
 * GC strategy, since otherwise we allocate too much during GC, and the
 * cleanup gets expensive.
 */
# define GC_RATE 10
# define MAX_PRIOR_ATTEMPTS 1
  /* Maximum number of prior attempts at world stop marking.      */
  /* A value of 1 means that we finish the second time, no matter */
  /* how long it takes.  Doesn't count the initial root scan      */
  /* for a full GC.                                               */

int GC_deficit = 0;  /* The number of extra calls to GC_mark_some */
                     /* that we have made.                        */

void GC_collect_a_little_inner(n)
int n;
{
    register int i;

    if (GC_dont_gc) return;
    if (GC_incremental && GC_collection_in_progress()) {
        for (i = GC_deficit; i < GC_RATE*n; i++) {
            if (GC_mark_some((ptr_t)0)) {
                /* Need to finish a collection */
#               ifdef SAVE_CALL_CHAIN_IN_GC
                  GC_save_callers(GC_last_stack);
#               endif
#               ifdef PARALLEL_MARK
                  GC_wait_for_reclaim();
#               endif
                if (GC_n_attempts < MAX_PRIOR_ATTEMPTS
                    && GC_time_limit != GC_TIME_UNLIMITED) {
                    GET_TIME(GC_start_time);
                    if (!GC_stopped_mark(GC_timeout_stop_func)) {
                        GC_n_attempts++;
                        break;
                    }
                } else {
                    (void)GC_stopped_mark(GC_never_stop_func);
                }
                GC_finish_collection();
                break;
            }
        }
        if (GC_deficit > 0) GC_deficit -= GC_RATE*n;
        if (GC_deficit < 0) GC_deficit = 0;
    } else {
        GC_maybe_gc();
    }
}

int GC_collect_a_little GC_PROTO(())
{
    int result;
    DCL_LOCK_STATE;

    DISABLE_SIGNALS();
    LOCK();
    GC_collect_a_little_inner(1);
    result = (int)GC_collection_in_progress();
    UNLOCK();
    ENABLE_SIGNALS();
    if (!result && GC_debugging_started) GC_print_all_smashed();
    return(result);
}
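
/*
 * A hedged usage sketch (client code, not part of the collector): an
 * interactive program with incremental collection enabled can donate idle
 * time to marking.  GC_collect_a_little is the public entry point declared
 * in gc.h and returns nonzero while collection work remains; the idle-hook
 * name below is hypothetical.
 */
#if 0
  #include "gc.h"

  void on_idle(void)             /* hypothetical idle callback */
  {
      /* Do one small, bounded unit of GC work per idle tick. */
      if (GC_collect_a_little()) {
          /* More work pending; expect to be called again. */
      }
  }
#endif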

/*
 * Assumes lock is held, signals are disabled.
 * We stop the world.
 * If stop_func() ever returns TRUE, we may fail and return FALSE.
 * Increment GC_gc_no if we succeed.
 */
GC_bool GC_stopped_mark(stop_func)
GC_stop_func stop_func;
{
    register int i;
    int dummy;
#   if defined(PRINTTIMES) || defined(CONDPRINT)
      CLOCK_TYPE start_time, current_time;
#   endif
#   ifdef PRINTTIMES
      GET_TIME(start_time);
#   endif
#   if defined(CONDPRINT) && !defined(PRINTTIMES)
      if (GC_print_stats) GET_TIME(start_time);
#   endif
#   if defined(REGISTER_LIBRARIES_EARLY)
      GC_cond_register_dynamic_libraries();
#   endif
    STOP_WORLD();
    IF_THREADS(GC_world_stopped = TRUE);

    if (GC_notify_event)
        GC_notify_event(GC_EVENT_MARK_START);
#   ifdef CONDPRINT
      if (GC_print_stats) {
        GC_printf1("--> Marking for collection %lu ",
                   (unsigned long) GC_gc_no + 1);
        GC_printf2("after %lu allocd bytes + %lu wasted bytes\n",
                   (unsigned long) WORDS_TO_BYTES(GC_words_allocd),
                   (unsigned long) WORDS_TO_BYTES(GC_words_wasted));
      }
#   endif
#   ifdef MAKE_BACK_GRAPH
      if (GC_print_back_height) {
        GC_build_back_graph();
      }
#   endif

    /* Mark from all roots. */
        /* Minimize junk left in my registers and on the stack. */
            GC_clear_a_few_frames();
            GC_noop(0,0,0,0,0,0);
        GC_initiate_gc();
        for (i = 0;;i++) {
            if ((*stop_func)()) {
#               ifdef CONDPRINT
                  if (GC_print_stats) {
                    GC_printf0("Abandoned stopped marking after ");
                    GC_printf1("%lu iterations\n", (unsigned long)i);
                  }
#               endif
                GC_deficit = i;  /* Give the mutator a chance. */
                IF_THREADS(GC_world_stopped = FALSE);
                START_WORLD();
                return(FALSE);
            }
            if (GC_mark_some((ptr_t)(&dummy))) break;
        }

    GC_gc_no++;
#   ifdef PRINTSTATS
      GC_printf2("Collection %lu reclaimed %ld bytes",
                 (unsigned long) GC_gc_no - 1,
                 (long)WORDS_TO_BYTES(GC_mem_found));
#   else
#     ifdef CONDPRINT
        if (GC_print_stats) {
          GC_printf1("Collection %lu finished", (unsigned long) GC_gc_no - 1);
        }
#     endif
#   endif /* !PRINTSTATS */
#   ifdef CONDPRINT
      if (GC_print_stats) {
        GC_printf1(" ---> heapsize = %lu bytes\n",
                   (unsigned long) GC_heapsize);
        /* Printf arguments may be pushed in funny places.  Clear the */
        /* space.                                                     */
        GC_printf0("");
      }
#   endif /* CONDPRINT */

    /* Check all debugged objects for consistency. */
        if (GC_debugging_started) {
            (*GC_check_heap)();
        }

    if (GC_notify_event)
        GC_notify_event(GC_EVENT_MARK_END);

    IF_THREADS(GC_world_stopped = FALSE);
    START_WORLD();
#   ifdef PRINTTIMES
      GET_TIME(current_time);
      GC_printf1("World-stopped marking took %lu msecs\n",
                 MS_TIME_DIFF(current_time,start_time));
#   else
#     ifdef CONDPRINT
        if (GC_print_stats) {
          GET_TIME(current_time);
          GC_printf1("World-stopped marking took %lu msecs\n",
                     MS_TIME_DIFF(current_time,start_time));
        }
#     endif
#   endif
    return(TRUE);
}

/* Set all mark bits for the free list whose first entry is q. */
#ifdef __STDC__
  void GC_set_fl_marks(ptr_t q)
#else
  void GC_set_fl_marks(q)
  ptr_t q;
#endif
{
   ptr_t p;
   struct hblk * h, * last_h = 0;
   hdr *hhdr;
   int word_no;

   for (p = q; p != 0; p = obj_link(p)) {
       h = HBLKPTR(p);
       if (h != last_h) {
           last_h = h;
           hhdr = HDR(h);
       }
       word_no = (((word *)p) - ((word *)h));
       set_mark_bit_from_hdr(hhdr, word_no);
   }
}

/* Clear all mark bits for the free list whose first entry is q. */
/* Decrement GC_mem_found by number of words on free list.       */
#ifdef __STDC__
  void GC_clear_fl_marks(ptr_t q)
#else
  void GC_clear_fl_marks(q)
  ptr_t q;
#endif
{
   ptr_t p;
   struct hblk * h, * last_h = 0;
   hdr *hhdr;
   int word_no;

   for (p = q; p != 0; p = obj_link(p)) {
       h = HBLKPTR(p);
       if (h != last_h) {
           last_h = h;
           hhdr = HDR(h);
       }
       word_no = (((word *)p) - ((word *)h));
       clear_mark_bit_from_hdr(hhdr, word_no);
#      ifdef GATHERSTATS
         GC_mem_found -= hhdr -> hb_sz;
#      endif
   }
}

void (*GC_notify_event) GC_PROTO((GCEventType e));
void (*GC_on_heap_resize) GC_PROTO((size_t new_size));

/* Finish up a collection.  Assumes lock is held, signals are disabled, */
/* but the world is otherwise running.                                  */
void GC_finish_collection()
{
#   ifdef PRINTTIMES
      CLOCK_TYPE start_time;
      CLOCK_TYPE finalize_time;
      CLOCK_TYPE done_time;

      GET_TIME(start_time);
      finalize_time = start_time;
#   endif

    if (GC_notify_event)
        GC_notify_event(GC_EVENT_RECLAIM_START);

#   ifdef GATHERSTATS
      GC_mem_found = 0;
#   endif
#   if defined(LINUX) && defined(__ELF__) && !defined(SMALL_CONFIG)
      if (getenv("GC_PRINT_ADDRESS_MAP") != 0) {
          GC_print_address_map();
      }
#   endif

    COND_DUMP;
    if (GC_find_leak) {
      /* Mark all objects on the free list.  All objects should be */
      /* marked when we're done.                                   */
        {
          register word size;  /* current object size */
          int kind;
          ptr_t q;

          for (kind = 0; kind < GC_n_kinds; kind++) {
            for (size = 1; size <= MAXOBJSZ; size++) {
              q = GC_obj_kinds[kind].ok_freelist[size];
              if (q != 0) GC_set_fl_marks(q);
            }
          }
        }
        GC_start_reclaim(TRUE);
          /* The above just checks; it doesn't really reclaim anything. */
    }

    GC_finalize();
#   ifdef STUBBORN_ALLOC
      GC_clean_changing_list();
#   endif

#   ifdef PRINTTIMES
      GET_TIME(finalize_time);
#   endif

    if (GC_print_back_height) {
#     ifdef MAKE_BACK_GRAPH
        GC_traverse_back_graph();
#     else
#       ifndef SMALL_CONFIG
          GC_err_printf0("Back height not available: "
                         "Rebuild collector with -DMAKE_BACK_GRAPH\n");
#       endif
#     endif
    }

    /* Clear free list mark bits, in case they got accidentally marked   */
    /* (or GC_find_leak is set and they were intentionally marked).      */
    /* Also subtract memory remaining from GC_mem_found count.           */
    /* Note that composite objects on free list are cleared.             */
    /* Thus accidentally marking a free list is not a problem; only      */
    /* objects on the list itself will be marked, and that's fixed here. */
      {
        register word size;  /* current object size       */
        register ptr_t q;    /* pointer to current object */
        int kind;

        for (kind = 0; kind < GC_n_kinds; kind++) {
          for (size = 1; size <= MAXOBJSZ; size++) {
            q = GC_obj_kinds[kind].ok_freelist[size];
            if (q != 0) GC_clear_fl_marks(q);
          }
        }
      }

#   ifdef PRINTSTATS
      GC_printf1("Bytes recovered before sweep - f.l. count = %ld\n",
                 (long)WORDS_TO_BYTES(GC_mem_found));
#   endif
    /* Reconstruct free lists to contain everything not marked. */
        GC_start_reclaim(FALSE);
        if (GC_is_full_gc) {
            GC_used_heap_size_after_full = USED_HEAP_SIZE;
            GC_need_full_gc = FALSE;
        } else {
            GC_need_full_gc =
                BYTES_TO_WORDS(USED_HEAP_SIZE - GC_used_heap_size_after_full)
                > min_words_allocd();
        }

#   ifdef PRINTSTATS
      GC_printf2(
        "Immediately reclaimed %ld bytes in heap of size %lu bytes",
        (long)WORDS_TO_BYTES(GC_mem_found),
        (unsigned long)GC_heapsize);
#     ifdef USE_MUNMAP
        GC_printf1(" (%lu unmapped)", GC_unmapped_bytes);
#     endif
      GC_printf2(
        "\n%lu (atomic) + %lu (composite) collectable bytes in use\n",
        (unsigned long)WORDS_TO_BYTES(GC_atomic_in_use),
        (unsigned long)WORDS_TO_BYTES(GC_composite_in_use));
#   endif

    GC_n_attempts = 0;
    GC_is_full_gc = FALSE;
    /* Reset or increment counters for next cycle. */
      GC_words_allocd_before_gc += GC_words_allocd;
      GC_non_gc_bytes_at_gc = GC_non_gc_bytes;
      GC_words_allocd = 0;
      GC_words_wasted = 0;
      GC_mem_freed = 0;
      GC_finalizer_mem_freed = 0;
#   ifdef USE_MUNMAP
      GC_unmap_old();
#   endif

    if (GC_notify_event)
        GC_notify_event(GC_EVENT_RECLAIM_END);
#   ifdef PRINTTIMES
      GET_TIME(done_time);
      GC_printf2("Finalize + initiate sweep took %lu + %lu msecs\n",
                 MS_TIME_DIFF(finalize_time,start_time),
                 MS_TIME_DIFF(done_time,finalize_time));
#   endif
}

/* Externally callable routine to invoke full, stop-world collection */
# if defined(__STDC__) || defined(__cplusplus)
    int GC_try_to_collect(GC_stop_func stop_func)
# else
    int GC_try_to_collect(stop_func)
    GC_stop_func stop_func;
# endif
{
    int result;
    DCL_LOCK_STATE;
    
    if (GC_debugging_started) GC_print_all_smashed();
    GC_INVOKE_FINALIZERS();
    DISABLE_SIGNALS();
    LOCK();
    ENTER_GC();
    if (!GC_is_initialized) GC_init_inner();
    /* Minimize junk left in my registers */
      GC_noop(0,0,0,0,0,0);
    result = (int)GC_try_to_collect_inner(stop_func);
    EXIT_GC();
    UNLOCK();
    ENABLE_SIGNALS();
    if(result) {
        if (GC_debugging_started) GC_print_all_smashed();
        GC_INVOKE_FINALIZERS();
    }
    return(result);
}

void GC_gcollect GC_PROTO(())
{
    (void)GC_try_to_collect(GC_never_stop_func);
    if (GC_have_errors) GC_print_all_errors();
}
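
/*
 * A hedged usage sketch (client code): bounding the pause of an explicit
 * full collection with a custom stop function.  GC_try_to_collect and
 * GC_stop_func are public API from gc.h; the coarse one-second deadline
 * below is illustrative only.
 */
#if 0
  #include "gc.h"
  #include <time.h>

  static time_t deadline;

  static int past_deadline(void)
  {
      return time(NULL) > deadline;  /* nonzero => abandon the collection */
  }

  void bounded_full_gc(void)
  {
      deadline = time(NULL) + 1;     /* allow roughly one second */
      if (!GC_try_to_collect(past_deadline)) {
          /* Abandoned; the heap is still consistent.  Retry later. */
      }
  }
#endif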

word GC_n_heap_sects = 0; /* Number of sections currently in heap. */

/*
 * Use the chunk of memory starting at p of size bytes as part of the heap.
 * Assumes p is HBLKSIZE aligned, and bytes is a multiple of HBLKSIZE.
 */
void GC_add_to_heap(p, bytes)
struct hblk *p;
word bytes;
{
    word words;
    hdr * phdr;
    
    if (GC_n_heap_sects >= MAX_HEAP_SECTS) {
     ABORT("Too many heap sections: Increase MAXHINCR or MAX_HEAP_SECTS");
    }
    phdr = GC_install_header(p);
    if (0 == phdr) {
     /* This is extremely unlikely. Can't add it. This will */
     /* almost certainly result in a 0 return from the allocator, */
     /* which is entirely appropriate. */
     return;
    }
    GC_heap_sects[GC_n_heap_sects].hs_start = (ptr_t)p;
    GC_heap_sects[GC_n_heap_sects].hs_bytes = bytes;
    GC_n_heap_sects++;
    words = BYTES_TO_WORDS(bytes);
    phdr -> hb_sz = words;
    phdr -> hb_map = (unsigned char *)1; /* A value != GC_invalid_map */
    phdr -> hb_flags = 0;
    GC_freehblk(p);
    GC_heapsize += bytes;
    if ((ptr_t)p <= (ptr_t)GC_least_plausible_heap_addr
        || GC_least_plausible_heap_addr == 0) {
        GC_least_plausible_heap_addr = (GC_PTR)((ptr_t)p - sizeof(word));
         /* Making it a little smaller than necessary prevents */
         /* us from getting a false hit from the variable */
         /* itself. There's some unintentional reflection */
         /* here. */
    }
    if ((ptr_t)p + bytes >= (ptr_t)GC_greatest_plausible_heap_addr) {
        GC_greatest_plausible_heap_addr = (GC_PTR)((ptr_t)p + bytes);
    }
}

# if !defined(NO_DEBUGGING)
void GC_print_heap_sects()
{
    register unsigned i;
    
    GC_printf1("Total heap size: %lu\n", (unsigned long) GC_heapsize);
    for (i = 0; i < GC_n_heap_sects; i++) {
        unsigned long start = (unsigned long) GC_heap_sects[i].hs_start;
        unsigned long len = (unsigned long) GC_heap_sects[i].hs_bytes;
        struct hblk *h;
        unsigned nbl = 0;
        
     GC_printf3("Section %ld from 0x%lx to 0x%lx ", (unsigned long)i,
     start, (unsigned long)(start + len));
     for (h = (struct hblk *)start; h < (struct hblk *)(start + len); h++) {
     if (GC_is_black_listed(h, HBLKSIZE)) nbl++;
     }
     GC_printf2("%lu/%lu blacklisted\n", (unsigned long)nbl,
     (unsigned long)(len/HBLKSIZE));
    }
}
# endif

GC_PTR GC_least_plausible_heap_addr = (GC_PTR)ONES;
GC_PTR GC_greatest_plausible_heap_addr = 0;

ptr_t GC_max(x,y)
ptr_t x, y;
{
    return(x > y? x : y);
}

ptr_t GC_min(x,y)
ptr_t x, y;
{
    return(x < y? x : y);
}

# if defined(__STDC__) || defined(__cplusplus)
    void GC_set_max_heap_size(GC_word n)
# else
    void GC_set_max_heap_size(n)
    GC_word n;
# endif
{
    GC_max_heapsize = n;
}

GC_word GC_max_retries = 0;

/*
 * This explicitly increases the size of the heap.  It is used
 * internally, but may also be invoked from GC_expand_hp by the user.
 * The argument is in units of HBLKSIZE.
 * Tiny values of n are rounded up.
 * Returns FALSE on failure.
 */
GC_bool GC_expand_hp_inner(n)
word n;
{
    word bytes;
    struct hblk * space;
    word expansion_slop;  /* Number of bytes by which we expect the */
                          /* heap to expand soon.                   */

    if (n < MINHINCR) n = MINHINCR;
    bytes = n * HBLKSIZE;
    /* Make sure bytes is a multiple of GC_page_size. */
      {
        word mask = GC_page_size - 1;
        bytes += mask;
        bytes &= ~mask;
      }
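      /* E.g. with GC_page_size = 4096, a request of 5000 bytes becomes */
      /* (5000 + 4095) & ~4095 = 8192.                                  */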
    
    if (GC_max_heapsize != 0 && GC_heapsize + bytes > GC_max_heapsize) {
        /* Exceeded self-imposed limit */
        return(FALSE);
    }
    space = GET_MEM(bytes);
    if (space == 0) {
#       ifdef CONDPRINT
          if (GC_print_stats) {
            GC_printf1("Failed to expand heap by %lu bytes\n",
                       (unsigned long)bytes);
          }
#       endif
        return(FALSE);
    }
#   ifdef CONDPRINT
      if (GC_print_stats) {
        GC_printf2("Increasing heap size by %lu after %lu allocated bytes\n",
                   (unsigned long)bytes,
                   (unsigned long)WORDS_TO_BYTES(GC_words_allocd));
#       ifdef UNDEFINED
          GC_printf1("Root size = %lu\n", GC_root_size);
          GC_print_block_list(); GC_print_hblkfreelist();
          GC_printf0("\n");
#       endif
      }
#   endif
    expansion_slop = WORDS_TO_BYTES(min_words_allocd()) + 4*MAXHINCR*HBLKSIZE;
    if ((GC_last_heap_addr == 0 && !((word)space & SIGNB))
        || (GC_last_heap_addr != 0 && GC_last_heap_addr < (ptr_t)space)) {
        /* Assume the heap is growing up. */
        GC_greatest_plausible_heap_addr =
            (GC_PTR)GC_max((ptr_t)GC_greatest_plausible_heap_addr,
                           (ptr_t)space + bytes + expansion_slop);
    } else {
        /* Heap is growing down. */
        GC_least_plausible_heap_addr =
            (GC_PTR)GC_min((ptr_t)GC_least_plausible_heap_addr,
                           (ptr_t)space - expansion_slop);
    }
#   if defined(LARGE_CONFIG)
      if (((ptr_t)GC_greatest_plausible_heap_addr <= (ptr_t)space + bytes
           || (ptr_t)GC_least_plausible_heap_addr >= (ptr_t)space)
          && GC_heapsize > 0) {
        /* GC_add_to_heap will fix this, but ... */
        WARN("Too close to address space limit: blacklisting ineffective\n", 0);
      }
#   endif
    GC_prev_heap_addr = GC_last_heap_addr;
    GC_last_heap_addr = (ptr_t)space;
    GC_add_to_heap(space, bytes);
    /* Force GC before we are likely to allocate past expansion_slop. */
      GC_collect_at_heapsize =
          GC_heapsize + expansion_slop - 2*MAXHINCR*HBLKSIZE;
#     if defined(LARGE_CONFIG)
        if (GC_collect_at_heapsize < GC_heapsize /* wrapped */)
            GC_collect_at_heapsize = (word)(-1);
#     endif
    if (GC_on_heap_resize)
        GC_on_heap_resize(GC_heapsize);

    return(TRUE);
}

/* Really returns a bool, but it's externally visible, so that's clumsy. */
/* The argument is in bytes.                                             */
# if defined(__STDC__) || defined(__cplusplus)
  int GC_expand_hp(size_t bytes)
# else
  int GC_expand_hp(bytes)
  size_t bytes;
# endif
{
    int result;
    DCL_LOCK_STATE;
    
    DISABLE_SIGNALS();
    LOCK();
    if (!GC_is_initialized) GC_init_inner();
    result = (int)GC_expand_hp_inner(divHBLKSZ((word)bytes));
    if (result) GC_requested_heapsize += bytes;
    UNLOCK();
    ENABLE_SIGNALS();
    return(result);
}
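
/*
 * A hedged usage sketch (client code): a program that knows its
 * approximate footprint can pre-grow the heap at startup to avoid early
 * collections, and cap the heap to fail fast rather than thrash.  Both
 * calls are public API from gc.h; the helper name and sizes are
 * illustrative.
 */
#if 0
  #include "gc.h"

  int setup_heap_example(void)      /* hypothetical startup helper */
  {
      GC_set_max_heap_size(256ul * 1024 * 1024);  /* self-imposed 256 MB cap */
      if (!GC_expand_hp(32 * 1024 * 1024)) {      /* pre-grow by ~32 MB */
          /* Expansion failed; the collector will grow on demand instead. */
          return(0);
      }
      return(1);
  }
#endif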

unsigned GC_fail_count = 0;
        /* How many consecutive GC/expansion failures? */
        /* Reset by GC_allochblk.                      */

static word last_fo_entries = 0;
static word last_words_finalized = 0;

GC_bool GC_collect_or_expand(needed_blocks, ignore_off_page)
word needed_blocks;
GC_bool ignore_off_page;
{
    if (!GC_incremental && !GC_dont_gc &&
        ((GC_dont_expand && GC_words_allocd > 0)
         || (GC_fo_entries > (last_fo_entries + 500)
             && (last_words_finalized || GC_words_finalized))
         || GC_should_collect())) {
      GC_gcollect_inner();
      last_fo_entries = GC_fo_entries;
      last_words_finalized = GC_words_finalized;
    } else {
      word blocks_to_get = GC_heapsize/(HBLKSIZE*GC_free_space_divisor)
                           + needed_blocks;

      if (blocks_to_get > MAXHINCR) {
          word slop;

          /* Get the minimum required to make it likely that we        */
          /* can satisfy the current request in the presence of black- */
          /* listing.  This will probably be more than MAXHINCR.       */
          if (ignore_off_page) {
              slop = 4;
          } else {
              slop = 2*divHBLKSZ(BL_LIMIT);
              if (slop > needed_blocks) slop = needed_blocks;
          }
          if (needed_blocks + slop > MAXHINCR) {
              blocks_to_get = needed_blocks + slop;
          } else {
              blocks_to_get = MAXHINCR;
          }
      }
      if (!GC_expand_hp_inner(blocks_to_get)
          && !GC_expand_hp_inner(needed_blocks)) {
          if (GC_fail_count++ < GC_max_retries) {
              WARN("Out of Memory! Trying to continue ...\n", 0);
              GC_gcollect_inner();
          } else {
#             if !defined(AMIGA) || !defined(GC_AMIGA_FASTALLOC)
                WARN("Out of Memory! Returning NIL!\n", 0);
#             endif
              return(FALSE);
          }
      } else {
#         ifdef CONDPRINT
            if (GC_fail_count && GC_print_stats) {
                GC_printf0("Memory available again ...\n");
            }
#         endif
      }
    }
    return(TRUE);
}

/*
 * Make sure the object free list for sz is not empty.
 * Return a pointer to the first object on the free list.
 * The object MUST BE REMOVED FROM THE FREE LIST BY THE CALLER.
 * Assumes we hold the allocator lock and signals are disabled.
 */
ptr_t GC_allocobj(sz, kind)
word sz;
int kind;
{
    ptr_t * flh = &(GC_obj_kinds[kind].ok_freelist[sz]);
    GC_bool tried_minor = FALSE;

    if (sz == 0) return(0);

    while (*flh == 0) {
      ENTER_GC();
      /* Do our share of marking work. */
        if (TRUE_INCREMENTAL) GC_collect_a_little_inner(1);
      /* Sweep blocks for objects of this size. */
        GC_continue_reclaim(sz, kind);
      EXIT_GC();
      if (*flh == 0) {
        GC_new_hblk(sz, kind);
      }
      if (*flh == 0) {
        ENTER_GC();
        if (GC_incremental && GC_time_limit == GC_TIME_UNLIMITED
            && !tried_minor) {
            GC_collect_a_little_inner(1);
            tried_minor = TRUE;
        } else {
            if (!GC_collect_or_expand((word)1, FALSE)) {
                EXIT_GC();
                return(0);
            }
        }
        EXIT_GC();
      }
    }
    /* Successful allocation; reset failure count. */
    GC_fail_count = 0;

    return(*flh);
}