LCOV - code coverage report
Current view: top level - mm/boehm-gc - alloc.c (source / functions) Hit Total Coverage
Test: coverage.info Lines: 277 469 59.1 %
Date: 2017-07-14 10:03:36 Functions: 20 34 58.8 %

          Line data    Source code
       1             : /*
       2             :  * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
       3             :  * Copyright (c) 1991-1996 by Xerox Corporation.  All rights reserved.
       4             :  * Copyright (c) 1998 by Silicon Graphics.  All rights reserved.
       5             :  * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
       6             :  *
       7             :  * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
       8             :  * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
       9             :  *
      10             :  * Permission is hereby granted to use or copy this program
      11             :  * for any purpose,  provided the above notices are retained on all copies.
      12             :  * Permission to modify the code and to distribute modified code is granted,
      13             :  * provided the above notices are retained, and a notice that the code was
      14             :  * modified is included with the above copyright notice.
      15             :  *
      16             :  */
      17             : 
      18             : #include "private/gc_priv.h"
      19             : 
      20             : #include <stdio.h>
      21             : #if !defined(MACOS) && !defined(MSWINCE)
      22             : # include <signal.h>
      23             : # if !defined(__CC_ARM)
      24             : #   include <sys/types.h>
      25             : # endif
      26             : #endif
      27             : 
      28             : /*
      29             :  * Separate free lists are maintained for different sized objects
      30             :  * up to MAXOBJBYTES.
      31             :  * The call GC_allocobj(i,k) ensures that the freelist for
      32             :  * kind k objects of size i points to a non-empty
      33             :  * free list. It returns a pointer to the first entry on the free list.
      34             :  * In a single-threaded world, GC_allocobj may be called to allocate
      35             :  * an object of (small) size i as follows:
      36             :  *
      37             :  *            opp = &(GC_objfreelist[i]);
      38             :  *            if (*opp == 0) GC_allocobj(i, NORMAL);
      39             :  *            ptr = *opp;
      40             :  *            *opp = obj_link(ptr);
      41             :  *
      42             :  * Note that this is very fast if the free list is non-empty; it should
      43             :  * only involve the execution of 4 or 5 simple instructions.
      44             :  * All composite objects on freelists are cleared, except for
      45             :  * their first word.
      46             :  */
      47             : 
      48             : /*
      49             :  * The allocator uses GC_allochblk to allocate large chunks of objects.
      50             :  * These chunks all start on addresses which are multiples of
      51             :  * HBLKSZ.   Each allocated chunk has an associated header,
      52             :  * which can be located quickly based on the address of the chunk.
      53             :  * (See headers.c for details.)
      54             :  * This makes it possible to check quickly whether an
      55             :  * arbitrary address corresponds to an object administered by the
      56             :  * allocator.
      57             :  */
      58             : 
      59             : word GC_non_gc_bytes = 0;  /* Number of bytes not intended to be collected */
      60             : 
      61             : word GC_gc_no = 0;
      62             : 
      63             : #ifndef GC_DISABLE_INCREMENTAL
      64             :   GC_INNER int GC_incremental = 0;      /* By default, stop the world.  */
      65             : #endif
      66             : 
      67             : #ifdef THREADS
      68             :   int GC_parallel = FALSE;      /* By default, parallel GC is off.      */
      69             : #endif
      70             : 
      71             : #ifndef GC_FULL_FREQ
      72             : # define GC_FULL_FREQ 19   /* Every 20th collection is a full   */
      73             :                            /* collection, whether we need it    */
      74             :                            /* or not.                           */
      75             : #endif
      76             : 
      77             : int GC_full_freq = GC_FULL_FREQ;
      78             : 
      79             : STATIC GC_bool GC_need_full_gc = FALSE;
      80             :                            /* Need full GC do to heap growth.   */
      81             : 
      82             : #ifdef THREAD_LOCAL_ALLOC
      83             :   GC_INNER GC_bool GC_world_stopped = FALSE;
      84             : #endif
      85             : 
      86             : STATIC word GC_used_heap_size_after_full = 0;
      87             : 
      88             : /* GC_copyright symbol is externally visible. */
      89             : char * const GC_copyright[] =
      90             : {"Copyright 1988,1989 Hans-J. Boehm and Alan J. Demers ",
      91             : "Copyright (c) 1991-1995 by Xerox Corporation.  All rights reserved. ",
      92             : "Copyright (c) 1996-1998 by Silicon Graphics.  All rights reserved. ",
      93             : "Copyright (c) 1999-2009 by Hewlett-Packard Company.  All rights reserved. ",
      94             : "THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY",
      95             : " EXPRESSED OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.",
      96             : "See source code for details." };
      97             : 
      98             : /* Version macros are now defined in gc_version.h, which is included by */
      99             : /* gc.h, which is included by gc_priv.h.                                */
     100             : #ifndef GC_NO_VERSION_VAR
     101             :   const unsigned GC_version = ((GC_VERSION_MAJOR << 16) |
     102             :                         (GC_VERSION_MINOR << 8) | GC_VERSION_MICRO);
     103             : #endif
     104             : 
     105           0 : GC_API unsigned GC_CALL GC_get_version(void)
     106             : {
     107           0 :   return (GC_VERSION_MAJOR << 16) | (GC_VERSION_MINOR << 8) |
     108             :           GC_VERSION_MICRO;
     109             : }
     110             : 
/* Some more configuration variables. */

#ifdef GC_DONT_EXPAND
  int GC_dont_expand = TRUE;
#else
  int GC_dont_expand = FALSE;
#endif

#ifndef GC_FREE_SPACE_DIVISOR
# define GC_FREE_SPACE_DIVISOR 3 /* must be > 0 */
#endif

word GC_free_space_divisor = GC_FREE_SPACE_DIVISOR;
                /* Divisor applied in min_bytes_allocd(): a larger      */
                /* value triggers collections more frequently.          */
     124             : 
     125        4798 : GC_INNER int GC_CALLBACK GC_never_stop_func(void)
     126             : {
     127        4798 :   return(0);
     128             : }
     129             : 
#ifndef GC_TIME_LIMIT
# define GC_TIME_LIMIT 50  /* We try to keep pause times from exceeding  */
                           /* this by much. In milliseconds.             */
#endif

unsigned long GC_time_limit = GC_TIME_LIMIT;

#ifndef NO_CLOCK
  STATIC CLOCK_TYPE GC_start_time = 0;
                                /* Time at which we stopped world.      */
                                /* Used only in GC_timeout_stop_func.   */
#endif

STATIC int GC_n_attempts = 0;   /* Number of attempts at finishing      */
                                /* collection within GC_time_limit.     */

STATIC GC_stop_func GC_default_stop_func = GC_never_stop_func;
                                /* Accessed holding the allocation lock. */
     148             : 
/* Install a client-supplied stop function; consulted by                */
/* GC_timeout_stop_func.  The allocation lock guards the assignment.    */
GC_API void GC_CALL GC_set_stop_func(GC_stop_func stop_func)
{
  DCL_LOCK_STATE;
  GC_ASSERT(stop_func != 0);    /* A null stop function is disallowed.  */
  LOCK();
  GC_default_stop_func = stop_func;
  UNLOCK();
}
     157             : 
/* Return the currently installed stop function, reading it under the   */
/* allocation lock.                                                     */
GC_API GC_stop_func GC_CALL GC_get_stop_func(void)
{
  GC_stop_func stop_func;
  DCL_LOCK_STATE;
  LOCK();
  stop_func = GC_default_stop_func;
  UNLOCK();
  return stop_func;
}
     167             : 
     168             : #if defined(GC_DISABLE_INCREMENTAL) || defined(NO_CLOCK)
     169             : # define GC_timeout_stop_func GC_default_stop_func
     170             : #else
     171           0 :   STATIC int GC_CALLBACK GC_timeout_stop_func (void)
     172             :   {
     173             :     CLOCK_TYPE current_time;
     174             :     static unsigned count = 0;
     175             :     unsigned long time_diff;
     176             : 
     177           0 :     if ((*GC_default_stop_func)())
     178           0 :       return(1);
     179             : 
     180           0 :     if ((count++ & 3) != 0) return(0);
     181           0 :     GET_TIME(current_time);
     182           0 :     time_diff = MS_TIME_DIFF(current_time,GC_start_time);
     183           0 :     if (time_diff >= GC_time_limit) {
     184           0 :         GC_COND_LOG_PRINTF(
     185             :                 "Abandoning stopped marking after %lu msecs (attempt %d)\n",
     186             :                 time_diff, GC_n_attempts);
     187           0 :         return(1);
     188             :     }
     189           0 :     return(0);
     190             :   }
     191             : #endif /* !GC_DISABLE_INCREMENTAL */
     192             : 
     193             : #ifdef THREADS
     194             :   GC_INNER word GC_total_stacksize = 0; /* updated on every push_all_stacks */
     195             : #endif
     196             : 
     197             : /* Return the minimum number of bytes that must be allocated between    */
     198             : /* collections to amortize the collection cost.  Should be non-zero.    */
static word min_bytes_allocd(void)
{
    word result;
#   ifdef STACK_GROWS_UP
      word stack_size = GC_approx_sp() - GC_stackbottom;
            /* GC_stackbottom is used only for a single-threaded case.  */
#   else
      word stack_size = GC_stackbottom - GC_approx_sp();
#   endif

    word total_root_size;       /* includes double stack size,  */
                                /* since the stack is expensive */
                                /* to scan.                     */
    word scan_size;             /* Estimate of memory to be scanned     */
                                /* during normal GC.                    */

#   ifdef THREADS
      if (GC_need_to_lock) {
        /* We are multi-threaded... the single-thread stack estimate    */
        /* above is meaningless, so use the aggregate instead.          */
        stack_size = GC_total_stacksize;
        /* For now, we just use the value computed during the latest GC. */
#       ifdef DEBUG_THREADS
          GC_log_printf("Total stacks size: %lu\n",
                        (unsigned long)stack_size);
#       endif
      }
#   endif

    /* Stacks are counted twice (see total_root_size comment above).    */
    total_root_size = 2 * stack_size + GC_root_size;
    scan_size = 2 * GC_composite_in_use + GC_atomic_in_use / 4
                + total_root_size;
    result = scan_size / GC_free_space_divisor;
    if (GC_incremental) {
      /* Incremental mode: halve the threshold, i.e. collect twice as   */
      /* often.                                                         */
      result /= 2;
    }
    /* Guarantee a non-zero result, per this function's contract.       */
    return result > 0 ? result : 1;
}
     236             : 
     237             : STATIC word GC_non_gc_bytes_at_gc = 0;
     238             :                 /* Number of explicitly managed bytes of storage        */
     239             :                 /* at last collection.                                  */
     240             : 
     241             : /* Return the number of bytes allocated, adjusted for explicit storage  */
     242             : /* management, etc..  This number is used in deciding when to trigger   */
     243             : /* collections.                                                         */
     244       25239 : STATIC word GC_adj_bytes_allocd(void)
     245             : {
     246             :     signed_word result;
     247       25239 :     signed_word expl_managed = (signed_word)GC_non_gc_bytes
     248       25239 :                                 - (signed_word)GC_non_gc_bytes_at_gc;
     249             : 
     250             :     /* Don't count what was explicitly freed, or newly allocated for    */
     251             :     /* explicit management.  Note that deallocating an explicitly       */
     252             :     /* managed object should not alter result, assuming the client      */
     253             :     /* is playing by the rules.                                         */
     254       25239 :     result = (signed_word)GC_bytes_allocd
     255             :              + (signed_word)GC_bytes_dropped
     256       25239 :              - (signed_word)GC_bytes_freed
     257       25239 :              + (signed_word)GC_finalizer_bytes_freed
     258       25239 :              - expl_managed;
     259       25239 :     if (result > (signed_word)GC_bytes_allocd) {
     260          70 :         result = GC_bytes_allocd;
     261             :         /* probably client bug or unfortunate scheduling */
     262             :     }
     263       25239 :     result += GC_bytes_finalized;
     264             :         /* We count objects enqueued for finalization as though they    */
     265             :         /* had been reallocated this round. Finalization is user        */
     266             :         /* visible progress.  And if we don't count this, we have       */
     267             :         /* stability problems for programs that finalize all objects.   */
     268       25239 :     if (result < (signed_word)(GC_bytes_allocd >> 3)) {
     269             :         /* Always count at least 1/8 of the allocations.  We don't want */
     270             :         /* to collect too infrequently, since that would inhibit        */
     271             :         /* coalescing of free storage blocks.                           */
     272             :         /* This also makes us partially robust against client bugs.     */
     273           0 :         return(GC_bytes_allocd >> 3);
     274             :     } else {
     275       25239 :         return(result);
     276             :     }
     277             : }
     278             : 
     279             : 
     280             : /* Clear up a few frames worth of garbage left at the top of the stack. */
     281             : /* This is used to prevent us from accidentally treating garbage left   */
     282             : /* on the stack by other parts of the collector as roots.  This         */
     283             : /* differs from the code in misc.c, which actually tries to keep the    */
     284             : /* stack clear of long-lived, client-generated garbage.                 */
STATIC void GC_clear_a_few_frames(void)
{
#   ifndef CLEAR_NWORDS
#     define CLEAR_NWORDS 64
#   endif
    /* NOTE(review): the array is volatile, presumably so the compiler  */
    /* cannot elide the clearing stores -- confirm.                     */
    volatile word frames[CLEAR_NWORDS];
    BZERO((word *)frames, CLEAR_NWORDS * sizeof(word));
}
     293             : 
/* Heap size at which we need a collection to avoid expanding past      */
/* limits used by blacklisting.  (word)(-1) means "no limit";           */
/* presumably recomputed elsewhere -- NOTE(review): confirm.            */
STATIC word GC_collect_at_heapsize = (word)(-1);
     297             : 
     298             : /* Have we allocated enough to amortize a collection? */
     299       25239 : GC_INNER GC_bool GC_should_collect(void)
     300             : {
     301             :     static word last_min_bytes_allocd;
     302             :     static word last_gc_no;
     303       25239 :     if (last_gc_no != GC_gc_no) {
     304          84 :       last_gc_no = GC_gc_no;
     305          84 :       last_min_bytes_allocd = min_bytes_allocd();
     306             :     }
     307       45361 :     return(GC_adj_bytes_allocd() >= last_min_bytes_allocd
     308       45361 :            || GC_heapsize >= GC_collect_at_heapsize);
     309             : }
     310             : 
     311             : /* STATIC */ GC_start_callback_proc GC_start_call_back = 0;
     312             :                         /* Called at start of full collections.         */
     313             :                         /* Not called if 0.  Called with the allocation */
     314             :                         /* lock held.  Not used by GC itself.           */
     315             : 
/* Register a client callback invoked at the start of full collections  */
/* (see GC_notify_full_gc); 0 disables it.  Guarded by the allocation   */
/* lock.                                                                */
GC_API void GC_CALL GC_set_start_callback(GC_start_callback_proc fn)
{
    DCL_LOCK_STATE;
    LOCK();
    GC_start_call_back = fn;
    UNLOCK();
}
     323             : 
/* Return the currently registered start-of-collection callback (0 if   */
/* none), reading it under the allocation lock.                          */
GC_API GC_start_callback_proc GC_CALL GC_get_start_callback(void)
{
    GC_start_callback_proc fn;
    DCL_LOCK_STATE;
    LOCK();
    fn = GC_start_call_back;
    UNLOCK();
    return fn;
}
     333             : 
     334         244 : GC_INLINE void GC_notify_full_gc(void)
     335             : {
     336         244 :     if (GC_start_call_back != 0) {
     337           0 :         (*GC_start_call_back)();
     338             :     }
     339         244 : }
     340             : 
     341             : STATIC GC_bool GC_is_full_gc = FALSE;
     342             : 
     343             : STATIC GC_bool GC_stopped_mark(GC_stop_func stop_func);
     344             : STATIC void GC_finish_collection(void);
     345             : 
     346             : /*
     347             :  * Initiate a garbage collection if appropriate.
     348             :  * Choose judiciously
     349             :  * between partial, full, and stop-world collections.
     350             :  */
STATIC void GC_maybe_gc(void)
{
    static int n_partial_gcs = 0;
                /* Partial collections since the last full one.         */

    GC_ASSERT(I_HOLD_LOCK());
    ASSERT_CANCEL_DISABLED();
    if (GC_should_collect()) {
        if (!GC_incremental) {
            /* Non-incremental mode: a single full stop-world collection. */
            /* FIXME: If possible, GC_default_stop_func should be used here */
            GC_try_to_collect_inner(GC_never_stop_func);
            n_partial_gcs = 0;
            return;
        } else {
#         ifdef PARALLEL_MARK
            if (GC_parallel)
              GC_wait_for_reclaim();
#         endif
          /* Promote to a full collection periodically or when the heap */
          /* has grown enough to require one.                           */
          if (GC_need_full_gc || n_partial_gcs >= GC_full_freq) {
            GC_COND_LOG_PRINTF(
                "***>Full mark for collection #%lu after %lu allocd bytes\n",
                (unsigned long)GC_gc_no + 1, (unsigned long)GC_bytes_allocd);
            GC_promote_black_lists();
            (void)GC_reclaim_all((GC_stop_func)0, TRUE);
            GC_notify_full_gc();
            GC_clear_marks();
            n_partial_gcs = 0;
            GC_is_full_gc = TRUE;
          } else {
            n_partial_gcs++;
          }
        }
        /* We try to mark with the world stopped.       */
        /* If we run out of time, this turns into       */
        /* incremental marking.                 */
#       ifndef NO_CLOCK
          if (GC_time_limit != GC_TIME_UNLIMITED) { GET_TIME(GC_start_time); }
#       endif
        /* FIXME: If possible, GC_default_stop_func should be   */
        /* used instead of GC_never_stop_func here.             */
        if (GC_stopped_mark(GC_time_limit == GC_TIME_UNLIMITED?
                            GC_never_stop_func : GC_timeout_stop_func)) {
#           ifdef SAVE_CALL_CHAIN
                GC_save_callers(GC_last_stack);
#           endif
            GC_finish_collection();
        } else {
            if (!GC_is_full_gc) {
                /* Count this as the first attempt */
                GC_n_attempts++;
            }
        }
    }
}
     404             : 
     405             : 
     406             : /*
     407             :  * Stop the world garbage collection.  Assumes lock held. If stop_func is
     408             :  * not GC_never_stop_func then abort if stop_func returns TRUE.
     409             :  * Return TRUE if we successfully completed the collection.
     410             :  */
GC_INNER GC_bool GC_try_to_collect_inner(GC_stop_func stop_func)
{
#   ifndef SMALL_CONFIG
      CLOCK_TYPE start_time = 0; /* initialized to prevent warning. */
      CLOCK_TYPE current_time;
#   endif
    ASSERT_CANCEL_DISABLED();
    /* Bail out immediately if collection is disabled or the client     */
    /* already wants us to stop.                                        */
    if (GC_dont_gc || (*stop_func)()) return FALSE;
    if (GC_incremental && GC_collection_in_progress()) {
      GC_COND_LOG_PRINTF(
            "GC_try_to_collect_inner: finishing collection in progress\n");
      /* Just finish collection already in progress.    */
        while(GC_collection_in_progress()) {
            if ((*stop_func)()) return(FALSE);
            GC_collect_a_little_inner(1);
        }
    }
    GC_notify_full_gc();
#   ifndef SMALL_CONFIG
      if (GC_print_stats) {
        GET_TIME(start_time);
        GC_log_printf("Initiating full world-stop collection!\n");
      }
#   endif
    GC_promote_black_lists();
    /* Make sure all blocks have been reclaimed, so sweep routines      */
    /* don't see cleared mark bits.                                     */
    /* If we're guaranteed to finish, then this is unnecessary.         */
    /* In the find_leak case, we have to finish to guarantee that       */
    /* previously unmarked objects are not reported as leaks.           */
#       ifdef PARALLEL_MARK
          if (GC_parallel)
            GC_wait_for_reclaim();
#       endif
        if ((GC_find_leak || stop_func != GC_never_stop_func)
            && !GC_reclaim_all(stop_func, FALSE)) {
            /* Aborted.  So far everything is still consistent. */
            return(FALSE);
        }
    GC_invalidate_mark_state();  /* Flush mark stack.   */
    GC_clear_marks();
#   ifdef SAVE_CALL_CHAIN
        GC_save_callers(GC_last_stack);
#   endif
    GC_is_full_gc = TRUE;
    if (!GC_stopped_mark(stop_func)) {
      if (!GC_incremental) {
        /* We're partially done and have no way to complete or use      */
        /* current work.  Reestablish invariants as cheaply as          */
        /* possible.                                                    */
        GC_invalidate_mark_state();
        GC_unpromote_black_lists();
      } /* else we claim the world is already still consistent.  We'll  */
        /* finish incrementally.                                        */
      return(FALSE);
    }
    GC_finish_collection();
#   ifndef SMALL_CONFIG
      if (GC_print_stats) {
        GET_TIME(current_time);
        GC_log_printf("Complete collection took %lu msecs\n",
                      MS_TIME_DIFF(current_time,start_time));
      }
#   endif
    return(TRUE);
}
     477             : 
     478             : /*
     479             :  * Perform n units of garbage collection work.  A unit is intended to touch
     480             :  * roughly GC_RATE pages.  Every once in a while, we do more than that.
     481             :  * This needs to be a fairly large number with our current incremental
     482             :  * GC strategy, since otherwise we allocate too much during GC, and the
     483             :  * cleanup gets expensive.
     484             :  */
     485             : #ifndef GC_RATE
     486             : # define GC_RATE 10
     487             : #endif
     488             : #ifndef MAX_PRIOR_ATTEMPTS
     489             : # define MAX_PRIOR_ATTEMPTS 1
     490             : #endif
     491             :         /* Maximum number of prior attempts at world stop marking       */
     492             :         /* A value of 1 means that we finish the second time, no matter */
     493             :         /* how long it takes.  Doesn't count the initial root scan      */
     494             :         /* for a full GC.                                               */
     495             : 
     496             : STATIC int GC_deficit = 0;/* The number of extra calls to GC_mark_some  */
     497             :                           /* that we have made.                         */
     498             : 
/* Perform n units of incremental collection work (roughly GC_RATE      */
/* pages each); falls back to GC_maybe_gc() when no incremental         */
/* collection is in progress.                                           */
GC_INNER void GC_collect_a_little_inner(int n)
{
    int i;
    IF_CANCEL(int cancel_state;)

    if (GC_dont_gc) return;
    DISABLE_CANCEL(cancel_state);
    if (GC_incremental && GC_collection_in_progress()) {
        /* Start at GC_deficit so extra calls made earlier are credited. */
        for (i = GC_deficit; i < GC_RATE*n; i++) {
            if (GC_mark_some((ptr_t)0)) {
                /* Need to finish a collection */
#               ifdef SAVE_CALL_CHAIN
                    GC_save_callers(GC_last_stack);
#               endif
#               ifdef PARALLEL_MARK
                    if (GC_parallel)
                      GC_wait_for_reclaim();
#               endif
                /* Try a time-bounded world-stop mark first; after      */
                /* MAX_PRIOR_ATTEMPTS failures, finish unconditionally. */
                if (GC_n_attempts < MAX_PRIOR_ATTEMPTS
                    && GC_time_limit != GC_TIME_UNLIMITED) {
#                 ifndef NO_CLOCK
                    GET_TIME(GC_start_time);
#                 endif
                  if (!GC_stopped_mark(GC_timeout_stop_func)) {
                    GC_n_attempts++;
                    break;
                  }
                } else {
                  /* FIXME: If possible, GC_default_stop_func should be */
                  /* used here.                                         */
                  (void)GC_stopped_mark(GC_never_stop_func);
                }
                GC_finish_collection();
                break;
            }
        }
        /* Pay down the deficit, clamping it at zero.                   */
        if (GC_deficit > 0) GC_deficit -= GC_RATE*n;
        if (GC_deficit < 0) GC_deficit = 0;
    } else {
        GC_maybe_gc();
    }
    RESTORE_CANCEL(cancel_state);
}
     542             : 
     543             : GC_INNER void (*GC_check_heap)(void) = 0;
     544             : GC_INNER void (*GC_print_all_smashed)(void) = 0;
     545             : 
/* Public entry point: perform a small unit of collection work under    */
/* the allocation lock.  Returns nonzero iff a collection is still in   */
/* progress afterwards (i.e. marking is incomplete).                    */
GC_API int GC_CALL GC_collect_a_little(void)
{
    int result;
    DCL_LOCK_STATE;

    LOCK();
    GC_collect_a_little_inner(1);
    result = (int)GC_collection_in_progress();
    UNLOCK();
    /* Once a cycle has completed, report any smashed (overwritten)     */
    /* debugged objects detected during it.                             */
    if (!result && GC_debugging_started) GC_print_all_smashed();
    return(result);
}
     558             : 
#ifndef SMALL_CONFIG
  /* Variables for world-stop average delay time statistic computation. */
  /* "divisor" is incremented every world-stop and halved when it       */
  /* reaches its maximum (or upon "total_time" overflow).               */
  static unsigned world_stopped_total_time = 0;
  static unsigned world_stopped_total_divisor = 0;
# ifndef MAX_TOTAL_TIME_DIVISOR
    /* We shall not use big values here (so "outdated" delay time       */
    /* values would have less impact on "average" delay time value than */
    /* newer ones).                                                     */
#   define MAX_TOTAL_TIME_DIVISOR 1000
# endif
#endif

/* Helpers for log-format strings: emit the "unmapped bytes" fragment   */
/* (and its trailing argument) only when unmapping is compiled in, so   */
/* a single printf call works in both configurations.                   */
#ifdef USE_MUNMAP
# define IF_USE_MUNMAP(x) x
# define COMMA_IF_USE_MUNMAP(x) /* comma */, x
#else
# define IF_USE_MUNMAP(x) /* empty */
# define COMMA_IF_USE_MUNMAP(x) /* empty */
#endif
     580             : 
/*
 * Assumes lock is held.  We stop the world and mark from all roots.
 * If stop_func() ever returns TRUE, we may fail and return FALSE.
 * Increment GC_gc_no if we succeed.
 */
STATIC GC_bool GC_stopped_mark(GC_stop_func stop_func)
{
    unsigned i;
#   ifndef SMALL_CONFIG
      CLOCK_TYPE start_time = 0; /* initialized to prevent warning. */
      CLOCK_TYPE current_time;
#   endif

#   if !defined(REDIRECT_MALLOC) && defined(USE_WINALLOC)
        GC_add_current_malloc_heap();
#   endif
#   if defined(REGISTER_LIBRARIES_EARLY)
        GC_cond_register_dynamic_libraries();
#   endif

    /* Timing is recorded only when statistics printing is enabled.     */
#   ifndef SMALL_CONFIG
      if (GC_PRINT_STATS_FLAG)
        GET_TIME(start_time);
#   endif

    STOP_WORLD();
#   ifdef THREAD_LOCAL_ALLOC
      GC_world_stopped = TRUE;
#   endif
        /* Output blank line for convenience here */
    GC_COND_LOG_PRINTF(
              "\n--> Marking for collection #%lu after %lu allocated bytes\n",
              (unsigned long)GC_gc_no + 1, (unsigned long) GC_bytes_allocd);
#   ifdef MAKE_BACK_GRAPH
      if (GC_print_back_height) {
        GC_build_back_graph();
      }
#   endif

    /* Mark from all roots.  */
        /* Minimize junk left in my registers and on the stack */
            GC_clear_a_few_frames();
            GC_noop6(0,0,0,0,0,0);

        /* Perform units of marking work until done, or until           */
        /* stop_func asks us to abandon this collection attempt.        */
        GC_initiate_gc();
        for (i = 0;;i++) {
          if ((*stop_func)()) {
            GC_COND_LOG_PRINTF("Abandoned stopped marking after"
                               " %u iterations\n", i);
            GC_deficit = i;     /* Give the mutator a chance.   */
#           ifdef THREAD_LOCAL_ALLOC
              GC_world_stopped = FALSE;
#           endif
            START_WORLD();
            return(FALSE);
          }
          if (GC_mark_some(GC_approx_sp())) break;
        }

    GC_gc_no++;
    GC_DBGLOG_PRINTF("GC #%lu freed %ld bytes, heap %lu KiB"
                     IF_USE_MUNMAP(" (+ %lu KiB unmapped)") "\n",
                     (unsigned long)GC_gc_no, (long)GC_bytes_found,
                     TO_KiB_UL(GC_heapsize - GC_unmapped_bytes) /*, */
                     COMMA_IF_USE_MUNMAP(TO_KiB_UL(GC_unmapped_bytes)));

    /* Check all debugged objects for consistency */
    if (GC_debugging_started) {
      (*GC_check_heap)();
    }

#   ifdef THREAD_LOCAL_ALLOC
      GC_world_stopped = FALSE;
#   endif
    START_WORLD();
#   ifndef SMALL_CONFIG
      if (GC_PRINT_STATS_FLAG) {
        unsigned long time_diff;
        unsigned total_time, divisor;
        GET_TIME(current_time);
        time_diff = MS_TIME_DIFF(current_time,start_time);

        /* Compute new world-stop delay total time */
        total_time = world_stopped_total_time;
        divisor = world_stopped_total_divisor;
        if ((int)total_time < 0 || divisor >= MAX_TOTAL_TIME_DIVISOR) {
          /* Halve values if overflow occurs */
          total_time >>= 1;
          divisor >>= 1;
        }
        /* Saturate the increment at half the unsigned range so a       */
        /* single huge sample cannot wrap total_time past the           */
        /* (int)total_time < 0 overflow check above.                    */
        total_time += time_diff < (((unsigned)-1) >> 1) ?
                        (unsigned)time_diff : ((unsigned)-1) >> 1;
        /* Update old world_stopped_total_time and its divisor */
        world_stopped_total_time = total_time;
        world_stopped_total_divisor = ++divisor;

        GC_ASSERT(divisor != 0);
        GC_log_printf(
                "World-stopped marking took %lu msecs (%u in average)\n",
                time_diff, total_time / divisor);
      }
#   endif
    return(TRUE);
}
     685             : 
/* Set all mark bits for the free list whose first entry is q   */
GC_INNER void GC_set_fl_marks(ptr_t q)
{
   struct hblk *h, *last_h;
   hdr *hhdr;
   IF_PER_OBJ(size_t sz;)
   unsigned bit_no;

   if (q != NULL) {
     h = HBLKPTR(q);
     last_h = h;
     hhdr = HDR(h);
     IF_PER_OBJ(sz = hhdr->hb_sz;)

     for (;;) {
        bit_no = MARK_BIT_NO((ptr_t)q - (ptr_t)h, sz);
        /* Only bump the per-block mark count if the bit was not        */
        /* already set, so hb_n_marks stays consistent.                 */
        if (!mark_bit_from_hdr(hhdr, bit_no)) {
          set_mark_bit_from_hdr(hhdr, bit_no);
          ++hhdr -> hb_n_marks;
        }

        q = obj_link(q);
        if (q == NULL)
          break;

        /* Refetch the block header only when crossing a block          */
        /* boundary; consecutive list entries usually share a block.    */
        h = HBLKPTR(q);
        if (h != last_h) {
          last_h = h;
          hhdr = HDR(h);
          IF_PER_OBJ(sz = hhdr->hb_sz;)
        }
     }
   }
}
     720             : 
#if defined(GC_ASSERTIONS) && defined(THREADS) && defined(THREAD_LOCAL_ALLOC)
  /* Check that all mark bits for the free list whose first entry is    */
  /* (*pfreelist) are set.  Check skipped if points to a special value. */
  /* Runs while the world is running, hence the careful atomic reads.   */
  void GC_check_fl_marks(void **pfreelist)
  {
#   ifdef AO_HAVE_load_acquire_read
      AO_t *list = (AO_t *)AO_load_acquire_read((AO_t *)pfreelist);
                /* Atomic operations are used because the world is running. */
      AO_t *prev;
      AO_t *p;

      /* Values <= HBLKSIZE are the "special" (sentinel) list heads     */
      /* mentioned above, not real objects -- nothing to check.         */
      if ((word)list <= HBLKSIZE) return;

      prev = (AO_t *)pfreelist;
      for (p = list; p != NULL;) {
        AO_t *next;

        if (!GC_is_marked(p)) {
          ABORT_ARG2("Unmarked local free list entry",
                     ": object %p on list %p", (void *)p, (void *)list);
        }

        /* While traversing the free-list, it re-reads the pointer to   */
        /* the current node before accepting its next pointer and       */
        /* bails out if the latter has changed.  That way, it won't     */
        /* try to follow the pointer which might be been modified       */
        /* after the object was returned to the client.  It might       */
        /* perform the mark-check on the just allocated object but      */
        /* that should be harmless.                                     */
        next = (AO_t *)AO_load_acquire_read(p);
        if (AO_load(prev) != (AO_t)p)
          break;
        prev = p;
        p = next;
      }
#   else
      /* FIXME: Not implemented (just skipped). */
      (void)pfreelist;
#   endif
  }
#endif /* GC_ASSERTIONS && THREAD_LOCAL_ALLOC */
     762             : 
/* Clear all mark bits for the free list whose first entry is q */
/* Decrement GC_bytes_found by number of bytes on free list.    */
STATIC void GC_clear_fl_marks(ptr_t q)
{
   struct hblk *h, *last_h;
   hdr *hhdr;
   size_t sz;
   unsigned bit_no;

   if (q != NULL) {
     h = HBLKPTR(q);
     last_h = h;
     hhdr = HDR(h);
     sz = hhdr->hb_sz;  /* Normally set only once. */

     for (;;) {
        bit_no = MARK_BIT_NO((ptr_t)q - (ptr_t)h, sz);
        if (mark_bit_from_hdr(hhdr, bit_no)) {
          size_t n_marks = hhdr -> hb_n_marks - 1;
          clear_mark_bit_from_hdr(hhdr, bit_no);
#         ifdef PARALLEL_MARK
            /* Appr. count, don't decrement to zero! */
            if (0 != n_marks || !GC_parallel) {
              hhdr -> hb_n_marks = n_marks;
            }
#         else
            hhdr -> hb_n_marks = n_marks;
#         endif
        }
        /* Every list entry counts against GC_bytes_found, whether or   */
        /* not its mark bit was actually set.                           */
        GC_bytes_found -= sz;

        q = obj_link(q);
        if (q == NULL)
          break;

        /* Refetch header data only when crossing a block boundary.     */
        h = HBLKPTR(q);
        if (h != last_h) {
          last_h = h;
          hhdr = HDR(h);
          sz = hhdr->hb_sz;
        }
     }
   }
}
     807             : 
#if defined(GC_ASSERTIONS) && defined(THREADS) && defined(THREAD_LOCAL_ALLOC)
  void GC_check_tls(void);
#endif

/* Client-settable callback; 0 means "not set".  NOTE(review):          */
/* presumably invoked when the heap is resized -- confirm in gc.h.      */
GC_on_heap_resize_proc GC_on_heap_resize = 0;
     813             : 
     814             : /* Used for logging only. */
     815           0 : GC_INLINE int GC_compute_heap_usage_percent(void)
     816             : {
     817           0 :   word used = GC_composite_in_use + GC_atomic_in_use;
     818           0 :   word heap_sz = GC_heapsize - GC_unmapped_bytes;
     819           0 :   return used >= heap_sz ? 0 : used < ((word)-1) / 100 ?
     820           0 :                 (int)((used * 100) / heap_sz) : (int)(used / (heap_sz / 100));
     821             : }
     822             : 
/* Finish up a collection.  Assumes mark bits are consistent, lock is   */
/* held, but the world is otherwise running.                            */
STATIC void GC_finish_collection(void)
{
#   ifndef SMALL_CONFIG
      CLOCK_TYPE start_time = 0; /* initialized to prevent warning. */
      CLOCK_TYPE finalize_time = 0;
      CLOCK_TYPE done_time;
#   endif

#   if defined(GC_ASSERTIONS) && defined(THREADS) \
       && defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
        /* Check that we marked some of our own data.           */
        /* FIXME: Add more checks.                              */
        GC_check_tls();
#   endif

#   ifndef SMALL_CONFIG
      if (GC_print_stats)
        GET_TIME(start_time);
#   endif

    /* Fold the bytes reclaimed during marking into the running total   */
    /* before resetting the per-cycle counter.                          */
#   ifndef GC_GET_HEAP_USAGE_NOT_NEEDED
      if (GC_bytes_found > 0)
        GC_reclaimed_bytes_before_gc += (word)GC_bytes_found;
#   endif
    GC_bytes_found = 0;
#   if defined(LINUX) && defined(__ELF__) && !defined(SMALL_CONFIG)
        if (GETENV("GC_PRINT_ADDRESS_MAP") != 0) {
          GC_print_address_map();
        }
#   endif
    COND_DUMP;
    if (GC_find_leak) {
      /* Mark all objects on the free list.  All objects should be      */
      /* marked when we're done.                                        */
      word size;        /* current object size  */
      unsigned kind;
      ptr_t q;

      for (kind = 0; kind < GC_n_kinds; kind++) {
        for (size = 1; size <= MAXOBJGRANULES; size++) {
          q = GC_obj_kinds[kind].ok_freelist[size];
          if (q != 0) GC_set_fl_marks(q);
        }
      }
      GC_start_reclaim(TRUE);
        /* The above just checks; it doesn't really reclaim anything.   */
    }

#   ifndef GC_NO_FINALIZATION
      GC_finalize();
#   endif
#   ifdef STUBBORN_ALLOC
      GC_clean_changing_list();
#   endif

#   ifndef SMALL_CONFIG
      if (GC_print_stats)
        GET_TIME(finalize_time);
#   endif

    if (GC_print_back_height) {
#     ifdef MAKE_BACK_GRAPH
        GC_traverse_back_graph();
#     elif !defined(SMALL_CONFIG)
        GC_err_printf("Back height not available: "
                      "Rebuild collector with -DMAKE_BACK_GRAPH\n");
#     endif
    }

    /* Clear free list mark bits, in case they got accidentally marked   */
    /* (or GC_find_leak is set and they were intentionally marked).      */
    /* Also subtract memory remaining from GC_bytes_found count.         */
    /* Note that composite objects on free list are cleared.             */
    /* Thus accidentally marking a free list is not a problem;  only     */
    /* objects on the list itself will be marked, and that's fixed here. */
    {
      word size;        /* current object size          */
      ptr_t q;          /* pointer to current object    */
      unsigned kind;

      for (kind = 0; kind < GC_n_kinds; kind++) {
        for (size = 1; size <= MAXOBJGRANULES; size++) {
          q = GC_obj_kinds[kind].ok_freelist[size];
          if (q != 0) GC_clear_fl_marks(q);
        }
      }
    }

    GC_VERBOSE_LOG_PRINTF("Bytes recovered before sweep - f.l. count = %ld\n",
                          (long)GC_bytes_found);

    /* Reconstruct free lists to contain everything not marked */
    GC_start_reclaim(FALSE);
    GC_DBGLOG_PRINTF("In-use heap: %d%% (%lu KiB pointers + %lu KiB other)\n",
                     GC_compute_heap_usage_percent(),
                     TO_KiB_UL(GC_composite_in_use),
                     TO_KiB_UL(GC_atomic_in_use));
    /* Decide whether the NEXT collection must be a full one: after a   */
    /* full GC, record the live-heap baseline; otherwise request a full */
    /* GC once usage has grown past the baseline by min_bytes_allocd(). */
    if (GC_is_full_gc) {
        GC_used_heap_size_after_full = USED_HEAP_SIZE;
        GC_need_full_gc = FALSE;
    } else {
        GC_need_full_gc = USED_HEAP_SIZE - GC_used_heap_size_after_full
                            > min_bytes_allocd();
    }

    GC_VERBOSE_LOG_PRINTF("Immediately reclaimed %ld bytes, heapsize:"
                          " %lu bytes" IF_USE_MUNMAP(" (%lu unmapped)") "\n",
                          (long)GC_bytes_found,
                          (unsigned long)GC_heapsize /*, */
                          COMMA_IF_USE_MUNMAP((unsigned long)
                                              GC_unmapped_bytes));

    /* Reset or increment counters for next cycle */
    GC_n_attempts = 0;
    GC_is_full_gc = FALSE;
    GC_bytes_allocd_before_gc += GC_bytes_allocd;
    GC_non_gc_bytes_at_gc = GC_non_gc_bytes;
    GC_bytes_allocd = 0;
    GC_bytes_dropped = 0;
    GC_bytes_freed = 0;
    GC_finalizer_bytes_freed = 0;

    IF_USE_MUNMAP(GC_unmap_old());

#   ifndef SMALL_CONFIG
      if (GC_print_stats) {
        GET_TIME(done_time);
#       ifndef GC_NO_FINALIZATION
          /* A convenient place to output finalization statistics.      */
          GC_print_finalization_stats();
#       endif
        GC_log_printf("Finalize plus initiate sweep took %lu + %lu msecs\n",
                      MS_TIME_DIFF(finalize_time,start_time),
                      MS_TIME_DIFF(done_time,finalize_time));
      }
#   endif
}
     962             : 
/* If stop_func == 0 then GC_default_stop_func is used instead.         */
/* Common driver for the public collection entry points: runs pending   */
/* finalizers and smashed-object reports around the collection proper.  */
/* Returns TRUE iff the collection completed (was not abandoned).       */
STATIC GC_bool GC_try_to_collect_general(GC_stop_func stop_func,
                                         GC_bool force_unmap GC_ATTR_UNUSED)
{
    GC_bool result;
    IF_USE_MUNMAP(int old_unmap_threshold;)
    IF_CANCEL(int cancel_state;)
    DCL_LOCK_STATE;

    if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
    if (GC_debugging_started) GC_print_all_smashed();
    /* Finalizers run before taking the lock.                           */
    GC_INVOKE_FINALIZERS();
    LOCK();
    DISABLE_CANCEL(cancel_state);
#   ifdef USE_MUNMAP
      old_unmap_threshold = GC_unmap_threshold;
      if (force_unmap ||
          (GC_force_unmap_on_gcollect && old_unmap_threshold > 0))
        GC_unmap_threshold = 1; /* unmap as much as possible */
#   endif
    ENTER_GC();
    /* Minimize junk left in my registers */
      GC_noop6(0,0,0,0,0,0);
    result = GC_try_to_collect_inner(stop_func != 0 ? stop_func :
                                     GC_default_stop_func);
    EXIT_GC();
    IF_USE_MUNMAP(GC_unmap_threshold = old_unmap_threshold); /* restore */
    RESTORE_CANCEL(cancel_state);
    UNLOCK();
    if (result) {
        if (GC_debugging_started) GC_print_all_smashed();
        /* Run finalizers made ready by this collection.                */
        GC_INVOKE_FINALIZERS();
    }
    return(result);
}
     998             : 
     999             : /* Externally callable routines to invoke full, stop-the-world collection. */
    1000           0 : GC_API int GC_CALL GC_try_to_collect(GC_stop_func stop_func)
    1001             : {
    1002             :     GC_ASSERT(stop_func != 0);
    1003           0 :     return (int)GC_try_to_collect_general(stop_func, FALSE);
    1004             : }
    1005             : 
    1006           5 : GC_API void GC_CALL GC_gcollect(void)
    1007             : {
    1008             :     /* 0 is passed as stop_func to get GC_default_stop_func value       */
    1009             :     /* while holding the allocation lock (to prevent data races).       */
    1010           5 :     (void)GC_try_to_collect_general(0, FALSE);
    1011           5 :     if (GC_have_errors) GC_print_all_errors();
    1012           5 : }
    1013             : 
/* Force an unstoppable full collection (GC_never_stop_func) and        */
/* request maximal unmapping of freed memory (force_unmap = TRUE,       */
/* which lowers GC_unmap_threshold for the duration of the call).       */
GC_API void GC_CALL GC_gcollect_and_unmap(void)
{
    (void)GC_try_to_collect_general(GC_never_stop_func, TRUE);
}
    1018             : 
GC_INNER word GC_n_heap_sects = 0;
                        /* Number of sections currently in heap; */
                        /* incremented by GC_add_to_heap below.  */

#ifdef USE_PROC_FOR_LIBRARIES
  GC_INNER word GC_n_memory = 0;
                        /* Number of GET_MEM allocated memory sections. */
#endif
    1026             : 
#ifdef USE_PROC_FOR_LIBRARIES
  /* Add HBLKSIZE aligned, GET_MEM-generated block to GC_our_memory. */
  /* Defined to do nothing if USE_PROC_FOR_LIBRARIES not set.       */
  GC_INNER void GC_add_to_our_memory(ptr_t p, size_t bytes)
  {
    /* Silently ignore failed (null) allocations.                   */
    if (0 == p) return;
    if (GC_n_memory >= MAX_HEAP_SECTS)
      ABORT("Too many GC-allocated memory sections: Increase MAX_HEAP_SECTS");
    GC_our_memory[GC_n_memory].hs_start = p;
    GC_our_memory[GC_n_memory].hs_bytes = bytes;
    GC_n_memory++;
  }
#endif
    1040             : 
/*
 * Use the chunk of memory starting at p of size bytes as part of the heap.
 * Assumes p is HBLKSIZE aligned, and bytes is a multiple of HBLKSIZE.
 */
GC_INNER void GC_add_to_heap(struct hblk *p, size_t bytes)
{
    hdr * phdr;
    word endp;

    if (GC_n_heap_sects >= MAX_HEAP_SECTS) {
        ABORT("Too many heap sections: Increase MAXHINCR or MAX_HEAP_SECTS");
    }
    while ((word)p <= HBLKSIZE) {
        /* Can't handle memory near address zero. */
        /* Skip leading blocks one HBLKSIZE at a time; give up if the   */
        /* whole chunk is consumed.                                     */
        ++p;
        bytes -= HBLKSIZE;
        if (0 == bytes) return;
    }
    endp = (word)p + bytes;
    if (endp <= (word)p) {
        /* Address wrapped. */
        /* Drop the last block so the end address is representable.     */
        bytes -= HBLKSIZE;
        if (0 == bytes) return;
        endp -= HBLKSIZE;
    }
    phdr = GC_install_header(p);
    if (0 == phdr) {
        /* This is extremely unlikely. Can't add it.  This will         */
        /* almost certainly result in a 0 return from the allocator,    */
        /* which is entirely appropriate.                               */
        return;
    }
    GC_ASSERT(endp > (word)p && endp == (word)p + bytes);
    /* Record the section, then hand the whole chunk to the block       */
    /* allocator as a single free block.                                */
    GC_heap_sects[GC_n_heap_sects].hs_start = (ptr_t)p;
    GC_heap_sects[GC_n_heap_sects].hs_bytes = bytes;
    GC_n_heap_sects++;
    phdr -> hb_sz = bytes;
    phdr -> hb_flags = 0;
    GC_freehblk(p);
    GC_heapsize += bytes;

    /* Normally the caller calculates a new GC_collect_at_heapsize,
     * but this is also called directly from alloc_mark_stack, so
     * adjust here. It will be recalculated when called from
     * GC_expand_hp_inner.
     */
    GC_collect_at_heapsize += bytes;
    if (GC_collect_at_heapsize < GC_heapsize /* wrapped */)
       GC_collect_at_heapsize = (word)(-1);

    /* Widen the conservative address bounds used by pointer checks.    */
    if ((word)p <= (word)GC_least_plausible_heap_addr
        || GC_least_plausible_heap_addr == 0) {
        GC_least_plausible_heap_addr = (void *)((ptr_t)p - sizeof(word));
                /* Making it a little smaller than necessary prevents   */
                /* us from getting a false hit from the variable        */
                /* itself.  There's some unintentional reflection       */
                /* here.                                                */
    }
    if ((word)p + bytes >= (word)GC_greatest_plausible_heap_addr) {
        GC_greatest_plausible_heap_addr = (void *)endp;
    }
}
    1103             : 
    1104             : #if !defined(NO_DEBUGGING)
    1105           0 :   void GC_print_heap_sects(void)
    1106             :   {
    1107             :     unsigned i;
    1108             : 
    1109           0 :     GC_printf("Total heap size: %lu" IF_USE_MUNMAP(" (%lu unmapped)") "\n",
    1110             :               (unsigned long)GC_heapsize /*, */
    1111             :               COMMA_IF_USE_MUNMAP((unsigned long)GC_unmapped_bytes));
    1112             : 
    1113           0 :     for (i = 0; i < GC_n_heap_sects; i++) {
    1114           0 :       ptr_t start = GC_heap_sects[i].hs_start;
    1115           0 :       size_t len = GC_heap_sects[i].hs_bytes;
    1116             :       struct hblk *h;
    1117           0 :       unsigned nbl = 0;
    1118             : 
    1119           0 :       for (h = (struct hblk *)start; (word)h < (word)(start + len); h++) {
    1120           0 :         if (GC_is_black_listed(h, HBLKSIZE)) nbl++;
    1121             :       }
    1122           0 :       GC_printf("Section %d from %p to %p %lu/%lu blacklisted\n",
    1123             :                 i, start, start + len,
    1124             :                 (unsigned long)nbl, (unsigned long)(len/HBLKSIZE));
    1125             :     }
    1126           0 :   }
    1127             : #endif
    1128             : 
/* Conservative lower/upper bounds on addresses that might point into   */
/* the GC heap; maintained as the heap grows and used to cheaply reject */
/* values that cannot be heap pointers.  ONES (all bits set) makes the  */
/* initial lower bound larger than any real address until the first     */
/* heap section is registered.                                          */
void * GC_least_plausible_heap_addr = (void *)ONES;
void * GC_greatest_plausible_heap_addr = 0;
    1131             : 
    1132         352 : GC_INLINE word GC_max(word x, word y)
    1133             : {
    1134         352 :     return(x > y? x : y);
    1135             : }
    1136             : 
    1137           0 : GC_INLINE word GC_min(word x, word y)
    1138             : {
    1139           0 :     return(x < y? x : y);
    1140             : }
    1141             : 
/* User-imposed upper bound on the total heap size, in bytes.           */
/* 0 means no limit.  Checked by GC_expand_hp_inner before growing.     */
STATIC word GC_max_heapsize = 0;

/* Public API: set the self-imposed heap size limit (in bytes).         */
GC_API void GC_CALL GC_set_max_heap_size(GC_word n)
{
    GC_max_heapsize = n;
}
    1148             : 
    1149             : GC_word GC_max_retries = 0;
    1150             : 
/* This explicitly increases the size of the heap.  It is used          */
/* internally, but may also be invoked from GC_expand_hp by the user.   */
/* The argument is in units of HBLKSIZE (tiny values are rounded up).   */
/* Returns FALSE on failure.                                            */
GC_INNER GC_bool GC_expand_hp_inner(word n)
{
    word bytes;
    struct hblk * space;
    word expansion_slop;        /* Number of bytes by which we expect the */
                                /* heap to expand soon.                   */

    if (n < MINHINCR) n = MINHINCR;
    bytes = n * HBLKSIZE;
    /* Make sure bytes is a multiple of GC_page_size */
      {
        word mask = GC_page_size - 1;
        bytes += mask;
        bytes &= ~mask;
      }

    /* Respect the user-imposed limit set via GC_set_max_heap_size.     */
    if (GC_max_heapsize != 0 && GC_heapsize + bytes > GC_max_heapsize) {
        /* Exceeded self-imposed limit */
        return(FALSE);
    }
    space = GET_MEM(bytes);
    GC_add_to_our_memory((ptr_t)space, bytes);
    if (space == 0) {
        WARN("Failed to expand heap by %" WARN_PRIdPTR " bytes\n", bytes);
        return(FALSE);
    }
    GC_INFOLOG_PRINTF("Grow heap to %lu KiB after %lu bytes allocated\n",
                      TO_KiB_UL(GC_heapsize + bytes),
                      (unsigned long)GC_bytes_allocd);
    /* Adjust heap limits generously for blacklisting to work better.   */
    /* GC_add_to_heap performs minimal adjustment needed for            */
    /* correctness.                                                     */
    expansion_slop = min_bytes_allocd() + 4*MAXHINCR*HBLKSIZE;
    /* Guess the direction of heap growth: either this is the first     */
    /* section and the new block is in the lower half of the address    */
    /* space (sign bit clear), or it lies above the previous section.   */
    if ((GC_last_heap_addr == 0 && !((word)space & SIGNB))
        || (GC_last_heap_addr != 0
            && (word)GC_last_heap_addr < (word)space)) {
        /* Assume the heap is growing up */
        word new_limit = (word)space + bytes + expansion_slop;
        /* The comparison guards against address arithmetic wrapping    */
        /* around the top of the address space.                         */
        if (new_limit > (word)space) {
          GC_greatest_plausible_heap_addr =
            (void *)GC_max((word)GC_greatest_plausible_heap_addr,
                           (word)new_limit);
        }
    } else {
        /* Heap is growing down */
        word new_limit = (word)space - expansion_slop;
        /* Again, only apply the slop if the subtraction did not wrap.  */
        if (new_limit < (word)space) {
          GC_least_plausible_heap_addr =
            (void *)GC_min((word)GC_least_plausible_heap_addr,
                           (word)space - expansion_slop);
        }
    }
    GC_prev_heap_addr = GC_last_heap_addr;
    GC_last_heap_addr = (ptr_t)space;
    GC_add_to_heap(space, bytes);
    /* Force GC before we are likely to allocate past expansion_slop */
      GC_collect_at_heapsize =
         GC_heapsize + expansion_slop - 2*MAXHINCR*HBLKSIZE;
      if (GC_collect_at_heapsize < GC_heapsize /* wrapped */)
         GC_collect_at_heapsize = (word)(-1);
    /* Notify the client callback, if any, of the new heap size.        */
    if (GC_on_heap_resize)
      (*GC_on_heap_resize)(GC_heapsize);

    return(TRUE);
}
    1220             : 
/* Really returns a bool, but it's externally visible, so that's clumsy. */
/* Arguments is in bytes.  Includes GC_init() call.                      */
GC_API int GC_CALL GC_expand_hp(size_t bytes)
{
    int result;
    DCL_LOCK_STATE;

    LOCK();
    /* Lazily initialize the collector on first use.                    */
    if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
    /* Convert bytes to heap blocks; the inner routine rounds tiny      */
    /* requests up to MINHINCR blocks.                                  */
    result = (int)GC_expand_hp_inner(divHBLKSZ((word)bytes));
    /* Track explicit user requests separately from automatic growth.   */
    /* NOTE(review): exact consumers of GC_requested_heapsize are not   */
    /* visible here - confirm against the rest of the file.             */
    if (result) GC_requested_heapsize += bytes;
    UNLOCK();
    return(result);
}
    1235             : 
word GC_fo_entries = 0; /* used also in extra/MacOS.c */

GC_INNER unsigned GC_fail_count = 0;
                        /* How many consecutive GC/expansion failures?  */
                        /* Reset by GC_allochblk.                       */

/* Snapshots of GC_fo_entries / GC_bytes_finalized taken after the last */
/* collection triggered by GC_collect_or_expand; used there to decide   */
/* whether growth in pending finalizable objects justifies another      */
/* collection instead of expanding the heap.                            */
static word last_fo_entries = 0;
static word last_bytes_finalized = 0;
    1244             : 
/* Collect or expand heap in an attempt make the indicated number of    */
/* free blocks available.  Should be called until the blocks are        */
/* available (setting retry value to TRUE unless this is the first call */
/* in a loop) or until it fails by returning FALSE.                     */
GC_INNER GC_bool GC_collect_or_expand(word needed_blocks,
                                      GC_bool ignore_off_page,
                                      GC_bool retry)
{
    GC_bool gc_not_stopped = TRUE;
    word blocks_to_get;
    IF_CANCEL(int cancel_state;)

    /* Defer thread cancellation while global GC state is manipulated.  */
    DISABLE_CANCEL(cancel_state);
    /* Prefer a collection over expansion when allocation has occurred  */
    /* and either expansion is disabled, many objects became            */
    /* finalization-ready since the last attempt, or the general        */
    /* GC_should_collect() heuristic fires.                             */
    if (!GC_incremental && !GC_dont_gc &&
        ((GC_dont_expand && GC_bytes_allocd > 0)
         || (GC_fo_entries > (last_fo_entries + 500)
             && (last_bytes_finalized | GC_bytes_finalized) != 0)
         || GC_should_collect())) {
      /* Try to do a full collection using 'default' stop_func (unless  */
      /* nothing has been allocated since the latest collection or heap */
      /* expansion is disabled).                                        */
      gc_not_stopped = GC_try_to_collect_inner(
                        GC_bytes_allocd > 0 && (!GC_dont_expand || !retry) ?
                        GC_default_stop_func : GC_never_stop_func);
      if (gc_not_stopped == TRUE || !retry) {
        /* Either the collection hasn't been aborted or this is the     */
        /* first attempt (in a loop).                                   */
        last_fo_entries = GC_fo_entries;
        last_bytes_finalized = GC_bytes_finalized;
        RESTORE_CANCEL(cancel_state);
        return(TRUE);
      }
    }

    /* Collection did not happen (or was aborted on a retry): expand.   */
    /* Grow by a fraction of the current heap, at least needed_blocks.  */
    blocks_to_get = GC_heapsize/(HBLKSIZE*GC_free_space_divisor)
                        + needed_blocks;
    if (blocks_to_get > MAXHINCR) {
      word slop;

      /* Get the minimum required to make it likely that we can satisfy */
      /* the current request in the presence of black-listing.          */
      /* This will probably be more than MAXHINCR.                      */
      if (ignore_off_page) {
        slop = 4;
      } else {
        slop = 2 * divHBLKSZ(BL_LIMIT);
        if (slop > needed_blocks) slop = needed_blocks;
      }
      if (needed_blocks + slop > MAXHINCR) {
        blocks_to_get = needed_blocks + slop;
      } else {
        blocks_to_get = MAXHINCR;
      }
    }

    /* Try the generous expansion first, then the minimal one.          */
    if (!GC_expand_hp_inner(blocks_to_get)
        && !GC_expand_hp_inner(needed_blocks)) {
      if (gc_not_stopped == FALSE) {
        /* Don't increment GC_fail_count here (and no warning).     */
        GC_gcollect_inner();
        GC_ASSERT(GC_bytes_allocd == 0);
      } else if (GC_fail_count++ < GC_max_retries) {
        WARN("Out of Memory!  Trying to continue ...\n", 0);
        GC_gcollect_inner();
      } else {
#       if !defined(AMIGA) || !defined(GC_AMIGA_FASTALLOC)
          WARN("Out of Memory! Heap size: %" WARN_PRIdPTR " MiB."
               " Returning NULL!\n", (GC_heapsize - GC_unmapped_bytes) >> 20);
#       endif
        RESTORE_CANCEL(cancel_state);
        return(FALSE);
      }
    } else if (GC_fail_count) {
      /* An expansion succeeded after previous failures.                */
      GC_COND_LOG_PRINTF("Memory available again...\n");
    }
    RESTORE_CANCEL(cancel_state);
    return(TRUE);
}
    1323             : 
/*
 * Make sure the object free list for size gran (in granules) is not empty.
 * Return a pointer to the first object on the free list.
 * The object MUST BE REMOVED FROM THE FREE LIST BY THE CALLER.
 * Assumes we hold the allocator lock.
 */
GC_INNER ptr_t GC_allocobj(size_t gran, int kind)
{
    void ** flh = &(GC_obj_kinds[kind].ok_freelist[gran]);
    GC_bool tried_minor = FALSE;
    GC_bool retry = FALSE;

    if (gran == 0) return(0);

    /* Escalating strategy per iteration: incremental mark step,        */
    /* reclaim (sweep), new heap block, then collect-or-expand.         */
    while (*flh == 0) {
      ENTER_GC();
      /* Do our share of marking work */
        if(TRUE_INCREMENTAL) GC_collect_a_little_inner(1);
      /* Sweep blocks for objects of this size */
        GC_continue_reclaim(gran, kind);
      EXIT_GC();
      if (*flh == 0) {
        /* Sweeping found nothing; carve up a fresh heap block.         */
        GC_new_hblk(gran, kind);
      }
      if (*flh == 0) {
        ENTER_GC();
        /* With unlimited incremental GC, try one minor collection      */
        /* before resorting to a full collection or heap expansion.     */
        if (GC_incremental && GC_time_limit == GC_TIME_UNLIMITED
            && !tried_minor) {
          GC_collect_a_little_inner(1);
          tried_minor = TRUE;
        } else {
          if (!GC_collect_or_expand(1, FALSE, retry)) {
            EXIT_GC();
            return(0);
          }
          retry = TRUE;
        }
        EXIT_GC();
      }
    }
    /* Successful allocation; reset failure count.      */
    GC_fail_count = 0;

    return(*flh);
}

Generated by: LCOV version 1.11