LCOV - code coverage report
Current view: top level - mm/boehm-gc - mark.c (source / functions)
Test: coverage.info                          Date: 2017-07-14 10:03:36
                  Hit    Total    Coverage
Lines:            347    530      65.5 %
Functions:         27     37      73.0 %

          Line data    Source code
       1             : /*
       2             :  * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
       3             :  * Copyright (c) 1991-1995 by Xerox Corporation.  All rights reserved.
       4             :  * Copyright (c) 2000 by Hewlett-Packard Company.  All rights reserved.
       5             :  *
       6             :  * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
       7             :  * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
       8             :  *
       9             :  * Permission is hereby granted to use or copy this program
      10             :  * for any purpose,  provided the above notices are retained on all copies.
      11             :  * Permission to modify the code and to distribute modified code is granted,
      12             :  * provided the above notices are retained, and a notice that the code was
      13             :  * modified is included with the above copyright notice.
      14             :  *
      15             :  */
      16             : 
      17             : #include "private/gc_pmark.h"
      18             : 
      19             : #include <stdio.h>
      20             : 
      21             : #if defined(MSWIN32) && defined(__GNUC__)
      22             : # include <excpt.h>
      23             : #endif
      24             : 
       25             : /* Make arguments appear live to the compiler.  Put here to minimize    */
       26             : /* the risk of inlining.  Used to minimize junk left in registers.      */
      27         249 : void GC_noop6(word arg1 GC_ATTR_UNUSED, word arg2 GC_ATTR_UNUSED,
      28             :               word arg3 GC_ATTR_UNUSED, word arg4 GC_ATTR_UNUSED,
      29             :               word arg5 GC_ATTR_UNUSED, word arg6 GC_ATTR_UNUSED)
      30             : {
      31             :   /* Empty */
      32         249 : }
      33             : 
      34             : /* Single argument version, robust against whole program analysis. */
      35             : volatile word GC_noop_sink;
      36       41655 : GC_API void GC_CALL GC_noop1(word x)
      37             : {
      38       41655 :     GC_noop_sink = x;
      39       41655 : }
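
/* Illustrative sketch (not part of this file; example_fill_async() is */
/* hypothetical): GC_noop1 is typically used to keep an otherwise-dead */
/* pointer visible to the collector up to a given program point.       */
#if 0 /* example only */
  extern void example_fill_async(char *);  /* hypothetical consumer */
  static void example_keep_live(void)
  {
    char *buf = (char *)GC_malloc(1024);
    example_fill_async(buf);
    /* ... further work that never mentions buf again ... */
    GC_noop1((word)buf);      /* buf stays reachable at least to here */
  }
#endif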
      40             : 
      41             : /* mark_proc GC_mark_procs[MAX_MARK_PROCS] = {0} -- declared in gc_priv.h */
      42             : 
      43             : GC_INNER unsigned GC_n_mark_procs = GC_RESERVED_MARK_PROCS;
      44             : 
       45             : /* Initialize GC_obj_kinds and the standard free lists properly.        */
      46             : /* This must be done statically since they may be accessed before       */
      47             : /* GC_init is called.                                                   */
      48             : /* It's done here, since we need to deal with mark descriptors.         */
      49             : GC_INNER struct obj_kind GC_obj_kinds[MAXOBJKINDS] = {
      50             : /* PTRFREE */ { &GC_aobjfreelist[0], 0 /* filled in dynamically */,
      51             :                 0 | GC_DS_LENGTH, FALSE, FALSE
      52             :                 /*, */ OK_DISCLAIM_INITZ },
      53             : /* NORMAL  */ { &GC_objfreelist[0], 0,
      54             :                 0 | GC_DS_LENGTH,  /* Adjusted in GC_init for EXTRA_BYTES */
      55             :                 TRUE /* add length to descr */, TRUE
      56             :                 /*, */ OK_DISCLAIM_INITZ },
      57             : /* UNCOLLECTABLE */
      58             :               { &GC_uobjfreelist[0], 0,
      59             :                 0 | GC_DS_LENGTH, TRUE /* add length to descr */, TRUE
      60             :                 /*, */ OK_DISCLAIM_INITZ },
      61             : # ifdef ATOMIC_UNCOLLECTABLE
      62             :    /* AUNCOLLECTABLE */
      63             :               { &GC_auobjfreelist[0], 0,
      64             :                 0 | GC_DS_LENGTH, FALSE /* add length to descr */, FALSE
      65             :                 /*, */ OK_DISCLAIM_INITZ },
      66             : # endif
      67             : # ifdef STUBBORN_ALLOC
      68             : /*STUBBORN*/ { (void **)&GC_sobjfreelist[0], 0,
      69             :                 0 | GC_DS_LENGTH, TRUE /* add length to descr */, TRUE
      70             :                 /*, */ OK_DISCLAIM_INITZ },
      71             : # endif
      72             : };
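
/* A hedged aside: clients normally do not edit this static table;     */
/* additional kinds can be registered at run time with the public API  */
/* from gc_mark.h, roughly as follows (the values are illustrative):   */
#if 0 /* example only */
  static void example_new_kind(void)
  {
    void **fl = GC_new_free_list();
    unsigned my_kind = GC_new_kind(fl, 0 | GC_DS_LENGTH,
                                   TRUE /* add length to descr */,
                                   TRUE /* clear new objects */);
    (void)my_kind;
  }
#endif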
      73             : 
      74             : # ifdef ATOMIC_UNCOLLECTABLE
      75             : #   ifdef STUBBORN_ALLOC
      76             : #     define GC_N_KINDS_INITIAL_VALUE 5
      77             : #   else
      78             : #     define GC_N_KINDS_INITIAL_VALUE 4
      79             : #   endif
      80             : # else
      81             : #   ifdef STUBBORN_ALLOC
      82             : #     define GC_N_KINDS_INITIAL_VALUE 4
      83             : #   else
      84             : #     define GC_N_KINDS_INITIAL_VALUE 3
      85             : #   endif
      86             : # endif
      87             : 
      88             : GC_INNER unsigned GC_n_kinds = GC_N_KINDS_INITIAL_VALUE;
      89             : 
      90             : # ifndef INITIAL_MARK_STACK_SIZE
      91             : #   define INITIAL_MARK_STACK_SIZE (1*HBLKSIZE)
      92             :                 /* INITIAL_MARK_STACK_SIZE * sizeof(mse) should be a    */
      93             :                 /* multiple of HBLKSIZE.                                */
      94             :                 /* The incremental collector actually likes a larger    */
       95             :                 /* size, since it wants to push all marked dirty        */
       96             :                 /* objects before marking anything new.  Currently we   */
       97             :                 /* let it grow dynamically.                             */
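                /* Worked example (sizes are illustrative): assuming   */
                /* HBLKSIZE is 4096 and sizeof(mse) is 16 (two words   */
                /* on a 64-bit target), the initial stack holds 4096   */
                /* entries in 64 KB, i.e. 16 * HBLKSIZE -- a multiple  */
                /* as required.                                        */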
      98             : # endif
      99             : 
     100             : STATIC word GC_n_rescuing_pages = 0;
      101             :                                 /* Number of dirty pages we marked from; */
     102             :                                 /* excludes ptrfree pages, etc.         */
     103             : 
     104             : GC_INNER size_t GC_mark_stack_size = 0;
     105             : 
     106             : #ifdef PARALLEL_MARK
     107             :   STATIC volatile AO_t GC_first_nonempty = 0;
     108             :         /* Lowest entry on mark stack   */
     109             :         /* that may be nonempty.        */
     110             :         /* Updated only by initiating   */
     111             :         /* thread.                      */
     112             : #endif
     113             : 
     114             : GC_INNER mark_state_t GC_mark_state = MS_NONE;
     115             : 
     116             : GC_INNER GC_bool GC_mark_stack_too_small = FALSE;
     117             : 
     118             : static struct hblk * scan_ptr;
     119             : 
     120             : STATIC GC_bool GC_objects_are_marked = FALSE;
     121             :                 /* Are there collectible marked objects in the heap?    */
     122             : 
     123             : /* Is a collection in progress?  Note that this can return true in the  */
     124             : /* nonincremental case, if a collection has been abandoned and the      */
     125             : /* mark state is now MS_INVALID.                                        */
     126           0 : GC_INNER GC_bool GC_collection_in_progress(void)
     127             : {
     128           0 :     return(GC_mark_state != MS_NONE);
     129             : }
     130             : 
     131             : /* clear all mark bits in the header */
     132      127862 : GC_INNER void GC_clear_hdr_marks(hdr *hhdr)
     133             : {
     134      127862 :     size_t last_bit = FINAL_MARK_BIT(hhdr -> hb_sz);
     135      127862 :     BZERO(hhdr -> hb_marks, sizeof(hhdr->hb_marks));
     136      127862 :     set_mark_bit_from_hdr(hhdr, last_bit);
     137      127862 :     hhdr -> hb_n_marks = 0;
     138      127862 : }
     139             : 
     140             : /* Set all mark bits in the header.  Used for uncollectible blocks. */
     141        4224 : GC_INNER void GC_set_hdr_marks(hdr *hhdr)
     142             : {
     143             :     unsigned i;
     144        4224 :     size_t sz = hhdr -> hb_sz;
     145        4224 :     unsigned n_marks = (unsigned)FINAL_MARK_BIT(sz);
     146             : 
     147             : #   ifdef USE_MARK_BYTES
     148      122888 :       for (i = 0; i <= n_marks; i += (unsigned)MARK_BIT_OFFSET(sz)) {
     149      118664 :         hhdr -> hb_marks[i] = 1;
     150             :       }
     151             : #   else
     152             :       for (i = 0; i < divWORDSZ(n_marks + WORDSZ); ++i) {
     153             :         hhdr -> hb_marks[i] = ONES;
     154             :       }
     155             : #   endif
     156             : #   ifdef MARK_BIT_PER_OBJ
     157             :       hhdr -> hb_n_marks = n_marks - 1;
     158             : #   else
     159        4224 :       hhdr -> hb_n_marks = HBLK_OBJS(sz);
     160             : #   endif
     161        4224 : }
     162             : 
     163             : /*
     164             :  * Clear all mark bits associated with block h.
     165             :  */
     166       78596 : static void clear_marks_for_block(struct hblk *h, word dummy GC_ATTR_UNUSED)
     167             : {
     168       78596 :     register hdr * hhdr = HDR(h);
     169             : 
     170       78596 :     if (IS_UNCOLLECTABLE(hhdr -> hb_obj_kind)) return;
      171             :         /* The mark bit for these is cleared only when the object is    */
      172             :         /* explicitly deallocated: that either frees the whole block,   */
      173             :         /* or clears the bit once the object is on the free list.       */
     174       74822 :     GC_clear_hdr_marks(hhdr);
     175             : }
     176             : 
     177             : /* Slow but general routines for setting/clearing/asking about mark bits */
     178       17926 : GC_API void GC_CALL GC_set_mark_bit(const void *p)
     179             : {
     180       17926 :     struct hblk *h = HBLKPTR(p);
     181       17926 :     hdr * hhdr = HDR(h);
     182       17926 :     word bit_no = MARK_BIT_NO((ptr_t)p - (ptr_t)h, hhdr -> hb_sz);
     183             : 
     184       17926 :     if (!mark_bit_from_hdr(hhdr, bit_no)) {
     185         664 :       set_mark_bit_from_hdr(hhdr, bit_no);
     186         664 :       ++hhdr -> hb_n_marks;
     187             :     }
     188       17926 : }
     189             : 
     190           0 : GC_API void GC_CALL GC_clear_mark_bit(const void *p)
     191             : {
     192           0 :     struct hblk *h = HBLKPTR(p);
     193           0 :     hdr * hhdr = HDR(h);
     194           0 :     word bit_no = MARK_BIT_NO((ptr_t)p - (ptr_t)h, hhdr -> hb_sz);
     195             : 
     196           0 :     if (mark_bit_from_hdr(hhdr, bit_no)) {
     197             :       size_t n_marks;
     198           0 :       clear_mark_bit_from_hdr(hhdr, bit_no);
     199           0 :       n_marks = hhdr -> hb_n_marks - 1;
     200             : #     ifdef PARALLEL_MARK
     201           0 :         if (n_marks != 0 || !GC_parallel)
     202           0 :           hhdr -> hb_n_marks = n_marks;
     203             :         /* Don't decrement to zero.  The counts are approximate due to  */
     204             :         /* concurrency issues, but we need to ensure that a count of    */
     205             :         /* zero implies an empty block.                                 */
     206             : #     else
     207             :           hhdr -> hb_n_marks = n_marks;
     208             : #     endif
     209             :     }
     210           0 : }
     211             : 
     212      162901 : GC_API int GC_CALL GC_is_marked(const void *p)
     213             : {
     214      162901 :     struct hblk *h = HBLKPTR(p);
     215      162901 :     hdr * hhdr = HDR(h);
     216      162901 :     word bit_no = MARK_BIT_NO((ptr_t)p - (ptr_t)h, hhdr -> hb_sz);
     217             : 
     218      162901 :     return (int)mark_bit_from_hdr(hhdr, bit_no); /* 0 or 1 */
     219             : }
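
/* Hedged usage sketch for the three public routines above (rarely    */
/* appropriate outside client mark procedures or debugging):          */
#if 0 /* example only */
  static void example_mark_bits(void)
  {
    void *p = GC_malloc(16);
    if (!GC_is_marked(p))
      GC_set_mark_bit(p);       /* protect p from the next sweep */
    GC_clear_mark_bit(p);       /* undo: p is collectible again  */
  }
#endif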
     220             : 
     221             : /*
     222             :  * Clear mark bits in all allocated heap blocks.  This invalidates
     223             :  * the marker invariant, and sets GC_mark_state to reflect this.
     224             :  * (This implicitly starts marking to reestablish the invariant.)
     225             :  */
     226         244 : GC_INNER void GC_clear_marks(void)
     227             : {
     228         244 :     GC_apply_to_all_blocks(clear_marks_for_block, (word)0);
     229         244 :     GC_objects_are_marked = FALSE;
     230         244 :     GC_mark_state = MS_INVALID;
     231         244 :     scan_ptr = 0;
     232         244 : }
     233             : 
     234             : #ifdef CHECKSUMS
     235             :   void GC_check_dirty(void);
     236             : #endif
     237             : 
     238             : /* Initiate a garbage collection.  Initiates a full collection if the   */
     239             : /* mark state is invalid.                                               */
     240         244 : GC_INNER void GC_initiate_gc(void)
     241             : {
     242             : #   ifndef GC_DISABLE_INCREMENTAL
     243         244 :         if (GC_dirty_maintained) GC_read_dirty();
     244             : #   endif
     245             : #   ifdef STUBBORN_ALLOC
     246             :         GC_read_changed();
     247             : #   endif
     248             : #   ifdef CHECKSUMS
     249             :         if (GC_dirty_maintained) GC_check_dirty();
     250             : #   endif
     251         244 :     GC_n_rescuing_pages = 0;
     252         244 :     if (GC_mark_state == MS_NONE) {
     253           0 :         GC_mark_state = MS_PUSH_RESCUERS;
     254         244 :     } else if (GC_mark_state != MS_INVALID) {
     255           0 :         ABORT("Unexpected state");
     256             :     } /* else this is really a full collection, and mark        */
     257             :       /* bits are invalid.                                      */
     258         244 :     scan_ptr = 0;
     259         244 : }
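
/* Recap of the state transitions driven by GC_mark_some below         */
/* (derived from its switch cases):                                    */
/*   MS_NONE          --GC_initiate_gc-->       MS_PUSH_RESCUERS       */
/*   MS_INVALID       --no marked objects-->    MS_PUSH_UNCOLLECTABLE  */
/*   MS_PUSH_*        --all roots pushed-->     MS_ROOTS_PUSHED        */
/*   MS_ROOTS_PUSHED  --mark stack drained-->   MS_NONE (phase done)   */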
     260             : 
     261             : #ifdef PARALLEL_MARK
     262             :     STATIC void GC_do_parallel_mark(void); /* initiate parallel marking. */
     263             : #endif /* PARALLEL_MARK */
     264             : 
     265             : #ifdef GC_DISABLE_INCREMENTAL
     266             : # define GC_push_next_marked_dirty(h) GC_push_next_marked(h)
     267             : #else
     268             :   STATIC struct hblk * GC_push_next_marked_dirty(struct hblk *h);
     269             :                 /* Invoke GC_push_marked on next dirty block above h.   */
     270             :                 /* Return a pointer just past the end of this block.    */
     271             : #endif /* !GC_DISABLE_INCREMENTAL */
     272             : STATIC struct hblk * GC_push_next_marked(struct hblk *h);
     273             :                 /* Ditto, but also mark from clean pages.       */
     274             : STATIC struct hblk * GC_push_next_marked_uncollectable(struct hblk *h);
     275             :                 /* Ditto, but mark only from uncollectible pages.       */
     276             : 
     277             : static void alloc_mark_stack(size_t);
     278             : 
     279             : # if (((defined(MSWIN32) || defined(MSWINCE)) && !defined(__GNUC__)) \
     280             :         || (defined(MSWIN32) && defined(I386)) /* for Win98 */ \
     281             :         || (defined(USE_PROC_FOR_LIBRARIES) && defined(THREADS))) \
     282             :      && !defined(NO_WRAP_MARK_SOME)
     283             :     /* Under rare conditions, we may end up marking from nonexistent memory. */
     284             :     /* Hence we need to be prepared to recover by running GC_mark_some       */
     285             :     /* with a suitable handler in place.                                     */
      286             :     /* FIXME: Do we really need it for WinCE?  If so, then              */
      287             :     /* WRAP_MARK_SOME should also be defined for CeGCC, which requires  */
     288             :     /* CPU/OS-specific code in mark_ex_handler() and GC_mark_some()     */
     289             :     /* (for manual stack unwinding and exception handler installation). */
     290             : #   define WRAP_MARK_SOME
     291             : # endif
     292             : 
     293             : /* Perform a small amount of marking.                   */
     294             : /* We try to touch roughly a page of memory.            */
     295             : /* Return TRUE if we just finished a mark phase.        */
     296             : /* Cold_gc_frame is an address inside a GC frame that   */
     297             : /* remains valid until all marking is complete.         */
     298             : /* A zero value indicates that it's OK to miss some     */
     299             : /* register values.                                     */
     300             : /* We hold the allocation lock.  In the case of         */
     301             : /* incremental collection, the world may not be stopped.*/
     302             : #ifdef WRAP_MARK_SOME
     303             :   /* For win32, this is called after we establish a structured  */
     304             :   /* exception handler, in case Windows unmaps one of our root  */
     305             :   /* segments.  See below.  In either case, we acquire the      */
     306             :   /* allocator lock long before we get here.                    */
     307             :   STATIC GC_bool GC_mark_some_inner(ptr_t cold_gc_frame)
     308             : #else
     309        4554 :   GC_INNER GC_bool GC_mark_some(ptr_t cold_gc_frame)
     310             : #endif
     311             : {
     312        4554 :     switch(GC_mark_state) {
     313             :         case MS_NONE:
     314           0 :             break;
     315             : 
     316             :         case MS_PUSH_RESCUERS:
     317           0 :             if ((word)GC_mark_stack_top
     318           0 :                 >= (word)(GC_mark_stack_limit - INITIAL_MARK_STACK_SIZE/2)) {
     319             :                 /* Go ahead and mark, even though that might cause us to */
     320             :                 /* see more marked dirty objects later on.  Avoid this   */
     321             :                 /* in the future.                                        */
     322           0 :                 GC_mark_stack_too_small = TRUE;
     323           0 :                 MARK_FROM_MARK_STACK();
     324           0 :                 break;
     325             :             } else {
     326           0 :                 scan_ptr = GC_push_next_marked_dirty(scan_ptr);
     327           0 :                 if (scan_ptr == 0) {
     328           0 :                     GC_COND_LOG_PRINTF("Marked from %lu dirty pages\n",
     329             :                                        (unsigned long)GC_n_rescuing_pages);
     330           0 :                     GC_push_roots(FALSE, cold_gc_frame);
     331           0 :                     GC_objects_are_marked = TRUE;
     332           0 :                     if (GC_mark_state != MS_INVALID) {
     333           0 :                         GC_mark_state = MS_ROOTS_PUSHED;
     334             :                     }
     335             :                 }
     336             :             }
     337           0 :             break;
     338             : 
     339             :         case MS_PUSH_UNCOLLECTABLE:
     340        8132 :             if ((word)GC_mark_stack_top
     341        4066 :                 >= (word)(GC_mark_stack + GC_mark_stack_size/4)) {
     342             : #               ifdef PARALLEL_MARK
      343             :                   /* Grow the stack to avoid draining it here, since    */
      344             :                   /* the marker is not parallelized in this state.      */
     345          48 :                   if (GC_parallel) GC_mark_stack_too_small = TRUE;
     346             : #               endif
     347          48 :                 MARK_FROM_MARK_STACK();
     348          48 :                 break;
     349             :             } else {
     350        4018 :                 scan_ptr = GC_push_next_marked_uncollectable(scan_ptr);
     351        4018 :                 if (scan_ptr == 0) {
     352         244 :                     GC_push_roots(TRUE, cold_gc_frame);
     353         244 :                     GC_objects_are_marked = TRUE;
     354         244 :                     if (GC_mark_state != MS_INVALID) {
     355         244 :                         GC_mark_state = MS_ROOTS_PUSHED;
     356             :                     }
     357             :                 }
     358             :             }
     359        4018 :             break;
     360             : 
     361             :         case MS_ROOTS_PUSHED:
     362             : #           ifdef PARALLEL_MARK
     363             :               /* In the incremental GC case, this currently doesn't     */
     364             :               /* quite do the right thing, since it runs to             */
     365             :               /* completion.  On the other hand, starting a             */
     366             :               /* parallel marker is expensive, so perhaps it is         */
     367             :               /* the right thing?                                       */
     368             :               /* Eventually, incremental marking should run             */
     369             :               /* asynchronously in multiple threads, without grabbing   */
     370             :               /* the allocation lock.                                   */
     371         244 :                 if (GC_parallel) {
     372         244 :                   GC_do_parallel_mark();
     373             :                   GC_ASSERT((word)GC_mark_stack_top < (word)GC_first_nonempty);
     374         244 :                   GC_mark_stack_top = GC_mark_stack - 1;
     375         244 :                   if (GC_mark_stack_too_small) {
     376           5 :                     alloc_mark_stack(2*GC_mark_stack_size);
     377             :                   }
     378         244 :                   if (GC_mark_state == MS_ROOTS_PUSHED) {
     379         244 :                     GC_mark_state = MS_NONE;
     380         244 :                     return(TRUE);
     381             :                   }
     382           0 :                   break;
     383             :                 }
     384             : #           endif
     385           0 :             if ((word)GC_mark_stack_top >= (word)GC_mark_stack) {
     386           0 :                 MARK_FROM_MARK_STACK();
     387           0 :                 break;
     388             :             } else {
     389           0 :                 GC_mark_state = MS_NONE;
     390           0 :                 if (GC_mark_stack_too_small) {
     391           0 :                     alloc_mark_stack(2*GC_mark_stack_size);
     392             :                 }
     393           0 :                 return(TRUE);
     394             :             }
     395             : 
     396             :         case MS_INVALID:
     397             :         case MS_PARTIALLY_INVALID:
     398         244 :             if (!GC_objects_are_marked) {
     399         244 :                 GC_mark_state = MS_PUSH_UNCOLLECTABLE;
     400         244 :                 break;
     401             :             }
     402           0 :             if ((word)GC_mark_stack_top >= (word)GC_mark_stack) {
     403           0 :                 MARK_FROM_MARK_STACK();
     404           0 :                 break;
     405             :             }
     406           0 :             if (scan_ptr == 0 && GC_mark_state == MS_INVALID) {
     407             :                 /* About to start a heap scan for marked objects. */
     408             :                 /* Mark stack is empty.  OK to reallocate.        */
     409           0 :                 if (GC_mark_stack_too_small) {
     410           0 :                     alloc_mark_stack(2*GC_mark_stack_size);
     411             :                 }
     412           0 :                 GC_mark_state = MS_PARTIALLY_INVALID;
     413             :             }
     414           0 :             scan_ptr = GC_push_next_marked(scan_ptr);
     415           0 :             if (scan_ptr == 0 && GC_mark_state == MS_PARTIALLY_INVALID) {
     416           0 :                 GC_push_roots(TRUE, cold_gc_frame);
     417           0 :                 GC_objects_are_marked = TRUE;
     418           0 :                 if (GC_mark_state != MS_INVALID) {
     419           0 :                     GC_mark_state = MS_ROOTS_PUSHED;
     420             :                 }
     421             :             }
     422           0 :             break;
     423             : 
     424             :         default:
     425           0 :             ABORT("GC_mark_some: bad state");
     426             :     }
     427        4310 :     return(FALSE);
     428             : }
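
/* A minimal sketch (assumed; modeled loosely on the collector's own   */
/* driver in alloc.c, not a verbatim excerpt) of the calling           */
/* convention: invoke GC_mark_some repeatedly, holding the allocation  */
/* lock, until it reports that a mark phase completed.                 */
#if 0 /* example only */
  static void example_drive_marker(ptr_t cold_gc_frame)
  {
    GC_initiate_gc();                     /* enter a marking state */
    while (!GC_mark_some(cold_gc_frame)) {
      /* Each call does roughly a page of work; an incremental   */
      /* collector may yield to the mutator between calls.       */
    }
    /* Mark invariant reestablished; GC_mark_state is MS_NONE.   */
  }
#endif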
     429             : 
     430             : #ifdef WRAP_MARK_SOME
     431             : 
     432             : # if (defined(MSWIN32) || defined(MSWINCE)) && defined(__GNUC__)
     433             : 
     434             :     typedef struct {
     435             :       EXCEPTION_REGISTRATION ex_reg;
     436             :       void *alt_path;
     437             :     } ext_ex_regn;
     438             : 
     439             :     static EXCEPTION_DISPOSITION mark_ex_handler(
     440             :         struct _EXCEPTION_RECORD *ex_rec,
     441             :         void *est_frame,
     442             :         struct _CONTEXT *context,
     443             :         void *disp_ctxt GC_ATTR_UNUSED)
     444             :     {
     445             :         if (ex_rec->ExceptionCode == STATUS_ACCESS_VIOLATION) {
     446             :           ext_ex_regn *xer = (ext_ex_regn *)est_frame;
     447             : 
     448             :           /* Unwind from the inner function assuming the standard */
     449             :           /* function prologue.                                   */
     450             :           /* Assumes code has not been compiled with              */
     451             :           /* -fomit-frame-pointer.                                */
     452             :           context->Esp = context->Ebp;
     453             :           context->Ebp = *((DWORD *)context->Esp);
     454             :           context->Esp = context->Esp - 8;
     455             : 
     456             :           /* Resume execution at the "real" handler within the    */
     457             :           /* wrapper function.                                    */
     458             :           context->Eip = (DWORD )(xer->alt_path);
     459             : 
     460             :           return ExceptionContinueExecution;
     461             : 
     462             :         } else {
     463             :             return ExceptionContinueSearch;
     464             :         }
     465             :     }
     466             : # endif /* __GNUC__  && MSWIN32 */
     467             : 
     468             : #if defined(GC_WIN32_THREADS) && !defined(__GNUC__)
     469             :   GC_INNER GC_bool GC_started_thread_while_stopped(void);
     470             :   /* In win32_threads.c.  Did we invalidate mark phase with an  */
     471             :   /* unexpected thread start?                                   */
     472             : #endif
     473             : 
     474             :   GC_INNER GC_bool GC_mark_some(ptr_t cold_gc_frame)
     475             :   {
     476             :       GC_bool ret_val;
     477             : 
     478             : #   if defined(MSWIN32) || defined(MSWINCE)
     479             : #    ifndef __GNUC__
     480             :       /* Windows 98 appears to asynchronously create and remove  */
     481             :       /* writable memory mappings, for reasons we haven't yet    */
     482             :       /* understood.  Since we look for writable regions to      */
     483             :       /* determine the root set, we may try to mark from an      */
     484             :       /* address range that disappeared since we started the     */
     485             :       /* collection.  Thus we have to recover from faults here.  */
     486             :       /* This code does not appear to be necessary for Windows   */
     487             :       /* 95/NT/2000+. Note that this code should never generate  */
     488             :       /* an incremental GC write fault.                          */
     489             :       /* This code seems to be necessary for WinCE (at least in  */
     490             :       /* the case we'd decide to add MEM_PRIVATE sections to     */
     491             :       /* data roots in GC_register_dynamic_libraries()).         */
     492             :       /* It's conceivable that this is the same issue with       */
     493             :       /* terminating threads that we see with Linux and          */
     494             :       /* USE_PROC_FOR_LIBRARIES.                                 */
     495             : 
     496             :       __try {
     497             :           ret_val = GC_mark_some_inner(cold_gc_frame);
     498             :       } __except (GetExceptionCode() == EXCEPTION_ACCESS_VIOLATION ?
     499             :                 EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH) {
     500             :           goto handle_ex;
     501             :       }
     502             : #     ifdef GC_WIN32_THREADS
     503             :         /* With DllMain-based thread tracking, a thread may have        */
     504             :         /* started while we were marking.  This is logically equivalent */
     505             :         /* to the exception case; our results are invalid and we have   */
     506             :         /* to start over.  This cannot be prevented since we can't      */
     507             :         /* block in DllMain.                                            */
     508             :         if (GC_started_thread_while_stopped()) goto handle_ex;
     509             : #     endif
     510             :      rm_handler:
     511             :       return ret_val;
     512             : 
     513             : #    else /* __GNUC__ */
     514             : 
     515             :       /* Manually install an exception handler since GCC does    */
     516             :       /* not yet support Structured Exception Handling (SEH) on  */
     517             :       /* Win32.                                                  */
     518             : 
     519             :       ext_ex_regn er;
     520             : 
     521             :       er.alt_path = &&handle_ex;
     522             :       er.ex_reg.handler = mark_ex_handler;
     523             :       __asm__ __volatile__ ("movl %%fs:0, %0" : "=r" (er.ex_reg.prev));
     524             :       __asm__ __volatile__ ("movl %0, %%fs:0" : : "r" (&er));
     525             :       ret_val = GC_mark_some_inner(cold_gc_frame);
     526             :       /* Prevent GCC from considering the following code unreachable */
     527             :       /* and thus eliminating it.                                    */
     528             :         if (er.alt_path == 0)
     529             :           goto handle_ex;
     530             :     rm_handler:
     531             :       /* Uninstall the exception handler */
     532             :       __asm__ __volatile__ ("mov %0, %%fs:0" : : "r" (er.ex_reg.prev));
     533             :       return ret_val;
     534             : 
     535             : #    endif /* __GNUC__ */
     536             : #   else /* !MSWIN32 */
     537             :       /* Here we are handling the case in which /proc is used for root  */
     538             :       /* finding, and we have threads.  We may find a stack for a       */
     539             :       /* thread that is in the process of exiting, and disappears       */
     540             :       /* while we are marking it.  This seems extremely difficult to    */
     541             :       /* avoid otherwise.                                               */
     542             :       if (GC_incremental) {
     543             :         WARN("Incremental GC incompatible with /proc roots\n", 0);
     544             :         /* I'm not sure if this could still work ...    */
     545             :       }
     546             :       GC_setup_temporary_fault_handler();
     547             :       if(SETJMP(GC_jmp_buf) != 0) goto handle_ex;
     548             :       ret_val = GC_mark_some_inner(cold_gc_frame);
     549             :     rm_handler:
     550             :       GC_reset_fault_handler();
     551             :       return ret_val;
     552             : 
     553             : #   endif /* !MSWIN32 */
     554             : 
     555             : handle_ex:
     556             :     /* Exception handler starts here for all cases. */
     557             :       WARN("Caught ACCESS_VIOLATION in marker;"
     558             :            " memory mapping disappeared\n", 0);
     559             : 
     560             :       /* We have bad roots on the stack.  Discard mark stack.   */
     561             :       /* Rescan from marked objects.  Redetermine roots.        */
     562             :       GC_invalidate_mark_state();
     563             :       scan_ptr = 0;
     564             : 
     565             :       ret_val = FALSE;
     566             :       goto rm_handler;  /* Back to platform-specific code. */
     567             :   }
     568             : #endif /* WRAP_MARK_SOME */
     569             : 
     570         244 : GC_INNER void GC_invalidate_mark_state(void)
     571             : {
     572         244 :     GC_mark_state = MS_INVALID;
     573         244 :     GC_mark_stack_top = GC_mark_stack-1;
     574         244 : }
     575             : 
     576           0 : GC_INNER mse * GC_signal_mark_stack_overflow(mse *msp)
     577             : {
     578           0 :     GC_mark_state = MS_INVALID;
     579             : #   ifdef PARALLEL_MARK
     580             :       /* We are using a local_mark_stack in parallel mode, so   */
     581             :       /* do not signal the global mark stack to be resized.     */
     582             :       /* That will be done if required in GC_return_mark_stack. */
     583           0 :       if (!GC_parallel)
     584           0 :         GC_mark_stack_too_small = TRUE;
     585             : #   else
     586             :       GC_mark_stack_too_small = TRUE;
     587             : #   endif
     588           0 :     GC_COND_LOG_PRINTF("Mark stack overflow; current size = %lu entries\n",
     589             :                        (unsigned long)GC_mark_stack_size);
     590           0 :     return(msp - GC_MARK_STACK_DISCARDS);
     591             : }
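
/* Hedged sketch of the caller-side protocol (cf. the push macros in   */
/* gc_pmark.h): a pusher bumps mark_stack_top first and, on hitting    */
/* the limit, lets the routine above discard a few entries and flag    */
/* the state invalid so the lost work is redone by rescanning:         */
#if 0 /* example only */
  static mse *example_push(mse *mark_stack_top, mse *mark_stack_limit,
                           ptr_t base, word descriptor)
  {
    mark_stack_top++;
    if ((word)mark_stack_top >= (word)mark_stack_limit)
      mark_stack_top = GC_signal_mark_stack_overflow(mark_stack_top);
    mark_stack_top -> mse_start = base;
    mark_stack_top -> mse_descr.w = descriptor;
    return mark_stack_top;
  }
#endif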
     592             : 
     593             : /*
     594             :  * Mark objects pointed to by the regions described by
     595             :  * mark stack entries between mark_stack and mark_stack_top,
     596             :  * inclusive.  Assumes the upper limit of a mark stack entry
     597             :  * is never 0.  A mark stack entry never has size 0.
     598             :  * We try to traverse on the order of a hblk of memory before we return.
     599             :  * Caller is responsible for calling this until the mark stack is empty.
     600             :  * Note that this is the most performance critical routine in the
     601             :  * collector.  Hence it contains all sorts of ugly hacks to speed
     602             :  * things up.  In particular, we avoid procedure calls on the common
     603             :  * path, we take advantage of peculiarities of the mark descriptor
     604             :  * encoding, we optionally maintain a cache for the block address to
     605             :  * header mapping, we prefetch when an object is "grayed", etc.
     606             :  */
     607       93299 : GC_INNER mse * GC_mark_from(mse *mark_stack_top, mse *mark_stack,
     608             :                             mse *mark_stack_limit)
     609             : {
     610       93299 :   signed_word credit = HBLKSIZE;  /* Remaining credit for marking work  */
     611             :   ptr_t current_p;      /* Pointer to current candidate ptr.    */
     612             :   word current; /* Candidate pointer.                           */
     613             :   ptr_t limit;  /* (Incl) limit of current candidate range.     */
     614             :   word descr;
     615       93299 :   ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
     616       93299 :   ptr_t least_ha = GC_least_plausible_heap_addr;
     617             :   DECLARE_HDR_CACHE;
     618             : 
      619             : # define SPLIT_RANGE_WORDS 128  /* Must be a power of 2.        */
     620             : 
     621       93299 :   GC_objects_are_marked = TRUE;
     622       93299 :   INIT_HDR_CACHE;
     623             : # ifdef OS2 /* Use untweaked version to circumvent compiler problem */
     624             :     while ((word)mark_stack_top >= (word)mark_stack && credit >= 0)
     625             : # else
     626     1416363 :     while ((((ptr_t)mark_stack_top - (ptr_t)mark_stack) | credit) >= 0)
     627             : # endif
     628             :   {
     629     1177055 :     current_p = mark_stack_top -> mse_start;
     630     1177055 :     descr = mark_stack_top -> mse_descr.w;
     631             :   retry:
     632             :     /* current_p and descr describe the current object.         */
     633             :     /* *mark_stack_top is vacant.                               */
     634             :     /* The following is 0 only for small objects described by a simple  */
     635             :     /* length descriptor.  For many applications this is the common     */
     636             :     /* case, so we try to detect it quickly.                            */
     637     1371324 :     if (descr & ((~(WORDS_TO_BYTES(SPLIT_RANGE_WORDS) - 1)) | GC_DS_TAGS)) {
     638      394443 :       word tag = descr & GC_DS_TAGS;
     639             : 
     640      394443 :       switch(tag) {
     641             :         case GC_DS_LENGTH:
     642             :           /* Large length.                                              */
     643             :           /* Process part of the range to avoid pushing too much on the */
     644             :           /* stack.                                                     */
     645             :           GC_ASSERT(descr < (word)GC_greatest_plausible_heap_addr
     646             :                             - (word)GC_least_plausible_heap_addr);
     647             : #         ifdef ENABLE_TRACE
     648             :             if ((word)GC_trace_addr >= (word)current_p
     649             :                 && (word)GC_trace_addr < (word)(current_p + descr)) {
     650             :               GC_log_printf("GC #%u: large section; start %p, len %lu\n",
     651             :                         (unsigned)GC_gc_no, current_p, (unsigned long)descr);
     652             :             }
     653             : #         endif /* ENABLE_TRACE */
     654             : #         ifdef PARALLEL_MARK
     655             : #           define SHARE_BYTES 2048
     656      781306 :             if (descr > SHARE_BYTES && GC_parallel
     657      388511 :                 && (word)mark_stack_top < (word)(mark_stack_limit - 1)) {
     658      194269 :               int new_size = (descr/2) & ~(sizeof(word)-1);
     659      194269 :               mark_stack_top -> mse_start = current_p;
     660      194269 :               mark_stack_top -> mse_descr.w = new_size + sizeof(word);
     661             :                                         /* makes sure we handle         */
     662             :                                         /* misaligned pointers.         */
     663      194269 :               mark_stack_top++;
     664             : #             ifdef ENABLE_TRACE
     665             :                 if ((word)GC_trace_addr >= (word)current_p
     666             :                     && (word)GC_trace_addr < (word)(current_p + descr)) {
     667             :                   GC_log_printf("GC #%u: splitting (parallel) %p at %p\n",
     668             :                         (unsigned)GC_gc_no, current_p, current_p + new_size);
     669             :                 }
     670             : #             endif /* ENABLE_TRACE */
     671      194269 :               current_p += new_size;
     672      194269 :               descr -= new_size;
     673      194269 :               goto retry;
     674             :             }
     675             : #         endif /* PARALLEL_MARK */
     676      198526 :           mark_stack_top -> mse_start =
     677             :                 limit = current_p + WORDS_TO_BYTES(SPLIT_RANGE_WORDS-1);
     678      198526 :           mark_stack_top -> mse_descr.w =
     679      198526 :                                 descr - WORDS_TO_BYTES(SPLIT_RANGE_WORDS-1);
     680             : #         ifdef ENABLE_TRACE
     681             :             if ((word)GC_trace_addr >= (word)current_p
     682             :                 && (word)GC_trace_addr < (word)(current_p + descr)) {
     683             :               GC_log_printf("GC #%u: splitting %p at %p\n",
     684             :                             (unsigned)GC_gc_no, current_p, limit);
     685             :             }
     686             : #         endif /* ENABLE_TRACE */
     687             :           /* Make sure that pointers overlapping the two ranges are     */
     688             :           /* considered.                                                */
     689      198526 :           limit += sizeof(word) - ALIGNMENT;
     690      198526 :           break;
     691             :         case GC_DS_BITMAP:
     692           0 :           mark_stack_top--;
     693             : #         ifdef ENABLE_TRACE
     694             :             if ((word)GC_trace_addr >= (word)current_p
     695             :                 && (word)GC_trace_addr < (word)(current_p
     696             :                                                 + WORDS_TO_BYTES(WORDSZ-2))) {
     697             :               GC_log_printf("GC #%u: tracing from %p bitmap descr %lu\n",
     698             :                             (unsigned)GC_gc_no, current_p,
     699             :                             (unsigned long)descr);
     700             :             }
     701             : #         endif /* ENABLE_TRACE */
     702           0 :           descr &= ~GC_DS_TAGS;
     703           0 :           credit -= WORDS_TO_BYTES(WORDSZ/2); /* guess */
     704           0 :           while (descr != 0) {
     705           0 :             if ((signed_word)descr < 0) {
     706           0 :               current = *(word *)current_p;
     707             :               FIXUP_POINTER(current);
     708           0 :               if (current >= (word)least_ha && current < (word)greatest_ha) {
     709           0 :                 PREFETCH((ptr_t)current);
     710             : #               ifdef ENABLE_TRACE
     711             :                   if (GC_trace_addr == current_p) {
     712             :                     GC_log_printf("GC #%u: considering(3) %p -> %p\n",
     713             :                                   (unsigned)GC_gc_no, current_p,
     714             :                                   (ptr_t)current);
     715             :                   }
     716             : #               endif /* ENABLE_TRACE */
     717           0 :                 PUSH_CONTENTS((ptr_t)current, mark_stack_top,
     718             :                               mark_stack_limit, current_p, exit1);
     719             :               }
     720             :             }
     721           0 :             descr <<= 1;
     722           0 :             current_p += sizeof(word);
     723             :           }
     724           0 :           continue;
     725             :         case GC_DS_PROC:
     726       12464 :           mark_stack_top--;
     727             : #         ifdef ENABLE_TRACE
     728             :             if ((word)GC_trace_addr >= (word)current_p
     729             :                 && GC_base(current_p) != 0
     730             :                 && GC_base(current_p) == GC_base(GC_trace_addr)) {
     731             :               GC_log_printf("GC #%u: tracing from %p, proc descr %lu\n",
     732             :                             (unsigned)GC_gc_no, current_p,
     733             :                             (unsigned long)descr);
     734             :             }
     735             : #         endif /* ENABLE_TRACE */
     736       12464 :           credit -= GC_PROC_BYTES;
     737       12464 :           mark_stack_top = (*PROC(descr))((word *)current_p, mark_stack_top,
     738             :                                           mark_stack_limit, ENV(descr));
     739           0 :           continue;
     740             :         case GC_DS_PER_OBJECT:
     741           0 :           if ((signed_word)descr >= 0) {
     742             :             /* Descriptor is in the object.     */
     743           0 :             descr = *(word *)(current_p + descr - GC_DS_PER_OBJECT);
     744             :           } else {
     745             :             /* Descriptor is in type descriptor pointed to by first     */
     746             :             /* word in object.                                          */
     747           0 :             ptr_t type_descr = *(ptr_t *)current_p;
     748             :             /* type_descr is either a valid pointer to the descriptor   */
     749             :             /* structure, or this object was on a free list.  If it     */
      750             :             /* was anything but the last object on the free list,       */
     751             :             /* we will misinterpret the next object on the free list as */
     752             :             /* the type descriptor, and get a 0 GC descriptor, which    */
     753             :             /* is ideal.  Unfortunately, we need to check for the last  */
     754             :             /* object case explicitly.                                  */
     755           0 :             if (0 == type_descr) {
     756             :                 /* Rarely executed.     */
     757           0 :                 mark_stack_top--;
     758           0 :                 continue;
     759             :             }
     760           0 :             descr = *(word *)(type_descr
     761             :                               - (descr + (GC_INDIR_PER_OBJ_BIAS
     762             :                                           - GC_DS_PER_OBJECT)));
     763             :           }
     764           0 :           if (0 == descr) {
     765             :               /* Can happen either because we generated a 0 descriptor  */
     766             :               /* or we saw a pointer to a free object.          */
     767           0 :               mark_stack_top--;
     768           0 :               continue;
     769             :           }
     770           0 :           goto retry;
     771             :         default:
     772           0 :           limit = 0; /* initialized to prevent warning. */
     773           0 :           ABORT_RET("GC_mark_from: bad state");
     774             :       }
     775             :     } else /* Small object with length descriptor */ {
     776      976881 :       mark_stack_top--;
     777             : #     ifndef SMALL_CONFIG
     778      976881 :         if (descr < sizeof(word))
     779           0 :           continue;
     780             : #     endif
     781      976881 :       limit = current_p + (word)descr;
     782             :     }
     783             : #   ifdef ENABLE_TRACE
     784             :         if ((word)GC_trace_addr >= (word)current_p
     785             :             && (word)GC_trace_addr < (word)limit) {
     786             :           GC_log_printf("GC #%u: Tracing from %p, length is %lu\n",
     787             :                         (unsigned)GC_gc_no, current_p, (unsigned long)descr);
     788             :         }
     789             : #   endif /* ENABLE_TRACE */
     790             :     /* The simple case in which we're scanning a range. */
     791             :     GC_ASSERT(!((word)current_p & (ALIGNMENT-1)));
     792     1164591 :     credit -= limit - current_p;
     793     1164591 :     limit -= sizeof(word);
     794             :     {
     795             : #     define PREF_DIST 4
     796             : 
     797             : #     ifndef SMALL_CONFIG
     798             :         word deferred;
     799             : 
     800             :         /* Try to prefetch the next pointer to be examined ASAP.        */
     801             :         /* Empirically, this also seems to help slightly without        */
     802             :         /* prefetches, at least on linux/X86.  Presumably this loop     */
     803             :         /* ends up with less register pressure, and gcc thus ends up    */
     804             :         /* generating slightly better code.  Overall gcc code quality   */
     805             :         /* for this loop is still not great.                            */
     806             :         for(;;) {
     807    14356813 :           PREFETCH(limit - PREF_DIST*CACHE_LINE_SIZE);
     808             :           GC_ASSERT((word)limit >= (word)current_p);
     809    14428012 :           deferred = *(word *)limit;
     810             :           FIXUP_POINTER(deferred);
     811    14428012 :           limit -= ALIGNMENT;
     812    14428012 :           if (deferred >= (word)least_ha && deferred < (word)greatest_ha) {
     813      178028 :             PREFETCH((ptr_t)deferred);
     814      178413 :             break;
     815             :           }
     816    14249984 :           if ((word)current_p > (word)limit) goto next_object;
     817             :           /* Unroll once, so we don't do too many of the prefetches     */
     818             :           /* based on limit.                                            */
     819    13922743 :           deferred = *(word *)limit;
     820             :           FIXUP_POINTER(deferred);
     821    13922743 :           limit -= ALIGNMENT;
     822    13922743 :           if (deferred >= (word)least_ha && deferred < (word)greatest_ha) {
     823      246174 :             PREFETCH((ptr_t)deferred);
     824      238367 :             break;
     825             :           }
     826    13676569 :           if ((word)current_p > (word)limit) goto next_object;
     827    13192222 :         }
     828             : #     endif
     829             : 
     830     3512110 :       while ((word)current_p <= (word)limit) {
     831             :         /* Empirically, unrolling this loop doesn't help a lot. */
     832             :         /* Since PUSH_CONTENTS expands to a lot of code,        */
     833             :         /* we don't.                                            */
     834     2672467 :         current = *(word *)current_p;
     835             :         FIXUP_POINTER(current);
     836     2672467 :         PREFETCH(current_p + PREF_DIST*CACHE_LINE_SIZE);
     837     2504540 :         if (current >= (word)least_ha && current < (word)greatest_ha) {
     838             :           /* Prefetch the contents of the object we just pushed.  It's  */
     839             :           /* likely we will need them soon.                             */
     840     1204516 :           PREFETCH((ptr_t)current);
     841             : #         ifdef ENABLE_TRACE
     842             :             if (GC_trace_addr == current_p) {
     843             :               GC_log_printf("GC #%u: considering(1) %p -> %p\n",
     844             :                             (unsigned)GC_gc_no, current_p, (ptr_t)current);
     845             :             }
     846             : #         endif /* ENABLE_TRACE */
     847     1372549 :           PUSH_CONTENTS((ptr_t)current, mark_stack_top,
     848             :                         mark_stack_limit, current_p, exit2);
     849             :         }
     850     2678550 :         current_p += ALIGNMENT;
     851             :       }
     852             : 
     853             : #     ifndef SMALL_CONFIG
     854             :         /* We still need to mark the entry we previously prefetched.    */
     855             :         /* We already know that it passes the preliminary pointer       */
     856             :         /* validity test.                                               */
     857             : #       ifdef ENABLE_TRACE
     858             :             if (GC_trace_addr == current_p) {
     859             :               GC_log_printf("GC #%u: considering(2) %p -> %p\n",
     860             :                             (unsigned)GC_gc_no, current_p, (ptr_t)deferred);
     861             :             }
     862             : #       endif /* ENABLE_TRACE */
     863      422863 :         PUSH_CONTENTS((ptr_t)deferred, mark_stack_top,
     864             :                       mark_stack_limit, current_p, exit4);
     865             :         next_object:;
     866             : #     endif
     867             :     }
     868             :   }
     869      146009 :   return mark_stack_top;
     870             : }
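
/* For reference, a hedged decoder for the two descriptor tag bits     */
/* consumed above (the tag macros come from gc_mark.h):                */
#if 0 /* example only */
  static const char *example_descr_kind(word descr)
  {
    switch (descr & GC_DS_TAGS) {
      case GC_DS_LENGTH:     return "object length in bytes";
      case GC_DS_BITMAP:     return "bitmap: one bit per word scanned";
      case GC_DS_PROC:       return "client mark procedure + env";
      case GC_DS_PER_OBJECT: return "descriptor stored in the object";
      default:               return "unreachable";  /* 2 tag bits */
    }
  }
#endif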
     871             : 
     872             : #ifdef PARALLEL_MARK
     873             : 
     874             : STATIC GC_bool GC_help_wanted = FALSE;  /* Protected by mark lock       */
     875             : STATIC unsigned GC_helper_count = 0;    /* Number of running helpers.   */
     876             :                                         /* Protected by mark lock       */
     877             : STATIC unsigned GC_active_count = 0;    /* Number of active helpers.    */
     878             :                                         /* Protected by mark lock       */
     879             :                                         /* May increase and decrease    */
     880             :                                         /* within each mark cycle.  But */
     881             :                                         /* once it returns to 0, it     */
     882             :                                         /* stays zero for the cycle.    */
     883             : 
     884             : GC_INNER word GC_mark_no = 0;
     885             : 
     886             : #define LOCAL_MARK_STACK_SIZE HBLKSIZE
     887             :         /* Under normal circumstances, this is big enough to guarantee  */
     888             :         /* we don't overflow half of it in a single call to             */
     889             :         /* GC_mark_from.                                                */
     890             : 
     891             : 
     892             : /* Steal mark stack entries starting at mse low into mark stack local   */
     893             : /* until we either steal mse high, or we have max entries.              */
     894             : /* Return a pointer to the top of the local mark stack.                 */
     895             : /* *next is replaced by a pointer to the next unscanned mark stack      */
     896             : /* entry.                                                               */
     897       29042 : STATIC mse * GC_steal_mark_stack(mse * low, mse * high, mse * local,
     898             :                                  unsigned max, mse **next)
     899             : {
     900             :     mse *p;
     901       29042 :     mse *top = local - 1;
     902       29042 :     unsigned i = 0;
     903             : 
     904             :     GC_ASSERT((word)high >= (word)(low - 1)
     905             :               && (word)(high - low + 1) <= GC_mark_stack_size);
     906      241666 :     for (p = low; (word)p <= (word)high && i <= max; ++p) {
     907      213300 :         word descr = (word)AO_load(&p->mse_descr.ao);
     908      214451 :         if (descr != 0) {
     909             :             /* Must be ordered after read of descr: */
     910       90809 :             AO_store_release_write(&p->mse_descr.ao, 0);
     911             :             /* More than one thread may get this entry, but that's only */
     912             :             /* a minor performance problem.                             */
     913       88982 :             ++top;
     914       88982 :             top -> mse_descr.w = descr;
     915       88982 :             top -> mse_start = p -> mse_start;
     916             :             GC_ASSERT((top->mse_descr.w & GC_DS_TAGS) != GC_DS_LENGTH ||
     917             :                       top->mse_descr.w < (word)GC_greatest_plausible_heap_addr
     918             :                                          - (word)GC_least_plausible_heap_addr);
     919             :             /* If this is a big object, count it as                     */
     920             :             /* size/256 + 1 objects.                                    */
     921       88982 :             ++i;
     922       88982 :             if ((descr & GC_DS_TAGS) == GC_DS_LENGTH) i += (int)(descr >> 8);
     923             :         }
     924             :     }
     925       28366 :     *next = p;
     926       28366 :     return top;
     927             : }
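
The zero-descriptor convention above doubles as the claim protocol: a
thief reads a slot, then clears it with a release store, so its private
copy of the entry is ordered before the slot can be reused.  A minimal
stand-alone sketch of that handshake, assuming the same libatomic_ops
primitives used in this file (the helper name try_claim_entry is
invented):

    #include "atomic_ops.h"

    /* Attempt to claim one mark stack slot; returns nonzero on success. */
    /* As in GC_steal_mark_stack, two thieves may occasionally both      */
    /* succeed on one entry, which costs performance, not correctness.   */
    static int try_claim_entry(volatile AO_t *slot, AO_t *descr_out)
    {
        AO_t descr = AO_load(slot);       /* read the candidate entry  */
        if (0 == descr) return 0;         /* empty or already stolen   */
        AO_store_release_write(slot, 0);  /* claim; ordered after load */
        *descr_out = descr;
        return 1;
    }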
     928             : 
     929             : /* Copy back a local mark stack.        */
     930             : /* low and high are inclusive bounds.   */
     931        2111 : STATIC void GC_return_mark_stack(mse * low, mse * high)
     932             : {
     933             :     mse * my_top;
     934             :     mse * my_start;
     935             :     size_t stack_size;
     936             : 
     937        2111 :     if ((word)high < (word)low) return;
     938        2111 :     stack_size = high - low + 1;
     939        2111 :     GC_acquire_mark_lock();
     940        2113 :     my_top = GC_mark_stack_top; /* Concurrent modification impossible. */
     941        2113 :     my_start = my_top + 1;
     942        2113 :     if ((word)(my_start - GC_mark_stack + stack_size)
     943             :                 > (word)GC_mark_stack_size) {
     944           0 :       GC_COND_LOG_PRINTF("No room to copy back mark stack\n");
     945           0 :       GC_mark_state = MS_INVALID;
     946           0 :       GC_mark_stack_too_small = TRUE;
     947             :       /* We drop the local mark stack.  We'll fix things later. */
     948             :     } else {
     949        2113 :       BCOPY(low, my_start, stack_size * sizeof(mse));
     950             :       GC_ASSERT((mse *)AO_load((volatile AO_t *)(&GC_mark_stack_top))
     951             :                 == my_top);
     952        2113 :       AO_store_release_write((volatile AO_t *)(&GC_mark_stack_top),
     953             :                              (AO_t)(my_top + stack_size));
     954             :                 /* Ensures visibility of previously written stack contents. */
     955             :     }
     956        2113 :     GC_release_mark_lock();
     957        2113 :     GC_notify_all_marker();
     958             : }
     959             : 
     960             : /* Mark from the local mark stack.              */
     961             : /* On return, the local mark stack is empty.    */
     962             : /* But this may be achieved by copying the      */
     963             : /* local mark stack back into the global one.   */
     964      128638 : STATIC void GC_do_local_mark(mse *local_mark_stack, mse *local_top)
     965             : {
     966             :     unsigned n;
     967             : #   define N_LOCAL_ITERS 1
     968             : 
     969             : #   ifdef GC_ASSERTIONS
     970             :       /* Make sure we don't hold mark lock. */
     971             :         GC_acquire_mark_lock();
     972             :         GC_release_mark_lock();
     973             : #   endif
     974             :     for (;;) {
     975      227877 :         for (n = 0; n < N_LOCAL_ITERS; ++n) {
     976      128632 :             local_top = GC_mark_from(local_top, local_mark_stack,
     977             :                                      local_mark_stack + LOCAL_MARK_STACK_SIZE);
     978      128321 :             if ((word)local_top < (word)local_mark_stack) return;
     979       99242 :             if ((word)(local_top - local_mark_stack)
     980             :                         >= LOCAL_MARK_STACK_SIZE / 2) {
     981           3 :                 GC_return_mark_stack(local_mark_stack, local_top);
     982           3 :                 return;
     983             :             }
     984             :         }
     985      105543 :         if ((word)AO_load((volatile AO_t *)&GC_mark_stack_top)
     986             :             < (word)AO_load(&GC_first_nonempty)
     987       99245 :             && GC_active_count < GC_helper_count
     988        6070 :             && (word)local_top > (word)(local_mark_stack + 1)) {
     989             :             /* Try to share the load, since the main stack is empty,    */
     990             :             /* and helper threads are waiting for a refill.             */
     991             :             /* The entries near the bottom of the stack are likely      */
     992             :             /* to require more work.  Thus we return those, even though */
     993             :             /* it's harder.                                             */
     994        2109 :             mse * new_bottom = local_mark_stack
     995        4218 :                                 + (local_top - local_mark_stack)/2;
     996             :             GC_ASSERT((word)new_bottom > (word)local_mark_stack
     997             :                       && (word)new_bottom < (word)local_top);
     998        2109 :             GC_return_mark_stack(local_mark_stack, new_bottom - 1);
     999        2109 :             memmove(local_mark_stack, new_bottom,
    1000        2109 :                     (local_top - new_bottom + 1) * sizeof(mse));
    1001        2109 :             local_top -= (new_bottom - local_mark_stack);
    1002             :         }
    1003       99473 :     }
    1004             : }
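
A concrete reading of the donation step above (sizes illustrative): with
eight local entries at indices 0..7, new_bottom lands at index 3,
GC_return_mark_stack copies entries 0..2 (the older ones, which the
comment expects to carry the heavier work) back to the global stack, and
the memmove slides entries 3..7 down so the helper resumes with the
newer five.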
    1005             : 
    1006             : #define ENTRIES_TO_GET 5
    1007             : 
    1008             : /* Mark using the local mark stack until the global mark stack is empty */
    1009             : /* and there are no active workers. Update GC_first_nonempty to reflect */
    1010             : /* progress.                                                            */
    1011             : /* Caller does not hold mark lock.                                      */
    1012             : /* Caller has already incremented GC_helper_count.  We decrement it,    */
    1013             : /* and maintain GC_active_count.                                        */
    1014         788 : STATIC void GC_mark_local(mse *local_mark_stack, int id)
    1015             : {
    1016             :     mse * my_first_nonempty;
    1017             : 
    1018         788 :     GC_acquire_mark_lock();
    1019         788 :     GC_active_count++;
    1020         788 :     my_first_nonempty = (mse *)AO_load(&GC_first_nonempty);
    1021             :     GC_ASSERT((word)GC_mark_stack <= (word)my_first_nonempty);
    1022             :     GC_ASSERT((word)my_first_nonempty
    1023             :         <= (word)AO_load((volatile AO_t *)&GC_mark_stack_top) + sizeof(mse));
    1024         788 :     GC_VERBOSE_LOG_PRINTF("Starting mark helper %lu\n", (unsigned long)id);
    1025         788 :     GC_release_mark_lock();
    1026             :     for (;;) {
    1027             :         size_t n_on_stack;
    1028             :         unsigned n_to_get;
    1029             :         mse * my_top;
    1030             :         mse * local_top;
    1031       31932 :         mse * global_first_nonempty = (mse *)AO_load(&GC_first_nonempty);
    1032             : 
    1033             :         GC_ASSERT((word)my_first_nonempty >= (word)GC_mark_stack &&
    1034             :                   (word)my_first_nonempty <=
    1035             :                         (word)AO_load((volatile AO_t *)&GC_mark_stack_top)
    1036             :                         + sizeof(mse));
    1037             :         GC_ASSERT((word)global_first_nonempty >= (word)GC_mark_stack &&
    1038             :                   (word)global_first_nonempty <=
    1039             :                         (word)AO_load((volatile AO_t *)&GC_mark_stack_top)
    1040             :                         + sizeof(mse));
    1041       31966 :         if ((word)my_first_nonempty < (word)global_first_nonempty) {
    1042        7110 :             my_first_nonempty = global_first_nonempty;
    1043       24856 :         } else if ((word)global_first_nonempty < (word)my_first_nonempty) {
    1044       20979 :             AO_compare_and_swap(&GC_first_nonempty,
    1045             :                                 (AO_t) global_first_nonempty,
    1046             :                                 (AO_t) my_first_nonempty);
    1047             :             /* If this fails, we just go ahead, without updating        */
    1048             :             /* GC_first_nonempty.                                       */
    1049             :         }
    1050             :         /* Perhaps we should also update GC_first_nonempty, if it */
    1051             :         /* is less.  But that would require using atomic updates. */
    1052       32057 :         my_top = (mse *)AO_load_acquire((volatile AO_t *)(&GC_mark_stack_top));
    1053       31929 :         n_on_stack = my_top - my_first_nonempty + 1;
    1054       31929 :         if (0 == n_on_stack) {
    1055        2846 :             GC_acquire_mark_lock();
    1056        2854 :             my_top = GC_mark_stack_top;
    1057             :                 /* Asynchronous modification impossible here,   */
    1058             :                 /* since we hold mark lock.                     */
    1059        2854 :             n_on_stack = my_top - my_first_nonempty + 1;
    1060        2854 :             if (0 == n_on_stack) {
    1061        2838 :                 GC_active_count--;
    1062             :                 GC_ASSERT(GC_active_count <= GC_helper_count);
    1063             :                 /* Other markers may redeposit objects  */
    1064             :                 /* on the stack.                        */
    1065        2838 :                 if (0 == GC_active_count) GC_notify_all_marker();
    1066       13114 :                 while (GC_active_count > 0
    1067        5532 :                        && (word)AO_load(&GC_first_nonempty)
    1068        4744 :                                 > (word)GC_mark_stack_top) {
    1069             :                     /* We will be notified if either GC_active_count    */
    1070             :                     /* reaches zero, or if more objects are pushed on   */
    1071             :                     /* the global mark stack.                           */
    1072        2694 :                     GC_wait_marker();
    1073             :                 }
    1074        3626 :                 if (GC_active_count == 0
    1075        2838 :                     && (word)AO_load(&GC_first_nonempty)
    1076         788 :                         > (word)GC_mark_stack_top) {
    1077         788 :                     GC_bool need_to_notify = FALSE;
    1078             :                     /* The above conditions can't be falsified while we */
    1079             :                     /* hold the mark lock, since neither                */
    1080             :                     /* GC_active_count nor GC_mark_stack_top can        */
    1081             :                     /* change.  GC_first_nonempty can only be           */
    1082             :                     /* incremented asynchronously.  Thus we know that   */
    1083             :                     /* both conditions actually held simultaneously.    */
    1084         788 :                     GC_helper_count--;
    1085         788 :                     if (0 == GC_helper_count) need_to_notify = TRUE;
    1086         788 :                     GC_VERBOSE_LOG_PRINTF("Finished mark helper %lu\n",
    1087             :                                           (unsigned long)id);
    1088         788 :                     GC_release_mark_lock();
    1089         776 :                     if (need_to_notify) GC_notify_all_marker();
    1090             :                     return;
    1091             :                 }
    1092             :                 /* else there's something on the stack again, or        */
    1093             :                 /* another helper may push something.                   */
    1094        2050 :                 GC_active_count++;
    1095             :                 GC_ASSERT(GC_active_count > 0);
    1096        2050 :                 GC_release_mark_lock();
    1097        2049 :                 continue;
    1098             :             } else {
    1099          16 :                 GC_release_mark_lock();
    1100             :             }
    1101             :         }
    1102       29102 :         n_to_get = ENTRIES_TO_GET;
    1103       29102 :         if (n_on_stack < 2 * ENTRIES_TO_GET) n_to_get = 1;
    1104       29102 :         local_top = GC_steal_mark_stack(my_first_nonempty, my_top,
    1105             :                                         local_mark_stack, n_to_get,
    1106             :                                         &my_first_nonempty);
    1107             :         GC_ASSERT((word)my_first_nonempty >= (word)GC_mark_stack &&
    1108             :                   (word)my_first_nonempty <=
    1109             :                         (word)AO_load((volatile AO_t *)&GC_mark_stack_top)
    1110             :                         + sizeof(mse));
    1111       29236 :         GC_do_local_mark(local_mark_stack, local_top);
    1112       31144 :     }
    1113             : }
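
Reading the steal-size heuristic above with ENTRIES_TO_GET = 5: while at
least ten entries remain between my_first_nonempty and my_top, a helper
takes up to five per call to GC_steal_mark_stack; below that threshold it
drops to single-entry steals, so the tail of the global stack is shared
among the waiting helpers instead of being drained by one.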
    1114             : 
    1115             : /* Perform parallel mark.                       */
    1116             : /* We hold the GC lock, not the mark lock.      */
    1117             : /* Currently runs until the mark stack is       */
    1118             : /* empty.                                       */
    1119         244 : STATIC void GC_do_parallel_mark(void)
    1120             : {
    1121             :     mse local_mark_stack[LOCAL_MARK_STACK_SIZE];
    1122             :                 /* Note: local_mark_stack is quite big (up to 128 KiB). */
    1123             : 
    1124         244 :     GC_acquire_mark_lock();
    1125             :     GC_ASSERT(I_HOLD_LOCK());
    1126             :     /* This could be a GC_ASSERT, but it seems safer to keep it on      */
    1127             :     /* all the time, especially since it's cheap.                       */
    1128         244 :     if (GC_help_wanted || GC_active_count != 0 || GC_helper_count != 0)
    1129           0 :         ABORT("Tried to start parallel mark in bad state");
    1130         244 :     GC_VERBOSE_LOG_PRINTF("Starting marking for mark phase number %lu\n",
    1131             :                           (unsigned long)GC_mark_no);
    1132         244 :     GC_first_nonempty = (AO_t)GC_mark_stack;
    1133         244 :     GC_active_count = 0;
    1134         244 :     GC_helper_count = 1;
    1135         244 :     GC_help_wanted = TRUE;
    1136         244 :     GC_release_mark_lock();
    1137         244 :     GC_notify_all_marker();
    1138             :         /* Wake up potential helpers.   */
    1139         244 :     GC_mark_local(local_mark_stack, 0);
    1140         244 :     GC_acquire_mark_lock();
    1141         244 :     GC_help_wanted = FALSE;
    1142             :     /* Done; clean up.  */
    1143         649 :     while (GC_helper_count > 0) {
    1144         161 :       GC_wait_marker();
    1145             :     }
    1146             :     /* GC_helper_count cannot be incremented while GC_help_wanted == FALSE */
    1147         244 :     GC_VERBOSE_LOG_PRINTF("Finished marking for mark phase number %lu\n",
    1148             :                           (unsigned long)GC_mark_no);
    1149         244 :     GC_mark_no++;
    1150         244 :     GC_release_mark_lock();
    1151         244 :     GC_notify_all_marker();
    1152         244 : }
    1153             : 
    1154             : 
    1155             : /* Try to help out the marker, if it's running.         */
    1156             : /* We do not hold the GC lock, but the requestor does.  */
    1157      398724 : GC_INNER void GC_help_marker(word my_mark_no)
    1158             : {
    1159             :     unsigned my_id;
    1160             :     mse local_mark_stack[LOCAL_MARK_STACK_SIZE];
    1161             :                 /* Note: local_mark_stack is quite big (up to 128 KiB). */
    1162             : 
    1163      398724 :     if (!GC_parallel) return;
    1164             : 
    1165         955 :     GC_acquire_mark_lock();
    1166        7269 :     while (GC_mark_no < my_mark_no
    1167        6236 :            || (!GC_help_wanted && GC_mark_no == my_mark_no)) {
    1168        3284 :       GC_wait_marker();
    1169             :     }
    1170         544 :     my_id = GC_helper_count;
    1171         544 :     if (GC_mark_no != my_mark_no || my_id > (unsigned)GC_markers_m1) {
    1172             :       /* Second test is useful only if original threads can also        */
    1173             :       /* act as helpers.  Under Linux they can't.                       */
    1174           0 :       GC_release_mark_lock();
    1175           0 :       return;
    1176             :     }
    1177         544 :     GC_helper_count = my_id + 1;
    1178         544 :     GC_release_mark_lock();
    1179         544 :     GC_mark_local(local_mark_stack, my_id);
    1180             :     /* GC_mark_local decrements GC_helper_count. */
    1181             : }
    1182             : 
    1183             : #endif /* PARALLEL_MARK */
    1184             : 
    1185             : /* Allocate or reallocate space for mark stack of size n entries.  */
    1186             : /* May silently fail.                                              */
    1187         168 : static void alloc_mark_stack(size_t n)
    1188             : {
    1189         168 :     mse * new_stack = (mse *)GC_scratch_alloc(n * sizeof(struct GC_ms_entry));
    1190             : #   ifdef GWW_VDB
    1191             :       /* Don't recycle a stack segment obtained with the wrong flags.   */
    1192             :       /* Win32 GetWriteWatch requires the right kind of memory.         */
    1193             :       static GC_bool GC_incremental_at_stack_alloc = FALSE;
    1194             :       GC_bool recycle_old = (!GC_incremental || GC_incremental_at_stack_alloc);
    1195             : 
    1196             :       GC_incremental_at_stack_alloc = GC_incremental;
    1197             : #   else
    1198             : #     define recycle_old TRUE
    1199             : #   endif
    1200             : 
    1201         168 :     GC_mark_stack_too_small = FALSE;
    1202         168 :     if (GC_mark_stack_size != 0) {
    1203           5 :         if (new_stack != 0) {
    1204             :           if (recycle_old) {
    1205             :             /* Recycle old space */
    1206           5 :               size_t page_offset = (word)GC_mark_stack & (GC_page_size - 1);
    1207           5 :               size_t size = GC_mark_stack_size * sizeof(struct GC_ms_entry);
    1208           5 :               size_t displ = 0;
    1209             : 
    1210           5 :               if (0 != page_offset) displ = GC_page_size - page_offset;
    1211           5 :               size = (size - displ) & ~(GC_page_size - 1);
    1212           5 :               if (size > 0) {
    1213           5 :                 GC_add_to_heap((struct hblk *)
    1214           5 :                                 ((word)GC_mark_stack + displ), (word)size);
    1215             :               }
    1216             :           }
    1217           5 :           GC_mark_stack = new_stack;
    1218           5 :           GC_mark_stack_size = n;
    1219             :           /* FIXME: Do we need some way to reset GC_mark_stack_size?    */
    1220           5 :           GC_mark_stack_limit = new_stack + n;
    1221           5 :           GC_COND_LOG_PRINTF("Grew mark stack to %lu frames\n",
    1222             :                              (unsigned long)GC_mark_stack_size);
    1223             :         } else {
    1224           0 :           WARN("Failed to grow mark stack to %" WARN_PRIdPTR " frames\n", n);
    1225             :         }
    1226             :     } else {
    1227         163 :         if (new_stack == 0) {
    1228           0 :             GC_err_printf("No space for mark stack\n");
    1229           0 :             EXIT();
    1230             :         }
    1231         163 :         GC_mark_stack = new_stack;
    1232         163 :         GC_mark_stack_size = n;
    1233         163 :         GC_mark_stack_limit = new_stack + n;
    1234             :     }
    1235         168 :     GC_mark_stack_top = GC_mark_stack-1;
    1236         168 : }
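
A worked reading of the recycle arithmetic above (numbers illustrative,
assuming 4 KiB pages): if the old stack begins 0x100 bytes into a page,
then page_offset = 0x100 and displ = 0xF00 advances to the next page
boundary; a 64 KiB old stack then yields
size = (0x10000 - 0xF00) & ~0xFFF = 0xF000, i.e. the 60 KiB of whole
pages lying strictly inside the old allocation are handed back via
GC_add_to_heap.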
    1237             : 
    1238         163 : GC_INNER void GC_mark_init(void)
    1239             : {
    1240         163 :     alloc_mark_stack(INITIAL_MARK_STACK_SIZE);
    1241         163 : }
    1242             : 
    1243             : /*
    1244             :  * Push all locations between bottom and top onto the mark stack.
    1245             :  * bottom is the first location to be checked.  top is one past
    1246             :  * the last location to be checked.
    1247             :  * Should only be used if there is no possibility of mark stack
    1248             :  * overflow.
    1249             :  */
    1250        3532 : GC_API void GC_CALL GC_push_all(char *bottom, char *top)
    1251             : {
    1252             :     register word length;
    1253             : 
    1254        3532 :     bottom = (char *)(((word) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
    1255        3532 :     top = (char *)(((word) top) & ~(ALIGNMENT-1));
    1256        3532 :     if ((word)bottom >= (word)top) return;
    1257             : 
    1258        3532 :     GC_mark_stack_top++;
    1259        3532 :     if ((word)GC_mark_stack_top >= (word)GC_mark_stack_limit) {
    1260           0 :         ABORT("Unexpected mark stack overflow");
    1261             :     }
    1262        3532 :     length = top - bottom;
    1263             : #   if GC_DS_TAGS > ALIGNMENT - 1
    1264             :         length += GC_DS_TAGS;
    1265             :         length &= ~GC_DS_TAGS;
    1266             : #   endif
    1267        3532 :     GC_mark_stack_top -> mse_start = bottom;
    1268        3532 :     GC_mark_stack_top -> mse_descr.w = length;
    1269             : }
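
Since GC_push_all is part of the public mark API, a typical caller is a
custom root-pushing callback that runs while the collector scans roots.
A minimal hypothetical sketch (the my_roots region and push_my_roots
wrapper are invented):

    #include "gc.h"
    #include "gc_mark.h"

    #define MY_ROOT_WORDS 256
    static GC_word my_roots[MY_ROOT_WORDS];   /* invented root region */

    /* Safe here because one small, fixed region cannot plausibly     */
    /* overflow the mark stack, per the comment above.                */
    static void push_my_roots(void)
    {
        GC_push_all((char *)my_roots, (char *)(my_roots + MY_ROOT_WORDS));
    }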
    1270             : 
    1271             : #ifndef GC_DISABLE_INCREMENTAL
    1272             : 
    1273             :   /* Analogous to the above, but push only those pages h with           */
    1274             :   /* dirty_fn(h) != 0.  We use GC_push_all to actually push the block.  */
    1275             :   /* Used either to selectively push dirty pages, or to push a block    */
    1276             :   /* in piecemeal fashion, to allow for more marking concurrency.       */
    1277             :   /* Will not overflow mark stack if GC_push_all pushes a small fixed   */
    1278             :   /* number of entries.  (This is invoked only if GC_push_all pushes    */
    1279             :   /* a single entry, or if it marks each object before pushing it, thus */
    1280             :   /* ensuring progress in the event of a stack overflow.)               */
    1281           0 :   STATIC void GC_push_selected(ptr_t bottom, ptr_t top,
    1282             :                                GC_bool (*dirty_fn)(struct hblk *))
    1283             :   {
    1284             :     struct hblk * h;
    1285             : 
    1286           0 :     bottom = (ptr_t)(((word) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
    1287           0 :     top = (ptr_t)(((word) top) & ~(ALIGNMENT-1));
    1288           0 :     if ((word)bottom >= (word)top) return;
    1289             : 
    1290           0 :     h = HBLKPTR(bottom + HBLKSIZE);
    1291           0 :     if ((word)top <= (word)h) {
    1292           0 :         if ((*dirty_fn)(h-1)) {
    1293           0 :             GC_push_all(bottom, top);
    1294             :         }
    1295           0 :         return;
    1296             :     }
    1297           0 :     if ((*dirty_fn)(h-1)) {
    1298           0 :         GC_push_all(bottom, (ptr_t)h);
    1299             :     }
    1300             : 
    1301           0 :     while ((word)(h+1) <= (word)top) {
    1302           0 :         if ((*dirty_fn)(h)) {
    1303           0 :             if ((word)(GC_mark_stack_top - GC_mark_stack)
    1304           0 :                 > 3 * GC_mark_stack_size / 4) {
    1305             :                 /* Danger of mark stack overflow */
    1306           0 :                 GC_push_all((ptr_t)h, top);
    1307           0 :                 return;
    1308             :             } else {
    1309           0 :                 GC_push_all((ptr_t)h, (ptr_t)(h+1));
    1310             :             }
    1311             :         }
    1312           0 :         h++;
    1313             :     }
    1314             : 
    1315           0 :     if ((ptr_t)h != top && (*dirty_fn)(h)) {
    1316           0 :        GC_push_all((ptr_t)h, top);
    1317             :     }
    1318           0 :     if ((word)GC_mark_stack_top >= (word)GC_mark_stack_limit) {
    1319           0 :         ABORT("Unexpected mark stack overflow");
    1320             :     }
    1321             :   }
    1322             : 
    1323        3532 :   GC_API void GC_CALL GC_push_conditional(char *bottom, char *top, int all)
    1324             :   {
    1325        3532 :     if (!all) {
    1326           0 :       GC_push_selected((ptr_t)bottom, (ptr_t)top, GC_page_was_dirty);
    1327             :     } else {
    1328             : #     ifdef PROC_VDB
    1329             :         if (GC_dirty_maintained) {
    1330             :           /* Pages that were never dirtied cannot contain pointers.     */
    1331             :           GC_push_selected((ptr_t)bottom, (ptr_t)top, GC_page_was_ever_dirty);
    1332             :         } else
    1333             : #     endif
    1334             :       /* else */ {
    1335        3532 :         GC_push_all(bottom, top);
    1336             :       }
    1337             :     }
    1338        3532 :   }
    1339             : #else
    1340             :   GC_API void GC_CALL GC_push_conditional(char *bottom, char *top,
    1341             :                                           int all GC_ATTR_UNUSED)
    1342             :   {
    1343             :     GC_push_all(bottom, top);
    1344             :   }
    1345             : #endif /* GC_DISABLE_INCREMENTAL */
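
GC_push_conditional is the public entry point layered on these helpers;
a hypothetical caller in an incremental-mode build might push a large
region while letting the collector skip clean pages (the region bounds
are invented):

    /* all = 0 requests dirty pages only; nonzero pushes the whole    */
    /* region unconditionally, as in the fallback branch above.       */
    extern char region_start[], region_end[];

    static void push_my_region(void)
    {
        GC_push_conditional(region_start, region_end, 0);
    }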
    1346             : 
    1347             : #if defined(MSWIN32) || defined(MSWINCE)
    1348             :   void __cdecl GC_push_one(word p)
    1349             : #else
    1350           0 :   void GC_push_one(word p)
    1351             : #endif
    1352             : {
    1353           0 :     GC_PUSH_ONE_STACK(p, MARKED_FROM_REGISTER);
    1354           0 : }
    1355             : 
    1356           0 : GC_API struct GC_ms_entry * GC_CALL GC_mark_and_push(void *obj,
    1357             :                                                 mse *mark_stack_ptr,
    1358             :                                                 mse *mark_stack_limit,
    1359             :                                                 void ** src GC_ATTR_UNUSED)
    1360             : {
    1361             :     hdr * hhdr;
    1362             : 
    1363           0 :     PREFETCH(obj);
    1364           0 :     GET_HDR(obj, hhdr);
    1365           0 :     if ((EXPECT(IS_FORWARDING_ADDR_OR_NIL(hhdr), FALSE)
    1366           0 :          && (!GC_all_interior_pointers
    1367           0 :              || NULL == (hhdr = GC_find_header(GC_base(obj)))))
    1368           0 :         || EXPECT(HBLK_IS_FREE(hhdr), FALSE)) {
    1369           0 :       GC_ADD_TO_BLACK_LIST_NORMAL(obj, (ptr_t)src);
    1370           0 :       return mark_stack_ptr;
    1371             :     }
    1372             : 
    1373           0 :     PUSH_CONTENTS_HDR(obj, mark_stack_ptr /* modified */, mark_stack_limit,
    1374             :                       (ptr_t)src, was_marked, hhdr, TRUE);
    1375             :  was_marked:
    1376           0 :     return mark_stack_ptr;
    1377             : }
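
GC_mark_and_push above is the helper intended for user-supplied mark
procedures registered through the gc_mark.h interface.  A hypothetical
mark procedure for an object with a single traced field (struct my_node
and my_mark_proc are invented):

    #include "gc_mark.h"

    struct my_node {            /* invented object layout            */
        void *link;             /* the only field the GC must trace  */
        char payload[24];       /* opaque, pointer-free data         */
    };

    static struct GC_ms_entry *my_mark_proc(GC_word *addr,
                                struct GC_ms_entry *mark_stack_ptr,
                                struct GC_ms_entry *mark_stack_limit,
                                GC_word env)
    {
        struct my_node *n = (struct my_node *)addr;

        (void)env;
        if (n->link != NULL) {
            /* Invalid targets are blacklisted internally, as above. */
            mark_stack_ptr = GC_mark_and_push(n->link, mark_stack_ptr,
                                              mark_stack_limit,
                                              (void **)&n->link);
        }
        return mark_stack_ptr;
    }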
    1378             : 
    1379             : #if defined(MANUAL_VDB) && defined(THREADS)
    1380             :   void GC_dirty(ptr_t p);
    1381             : #endif
    1382             : 
    1383             : /* Mark and push (i.e. gray) a single object p onto the main    */
    1384             : /* mark stack.  Consider p to be valid if it is an interior     */
    1385             : /* pointer.                                                     */
    1386             : /* The object p has passed a preliminary pointer validity       */
    1387             : /* test, but we do not definitely know whether it is valid.     */
    1388             : /* Mark bits are NOT atomically updated.  Thus this must be the */
    1389             : /* only thread setting them.                                    */
    1390             : # if defined(PRINT_BLACK_LIST) || defined(KEEP_BACK_PTRS)
    1391             :     GC_INNER void GC_mark_and_push_stack(ptr_t p, ptr_t source)
    1392             : # else
    1393       16661 :     GC_INNER void GC_mark_and_push_stack(ptr_t p)
    1394             : #   define source ((ptr_t)0)
    1395             : # endif
    1396             : {
    1397             :     hdr * hhdr;
    1398       16661 :     ptr_t r = p;
    1399             : 
    1400       16661 :     PREFETCH(p);
    1401       16661 :     GET_HDR(p, hhdr);
    1402       16661 :     if (EXPECT(IS_FORWARDING_ADDR_OR_NIL(hhdr), FALSE)) {
    1403        4568 :         if (hhdr != 0) {
    1404          34 :           r = GC_base(p);
    1405          34 :           hhdr = HDR(r);
    1406             :         }
    1407        4568 :         if (hhdr == 0) {
    1408        4534 :             GC_ADD_TO_BLACK_LIST_STACK(p, source);
    1409        4534 :             return;
    1410             :         }
    1411             :     }
    1412       12127 :     if (EXPECT(HBLK_IS_FREE(hhdr), FALSE)) {
    1413           0 :         GC_ADD_TO_BLACK_LIST_NORMAL(p, source);
    1414           0 :         return;
    1415             :     }
    1416             : #   if defined(MANUAL_VDB) && defined(THREADS)
    1417             :       /* Pointer is on the stack.  We may have dirtied the object       */
    1418             :       /* it points to, but not yet have called GC_dirty();              */
    1419             :       GC_dirty(p);      /* Implicitly affects entire object.            */
    1420             : #   endif
    1421       12127 :     PUSH_CONTENTS_HDR(r, GC_mark_stack_top, GC_mark_stack_limit,
    1422             :                       source, mark_and_push_exit, hhdr, FALSE);
    1423             :   mark_and_push_exit: ;
    1424             :     /* We silently ignore pointers that fall near the end of a  */
    1425             :     /* block, which is very mildly suboptimal.                  */
    1426             :     /* FIXME: We should probably add a header word to address   */
    1427             :     /* this.                                                    */
    1428             : }
    1429             : # undef source
    1430             : 
    1431             : # ifdef TRACE_BUF
    1432             : 
    1433             : # define TRACE_ENTRIES 1000
    1434             : 
    1435             : struct trace_entry {
    1436             :     char * kind;
    1437             :     word gc_no;
    1438             :     word bytes_allocd;
    1439             :     word arg1;
    1440             :     word arg2;
    1441             : } GC_trace_buf[TRACE_ENTRIES];
    1442             : 
    1443             : int GC_trace_buf_ptr = 0;
    1444             : 
    1445             : void GC_add_trace_entry(char *kind, word arg1, word arg2)
    1446             : {
    1447             :     GC_trace_buf[GC_trace_buf_ptr].kind = kind;
    1448             :     GC_trace_buf[GC_trace_buf_ptr].gc_no = GC_gc_no;
    1449             :     GC_trace_buf[GC_trace_buf_ptr].bytes_allocd = GC_bytes_allocd;
    1450             :     GC_trace_buf[GC_trace_buf_ptr].arg1 = arg1 ^ 0x80000000;
    1451             :     GC_trace_buf[GC_trace_buf_ptr].arg2 = arg2 ^ 0x80000000;
    1452             :     GC_trace_buf_ptr++;
    1453             :     if (GC_trace_buf_ptr >= TRACE_ENTRIES) GC_trace_buf_ptr = 0;
    1454             : }
    1455             : 
    1456             : void GC_print_trace_inner(word gc_no)
    1457             : {
    1458             :     int i;
    1459             :     struct trace_entry *p;
    1460             : 
    1461             :     for (i = GC_trace_buf_ptr-1; i != GC_trace_buf_ptr; i--) {
    1462             :         if (i < 0) i = TRACE_ENTRIES-1;
    1463             :         p = GC_trace_buf + i;
    1464             :         if (p -> gc_no < gc_no || p -> kind == 0) {
    1465             :             return;
    1466             :         }
    1467             :         GC_printf("Trace:%s (gc:%u, bytes:%lu) 0x%lX, 0x%lX\n",
    1468             :                   p -> kind, (unsigned)p -> gc_no,
    1469             :                   (unsigned long)p -> bytes_allocd,
    1470             :                   (long)p->arg1 ^ 0x80000000L, (long)p->arg2 ^ 0x80000000L);
    1471             :     }
    1472             :     GC_printf("Trace incomplete\n");
    1473             : }
    1474             : 
    1475             : void GC_print_trace(word gc_no)
    1476             : {
    1477             :     DCL_LOCK_STATE;
    1478             : 
    1479             :     LOCK();
    1480             :     GC_print_trace_inner(gc_no);
    1481             :     UNLOCK();
    1482             : }
    1483             : 
    1484             : # endif /* TRACE_BUF */
    1485             : 
    1486             : /*
    1487             :  * A version of GC_push_all that treats all interior pointers as valid
    1488             :  * and scans the entire region immediately, in case the contents
    1489             :  * change.
    1490             :  */
    1491         731 : GC_INNER void GC_push_all_eager(ptr_t bottom, ptr_t top)
    1492             : {
    1493         731 :     word * b = (word *)(((word) bottom + ALIGNMENT-1) & ~(ALIGNMENT-1));
    1494         731 :     word * t = (word *)(((word) top) & ~(ALIGNMENT-1));
    1495             :     register word *p;
    1496             :     register word q;
    1497             :     register word *lim;
    1498         731 :     register ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    1499         731 :     register ptr_t least_ha = GC_least_plausible_heap_addr;
    1500             : #   define GC_greatest_plausible_heap_addr greatest_ha
    1501             : #   define GC_least_plausible_heap_addr least_ha
    1502             : 
    1503         731 :     if (top == 0) return;
    1504             :     /* check all pointers in range and push if they appear      */
    1505             :     /* to be valid.                                             */
    1506         731 :       lim = t - 1 /* longword */;
    1507      374651 :       for (p = b; (word)p <= (word)lim;
    1508      373189 :            p = (word *)(((ptr_t)p) + ALIGNMENT)) {
    1509      373189 :         q = *p;
    1510      373189 :         GC_PUSH_ONE_STACK(q, p);
    1511             :       }
    1512             : #   undef GC_greatest_plausible_heap_addr
    1513             : #   undef GC_least_plausible_heap_addr
    1514             : }
    1515             : 
    1516         487 : GC_INNER void GC_push_all_stack(ptr_t bottom, ptr_t top)
    1517             : {
    1518             : # if defined(THREADS) && defined(MPROTECT_VDB)
    1519         487 :     GC_push_all_eager(bottom, top);
    1520             : # else
    1521             :     if (!NEED_FIXUP_POINTER && GC_all_interior_pointers) {
    1522             :       GC_push_all(bottom, top);
    1523             :     } else {
    1524             :       GC_push_all_eager(bottom, top);
    1525             :     }
    1526             : # endif
    1527         487 : }
    1528             : 
    1529             : #if !defined(SMALL_CONFIG) && !defined(USE_MARK_BYTES) && \
    1530             :     defined(MARK_BIT_PER_GRANULE)
    1531             : # if GC_GRANULE_WORDS == 1
    1532             : #   define USE_PUSH_MARKED_ACCELERATORS
    1533             : #   define PUSH_GRANULE(q) \
    1534             :                 do { \
    1535             :                   word qcontents = (q)[0]; \
    1536             :                   GC_PUSH_ONE_HEAP(qcontents, q, GC_mark_stack_top); \
    1537             :                 } while (0)
    1538             : # elif GC_GRANULE_WORDS == 2
    1539             : #   define USE_PUSH_MARKED_ACCELERATORS
    1540             : #   define PUSH_GRANULE(q) \
    1541             :                 do { \
    1542             :                   word qcontents = (q)[0]; \
    1543             :                   GC_PUSH_ONE_HEAP(qcontents, q, GC_mark_stack_top); \
    1544             :                   qcontents = (q)[1]; \
    1545             :                   GC_PUSH_ONE_HEAP(qcontents, (q)+1, GC_mark_stack_top); \
    1546             :                 } while (0)
    1547             : # elif GC_GRANULE_WORDS == 4
    1548             : #   define USE_PUSH_MARKED_ACCELERATORS
    1549             : #   define PUSH_GRANULE(q) \
    1550             :                 do { \
    1551             :                   word qcontents = (q)[0]; \
    1552             :                   GC_PUSH_ONE_HEAP(qcontents, q, GC_mark_stack_top); \
    1553             :                   qcontents = (q)[1]; \
    1554             :                   GC_PUSH_ONE_HEAP(qcontents, (q)+1, GC_mark_stack_top); \
    1555             :                   qcontents = (q)[2]; \
    1556             :                   GC_PUSH_ONE_HEAP(qcontents, (q)+2, GC_mark_stack_top); \
    1557             :                   qcontents = (q)[3]; \
    1558             :                   GC_PUSH_ONE_HEAP(qcontents, (q)+3, GC_mark_stack_top); \
    1559             :                 } while (0)
    1560             : # endif
    1561             : #endif /* !USE_MARK_BYTES && MARK_BIT_PER_GRANULE */
    1562             : 
    1563             : #ifdef USE_PUSH_MARKED_ACCELERATORS
    1564             : /* Push all objects reachable from marked objects in the given block */
    1565             : /* containing objects of size 1 granule.                             */
    1566             : STATIC void GC_push_marked1(struct hblk *h, hdr *hhdr)
    1567             : {
    1568             :     word * mark_word_addr = &(hhdr->hb_marks[0]);
    1569             :     word *p;
    1570             :     word *plim;
    1571             :     word *q;
    1572             :     word mark_word;
    1573             : 
    1574             :     /* Allow registers to be used for some frequently accessed  */
    1575             :     /* global variables.  Otherwise aliasing issues are likely  */
    1576             :     /* to prevent that.                                         */
    1577             :     ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    1578             :     ptr_t least_ha = GC_least_plausible_heap_addr;
    1579             :     mse * mark_stack_top = GC_mark_stack_top;
    1580             :     mse * mark_stack_limit = GC_mark_stack_limit;
    1581             : 
    1582             : #   undef GC_mark_stack_top
    1583             : #   undef GC_mark_stack_limit
    1584             : #   define GC_mark_stack_top mark_stack_top
    1585             : #   define GC_mark_stack_limit mark_stack_limit
    1586             : #   define GC_greatest_plausible_heap_addr greatest_ha
    1587             : #   define GC_least_plausible_heap_addr least_ha
    1588             : 
    1589             :     p = (word *)(h->hb_body);
    1590             :     plim = (word *)(((word)h) + HBLKSIZE);
    1591             : 
    1592             :     /* go through all words in block */
    1593             :         while ((word)p < (word)plim) {
    1594             :             mark_word = *mark_word_addr++;
    1595             :             q = p;
    1596             :             while(mark_word != 0) {
    1597             :               if (mark_word & 1) {
    1598             :                   PUSH_GRANULE(q);
    1599             :               }
    1600             :               q += GC_GRANULE_WORDS;
    1601             :               mark_word >>= 1;
    1602             :             }
    1603             :             p += WORDSZ*GC_GRANULE_WORDS;
    1604             :         }
    1605             : 
    1606             : #   undef GC_greatest_plausible_heap_addr
    1607             : #   undef GC_least_plausible_heap_addr
    1608             : #   undef GC_mark_stack_top
    1609             : #   undef GC_mark_stack_limit
    1610             : #   define GC_mark_stack_limit GC_arrays._mark_stack_limit
    1611             : #   define GC_mark_stack_top GC_arrays._mark_stack_top
    1612             :     GC_mark_stack_top = mark_stack_top;
    1613             : }
    1614             : 
    1615             : 
    1616             : #ifndef UNALIGNED_PTRS
    1617             : 
    1618             : /* Push all objects reachable from marked objects in the given block */
    1619             : /* containing objects of size 2 granules.                            */
    1620             : STATIC void GC_push_marked2(struct hblk *h, hdr *hhdr)
    1621             : {
    1622             :     word * mark_word_addr = &(hhdr->hb_marks[0]);
    1623             :     word *p;
    1624             :     word *plim;
    1625             :     word *q;
    1626             :     word mark_word;
    1627             : 
    1628             :     ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    1629             :     ptr_t least_ha = GC_least_plausible_heap_addr;
    1630             :     mse * mark_stack_top = GC_mark_stack_top;
    1631             :     mse * mark_stack_limit = GC_mark_stack_limit;
    1632             : 
    1633             : #   undef GC_mark_stack_top
    1634             : #   undef GC_mark_stack_limit
    1635             : #   define GC_mark_stack_top mark_stack_top
    1636             : #   define GC_mark_stack_limit mark_stack_limit
    1637             : #   define GC_greatest_plausible_heap_addr greatest_ha
    1638             : #   define GC_least_plausible_heap_addr least_ha
    1639             : 
    1640             :     p = (word *)(h->hb_body);
    1641             :     plim = (word *)(((word)h) + HBLKSIZE);
    1642             : 
    1643             :     /* go through all words in block */
    1644             :         while ((word)p < (word)plim) {
    1645             :             mark_word = *mark_word_addr++;
    1646             :             q = p;
    1647             :             while(mark_word != 0) {
    1648             :               if (mark_word & 1) {
    1649             :                   PUSH_GRANULE(q);
    1650             :                   PUSH_GRANULE(q + GC_GRANULE_WORDS);
    1651             :               }
    1652             :               q += 2 * GC_GRANULE_WORDS;
    1653             :               mark_word >>= 2;
    1654             :             }
    1655             :             p += WORDSZ*GC_GRANULE_WORDS;
    1656             :         }
    1657             : 
    1658             : #   undef GC_greatest_plausible_heap_addr
    1659             : #   undef GC_least_plausible_heap_addr
    1660             : #   undef GC_mark_stack_top
    1661             : #   undef GC_mark_stack_limit
    1662             : #   define GC_mark_stack_limit GC_arrays._mark_stack_limit
    1663             : #   define GC_mark_stack_top GC_arrays._mark_stack_top
    1664             :     GC_mark_stack_top = mark_stack_top;
    1665             : }
    1666             : 
    1667             : # if GC_GRANULE_WORDS < 4
    1668             : /* Push all objects reachable from marked objects in the given block */
    1669             : /* containing objects of size 4 granules.                            */
    1670             : /* There is a risk of mark stack overflow here.  But we handle that. */
    1671             : /* And only unmarked objects get pushed, so it's not very likely.    */
    1672             : STATIC void GC_push_marked4(struct hblk *h, hdr *hhdr)
    1673             : {
    1674             :     word * mark_word_addr = &(hhdr->hb_marks[0]);
    1675             :     word *p;
    1676             :     word *plim;
    1677             :     word *q;
    1678             :     word mark_word;
    1679             : 
    1680             :     ptr_t greatest_ha = GC_greatest_plausible_heap_addr;
    1681             :     ptr_t least_ha = GC_least_plausible_heap_addr;
    1682             :     mse * mark_stack_top = GC_mark_stack_top;
    1683             :     mse * mark_stack_limit = GC_mark_stack_limit;
    1684             : 
    1685             : #   undef GC_mark_stack_top
    1686             : #   undef GC_mark_stack_limit
    1687             : #   define GC_mark_stack_top mark_stack_top
    1688             : #   define GC_mark_stack_limit mark_stack_limit
    1689             : #   define GC_greatest_plausible_heap_addr greatest_ha
    1690             : #   define GC_least_plausible_heap_addr least_ha
    1691             : 
    1692             :     p = (word *)(h->hb_body);
    1693             :     plim = (word *)(((word)h) + HBLKSIZE);
    1694             : 
    1695             :     /* go through all words in block */
    1696             :         while ((word)p < (word)plim) {
    1697             :             mark_word = *mark_word_addr++;
    1698             :             q = p;
    1699             :             while(mark_word != 0) {
    1700             :               if (mark_word & 1) {
    1701             :                   PUSH_GRANULE(q);
    1702             :                   PUSH_GRANULE(q + GC_GRANULE_WORDS);
    1703             :                   PUSH_GRANULE(q + 2*GC_GRANULE_WORDS);
    1704             :                   PUSH_GRANULE(q + 3*GC_GRANULE_WORDS);
    1705             :               }
    1706             :               q += 4 * GC_GRANULE_WORDS;
    1707             :               mark_word >>= 4;
    1708             :             }
    1709             :             p += WORDSZ*GC_GRANULE_WORDS;
    1710             :         }
    1711             : #   undef GC_greatest_plausible_heap_addr
    1712             : #   undef GC_least_plausible_heap_addr
    1713             : #   undef GC_mark_stack_top
    1714             : #   undef GC_mark_stack_limit
    1715             : #   define GC_mark_stack_limit GC_arrays._mark_stack_limit
    1716             : #   define GC_mark_stack_top GC_arrays._mark_stack_top
    1717             :     GC_mark_stack_top = mark_stack_top;
    1718             : }
    1719             : 
    1720             : #endif /* GC_GRANULE_WORDS < 4 */
    1721             : 
    1722             : #endif /* UNALIGNED_PTRS */
    1723             : 
    1724             : #endif /* USE_PUSH_MARKED_ACCELERATORS */
    1725             : 
    1726             : /* Push all objects reachable from marked objects in the given block */
    1727        3774 : STATIC void GC_push_marked(struct hblk *h, hdr *hhdr)
    1728             : {
    1729        3774 :     size_t sz = hhdr -> hb_sz;
    1730        3774 :     word descr = hhdr -> hb_descr;
    1731             :     ptr_t p;
    1732             :     word bit_no;
    1733             :     ptr_t lim;
    1734             :     mse * GC_mark_stack_top_reg;
    1735        3774 :     mse * mark_stack_limit = GC_mark_stack_limit;
    1736             : 
    1737             :     /* Some quick shortcuts: */
    1738        3774 :         if ((0 | GC_DS_LENGTH) == descr) return;
    1739        3774 :         if (GC_block_empty(hhdr)/* nothing marked */) return;
    1740        3774 :     GC_n_rescuing_pages++;
    1741        3774 :     GC_objects_are_marked = TRUE;
    1742        3774 :     if (sz > MAXOBJBYTES) {
    1743           0 :         lim = h -> hb_body;
    1744             :     } else {
    1745        3774 :         lim = (h + 1)->hb_body - sz;
    1746             :     }
    1747             : 
    1748        3774 :     switch(BYTES_TO_GRANULES(sz)) {
    1749             : #   if defined(USE_PUSH_MARKED_ACCELERATORS)
    1750             :      case 1:
    1751             :        GC_push_marked1(h, hhdr);
    1752             :        break;
    1753             : #    if !defined(UNALIGNED_PTRS)
    1754             :        case 2:
    1755             :          GC_push_marked2(h, hhdr);
    1756             :          break;
    1757             : #     if GC_GRANULE_WORDS < 4
    1758             :        case 4:
    1759             :          GC_push_marked4(h, hhdr);
    1760             :          break;
    1761             : #     endif
    1762             : #    endif
    1763             : #   endif
    1764             :      default:
    1765        3774 :       GC_mark_stack_top_reg = GC_mark_stack_top;
    1766       93949 :       for (p = h -> hb_body, bit_no = 0; (word)p <= (word)lim;
    1767       86401 :            p += sz, bit_no += MARK_BIT_OFFSET(sz)) {
    1768       86401 :          if (mark_bit_from_hdr(hhdr, bit_no)) {
    1769             :            /* Mark from fields inside the object */
    1770       68245 :              PUSH_OBJ(p, hhdr, GC_mark_stack_top_reg, mark_stack_limit);
    1771             :          }
    1772             :       }
    1773        3774 :       GC_mark_stack_top = GC_mark_stack_top_reg;
    1774             :     }
    1775             : }
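
To make the lim computation above concrete (illustrative numbers): with
4 KiB blocks and sz = 64, (h + 1)->hb_body - sz points 64 bytes before
the end of the block, so the default loop examines 64 candidate object
starts; when sz > MAXOBJBYTES the object occupies the whole chunk and
lim = h->hb_body leaves exactly one candidate.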
    1776             : 
    1777             : #ifdef ENABLE_DISCLAIM
    1778             : /* Unconditionally mark from all objects which have not been reclaimed. */
    1779             : /* This is useful in order to retain pointers which are reachable from  */
    1780             : /* the disclaim notifiers.                                              */
    1781             : /*                                                                      */
    1782             : /* To determine whether an object has been reclaimed, we require that   */
    1783             : /* any live object has a non-zero value in one of the two lowest bits   */
    1784             : /* of the first word.  On the other hand, a reclaimed object is a       */
    1785             : /* member of a free-list, and thus contains a word-aligned next-pointer */
    1786             : /* as the first word.                                                   */
    1787           0 :  STATIC void GC_push_unconditionally(struct hblk *h, hdr *hhdr)
    1788             :  {
    1789           0 :     size_t sz = hhdr -> hb_sz;
    1790           0 :     word descr = hhdr -> hb_descr;
    1791             :     ptr_t p;
    1792             :     ptr_t lim;
    1793             :     mse * GC_mark_stack_top_reg;
    1794           0 :     mse * mark_stack_limit = GC_mark_stack_limit;
    1795             : 
    1796           0 :     if ((0 | GC_DS_LENGTH) == descr)
    1797           0 :         return;
    1798             : 
    1799           0 :     GC_n_rescuing_pages++;
    1800           0 :     GC_objects_are_marked = TRUE;
    1801           0 :     if (sz > MAXOBJBYTES)
    1802           0 :         lim = h -> hb_body;
    1803             :     else
    1804           0 :         lim = (h + 1)->hb_body - sz;
    1805             : 
    1806           0 :     GC_mark_stack_top_reg = GC_mark_stack_top;
    1807           0 :     for (p = h -> hb_body; (word)p <= (word)lim; p += sz)
    1808           0 :         if ((*(GC_word *)p & 0x3) != 0)
    1809           0 :             PUSH_OBJ(p, hhdr, GC_mark_stack_top_reg, mark_stack_limit);
    1810           0 :     GC_mark_stack_top = GC_mark_stack_top_reg;
    1811             :   }
    1812             : #endif /* ENABLE_DISCLAIM */
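
The low-two-bits test above implies a contract on disclaim-enabled
clients: every live object must keep a nonzero tag in the two lowest
bits of its first word.  A hypothetical client-side layout honoring that
contract (names invented):

    #include "gc.h"

    struct my_disclaimed_obj {
        GC_word tagged_kind;    /* low 2 bits nonzero while live */
        void *data;
    };

    /* Called right after allocating in an object kind that has a     */
    /* disclaim notifier: the 0x1 tag keeps a live object distinct    */
    /* from a free-list link, whose low bits are always zero.         */
    static void init_live_tag(struct my_disclaimed_obj *obj, GC_word kind)
    {
        obj->tagged_kind = (kind << 2) | 0x1;
    }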
    1813             : 
    1814             : #ifndef GC_DISABLE_INCREMENTAL
    1815             :   /* Test whether any page in the given block is dirty.   */
    1816           0 :   STATIC GC_bool GC_block_was_dirty(struct hblk *h, hdr *hhdr)
    1817             :   {
    1818           0 :     size_t sz = hhdr -> hb_sz;
    1819             : 
    1820           0 :     if (sz <= MAXOBJBYTES) {
    1821           0 :          return(GC_page_was_dirty(h));
    1822             :     } else {
    1823           0 :          ptr_t p = (ptr_t)h;
    1824           0 :          while ((word)p < (word)h + sz) {
    1825           0 :              if (GC_page_was_dirty((struct hblk *)p)) return(TRUE);
    1826           0 :              p += HBLKSIZE;
    1827             :          }
    1828           0 :          return(FALSE);
    1829             :     }
    1830             :   }
    1831             : #endif /* GC_DISABLE_INCREMENTAL */
    1832             : 
    1833             : /* Similar to GC_push_marked, but skip over unallocated blocks  */
    1834             : /* and return address of next plausible block.                  */
    1835           0 : STATIC struct hblk * GC_push_next_marked(struct hblk *h)
    1836             : {
    1837           0 :     hdr * hhdr = HDR(h);
    1838             : 
    1839           0 :     if (EXPECT(IS_FORWARDING_ADDR_OR_NIL(hhdr) || HBLK_IS_FREE(hhdr), FALSE)) {
    1840           0 :       h = GC_next_used_block(h);
    1841           0 :       if (h == 0) return(0);
    1842           0 :       hhdr = GC_find_header((ptr_t)h);
    1843             :     }
    1844           0 :     GC_push_marked(h, hhdr);
    1845           0 :     return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
    1846             : }
    1847             : 
    1848             : #ifndef GC_DISABLE_INCREMENTAL
    1849             :   /* Identical to above, but mark only from dirty pages   */
    1850           0 :   STATIC struct hblk * GC_push_next_marked_dirty(struct hblk *h)
    1851             :   {
    1852           0 :     hdr * hhdr = HDR(h);
    1853             : 
    1854           0 :     if (!GC_dirty_maintained) ABORT("Dirty bits not set up");
    1855             :     for (;;) {
    1856           0 :         if (EXPECT(IS_FORWARDING_ADDR_OR_NIL(hhdr)
    1857             :                    || HBLK_IS_FREE(hhdr), FALSE)) {
    1858           0 :           h = GC_next_used_block(h);
    1859           0 :           if (h == 0) return(0);
    1860           0 :           hhdr = GC_find_header((ptr_t)h);
    1861             :         }
    1862             : #       ifdef STUBBORN_ALLOC
    1863             :           if (hhdr -> hb_obj_kind == STUBBORN) {
    1864             :             if (GC_page_was_changed(h) && GC_block_was_dirty(h, hhdr)) {
    1865             :                 break;
    1866             :             }
    1867             :           } else {
    1868             :             if (GC_block_was_dirty(h, hhdr)) break;
    1869             :           }
    1870             : #       else
    1871           0 :           if (GC_block_was_dirty(h, hhdr)) break;
    1872             : #       endif
    1873           0 :         h += OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
    1874           0 :         hhdr = HDR(h);
    1875           0 :     }
    1876           0 :     GC_push_marked(h, hhdr);
    1877           0 :     return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
    1878             :   }
    1879             : #endif /* !GC_DISABLE_INCREMENTAL */
    1880             : 
    1881             : /* Similar to above, but for uncollectible pages.  Needed since we      */
    1882             : /* do not clear marks for such pages, even for full collections.        */
    1883        4018 : STATIC struct hblk * GC_push_next_marked_uncollectable(struct hblk *h)
    1884             : {
    1885        4018 :     hdr * hhdr = HDR(h);
    1886             : 
    1887             :     for (;;) {
    1888       78840 :         if (EXPECT(IS_FORWARDING_ADDR_OR_NIL(hhdr)
    1889             :                    || HBLK_IS_FREE(hhdr), FALSE)) {
    1890         947 :           h = GC_next_used_block(h);
    1891         947 :           if (h == 0) return(0);
    1892         703 :           hhdr = GC_find_header((ptr_t)h);
    1893             :         }
    1894       78596 :         if (hhdr -> hb_obj_kind == UNCOLLECTABLE) {
    1895        3774 :             GC_push_marked(h, hhdr);
    1896        3774 :             break;
    1897             :         }
    1898             : #       ifdef ENABLE_DISCLAIM
    1899       74822 :             if ((hhdr -> hb_flags & MARK_UNCONDITIONALLY) != 0) {
    1900           0 :                 GC_push_unconditionally(h, hhdr);
    1901           0 :                 break;
    1902             :             }
    1903             : #       endif
    1904       74822 :         h += OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
    1905       74822 :         hhdr = HDR(h);
    1906       74822 :     }
    1907        3774 :     return(h + OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz));
    1908             : }

Generated by: LCOV version 1.11