LCOV - code coverage report
Current view: top level - mm/boehm-gc - pthread_support.c (source / functions)
Test: coverage.info          Lines:     221 / 464   47.6 %
Date: 2017-07-14 10:03:36    Functions:  28 /  49   57.1 %

          Line data    Source code
       1             : /*
       2             :  * Copyright (c) 1994 by Xerox Corporation.  All rights reserved.
       3             :  * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
       4             :  * Copyright (c) 1998 by Fergus Henderson.  All rights reserved.
       5             :  * Copyright (c) 2000-2005 by Hewlett-Packard Company.  All rights reserved.
       6             :  *
       7             :  * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
       8             :  * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
       9             :  *
      10             :  * Permission is hereby granted to use or copy this program
      11             :  * for any purpose,  provided the above notices are retained on all copies.
      12             :  * Permission to modify the code and to distribute modified code is granted,
      13             :  * provided the above notices are retained, and a notice that the code was
      14             :  * modified is included with the above copyright notice.
      15             :  */
      16             : 
      17             : #include "private/pthread_support.h"
      18             : 
      19             : /*
      20             :  * Support code originally for LinuxThreads, the clone()-based kernel
      21             :  * thread package for Linux which is included in libc6.
      22             :  *
      23             :  * This code no doubt makes some assumptions beyond what is
      24             :  * guaranteed by the pthread standard, though it now does
      25             :  * very little of that.  It now also supports NPTL, and many
      26             :  * other Posix thread implementations.  We are trying to merge
      27             :  * all flavors of pthread support code into this file.
      28             :  */
      29             : 
      30             : /*
      31             :  * This file (formerly linux_threads.c) also includes code to support HPUX and
      32             :  * OSF1 (Compaq Tru64 Unix, really).  The OSF1 support is based on Eric Benson's
      33             :  * patch.
      34             :  *
      35             :  * Eric also suggested an alternate basis for a lock implementation in
      36             :  * his code:
      37             :  * + #elif defined(OSF1)
      38             :  * +    unsigned long GC_allocate_lock = 0;
      39             :  * +    msemaphore GC_allocate_semaphore;
      40             :  * + #  define GC_TRY_LOCK() \
      41             :  * +    ((msem_lock(&GC_allocate_semaphore, MSEM_IF_NOWAIT) == 0) \
      42             :  * +     ? (GC_allocate_lock = 1) \
      43             :  * +     : 0)
      44             :  * + #  define GC_LOCK_TAKEN GC_allocate_lock
      45             :  */
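                     : 
                     : /*
                     :  * For illustration only: a try-lock in the style suggested above
                     :  * would typically sit inside a spin-then-yield acquisition loop.
                     :  * This is a hedged sketch, not code from this file; GC_TRY_LOCK is
                     :  * the macro proposed above and SPIN_MAX is a hypothetical tuning
                     :  * constant:
                     :  * +  static void GC_acquire_lock(void)
                     :  * +  {
                     :  * +    for (;;) {
                     :  * +      int i;
                     :  * +      for (i = 0; i < SPIN_MAX; ++i) {
                     :  * +        if (GC_TRY_LOCK()) return;
                     :  * +      }
                     :  * +      sched_yield();
                     :  * +    }
                     :  * +  }
                     :  * i.e., spin a bounded number of times, then yield the CPU and retry.
                     :  */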
      46             : 
      47             : #if defined(GC_PTHREADS) && !defined(GC_WIN32_THREADS)
      48             : 
      49             : # include <stdlib.h>
      50             : # include <pthread.h>
      51             : # include <sched.h>
      52             : # include <time.h>
      53             : # include <errno.h>
      54             : # include <unistd.h>
      55             : # if !defined(GC_RTEMS_PTHREADS)
      56             : #   include <sys/mman.h>
      57             : # endif
      58             : # include <sys/time.h>
      59             : # include <sys/types.h>
      60             : # include <sys/stat.h>
      61             : # include <fcntl.h>
      62             : # include <signal.h>
      63             : 
      64             : # include "gc_inline.h"
      65             : 
      66             : #if defined(GC_DARWIN_THREADS)
      67             : # include "private/darwin_semaphore.h"
      68             : #else
      69             : # include <semaphore.h>
      70             : #endif /* !GC_DARWIN_THREADS */
      71             : 
      72             : #if defined(GC_DARWIN_THREADS) || defined(GC_FREEBSD_THREADS)
      73             : # include <sys/sysctl.h>
      74             : #endif /* GC_DARWIN_THREADS */
      75             : 
      76             : #if defined(GC_NETBSD_THREADS) || defined(GC_OPENBSD_THREADS)
      77             : # include <sys/param.h>
      78             : # include <sys/sysctl.h>
      79             : #endif /* GC_NETBSD_THREADS */
      80             : 
      81             : /* Allocator lock definitions.          */
      82             : #if !defined(USE_SPIN_LOCK)
      83             :   GC_INNER pthread_mutex_t GC_allocate_ml = PTHREAD_MUTEX_INITIALIZER;
      84             : #endif
      85             : 
      86             : #ifdef GC_ASSERTIONS
      87             :   GC_INNER unsigned long GC_lock_holder = NO_THREAD;
      88             :                 /* Used only for assertions.    */
      89             : #endif
      90             : 
      91             : #if defined(GC_DGUX386_THREADS)
      92             : # include <sys/dg_sys_info.h>
      93             : # include <sys/_int_psem.h>
      94             :   /* sem_t is an unsigned int in DG/UX. */
      95             :   typedef unsigned int sem_t;
      96             : #endif /* GC_DGUX386_THREADS */
      97             : 
      98             : /* Undefine macros used to redirect pthread primitives. */
      99             : # undef pthread_create
     100             : # ifndef GC_NO_PTHREAD_SIGMASK
     101             : #   undef pthread_sigmask
     102             : # endif
     103             : # ifndef GC_NO_PTHREAD_CANCEL
     104             : #   undef pthread_cancel
     105             : # endif
     106             : # ifdef GC_PTHREAD_EXIT_ATTRIBUTE
     107             : #   undef pthread_exit
     108             : # endif
     109             : # undef pthread_join
     110             : # undef pthread_detach
     111             : # if defined(GC_OSF1_THREADS) && defined(_PTHREAD_USE_MANGLED_NAMES_) \
     112             :      && !defined(_PTHREAD_USE_PTDNAM_)
     113             :   /* Restore the original mangled names on Tru64 UNIX.  */
     114             : #   define pthread_create __pthread_create
     115             : #   define pthread_join __pthread_join
     116             : #   define pthread_detach __pthread_detach
     117             : #   ifndef GC_NO_PTHREAD_CANCEL
     118             : #     define pthread_cancel __pthread_cancel
     119             : #   endif
     120             : #   ifdef GC_PTHREAD_EXIT_ATTRIBUTE
     121             : #     define pthread_exit __pthread_exit
     122             : #   endif
     123             : # endif
     124             : 
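                     : /*
                     :  * For context: gc.h (via gc_pthread_redirects.h) redirects these
                     :  * primitives in client code, roughly as in the simplified sketch
                     :  * below, which is why they must be undefined here before this file
                     :  * can reference the real ones:
                     :  * + #define pthread_create GC_pthread_create
                     :  * + #define pthread_join   GC_pthread_join
                     :  * + #define pthread_detach GC_pthread_detach
                     :  * A client call such as pthread_create(&t, 0, fn, arg) therefore
                     :  * compiles to GC_pthread_create(&t, 0, fn, arg), letting the
                     :  * collector register the new thread before fn runs.
                     :  */
                     : 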
     125             : #ifdef GC_USE_LD_WRAP
     126             : #   define WRAP_FUNC(f) __wrap_##f
     127             : #   define REAL_FUNC(f) __real_##f
     128             :     int REAL_FUNC(pthread_create)(pthread_t *,
     129             :                                   GC_PTHREAD_CREATE_CONST pthread_attr_t *,
     130             :                                   void *(*start_routine)(void *), void *);
     131             :     int REAL_FUNC(pthread_join)(pthread_t, void **);
     132             :     int REAL_FUNC(pthread_detach)(pthread_t);
     133             : #   ifndef GC_NO_PTHREAD_SIGMASK
     134             :       int REAL_FUNC(pthread_sigmask)(int, const sigset_t *, sigset_t *);
     135             : #   endif
     136             : #   ifndef GC_NO_PTHREAD_CANCEL
     137             :       int REAL_FUNC(pthread_cancel)(pthread_t);
     138             : #   endif
     139             : #   ifdef GC_PTHREAD_EXIT_ATTRIBUTE
     140             :       void REAL_FUNC(pthread_exit)(void *) GC_PTHREAD_EXIT_ATTRIBUTE;
     141             : #   endif
     142             : #else
     143             : #   ifdef GC_USE_DLOPEN_WRAP
     144             : #     include <dlfcn.h>
     145             : #     define WRAP_FUNC(f) f
     146             : #     define REAL_FUNC(f) GC_real_##f
     147             :       /* We define both GC_f and plain f to be the wrapped function.    */
     148             :       /* In that way plain calls work, as do calls from files that      */
     149             :       /* included gc.h, which redefined f to GC_f.                      */
     150             :       /* FIXME: Needs work for DARWIN and Tru64 (OSF1) */
     151             :       typedef int (* GC_pthread_create_t)(pthread_t *,
     152             :                                     GC_PTHREAD_CREATE_CONST pthread_attr_t *,
     153             :                                     void * (*)(void *), void *);
     154             :       static GC_pthread_create_t REAL_FUNC(pthread_create);
     155             : #     ifndef GC_NO_PTHREAD_SIGMASK
     156             :         typedef int (* GC_pthread_sigmask_t)(int, const sigset_t *,
     157             :                                              sigset_t *);
     158             :         static GC_pthread_sigmask_t REAL_FUNC(pthread_sigmask);
     159             : #     endif
     160             :       typedef int (* GC_pthread_join_t)(pthread_t, void **);
     161             :       static GC_pthread_join_t REAL_FUNC(pthread_join);
     162             :       typedef int (* GC_pthread_detach_t)(pthread_t);
     163             :       static GC_pthread_detach_t REAL_FUNC(pthread_detach);
     164             : #     ifndef GC_NO_PTHREAD_CANCEL
     165             :         typedef int (* GC_pthread_cancel_t)(pthread_t);
     166             :         static GC_pthread_cancel_t REAL_FUNC(pthread_cancel);
     167             : #     endif
     168             : #     ifdef GC_PTHREAD_EXIT_ATTRIBUTE
     169             :         typedef void (* GC_pthread_exit_t)(void *) GC_PTHREAD_EXIT_ATTRIBUTE;
     170             :         static GC_pthread_exit_t REAL_FUNC(pthread_exit);
     171             : #     endif
     172             : #   else
     173             : #     define WRAP_FUNC(f) GC_##f
     174             : #     if !defined(GC_DGUX386_THREADS)
     175             : #       define REAL_FUNC(f) f
     176             : #     else /* GC_DGUX386_THREADS */
     177             : #       define REAL_FUNC(f) __d10_##f
     178             : #     endif /* GC_DGUX386_THREADS */
     179             : #   endif
     180             : #endif
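                     : 
                     : /*
                     :  * To summarize the three strategies above: a wrapper defined as
                     :  * +  int WRAP_FUNC(pthread_join)(pthread_t t, void **res)
                     :  * whose body ends in REAL_FUNC(pthread_join)(t, res) expands to:
                     :  * +  GC_USE_LD_WRAP:     __wrap_pthread_join calling
                     :  * +                      __real_pthread_join (linker-resolved);
                     :  * +  GC_USE_DLOPEN_WRAP: pthread_join calling GC_real_pthread_join
                     :  * +                      (a function pointer filled in by dlsym());
                     :  * +  otherwise:          GC_pthread_join calling pthread_join
                     :  * +                      (or __d10_pthread_join on DG/UX).
                     :  */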
     181             : 
     182             : #if defined(GC_USE_LD_WRAP) || defined(GC_USE_DLOPEN_WRAP)
     183             :   /* Define GC_ functions as aliases for the plain ones, which will     */
     184             :   /* be intercepted.  This allows files which include gc.h, and hence   */
     185             :   /* generate references to the GC_ symbols, to see the right symbols.  */
     186             :   GC_API int GC_pthread_create(pthread_t * t,
     187             :                                GC_PTHREAD_CREATE_CONST pthread_attr_t *a,
     188             :                                void * (* fn)(void *), void * arg)
     189             :   {
     190             :     return pthread_create(t, a, fn, arg);
     191             :   }
     192             : 
     193             : # ifndef GC_NO_PTHREAD_SIGMASK
     194             :     GC_API int GC_pthread_sigmask(int how, const sigset_t *mask,
     195             :                                   sigset_t *old)
     196             :     {
     197             :       return pthread_sigmask(how, mask, old);
     198             :     }
     199             : # endif /* !GC_NO_PTHREAD_SIGMASK */
     200             : 
     201             :   GC_API int GC_pthread_join(pthread_t t, void **res)
     202             :   {
     203             :     return pthread_join(t, res);
     204             :   }
     205             : 
     206             :   GC_API int GC_pthread_detach(pthread_t t)
     207             :   {
     208             :     return pthread_detach(t);
     209             :   }
     210             : 
     211             : # ifndef GC_NO_PTHREAD_CANCEL
     212             :     GC_API int GC_pthread_cancel(pthread_t t)
     213             :     {
     214             :       return pthread_cancel(t);
     215             :     }
     216             : # endif /* !GC_NO_PTHREAD_CANCEL */
     217             : 
     218             : # ifdef GC_PTHREAD_EXIT_ATTRIBUTE
     219             :     GC_API GC_PTHREAD_EXIT_ATTRIBUTE void GC_pthread_exit(void *retval)
     220             :     {
     221             :       pthread_exit(retval);
     222             :     }
     223             : # endif /* GC_PTHREAD_EXIT_ATTRIBUTE */
     224             : #endif /* Linker-based interception. */
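                     : 
                     : /*
                     :  * With GC_USE_LD_WRAP the interception is wired up at link time via
                     :  * the GNU ld --wrap option.  A hedged build-line sketch (file names
                     :  * are placeholders; the set of wrapped symbols depends on the
                     :  * configuration macros above):
                     :  * +  cc app.o -Wl,--wrap=pthread_create -Wl,--wrap=pthread_join \
                     :  * +     -Wl,--wrap=pthread_detach -lgc -lpthread
                     :  * ld then resolves undefined references to pthread_create against
                     :  * __wrap_pthread_create, and references to __real_pthread_create
                     :  * against the original libpthread definition.
                     :  */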
     225             : 
     226             : #ifdef GC_USE_DLOPEN_WRAP
     227             :   STATIC GC_bool GC_syms_initialized = FALSE;
     228             : 
     229             :   STATIC void GC_init_real_syms(void)
     230             :   {
     231             :     void *dl_handle;
     232             : 
     233             :     if (GC_syms_initialized) return;
     234             : #   ifdef RTLD_NEXT
     235             :       dl_handle = RTLD_NEXT;
     236             : #   else
     237             :       dl_handle = dlopen("libpthread.so.0", RTLD_LAZY);
     238             :       if (NULL == dl_handle) {
     239             :         dl_handle = dlopen("libpthread.so", RTLD_LAZY); /* without ".0" */
     240             :       }
     241             :       if (NULL == dl_handle) ABORT("Couldn't open libpthread");
     242             : #   endif
     243             :     REAL_FUNC(pthread_create) = (GC_pthread_create_t)
     244             :                                 dlsym(dl_handle, "pthread_create");
     245             : #   ifdef RTLD_NEXT
     246             :       if (REAL_FUNC(pthread_create) == 0)
     247             :         ABORT("pthread_create not found"
     248             :               " (probably -lgc is specified after -lpthread)");
     249             : #   endif
     250             : #   ifndef GC_NO_PTHREAD_SIGMASK
     251             :       REAL_FUNC(pthread_sigmask) = (GC_pthread_sigmask_t)
     252             :                                 dlsym(dl_handle, "pthread_sigmask");
     253             : #   endif
     254             :     REAL_FUNC(pthread_join) = (GC_pthread_join_t)
     255             :                                 dlsym(dl_handle, "pthread_join");
     256             :     REAL_FUNC(pthread_detach) = (GC_pthread_detach_t)
     257             :                                   dlsym(dl_handle, "pthread_detach");
     258             : #   ifndef GC_NO_PTHREAD_CANCEL
     259             :       REAL_FUNC(pthread_cancel) = (GC_pthread_cancel_t)
     260             :                                     dlsym(dl_handle, "pthread_cancel");
     261             : #   endif
     262             : #   ifdef GC_PTHREAD_EXIT_ATTRIBUTE
     263             :       REAL_FUNC(pthread_exit) = (GC_pthread_exit_t)
     264             :                                   dlsym(dl_handle, "pthread_exit");
     265             : #   endif
     266             :     GC_syms_initialized = TRUE;
     267             :   }
     268             : 
     269             : # define INIT_REAL_SYMS() if (EXPECT(GC_syms_initialized, TRUE)) {} \
     270             :                             else GC_init_real_syms()
     271             : #else
     272             : # define INIT_REAL_SYMS() (void)0
     273             : #endif
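                     : 
                     : /*
                     :  * The pthread wrappers defined later in this file follow the
                     :  * pattern sketched below (simplified; the real versions also do
                     :  * collector bookkeeping such as registering or unregistering the
                     :  * thread):
                     :  * +  int WRAP_FUNC(pthread_join)(pthread_t thread, void **retval)
                     :  * +  {
                     :  * +    INIT_REAL_SYMS();
                     :  * +    ...
                     :  * +    return REAL_FUNC(pthread_join)(thread, retval);
                     :  * +  }
                     :  * INIT_REAL_SYMS() is a no-op unless GC_USE_DLOPEN_WRAP is defined,
                     :  * in which case it performs the dlsym() lookups above exactly once.
                     :  */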
     274             : 
     275             : static GC_bool parallel_initialized = FALSE;
     276             : 
     277             : GC_INNER GC_bool GC_need_to_lock = FALSE;
     278             : 
     279             : STATIC int GC_nprocs = 1;
     280             :                         /* Number of processors.  We may not have       */
     281             :                         /* access to all of them, but this is as good   */
     282             :                         /* a guess as any ...                           */
     283             : 
     284             : #ifdef THREAD_LOCAL_ALLOC
     285             :   /* We must explicitly mark ptrfree and gcj free lists, since the free */
     286             :   /* list links wouldn't otherwise be found.  We also set them in the   */
     287             :   /* normal free lists, since that involves touching less memory than   */
     288             :   /* if we scanned them normally.                                       */
     289         244 :   GC_INNER void GC_mark_thread_local_free_lists(void)
     290             :   {
     291             :     int i;
     292             :     GC_thread p;
     293             : 
     294       62708 :     for (i = 0; i < THREAD_TABLE_SZ; ++i) {
     295       62951 :       for (p = GC_threads[i]; 0 != p; p = p -> next) {
     296         487 :         if (!(p -> flags & FINISHED))
     297         487 :           GC_mark_thread_local_fls_for(&(p->tlfs));
     298             :       }
     299             :     }
     300         244 :   }
     301             : 
     302             : # if defined(GC_ASSERTIONS)
     303             :     void GC_check_tls_for(GC_tlfs p);
     304             : #   if defined(USE_CUSTOM_SPECIFIC)
     305             :       void GC_check_tsd_marks(tsd *key);
     306             : #   endif
     307             : 
     308             :     /* Check that all thread-local free-lists are completely marked.    */
     309             :     /* Also check that thread-specific-data structures are marked.      */
     310             :     void GC_check_tls(void)
     311             :     {
     312             :         int i;
     313             :         GC_thread p;
     314             : 
     315             :         for (i = 0; i < THREAD_TABLE_SZ; ++i) {
     316             :           for (p = GC_threads[i]; 0 != p; p = p -> next) {
     317             :             if (!(p -> flags & FINISHED))
     318             :               GC_check_tls_for(&(p->tlfs));
     319             :           }
     320             :         }
     321             : #       if defined(USE_CUSTOM_SPECIFIC)
     322             :           if (GC_thread_key != 0)
     323             :             GC_check_tsd_marks(GC_thread_key);
     324             : #       endif
     325             :     }
     326             : # endif /* GC_ASSERTIONS */
     327             : 
     328             : #endif /* THREAD_LOCAL_ALLOC */
     329             : 
     330             : #ifdef PARALLEL_MARK
     331             : 
     332             : # ifndef MAX_MARKERS
     333             : #   define MAX_MARKERS 16
     334             : # endif
     335             : 
     336             : static ptr_t marker_sp[MAX_MARKERS - 1] = {0};
     337             : #ifdef IA64
     338             :   static ptr_t marker_bsp[MAX_MARKERS - 1] = {0};
     339             : #endif
     340             : 
     341             : #if defined(GC_DARWIN_THREADS) && !defined(GC_NO_THREADS_DISCOVERY)
     342             :   static mach_port_t marker_mach_threads[MAX_MARKERS - 1] = {0};
     343             : 
     344             :   /* Used only by GC_suspend_thread_list().     */
     345             :   GC_INNER GC_bool GC_is_mach_marker(thread_act_t thread)
     346             :   {
     347             :     int i;
     348             :     for (i = 0; i < GC_markers_m1; i++) {
     349             :       if (marker_mach_threads[i] == thread)
     350             :         return TRUE;
     351             :     }
     352             :     return FALSE;
     353             :   }
     354             : #endif /* GC_DARWIN_THREADS */
     355             : 
     356         489 : STATIC void * GC_mark_thread(void * id)
     357             : {
     358         489 :   word my_mark_no = 0;
     359             :   IF_CANCEL(int cancel_state;)
     360             : 
     361         489 :   if ((word)id == (word)-1) return 0; /* to make compiler happy */
     362         489 :   DISABLE_CANCEL(cancel_state);
     363             :                          /* Mark threads are not cancellable; they      */
     364             :                          /* should be invisible to the client.          */
     365         489 :   marker_sp[(word)id] = GC_approx_sp();
     366             : # ifdef IA64
     367             :     marker_bsp[(word)id] = GC_save_regs_in_stack();
     368             : # endif
     369             : # if defined(GC_DARWIN_THREADS) && !defined(GC_NO_THREADS_DISCOVERY)
     370             :     marker_mach_threads[(word)id] = mach_thread_self();
     371             : # endif
     372             : 
     373      398636 :   for (;; ++my_mark_no) {
     374             :     /* GC_mark_no is passed only to allow GC_help_marker to terminate   */
     375             :     /* promptly.  This would be important if it were called from the signal */
     376             :     /* handler or from the GC lock acquisition code.  Under Linux, it's */
     377             :     /* not safe to call it from a signal handler, since it uses mutexes */
     378             :     /* and condition variables.  Since it is called only here, the      */
     379             :     /* argument is unnecessary.                                         */
     380      399125 :     if (my_mark_no < GC_mark_no || my_mark_no > GC_mark_no + 2) {
     381             :         /* resynchronize if we get far off, e.g. because GC_mark_no     */
     382             :         /* wrapped.                                                     */
     383      133594 :         my_mark_no = GC_mark_no;
     384             :     }
     385             : #   ifdef DEBUG_THREADS
     386             :       GC_log_printf("Starting mark helper for mark number %lu\n",
     387             :                     (unsigned long)my_mark_no);
     388             : #   endif
     389      399125 :     GC_help_marker(my_mark_no);
     390      398636 :   }
     391             : }
     392             : 
     393             : STATIC pthread_t GC_mark_threads[MAX_MARKERS];
     394             : 
     395             : #ifdef CAN_HANDLE_FORK
     396             :   static int available_markers_m1 = 0;
     397             : # define start_mark_threads GC_start_mark_threads
     398             :   GC_API void GC_CALL
     399             : #else
     400             : # define available_markers_m1 GC_markers_m1
     401             :   static void
     402             : #endif
     403         163 : start_mark_threads(void)
     404             : {
     405             :     int i;
     406             :     pthread_attr_t attr;
     407             : 
     408             :     GC_ASSERT(I_DONT_HOLD_LOCK());
     409             : #   ifdef CAN_HANDLE_FORK
     410         163 :       if (available_markers_m1 <= 0 || GC_parallel) return;
     411             :                 /* Skip if parallel markers disabled or already started. */
     412             : #   endif
     413             : 
     414             :     INIT_REAL_SYMS(); /* for pthread_create */
     415             : 
     416         163 :     if (0 != pthread_attr_init(&attr)) ABORT("pthread_attr_init failed");
     417         163 :     if (0 != pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED))
     418           0 :         ABORT("pthread_attr_setdetachstate failed");
     419             : 
     420             : #   if defined(HPUX) || defined(GC_DGUX386_THREADS)
     421             :       /* Default stack size is usually too small: fix it. */
     422             :       /* Otherwise marker threads or GC may run out of    */
     423             :       /* space.                                           */
     424             : #     define MIN_STACK_SIZE (8*HBLKSIZE*sizeof(word))
     425             :       {
     426             :         size_t old_size;
     427             : 
     428             :         if (pthread_attr_getstacksize(&attr, &old_size) != 0)
     429             :           ABORT("pthread_attr_getstacksize failed");
     430             :         if (old_size < MIN_STACK_SIZE) {
     431             :           if (pthread_attr_setstacksize(&attr, MIN_STACK_SIZE) != 0)
     432             :             ABORT("pthread_attr_setstacksize failed");
     433             :         }
     434             :       }
     435             : #   endif /* HPUX || GC_DGUX386_THREADS */
     436         652 :     for (i = 0; i < available_markers_m1; ++i) {
     437         489 :       if (0 != REAL_FUNC(pthread_create)(GC_mark_threads + i, &attr,
     438             :                               GC_mark_thread, (void *)(word)i)) {
     439           0 :         WARN("Marker thread creation failed, errno = %" WARN_PRIdPTR "\n",
     440             :              errno);
     441             :         /* Don't try to create other marker threads.    */
     442           0 :         break;
     443             :       }
     444             :     }
     445         163 :     GC_markers_m1 = i;
     446         163 :     pthread_attr_destroy(&attr);
     447         163 :     GC_COND_LOG_PRINTF("Started %d mark helper threads\n", GC_markers_m1);
     448             : }
     449             : 
     450             : #endif /* PARALLEL_MARK */
     451             : 
     452             : GC_INNER GC_bool GC_thr_initialized = FALSE;
     453             : 
     454             : GC_INNER volatile GC_thread GC_threads[THREAD_TABLE_SZ] = {0};
     455             : 
     456           0 : void GC_push_thread_structures(void)
     457             : {
     458             :     GC_ASSERT(I_HOLD_LOCK());
     459           0 :     GC_push_all((ptr_t)(GC_threads), (ptr_t)(GC_threads)+sizeof(GC_threads));
     460             : #   if defined(THREAD_LOCAL_ALLOC)
     461           0 :       GC_push_all((ptr_t)(&GC_thread_key),
     462             :                   (ptr_t)(&GC_thread_key) + sizeof(GC_thread_key));
     463             : #   endif
     464           0 : }
     465             : 
     466             : #ifdef DEBUG_THREADS
     467             :   STATIC int GC_count_threads(void)
     468             :   {
     469             :     int i;
     470             :     int count = 0;
     471             :     GC_ASSERT(I_HOLD_LOCK());
     472             :     for (i = 0; i < THREAD_TABLE_SZ; ++i) {
     473             :         GC_thread th = GC_threads[i];
     474             :         while (th) {
     475             :             if (!(th->flags & FINISHED))
     476             :                 ++count;
     477             :             th = th->next;
     478             :         }
     479             :     }
     480             :     return count;
     481             :   }
     482             : #endif /* DEBUG_THREADS */
     483             : 
     484             : /* It may not be safe to allocate when we register the first thread.    */
     485             : static struct GC_Thread_Rep first_thread;
     486             : 
     487             : /* Add a thread to GC_threads.  We assume it wasn't already there.      */
     488             : /* Caller holds allocation lock.                                        */
     489         662 : STATIC GC_thread GC_new_thread(pthread_t id)
     490             : {
     491         662 :     int hv = NUMERIC_THREAD_ID(id) % THREAD_TABLE_SZ;
     492             :     GC_thread result;
     493             :     static GC_bool first_thread_used = FALSE;
     494             : #   ifdef DEBUG_THREADS
     495             :         GC_log_printf("Creating thread %p\n", (void *)id);
     496             : #   endif
     497             : 
     498             :     GC_ASSERT(I_HOLD_LOCK());
     499         662 :     if (!EXPECT(first_thread_used, TRUE)) {
     500         163 :         result = &first_thread;
     501         163 :         first_thread_used = TRUE;
     502             :     } else {
     503         499 :         result = (struct GC_Thread_Rep *)
     504             :                  GC_INTERNAL_MALLOC(sizeof(struct GC_Thread_Rep), NORMAL);
     505         499 :         if (result == 0) return(0);
     506             :     }
     507         662 :     result -> id = id;
     508             : #   ifdef PLATFORM_ANDROID
     509             :       result -> kernel_id = gettid();
     510             : #   endif
     511         662 :     result -> next = GC_threads[hv];
     512         662 :     GC_threads[hv] = result;
     513             : #   ifdef NACL
     514             :       GC_nacl_gc_thread_self = result;
     515             :       GC_nacl_initialize_gc_thread();
     516             : #   endif
     517             :     GC_ASSERT(result -> flags == 0 && result -> thread_blocked == 0);
     518         662 :     return(result);
     519             : }
     520             : 
     521             : /* Delete a thread from GC_threads.  We assume it is there.     */
     522             : /* (The code intentionally traps if it wasn't.)                 */
     523             : /* It is safe to delete the main thread.                        */
     524         151 : STATIC void GC_delete_thread(pthread_t id)
     525             : {
     526         151 :     int hv = NUMERIC_THREAD_ID(id) % THREAD_TABLE_SZ;
     527         151 :     register GC_thread p = GC_threads[hv];
     528         151 :     register GC_thread prev = 0;
     529             : 
     530             : #   ifdef DEBUG_THREADS
     531             :       GC_log_printf("Deleting thread %p, n_threads = %d\n",
     532             :                     (void *)id, GC_count_threads());
     533             : #   endif
     534             : 
     535             : #   ifdef NACL
     536             :       GC_nacl_shutdown_gc_thread();
     537             :       GC_nacl_gc_thread_self = NULL;
     538             : #   endif
     539             : 
     540             :     GC_ASSERT(I_HOLD_LOCK());
     541         446 :     while (!THREAD_EQUAL(p -> id, id)) {
     542         144 :         prev = p;
     543         144 :         p = p -> next;
     544             :     }
     545         151 :     if (prev == 0) {
     546           7 :         GC_threads[hv] = p -> next;
     547             :     } else {
     548         144 :         prev -> next = p -> next;
     549             :     }
     550         151 :     if (p != &first_thread) {
     551             : #     ifdef GC_DARWIN_THREADS
     552             :         mach_port_deallocate(mach_task_self(), p->stop_info.mach_thread);
     553             : #     endif
     554         151 :       GC_INTERNAL_FREE(p);
     555             :     }
     556         151 : }
     557             : 
     558             : /* If a thread has been joined, but we have not yet             */
     559             : /* been notified, then there may be more than one thread        */
     560             : /* in the table with the same pthread id.                       */
     561             : /* This is OK, but we need a way to delete a specific one.      */
     562           0 : STATIC void GC_delete_gc_thread(GC_thread t)
     563             : {
     564           0 :     pthread_t id = t -> id;
     565           0 :     int hv = NUMERIC_THREAD_ID(id) % THREAD_TABLE_SZ;
     566           0 :     register GC_thread p = GC_threads[hv];
     567           0 :     register GC_thread prev = 0;
     568             : 
     569             :     GC_ASSERT(I_HOLD_LOCK());
     570           0 :     while (p != t) {
     571           0 :         prev = p;
     572           0 :         p = p -> next;
     573             :     }
     574           0 :     if (prev == 0) {
     575           0 :         GC_threads[hv] = p -> next;
     576             :     } else {
     577           0 :         prev -> next = p -> next;
     578             :     }
     579             : #   ifdef GC_DARWIN_THREADS
     580             :         mach_port_deallocate(mach_task_self(), p->stop_info.mach_thread);
     581             : #   endif
     582           0 :     GC_INTERNAL_FREE(p);
     583             : 
     584             : #   ifdef DEBUG_THREADS
     585             :       GC_log_printf("Deleted thread %p, n_threads = %d\n",
     586             :                     (void *)id, GC_count_threads());
     587             : #   endif
     588           0 : }
     589             : 
     590             : /* Return a GC_thread corresponding to a given pthread_t.       */
     591             : /* Returns 0 if it's not there.                                 */
     592             : /* Caller holds allocation lock or otherwise inhibits           */
     593             : /* updates.                                                     */
     594             : /* If there is more than one thread with the given id we        */
     595             : /* return the most recent one.                                  */
     596         908 : GC_INNER GC_thread GC_lookup_thread(pthread_t id)
     597             : {
     598         908 :     int hv = NUMERIC_THREAD_ID(id) % THREAD_TABLE_SZ;
     599         908 :     register GC_thread p = GC_threads[hv];
     600             : 
     601         908 :     while (p != 0 && !THREAD_EQUAL(p -> id, id)) p = p -> next;
     602         908 :     return(p);
     603             : }
     604             : 
     605             : /* Called by GC_finalize() (in case an allocation failure is observed). */
     606           3 : GC_INNER void GC_reset_finalizer_nested(void)
     607             : {
     608           3 :   GC_thread me = GC_lookup_thread(pthread_self());
     609           3 :   me->finalizer_nested = 0;
     610           3 : }
     611             : 
     612             : /* Checks and updates the thread-local level of finalizers recursion.   */
     613             : /* Returns NULL if GC_invoke_finalizers() should not be called by the   */
     614             : /* collector (to minimize the risk of a deep finalizers recursion),     */
     615             : /* otherwise returns a pointer to the thread-local finalizer_nested.    */
     616             : /* Called by GC_notify_or_invoke_finalizers() only (the lock is held).  */
     617           0 : GC_INNER unsigned char *GC_check_finalizer_nested(void)
     618             : {
     619           0 :   GC_thread me = GC_lookup_thread(pthread_self());
     620           0 :   unsigned nesting_level = me->finalizer_nested;
     621           0 :   if (nesting_level) {
     622             :     /* We are inside another GC_invoke_finalizers().            */
     623             :     /* Skip some implicitly-called GC_invoke_finalizers()       */
     624             :     /* depending on the nesting (recursion) level.              */
     625           0 :     if (++me->finalizer_skipped < (1U << nesting_level)) return NULL;
     626           0 :     me->finalizer_skipped = 0;
     627             :   }
     628           0 :   me->finalizer_nested = (unsigned char)(nesting_level + 1);
     629           0 :   return &me->finalizer_nested;
     630             : }
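                     : 
                     : /*
                     :  * A worked example of the backoff above: at nesting_level == 3 the
                     :  * test ++me->finalizer_skipped < (1U << 3) returns NULL for seven
                     :  * consecutive implicit calls and lets the eighth proceed, so nested
                     :  * finalizer invocations become exponentially rarer as the recursion
                     :  * deepens.
                     :  */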
     631             : 
     632             : #if defined(GC_ASSERTIONS) && defined(THREAD_LOCAL_ALLOC)
     633             :   /* This is called from thread-local GC_malloc(). */
     634             :   GC_bool GC_is_thread_tsd_valid(void *tsd)
     635             :   {
     636             :     GC_thread me;
     637             :     DCL_LOCK_STATE;
     638             : 
     639             :     LOCK();
     640             :     me = GC_lookup_thread(pthread_self());
     641             :     UNLOCK();
     642             :     return (word)tsd >= (word)(&me->tlfs)
     643             :             && (word)tsd < (word)(&me->tlfs) + sizeof(me->tlfs);
     644             :   }
     645             : #endif /* GC_ASSERTIONS && THREAD_LOCAL_ALLOC */
     646             : 
     647           0 : GC_API int GC_CALL GC_thread_is_registered(void)
     648             : {
     649           0 :     pthread_t self = pthread_self();
     650             :     GC_thread me;
     651             :     DCL_LOCK_STATE;
     652             : 
     653           0 :     LOCK();
     654           0 :     me = GC_lookup_thread(self);
     655           0 :     UNLOCK();
     656           0 :     return me != NULL;
     657             : }
     658             : 
     659             : #ifdef CAN_HANDLE_FORK
     660             : /* Remove all entries from the GC_threads table, except the     */
     661             : /* one for the current thread.  We need to do this in the child */
     662             : /* process after a fork(), since only the current thread        */
     663             : /* survives in the child.                                       */
     664           0 : STATIC void GC_remove_all_threads_but_me(void)
     665             : {
     666           0 :     pthread_t self = pthread_self();
     667             :     int hv;
     668             :     GC_thread p, next, me;
     669             : 
     670           0 :     for (hv = 0; hv < THREAD_TABLE_SZ; ++hv) {
     671           0 :       me = 0;
     672           0 :       for (p = GC_threads[hv]; 0 != p; p = next) {
     673           0 :         next = p -> next;
     674           0 :         if (THREAD_EQUAL(p -> id, self)) {
     675           0 :           me = p;
     676           0 :           p -> next = 0;
     677             : #         ifdef GC_DARWIN_THREADS
     678             :             /* Update thread Id after fork (it is OK to call    */
     679             :             /* GC_destroy_thread_local and GC_free_internal     */
     680             :             /* before update).                                  */
     681             :             me -> stop_info.mach_thread = mach_thread_self();
     682             : #         elif defined(PLATFORM_ANDROID)
     683             :             me -> kernel_id = gettid();
     684             : #         endif
     685             : #         if defined(THREAD_LOCAL_ALLOC) && !defined(USE_CUSTOM_SPECIFIC)
     686             :             /* Some TLS implementations might not be fork-friendly, so  */
     687             :             /* we re-assign thread-local pointer to 'tlfs' for safety   */
     688             :             /* instead of the assertion check (again, it is OK to call  */
     689             :             /* GC_destroy_thread_local and GC_free_internal before).    */
     690           0 :             if (GC_setspecific(GC_thread_key, &me->tlfs) != 0)
     691             :               ABORT("GC_setspecific failed (in child)");
     692             : #         endif
     693             :         } else {
     694             : #         ifdef THREAD_LOCAL_ALLOC
     695           0 :             if (!(p -> flags & FINISHED)) {
     696           0 :               GC_destroy_thread_local(&(p->tlfs));
     697             :               GC_remove_specific(GC_thread_key);
     698             :             }
     699             : #         endif
     700           0 :           if (p != &first_thread) GC_INTERNAL_FREE(p);
     701             :         }
     702             :       }
     703           0 :       GC_threads[hv] = me;
     704             :     }
     705           0 : }
     706             : #endif /* CAN_HANDLE_FORK */
     707             : 
     708             : #ifdef USE_PROC_FOR_LIBRARIES
     709             :   GC_INNER GC_bool GC_segment_is_thread_stack(ptr_t lo, ptr_t hi)
     710             :   {
     711             :     int i;
     712             :     GC_thread p;
     713             : 
     714             :     GC_ASSERT(I_HOLD_LOCK());
     715             : #   ifdef PARALLEL_MARK
     716             :       for (i = 0; i < GC_markers_m1; ++i) {
     717             :         if ((word)marker_sp[i] > (word)lo && (word)marker_sp[i] < (word)hi)
     718             :           return TRUE;
     719             : #       ifdef IA64
     720             :           if ((word)marker_bsp[i] > (word)lo
     721             :               && (word)marker_bsp[i] < (word)hi)
     722             :             return TRUE;
     723             : #       endif
     724             :       }
     725             : #   endif
     726             :     for (i = 0; i < THREAD_TABLE_SZ; i++) {
     727             :       for (p = GC_threads[i]; p != 0; p = p -> next) {
     728             :         if (0 != p -> stack_end) {
     729             : #         ifdef STACK_GROWS_UP
     730             :             if ((word)p->stack_end >= (word)lo
     731             :                 && (word)p->stack_end < (word)hi)
     732             :               return TRUE;
     733             : #         else /* STACK_GROWS_DOWN */
     734             :             if ((word)p->stack_end > (word)lo
     735             :                 && (word)p->stack_end <= (word)hi)
     736             :               return TRUE;
     737             : #         endif
     738             :         }
     739             :       }
     740             :     }
     741             :     return FALSE;
     742             :   }
     743             : #endif /* USE_PROC_FOR_LIBRARIES */
     744             : 
     745             : #ifdef IA64
     746             :   /* Find the largest stack_base smaller than bound.  May be used       */
     747             :   /* to find the boundary between a register stack and adjacent         */
     748             :   /* immediately preceding memory stack.                                */
     749             :   GC_INNER ptr_t GC_greatest_stack_base_below(ptr_t bound)
     750             :   {
     751             :     int i;
     752             :     GC_thread p;
     753             :     ptr_t result = 0;
     754             : 
     755             :     GC_ASSERT(I_HOLD_LOCK());
     756             : #   ifdef PARALLEL_MARK
     757             :       for (i = 0; i < GC_markers_m1; ++i) {
     758             :         if ((word)marker_sp[i] > (word)result
     759             :             && (word)marker_sp[i] < (word)bound)
     760             :           result = marker_sp[i];
     761             :       }
     762             : #   endif
     763             :     for (i = 0; i < THREAD_TABLE_SZ; i++) {
     764             :       for (p = GC_threads[i]; p != 0; p = p -> next) {
     765             :         if ((word)p->stack_end > (word)result
     766             :             && (word)p->stack_end < (word)bound) {
     767             :           result = p -> stack_end;
     768             :         }
     769             :       }
     770             :     }
     771             :     return result;
     772             :   }
     773             : #endif /* IA64 */
     774             : 
     775             : #ifndef STAT_READ
     776             :   /* Also defined in os_dep.c.  */
     777             : # define STAT_BUF_SIZE 4096
     778             : # define STAT_READ read
     779             :         /* If read is wrapped, this may need to be redefined to call    */
     780             :         /* the real one.                                                */
     781             : #endif
     782             : 
     783             : #ifdef GC_HPUX_THREADS
     784             : # define GC_get_nprocs() pthread_num_processors_np()
     785             : 
     786             : #elif defined(GC_OSF1_THREADS) || defined(GC_AIX_THREADS) \
     787             :       || defined(GC_SOLARIS_THREADS) || defined(GC_GNU_THREADS) \
     788             :       || defined(PLATFORM_ANDROID) || defined(NACL)
     789             :   GC_INLINE int GC_get_nprocs(void)
     790             :   {
     791             :     int nprocs = (int)sysconf(_SC_NPROCESSORS_ONLN);
     792             :     return nprocs > 0 ? nprocs : 1; /* ignore error silently */
     793             :   }
     794             : 
     795             : #elif defined(GC_IRIX_THREADS)
     796             :   GC_INLINE int GC_get_nprocs(void)
     797             :   {
     798             :     int nprocs = (int)sysconf(_SC_NPROC_ONLN);
     799             :     return nprocs > 0 ? nprocs : 1; /* ignore error silently */
     800             :   }
     801             : 
     802             : #elif defined(GC_LINUX_THREADS) /* && !PLATFORM_ANDROID && !NACL */
     803             :   /* Return the number of processors. */
     804         163 :   STATIC int GC_get_nprocs(void)
     805             :   {
     806             :     /* Should be "return sysconf(_SC_NPROCESSORS_ONLN);" but that     */
     807             :     /* appears to be buggy in many cases.                             */
     808             :     /* We look for lines "cpu<n>" in /proc/stat.                      */
     809             :     char stat_buf[STAT_BUF_SIZE];
     810             :     int f;
     811             :     int result, i, len;
     812             : 
     813         163 :     f = open("/proc/stat", O_RDONLY);
     814         163 :     if (f < 0) {
     815           0 :       WARN("Couldn't read /proc/stat\n", 0);
     816           0 :       return 1; /* assume an uniprocessor */
     817             :     }
     818         163 :     len = STAT_READ(f, stat_buf, STAT_BUF_SIZE);
     819         163 :     close(f);
     820             : 
     821         163 :     result = 1;
     822             :         /* Some old kernels only have a single "cpu nnnn ..."   */
     823             :         /* entry in /proc/stat.  We identify those as           */
     824             :         /* uniprocessors.                                       */
     825             : 
     826      250042 :     for (i = 0; i < len - 100; ++i) {
     827      251346 :       if (stat_buf[i] == '\n' && stat_buf[i+1] == 'c'
     828        1467 :           && stat_buf[i+2] == 'p' && stat_buf[i+3] == 'u') {
     829         652 :         int cpu_no = atoi(&stat_buf[i + 4]);
     830         652 :         if (cpu_no >= result)
     831         489 :           result = cpu_no + 1;
     832             :       }
     833             :     }
     834         163 :     return result;
     835             :   }
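                     : 
                     :   /*
                     :    * For reference, the loop above counts "cpu<n>" lines in input of
                     :    * roughly the following shape (values abridged; they are per-state
                     :    * CPU times and are irrelevant here):
                     :    * +  cpu  27739 2657 60769 ...
                     :    * +  cpu0 13753 1322 30430 ...
                     :    * +  cpu1 14001 1335 30339 ...
                     :    * This example yields 2.  The aggregate first line is not counted,
                     :    * since the scan only matches "cpu" right after a newline.
                     :    */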
     836             : 
     837             : #elif defined(GC_DGUX386_THREADS)
     838             :   /* Return the number of processors, or a value <= 0 if it cannot be determined. */
     839             :   STATIC int GC_get_nprocs(void)
     840             :   {
     841             :     int numCpus;
     842             :     struct dg_sys_info_pm_info pm_sysinfo;
     843             :     int status = 0;
     844             : 
     845             :     status = dg_sys_info((long int *) &pm_sysinfo,
     846             :         DG_SYS_INFO_PM_INFO_TYPE, DG_SYS_INFO_PM_CURRENT_VERSION);
     847             :     if (status < 0)
     848             :        /* set -1 for error */
     849             :        numCpus = -1;
     850             :     else
     851             :       /* Active CPUs */
     852             :       numCpus = pm_sysinfo.idle_vp_count;
     853             :     return(numCpus);
     854             :   }
     855             : 
     856             : #elif defined(GC_DARWIN_THREADS) || defined(GC_FREEBSD_THREADS) \
     857             :       || defined(GC_NETBSD_THREADS) || defined(GC_OPENBSD_THREADS)
     858             :   STATIC int GC_get_nprocs(void)
     859             :   {
     860             :     int mib[] = {CTL_HW,HW_NCPU};
     861             :     int res;
     862             :     size_t len = sizeof(res);
     863             : 
     864             :     sysctl(mib, sizeof(mib)/sizeof(int), &res, &len, NULL, 0);
     865             :     return res;
     866             :   }
     867             : 
     868             : #else
     869             :   /* E.g., GC_RTEMS_PTHREADS */
     870             : # define GC_get_nprocs() 1 /* not implemented */
     871             : #endif /* !GC_LINUX_THREADS && !GC_DARWIN_THREADS && ... */
     872             : 
     873             : #if defined(ARM32) && defined(GC_LINUX_THREADS) && !defined(NACL)
     874             :   /* Some buggy Linux/arm kernels show only non-sleeping CPUs in        */
     875             :   /* /proc/stat (and /proc/cpuinfo), so another system data source is   */
     876             :   /* tried first.  Result <= 0 on error.                                */
     877             :   STATIC int GC_get_nprocs_present(void)
     878             :   {
     879             :     char stat_buf[16];
     880             :     int f;
     881             :     int len;
     882             : 
     883             :     f = open("/sys/devices/system/cpu/present", O_RDONLY);
     884             :     if (f < 0)
     885             :       return -1; /* cannot open the file */
     886             : 
     887             :     len = STAT_READ(f, stat_buf, sizeof(stat_buf));
     888             :     close(f);
     889             : 
     890             :     /* Recognized file format: "0\n" or "0-<max_cpu_id>\n"      */
     891             :     /* The file might also contain a comma-separated list, but  */
     892             :     /* we do not need to handle that case (it is just ignored). */
     893             :     if (len < 2 || stat_buf[0] != '0' || stat_buf[len - 1] != '\n') {
     894             :       return 0; /* read error or unrecognized content */
     895             :     } else if (len == 2) {
     896             :       return 1; /* a uniprocessor */
     897             :     } else if (stat_buf[1] != '-') {
     898             :       return 0; /* unrecognized content */
     899             :     }
     900             : 
     901             :     stat_buf[len - 1] = '\0'; /* terminate the string */
     902             :     return atoi(&stat_buf[2]) + 1; /* skip "0-" and parse max_cpu_num */
     903             :   }
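                     : 
                     :   /* E.g., a file containing "0-7\n" yields 8; "0\n" yields 1. */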
     904             : #endif /* ARM32 && GC_LINUX_THREADS && !NACL */
     905             : 
     906             : /* We hold the GC lock.  Wait until an in-progress GC has finished.     */
     907             : /* Repeatedly RELEASES GC LOCK in order to wait.                        */
     908             : /* If wait_for_all is true, then we exit with the GC lock held and no   */
     909             : /* collection in progress; otherwise we just wait for the current GC    */
     910             : /* to finish.                                                           */
     911         151 : STATIC void GC_wait_for_gc_completion(GC_bool wait_for_all)
     912             : {
     913             :     DCL_LOCK_STATE;
     914             :     GC_ASSERT(I_HOLD_LOCK());
     915             :     ASSERT_CANCEL_DISABLED();
     916         151 :     if (GC_incremental && GC_collection_in_progress()) {
     917           0 :         word old_gc_no = GC_gc_no;
     918             : 
     919             :         /* Make sure that no part of our stack is still on the mark stack, */
     920             :         /* since it's about to be unmapped.                                */
     921           0 :         while (GC_incremental && GC_collection_in_progress()
     922           0 :                && (wait_for_all || old_gc_no == GC_gc_no)) {
     923           0 :             ENTER_GC();
     924           0 :             GC_in_thread_creation = TRUE;
     925           0 :             GC_collect_a_little_inner(1);
     926           0 :             GC_in_thread_creation = FALSE;
     927           0 :             EXIT_GC();
     928           0 :             UNLOCK();
     929           0 :             sched_yield();
     930           0 :             LOCK();
     931             : /* it safe to call GC_malloc() in a forked child.  It's unclear that this is */
     932             :     }
     933         151 : }
     934             : 
     935             : #ifdef CAN_HANDLE_FORK
     936             : /* Procedures called before and after a fork.  The goal here is to make */
     937             : /* it safe to call GC_malloc() in a forked child.  It's unclear that is */
     938             : /* attainable, since the single UNIX spec seems to imply that one       */
     939             : /* should only call async-signal-safe functions, and we probably can't  */
     940             : /* quite guarantee that.  But we give it our best shot.  (That same     */
     941             : /* spec also implies that it's not safe to call the system malloc       */
     942             : /* between fork() and exec().  Thus we're doing no worse than it.)      */
     943             : 
     944             : IF_CANCEL(static int fork_cancel_state;)
     945             :                                 /* protected by allocation lock.        */
     946             : 
     947             : /* Called before a fork()               */
     948           0 : static void fork_prepare_proc(void)
     949             : {
     950             :     /* Acquire all relevant locks, so that after releasing the locks    */
     951             :     /* the child will see a consistent state in which monitor           */
     952             :     /* invariants hold.  Unfortunately, we can't acquire libc locks     */
     953             :     /* we might need, and there seems to be no guarantee that libc      */
     954             :     /* must install a suitable fork handler.                            */
     955             :     /* Wait for an ongoing GC to finish, since we can't finish it in    */
     956             :     /* the (one remaining thread in the) child.                         */
     957           0 :       LOCK();
     958           0 :       DISABLE_CANCEL(fork_cancel_state);
     959             :                 /* Following waits may include cancellation points. */
     960             : #     if defined(PARALLEL_MARK)
     961           0 :         if (GC_parallel)
     962           0 :           GC_wait_for_reclaim();
     963             : #     endif
     964           0 :       GC_wait_for_gc_completion(TRUE);
     965             : #     if defined(PARALLEL_MARK)
     966           0 :         if (GC_parallel)
     967           0 :           GC_acquire_mark_lock();
     968             : #     endif
     969           0 : }
     970             : 
     971             : /* Called in parent after a fork() (even if the latter failed). */
     972           0 : static void fork_parent_proc(void)
     973             : {
     974             : #   if defined(PARALLEL_MARK)
     975           0 :       if (GC_parallel)
     976           0 :         GC_release_mark_lock();
     977             : #   endif
     978           0 :     RESTORE_CANCEL(fork_cancel_state);
     979           0 :     UNLOCK();
     980           0 : }
     981             : 
     982             : /* Called in child after a fork()       */
     983           0 : static void fork_child_proc(void)
     984             : {
     985             :     /* Clean up the thread table, so that just our thread is left. */
     986             : #   if defined(PARALLEL_MARK)
     987           0 :       if (GC_parallel)
     988           0 :         GC_release_mark_lock();
     989             : #   endif
     990           0 :     GC_remove_all_threads_but_me();
     991             : #   ifdef PARALLEL_MARK
     992             :       /* Turn off parallel marking in the child, since we are probably  */
     993             :       /* just going to exec, and we would have to restart mark threads. */
     994           0 :         GC_parallel = FALSE;
     995             : #   endif /* PARALLEL_MARK */
     996           0 :     RESTORE_CANCEL(fork_cancel_state);
     997           0 :     UNLOCK();
     998           0 : }
     999             : 
    1000             :   /* Routines for fork handling by the client (no-op if pthread_atfork works). */
    1001           0 :   GC_API void GC_CALL GC_atfork_prepare(void)
    1002             :   {
    1003             : #   if defined(GC_DARWIN_THREADS) && defined(MPROTECT_VDB)
    1004             :       if (GC_dirty_maintained) {
    1005             :         GC_ASSERT(0 == GC_handle_fork);
    1006             :         ABORT("Unable to fork while mprotect_thread is running");
    1007             :       }
    1008             : #   endif
    1009           0 :     if (GC_handle_fork <= 0)
    1010           0 :       fork_prepare_proc();
    1011           0 :   }
    1012             : 
    1013           0 :   GC_API void GC_CALL GC_atfork_parent(void)
    1014             :   {
    1015           0 :     if (GC_handle_fork <= 0)
    1016           0 :       fork_parent_proc();
    1017           0 :   }
    1018             : 
    1019           0 :   GC_API void GC_CALL GC_atfork_child(void)
    1020             :   {
    1021           0 :     if (GC_handle_fork <= 0)
    1022           0 :       fork_child_proc();
    1023           0 :   }
    1024             : #endif /* CAN_HANDLE_FORK */
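
/*
 * Illustrative sketch (not part of the measured file): a client on a
 * platform where pthread_atfork() is unusable can bracket fork() with
 * the three routines above.  client_fork() is a hypothetical helper;
 * error handling is omitted.
 */
#include <unistd.h>
#include "gc.h"

static pid_t client_fork(void)
{
    pid_t pid;

    GC_atfork_prepare();        /* wait for GC to finish, take the locks */
    pid = fork();
    if (pid != 0) {
        GC_atfork_parent();     /* parent, or fork() failed: release locks */
    } else {
        GC_atfork_child();      /* child: prune thread table, release locks */
    }
    return pid;
}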
    1025             : 
    1026             : #ifdef INCLUDE_LINUX_THREAD_DESCR
    1027             :   __thread int GC_dummy_thread_local;
    1028             :   GC_INNER GC_bool GC_enclosing_mapping(ptr_t addr,
    1029             :                                         ptr_t *startp, ptr_t *endp);
    1030             : #endif
    1031             : 
    1032             : /* We hold the allocation lock. */
    1033         163 : GC_INNER void GC_thr_init(void)
    1034             : {
    1035         163 :   if (GC_thr_initialized) return;
    1036         163 :   GC_thr_initialized = TRUE;
    1037             : 
    1038             :   GC_ASSERT((word)&GC_threads % sizeof(word) == 0);
    1039             : # ifdef CAN_HANDLE_FORK
    1040             :     /* Prepare for forks if requested.  */
    1041         163 :     if (GC_handle_fork) {
    1042             : #     ifdef CAN_CALL_ATFORK
    1043           0 :         if (pthread_atfork(fork_prepare_proc, fork_parent_proc,
    1044             :                            fork_child_proc) == 0) {
    1045             :           /* Handlers successfully registered.  */
    1046           0 :           GC_handle_fork = 1;
    1047             :         } else
    1048             : #     endif
    1049           0 :       /* else */ if (GC_handle_fork != -1)
    1050           0 :         ABORT("pthread_atfork failed");
    1051             :     }
    1052             : # endif
    1053             : # ifdef INCLUDE_LINUX_THREAD_DESCR
    1054             :     /* Explicitly register the region including the address     */
    1055             :     /* of a thread local variable.  This should include thread  */
    1056             :     /* locals for the main thread, except for those allocated   */
    1057             :     /* in response to dlopen calls.                             */
    1058             :     {
    1059             :       ptr_t thread_local_addr = (ptr_t)(&GC_dummy_thread_local);
    1060             :       ptr_t main_thread_start, main_thread_end;
    1061             :       if (!GC_enclosing_mapping(thread_local_addr, &main_thread_start,
    1062             :                                 &main_thread_end)) {
    1063             :         ABORT("Failed to find mapping for main thread thread locals");
    1064             :       } else {
    1065             :         /* main_thread_start and main_thread_end are initialized.       */
    1066             :         GC_add_roots_inner(main_thread_start, main_thread_end, FALSE);
    1067             :       }
    1068             :     }
    1069             : # endif
    1070             :   /* Add the initial thread, so we can stop it. */
    1071             :   {
    1072         163 :     GC_thread t = GC_new_thread(pthread_self());
    1073         163 :     if (t == NULL)
    1074           0 :       ABORT("Failed to allocate memory for the initial thread");
    1075             : #   ifdef GC_DARWIN_THREADS
    1076             :       t -> stop_info.mach_thread = mach_thread_self();
    1077             : #   else
    1078         163 :       t -> stop_info.stack_ptr = GC_approx_sp();
    1079             : #   endif
    1080         163 :     t -> flags = DETACHED | MAIN_THREAD;
    1081             :   }
    1082             : 
    1083             : # ifndef GC_DARWIN_THREADS
    1084         163 :     GC_stop_init();
    1085             : # endif
    1086             : 
    1087             :   /* Set GC_nprocs.     */
    1088             :   {
    1089         163 :     char * nprocs_string = GETENV("GC_NPROCS");
    1090         163 :     GC_nprocs = -1;
    1091         163 :     if (nprocs_string != NULL) GC_nprocs = atoi(nprocs_string);
    1092             :   }
    1093         163 :   if (GC_nprocs <= 0
    1094             : #     if defined(ARM32) && defined(GC_LINUX_THREADS) && !defined(NACL)
    1095             :         && (GC_nprocs = GC_get_nprocs_present()) <= 1
    1096             :                                 /* Workaround for some Linux/arm kernels */
    1097             : #     endif
    1098             :       )
    1099             :   {
    1100         163 :     GC_nprocs = GC_get_nprocs();
    1101             :   }
    1102         163 :   if (GC_nprocs <= 0) {
    1103           0 :     WARN("GC_get_nprocs() returned %" WARN_PRIdPTR "\n", GC_nprocs);
    1104           0 :     GC_nprocs = 2; /* assume dual-core */
    1105             : #   ifdef PARALLEL_MARK
    1106           0 :       available_markers_m1 = 0; /* but use only one marker */
    1107             : #   endif
    1108             :   } else {
    1109             : #  ifdef PARALLEL_MARK
    1110             :      {
    1111         163 :        char * markers_string = GETENV("GC_MARKERS");
    1112             :        int markers_m1;
    1113             : 
    1114         163 :        if (markers_string != NULL) {
    1115           0 :          markers_m1 = atoi(markers_string) - 1;
    1116           0 :          if (markers_m1 >= MAX_MARKERS) {
    1117           0 :            WARN("Limiting number of mark threads\n", 0);
    1118           0 :            markers_m1 = MAX_MARKERS - 1;
    1119             :          }
    1120             :        } else {
    1121         163 :          markers_m1 = GC_nprocs - 1;
    1122             : #        ifdef GC_MIN_MARKERS
    1123             :            /* This is primarily for targets without getenv().   */
    1124             :            if (markers_m1 < GC_MIN_MARKERS - 1)
    1125             :              markers_m1 = GC_MIN_MARKERS - 1;
    1126             : #        endif
    1127         163 :          if (markers_m1 >= MAX_MARKERS)
    1128           0 :            markers_m1 = MAX_MARKERS - 1; /* silently limit the value */
    1129             :        }
    1130         163 :        available_markers_m1 = markers_m1;
    1131             :      }
    1132             : #  endif
    1133             :   }
    1134         163 :   GC_COND_LOG_PRINTF("Number of processors = %d\n", GC_nprocs);
    1135             : # ifdef PARALLEL_MARK
    1136         163 :     if (available_markers_m1 <= 0) {
    1137             :       /* Disable parallel marking.      */
    1138           0 :       GC_parallel = FALSE;
    1139           0 :       GC_COND_LOG_PRINTF(
    1140             :                 "Single marker thread, turning off parallel marking\n");
    1141             :     } else {
    1142             :       /* Disable true incremental collection, but generational is OK.   */
    1143         163 :       GC_time_limit = GC_TIME_UNLIMITED;
    1144             :       /* If we are using a parallel marker, actually start helper threads. */
    1145         163 :       start_mark_threads();
    1146             :     }
    1147             : # endif
    1148             : }
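
/*
 * Illustrative sketch (not part of the measured file): the processor
 * and marker counts computed above can be pinned by the client through
 * the GC_NPROCS and GC_MARKERS environment variables, provided they are
 * exported before the collector initializes.  The values below are
 * arbitrary examples.
 */
#include <stdlib.h>
#include "gc.h"

static void client_configure_gc(void)
{
    setenv("GC_NPROCS", "4", 1);    /* report 4 processors to the GC */
    setenv("GC_MARKERS", "2", 1);   /* two mark threads, i.e. markers_m1 == 1 */
    GC_INIT();                      /* GC_thr_init() then reads both */
}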
    1149             : 
    1150             : /* Perform all initializations, including those that    */
    1151             : /* may require allocation.                              */
    1152             : /* Called without allocation lock.                      */
    1153             : /* Must be called before a second thread is created.    */
    1154             : /* Did we say it's called without the allocation lock?  */
    1155         163 : GC_INNER void GC_init_parallel(void)
    1156             : {
    1157             : #   if defined(THREAD_LOCAL_ALLOC)
    1158             :       DCL_LOCK_STATE;
    1159             : #   endif
    1160         163 :     if (parallel_initialized) return;
    1161         163 :     parallel_initialized = TRUE;
    1162             : 
    1163             :     /* GC_init() calls us back, so set flag first.      */
    1164         163 :     if (!GC_is_initialized) GC_init();
    1165             :     /* Initialize thread local free lists if used.      */
    1166             : #   if defined(THREAD_LOCAL_ALLOC)
    1167         163 :       LOCK();
    1168         163 :       GC_init_thread_local(&(GC_lookup_thread(pthread_self())->tlfs));
    1169         163 :       UNLOCK();
    1170             : #   endif
    1171             : }
    1172             : 
    1173             : #ifndef GC_NO_PTHREAD_SIGMASK
    1174           0 :   GC_API int WRAP_FUNC(pthread_sigmask)(int how, const sigset_t *set,
    1175             :                                         sigset_t *oset)
    1176             :   {
    1177             :     sigset_t fudged_set;
    1178             :     int sig_suspend;
    1179             : 
    1180             :     INIT_REAL_SYMS();
    1181           0 :     if (set != NULL && (how == SIG_BLOCK || how == SIG_SETMASK)) {
    1182           0 :         fudged_set = *set;
    1183           0 :         sig_suspend = GC_get_suspend_signal();
    1184             :         GC_ASSERT(sig_suspend >= 0);
    1185           0 :         sigdelset(&fudged_set, sig_suspend);
    1186           0 :         set = &fudged_set;
    1187             :     }
    1188           0 :     return(REAL_FUNC(pthread_sigmask)(how, set, oset));
    1189             :   }
    1190             : #endif /* !GC_NO_PTHREAD_SIGMASK */
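
/*
 * Illustrative sketch (not part of the measured file): even a client
 * that blocks "all" signals cannot break the world-stop mechanism,
 * because the wrapper above removes the suspend signal from any
 * SIG_BLOCK or SIG_SETMASK request before the real pthread_sigmask()
 * sees it.
 */
#include <signal.h>
#include <pthread.h>

static void client_block_signals(void)
{
    sigset_t all;

    sigfillset(&all);
    /* Resolves to the wrapper above, which deletes the value of       */
    /* GC_get_suspend_signal() from the set before the real call.      */
    (void)pthread_sigmask(SIG_BLOCK, &all, NULL);
}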
    1191             : 
    1192             : /* Wrapper for functions that are likely to block for an appreciable    */
    1193             : /* length of time.                                                      */
    1194             : 
    1195           0 : GC_INNER void GC_do_blocking_inner(ptr_t data, void * context GC_ATTR_UNUSED)
    1196             : {
    1197           0 :     struct blocking_data * d = (struct blocking_data *) data;
    1198           0 :     pthread_t self = pthread_self();
    1199             :     GC_thread me;
    1200             : #   if defined(SPARC) || defined(IA64)
    1201             :         ptr_t stack_ptr = GC_save_regs_in_stack();
    1202             : #   endif
    1203             : #   if defined(GC_DARWIN_THREADS) && !defined(DARWIN_DONT_PARSE_STACK)
    1204             :         GC_bool topOfStackUnset = FALSE;
    1205             : #   endif
    1206             :     DCL_LOCK_STATE;
    1207             : 
    1208           0 :     LOCK();
    1209           0 :     me = GC_lookup_thread(self);
    1210             :     GC_ASSERT(!(me -> thread_blocked));
    1211             : #   ifdef SPARC
    1212             :         me -> stop_info.stack_ptr = stack_ptr;
    1213             : #   else
    1214           0 :         me -> stop_info.stack_ptr = GC_approx_sp();
    1215             : #   endif
    1216             : #   if defined(GC_DARWIN_THREADS) && !defined(DARWIN_DONT_PARSE_STACK)
    1217             :         if (me -> topOfStack == NULL) {
    1218             :             /* GC_do_blocking_inner is not called recursively,  */
    1219             :             /* so topOfStack should be computed now.            */
    1220             :             topOfStackUnset = TRUE;
    1221             :             me -> topOfStack = GC_FindTopOfStack(0);
    1222             :         }
    1223             : #   endif
    1224             : #   ifdef IA64
    1225             :         me -> backing_store_ptr = stack_ptr;
    1226             : #   endif
    1227           0 :     me -> thread_blocked = (unsigned char)TRUE;
    1228             :     /* Save context here if we want to support precise stack marking */
    1229           0 :     UNLOCK();
    1230           0 :     d -> client_data = (d -> fn)(d -> client_data);
    1231           0 :     LOCK();   /* This will block if the world is stopped.       */
    1232           0 :     me -> thread_blocked = FALSE;
    1233             : #   if defined(GC_DARWIN_THREADS) && !defined(DARWIN_DONT_PARSE_STACK)
    1234             :         if (topOfStackUnset)
    1235             :             me -> topOfStack = NULL; /* make topOfStack unset again */
    1236             : #   endif
    1237           0 :     UNLOCK();
    1238           0 : }
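
/*
 * Illustrative client use (not part of the measured file) of
 * GC_do_blocking(), declared in gc.h, which reaches
 * GC_do_blocking_inner() above: the thread is flagged as blocked, so a
 * stop-the-world need not suspend it around the call.  do_long_read()
 * and its file descriptor are hypothetical.
 */
#include <unistd.h>
#include "gc.h"

static void * do_long_read(void * buf)
{
    /* May block indefinitely; must not call GC routines or    */
    /* manipulate pointers into the GC heap while in here.     */
    (void)read(0, buf, 4096);
    return buf;
}

static void client_read(void * buffer)
{
    (void)GC_do_blocking(do_long_read, buffer);
}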
    1239             : 
     1240             : /* GC_call_with_gc_active() has functionality opposite to               */
     1241             : /* GC_do_blocking().  It might be called from a user function invoked   */
     1242             : /* by GC_do_blocking() to temporarily allow calling any GC function     */
     1243             : /* and/or manipulating pointers to the garbage-collected heap.          */
    1244           0 : GC_API void * GC_CALL GC_call_with_gc_active(GC_fn_type fn,
    1245             :                                              void * client_data)
    1246             : {
    1247             :     struct GC_traced_stack_sect_s stacksect;
    1248           0 :     pthread_t self = pthread_self();
    1249             :     GC_thread me;
    1250             :     DCL_LOCK_STATE;
    1251             : 
    1252           0 :     LOCK();   /* This will block if the world is stopped.       */
    1253           0 :     me = GC_lookup_thread(self);
    1254             : 
     1255             :     /* Adjust our stack base value (this may be needed unless   */
     1256             :     /* it came from GC_get_stack_base() returning GC_SUCCESS).  */
    1257           0 :     if ((me -> flags & MAIN_THREAD) == 0) {
    1258             :       GC_ASSERT(me -> stack_end != NULL);
    1259           0 :       if ((word)me->stack_end HOTTER_THAN (word)(&stacksect))
    1260           0 :         me -> stack_end = (ptr_t)(&stacksect);
    1261             :     } else {
    1262             :       /* The original stack. */
    1263           0 :       if ((word)GC_stackbottom HOTTER_THAN (word)(&stacksect))
    1264           0 :         GC_stackbottom = (ptr_t)(&stacksect);
    1265             :     }
    1266             : 
    1267           0 :     if (!me->thread_blocked) {
    1268             :       /* We are not inside GC_do_blocking() - do nothing more.  */
    1269           0 :       UNLOCK();
    1270           0 :       client_data = fn(client_data);
    1271             :       /* Prevent treating the above as a tail call.     */
    1272           0 :       GC_noop1((word)(&stacksect));
    1273           0 :       return client_data; /* result */
    1274             :     }
    1275             : 
     1276             :     /* Set up a new "stack section".    */
    1277           0 :     stacksect.saved_stack_ptr = me -> stop_info.stack_ptr;
    1278             : #   ifdef IA64
    1279             :       /* This is the same as in GC_call_with_stack_base().      */
    1280             :       stacksect.backing_store_end = GC_save_regs_in_stack();
    1281             :       /* Unnecessarily flushes register stack,          */
    1282             :       /* but that probably doesn't hurt.                */
    1283             :       stacksect.saved_backing_store_ptr = me -> backing_store_ptr;
    1284             : #   endif
    1285           0 :     stacksect.prev = me -> traced_stack_sect;
    1286           0 :     me -> thread_blocked = FALSE;
    1287           0 :     me -> traced_stack_sect = &stacksect;
    1288             : 
    1289           0 :     UNLOCK();
    1290           0 :     client_data = fn(client_data);
    1291             :     GC_ASSERT(me -> thread_blocked == FALSE);
    1292             :     GC_ASSERT(me -> traced_stack_sect == &stacksect);
    1293             : 
    1294             :     /* Restore original "stack section".        */
    1295           0 :     LOCK();
    1296           0 :     me -> traced_stack_sect = stacksect.prev;
    1297             : #   ifdef IA64
    1298             :       me -> backing_store_ptr = stacksect.saved_backing_store_ptr;
    1299             : #   endif
    1300           0 :     me -> thread_blocked = (unsigned char)TRUE;
    1301           0 :     me -> stop_info.stack_ptr = stacksect.saved_stack_ptr;
    1302           0 :     UNLOCK();
    1303             : 
    1304           0 :     return client_data; /* result */
    1305             : }
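
/*
 * Illustrative counterpart (not part of the measured file): code
 * running under GC_do_blocking() can wrap a short allocating section in
 * GC_call_with_gc_active() to make GC calls legal again.  The function
 * names are hypothetical.
 */
#include "gc.h"

static void * allocating_section(void * arg)
{
    (void)arg;                  /* unused in this sketch */
    return GC_MALLOC(64);       /* GC calls are permitted again here */
}

static void * mostly_blocked(void * arg)
{
    void * p = GC_call_with_gc_active(allocating_section, arg);
    /* ... long blocking work with no GC-heap access ... */
    return p;
}

/* Typically invoked as: GC_do_blocking(mostly_blocked, NULL); */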
    1306             : 
    1307         151 : STATIC void GC_unregister_my_thread_inner(GC_thread me)
    1308             : {
    1309             : #   ifdef DEBUG_THREADS
    1310             :       GC_log_printf(
    1311             :                 "Unregistering thread %p, gc_thread = %p, n_threads = %d\n",
    1312             :                 (void *)me->id, me, GC_count_threads());
    1313             : #   endif
    1314             :     GC_ASSERT(!(me -> flags & FINISHED));
    1315             : #   if defined(THREAD_LOCAL_ALLOC)
    1316             :       GC_ASSERT(GC_getspecific(GC_thread_key) == &me->tlfs);
    1317         151 :       GC_destroy_thread_local(&(me->tlfs));
    1318             : #   endif
    1319             : #   if defined(GC_PTHREAD_EXIT_ATTRIBUTE) || !defined(GC_NO_PTHREAD_CANCEL)
    1320             :       /* Handle DISABLED_GC flag which is set by the    */
    1321             :       /* intercepted pthread_cancel or pthread_exit.    */
    1322         151 :       if ((me -> flags & DISABLED_GC) != 0) {
    1323           0 :         GC_dont_gc--;
    1324             :       }
    1325             : #   endif
    1326         151 :     if (me -> flags & DETACHED) {
    1327         151 :         GC_delete_thread(pthread_self());
    1328             :     } else {
    1329           0 :         me -> flags |= FINISHED;
    1330             :     }
    1331             : #   if defined(THREAD_LOCAL_ALLOC)
     1332             :       /* We must call GC_remove_specific(), defined in specific.c.    */
    1333             :       GC_remove_specific(GC_thread_key);
    1334             : #   endif
    1335         151 : }
    1336             : 
    1337           0 : GC_API int GC_CALL GC_unregister_my_thread(void)
    1338             : {
    1339           0 :     pthread_t self = pthread_self();
    1340             :     GC_thread me;
    1341             :     IF_CANCEL(int cancel_state;)
    1342             :     DCL_LOCK_STATE;
    1343             : 
    1344           0 :     LOCK();
    1345           0 :     DISABLE_CANCEL(cancel_state);
    1346             :     /* Wait for any GC that may be marking from our stack to    */
    1347             :     /* complete before we remove this thread.                   */
    1348           0 :     GC_wait_for_gc_completion(FALSE);
    1349           0 :     me = GC_lookup_thread(self);
    1350             : #   ifdef DEBUG_THREADS
    1351             :         GC_log_printf(
    1352             :                 "Called GC_unregister_my_thread on %p, gc_thread = %p\n",
    1353             :                 (void *)self, me);
    1354             : #   endif
    1355             :     GC_ASSERT(me->id == self);
    1356           0 :     GC_unregister_my_thread_inner(me);
    1357           0 :     RESTORE_CANCEL(cancel_state);
    1358           0 :     UNLOCK();
    1359           0 :     return GC_SUCCESS;
    1360             : }
    1361             : 
     1362             : /* Called at thread exit.                               */
     1363             : /* Never called for the main thread.  That's OK, since  */
     1364             : /* it results in at most a tiny one-time leak.  And     */
     1365             : /* LinuxThreads doesn't reclaim the main thread's       */
     1366             : /* resources or id anyway.                              */
    1367         151 : GC_INNER_PTHRSTART void GC_thread_exit_proc(void *arg)
    1368             : {
    1369             : #   ifdef DEBUG_THREADS
    1370             :         GC_log_printf("Called GC_thread_exit_proc on %p, gc_thread = %p\n",
    1371             :                       (void *)((GC_thread)arg)->id, arg);
    1372             : #   endif
    1373             :     IF_CANCEL(int cancel_state;)
    1374             :     DCL_LOCK_STATE;
    1375             : 
    1376         151 :     LOCK();
    1377         151 :     DISABLE_CANCEL(cancel_state);
    1378         151 :     GC_wait_for_gc_completion(FALSE);
    1379         151 :     GC_unregister_my_thread_inner((GC_thread)arg);
    1380         151 :     RESTORE_CANCEL(cancel_state);
    1381         151 :     UNLOCK();
    1382         151 : }
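
/*
 * Hedged reconstruction (not part of the measured file) of how
 * pthread_start.c arranges for the handler above to run: it is pushed
 * as a pthread cleanup handler, so it also fires on cancellation.
 * inner_start() and its parameters are hypothetical names.
 */
#include <pthread.h>

static void * inner_start(GC_thread me, void *(*start)(void *), void * arg)
{
    void * result;

    pthread_cleanup_push(GC_thread_exit_proc, me);
    result = start(arg);
    pthread_cleanup_pop(1);     /* run the handler on normal return too */
    return result;
}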
    1383             : 
    1384           0 : GC_API int WRAP_FUNC(pthread_join)(pthread_t thread, void **retval)
    1385             : {
    1386             :     int result;
    1387             :     GC_thread t;
    1388             :     DCL_LOCK_STATE;
    1389             : 
    1390             :     INIT_REAL_SYMS();
    1391           0 :     LOCK();
    1392           0 :     t = GC_lookup_thread(thread);
    1393             :     /* This is guaranteed to be the intended one, since the thread id   */
    1394             :     /* can't have been recycled by pthreads.                            */
    1395           0 :     UNLOCK();
    1396           0 :     result = REAL_FUNC(pthread_join)(thread, retval);
    1397             : # if defined(GC_FREEBSD_THREADS)
     1398             :     /* On FreeBSD, the wrapped pthread_join() sometimes returns (what
     1399             :        appears to be) a spurious EINTR, which caused both the tests and
     1400             :        real code to fail gratuitously.  Having looked at the system
     1401             :        pthread library source code, I see how this return code may be
     1402             :        generated: in one code path, pthread_join() just returns the
     1403             :        errno setting of the thread being joined.  This does not match
     1404             :        the POSIX specification or the local man pages, so I catch this
     1405             :        one spurious return value, conditionalized on GC_FREEBSD_THREADS. */
    1406             :     if (result == EINTR) result = 0;
    1407             : # endif
    1408           0 :     if (result == 0) {
    1409           0 :         LOCK();
    1410             :         /* Here the pthread thread id may have been recycled. */
    1411             :         GC_ASSERT((t -> flags & FINISHED) != 0);
    1412           0 :         GC_delete_gc_thread(t);
    1413           0 :         UNLOCK();
    1414             :     }
    1415           0 :     return result;
    1416             : }
    1417             : 
    1418           0 : GC_API int WRAP_FUNC(pthread_detach)(pthread_t thread)
    1419             : {
    1420             :     int result;
    1421             :     GC_thread t;
    1422             :     DCL_LOCK_STATE;
    1423             : 
    1424             :     INIT_REAL_SYMS();
    1425           0 :     LOCK();
    1426           0 :     t = GC_lookup_thread(thread);
    1427           0 :     UNLOCK();
    1428           0 :     result = REAL_FUNC(pthread_detach)(thread);
    1429           0 :     if (result == 0) {
    1430           0 :       LOCK();
    1431           0 :       t -> flags |= DETACHED;
    1432             :       /* Here the pthread thread id may have been recycled. */
    1433           0 :       if ((t -> flags & FINISHED) != 0) {
    1434           0 :         GC_delete_gc_thread(t);
    1435             :       }
    1436           0 :       UNLOCK();
    1437             :     }
    1438           0 :     return result;
    1439             : }
    1440             : 
    1441             : #ifndef GC_NO_PTHREAD_CANCEL
     1442             :   /* We must deal with the fact that, apparently on Solaris and         */
     1443             :   /* probably on some Linux systems, we can't collect while a thread    */
     1444             :   /* is exiting, since signals aren't handled properly then.  This      */
     1445             :   /* currently gives rise to deadlocks.  The only workaround seen is    */
     1446             :   /* to intercept pthread_cancel() and pthread_exit(), and disable      */
     1447             :   /* collections until the thread exit handler is called.  That's       */
     1448             :   /* ugly, because we risk growing the heap unnecessarily.  But it      */
     1449             :   /* seems we have no real option, in that the process is not in a      */
     1450             :   /* fully functional state while a thread is exiting.                  */
    1451           0 :   GC_API int WRAP_FUNC(pthread_cancel)(pthread_t thread)
    1452             :   {
    1453             : #   ifdef CANCEL_SAFE
    1454             :       GC_thread t;
    1455             :       DCL_LOCK_STATE;
    1456             : #   endif
    1457             : 
    1458             :     INIT_REAL_SYMS();
    1459             : #   ifdef CANCEL_SAFE
    1460           0 :       LOCK();
    1461           0 :       t = GC_lookup_thread(thread);
    1462             :       /* We test DISABLED_GC because pthread_exit could be called at    */
    1463             :       /* the same time.  (If t is NULL then pthread_cancel should       */
    1464             :       /* return ESRCH.)                                                 */
    1465           0 :       if (t != NULL && (t -> flags & DISABLED_GC) == 0) {
    1466           0 :         t -> flags |= DISABLED_GC;
    1467           0 :         GC_dont_gc++;
    1468             :       }
    1469           0 :       UNLOCK();
    1470             : #   endif
    1471           0 :     return REAL_FUNC(pthread_cancel)(thread);
    1472             :   }
    1473             : #endif /* !GC_NO_PTHREAD_CANCEL */
    1474             : 
    1475             : #ifdef GC_PTHREAD_EXIT_ATTRIBUTE
    1476           0 :   GC_API GC_PTHREAD_EXIT_ATTRIBUTE void WRAP_FUNC(pthread_exit)(void *retval)
    1477             :   {
    1478           0 :     pthread_t self = pthread_self();
    1479             :     GC_thread me;
    1480             :     DCL_LOCK_STATE;
    1481             : 
    1482             :     INIT_REAL_SYMS();
    1483           0 :     LOCK();
    1484           0 :     me = GC_lookup_thread(self);
    1485             :     /* We test DISABLED_GC because someone else could call    */
    1486             :     /* pthread_cancel at the same time.                       */
    1487           0 :     if (me != 0 && (me -> flags & DISABLED_GC) == 0) {
    1488           0 :       me -> flags |= DISABLED_GC;
    1489           0 :       GC_dont_gc++;
    1490             :     }
    1491           0 :     UNLOCK();
    1492             : 
    1493             : #   ifdef NACL
    1494             :       /* Native Client doesn't support pthread cleanup functions, */
     1495             :       /* so clean up the thread here.                             */
    1496             :       GC_thread_exit_proc(0);
    1497             : #   endif
    1498             : 
    1499           0 :     REAL_FUNC(pthread_exit)(retval);
    1500             :   }
    1501             : #endif /* GC_PTHREAD_EXIT_ATTRIBUTE */
    1502             : 
    1503             : GC_INNER GC_bool GC_in_thread_creation = FALSE;
    1504             :                                 /* Protected by allocation lock. */
    1505             : 
    1506         499 : GC_INLINE void GC_record_stack_base(GC_thread me,
    1507             :                                     const struct GC_stack_base *sb)
    1508             : {
    1509             : #   ifndef GC_DARWIN_THREADS
    1510         499 :       me -> stop_info.stack_ptr = sb -> mem_base;
    1511             : #   endif
    1512         499 :     me -> stack_end = sb -> mem_base;
    1513         499 :     if (me -> stack_end == NULL)
    1514           0 :       ABORT("Bad stack base in GC_register_my_thread");
    1515             : #   ifdef IA64
    1516             :       me -> backing_store_end = sb -> reg_base;
    1517             : #   endif
    1518         499 : }
    1519             : 
    1520         499 : STATIC GC_thread GC_register_my_thread_inner(const struct GC_stack_base *sb,
    1521             :                                              pthread_t my_pthread)
    1522             : {
    1523             :     GC_thread me;
    1524             : 
    1525         499 :     GC_in_thread_creation = TRUE; /* OK to collect from unknown thread. */
    1526         499 :     me = GC_new_thread(my_pthread);
    1527         499 :     GC_in_thread_creation = FALSE;
    1528         499 :     if (me == 0)
    1529           0 :       ABORT("Failed to allocate memory for thread registering");
    1530             : #   ifdef GC_DARWIN_THREADS
    1531             :       me -> stop_info.mach_thread = mach_thread_self();
    1532             : #   endif
    1533         499 :     GC_record_stack_base(me, sb);
    1534             : #   ifdef GC_EXPLICIT_SIGNALS_UNBLOCK
    1535             :       /* Since this could be executed from a detached thread    */
    1536             :       /* destructor, our signals might already be blocked.      */
    1537             :       GC_unblock_gc_signals();
    1538             : #   endif
    1539         499 :     return me;
    1540             : }
    1541             : 
    1542           0 : GC_API void GC_CALL GC_allow_register_threads(void)
    1543             : {
    1544             :     /* Check GC is initialized and the current thread is registered. */
    1545             :     GC_ASSERT(GC_lookup_thread(pthread_self()) != 0);
    1546             : 
    1547           0 :     GC_need_to_lock = TRUE; /* We are multi-threaded now. */
    1548           0 : }
    1549             : 
    1550         499 : GC_API int GC_CALL GC_register_my_thread(const struct GC_stack_base *sb)
    1551             : {
    1552         499 :     pthread_t self = pthread_self();
    1553             :     GC_thread me;
    1554             :     DCL_LOCK_STATE;
    1555             : 
    1556         499 :     if (GC_need_to_lock == FALSE)
     1557           0 :         ABORT("Explicit thread registration was not previously enabled");
    1558             : 
    1559         499 :     LOCK();
    1560         499 :     me = GC_lookup_thread(self);
    1561         499 :     if (0 == me) {
    1562           0 :         me = GC_register_my_thread_inner(sb, self);
    1563           0 :         me -> flags |= DETACHED;
    1564             :           /* Treat as detached, since we do not need to worry about     */
    1565             :           /* pointer results.                                           */
    1566             : #       if defined(THREAD_LOCAL_ALLOC)
    1567           0 :           GC_init_thread_local(&(me->tlfs));
    1568             : #       endif
    1569           0 :         UNLOCK();
    1570           0 :         return GC_SUCCESS;
    1571         499 :     } else if ((me -> flags & FINISHED) != 0) {
    1572             :         /* This code is executed when a thread is registered from the   */
    1573             :         /* client thread key destructor.                                */
    1574           0 :         GC_record_stack_base(me, sb);
    1575           0 :         me -> flags &= ~FINISHED; /* but not DETACHED */
    1576             : #       ifdef GC_EXPLICIT_SIGNALS_UNBLOCK
    1577             :           /* Since this could be executed from a thread destructor,     */
    1578             :           /* our signals might be blocked.                              */
    1579             :           GC_unblock_gc_signals();
    1580             : #       endif
    1581             : #       if defined(THREAD_LOCAL_ALLOC)
    1582           0 :           GC_init_thread_local(&(me->tlfs));
    1583             : #       endif
    1584           0 :         UNLOCK();
    1585           0 :         return GC_SUCCESS;
    1586             :     } else {
    1587         499 :         UNLOCK();
    1588         499 :         return GC_DUPLICATE;
    1589             :     }
    1590             : }
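
/*
 * Illustrative sketch (not part of the measured file) of explicit
 * registration for a thread created outside the wrapped
 * pthread_create() (e.g. by a foreign library).  Some registered thread
 * must have called GC_allow_register_threads() beforehand;
 * foreign_thread_body() is hypothetical.
 */
#include "gc.h"

static void foreign_thread_body(void)
{
    struct GC_stack_base sb;

    if (GC_get_stack_base(&sb) != GC_SUCCESS)
        return;                          /* cannot register safely */
    (void)GC_register_my_thread(&sb);    /* GC_SUCCESS or GC_DUPLICATE */
    /* ... may allocate and store GC-heap pointers here ... */
    (void)GC_unregister_my_thread();     /* required before thread exit */
}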
    1591             : 
    1592             : struct start_info {
    1593             :     void *(*start_routine)(void *);
    1594             :     void *arg;
    1595             :     word flags;
    1596             :     sem_t registered;           /* 1 ==> in our thread table, but       */
    1597             :                                 /* parent hasn't yet noticed.           */
    1598             : };
    1599             : 
    1600             : /* Called from GC_inner_start_routine().  Defined in this file to       */
    1601             : /* minimize the number of include files in pthread_start.c (because     */
     1602             : /* sem_t and sem_post() are not used in that file directly).            */
    1603         499 : GC_INNER_PTHRSTART GC_thread GC_start_rtn_prepare_thread(
    1604             :                                         void *(**pstart)(void *),
    1605             :                                         void **pstart_arg,
    1606             :                                         struct GC_stack_base *sb, void *arg)
    1607             : {
    1608         499 :     struct start_info * si = arg;
    1609         499 :     pthread_t self = pthread_self();
    1610             :     GC_thread me;
    1611             :     DCL_LOCK_STATE;
    1612             : 
    1613             : #   ifdef DEBUG_THREADS
    1614             :       GC_log_printf("Starting thread %p, pid = %ld, sp = %p\n",
    1615             :                     (void *)self, (long)getpid(), &arg);
    1616             : #   endif
    1617         499 :     LOCK();
    1618         499 :     me = GC_register_my_thread_inner(sb, self);
    1619         499 :     me -> flags = si -> flags;
    1620             : #   if defined(THREAD_LOCAL_ALLOC)
    1621         499 :       GC_init_thread_local(&(me->tlfs));
    1622             : #   endif
    1623         499 :     UNLOCK();
    1624         499 :     *pstart = si -> start_routine;
    1625             : #   ifdef DEBUG_THREADS
    1626             :       GC_log_printf("start_routine = %p\n", (void *)(signed_word)(*pstart));
    1627             : #   endif
    1628         499 :     *pstart_arg = si -> arg;
    1629         499 :     sem_post(&(si -> registered));      /* Last action on si.   */
    1630             :                                         /* OK to deallocate.    */
    1631         499 :     return me;
    1632             : }
    1633             : 
    1634             : GC_INNER_PTHRSTART void * GC_CALLBACK GC_inner_start_routine(
    1635             :                                         struct GC_stack_base *sb, void *arg);
    1636             :                                         /* defined in pthread_start.c   */
    1637             : 
    1638         499 : STATIC void * GC_start_routine(void * arg)
    1639             : {
    1640             : #   ifdef INCLUDE_LINUX_THREAD_DESCR
    1641             :       struct GC_stack_base sb;
    1642             : 
    1643             : #     ifdef REDIRECT_MALLOC
    1644             :         /* GC_get_stack_base may call pthread_getattr_np, which can     */
    1645             :         /* unfortunately call realloc, which may allocate from an       */
    1646             :         /* unregistered thread.  This is unpleasant, since it might     */
     1647             :         /* force heap growth (or even heap overflow).                   */
    1648             :         GC_disable();
    1649             : #     endif
    1650             :       if (GC_get_stack_base(&sb) != GC_SUCCESS)
    1651             :         ABORT("Failed to get thread stack base");
    1652             : #     ifdef REDIRECT_MALLOC
    1653             :         GC_enable();
    1654             : #     endif
    1655             :       return GC_inner_start_routine(&sb, arg);
    1656             : #   else
    1657         499 :       return GC_call_with_stack_base(GC_inner_start_routine, arg);
    1658             : #   endif
    1659             : }
    1660             : 
    1661         499 : GC_API int WRAP_FUNC(pthread_create)(pthread_t *new_thread,
    1662             :                      GC_PTHREAD_CREATE_CONST pthread_attr_t *attr,
    1663             :                      void *(*start_routine)(void *), void *arg)
    1664             : {
    1665             :     int result;
    1666             :     int detachstate;
    1667         499 :     word my_flags = 0;
    1668             :     struct start_info * si;
    1669             :     DCL_LOCK_STATE;
    1670             :         /* This is otherwise saved only in an area mmapped by the thread */
    1671             :         /* library, which isn't visible to the collector.                */
    1672             : 
    1673             :     /* We resist the temptation to muck with the stack size here,       */
    1674             :     /* even if the default is unreasonably small.  That's the client's  */
    1675             :     /* responsibility.                                                  */
    1676             : 
    1677             :     INIT_REAL_SYMS();
    1678         499 :     LOCK();
    1679         499 :     si = (struct start_info *)GC_INTERNAL_MALLOC(sizeof(struct start_info),
    1680             :                                                  NORMAL);
    1681         499 :     UNLOCK();
    1682         499 :     if (!EXPECT(parallel_initialized, TRUE))
    1683           0 :       GC_init_parallel();
    1684         499 :     if (EXPECT(0 == si, FALSE) &&
    1685           0 :         (si = (struct start_info *)
    1686           0 :                 (*GC_get_oom_fn())(sizeof(struct start_info))) == 0)
    1687           0 :       return(ENOMEM);
    1688         499 :     if (sem_init(&(si -> registered), GC_SEM_INIT_PSHARED, 0) != 0)
    1689           0 :       ABORT("sem_init failed");
    1690             : 
    1691         499 :     si -> start_routine = start_routine;
    1692         499 :     si -> arg = arg;
    1693         499 :     LOCK();
    1694         499 :     if (!EXPECT(GC_thr_initialized, TRUE))
    1695           0 :       GC_thr_init();
    1696             : #   ifdef GC_ASSERTIONS
    1697             :       {
    1698             :         size_t stack_size = 0;
    1699             :         if (NULL != attr) {
    1700             :            pthread_attr_getstacksize(attr, &stack_size);
    1701             :         }
    1702             :         if (0 == stack_size) {
    1703             :            pthread_attr_t my_attr;
    1704             : 
    1705             :            pthread_attr_init(&my_attr);
    1706             :            pthread_attr_getstacksize(&my_attr, &stack_size);
    1707             :            pthread_attr_destroy(&my_attr);
    1708             :         }
    1709             :         /* On Solaris 10, with default attr initialization,     */
    1710             :         /* stack_size remains 0.  Fudge it.                     */
    1711             :         if (0 == stack_size) {
    1712             : #           ifndef SOLARIS
    1713             :               WARN("Failed to get stack size for assertion checking\n", 0);
    1714             : #           endif
    1715             :             stack_size = 1000000;
    1716             :         }
    1717             : #       ifdef PARALLEL_MARK
    1718             :           GC_ASSERT(stack_size >= (8*HBLKSIZE*sizeof(word)));
    1719             : #       else
    1720             :           /* FreeBSD-5.3/Alpha: default pthread stack is 64K,   */
    1721             :           /* HBLKSIZE=8192, sizeof(word)=8                      */
    1722             :           GC_ASSERT(stack_size >= 65536);
    1723             : #       endif
     1724             :         /* Our threads may need to do some work for the GC.    */
     1725             :         /* Ridiculously small stacks won't allow that, and     */
     1726             :         /* probably wouldn't be useful to the client anyway.   */
    1727             :       }
    1728             : #   endif
    1729         499 :     if (NULL == attr) {
    1730           0 :         detachstate = PTHREAD_CREATE_JOINABLE;
    1731             :     } else {
    1732         499 :         pthread_attr_getdetachstate(attr, &detachstate);
    1733             :     }
    1734         499 :     if (PTHREAD_CREATE_DETACHED == detachstate) my_flags |= DETACHED;
    1735         499 :     si -> flags = my_flags;
    1736         499 :     UNLOCK();
    1737             : #   ifdef DEBUG_THREADS
    1738             :       GC_log_printf("About to start new thread from thread %p\n",
    1739             :                     (void *)pthread_self());
    1740             : #   endif
    1741         499 :     GC_need_to_lock = TRUE;
    1742             : 
    1743         499 :     result = REAL_FUNC(pthread_create)(new_thread, attr, GC_start_routine, si);
    1744             : 
    1745             :     /* Wait until child has been added to the thread table.             */
    1746             :     /* This also ensures that we hold onto si until the child is done   */
    1747             :     /* with it.  Thus it doesn't matter whether it is otherwise         */
    1748             :     /* visible to the collector.                                        */
    1749         499 :     if (0 == result) {
    1750             :         IF_CANCEL(int cancel_state;)
    1751             : 
    1752             : #       ifdef DEBUG_THREADS
    1753             :           if (new_thread)
    1754             :             GC_log_printf("Started thread %p\n", (void *)(*new_thread));
    1755             : #       endif
    1756         499 :         DISABLE_CANCEL(cancel_state);
    1757             :                 /* pthread_create is not a cancellation point. */
    1758         499 :         while (0 != sem_wait(&(si -> registered))) {
    1759           0 :             if (EINTR != errno) ABORT("sem_wait failed");
    1760             :         }
    1761         499 :         RESTORE_CANCEL(cancel_state);
    1762             :     }
    1763         499 :     sem_destroy(&(si -> registered));
    1764         499 :     LOCK();
    1765         499 :     GC_INTERNAL_FREE(si);
    1766         499 :     UNLOCK();
    1767             : 
    1768         499 :     return(result);
    1769             : }
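
/*
 * The registration handshake above, distilled (illustrative only, with
 * hypothetical names): the creator blocks on a semaphore until the
 * child has entered the shared table, so the start_info analogue can be
 * freed promptly and the child is stoppable before the creator resumes.
 */
#include <errno.h>
#include <pthread.h>
#include <semaphore.h>
#include <stddef.h>

struct handoff {
    sem_t registered;           /* posted once the child is in the table */
    /* ... the start routine and its argument would live here ... */
};

static void * child_main(void * arg)
{
    struct handoff * h = (struct handoff *)arg;

    /* ... add self to the shared thread table ... */
    sem_post(&h -> registered); /* last access to h by the child */
    /* ... run the client start routine ... */
    return NULL;
}

static int spawn_registered(pthread_t * t, struct handoff * h)
{
    int result;

    sem_init(&h -> registered, 0 /* not inter-process */, 0);
    result = pthread_create(t, NULL, child_main, h);
    if (result == 0) {
        while (sem_wait(&h -> registered) != 0) {
            if (errno != EINTR) break;  /* unexpected failure: give up */
        }
    }
    sem_destroy(&h -> registered);
    return result;
}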
    1770             : 
    1771             : #if defined(USE_SPIN_LOCK) || !defined(NO_PTHREAD_TRYLOCK)
    1772             : /* Spend a few cycles in a way that can't introduce contention with     */
    1773             : /* other threads.                                                       */
    1774       25883 : STATIC void GC_pause(void)
    1775             : {
    1776             :     int i;
    1777             : #   if !defined(__GNUC__) || defined(__INTEL_COMPILER)
    1778             :       volatile word dummy = 0;
    1779             : #   endif
    1780             : 
    1781      284095 :     for (i = 0; i < 10; ++i) {
    1782             : #     if defined(__GNUC__) && !defined(__INTEL_COMPILER)
    1783      258234 :         __asm__ __volatile__ (" " : : : "memory");
    1784             : #     else
    1785             :         /* Something that's unlikely to be optimized away. */
    1786             :         GC_noop1(++dummy);
    1787             : #     endif
    1788             :     }
    1789       25861 : }
    1790             : #endif
    1791             : 
    1792             : #define SPIN_MAX 128    /* Maximum number of calls to GC_pause before   */
     1793             :                         /* giving up.                                   */
    1794             : 
    1795             : GC_INNER volatile GC_bool GC_collecting = 0;
    1796             :                         /* A hint that we're in the collector and       */
    1797             :                         /* holding the allocation lock for an           */
    1798             :                         /* extended period.                             */
    1799             : 
    1800             : #if (!defined(USE_SPIN_LOCK) && !defined(NO_PTHREAD_TRYLOCK)) \
    1801             :         || defined(PARALLEL_MARK)
    1802             : /* If we don't want to use the below spinlock implementation, either    */
    1803             : /* because we don't have a GC_test_and_set implementation, or because   */
    1804             : /* we don't want to risk sleeping, we can still try spinning on         */
    1805             : /* pthread_mutex_trylock for a while.  This appears to be very          */
    1806             : /* beneficial in many cases.                                            */
    1807             : /* I suspect that under high contention this is nearly always better    */
    1808             : /* than the spin lock.  But it's a bit slower on a uniprocessor.        */
    1809             : /* Hence we still default to the spin lock.                             */
    1810             : /* This is also used to acquire the mark lock for the parallel          */
    1811             : /* marker.                                                              */
    1812             : 
    1813             : /* Here we use a strict exponential backoff scheme.  I don't know       */
    1814             : /* whether that's better or worse than the above.  We eventually        */
    1815             : /* yield by calling pthread_mutex_lock(); it never makes sense to       */
    1816             : /* explicitly sleep.                                                    */
    1817             : 
    1818             : /* #define LOCK_STATS */
    1819             : /* Note that LOCK_STATS requires AO_HAVE_test_and_set.  */
    1820             : #ifdef LOCK_STATS
    1821             :   volatile AO_t GC_spin_count = 0;
    1822             :   volatile AO_t GC_block_count = 0;
    1823             :   volatile AO_t GC_unlocked_count = 0;
    1824             : #endif
    1825             : 
    1826       84573 : STATIC void GC_generic_lock(pthread_mutex_t * lock)
    1827             : {
    1828             : #ifndef NO_PTHREAD_TRYLOCK
    1829       84573 :     unsigned pause_length = 1;
    1830             :     unsigned i;
    1831             : 
    1832       84573 :     if (0 == pthread_mutex_trylock(lock)) {
    1833             : #       ifdef LOCK_STATS
    1834             :             (void)AO_fetch_and_add1(&GC_unlocked_count);
    1835             : #       endif
    1836       84107 :         return;
    1837             :     }
    1838        3760 :     for (; pause_length <= SPIN_MAX; pause_length <<= 1) {
    1839       27706 :         for (i = 0; i < pause_length; ++i) {
    1840       25834 :             GC_pause();
    1841             :         }
    1842        1872 :         switch(pthread_mutex_trylock(lock)) {
    1843             :             case 0:
    1844             : #               ifdef LOCK_STATS
    1845             :                     (void)AO_fetch_and_add1(&GC_spin_count);
    1846             : #               endif
    1847         522 :                 return;
    1848             :             case EBUSY:
    1849             :                 break;
    1850             :             default:
    1851           0 :                 ABORT("Unexpected error from pthread_mutex_trylock");
    1852             :         }
    1853             :     }
    1854             : #endif /* !NO_PTHREAD_TRYLOCK */
    1855             : #   ifdef LOCK_STATS
    1856             :         (void)AO_fetch_and_add1(&GC_block_count);
    1857             : #   endif
    1858           8 :     pthread_mutex_lock(lock);
    1859             : }
    1860             : 
    1861             : #endif /* !USE_SPIN_LOCK || ... */
    1862             : 
    1863             : #if defined(USE_SPIN_LOCK)
    1864             : 
    1865             : /* Reasonably fast spin locks.  Basically the same implementation */
     1866             : /* as STL alloc.h.  This isn't really the right way to do this,   */
     1867             : /* but until the POSIX scheduling mess gets straightened out ...  */
    1868             : 
    1869             : GC_INNER volatile AO_TS_t GC_allocate_lock = AO_TS_INITIALIZER;
    1870             : 
    1871             : GC_INNER void GC_lock(void)
    1872             : {
    1873             : #   define low_spin_max 30  /* spin cycles if we suspect uniprocessor */
    1874             : #   define high_spin_max SPIN_MAX /* spin cycles for multiprocessor */
    1875             :     static unsigned spin_max = low_spin_max;
    1876             :     unsigned my_spin_max;
    1877             :     static unsigned last_spins = 0;
    1878             :     unsigned my_last_spins;
    1879             :     unsigned i;
    1880             : 
    1881             :     if (AO_test_and_set_acquire(&GC_allocate_lock) == AO_TS_CLEAR) {
    1882             :         return;
    1883             :     }
    1884             :     my_spin_max = spin_max;
    1885             :     my_last_spins = last_spins;
    1886             :     for (i = 0; i < my_spin_max; i++) {
    1887             :         if (GC_collecting || GC_nprocs == 1) goto yield;
    1888             :         if (i < my_last_spins/2) {
    1889             :             GC_pause();
    1890             :             continue;
    1891             :         }
    1892             :         if (AO_test_and_set_acquire(&GC_allocate_lock) == AO_TS_CLEAR) {
     1893             :             /*
     1894             :              * Got it!
     1895             :              * Spinning worked, so we're probably not being scheduled
     1896             :              * against the other process with which we were contending,
     1897             :              * and it makes sense to spin longer the next time.
     1898             :              */
    1899             :             last_spins = i;
    1900             :             spin_max = high_spin_max;
    1901             :             return;
    1902             :         }
    1903             :     }
    1904             :     /* We are probably being scheduled against the other process.  Sleep. */
    1905             :     spin_max = low_spin_max;
    1906             : yield:
    1907             :     for (i = 0;; ++i) {
    1908             :         if (AO_test_and_set_acquire(&GC_allocate_lock) == AO_TS_CLEAR) {
    1909             :             return;
    1910             :         }
    1911             : #       define SLEEP_THRESHOLD 12
    1912             :                 /* Under Linux very short sleeps tend to wait until     */
    1913             :                 /* the current time quantum expires.  On old Linux      */
     1914             :                 /* kernels, nanosleep(<= 2ms) just spins.               */
    1915             :                 /* (Under 2.4, this happens only for real-time          */
    1916             :                 /* processes.)  We want to minimize both behaviors      */
    1917             :                 /* here.                                                */
    1918             :         if (i < SLEEP_THRESHOLD) {
    1919             :             sched_yield();
    1920             :         } else {
    1921             :             struct timespec ts;
    1922             : 
    1923             :             if (i > 24) i = 24;
    1924             :                         /* Don't wait for more than about 15msecs, even */
    1925             :                         /* under extreme contention.                    */
    1926             :             ts.tv_sec = 0;
    1927             :             ts.tv_nsec = 1 << i;
    1928             :             nanosleep(&ts, 0);
    1929             :         }
    1930             :     }
    1931             : }
    1932             : 
    1933             : #else  /* !USE_SPIN_LOCK */
    1934         210 : GC_INNER void GC_lock(void)
    1935             : {
    1936             : #ifndef NO_PTHREAD_TRYLOCK
    1937         223 :     if (1 == GC_nprocs || GC_collecting) {
    1938          13 :         pthread_mutex_lock(&GC_allocate_ml);
    1939             :     } else {
    1940         197 :         GC_generic_lock(&GC_allocate_ml);
    1941             :     }
    1942             : #else  /* !NO_PTHREAD_TRYLOCK */
    1943             :     pthread_mutex_lock(&GC_allocate_ml);
    1944             : #endif /* !NO_PTHREAD_TRYLOCK */
    1945         210 : }
    1946             : 
    1947             : #endif /* !USE_SPIN_LOCK */
    1948             : 
    1949             : #ifdef PARALLEL_MARK
    1950             : 
    1951             : # ifdef GC_ASSERTIONS
    1952             :     STATIC unsigned long GC_mark_lock_holder = NO_THREAD;
    1953             : #   define SET_MARK_LOCK_HOLDER \
    1954             :                 (void)(GC_mark_lock_holder = NUMERIC_THREAD_ID(pthread_self()))
    1955             : #   define UNSET_MARK_LOCK_HOLDER \
    1956             :                 do { \
    1957             :                   GC_ASSERT(GC_mark_lock_holder \
    1958             :                                 == NUMERIC_THREAD_ID(pthread_self())); \
    1959             :                   GC_mark_lock_holder = NO_THREAD; \
    1960             :                 } while (0)
    1961             : # else
    1962             : #   define SET_MARK_LOCK_HOLDER (void)0
    1963             : #   define UNSET_MARK_LOCK_HOLDER (void)0
    1964             : # endif /* !GC_ASSERTIONS */
    1965             : 
    1966             : #ifdef GLIBC_2_1_MUTEX_HACK
     1967             :   /* Ugly workaround for a LinuxThreads bug in the final versions       */
     1968             :   /* of glibc 2.1: pthread_mutex_trylock sets the mutex owner           */
    1969             :   /* field even when it fails to acquire the mutex.  This causes        */
    1970             :   /* pthread_cond_wait to die.  Remove for glibc2.2.                    */
    1971             :   /* According to the man page, we should use                           */
    1972             :   /* PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP, but that isn't actually   */
    1973             :   /* defined.                                                           */
    1974             :   static pthread_mutex_t mark_mutex =
    1975             :         {0, 0, 0, PTHREAD_MUTEX_ERRORCHECK_NP, {0, 0}};
    1976             : #else
    1977             :   static pthread_mutex_t mark_mutex = PTHREAD_MUTEX_INITIALIZER;
    1978             : #endif
    1979             : 
    1980             : static pthread_cond_t builder_cv = PTHREAD_COND_INITIALIZER;
    1981             : 
    1982       84380 : GC_INNER void GC_acquire_mark_lock(void)
    1983             : {
    1984             :     GC_ASSERT(GC_mark_lock_holder != NUMERIC_THREAD_ID(pthread_self()));
    1985       84380 :     GC_generic_lock(&mark_mutex);
    1986             :     SET_MARK_LOCK_HOLDER;
    1987       84440 : }
    1988             : 
    1989       83951 : GC_INNER void GC_release_mark_lock(void)
    1990             : {
    1991             :     UNSET_MARK_LOCK_HOLDER;
    1992       83951 :     if (pthread_mutex_unlock(&mark_mutex) != 0) {
    1993           0 :         ABORT("pthread_mutex_unlock failed");
    1994             :     }
    1995       83942 : }
    1996             : 
     1997             : /* The collector must wait for free-list builders for two reasons:      */
    1998             : /* 1) Mark bits may still be getting examined without lock.             */
    1999             : /* 2) Partial free lists referenced only by locals may not be scanned   */
    2000             : /*    correctly, e.g. if they contain "pointer-free" objects, since the */
    2001             : /*    free-list link may be ignored.                                    */
    2002           0 : STATIC void GC_wait_builder(void)
    2003             : {
    2004             :     ASSERT_CANCEL_DISABLED();
    2005             :     UNSET_MARK_LOCK_HOLDER;
    2006           0 :     if (pthread_cond_wait(&builder_cv, &mark_mutex) != 0) {
    2007           0 :         ABORT("pthread_cond_wait failed");
    2008             :     }
    2009             :     GC_ASSERT(GC_mark_lock_holder == NO_THREAD);
    2010             :     SET_MARK_LOCK_HOLDER;
    2011           0 : }
    2012             : 
    2013         244 : GC_INNER void GC_wait_for_reclaim(void)
    2014             : {
    2015         244 :     GC_acquire_mark_lock();
    2016         488 :     while (GC_fl_builder_count > 0) {
    2017           0 :         GC_wait_builder();
    2018             :     }
    2019         244 :     GC_release_mark_lock();
    2020         244 : }
    2021             : 
    2022       38338 : GC_INNER void GC_notify_all_builder(void)
    2023             : {
    2024             :     GC_ASSERT(GC_mark_lock_holder == NUMERIC_THREAD_ID(pthread_self()));
    2025       38338 :     if (pthread_cond_broadcast(&builder_cv) != 0) {
    2026           0 :         ABORT("pthread_cond_broadcast failed");
    2027             :     }
    2028       38338 : }
    2029             : 
    2030             : static pthread_cond_t mark_cv = PTHREAD_COND_INITIALIZER;
    2031             : 
    2032        6139 : GC_INNER void GC_wait_marker(void)
    2033             : {
    2034             :     ASSERT_CANCEL_DISABLED();
    2035             :     UNSET_MARK_LOCK_HOLDER;
    2036        6139 :     if (pthread_cond_wait(&mark_cv, &mark_mutex) != 0) {
    2037           0 :         ABORT("pthread_cond_wait failed");
    2038             :     }
    2039             :     GC_ASSERT(GC_mark_lock_holder == NO_THREAD);
    2040             :     SET_MARK_LOCK_HOLDER;
    2041        5650 : }
    2042             : 
    2043        3089 : GC_INNER void GC_notify_all_marker(void)
    2044             : {
    2045        3089 :     if (pthread_cond_broadcast(&mark_cv) != 0) {
    2046           0 :         ABORT("pthread_cond_broadcast failed");
    2047             :     }
    2048        3088 : }
    2049             : 
    2050             : #endif /* PARALLEL_MARK */
    2051             : 
    2052             : #ifdef PTHREAD_REGISTER_CANCEL_WEAK_STUBS
    2053             :   /* Workaround "undefined reference" linkage errors on some targets. */
    2054             :   void __pthread_register_cancel() __attribute__((__weak__));
    2055             :   void __pthread_unregister_cancel() __attribute__((__weak__));
    2056             :   void __pthread_register_cancel() {}
    2057             :   void __pthread_unregister_cancel() {}
    2058             : #endif
    2059             : 
    2060             : #endif /* GC_PTHREADS */

Generated by: LCOV version 1.11