1 : /*
2 : * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
3 : * Copyright (c) 1996 by Silicon Graphics. All rights reserved.
4 : * Copyright (c) 1998 by Fergus Henderson. All rights reserved.
5 : * Copyright (c) 2000-2005 by Hewlett-Packard Company. All rights reserved.
6 : *
7 : * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
8 : * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
9 : *
10 : * Permission is hereby granted to use or copy this program
11 : * for any purpose, provided the above notices are retained on all copies.
12 : * Permission to modify the code and to distribute modified code is granted,
13 : * provided the above notices are retained, and a notice that the code was
14 : * modified is included with the above copyright notice.
15 : */
16 :
17 : #include "private/pthread_support.h"
18 :
19 : /*
20 : * Support code originally for LinuxThreads, the clone()-based kernel
21 : * thread package for Linux which is included in libc6.
22 : *
23 : * This code no doubt makes some assumptions beyond what is
24 : * guaranteed by the pthread standard, though it now does
25 : * very little of that. It now also supports NPTL, and many
26 : * other Posix thread implementations. We are trying to merge
27 : * all flavors of pthread support code into this file.
28 : */
29 : /* DG/UX ix86 support <takis@xfree86.org> */
30 : /*
31 : * Linux_threads.c now also includes some code to support HPUX and
32 : * OSF1 (Compaq Tru64 Unix, really). The OSF1 support is based on Eric Benson's
33 : * patch.
34 : *
35 : * Eric also suggested an alternate basis for a lock implementation in
36 : * his code:
37 : * + #elif defined(OSF1)
38 : * + unsigned long GC_allocate_lock = 0;
39 : * + msemaphore GC_allocate_semaphore;
40 : * + # define GC_TRY_LOCK() \
41 : * + ((msem_lock(&GC_allocate_semaphore, MSEM_IF_NOWAIT) == 0) \
42 : * + ? (GC_allocate_lock = 1) \
43 : * + : 0)
44 : * + # define GC_LOCK_TAKEN GC_allocate_lock
45 : */
46 :
47 : #if defined(GC_PTHREADS) && !defined(GC_WIN32_THREADS)
48 :
49 : # include <stdlib.h>
50 : # include <pthread.h>
51 : # include <sched.h>
52 : # include <time.h>
53 : # include <errno.h>
54 : # include <unistd.h>
55 : # if !defined(GC_RTEMS_PTHREADS)
56 : # include <sys/mman.h>
57 : # endif
58 : # include <sys/time.h>
59 : # include <sys/types.h>
60 : # include <sys/stat.h>
61 : # include <fcntl.h>
62 : # include <signal.h>
63 :
64 : # include "gc_inline.h"
65 :
66 : #if defined(GC_DARWIN_THREADS)
67 : # include "private/darwin_semaphore.h"
68 : #else
69 : # include <semaphore.h>
70 : #endif /* !GC_DARWIN_THREADS */
71 :
72 : #if defined(GC_DARWIN_THREADS) || defined(GC_FREEBSD_THREADS)
73 : # include <sys/sysctl.h>
74 : #endif /* GC_DARWIN_THREADS */
75 :
76 : #if defined(GC_NETBSD_THREADS) || defined(GC_OPENBSD_THREADS)
77 : # include <sys/param.h>
78 : # include <sys/sysctl.h>
79 : #endif /* GC_NETBSD_THREADS */
80 :
81 : /* Allocator lock definitions. */
82 : #if !defined(USE_SPIN_LOCK)
83 : GC_INNER pthread_mutex_t GC_allocate_ml = PTHREAD_MUTEX_INITIALIZER;
84 : #endif
85 : GC_INNER unsigned long GC_lock_holder = NO_THREAD;
86 : /* Used only for assertions, and to prevent */
87 : /* recursive reentry in the system call wrapper. */
88 :
89 : #if defined(GC_DGUX386_THREADS)
90 : # include <sys/dg_sys_info.h>
91 : # include <sys/_int_psem.h>
92 : /* sem_t is an unsigned int in DG/UX */
93 : typedef unsigned int sem_t;
94 : #endif /* GC_DGUX386_THREADS */
95 :
96 : /* Undefine macros used to redirect pthread primitives. */
97 : # undef pthread_create
98 : # ifndef GC_NO_PTHREAD_SIGMASK
99 : # undef pthread_sigmask
100 : # endif
101 : # ifndef GC_NO_PTHREAD_CANCEL
102 : # undef pthread_cancel
103 : # endif
104 : # ifdef GC_PTHREAD_EXIT_ATTRIBUTE
105 : # undef pthread_exit
106 : # endif
107 : # undef pthread_join
108 : # undef pthread_detach
109 : # if defined(GC_OSF1_THREADS) && defined(_PTHREAD_USE_MANGLED_NAMES_) \
110 : && !defined(_PTHREAD_USE_PTDNAM_)
111 : /* Restore the original mangled names on Tru64 UNIX. */
112 : # define pthread_create __pthread_create
113 : # define pthread_join __pthread_join
114 : # define pthread_detach __pthread_detach
115 : # ifndef GC_NO_PTHREAD_CANCEL
116 : # define pthread_cancel __pthread_cancel
117 : # endif
118 : # ifdef GC_PTHREAD_EXIT_ATTRIBUTE
119 : # define pthread_exit __pthread_exit
120 : # endif
121 : # endif
122 :
123 : #ifdef GC_USE_LD_WRAP
124 : # define WRAP_FUNC(f) __wrap_##f
125 : # define REAL_FUNC(f) __real_##f
126 : int REAL_FUNC(pthread_create)(pthread_t *,
127 : GC_PTHREAD_CREATE_CONST pthread_attr_t *,
128 : void *(*start_routine)(void *), void *);
129 : int REAL_FUNC(pthread_join)(pthread_t, void **);
130 : int REAL_FUNC(pthread_detach)(pthread_t);
131 : # ifndef GC_NO_PTHREAD_SIGMASK
132 : int REAL_FUNC(pthread_sigmask)(int, const sigset_t *, sigset_t *);
133 : # endif
134 : # ifndef GC_NO_PTHREAD_CANCEL
135 : int REAL_FUNC(pthread_cancel)(pthread_t);
136 : # endif
137 : # ifdef GC_PTHREAD_EXIT_ATTRIBUTE
138 : void REAL_FUNC(pthread_exit)(void *) GC_PTHREAD_EXIT_ATTRIBUTE;
139 : # endif
140 : #else
141 : # ifdef GC_USE_DLOPEN_WRAP
142 : # include <dlfcn.h>
143 : # define WRAP_FUNC(f) f
144 : # define REAL_FUNC(f) GC_real_##f
145 : /* We define both GC_f and plain f to be the wrapped function. */
146 : /* In that way plain calls work, as do calls from files that */
147 : /* included gc.h, which redefined f to GC_f. */
148 : /* FIXME: Needs work for DARWIN and Tru64 (OSF1) */
149 : typedef int (* GC_pthread_create_t)(pthread_t *,
150 : GC_PTHREAD_CREATE_CONST pthread_attr_t *,
151 : void * (*)(void *), void *);
152 : static GC_pthread_create_t REAL_FUNC(pthread_create);
153 : # ifndef GC_NO_PTHREAD_SIGMASK
154 : typedef int (* GC_pthread_sigmask_t)(int, const sigset_t *,
155 : sigset_t *);
156 : static GC_pthread_sigmask_t REAL_FUNC(pthread_sigmask);
157 : # endif
158 : typedef int (* GC_pthread_join_t)(pthread_t, void **);
159 : static GC_pthread_join_t REAL_FUNC(pthread_join);
160 : typedef int (* GC_pthread_detach_t)(pthread_t);
161 : static GC_pthread_detach_t REAL_FUNC(pthread_detach);
162 : # ifndef GC_NO_PTHREAD_CANCEL
163 : typedef int (* GC_pthread_cancel_t)(pthread_t);
164 : static GC_pthread_cancel_t REAL_FUNC(pthread_cancel);
165 : # endif
166 : # ifdef GC_PTHREAD_EXIT_ATTRIBUTE
167 : typedef void (* GC_pthread_exit_t)(void *) GC_PTHREAD_EXIT_ATTRIBUTE;
168 : static GC_pthread_exit_t REAL_FUNC(pthread_exit);
169 : # endif
170 : # else
171 : # define WRAP_FUNC(f) GC_##f
172 : # if !defined(GC_DGUX386_THREADS)
173 : # define REAL_FUNC(f) f
174 : # else /* GC_DGUX386_THREADS */
175 : # define REAL_FUNC(f) __d10_##f
176 : # endif /* GC_DGUX386_THREADS */
177 : # endif
178 : #endif
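/* Illustration (not from the original source) of how the macros above  */
/* expand: with GC_USE_LD_WRAP, WRAP_FUNC(pthread_create) becomes       */
/* __wrap_pthread_create and REAL_FUNC(pthread_create) becomes          */
/* __real_pthread_create, matching GNU ld's "--wrap pthread_create"     */
/* option.  With GC_USE_DLOPEN_WRAP, the wrapper keeps the plain name   */
/* and the real function is fetched via dlsym().  With neither, the     */
/* wrapper is emitted as GC_pthread_create and calls pthread_create     */
/* (or __d10_pthread_create on DG/UX) directly.                         */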
179 :
180 : #if defined(GC_USE_LD_WRAP) || defined(GC_USE_DLOPEN_WRAP)
181 : /* Define GC_ functions as aliases for the plain ones, which will */
182 : /* be intercepted. This allows files which include gc.h, and hence */
183 : /* generate references to the GC_ symbols, to see the right symbols. */
184 : GC_API int GC_pthread_create(pthread_t * t,
185 : GC_PTHREAD_CREATE_CONST pthread_attr_t *a,
186 : void * (* fn)(void *), void * arg)
187 : {
188 : return pthread_create(t, a, fn, arg);
189 : }
190 :
191 : # ifndef GC_NO_PTHREAD_SIGMASK
192 : GC_API int GC_pthread_sigmask(int how, const sigset_t *mask,
193 : sigset_t *old)
194 : {
195 : return pthread_sigmask(how, mask, old);
196 : }
197 : # endif /* !GC_NO_PTHREAD_SIGMASK */
198 :
199 : GC_API int GC_pthread_join(pthread_t t, void **res)
200 : {
201 : return pthread_join(t, res);
202 : }
203 :
204 : GC_API int GC_pthread_detach(pthread_t t)
205 : {
206 : return pthread_detach(t);
207 : }
208 :
209 : # ifndef GC_NO_PTHREAD_CANCEL
210 : GC_API int GC_pthread_cancel(pthread_t t)
211 : {
212 : return pthread_cancel(t);
213 : }
214 : # endif /* !GC_NO_PTHREAD_CANCEL */
215 :
216 : # ifdef GC_PTHREAD_EXIT_ATTRIBUTE
217 : GC_API GC_PTHREAD_EXIT_ATTRIBUTE void GC_pthread_exit(void *retval)
218 : {
219 : pthread_exit(retval);
220 : }
221 : # endif /* GC_PTHREAD_EXIT_ATTRIBUTE */
222 : #endif /* Linker-based interception. */
223 :
224 : #ifdef GC_USE_DLOPEN_WRAP
225 : STATIC GC_bool GC_syms_initialized = FALSE;
226 :
227 : STATIC void GC_init_real_syms(void)
228 : {
229 : void *dl_handle;
230 : # ifndef RTLD_NEXT
231 : # define LIBPTHREAD_NAME "libpthread.so.0"
232 : # define LIBPTHREAD_NAME_LEN 16 /* incl. trailing 0 */
233 : size_t len = LIBPTHREAD_NAME_LEN - 1;
234 : char namebuf[LIBPTHREAD_NAME_LEN];
235 : static char *libpthread_name = LIBPTHREAD_NAME;
236 : # endif
237 :
238 : if (GC_syms_initialized) return;
239 : # ifdef RTLD_NEXT
240 : dl_handle = RTLD_NEXT;
241 : # else
242 : dl_handle = dlopen(libpthread_name, RTLD_LAZY);
243 : if (NULL == dl_handle) {
244 : while (isdigit(libpthread_name[len-1])) --len;
245 : if (libpthread_name[len-1] == '.') --len;
246 : BCOPY(libpthread_name, namebuf, len);
247 : namebuf[len] = '\0';
248 : dl_handle = dlopen(namebuf, RTLD_LAZY);
249 : }
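      /* Example of the fallback above: if dlopen("libpthread.so.0") */
      /* fails, the trailing version digits and the dot are stripped */
      /* and dlopen("libpthread.so") is tried instead.               */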
250 : if (NULL == dl_handle) ABORT("Couldn't open libpthread");
251 : # endif
252 : REAL_FUNC(pthread_create) = (GC_pthread_create_t)
253 : dlsym(dl_handle, "pthread_create");
254 : # ifdef RTLD_NEXT
255 : if (REAL_FUNC(pthread_create) == 0)
256 : ABORT("pthread_create not found"
257 : " (probably -lgc is specified after -lpthread)");
258 : # endif
259 : # ifndef GC_NO_PTHREAD_SIGMASK
260 : REAL_FUNC(pthread_sigmask) = (GC_pthread_sigmask_t)
261 : dlsym(dl_handle, "pthread_sigmask");
262 : # endif
263 : REAL_FUNC(pthread_join) = (GC_pthread_join_t)
264 : dlsym(dl_handle, "pthread_join");
265 : REAL_FUNC(pthread_detach) = (GC_pthread_detach_t)
266 : dlsym(dl_handle, "pthread_detach");
267 : # ifndef GC_NO_PTHREAD_CANCEL
268 : REAL_FUNC(pthread_cancel) = (GC_pthread_cancel_t)
269 : dlsym(dl_handle, "pthread_cancel");
270 : # endif
271 : # ifdef GC_PTHREAD_EXIT_ATTRIBUTE
272 : REAL_FUNC(pthread_exit) = (GC_pthread_exit_t)
273 : dlsym(dl_handle, "pthread_exit");
274 : # endif
275 : GC_syms_initialized = TRUE;
276 : }
277 :
278 : # define INIT_REAL_SYMS() if (!GC_syms_initialized) GC_init_real_syms();
279 : #else
280 : # define INIT_REAL_SYMS()
281 : #endif
282 :
283 : static GC_bool parallel_initialized = FALSE;
284 :
285 : GC_INNER GC_bool GC_need_to_lock = FALSE;
286 :
287 : STATIC long GC_nprocs = 1;
288 : /* Number of processors. We may not have */
289 : /* access to all of them, but this is as good */
290 : /* a guess as any ... */
291 :
292 : #ifdef THREAD_LOCAL_ALLOC
293 : /* We must explicitly mark ptrfree and gcj free lists, since the free */
294 : /* list links wouldn't otherwise be found. We also set them in the */
295 : /* normal free lists, since that involves touching less memory than */
296 : /* if we scanned them normally. */
297 258 : GC_INNER void GC_mark_thread_local_free_lists(void)
298 : {
299 : int i;
300 : GC_thread p;
301 :
302 66306 : for (i = 0; i < THREAD_TABLE_SZ; ++i) {
303 66591 : for (p = GC_threads[i]; 0 != p; p = p -> next) {
304 543 : if (!(p -> flags & FINISHED))
305 543 : GC_mark_thread_local_fls_for(&(p->tlfs));
306 : }
307 : }
308 258 : }
309 :
310 : # if defined(GC_ASSERTIONS)
311 : void GC_check_tls_for(GC_tlfs p);
312 : # if defined(USE_CUSTOM_SPECIFIC)
313 : void GC_check_tsd_marks(tsd *key);
314 : # endif
315 :
316 : /* Check that all thread-local free-lists are completely marked. */
317 : /* Also check that thread-specific-data structures are marked. */
318 : void GC_check_tls(void)
319 : {
320 : int i;
321 : GC_thread p;
322 :
323 : for (i = 0; i < THREAD_TABLE_SZ; ++i) {
324 : for (p = GC_threads[i]; 0 != p; p = p -> next) {
325 : if (!(p -> flags & FINISHED))
326 : GC_check_tls_for(&(p->tlfs));
327 : }
328 : }
329 : # if defined(USE_CUSTOM_SPECIFIC)
330 : if (GC_thread_key != 0)
331 : GC_check_tsd_marks(GC_thread_key);
332 : # endif
333 : }
334 : # endif /* GC_ASSERTIONS */
335 :
336 : #endif /* THREAD_LOCAL_ALLOC */
337 :
338 : #ifdef PARALLEL_MARK
339 :
340 : # ifndef MAX_MARKERS
341 : # define MAX_MARKERS 16
342 : # endif
343 :
344 : static ptr_t marker_sp[MAX_MARKERS - 1] = {0};
345 : #ifdef IA64
346 : static ptr_t marker_bsp[MAX_MARKERS - 1] = {0};
347 : #endif
348 :
349 : #if defined(GC_DARWIN_THREADS) && !defined(GC_NO_THREADS_DISCOVERY)
350 : static mach_port_t marker_mach_threads[MAX_MARKERS - 1] = {0};
351 :
352 : /* Used only by GC_suspend_thread_list(). */
353 : GC_INNER GC_bool GC_is_mach_marker(thread_act_t thread)
354 : {
355 : int i;
356 : for (i = 0; i < GC_markers - 1; i++) {
357 : if (marker_mach_threads[i] == thread)
358 : return TRUE;
359 : }
360 : return FALSE;
361 : }
362 : #endif /* GC_DARWIN_THREADS */
363 :
364 : STATIC void * GC_mark_thread(void * id)
365 : {
366 : word my_mark_no = 0;
367 : IF_CANCEL(int cancel_state;)
368 :
369 : if ((word)id == (word)-1) return 0; /* to make compiler happy */
370 : DISABLE_CANCEL(cancel_state);
371 : /* Mark threads are not cancellable; they */
372 : /* should be invisible to the client. */
373 : marker_sp[(word)id] = GC_approx_sp();
374 : # ifdef IA64
375 : marker_bsp[(word)id] = GC_save_regs_in_stack();
376 : # endif
377 : # if defined(GC_DARWIN_THREADS) && !defined(GC_NO_THREADS_DISCOVERY)
378 : marker_mach_threads[(word)id] = mach_thread_self();
379 : # endif
380 :
381 : for (;; ++my_mark_no) {
382 : /* GC_mark_no is passed only to allow GC_help_marker to terminate */
383 : /* promptly. That would matter if it were called from a signal */
384 : /* handler or from the GC lock acquisition code. Under Linux, it's */
385 : /* not safe to call it from a signal handler, since it uses mutexes */
386 : /* and condition variables. Since it is called only here, the */
387 : /* argument is unnecessary. */
388 : if (my_mark_no < GC_mark_no || my_mark_no > GC_mark_no + 2) {
389 : /* resynchronize if we get far off, e.g. because GC_mark_no */
390 : /* wrapped. */
391 : my_mark_no = GC_mark_no;
392 : }
393 : # ifdef DEBUG_THREADS
394 : GC_log_printf("Starting mark helper for mark number %lu\n",
395 : (unsigned long)my_mark_no);
396 : # endif
397 : GC_help_marker(my_mark_no);
398 : }
399 : }
400 :
401 : STATIC pthread_t GC_mark_threads[MAX_MARKERS];
402 :
403 : static void start_mark_threads(void)
404 : {
405 : int i;
406 : pthread_attr_t attr;
407 :
408 : GC_ASSERT(I_DONT_HOLD_LOCK());
409 : INIT_REAL_SYMS(); /* for pthread_create */
410 :
411 : if (0 != pthread_attr_init(&attr)) ABORT("pthread_attr_init failed");
412 :
413 : if (0 != pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED))
414 : ABORT("pthread_attr_setdetachstate failed");
415 :
416 : # if defined(HPUX) || defined(GC_DGUX386_THREADS)
417 : /* Default stack size is usually too small: fix it. */
418 : /* Otherwise marker threads or GC may run out of */
419 : /* space. */
420 : # define MIN_STACK_SIZE (8*HBLKSIZE*sizeof(word))
421 : {
422 : size_t old_size;
423 : int code;
424 :
425 : if (pthread_attr_getstacksize(&attr, &old_size) != 0)
426 : ABORT("pthread_attr_getstacksize failed");
427 : if (old_size < MIN_STACK_SIZE) {
428 : if (pthread_attr_setstacksize(&attr, MIN_STACK_SIZE) != 0)
429 : ABORT("pthread_attr_setstacksize failed");
430 : }
431 : }
432 : # endif /* HPUX || GC_DGUX386_THREADS */
433 : for (i = 0; i < GC_markers - 1; ++i) {
434 : if (0 != REAL_FUNC(pthread_create)(GC_mark_threads + i, &attr,
435 : GC_mark_thread, (void *)(word)i)) {
436 : WARN("Marker thread creation failed, errno = %" GC_PRIdPTR "\n",
437 : errno);
438 : /* Don't try to create other marker threads. */
439 : GC_markers = i + 1;
440 : if (i == 0) GC_parallel = FALSE;
441 : break;
442 : }
443 : }
444 : if (GC_print_stats) {
445 : GC_log_printf("Started %ld mark helper threads\n", GC_markers - 1);
446 : }
447 : pthread_attr_destroy(&attr);
448 : }
449 :
450 : #endif /* PARALLEL_MARK */
451 :
452 : GC_INNER GC_bool GC_thr_initialized = FALSE;
453 :
454 : GC_INNER volatile GC_thread GC_threads[THREAD_TABLE_SZ] = {0};
455 :
456 0 : void GC_push_thread_structures(void)
457 : {
458 : GC_ASSERT(I_HOLD_LOCK());
459 0 : GC_push_all((ptr_t)(GC_threads), (ptr_t)(GC_threads)+sizeof(GC_threads));
460 : # if defined(THREAD_LOCAL_ALLOC)
461 0 : GC_push_all((ptr_t)(&GC_thread_key),
462 : (ptr_t)(&GC_thread_key) + sizeof(&GC_thread_key));
463 : # endif
464 0 : }
465 :
466 : /* It may not be safe to allocate when we register the first thread. */
467 : static struct GC_Thread_Rep first_thread;
468 :
469 : /* Add a thread to GC_threads. We assume it wasn't already there. */
470 : /* Caller holds allocation lock. */
471 662 : STATIC GC_thread GC_new_thread(pthread_t id)
472 : {
473 662 : int hv = NUMERIC_THREAD_ID(id) % THREAD_TABLE_SZ;
474 : GC_thread result;
475 : static GC_bool first_thread_used = FALSE;
476 :
477 : GC_ASSERT(I_HOLD_LOCK());
478 662 : if (!first_thread_used) {
479 163 : result = &first_thread;
480 163 : first_thread_used = TRUE;
481 : } else {
482 499 : result = (struct GC_Thread_Rep *)
483 : GC_INTERNAL_MALLOC(sizeof(struct GC_Thread_Rep), NORMAL);
484 499 : if (result == 0) return(0);
485 : }
486 662 : result -> id = id;
487 : # ifdef PLATFORM_ANDROID
488 : result -> kernel_id = gettid();
489 : # endif
490 662 : result -> next = GC_threads[hv];
491 662 : GC_threads[hv] = result;
492 : # ifdef NACL
493 : GC_nacl_gc_thread_self = result;
494 : GC_nacl_initialize_gc_thread();
495 : # endif
496 : GC_ASSERT(result -> flags == 0 && result -> thread_blocked == 0);
497 662 : return(result);
498 : }
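/* Note on the structure above: GC_threads is a hash table of chains    */
/* keyed by NUMERIC_THREAD_ID(id) % THREAD_TABLE_SZ.  GC_new_thread     */
/* pushes new entries at the head of a chain, so GC_lookup_thread,      */
/* which scans from the head, finds the most recently added entry for   */
/* a given pthread id first.                                            */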
499 :
500 : /* Delete a thread from GC_threads. We assume it is there. */
501 : /* (The code intentionally traps if it wasn't.) */
502 : /* It is safe to delete the main thread. */
503 151 : STATIC void GC_delete_thread(pthread_t id)
504 : {
505 151 : int hv = NUMERIC_THREAD_ID(id) % THREAD_TABLE_SZ;
506 151 : register GC_thread p = GC_threads[hv];
507 151 : register GC_thread prev = 0;
508 :
509 : # ifdef NACL
510 : GC_nacl_shutdown_gc_thread();
511 : GC_nacl_gc_thread_self = NULL;
512 : # endif
513 :
514 : GC_ASSERT(I_HOLD_LOCK());
515 446 : while (!THREAD_EQUAL(p -> id, id)) {
516 144 : prev = p;
517 144 : p = p -> next;
518 : }
519 151 : if (prev == 0) {
520 7 : GC_threads[hv] = p -> next;
521 : } else {
522 144 : prev -> next = p -> next;
523 : }
524 151 : if (p != &first_thread) {
525 : # ifdef GC_DARWIN_THREADS
526 : mach_port_deallocate(mach_task_self(), p->stop_info.mach_thread);
527 : # endif
528 151 : GC_INTERNAL_FREE(p);
529 : }
530 151 : }
531 :
532 : /* If a thread has been joined, but we have not yet */
533 : /* been notified, then there may be more than one thread */
534 : /* in the table with the same pthread id. */
535 : /* This is OK, but we need a way to delete a specific one. */
536 0 : STATIC void GC_delete_gc_thread(GC_thread t)
537 : {
538 0 : pthread_t id = t -> id;
539 0 : int hv = NUMERIC_THREAD_ID(id) % THREAD_TABLE_SZ;
540 0 : register GC_thread p = GC_threads[hv];
541 0 : register GC_thread prev = 0;
542 :
543 : GC_ASSERT(I_HOLD_LOCK());
544 0 : while (p != t) {
545 0 : prev = p;
546 0 : p = p -> next;
547 : }
548 0 : if (prev == 0) {
549 0 : GC_threads[hv] = p -> next;
550 : } else {
551 0 : prev -> next = p -> next;
552 : }
553 : # ifdef GC_DARWIN_THREADS
554 : mach_port_deallocate(mach_task_self(), p->stop_info.mach_thread);
555 : # endif
556 0 : GC_INTERNAL_FREE(p);
557 0 : }
558 :
559 : /* Return a GC_thread corresponding to a given pthread_t. */
560 : /* Returns 0 if it's not there. */
561 : /* Caller holds allocation lock or otherwise inhibits */
562 : /* updates. */
563 : /* If there is more than one thread with the given id we */
564 : /* return the most recent one. */
565 931 : GC_INNER GC_thread GC_lookup_thread(pthread_t id)
566 : {
567 931 : int hv = NUMERIC_THREAD_ID(id) % THREAD_TABLE_SZ;
568 931 : register GC_thread p = GC_threads[hv];
569 :
570 931 : while (p != 0 && !THREAD_EQUAL(p -> id, id)) p = p -> next;
571 931 : return(p);
572 : }
573 :
574 : /* Called by GC_finalize() (in case an allocation failure is observed). */
575 3 : GC_INNER void GC_reset_finalizer_nested(void)
576 : {
577 3 : GC_thread me = GC_lookup_thread(pthread_self());
578 3 : me->finalizer_nested = 0;
579 3 : }
580 :
581 : /* Checks and updates the thread-local level of finalizers recursion. */
582 : /* Returns NULL if GC_invoke_finalizers() should not be called by the */
583 : /* collector (to minimize the risk of a deep finalizers recursion), */
584 : /* otherwise returns a pointer to the thread-local finalizer_nested. */
585 : /* Called by GC_notify_or_invoke_finalizers() only (the lock is held). */
586 0 : GC_INNER unsigned char *GC_check_finalizer_nested(void)
587 : {
588 0 : GC_thread me = GC_lookup_thread(pthread_self());
589 0 : unsigned nesting_level = me->finalizer_nested;
590 0 : if (nesting_level) {
591 : /* We are inside another GC_invoke_finalizers(). */
592 : /* Skip some implicitly-called GC_invoke_finalizers() */
593 : /* depending on the nesting (recursion) level. */
594 0 : if (++me->finalizer_skipped < (1U << nesting_level)) return NULL;
595 0 : me->finalizer_skipped = 0;
596 : }
597 0 : me->finalizer_nested = (unsigned char)(nesting_level + 1);
598 0 : return &me->finalizer_nested;
599 : }
600 :
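/* Worked example of the skipping scheme in GC_check_finalizer_nested  */
/* above: at nesting level 1 only every 2nd implicitly-triggered       */
/* GC_invoke_finalizers() call proceeds, at level 2 every 4th, and so  */
/* on (1 << level), so deeply nested finalization is throttled         */
/* exponentially.                                                      */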
601 : #if defined(GC_ASSERTIONS) && defined(THREAD_LOCAL_ALLOC)
602 : /* This is called from thread-local GC_malloc(). */
603 : GC_bool GC_is_thread_tsd_valid(void *tsd)
604 : {
605 : GC_thread me;
606 : DCL_LOCK_STATE;
607 :
608 : LOCK();
609 : me = GC_lookup_thread(pthread_self());
610 : UNLOCK();
611 : return (char *)tsd >= (char *)&me->tlfs
612 : && (char *)tsd < (char *)&me->tlfs + sizeof(me->tlfs);
613 : }
614 : #endif /* GC_ASSERTIONS && THREAD_LOCAL_ALLOC */
615 :
616 : #ifdef CAN_HANDLE_FORK
617 : /* Remove all entries from the GC_threads table, except the */
618 : /* one for the current thread. We need to do this in the child */
619 : /* process after a fork(), since only the current thread */
620 : /* survives in the child. */
621 0 : STATIC void GC_remove_all_threads_but_me(void)
622 : {
623 0 : pthread_t self = pthread_self();
624 : int hv;
625 : GC_thread p, next, me;
626 :
627 0 : for (hv = 0; hv < THREAD_TABLE_SZ; ++hv) {
628 0 : me = 0;
629 0 : for (p = GC_threads[hv]; 0 != p; p = next) {
630 0 : next = p -> next;
631 0 : if (THREAD_EQUAL(p -> id, self)) {
632 0 : me = p;
633 0 : p -> next = 0;
634 : # ifdef GC_DARWIN_THREADS
635 : /* Update thread Id after fork (it is ok to call */
636 : /* GC_destroy_thread_local and GC_free_internal */
637 : /* before update). */
638 : me -> stop_info.mach_thread = mach_thread_self();
639 : # endif
640 : # if defined(THREAD_LOCAL_ALLOC) && !defined(USE_CUSTOM_SPECIFIC)
641 : /* Some TLS implementations might be not fork-friendly, so */
642 : /* we re-assign thread-local pointer to 'tlfs' for safety */
643 : /* instead of the assertion check (again, it is ok to call */
644 : /* GC_destroy_thread_local and GC_free_internal before). */
645 0 : if (GC_setspecific(GC_thread_key, &me->tlfs) != 0)
646 : ABORT("GC_setspecific failed (in child)");
647 : # endif
648 : } else {
649 : # ifdef THREAD_LOCAL_ALLOC
650 0 : if (!(p -> flags & FINISHED)) {
651 0 : GC_destroy_thread_local(&(p->tlfs));
652 : GC_remove_specific(GC_thread_key);
653 : }
654 : # endif
655 0 : if (p != &first_thread) GC_INTERNAL_FREE(p);
656 : }
657 : }
658 0 : GC_threads[hv] = me;
659 : }
660 0 : }
661 : #endif /* CAN_HANDLE_FORK */
662 :
663 : #ifdef USE_PROC_FOR_LIBRARIES
664 : GC_INNER GC_bool GC_segment_is_thread_stack(ptr_t lo, ptr_t hi)
665 : {
666 : int i;
667 : GC_thread p;
668 :
669 : GC_ASSERT(I_HOLD_LOCK());
670 : # ifdef PARALLEL_MARK
671 : for (i = 0; i < GC_markers - 1; ++i) {
672 : if (marker_sp[i] > lo && marker_sp[i] < hi) return TRUE;
673 : # ifdef IA64
674 : if (marker_bsp[i] > lo && marker_bsp[i] < hi) return TRUE;
675 : # endif
676 : }
677 : # endif
678 : for (i = 0; i < THREAD_TABLE_SZ; i++) {
679 : for (p = GC_threads[i]; p != 0; p = p -> next) {
680 : if (0 != p -> stack_end) {
681 : # ifdef STACK_GROWS_UP
682 : if (p -> stack_end >= lo && p -> stack_end < hi) return TRUE;
683 : # else /* STACK_GROWS_DOWN */
684 : if (p -> stack_end > lo && p -> stack_end <= hi) return TRUE;
685 : # endif
686 : }
687 : }
688 : }
689 : return FALSE;
690 : }
691 : #endif /* USE_PROC_FOR_LIBRARIES */
692 :
693 : #ifdef IA64
694 : /* Find the largest stack_base smaller than bound. May be used */
695 : /* to find the boundary between a register stack and the */
696 : /* immediately preceding memory stack. */
697 : GC_INNER ptr_t GC_greatest_stack_base_below(ptr_t bound)
698 : {
699 : int i;
700 : GC_thread p;
701 : ptr_t result = 0;
702 :
703 : GC_ASSERT(I_HOLD_LOCK());
704 : # ifdef PARALLEL_MARK
705 : for (i = 0; i < GC_markers - 1; ++i) {
706 : if (marker_sp[i] > result && marker_sp[i] < bound)
707 : result = marker_sp[i];
708 : }
709 : # endif
710 : for (i = 0; i < THREAD_TABLE_SZ; i++) {
711 : for (p = GC_threads[i]; p != 0; p = p -> next) {
712 : if (p -> stack_end > result && p -> stack_end < bound) {
713 : result = p -> stack_end;
714 : }
715 : }
716 : }
717 : return result;
718 : }
719 : #endif /* IA64 */
720 :
721 : #ifndef STAT_READ
722 : /* Also defined in os_dep.c. */
723 : # define STAT_BUF_SIZE 4096
724 : # define STAT_READ read
725 : /* If read is wrapped, this may need to be redefined to call */
726 : /* the real one. */
727 : #endif
728 :
729 : #if defined(GC_LINUX_THREADS) && !defined(PLATFORM_ANDROID) && !defined(NACL)
730 : /* Return the number of processors. */
731 163 : STATIC int GC_get_nprocs(void)
732 : {
733 : /* Should be "return sysconf(_SC_NPROCESSORS_ONLN);" but that */
734 : /* appears to be buggy in many cases. */
735 : /* We look for lines "cpu<n>" in /proc/stat. */
736 : char stat_buf[STAT_BUF_SIZE];
737 : int f;
738 : int result, i, len;
739 :
740 163 : f = open("/proc/stat", O_RDONLY);
741 163 : if (f < 0) {
742 0 : WARN("Couldn't read /proc/stat\n", 0);
743 0 : return 1; /* assume an uniprocessor */
744 : }
745 163 : len = STAT_READ(f, stat_buf, STAT_BUF_SIZE);
746 163 : close(f);
747 :
748 163 : result = 1;
749 : /* Some old kernels only have a single "cpu nnnn ..." */
750 : /* entry in /proc/stat. We identify those as */
751 : /* uniprocessors. */
752 :
753 250368 : for (i = 0; i < len - 100; ++i) {
754 251672 : if (stat_buf[i] == '\n' && stat_buf[i+1] == 'c'
755 1467 : && stat_buf[i+2] == 'p' && stat_buf[i+3] == 'u') {
756 652 : int cpu_no = atoi(&stat_buf[i + 4]);
757 652 : if (cpu_no >= result)
758 489 : result = cpu_no + 1;
759 : }
760 : }
761 163 : return result;
762 : }
763 : #endif /* GC_LINUX_THREADS && !PLATFORM_ANDROID && !NACL */
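/* For reference, a hypothetical /proc/stat on a 4-CPU machine begins: */
/*   cpu  9176 0 4340 107264 ...                                       */
/*   cpu0 2294 0 1085 26816 ...                                        */
/*   ...                                                                */
/*   cpu3 2294 0 1085 26816 ...                                        */
/* The scan in GC_get_nprocs() above matches the "cpu0".."cpu3" lines  */
/* (each preceded by '\n') and returns 3 + 1 = 4.                      */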
764 :
765 : #if defined(ARM32) && defined(GC_LINUX_THREADS) && !defined(NACL)
766 : /* Some buggy Linux/arm kernels show only non-sleeping CPUs in */
767 : /* /proc/stat (and /proc/cpuinfo), so another system data source is */
768 : /* tried first. Result <= 0 on error. */
769 : STATIC int GC_get_nprocs_present(void)
770 : {
771 : char stat_buf[16];
772 : int f;
773 : int len;
774 :
775 : f = open("/sys/devices/system/cpu/present", O_RDONLY);
776 : if (f < 0)
777 : return -1; /* cannot open the file */
778 :
779 : len = STAT_READ(f, stat_buf, sizeof(stat_buf));
780 : close(f);
781 :
782 : /* Recognized file format: "0\n" or "0-<max_cpu_id>\n" */
783 : /* The file might contain a comma-separated list, but we */
784 : /* do not need to handle it (we just silently ignore it). */
785 : if (len < 2 || stat_buf[0] != '0' || stat_buf[len - 1] != '\n') {
786 : return 0; /* read error or unrecognized content */
787 : } else if (len == 2) {
788 : return 1; /* a uniprocessor */
789 : } else if (stat_buf[1] != '-') {
790 : return 0; /* unrecognized content */
791 : }
792 :
793 : stat_buf[len - 1] = '\0'; /* terminate the string */
794 : return atoi(&stat_buf[2]) + 1; /* skip "0-" and parse max_cpu_id */
795 : }
796 : #endif /* ARM32 && GC_LINUX_THREADS && !NACL */
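/* Example inputs for GC_get_nprocs_present() above: "0-3\n" yields    */
/* atoi("3") + 1 == 4, and "0\n" (len == 2) yields 1.  A hypothetical  */
/* comma-separated list like "0-1,3\n" is parsed only up to the comma  */
/* (atoi stops there), yielding 2.                                     */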
797 :
798 : /* We hold the GC lock. Wait until an in-progress GC has finished. */
799 : /* Repeatedly RELEASES GC LOCK in order to wait. */
800 : /* If wait_for_all is true, then we exit with the GC lock held and no */
801 : /* collection in progress; otherwise we just wait for the current GC */
802 : /* to finish. */
803 151 : STATIC void GC_wait_for_gc_completion(GC_bool wait_for_all)
804 : {
805 : DCL_LOCK_STATE;
806 : GC_ASSERT(I_HOLD_LOCK());
807 : ASSERT_CANCEL_DISABLED();
808 151 : if (GC_incremental && GC_collection_in_progress()) {
809 0 : word old_gc_no = GC_gc_no;
810 :
811 : /* Make sure that no part of our stack is still on the mark stack, */
812 : /* since it's about to be unmapped. */
813 0 : while (GC_incremental && GC_collection_in_progress()
814 0 : && (wait_for_all || old_gc_no == GC_gc_no)) {
815 0 : ENTER_GC();
816 0 : GC_in_thread_creation = TRUE;
817 0 : GC_collect_a_little_inner(1);
818 0 : GC_in_thread_creation = FALSE;
819 0 : EXIT_GC();
820 0 : UNLOCK();
821 0 : sched_yield();
822 0 : LOCK();
823 : }
824 : }
825 151 : }
826 :
827 : #ifdef CAN_HANDLE_FORK
828 : /* Procedures called before and after a fork. The goal here is to make */
829 : /* it safe to call GC_malloc() in a forked child. It's unclear that */
830 : /* this is attainable, since the single UNIX spec seems to imply one */
831 : /* should only call async-signal-safe functions, and we probably can't */
832 : /* quite guarantee that. But we give it our best shot. (That same */
833 : /* spec also implies that it's not safe to call the system malloc */
834 : /* between fork() and exec(). Thus we're doing no worse than it.) */
835 :
836 : IF_CANCEL(static int fork_cancel_state;)
837 : /* protected by allocation lock. */
838 :
839 : /* Called before a fork() */
840 0 : STATIC void GC_fork_prepare_proc(void)
841 : {
842 : /* Acquire all relevant locks, so that after releasing the locks */
843 : /* the child will see a consistent state in which monitor */
844 : /* invariants hold. Unfortunately, we can't acquire libc locks */
845 : /* we might need, and there seems to be no guarantee that libc */
846 : /* must install a suitable fork handler. */
847 : /* Wait for an ongoing GC to finish, since we can't finish it in */
848 : /* (the one remaining thread in) the child. */
849 0 : LOCK();
850 0 : DISABLE_CANCEL(fork_cancel_state);
851 : /* Following waits may include cancellation points. */
852 : # if defined(PARALLEL_MARK)
853 : if (GC_parallel)
854 : GC_wait_for_reclaim();
855 : # endif
856 0 : GC_wait_for_gc_completion(TRUE);
857 : # if defined(PARALLEL_MARK)
858 : if (GC_parallel)
859 : GC_acquire_mark_lock();
860 : # endif
861 0 : }
862 :
863 : /* Called in parent after a fork() */
864 0 : STATIC void GC_fork_parent_proc(void)
865 : {
866 : # if defined(PARALLEL_MARK)
867 : if (GC_parallel)
868 : GC_release_mark_lock();
869 : # endif
870 0 : RESTORE_CANCEL(fork_cancel_state);
871 0 : UNLOCK();
872 0 : }
873 :
874 : /* Called in child after a fork() */
875 0 : STATIC void GC_fork_child_proc(void)
876 : {
877 : /* Clean up the thread table, so that just our thread is left. */
878 : # if defined(PARALLEL_MARK)
879 : if (GC_parallel)
880 : GC_release_mark_lock();
881 : # endif
882 0 : GC_remove_all_threads_but_me();
883 : # ifdef PARALLEL_MARK
884 : /* Turn off parallel marking in the child, since we are probably */
885 : /* just going to exec, and we would have to restart mark threads. */
886 : GC_markers = 1;
887 : GC_parallel = FALSE;
888 : # endif /* PARALLEL_MARK */
889 0 : RESTORE_CANCEL(fork_cancel_state);
890 0 : UNLOCK();
891 0 : }
892 : #endif /* CAN_HANDLE_FORK */
893 :
894 : #if defined(GC_DGUX386_THREADS)
895 : /* Return the number of processors, or a value <= 0 if it cannot be determined. */
896 : STATIC int GC_get_nprocs(void)
897 : {
898 : /* <takis@XFree86.Org> */
899 : int numCpus;
900 : struct dg_sys_info_pm_info pm_sysinfo;
901 : int status = 0;
902 :
903 : status = dg_sys_info((long int *) &pm_sysinfo,
904 : DG_SYS_INFO_PM_INFO_TYPE, DG_SYS_INFO_PM_CURRENT_VERSION);
905 : if (status < 0)
906 : /* set -1 for error */
907 : numCpus = -1;
908 : else
909 : /* Active CPUs */
910 : numCpus = pm_sysinfo.idle_vp_count;
911 :
912 : # ifdef DEBUG_THREADS
913 : GC_log_printf("Number of active CPUs in this system: %d\n", numCpus);
914 : # endif
915 : return(numCpus);
916 : }
917 : #endif /* GC_DGUX386_THREADS */
918 :
919 : #if defined(GC_DARWIN_THREADS) || defined(GC_FREEBSD_THREADS) \
920 : || defined(GC_NETBSD_THREADS) || defined(GC_OPENBSD_THREADS)
921 : static int get_ncpu(void)
922 : {
923 : int mib[] = {CTL_HW,HW_NCPU};
924 : int res;
925 : size_t len = sizeof(res);
926 :
927 : sysctl(mib, sizeof(mib)/sizeof(int), &res, &len, NULL, 0);
928 : return res;
929 : }
930 : #endif /* GC_DARWIN_THREADS || ... */
931 :
932 : #ifdef INCLUDE_LINUX_THREAD_DESCR
933 : __thread int GC_dummy_thread_local;
934 : GC_INNER GC_bool GC_enclosing_mapping(ptr_t addr,
935 : ptr_t *startp, ptr_t *endp);
936 : #endif
937 :
938 : /* We hold the allocation lock. */
939 163 : GC_INNER void GC_thr_init(void)
940 : {
941 163 : if (GC_thr_initialized) return;
942 163 : GC_thr_initialized = TRUE;
943 :
944 : # ifdef CAN_HANDLE_FORK
945 : /* Prepare for forks if requested. */
946 163 : if (GC_handle_fork
947 163 : && pthread_atfork(GC_fork_prepare_proc, GC_fork_parent_proc,
948 0 : GC_fork_child_proc) != 0)
949 0 : ABORT("pthread_atfork failed");
950 : # endif
951 : # ifdef INCLUDE_LINUX_THREAD_DESCR
952 : /* Explicitly register the region including the address */
953 : /* of a thread local variable. This should include thread */
954 : /* locals for the main thread, except for those allocated */
955 : /* in response to dlopen calls. */
956 : {
957 : ptr_t thread_local_addr = (ptr_t)(&GC_dummy_thread_local);
958 : ptr_t main_thread_start, main_thread_end;
959 : if (!GC_enclosing_mapping(thread_local_addr, &main_thread_start,
960 : &main_thread_end)) {
961 : ABORT("Failed to find mapping for main thread thread locals");
962 : } else {
963 : /* main_thread_start and main_thread_end are initialized. */
964 : GC_add_roots_inner(main_thread_start, main_thread_end, FALSE);
965 : }
966 : }
967 : # endif
968 : /* Add the initial thread, so we can stop it. */
969 : {
970 163 : GC_thread t = GC_new_thread(pthread_self());
971 163 : if (t == NULL)
972 0 : ABORT("Failed to allocate memory for the initial thread");
973 : # ifdef GC_DARWIN_THREADS
974 : t -> stop_info.mach_thread = mach_thread_self();
975 : # else
976 163 : t -> stop_info.stack_ptr = GC_approx_sp();
977 : # endif
978 163 : t -> flags = DETACHED | MAIN_THREAD;
979 : }
980 :
981 : # ifndef GC_DARWIN_THREADS
982 163 : GC_stop_init();
983 : # endif
984 :
985 : /* Set GC_nprocs. */
986 : {
987 163 : char * nprocs_string = GETENV("GC_NPROCS");
988 163 : GC_nprocs = -1;
989 163 : if (nprocs_string != NULL) GC_nprocs = atoi(nprocs_string);
990 : }
991 163 : if (GC_nprocs <= 0
992 : # if defined(ARM32) && defined(GC_LINUX_THREADS) && !defined(NACL)
993 : && (GC_nprocs = GC_get_nprocs_present()) <= 1
994 : /* Workaround for some Linux/arm kernels */
995 : # endif
996 : )
997 : {
998 : # if defined(GC_HPUX_THREADS)
999 : GC_nprocs = pthread_num_processors_np();
1000 : # elif defined(GC_OSF1_THREADS) || defined(GC_AIX_THREADS) \
1001 : || defined(GC_SOLARIS_THREADS) || defined(GC_GNU_THREADS) \
1002 : || defined(PLATFORM_ANDROID) || defined(NACL)
1003 : GC_nprocs = sysconf(_SC_NPROCESSORS_ONLN);
1004 : if (GC_nprocs <= 0) GC_nprocs = 1;
1005 : # elif defined(GC_IRIX_THREADS)
1006 : GC_nprocs = sysconf(_SC_NPROC_ONLN);
1007 : if (GC_nprocs <= 0) GC_nprocs = 1;
1008 : # elif defined(GC_DARWIN_THREADS) || defined(GC_FREEBSD_THREADS) \
1009 : || defined(GC_NETBSD_THREADS) || defined(GC_OPENBSD_THREADS)
1010 : GC_nprocs = get_ncpu();
1011 : # elif defined(GC_LINUX_THREADS) || defined(GC_DGUX386_THREADS)
1012 163 : GC_nprocs = GC_get_nprocs();
1013 : # elif defined(GC_RTEMS_PTHREADS)
1014 : GC_nprocs = 1; /* not implemented */
1015 : # endif
1016 : }
1017 163 : if (GC_nprocs <= 0) {
1018 0 : WARN("GC_get_nprocs() returned %" GC_PRIdPTR "\n", GC_nprocs);
1019 0 : GC_nprocs = 2; /* assume dual-core */
1020 : # ifdef PARALLEL_MARK
1021 : GC_markers = 1;
1022 : # endif
1023 : } else {
1024 : # ifdef PARALLEL_MARK
1025 : {
1026 : char * markers_string = GETENV("GC_MARKERS");
1027 : if (markers_string != NULL) {
1028 : GC_markers = atoi(markers_string);
1029 : if (GC_markers > MAX_MARKERS) {
1030 : WARN("Limiting number of mark threads\n", 0);
1031 : GC_markers = MAX_MARKERS;
1032 : }
1033 : } else {
1034 : GC_markers = GC_nprocs;
1035 : if (GC_markers >= MAX_MARKERS)
1036 : GC_markers = MAX_MARKERS; /* silently limit GC_markers value */
1037 : }
1038 : }
1039 : # endif
1040 : }
1041 : # ifdef PARALLEL_MARK
1042 : if (GC_print_stats) {
1043 : GC_log_printf(
1044 : "Number of processors = %ld, number of marker threads = %ld\n",
1045 : GC_nprocs, GC_markers);
1046 : }
1047 : if (GC_markers <= 1) {
1048 : GC_parallel = FALSE;
1049 : if (GC_print_stats) {
1050 : GC_log_printf("Single marker thread, turning off parallel marking\n");
1051 : }
1052 : } else {
1053 : GC_parallel = TRUE;
1054 : /* Disable true incremental collection, but generational is OK. */
1055 : GC_time_limit = GC_TIME_UNLIMITED;
1056 : }
1057 : /* If we are using a parallel marker, actually start helper threads. */
1058 : if (GC_parallel) {
1059 : start_mark_threads();
1060 : }
1061 : # endif
1062 : }
1063 :
1064 : /* Perform all initializations, including those that */
1065 : /* may require allocation. */
1066 : /* Called without allocation lock. */
1067 : /* Must be called before a second thread is created. */
1068 : /* Did we say it's called without the allocation lock? */
1069 163 : GC_INNER void GC_init_parallel(void)
1070 : {
1071 : # if defined(THREAD_LOCAL_ALLOC)
1072 : DCL_LOCK_STATE;
1073 : # endif
1074 163 : if (parallel_initialized) return;
1075 163 : parallel_initialized = TRUE;
1076 :
1077 : /* GC_init() calls us back, so set flag first. */
1078 163 : if (!GC_is_initialized) GC_init();
1079 : /* Initialize thread local free lists if used. */
1080 : # if defined(THREAD_LOCAL_ALLOC)
1081 163 : LOCK();
1082 163 : GC_init_thread_local(&(GC_lookup_thread(pthread_self())->tlfs));
1083 163 : UNLOCK();
1084 : # endif
1085 : }
1086 :
1087 : #ifndef GC_NO_PTHREAD_SIGMASK
1088 0 : GC_API int WRAP_FUNC(pthread_sigmask)(int how, const sigset_t *set,
1089 : sigset_t *oset)
1090 : {
1091 : sigset_t fudged_set;
1092 :
1093 : INIT_REAL_SYMS();
1094 0 : if (set != NULL && (how == SIG_BLOCK || how == SIG_SETMASK)) {
1095 0 : fudged_set = *set;
1096 0 : sigdelset(&fudged_set, SIG_SUSPEND);
1097 0 : set = &fudged_set;
1098 : }
1099 0 : return(REAL_FUNC(pthread_sigmask)(how, set, oset));
1100 : }
1101 : #endif /* !GC_NO_PTHREAD_SIGMASK */
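/* Note: the wrapper above always removes SIG_SUSPEND from blocked-    */
/* signal sets installed via SIG_BLOCK or SIG_SETMASK, so the          */
/* collector retains the ability to suspend this thread even if the    */
/* client asks to block all signals.                                   */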
1102 :
1103 : /* Wrapper for functions that are likely to block for an appreciable */
1104 : /* length of time. */
1105 :
1106 : /*ARGSUSED*/
1107 0 : GC_INNER void GC_do_blocking_inner(ptr_t data, void * context)
1108 : {
1109 0 : struct blocking_data * d = (struct blocking_data *) data;
1110 : GC_thread me;
1111 : # if defined(SPARC) || defined(IA64)
1112 : ptr_t stack_ptr = GC_save_regs_in_stack();
1113 : # endif
1114 : # if defined(GC_DARWIN_THREADS) && !defined(DARWIN_DONT_PARSE_STACK)
1115 : GC_bool topOfStackUnset = FALSE;
1116 : # endif
1117 : DCL_LOCK_STATE;
1118 :
1119 0 : LOCK();
1120 0 : me = GC_lookup_thread(pthread_self());
1121 : GC_ASSERT(!(me -> thread_blocked));
1122 : # ifdef SPARC
1123 : me -> stop_info.stack_ptr = stack_ptr;
1124 : # else
1125 0 : me -> stop_info.stack_ptr = GC_approx_sp();
1126 : # endif
1127 : # if defined(GC_DARWIN_THREADS) && !defined(DARWIN_DONT_PARSE_STACK)
1128 : if (me -> topOfStack == NULL) {
1129 : /* GC_do_blocking_inner is not called recursively, */
1130 : /* so topOfStack should be computed now. */
1131 : topOfStackUnset = TRUE;
1132 : me -> topOfStack = GC_FindTopOfStack(0);
1133 : }
1134 : # endif
1135 : # ifdef IA64
1136 : me -> backing_store_ptr = stack_ptr;
1137 : # endif
1138 0 : me -> thread_blocked = (unsigned char)TRUE;
1139 : /* Save context here if we want to support precise stack marking */
1140 0 : UNLOCK();
1141 0 : d -> client_data = (d -> fn)(d -> client_data);
1142 0 : LOCK(); /* This will block if the world is stopped. */
1143 0 : me -> thread_blocked = FALSE;
1144 : # if defined(GC_DARWIN_THREADS) && !defined(DARWIN_DONT_PARSE_STACK)
1145 : if (topOfStackUnset)
1146 : me -> topOfStack = NULL; /* make topOfStack unset again */
1147 : # endif
1148 0 : UNLOCK();
1149 0 : }
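/* A minimal client-side usage sketch for GC_do_blocking(), which      */
/* funnels into GC_do_blocking_inner() above.  The read_data and       */
/* do_read names below are hypothetical, not part of the GC API:       */
#if 0
  struct read_data { int fd; void *buf; size_t len; ssize_t result; };

  static void *do_read(void *client_data)
  {
    struct read_data *d = (struct read_data *)client_data;

    /* May block indefinitely; the world may be stopped around us. */
    d -> result = read(d -> fd, d -> buf, d -> len);
    return d;
  }

  /* Call site: GC_do_blocking(do_read, &d); no GC functions and no */
  /* manipulation of pointers into the GC heap inside do_read.      */
#endif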
1150 :
1151 : /* GC_call_with_gc_active() has the opposite to GC_do_blocking() */
1152 : /* functionality. It might be called from a user function invoked by */
1153 : /* GC_do_blocking() to temporarily allow calling any GC function */
1154 : /* and/or manipulating pointers to the garbage collected heap. */
1155 0 : GC_API void * GC_CALL GC_call_with_gc_active(GC_fn_type fn,
1156 : void * client_data)
1157 : {
1158 : struct GC_traced_stack_sect_s stacksect;
1159 : GC_thread me;
1160 : DCL_LOCK_STATE;
1161 :
1162 0 : LOCK(); /* This will block if the world is stopped. */
1163 0 : me = GC_lookup_thread(pthread_self());
1164 :
1165 : /* Adjust our stack base value (this could be necessary unless */
1166 : /* GC_get_stack_base() was used, and it returned GC_SUCCESS). */
1167 0 : if ((me -> flags & MAIN_THREAD) == 0) {
1168 : GC_ASSERT(me -> stack_end != NULL);
1169 0 : if (me -> stack_end HOTTER_THAN (ptr_t)(&stacksect))
1170 0 : me -> stack_end = (ptr_t)(&stacksect);
1171 : } else {
1172 : /* The original stack. */
1173 0 : if (GC_stackbottom HOTTER_THAN (ptr_t)(&stacksect))
1174 0 : GC_stackbottom = (ptr_t)(&stacksect);
1175 : }
1176 :
1177 0 : if (!me->thread_blocked) {
1178 : /* We are not inside GC_do_blocking() - do nothing more. */
1179 0 : UNLOCK();
1180 0 : return fn(client_data);
1181 : }
1182 :
1183 : /* Setup new "stack section". */
1184 0 : stacksect.saved_stack_ptr = me -> stop_info.stack_ptr;
1185 : # ifdef IA64
1186 : /* This is the same as in GC_call_with_stack_base(). */
1187 : stacksect.backing_store_end = GC_save_regs_in_stack();
1188 : /* Unnecessarily flushes register stack, */
1189 : /* but that probably doesn't hurt. */
1190 : stacksect.saved_backing_store_ptr = me -> backing_store_ptr;
1191 : # endif
1192 0 : stacksect.prev = me -> traced_stack_sect;
1193 0 : me -> thread_blocked = FALSE;
1194 0 : me -> traced_stack_sect = &stacksect;
1195 :
1196 0 : UNLOCK();
1197 0 : client_data = fn(client_data);
1198 : GC_ASSERT(me -> thread_blocked == FALSE);
1199 : GC_ASSERT(me -> traced_stack_sect == &stacksect);
1200 :
1201 : /* Restore original "stack section". */
1202 0 : LOCK();
1203 0 : me -> traced_stack_sect = stacksect.prev;
1204 : # ifdef IA64
1205 : me -> backing_store_ptr = stacksect.saved_backing_store_ptr;
1206 : # endif
1207 0 : me -> thread_blocked = (unsigned char)TRUE;
1208 0 : me -> stop_info.stack_ptr = stacksect.saved_stack_ptr;
1209 0 : UNLOCK();
1210 :
1211 0 : return client_data; /* result */
1212 : }
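/* A hedged sketch (helper names are hypothetical) of the intended     */
/* pairing: from inside a GC_do_blocking() callback,                   */
/* GC_call_with_gc_active() temporarily restores the thread to the     */
/* "active" state, so GC allocation becomes legal again:               */
#if 0
  static void *touch_heap(void *client_data)
  {
    return GC_MALLOC(64); /* legal here: the thread is "active" again */
  }

  static void *mostly_blocking(void *client_data)
  {
    /* ... long blocking work, no GC heap access ... */
    void *obj = GC_call_with_gc_active(touch_heap, NULL);
    /* ... more blocking work ... */
    return obj;
  }
  /* Call site: GC_do_blocking(mostly_blocking, NULL); */
#endif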
1213 :
1214 151 : STATIC void GC_unregister_my_thread_inner(GC_thread me)
1215 : {
1216 : # ifdef DEBUG_THREADS
1217 : GC_log_printf("Unregistering thread 0x%x\n", (unsigned)pthread_self());
1218 : # endif
1219 : GC_ASSERT(!(me -> flags & FINISHED));
1220 : # if defined(THREAD_LOCAL_ALLOC)
1221 151 : GC_destroy_thread_local(&(me->tlfs));
1222 : # endif
1223 : # if defined(GC_PTHREAD_EXIT_ATTRIBUTE) || !defined(GC_NO_PTHREAD_CANCEL)
1224 : /* Handle DISABLED_GC flag which is set by the */
1225 : /* intercepted pthread_cancel or pthread_exit. */
1226 151 : if ((me -> flags & DISABLED_GC) != 0) {
1227 0 : GC_dont_gc--;
1228 : }
1229 : # endif
1230 151 : if (me -> flags & DETACHED) {
1231 151 : GC_delete_thread(pthread_self());
1232 : } else {
1233 0 : me -> flags |= FINISHED;
1234 : }
1235 : # if defined(THREAD_LOCAL_ALLOC)
1236 : /* It is required to call remove_specific defined in specific.c. */
1237 : GC_remove_specific(GC_thread_key);
1238 : # endif
1239 151 : }
1240 :
1241 0 : GC_API int GC_CALL GC_unregister_my_thread(void)
1242 : {
1243 0 : pthread_t self = pthread_self();
1244 : IF_CANCEL(int cancel_state;)
1245 : DCL_LOCK_STATE;
1246 :
1247 0 : LOCK();
1248 0 : DISABLE_CANCEL(cancel_state);
1249 : /* Wait for any GC that may be marking from our stack to */
1250 : /* complete before we remove this thread. */
1251 0 : GC_wait_for_gc_completion(FALSE);
1252 0 : GC_unregister_my_thread_inner(GC_lookup_thread(self));
1253 0 : RESTORE_CANCEL(cancel_state);
1254 0 : UNLOCK();
1255 0 : return GC_SUCCESS;
1256 : }
1257 :
1258 : /* Called at thread exit. */
1259 : /* Never called for main thread. That's OK, since it */
1260 : /* results in at most a tiny one-time leak. And */
1261 : /* linuxthreads doesn't reclaim the main thread's */
1262 : /* resources or id anyway. */
1263 151 : GC_INNER void GC_thread_exit_proc(void *arg)
1264 : {
1265 : IF_CANCEL(int cancel_state;)
1266 : DCL_LOCK_STATE;
1267 :
1268 151 : LOCK();
1269 151 : DISABLE_CANCEL(cancel_state);
1270 151 : GC_wait_for_gc_completion(FALSE);
1271 151 : GC_unregister_my_thread_inner((GC_thread)arg);
1272 151 : RESTORE_CANCEL(cancel_state);
1273 151 : UNLOCK();
1274 151 : }
1275 :
1276 0 : GC_API int WRAP_FUNC(pthread_join)(pthread_t thread, void **retval)
1277 : {
1278 : int result;
1279 : GC_thread t;
1280 : DCL_LOCK_STATE;
1281 :
1282 : INIT_REAL_SYMS();
1283 0 : LOCK();
1284 0 : t = GC_lookup_thread(thread);
1285 : /* This is guaranteed to be the intended one, since the thread id */
1286 : /* can't have been recycled by pthreads. */
1287 0 : UNLOCK();
1288 0 : result = REAL_FUNC(pthread_join)(thread, retval);
1289 : # if defined(GC_FREEBSD_THREADS)
1290 : /* On FreeBSD, the wrapped pthread_join() sometimes returns (what
1291 : appears to be) a spurious EINTR which caused the test and real code
1292 : to gratuitously fail. Having looked at system pthread library source
1293 : code, I see how this return code may be generated. In one path of
1294 : code, pthread_join() just returns the errno setting of the thread
1295 : being joined. This does not match the POSIX specification or the
1296 : local man pages thus I have taken the liberty to catch this one
1297 : spurious return value properly conditionalized on GC_FREEBSD_THREADS. */
1298 : if (result == EINTR) result = 0;
1299 : # endif
1300 0 : if (result == 0) {
1301 0 : LOCK();
1302 : /* Here the pthread thread id may have been recycled. */
1303 : GC_ASSERT((t -> flags & FINISHED) != 0);
1304 0 : GC_delete_gc_thread(t);
1305 0 : UNLOCK();
1306 : }
1307 0 : return result;
1308 : }
1309 :
1310 0 : GC_API int WRAP_FUNC(pthread_detach)(pthread_t thread)
1311 : {
1312 : int result;
1313 : GC_thread t;
1314 : DCL_LOCK_STATE;
1315 :
1316 : INIT_REAL_SYMS();
1317 0 : LOCK();
1318 0 : t = GC_lookup_thread(thread);
1319 0 : UNLOCK();
1320 0 : result = REAL_FUNC(pthread_detach)(thread);
1321 0 : if (result == 0) {
1322 0 : LOCK();
1323 0 : t -> flags |= DETACHED;
1324 : /* Here the pthread thread id may have been recycled. */
1325 0 : if ((t -> flags & FINISHED) != 0) {
1326 0 : GC_delete_gc_thread(t);
1327 : }
1328 0 : UNLOCK();
1329 : }
1330 0 : return result;
1331 : }
1332 :
1333 : #ifndef GC_NO_PTHREAD_CANCEL
1334 : /* We should deal with the fact that apparently on Solaris and, */
1335 : /* probably, on some Linux kernels we can't collect while a thread is */
1336 : /* exiting, since signals aren't handled properly. This currently */
1337 : /* gives rise to deadlocks. The only workaround seen is to intercept */
1338 : /* pthread_cancel() and pthread_exit(), and disable the collections */
1339 : /* until the thread exit handler is called. That's ugly, because we */
1340 : /* risk growing the heap unnecessarily. But it seems that we don't */
1341 : /* really have an option in that the process is not in a fully */
1342 : /* functional state while a thread is exiting. */
1343 0 : GC_API int WRAP_FUNC(pthread_cancel)(pthread_t thread)
1344 : {
1345 : # ifdef CANCEL_SAFE
1346 : GC_thread t;
1347 : DCL_LOCK_STATE;
1348 : # endif
1349 :
1350 : INIT_REAL_SYMS();
1351 : # ifdef CANCEL_SAFE
1352 0 : LOCK();
1353 0 : t = GC_lookup_thread(thread);
1354 : /* We test DISABLED_GC because pthread_exit could be called at */
1355 : /* the same time. (If t is NULL then pthread_cancel should */
1356 : /* return ESRCH.) */
1357 0 : if (t != NULL && (t -> flags & DISABLED_GC) == 0) {
1358 0 : t -> flags |= DISABLED_GC;
1359 0 : GC_dont_gc++;
1360 : }
1361 0 : UNLOCK();
1362 : # endif
1363 0 : return REAL_FUNC(pthread_cancel)(thread);
1364 : }
1365 : #endif /* !GC_NO_PTHREAD_CANCEL */
1366 :
1367 : #ifdef GC_PTHREAD_EXIT_ATTRIBUTE
1368 0 : GC_API GC_PTHREAD_EXIT_ATTRIBUTE void WRAP_FUNC(pthread_exit)(void *retval)
1369 : {
1370 : GC_thread me;
1371 : DCL_LOCK_STATE;
1372 :
1373 : INIT_REAL_SYMS();
1374 0 : LOCK();
1375 0 : me = GC_lookup_thread(pthread_self());
1376 : /* We test DISABLED_GC because someone else could call */
1377 : /* pthread_cancel at the same time. */
1378 0 : if (me != 0 && (me -> flags & DISABLED_GC) == 0) {
1379 0 : me -> flags |= DISABLED_GC;
1380 0 : GC_dont_gc++;
1381 : }
1382 0 : UNLOCK();
1383 :
1384 : # ifdef NACL
1385 : /* Native Client doesn't support pthread cleanup functions, */
1386 : /* so cleanup the thread here. */
1387 : GC_thread_exit_proc(0);
1388 : # endif
1389 :
1390 0 : REAL_FUNC(pthread_exit)(retval);
1391 : }
1392 : #endif /* GC_PTHREAD_EXIT_ATTRIBUTE */
1393 :
1394 : GC_INNER GC_bool GC_in_thread_creation = FALSE;
1395 : /* Protected by allocation lock. */
1396 :
1397 499 : GC_INLINE void GC_record_stack_base(GC_thread me,
1398 : const struct GC_stack_base *sb)
1399 : {
1400 : # ifndef GC_DARWIN_THREADS
1401 499 : me -> stop_info.stack_ptr = sb -> mem_base;
1402 : # endif
1403 499 : me -> stack_end = sb -> mem_base;
1404 499 : if (me -> stack_end == NULL)
1405 0 : ABORT("Bad stack base in GC_register_my_thread");
1406 : # ifdef IA64
1407 : me -> backing_store_end = sb -> reg_base;
1408 : # endif
1409 499 : }
1410 :
1411 499 : STATIC GC_thread GC_register_my_thread_inner(const struct GC_stack_base *sb,
1412 : pthread_t my_pthread)
1413 : {
1414 : GC_thread me;
1415 :
1416 499 : GC_in_thread_creation = TRUE; /* OK to collect from unknown thread. */
1417 499 : me = GC_new_thread(my_pthread);
1418 499 : GC_in_thread_creation = FALSE;
1419 499 : if (me == 0)
1420 0 : ABORT("Failed to allocate memory for thread registering");
1421 : # ifdef GC_DARWIN_THREADS
1422 : me -> stop_info.mach_thread = mach_thread_self();
1423 : # endif
1424 499 : GC_record_stack_base(me, sb);
1425 : # ifdef GC_EXPLICIT_SIGNALS_UNBLOCK
1426 : /* Since this could be executed from a detached thread */
1427 : /* destructor, our signals might already be blocked. */
1428 : GC_unblock_gc_signals();
1429 : # endif
1430 499 : return me;
1431 : }
1432 :
1433 0 : GC_API void GC_CALL GC_allow_register_threads(void)
1434 : {
1435 : /* Check GC is initialized and the current thread is registered. */
1436 : GC_ASSERT(GC_lookup_thread(pthread_self()) != 0);
1437 :
1438 0 : GC_need_to_lock = TRUE; /* We are multi-threaded now. */
1439 0 : }
1440 :
1441 499 : GC_API int GC_CALL GC_register_my_thread(const struct GC_stack_base *sb)
1442 : {
1443 499 : pthread_t self = pthread_self();
1444 : GC_thread me;
1445 : DCL_LOCK_STATE;
1446 :
1447 499 : if (GC_need_to_lock == FALSE)
1448 0 : ABORT("Threads explicit registering is not previously enabled");
1449 :
1450 499 : LOCK();
1451 499 : me = GC_lookup_thread(self);
1452 499 : if (0 == me) {
1453 0 : me = GC_register_my_thread_inner(sb, self);
1454 0 : me -> flags |= DETACHED;
1455 : /* Treat as detached, since we do not need to worry about */
1456 : /* pointer results. */
1457 : # if defined(THREAD_LOCAL_ALLOC)
1458 0 : GC_init_thread_local(&(me->tlfs));
1459 : # endif
1460 0 : UNLOCK();
1461 0 : return GC_SUCCESS;
1462 499 : } else if ((me -> flags & FINISHED) != 0) {
1463 : /* This code is executed when a thread is registered from the */
1464 : /* client thread key destructor. */
1465 0 : GC_record_stack_base(me, sb);
1466 0 : me -> flags &= ~FINISHED; /* but not DETACHED */
1467 : # ifdef GC_EXPLICIT_SIGNALS_UNBLOCK
1468 : /* Since this could be executed from a thread destructor, */
1469 : /* our signals might be blocked. */
1470 : GC_unblock_gc_signals();
1471 : # endif
1472 : # if defined(THREAD_LOCAL_ALLOC)
1473 0 : GC_init_thread_local(&(me->tlfs));
1474 : # endif
1475 0 : UNLOCK();
1476 0 : return GC_SUCCESS;
1477 : } else {
1478 499 : UNLOCK();
1479 499 : return GC_DUPLICATE;
1480 : }
1481 : }
1482 :
1483 : struct start_info {
1484 : void *(*start_routine)(void *);
1485 : void *arg;
1486 : word flags;
1487 : sem_t registered; /* 1 ==> in our thread table, but */
1488 : /* parent hasn't yet noticed. */
1489 : };
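/* Lifetime of the semaphore above: the creating thread initializes    */
/* "registered", calls the real pthread_create(), and then             */
/* sem_wait()s; the child sem_post()s from                             */
/* GC_start_rtn_prepare_thread() once it has been added to GC_threads, */
/* after which the parent may safely destroy the semaphore and free    */
/* the start_info.                                                     */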
1490 :
1491 : /* Called from GC_inner_start_routine(). Defined in this file to */
1492 : /* minimize the number of include files in pthread_start.c (because */
1493 : /* sem_t and sem_post() are not used that file directly). */
1494 499 : GC_INNER GC_thread GC_start_rtn_prepare_thread(void *(**pstart)(void *),
1495 : void **pstart_arg,
1496 : struct GC_stack_base *sb, void *arg)
1497 : {
1498 499 : struct start_info * si = arg;
1499 499 : pthread_t self = pthread_self();
1500 : GC_thread me;
1501 : DCL_LOCK_STATE;
1502 :
1503 : # ifdef DEBUG_THREADS
1504 : GC_log_printf("Starting thread 0x%x, pid = %ld, sp = %p\n",
1505 : (unsigned)self, (long)getpid(), &arg);
1506 : # endif
1507 499 : LOCK();
1508 499 : me = GC_register_my_thread_inner(sb, self);
1509 499 : me -> flags = si -> flags;
1510 : # if defined(THREAD_LOCAL_ALLOC)
1511 499 : GC_init_thread_local(&(me->tlfs));
1512 : # endif
1513 499 : UNLOCK();
1514 499 : *pstart = si -> start_routine;
1515 : # ifdef DEBUG_THREADS
1516 : GC_log_printf("start_routine = %p\n", (void *)(signed_word)(*pstart));
1517 : # endif
1518 499 : *pstart_arg = si -> arg;
1519 499 : sem_post(&(si -> registered)); /* Last action on si. */
1520 : /* OK to deallocate. */
1521 499 : return me;
1522 : }
1523 :
1524 : void * GC_CALLBACK GC_inner_start_routine(struct GC_stack_base *sb, void *arg);
1525 : /* defined in pthread_start.c */
1526 :
1527 499 : STATIC void * GC_start_routine(void * arg)
1528 : {
1529 : # ifdef INCLUDE_LINUX_THREAD_DESCR
1530 : struct GC_stack_base sb;
1531 :
1532 : # ifdef REDIRECT_MALLOC
1533 : /* GC_get_stack_base may call pthread_getattr_np, which can */
1534 : /* unfortunately call realloc, which may allocate from an */
1535 : /* unregistered thread. This is unpleasant, since it might */
1536 : /* force heap growth (or, even, heap overflow). */
1537 : GC_disable();
1538 : # endif
1539 : if (GC_get_stack_base(&sb) != GC_SUCCESS)
1540 : ABORT("Failed to get thread stack base");
1541 : # ifdef REDIRECT_MALLOC
1542 : GC_enable();
1543 : # endif
1544 : return GC_inner_start_routine(&sb, arg);
1545 : # else
1546 499 : return GC_call_with_stack_base(GC_inner_start_routine, arg);
1547 : # endif
1548 : }
1549 :
1550 499 : GC_API int WRAP_FUNC(pthread_create)(pthread_t *new_thread,
1551 : GC_PTHREAD_CREATE_CONST pthread_attr_t *attr,
1552 : void *(*start_routine)(void *), void *arg)
1553 : {
1554 : int result;
1555 : int detachstate;
1556 499 : word my_flags = 0;
1557 : struct start_info * si;
1558 : DCL_LOCK_STATE;
1559 : /* This is otherwise saved only in an area mmapped by the thread */
1560 : /* library, which isn't visible to the collector. */
1561 :
1562 : /* We resist the temptation to muck with the stack size here, */
1563 : /* even if the default is unreasonably small. That's the client's */
1564 : /* responsibility. */
1565 :
1566 : INIT_REAL_SYMS();
1567 499 : LOCK();
1568 499 : si = (struct start_info *)GC_INTERNAL_MALLOC(sizeof(struct start_info),
1569 : NORMAL);
1570 499 : UNLOCK();
1571 499 : if (!parallel_initialized) GC_init_parallel();
1572 499 : if (0 == si &&
1573 0 : (si = (struct start_info *)
1574 0 : (*GC_get_oom_fn())(sizeof(struct start_info))) == 0)
1575 0 : return(ENOMEM);
1576 499 : if (sem_init(&(si -> registered), GC_SEM_INIT_PSHARED, 0) != 0)
1577 0 : ABORT("sem_init failed");
1578 :
1579 499 : si -> start_routine = start_routine;
1580 499 : si -> arg = arg;
1581 499 : LOCK();
1582 499 : if (!GC_thr_initialized) GC_thr_init();
1583 : # ifdef GC_ASSERTIONS
1584 : {
1585 : size_t stack_size = 0;
1586 : if (NULL != attr) {
1587 : pthread_attr_getstacksize(attr, &stack_size);
1588 : }
1589 : if (0 == stack_size) {
1590 : pthread_attr_t my_attr;
1591 : pthread_attr_init(&my_attr);
1592 : pthread_attr_getstacksize(&my_attr, &stack_size);
1593 : }
1594 : /* On Solaris 10, with default attr initialization, */
1595 : /* stack_size remains 0. Fudge it. */
1596 : if (0 == stack_size) {
1597 : # ifndef SOLARIS
1598 : WARN("Failed to get stack size for assertion checking\n", 0);
1599 : # endif
1600 : stack_size = 1000000;
1601 : }
1602 : # ifdef PARALLEL_MARK
1603 : GC_ASSERT(stack_size >= (8*HBLKSIZE*sizeof(word)));
1604 : # else
1605 : /* FreeBSD-5.3/Alpha: default pthread stack is 64K, */
1606 : /* HBLKSIZE=8192, sizeof(word)=8 */
1607 : GC_ASSERT(stack_size >= 65536);
1608 : # endif
1609 :         /* Our threads may need to do some work for the GC;   */
1610 :         /* ridiculously small stacks won't suffice for that   */
1611 :         /* (and probably not for the client's code either).   */
1612 : }
1613 : # endif
1614 499 : if (NULL == attr) {
1615 0 : detachstate = PTHREAD_CREATE_JOINABLE;
1616 : } else {
1617 499 : pthread_attr_getdetachstate(attr, &detachstate);
1618 : }
1619 499 : if (PTHREAD_CREATE_DETACHED == detachstate) my_flags |= DETACHED;
1620 499 : si -> flags = my_flags;
1621 499 : UNLOCK();
1622 : # ifdef DEBUG_THREADS
1623 : GC_log_printf("About to start new thread from thread 0x%x\n",
1624 : (unsigned)pthread_self());
1625 : # endif
1626 499 : GC_need_to_lock = TRUE;
1627 :
1628 499 : result = REAL_FUNC(pthread_create)(new_thread, attr, GC_start_routine, si);
1629 :
1630 : # ifdef DEBUG_THREADS
1631 : GC_log_printf("Started thread 0x%x\n", (unsigned)(*new_thread));
1632 : # endif
1633 : /* Wait until child has been added to the thread table. */
1634 : /* This also ensures that we hold onto si until the child is done */
1635 : /* with it. Thus it doesn't matter whether it is otherwise */
1636 : /* visible to the collector. */
1637 499 : if (0 == result) {
1638 : IF_CANCEL(int cancel_state;)
1639 499 : DISABLE_CANCEL(cancel_state);
1640 : /* pthread_create is not a cancellation point. */
1641 998 : while (0 != sem_wait(&(si -> registered))) {
1642 0 : if (EINTR != errno) ABORT("sem_wait failed");
1643 : }
1644 499 : RESTORE_CANCEL(cancel_state);
1645 : }
1646 499 : sem_destroy(&(si -> registered));
1647 499 : LOCK();
1648 499 : GC_INTERNAL_FREE(si);
1649 499 : UNLOCK();
1650 :
1651 499 : return(result);
1652 : }
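 :
 : /* A minimal client-side sketch of how the wrapper above is reached.  */
 : /* It assumes the usual gc.h redirection (GC_THREADS defined before   */
 : /* including gc.h, so pthread_create resolves to this wrapper); the   */
 : /* worker body is purely illustrative.                                */
 : /*                                                                    */
 : /*   #define GC_THREADS                                               */
 : /*   #include <gc.h>                                                  */
 : /*   #include <pthread.h>                                             */
 : /*                                                                    */
 : /*   static void *worker(void *arg)                                   */
 : /*   {                                                                */
 : /*     return GC_MALLOC(64);   // safe: this thread was registered    */
 : /*   }                         // with the collector before starting  */
 : /*                                                                    */
 : /*   int main(void)                                                   */
 : /*   {                                                                */
 : /*     pthread_t t;                                                   */
 : /*     GC_INIT();                                                     */
 : /*     if (pthread_create(&t, NULL, worker, NULL) != 0) return 1;     */
 : /*     return pthread_join(t, NULL);                                  */
 : /*   }                                                                */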
1653 :
1654 : #if defined(USE_SPIN_LOCK) || !defined(NO_PTHREAD_TRYLOCK)
1655 : /* Spend a few cycles in a way that can't introduce contention with */
1656 : /* other threads. */
1657 4336 : STATIC void GC_pause(void)
1658 : {
1659 : int i;
1660 : # if !defined(__GNUC__) || defined(__INTEL_COMPILER)
1661 : volatile word dummy = 0;
1662 : # endif
1663 :
1664 47696 : for (i = 0; i < 10; ++i) {
1665 : # if defined(__GNUC__) && !defined(__INTEL_COMPILER)
1666 43360 : __asm__ __volatile__ (" " : : : "memory");
1667 : # else
1668 : /* Something that's unlikely to be optimized away. */
1669 : GC_noop(++dummy);
1670 : # endif
1671 : }
1672 4336 : }
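 : /* (The empty asm with a "memory" clobber above is a compiler        */
 : /* barrier: it keeps the loop from being optimized away without      */
 : /* emitting any instructions or touching shared cache lines.)        */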
1673 : #endif
1674 :
1675 : #define SPIN_MAX 128    /* Maximum number of calls to GC_pause before */
1676 :                         /* giving up.                                 */
1677 :
1678 : GC_INNER volatile GC_bool GC_collecting = 0;
1679 : /* A hint that we're in the collector and */
1680 : /* holding the allocation lock for an */
1681 : /* extended period. */
1682 :
1683 : #if (!defined(USE_SPIN_LOCK) && !defined(NO_PTHREAD_TRYLOCK)) \
1684 : || defined(PARALLEL_MARK)
1685 : /* If we don't want to use the below spinlock implementation, either */
1686 : /* because we don't have a GC_test_and_set implementation, or because */
1687 : /* we don't want to risk sleeping, we can still try spinning on */
1688 : /* pthread_mutex_trylock for a while. This appears to be very */
1689 : /* beneficial in many cases. */
1690 : /* I suspect that under high contention this is nearly always better */
1691 : /* than the spin lock. But it's a bit slower on a uniprocessor. */
1692 : /* Hence we still default to the spin lock. */
1693 : /* This is also used to acquire the mark lock for the parallel */
1694 : /* marker. */
1695 :
1696 : /* Here we use a strict exponential backoff scheme. I don't know */
1697 : /* whether that's better or worse than the above. We eventually */
1698 : /* yield by calling pthread_mutex_lock(); it never makes sense to */
1699 : /* explicitly sleep. */
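 : /* Concretely, with SPIN_MAX = 128 the trylock loop below pauses     */
 : /* 1, 2, 4, ..., 128 times between retries, i.e. at most             */
 : /* 1 + 2 + ... + 128 = 255 GC_pause calls (8 trylock attempts)       */
 : /* before falling back to a blocking pthread_mutex_lock.             */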
1700 :
1701 : /* #define LOCK_STATS */
1702 : /* Note that LOCK_STATS requires AO_HAVE_test_and_set. */
1703 : #ifdef LOCK_STATS
1704 : AO_t GC_spin_count = 0;
1705 : AO_t GC_block_count = 0;
1706 : AO_t GC_unlocked_count = 0;
1707 : #endif
1708 :
1709 185 : STATIC void GC_generic_lock(pthread_mutex_t * lock)
1710 : {
1711 : #ifndef NO_PTHREAD_TRYLOCK
1712 185 : unsigned pause_length = 1;
1713 : unsigned i;
1714 :
1715 185 : if (0 == pthread_mutex_trylock(lock)) {
1716 : # ifdef LOCK_STATS
1717 : (void)AO_fetch_and_add1(&GC_unlocked_count);
1718 : # endif
1719 89 : return;
1720 : }
1721 364 : for (; pause_length <= SPIN_MAX; pause_length <<= 1) {
1722 4695 : for (i = 0; i < pause_length; ++i) {
1723 4336 : GC_pause();
1724 : }
1725 359 : switch(pthread_mutex_trylock(lock)) {
1726 : case 0:
1727 : # ifdef LOCK_STATS
1728 : (void)AO_fetch_and_add1(&GC_spin_count);
1729 : # endif
1730 91 : return;
1731 : case EBUSY:
1732 268 : break;
1733 : default:
1734 0 : ABORT("Unexpected error from pthread_mutex_trylock");
1735 : }
1736 : }
1737 : #endif /* !NO_PTHREAD_TRYLOCK */
1738 : # ifdef LOCK_STATS
1739 : (void)AO_fetch_and_add1(&GC_block_count);
1740 : # endif
1741 5 : pthread_mutex_lock(lock);
1742 : }
1743 :
1744 : #endif /* !USE_SPIN_LOCK || ... */
1745 :
1746 : #if defined(USE_SPIN_LOCK)
1747 :
1748 : /* Reasonably fast spin locks. Basically the same implementation */
1749 : /* as STL alloc.h.  This isn't really the right way to do this,   */
1750 : /* but until the POSIX scheduling mess gets straightened out ...   */
1751 :
1752 : GC_INNER volatile AO_TS_t GC_allocate_lock = AO_TS_INITIALIZER;
1753 :
1754 : GC_INNER void GC_lock(void)
1755 : {
1756 : # define low_spin_max 30 /* spin cycles if we suspect uniprocessor */
1757 : # define high_spin_max SPIN_MAX /* spin cycles for multiprocessor */
1758 : static unsigned spin_max = low_spin_max;
1759 : unsigned my_spin_max;
1760 : static unsigned last_spins = 0;
1761 : unsigned my_last_spins;
1762 : unsigned i;
1763 :
1764 : if (AO_test_and_set_acquire(&GC_allocate_lock) == AO_TS_CLEAR) {
1765 : return;
1766 : }
1767 : my_spin_max = spin_max;
1768 : my_last_spins = last_spins;
1769 : for (i = 0; i < my_spin_max; i++) {
1770 : if (GC_collecting || GC_nprocs == 1) goto yield;
1771 : if (i < my_last_spins/2) {
1772 : GC_pause();
1773 : continue;
1774 : }
1775 : if (AO_test_and_set_acquire(&GC_allocate_lock) == AO_TS_CLEAR) {
1776 : /*
1777 : * got it!
1778 : * Spinning worked. Thus we're probably not being scheduled
1779 : * against the other process with which we were contending.
1780 : * Thus it makes sense to spin longer the next time.
1781 : */
1782 : last_spins = i;
1783 : spin_max = high_spin_max;
1784 : return;
1785 : }
1786 : }
1787 : /* We are probably being scheduled against the other process. Sleep. */
1788 : spin_max = low_spin_max;
1789 : yield:
1790 : for (i = 0;; ++i) {
1791 : if (AO_test_and_set_acquire(&GC_allocate_lock) == AO_TS_CLEAR) {
1792 : return;
1793 : }
1794 : # define SLEEP_THRESHOLD 12
1795 :       /* Under Linux, very short sleeps tend to wait until  */
1796 :       /* the current time quantum expires.  On old Linux    */
1797 :       /* kernels, nanosleep(<= 2 ms) just spins; under      */
1798 :       /* 2.4 kernels this happens only for real-time        */
1799 :       /* processes.  We want to minimize both behaviors     */
1800 :       /* here.                                              */
1801 : if (i < SLEEP_THRESHOLD) {
1802 : sched_yield();
1803 : } else {
1804 : struct timespec ts;
1805 :
1806 : if (i > 24) i = 24;
1807 :             /* Don't wait for more than about 17 msecs       */
1808 :             /* (1 << 24 ns), even under extreme contention.   */
1809 : ts.tv_sec = 0;
1810 : ts.tv_nsec = 1 << i;
1811 : nanosleep(&ts, 0);
1812 : }
1813 : }
1814 : }
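 :
 : /* The adaptive heuristic above, by example: if the lock was last    */
 : /* acquired after 20 spin iterations, the next GC_lock call spends   */
 : /* its first 10 iterations (last_spins/2) in GC_pause alone, so the  */
 : /* test-and-set, and the cache-line traffic it causes, is retried    */
 : /* only once contention has plausibly drained.                       */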
1815 :
1816 : #else  /* !USE_SPIN_LOCK */
1817 196 : GC_INNER void GC_lock(void)
1818 : {
1819 : #ifndef NO_PTHREAD_TRYLOCK
1820 207 : if (1 == GC_nprocs || GC_collecting) {
1821 11 : pthread_mutex_lock(&GC_allocate_ml);
1822 : } else {
1823 185 : GC_generic_lock(&GC_allocate_ml);
1824 : }
1825 : #else /* !NO_PTHREAD_TRYLOCK */
1826 : pthread_mutex_lock(&GC_allocate_ml);
1827 : #endif /* !NO_PTHREAD_TRYLOCK */
1828 196 : }
1829 :
1830 : #endif /* !USE_SPIN_LOCK */
1831 :
1832 : #ifdef PARALLEL_MARK
1833 :
1834 : #ifdef GC_ASSERTIONS
1835 : GC_INNER unsigned long GC_mark_lock_holder = NO_THREAD;
1836 : #endif
1837 :
1838 : #ifdef GLIBC_2_1_MUTEX_HACK
1839 : /* Ugly workaround for a linux threads bug in the final versions */
1840 : /* of glibc2.1. Pthread_mutex_trylock sets the mutex owner */
1841 : /* field even when it fails to acquire the mutex. This causes */
1842 : /* pthread_cond_wait to die. Remove for glibc2.2. */
1843 : /* According to the man page, we should use */
1844 : /* PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP, but that isn't actually */
1845 : /* defined. */
1846 : static pthread_mutex_t mark_mutex =
1847 : {0, 0, 0, PTHREAD_MUTEX_ERRORCHECK_NP, {0, 0}};
1848 : #else
1849 : static pthread_mutex_t mark_mutex = PTHREAD_MUTEX_INITIALIZER;
1850 : #endif
1851 :
1852 : static pthread_cond_t builder_cv = PTHREAD_COND_INITIALIZER;
1853 :
1854 : GC_INNER void GC_acquire_mark_lock(void)
1855 : {
1856 : GC_generic_lock(&mark_mutex);
1857 : # ifdef GC_ASSERTIONS
1858 : GC_mark_lock_holder = NUMERIC_THREAD_ID(pthread_self());
1859 : # endif
1860 : }
1861 :
1862 : GC_INNER void GC_release_mark_lock(void)
1863 : {
1864 : GC_ASSERT(GC_mark_lock_holder == NUMERIC_THREAD_ID(pthread_self()));
1865 : # ifdef GC_ASSERTIONS
1866 : GC_mark_lock_holder = NO_THREAD;
1867 : # endif
1868 : if (pthread_mutex_unlock(&mark_mutex) != 0) {
1869 : ABORT("pthread_mutex_unlock failed");
1870 : }
1871 : }
1872 :
1873 : /* Collector must wait for all free-list builders for 2 reasons:       */
1874 : /* 1) Mark bits may still be getting examined without lock. */
1875 : /* 2) Partial free lists referenced only by locals may not be scanned */
1876 : /* correctly, e.g. if they contain "pointer-free" objects, since the */
1877 : /* free-list link may be ignored. */
1878 : STATIC void GC_wait_builder(void)
1879 : {
1880 : GC_ASSERT(GC_mark_lock_holder == NUMERIC_THREAD_ID(pthread_self()));
1881 : ASSERT_CANCEL_DISABLED();
1882 : # ifdef GC_ASSERTIONS
1883 : GC_mark_lock_holder = NO_THREAD;
1884 : # endif
1885 : if (pthread_cond_wait(&builder_cv, &mark_mutex) != 0) {
1886 : ABORT("pthread_cond_wait failed");
1887 : }
1888 : GC_ASSERT(GC_mark_lock_holder == NO_THREAD);
1889 : # ifdef GC_ASSERTIONS
1890 : GC_mark_lock_holder = NUMERIC_THREAD_ID(pthread_self());
1891 : # endif
1892 : }
1893 :
1894 : GC_INNER void GC_wait_for_reclaim(void)
1895 : {
1896 : GC_acquire_mark_lock();
1897 : while (GC_fl_builder_count > 0) {
1898 : GC_wait_builder();
1899 : }
1900 : GC_release_mark_lock();
1901 : }
1902 :
1903 : GC_INNER void GC_notify_all_builder(void)
1904 : {
1905 : GC_ASSERT(GC_mark_lock_holder == NUMERIC_THREAD_ID(pthread_self()));
1906 : if (pthread_cond_broadcast(&builder_cv) != 0) {
1907 : ABORT("pthread_cond_broadcast failed");
1908 : }
1909 : }
1910 :
1911 : static pthread_cond_t mark_cv = PTHREAD_COND_INITIALIZER;
1912 :
1913 : GC_INNER void GC_wait_marker(void)
1914 : {
1915 : GC_ASSERT(GC_mark_lock_holder == NUMERIC_THREAD_ID(pthread_self()));
1916 : ASSERT_CANCEL_DISABLED();
1917 : # ifdef GC_ASSERTIONS
1918 : GC_mark_lock_holder = NO_THREAD;
1919 : # endif
1920 : if (pthread_cond_wait(&mark_cv, &mark_mutex) != 0) {
1921 : ABORT("pthread_cond_wait failed");
1922 : }
1923 : GC_ASSERT(GC_mark_lock_holder == NO_THREAD);
1924 : # ifdef GC_ASSERTIONS
1925 : GC_mark_lock_holder = NUMERIC_THREAD_ID(pthread_self());
1926 : # endif
1927 : }
1928 :
1929 : GC_INNER void GC_notify_all_marker(void)
1930 : {
1931 : if (pthread_cond_broadcast(&mark_cv) != 0) {
1932 : ABORT("pthread_cond_broadcast failed");
1933 : }
1934 : }
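 :
 : /* A sketch of the intended mark-lock handshake (the waiting side    */
 : /* here is hypothetical; the real marker logic lives elsewhere):     */
 : /*                                                                   */
 : /*   GC_acquire_mark_lock();                                         */
 : /*   while (no_work_available)   // illustrative condition           */
 : /*     GC_wait_marker();         // atomically releases mark_mutex,  */
 : /*                               // then reacquires it on wakeup     */
 : /*   ... take work and mark ...                                      */
 : /*   GC_release_mark_lock();                                         */
 : /*                                                                   */
 : /* A thread that produces new work calls GC_notify_all_marker(),     */
 : /* typically with the mark lock held, to wake all waiters.           */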
1935 :
1936 : #endif /* PARALLEL_MARK */
1937 :
1938 : #endif /* GC_PTHREADS */