Line data Source code
1 : /*
2 : * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
3 : * Copyright (c) 1996 by Silicon Graphics. All rights reserved.
4 : * Copyright (c) 1998 by Fergus Henderson. All rights reserved.
5 : * Copyright (c) 2000-2009 by Hewlett-Packard Development Company.
6 : * All rights reserved.
7 : *
8 : * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
9 : * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
10 : *
11 : * Permission is hereby granted to use or copy this program
12 : * for any purpose, provided the above notices are retained on all copies.
13 : * Permission to modify the code and to distribute modified code is granted,
14 : * provided the above notices are retained, and a notice that the code was
15 : * modified is included with the above copyright notice.
16 : */
17 :
18 : #include "private/pthread_support.h"
19 :
20 : #if defined(GC_PTHREADS) && !defined(GC_WIN32_THREADS) && \
21 : !defined(GC_DARWIN_THREADS)
22 :
23 : #ifdef NACL
24 :
25 : #include <unistd.h>
26 : #include <sys/time.h>
27 :
/* Bookkeeping for the NaCl cooperative stop-the-world scheme:        */
/* threads park themselves in syscall hooks instead of being stopped  */
/* by signals.                                                        */
STATIC int GC_nacl_num_gc_threads = 0;
                        /* Number of threads currently registered     */
                        /* with the collector.                        */
STATIC __thread int GC_nacl_thread_idx = -1;
                        /* This thread's slot in the arrays below;    */
                        /* -1 if not registered.                      */
STATIC int GC_nacl_park_threads_now = 0;
                        /* Nonzero while the world is being stopped.  */
STATIC pthread_t GC_nacl_thread_parker = -1;
                        /* The thread performing the stop (never      */
                        /* parks itself).                             */

GC_INNER __thread GC_thread GC_nacl_gc_thread_self = NULL;

/* Per-slot flags, indexed by GC_nacl_thread_idx.                     */
int GC_nacl_thread_parked[MAX_NACL_GC_THREADS];
int GC_nacl_thread_used[MAX_NACL_GC_THREADS];
37 :
38 : #elif defined(GC_OPENBSD_UTHREADS)
39 :
40 : # include <pthread_np.h>
41 :
42 : #else /* !GC_OPENBSD_UTHREADS && !NACL */
43 :
44 : #include <signal.h>
45 : #include <semaphore.h>
46 : #include <errno.h>
47 : #include <unistd.h>
48 : #include "atomic_ops.h"
49 :
50 : /* It's safe to call original pthread_sigmask() here. */
51 : #undef pthread_sigmask
52 :
#ifdef DEBUG_THREADS
  /* Determine NSIG (one past the highest signal number) when the     */
  /* platform headers do not provide it.                              */
# ifndef NSIG
#   if defined(MAXSIG)
#     define NSIG (MAXSIG+1)
#   elif defined(_NSIG)
#     define NSIG _NSIG
#   elif defined(__SIGRTMAX)
#     define NSIG (__SIGRTMAX+1)
#   else
      /* Was a bare "--> please fix it" which fails with a cryptic    */
      /* syntax error; a #error directive reports the real problem.   */
#     error "NSIG is not defined and cannot be deduced; please define it"
#   endif
# endif /* NSIG */

  /* Debug aid: print (via GC_printf) every signal currently blocked  */
  /* in the calling thread's signal mask.                             */
  void GC_print_sig_mask(void)
  {
    sigset_t blocked;
    int i;

    if (pthread_sigmask(SIG_BLOCK, NULL, &blocked) != 0)
      ABORT("pthread_sigmask failed");
    for (i = 1; i < NSIG; i++) {
      if (sigismember(&blocked, i))
        GC_printf("Signal blocked: %d\n", i);
    }
  }
#endif /* DEBUG_THREADS */
79 :
80 : /* Remove the signals that we want to allow in thread stopping */
81 : /* handler from a set. */
82 326 : STATIC void GC_remove_allowed_signals(sigset_t *set)
83 : {
84 1304 : if (sigdelset(set, SIGINT) != 0
85 326 : || sigdelset(set, SIGQUIT) != 0
86 326 : || sigdelset(set, SIGABRT) != 0
87 652 : || sigdelset(set, SIGTERM) != 0) {
88 0 : ABORT("sigdelset failed");
89 : }
90 :
91 : # ifdef MPROTECT_VDB
92 : /* Handlers write to the thread structure, which is in the heap, */
93 : /* and hence can trigger a protection fault. */
94 652 : if (sigdelset(set, SIGSEGV) != 0
95 : # ifdef SIGBUS
96 652 : || sigdelset(set, SIGBUS) != 0
97 : # endif
98 : ) {
99 0 : ABORT("sigdelset failed");
100 : }
101 : # endif
102 326 : }
103 :
/* Mask used while blocked in sigsuspend() inside the suspend         */
/* handler; built by GC_stop_init (everything masked except the       */
/* signals removed by GC_remove_allowed_signals and the restart       */
/* signal).                                                           */
static sigset_t suspend_handler_mask;

STATIC volatile AO_t GC_stop_count = 0;
                        /* Incremented at the beginning of GC_stop_world. */

STATIC volatile AO_t GC_world_is_stopped = FALSE;
                        /* FALSE ==> it is safe for threads to restart, i.e. */
                        /* they will see another suspend signal before they  */
                        /* are expected to stop (unless they have voluntarily */
                        /* stopped).                                         */

/* Whether GC_stop_world should re-send suspend signals on timeout;   */
/* defaults on for OSF1, overridable via GC_[NO_]RETRY_SIGNALS env    */
/* vars in GC_stop_init.                                              */
#ifdef GC_OSF1_THREADS
  STATIC GC_bool GC_retry_signals = TRUE;
#else
  STATIC GC_bool GC_retry_signals = FALSE;
#endif
120 :
121 : /*
122 : * We use signals to stop threads during GC.
123 : *
124 : * Suspended threads wait in signal handler for SIG_THR_RESTART.
125 : * That's more portable than semaphores or condition variables.
126 : * (We do use sem_post from a signal handler, but that should be portable.)
127 : *
128 : * The thread suspension signal SIG_SUSPEND is now defined in gc_priv.h.
129 : * Note that we can't just stop a thread; we need it to save its stack
130 : * pointer(s) and acknowledge.
131 : */
#ifndef SIG_THR_RESTART
# if defined(GC_HPUX_THREADS) || defined(GC_OSF1_THREADS) \
     || defined(GC_NETBSD_THREADS) || defined(GC_USESIGRT_SIGNALS)
    /* Parenthesize the expansion: the previous bare "_SIGRTMIN + 5"  */
    /* form breaks under operator precedence at some expansion sites. */
#   ifdef _SIGRTMIN
#     define SIG_THR_RESTART (_SIGRTMIN + 5)
#   else
#     define SIG_THR_RESTART (SIGRTMIN + 5)
#   endif
# else
#   define SIG_THR_RESTART SIGXCPU
# endif
#endif
144 :
145 : #define SIGNAL_UNSET (-1)
146 : /* Since SIG_SUSPEND and/or SIG_THR_RESTART could represent */
147 : /* a non-constant expression (e.g., in case of SIGRTMIN), */
148 : /* actual signal numbers are determined by GC_stop_init() */
149 : /* unless manually set (before GC initialization). */
150 : STATIC int GC_sig_suspend = SIGNAL_UNSET;
151 : STATIC int GC_sig_thr_restart = SIGNAL_UNSET;
152 :
153 0 : GC_API void GC_CALL GC_set_suspend_signal(int sig)
154 : {
155 0 : if (GC_is_initialized) return;
156 :
157 0 : GC_sig_suspend = sig;
158 : }
159 :
160 0 : GC_API void GC_CALL GC_set_thr_restart_signal(int sig)
161 : {
162 0 : if (GC_is_initialized) return;
163 :
164 0 : GC_sig_thr_restart = sig;
165 : }
166 :
167 0 : GC_API int GC_CALL GC_get_suspend_signal(void)
168 : {
169 0 : return GC_sig_suspend != SIGNAL_UNSET ? GC_sig_suspend : SIG_SUSPEND;
170 : }
171 :
172 0 : GC_API int GC_CALL GC_get_thr_restart_signal(void)
173 : {
174 0 : return GC_sig_thr_restart != SIGNAL_UNSET
175 : ? GC_sig_thr_restart : SIG_THR_RESTART;
176 : }
177 :
#ifdef GC_EXPLICIT_SIGNALS_UNBLOCK
  /* Some targets (e.g., Solaris) might require this to be called when */
  /* doing thread registering from the thread destructor.              */
  GC_INNER void GC_unblock_gc_signals(void)
  {
    sigset_t gc_sigs;

    GC_ASSERT(GC_sig_suspend != SIGNAL_UNSET);
    GC_ASSERT(GC_sig_thr_restart != SIGNAL_UNSET);
    sigemptyset(&gc_sigs);
    sigaddset(&gc_sigs, GC_sig_suspend);
    sigaddset(&gc_sigs, GC_sig_thr_restart);
    if (pthread_sigmask(SIG_UNBLOCK, &gc_sigs, NULL) != 0)
      ABORT("pthread_sigmask failed");
  }
#endif /* GC_EXPLICIT_SIGNALS_UNBLOCK */
193 :
/* Posted once by each thread's suspend handler after it has saved    */
/* its stack pointer; GC_stop_world waits on it to know all threads   */
/* have acknowledged the stop.                                        */
STATIC sem_t GC_suspend_ack_sem;

#ifdef GC_NETBSD_THREADS
# define GC_NETBSD_THREADS_WORKAROUND
  /* It seems to be necessary to wait until threads have restarted.   */
  /* But it is unclear why that is the case.                          */
  STATIC sem_t GC_restart_ack_sem;
#endif

STATIC void GC_suspend_handler_inner(ptr_t sig_arg, void *context);
204 :
/* Handler installed (by GC_stop_init) for GC_sig_suspend.  It        */
/* forwards to GC_suspend_handler_inner, pushing callee-save          */
/* registers first on platforms where the signal frame does not       */
/* already contain the full context, and preserves errno around the   */
/* whole operation (required for any work done in signal context).    */
#ifdef SA_SIGINFO
  STATIC void GC_suspend_handler(int sig, siginfo_t * info GC_ATTR_UNUSED,
                                 void * context GC_ATTR_UNUSED)
#else
  STATIC void GC_suspend_handler(int sig)
#endif
{
  int old_errno = errno;

# if defined(IA64) || defined(HP_PA) || defined(M68K)
    GC_with_callee_saves_pushed(GC_suspend_handler_inner, (ptr_t)(word)sig);
# else
    /* We believe that in all other cases the full context is already */
    /* in the signal handler frame.                                   */
#   ifndef SA_SIGINFO
      void *context = 0;
#   endif
    GC_suspend_handler_inner((ptr_t)(word)sig, context);
# endif
  errno = old_errno;
}
226 :
/* Core of the suspend handler: records this thread's stack pointer,  */
/* posts GC_suspend_ack_sem to acknowledge the stop, then blocks in   */
/* sigsuspend() until the world restarts (or a new stop cycle         */
/* begins).  Runs in signal context, so only async-signal-safe        */
/* primitives may be used here.                                       */
STATIC void GC_suspend_handler_inner(ptr_t sig_arg,
                                     void * context GC_ATTR_UNUSED)
{
  pthread_t self = pthread_self();
  GC_thread me;
  IF_CANCEL(int cancel_state;)
  /* Snapshot the stop-cycle counter so we can detect both duplicate  */
  /* suspend signals and the start of a newer stop cycle.             */
  AO_t my_stop_count = AO_load(&GC_stop_count);

  if ((signed_word)sig_arg != GC_sig_suspend) {
#   if defined(GC_FREEBSD_THREADS)
      /* Workaround "deferred signal handling" bug in FreeBSD 9.2.    */
      if (0 == sig_arg) return;
#   endif
    ABORT("Bad signal in suspend_handler");
  }

  DISABLE_CANCEL(cancel_state);
      /* pthread_setcancelstate is not defined to be async-signal-safe. */
      /* But the glibc version appears to be in the absence of          */
      /* asynchronous cancellation.  And since this signal handler      */
      /* needs to block on sigsuspend, which is both async-signal-safe  */
      /* and a cancellation point, there seems to be no obvious way     */
      /* out of it.  In fact, it looks to me like an async-signal-safe  */
      /* cancellation point is inherently a problem, unless there is    */
      /* some way to disable cancellation in the handler.               */
# ifdef DEBUG_THREADS
    GC_log_printf("Suspending %p\n", (void *)self);
# endif

  me = GC_lookup_thread(self);
  /* The lookup here is safe, since I'm doing this on behalf  */
  /* of a thread which holds the allocation lock in order     */
  /* to stop the world.  Thus concurrent modification of the  */
  /* data structure is impossible.                            */
  if (me -> stop_info.last_stop_count == my_stop_count) {
      /* Duplicate signal.  OK if we are retrying.    */
      if (!GC_retry_signals) {
          WARN("Duplicate suspend signal in thread %p\n", self);
      }
      RESTORE_CANCEL(cancel_state);
      return;
  }
# ifdef SPARC
    me -> stop_info.stack_ptr = GC_save_regs_in_stack();
# else
    me -> stop_info.stack_ptr = GC_approx_sp();
# endif
# ifdef IA64
    me -> backing_store_ptr = GC_save_regs_in_stack();
# endif

  /* Tell the thread that wants to stop the world that this   */
  /* thread has been stopped.  Note that sem_post() is        */
  /* the only async-signal-safe primitive in LinuxThreads.    */
  sem_post(&GC_suspend_ack_sem);
  me -> stop_info.last_stop_count = my_stop_count;

  /* Wait until that thread tells us to restart by sending    */
  /* this thread a GC_sig_thr_restart signal (should be masked */
  /* at this point thus there is no race).                    */
  /* We do not continue until we receive that signal,         */
  /* but we do not take that as authoritative.  (We may be    */
  /* accidentally restarted by one of the user signals we     */
  /* don't block.)  After we receive the signal, we use a     */
  /* primitive and expensive mechanism to wait until it's     */
  /* really safe to proceed.  Under normal circumstances,     */
  /* this code should not be executed.                        */
  do {
      sigsuspend (&suspend_handler_mask);
  } while (AO_load_acquire(&GC_world_is_stopped)
           && AO_load(&GC_stop_count) == my_stop_count);
  /* If the RESTART signal gets lost, we can still lose.  That should */
  /* be less likely than losing the SUSPEND signal, since we don't do */
  /* much between the sem_post and sigsuspend.                        */
  /* We'd need more handshaking to work around that.                  */
  /* Simply dropping the sigsuspend call should be safe, but is       */
  /* unlikely to be efficient.                                        */

# ifdef DEBUG_THREADS
    GC_log_printf("Continuing %p\n", (void *)self);
# endif
  RESTORE_CANCEL(cancel_state);
}
310 :
/* Handler for GC_sig_thr_restart.  Its delivery interrupts the       */
/* sigsuspend() in GC_suspend_handler_inner, letting the suspended    */
/* thread resume; the handler body itself needs to do almost nothing. */
STATIC void GC_restart_handler(int sig)
{
# if defined(DEBUG_THREADS) || defined(GC_NETBSD_THREADS_WORKAROUND)
    int old_errno = errno;      /* Preserve errno value.      */
# endif

  if (sig != GC_sig_thr_restart)
    ABORT("Bad signal in restart handler");

# ifdef GC_NETBSD_THREADS_WORKAROUND
    /* Acknowledge the restart so GC_start_world can wait for it.     */
    sem_post(&GC_restart_ack_sem);
# endif

  /*
  ** Note: even if we don't do anything useful here,
  ** it would still be necessary to have a signal handler,
  ** rather than ignoring the signals, otherwise
  ** the signals will not be delivered at all, and
  ** will thus not interrupt the sigsuspend() above.
  */

# ifdef DEBUG_THREADS
    GC_log_printf("In GC_restart_handler for %p\n", (void *)pthread_self());
# endif
# if defined(DEBUG_THREADS) || defined(GC_NETBSD_THREADS_WORKAROUND)
    errno = old_errno;
# endif
}
339 :
340 : #endif /* !GC_OPENBSD_UTHREADS && !NACL */
341 :
#ifdef IA64
# define IF_IA64(x) x
#else
# define IF_IA64(x)
#endif
/* We hold allocation lock.  Should do exactly the right thing if the */
/* world is stopped.  Should not fail if it isn't.                    */
/* Walks every registered thread and pushes its stack (and, on IA64,  */
/* its register backing store) to the mark machinery, accumulating    */
/* the total scanned size into GC_total_stacksize.                    */
GC_INNER void GC_push_all_stacks(void)
{
    GC_bool found_me = FALSE;
    size_t nthreads = 0;
    int i;
    GC_thread p;
    ptr_t lo, hi;
    /* On IA64, we also need to scan the register backing store. */
    IF_IA64(ptr_t bs_lo; ptr_t bs_hi;)
    struct GC_traced_stack_sect_s *traced_stack_sect;
    pthread_t self = pthread_self();
    word total_size = 0;

    if (!EXPECT(GC_thr_initialized, TRUE))
      GC_thr_init();
#   ifdef DEBUG_THREADS
      GC_log_printf("Pushing stacks from thread %p\n", (void *)self);
#   endif
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (p -> flags & FINISHED) continue;
        ++nthreads;
        traced_stack_sect = p -> traced_stack_sect;
        if (THREAD_EQUAL(p -> id, self)) {
            /* Our own stack pointer was not saved by a suspend       */
            /* handler; take a fresh approximation.                   */
            GC_ASSERT(!p->thread_blocked);
#           ifdef SPARC
              lo = (ptr_t)GC_save_regs_in_stack();
#           else
              lo = GC_approx_sp();
#           endif
            found_me = TRUE;
            IF_IA64(bs_hi = (ptr_t)GC_save_regs_in_stack();)
        } else {
            /* Use the stack pointer the thread recorded when it was  */
            /* suspended (or voluntarily blocked).                    */
            lo = p -> stop_info.stack_ptr;
            IF_IA64(bs_hi = p -> backing_store_ptr;)
            if (traced_stack_sect != NULL
                    && traced_stack_sect->saved_stack_ptr == lo) {
              /* If the thread has never been stopped since the recent */
              /* GC_call_with_gc_active invocation then skip the top   */
              /* "stack section" as stack_ptr already points to.       */
              traced_stack_sect = traced_stack_sect->prev;
            }
        }
        if ((p -> flags & MAIN_THREAD) == 0) {
            hi = p -> stack_end;
            IF_IA64(bs_lo = p -> backing_store_end);
        } else {
            /* The original stack. */
            hi = GC_stackbottom;
            IF_IA64(bs_lo = BACKING_STORE_BASE;)
        }
#       ifdef DEBUG_THREADS
          GC_log_printf("Stack for thread %p = [%p,%p)\n",
                        (void *)p->id, lo, hi);
#       endif
        if (0 == lo) ABORT("GC_push_all_stacks: sp not set!");
        GC_push_all_stack_sections(lo, hi, traced_stack_sect);
#       ifdef STACK_GROWS_UP
          total_size += lo - hi;
#       else
          total_size += hi - lo; /* lo <= hi */
#       endif
#       ifdef NACL
          /* Push reg_storage as roots, this will cover the reg context. */
          GC_push_all_stack((ptr_t)p -> stop_info.reg_storage,
              (ptr_t)(p -> stop_info.reg_storage + NACL_GC_REG_STORAGE_SIZE));
          total_size += NACL_GC_REG_STORAGE_SIZE * sizeof(ptr_t);
#       endif
#       ifdef IA64
#         ifdef DEBUG_THREADS
            GC_log_printf("Reg stack for thread %p = [%p,%p)\n",
                          (void *)p->id, bs_lo, bs_hi);
#         endif
          /* FIXME: This (if p->id==self) may add an unbounded number of */
          /* entries, and hence overflow the mark stack, which is bad.   */
          GC_push_all_register_sections(bs_lo, bs_hi,
                                        THREAD_EQUAL(p -> id, self),
                                        traced_stack_sect);
          total_size += bs_hi - bs_lo; /* bs_lo <= bs_hi */
#       endif
      }
    }
    GC_VERBOSE_LOG_PRINTF("Pushed %d thread stacks\n", (int)nthreads);
    if (!found_me && !GC_in_thread_creation)
      ABORT("Collecting from unknown thread");
    GC_total_stacksize = total_size;
}
436 :
437 : #ifdef DEBUG_THREADS
438 : /* There seems to be a very rare thread stopping problem. To help us */
439 : /* debug that, we save the ids of the stopping thread. */
440 : pthread_t GC_stopping_thread;
441 : int GC_stopping_pid = 0;
442 : #endif
443 :
#ifdef PLATFORM_ANDROID
  extern int tkill(pid_t tid, int sig); /* from sys/linux-unistd.h */

  /* Send signal sig to kernel thread tid via tkill(), preserving     */
  /* errno.  Returns 0 on success, or the tkill errno value on        */
  /* failure (mirroring pthread_kill's error-return convention).      */
  static int android_thread_kill(pid_t tid, int sig)
  {
    int ret;
    int old_errno = errno;

    ret = tkill(tid, sig);
    if (ret < 0) {
      ret = errno;
      errno = old_errno;
    }

    return ret;
  }
#endif /* PLATFORM_ANDROID */
461 :
/* We hold the allocation lock.  Suspend all threads that might       */
/* still be running.  Return the number of suspend signals that       */
/* were sent.                                                         */
/* Non-NaCl: sends GC_sig_suspend to every live, non-blocked thread   */
/* other than the caller.  NaCl: raises the park flag and spins       */
/* (sleeping between polls) until all registered threads have parked  */
/* themselves in the syscall hooks; returns 0 in that case.           */
STATIC int GC_suspend_all(void)
{
  int n_live_threads = 0;
  int i;

# ifndef NACL
    GC_thread p;
#   ifndef GC_OPENBSD_UTHREADS
      int result;
#   endif
    pthread_t self = pthread_self();

#   ifdef DEBUG_THREADS
      GC_stopping_thread = self;
      GC_stopping_pid = getpid();
#   endif
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (!THREAD_EQUAL(p -> id, self)) {
          if (p -> flags & FINISHED) continue;
          if (p -> thread_blocked) /* Will wait */ continue;
#         ifndef GC_OPENBSD_UTHREADS
            /* Skip threads already stopped in this cycle (possible   */
            /* when retrying after a timeout).                        */
            if (p -> stop_info.last_stop_count == GC_stop_count) continue;
            n_live_threads++;
#         endif
#         ifdef DEBUG_THREADS
            GC_log_printf("Sending suspend signal to %p\n", (void *)p->id);
#         endif

#         ifdef GC_OPENBSD_UTHREADS
            {
              stack_t stack;

              /* OpenBSD uthreads: suspend directly and fetch the     */
              /* stack bounds from the threads library.               */
              if (pthread_suspend_np(p -> id) != 0)
                ABORT("pthread_suspend_np failed");
              if (pthread_stackseg_np(p->id, &stack))
                ABORT("pthread_stackseg_np failed");
              p -> stop_info.stack_ptr = (ptr_t)stack.ss_sp - stack.ss_size;
            }
#         else
#           ifndef PLATFORM_ANDROID
              result = pthread_kill(p -> id, GC_sig_suspend);
#           else
              result = android_thread_kill(p -> kernel_id, GC_sig_suspend);
#           endif
            switch(result) {
              case ESRCH:
                /* Not really there anymore.  Possible?       */
                n_live_threads--;
                break;
              case 0:
                break;
              default:
                ABORT_ARG1("pthread_kill failed at suspend",
                           ": errcode= %d", result);
            }
#         endif
        }
      }
    }

# else /* NACL */
#   ifndef NACL_PARK_WAIT_NANOSECONDS
#     define NACL_PARK_WAIT_NANOSECONDS (100 * 1000)
#   endif
#   define NANOS_PER_SECOND (1000UL * 1000 * 1000)
    unsigned long num_sleeps = 0;

#   ifdef DEBUG_THREADS
      GC_log_printf("pthread_stop_world: num_threads %d\n",
                    GC_nacl_num_gc_threads - 1);
#   endif
    GC_nacl_thread_parker = pthread_self();
    GC_nacl_park_threads_now = 1;
#   ifdef DEBUG_THREADS
      GC_stopping_thread = GC_nacl_thread_parker;
      GC_stopping_pid = getpid();
#   endif

    while (1) {
      int num_threads_parked = 0;
      struct timespec ts;
      int num_used = 0;

      /* Check the 'parked' flag for each thread the GC knows about.  */
      for (i = 0; i < MAX_NACL_GC_THREADS
                  && num_used < GC_nacl_num_gc_threads; i++) {
        if (GC_nacl_thread_used[i] == 1) {
          num_used++;
          if (GC_nacl_thread_parked[i] == 1) {
            num_threads_parked++;
          }
        }
      }
      /* -1 for the current thread.     */
      if (num_threads_parked >= GC_nacl_num_gc_threads - 1)
        break;
      ts.tv_sec = 0;
      ts.tv_nsec = NACL_PARK_WAIT_NANOSECONDS;
#     ifdef DEBUG_THREADS
        GC_log_printf("Sleep waiting for %d threads to park...\n",
                      GC_nacl_num_gc_threads - num_threads_parked - 1);
#     endif
      /* This requires _POSIX_TIMERS feature.   */
      nanosleep(&ts, 0);
      /* Warn roughly once per second of waiting.                     */
      if (++num_sleeps > NANOS_PER_SECOND / NACL_PARK_WAIT_NANOSECONDS) {
        WARN("GC appears stalled waiting for %" WARN_PRIdPTR
             " threads to park...\n",
             GC_nacl_num_gc_threads - num_threads_parked - 1);
        num_sleeps = 0;
      }
    }
# endif /* NACL */
  return n_live_threads;
}
579 :
/* Stop the world: signal every other thread to suspend itself, then  */
/* wait (on GC_suspend_ack_sem) until each one has acknowledged.      */
/* Caller must hold the allocation lock.                              */
GC_INNER void GC_stop_world(void)
{
# if !defined(GC_OPENBSD_UTHREADS) && !defined(NACL)
    int i;
    int n_live_threads;
    int code;
# endif
  GC_ASSERT(I_HOLD_LOCK());
# ifdef DEBUG_THREADS
    GC_log_printf("Stopping the world from %p\n", (void *)pthread_self());
# endif

  /* Make sure all free list construction has stopped before we start. */
  /* No new construction can start, since free list construction is    */
  /* required to acquire and release the GC lock before it starts,     */
  /* and we have the lock.                                             */
# ifdef PARALLEL_MARK
    if (GC_parallel) {
      GC_acquire_mark_lock();
      GC_ASSERT(GC_fl_builder_count == 0);
      /* We should have previously waited for it to become zero. */
    }
# endif /* PARALLEL_MARK */

# if defined(GC_OPENBSD_UTHREADS) || defined(NACL)
    (void)GC_suspend_all();
# else
    /* Begin a new stop cycle; suspend handlers compare against this. */
    AO_store(&GC_stop_count, GC_stop_count+1);
        /* Only concurrent reads are possible. */
    AO_store_release(&GC_world_is_stopped, TRUE);
    n_live_threads = GC_suspend_all();

    if (GC_retry_signals) {
      unsigned long wait_usecs = 0;  /* Total wait since retry.       */
#     define WAIT_UNIT 3000
#     define RETRY_INTERVAL 100000
      for (;;) {
        int ack_count;

        sem_getvalue(&GC_suspend_ack_sem, &ack_count);
        if (ack_count == n_live_threads) break;
        if (wait_usecs > RETRY_INTERVAL) {
          /* Some signals may have been lost; re-send to the threads  */
          /* that have not yet acknowledged.                          */
          int newly_sent = GC_suspend_all();

          GC_COND_LOG_PRINTF("Resent %d signals after timeout\n", newly_sent);
          sem_getvalue(&GC_suspend_ack_sem, &ack_count);
          if (newly_sent < n_live_threads - ack_count) {
            WARN("Lost some threads during GC_stop_world?!\n",0);
            n_live_threads = ack_count + newly_sent;
          }
          wait_usecs = 0;
        }
        usleep(WAIT_UNIT);
        wait_usecs += WAIT_UNIT;
      }
    }

    /* Consume one acknowledgement per signalled thread.              */
    for (i = 0; i < n_live_threads; i++) {
      retry:
        code = sem_wait(&GC_suspend_ack_sem);
        if (0 != code) {
          /* On Linux, sem_wait is documented to always return zero.  */
          /* But the documentation appears to be incorrect.           */
          if (errno == EINTR) {
            /* Seems to happen with some versions of gdb.     */
            goto retry;
          }
          ABORT("sem_wait for handler failed");
        }
    }
# endif

# ifdef PARALLEL_MARK
    if (GC_parallel)
      GC_release_mark_lock();
# endif
# ifdef DEBUG_THREADS
    GC_log_printf("World stopped from %p\n", (void *)pthread_self());
    GC_stopping_thread = 0;
# endif
}
661 :
#ifdef NACL
  /* Spill the callee-saved registers onto the stack, record the      */
  /* resulting stack pointer in stop_info.stack_ptr, and copy the     */
  /* spilled words into stop_info.reg_storage so the GC can scan      */
  /* them; the final naclasp/add pops the pushed words back off.      */
# if defined(__x86_64__)
#   define NACL_STORE_REGS() \
        do { \
          __asm__ __volatile__ ("push %rbx"); \
          __asm__ __volatile__ ("push %rbp"); \
          __asm__ __volatile__ ("push %r12"); \
          __asm__ __volatile__ ("push %r13"); \
          __asm__ __volatile__ ("push %r14"); \
          __asm__ __volatile__ ("push %r15"); \
          __asm__ __volatile__ ("mov %%esp, %0" \
                    : "=m" (GC_nacl_gc_thread_self->stop_info.stack_ptr)); \
          BCOPY(GC_nacl_gc_thread_self->stop_info.stack_ptr, \
                GC_nacl_gc_thread_self->stop_info.reg_storage, \
                NACL_GC_REG_STORAGE_SIZE * sizeof(ptr_t)); \
          __asm__ __volatile__ ("naclasp $48, %r15"); \
        } while (0)
# elif defined(__i386__)
#   define NACL_STORE_REGS() \
        do { \
          __asm__ __volatile__ ("push %ebx"); \
          __asm__ __volatile__ ("push %ebp"); \
          __asm__ __volatile__ ("push %esi"); \
          __asm__ __volatile__ ("push %edi"); \
          __asm__ __volatile__ ("mov %%esp, %0" \
                    : "=m" (GC_nacl_gc_thread_self->stop_info.stack_ptr)); \
          BCOPY(GC_nacl_gc_thread_self->stop_info.stack_ptr, \
                GC_nacl_gc_thread_self->stop_info.reg_storage, \
                NACL_GC_REG_STORAGE_SIZE * sizeof(ptr_t));\
          __asm__ __volatile__ ("add $16, %esp"); \
        } while (0)
# else
#   error FIXME for non-amd64/x86 NaCl
# endif
696 :
  /* Called by the NaCl runtime before a syscall: save this thread's  */
  /* registers and stack pointer, then mark it parked so the GC may   */
  /* proceed while the thread is inside the syscall.                  */
  GC_API_OSCALL void nacl_pre_syscall_hook(void)
  {
    if (GC_nacl_thread_idx != -1) {
      NACL_STORE_REGS();
      GC_nacl_gc_thread_self->stop_info.stack_ptr = GC_approx_sp();
      GC_nacl_thread_parked[GC_nacl_thread_idx] = 1;
    }
  }
705 :
  /* If a stop-the-world is in progress, save this thread's state     */
  /* (unless already saved by the pre-syscall hook), mark it parked,  */
  /* and spin until GC_nacl_park_threads_now is cleared.              */
  GC_API_OSCALL void __nacl_suspend_thread_if_needed(void)
  {
    if (GC_nacl_park_threads_now) {
      pthread_t self = pthread_self();

      /* Don't try to park the thread parker.   */
      if (GC_nacl_thread_parker == self)
        return;

      /* This can happen when a thread is created outside of the GC   */
      /* system (wthread mostly).                                     */
      if (GC_nacl_thread_idx < 0)
        return;

      /* If it was already 'parked', we're returning from a syscall,  */
      /* so don't bother storing registers again, the GC has a set.   */
      if (!GC_nacl_thread_parked[GC_nacl_thread_idx]) {
        NACL_STORE_REGS();
        GC_nacl_gc_thread_self->stop_info.stack_ptr = GC_approx_sp();
      }
      GC_nacl_thread_parked[GC_nacl_thread_idx] = 1;
      while (GC_nacl_park_threads_now) {
        /* Just spin.   */
      }
      GC_nacl_thread_parked[GC_nacl_thread_idx] = 0;

      /* Clear out the reg storage for next suspend.  */
      BZERO(GC_nacl_gc_thread_self->stop_info.reg_storage,
            NACL_GC_REG_STORAGE_SIZE * sizeof(ptr_t));
    }
  }
737 :
  /* Called by the NaCl runtime after a syscall returns: park here if */
  /* a stop is still in progress, then clear this thread's parked     */
  /* flag so the mutator can resume.                                  */
  GC_API_OSCALL void nacl_post_syscall_hook(void)
  {
    /* Calling __nacl_suspend_thread_if_needed right away should      */
    /* guarantee we don't mutate the GC set.                          */
    __nacl_suspend_thread_if_needed();
    if (GC_nacl_thread_idx != -1) {
      GC_nacl_thread_parked[GC_nacl_thread_idx] = 0;
    }
  }
747 :
  /* One-time-init flag and the lock protecting the thread slot       */
  /* arrays below.                                                    */
  STATIC GC_bool GC_nacl_thread_parking_inited = FALSE;
  STATIC pthread_mutex_t GC_nacl_thread_alloc_lock = PTHREAD_MUTEX_INITIALIZER;

  extern void nacl_register_gc_hooks(void (*pre)(void), void (*post)(void));

  /* Register the NaCl syscall hooks and claim a free slot in the     */
  /* thread arrays for the calling thread (sets GC_nacl_thread_idx).  */
  GC_INNER void GC_nacl_initialize_gc_thread(void)
  {
    int i;

    nacl_register_gc_hooks(nacl_pre_syscall_hook, nacl_post_syscall_hook);
    pthread_mutex_lock(&GC_nacl_thread_alloc_lock);
    if (!EXPECT(GC_nacl_thread_parking_inited, TRUE)) {
      BZERO(GC_nacl_thread_parked, sizeof(GC_nacl_thread_parked));
      BZERO(GC_nacl_thread_used, sizeof(GC_nacl_thread_used));
      GC_nacl_thread_parking_inited = TRUE;
    }
    GC_ASSERT(GC_nacl_num_gc_threads <= MAX_NACL_GC_THREADS);
    for (i = 0; i < MAX_NACL_GC_THREADS; i++) {
      if (GC_nacl_thread_used[i] == 0) {
        GC_nacl_thread_used[i] = 1;
        GC_nacl_thread_idx = i;
        GC_nacl_num_gc_threads++;
        break;
      }
    }
    pthread_mutex_unlock(&GC_nacl_thread_alloc_lock);
  }
774 :
775 : GC_INNER void GC_nacl_shutdown_gc_thread(void)
776 : {
777 : pthread_mutex_lock(&GC_nacl_thread_alloc_lock);
778 : GC_ASSERT(GC_nacl_thread_idx >= 0);
779 : GC_ASSERT(GC_nacl_thread_idx < MAX_NACL_GC_THREADS);
780 : GC_ASSERT(GC_nacl_thread_used[GC_nacl_thread_idx] != 0);
781 : GC_nacl_thread_used[GC_nacl_thread_idx] = 0;
782 : GC_nacl_thread_idx = -1;
783 : GC_nacl_num_gc_threads--;
784 : pthread_mutex_unlock(&GC_nacl_thread_alloc_lock);
785 : }
786 : #endif /* NACL */
787 :
/* Caller holds allocation lock, and has held it continuously since   */
/* the world stopped.                                                 */
/* Restart the world: clear GC_world_is_stopped and send the restart  */
/* signal to every live, non-blocked thread other than the caller     */
/* (NaCl: simply drop the park flag so spinning threads resume).      */
GC_INNER void GC_start_world(void)
{
# ifndef NACL
    pthread_t self = pthread_self();
    register int i;
    register GC_thread p;
#   ifndef GC_OPENBSD_UTHREADS
      register int n_live_threads = 0;
      register int result;
#   endif
#   ifdef GC_NETBSD_THREADS_WORKAROUND
      int code;
#   endif

#   ifdef DEBUG_THREADS
      GC_log_printf("World starting\n");
#   endif

#   ifndef GC_OPENBSD_UTHREADS
      AO_store(&GC_world_is_stopped, FALSE);
#   endif
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (!THREAD_EQUAL(p -> id, self)) {
          if (p -> flags & FINISHED) continue;
          if (p -> thread_blocked) continue;
#         ifndef GC_OPENBSD_UTHREADS
            n_live_threads++;
#         endif
#         ifdef DEBUG_THREADS
            GC_log_printf("Sending restart signal to %p\n", (void *)p->id);
#         endif

#         ifdef GC_OPENBSD_UTHREADS
            if (pthread_resume_np(p -> id) != 0)
              ABORT("pthread_resume_np failed");
#         else
#           ifndef PLATFORM_ANDROID
              result = pthread_kill(p -> id, GC_sig_thr_restart);
#           else
              result = android_thread_kill(p -> kernel_id,
                                           GC_sig_thr_restart);
#           endif
            switch(result) {
              case ESRCH:
                /* Not really there anymore.  Possible?       */
                n_live_threads--;
                break;
              case 0:
                break;
              default:
                ABORT_ARG1("pthread_kill failed at resume",
                           ": errcode= %d", result);
            }
#         endif
        }
      }
    }
#   ifdef GC_NETBSD_THREADS_WORKAROUND
      /* Wait for each restarted thread to post its acknowledgement.  */
      for (i = 0; i < n_live_threads; i++) {
        while (0 != (code = sem_wait(&GC_restart_ack_sem))) {
          if (errno != EINTR) {
            ABORT_ARG1("sem_wait() for restart handler failed",
                       ": errcode= %d", code);
          }
        }
      }
#   endif
#   ifdef DEBUG_THREADS
      GC_log_printf("World started\n");
#   endif
# else /* NACL */
#   ifdef DEBUG_THREADS
      GC_log_printf("World starting...\n");
#   endif
    GC_nacl_park_threads_now = 0;
# endif
}
868 :
/* One-time initialization for the stop-the-world machinery: resolve  */
/* the suspend/restart signal numbers, create the acknowledgement     */
/* semaphore(s), install both signal handlers, and build              */
/* suspend_handler_mask.                                              */
GC_INNER void GC_stop_init(void)
{
# if !defined(GC_OPENBSD_UTHREADS) && !defined(NACL)
    struct sigaction act;

    if (SIGNAL_UNSET == GC_sig_suspend)
        GC_sig_suspend = SIG_SUSPEND;
    if (SIGNAL_UNSET == GC_sig_thr_restart)
        GC_sig_thr_restart = SIG_THR_RESTART;
    if (GC_sig_suspend == GC_sig_thr_restart)
        ABORT("Cannot use same signal for thread suspend and resume");

    if (sem_init(&GC_suspend_ack_sem, GC_SEM_INIT_PSHARED, 0) != 0)
        ABORT("sem_init failed");
#   ifdef GC_NETBSD_THREADS_WORKAROUND
      if (sem_init(&GC_restart_ack_sem, GC_SEM_INIT_PSHARED, 0) != 0)
        ABORT("sem_init failed");
#   endif

#   ifdef SA_RESTART
      act.sa_flags = SA_RESTART
#   else
      act.sa_flags = 0
#   endif
#   ifdef SA_SIGINFO
        | SA_SIGINFO
#   endif
        ;
    if (sigfillset(&act.sa_mask) != 0) {
      ABORT("sigfillset failed");
    }
#   ifdef GC_RTEMS_PTHREADS
      if(sigprocmask(SIG_UNBLOCK, &act.sa_mask, NULL) != 0) {
        ABORT("sigprocmask failed");
      }
#   endif
    GC_remove_allowed_signals(&act.sa_mask);
    /* GC_sig_thr_restart is set in the resulting mask. */
    /* It is unmasked by the handler when necessary.    */
#   ifdef SA_SIGINFO
      act.sa_sigaction = GC_suspend_handler;
#   else
      act.sa_handler = GC_suspend_handler;
#   endif
    /* act.sa_restorer is deprecated and should not be initialized. */
    if (sigaction(GC_sig_suspend, &act, NULL) != 0) {
        ABORT("Cannot set SIG_SUSPEND handler");
    }

#   ifdef SA_SIGINFO
      /* The restart handler takes the plain one-argument form.       */
      act.sa_flags &= ~SA_SIGINFO;
#   endif
    act.sa_handler = GC_restart_handler;
    if (sigaction(GC_sig_thr_restart, &act, NULL) != 0) {
        ABORT("Cannot set SIG_THR_RESTART handler");
    }

    /* Initialize suspend_handler_mask (excluding GC_sig_thr_restart). */
    if (sigfillset(&suspend_handler_mask) != 0) ABORT("sigfillset failed");
    GC_remove_allowed_signals(&suspend_handler_mask);
    if (sigdelset(&suspend_handler_mask, GC_sig_thr_restart) != 0)
        ABORT("sigdelset failed");

    /* Check for GC_RETRY_SIGNALS. */
    if (0 != GETENV("GC_RETRY_SIGNALS")) {
        GC_retry_signals = TRUE;
    }
    if (0 != GETENV("GC_NO_RETRY_SIGNALS")) {
        GC_retry_signals = FALSE;
    }
    if (GC_retry_signals) {
      GC_COND_LOG_PRINTF("Will retry suspend signal if necessary\n");
    }
# endif /* !GC_OPENBSD_UTHREADS && !NACL */
}
944 :
945 : #endif /* GC_PTHREADS && !GC_DARWIN_THREADS && !GC_WIN32_THREADS */
|