Line data Source code
1 : /*
2 : * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
3 : * Copyright (c) 1996 by Silicon Graphics. All rights reserved.
4 : * Copyright (c) 1998 by Fergus Henderson. All rights reserved.
5 : * Copyright (c) 2000-2009 by Hewlett-Packard Development Company.
6 : * All rights reserved.
7 : *
8 : * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
9 : * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
10 : *
11 : * Permission is hereby granted to use or copy this program
12 : * for any purpose, provided the above notices are retained on all copies.
13 : * Permission to modify the code and to distribute modified code is granted,
14 : * provided the above notices are retained, and a notice that the code was
15 : * modified is included with the above copyright notice.
16 : */
17 :
18 : #include "private/pthread_support.h"
19 :
20 : #if defined(GC_PTHREADS) && !defined(GC_WIN32_THREADS) && \
21 : !defined(GC_DARWIN_THREADS)
22 :
#ifdef NACL

#include <unistd.h>
#include <sys/time.h>

/* Number of threads currently registered via                           */
/* GC_nacl_initialize_gc_thread (decremented on shutdown).              */
STATIC int GC_nacl_num_gc_threads = 0;
/* This thread's slot index in the GC_nacl_thread_* tables, or -1 if    */
/* the thread is not registered with the collector.                     */
STATIC __thread int GC_nacl_thread_idx = -1;
/* Nonzero while the collector wants every other thread parked.         */
STATIC int GC_nacl_park_threads_now = 0;
/* Identity of the thread performing the collection; it must not try    */
/* to park itself.                                                      */
STATIC pthread_t GC_nacl_thread_parker = -1;

GC_INNER __thread GC_thread GC_nacl_gc_thread_self = NULL;

/* Per-slot state, indexed by GC_nacl_thread_idx: whether the slot is   */
/* in use, and whether that thread is currently parked.                 */
int GC_nacl_thread_parked[MAX_NACL_GC_THREADS];
int GC_nacl_thread_used[MAX_NACL_GC_THREADS];
37 :
38 : #elif !defined(GC_OPENBSD_THREADS)
39 :
40 : #include <signal.h>
41 : #include <semaphore.h>
42 : #include <errno.h>
43 : #include <unistd.h>
44 : #include "atomic_ops.h"
45 :
46 : /* It's safe to call original pthread_sigmask() here. */
47 : #undef pthread_sigmask
48 :
#ifdef DEBUG_THREADS
  /* Determine the number of signals so GC_print_sig_mask can iterate   */
  /* over all of them; platforms disagree on which macro provides it.   */
# ifndef NSIG
#   if defined(MAXSIG)
#     define NSIG (MAXSIG+1)
#   elif defined(_NSIG)
#     define NSIG _NSIG
#   elif defined(__SIGRTMAX)
#     define NSIG (__SIGRTMAX+1)
#   else
      /* Was a bare "--> please fix it" line, i.e. a deliberate syntax  */
      /* error; #error is the standard mechanism and gives a clear      */
      /* diagnostic instead of a confusing parse failure.               */
#     error Cannot determine NSIG for this platform; please define it
#   endif
# endif /* NSIG */

  /* Print (for debugging) the set of signals currently blocked in the  */
  /* calling thread.                                                    */
  void GC_print_sig_mask(void)
  {
    sigset_t blocked;
    int i;

    if (pthread_sigmask(SIG_BLOCK, NULL, &blocked) != 0)
      ABORT("pthread_sigmask failed");
    GC_printf("Blocked: ");
    for (i = 1; i < NSIG; i++) {
      if (sigismember(&blocked, i))
        GC_printf("%d ", i);
    }
    GC_printf("\n");
  }
#endif /* DEBUG_THREADS */
77 :
78 : /* Remove the signals that we want to allow in thread stopping */
79 : /* handler from a set. */
80 326 : STATIC void GC_remove_allowed_signals(sigset_t *set)
81 : {
82 1304 : if (sigdelset(set, SIGINT) != 0
83 326 : || sigdelset(set, SIGQUIT) != 0
84 326 : || sigdelset(set, SIGABRT) != 0
85 652 : || sigdelset(set, SIGTERM) != 0) {
86 0 : ABORT("sigdelset() failed");
87 : }
88 :
89 : # ifdef MPROTECT_VDB
90 : /* Handlers write to the thread structure, which is in the heap, */
91 : /* and hence can trigger a protection fault. */
92 652 : if (sigdelset(set, SIGSEGV) != 0
93 : # ifdef SIGBUS
94 652 : || sigdelset(set, SIGBUS) != 0
95 : # endif
96 : ) {
97 0 : ABORT("sigdelset() failed");
98 : }
99 : # endif
100 326 : }
101 :
/* Mask waited on by sigsuspend() inside the suspend handler;           */
/* constructed in GC_stop_init (everything blocked except the allowed   */
/* signals and SIG_THR_RESTART).                                        */
static sigset_t suspend_handler_mask;

STATIC volatile AO_t GC_stop_count = 0;
                        /* Incremented at the beginning of GC_stop_world. */

STATIC volatile AO_t GC_world_is_stopped = FALSE;
                        /* FALSE ==> it is safe for threads to restart, i.e. */
                        /* they will see another suspend signal before they  */
                        /* are expected to stop (unless they have voluntarily */
                        /* stopped).                                          */

/* Whether GC_stop_world should resend suspend signals that appear to   */
/* have been lost; also settable at runtime via the GC_RETRY_SIGNALS /  */
/* GC_NO_RETRY_SIGNALS environment variables (see GC_stop_init).        */
#ifdef GC_OSF1_THREADS
  STATIC GC_bool GC_retry_signals = TRUE;
#else
  STATIC GC_bool GC_retry_signals = FALSE;
#endif
118 :
119 : /*
120 : * We use signals to stop threads during GC.
121 : *
122 : * Suspended threads wait in signal handler for SIG_THR_RESTART.
123 : * That's more portable than semaphores or condition variables.
124 : * (We do use sem_post from a signal handler, but that should be portable.)
125 : *
126 : * The thread suspension signal SIG_SUSPEND is now defined in gc_priv.h.
127 : * Note that we can't just stop a thread; we need it to save its stack
128 : * pointer(s) and acknowledge.
129 : */
130 :
/* The restart signal.  The RT-signal expansions are parenthesized so   */
/* the macro stays a single expression at every use site (unguarded     */
/* "SIGRTMIN + 5" is a classic macro-hygiene hazard).                   */
#ifndef SIG_THR_RESTART
# if defined(GC_HPUX_THREADS) || defined(GC_OSF1_THREADS) \
     || defined(GC_NETBSD_THREADS)
#   ifdef _SIGRTMIN
#     define SIG_THR_RESTART (_SIGRTMIN + 5)
#   else
#     define SIG_THR_RESTART (SIGRTMIN + 5)
#   endif
# else
#   define SIG_THR_RESTART SIGXCPU
# endif
#endif
143 :
#ifdef GC_EXPLICIT_SIGNALS_UNBLOCK
  /* Some targets (eg., Solaris) might require this to be called when   */
  /* doing thread registering from the thread destructor.  Unblocks     */
  /* both GC signals for the calling thread.                            */
  GC_INNER void GC_unblock_gc_signals(void)
  {
    sigset_t gc_sigs;

    sigemptyset(&gc_sigs);
    sigaddset(&gc_sigs, SIG_SUSPEND);
    sigaddset(&gc_sigs, SIG_THR_RESTART);
    if (pthread_sigmask(SIG_UNBLOCK, &gc_sigs, NULL) != 0)
      ABORT("pthread_sigmask failed");
  }
#endif /* GC_EXPLICIT_SIGNALS_UNBLOCK */
157 :
/* Posted once by each suspend handler after it has recorded its stack  */
/* pointer; GC_stop_world does one sem_wait per signaled thread.        */
STATIC sem_t GC_suspend_ack_sem;

#ifdef GC_NETBSD_THREADS
# define GC_NETBSD_THREADS_WORKAROUND
  /* It seems to be necessary to wait until threads have restarted.     */
  /* But it is unclear why that is the case.                            */
  STATIC sem_t GC_restart_ack_sem;
#endif
166 :
167 : STATIC void GC_suspend_handler_inner(ptr_t sig_arg, void *context);
168 :
/* Signal handler for SIG_SUSPEND: make sure the callee-save registers  */
/* end up somewhere scannable, then run GC_suspend_handler_inner,       */
/* preserving errno across the handler.                                 */
#ifdef SA_SIGINFO
  /*ARGSUSED*/
  STATIC void GC_suspend_handler(int sig, siginfo_t *info, void *context)
#else
  STATIC void GC_suspend_handler(int sig)
#endif
{
# if defined(IA64) || defined(HP_PA) || defined(M68K)
    /* These targets need the callee-save registers pushed explicitly.  */
    int old_errno = errno;
    GC_with_callee_saves_pushed(GC_suspend_handler_inner, (ptr_t)(word)sig);
    errno = old_errno;
# else
    /* We believe that in all other cases the full context is already   */
    /* in the signal handler frame.                                     */
    int old_errno = errno;
#   ifndef SA_SIGINFO
      void *context = 0;
#   endif
    GC_suspend_handler_inner((ptr_t)(word)sig, context);
    errno = old_errno;
# endif
}
191 :
/* Body of the suspend handler: record this thread's stack pointer,     */
/* acknowledge the suspend via GC_suspend_ack_sem, then block in        */
/* sigsuspend() until the collector restarts the world.                 */
/*ARGSUSED*/
STATIC void GC_suspend_handler_inner(ptr_t sig_arg, void *context)
{
  pthread_t self = pthread_self();
  GC_thread me;
  IF_CANCEL(int cancel_state;)
  /* Snapshot the round we are acknowledging, so a stale restart can    */
  /* be distinguished from the next stop-the-world round.               */
  AO_t my_stop_count = AO_load(&GC_stop_count);

  if ((signed_word)sig_arg != SIG_SUSPEND)
    ABORT("Bad signal in suspend_handler");

  DISABLE_CANCEL(cancel_state);
  /* pthread_setcancelstate is not defined to be async-signal-safe.     */
  /* But the glibc version appears to be in the absence of              */
  /* asynchronous cancellation.  And since this signal handler needs    */
  /* to block on sigsuspend, which is both async-signal-safe            */
  /* and a cancellation point, there seems to be no obvious way         */
  /* out of it.  In fact, it looks to me like an async-signal-safe      */
  /* cancellation point is inherently a problem, unless there is        */
  /* some way to disable cancellation in the handler.                   */
# ifdef DEBUG_THREADS
    GC_log_printf("Suspending 0x%x\n", (unsigned)self);
# endif

  me = GC_lookup_thread(self);
  /* The lookup here is safe, since I'm doing this on behalf            */
  /* of a thread which holds the allocation lock in order               */
  /* to stop the world.  Thus concurrent modification of the            */
  /* data structure is impossible.                                      */
  if (me -> stop_info.last_stop_count == my_stop_count) {
      /* Duplicate signal.  OK if we are retrying.                      */
      if (!GC_retry_signals) {
          WARN("Duplicate suspend signal in thread %p\n", self);
      }
      RESTORE_CANCEL(cancel_state);
      return;
  }
# ifdef SPARC
      me -> stop_info.stack_ptr = GC_save_regs_in_stack();
# else
      me -> stop_info.stack_ptr = GC_approx_sp();
# endif
# ifdef IA64
      me -> backing_store_ptr = GC_save_regs_in_stack();
# endif

  /* Tell the thread that wants to stop the world that this             */
  /* thread has been stopped.  Note that sem_post() is                  */
  /* the only async-signal-safe primitive in LinuxThreads.              */
  sem_post(&GC_suspend_ack_sem);
  me -> stop_info.last_stop_count = my_stop_count;

  /* Wait until that thread tells us to restart by sending              */
  /* this thread a SIG_THR_RESTART signal.                              */
  /* SIG_THR_RESTART should be masked at this point.  Thus              */
  /* there is no race.                                                  */
  /* We do not continue until we receive a SIG_THR_RESTART,             */
  /* but we do not take that as authoritative.  (We may be              */
  /* accidentally restarted by one of the user signals we               */
  /* don't block.)  After we receive the signal, we use a               */
  /* primitive and expensive mechanism to wait until it's               */
  /* really safe to proceed.  Under normal circumstances,               */
  /* this code should not be executed.                                  */
  do {
      sigsuspend (&suspend_handler_mask);
  } while (AO_load_acquire(&GC_world_is_stopped)
           && AO_load(&GC_stop_count) == my_stop_count);
  /* If the RESTART signal gets lost, we can still lose.  That should   */
  /* be less likely than losing the SUSPEND signal, since we don't do   */
  /* much between the sem_post and sigsuspend.                          */
  /* We'd need more handshaking to work around that.                    */
  /* Simply dropping the sigsuspend call should be safe, but is         */
  /* unlikely to be efficient.                                          */

# ifdef DEBUG_THREADS
    GC_log_printf("Continuing 0x%x\n", (unsigned)self);
# endif
  RESTORE_CANCEL(cancel_state);
}
271 :
272 265 : STATIC void GC_restart_handler(int sig)
273 : {
274 : # if defined(DEBUG_THREADS) || defined(GC_NETBSD_THREADS_WORKAROUND)
275 : int old_errno = errno; /* Preserve errno value. */
276 : # endif
277 :
278 265 : if (sig != SIG_THR_RESTART) ABORT("Bad signal in suspend_handler");
279 :
280 : # ifdef GC_NETBSD_THREADS_WORKAROUND
281 : sem_post(&GC_restart_ack_sem);
282 : # endif
283 :
284 : /*
285 : ** Note: even if we don't do anything useful here,
286 : ** it would still be necessary to have a signal handler,
287 : ** rather than ignoring the signals, otherwise
288 : ** the signals will not be delivered at all, and
289 : ** will thus not interrupt the sigsuspend() above.
290 : */
291 :
292 : # ifdef DEBUG_THREADS
293 : GC_log_printf("In GC_restart_handler for 0x%x\n",
294 : (unsigned)pthread_self());
295 : # endif
296 : # if defined(DEBUG_THREADS) || defined(GC_NETBSD_THREADS_WORKAROUND)
297 : errno = old_errno;
298 : # endif
299 265 : }
300 :
301 : #endif /* !GC_OPENBSD_THREADS && !NACL */
302 :
#ifdef IA64
# define IF_IA64(x) x
#else
# define IF_IA64(x)
#endif
/* We hold allocation lock.  Should do exactly the right thing if the   */
/* world is stopped.  Should not fail if it isn't.                      */
/* Pushes every registered thread's stack (and, on IA64, its register   */
/* backing store) as GC roots, and records the total number of bytes    */
/* scanned in GC_total_stacksize.                                       */
GC_INNER void GC_push_all_stacks(void)
{
    GC_bool found_me = FALSE;
    size_t nthreads = 0;
    int i;
    GC_thread p;
    ptr_t lo, hi;
    /* On IA64, we also need to scan the register backing store.        */
    IF_IA64(ptr_t bs_lo; ptr_t bs_hi;)
    pthread_t self = pthread_self();
    word total_size = 0;

    if (!GC_thr_initialized) GC_thr_init();
#   ifdef DEBUG_THREADS
      GC_log_printf("Pushing stacks from thread 0x%x\n", (unsigned)self);
#   endif
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (p -> flags & FINISHED) continue;
        ++nthreads;
        if (THREAD_EQUAL(p -> id, self)) {
            /* Our own stack pointer was not saved by a signal handler; */
            /* take it directly.                                        */
            GC_ASSERT(!p->thread_blocked);
#           ifdef SPARC
              lo = (ptr_t)GC_save_regs_in_stack();
#           else
              lo = GC_approx_sp();
#           endif
            found_me = TRUE;
            IF_IA64(bs_hi = (ptr_t)GC_save_regs_in_stack();)
        } else {
            lo = p -> stop_info.stack_ptr;
            IF_IA64(bs_hi = p -> backing_store_ptr;)
        }
        if ((p -> flags & MAIN_THREAD) == 0) {
            hi = p -> stack_end;
            IF_IA64(bs_lo = p -> backing_store_end);
        } else {
            /* The original stack.                                      */
            hi = GC_stackbottom;
            IF_IA64(bs_lo = BACKING_STORE_BASE;)
        }
#       ifdef DEBUG_THREADS
          GC_log_printf("Stack for thread 0x%x = [%p,%p)\n",
                        (unsigned)(p -> id), lo, hi);
#       endif
        if (0 == lo) ABORT("GC_push_all_stacks: sp not set!");
        GC_push_all_stack_sections(lo, hi, p -> traced_stack_sect);
#       ifdef STACK_GROWS_UP
          total_size += lo - hi;
#       else
          total_size += hi - lo; /* lo <= hi */
#       endif
#       ifdef NACL
          /* Push reg_storage as roots, this will cover the reg context. */
          GC_push_all_stack((ptr_t)p -> stop_info.reg_storage,
              (ptr_t)(p -> stop_info.reg_storage + NACL_GC_REG_STORAGE_SIZE));
          total_size += NACL_GC_REG_STORAGE_SIZE * sizeof(ptr_t);
#       endif
#       ifdef IA64
#         ifdef DEBUG_THREADS
            GC_log_printf("Reg stack for thread 0x%x = [%p,%p)\n",
                          (unsigned)p -> id, bs_lo, bs_hi);
#         endif
          /* FIXME: This (if p->id==self) may add an unbounded number of */
          /* entries, and hence overflow the mark stack, which is bad.   */
          GC_push_all_register_sections(bs_lo, bs_hi,
                                        THREAD_EQUAL(p -> id, self),
                                        p -> traced_stack_sect);
          total_size += bs_hi - bs_lo; /* bs_lo <= bs_hi */
#       endif
      }
    }
    if (GC_print_stats == VERBOSE) {
      GC_log_printf("Pushed %d thread stacks\n", (int)nthreads);
    }
    if (!found_me && !GC_in_thread_creation)
      ABORT("Collecting from unknown thread");
    GC_total_stacksize = total_size;
}
389 :
390 : #ifdef DEBUG_THREADS
391 : /* There seems to be a very rare thread stopping problem. To help us */
392 : /* debug that, we save the ids of the stopping thread. */
393 : pthread_t GC_stopping_thread;
394 : int GC_stopping_pid = 0;
395 : #endif
396 :
#ifdef PLATFORM_ANDROID
  extern int tkill(pid_t tid, int sig); /* from sys/linux-unistd.h */

  /* Deliver sig to the thread with kernel id tid, preserving the       */
  /* caller's errno.  Returns 0 on success, otherwise the errno value   */
  /* produced by tkill().                                               */
  static int android_thread_kill(pid_t tid, int sig)
  {
    int saved_errno = errno;
    int status = tkill(tid, sig);

    if (status < 0) {
      status = errno;
      errno = saved_errno;
    }
    return status;
  }
#endif /* PLATFORM_ANDROID */
414 :
/* We hold the allocation lock.  Suspend all threads that might         */
/* still be running.  Return the number of suspend signals that         */
/* were sent.  (On OpenBSD threads are suspended directly; on NaCl      */
/* they are asked to park themselves and we poll until they have.)      */
STATIC int GC_suspend_all(void)
{
  int n_live_threads = 0;
  int i;

# ifndef NACL
    GC_thread p;
#   ifndef GC_OPENBSD_THREADS
      int result;
#   endif
    pthread_t self = pthread_self();

#   ifdef DEBUG_THREADS
      GC_stopping_thread = self;
      GC_stopping_pid = getpid();
#   endif
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (!THREAD_EQUAL(p -> id, self)) {
            if (p -> flags & FINISHED) continue;
            if (p -> thread_blocked) /* Will wait */ continue;
#           ifndef GC_OPENBSD_THREADS
              /* Skip threads that already acknowledged this round.     */
              if (p -> stop_info.last_stop_count == GC_stop_count) continue;
              n_live_threads++;
#           endif
#           ifdef DEBUG_THREADS
              GC_log_printf("Sending suspend signal to 0x%x\n",
                            (unsigned)(p -> id));
#           endif

#           ifdef GC_OPENBSD_THREADS
              {
                stack_t stack;
                if (pthread_suspend_np(p -> id) != 0)
                  ABORT("pthread_suspend_np failed");
                if (pthread_stackseg_np(p->id, &stack))
                  ABORT("pthread_stackseg_np failed");
                p -> stop_info.stack_ptr = (ptr_t)stack.ss_sp - stack.ss_size;
              }
#           else
#             ifndef PLATFORM_ANDROID
                result = pthread_kill(p -> id, SIG_SUSPEND);
#             else
                result = android_thread_kill(p -> kernel_id, SIG_SUSPEND);
#             endif
              switch(result) {
                case ESRCH:
                    /* Not really there anymore.  Possible?             */
                    n_live_threads--;
                    break;
                case 0:
                    break;
                default:
                    ABORT("pthread_kill failed");
              }
#           endif
        }
      }
    }

# else /* NACL */
#   ifndef NACL_PARK_WAIT_NANOSECONDS
#     define NACL_PARK_WAIT_NANOSECONDS (100 * 1000)
#   endif
#   ifdef DEBUG_THREADS
      GC_log_printf("pthread_stop_world: num_threads %d\n",
                    GC_nacl_num_gc_threads - 1);
#   endif
    GC_nacl_thread_parker = pthread_self();
    GC_nacl_park_threads_now = 1;
#   ifdef DEBUG_THREADS
      GC_stopping_thread = GC_nacl_thread_parker;
      GC_stopping_pid = getpid();
#   endif

    /* Poll until every other registered thread has parked itself       */
    /* (threads park in the NaCl syscall hooks below).                  */
    while (1) {
      int num_threads_parked = 0;
      struct timespec ts;
      int num_used = 0;

      /* Check the 'parked' flag for each thread the GC knows about.    */
      for (i = 0; i < MAX_NACL_GC_THREADS
                  && num_used < GC_nacl_num_gc_threads; i++) {
        if (GC_nacl_thread_used[i] == 1) {
          num_used++;
          if (GC_nacl_thread_parked[i] == 1) {
            num_threads_parked++;
          }
        }
      }
      /* -1 for the current thread.                                     */
      if (num_threads_parked >= GC_nacl_num_gc_threads - 1)
        break;
      ts.tv_sec = 0;
      ts.tv_nsec = NACL_PARK_WAIT_NANOSECONDS;
#     ifdef DEBUG_THREADS
        GC_log_printf("Sleep waiting for %d threads to park...\n",
                      GC_nacl_num_gc_threads - num_threads_parked - 1);
#     endif
      /* This requires _POSIX_TIMERS feature.                           */
      nanosleep(&ts, 0);
    }
# endif /* NACL */
  return n_live_threads;
}
523 :
/* Stop the world: bump GC_stop_count, send SIG_SUSPEND to every other  */
/* registered thread, and wait on GC_suspend_ack_sem until each one     */
/* has acknowledged.  Caller holds the allocation lock.                 */
GC_INNER void GC_stop_world(void)
{
# if !defined(GC_OPENBSD_THREADS) && !defined(NACL)
    int i;
    int n_live_threads;
    int code;
# endif
  GC_ASSERT(I_HOLD_LOCK());
# ifdef DEBUG_THREADS
    GC_log_printf("Stopping the world from 0x%x\n", (unsigned)pthread_self());
# endif

  /* Make sure all free list construction has stopped before we start.  */
  /* No new construction can start, since free list construction is     */
  /* required to acquire and release the GC lock before it starts,      */
  /* and we have the lock.                                              */
# ifdef PARALLEL_MARK
    if (GC_parallel) {
      GC_acquire_mark_lock();
      GC_ASSERT(GC_fl_builder_count == 0);
      /* We should have previously waited for it to become zero.        */
    }
# endif /* PARALLEL_MARK */

# if defined(GC_OPENBSD_THREADS) || defined(NACL)
    (void)GC_suspend_all();
# else
    AO_store(&GC_stop_count, GC_stop_count+1);
        /* Only concurrent reads are possible.                          */
    AO_store_release(&GC_world_is_stopped, TRUE);
    n_live_threads = GC_suspend_all();

    if (GC_retry_signals) {
      unsigned long wait_usecs = 0;  /* Total wait since retry.         */
#     define WAIT_UNIT 3000
#     define RETRY_INTERVAL 100000
      /* Periodically resend suspend signals until the ack count        */
      /* matches the number of signaled threads.                        */
      for (;;) {
        int ack_count;

        sem_getvalue(&GC_suspend_ack_sem, &ack_count);
        if (ack_count == n_live_threads) break;
        if (wait_usecs > RETRY_INTERVAL) {
          int newly_sent = GC_suspend_all();

          if (GC_print_stats) {
            GC_log_printf("Resent %d signals after timeout\n", newly_sent);
          }
          sem_getvalue(&GC_suspend_ack_sem, &ack_count);
          if (newly_sent < n_live_threads - ack_count) {
            WARN("Lost some threads during GC_stop_world?!\n",0);
            n_live_threads = ack_count + newly_sent;
          }
          wait_usecs = 0;
        }
        usleep(WAIT_UNIT);
        wait_usecs += WAIT_UNIT;
      }
    }

    /* Consume one acknowledgement per signaled thread.                 */
    for (i = 0; i < n_live_threads; i++) {
      retry:
        if (0 != (code = sem_wait(&GC_suspend_ack_sem))) {
          /* On Linux, sem_wait is documented to always return zero.    */
          /* But the documentation appears to be incorrect.             */
          if (errno == EINTR) {
            /* Seems to happen with some versions of gdb.               */
            goto retry;
          }
          ABORT("sem_wait for handler failed");
        }
    }
# endif

# ifdef PARALLEL_MARK
    if (GC_parallel)
      GC_release_mark_lock();
# endif
# ifdef DEBUG_THREADS
    GC_log_printf("World stopped from 0x%x\n", (unsigned)pthread_self());
    GC_stopping_thread = 0;
# endif
}
606 :
#ifdef NACL
  /* Save this thread's callee-save registers where the collector can   */
  /* scan them: push them onto the stack, record the resulting stack    */
  /* pointer, copy the pushed words into reg_storage, then pop them     */
  /* back off (via NaCl's sandboxed "naclasp" on x86_64).               */
# if defined(__x86_64__)
#   define NACL_STORE_REGS() \
        do { \
          __asm__ __volatile__ ("push %rbx"); \
          __asm__ __volatile__ ("push %rbp"); \
          __asm__ __volatile__ ("push %r12"); \
          __asm__ __volatile__ ("push %r13"); \
          __asm__ __volatile__ ("push %r14"); \
          __asm__ __volatile__ ("push %r15"); \
          __asm__ __volatile__ ("mov %%esp, %0" \
                    : "=m" (GC_nacl_gc_thread_self->stop_info.stack_ptr)); \
          BCOPY(GC_nacl_gc_thread_self->stop_info.stack_ptr, \
                GC_nacl_gc_thread_self->stop_info.reg_storage, \
                NACL_GC_REG_STORAGE_SIZE * sizeof(ptr_t)); \
          __asm__ __volatile__ ("naclasp $48, %r15"); \
        } while (0)
# elif defined(__i386__)
#   define NACL_STORE_REGS() \
        do { \
          __asm__ __volatile__ ("push %ebx"); \
          __asm__ __volatile__ ("push %ebp"); \
          __asm__ __volatile__ ("push %esi"); \
          __asm__ __volatile__ ("push %edi"); \
          __asm__ __volatile__ ("mov %%esp, %0" \
                    : "=m" (GC_nacl_gc_thread_self->stop_info.stack_ptr)); \
          BCOPY(GC_nacl_gc_thread_self->stop_info.stack_ptr, \
                GC_nacl_gc_thread_self->stop_info.reg_storage, \
                NACL_GC_REG_STORAGE_SIZE * sizeof(ptr_t));\
          __asm__ __volatile__ ("add $16, %esp"); \
        } while (0)
# else
#   error FIXME for non-amd64/x86 NaCl
# endif
641 :
/* Called by the NaCl runtime before a syscall: save this thread's      */
/* registers and stack pointer, then mark it parked so the collector    */
/* does not have to wait for it while it is in the syscall.             */
GC_API_OSCALL void nacl_pre_syscall_hook(void)
{
  if (GC_nacl_thread_idx != -1) {
    NACL_STORE_REGS();
    GC_nacl_gc_thread_self->stop_info.stack_ptr = GC_approx_sp();
    GC_nacl_thread_parked[GC_nacl_thread_idx] = 1;
  }
}
650 :
/* Park the calling thread (saving its registers and stack pointer if   */
/* not already saved by the pre-syscall hook) and spin until the        */
/* collector clears GC_nacl_park_threads_now.                           */
GC_API_OSCALL void __nacl_suspend_thread_if_needed(void)
{
  if (GC_nacl_park_threads_now) {
    pthread_t self = pthread_self();

    /* Don't try to park the thread parker.                             */
    if (GC_nacl_thread_parker == self)
      return;

    /* This can happen when a thread is created outside of the GC       */
    /* system (wthread mostly).                                         */
    if (GC_nacl_thread_idx < 0)
      return;

    /* If it was already 'parked', we're returning from a syscall,      */
    /* so don't bother storing registers again, the GC has a set.       */
    if (!GC_nacl_thread_parked[GC_nacl_thread_idx]) {
      NACL_STORE_REGS();
      GC_nacl_gc_thread_self->stop_info.stack_ptr = GC_approx_sp();
    }
    GC_nacl_thread_parked[GC_nacl_thread_idx] = 1;
    /* NOTE(review): busy-wait on a plain (non-volatile, non-atomic)    */
    /* int written by another thread; presumably the NaCl toolchain     */
    /* guarantees the load is not hoisted -- confirm.                   */
    while (GC_nacl_park_threads_now) {
      /* Just spin.                                                     */
    }
    GC_nacl_thread_parked[GC_nacl_thread_idx] = 0;

    /* Clear out the reg storage for next suspend.                      */
    BZERO(GC_nacl_gc_thread_self->stop_info.reg_storage,
          NACL_GC_REG_STORAGE_SIZE * sizeof(ptr_t));
  }
}
682 :
/* Called by the NaCl runtime after a syscall: park once more if a      */
/* collection is in progress, then un-park this thread.                 */
GC_API_OSCALL void nacl_post_syscall_hook(void)
{
  /* Calling __nacl_suspend_thread_if_needed right away should          */
  /* guarantee we don't mutate the GC set.                              */
  __nacl_suspend_thread_if_needed();
  if (GC_nacl_thread_idx != -1) {
    GC_nacl_thread_parked[GC_nacl_thread_idx] = 0;
  }
}
692 :
693 : STATIC GC_bool GC_nacl_thread_parking_inited = FALSE;
694 : STATIC pthread_mutex_t GC_nacl_thread_alloc_lock = PTHREAD_MUTEX_INITIALIZER;
695 :
696 : GC_INNER void GC_nacl_initialize_gc_thread(void)
697 : {
698 : int i;
699 : pthread_mutex_lock(&GC_nacl_thread_alloc_lock);
700 : if (!GC_nacl_thread_parking_inited) {
701 : BZERO(GC_nacl_thread_parked, sizeof(GC_nacl_thread_parked));
702 : BZERO(GC_nacl_thread_used, sizeof(GC_nacl_thread_used));
703 : GC_nacl_thread_parking_inited = TRUE;
704 : }
705 : GC_ASSERT(GC_nacl_num_gc_threads <= MAX_NACL_GC_THREADS);
706 : for (i = 0; i < MAX_NACL_GC_THREADS; i++) {
707 : if (GC_nacl_thread_used[i] == 0) {
708 : GC_nacl_thread_used[i] = 1;
709 : GC_nacl_thread_idx = i;
710 : GC_nacl_num_gc_threads++;
711 : break;
712 : }
713 : }
714 : pthread_mutex_unlock(&GC_nacl_thread_alloc_lock);
715 : }
716 :
717 : GC_INNER void GC_nacl_shutdown_gc_thread(void)
718 : {
719 : pthread_mutex_lock(&GC_nacl_thread_alloc_lock);
720 : GC_ASSERT(GC_nacl_thread_idx >= 0);
721 : GC_ASSERT(GC_nacl_thread_idx < MAX_NACL_GC_THREADS);
722 : GC_ASSERT(GC_nacl_thread_used[GC_nacl_thread_idx] != 0);
723 : GC_nacl_thread_used[GC_nacl_thread_idx] = 0;
724 : GC_nacl_thread_idx = -1;
725 : GC_nacl_num_gc_threads--;
726 : pthread_mutex_unlock(&GC_nacl_thread_alloc_lock);
727 : }
728 : #endif /* NACL */
729 :
/* Caller holds allocation lock, and has held it continuously since     */
/* the world stopped.  Clears GC_world_is_stopped and sends             */
/* SIG_THR_RESTART to each suspended thread (or unparks them on NaCl).  */
GC_INNER void GC_start_world(void)
{
# ifndef NACL
    pthread_t self = pthread_self();
    register int i;
    register GC_thread p;
#   ifndef GC_OPENBSD_THREADS
      register int n_live_threads = 0;
      register int result;
#   endif
#   ifdef GC_NETBSD_THREADS_WORKAROUND
      int code;
#   endif

#   ifdef DEBUG_THREADS
      GC_log_printf("World starting\n");
#   endif

#   ifndef GC_OPENBSD_THREADS
      /* Lets suspended threads fall out of their sigsuspend loop.      */
      AO_store(&GC_world_is_stopped, FALSE);
#   endif
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
        if (!THREAD_EQUAL(p -> id, self)) {
            if (p -> flags & FINISHED) continue;
            if (p -> thread_blocked) continue;
#           ifndef GC_OPENBSD_THREADS
              n_live_threads++;
#           endif
#           ifdef DEBUG_THREADS
              GC_log_printf("Sending restart signal to 0x%x\n",
                            (unsigned)(p -> id));
#           endif

#           ifdef GC_OPENBSD_THREADS
              if (pthread_resume_np(p -> id) != 0)
                ABORT("pthread_resume_np failed");
#           else
#             ifndef PLATFORM_ANDROID
                result = pthread_kill(p -> id, SIG_THR_RESTART);
#             else
                result = android_thread_kill(p -> kernel_id,
                                             SIG_THR_RESTART);
#             endif
              switch(result) {
                case ESRCH:
                    /* Not really there anymore.  Possible?             */
                    n_live_threads--;
                    break;
                case 0:
                    break;
                default:
                    ABORT("pthread_kill failed");
              }
#           endif
        }
      }
    }
#   ifdef GC_NETBSD_THREADS_WORKAROUND
      /* Wait for each restarted thread to acknowledge (see             */
      /* GC_restart_ack_sem above).                                     */
      for (i = 0; i < n_live_threads; i++) {
        while (0 != (code = sem_wait(&GC_restart_ack_sem))) {
          if (errno != EINTR) {
            if (GC_print_stats)
              GC_log_printf("sem_wait() returned %d\n", code);
            ABORT("sem_wait() for restart handler failed");
          }
        }
      }
#   endif
#   ifdef DEBUG_THREADS
      GC_log_printf("World started\n");
#   endif
# else /* NACL */
#   ifdef DEBUG_THREADS
      GC_log_printf("World starting...\n");
#   endif
    /* Parked threads spin on this flag and release themselves.         */
    GC_nacl_park_threads_now = 0;
# endif
}
810 :
/* One-time setup: initialize the ack semaphore(s), install the         */
/* SIG_SUSPEND and SIG_THR_RESTART handlers, build the mask used in     */
/* sigsuspend(), and honor the GC_RETRY_SIGNALS environment variables.  */
GC_INNER void GC_stop_init(void)
{
# if !defined(GC_OPENBSD_THREADS) && !defined(NACL)
    struct sigaction act;

    if (sem_init(&GC_suspend_ack_sem, GC_SEM_INIT_PSHARED, 0) != 0)
        ABORT("sem_init failed");
#   ifdef GC_NETBSD_THREADS_WORKAROUND
      if (sem_init(&GC_restart_ack_sem, GC_SEM_INIT_PSHARED, 0) != 0)
        ABORT("sem_init failed");
#   endif

#   ifdef SA_RESTART
      act.sa_flags = SA_RESTART
#   else
      act.sa_flags = 0
#   endif
#   ifdef SA_SIGINFO
                     | SA_SIGINFO
#   endif
        ;
    /* Block everything while the suspend handler runs, except the      */
    /* signals removed below.                                           */
    if (sigfillset(&act.sa_mask) != 0) {
      ABORT("sigfillset() failed");
    }
#   ifdef GC_RTEMS_PTHREADS
      if(sigprocmask(SIG_UNBLOCK, &act.sa_mask, NULL) != 0) {
        ABORT("rtems sigprocmask() failed");
      }
#   endif
    GC_remove_allowed_signals(&act.sa_mask);
    /* SIG_THR_RESTART is set in the resulting mask.                    */
    /* It is unmasked by the handler when necessary.                    */
#   ifdef SA_SIGINFO
      act.sa_sigaction = GC_suspend_handler;
#   else
      act.sa_handler = GC_suspend_handler;
#   endif
    if (sigaction(SIG_SUSPEND, &act, NULL) != 0) {
        ABORT("Cannot set SIG_SUSPEND handler");
    }

#   ifdef SA_SIGINFO
      act.sa_flags &= ~ SA_SIGINFO;
#   endif
    act.sa_handler = GC_restart_handler;
    if (sigaction(SIG_THR_RESTART, &act, NULL) != 0) {
        ABORT("Cannot set SIG_THR_RESTART handler");
    }

    /* Initialize suspend_handler_mask.  It excludes SIG_THR_RESTART.   */
    if (sigfillset(&suspend_handler_mask) != 0) ABORT("sigfillset() failed");
    GC_remove_allowed_signals(&suspend_handler_mask);
    if (sigdelset(&suspend_handler_mask, SIG_THR_RESTART) != 0)
      ABORT("sigdelset() failed");

    /* Check for GC_RETRY_SIGNALS.                                      */
    if (0 != GETENV("GC_RETRY_SIGNALS")) {
      GC_retry_signals = TRUE;
    }
    if (0 != GETENV("GC_NO_RETRY_SIGNALS")) {
      GC_retry_signals = FALSE;
    }
    if (GC_print_stats && GC_retry_signals) {
      GC_log_printf("Will retry suspend signal if necessary\n");
    }
# endif /* !GC_OPENBSD_THREADS && !NACL */
}
878 :
879 : #endif
|