Line data Source code
1 : /*
2 : * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3 : * Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved.
4 : * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
5 : * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
6 : *
7 : * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
8 : * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
9 : *
10 : * Permission is hereby granted to use or copy this program
11 : * for any purpose, provided the above notices are retained on all copies.
12 : * Permission to modify the code and to distribute modified code is granted,
13 : * provided the above notices are retained, and a notice that the code was
14 : * modified is included with the above copyright notice.
15 : */
16 :
17 : #include "private/gc_priv.h"
18 :
19 : #if defined(LINUX) && !defined(POWERPC)
20 : # include <linux/version.h>
21 : # if (LINUX_VERSION_CODE <= 0x10400)
22 : /* Ugly hack to get struct sigcontext_struct definition. Required */
23 : /* for some early 1.3.X releases. Will hopefully go away soon. */
24 : /* In some later Linux releases, asm/sigcontext.h may have to */
25 : /* be included instead. */
26 : # define __KERNEL__
27 : # include <asm/signal.h>
28 : # undef __KERNEL__
29 : # else
30 : /* Kernels prior to 2.1.1 defined struct sigcontext_struct instead of */
31 : /* struct sigcontext. libc6 (glibc2) uses "struct sigcontext" in */
32 : /* prototypes, so we have to include the top-level sigcontext.h to */
33 : /* make sure the former gets defined to be the latter if appropriate. */
34 : # include <features.h>
35 : # if 2 <= __GLIBC__
36 : # if 2 == __GLIBC__ && 0 == __GLIBC_MINOR__
37 : /* glibc 2.1 no longer has sigcontext.h. But signal.h */
38 : /* has the right declaration for glibc 2.1. */
39 : # include <sigcontext.h>
40 : # endif /* 0 == __GLIBC_MINOR__ */
41 : # else /* not 2 <= __GLIBC__ */
42 : /* libc5 doesn't have <sigcontext.h>: go directly with the kernel */
43 : /* one. Check LINUX_VERSION_CODE to see which we should reference. */
44 : # include <asm/sigcontext.h>
45 : # endif /* 2 <= __GLIBC__ */
46 : # endif
47 : #endif
48 :
49 : #if !defined(OS2) && !defined(PCR) && !defined(AMIGA) && !defined(MACOS) \
50 : && !defined(MSWINCE) && !defined(__CC_ARM)
51 : # include <sys/types.h>
52 : # if !defined(MSWIN32)
53 : # include <unistd.h>
54 : # endif
55 : #endif
56 :
57 : #include <stdio.h>
58 : #if defined(MSWINCE) || defined(SN_TARGET_PS3)
59 : # define SIGSEGV 0 /* value is irrelevant */
60 : #else
61 : # include <signal.h>
62 : #endif
63 :
64 : #if defined(UNIX_LIKE) || defined(CYGWIN32) || defined(NACL)
65 : # include <fcntl.h>
66 : #endif
67 :
68 : #if defined(LINUX) || defined(LINUX_STACKBOTTOM)
69 : # include <ctype.h>
70 : #endif
71 :
72 : /* Blatantly OS dependent routines, except for those that are related */
73 : /* to dynamic loading. */
74 :
75 : #ifdef AMIGA
76 : # define GC_AMIGA_DEF
77 : # include "extra/AmigaOS.c"
78 : # undef GC_AMIGA_DEF
79 : #endif
80 :
81 : #if defined(MSWIN32) || defined(MSWINCE) || defined(CYGWIN32)
82 : # ifndef WIN32_LEAN_AND_MEAN
83 : # define WIN32_LEAN_AND_MEAN 1
84 : # endif
85 : # define NOSERVICE
86 : # include <windows.h>
87 : /* It's not clear this is completely kosher under Cygwin. But it */
88 : /* allows us to get a working GC_get_stack_base. */
89 : #endif
90 :
91 : #ifdef MACOS
92 : # include <Processes.h>
93 : #endif
94 :
95 : #ifdef IRIX5
96 : # include <sys/uio.h>
97 : # include <malloc.h> /* for locking */
98 : #endif
99 :
100 : #if defined(LINUX) || defined(FREEBSD) || defined(SOLARIS) || defined(IRIX5) \
101 : || ((defined(USE_MMAP) || defined(USE_MUNMAP)) \
102 : && !defined(MSWIN32) && !defined(MSWINCE))
103 : # define MMAP_SUPPORTED
104 : #endif
105 :
106 : #if defined(MMAP_SUPPORTED) || defined(ADD_HEAP_GUARD_PAGES)
107 : # if defined(USE_MUNMAP) && !defined(USE_MMAP)
108 : # error "invalid config - USE_MUNMAP requires USE_MMAP"
109 : # endif
110 : # include <sys/types.h>
111 : # include <sys/mman.h>
112 : # include <sys/stat.h>
113 : # include <errno.h>
114 : #endif
115 :
116 : #ifdef DARWIN
117 : /* for get_etext and friends */
118 : # include <mach-o/getsect.h>
119 : #endif
120 :
121 : #ifdef DJGPP
122 : /* Apparently necessary for djgpp 2.01. May cause problems with */
123 : /* other versions. */
124 : typedef long unsigned int caddr_t;
125 : #endif
126 :
127 : #ifdef PCR
128 : # include "il/PCR_IL.h"
129 : # include "th/PCR_ThCtl.h"
130 : # include "mm/PCR_MM.h"
131 : #endif
132 :
133 : #if !defined(NO_EXECUTE_PERMISSION)
134 : STATIC GC_bool GC_pages_executable = TRUE;
135 : #else
136 : STATIC GC_bool GC_pages_executable = FALSE;
137 : #endif
138 : #define IGNORE_PAGES_EXECUTABLE 1
139 : /* Undefined where GC_pages_executable is really used. */
140 :
141 : #ifdef NEED_PROC_MAPS
142 : /* We need to parse /proc/self/maps to find dynamic libraries */
143 : /* and/or to find the register backing store base (IA64). Do it once */
144 : /* here. */
145 :
146 : #define READ read
147 :
148 : /* Repeatedly perform a read call until the buffer is filled or */
149 : /* we encounter EOF. */
150 0 : STATIC ssize_t GC_repeat_read(int fd, char *buf, size_t count)
151 : {
152 0 : size_t num_read = 0;
153 : ssize_t result;
154 :
155 : ASSERT_CANCEL_DISABLED();
156 0 : while (num_read < count) {
157 0 : result = READ(fd, buf + num_read, count - num_read);
158 0 : if (result < 0) return result;
159 0 : if (result == 0) break;
160 0 : num_read += result;
161 : }
162 0 : return num_read;
163 : }
164 :
165 : #ifdef THREADS
166 : /* Determine the length of a file by incrementally reading it into a */
167 : /* buffer. This would be silly to use on a file supporting lseek, but Linux */
168 : /* /proc files usually do not. */
169 0 : STATIC size_t GC_get_file_len(int f)
170 : {
171 0 : size_t total = 0;
172 : ssize_t result;
173 : # define GET_FILE_LEN_BUF_SZ 500
174 : char buf[GET_FILE_LEN_BUF_SZ];
175 :
176 : do {
177 0 : result = read(f, buf, GET_FILE_LEN_BUF_SZ);
178 0 : if (result == -1) return 0;
179 0 : total += result;
180 0 : } while (result > 0);
181 0 : return total;
182 : }
183 :
184 0 : STATIC size_t GC_get_maps_len(void)
185 : {
186 0 : int f = open("/proc/self/maps", O_RDONLY);
187 : size_t result;
188 0 : if (f < 0) return 0; /* treat missing file as empty */
189 0 : result = GC_get_file_len(f);
190 0 : close(f);
191 0 : return result;
192 : }
193 : #endif /* THREADS */
194 :
195 : /* Copy the contents of /proc/self/maps to a buffer in our address */
196 : /* space. Return the address of the buffer, or zero on failure. */
197 : /* This code could be simplified if we could determine the file size ahead */
198 : /* of time. */
199 0 : GC_INNER char * GC_get_maps(void)
200 : {
201 : int f;
202 : ssize_t result;
203 : static char *maps_buf = NULL;
204 : static size_t maps_buf_sz = 1;
205 0 : size_t maps_size, old_maps_size = 0;
206 :
207 : /* The buffer is essentially static, so there must be a single client. */
208 : GC_ASSERT(I_HOLD_LOCK());
209 :
210 : /* Note that in the presence of threads, the maps file can */
211 : /* essentially shrink asynchronously and unexpectedly as */
212 : /* threads that we already think of as dead release their */
213 : /* stacks. And there is no easy way to read the entire */
214 : /* file atomically. This is arguably a misfeature of the */
215 : /* /proc/.../maps interface. */
216 :
217 : /* Since we don't believe the file can grow */
218 : /* asynchronously, it should suffice to first determine */
219 : /* the size (using lseek or read), and then to reread the */
220 : /* file. If the size is inconsistent we have to retry. */
221 : /* This only matters with threads enabled, and if we use */
222 : /* this to locate roots (not the default). */
223 :
224 : # ifdef THREADS
225 : /* Determine the initial size of /proc/self/maps. */
226 : /* Note that lseek doesn't work, at least as of 2.6.15. */
227 0 : maps_size = GC_get_maps_len();
228 0 : if (0 == maps_size) return 0;
229 : # else
230 : maps_size = 4000; /* Guess */
231 : # endif
232 :
233 : /* Read /proc/self/maps, growing maps_buf as necessary. */
234 : /* Note that we may not allocate conventionally, and */
235 : /* thus can't use stdio. */
236 : do {
237 0 : while (maps_size >= maps_buf_sz) {
238 : /* Grow only by powers of 2, since we leak "too small" buffers. */
239 0 : while (maps_size >= maps_buf_sz) maps_buf_sz *= 2;
240 0 : maps_buf = GC_scratch_alloc(maps_buf_sz);
241 : # ifdef THREADS
242 : /* Recompute initial length, since we allocated. */
243 : /* This can only happen a few times per program */
244 : /* execution. */
245 0 : maps_size = GC_get_maps_len();
246 0 : if (0 == maps_size) return 0;
247 : # endif
248 0 : if (maps_buf == 0) return 0;
249 : }
250 : GC_ASSERT(maps_buf_sz >= maps_size + 1);
251 0 : f = open("/proc/self/maps", O_RDONLY);
252 0 : if (-1 == f) return 0;
253 : # ifdef THREADS
254 0 : old_maps_size = maps_size;
255 : # endif
256 0 : maps_size = 0;
257 : do {
258 0 : result = GC_repeat_read(f, maps_buf, maps_buf_sz-1);
259 0 : if (result <= 0)
260 0 : break;
261 0 : maps_size += result;
262 0 : } while ((size_t)result == maps_buf_sz-1);
263 0 : close(f);
264 0 : if (result <= 0)
265 0 : return 0;
266 : # ifdef THREADS
267 0 : if (maps_size > old_maps_size) {
268 0 : if (GC_print_stats)
269 0 : GC_log_printf(
270 : "Unexpected maps size growth from %lu to %lu\n",
271 : (unsigned long)old_maps_size,
272 : (unsigned long)maps_size);
273 0 : ABORT("Unexpected asynchronous /proc/self/maps growth: "
274 : "unregistered thread?");
275 : }
276 : # endif
277 0 : } while (maps_size >= maps_buf_sz || maps_size < old_maps_size);
278 : /* In the single-threaded case, the second clause is false. */
279 0 : maps_buf[maps_size] = '\0';
280 :
282 0 : return maps_buf;
283 : }
284 :
285 : /*
286 : * GC_parse_map_entry parses an entry from /proc/self/maps so we can
287 : * locate all writable data segments that belong to shared libraries.
288 : * The format of one of these entries and the fields we care about
289 : * is as follows:
290 : * XXXXXXXX-XXXXXXXX r-xp 00000000 30:05 260537 name of mapping...\n
291 : * ^^^^^^^^ ^^^^^^^^ ^^^^ ^^
292 : * start end prot maj_dev
293 : *
294 : * Note that since about August 2003, the columns no longer have
295 : * fixed offsets on 64-bit kernels. Hence we no longer rely on fixed offsets
296 : * anywhere, which is safer anyway.
297 : */
298 :
299 : /* Assign various fields of the first line in buf_ptr to (*start), */
300 : /* (*end), (*prot), (*maj_dev) and (*mapping_name). mapping_name may */
301 : /* be NULL. (*prot) and (*mapping_name) are assigned pointers into the */
302 : /* original buffer. */
303 0 : GC_INNER char *GC_parse_map_entry(char *buf_ptr, ptr_t *start, ptr_t *end,
304 : char **prot, unsigned int *maj_dev,
305 : char **mapping_name)
306 : {
307 : char *start_start, *end_start, *maj_dev_start;
308 : char *p;
309 : char *endp;
310 :
311 0 : if (buf_ptr == NULL || *buf_ptr == '\0') {
312 0 : return NULL;
313 : }
314 :
315 0 : p = buf_ptr;
316 0 : while (isspace(*p)) ++p;
317 0 : start_start = p;
318 : GC_ASSERT(isxdigit(*start_start));
319 0 : *start = (ptr_t)strtoul(start_start, &endp, 16); p = endp;
320 : GC_ASSERT(*p=='-');
321 :
322 0 : ++p;
323 0 : end_start = p;
324 : GC_ASSERT(isxdigit(*end_start));
325 0 : *end = (ptr_t)strtoul(end_start, &endp, 16); p = endp;
326 : GC_ASSERT(isspace(*p));
327 :
328 0 : while (isspace(*p)) ++p;
329 : GC_ASSERT(*p == 'r' || *p == '-');
330 0 : *prot = p;
331 : /* Skip past protection field to offset field */
332 0 : while (!isspace(*p)) ++p; while (isspace(*p)) ++p;
333 : GC_ASSERT(isxdigit(*p));
334 : /* Skip past offset field, which we ignore */
335 0 : while (!isspace(*p)) ++p; while (isspace(*p)) ++p;
336 0 : maj_dev_start = p;
337 : GC_ASSERT(isxdigit(*maj_dev_start));
338 0 : *maj_dev = strtoul(maj_dev_start, NULL, 16);
339 :
340 0 : if (mapping_name == 0) {
341 0 : while (*p && *p++ != '\n');
342 : } else {
343 0 : while (*p && *p != '\n' && *p != '/' && *p != '[') p++;
344 0 : *mapping_name = p;
345 0 : while (*p && *p++ != '\n');
346 : }
347 :
348 0 : return p;
349 : }
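/* A minimal usage sketch (not part of the collector, hence compiled */
/* out): iterate the buffer returned by GC_get_maps() with */
/* GC_parse_map_entry() and log every writable mapping. The helper */
/* name is hypothetical; like GC_get_maps() itself, it assumes the */
/* allocation lock is held. */
#if 0
static void print_writable_mappings(void)
{
    ptr_t start, end;
    char *prot, *mapping_name;
    unsigned int maj_dev;
    char *buf_ptr = GC_get_maps();

    if (NULL == buf_ptr) return;
    /* GC_parse_map_entry returns NULL once the buffer is exhausted. */
    while ((buf_ptr = GC_parse_map_entry(buf_ptr, &start, &end, &prot,
                                         &maj_dev, &mapping_name)) != NULL) {
        if (prot[1] == 'w')
            GC_log_printf("writable: %p-%p\n", (void *)start, (void *)end);
    }
}
#endif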
350 :
351 : #if defined(IA64) || defined(INCLUDE_LINUX_THREAD_DESCR)
352 : /* Try to read the backing store base from /proc/self/maps. */
353 : /* Return the bounds of the writable mapping with a 0 major device, */
354 : /* which includes the address passed as addr. */
355 : /* Return FALSE if there is no such mapping. */
356 : GC_INNER GC_bool GC_enclosing_mapping(ptr_t addr, ptr_t *startp,
357 : ptr_t *endp)
358 : {
359 : char *prot;
360 : ptr_t my_start, my_end;
361 : unsigned int maj_dev;
362 : char *maps = GC_get_maps();
363 : char *buf_ptr = maps;
364 :
365 : if (0 == maps) return(FALSE);
366 : for (;;) {
367 : buf_ptr = GC_parse_map_entry(buf_ptr, &my_start, &my_end,
368 : &prot, &maj_dev, 0);
369 :
370 : if (buf_ptr == NULL) return FALSE;
371 : if (prot[1] == 'w' && maj_dev == 0) {
372 : if (my_end > addr && my_start <= addr) {
373 : *startp = my_start;
374 : *endp = my_end;
375 : return TRUE;
376 : }
377 : }
378 : }
379 : return FALSE;
380 : }
381 : #endif /* IA64 || INCLUDE_LINUX_THREAD_DESCR */
382 :
383 : #if defined(REDIRECT_MALLOC)
384 : /* Find the text (code) mapping for the library whose name, after */
385 : /* stripping the directory part, starts with nm. */
386 : GC_INNER GC_bool GC_text_mapping(char *nm, ptr_t *startp, ptr_t *endp)
387 : {
388 : size_t nm_len = strlen(nm);
389 : char *prot;
390 : char *map_path;
391 : ptr_t my_start, my_end;
392 : unsigned int maj_dev;
393 : char *maps = GC_get_maps();
394 : char *buf_ptr = maps;
395 :
396 : if (0 == maps) return(FALSE);
397 : for (;;) {
398 : buf_ptr = GC_parse_map_entry(buf_ptr, &my_start, &my_end,
399 : &prot, &maj_dev, &map_path);
400 :
401 : if (buf_ptr == NULL) return FALSE;
402 : if (prot[0] == 'r' && prot[1] == '-' && prot[2] == 'x') {
403 : char *p = map_path;
404 : /* Set p to point just past last slash, if any. */
405 : while (*p != '\0' && *p != '\n' && *p != ' ' && *p != '\t') ++p;
406 0 : while (p > map_path && p[-1] != '/') --p;
407 : /* (Checking the bound first avoids reading before map_path.) */
408 : if (strncmp(nm, p, nm_len) == 0) {
409 : *startp = my_start;
410 : *endp = my_end;
411 : return TRUE;
412 : }
413 : }
414 : }
415 : return FALSE;
416 : }
417 : #endif /* REDIRECT_MALLOC */
418 :
419 : #ifdef IA64
420 : static ptr_t backing_store_base_from_proc(void)
421 : {
422 : ptr_t my_start, my_end;
423 : if (!GC_enclosing_mapping(GC_save_regs_in_stack(), &my_start, &my_end)) {
424 : if (GC_print_stats) {
425 : GC_log_printf("Failed to find backing store base from /proc\n");
426 : }
427 : return 0;
428 : }
429 : return my_start;
430 : }
431 : #endif
432 :
433 : #endif /* NEED_PROC_MAPS */
434 :
435 : #if defined(SEARCH_FOR_DATA_START)
436 : /* The I386 case can be handled without a search. The Alpha case */
437 : /* used to be handled differently as well, but the rules changed */
438 : /* for recent Linux versions. This seems to be the easiest way to */
439 : /* cover all versions. */
440 :
441 : # if defined(LINUX) || defined(HURD)
442 : /* Some Linux distributions arrange to define __data_start. Some */
443 : /* define data_start as a weak symbol. The latter is technically */
444 : /* broken, since the user program may define data_start, in which */
445 : /* case we lose. Nonetheless, we try both, preferring __data_start.*/
446 : /* We assume gcc-compatible pragmas. */
447 : # pragma weak __data_start
448 : extern int __data_start[];
449 : # pragma weak data_start
450 : extern int data_start[];
451 : # endif /* LINUX || HURD */
452 : extern int _end[];
453 :
454 : ptr_t GC_data_start = NULL;
455 :
456 : ptr_t GC_find_limit(ptr_t, GC_bool);
457 :
458 163 : GC_INNER void GC_init_linux_data_start(void)
459 : {
460 :
461 : # if defined(LINUX) || defined(HURD)
462 : /* Try the easy approaches first: */
463 163 : if ((ptr_t)__data_start != 0) {
464 163 : GC_data_start = (ptr_t)(__data_start);
465 163 : return;
466 : }
467 0 : if ((ptr_t)data_start != 0) {
468 0 : GC_data_start = (ptr_t)(data_start);
469 0 : return;
470 : }
471 : # endif /* LINUX || HURD */
472 0 : GC_data_start = GC_find_limit((ptr_t)(_end), FALSE);
473 : }
474 : #endif /* SEARCH_FOR_DATA_START */
475 :
476 : #ifdef ECOS
477 :
478 : # ifndef ECOS_GC_MEMORY_SIZE
479 : # define ECOS_GC_MEMORY_SIZE (448 * 1024)
480 : # endif /* ECOS_GC_MEMORY_SIZE */
481 :
482 : /* FIXME: This is a simple way of allocating memory which is */
483 : /* compatible with ECOS early releases. Later releases use a more */
484 : /* sophisticated means of allocating memory than this simple static */
485 : /* allocator, but this method is at least bound to work. */
486 : static char ecos_gc_memory[ECOS_GC_MEMORY_SIZE];
487 : static char *ecos_gc_brk = ecos_gc_memory;
488 :
489 : static void *tiny_sbrk(ptrdiff_t increment)
490 : {
491 : void *p = ecos_gc_brk;
492 : ecos_gc_brk += increment;
493 : if (ecos_gc_brk > ecos_gc_memory + sizeof(ecos_gc_memory)) {
494 : ecos_gc_brk -= increment;
495 : return NULL;
496 : }
497 : return p;
498 : }
499 : # define sbrk tiny_sbrk
500 : #endif /* ECOS */
501 :
502 : #if defined(NETBSD) && defined(__ELF__)
503 : ptr_t GC_data_start = NULL;
504 : ptr_t GC_find_limit(ptr_t, GC_bool);
505 :
506 : extern char **environ;
507 :
508 : GC_INNER void GC_init_netbsd_elf(void)
509 : {
510 : /* Some versions may need a differently spelled variant of */
511 : /* the environ symbol here (e.g. with a leading underscore). */
512 : GC_data_start = GC_find_limit((ptr_t)&environ, FALSE);
513 : }
514 : #endif /* NETBSD */
515 :
516 : #ifdef OPENBSD
517 : static struct sigaction old_segv_act;
518 : STATIC sigjmp_buf GC_jmp_buf_openbsd;
519 :
520 : # ifdef THREADS
521 : # include <sys/syscall.h>
522 : extern sigset_t __syscall(quad_t, ...);
523 : # endif
524 :
525 : /* Don't use GC_find_limit() because siglongjmp() outside of the */
526 : /* signal handler bypasses our userland pthreads lib, leaving */
527 : /* SIGSEGV and SIGPROF masked. Instead, use this custom one that */
528 : /* works around the issues. */
529 :
530 : /*ARGSUSED*/
531 : STATIC void GC_fault_handler_openbsd(int sig)
532 : {
533 : siglongjmp(GC_jmp_buf_openbsd, 1);
534 : }
535 :
536 : /* Return the first non-addressable location > p or bound. */
537 : /* Requires the allocation lock. */
538 : STATIC ptr_t GC_find_limit_openbsd(ptr_t p, ptr_t bound)
539 : {
540 : static volatile ptr_t result;
541 : /* Safer if static, since otherwise it may not be */
542 : /* preserved across the longjmp. Can safely be */
543 : /* static since it's only called with the */
544 : /* allocation lock held. */
545 :
546 : struct sigaction act;
547 : size_t pgsz = (size_t)sysconf(_SC_PAGESIZE);
548 : GC_ASSERT(I_HOLD_LOCK());
549 :
550 : act.sa_handler = GC_fault_handler_openbsd;
551 : sigemptyset(&act.sa_mask);
552 : act.sa_flags = SA_NODEFER | SA_RESTART;
553 : sigaction(SIGSEGV, &act, &old_segv_act);
554 :
555 : if (sigsetjmp(GC_jmp_buf_openbsd, 1) == 0) {
556 : result = (ptr_t)((word)p & ~(pgsz-1));
557 : for (;;) {
558 : result += pgsz;
559 : if (result >= bound) {
560 : result = bound;
561 : break;
562 : }
563 : GC_noop1((word)(*result));
564 : }
565 : }
566 :
567 : # ifdef THREADS
568 : /* Due to the siglongjmp() we need to manually unmask SIGPROF. */
569 : __syscall(SYS_sigprocmask, SIG_UNBLOCK, sigmask(SIGPROF));
570 : # endif
571 :
572 : sigaction(SIGSEGV, &old_segv_act, 0);
573 : return(result);
574 : }
575 :
576 : /* Return first addressable location > p or bound. */
577 : /* Requires the allocation lock. */
578 : STATIC ptr_t GC_skip_hole_openbsd(ptr_t p, ptr_t bound)
579 : {
580 : static volatile ptr_t result;
581 : static volatile int firstpass;
582 :
583 : struct sigaction act;
584 : size_t pgsz = (size_t)sysconf(_SC_PAGESIZE);
585 : GC_ASSERT(I_HOLD_LOCK());
586 :
587 : act.sa_handler = GC_fault_handler_openbsd;
588 : sigemptyset(&act.sa_mask);
589 : act.sa_flags = SA_NODEFER | SA_RESTART;
590 : sigaction(SIGSEGV, &act, &old_segv_act);
591 :
592 : firstpass = 1;
593 : result = (ptr_t)((word)p & ~(pgsz-1));
594 : if (sigsetjmp(GC_jmp_buf_openbsd, 1) != 0 || firstpass) {
595 : firstpass = 0;
596 : result += pgsz;
597 : if (result >= bound) {
598 : result = bound;
599 : } else {
600 : GC_noop1((word)(*result));
601 : }
602 : }
603 :
604 : sigaction(SIGSEGV, &old_segv_act, 0);
605 : return(result);
606 : }
607 : #endif /* OPENBSD */
608 :
609 : # ifdef OS2
610 :
611 : # include <stddef.h>
612 :
613 : # if !defined(__IBMC__) && !defined(__WATCOMC__) /* e.g. EMX */
614 :
615 : struct exe_hdr {
616 : unsigned short magic_number;
617 : unsigned short padding[29];
618 : long new_exe_offset;
619 : };
620 :
621 : #define E_MAGIC(x) (x).magic_number
622 : #define EMAGIC 0x5A4D
623 : #define E_LFANEW(x) (x).new_exe_offset
624 :
625 : struct e32_exe {
626 : unsigned char magic_number[2];
627 : unsigned char byte_order;
628 : unsigned char word_order;
629 : unsigned long exe_format_level;
630 : unsigned short cpu;
631 : unsigned short os;
632 : unsigned long padding1[13];
633 : unsigned long object_table_offset;
634 : unsigned long object_count;
635 : unsigned long padding2[31];
636 : };
637 :
638 : #define E32_MAGIC1(x) (x).magic_number[0]
639 : #define E32MAGIC1 'L'
640 : #define E32_MAGIC2(x) (x).magic_number[1]
641 : #define E32MAGIC2 'X'
642 : #define E32_BORDER(x) (x).byte_order
643 : #define E32LEBO 0
644 : #define E32_WORDER(x) (x).word_order
645 : #define E32LEWO 0
646 : #define E32_CPU(x) (x).cpu
647 : #define E32CPU286 1
648 : #define E32_OBJTAB(x) (x).object_table_offset
649 : #define E32_OBJCNT(x) (x).object_count
650 :
651 : struct o32_obj {
652 : unsigned long size;
653 : unsigned long base;
654 : unsigned long flags;
655 : unsigned long pagemap;
656 : unsigned long mapsize;
657 : unsigned long reserved;
658 : };
659 :
660 : #define O32_FLAGS(x) (x).flags
661 : #define OBJREAD 0x0001L
662 : #define OBJWRITE 0x0002L
663 : #define OBJINVALID 0x0080L
664 : #define O32_SIZE(x) (x).size
665 : #define O32_BASE(x) (x).base
666 :
667 : # else /* IBM's compiler */
668 :
669 : /* A kludge to get around what appears to be a header file bug */
670 : # ifndef WORD
671 : # define WORD unsigned short
672 : # endif
673 : # ifndef DWORD
674 : # define DWORD unsigned long
675 : # endif
676 :
677 : # define EXE386 1
678 : # include <newexe.h>
679 : # include <exe386.h>
680 :
681 : # endif /* __IBMC__ */
682 :
683 : # define INCL_DOSEXCEPTIONS
684 : # define INCL_DOSPROCESS
685 : # define INCL_DOSERRORS
686 : # define INCL_DOSMODULEMGR
687 : # define INCL_DOSMEMMGR
688 : # include <os2.h>
689 :
690 : # endif /* OS/2 */
691 :
692 : /* Find the page size */
693 : GC_INNER word GC_page_size = 0;
694 :
695 : #if defined(MSWIN32) || defined(MSWINCE) || defined(CYGWIN32)
696 : # ifndef VER_PLATFORM_WIN32_CE
697 : # define VER_PLATFORM_WIN32_CE 3
698 : # endif
699 :
700 : # if defined(MSWINCE) && defined(THREADS)
701 : GC_INNER GC_bool GC_dont_query_stack_min = FALSE;
702 : # endif
703 :
704 : GC_INNER void GC_setpagesize(void)
705 : {
706 : GetSystemInfo(&GC_sysinfo);
707 : GC_page_size = GC_sysinfo.dwPageSize;
708 : # if defined(MSWINCE) && !defined(_WIN32_WCE_EMULATION)
709 : {
710 : OSVERSIONINFO verInfo;
711 : /* Check the current WinCE version. */
712 : verInfo.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
713 : if (!GetVersionEx(&verInfo))
714 : ABORT("GetVersionEx failed");
715 : if (verInfo.dwPlatformId == VER_PLATFORM_WIN32_CE &&
716 : verInfo.dwMajorVersion < 6) {
717 : /* Only the first 32 MB of address space belongs to the */
718 : /* current process (unless WinCE 6.0+ or emulation). */
719 : GC_sysinfo.lpMaximumApplicationAddress = (LPVOID)((word)32 << 20);
720 : # ifdef THREADS
721 : /* On some old WinCE versions, it's observed that */
722 : /* VirtualQuery calls don't work properly when used to */
723 : /* get the current thread's committed stack minimum. */
724 : if (verInfo.dwMajorVersion < 5)
725 : GC_dont_query_stack_min = TRUE;
726 : # endif
727 : }
728 : }
729 : # endif
730 : }
731 :
732 : # ifndef CYGWIN32
733 : # define is_writable(prot) ((prot) == PAGE_READWRITE \
734 : || (prot) == PAGE_WRITECOPY \
735 : || (prot) == PAGE_EXECUTE_READWRITE \
736 : || (prot) == PAGE_EXECUTE_WRITECOPY)
737 : /* Return the number of bytes that are writable starting at p. */
738 : /* The pointer p is assumed to be page aligned. */
739 : /* If base is not 0, *base becomes the beginning of the */
740 : /* allocation region containing p. */
741 : STATIC word GC_get_writable_length(ptr_t p, ptr_t *base)
742 : {
743 : MEMORY_BASIC_INFORMATION buf;
744 : word result;
745 : word protect;
746 :
747 : result = VirtualQuery(p, &buf, sizeof(buf));
748 : if (result != sizeof(buf)) ABORT("Weird VirtualQuery result");
749 : if (base != 0) *base = (ptr_t)(buf.AllocationBase);
750 : protect = (buf.Protect & ~(PAGE_GUARD | PAGE_NOCACHE));
751 : if (!is_writable(protect)) {
752 : return(0);
753 : }
754 : if (buf.State != MEM_COMMIT) return(0);
755 : return(buf.RegionSize);
756 : }
757 :
758 : GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *sb)
759 : {
760 : ptr_t trunc_sp = (ptr_t)((word)GC_approx_sp() & ~(GC_page_size - 1));
761 : /* FIXME: This won't work if called from a deeply recursive */
762 : /* client code (and the committed stack space has grown). */
763 : word size = GC_get_writable_length(trunc_sp, 0);
764 : GC_ASSERT(size != 0);
765 : sb -> mem_base = trunc_sp + size;
766 : return GC_SUCCESS;
767 : }
768 : # else /* CYGWIN32 */
769 : /* An alternate version for Cygwin (adapted from Dave Korn's */
770 : /* gcc version of boehm-gc). */
771 : GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *sb)
772 : {
773 : extern void * _tlsbase __asm__ ("%fs:4");
774 : sb -> mem_base = _tlsbase;
775 : return GC_SUCCESS;
776 : }
777 : # endif /* CYGWIN32 */
778 : # define HAVE_GET_STACK_BASE
779 :
780 : #else /* !MSWIN32 */
781 163 : GC_INNER void GC_setpagesize(void)
782 : {
783 : # if defined(MPROTECT_VDB) || defined(PROC_VDB) || defined(USE_MMAP)
784 163 : GC_page_size = GETPAGESIZE();
785 163 : if (!GC_page_size) ABORT("getpagesize() failed");
786 : # else
787 : /* It's acceptable to fake it. */
788 : GC_page_size = HBLKSIZE;
789 : # endif
790 163 : }
791 : #endif /* !MSWIN32 */
792 :
793 : #ifdef BEOS
794 : # include <kernel/OS.h>
795 :
796 : GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *sb)
797 : {
798 : thread_info th;
799 : get_thread_info(find_thread(NULL),&th);
800 : sb->mem_base = th.stack_end;
801 : return GC_SUCCESS;
802 : }
803 : # define HAVE_GET_STACK_BASE
804 : #endif /* BEOS */
805 :
806 : #ifdef OS2
807 : GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *sb)
808 : {
809 : PTIB ptib; /* thread information block */
810 : PPIB ppib;
811 : if (DosGetInfoBlocks(&ptib, &ppib) != NO_ERROR) {
812 : ABORT("DosGetInfoBlocks failed");
813 : }
814 : sb->mem_base = ptib->tib_pstacklimit;
815 : return GC_SUCCESS;
816 : }
817 : # define HAVE_GET_STACK_BASE
818 : #endif /* OS2 */
819 :
820 : # ifdef AMIGA
821 : # define GC_AMIGA_SB
822 : # include "extra/AmigaOS.c"
823 : # undef GC_AMIGA_SB
824 : # endif /* AMIGA */
825 :
826 : # if defined(NEED_FIND_LIMIT) || defined(UNIX_LIKE)
827 :
828 : typedef void (*GC_fault_handler_t)(int);
829 :
830 : # if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1) \
831 : || defined(HURD) || defined(NETBSD)
832 : static struct sigaction old_segv_act;
833 : # if defined(_sigargs) /* !Irix6.x */ || defined(HPUX) \
834 : || defined(HURD) || defined(NETBSD) || defined(FREEBSD)
835 : static struct sigaction old_bus_act;
836 : # endif
837 : # else
838 : static GC_fault_handler_t old_segv_handler, old_bus_handler;
839 : # endif
840 :
841 0 : GC_INNER void GC_set_and_save_fault_handler(GC_fault_handler_t h)
842 : {
843 : # if defined(SUNOS5SIGS) || defined(IRIX5) \
844 : || defined(OSF1) || defined(HURD) || defined(NETBSD)
845 : struct sigaction act;
846 :
847 : act.sa_handler = h;
848 : # ifdef SIGACTION_FLAGS_NODEFER_HACK
849 : /* Was necessary for Solaris 2.3 and very temporary */
850 : /* NetBSD bugs. */
851 : act.sa_flags = SA_RESTART | SA_NODEFER;
852 : # else
853 : act.sa_flags = SA_RESTART;
854 : # endif
855 :
856 : (void) sigemptyset(&act.sa_mask);
857 : # ifdef GC_IRIX_THREADS
858 : /* Older versions have a bug related to retrieving and */
859 : /* setting a handler at the same time. */
860 : (void) sigaction(SIGSEGV, 0, &old_segv_act);
861 : (void) sigaction(SIGSEGV, &act, 0);
862 : # else
863 : (void) sigaction(SIGSEGV, &act, &old_segv_act);
864 : # if defined(IRIX5) && defined(_sigargs) /* Irix 5.x, not 6.x */ \
865 : || defined(HPUX) || defined(HURD) || defined(NETBSD) \
866 : || defined(FREEBSD)
867 : /* Under Irix 5.x or HP/UX, we may get SIGBUS. */
868 : /* Pthreads doesn't exist under Irix 5.x, so we */
869 : /* don't have to worry in the threads case. */
870 : (void) sigaction(SIGBUS, &act, &old_bus_act);
871 : # endif
872 : # endif /* GC_IRIX_THREADS */
873 : # else
874 0 : old_segv_handler = signal(SIGSEGV, h);
875 : # ifdef SIGBUS
876 0 : old_bus_handler = signal(SIGBUS, h);
877 : # endif
878 : # endif
879 0 : }
880 : # endif /* NEED_FIND_LIMIT || UNIX_LIKE */
881 :
882 : # if defined(NEED_FIND_LIMIT) \
883 : || (defined(USE_PROC_FOR_LIBRARIES) && defined(THREADS))
884 : /* Some tools to implement HEURISTIC2 */
885 : # define MIN_PAGE_SIZE 256 /* Smallest conceivable page size, bytes */
886 :
887 : /*ARGSUSED*/
888 0 : STATIC void GC_fault_handler(int sig)
889 : {
890 0 : LONGJMP(GC_jmp_buf, 1);
891 : }
892 :
893 0 : GC_INNER void GC_setup_temporary_fault_handler(void)
894 : {
895 : /* Handler is process-wide, so this should only happen in */
896 : /* one thread at a time. */
897 : GC_ASSERT(I_HOLD_LOCK());
898 0 : GC_set_and_save_fault_handler(GC_fault_handler);
899 0 : }
900 :
901 0 : GC_INNER void GC_reset_fault_handler(void)
902 : {
903 : # if defined(SUNOS5SIGS) || defined(IRIX5) \
904 : || defined(OSF1) || defined(HURD) || defined(NETBSD)
905 : (void) sigaction(SIGSEGV, &old_segv_act, 0);
906 : # if defined(IRIX5) && defined(_sigargs) /* Irix 5.x, not 6.x */ \
907 : || defined(HPUX) || defined(HURD) || defined(NETBSD) \
908 : || defined(FREEBSD)
909 : (void) sigaction(SIGBUS, &old_bus_act, 0);
910 : # endif
911 : # else
912 0 : (void) signal(SIGSEGV, old_segv_handler);
913 : # ifdef SIGBUS
914 0 : (void) signal(SIGBUS, old_bus_handler);
915 : # endif
916 : # endif
917 0 : }
918 :
919 : /* Return the first non-addressable location > p (up) or */
920 : /* the smallest location q s.t. [q,p) is addressable (!up). */
921 : /* We assume that p (up) or p-1 (!up) is addressable. */
922 : /* Requires allocation lock. */
923 0 : STATIC ptr_t GC_find_limit_with_bound(ptr_t p, GC_bool up, ptr_t bound)
924 : {
925 : static volatile ptr_t result;
926 : /* Safer if static, since otherwise it may not be */
927 : /* preserved across the longjmp. Can safely be */
928 : /* static since it's only called with the */
929 : /* allocation lock held. */
930 :
931 : GC_ASSERT(I_HOLD_LOCK());
932 0 : GC_setup_temporary_fault_handler();
933 0 : if (SETJMP(GC_jmp_buf) == 0) {
934 0 : result = (ptr_t)(((word)(p))
935 : & ~(MIN_PAGE_SIZE-1));
936 : for (;;) {
937 0 : if (up) {
938 0 : result += MIN_PAGE_SIZE;
939 0 : if (result >= bound) {
940 0 : result = bound;
941 0 : break;
942 : }
943 : } else {
944 0 : result -= MIN_PAGE_SIZE;
945 0 : if (result <= bound) {
946 0 : result = bound - MIN_PAGE_SIZE;
947 : /* This compensates for the increment */
948 : /* of "result" performed after the loop */
949 : /* (we do not modify the "up" variable */
950 : /* since it might be clobbered by */
951 : /* setjmp otherwise). */
952 0 : break;
953 : }
954 : }
955 0 : GC_noop1((word)(*result));
956 0 : }
957 : }
958 0 : GC_reset_fault_handler();
959 0 : if (!up) {
960 0 : result += MIN_PAGE_SIZE;
961 : }
962 0 : return(result);
963 : }
964 :
965 0 : ptr_t GC_find_limit(ptr_t p, GC_bool up)
966 : {
967 0 : return GC_find_limit_with_bound(p, up, up ? (ptr_t)(word)(-1) : 0);
968 : }
969 : # endif /* NEED_FIND_LIMIT || USE_PROC_FOR_LIBRARIES */
970 :
971 : #ifdef HPUX_STACKBOTTOM
972 :
973 : #include <sys/param.h>
974 : #include <sys/pstat.h>
975 :
976 : GC_INNER ptr_t GC_get_register_stack_base(void)
977 : {
978 : struct pst_vm_status vm_status;
979 :
980 : int i = 0;
981 : while (pstat_getprocvm(&vm_status, sizeof(vm_status), 0, i++) == 1) {
982 : if (vm_status.pst_type == PS_RSESTACK) {
983 : return (ptr_t) vm_status.pst_vaddr;
984 : }
985 : }
986 :
987 : /* Old way to get the register stack bottom. */
988 : return (ptr_t)(((word)GC_stackbottom - BACKING_STORE_DISPLACEMENT - 1)
989 : & ~(BACKING_STORE_ALIGNMENT - 1));
990 : }
991 :
992 : #endif /* HPUX_STACKBOTTOM */
993 :
994 : #ifdef LINUX_STACKBOTTOM
995 :
996 : # include <sys/types.h>
997 : # include <sys/stat.h>
998 :
999 : # define STAT_SKIP 27 /* Number of fields preceding startstack */
1000 : /* field in /proc/self/stat */
1001 :
1002 : # ifdef USE_LIBC_PRIVATES
1003 : # pragma weak __libc_stack_end
1004 : extern ptr_t __libc_stack_end;
1005 : # endif
1006 :
1007 : # ifdef IA64
1008 : # ifdef USE_LIBC_PRIVATES
1009 : # pragma weak __libc_ia64_register_backing_store_base
1010 : extern ptr_t __libc_ia64_register_backing_store_base;
1011 : # endif
1012 :
1013 : GC_INNER ptr_t GC_get_register_stack_base(void)
1014 : {
1015 : ptr_t result;
1016 :
1017 : # ifdef USE_LIBC_PRIVATES
1018 : if (0 != &__libc_ia64_register_backing_store_base
1019 : && 0 != __libc_ia64_register_backing_store_base) {
1020 : /* Glibc 2.2.4 has a bug such that for dynamically linked */
1021 : /* executables __libc_ia64_register_backing_store_base is */
1022 : /* defined but uninitialized during constructor calls. */
1023 : /* Hence we check for both nonzero address and value. */
1024 : return __libc_ia64_register_backing_store_base;
1025 : }
1026 : # endif
1027 : result = backing_store_base_from_proc();
1028 : if (0 == result) {
1029 : result = GC_find_limit(GC_save_regs_in_stack(), FALSE);
1030 : /* Now seems to work better than constant displacement */
1031 : /* heuristic used in 6.X versions. The latter seems to */
1032 : /* fail for 2.6 kernels. */
1033 : }
1034 : return result;
1035 : }
1036 : # endif /* IA64 */
1037 :
1038 0 : STATIC ptr_t GC_linux_main_stack_base(void)
1039 : {
1040 : /* We read the stack base value from /proc/self/stat. We do this */
1041 : /* using direct I/O system calls in order to avoid calling malloc */
1042 : /* in case REDIRECT_MALLOC is defined. */
1043 : # ifndef STAT_READ
1044 : /* Also defined in pthread_support.c. */
1045 : # define STAT_BUF_SIZE 4096
1046 : # define STAT_READ read
1047 : # endif
1048 : /* Should probably call the real read, if read is wrapped. */
1049 : char stat_buf[STAT_BUF_SIZE];
1050 : int f;
1051 : word result;
1052 0 : int i, buf_offset = 0, len;
1053 :
1054 : /* First try the easy way. This should work for glibc 2.2 */
1055 : /* This fails in a prelinked ("prelink" command) executable */
1056 : /* since the correct value of __libc_stack_end never */
1057 : /* becomes visible to us. The second test works around */
1058 : /* this. */
1059 : # ifdef USE_LIBC_PRIVATES
1060 : if (0 != &__libc_stack_end && 0 != __libc_stack_end ) {
1061 : # if defined(IA64)
1062 : /* Some versions of glibc set the address 16 bytes too */
1063 : /* low while the initialization code is running. */
1064 : if (((word)__libc_stack_end & 0xfff) + 0x10 < 0x1000) {
1065 : return __libc_stack_end + 0x10;
1066 : } /* Otherwise it's not safe to add 16 bytes and we fall */
1067 : /* back to using /proc. */
1068 : # elif defined(SPARC)
1069 : /* Older versions of glibc for 64-bit Sparc do not set
1070 : * this variable correctly, it gets set to either zero
1071 : * or one.
1072 : */
1073 : if (__libc_stack_end != (ptr_t) (unsigned long)0x1)
1074 : return __libc_stack_end;
1075 : # else
1076 : return __libc_stack_end;
1077 : # endif
1078 : }
1079 : # endif
1080 0 : f = open("/proc/self/stat", O_RDONLY);
1081 0 : if (f < 0)
1082 0 : ABORT("Couldn't read /proc/self/stat");
1083 0 : len = STAT_READ(f, stat_buf, STAT_BUF_SIZE);
1084 0 : close(f);
1085 :
1086 : /* Skip the required number of fields. This number is hopefully */
1087 : /* constant across all Linux implementations. */
1088 0 : for (i = 0; i < STAT_SKIP; ++i) {
1089 0 : while (buf_offset < len && isspace(stat_buf[buf_offset++])) {
1090 : /* empty */
1091 : }
1092 0 : while (buf_offset < len && !isspace(stat_buf[buf_offset++])) {
1093 : /* empty */
1094 : }
1095 : }
1096 : /* Skip spaces. */
1097 0 : while (buf_offset < len && isspace(stat_buf[buf_offset])) {
1098 0 : buf_offset++;
1099 : }
1100 : /* Find the end of the number and cut the buffer there. */
1101 0 : for (i = 0; buf_offset + i < len; i++) {
1102 0 : if (!isdigit(stat_buf[buf_offset + i])) break;
1103 : }
1104 0 : if (buf_offset + i >= len) ABORT("Could not parse /proc/self/stat");
1105 0 : stat_buf[buf_offset + i] = '\0';
1106 :
1107 0 : result = (word)STRTOULL(&stat_buf[buf_offset], NULL, 10);
1108 0 : if (result < 0x100000 || (result & (sizeof(word) - 1)) != 0)
1109 0 : ABORT("Absurd stack bottom value");
1110 0 : return (ptr_t)result;
1111 : }
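/* For illustration only (compiled out): with stdio permitted, the */
/* same startstack value - the 28th field of /proc/self/stat - could */
/* be extracted as below. The helper is hypothetical and, like the */
/* code above, assumes the comm field contains no embedded spaces. */
/* The collector itself must avoid stdio/malloc here in case */
/* REDIRECT_MALLOC is defined. */
#if 0
static unsigned long stack_bottom_via_fscanf(FILE *f)
{
    unsigned long startstack;

    /* Skip the STAT_SKIP (27) fields preceding startstack, then */
    /* read the value itself. */
    if (fscanf(f, "%*s %*s %*s %*s %*s %*s %*s %*s %*s %*s"
                  " %*s %*s %*s %*s %*s %*s %*s %*s %*s %*s"
                  " %*s %*s %*s %*s %*s %*s %*s %lu", &startstack) != 1)
        return 0; /* parse failure */
    return startstack;
}
#endif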
1112 : #endif /* LINUX_STACKBOTTOM */
1113 :
1114 : #ifdef FREEBSD_STACKBOTTOM
1115 : /* This uses an undocumented sysctl call, but at least one expert */
1116 : /* believes it will stay. */
1117 :
1118 : # include <unistd.h>
1119 : # include <sys/types.h>
1120 : # include <sys/sysctl.h>
1121 :
1122 : STATIC ptr_t GC_freebsd_main_stack_base(void)
1123 : {
1124 : int nm[2] = {CTL_KERN, KERN_USRSTACK};
1125 : ptr_t base;
1126 : size_t len = sizeof(ptr_t);
1127 : int r = sysctl(nm, 2, &base, &len, NULL, 0);
1128 : if (r) ABORT("Error getting main stack base");
1129 : return base;
1130 : }
1131 : #endif /* FREEBSD_STACKBOTTOM */
1132 :
1133 : #if defined(ECOS) || defined(NOSYS)
1134 : ptr_t GC_get_main_stack_base(void)
1135 : {
1136 : return STACKBOTTOM;
1137 : }
1138 : # define GET_MAIN_STACKBASE_SPECIAL
1139 : #elif !defined(BEOS) && !defined(AMIGA) && !defined(OS2) \
1140 : && !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32) \
1141 : && !defined(GC_OPENBSD_THREADS) \
1142 : && (!defined(GC_SOLARIS_THREADS) || defined(_STRICT_STDC))
1143 :
1144 : # if defined(LINUX) && defined(USE_GET_STACKBASE_FOR_MAIN)
1145 : # include <pthread.h>
1146 : # elif defined(DARWIN) && !defined(NO_PTHREAD_GET_STACKADDR_NP)
1147 : /* We could use pthread_get_stackaddr_np even in case of a */
1148 : /* single-threaded gclib (there is no -lpthread on Darwin). */
1149 : # include <pthread.h>
1150 : # undef STACKBOTTOM
1151 : # define STACKBOTTOM (ptr_t)pthread_get_stackaddr_np(pthread_self())
1152 : # endif
1153 :
1154 163 : ptr_t GC_get_main_stack_base(void)
1155 : {
1156 : ptr_t result;
1157 : # if defined(LINUX) && !defined(NACL) \
1158 : && (defined(USE_GET_STACKBASE_FOR_MAIN) \
1159 : || (defined(THREADS) && !defined(REDIRECT_MALLOC)))
1160 : pthread_attr_t attr;
1161 : void *stackaddr;
1162 : size_t size;
1163 :
1164 163 : if (pthread_getattr_np(pthread_self(), &attr) == 0) {
1165 326 : if (pthread_attr_getstack(&attr, &stackaddr, &size) == 0
1166 326 : && stackaddr != NULL) {
1167 163 : pthread_attr_destroy(&attr);
1168 : # ifdef STACK_GROWS_DOWN
1169 163 : stackaddr = (char *)stackaddr + size;
1170 : # endif
1171 163 : return (ptr_t)stackaddr;
1172 : }
1173 0 : pthread_attr_destroy(&attr);
1174 : }
1175 0 : WARN("pthread_getattr_np or pthread_attr_getstack failed"
1176 : " for main thread\n", 0);
1177 : # endif
1178 : # ifdef STACKBOTTOM
1179 : result = STACKBOTTOM;
1180 : # else
1181 : # define STACKBOTTOM_ALIGNMENT_M1 ((word)STACK_GRAN - 1)
1182 : # ifdef HEURISTIC1
1183 : # ifdef STACK_GROWS_DOWN
1184 : result = (ptr_t)(((word)GC_approx_sp() + STACKBOTTOM_ALIGNMENT_M1)
1185 : & ~STACKBOTTOM_ALIGNMENT_M1);
1186 : # else
1187 : result = (ptr_t)((word)GC_approx_sp() & ~STACKBOTTOM_ALIGNMENT_M1);
1188 : # endif
1189 : # endif /* HEURISTIC1 */
1190 : # ifdef LINUX_STACKBOTTOM
1191 0 : result = GC_linux_main_stack_base();
1192 : # endif
1193 : # ifdef FREEBSD_STACKBOTTOM
1194 : result = GC_freebsd_main_stack_base();
1195 : # endif
1196 : # ifdef HEURISTIC2
1197 : {
1198 : ptr_t sp = GC_approx_sp();
1199 : # ifdef STACK_GROWS_DOWN
1200 : result = GC_find_limit(sp, TRUE);
1201 : # ifdef HEURISTIC2_LIMIT
1202 : if (result > HEURISTIC2_LIMIT
1203 : && sp < HEURISTIC2_LIMIT) {
1204 : result = HEURISTIC2_LIMIT;
1205 : }
1206 : # endif
1207 : # else
1208 : result = GC_find_limit(sp, FALSE);
1209 : # ifdef HEURISTIC2_LIMIT
1210 : if (result < HEURISTIC2_LIMIT
1211 : && sp > HEURISTIC2_LIMIT) {
1212 : result = HEURISTIC2_LIMIT;
1213 : }
1214 : # endif
1215 : # endif
1216 : }
1217 : # endif /* HEURISTIC2 */
1218 : # ifdef STACK_GROWS_DOWN
1219 0 : if (result == 0)
1220 0 : result = (ptr_t)(signed_word)(-sizeof(ptr_t));
1221 : # endif
1222 : # endif
1223 : GC_ASSERT(GC_approx_sp() HOTTER_THAN result);
1224 0 : return(result);
1225 : }
1226 : # define GET_MAIN_STACKBASE_SPECIAL
1227 : #endif /* !AMIGA, !BEOS, !OPENBSD, !OS2, !Windows */
1228 :
1229 : #if (defined(GC_LINUX_THREADS) || defined(PLATFORM_ANDROID)) && !defined(NACL)
1230 :
1231 : # include <pthread.h>
1232 : /* extern int pthread_getattr_np(pthread_t, pthread_attr_t *); */
1233 :
1234 499 : GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *b)
1235 : {
1236 : pthread_attr_t attr;
1237 : size_t size;
1238 : # ifdef IA64
1239 : DCL_LOCK_STATE;
1240 : # endif
1241 :
1242 499 : if (pthread_getattr_np(pthread_self(), &attr) != 0) {
1243 0 : WARN("pthread_getattr_np failed\n", 0);
1244 0 : return GC_UNIMPLEMENTED;
1245 : }
1246 499 : if (pthread_attr_getstack(&attr, &(b -> mem_base), &size) != 0) {
1247 0 : ABORT("pthread_attr_getstack failed");
1248 : }
1249 499 : pthread_attr_destroy(&attr);
1250 : # ifdef STACK_GROWS_DOWN
1251 499 : b -> mem_base = (char *)(b -> mem_base) + size;
1252 : # endif
1253 : # ifdef IA64
1254 : /* We could try backing_store_base_from_proc, but that's safe */
1255 : /* only if no mappings are being asynchronously created. */
1256 : /* Subtracting the size from the stack base doesn't work for at */
1257 : /* least the main thread. */
1258 : LOCK();
1259 : {
1260 : IF_CANCEL(int cancel_state;)
1261 : ptr_t bsp;
1262 : ptr_t next_stack;
1263 :
1264 : DISABLE_CANCEL(cancel_state);
1265 : bsp = GC_save_regs_in_stack();
1266 : next_stack = GC_greatest_stack_base_below(bsp);
1267 : if (0 == next_stack) {
1268 : b -> reg_base = GC_find_limit(bsp, FALSE);
1269 : } else {
1270 : /* Avoid walking backwards into preceding memory stack and */
1271 : /* growing it. */
1272 : b -> reg_base = GC_find_limit_with_bound(bsp, FALSE, next_stack);
1273 : }
1274 : RESTORE_CANCEL(cancel_state);
1275 : }
1276 : UNLOCK();
1277 : # endif
1278 499 : return GC_SUCCESS;
1279 : }
1280 : # define HAVE_GET_STACK_BASE
1281 : #endif /* GC_LINUX_THREADS */
1282 :
1283 : #if defined(GC_DARWIN_THREADS) && !defined(NO_PTHREAD_GET_STACKADDR_NP)
1284 : # include <pthread.h>
1285 :
1286 : GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *b)
1287 : {
1288 : /* pthread_get_stackaddr_np() should return stack bottom (highest */
1289 : /* stack address plus 1). */
1290 : b->mem_base = pthread_get_stackaddr_np(pthread_self());
1291 : GC_ASSERT((void *)GC_approx_sp() HOTTER_THAN b->mem_base);
1292 : return GC_SUCCESS;
1293 : }
1294 : # define HAVE_GET_STACK_BASE
1295 : #endif /* GC_DARWIN_THREADS */
1296 :
1297 : #ifdef GC_OPENBSD_THREADS
1298 : # include <sys/signal.h>
1299 : # include <pthread.h>
1300 : # include <pthread_np.h>
1301 :
1302 : /* Find the stack using pthread_stackseg_np(). */
1303 : GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *sb)
1304 : {
1305 : stack_t stack;
1306 : if (pthread_stackseg_np(pthread_self(), &stack))
1307 : ABORT("pthread_stackseg_np(self) failed");
1308 : sb->mem_base = stack.ss_sp;
1309 : return GC_SUCCESS;
1310 : }
1311 : # define HAVE_GET_STACK_BASE
1312 : #endif /* GC_OPENBSD_THREADS */
1313 :
1314 : #if defined(GC_SOLARIS_THREADS) && !defined(_STRICT_STDC)
1315 :
1316 : # include <thread.h>
1317 : # include <signal.h>
1318 : # include <pthread.h>
1319 :
1320 : /* These variables are used to cache ss_sp value for the primordial */
1321 : /* thread (it's better not to call thr_stksegment() twice for this */
1322 : /* thread - see JDK bug #4352906). */
1323 : static pthread_t stackbase_main_self = 0;
1324 : /* 0 means stackbase_main_ss_sp value is unset. */
1325 : static void *stackbase_main_ss_sp = NULL;
1326 :
1327 : GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *b)
1328 : {
1329 : stack_t s;
1330 : pthread_t self = pthread_self();
1331 :
1332 : if (self == stackbase_main_self)
1333 : {
1334 : /* If the client calls GC_get_stack_base() from the main thread */
1335 : /* then just return the cached value. */
1336 : b -> mem_base = stackbase_main_ss_sp;
1337 : GC_ASSERT(b -> mem_base != NULL);
1338 : return GC_SUCCESS;
1339 : }
1340 :
1341 : if (thr_stksegment(&s)) {
1342 : /* According to the manual, the only failure error code returned */
1343 : /* is EAGAIN, meaning the information is not available because the */
1344 : /* thread is not yet completely initialized or is an internal */
1345 : /* thread - this shouldn't happen here. */
1346 : ABORT("thr_stksegment failed");
1347 : }
1348 : /* s.ss_sp holds the pointer to the stack bottom. */
1349 : GC_ASSERT((void *)GC_approx_sp() HOTTER_THAN s.ss_sp);
1350 :
1351 : if (!stackbase_main_self && thr_main() != 0)
1352 : {
1353 : /* Cache the stack base value for the primordial thread (this */
1354 : /* is done during GC_init, so there is no race). */
1355 : stackbase_main_ss_sp = s.ss_sp;
1356 : stackbase_main_self = self;
1357 : }
1358 :
1359 : b -> mem_base = s.ss_sp;
1360 : return GC_SUCCESS;
1361 : }
1362 : # define HAVE_GET_STACK_BASE
1363 : #endif /* GC_SOLARIS_THREADS */
1364 :
1365 : #ifdef GC_RTEMS_PTHREADS
1366 : GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *sb)
1367 : {
1368 : sb->mem_base = rtems_get_stack_bottom();
1369 : return GC_SUCCESS;
1370 : }
1371 : # define HAVE_GET_STACK_BASE
1372 : #endif /* GC_RTEMS_PTHREADS */
1373 :
1374 : #ifndef HAVE_GET_STACK_BASE
1375 : /* Retrieve stack base. */
1376 : /* Using the GC_find_limit version is risky. */
1377 : /* On IA64, for example, there is no guard page between the */
1378 : /* stack of one thread and the register backing store of the */
1379 : /* next. Thus this is likely to identify way too large a */
1380 : /* "stack" and thus at least result in disastrous performance. */
1381 : /* FIXME - Implement better strategies here. */
1382 : GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *b)
1383 : {
1384 : # ifdef NEED_FIND_LIMIT
1385 : IF_CANCEL(int cancel_state;)
1386 : DCL_LOCK_STATE;
1387 :
1388 : LOCK();
1389 : DISABLE_CANCEL(cancel_state); /* May be unnecessary? */
1390 : # ifdef STACK_GROWS_DOWN
1391 : b -> mem_base = GC_find_limit(GC_approx_sp(), TRUE);
1392 : # ifdef IA64
1393 : b -> reg_base = GC_find_limit(GC_save_regs_in_stack(), FALSE);
1394 : # endif
1395 : # else
1396 : b -> mem_base = GC_find_limit(GC_approx_sp(), FALSE);
1397 : # endif
1398 : RESTORE_CANCEL(cancel_state);
1399 : UNLOCK();
1400 : return GC_SUCCESS;
1401 : # else
1402 : return GC_UNIMPLEMENTED;
1403 : # endif
1404 : }
1405 : #endif /* !HAVE_GET_STACK_BASE */
1406 :
1407 : #ifndef GET_MAIN_STACKBASE_SPECIAL
1408 : /* This is always called from the main thread. Default implementation. */
1409 : ptr_t GC_get_main_stack_base(void)
1410 : {
1411 : struct GC_stack_base sb;
1412 :
1413 : if (GC_get_stack_base(&sb) != GC_SUCCESS)
1414 : ABORT("GC_get_stack_base failed");
1415 : GC_ASSERT((void *)GC_approx_sp() HOTTER_THAN sb.mem_base);
1416 : return (ptr_t)sb.mem_base;
1417 : }
1418 : #endif /* !GET_MAIN_STACKBASE_SPECIAL */
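/* A client-side sketch (compiled out): this is how application code */
/* typically consumes GC_get_stack_base() - by passing the result to */
/* GC_register_my_thread() when attaching a thread that was not */
/* created through the GC wrappers (both entry points are declared */
/* in gc.h). The helper name is hypothetical. */
#if 0
static int attach_current_thread(void)
{
    struct GC_stack_base sb;

    if (GC_get_stack_base(&sb) != GC_SUCCESS)
        return GC_UNIMPLEMENTED;
    /* Returns GC_SUCCESS, or GC_DUPLICATE if already registered. */
    return GC_register_my_thread(&sb);
}
#endif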
1419 :
1420 : /* Register static data segment(s) as roots. If more data segments are */
1421 : /* added later then they need to be registered at that point (as we do */
1422 : /* with SunOS dynamic loading), or GC_mark_roots needs to check for */
1423 : /* them (as we do with PCR). Called with allocator lock held. */
1424 : # ifdef OS2
1425 :
1426 : void GC_register_data_segments(void)
1427 : {
1428 : PTIB ptib;
1429 : PPIB ppib;
1430 : HMODULE module_handle;
1431 : # define PBUFSIZ 512
1432 : UCHAR path[PBUFSIZ];
1433 : FILE * myexefile;
1434 : struct exe_hdr hdrdos; /* MSDOS header. */
1435 : struct e32_exe hdr386; /* Real header for my executable */
1436 : struct o32_obj seg; /* Current segment */
1437 : int nsegs;
1438 :
1439 :
1440 : if (DosGetInfoBlocks(&ptib, &ppib) != NO_ERROR) {
1441 : ABORT("DosGetInfoBlocks failed");
1442 : }
1443 : module_handle = ppib -> pib_hmte;
1444 : if (DosQueryModuleName(module_handle, PBUFSIZ, path) != NO_ERROR) {
1445 : GC_err_printf("DosQueryModuleName failed\n");
1446 : ABORT("DosGetInfoBlocks failed");
1447 : }
1448 : myexefile = fopen(path, "rb");
1449 : if (myexefile == 0) {
1450 : if (GC_print_stats) {
1451 : GC_err_puts("Couldn't open executable ");
1452 : GC_err_puts(path);
1453 : GC_err_puts("\n");
1454 : }
1455 : ABORT("Failed to open executable");
1456 : }
1457 : if (fread((char *)(&hdrdos), 1, sizeof(hdrdos), myexefile)
1458 : < sizeof(hdrdos)) {
1459 : if (GC_print_stats) {
1460 : GC_err_puts("Couldn't read MSDOS header from ");
1461 : GC_err_puts(path);
1462 : GC_err_puts("\n");
1463 : }
1464 : ABORT("Couldn't read MSDOS header");
1465 : }
1466 : if (E_MAGIC(hdrdos) != EMAGIC) {
1467 : if (GC_print_stats) {
1468 : GC_err_puts("Executable has wrong DOS magic number: ");
1469 : GC_err_puts(path);
1470 : GC_err_puts("\n");
1471 : }
1472 : ABORT("Bad DOS magic number");
1473 : }
1474 : if (fseek(myexefile, E_LFANEW(hdrdos), SEEK_SET) != 0) {
1475 : if (GC_print_stats) {
1476 : GC_err_puts("Seek to new header failed in ");
1477 : GC_err_puts(path);
1478 : GC_err_puts("\n");
1479 : }
1480 : ABORT("Bad DOS magic number");
1481 : }
1482 : if (fread((char *)(&hdr386), 1, sizeof(hdr386), myexefile)
1483 : < sizeof(hdr386)) {
1484 : if (GC_print_stats) {
1485 : GC_err_puts("Couldn't read MSDOS header from ");
1486 : GC_err_puts(path);
1487 : GC_err_puts("\n");
1488 : }
1489 : ABORT("Couldn't read OS/2 header");
1490 : }
1491 : if (E32_MAGIC1(hdr386) != E32MAGIC1 || E32_MAGIC2(hdr386) != E32MAGIC2) {
1492 : if (GC_print_stats) {
1493 : GC_err_puts("Executable has wrong OS/2 magic number: ");
1494 : GC_err_puts(path);
1495 : GC_err_puts("\n");
1496 : }
1497 : ABORT("Bad OS/2 magic number");
1498 : }
1499 : if (E32_BORDER(hdr386) != E32LEBO || E32_WORDER(hdr386) != E32LEWO) {
1500 : if (GC_print_stats) {
1501 : GC_err_puts("Executable has wrong byte order: ");
1502 : GC_err_puts(path);
1503 : GC_err_puts("\n");
1504 : }
1505 : ABORT("Bad byte order");
1506 : }
1507 : if (E32_CPU(hdr386) == E32CPU286) {
1508 : if (GC_print_stats) {
1509 : GC_err_puts("GC can't handle 80286 executables: ");
1510 : GC_err_puts(path);
1511 : GC_err_puts("\n");
1512 : }
1513 : ABORT("Intel 80286 executables are unsupported");
1514 : }
1515 : if (fseek(myexefile, E_LFANEW(hdrdos) + E32_OBJTAB(hdr386),
1516 : SEEK_SET) != 0) {
1517 : if (GC_print_stats) {
1518 : GC_err_puts("Seek to object table failed: ");
1519 : GC_err_puts(path);
1520 : GC_err_puts("\n");
1521 : }
1522 : ABORT("Seek to object table failed");
1523 : }
1524 : for (nsegs = E32_OBJCNT(hdr386); nsegs > 0; nsegs--) {
1525 : int flags;
1526 : if (fread((char *)(&seg), 1, sizeof(seg), myexefile) < sizeof(seg)) {
1527 : if (GC_print_stats) {
1528 : GC_err_puts("Couldn't read obj table entry from ");
1529 : GC_err_puts(path);
1530 : GC_err_puts("\n");
1531 : }
1532 : ABORT("Couldn't read obj table entry");
1533 : }
1534 : flags = O32_FLAGS(seg);
1535 : if (!(flags & OBJWRITE)) continue;
1536 : if (!(flags & OBJREAD)) continue;
1537 : if (flags & OBJINVALID) {
1538 : GC_err_printf("Object with invalid pages?\n");
1539 : continue;
1540 : }
1541 : GC_add_roots_inner((ptr_t)O32_BASE(seg),
1542 : (ptr_t)(O32_BASE(seg)+O32_SIZE(seg)), FALSE);
1543 : }
1544 : }
1545 :
1546 : # else /* !OS2 */
1547 :
1548 : # if defined(GWW_VDB)
1549 : # ifndef MEM_WRITE_WATCH
1550 : # define MEM_WRITE_WATCH 0x200000
1551 : # endif
1552 : # ifndef WRITE_WATCH_FLAG_RESET
1553 : # define WRITE_WATCH_FLAG_RESET 1
1554 : # endif
1555 :
1556 : /* Since we can't easily check whether ULONG_PTR and SIZE_T are */
1557 : /* defined in Win32 basetsd.h, we define our own ULONG_PTR. */
1558 : # define GC_ULONG_PTR word
1559 :
1560 : typedef UINT (WINAPI * GetWriteWatch_type)(
1561 : DWORD, PVOID, GC_ULONG_PTR /* SIZE_T */,
1562 : PVOID *, GC_ULONG_PTR *, PULONG);
1563 : static GetWriteWatch_type GetWriteWatch_func;
1564 : static DWORD GetWriteWatch_alloc_flag;
1565 :
1566 : # define GC_GWW_AVAILABLE() (GetWriteWatch_func != NULL)
1567 :
1568 : static void detect_GetWriteWatch(void)
1569 : {
1570 : static GC_bool done;
1571 : HMODULE hK32;
1572 : if (done)
1573 : return;
1574 :
1575 : # if defined(MPROTECT_VDB)
1576 : {
1577 : char * str = GETENV("GC_USE_GETWRITEWATCH");
1578 : # if defined(GC_PREFER_MPROTECT_VDB)
1579 : if (str == NULL || (*str == '0' && *(str + 1) == '\0')) {
1580 : /* GC_USE_GETWRITEWATCH is unset or set to "0". */
1581 : done = TRUE; /* falling back to MPROTECT_VDB strategy. */
1582 : /* This should work as if GWW_VDB is undefined. */
1583 : return;
1584 : }
1585 : # else
1586 : if (str != NULL && *str == '0' && *(str + 1) == '\0') {
1587 : /* GC_USE_GETWRITEWATCH is set to "0". */
1588 : done = TRUE; /* falling back to MPROTECT_VDB strategy. */
1589 : return;
1590 : }
1591 : # endif
1592 : }
1593 : # endif
1594 :
1595 : hK32 = GetModuleHandle(TEXT("kernel32.dll"));
1596 : if (hK32 != (HMODULE)0 &&
1597 : (GetWriteWatch_func = (GetWriteWatch_type)GetProcAddress(hK32,
1598 : "GetWriteWatch")) != NULL) {
1599 : /* Also check whether VirtualAlloc accepts MEM_WRITE_WATCH, */
1600 : /* as some versions of kernel32.dll have one but not the */
1601 : /* other, making the feature completely broken. */
1602 : void * page = VirtualAlloc(NULL, GC_page_size,
1603 : MEM_WRITE_WATCH | MEM_RESERVE,
1604 : PAGE_READWRITE);
1605 : if (page != NULL) {
1606 : PVOID pages[16];
1607 : GC_ULONG_PTR count = 16;
1608 : DWORD page_size;
1609 : /* Check that it actually works. In spite of some */
1610 : /* documentation it actually seems to exist on W2K. */
1611 : /* This test may be unnecessary, but ... */
1612 : if (GetWriteWatch_func(WRITE_WATCH_FLAG_RESET,
1613 : page, GC_page_size,
1614 : pages,
1615 : &count,
1616 : &page_size) != 0) {
1617 : /* GetWriteWatch always fails. */
1618 : GetWriteWatch_func = NULL;
1619 : } else {
1620 : GetWriteWatch_alloc_flag = MEM_WRITE_WATCH;
1621 : }
1622 : VirtualFree(page, GC_page_size, MEM_RELEASE);
1623 : } else {
1624 : /* GetWriteWatch will be useless. */
1625 : GetWriteWatch_func = NULL;
1626 : }
1627 : }
1628 : if (GC_print_stats) {
1629 : if (GetWriteWatch_func == NULL) {
1630 : GC_log_printf("Did not find a usable GetWriteWatch()\n");
1631 : } else {
1632 : GC_log_printf("Using GetWriteWatch()\n");
1633 : }
1634 : }
1635 : done = TRUE;
1636 : }
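/* A minimal sketch (compiled out) of how the detected function is */
/* then consumed for dirty-bit maintenance: query and reset, in one */
/* call, the pages written since the last reset within one heap */
/* section. The helper name is hypothetical; the collector's real */
/* consumer is the GWW virtual-dirty-bit code later in this file, */
/* and a real caller would loop if count overflows the array. */
#if 0
static void scan_written_pages(ptr_t buf, GC_ULONG_PTR len)
{
    PVOID pages[64];
    GC_ULONG_PTR count = 64;
    DWORD page_size;

    if (GC_GWW_AVAILABLE()
        && GetWriteWatch_func(WRITE_WATCH_FLAG_RESET, buf, len,
                              pages, &count, &page_size) == 0) {
        GC_ULONG_PTR i;
        for (i = 0; i < count; ++i) {
            /* pages[i] is the base of a page written since the */
            /* previous reset; mark it dirty here. */
        }
    }
}
#endif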
1637 :
1638 : # else
1639 : # define GetWriteWatch_alloc_flag 0
1640 : # endif /* !GWW_VDB */
1641 :
1642 : # if defined(MSWIN32) || defined(MSWINCE) || defined(CYGWIN32)
1643 :
1644 : # ifdef MSWIN32
1645 : /* Unfortunately, we have to handle win32s very differently from NT, */
1646 : /* since VirtualQuery has very different semantics. In particular, */
1647 : /* under win32s a VirtualQuery call on an unmapped page returns an */
1648 : /* invalid result. Under NT, GC_register_data_segments is a no-op */
1649 : /* and all real work is done by GC_register_dynamic_libraries. Under */
1650 : /* win32s, we cannot find the data segments associated with dll's. */
1651 : /* We register the main data segment here. */
1652 : GC_INNER GC_bool GC_no_win32_dlls = FALSE;
1653 : /* This used to be set for gcc, to avoid dealing with */
1654 : /* the structured exception handling issues. But we now have */
1655 : /* assembly code to do that right. */
1656 :
1657 : GC_INNER GC_bool GC_wnt = FALSE;
1658 : /* This is a Windows NT derivative, i.e. NT, W2K, XP or later. */
1659 :
1660 : GC_INNER void GC_init_win32(void)
1661 : {
1662 : /* Set GC_wnt. If we're running under win32s, assume that no DLLs */
1663 : /* will be loaded. I doubt anyone still runs win32s, but... */
1664 : DWORD v = GetVersion();
1665 : GC_wnt = !(v & 0x80000000);
1666 : GC_no_win32_dlls |= ((!GC_wnt) && (v & 0xff) <= 3);
1667 : # ifdef USE_MUNMAP
1668 : if (GC_no_win32_dlls) {
1669 : /* Turn off unmapping for safety (since it may not work well with */
1670 : /* GlobalAlloc). */
1671 : GC_unmap_threshold = 0;
1672 : }
1673 : # endif
1674 : }
1675 :
1676 : /* Return the smallest address a such that VirtualQuery */
1677 : /* returns correct results for all addresses between a and start. */
1678 : /* Assumes VirtualQuery returns correct information for start. */
1679 : STATIC ptr_t GC_least_described_address(ptr_t start)
1680 : {
1681 : MEMORY_BASIC_INFORMATION buf;
1682 : size_t result;
1683 : LPVOID limit;
1684 : ptr_t p;
1685 : LPVOID q;
1686 :
1687 : limit = GC_sysinfo.lpMinimumApplicationAddress;
1688 : p = (ptr_t)((word)start & ~(GC_page_size - 1));
1689 : for (;;) {
1690 : q = (LPVOID)(p - GC_page_size);
1691 : if ((ptr_t)q > (ptr_t)p /* underflow */ || q < limit) break;
1692 : result = VirtualQuery(q, &buf, sizeof(buf));
1693 : if (result != sizeof(buf) || buf.AllocationBase == 0) break;
1694 : p = (ptr_t)(buf.AllocationBase);
1695 : }
1696 : return p;
1697 : }
1698 : # endif /* MSWIN32 */
1699 :
1700 : # ifndef REDIRECT_MALLOC
1701 : /* We maintain a linked list of AllocationBase values that we know */
1702 : /* correspond to malloc heap sections. Currently this is only called */
1703 : /* during a GC. But there is some hope that for long-running */
1704 : /* programs we will eventually see most heap sections. */
1705 :
1706 : /* In the long run, it would be more reliable to occasionally walk */
1707 : /* the malloc heap with HeapWalk on the default heap. But that */
1708 : /* apparently works only for NT-based Windows. */
1709 :
1710 : STATIC size_t GC_max_root_size = 100000; /* Appr. largest root size. */
1711 :
1712 : # ifndef CYGWIN32
1713 : /* In the long run, a better data structure would also be nice ... */
1714 : STATIC struct GC_malloc_heap_list {
1715 : void * allocation_base;
1716 : struct GC_malloc_heap_list *next;
1717 : } *GC_malloc_heap_l = 0;
1718 :
1719 : /* Is p the base of one of the malloc heap sections we already know */
1720 : /* about? */
1721 : STATIC GC_bool GC_is_malloc_heap_base(ptr_t p)
1722 : {
1723 : struct GC_malloc_heap_list *q = GC_malloc_heap_l;
1724 :
1725 : while (0 != q) {
1726 : if (q -> allocation_base == p) return TRUE;
1727 : q = q -> next;
1728 : }
1729 : return FALSE;
1730 : }
1731 :
1732 : STATIC void *GC_get_allocation_base(void *p)
1733 : {
1734 : MEMORY_BASIC_INFORMATION buf;
1735 : size_t result = VirtualQuery(p, &buf, sizeof(buf));
1736 : if (result != sizeof(buf)) {
1737 : ABORT("Weird VirtualQuery result");
1738 : }
1739 : return buf.AllocationBase;
1740 : }
1741 :
1742 : GC_INNER void GC_add_current_malloc_heap(void)
1743 : {
1744 : struct GC_malloc_heap_list *new_l =
1745 : malloc(sizeof(struct GC_malloc_heap_list));
1746 : void * candidate = new_l != 0 ? GC_get_allocation_base(new_l) : 0;
1747 :
1748 : if (new_l == 0) return;
1749 : if (GC_is_malloc_heap_base(candidate)) {
1750 : /* Try a little harder to find malloc heap. */
1751 : size_t req_size = 10000;
1752 : do {
1753 : void *p = malloc(req_size);
1754 : if (0 == p) {
1755 : free(new_l);
1756 : return;
1757 : }
1758 : candidate = GC_get_allocation_base(p);
1759 : free(p);
1760 : req_size *= 2;
1761 : } while (GC_is_malloc_heap_base(candidate)
1762 : && req_size < GC_max_root_size/10 && req_size < 500000);
1763 : if (GC_is_malloc_heap_base(candidate)) {
1764 : free(new_l);
1765 : return;
1766 : }
1767 : }
1768 : if (GC_print_stats)
1769 : GC_log_printf("Found new system malloc AllocationBase at %p\n",
1770 : candidate);
1771 : new_l -> allocation_base = candidate;
1772 : new_l -> next = GC_malloc_heap_l;
1773 : GC_malloc_heap_l = new_l;
1774 : }
1775 : # endif /* !CYGWIN32 */
1776 :
1777 : # endif /* !REDIRECT_MALLOC */
1778 :
1779 : STATIC word GC_n_heap_bases = 0; /* See GC_heap_bases. */
1780 :
1781 : /* Is p the start of either the malloc heap, or of one of our */
1782 : /* heap sections? */
1783 : GC_INNER GC_bool GC_is_heap_base(ptr_t p)
1784 : {
1785 : unsigned i;
1786 : # ifndef REDIRECT_MALLOC
1787 : if (GC_root_size > GC_max_root_size) GC_max_root_size = GC_root_size;
1788 : # ifndef CYGWIN32
1789 : if (GC_is_malloc_heap_base(p)) return TRUE;
1790 : # endif
1791 : # endif
1792 : for (i = 0; i < GC_n_heap_bases; i++) {
1793 : if (GC_heap_bases[i] == p) return TRUE;
1794 : }
1795 : return FALSE;
1796 : }
1797 :
1798 : #ifdef MSWIN32
1799 : STATIC void GC_register_root_section(ptr_t static_root)
1800 : {
1801 : MEMORY_BASIC_INFORMATION buf;
1802 : size_t result;
1803 : DWORD protect;
1804 : LPVOID p;
1805 : char * base;
1806 : char * limit, * new_limit;
1807 :
1808 : if (!GC_no_win32_dlls) return;
1809 : p = base = limit = GC_least_described_address(static_root);
1810 : while (p < GC_sysinfo.lpMaximumApplicationAddress) {
1811 : result = VirtualQuery(p, &buf, sizeof(buf));
1812 : if (result != sizeof(buf) || buf.AllocationBase == 0
1813 : || GC_is_heap_base(buf.AllocationBase)) break;
1814 : new_limit = (char *)p + buf.RegionSize;
1815 : protect = buf.Protect;
1816 : if (buf.State == MEM_COMMIT
1817 : && is_writable(protect)) {
1818 : if ((char *)p == limit) {
1819 : limit = new_limit;
1820 : } else {
1821 : if (base != limit) GC_add_roots_inner(base, limit, FALSE);
1822 : base = p;
1823 : limit = new_limit;
1824 : }
1825 : }
1826 : if (p > (LPVOID)new_limit /* overflow */) break;
1827 : p = (LPVOID)new_limit;
1828 : }
1829 : if (base != limit) GC_add_roots_inner(base, limit, FALSE);
1830 : }
1831 : #endif /* MSWIN32 */
1832 :
1833 : void GC_register_data_segments(void)
1834 : {
1835 : # ifdef MSWIN32
1836 : GC_register_root_section((ptr_t)&GC_pages_executable);
1837 : /* any other GC global variable would fit too. */
1838 : # endif
1839 : }
1840 :
1841 : # else /* !OS2 && !Windows */
1842 :
1843 : # if (defined(SVR4) || defined(AUX) || defined(DGUX) \
1844 : || (defined(LINUX) && defined(SPARC))) && !defined(PCR)
1845 : ptr_t GC_SysVGetDataStart(size_t max_page_size, ptr_t etext_addr)
1846 : {
1847 : word text_end = ((word)(etext_addr) + sizeof(word) - 1)
1848 : & ~(sizeof(word) - 1);
1849 : /* etext rounded to word boundary */
1850 : word next_page = ((text_end + (word)max_page_size - 1)
1851 : & ~((word)max_page_size - 1));
1852 : word page_offset = (text_end & ((word)max_page_size - 1));
1853 : char * volatile result = (char *)(next_page + page_offset);
1854 : /* Note that this isn't equivalent to just adding */
1855 : /* max_page_size to &etext if &etext is at a page boundary */
1856 :
1857 : GC_setup_temporary_fault_handler();
1858 : if (SETJMP(GC_jmp_buf) == 0) {
1859 : /* Try writing to the address. */
1860 : *result = *result;
1861 : GC_reset_fault_handler();
1862 : } else {
1863 : GC_reset_fault_handler();
1864 : /* We got here via a longjmp. The address is not readable. */
1865 : /* This is known to happen under Solaris 2.4 + gcc, which place */
1866 : /* string constants in the text segment, but after etext. */
1867 : /* Use plan B. Note that we now know there is a gap between */
1868 : /* text and data segments, so plan A bought us something. */
1869 : result = (char *)GC_find_limit((ptr_t)(DATAEND), FALSE);
1870 : }
1871 : return((ptr_t)result);
1872 : }
1873 : # endif
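/* An illustrative, self-contained version (not collector code) of the */
/* temporary-fault-handler probe idiom used by GC_SysVGetDataStart     */
/* above and GC_FreeBSDGetDataStart below.  The name is_readable is    */
/* hypothetical; the collector's real helpers are                      */
/* GC_setup_temporary_fault_handler and GC_jmp_buf.                    */
#if 0
#include <setjmp.h>
#include <signal.h>

static sigjmp_buf probe_env;

static void probe_handler(int sig)
{
    (void)sig;
    siglongjmp(probe_env, 1);            /* unwind out of the fault */
}

static int is_readable(const char *addr)
{
    struct sigaction sa, old_segv, old_bus;
    volatile char c;
    int ok;

    sa.sa_handler = probe_handler;
    sigemptyset(&sa.sa_mask);
    sa.sa_flags = 0;
    sigaction(SIGSEGV, &sa, &old_segv);
    sigaction(SIGBUS, &sa, &old_bus);    /* some systems raise SIGBUS */
    if (sigsetjmp(probe_env, 1) == 0) {
        c = *addr;                       /* may fault */
        (void)c;
        ok = 1;
    } else {
        ok = 0;                          /* reached via siglongjmp */
    }
    sigaction(SIGSEGV, &old_segv, 0);
    sigaction(SIGBUS, &old_bus, 0);
    return ok;
}
#endif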
1874 :
1875 : # if defined(FREEBSD) && !defined(PCR) && (defined(I386) || defined(X86_64) \
1876 : || defined(powerpc) || defined(__powerpc__))
1877 :
1878 : /* It's unclear whether this should be identical to the above, or */
1879 : /* whether it should also apply to non-X86 architectures. */
1880 : /* For now we don't assume that there is always an empty page after */
1881 : /* etext. But in some cases there actually seems to be slightly more. */
1882 : /* This also deals with holes between read-only data and writable data. */
1883 : ptr_t GC_FreeBSDGetDataStart(size_t max_page_size, ptr_t etext_addr)
1884 : {
1885 : word text_end = ((word)(etext_addr) + sizeof(word) - 1)
1886 : & ~(sizeof(word) - 1);
1887 : /* etext rounded to word boundary */
1888 : volatile word next_page = (text_end + (word)max_page_size - 1)
1889 : & ~((word)max_page_size - 1);
1890 : volatile ptr_t result = (ptr_t)text_end;
1891 : GC_setup_temporary_fault_handler();
1892 : if (SETJMP(GC_jmp_buf) == 0) {
1893 : /* Try reading at the address. */
1894 : /* This should happen before there is another thread. */
1895 : for (; next_page < (word)(DATAEND); next_page += (word)max_page_size)
1896 : *(volatile char *)next_page;
1897 : GC_reset_fault_handler();
1898 : } else {
1899 : GC_reset_fault_handler();
1900 : /* As above, we go to plan B */
1901 : result = GC_find_limit((ptr_t)(DATAEND), FALSE);
1902 : }
1903 : return(result);
1904 : }
1905 :
1906 : # endif /* FREEBSD */
1907 :
1908 :
1909 : #ifdef AMIGA
1910 :
1911 : # define GC_AMIGA_DS
1912 : # include "extra/AmigaOS.c"
1913 : # undef GC_AMIGA_DS
1914 :
1915 : #elif defined(OPENBSD)
1916 :
1917 : /* Depending on arch alignment, there can be multiple holes */
1918 : /* between DATASTART and DATAEND. Scan in DATASTART .. DATAEND */
1919 : /* and register each region. */
1920 : void GC_register_data_segments(void)
1921 : {
1922 : ptr_t region_start = DATASTART;
1923 : ptr_t region_end;
1924 :
1925 : for (;;) {
1926 : region_end = GC_find_limit_openbsd(region_start, DATAEND);
1927 : GC_add_roots_inner(region_start, region_end, FALSE);
1928 : if (region_end >= DATAEND)
1929 : break;
1930 : region_start = GC_skip_hole_openbsd(region_end, DATAEND);
1931 : }
1932 : }
1933 :
1934 : # else /* !OS2 && !Windows && !AMIGA && !OPENBSD */
1935 :
1936 0 : void GC_register_data_segments(void)
1937 : {
1938 : # if !defined(PCR) && !defined(MACOS)
1939 : # if defined(REDIRECT_MALLOC) && defined(GC_SOLARIS_THREADS)
1940 : /* As of Solaris 2.3, the Solaris threads implementation */
1941 : /* allocates the data structure for the initial thread with */
1942 : /* sbrk at process startup. It needs to be scanned, so that */
1943 : /* we don't lose some malloc allocated data structures */
1944 : /* hanging from it. We're on thin ice here ... */
1945 : extern caddr_t sbrk(int);
1946 :
1947 : GC_add_roots_inner(DATASTART, (ptr_t)sbrk(0), FALSE);
1948 : # else
1949 0 : GC_add_roots_inner(DATASTART, (ptr_t)(DATAEND), FALSE);
1950 : # if defined(DATASTART2)
1951 : GC_add_roots_inner(DATASTART2, (ptr_t)(DATAEND2), FALSE);
1952 : # endif
1953 : # endif
1954 : # endif
1955 : # if defined(MACOS)
1956 : {
1957 : # if defined(THINK_C)
1958 : extern void* GC_MacGetDataStart(void);
1959 : /* globals begin above stack and end at a5. */
1960 : GC_add_roots_inner((ptr_t)GC_MacGetDataStart(),
1961 : (ptr_t)LMGetCurrentA5(), FALSE);
1962 : # else
1963 : # if defined(__MWERKS__)
1964 : # if !__POWERPC__
1965 : extern void* GC_MacGetDataStart(void);
1966 : /* MATTHEW: Function to handle Far Globals (CW Pro 3) */
1967 : # if __option(far_data)
1968 : extern void* GC_MacGetDataEnd(void);
1969 : # endif
1970 : /* globals begin above stack and end at a5. */
1971 : GC_add_roots_inner((ptr_t)GC_MacGetDataStart(),
1972 : (ptr_t)LMGetCurrentA5(), FALSE);
1973 : /* MATTHEW: Handle Far Globals */
1974 : # if __option(far_data)
1975 : /* Far globals follow the QD globals: */
1976 : GC_add_roots_inner((ptr_t)LMGetCurrentA5(),
1977 : (ptr_t)GC_MacGetDataEnd(), FALSE);
1978 : # endif
1979 : # else
1980 : extern char __data_start__[], __data_end__[];
1981 : GC_add_roots_inner((ptr_t)&__data_start__,
1982 : (ptr_t)&__data_end__, FALSE);
1983 : # endif /* __POWERPC__ */
1984 : # endif /* __MWERKS__ */
1985 : # endif /* !THINK_C */
1986 : }
1987 : # endif /* MACOS */
1988 :
1989 : /* Dynamic libraries are added at every collection, since they may */
1990 : /* change. */
1991 0 : }
1992 :
1993 : # endif /* ! AMIGA */
1994 : # endif /* ! MSWIN32 && ! MSWINCE*/
1995 : # endif /* ! OS2 */
1996 :
1997 : /*
1998 : * Auxiliary routines for obtaining memory from OS.
1999 : */
2000 :
2001 : # if !defined(OS2) && !defined(PCR) && !defined(AMIGA) && !defined(MSWIN32) \
2002 : && !defined(MSWINCE) && !defined(MACOS) && !defined(DOS4GW) \
2003 : && !defined(NONSTOP) && !defined(SN_TARGET_PS3) && !defined(RTEMS) \
2004 : && !defined(__CC_ARM)
2005 :
2006 : # define SBRK_ARG_T ptrdiff_t
2007 :
2008 : #if defined(MMAP_SUPPORTED)
2009 :
2010 : #ifdef USE_MMAP_FIXED
2011 : # define GC_MMAP_FLAGS MAP_FIXED | MAP_PRIVATE
2012 : /* Seems to yield better performance on Solaris 2, but can */
2013 : /* be unreliable if something is already mapped at the address. */
2014 : #else
2015 : # define GC_MMAP_FLAGS MAP_PRIVATE
2016 : #endif
2017 :
2018 : #ifdef USE_MMAP_ANON
2019 : # define zero_fd -1
2020 : # if defined(MAP_ANONYMOUS)
2021 : # define OPT_MAP_ANON MAP_ANONYMOUS
2022 : # else
2023 : # define OPT_MAP_ANON MAP_ANON
2024 : # endif
2025 : #else
2026 : static int zero_fd;
2027 : # define OPT_MAP_ANON 0
2028 : #endif
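/* Illustrative expansion of the configuration above (an assumption    */
/* about typical builds, not a statement about every platform): with   */
/* USE_MMAP_ANON and MAP_ANONYMOUS available, the allocation below is  */
/* effectively                                                         */
/*   mmap(last_addr, bytes, PROT_READ | PROT_WRITE,                    */
/*        GC_MMAP_FLAGS | MAP_ANONYMOUS, -1, 0);                       */
/* while without USE_MMAP_ANON the same pages come from mapping the    */
/* opened /dev/zero file descriptor instead.                           */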
2029 :
2030 : #ifndef HEAP_START
2031 : # define HEAP_START ((ptr_t)0)
2032 : #endif
2033 :
2034 0 : STATIC ptr_t GC_unix_mmap_get_mem(word bytes)
2035 : {
2036 : void *result;
2037 : static ptr_t last_addr = HEAP_START;
2038 :
2039 : # ifndef USE_MMAP_ANON
2040 : static GC_bool initialized = FALSE;
2041 :
2042 0 : if (!initialized) {
2043 0 : zero_fd = open("/dev/zero", O_RDONLY);
2044 0 : fcntl(zero_fd, F_SETFD, FD_CLOEXEC);
2045 0 : initialized = TRUE;
2046 : }
2047 : # endif
2048 :
2049 0 : if (bytes & (GC_page_size - 1)) ABORT("Bad GET_MEM arg");
2050 0 : result = mmap(last_addr, bytes, (PROT_READ | PROT_WRITE)
2051 0 : | (GC_pages_executable ? PROT_EXEC : 0),
2052 : GC_MMAP_FLAGS | OPT_MAP_ANON, zero_fd, 0/* offset */);
2053 : # undef IGNORE_PAGES_EXECUTABLE
2054 :
2055 0 : if (result == MAP_FAILED) return(0);
2056 0 : last_addr = (ptr_t)result + bytes + GC_page_size - 1;
2057 0 : last_addr = (ptr_t)((word)last_addr & ~(GC_page_size - 1));
2058 : # if !defined(LINUX)
2059 : if (last_addr == 0) {
2060 : /* Oops. We got the end of the address space. This isn't */
2061 : /* usable by arbitrary C code, since one-past-end pointers */
2062 : /* don't work, so we discard it and try again. */
2063 : munmap(result, (size_t)(-GC_page_size) - (size_t)result);
2064 : /* Leave last page mapped, so we can't repeat. */
2065 : return GC_unix_mmap_get_mem(bytes);
2066 : }
2067 : # else
2068 : GC_ASSERT(last_addr != 0);
2069 : # endif
2070 0 : return((ptr_t)result);
2071 : }
2072 :
2073 : # endif /* MMAP_SUPPORTED */
2074 :
2075 : #if defined(USE_MMAP)
2076 : ptr_t GC_unix_get_mem(word bytes)
2077 : {
2078 : return GC_unix_mmap_get_mem(bytes);
2079 : }
2080 : #else /* !USE_MMAP */
2081 :
2082 1190 : STATIC ptr_t GC_unix_sbrk_get_mem(word bytes)
2083 : {
2084 : ptr_t result;
2085 : # ifdef IRIX5
2086 : /* Bare sbrk isn't thread safe. Play by malloc rules. */
2087 : /* The equivalent may be needed on other systems as well. */
2088 : __LOCK_MALLOC();
2089 : # endif
2090 : {
2091 1190 : ptr_t cur_brk = (ptr_t)sbrk(0);
2092 1190 : SBRK_ARG_T lsbs = (word)cur_brk & (GC_page_size-1);
2093 :
2094 1190 : if ((SBRK_ARG_T)bytes < 0) {
2095 0 : result = 0; /* too big */
2096 0 : goto out;
2097 : }
2098 1190 : if (lsbs != 0) {
2099 0 : if ((ptr_t)sbrk(GC_page_size - lsbs) == (ptr_t)(-1)) {
2100 0 : result = 0;
2101 0 : goto out;
2102 : }
2103 : }
2104 : # ifdef ADD_HEAP_GUARD_PAGES
2105 : /* This is useful for catching severe memory overwrite problems that */
2106 : /* span heap sections. It shouldn't otherwise be turned on. */
2107 : {
2108 : ptr_t guard = (ptr_t)sbrk((SBRK_ARG_T)GC_page_size);
2109 : if (mprotect(guard, GC_page_size, PROT_NONE) != 0)
2110 : ABORT("ADD_HEAP_GUARD_PAGES: mprotect failed");
2111 : }
2112 : # endif /* ADD_HEAP_GUARD_PAGES */
2113 1190 : result = (ptr_t)sbrk((SBRK_ARG_T)bytes);
2114 1190 : if (result == (ptr_t)(-1)) result = 0;
2115 : }
2116 : out:
2117 : # ifdef IRIX5
2118 : __UNLOCK_MALLOC();
2119 : # endif
2120 1190 : return(result);
2121 : }
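/* Worked example of the alignment step above (illustrative numbers,  */
/* assuming GC_page_size == 0x1000): if sbrk(0) returns 0x20345, then */
/* lsbs == 0x345 and sbrk(0x1000 - 0x345) advances the break to the   */
/* page boundary 0x21000 before the real request for bytes is made.   */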
2122 :
2123 1190 : ptr_t GC_unix_get_mem(word bytes)
2124 : {
2125 : # if defined(MMAP_SUPPORTED)
2126 : /* By default, we try both sbrk and mmap, in that order. */
2127 : static GC_bool sbrk_failed = FALSE;
2128 1190 : ptr_t result = 0;
2129 :
2130 1190 : if (!sbrk_failed) result = GC_unix_sbrk_get_mem(bytes);
2131 1190 : if (0 == result) {
2132 0 : sbrk_failed = TRUE;
2133 0 : result = GC_unix_mmap_get_mem(bytes);
2134 : }
2135 1190 : if (0 == result) {
2136 : /* Try sbrk again, in case sbrk memory became available. */
2137 0 : result = GC_unix_sbrk_get_mem(bytes);
2138 : }
2139 1190 : return result;
2140 : # else /* !MMAP_SUPPORTED */
2141 : return GC_unix_sbrk_get_mem(bytes);
2142 : # endif
2143 : }
2144 :
2145 : #endif /* !USE_MMAP */
2146 :
2147 : # endif /* UN*X */
2148 :
2149 : # ifdef OS2
2150 :
2151 : void * os2_alloc(size_t bytes)
2152 : {
2153 : void * result;
2154 :
2155 : if (DosAllocMem(&result, bytes, (PAG_READ | PAG_WRITE | PAG_COMMIT)
2156 : | (GC_pages_executable ? PAG_EXECUTE : 0))
2157 : != NO_ERROR) {
2158 : return(0);
2159 : }
2160 : /* FIXME: What's the purpose of this recursion? (Probably: if */
2161 : /* DosAllocMem returns memory at address 0, just retry once.) */
2162 : if (result == 0) return(os2_alloc(bytes));
2163 : return(result);
2164 : }
2165 :
2166 : # endif /* OS2 */
2167 :
2168 : # if defined(MSWIN32) || defined(MSWINCE) || defined(CYGWIN32)
2169 : GC_INNER SYSTEM_INFO GC_sysinfo;
2170 : # endif
2171 :
2172 : #ifdef MSWIN32
2173 :
2174 : # ifdef USE_GLOBAL_ALLOC
2175 : # define GLOBAL_ALLOC_TEST 1
2176 : # else
2177 : # define GLOBAL_ALLOC_TEST GC_no_win32_dlls
2178 : # endif
2179 :
2180 : # ifdef GC_USE_MEM_TOP_DOWN
2181 : STATIC DWORD GC_mem_top_down = MEM_TOP_DOWN;
2182 : /* Use GC_USE_MEM_TOP_DOWN for better 64-bit */
2183 : /* testing. Otherwise all addresses tend to */
2184 : /* end up in the first 4 GB, hiding bugs. */
2185 : # else
2186 : STATIC DWORD GC_mem_top_down = 0;
2187 : # endif
2188 :
2189 : #endif /* MSWIN32 */
2190 :
2191 : #if defined(MSWIN32) || defined(CYGWIN32)
2192 : ptr_t GC_win32_get_mem(word bytes)
2193 : {
2194 : ptr_t result;
2195 :
2196 : # ifdef CYGWIN32
2197 : result = GC_unix_get_mem(bytes);
2198 : # else
2199 : if (GLOBAL_ALLOC_TEST) {
2200 : /* VirtualAlloc doesn't like PAGE_EXECUTE_READWRITE. */
2201 : /* There are also unconfirmed rumors of other */
2202 : /* problems, so we dodge the issue. */
2203 : result = (ptr_t) GlobalAlloc(0, bytes + HBLKSIZE);
2204 : result = (ptr_t)(((word)result + HBLKSIZE - 1) & ~(HBLKSIZE-1));
2205 : } else {
2206 : /* VirtualProtect only works on regions returned by a */
2207 : /* single VirtualAlloc call. Thus we allocate one */
2208 : /* extra page, which will prevent merging of blocks */
2209 : /* in separate regions, and eliminate any temptation */
2210 : /* to call VirtualProtect on a range spanning regions. */
2211 : /* This wastes a small amount of memory, and risks */
2212 : /* increased fragmentation. But better alternatives */
2213 : /* would require effort. */
2214 : # ifdef MPROTECT_VDB
2215 : /* We can't check for GC_incremental here (because */
2216 : /* GC_enable_incremental() might be called some time */
2217 : /* later after the GC initialization). */
2218 : # ifdef GWW_VDB
2219 : # define VIRTUAL_ALLOC_PAD (GC_GWW_AVAILABLE() ? 0 : 1)
2220 : # else
2221 : # define VIRTUAL_ALLOC_PAD 1
2222 : # endif
2223 : # else
2224 : # define VIRTUAL_ALLOC_PAD 0
2225 : # endif
2226 : /* Pass the MEM_WRITE_WATCH only if GetWriteWatch-based */
2227 : /* VDBs are enabled and the GetWriteWatch function is */
2228 : /* available. Otherwise we waste resources or possibly */
2229 : /* cause VirtualAlloc to fail (observed in Windows 2000 */
2230 : /* SP2). */
2231 : result = (ptr_t) VirtualAlloc(NULL, bytes + VIRTUAL_ALLOC_PAD,
2232 : GetWriteWatch_alloc_flag
2233 : | (MEM_COMMIT | MEM_RESERVE)
2234 : | GC_mem_top_down,
2235 : GC_pages_executable ? PAGE_EXECUTE_READWRITE :
2236 : PAGE_READWRITE);
2237 : # undef IGNORE_PAGES_EXECUTABLE
2238 : }
2239 : # endif /* !CYGWIN32 */
2240 : if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
2241 : /* If I read the documentation correctly, this can */
2242 : /* only happen if HBLKSIZE > 64k or not a power of 2. */
2243 : if (GC_n_heap_bases >= MAX_HEAP_SECTS) ABORT("Too many heap sections");
2244 : if (0 != result) GC_heap_bases[GC_n_heap_bases++] = result;
2245 : return(result);
2246 : }
2247 :
2248 : GC_API void GC_CALL GC_win32_free_heap(void)
2249 : {
2250 : # ifndef CYGWIN32
2251 : if (GC_no_win32_dlls)
2252 : # endif
2253 : {
2254 : while (GC_n_heap_bases-- > 0) {
2255 : # ifdef CYGWIN32
2256 : /* FIXME: Is it ok to use non-GC free() here? */
2257 : # else
2258 : GlobalFree(GC_heap_bases[GC_n_heap_bases]);
2259 : # endif
2260 : GC_heap_bases[GC_n_heap_bases] = 0;
2261 : }
2262 : }
2263 : }
2264 : #endif /* MSWIN32 || CYGWIN32 */
2265 :
2266 : #ifdef AMIGA
2267 : # define GC_AMIGA_AM
2268 : # include "extra/AmigaOS.c"
2269 : # undef GC_AMIGA_AM
2270 : #endif
2271 :
2272 :
2273 : #ifdef MSWINCE
2274 : ptr_t GC_wince_get_mem(word bytes)
2275 : {
2276 : ptr_t result = 0; /* initialized to prevent warning. */
2277 : word i;
2278 :
2279 : /* Round up allocation size to multiple of page size */
2280 : bytes = (bytes + GC_page_size-1) & ~(GC_page_size-1);
2281 :
2282 : /* Try to find reserved, uncommitted pages */
2283 : for (i = 0; i < GC_n_heap_bases; i++) {
2284 : if (((word)(-(signed_word)GC_heap_lengths[i])
2285 : & (GC_sysinfo.dwAllocationGranularity-1))
2286 : >= bytes) {
2287 : result = GC_heap_bases[i] + GC_heap_lengths[i];
2288 : break;
2289 : }
2290 : }
2291 :
2292 : if (i == GC_n_heap_bases) {
2293 : /* Reserve more pages */
2294 : word res_bytes = (bytes + GC_sysinfo.dwAllocationGranularity-1)
2295 : & ~(GC_sysinfo.dwAllocationGranularity-1);
2296 : /* If we ever support MPROTECT_VDB here, we will probably need to */
2297 : /* ensure that res_bytes is strictly > bytes, so that VirtualProtect */
2298 : /* never spans regions. It seems to be OK for a VirtualFree */
2299 : /* argument to span regions, so we should be OK for now. */
2300 : result = (ptr_t) VirtualAlloc(NULL, res_bytes,
2301 : MEM_RESERVE | MEM_TOP_DOWN,
2302 : GC_pages_executable ? PAGE_EXECUTE_READWRITE :
2303 : PAGE_READWRITE);
2304 : if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
2305 : /* If I read the documentation correctly, this can */
2306 : /* only happen if HBLKSIZE > 64k or not a power of 2. */
2307 : if (GC_n_heap_bases >= MAX_HEAP_SECTS) ABORT("Too many heap sections");
2308 : if (result == NULL) return NULL;
2309 : GC_heap_bases[GC_n_heap_bases] = result;
2310 : GC_heap_lengths[GC_n_heap_bases] = 0;
2311 : GC_n_heap_bases++;
2312 : }
2313 :
2314 : /* Commit pages */
2315 : result = (ptr_t) VirtualAlloc(result, bytes, MEM_COMMIT,
2316 : GC_pages_executable ? PAGE_EXECUTE_READWRITE :
2317 : PAGE_READWRITE);
2318 : # undef IGNORE_PAGES_EXECUTABLE
2319 :
2320 : if (result != NULL) {
2321 : if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
2322 : GC_heap_lengths[i] += bytes;
2323 : }
2324 :
2325 : return(result);
2326 : }
2327 : #endif
2328 :
2329 : #ifdef USE_MUNMAP
2330 :
2331 : /* For now, this only works on Win32/WinCE and some Unix-like */
2332 : /* systems. If you have something else, don't define */
2333 : /* USE_MUNMAP. */
2334 :
2335 : #if !defined(MSWIN32) && !defined(MSWINCE)
2336 :
2337 : #include <unistd.h>
2338 : #include <sys/mman.h>
2339 : #include <sys/stat.h>
2340 : #include <sys/types.h>
2341 :
2342 : #endif
2343 :
2344 : /* Compute a page aligned starting address for the unmap */
2345 : /* operation on a block of size bytes starting at start. */
2346 : /* Return 0 if the block is too small to make this feasible. */
2347 : STATIC ptr_t GC_unmap_start(ptr_t start, size_t bytes)
2348 : {
2349 : ptr_t result;
2350 : /* Round start to next page boundary. */
2351 : result = (ptr_t)((word)(start + GC_page_size - 1) & ~(GC_page_size - 1));
2352 : if (result + GC_page_size > start + bytes) return 0;
2353 : return result;
2354 : }
2355 :
2356 : /* Compute end address for an unmap operation on the indicated */
2357 : /* block. */
2358 : STATIC ptr_t GC_unmap_end(ptr_t start, size_t bytes)
2359 : {
2360 : return (ptr_t)((word)(start + bytes) & ~(GC_page_size - 1));
2361 : }
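/* Worked example (illustrative, assuming GC_page_size == 0x1000): for */
/* start == 0x10200 and bytes == 0x2200, GC_unmap_start rounds up to   */
/* 0x11000 (usable, since 0x11000 + 0x1000 <= 0x12400), and            */
/* GC_unmap_end rounds 0x10200 + 0x2200 == 0x12400 down to 0x12000.    */
/* Only the fully contained pages [0x11000, 0x12000) are unmapped.     */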
2362 :
2363 : /* Under Win32/WinCE we commit (map) and decommit (unmap) */
2364 : /* memory using VirtualAlloc and VirtualFree. These functions */
2365 : /* work on individual allocations of virtual memory, made */
2366 : /* previously using VirtualAlloc with the MEM_RESERVE flag. */
2367 : /* The ranges we need to (de)commit may span several of these */
2368 : /* allocations; therefore we use VirtualQuery to check */
2369 : /* allocation lengths, and split up the range as necessary. */
2370 :
2371 : /* We assume that GC_remap is called on exactly the same range */
2372 : /* as a previous call to GC_unmap. It is safe to consistently */
2373 : /* round the endpoints in both places. */
2374 : GC_INNER void GC_unmap(ptr_t start, size_t bytes)
2375 : {
2376 : ptr_t start_addr = GC_unmap_start(start, bytes);
2377 : ptr_t end_addr = GC_unmap_end(start, bytes);
2378 : word len = end_addr - start_addr;
2379 : if (0 == start_addr) return;
2380 : # if defined(MSWIN32) || defined(MSWINCE)
2381 : while (len != 0) {
2382 : MEMORY_BASIC_INFORMATION mem_info;
2383 : GC_word free_len;
2384 : if (VirtualQuery(start_addr, &mem_info, sizeof(mem_info))
2385 : != sizeof(mem_info))
2386 : ABORT("Weird VirtualQuery result");
2387 : free_len = (len < mem_info.RegionSize) ? len : mem_info.RegionSize;
2388 : if (!VirtualFree(start_addr, free_len, MEM_DECOMMIT))
2389 : ABORT("VirtualFree failed");
2390 : GC_unmapped_bytes += free_len;
2391 : start_addr += free_len;
2392 : len -= free_len;
2393 : }
2394 : # else
2395 : /* We immediately remap it to prevent an intervening mmap from */
2396 : /* accidentally grabbing the same address space. */
2397 : {
2398 : void * result;
2399 : result = mmap(start_addr, len, PROT_NONE,
2400 : MAP_PRIVATE | MAP_FIXED | OPT_MAP_ANON,
2401 : zero_fd, 0/* offset */);
2402 : if (result != (void *)start_addr)
2403 : ABORT("mmap(PROT_NONE) failed");
2404 : }
2405 : GC_unmapped_bytes += len;
2406 : # endif
2407 : }
2408 :
2409 : GC_INNER void GC_remap(ptr_t start, size_t bytes)
2410 : {
2411 : ptr_t start_addr = GC_unmap_start(start, bytes);
2412 : ptr_t end_addr = GC_unmap_end(start, bytes);
2413 : word len = end_addr - start_addr;
2414 :
2415 : /* FIXME: Handle out-of-memory correctly (at least for Win32) */
2416 : # if defined(MSWIN32) || defined(MSWINCE)
2417 : ptr_t result;
2418 :
2419 : if (0 == start_addr) return;
2420 : while (len != 0) {
2421 : MEMORY_BASIC_INFORMATION mem_info;
2422 : GC_word alloc_len;
2423 : if (VirtualQuery(start_addr, &mem_info, sizeof(mem_info))
2424 : != sizeof(mem_info))
2425 : ABORT("Weird VirtualQuery result");
2426 : alloc_len = (len < mem_info.RegionSize) ? len : mem_info.RegionSize;
2427 : result = VirtualAlloc(start_addr, alloc_len, MEM_COMMIT,
2428 : GC_pages_executable ? PAGE_EXECUTE_READWRITE :
2429 : PAGE_READWRITE);
2430 : if (result != start_addr) {
2431 : if (GetLastError() == ERROR_NOT_ENOUGH_MEMORY ||
2432 : GetLastError() == ERROR_OUTOFMEMORY) {
2433 : ABORT("Not enough memory to process remapping");
2434 : } else {
2435 : ABORT("VirtualAlloc remapping failed");
2436 : }
2437 : }
2438 : GC_unmapped_bytes -= alloc_len;
2439 : start_addr += alloc_len;
2440 : len -= alloc_len;
2441 : }
2442 : # else
2443 : /* It was already remapped with PROT_NONE. */
2444 : int result;
2445 : if (0 == start_addr) return;
2446 :
2447 : # ifndef NACL
2448 : result = mprotect(start_addr, len, (PROT_READ | PROT_WRITE)
2449 : | (GC_pages_executable ? PROT_EXEC : 0));
2450 : # else
2451 : {
2452 : /* NaCl does not expose mprotect, but mmap should work fine. */
2453 : void *mmap_result = mmap(start_addr, len, (PROT_READ | PROT_WRITE)
2454 : | (GC_pages_executable ? PROT_EXEC : 0),
2455 : MAP_PRIVATE | MAP_FIXED | OPT_MAP_ANON,
2456 : zero_fd, 0 /* offset */);
2457 : if (mmap_result != (void *)start_addr)
2458 : ABORT("mmap as mprotect failed");
2459 : /* Fake the return value as if mprotect succeeded. */
2460 : result = 0;
2461 : }
2462 : # endif /* NACL */
2463 : # undef IGNORE_PAGES_EXECUTABLE
2464 :
2465 : if (result != 0) {
2466 : if (GC_print_stats)
2467 : GC_log_printf("Mprotect failed at %p (length %lu) with errno %d\n",
2468 : start_addr, (unsigned long)len, errno);
2469 : ABORT("mprotect remapping failed");
2470 : }
2471 : GC_unmapped_bytes -= len;
2472 : # endif
2473 : }
2474 :
2475 : /* Two adjacent blocks have already been unmapped and are about to */
2476 : /* be merged. Unmap the whole block. This typically requires */
2477 : /* that we unmap a small section in the middle that was not previously */
2478 : /* unmapped due to alignment constraints. */
2479 : GC_INNER void GC_unmap_gap(ptr_t start1, size_t bytes1, ptr_t start2,
2480 : size_t bytes2)
2481 : {
2482 : ptr_t start1_addr = GC_unmap_start(start1, bytes1);
2483 : ptr_t end1_addr = GC_unmap_end(start1, bytes1);
2484 : ptr_t start2_addr = GC_unmap_start(start2, bytes2);
2485 : ptr_t start_addr = end1_addr;
2486 : ptr_t end_addr = start2_addr;
2487 : size_t len;
2488 : GC_ASSERT(start1 + bytes1 == start2);
2489 : if (0 == start1_addr) start_addr = GC_unmap_start(start1, bytes1 + bytes2);
2490 : if (0 == start2_addr) end_addr = GC_unmap_end(start1, bytes1 + bytes2);
2491 : if (0 == start_addr) return;
2492 : len = end_addr - start_addr;
2493 : # if defined(MSWIN32) || defined(MSWINCE)
2494 : while (len != 0) {
2495 : MEMORY_BASIC_INFORMATION mem_info;
2496 : GC_word free_len;
2497 : if (VirtualQuery(start_addr, &mem_info, sizeof(mem_info))
2498 : != sizeof(mem_info))
2499 : ABORT("Weird VirtualQuery result");
2500 : free_len = (len < mem_info.RegionSize) ? len : mem_info.RegionSize;
2501 : if (!VirtualFree(start_addr, free_len, MEM_DECOMMIT))
2502 : ABORT("VirtualFree failed");
2503 : GC_unmapped_bytes += free_len;
2504 : start_addr += free_len;
2505 : len -= free_len;
2506 : }
2507 : # else
2508 : if (len != 0) {
2509 : /* Immediately remap as above. */
2510 : void * result;
2511 : result = mmap(start_addr, len, PROT_NONE,
2512 : MAP_PRIVATE | MAP_FIXED | OPT_MAP_ANON,
2513 : zero_fd, 0/* offset */);
2514 : if (result != (void *)start_addr)
2515 : ABORT("mmap(PROT_NONE) failed");
2516 : }
2517 : GC_unmapped_bytes += len;
2518 : # endif
2519 : }
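/* Worked example (same assumptions as the GC_unmap_start example       */
/* above): for adjacent blocks start1 == 0x10200, bytes1 == 0x2200 and  */
/* start2 == 0x12400, bytes2 == 0x1c00, the inner pages [0x11000,       */
/* 0x12000) and [0x13000, 0x14000) are already unmapped, and            */
/* GC_unmap_gap unmaps the leftover middle range [0x12000, 0x13000) so  */
/* that the merged block is uniformly unmapped.                         */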
2520 :
2521 : #endif /* USE_MUNMAP */
2522 :
2523 : /* Routine for pushing any additional roots. In THREADS */
2524 : /* environment, this is also responsible for marking from */
2525 : /* thread stacks. */
2526 : #ifndef THREADS
2527 : void (*GC_push_other_roots)(void) = 0;
2528 : #else /* THREADS */
2529 :
2530 : # ifdef PCR
2531 : PCR_ERes GC_push_thread_stack(PCR_Th_T *t, PCR_Any dummy)
2532 : {
2533 : struct PCR_ThCtl_TInfoRep info;
2534 : PCR_ERes result;
2535 :
2536 : info.ti_stkLow = info.ti_stkHi = 0;
2537 : result = PCR_ThCtl_GetInfo(t, &info);
2538 : GC_push_all_stack((ptr_t)(info.ti_stkLow), (ptr_t)(info.ti_stkHi));
2539 : return(result);
2540 : }
2541 :
2542 : /* Push the contents of an old object. We treat this as stack */
2543 : /* data only because that makes it robust against mark stack */
2544 : /* overflow. */
2545 : PCR_ERes GC_push_old_obj(void *p, size_t size, PCR_Any data)
2546 : {
2547 : GC_push_all_stack((ptr_t)p, (ptr_t)p + size);
2548 : return(PCR_ERes_okay);
2549 : }
2550 :
2551 : extern struct PCR_MM_ProcsRep * GC_old_allocator;
2552 : /* defined in pcr_interface.c. */
2553 :
2554 : STATIC void GC_default_push_other_roots(void)
2555 : {
2556 : /* Traverse data allocated by previous memory managers. */
2557 : if ((*(GC_old_allocator->mmp_enumerate))(PCR_Bool_false,
2558 : GC_push_old_obj, 0)
2559 : != PCR_ERes_okay) {
2560 : ABORT("Old object enumeration failed");
2561 : }
2562 : /* Traverse all thread stacks. */
2563 : if (PCR_ERes_IsErr(
2564 : PCR_ThCtl_ApplyToAllOtherThreads(GC_push_thread_stack,0))
2565 : || PCR_ERes_IsErr(GC_push_thread_stack(PCR_Th_CurrThread(), 0))) {
2566 : ABORT("Thread stack marking failed");
2567 : }
2568 : }
2569 :
2570 : # endif /* PCR */
2571 :
2572 : # if defined(GC_PTHREADS) || defined(GC_WIN32_THREADS)
2573 258 : STATIC void GC_default_push_other_roots(void)
2574 : {
2575 258 : GC_push_all_stacks();
2576 258 : }
2577 : # endif /* GC_WIN32_THREADS || GC_PTHREADS */
2578 :
2579 : # ifdef SN_TARGET_PS3
2580 : STATIC void GC_default_push_other_roots(void)
2581 : {
2582 : ABORT("GC_default_push_other_roots is not implemented");
2583 : }
2584 :
2585 : void GC_push_thread_structures(void)
2586 : {
2587 : ABORT("GC_push_thread_structures is not implemented");
2588 : }
2589 : # endif /* SN_TARGET_PS3 */
2590 :
2591 : void (*GC_push_other_roots)(void) = GC_default_push_other_roots;
2592 : #endif /* THREADS */
2593 :
2594 : /*
2595 : * Routines for accessing dirty bits on virtual pages.
2596 : * There are six ways to maintain this information:
2597 : * DEFAULT_VDB: A simple dummy implementation that treats every page
2598 : * as possibly dirty. This makes incremental collection
2599 : * useless, but the implementation is still correct.
2600 : * MANUAL_VDB: Stacks and static data are always considered dirty.
2601 : * Heap pages are considered dirty if GC_dirty(p) has been
2602 : * called on some pointer p pointing to somewhere inside
2603 : * an object on that page. A GC_dirty() call on a large
2604 : * object directly dirties only a single page, but for
2605 : * MANUAL_VDB we are careful to treat an object with a dirty
2606 : * page as completely dirty.
2607 : * In order to avoid races, an object must be marked dirty
2608 : * after it is written, and a reference to the object
2609 : * must be kept on a stack or in a register in the interim.
2610 : * With threads enabled, an object directly reachable from the
2611 : * stack at the time of a collection is treated as dirty.
2612 : * In single-threaded mode, it suffices to ensure that no
2613 : * collection can take place between the pointer assignment
2614 : * and the GC_dirty() call.
2615 :  * PCR_VDB: Use PPCR's virtual dirty bit facility.
2616 : * PROC_VDB: Use the /proc facility for reading dirty bits. Only
2617 : * works under some SVR4 variants. Even then, it may be
2618 : * too slow to be entirely satisfactory. Requires reading
2619 : * dirty bits for entire address space. Implementations tend
2620 : * to assume that the client is a (slow) debugger.
2621 :  * MPROTECT_VDB: Protect pages and then catch the faults to keep track of
2622 : * dirtied pages. The implementation (and implementability)
2623 : * is highly system dependent. This usually fails when system
2624 : * calls write to a protected page. We prevent the read system
2625 :  * call from doing so. It is the client's responsibility to
2626 : * make sure that other system calls are similarly protected
2627 : * or write only to the stack.
2628 : * GWW_VDB: Use the Win32 GetWriteWatch functions, if available, to
2629 : * read dirty bits. In case it is not available (because we
2630 : * are running on Windows 95, Windows 2000 or earlier),
2631 : * MPROTECT_VDB may be defined as a fallback strategy.
2632 : */
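/* Illustrative sketch (not collector code) of the MANUAL_VDB client   */
/* protocol described above; the struct and helper names here are      */
/* hypothetical.  The store happens first, then GC_dirty() is called   */
/* while a reference to the object is still held in a local variable.  */
#if 0
  struct node { struct node *link; };

  static void store_link(struct node *obj, struct node *target)
  {
      obj -> link = target;        /* the write itself */
      GC_dirty((ptr_t)obj);        /* then record the page as dirty; */
                                   /* obj stays live in the interim  */
  }
#endif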
2633 : #ifndef GC_DISABLE_INCREMENTAL
2634 : GC_INNER GC_bool GC_dirty_maintained = FALSE;
2635 : #endif
2636 :
2637 : #if defined(PROC_VDB) || defined(GWW_VDB)
2638 : /* Add all pages in pht2 to pht1 */
2639 : STATIC void GC_or_pages(page_hash_table pht1, page_hash_table pht2)
2640 : {
2641 : register unsigned i;
2642 : for (i = 0; i < PHT_SIZE; i++) pht1[i] |= pht2[i];
2643 : }
2644 :
2645 : # ifdef MPROTECT_VDB
2646 : STATIC GC_bool GC_gww_page_was_dirty(struct hblk * h)
2647 : # else
2648 : GC_INNER GC_bool GC_page_was_dirty(struct hblk * h)
2649 : # endif
2650 : {
2651 : register word index;
2652 : if (HDR(h) == 0)
2653 : return TRUE;
2654 : index = PHT_HASH(h);
2655 : return get_pht_entry_from_index(GC_grungy_pages, index);
2656 : }
2657 :
2658 : # if defined(CHECKSUMS) || defined(PROC_VDB)
2659 : /* Used only if GWW_VDB. */
2660 : # ifdef MPROTECT_VDB
2661 : STATIC GC_bool GC_gww_page_was_ever_dirty(struct hblk * h)
2662 : # else
2663 : GC_INNER GC_bool GC_page_was_ever_dirty(struct hblk * h)
2664 : # endif
2665 : {
2666 : register word index;
2667 : if (HDR(h) == 0)
2668 : return TRUE;
2669 : index = PHT_HASH(h);
2670 : return get_pht_entry_from_index(GC_written_pages, index);
2671 : }
2672 : # endif /* CHECKSUMS || PROC_VDB */
2673 :
2674 : # ifndef MPROTECT_VDB
2675 : /* Ignore write hints. They don't help us here. */
2676 : /*ARGSUSED*/
2677 : GC_INNER void GC_remove_protection(struct hblk *h, word nblocks,
2678 : GC_bool is_ptrfree) {}
2679 : # endif
2680 :
2681 : #endif /* PROC_VDB || GWW_VDB */
2682 :
2683 : #ifdef GWW_VDB
2684 :
2685 : # define GC_GWW_BUF_LEN (MAXHINCR * HBLKSIZE / 4096 /* X86 page size */)
2686 : /* Still susceptible to overflow, if there are very large allocations, */
2687 : /* and everything is dirty. */
2688 : static PVOID gww_buf[GC_GWW_BUF_LEN];
2689 :
2690 : # ifdef MPROTECT_VDB
2691 : GC_INNER GC_bool GC_gww_dirty_init(void)
2692 : {
2693 : detect_GetWriteWatch();
2694 : return GC_GWW_AVAILABLE();
2695 : }
2696 : # else
2697 : GC_INNER void GC_dirty_init(void)
2698 : {
2699 : detect_GetWriteWatch();
2700 : GC_dirty_maintained = GC_GWW_AVAILABLE();
2701 : }
2702 : # endif /* !MPROTECT_VDB */
2703 :
2704 : # ifdef MPROTECT_VDB
2705 : STATIC void GC_gww_read_dirty(void)
2706 : # else
2707 : GC_INNER void GC_read_dirty(void)
2708 : # endif
2709 : {
2710 : word i;
2711 :
2712 : BZERO(GC_grungy_pages, sizeof(GC_grungy_pages));
2713 :
2714 : for (i = 0; i != GC_n_heap_sects; ++i) {
2715 : GC_ULONG_PTR count;
2716 :
2717 : do {
2718 : PVOID * pages, * pages_end;
2719 : DWORD page_size;
2720 :
2721 : pages = gww_buf;
2722 : count = GC_GWW_BUF_LEN;
2723 : /* GetWriteWatch is documented as returning non-zero when it */
2724 : /* fails, but the documentation doesn't explicitly say why it */
2725 : /* would fail or what its behaviour will be if it fails. */
2726 : /* It does appear to fail, at least on recent W2K instances, if */
2727 : /* the underlying memory was not allocated with the appropriate */
2728 : /* flag. This is common if GC_enable_incremental is called */
2729 : /* shortly after GC initialization. To avoid modifying the */
2730 : /* interface, we silently work around such a failure, it only */
2731 : /* affects the initial (small) heap allocation. If there are */
2732 : /* more dirty pages than will fit in the buffer, this is not */
2733 : /* treated as a failure; we must check the page count in the */
2734 : /* loop condition. Since each partial call will reset the */
2735 : /* status of some pages, this should eventually terminate even */
2736 : /* in the overflow case. */
2737 : if (GetWriteWatch_func(WRITE_WATCH_FLAG_RESET,
2738 : GC_heap_sects[i].hs_start,
2739 : GC_heap_sects[i].hs_bytes,
2740 : pages,
2741 : &count,
2742 : &page_size) != 0) {
2743 : static int warn_count = 0;
2744 : unsigned j;
2745 : struct hblk * start = (struct hblk *)GC_heap_sects[i].hs_start;
2746 : static struct hblk *last_warned = 0;
2747 : size_t nblocks = divHBLKSZ(GC_heap_sects[i].hs_bytes);
2748 :
2749 : if (i != 0 && last_warned != start && warn_count++ < 5) {
2750 : last_warned = start;
2751 : WARN(
2752 : "GC_gww_read_dirty unexpectedly failed at %p: "
2753 : "Falling back to marking all pages dirty\n", start);
2754 : }
2755 : for (j = 0; j < nblocks; ++j) {
2756 : word hash = PHT_HASH(start + j);
2757 : set_pht_entry_from_index(GC_grungy_pages, hash);
2758 : }
2759 : count = 1; /* Done with this section. */
2760 : } else /* succeeded */ {
2761 : pages_end = pages + count;
2762 : while (pages != pages_end) {
2763 : struct hblk * h = (struct hblk *) *pages++;
2764 : struct hblk * h_end = (struct hblk *) ((char *) h + page_size);
2765 : do
2766 : set_pht_entry_from_index(GC_grungy_pages, PHT_HASH(h));
2767 : while (++h < h_end);
2768 : }
2769 : }
2770 : } while (count == GC_GWW_BUF_LEN);
2771 : /* FIXME: It's unclear from Microsoft's documentation if this loop */
2772 : /* is useful. We suspect the call just fails if the buffer fills */
2773 : /* up. But that should still be handled correctly. */
2774 : }
2775 :
2776 : GC_or_pages(GC_written_pages, GC_grungy_pages);
2777 : }
2778 : #endif /* GWW_VDB */
2779 :
2780 : #ifdef DEFAULT_VDB
2781 : /* All of the following assume the allocation lock is held. */
2782 :
2783 : /* The client asserts that unallocated pages in the heap are never */
2784 : /* written. */
2785 :
2786 : /* Initialize virtual dirty bit implementation. */
2787 : GC_INNER void GC_dirty_init(void)
2788 : {
2789 : if (GC_print_stats == VERBOSE)
2790 : GC_log_printf("Initializing DEFAULT_VDB...\n");
2791 : GC_dirty_maintained = TRUE;
2792 : }
2793 :
2794 : /* Retrieve system dirty bits for heap to a local buffer. */
2795 : /* Restore the system's notion of which pages are dirty. */
2796 : GC_INNER void GC_read_dirty(void) {}
2797 :
2798 : /* Is the HBLKSIZE sized page at h marked dirty in the local buffer? */
2799 : /* If the actual page size is different, this returns TRUE if any */
2800 : /* of the pages overlapping h are dirty. This routine may err on the */
2801 : /* side of labeling pages as dirty (and this implementation does). */
2802 : /*ARGSUSED*/
2803 : GC_INNER GC_bool GC_page_was_dirty(struct hblk *h)
2804 : {
2805 : return(TRUE);
2806 : }
2807 :
2808 : /* The following two routines are typically less crucial. */
2809 : /* They matter most with large dynamic libraries, or if we can't */
2810 : /* accurately identify stacks, e.g. under Solaris 2.X. Otherwise the */
2811 : /* following default versions are adequate. */
2812 : # ifdef CHECKSUMS
2813 : /* Could any valid GC heap pointer ever have been written to this page? */
2814 : /*ARGSUSED*/
2815 : GC_INNER GC_bool GC_page_was_ever_dirty(struct hblk *h)
2816 : {
2817 : return(TRUE);
2818 : }
2819 : # endif /* CHECKSUMS */
2820 :
2821 : /* A call that: */
2822 : /* I) hints that [h, h+nblocks) is about to be written. */
2823 : /* II) guarantees that protection is removed. */
2824 : /* (I) may speed up some dirty bit implementations. */
2825 : /* (II) may be essential if we need to ensure that */
2826 : /* pointer-free system call buffers in the heap are */
2827 : /* not protected. */
2828 : /*ARGSUSED*/
2829 : GC_INNER void GC_remove_protection(struct hblk *h, word nblocks,
2830 : GC_bool is_ptrfree) {}
2831 : #endif /* DEFAULT_VDB */
2832 :
2833 : #ifdef MANUAL_VDB
2834 : /* Initialize virtual dirty bit implementation. */
2835 : GC_INNER void GC_dirty_init(void)
2836 : {
2837 : if (GC_print_stats == VERBOSE)
2838 : GC_log_printf("Initializing MANUAL_VDB...\n");
2839 : /* GC_dirty_pages and GC_grungy_pages are already cleared. */
2840 : GC_dirty_maintained = TRUE;
2841 : }
2842 :
2843 : /* Retrieve system dirty bits for heap to a local buffer. */
2844 : /* Restore the systems notion of which pages are dirty. */
2845 : GC_INNER void GC_read_dirty(void)
2846 : {
2847 : BCOPY((word *)GC_dirty_pages, GC_grungy_pages,
2848 : (sizeof GC_dirty_pages));
2849 : BZERO((word *)GC_dirty_pages, (sizeof GC_dirty_pages));
2850 : }
2851 :
2852 : /* Is the HBLKSIZE sized page at h marked dirty in the local buffer? */
2853 : /* If the actual page size is different, this returns TRUE if any */
2854 : /* of the pages overlapping h are dirty. This routine may err on the */
2855 : /* side of labeling pages as dirty (and this implementation does). */
2856 : GC_INNER GC_bool GC_page_was_dirty(struct hblk *h)
2857 : {
2858 : register word index = PHT_HASH(h);
2859 : return(HDR(h) == 0 || get_pht_entry_from_index(GC_grungy_pages, index));
2860 : }
2861 :
2862 : # define async_set_pht_entry_from_index(db, index) \
2863 : set_pht_entry_from_index(db, index) /* for now */
2864 :
2865 : /* Mark the page containing p as dirty. Logically, this dirties the */
2866 : /* entire object. */
2867 : void GC_dirty(ptr_t p)
2868 : {
2869 : word index = PHT_HASH(p);
2870 : async_set_pht_entry_from_index(GC_dirty_pages, index);
2871 : }
2872 :
2873 : /*ARGSUSED*/
2874 : GC_INNER void GC_remove_protection(struct hblk *h, word nblocks,
2875 : GC_bool is_ptrfree) {}
2876 :
2877 : # ifdef CHECKSUMS
2878 : /* Could any valid GC heap pointer ever have been written to this page? */
2879 : /*ARGSUSED*/
2880 : GC_INNER GC_bool GC_page_was_ever_dirty(struct hblk *h)
2881 : {
2882 : /* FIXME - implement me. */
2883 : return(TRUE);
2884 : }
2885 : # endif /* CHECKSUMS */
2886 :
2887 : #endif /* MANUAL_VDB */
2888 :
2889 : #ifdef MPROTECT_VDB
2890 : /* See DEFAULT_VDB for interface descriptions. */
2891 :
2892 : /*
2893 : * This implementation maintains dirty bits itself by catching write
2894 : * faults and keeping track of them. We assume nobody else catches
2895 : * SIGBUS or SIGSEGV. We assume no write faults occur in system calls.
2896 : * This means that clients must ensure that system calls don't write
2897 : * to the write-protected heap. Probably the best way to do this is to
2898 : * ensure that system calls write at most to POINTERFREE objects in the
2899 : * heap, and do even that only if we are on a platform on which those
2900 : * are not protected. Another alternative is to wrap system calls
2901 :  * (see the read() wrapper sketched below), but the current
2902 :  * implementation does not do this.
2903 : * We assume the page size is a multiple of HBLKSIZE.
2904 : * We prefer them to be the same. We avoid protecting POINTERFREE
2905 : * objects only if they are the same.
2906 : */
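/* A hedged sketch (not part of this file) of the system-call wrapping */
/* alternative mentioned above: unprotect the target buffer before the */
/* kernel writes to it.  GC_begin_syscall, GC_end_syscall and          */
/* GC_unprotect_range are assumed helper names, not functions defined  */
/* here.                                                               */
#if 0
  ssize_t GC_wrapped_read(int fd, void *buf, size_t nbyte)
  {
      ssize_t result;

      GC_begin_syscall();               /* serialize against the GC */
      GC_unprotect_range(buf, nbyte);   /* kernel writes won't fault */
      result = read(fd, buf, nbyte);
      GC_end_syscall();
      return result;
  }
#endif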
2907 : # ifdef DARWIN
2908 : /* Using vm_protect (mach syscall) over mprotect (BSD syscall) seems to
2909 : decrease the likelihood of some of the problems described below. */
2910 : # include <mach/vm_map.h>
2911 : STATIC mach_port_t GC_task_self = 0;
2912 : # define PROTECT(addr,len) \
2913 : if (vm_protect(GC_task_self,(vm_address_t)(addr),(vm_size_t)(len), \
2914 : FALSE, VM_PROT_READ \
2915 : | (GC_pages_executable ? VM_PROT_EXECUTE : 0)) \
2916 : != KERN_SUCCESS) { \
2917 : ABORT("vm_protect(PROTECT) failed"); \
2918 : }
2919 : # define UNPROTECT(addr,len) \
2920 : if (vm_protect(GC_task_self,(vm_address_t)(addr),(vm_size_t)(len), \
2921 : FALSE, (VM_PROT_READ | VM_PROT_WRITE) \
2922 : | (GC_pages_executable ? VM_PROT_EXECUTE : 0)) \
2923 : != KERN_SUCCESS) { \
2924 : ABORT("vm_protect(UNPROTECT) failed"); \
2925 : }
2926 :
2927 : # elif !defined(MSWIN32) && !defined(MSWINCE)
2928 : # include <sys/mman.h>
2929 : # include <signal.h>
2930 : # include <sys/syscall.h>
2931 :
2932 : # define PROTECT(addr, len) \
2933 : if (mprotect((caddr_t)(addr), (size_t)(len), \
2934 : PROT_READ \
2935 : | (GC_pages_executable ? PROT_EXEC : 0)) < 0) { \
2936 : ABORT("mprotect failed"); \
2937 : }
2938 : # define UNPROTECT(addr, len) \
2939 : if (mprotect((caddr_t)(addr), (size_t)(len), \
2940 : (PROT_READ | PROT_WRITE) \
2941 : | (GC_pages_executable ? PROT_EXEC : 0)) < 0) { \
2942 : ABORT(GC_pages_executable ? "un-mprotect executable page" \
2943 : " failed (probably disabled by OS)" : \
2944 : "un-mprotect failed"); \
2945 : }
2946 : # undef IGNORE_PAGES_EXECUTABLE
2947 :
2948 : # else /* MSWIN32 */
2949 : # ifndef MSWINCE
2950 : # include <signal.h>
2951 : # endif
2952 :
2953 : static DWORD protect_junk;
2954 : # define PROTECT(addr, len) \
2955 : if (!VirtualProtect((addr), (len), \
2956 : GC_pages_executable ? PAGE_EXECUTE_READ : \
2957 : PAGE_READONLY, \
2958 : &protect_junk)) { \
2959 : if (GC_print_stats) \
2960 : GC_log_printf("Last error code: 0x%lx\n", (long)GetLastError()); \
2961 : ABORT("VirtualProtect failed"); \
2962 : }
2963 : # define UNPROTECT(addr, len) \
2964 : if (!VirtualProtect((addr), (len), \
2965 : GC_pages_executable ? PAGE_EXECUTE_READWRITE : \
2966 : PAGE_READWRITE, \
2967 : &protect_junk)) { \
2968 : ABORT("un-VirtualProtect failed"); \
2969 : }
2970 : # endif /* MSWIN32 || MSWINCE || DARWIN */
2971 :
2972 : # if defined(MSWIN32)
2973 : typedef LPTOP_LEVEL_EXCEPTION_FILTER SIG_HNDLR_PTR;
2974 : # undef SIG_DFL
2975 : # define SIG_DFL (LPTOP_LEVEL_EXCEPTION_FILTER)((signed_word)-1)
2976 : # elif defined(MSWINCE)
2977 : typedef LONG (WINAPI *SIG_HNDLR_PTR)(struct _EXCEPTION_POINTERS *);
2978 : # undef SIG_DFL
2979 : # define SIG_DFL (SIG_HNDLR_PTR) (-1)
2980 : # elif defined(DARWIN)
2981 : typedef void (* SIG_HNDLR_PTR)();
2982 : # else
2983 : typedef void (* SIG_HNDLR_PTR)(int, siginfo_t *, void *);
2984 : typedef void (* PLAIN_HNDLR_PTR)(int);
2985 : # endif
2986 :
2987 : # if defined(__GLIBC__)
2988 : # if __GLIBC__ < 2 || __GLIBC__ == 2 && __GLIBC_MINOR__ < 2
2989 : # error glibc too old?
2990 : # endif
2991 : # endif
2992 :
2993 : #ifndef DARWIN
2994 : STATIC SIG_HNDLR_PTR GC_old_segv_handler = 0;
2995 : /* Also old MSWIN32 ACCESS_VIOLATION filter */
2996 : # if !defined(MSWIN32) && !defined(MSWINCE)
2997 : STATIC SIG_HNDLR_PTR GC_old_bus_handler = 0;
2998 : STATIC GC_bool GC_old_bus_handler_used_si = FALSE;
2999 : STATIC GC_bool GC_old_segv_handler_used_si = FALSE;
3000 : # endif
3001 : #endif /* !DARWIN */
3002 :
3003 : #if defined(THREADS)
3004 : /* We need to lock around the bitmap update in the write fault handler */
3005 : /* in order to avoid the risk of losing a bit. We do this with a */
3006 : /* test-and-set spin lock if we know how to do that. Otherwise we */
3007 : /* check whether we are already in the handler and use the dumb but */
3008 : /* safe fallback algorithm of setting all bits in the word. */
3009 : /* Contention should be very rare, so we do the minimum to handle it */
3010 : /* correctly. */
3011 : #ifdef AO_HAVE_test_and_set_acquire
3012 : GC_INNER volatile AO_TS_t GC_fault_handler_lock = AO_TS_INITIALIZER;
3013 0 : static void async_set_pht_entry_from_index(volatile page_hash_table db,
3014 : size_t index)
3015 : {
3016 0 : while (AO_test_and_set_acquire(&GC_fault_handler_lock) == AO_TS_SET) {
3017 : /* empty */
3018 : }
3019 : /* Could also revert to set_pht_entry_from_index_safe if initial */
3020 : /* GC_test_and_set fails. */
3021 0 : set_pht_entry_from_index(db, index);
3022 0 : AO_CLEAR(&GC_fault_handler_lock);
3023 0 : }
3024 : #else /* !AO_HAVE_test_and_set_acquire */
3025 : # error No test_and_set operation: Introduces a race.
3026 : /* THIS WOULD BE INCORRECT! */
3027 : /* The dirty bit vector may be temporarily wrong, */
3028 : /* just before we notice the conflict and correct it. We may end up */
3029 : /* looking at it while it's wrong. But this requires contention */
3030 : /* exactly when a GC is triggered, which seems far less likely to */
3031 : /* fail than the old code, which had no reported failures. Thus we */
3032 : /* leave it this way while we think of something better, or support */
3033 : /* GC_test_and_set on the remaining platforms. */
3034 : static volatile word currently_updating = 0;
3035 : static void async_set_pht_entry_from_index(volatile page_hash_table db,
3036 : size_t index)
3037 : {
3038 : unsigned int update_dummy;
3039 : currently_updating = (word)(&update_dummy);
3040 : set_pht_entry_from_index(db, index);
3041 : /* If we get contention in the 10 or so instruction window here, */
3042 : /* and we get stopped by a GC between the two updates, we lose! */
3043 : if (currently_updating != (word)(&update_dummy)) {
3044 : set_pht_entry_from_index_safe(db, index);
3045 : /* We claim that if two threads concurrently try to update the */
3046 : /* dirty bit vector, the first one to execute UPDATE_START */
3047 : /* will see it changed when UPDATE_END is executed. (Note that */
3048 : /* &update_dummy must differ in two distinct threads.) It */
3049 : /* will then execute set_pht_entry_from_index_safe, thus */
3050 : /* returning us to a safe state, though not soon enough. */
3051 : }
3052 : }
3053 : #endif /* !AO_HAVE_test_and_set_acquire */
3054 : #else /* !THREADS */
3055 : # define async_set_pht_entry_from_index(db, index) \
3056 : set_pht_entry_from_index(db, index)
3057 : #endif /* !THREADS */
3058 :
3059 : #ifdef CHECKSUMS
3060 : void GC_record_fault(struct hblk * h); /* from checksums.c */
3061 : #endif
3062 :
3063 : #ifndef DARWIN
3064 :
3065 : # if !defined(MSWIN32) && !defined(MSWINCE)
3066 : # include <errno.h>
3067 : # if defined(FREEBSD) || defined(HURD) || defined(HPUX)
3068 : # define SIG_OK (sig == SIGBUS || sig == SIGSEGV)
3069 : # else
3070 : # define SIG_OK (sig == SIGSEGV)
3071 : # endif
3072 : # if defined(FREEBSD)
3073 : # ifndef SEGV_ACCERR
3074 : # define SEGV_ACCERR 2
3075 : # endif
3076 : # define CODE_OK (si -> si_code == BUS_PAGE_FAULT \
3077 : || si -> si_code == SEGV_ACCERR)
3078 : # elif defined(OSF1)
3079 : # define CODE_OK (si -> si_code == 2 /* experimentally determined */)
3080 : # elif defined(IRIX5)
3081 : # define CODE_OK (si -> si_code == EACCES)
3082 : # elif defined(HURD)
3083 : # define CODE_OK TRUE
3084 : # elif defined(LINUX)
3085 : # define CODE_OK TRUE
3086 : /* Empirically c.trapno == 14, on IA32, but is that useful? */
3087 : /* Should probably consider alignment issues on other */
3088 : /* architectures. */
3089 : # elif defined(HPUX)
3090 : # define CODE_OK (si -> si_code == SEGV_ACCERR \
3091 : || si -> si_code == BUS_ADRERR \
3092 : || si -> si_code == BUS_UNKNOWN \
3093 : || si -> si_code == SEGV_UNKNOWN \
3094 : || si -> si_code == BUS_OBJERR)
3095 : # elif defined(SUNOS5SIGS)
3096 : # define CODE_OK (si -> si_code == SEGV_ACCERR)
3097 : # endif
3098 : # ifndef NO_GETCONTEXT
3099 : # include <ucontext.h>
3100 : # endif
3101 : /*ARGSUSED*/
3102 0 : STATIC void GC_write_fault_handler(int sig, siginfo_t *si, void *raw_sc)
3103 : # else
3104 : # define SIG_OK (exc_info -> ExceptionRecord -> ExceptionCode \
3105 : == STATUS_ACCESS_VIOLATION)
3106 : # define CODE_OK (exc_info -> ExceptionRecord -> ExceptionInformation[0] \
3107 : == 1) /* Write fault */
3108 : STATIC LONG WINAPI GC_write_fault_handler(
3109 : struct _EXCEPTION_POINTERS *exc_info)
3110 : # endif /* MSWIN32 || MSWINCE */
3111 : {
3112 : # if !defined(MSWIN32) && !defined(MSWINCE)
3113 0 : char *addr = si -> si_addr;
3114 : # else
3115 : char * addr = (char *) (exc_info -> ExceptionRecord
3116 : -> ExceptionInformation[1]);
3117 : # endif
3118 : unsigned i;
3119 :
3120 0 : if (SIG_OK && CODE_OK) {
3121 : register struct hblk * h =
3122 0 : (struct hblk *)((word)addr & ~(GC_page_size-1));
3123 : GC_bool in_allocd_block;
3124 : # ifdef CHECKSUMS
3125 : GC_record_fault(h);
3126 : # endif
3127 :
3128 : # ifdef SUNOS5SIGS
3129 : /* Address is only within the correct physical page. */
3130 : in_allocd_block = FALSE;
3131 : for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
3132 : if (HDR(h+i) != 0) {
3133 : in_allocd_block = TRUE;
3134 : break;
3135 : }
3136 : }
3137 : # else
3138 0 : in_allocd_block = (HDR(addr) != 0);
3139 : # endif
3140 0 : if (!in_allocd_block) {
3141 : /* FIXME - We should make sure that we invoke the */
3142 : /* old handler with the appropriate calling */
3143 : /* sequence, which often depends on SA_SIGINFO. */
3144 :
3145 : /* Heap blocks now begin and end on page boundaries */
3146 : SIG_HNDLR_PTR old_handler;
3147 :
3148 : # if defined(MSWIN32) || defined(MSWINCE)
3149 : old_handler = GC_old_segv_handler;
3150 : # else
3151 : GC_bool used_si;
3152 :
3153 0 : if (sig == SIGSEGV) {
3154 0 : old_handler = GC_old_segv_handler;
3155 0 : used_si = GC_old_segv_handler_used_si;
3156 : } else {
3157 0 : old_handler = GC_old_bus_handler;
3158 0 : used_si = GC_old_bus_handler_used_si;
3159 : }
3160 : # endif
3161 :
3162 0 : if (old_handler == (SIG_HNDLR_PTR)SIG_DFL) {
3163 : # if !defined(MSWIN32) && !defined(MSWINCE)
3164 0 : if (GC_print_stats)
3165 0 : GC_log_printf("Unexpected segfault at %p\n", addr);
3166 0 : ABORT("Unexpected bus error or segmentation fault");
3167 : # else
3168 : return(EXCEPTION_CONTINUE_SEARCH);
3169 : # endif
3170 : } else {
3171 : /*
3172 : * FIXME: This code should probably check if the
3173 : * old signal handler used the traditional style and
3174 : * if so call it using that style.
3175 : */
3176 : # if defined(MSWIN32) || defined(MSWINCE)
3177 : return((*old_handler)(exc_info));
3178 : # else
3179 0 : if (used_si)
3180 0 : ((SIG_HNDLR_PTR)old_handler) (sig, si, raw_sc);
3181 : else
3182 : /* FIXME: should pass nonstandard args as well. */
3183 0 : ((PLAIN_HNDLR_PTR)old_handler) (sig);
3184 0 : return;
3185 : # endif
3186 : }
3187 : }
3188 0 : UNPROTECT(h, GC_page_size);
3189 : /* We need to make sure that no collection occurs between */
3190 : /* the UNPROTECT and the setting of the dirty bit. Otherwise */
3191 : /* a write by a third thread might go unnoticed. Reversing */
3192 : /* the order is just as bad, since we would end up unprotecting */
3193 : /* a page in a GC cycle during which it's not marked. */
3194 : /* Currently we do this by disabling the thread stopping */
3195 : /* signals while this handler is running. An alternative might */
3196 : /* be to record the fact that we're about to unprotect, or */
3197 : /* have just unprotected a page in the GC's thread structure, */
3198 : /* and then to have the thread stopping code set the dirty */
3199 : /* flag, if necessary. */
3200 0 : for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
3201 0 : size_t index = PHT_HASH(h+i);
3202 :
3203 0 : async_set_pht_entry_from_index(GC_dirty_pages, index);
3204 : }
3205 : /* The write may not take place before dirty bits are read. */
3206 : /* But then we'll fault again ... */
3207 : # if defined(MSWIN32) || defined(MSWINCE)
3208 : return(EXCEPTION_CONTINUE_EXECUTION);
3209 : # else
3210 0 : return;
3211 : # endif
3212 : }
3213 : # if defined(MSWIN32) || defined(MSWINCE)
3214 : return EXCEPTION_CONTINUE_SEARCH;
3215 : # else
3216 0 : if (GC_print_stats)
3217 0 : GC_log_printf("Unexpected segfault at %p\n", addr);
3218 0 : ABORT("Unexpected bus error or segmentation fault");
3219 : # endif
3220 : }
3221 :
3222 : # ifdef GC_WIN32_THREADS
3223 : GC_INNER void GC_set_write_fault_handler(void)
3224 : {
3225 : SetUnhandledExceptionFilter(GC_write_fault_handler);
3226 : }
3227 : # endif
3228 : #endif /* !DARWIN */
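For orientation, the write-fault machinery above only comes into play once a client opts into incremental collection. A minimal usage sketch against the public API (GC_INIT, GC_enable_incremental, and GC_MALLOC are the documented entry points; the loop bound is arbitrary and error handling is omitted):

    #include "gc.h"

    int main(void)
    {
        int i;

        GC_INIT();
        GC_enable_incremental(); /* turn on the dirty-bit machinery; with  */
                                 /* MPROTECT_VDB this installs the write   */
                                 /* fault handler defined above            */
        for (i = 0; i < 100000; ++i) {
            void **p = (void **)GC_MALLOC(2 * sizeof(void *));
            if (p != NULL) p[0] = p; /* a store into a protected page      */
                                     /* faults once and marks it dirty     */
        }
        return 0;
    }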
3229 :
3230 : /* We hold the allocation lock. We expect block h to be written */
3231 : /* shortly. Ensure that all pages containing any part of the n hblks */
3232 : /* starting at h are no longer protected. If is_ptrfree is false, also */
3233 : /* ensure that they will subsequently appear to be dirty. Not allowed */
3234 : /* to call GC_printf (and friends) here; see Win32 GC_stop_world() */
3235 : /* for the reason. */
3236 57686 : GC_INNER void GC_remove_protection(struct hblk *h, word nblocks,
3237 : GC_bool is_ptrfree)
3238 : {
3239 : struct hblk * h_trunc; /* Truncated to page boundary */
3240 : struct hblk * h_end; /* Page boundary following block end */
3241 : struct hblk * current;
3242 :
3243 : # if defined(GWW_VDB)
3244 : if (GC_GWW_AVAILABLE()) return;
3245 : # endif
3246 57686 : if (!GC_dirty_maintained) return;
3247 0 : h_trunc = (struct hblk *)((word)h & ~(GC_page_size-1));
3248 0 : h_end = (struct hblk *)(((word)(h + nblocks) + GC_page_size-1)
3249 0 : & ~(GC_page_size-1));
3250 0 : if (h_end == h_trunc + 1 &&
3251 0 : get_pht_entry_from_index(GC_dirty_pages, PHT_HASH(h_trunc))) {
3252 : /* already marked dirty, and hence unprotected. */
3253 0 : return;
3254 : }
3255 0 : for (current = h_trunc; current < h_end; ++current) {
3256 0 : size_t index = PHT_HASH(current);
3257 0 : if (!is_ptrfree || current < h || current >= h + nblocks) {
3258 0 : async_set_pht_entry_from_index(GC_dirty_pages, index);
3259 : }
3260 : }
3261 0 : UNPROTECT(h_trunc, (ptr_t)h_end - (ptr_t)h_trunc);
3262 : }
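The page rounding above is plain mask arithmetic: h_trunc rounds the block start down to a page boundary and h_end rounds the block end up. A worked example, assuming a 4 KiB page size (all values are made up for illustration):

    #include <stdio.h>

    int main(void)
    {
        unsigned long page = 0x1000UL;    /* assumed GC_page_size          */
        unsigned long lo   = 0x5234UL;    /* hypothetical block start      */
        unsigned long hi   = 0x7100UL;    /* hypothetical block end        */
        unsigned long h_trunc = lo & ~(page - 1);              /* 0x5000   */
        unsigned long h_end   = (hi + page - 1) & ~(page - 1); /* 0x8000   */

        printf("unprotect [0x%lx, 0x%lx)\n", h_trunc, h_end);
        return 0;
    }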
3263 :
3264 : #if !defined(DARWIN)
3265 0 : GC_INNER void GC_dirty_init(void)
3266 : {
3267 : # if !defined(MSWIN32) && !defined(MSWINCE)
3268 : struct sigaction act, oldact;
3269 0 : act.sa_flags = SA_RESTART | SA_SIGINFO;
3270 0 : act.sa_sigaction = GC_write_fault_handler;
3271 0 : (void)sigemptyset(&act.sa_mask);
3272 : # ifdef SIG_SUSPEND
3273 : /* Arrange to postpone SIG_SUSPEND while we're in a write fault */
3274 : /* handler. This effectively makes the handler atomic w.r.t. */
3275 : /* stopping the world for GC. */
3276 0 : (void)sigaddset(&act.sa_mask, SIG_SUSPEND);
3277 : # endif /* SIG_SUSPEND */
3278 : # endif
3279 0 : if (GC_print_stats == VERBOSE)
3280 0 : GC_log_printf(
3281 : "Initializing mprotect virtual dirty bit implementation\n");
3282 0 : GC_dirty_maintained = TRUE;
3283 0 : if (GC_page_size % HBLKSIZE != 0) {
3284 0 : ABORT("Page size not multiple of HBLKSIZE");
3285 : }
3286 : # if !defined(MSWIN32) && !defined(MSWINCE)
3287 : # if defined(GC_IRIX_THREADS)
3288 : sigaction(SIGSEGV, 0, &oldact);
3289 : sigaction(SIGSEGV, &act, 0);
3290 : # else
3291 : {
3292 0 : int res = sigaction(SIGSEGV, &act, &oldact);
3293 0 : if (res != 0) ABORT("Sigaction failed");
3294 : }
3295 : # endif
3296 0 : if (oldact.sa_flags & SA_SIGINFO) {
3297 0 : GC_old_segv_handler = oldact.sa_sigaction;
3298 0 : GC_old_segv_handler_used_si = TRUE;
3299 : } else {
3300 0 : GC_old_segv_handler = (SIG_HNDLR_PTR)oldact.sa_handler;
3301 0 : GC_old_segv_handler_used_si = FALSE;
3302 : }
3303 0 : if (GC_old_segv_handler == (SIG_HNDLR_PTR)SIG_IGN) {
3304 0 : if (GC_print_stats)
3305 0 : GC_err_printf("Previously ignored segmentation violation!?\n");
3306 0 : GC_old_segv_handler = (SIG_HNDLR_PTR)SIG_DFL;
3307 : }
3308 0 : if (GC_old_segv_handler != (SIG_HNDLR_PTR)SIG_DFL) {
3309 0 : if (GC_print_stats == VERBOSE)
3310 0 : GC_log_printf("Replaced other SIGSEGV handler\n");
3311 : }
3312 : # if defined(HPUX) || defined(LINUX) || defined(HURD) \
3313 : || (defined(FREEBSD) && defined(SUNOS5SIGS))
3314 0 : sigaction(SIGBUS, &act, &oldact);
3315 0 : if (oldact.sa_flags & SA_SIGINFO) {
3316 0 : GC_old_bus_handler = oldact.sa_sigaction;
3317 0 : GC_old_bus_handler_used_si = TRUE;
3318 : } else {
3319 0 : GC_old_bus_handler = (SIG_HNDLR_PTR)oldact.sa_handler;
3320 0 : GC_old_bus_handler_used_si = FALSE;
3321 : }
3322 0 : if (GC_old_bus_handler == (SIG_HNDLR_PTR)SIG_IGN) {
3323 0 : if (GC_print_stats)
3324 0 : GC_err_printf("Previously ignored bus error!?\n");
3325 0 : GC_old_bus_handler = (SIG_HNDLR_PTR)SIG_DFL;
3326 : }
3327 0 : if (GC_old_bus_handler != (SIG_HNDLR_PTR)SIG_DFL) {
3328 0 : if (GC_print_stats == VERBOSE)
3329 0 : GC_log_printf("Replaced other SIGBUS handler\n");
3330 : }
3331 : # endif /* HPUX || LINUX || HURD || (FREEBSD && SUNOS5SIGS) */
3332 : # endif /* ! MS windows */
3333 : # if defined(GWW_VDB)
3334 : if (GC_gww_dirty_init())
3335 : return;
3336 : # endif
3337 : # if defined(MSWIN32)
3338 : GC_old_segv_handler = SetUnhandledExceptionFilter(GC_write_fault_handler);
3339 : if (GC_old_segv_handler != NULL) {
3340 : if (GC_print_stats)
3341 : GC_log_printf("Replaced other UnhandledExceptionFilter\n");
3342 : } else {
3343 : GC_old_segv_handler = SIG_DFL;
3344 : }
3345 : # elif defined(MSWINCE)
3346 : /* MPROTECT_VDB is unsupported for WinCE at present. */
3347 : /* FIXME: implement it (if possible). */
3348 : # endif
3349 0 : }
3350 : #endif /* !DARWIN */
3351 :
3352 0 : GC_API int GC_CALL GC_incremental_protection_needs(void)
3353 : {
3354 0 : if (GC_page_size == HBLKSIZE) {
3355 0 : return GC_PROTECTS_POINTER_HEAP;
3356 : } else {
3357 0 : return GC_PROTECTS_POINTER_HEAP | GC_PROTECTS_PTRFREE_HEAP;
3358 : }
3359 : }
3360 : #define HAVE_INCREMENTAL_PROTECTION_NEEDS
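Clients can use this query to decide whether writes to pointer-free (GC_MALLOC_ATOMIC) objects can also fault; a sketch using the public constants from gc.h:

    #include "gc.h"

    /* Returns nonzero if even pointer-free objects may be write-   */
    /* protected, i.e. writing them from a system call is unsafe.   */
    static int ptrfree_objects_may_fault(void)
    {
        return (GC_incremental_protection_needs()
                & GC_PROTECTS_PTRFREE_HEAP) != 0;
    }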
3361 :
3362 : #define IS_PTRFREE(hhdr) ((hhdr)->hb_descr == 0)
3363 : #define PAGE_ALIGNED(x) !((word)(x) & (GC_page_size - 1))
3364 :
3365 0 : STATIC void GC_protect_heap(void)
3366 : {
3367 : ptr_t start;
3368 : size_t len;
3369 : struct hblk * current;
3370 : struct hblk * current_start; /* Start of block to be protected. */
3371 : struct hblk * limit;
3372 : unsigned i;
3373 : GC_bool protect_all =
3374 0 : (0 != (GC_incremental_protection_needs() & GC_PROTECTS_PTRFREE_HEAP));
3375 0 : for (i = 0; i < GC_n_heap_sects; i++) {
3376 0 : start = GC_heap_sects[i].hs_start;
3377 0 : len = GC_heap_sects[i].hs_bytes;
3378 0 : if (protect_all) {
3379 0 : PROTECT(start, len);
3380 : } else {
3381 : GC_ASSERT(PAGE_ALIGNED(len));
3382 : GC_ASSERT(PAGE_ALIGNED(start));
3383 0 : current_start = current = (struct hblk *)start;
3384 0 : limit = (struct hblk *)(start + len);
3385 0 : while (current < limit) {
3386 : hdr * hhdr;
3387 : word nhblks;
3388 : GC_bool is_ptrfree;
3389 :
3390 : GC_ASSERT(PAGE_ALIGNED(current));
3391 0 : GET_HDR(current, hhdr);
3392 0 : if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
3393 : /* This can happen only if we're at the beginning of a */
3394 : /* heap segment, and a block spans heap segments. */
3395 : /* We will handle that block as part of the preceding */
3396 : /* segment. */
3397 : GC_ASSERT(current_start == current);
3398 0 : current_start = ++current;
3399 0 : continue;
3400 : }
3401 0 : if (HBLK_IS_FREE(hhdr)) {
3402 : GC_ASSERT(PAGE_ALIGNED(hhdr -> hb_sz));
3403 0 : nhblks = divHBLKSZ(hhdr -> hb_sz);
3404 0 : is_ptrfree = TRUE; /* dirty on alloc */
3405 : } else {
3406 0 : nhblks = OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
3407 0 : is_ptrfree = IS_PTRFREE(hhdr);
3408 : }
3409 0 : if (is_ptrfree) {
3410 0 : if (current_start < current) {
3411 0 : PROTECT(current_start, (ptr_t)current - (ptr_t)current_start);
3412 : }
3413 0 : current_start = (current += nhblks);
3414 : } else {
3415 0 : current += nhblks;
3416 : }
3417 : }
3418 0 : if (current_start < current) {
3419 0 : PROTECT(current_start, (ptr_t)current - (ptr_t)current_start);
3420 : }
3421 : }
3422 : }
3423 0 : }
3424 :
3425 : /* We assume that either the world is stopped or it's OK to lose dirty */
3426 : /* bits while this is happening (as in GC_enable_incremental). */
3427 0 : GC_INNER void GC_read_dirty(void)
3428 : {
3429 : # if defined(GWW_VDB)
3430 : if (GC_GWW_AVAILABLE()) {
3431 : GC_gww_read_dirty();
3432 : return;
3433 : }
3434 : # endif
3435 0 : BCOPY((word *)GC_dirty_pages, GC_grungy_pages,
3436 : (sizeof GC_dirty_pages));
3437 0 : BZERO((word *)GC_dirty_pages, (sizeof GC_dirty_pages));
3438 0 : GC_protect_heap();
3439 0 : }
3440 :
3441 0 : GC_INNER GC_bool GC_page_was_dirty(struct hblk *h)
3442 : {
3443 : register word index;
3444 :
3445 : # if defined(GWW_VDB)
3446 : if (GC_GWW_AVAILABLE())
3447 : return GC_gww_page_was_dirty(h);
3448 : # endif
3449 :
3450 0 : index = PHT_HASH(h);
3451 0 : return(HDR(h) == 0 || get_pht_entry_from_index(GC_grungy_pages, index));
3452 : }
3453 :
3454 : /*
3455 : * Acquiring the allocation lock here is dangerous, since this
3456 : * can be called from within GC_call_with_alloc_lock, and the cord
3457 : * package does so. On systems that allow nested lock acquisition, this
3458 : * happens to work.
3459 : * On other systems, SET_LOCK_HOLDER and friends must be suitably defined.
3460 : */
3461 :
3462 : #if 0
3463 : static GC_bool syscall_acquired_lock = FALSE; /* Protected by GC lock. */
3464 :
3465 : void GC_begin_syscall(void)
3466 : {
3467 : /* FIXME: Resurrecting this code would require fixing the */
3468 : /* test, which can spuriously return TRUE. */
3469 : if (!I_HOLD_LOCK()) {
3470 : LOCK();
3471 : syscall_acquired_lock = TRUE;
3472 : }
3473 : }
3474 :
3475 : void GC_end_syscall(void)
3476 : {
3477 : if (syscall_acquired_lock) {
3478 : syscall_acquired_lock = FALSE;
3479 : UNLOCK();
3480 : }
3481 : }
3482 :
3483 : void GC_unprotect_range(ptr_t addr, word len)
3484 : {
3485 : struct hblk * start_block;
3486 : struct hblk * end_block;
3487 : register struct hblk *h;
3488 : ptr_t obj_start;
3489 :
3490 : if (!GC_dirty_maintained) return;
3491 : obj_start = GC_base(addr);
3492 : if (obj_start == 0) return;
3493 : if (GC_base(addr + len - 1) != obj_start) {
3494 : ABORT("GC_unprotect_range(range bigger than object)");
3495 : }
3496 : start_block = (struct hblk *)((word)addr & ~(GC_page_size - 1));
3497 : end_block = (struct hblk *)((word)(addr + len - 1) & ~(GC_page_size - 1));
3498 : end_block += GC_page_size/HBLKSIZE - 1;
3499 : for (h = start_block; h <= end_block; h++) {
3500 : register word index = PHT_HASH(h);
3501 :
3502 : async_set_pht_entry_from_index(GC_dirty_pages, index);
3503 : }
3504 : UNPROTECT(start_block,
3505 : ((ptr_t)end_block - (ptr_t)start_block) + HBLKSIZE);
3506 : }
3507 :
3508 :
3509 : /* We no longer wrap read by default, since that was causing too many */
3510 : /* problems. It is preferred that the client instead avoids writing */
3511 : /* to the write-protected heap with a system call. */
3512 : /* This still serves as sample code if you do want to wrap system calls.*/
3513 :
3514 : #if !defined(MSWIN32) && !defined(MSWINCE) && !defined(GC_USE_LD_WRAP)
3515 : /* Replacement for UNIX system call. */
3516 : /* Other calls that write to the heap should be handled similarly. */
3517 : /* Note that this doesn't work well for blocking reads: It will hold */
3518 : /* the allocation lock for the entire duration of the call. Multithreaded */
3519 : /* clients should really ensure that it won't block, either by setting */
3520 : /* the descriptor nonblocking, or by calling select or poll first, to */
3521 : /* make sure that input is available. */
3522 : /* Another, preferred alternative is to ensure that system calls never */
3523 : /* write to the protected heap (see above). */
3524 : # include <unistd.h>
3525 : # include <sys/uio.h>
3526 : ssize_t read(int fd, void *buf, size_t nbyte)
3527 : {
3528 : int result;
3529 :
3530 : GC_begin_syscall();
3531 : GC_unprotect_range(buf, (word)nbyte);
3532 : # if defined(IRIX5) || defined(GC_LINUX_THREADS)
3533 : /* Indirect system call may not always be easily available. */
3534 : /* We could call _read, but that would interfere with the */
3535 : /* libpthread interception of read. */
3536 : /* On Linux, we have to be careful with the linuxthreads */
3537 : /* read interception. */
3538 : {
3539 : struct iovec iov;
3540 :
3541 : iov.iov_base = buf;
3542 : iov.iov_len = nbyte;
3543 : result = readv(fd, &iov, 1);
3544 : }
3545 : # else
3546 : # if defined(HURD)
3547 : result = __read(fd, buf, nbyte);
3548 : # else
3549 : /* The two zero args at the end of this list are because one
3550 : IA-64 syscall() implementation actually requires six args
3551 : to be passed, even though they aren't always used. */
3552 : result = syscall(SYS_read, fd, buf, nbyte, 0, 0);
3553 : # endif /* !HURD */
3554 : # endif
3555 : GC_end_syscall();
3556 : return(result);
3557 : }
3558 : #endif /* !MSWIN32 && !MSWINCE && !GC_USE_LD_WRAP */
3559 :
3560 : #if defined(GC_USE_LD_WRAP) && !defined(THREADS)
3561 : /* We use the GNU ld call wrapping facility. */
3562 : /* I'm not sure that this actually wraps whatever version of read */
3563 : /* is called by stdio. That code also mentions __read. */
3564 : # include <unistd.h>
3565 : ssize_t __wrap_read(int fd, void *buf, size_t nbyte)
3566 : {
3567 : int result;
3568 :
3569 : GC_begin_syscall();
3570 : GC_unprotect_range(buf, (word)nbyte);
3571 : result = __real_read(fd, buf, nbyte);
3572 : GC_end_syscall();
3573 : return(result);
3574 : }
3575 :
3576 : /* We should probably also do this for __read, or whatever stdio */
3577 : /* actually calls. */
3578 : #endif
3579 : #endif /* 0 */
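For reference, the __wrap_read/__real_read pairing in the sample code above relies on GNU ld's --wrap feature: linking with -Wl,--wrap=read makes undefined references to read resolve to __wrap_read, while __real_read binds to the original libc read. A minimal standalone sketch of the same pattern (error handling omitted):

    /* Build with: cc app.c -Wl,--wrap=read (GNU ld) */
    #include <sys/types.h>

    ssize_t __real_read(int fd, void *buf, size_t nbyte); /* real read */

    ssize_t __wrap_read(int fd, void *buf, size_t nbyte)
    {
        /* ... unprotect [buf, buf+nbyte) first, as GC_unprotect_range */
        /* does in the sample code above ...                           */
        return __real_read(fd, buf, nbyte);
    }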
3580 :
3581 : # ifdef CHECKSUMS
3582 : /*ARGSUSED*/
3583 : GC_INNER GC_bool GC_page_was_ever_dirty(struct hblk *h)
3584 : {
3585 : # if defined(GWW_VDB)
3586 : if (GC_GWW_AVAILABLE())
3587 : return GC_gww_page_was_ever_dirty(h);
3588 : # endif
3589 : return(TRUE);
3590 : }
3591 : # endif /* CHECKSUMS */
3592 :
3593 : #endif /* MPROTECT_VDB */
3594 :
3595 : #ifdef PROC_VDB
3596 : /* See DEFAULT_VDB for interface descriptions. */
3597 :
3598 : /* This implementation assumes a Solaris 2.X like /proc */
3599 : /* pseudo-file-system from which we can read page modified bits. This */
3600 : /* facility is far from optimal (e.g. we would like to get the info for */
3601 : /* only some of the address space), but it avoids intercepting system */
3602 : /* calls. */
3603 :
3604 : # include <errno.h>
3605 : # include <sys/types.h>
3606 : # include <sys/signal.h>
3607 : # include <sys/fault.h>
3608 : # include <sys/syscall.h>
3609 : # include <sys/procfs.h>
3610 : # include <sys/stat.h>
3611 :
3612 : # define INITIAL_BUF_SZ 16384
3613 : STATIC word GC_proc_buf_size = INITIAL_BUF_SZ;
3614 : STATIC char *GC_proc_buf = NULL;
3615 : STATIC int GC_proc_fd = 0;
3616 :
3617 : GC_INNER void GC_dirty_init(void)
3618 : {
3619 : int fd;
3620 : char buf[30];
3621 :
3622 : if (GC_bytes_allocd != 0 || GC_bytes_allocd_before_gc != 0) {
3623 : memset(GC_written_pages, 0xff, sizeof(page_hash_table));
3624 : if (GC_print_stats == VERBOSE)
3625 : GC_log_printf("Allocated bytes:%lu:all pages may have been written\n",
3626 : (unsigned long)(GC_bytes_allocd
3627 : + GC_bytes_allocd_before_gc));
3628 : }
3629 :
3630 : sprintf(buf, "/proc/%ld", (long)getpid());
3631 : fd = open(buf, O_RDONLY);
3632 : if (fd < 0) {
3633 : ABORT("/proc open failed");
3634 : }
3635 : GC_proc_fd = syscall(SYS_ioctl, fd, PIOCOPENPD, 0);
3636 : close(fd);
3637 : if (GC_proc_fd < 0) {
3638 : WARN("/proc ioctl(PIOCOPENPD) failed", 0);
3639 : return;
3640 : }
3641 : syscall(SYS_fcntl, GC_proc_fd, F_SETFD, FD_CLOEXEC);
3642 :
3643 : GC_dirty_maintained = TRUE;
3644 : GC_proc_buf = GC_scratch_alloc(GC_proc_buf_size);
3645 : if (GC_proc_buf == NULL)
3646 : ABORT("Insufficient space for /proc read");
3647 : }
3648 :
3649 : # define READ read
3650 :
3651 : GC_INNER void GC_read_dirty(void)
3652 : {
3653 : int nmaps;
3654 : unsigned long npages;
3655 : unsigned pagesize;
3656 : ptr_t vaddr, limit;
3657 : struct prasmap * map;
3658 : char * bufp;
3659 : int i;
3660 :
3661 : BZERO(GC_grungy_pages, sizeof(GC_grungy_pages));
3662 : bufp = GC_proc_buf;
3663 : if (READ(GC_proc_fd, bufp, GC_proc_buf_size) <= 0) {
3664 : /* Retry with larger buffer. */
3665 : word new_size = 2 * GC_proc_buf_size;
3666 : char *new_buf;
3667 : if (GC_print_stats)
3668 : GC_err_printf("/proc read failed: GC_proc_buf_size = %lu\n",
3669 : (unsigned long)GC_proc_buf_size);
3670 :
3671 : new_buf = GC_scratch_alloc(new_size);
3672 : if (new_buf != 0) {
3673 : GC_proc_buf = bufp = new_buf;
3674 : GC_proc_buf_size = new_size;
3675 : }
3676 : if (READ(GC_proc_fd, bufp, GC_proc_buf_size) <= 0) {
3677 : WARN("Insufficient space for /proc read\n", 0);
3678 : /* Punt: */
3679 : memset(GC_grungy_pages, 0xff, sizeof (page_hash_table));
3680 : memset(GC_written_pages, 0xff, sizeof(page_hash_table));
3681 : return;
3682 : }
3683 : }
3684 :
3685 : /* Copy dirty bits into GC_grungy_pages */
3686 : nmaps = ((struct prpageheader *)bufp) -> pr_nmap;
3687 : # ifdef DEBUG_DIRTY_BITS
3688 : GC_log_printf("Proc VDB read: pr_nmap= %u, pr_npage= %lu\n",
3689 : nmaps, ((struct prpageheader *)bufp)->pr_npage);
3690 :
3691 : # endif
3692 : bufp += sizeof(struct prpageheader);
3693 : for (i = 0; i < nmaps; i++) {
3694 : map = (struct prasmap *)bufp;
3695 : vaddr = (ptr_t)(map -> pr_vaddr);
3696 : npages = map -> pr_npage;
3697 : pagesize = map -> pr_pagesize;
3698 : # ifdef DEBUG_DIRTY_BITS
3699 : GC_log_printf(
3700 : "pr_vaddr= %p, npage= %lu, mflags= 0x%x, pagesize= 0x%x\n",
3701 : vaddr, npages, map->pr_mflags, pagesize);
3702 : # endif
3703 :
3704 : bufp += sizeof(struct prasmap);
3705 : limit = vaddr + pagesize * npages;
3706 : for (; vaddr < limit; vaddr += pagesize) {
3707 : if ((*bufp++) & PG_MODIFIED) {
3708 : register struct hblk * h;
3709 : ptr_t next_vaddr = vaddr + pagesize;
3710 : # ifdef DEBUG_DIRTY_BITS
3711 : GC_log_printf("dirty page at: %p\n", vaddr);
3712 : # endif
3713 : for (h = (struct hblk *)vaddr; (ptr_t)h < next_vaddr; h++) {
3714 : register word index = PHT_HASH(h);
3715 : set_pht_entry_from_index(GC_grungy_pages, index);
3716 : }
3717 : }
3718 : }
3719 : bufp = (char *)(((word)bufp + (sizeof(long)-1)) & ~(sizeof(long)-1));
3720 : }
3721 : # ifdef DEBUG_DIRTY_BITS
3722 : GC_log_printf("Proc VDB read done.\n");
3723 : # endif
3724 :
3725 : /* Update GC_written_pages. */
3726 : GC_or_pages(GC_written_pages, GC_grungy_pages);
3727 : }
3728 :
3729 : # undef READ
3730 : #endif /* PROC_VDB */
3731 :
3732 : #ifdef PCR_VDB
3733 :
3734 : # include "vd/PCR_VD.h"
3735 :
3736 : # define NPAGES (32*1024) /* 128 MB */
3737 :
3738 : PCR_VD_DB GC_grungy_bits[NPAGES];
3739 :
3740 : STATIC ptr_t GC_vd_base = NULL;
3741 : /* Address corresponding to GC_grungy_bits[0] */
3742 : /* HBLKSIZE aligned. */
3743 :
3744 : GC_INNER void GC_dirty_init(void)
3745 : {
3746 : GC_dirty_maintained = TRUE;
3747 : /* For the time being, we assume the heap generally grows up */
3748 : GC_vd_base = GC_heap_sects[0].hs_start;
3749 : if (GC_vd_base == 0) {
3750 : ABORT("Bad initial heap segment");
3751 : }
3752 : if (PCR_VD_Start(HBLKSIZE, GC_vd_base, NPAGES*HBLKSIZE)
3753 : != PCR_ERes_okay) {
3754 : ABORT("Dirty bit initialization failed");
3755 : }
3756 : }
3757 :
3758 : GC_INNER void GC_read_dirty(void)
3759 : {
3760 : /* lazily enable dirty bits on newly added heap sects */
3761 : {
3762 : static int onhs = 0;
3763 : int nhs = GC_n_heap_sects;
3764 : for(; onhs < nhs; onhs++) {
3765 : PCR_VD_WriteProtectEnable(
3766 : GC_heap_sects[onhs].hs_start,
3767 : GC_heap_sects[onhs].hs_bytes );
3768 : }
3769 : }
3770 :
3771 : if (PCR_VD_Clear(GC_vd_base, NPAGES*HBLKSIZE, GC_grungy_bits)
3772 : != PCR_ERes_okay) {
3773 : ABORT("Dirty bit read failed");
3774 : }
3775 : }
3776 :
3777 : GC_INNER GC_bool GC_page_was_dirty(struct hblk *h)
3778 : {
3779 : if((ptr_t)h < GC_vd_base || (ptr_t)h >= GC_vd_base + NPAGES*HBLKSIZE) {
3780 : return(TRUE);
3781 : }
3782 : return(GC_grungy_bits[h - (struct hblk *)GC_vd_base] & PCR_VD_DB_dirtyBit);
3783 : }
3784 :
3785 : /*ARGSUSED*/
3786 : GC_INNER void GC_remove_protection(struct hblk *h, word nblocks,
3787 : GC_bool is_ptrfree)
3788 : {
3789 : PCR_VD_WriteProtectDisable(h, nblocks*HBLKSIZE);
3790 : PCR_VD_WriteProtectEnable(h, nblocks*HBLKSIZE);
3791 : }
3792 :
3793 : #endif /* PCR_VDB */
3794 :
3795 : #if defined(MPROTECT_VDB) && defined(DARWIN)
3796 : /* The following sources were used as a "reference" for this exception
3797 : handling code:
3798 : 1. Apple's mach/xnu documentation
3799 : 2. Timothy J. Wood's "Mach Exception Handlers 101" post to the
3800 : omnigroup's macosx-dev list.
3801 : www.omnigroup.com/mailman/archive/macosx-dev/2000-June/014178.html
3802 : 3. macosx-nat.c from Apple's GDB source code.
3803 : */
3804 :
3805 : /* The bug that caused all this trouble should now be fixed. This should
3806 : eventually be removed if all goes well. */
3807 :
3808 : /* #define BROKEN_EXCEPTION_HANDLING */
3809 :
3810 : #include <mach/mach.h>
3811 : #include <mach/mach_error.h>
3812 : #include <mach/thread_status.h>
3813 : #include <mach/exception.h>
3814 : #include <mach/task.h>
3815 : #include <pthread.h>
3816 :
3817 : /* These are not defined in any header, although they are documented */
3818 : extern boolean_t
3819 : exc_server(mach_msg_header_t *, mach_msg_header_t *);
3820 :
3821 : extern kern_return_t
3822 : exception_raise(mach_port_t, mach_port_t, mach_port_t, exception_type_t,
3823 : exception_data_t, mach_msg_type_number_t);
3824 :
3825 : extern kern_return_t
3826 : exception_raise_state(mach_port_t, mach_port_t, mach_port_t, exception_type_t,
3827 : exception_data_t, mach_msg_type_number_t,
3828 : thread_state_flavor_t*, thread_state_t,
3829 : mach_msg_type_number_t, thread_state_t,
3830 : mach_msg_type_number_t*);
3831 :
3832 : extern kern_return_t
3833 : exception_raise_state_identity(mach_port_t, mach_port_t, mach_port_t,
3834 : exception_type_t, exception_data_t,
3835 : mach_msg_type_number_t, thread_state_flavor_t*,
3836 : thread_state_t, mach_msg_type_number_t,
3837 : thread_state_t, mach_msg_type_number_t*);
3838 :
3839 : GC_API_OSCALL kern_return_t
3840 : catch_exception_raise(mach_port_t exception_port, mach_port_t thread,
3841 : mach_port_t task, exception_type_t exception,
3842 : exception_data_t code, mach_msg_type_number_t code_count);
3843 :
3844 : /* These should never be called, but just in case... */
3845 : GC_API_OSCALL kern_return_t
3846 : catch_exception_raise_state(mach_port_name_t exception_port, int exception,
3847 : exception_data_t code,
3848 : mach_msg_type_number_t codeCnt, int flavor,
3849 : thread_state_t old_state, int old_stateCnt,
3850 : thread_state_t new_state, int new_stateCnt)
3851 : {
3852 : ABORT("Unexpected catch_exception_raise_state invocation");
3853 : return(KERN_INVALID_ARGUMENT);
3854 : }
3855 :
3856 : GC_API_OSCALL kern_return_t
3857 : catch_exception_raise_state_identity(mach_port_name_t exception_port,
3858 : mach_port_t thread, mach_port_t task,
3859 : int exception, exception_data_t code,
3860 : mach_msg_type_number_t codeCnt, int flavor,
3861 : thread_state_t old_state, int old_stateCnt,
3862 : thread_state_t new_state, int new_stateCnt)
3863 : {
3864 : ABORT("Unexpected catch_exception_raise_state_identity invocation");
3865 : return(KERN_INVALID_ARGUMENT);
3866 : }
3867 :
3868 : #define MAX_EXCEPTION_PORTS 16
3869 :
3870 : static struct {
3871 : mach_msg_type_number_t count;
3872 : exception_mask_t masks[MAX_EXCEPTION_PORTS];
3873 : exception_handler_t ports[MAX_EXCEPTION_PORTS];
3874 : exception_behavior_t behaviors[MAX_EXCEPTION_PORTS];
3875 : thread_state_flavor_t flavors[MAX_EXCEPTION_PORTS];
3876 : } GC_old_exc_ports;
3877 :
3878 : STATIC struct {
3879 : void (*volatile os_callback[3])(void);
3880 : mach_port_t exception;
3881 : # if defined(THREADS)
3882 : mach_port_t reply;
3883 : # endif
3884 : } GC_ports = {
3885 : {
3886 : /* This prevents these routines from being stripped as dead code. */
3887 : (void (*)(void))catch_exception_raise,
3888 : (void (*)(void))catch_exception_raise_state,
3889 : (void (*)(void))catch_exception_raise_state_identity
3890 : },
3891 : 0
3892 : };
3893 :
3894 : typedef struct {
3895 : mach_msg_header_t head;
3896 : } GC_msg_t;
3897 :
3898 : typedef enum {
3899 : GC_MP_NORMAL,
3900 : GC_MP_DISCARDING,
3901 : GC_MP_STOPPED
3902 : } GC_mprotect_state_t;
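/* State transitions, driven by GC_mprotect_thread below:            */
/*   GC_MP_NORMAL     -> GC_MP_DISCARDING  on an ID_STOP request;    */
/*   GC_MP_DISCARDING -> GC_MP_STOPPED     once the receive times    */
/*                                         out (queue drained);      */
/*   GC_MP_STOPPED    -> GC_MP_NORMAL      on ID_RESUME.             */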
3903 :
3904 : #ifdef THREADS
3905 : /* FIXME: 1 and 2 seem to be safe to use in the msgh_id field, */
3906 : /* but it isn't documented. Use the source and see if they */
3907 : /* should be ok. */
3908 : # define ID_STOP 1
3909 : # define ID_RESUME 2
3910 :
3911 : /* This value is only used on the reply port. */
3912 : # define ID_ACK 3
3913 :
3914 : STATIC GC_mprotect_state_t GC_mprotect_state = 0;
3915 :
3916 : /* The following should ONLY be called when the world is stopped. */
3917 : STATIC void GC_mprotect_thread_notify(mach_msg_id_t id)
3918 : {
3919 : struct {
3920 : GC_msg_t msg;
3921 : mach_msg_trailer_t trailer;
3922 : } buf;
3923 : mach_msg_return_t r;
3924 :
3925 : /* remote, local */
3926 : buf.msg.head.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND, 0);
3927 : buf.msg.head.msgh_size = sizeof(buf.msg);
3928 : buf.msg.head.msgh_remote_port = GC_ports.exception;
3929 : buf.msg.head.msgh_local_port = MACH_PORT_NULL;
3930 : buf.msg.head.msgh_id = id;
3931 :
3932 : r = mach_msg(&buf.msg.head, MACH_SEND_MSG | MACH_RCV_MSG | MACH_RCV_LARGE,
3933 : sizeof(buf.msg), sizeof(buf), GC_ports.reply,
3934 : MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
3935 : if (r != MACH_MSG_SUCCESS)
3936 : ABORT("mach_msg failed in GC_mprotect_thread_notify");
3937 : if (buf.msg.head.msgh_id != ID_ACK)
3938 : ABORT("Invalid ack in GC_mprotect_thread_notify");
3939 : }
3940 :
3941 : /* Should only be called by the mprotect thread */
3942 : STATIC void GC_mprotect_thread_reply(void)
3943 : {
3944 : GC_msg_t msg;
3945 : mach_msg_return_t r;
3946 : /* remote, local */
3947 :
3948 : msg.head.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND, 0);
3949 : msg.head.msgh_size = sizeof(msg);
3950 : msg.head.msgh_remote_port = GC_ports.reply;
3951 : msg.head.msgh_local_port = MACH_PORT_NULL;
3952 : msg.head.msgh_id = ID_ACK;
3953 :
3954 : r = mach_msg(&msg.head, MACH_SEND_MSG, sizeof(msg), 0, MACH_PORT_NULL,
3955 : MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
3956 : if (r != MACH_MSG_SUCCESS)
3957 : ABORT("mach_msg failed in GC_mprotect_thread_reply");
3958 : }
3959 :
3960 : GC_INNER void GC_mprotect_stop(void)
3961 : {
3962 : GC_mprotect_thread_notify(ID_STOP);
3963 : }
3964 :
3965 : GC_INNER void GC_mprotect_resume(void)
3966 : {
3967 : GC_mprotect_thread_notify(ID_RESUME);
3968 : }
3969 :
3970 : # ifndef GC_NO_THREADS_DISCOVERY
3971 : GC_INNER void GC_darwin_register_mach_handler_thread(mach_port_t thread);
3972 : # endif
3973 :
3974 : #else
3975 : /* The compiler should optimize away any GC_mprotect_state computations */
3976 : # define GC_mprotect_state GC_MP_NORMAL
3977 : #endif /* !THREADS */
3978 :
3979 : STATIC void *GC_mprotect_thread(void *arg)
3980 : {
3981 : mach_msg_return_t r;
3982 : /* These two structures contain some private kernel data. We don't */
3983 : /* need to access any of it so we don't bother defining a proper */
3984 : /* struct. The correct definitions are in the xnu source code. */
3985 : struct {
3986 : mach_msg_header_t head;
3987 : char data[256];
3988 : } reply;
3989 : struct {
3990 : mach_msg_header_t head;
3991 : mach_msg_body_t msgh_body;
3992 : char data[1024];
3993 : } msg;
3994 : mach_msg_id_t id;
3995 :
3996 : # if defined(THREADS) && !defined(GC_NO_THREADS_DISCOVERY)
3997 : GC_darwin_register_mach_handler_thread(mach_thread_self());
3998 : # endif
3999 :
4000 : for(;;) {
4001 : r = mach_msg(&msg.head, MACH_RCV_MSG | MACH_RCV_LARGE |
4002 : (GC_mprotect_state == GC_MP_DISCARDING ? MACH_RCV_TIMEOUT : 0),
4003 : 0, sizeof(msg), GC_ports.exception,
4004 : GC_mprotect_state == GC_MP_DISCARDING ? 0
4005 : : MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
4006 : id = r == MACH_MSG_SUCCESS ? msg.head.msgh_id : -1;
4007 :
4008 : # if defined(THREADS)
4009 : if(GC_mprotect_state == GC_MP_DISCARDING) {
4010 : if(r == MACH_RCV_TIMED_OUT) {
4011 : GC_mprotect_state = GC_MP_STOPPED;
4012 : GC_mprotect_thread_reply();
4013 : continue;
4014 : }
4015 : if(r == MACH_MSG_SUCCESS && (id == ID_STOP || id == ID_RESUME))
4016 : ABORT("Out of order mprotect thread request");
4017 : }
4018 : # endif /* THREADS */
4019 :
4020 : if (r != MACH_MSG_SUCCESS) {
4021 : if (GC_print_stats)
4022 : GC_log_printf("mach_msg failed with code %d: %s\n", (int)r,
4023 : mach_error_string(r));
4024 : ABORT("mach_msg failed");
4025 : }
4026 :
4027 : switch(id) {
4028 : # if defined(THREADS)
4029 : case ID_STOP:
4030 : if(GC_mprotect_state != GC_MP_NORMAL)
4031 : ABORT("Called mprotect_stop when state wasn't normal");
4032 : GC_mprotect_state = GC_MP_DISCARDING;
4033 : break;
4034 : case ID_RESUME:
4035 : if(GC_mprotect_state != GC_MP_STOPPED)
4036 : ABORT("Called mprotect_resume when state wasn't stopped");
4037 : GC_mprotect_state = GC_MP_NORMAL;
4038 : GC_mprotect_thread_reply();
4039 : break;
4040 : # endif /* THREADS */
4041 : default:
4042 : /* Handle the message (calls catch_exception_raise) */
4043 : if(!exc_server(&msg.head, &reply.head))
4044 : ABORT("exc_server failed");
4045 : /* Send the reply */
4046 : r = mach_msg(&reply.head, MACH_SEND_MSG, reply.head.msgh_size, 0,
4047 : MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE,
4048 : MACH_PORT_NULL);
4049 : if(r != MACH_MSG_SUCCESS) {
4050 : /* This will fail if the thread dies, but the thread */
4051 : /* shouldn't die... */
4052 : # ifdef BROKEN_EXCEPTION_HANDLING
4053 : GC_err_printf("mach_msg failed with %d %s while sending "
4054 : "exc reply\n", (int)r, mach_error_string(r));
4055 : # else
4056 : ABORT("mach_msg failed while sending exception reply");
4057 : # endif
4058 : }
4059 : } /* switch */
4060 : } /* for(;;) */
4061 : /* NOT REACHED */
4062 : return NULL;
4063 : }
4064 :
4065 : /* All this SIGBUS code shouldn't be necessary. All protection faults should
4066 : be going through the mach exception handler. However, it seems a SIGBUS is
4067 : occasionally sent for some unknown reason. Even more odd, it seems to be
4068 : meaningless and safe to ignore. */
4069 : #ifdef BROKEN_EXCEPTION_HANDLING
4070 :
4071 : /* Updates to this aren't atomic, but the SIGBUSes seem pretty rare. */
4072 : /* Even if this doesn't get updated properly, it isn't really a problem. */
4073 : STATIC int GC_sigbus_count = 0;
4074 :
4075 : STATIC void GC_darwin_sigbus(int num, siginfo_t *sip, void *context)
4076 : {
4077 : if (num != SIGBUS)
4078 : ABORT("Got a non-sigbus signal in the sigbus handler");
4079 :
4080 : /* Ugh... some seem safe to ignore, but too many in a row probably means
4081 : trouble. GC_sigbus_count is reset for each mach exception that is
4082 : handled */
4083 : if (GC_sigbus_count >= 8) {
4084 : ABORT("Got more than 8 SIGBUSs in a row!");
4085 : } else {
4086 : GC_sigbus_count++;
4087 : WARN("Ignoring SIGBUS.\n", 0);
4088 : }
4089 : }
4090 : #endif /* BROKEN_EXCEPTION_HANDLING */
4091 :
4092 : GC_INNER void GC_dirty_init(void)
4093 : {
4094 : kern_return_t r;
4095 : mach_port_t me;
4096 : pthread_t thread;
4097 : pthread_attr_t attr;
4098 : exception_mask_t mask;
4099 :
4100 : # ifdef CAN_HANDLE_FORK
4101 : if (GC_handle_fork) {
4102 : /* To both support GC incremental mode and GC functions usage in */
4103 : /* the forked child, pthread_atfork should be used to install */
4104 : /* handlers that switch off GC_dirty_maintained in the child */
4105 : /* gracefully (unprotecting all pages and clearing */
4106 : /* GC_mach_handler_thread). For now, we just disable incremental */
4107 : /* mode if fork() handling is requested by the client. */
4108 : if (GC_print_stats)
4109 : GC_log_printf(
4110 : "GC incremental mode disabled since fork() handling requested\n");
4111 : return;
4112 : }
4113 : # endif
4114 :
4115 : if (GC_print_stats == VERBOSE)
4116 : GC_log_printf(
4117 : "Initializing mach/darwin mprotect virtual dirty bit implementation\n");
4118 : # ifdef BROKEN_EXCEPTION_HANDLING
4119 : WARN("Enabling workarounds for various darwin "
4120 : "exception handling bugs.\n", 0);
4121 : # endif
4122 : GC_dirty_maintained = TRUE;
4123 : if (GC_page_size % HBLKSIZE != 0) {
4124 : ABORT("Page size not multiple of HBLKSIZE");
4125 : }
4126 :
4127 : GC_task_self = me = mach_task_self();
4128 :
4129 : r = mach_port_allocate(me, MACH_PORT_RIGHT_RECEIVE, &GC_ports.exception);
4130 : if (r != KERN_SUCCESS)
4131 : ABORT("mach_port_allocate failed (exception port)");
4132 :
4133 : r = mach_port_insert_right(me, GC_ports.exception, GC_ports.exception,
4134 : MACH_MSG_TYPE_MAKE_SEND);
4135 : if (r != KERN_SUCCESS)
4136 : ABORT("mach_port_insert_right failed (exception port)");
4137 :
4138 : # if defined(THREADS)
4139 : r = mach_port_allocate(me, MACH_PORT_RIGHT_RECEIVE, &GC_ports.reply);
4140 : if(r != KERN_SUCCESS)
4141 : ABORT("mach_port_allocate failed (reply port)");
4142 : # endif
4143 :
4144 : /* The exceptions we want to catch */
4145 : mask = EXC_MASK_BAD_ACCESS;
4146 :
4147 : r = task_get_exception_ports(me, mask, GC_old_exc_ports.masks,
4148 : &GC_old_exc_ports.count, GC_old_exc_ports.ports,
4149 : GC_old_exc_ports.behaviors,
4150 : GC_old_exc_ports.flavors);
4151 : if (r != KERN_SUCCESS)
4152 : ABORT("task_get_exception_ports failed");
4153 :
4154 : r = task_set_exception_ports(me, mask, GC_ports.exception, EXCEPTION_DEFAULT,
4155 : GC_MACH_THREAD_STATE);
4156 : if (r != KERN_SUCCESS)
4157 : ABORT("task_set_exception_ports failed");
4158 : if (pthread_attr_init(&attr) != 0)
4159 : ABORT("pthread_attr_init failed");
4160 : if (pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) != 0)
4161 : ABORT("pthread_attr_setdetachedstate failed");
4162 :
4163 : # undef pthread_create
4164 : /* This will call the real pthread function, not our wrapper */
4165 : if (pthread_create(&thread, &attr, GC_mprotect_thread, NULL) != 0)
4166 : ABORT("pthread_create failed");
4167 : pthread_attr_destroy(&attr);
4168 :
4169 : /* Set up the SIGBUS handler for ignoring the meaningless SIGBUSes */
4170 : # ifdef BROKEN_EXCEPTION_HANDLING
4171 : {
4172 : struct sigaction sa, oldsa;
4173 : sa.sa_handler = (SIG_HNDLR_PTR)GC_darwin_sigbus;
4174 : sigemptyset(&sa.sa_mask);
4175 : sa.sa_flags = SA_RESTART|SA_SIGINFO;
4176 : if (sigaction(SIGBUS, &sa, &oldsa) < 0)
4177 : ABORT("sigaction failed");
4178 : if ((SIG_HNDLR_PTR)oldsa.sa_handler != SIG_DFL) {
4179 : if (GC_print_stats == VERBOSE)
4180 : GC_err_printf("Replaced other SIGBUS handler\n");
4181 : }
4182 : }
4183 : # endif /* BROKEN_EXCEPTION_HANDLING */
4184 : }
4185 :
4186 : /* The source code for Apple's GDB was used as a reference for the */
4187 : /* exception forwarding code. This code is similar to the GDB code only */
4188 : /* because there is only one way to do it. */
4189 : STATIC kern_return_t GC_forward_exception(mach_port_t thread, mach_port_t task,
4190 : exception_type_t exception,
4191 : exception_data_t data,
4192 : mach_msg_type_number_t data_count)
4193 : {
4194 : unsigned int i;
4195 : kern_return_t r;
4196 : mach_port_t port;
4197 : exception_behavior_t behavior;
4198 : thread_state_flavor_t flavor;
4199 :
4200 : thread_state_data_t thread_state;
4201 : mach_msg_type_number_t thread_state_count = THREAD_STATE_MAX;
4202 :
4203 : for (i=0; i < GC_old_exc_ports.count; i++)
4204 : if (GC_old_exc_ports.masks[i] & (1 << exception))
4205 : break;
4206 : if (i == GC_old_exc_ports.count)
4207 : ABORT("No handler for exception!");
4208 :
4209 : port = GC_old_exc_ports.ports[i];
4210 : behavior = GC_old_exc_ports.behaviors[i];
4211 : flavor = GC_old_exc_ports.flavors[i];
4212 :
4213 : if (behavior == EXCEPTION_STATE || behavior == EXCEPTION_STATE_IDENTITY) {
4214 : r = thread_get_state(thread, flavor, thread_state, &thread_state_count);
4215 : if(r != KERN_SUCCESS)
4216 : ABORT("thread_get_state failed in forward_exception");
4217 : }
4218 :
4219 : switch(behavior) {
4220 : case EXCEPTION_STATE:
4221 : r = exception_raise_state(port, thread, task, exception, data, data_count,
4222 : &flavor, thread_state, thread_state_count,
4223 : thread_state, &thread_state_count);
4224 : break;
4225 : case EXCEPTION_STATE_IDENTITY:
4226 : r = exception_raise_state_identity(port, thread, task, exception, data,
4227 : data_count, &flavor, thread_state,
4228 : thread_state_count, thread_state,
4229 : &thread_state_count);
4230 : break;
4231 : /* case EXCEPTION_DEFAULT: */ /* default signal handlers */
4232 : default: /* user-supplied signal handlers */
4233 : r = exception_raise(port, thread, task, exception, data, data_count);
4234 : }
4235 :
4236 : if (behavior == EXCEPTION_STATE || behavior == EXCEPTION_STATE_IDENTITY) {
4237 : r = thread_set_state(thread, flavor, thread_state, thread_state_count);
4238 : if (r != KERN_SUCCESS)
4239 : ABORT("thread_set_state failed in forward_exception");
4240 : }
4241 : return r;
4242 : }
4243 :
4244 : #define FWD() GC_forward_exception(thread, task, exception, code, code_count)
4245 :
4246 : #ifdef ARM32
4247 : # define DARWIN_EXC_STATE ARM_EXCEPTION_STATE
4248 : # define DARWIN_EXC_STATE_COUNT ARM_EXCEPTION_STATE_COUNT
4249 : # define DARWIN_EXC_STATE_T arm_exception_state_t
4250 : # define DARWIN_EXC_STATE_DAR THREAD_FLD(far)
4251 : #elif defined(POWERPC)
4252 : # if CPP_WORDSZ == 32
4253 : # define DARWIN_EXC_STATE PPC_EXCEPTION_STATE
4254 : # define DARWIN_EXC_STATE_COUNT PPC_EXCEPTION_STATE_COUNT
4255 : # define DARWIN_EXC_STATE_T ppc_exception_state_t
4256 : # else
4257 : # define DARWIN_EXC_STATE PPC_EXCEPTION_STATE64
4258 : # define DARWIN_EXC_STATE_COUNT PPC_EXCEPTION_STATE64_COUNT
4259 : # define DARWIN_EXC_STATE_T ppc_exception_state64_t
4260 : # endif
4261 : # define DARWIN_EXC_STATE_DAR THREAD_FLD(dar)
4262 : #elif defined(I386) || defined(X86_64)
4263 : # if CPP_WORDSZ == 32
4264 : # define DARWIN_EXC_STATE x86_EXCEPTION_STATE32
4265 : # define DARWIN_EXC_STATE_COUNT x86_EXCEPTION_STATE32_COUNT
4266 : # define DARWIN_EXC_STATE_T x86_exception_state32_t
4267 : # else
4268 : # define DARWIN_EXC_STATE x86_EXCEPTION_STATE64
4269 : # define DARWIN_EXC_STATE_COUNT x86_EXCEPTION_STATE64_COUNT
4270 : # define DARWIN_EXC_STATE_T x86_exception_state64_t
4271 : # endif
4272 : # define DARWIN_EXC_STATE_DAR THREAD_FLD(faultvaddr)
4273 : #else
4274 : # error FIXME for non-arm/ppc/x86 darwin
4275 : #endif
4276 :
4277 : /* This violates the namespace rules but there isn't anything that can */
4278 : /* be done about it. The exception handling stuff is hard coded to */
4279 : /* call this. catch_exception_raise, catch_exception_raise_state, and */
4280 : /* catch_exception_raise_state_identity are called from the OS. */
4281 : GC_API_OSCALL kern_return_t
4282 : catch_exception_raise(mach_port_t exception_port, mach_port_t thread,
4283 : mach_port_t task, exception_type_t exception,
4284 : exception_data_t code, mach_msg_type_number_t code_count)
4285 : {
4286 : kern_return_t r;
4287 : char *addr;
4288 : struct hblk *h;
4289 : unsigned int i;
4290 : thread_state_flavor_t flavor = DARWIN_EXC_STATE;
4291 : mach_msg_type_number_t exc_state_count = DARWIN_EXC_STATE_COUNT;
4292 : DARWIN_EXC_STATE_T exc_state;
4293 :
4294 : if (exception != EXC_BAD_ACCESS || code[0] != KERN_PROTECTION_FAILURE) {
4295 : # ifdef DEBUG_EXCEPTION_HANDLING
4296 : /* We aren't interested, pass it on to the old handler */
4297 : GC_log_printf("Exception: 0x%x Code: 0x%x 0x%x in catch...\n",
4298 : exception, code_count > 0 ? code[0] : -1,
4299 : code_count > 1 ? code[1] : -1);
4300 : # endif
4301 : return FWD();
4302 : }
4303 :
4304 : r = thread_get_state(thread, flavor, (natural_t*)&exc_state,
4305 : &exc_state_count);
4306 : if(r != KERN_SUCCESS) {
4307 : /* The thread is supposed to be suspended while the exception */
4308 : /* handler is called. This shouldn't fail. */
4309 : # ifdef BROKEN_EXCEPTION_HANDLING
4310 : GC_err_printf("thread_get_state failed in catch_exception_raise\n");
4311 : return KERN_SUCCESS;
4312 : # else
4313 : ABORT("thread_get_state failed in catch_exception_raise");
4314 : # endif
4315 : }
4316 :
4317 : /* This is the address that caused the fault */
4318 : addr = (char*) exc_state.DARWIN_EXC_STATE_DAR;
4319 : if (HDR(addr) == 0) {
4320 : /* Ugh... just like the SIGBUS problem above, it seems we get */
4321 : /* a bogus KERN_PROTECTION_FAILURE every once in a while. We wait */
4322 : /* till we get a bunch in a row before doing anything about it. */
4323 : /* If a "real" fault ever occurs it'll just keep faulting over and */
4324 : /* over and we'll hit the limit pretty quickly. */
4325 : # ifdef BROKEN_EXCEPTION_HANDLING
4326 : static char *last_fault;
4327 : static int last_fault_count;
4328 :
4329 : if(addr != last_fault) {
4330 : last_fault = addr;
4331 : last_fault_count = 0;
4332 : }
4333 : if(++last_fault_count < 32) {
4334 : if(last_fault_count == 1)
4335 : WARN("Ignoring KERN_PROTECTION_FAILURE at %p\n", addr);
4336 : return KERN_SUCCESS;
4337 : }
4338 :
4339 : GC_err_printf(
4340 : "Unexpected KERN_PROTECTION_FAILURE at %p; aborting...\n", addr);
4341 : /* Can't pass it along to the signal handler because that is */
4342 : /* ignoring SIGBUS signals. We also shouldn't call ABORT here as */
4343 : /* signals don't always work too well from the exception handler. */
4344 : exit(EXIT_FAILURE);
4345 : # else /* BROKEN_EXCEPTION_HANDLING */
4346 : /* Pass it along to the next exception handler
4347 : (which should call SIGBUS/SIGSEGV) */
4348 : return FWD();
4349 : # endif /* !BROKEN_EXCEPTION_HANDLING */
4350 : }
4351 :
4352 : # ifdef BROKEN_EXCEPTION_HANDLING
4353 : /* Reset the number of consecutive SIGBUSs */
4354 : GC_sigbus_count = 0;
4355 : # endif
4356 :
4357 : if (GC_mprotect_state == GC_MP_NORMAL) { /* common case */
4358 : h = (struct hblk*)((word)addr & ~(GC_page_size-1));
4359 : UNPROTECT(h, GC_page_size);
4360 : for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
4361 : register int index = PHT_HASH(h+i);
4362 : async_set_pht_entry_from_index(GC_dirty_pages, index);
4363 : }
4364 : } else if (GC_mprotect_state == GC_MP_DISCARDING) {
4365 : /* Lie to the thread for now. No sense UNPROTECT()ing the memory
4366 : when we're just going to PROTECT() it again later. The thread
4367 : will just fault again once it resumes */
4368 : } else {
4369 : /* This shouldn't happen, but forward it if it does. */
4370 : GC_err_printf("KERN_PROTECTION_FAILURE while world is stopped\n");
4371 : return FWD();
4372 : }
4373 : return KERN_SUCCESS;
4374 : }
4375 : #undef FWD
4376 :
4377 : #ifndef NO_DESC_CATCH_EXCEPTION_RAISE
4378 : /* These symbols should have the REFERENCED_DYNAMICALLY (0x10) bit set */
4379 : /* let strip know they are not to be stripped. */
4380 : __asm__(".desc _catch_exception_raise, 0x10");
4381 : __asm__(".desc _catch_exception_raise_state, 0x10");
4382 : __asm__(".desc _catch_exception_raise_state_identity, 0x10");
4383 : #endif
4384 :
4385 : #endif /* DARWIN && MPROTECT_VDB */
4386 :
4387 : #ifndef HAVE_INCREMENTAL_PROTECTION_NEEDS
4388 : GC_API int GC_CALL GC_incremental_protection_needs(void)
4389 : {
4390 : return GC_PROTECTS_NONE;
4391 : }
4392 : #endif /* !HAVE_INCREMENTAL_PROTECTION_NEEDS */
4393 :
4394 : #ifdef ECOS
4395 : /* Undo sbrk() redirection. */
4396 : # undef sbrk
4397 : #endif
4398 :
4399 : /* If value is non-zero then allocate executable memory. */
4400 0 : GC_API void GC_CALL GC_set_pages_executable(int value)
4401 : {
4402 : GC_ASSERT(!GC_is_initialized);
4403 : /* Even if IGNORE_PAGES_EXECUTABLE is defined, GC_pages_executable is */
4404 : /* touched here to prevent a compiler warning. */
4405 0 : GC_pages_executable = (GC_bool)(value != 0);
4406 0 : }
4407 :
4408 : /* Returns non-zero if the GC-allocated memory is executable. */
4409 : /* GC_get_pages_executable is defined after all the places */
4410 : /* where GC_get_pages_executable is undefined. */
4411 0 : GC_API int GC_CALL GC_get_pages_executable(void)
4412 : {
4413 : # ifdef IGNORE_PAGES_EXECUTABLE
4414 : return 1; /* Always allocate executable memory. */
4415 : # else
4416 0 : return (int)GC_pages_executable;
4417 : # endif
4418 : }
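A usage sketch; per the assertion above, the setter is only legal before the collector is initialized:

    #include "gc.h"

    int main(void)
    {
        GC_set_pages_executable(1); /* must precede GC_INIT(); note the */
                                    /* GC_ASSERT(!GC_is_initialized)    */
        GC_INIT();
        /* heap pages are now requested with execute permission */
        return GC_get_pages_executable() ? 0 : 1;
    }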
4419 :
4420 : /* Call stack save code for debugging. Should probably be in */
4421 : /* mach_dep.c, but that requires reorganization. */
4422 :
4423 : /* I suspect the following works for most X86 *nix variants, so */
4424 : /* long as the frame pointer is explicitly stored. In the case of gcc, */
4425 : /* compiler flags (e.g. -fomit-frame-pointer) determine whether it is. */
4426 : #if defined(I386) && defined(LINUX) && defined(SAVE_CALL_CHAIN)
4427 : # include <features.h>
4428 :
4429 : struct frame {
4430 : struct frame *fr_savfp;
4431 : long fr_savpc;
4432 : long fr_arg[NARGS]; /* All the arguments go here. */
4433 : };
4434 : #endif
4435 :
4436 : #if defined(SPARC)
4437 : # if defined(LINUX)
4438 : # include <features.h>
4439 :
4440 : struct frame {
4441 : long fr_local[8];
4442 : long fr_arg[6];
4443 : struct frame *fr_savfp;
4444 : long fr_savpc;
4445 : # ifndef __arch64__
4446 : char *fr_stret;
4447 : # endif
4448 : long fr_argd[6];
4449 : long fr_argx[0];
4450 : };
4451 : # elif defined (DRSNX)
4452 : # include <sys/sparc/frame.h>
4453 : # elif defined(OPENBSD)
4454 : # include <frame.h>
4455 : # elif defined(FREEBSD) || defined(NETBSD)
4456 : # include <machine/frame.h>
4457 : # else
4458 : # include <sys/frame.h>
4459 : # endif
4460 : # if NARGS > 6
4461 : # error We only know how to get the first 6 arguments
4462 : # endif
4463 : #endif /* SPARC */
4464 :
4465 : #ifdef NEED_CALLINFO
4466 : /* Fill in the pc and argument information for up to NFRAMES of my */
4467 : /* callers. Ignore my frame and my caller's frame. */
4468 :
4469 : #ifdef LINUX
4470 : # include <unistd.h>
4471 : #endif
4472 :
4473 : #endif /* NEED_CALLINFO */
4474 :
4475 : #if defined(GC_HAVE_BUILTIN_BACKTRACE)
4476 : # ifdef _MSC_VER
4477 : # include "private/msvc_dbg.h"
4478 : # else
4479 : # include <execinfo.h>
4480 : # endif
4481 : #endif
4482 :
4483 : #ifdef SAVE_CALL_CHAIN
4484 :
4485 : #if NARGS == 0 && NFRAMES % 2 == 0 /* No padding */ \
4486 : && defined(GC_HAVE_BUILTIN_BACKTRACE)
4487 :
4488 : #ifdef REDIRECT_MALLOC
4489 : /* Deal with possible malloc calls in backtrace by omitting */
4490 : /* the infinitely recursing backtrace. */
4491 : # ifdef THREADS
4492 : __thread /* If your compiler doesn't understand this */
4493 : /* you could use something like pthread_getspecific. */
4494 : # endif
4495 : GC_bool GC_in_save_callers = FALSE;
4496 : #endif
4497 :
4498 : GC_INNER void GC_save_callers(struct callinfo info[NFRAMES])
4499 : {
4500 : void * tmp_info[NFRAMES + 1];
4501 : int npcs, i;
4502 : # define IGNORE_FRAMES 1
4503 :
4504 : /* We retrieve NFRAMES+1 pc values, but discard the first, since it */
4505 : /* points to our own frame. */
4506 : # ifdef REDIRECT_MALLOC
4507 : if (GC_in_save_callers) {
4508 : info[0].ci_pc = (word)(&GC_save_callers);
4509 : for (i = 1; i < NFRAMES; ++i) info[i].ci_pc = 0;
4510 : return;
4511 : }
4512 : GC_in_save_callers = TRUE;
4513 : # endif
4514 : GC_STATIC_ASSERT(sizeof(struct callinfo) == sizeof(void *));
4515 : npcs = backtrace((void **)tmp_info, NFRAMES + IGNORE_FRAMES);
4516 : BCOPY(tmp_info+IGNORE_FRAMES, info, (npcs - IGNORE_FRAMES) * sizeof(void *));
4517 : for (i = npcs - IGNORE_FRAMES; i < NFRAMES; ++i) info[i].ci_pc = 0;
4518 : # ifdef REDIRECT_MALLOC
4519 : GC_in_save_callers = FALSE;
4520 : # endif
4521 : }
4522 :
4523 : #else /* No builtin backtrace; do it ourselves */
4524 :
4525 : #if (defined(OPENBSD) || defined(NETBSD) || defined(FREEBSD)) && defined(SPARC)
4526 : # define FR_SAVFP fr_fp
4527 : # define FR_SAVPC fr_pc
4528 : #else
4529 : # define FR_SAVFP fr_savfp
4530 : # define FR_SAVPC fr_savpc
4531 : #endif
4532 :
4533 : #if defined(SPARC) && (defined(__arch64__) || defined(__sparcv9))
4534 : # define BIAS 2047
4535 : #else
4536 : # define BIAS 0
4537 : #endif
4538 :
4539 : GC_INNER void GC_save_callers(struct callinfo info[NFRAMES])
4540 : {
4541 : struct frame *frame;
4542 : struct frame *fp;
4543 : int nframes = 0;
4544 : # ifdef I386
4545 : /* We assume this is turned on only with gcc as the compiler. */
4546 : asm("movl %%ebp,%0" : "=r"(frame));
4547 : fp = frame;
4548 : # else
4549 : frame = (struct frame *)GC_save_regs_in_stack();
4550 : fp = (struct frame *)((long) frame -> FR_SAVFP + BIAS);
4551 : # endif
4552 :
4553 : for (; (!(fp HOTTER_THAN frame) && !(GC_stackbottom HOTTER_THAN (ptr_t)fp)
4554 : && (nframes < NFRAMES));
4555 : fp = (struct frame *)((long) fp -> FR_SAVFP + BIAS), nframes++) {
4556 : register int i;
4557 :
4558 : info[nframes].ci_pc = fp->FR_SAVPC;
4559 : # if NARGS > 0
4560 : for (i = 0; i < NARGS; i++) {
4561 : info[nframes].ci_arg[i] = ~(fp->fr_arg[i]);
4562 : }
4563 : # endif /* NARGS > 0 */
4564 : }
4565 : if (nframes < NFRAMES) info[nframes].ci_pc = 0;
4566 : }
4567 :
4568 : #endif /* No builtin backtrace */
4569 :
4570 : #endif /* SAVE_CALL_CHAIN */
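The builtin-backtrace path above is a thin layer over the glibc <execinfo.h> interface; a minimal standalone sketch of that interface (not GC-specific):

    #include <execinfo.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_PCS 16

    static void dump_callers(void)
    {
        void *pcs[MAX_PCS];
        int n = backtrace(pcs, MAX_PCS);         /* collect return PCs   */
        char **syms = backtrace_symbols(pcs, n); /* malloc'ed; may be 0  */
        int i;

        if (syms != NULL) {
            for (i = 0; i < n; ++i)
                printf("%s\n", syms[i]);
            free(syms);
        }
    }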
4571 :
4572 : #ifdef NEED_CALLINFO
4573 :
4574 : /* Print info to stderr. We do NOT hold the allocation lock */
4575 : GC_INNER void GC_print_callers(struct callinfo info[NFRAMES])
4576 : {
4577 : int i;
4578 : static int reentry_count = 0;
4579 : GC_bool stop = FALSE;
4580 : DCL_LOCK_STATE;
4581 :
4582 : /* FIXME: This should probably use a different lock, so that we */
4583 : /* become callable with or without the allocation lock. */
4584 : LOCK();
4585 : ++reentry_count;
4586 : UNLOCK();
4587 :
4588 : # if NFRAMES == 1
4589 : GC_err_printf("\tCaller at allocation:\n");
4590 : # else
4591 : GC_err_printf("\tCall chain at allocation:\n");
4592 : # endif
4593 : for (i = 0; i < NFRAMES && !stop; i++) {
4594 : if (info[i].ci_pc == 0) break;
4595 : # if NARGS > 0
4596 : {
4597 : int j;
4598 :
4599 : GC_err_printf("\t\targs: ");
4600 : for (j = 0; j < NARGS; j++) {
4601 : if (j != 0) GC_err_printf(", ");
4602 : GC_err_printf("%d (0x%X)", ~(info[i].ci_arg[j]),
4603 : ~(info[i].ci_arg[j]));
4604 : }
4605 : GC_err_printf("\n");
4606 : }
4607 : # endif
4608 : if (reentry_count > 1) {
4609 : /* We were called during an allocation during */
4610 : /* a previous GC_print_callers call; punt. */
4611 : GC_err_printf("\t\t##PC##= 0x%lx\n", info[i].ci_pc);
4612 : continue;
4613 : }
4614 : {
4615 : # ifdef LINUX
4616 : FILE *pipe;
4617 : # endif
4618 : # if defined(GC_HAVE_BUILTIN_BACKTRACE) \
4619 : && !defined(GC_BACKTRACE_SYMBOLS_BROKEN)
4620 : char **sym_name =
4621 : backtrace_symbols((void **)(&(info[i].ci_pc)), 1);
4622 : char *name = sym_name[0];
4623 : # else
4624 : char buf[40];
4625 : char *name = buf;
4626 : sprintf(buf, "##PC##= 0x%lx", info[i].ci_pc);
4627 : # endif
4628 : # if defined(LINUX) && !defined(SMALL_CONFIG)
4629 : /* Try for a line number. */
4630 : {
4631 : # define EXE_SZ 100
4632 : static char exe_name[EXE_SZ];
4633 : # define CMD_SZ 200
4634 : char cmd_buf[CMD_SZ];
4635 : # define RESULT_SZ 200
4636 : static char result_buf[RESULT_SZ];
4637 : size_t result_len;
4638 : char *old_preload;
4639 : # define PRELOAD_SZ 200
4640 : char preload_buf[PRELOAD_SZ];
4641 : static GC_bool found_exe_name = FALSE;
4642 : static GC_bool will_fail = FALSE;
4643 : int ret_code;
4644 : /* Try to get it via a hairy and expensive scheme. */
4645 : /* First we get the name of the executable: */
4646 : if (will_fail) goto out;
4647 : if (!found_exe_name) {
4648 : ret_code = readlink("/proc/self/exe", exe_name, EXE_SZ);
4649 : if (ret_code < 0 || ret_code >= EXE_SZ
4650 : || exe_name[0] != '/') {
4651 : will_fail = TRUE; /* Don't try again. */
4652 : goto out;
4653 : }
4654 : exe_name[ret_code] = '\0';
4655 : found_exe_name = TRUE;
4656 : }
4657 : /* Then we use popen to start addr2line -f -e <exe> <addr> */
4658 : /* There are faster ways to do this, but hopefully this */
4659 : /* isn't time critical. */
4660 : sprintf(cmd_buf, "/usr/bin/addr2line -f -e %s 0x%lx", exe_name,
4661 : (unsigned long)info[i].ci_pc);
4662 : old_preload = GETENV("LD_PRELOAD");
4663 : if (0 != old_preload) {
4664 : if (strlen (old_preload) >= PRELOAD_SZ) {
4665 : will_fail = TRUE;
4666 : goto out;
4667 : }
4668 : strcpy (preload_buf, old_preload);
4669 : unsetenv ("LD_PRELOAD");
4670 : }
4671 : pipe = popen(cmd_buf, "r");
4672 : if (0 != old_preload
4673 : && 0 != setenv ("LD_PRELOAD", preload_buf, 0)) {
4674 : WARN("Failed to reset LD_PRELOAD\n", 0);
4675 : }
4676 : if (pipe == NULL
4677 : || (result_len = fread(result_buf, 1, RESULT_SZ - 1, pipe))
4678 : == 0) {
4679 : if (pipe != NULL) pclose(pipe);
4680 : will_fail = TRUE;
4681 : goto out;
4682 : }
4683 : if (result_buf[result_len - 1] == '\n') --result_len;
4684 : result_buf[result_len] = 0;
4685 : if (result_buf[0] == '?'
4686 : || (result_buf[result_len-2] == ':'
4687 : && result_buf[result_len-1] == '0')) {
4688 : pclose(pipe);
4689 : goto out;
4690 : }
4691 : /* Get rid of embedded newline, if any. Test for "main" */
4692 : {
4693 : char * nl = strchr(result_buf, '\n');
4694 : if (nl != NULL && nl < result_buf + result_len) {
4695 : *nl = ':';
4696 : }
4697 : if (nl != NULL && strncmp(result_buf, "main", (size_t)(nl - result_buf)) == 0) {
4698 : stop = TRUE;
4699 : }
4700 : }
4701 : if (result_len < RESULT_SZ - 25) {
4702 : /* Add in hex address */
4703 : sprintf(result_buf + result_len, " [0x%lx]",
4704 : (unsigned long)info[i].ci_pc);
4705 : }
4706 : name = result_buf;
4707 : pclose(pipe);
4708 : out:;
4709 : }
4710 : # endif /* LINUX */
4711 : GC_err_printf("\t\t%s\n", name);
4712 : # if defined(GC_HAVE_BUILTIN_BACKTRACE) \
4713 : && !defined(GC_BACKTRACE_SYMBOLS_BROKEN)
4714 : free(sym_name); /* May call GC_free; that's OK */
4715 : # endif
4716 : }
4717 : }
4718 : LOCK();
4719 : --reentry_count;
4720 : UNLOCK();
4721 : }
4722 :
4723 : #endif /* NEED_CALLINFO */
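For reference, the popen pipeline in GC_print_callers expects addr2line -f output of the following shape (the path and address are illustrative only):

    /*   $ /usr/bin/addr2line -f -e ./a.out 0x4005d6                    */
    /*   main                                                           */
    /*   /home/user/app.c:42                                            */
    /* i.e. the function name on the first line and "file:line" on the  */
    /* second; "??" or a trailing ":0" marks an unresolvable address,   */
    /* which the code above detects and skips.                          */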
4724 :
4725 : #if defined(LINUX) && defined(__ELF__) && !defined(SMALL_CONFIG)
4726 : /* Dump /proc/self/maps to GC_stderr, to enable looking up names for */
4727 : /* addresses in FIND_LEAK output. */
4728 0 : void GC_print_address_map(void)
4729 : {
4730 : char *maps;
4731 :
4732 0 : GC_err_printf("---------- Begin address map ----------\n");
4733 0 : maps = GC_get_maps();
4734 0 : GC_err_puts(maps != NULL ? maps : "Failed to get map!\n");
4735 0 : GC_err_printf("---------- End address map ----------\n");
4736 0 : }
4737 : #endif /* LINUX && ELF */
|