Line data Source code
1 : /*
2 : * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3 : * Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved.
4 : * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
5 : * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
6 : *
7 : * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
8 : * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
9 : *
10 : * Permission is hereby granted to use or copy this program
11 : * for any purpose, provided the above notices are retained on all copies.
12 : * Permission to modify the code and to distribute modified code is granted,
13 : * provided the above notices are retained, and a notice that the code was
14 : * modified is included with the above copyright notice.
15 : */
16 :
17 : #include "private/gc_priv.h"
18 :
19 : #if defined(LINUX) && !defined(POWERPC) && !defined(NO_SIGCONTEXT_H)
20 : # include <linux/version.h>
21 : # if (LINUX_VERSION_CODE <= 0x10400)
22 : /* Ugly hack to get struct sigcontext_struct definition. Required */
23 : /* for some early 1.3.X releases. Will hopefully go away soon. */
24 : /* In some later Linux releases, asm/sigcontext.h may have to */
25 : /* be included instead. */
26 : # define __KERNEL__
27 : # include <asm/signal.h>
28 : # undef __KERNEL__
29 : # else
30 : /* Kernels prior to 2.1.1 defined struct sigcontext_struct instead of */
31 : /* struct sigcontext. libc6 (glibc2) uses "struct sigcontext" in */
32 : /* prototypes, so we have to include the top-level sigcontext.h to */
33 : /* make sure the former gets defined to be the latter if appropriate. */
34 : # include <features.h>
35 : # if 2 <= __GLIBC__
36 : # if 2 == __GLIBC__ && 0 == __GLIBC_MINOR__
37 : /* glibc 2.1 no longer has sigcontext.h. But signal.h */
38 : /* has the right declaration for glibc 2.1. */
39 : # include <sigcontext.h>
40 : # endif /* 0 == __GLIBC_MINOR__ */
41 : # else /* __GLIBC__ < 2 */
42 : /* libc5 doesn't have <sigcontext.h>: go directly with the kernel */
43 : /* one. Check LINUX_VERSION_CODE to see which we should reference. */
44 : # include <asm/sigcontext.h>
45 : # endif /* __GLIBC__ < 2 */
46 : # endif
47 : #endif /* LINUX && !POWERPC */
48 :
49 : #if !defined(OS2) && !defined(PCR) && !defined(AMIGA) && !defined(MACOS) \
50 : && !defined(MSWINCE) && !defined(__CC_ARM)
51 : # include <sys/types.h>
52 : # if !defined(MSWIN32)
53 : # include <unistd.h>
54 : # endif
55 : #endif
56 :
57 : #include <stdio.h>
58 : #if defined(MSWINCE) || defined(SN_TARGET_PS3)
59 : # define SIGSEGV 0 /* value is irrelevant */
60 : #else
61 : # include <signal.h>
62 : #endif
63 :
64 : #if defined(UNIX_LIKE) || defined(CYGWIN32) || defined(NACL) \
65 : || defined(SYMBIAN)
66 : # include <fcntl.h>
67 : #endif
68 :
69 : #if defined(LINUX) || defined(LINUX_STACKBOTTOM)
70 : # include <ctype.h>
71 : #endif
72 :
73 : /* Blatantly OS dependent routines, except for those that are related */
74 : /* to dynamic loading. */
75 :
76 : #ifdef AMIGA
77 : # define GC_AMIGA_DEF
78 : # include "extra/AmigaOS.c"
79 : # undef GC_AMIGA_DEF
80 : #endif
81 :
82 : #if defined(MSWIN32) || defined(MSWINCE) || defined(CYGWIN32)
83 : # ifndef WIN32_LEAN_AND_MEAN
84 : # define WIN32_LEAN_AND_MEAN 1
85 : # endif
86 : # define NOSERVICE
87 : # include <windows.h>
88 : /* It's not clear this is completely kosher under Cygwin. But it */
89 : /* allows us to get a working GC_get_stack_base. */
90 : #endif
91 :
92 : #ifdef MACOS
93 : # include <Processes.h>
94 : #endif
95 :
96 : #ifdef IRIX5
97 : # include <sys/uio.h>
98 : # include <malloc.h> /* for locking */
99 : #endif
100 :
101 : #if defined(MMAP_SUPPORTED) || defined(ADD_HEAP_GUARD_PAGES)
102 : # if defined(USE_MUNMAP) && !defined(USE_MMAP)
103 : # error "invalid config - USE_MUNMAP requires USE_MMAP"
104 : # endif
105 : # include <sys/types.h>
106 : # include <sys/mman.h>
107 : # include <sys/stat.h>
108 : # include <errno.h>
109 : #endif
110 :
111 : #ifdef DARWIN
112 : /* for get_etext and friends */
113 : # include <mach-o/getsect.h>
114 : #endif
115 :
116 : #ifdef DJGPP
117 : /* Apparently necessary for djgpp 2.01. May cause problems with */
118 : /* other versions. */
119 : typedef long unsigned int caddr_t;
120 : #endif
121 :
122 : #ifdef PCR
123 : # include "il/PCR_IL.h"
124 : # include "th/PCR_ThCtl.h"
125 : # include "mm/PCR_MM.h"
126 : #endif
127 :
128 : #if !defined(NO_EXECUTE_PERMISSION)
129 : STATIC GC_bool GC_pages_executable = TRUE;
130 : #else
131 : STATIC GC_bool GC_pages_executable = FALSE;
132 : #endif
133 : #define IGNORE_PAGES_EXECUTABLE 1
134 : /* Undefined where GC_pages_executable is actually used. */
135 :
136 : #ifdef NEED_PROC_MAPS
137 : /* We need to parse /proc/self/maps, either to find dynamic libraries */
138 : /* or to find the register backing store base (IA64). Do it once */
139 : /* here. */
140 :
141 : #define READ read
142 :
143 : /* Repeatedly perform a read call until the buffer is filled or */
144 : /* we encounter EOF. */
145 0 : STATIC ssize_t GC_repeat_read(int fd, char *buf, size_t count)
146 : {
147 0 : size_t num_read = 0;
148 : ssize_t result;
149 :
150 : ASSERT_CANCEL_DISABLED();
151 0 : while (num_read < count) {
152 0 : result = READ(fd, buf + num_read, count - num_read);
153 0 : if (result < 0) return result;
154 0 : if (result == 0) break;
155 0 : num_read += result;
156 : }
157 0 : return num_read;
158 : }
159 :
160 : #ifdef THREADS
161 : /* Determine the length of a file by incrementally reading it into a */
162 : /* buffer. This would be silly to use on a file supporting lseek, but */
163 : /* Linux /proc files usually do not. */
164 0 : STATIC size_t GC_get_file_len(int f)
165 : {
166 0 : size_t total = 0;
167 : ssize_t result;
168 : # define GET_FILE_LEN_BUF_SZ 500
169 : char buf[GET_FILE_LEN_BUF_SZ];
170 :
171 : do {
172 0 : result = read(f, buf, GET_FILE_LEN_BUF_SZ);
173 0 : if (result == -1) return 0;
174 0 : total += result;
175 0 : } while (result > 0);
176 0 : return total;
177 : }
178 :
179 0 : STATIC size_t GC_get_maps_len(void)
180 : {
181 0 : int f = open("/proc/self/maps", O_RDONLY);
182 : size_t result;
183 0 : if (f < 0) return 0; /* treat missing file as empty */
184 0 : result = GC_get_file_len(f);
185 0 : close(f);
186 0 : return result;
187 : }
188 : #endif /* THREADS */
189 :
190 : /* Copy the contents of /proc/self/maps to a buffer in our address */
191 : /* space. Return the address of the buffer, or zero on failure. */
192 : /* This code could be simplified if we could determine its size ahead */
193 : /* of time. */
194 0 : GC_INNER char * GC_get_maps(void)
195 : {
196 : int f;
197 : ssize_t result;
198 : static char *maps_buf = NULL;
199 : static size_t maps_buf_sz = 1;
200 0 : size_t maps_size, old_maps_size = 0;
201 :
202 : /* The buffer is essentially static, so there must be a single client. */
203 : GC_ASSERT(I_HOLD_LOCK());
204 :
205 : /* Note that in the presence of threads, the maps file can */
206 : /* essentially shrink asynchronously and unexpectedly as */
207 : /* threads that we already think of as dead release their */
208 : /* stacks. And there is no easy way to read the entire */
209 : /* file atomically. This is arguably a misfeature of the */
210 : /* /proc/.../maps interface. */
211 :
212 : /* Since we don't believe the file can grow */
213 : /* asynchronously, it should suffice to first determine */
214 : /* the size (using lseek or read), and then to reread the */
215 : /* file. If the size is inconsistent we have to retry. */
216 : /* This only matters with threads enabled, and if we use */
217 : /* this to locate roots (not the default). */
218 :
219 : # ifdef THREADS
220 : /* Determine the initial size of /proc/self/maps. */
221 : /* Note that lseek doesn't work, at least as of 2.6.15. */
222 0 : maps_size = GC_get_maps_len();
223 0 : if (0 == maps_size) return 0;
224 : # else
225 : maps_size = 4000; /* Guess */
226 : # endif
227 :
228 : /* Read /proc/self/maps, growing maps_buf as necessary. */
229 : /* Note that we may not allocate conventionally, and */
230 : /* thus can't use stdio. */
231 : do {
232 0 : while (maps_size >= maps_buf_sz) {
233 : /* Grow only by powers of 2, since we leak "too small" buffers. */
234 0 : while (maps_size >= maps_buf_sz) maps_buf_sz *= 2;
235 0 : maps_buf = GC_scratch_alloc(maps_buf_sz);
236 : # ifdef THREADS
237 : /* Recompute initial length, since we allocated. */
238 : /* This can only happen a few times per program */
239 : /* execution. */
240 0 : maps_size = GC_get_maps_len();
241 0 : if (0 == maps_size) return 0;
242 : # endif
243 0 : if (maps_buf == 0) return 0;
244 : }
245 : GC_ASSERT(maps_buf_sz >= maps_size + 1);
246 0 : f = open("/proc/self/maps", O_RDONLY);
247 0 : if (-1 == f) return 0;
248 : # ifdef THREADS
249 0 : old_maps_size = maps_size;
250 : # endif
251 0 : maps_size = 0;
252 : do {
253 0 : result = GC_repeat_read(f, maps_buf, maps_buf_sz-1);
254 0 : if (result <= 0)
255 0 : break;
256 0 : maps_size += result;
257 0 : } while ((size_t)result == maps_buf_sz-1);
258 0 : close(f);
259 0 : if (result <= 0)
260 0 : return 0;
261 : # ifdef THREADS
262 0 : if (maps_size > old_maps_size) {
263 0 : ABORT_ARG2("Unexpected asynchronous /proc/self/maps growth "
264 : "(unregistered thread?)", " from %lu to %lu",
265 : (unsigned long)old_maps_size,
266 : (unsigned long)maps_size);
267 : }
268 : # endif
269 0 : } while (maps_size >= maps_buf_sz || maps_size < old_maps_size);
270 : /* In the single-threaded case, the second clause is false. */
271 0 : maps_buf[maps_size] = '\0';
272 :
273 : /* Return the buffer. */
274 0 : return maps_buf;
275 : }
276 :
277 : /*
278 : * GC_parse_map_entry parses an entry from /proc/self/maps so we can
279 : * locate all writable data segments that belong to shared libraries.
280 : * The format of one of these entries and the fields we care about
281 : * is as follows:
282 : * XXXXXXXX-XXXXXXXX r-xp 00000000 30:05 260537 name of mapping...\n
283 : * ^^^^^^^^ ^^^^^^^^ ^^^^ ^^
284 : * start end prot maj_dev
285 : *
286 : * Note that since about August 2003, the columns no longer have
287 : * fixed offsets on 64-bit kernels. Hence we no longer rely on fixed offsets
288 : * anywhere, which is safer anyway.
289 : */
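/* A purely illustrative sketch (hypothetical helper, not part of the */
/* collector): one such line can be decomposed with sscanf. The real */
/* parser below uses strtoul instead, since GC_get_maps may run with */
/* allocation redirected and therefore must avoid stdio. */
#if 0
#include <stdio.h>
static int parse_maps_line_sketch(const char *line)
{
    unsigned long start, end, offset;
    unsigned int maj, min;
    char prot[5]; /* e.g. "r-xp" */
    /* "08048000-0804c000 r-xp 00000000 30:05 260537 /bin/cat" */
    return sscanf(line, "%lx-%lx %4s %lx %x:%x",
                  &start, &end, prot, &offset, &maj, &min) == 6;
}
#endif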
290 :
291 : /* Assign various fields of the first line in buf_ptr to (*start), */
292 : /* (*end), (*prot), (*maj_dev) and (*mapping_name). mapping_name may */
293 : /* be NULL. (*prot) and (*mapping_name) are assigned pointers into the */
294 : /* original buffer. */
295 : #if (defined(DYNAMIC_LOADING) && defined(USE_PROC_FOR_LIBRARIES)) \
296 : || defined(IA64) || defined(INCLUDE_LINUX_THREAD_DESCR) \
297 : || defined(REDIRECT_MALLOC)
298 : GC_INNER char *GC_parse_map_entry(char *buf_ptr, ptr_t *start, ptr_t *end,
299 : char **prot, unsigned int *maj_dev,
300 : char **mapping_name)
301 : {
302 : char *start_start, *end_start, *maj_dev_start;
303 : char *p;
304 : char *endp;
305 :
306 : if (buf_ptr == NULL || *buf_ptr == '\0') {
307 : return NULL;
308 : }
309 :
310 : p = buf_ptr;
311 : while (isspace(*p)) ++p;
312 : start_start = p;
313 : GC_ASSERT(isxdigit(*start_start));
314 : *start = (ptr_t)strtoul(start_start, &endp, 16); p = endp;
315 : GC_ASSERT(*p=='-');
316 :
317 : ++p;
318 : end_start = p;
319 : GC_ASSERT(isxdigit(*end_start));
320 : *end = (ptr_t)strtoul(end_start, &endp, 16); p = endp;
321 : GC_ASSERT(isspace(*p));
322 :
323 : while (isspace(*p)) ++p;
324 : GC_ASSERT(*p == 'r' || *p == '-');
325 : *prot = p;
326 : /* Skip past protection field to offset field */
327 : while (!isspace(*p)) ++p; while (isspace(*p)) ++p;
328 : GC_ASSERT(isxdigit(*p));
329 : /* Skip past offset field, which we ignore */
330 : while (!isspace(*p)) ++p; while (isspace(*p)) ++p;
331 : maj_dev_start = p;
332 : GC_ASSERT(isxdigit(*maj_dev_start));
333 : *maj_dev = strtoul(maj_dev_start, NULL, 16);
334 :
335 : if (mapping_name == 0) {
336 : while (*p && *p++ != '\n');
337 : } else {
338 : while (*p && *p != '\n' && *p != '/' && *p != '[') p++;
339 : *mapping_name = p;
340 : while (*p && *p++ != '\n');
341 : }
342 : return p;
343 : }
344 : #endif /* REDIRECT_MALLOC || DYNAMIC_LOADING || IA64 || ... */
345 :
346 : #if defined(IA64) || defined(INCLUDE_LINUX_THREAD_DESCR)
347 : /* Try to read the backing store base from /proc/self/maps. */
348 : /* Return the bounds of the writable mapping with a 0 major device, */
349 : /* which includes the address passed as data. */
350 : /* Return FALSE if there is no such mapping. */
351 : GC_INNER GC_bool GC_enclosing_mapping(ptr_t addr, ptr_t *startp,
352 : ptr_t *endp)
353 : {
354 : char *prot;
355 : ptr_t my_start, my_end;
356 : unsigned int maj_dev;
357 : char *maps = GC_get_maps();
358 : char *buf_ptr = maps;
359 :
360 : if (0 == maps) return(FALSE);
361 : for (;;) {
362 : buf_ptr = GC_parse_map_entry(buf_ptr, &my_start, &my_end,
363 : &prot, &maj_dev, 0);
364 :
365 : if (buf_ptr == NULL) return FALSE;
366 : if (prot[1] == 'w' && maj_dev == 0) {
367 : if ((word)my_end > (word)addr && (word)my_start <= (word)addr) {
368 : *startp = my_start;
369 : *endp = my_end;
370 : return TRUE;
371 : }
372 : }
373 : }
374 : return FALSE;
375 : }
376 : #endif /* IA64 || INCLUDE_LINUX_THREAD_DESCR */
377 :
378 : #if defined(REDIRECT_MALLOC)
379 : /* Find the text (code) mapping for the library whose name, after */
380 : /* stripping the directory part, starts with nm. */
381 : GC_INNER GC_bool GC_text_mapping(char *nm, ptr_t *startp, ptr_t *endp)
382 : {
383 : size_t nm_len = strlen(nm);
384 : char *prot;
385 : char *map_path;
386 : ptr_t my_start, my_end;
387 : unsigned int maj_dev;
388 : char *maps = GC_get_maps();
389 : char *buf_ptr = maps;
390 :
391 : if (0 == maps) return(FALSE);
392 : for (;;) {
393 : buf_ptr = GC_parse_map_entry(buf_ptr, &my_start, &my_end,
394 : &prot, &maj_dev, &map_path);
395 :
396 : if (buf_ptr == NULL) return FALSE;
397 : if (prot[0] == 'r' && prot[1] == '-' && prot[2] == 'x') {
398 : char *p = map_path;
399 : /* Set p to point just past last slash, if any. */
400 : while (*p != '\0' && *p != '\n' && *p != ' ' && *p != '\t') ++p;
401 : while (*p != '/' && (word)p >= (word)map_path) --p;
402 : ++p;
403 : if (strncmp(nm, p, nm_len) == 0) {
404 : *startp = my_start;
405 : *endp = my_end;
406 : return TRUE;
407 : }
408 : }
409 : }
410 : return FALSE;
411 : }
412 : #endif /* REDIRECT_MALLOC */
413 :
414 : #ifdef IA64
415 : static ptr_t backing_store_base_from_proc(void)
416 : {
417 : ptr_t my_start, my_end;
418 : if (!GC_enclosing_mapping(GC_save_regs_in_stack(), &my_start, &my_end)) {
419 : GC_COND_LOG_PRINTF("Failed to find backing store base from /proc\n");
420 : return 0;
421 : }
422 : return my_start;
423 : }
424 : #endif
425 :
426 : #endif /* NEED_PROC_MAPS */
427 :
428 : #if defined(SEARCH_FOR_DATA_START)
429 : /* The I386 case can be handled without a search. The Alpha case */
430 : /* used to be handled differently as well, but the rules changed */
431 : /* for recent Linux versions. This seems to be the easiest way to */
432 : /* cover all versions. */
433 :
434 : # if defined(LINUX) || defined(HURD)
435 : /* Some Linux distributions arrange to define __data_start. Some */
436 : /* define data_start as a weak symbol. The latter is technically */
437 : /* broken, since the user program may define data_start, in which */
438 : /* case we lose. Nonetheless, we try both, preferring __data_start.*/
439 : /* We assume gcc-compatible pragmas. */
440 : # pragma weak __data_start
441 : # pragma weak data_start
442 : extern int __data_start[], data_start[];
443 : # ifdef PLATFORM_ANDROID
444 : # pragma weak _etext
445 : # pragma weak __dso_handle
446 : extern int _etext[], __dso_handle[];
447 : # endif
448 : # endif /* LINUX */
449 : extern int _end[];
450 :
451 : ptr_t GC_data_start = NULL;
452 :
453 : ptr_t GC_find_limit(ptr_t, GC_bool);
454 :
455 163 : GC_INNER void GC_init_linux_data_start(void)
456 : {
457 : # if (defined(LINUX) || defined(HURD)) && !defined(IGNORE_PROG_DATA_START)
458 : /* Try the easy approaches first: */
459 : # ifdef PLATFORM_ANDROID
460 : /* Workaround for "gold" (default) linker (as of Android NDK r9b). */
461 : if ((word)__data_start < (word)_etext
462 : && (word)_etext < (word)__dso_handle) {
463 : GC_data_start = (ptr_t)(__dso_handle);
464 : # ifdef DEBUG_ADD_DEL_ROOTS
465 : GC_log_printf(
466 : "__data_start is wrong; using __dso_handle as data start\n");
467 : # endif
468 : GC_ASSERT((word)GC_data_start <= (word)_end);
469 : return;
470 : }
471 : # endif
472 163 : if ((ptr_t)__data_start != 0) {
473 163 : GC_data_start = (ptr_t)(__data_start);
474 : GC_ASSERT((word)GC_data_start <= (word)_end);
475 163 : return;
476 : }
477 0 : if ((ptr_t)data_start != 0) {
478 0 : GC_data_start = (ptr_t)(data_start);
479 : GC_ASSERT((word)GC_data_start <= (word)_end);
480 0 : return;
481 : }
482 : # ifdef DEBUG_ADD_DEL_ROOTS
483 : GC_log_printf("__data_start not provided\n");
484 : # endif
485 : # endif /* LINUX */
486 :
487 0 : if (GC_no_dls) {
488 : /* Not needed; this also avoids the SIGSEGV caused by */
489 : /* GC_find_limit, which complicates debugging. */
490 0 : GC_data_start = (ptr_t)_end; /* set data root size to 0 */
491 0 : return;
492 : }
493 :
494 0 : GC_data_start = GC_find_limit((ptr_t)(_end), FALSE);
495 : }
496 : #endif /* SEARCH_FOR_DATA_START */
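/* Aside: the weak-symbol fallback used above can be demonstrated in */
/* isolation. A minimal sketch (hypothetical symbol name), assuming a */
/* gcc-compatible toolchain: an undefined weak symbol resolves to */
/* address 0, so its presence can be tested at run time. */
#if 0
#pragma weak possibly_missing_sym
extern int possibly_missing_sym[];
static int have_sym(void)
{
    return possibly_missing_sym != 0; /* 0 iff the symbol was undefined */
}
#endif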
497 :
498 : #ifdef ECOS
499 :
500 : # ifndef ECOS_GC_MEMORY_SIZE
501 : # define ECOS_GC_MEMORY_SIZE (448 * 1024)
502 : # endif /* ECOS_GC_MEMORY_SIZE */
503 :
504 : /* FIXME: This is a simple way of allocating memory which is */
505 : /* compatible with ECOS early releases. Later releases use a more */
506 : /* sophisticated means of allocating memory than this simple static */
507 : /* allocator, but this method is at least bound to work. */
508 : static char ecos_gc_memory[ECOS_GC_MEMORY_SIZE];
509 : static char *ecos_gc_brk = ecos_gc_memory;
510 :
511 : static void *tiny_sbrk(ptrdiff_t increment)
512 : {
513 : void *p = ecos_gc_brk;
514 : ecos_gc_brk += increment;
515 : if ((word)ecos_gc_brk > (word)(ecos_gc_memory + sizeof(ecos_gc_memory))) {
516 : ecos_gc_brk -= increment;
517 : return NULL;
518 : }
519 : return p;
520 : }
521 : # define sbrk tiny_sbrk
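/* Illustrative only (hypothetical driver code, not compiled): with */
/* the static pool above, successive sbrk calls carve space until the */
/* pool is exhausted, after which NULL is returned. */
#if 0
void *p = sbrk(4096); /* succeeds while the pool has room */
void *q = sbrk(ECOS_GC_MEMORY_SIZE); /* exceeds what remains => NULL */
#endif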
522 : #endif /* ECOS */
523 :
524 : #if defined(NETBSD) && defined(__ELF__)
525 : ptr_t GC_data_start = NULL;
526 : ptr_t GC_find_limit(ptr_t, GC_bool);
527 :
528 : extern char **environ;
529 :
530 : GC_INNER void GC_init_netbsd_elf(void)
531 : {
532 : /* On some versions this may need to be _environ (with the */
533 : /* leading underscore) instead. */
534 : GC_data_start = GC_find_limit((ptr_t)&environ, FALSE);
535 : }
536 : #endif /* NETBSD */
537 :
538 : #ifdef OPENBSD
539 : static struct sigaction old_segv_act;
540 : STATIC sigjmp_buf GC_jmp_buf_openbsd;
541 :
542 : # ifdef THREADS
543 : # include <sys/syscall.h>
544 : extern sigset_t __syscall(quad_t, ...);
545 : # endif
546 :
547 : /* Don't use GC_find_limit() because siglongjmp() outside of the */
548 : /* signal handler bypasses our userland pthreads lib, leaving */
549 : /* SIGSEGV and SIGPROF masked. Instead, use this custom one that */
550 : /* works around the issues. */
551 :
552 : STATIC void GC_fault_handler_openbsd(int sig GC_ATTR_UNUSED)
553 : {
554 : siglongjmp(GC_jmp_buf_openbsd, 1);
555 : }
556 :
557 : /* Return the first non-addressable location > p or bound. */
558 : /* Requires the allocation lock. */
559 : STATIC ptr_t GC_find_limit_openbsd(ptr_t p, ptr_t bound)
560 : {
561 : static volatile ptr_t result;
562 : /* Safer if static, since otherwise it may not be */
563 : /* preserved across the longjmp. Can safely be */
564 : /* static since it's only called with the */
565 : /* allocation lock held. */
566 :
567 : struct sigaction act;
568 : size_t pgsz = (size_t)sysconf(_SC_PAGESIZE);
569 : GC_ASSERT(I_HOLD_LOCK());
570 :
571 : act.sa_handler = GC_fault_handler_openbsd;
572 : sigemptyset(&act.sa_mask);
573 : act.sa_flags = SA_NODEFER | SA_RESTART;
574 : /* act.sa_restorer is deprecated and should not be initialized. */
575 : sigaction(SIGSEGV, &act, &old_segv_act);
576 :
577 : if (sigsetjmp(GC_jmp_buf_openbsd, 1) == 0) {
578 : result = (ptr_t)((word)p & ~(pgsz-1));
579 : for (;;) {
580 : result += pgsz;
581 : if ((word)result >= (word)bound) {
582 : result = bound;
583 : break;
584 : }
585 : GC_noop1((word)(*result));
586 : }
587 : }
588 :
589 : # ifdef THREADS
590 : /* Due to the siglongjmp we need to manually unmask SIGPROF. */
591 : __syscall(SYS_sigprocmask, SIG_UNBLOCK, sigmask(SIGPROF));
592 : # endif
593 :
594 : sigaction(SIGSEGV, &old_segv_act, 0);
595 : return(result);
596 : }
597 :
598 : /* Return first addressable location > p or bound. */
599 : /* Requires the allocation lock. */
600 : STATIC ptr_t GC_skip_hole_openbsd(ptr_t p, ptr_t bound)
601 : {
602 : static volatile ptr_t result;
603 : static volatile int firstpass;
604 :
605 : struct sigaction act;
606 : size_t pgsz = (size_t)sysconf(_SC_PAGESIZE);
607 : GC_ASSERT(I_HOLD_LOCK());
608 :
609 : act.sa_handler = GC_fault_handler_openbsd;
610 : sigemptyset(&act.sa_mask);
611 : act.sa_flags = SA_NODEFER | SA_RESTART;
612 : /* act.sa_restorer is deprecated and should not be initialized. */
613 : sigaction(SIGSEGV, &act, &old_segv_act);
614 :
615 : firstpass = 1;
616 : result = (ptr_t)((word)p & ~(pgsz-1));
617 : if (sigsetjmp(GC_jmp_buf_openbsd, 1) != 0 || firstpass) {
618 : firstpass = 0;
619 : result += pgsz;
620 : if ((word)result >= (word)bound) {
621 : result = bound;
622 : } else {
623 : GC_noop1((word)(*result));
624 : }
625 : }
626 :
627 : sigaction(SIGSEGV, &old_segv_act, 0);
628 : return(result);
629 : }
630 : #endif /* OPENBSD */
631 :
632 : # ifdef OS2
633 :
634 : # include <stddef.h>
635 :
636 : # if !defined(__IBMC__) && !defined(__WATCOMC__) /* e.g. EMX */
637 :
638 : struct exe_hdr {
639 : unsigned short magic_number;
640 : unsigned short padding[29];
641 : long new_exe_offset;
642 : };
643 :
644 : #define E_MAGIC(x) (x).magic_number
645 : #define EMAGIC 0x5A4D
646 : #define E_LFANEW(x) (x).new_exe_offset
647 :
648 : struct e32_exe {
649 : unsigned char magic_number[2];
650 : unsigned char byte_order;
651 : unsigned char word_order;
652 : unsigned long exe_format_level;
653 : unsigned short cpu;
654 : unsigned short os;
655 : unsigned long padding1[13];
656 : unsigned long object_table_offset;
657 : unsigned long object_count;
658 : unsigned long padding2[31];
659 : };
660 :
661 : #define E32_MAGIC1(x) (x).magic_number[0]
662 : #define E32MAGIC1 'L'
663 : #define E32_MAGIC2(x) (x).magic_number[1]
664 : #define E32MAGIC2 'X'
665 : #define E32_BORDER(x) (x).byte_order
666 : #define E32LEBO 0
667 : #define E32_WORDER(x) (x).word_order
668 : #define E32LEWO 0
669 : #define E32_CPU(x) (x).cpu
670 : #define E32CPU286 1
671 : #define E32_OBJTAB(x) (x).object_table_offset
672 : #define E32_OBJCNT(x) (x).object_count
673 :
674 : struct o32_obj {
675 : unsigned long size;
676 : unsigned long base;
677 : unsigned long flags;
678 : unsigned long pagemap;
679 : unsigned long mapsize;
680 : unsigned long reserved;
681 : };
682 :
683 : #define O32_FLAGS(x) (x).flags
684 : #define OBJREAD 0x0001L
685 : #define OBJWRITE 0x0002L
686 : #define OBJINVALID 0x0080L
687 : #define O32_SIZE(x) (x).size
688 : #define O32_BASE(x) (x).base
689 :
690 : # else /* IBM's compiler */
691 :
692 : /* A kludge to get around what appears to be a header file bug */
693 : # ifndef WORD
694 : # define WORD unsigned short
695 : # endif
696 : # ifndef DWORD
697 : # define DWORD unsigned long
698 : # endif
699 :
700 : # define EXE386 1
701 : # include <newexe.h>
702 : # include <exe386.h>
703 :
704 : # endif /* __IBMC__ */
705 :
706 : # define INCL_DOSEXCEPTIONS
707 : # define INCL_DOSPROCESS
708 : # define INCL_DOSERRORS
709 : # define INCL_DOSMODULEMGR
710 : # define INCL_DOSMEMMGR
711 : # include <os2.h>
712 :
713 : # endif /* OS/2 */
714 :
715 : /* Find the page size */
716 : GC_INNER word GC_page_size = 0;
717 :
718 : #if defined(MSWIN32) || defined(MSWINCE) || defined(CYGWIN32)
719 : # ifndef VER_PLATFORM_WIN32_CE
720 : # define VER_PLATFORM_WIN32_CE 3
721 : # endif
722 :
723 : # if defined(MSWINCE) && defined(THREADS)
724 : GC_INNER GC_bool GC_dont_query_stack_min = FALSE;
725 : # endif
726 :
727 : GC_INNER SYSTEM_INFO GC_sysinfo;
728 :
729 : GC_INNER void GC_setpagesize(void)
730 : {
731 : GetSystemInfo(&GC_sysinfo);
732 : GC_page_size = GC_sysinfo.dwPageSize;
733 : # if defined(MSWINCE) && !defined(_WIN32_WCE_EMULATION)
734 : {
735 : OSVERSIONINFO verInfo;
736 : /* Check the current WinCE version. */
737 : verInfo.dwOSVersionInfoSize = sizeof(OSVERSIONINFO);
738 : if (!GetVersionEx(&verInfo))
739 : ABORT("GetVersionEx failed");
740 : if (verInfo.dwPlatformId == VER_PLATFORM_WIN32_CE &&
741 : verInfo.dwMajorVersion < 6) {
742 : /* Only the first 32 MB of address space belongs to the */
743 : /* current process (unless WinCE 6.0+ or emulation). */
744 : GC_sysinfo.lpMaximumApplicationAddress = (LPVOID)((word)32 << 20);
745 : # ifdef THREADS
746 : /* On some old WinCE versions, it's observed that */
747 : /* VirtualQuery calls don't work properly when used to */
748 : /* get the current thread's minimum committed stack address. */
749 : if (verInfo.dwMajorVersion < 5)
750 : GC_dont_query_stack_min = TRUE;
751 : # endif
752 : }
753 : }
754 : # endif
755 : }
756 :
757 : # ifndef CYGWIN32
758 : # define is_writable(prot) ((prot) == PAGE_READWRITE \
759 : || (prot) == PAGE_WRITECOPY \
760 : || (prot) == PAGE_EXECUTE_READWRITE \
761 : || (prot) == PAGE_EXECUTE_WRITECOPY)
762 : /* Return the number of bytes that are writable starting at p. */
763 : /* The pointer p is assumed to be page aligned. */
764 : /* If base is not 0, *base becomes the beginning of the */
765 : /* allocation region containing p. */
766 : STATIC word GC_get_writable_length(ptr_t p, ptr_t *base)
767 : {
768 : MEMORY_BASIC_INFORMATION buf;
769 : word result;
770 : word protect;
771 :
772 : result = VirtualQuery(p, &buf, sizeof(buf));
773 : if (result != sizeof(buf)) ABORT("Weird VirtualQuery result");
774 : if (base != 0) *base = (ptr_t)(buf.AllocationBase);
775 : protect = (buf.Protect & ~(PAGE_GUARD | PAGE_NOCACHE));
776 : if (!is_writable(protect)) {
777 : return(0);
778 : }
779 : if (buf.State != MEM_COMMIT) return(0);
780 : return(buf.RegionSize);
781 : }
782 :
783 : GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *sb)
784 : {
785 : ptr_t trunc_sp = (ptr_t)((word)GC_approx_sp() & ~(GC_page_size - 1));
786 : /* FIXME: This won't work if called from deeply recursive */
787 : /* client code (and the committed stack space has grown). */
788 : word size = GC_get_writable_length(trunc_sp, 0);
789 : GC_ASSERT(size != 0);
790 : sb -> mem_base = trunc_sp + size;
791 : return GC_SUCCESS;
792 : }
793 : # else /* CYGWIN32 */
794 : /* An alternate version for Cygwin (adapted from Dave Korn's */
795 : /* gcc version of boehm-gc). */
796 : GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *sb)
797 : {
798 : # ifdef X86_64
799 : sb -> mem_base = ((NT_TIB*)NtCurrentTeb())->StackBase;
800 : # else
801 : void * _tlsbase;
802 :
803 : __asm__ ("movl %%fs:4, %0"
804 : : "=r" (_tlsbase));
805 : sb -> mem_base = _tlsbase;
806 : # endif
807 : return GC_SUCCESS;
808 : }
809 : # endif /* CYGWIN32 */
810 : # define HAVE_GET_STACK_BASE
811 :
812 : #else /* !MSWIN32 */
813 163 : GC_INNER void GC_setpagesize(void)
814 : {
815 : # if defined(MPROTECT_VDB) || defined(PROC_VDB) || defined(USE_MMAP)
816 163 : GC_page_size = GETPAGESIZE();
817 163 : if (!GC_page_size) ABORT("getpagesize failed");
818 : # else
819 : /* It's acceptable to fake it. */
820 : GC_page_size = HBLKSIZE;
821 : # endif
822 163 : }
823 : #endif /* !MSWIN32 */
824 :
825 : #ifdef BEOS
826 : # include <kernel/OS.h>
827 :
828 : GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *sb)
829 : {
830 : thread_info th;
831 : get_thread_info(find_thread(NULL),&th);
832 : sb->mem_base = th.stack_end;
833 : return GC_SUCCESS;
834 : }
835 : # define HAVE_GET_STACK_BASE
836 : #endif /* BEOS */
837 :
838 : #ifdef OS2
839 : GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *sb)
840 : {
841 : PTIB ptib; /* thread information block */
842 : PPIB ppib;
843 : if (DosGetInfoBlocks(&ptib, &ppib) != NO_ERROR) {
844 : ABORT("DosGetInfoBlocks failed");
845 : }
846 : sb->mem_base = ptib->tib_pstacklimit;
847 : return GC_SUCCESS;
848 : }
849 : # define HAVE_GET_STACK_BASE
850 : #endif /* OS2 */
851 :
852 : # ifdef AMIGA
853 : # define GC_AMIGA_SB
854 : # include "extra/AmigaOS.c"
855 : # undef GC_AMIGA_SB
856 : # endif /* AMIGA */
857 :
858 : # if defined(NEED_FIND_LIMIT) || defined(UNIX_LIKE)
859 :
860 : typedef void (*GC_fault_handler_t)(int);
861 :
862 : # if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1) \
863 : || defined(HURD) || defined(FREEBSD) || defined(NETBSD)
864 : static struct sigaction old_segv_act;
865 : # if defined(_sigargs) /* !Irix6.x */ \
866 : || defined(HURD) || defined(NETBSD) || defined(FREEBSD)
867 : static struct sigaction old_bus_act;
868 : # endif
869 : # else
870 : static GC_fault_handler_t old_segv_handler;
871 : # ifdef SIGBUS
872 : static GC_fault_handler_t old_bus_handler;
873 : # endif
874 : # endif
875 :
876 0 : GC_INNER void GC_set_and_save_fault_handler(GC_fault_handler_t h)
877 : {
878 : # if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1) \
879 : || defined(HURD) || defined(FREEBSD) || defined(NETBSD)
880 : struct sigaction act;
881 :
882 : act.sa_handler = h;
883 : # ifdef SIGACTION_FLAGS_NODEFER_HACK
884 : /* Was necessary for Solaris 2.3 and very temporary */
885 : /* NetBSD bugs. */
886 : act.sa_flags = SA_RESTART | SA_NODEFER;
887 : # else
888 : act.sa_flags = SA_RESTART;
889 : # endif
890 :
891 : (void) sigemptyset(&act.sa_mask);
892 : /* act.sa_restorer is deprecated and should not be initialized. */
893 : # ifdef GC_IRIX_THREADS
894 : /* Older versions have a bug related to retrieving and */
895 : /* setting a handler at the same time. */
896 : (void) sigaction(SIGSEGV, 0, &old_segv_act);
897 : (void) sigaction(SIGSEGV, &act, 0);
898 : # else
899 : (void) sigaction(SIGSEGV, &act, &old_segv_act);
900 : # if defined(IRIX5) && defined(_sigargs) /* Irix 5.x, not 6.x */ \
901 : || defined(HURD) || defined(NETBSD) || defined(FREEBSD)
902 : /* Under Irix 5.x or HP/UX, we may get SIGBUS. */
903 : /* Pthreads doesn't exist under Irix 5.x, so we */
904 : /* don't have to worry in the threads case. */
905 : (void) sigaction(SIGBUS, &act, &old_bus_act);
906 : # endif
907 : # endif /* !GC_IRIX_THREADS */
908 : # else
909 0 : old_segv_handler = signal(SIGSEGV, h);
910 : # ifdef SIGBUS
911 0 : old_bus_handler = signal(SIGBUS, h);
912 : # endif
913 : # endif
914 0 : }
915 : # endif /* NEED_FIND_LIMIT || UNIX_LIKE */
916 :
917 : # if defined(NEED_FIND_LIMIT) \
918 : || (defined(USE_PROC_FOR_LIBRARIES) && defined(THREADS))
919 : /* Some tools to implement HEURISTIC2 */
920 : # define MIN_PAGE_SIZE 256 /* Smallest conceivable page size, bytes */
921 :
922 0 : STATIC void GC_fault_handler(int sig GC_ATTR_UNUSED)
923 : {
924 0 : LONGJMP(GC_jmp_buf, 1);
925 : }
926 :
927 0 : GC_INNER void GC_setup_temporary_fault_handler(void)
928 : {
929 : /* Handler is process-wide, so this should only happen in */
930 : /* one thread at a time. */
931 : GC_ASSERT(I_HOLD_LOCK());
932 0 : GC_set_and_save_fault_handler(GC_fault_handler);
933 0 : }
934 :
935 0 : GC_INNER void GC_reset_fault_handler(void)
936 : {
937 : # if defined(SUNOS5SIGS) || defined(IRIX5) || defined(OSF1) \
938 : || defined(HURD) || defined(FREEBSD) || defined(NETBSD)
939 : (void) sigaction(SIGSEGV, &old_segv_act, 0);
940 : # if defined(IRIX5) && defined(_sigargs) /* Irix 5.x, not 6.x */ \
941 : || defined(HURD) || defined(NETBSD)
942 : (void) sigaction(SIGBUS, &old_bus_act, 0);
943 : # endif
944 : # else
945 0 : (void) signal(SIGSEGV, old_segv_handler);
946 : # ifdef SIGBUS
947 0 : (void) signal(SIGBUS, old_bus_handler);
948 : # endif
949 : # endif
950 0 : }
951 :
952 : /* Return the first non-addressable location > p (up) or */
953 : /* the smallest location q s.t. [q,p) is addressable (!up). */
954 : /* We assume that p (up) or p-1 (!up) is addressable. */
955 : /* Requires allocation lock. */
956 0 : STATIC ptr_t GC_find_limit_with_bound(ptr_t p, GC_bool up, ptr_t bound)
957 : {
958 : static volatile ptr_t result;
959 : /* Safer if static, since otherwise it may not be */
960 : /* preserved across the longjmp. Can safely be */
961 : /* static since it's only called with the */
962 : /* allocation lock held. */
963 :
964 : GC_ASSERT(I_HOLD_LOCK());
965 0 : GC_setup_temporary_fault_handler();
966 0 : if (SETJMP(GC_jmp_buf) == 0) {
967 0 : result = (ptr_t)(((word)(p))
968 : & ~(MIN_PAGE_SIZE-1));
969 : for (;;) {
970 0 : if (up) {
971 0 : result += MIN_PAGE_SIZE;
972 0 : if ((word)result >= (word)bound) {
973 0 : result = bound;
974 0 : break;
975 : }
976 : } else {
977 0 : result -= MIN_PAGE_SIZE;
978 0 : if ((word)result <= (word)bound) {
979 0 : result = bound - MIN_PAGE_SIZE;
980 : /* This compensates for the */
981 : /* final result increment (we */
982 : /* do not modify the "up" variable */
983 : /* since it might be clobbered */
984 : /* by setjmp otherwise). */
985 0 : break;
986 : }
987 : }
988 0 : GC_noop1((word)(*result));
989 0 : }
990 : }
991 0 : GC_reset_fault_handler();
992 0 : if (!up) {
993 0 : result += MIN_PAGE_SIZE;
994 : }
995 0 : return(result);
996 : }
997 :
998 0 : ptr_t GC_find_limit(ptr_t p, GC_bool up)
999 : {
1000 0 : return GC_find_limit_with_bound(p, up, up ? (ptr_t)(word)(-1) : 0);
1001 : }
1002 : # endif /* NEED_FIND_LIMIT || USE_PROC_FOR_LIBRARIES */
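/* The probing idiom used by GC_find_limit_with_bound above, distilled */
/* into a standalone sketch (hypothetical names; assumes POSIX signals */
/* and that touching an unmapped page raises SIGSEGV): */
#if 0
#include <setjmp.h>
#include <signal.h>
static sigjmp_buf probe_env;
static void probe_handler(int sig) { (void)sig; siglongjmp(probe_env, 1); }
static int page_is_mapped(volatile char *p)
{
    struct sigaction act, old;
    int mapped = 0;
    act.sa_handler = probe_handler;
    sigemptyset(&act.sa_mask);
    act.sa_flags = SA_RESTART;
    sigaction(SIGSEGV, &act, &old);
    if (sigsetjmp(probe_env, 1) == 0) {
        (void)*p; /* faults and longjmps back if the page is unmapped */
        mapped = 1;
    }
    sigaction(SIGSEGV, &old, 0);
    return mapped;
}
#endif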
1003 :
1004 : #ifdef HPUX_STACKBOTTOM
1005 :
1006 : #include <sys/param.h>
1007 : #include <sys/pstat.h>
1008 :
1009 : GC_INNER ptr_t GC_get_register_stack_base(void)
1010 : {
1011 : struct pst_vm_status vm_status;
1012 :
1013 : int i = 0;
1014 : while (pstat_getprocvm(&vm_status, sizeof(vm_status), 0, i++) == 1) {
1015 : if (vm_status.pst_type == PS_RSESTACK) {
1016 : return (ptr_t) vm_status.pst_vaddr;
1017 : }
1018 : }
1019 :
1020 : /* Old way to get the register stack bottom. */
1021 : return (ptr_t)(((word)GC_stackbottom - BACKING_STORE_DISPLACEMENT - 1)
1022 : & ~(BACKING_STORE_ALIGNMENT - 1));
1023 : }
1024 :
1025 : #endif /* HPUX_STACKBOTTOM */
1026 :
1027 : #ifdef LINUX_STACKBOTTOM
1028 :
1029 : # include <sys/types.h>
1030 : # include <sys/stat.h>
1031 :
1032 : # define STAT_SKIP 27 /* Number of fields preceding startstack */
1033 : /* field in /proc/self/stat */
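/* For reference, proc(5) lists the fields of /proc/self/stat as: */
/* pid comm state ppid pgrp session tty_nr tpgid flags minflt */
/* cminflt majflt cmajflt utime stime cutime cstime priority nice */
/* num_threads itrealvalue starttime vsize rss rsslim startcode */
/* endcode startstack ..., i.e. startstack is the 28th field. */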
1034 :
1035 : # ifdef USE_LIBC_PRIVATES
1036 : # pragma weak __libc_stack_end
1037 : extern ptr_t __libc_stack_end;
1038 : # endif
1039 :
1040 : # ifdef IA64
1041 : # ifdef USE_LIBC_PRIVATES
1042 : # pragma weak __libc_ia64_register_backing_store_base
1043 : extern ptr_t __libc_ia64_register_backing_store_base;
1044 : # endif
1045 :
1046 : GC_INNER ptr_t GC_get_register_stack_base(void)
1047 : {
1048 : ptr_t result;
1049 :
1050 : # ifdef USE_LIBC_PRIVATES
1051 : if (0 != &__libc_ia64_register_backing_store_base
1052 : && 0 != __libc_ia64_register_backing_store_base) {
1053 : /* Glibc 2.2.4 has a bug such that for dynamically linked */
1054 : /* executables __libc_ia64_register_backing_store_base is */
1055 : /* defined but uninitialized during constructor calls. */
1056 : /* Hence we check for both nonzero address and value. */
1057 : return __libc_ia64_register_backing_store_base;
1058 : }
1059 : # endif
1060 : result = backing_store_base_from_proc();
1061 : if (0 == result) {
1062 : result = GC_find_limit(GC_save_regs_in_stack(), FALSE);
1063 : /* Now seems to work better than constant displacement */
1064 : /* heuristic used in 6.X versions. The latter seems to */
1065 : /* fail for 2.6 kernels. */
1066 : }
1067 : return result;
1068 : }
1069 : # endif /* IA64 */
1070 :
1071 0 : STATIC ptr_t GC_linux_main_stack_base(void)
1072 : {
1073 : /* We read the stack base value from /proc/self/stat. We do this */
1074 : /* using direct I/O system calls in order to avoid calling malloc */
1075 : /* in case REDIRECT_MALLOC is defined. */
1076 : # ifndef STAT_READ
1077 : /* Also defined in pthread_support.c. */
1078 : # define STAT_BUF_SIZE 4096
1079 : # define STAT_READ read
1080 : # endif
1081 : /* Should probably call the real read, if read is wrapped. */
1082 : char stat_buf[STAT_BUF_SIZE];
1083 : int f;
1084 : word result;
1085 0 : int i, buf_offset = 0, len;
1086 :
1087 : /* First try the easy way. This should work for glibc 2.2. */
1088 : /* It fails in a prelinked ("prelink" command) executable */
1089 : /* since the correct value of __libc_stack_end never */
1090 : /* becomes visible to us. The second test works around */
1091 : /* this. */
1092 : # ifdef USE_LIBC_PRIVATES
1093 0 : if (0 != &__libc_stack_end && 0 != __libc_stack_end ) {
1094 : # if defined(IA64)
1095 : /* Some versions of glibc set the address 16 bytes too */
1096 : /* low while the initialization code is running. */
1097 : if (((word)__libc_stack_end & 0xfff) + 0x10 < 0x1000) {
1098 : return __libc_stack_end + 0x10;
1099 : } /* Otherwise it's not safe to add 16 bytes and we fall */
1100 : /* back to using /proc. */
1101 : # elif defined(SPARC)
1102 : /* Older versions of glibc for 64-bit SPARC do not set this */
1103 : /* variable correctly, it gets set to either zero or one. */
1104 : if (__libc_stack_end != (ptr_t) (unsigned long)0x1)
1105 : return __libc_stack_end;
1106 : # else
1107 0 : return __libc_stack_end;
1108 : # endif
1109 : }
1110 : # endif
1111 0 : f = open("/proc/self/stat", O_RDONLY);
1112 0 : if (f < 0)
1113 0 : ABORT("Couldn't read /proc/self/stat");
1114 0 : len = STAT_READ(f, stat_buf, STAT_BUF_SIZE);
1115 0 : close(f);
1116 :
1117 : /* Skip the required number of fields. This number is hopefully */
1118 : /* constant across all Linux implementations. */
1119 0 : for (i = 0; i < STAT_SKIP; ++i) {
1120 0 : while (buf_offset < len && isspace(stat_buf[buf_offset++])) {
1121 : /* empty */
1122 : }
1123 0 : while (buf_offset < len && !isspace(stat_buf[buf_offset++])) {
1124 : /* empty */
1125 : }
1126 : }
1127 : /* Skip spaces. */
1128 0 : while (buf_offset < len && isspace(stat_buf[buf_offset])) {
1129 0 : buf_offset++;
1130 : }
1131 : /* Find the end of the number and cut the buffer there. */
1132 0 : for (i = 0; buf_offset + i < len; i++) {
1133 0 : if (!isdigit(stat_buf[buf_offset + i])) break;
1134 : }
1135 0 : if (buf_offset + i >= len) ABORT("Could not parse /proc/self/stat");
1136 0 : stat_buf[buf_offset + i] = '\0';
1137 :
1138 0 : result = (word)STRTOULL(&stat_buf[buf_offset], NULL, 10);
1139 0 : if (result < 0x100000 || (result & (sizeof(word) - 1)) != 0)
1140 0 : ABORT("Absurd stack bottom value");
1141 0 : return (ptr_t)result;
1142 : }
1143 : #endif /* LINUX_STACKBOTTOM */
1144 :
1145 : #ifdef FREEBSD_STACKBOTTOM
1146 : /* This uses an undocumented sysctl call, but at least one expert */
1147 : /* believes it will stay. */
1148 :
1149 : # include <unistd.h>
1150 : # include <sys/types.h>
1151 : # include <sys/sysctl.h>
1152 :
1153 : STATIC ptr_t GC_freebsd_main_stack_base(void)
1154 : {
1155 : int nm[2] = {CTL_KERN, KERN_USRSTACK};
1156 : ptr_t base;
1157 : size_t len = sizeof(ptr_t);
1158 : int r = sysctl(nm, 2, &base, &len, NULL, 0);
1159 : if (r) ABORT("Error getting main stack base");
1160 : return base;
1161 : }
1162 : #endif /* FREEBSD_STACKBOTTOM */
1163 :
1164 : #if defined(ECOS) || defined(NOSYS)
1165 : ptr_t GC_get_main_stack_base(void)
1166 : {
1167 : return STACKBOTTOM;
1168 : }
1169 : # define GET_MAIN_STACKBASE_SPECIAL
1170 : #elif defined(SYMBIAN)
1171 : extern int GC_get_main_symbian_stack_base(void);
1172 : ptr_t GC_get_main_stack_base(void)
1173 : {
1174 : return (ptr_t)GC_get_main_symbian_stack_base();
1175 : }
1176 : # define GET_MAIN_STACKBASE_SPECIAL
1177 : #elif !defined(BEOS) && !defined(AMIGA) && !defined(OS2) \
1178 : && !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32) \
1179 : && !defined(GC_OPENBSD_THREADS) \
1180 : && (!defined(GC_SOLARIS_THREADS) || defined(_STRICT_STDC))
1181 :
1182 : # if defined(LINUX) && defined(USE_GET_STACKBASE_FOR_MAIN)
1183 : # include <pthread.h>
1184 : # elif defined(DARWIN) && !defined(NO_PTHREAD_GET_STACKADDR_NP)
1185 : /* We could use pthread_get_stackaddr_np even in case of a */
1186 : /* single-threaded gclib (there is no -lpthread on Darwin). */
1187 : # include <pthread.h>
1188 : # undef STACKBOTTOM
1189 : # define STACKBOTTOM (ptr_t)pthread_get_stackaddr_np(pthread_self())
1190 : # endif
1191 :
1192 163 : ptr_t GC_get_main_stack_base(void)
1193 : {
1194 : ptr_t result;
1195 : # if defined(LINUX) && !defined(NO_PTHREAD_GETATTR_NP) \
1196 : && (defined(USE_GET_STACKBASE_FOR_MAIN) \
1197 : || (defined(THREADS) && !defined(REDIRECT_MALLOC)))
1198 : pthread_attr_t attr;
1199 : void *stackaddr;
1200 : size_t size;
1201 :
1202 163 : if (pthread_getattr_np(pthread_self(), &attr) == 0) {
1203 326 : if (pthread_attr_getstack(&attr, &stackaddr, &size) == 0
1204 326 : && stackaddr != NULL) {
1205 163 : pthread_attr_destroy(&attr);
1206 : # ifdef STACK_GROWS_DOWN
1207 163 : stackaddr = (char *)stackaddr + size;
1208 : # endif
1209 163 : return (ptr_t)stackaddr;
1210 : }
1211 0 : pthread_attr_destroy(&attr);
1212 : }
1213 0 : WARN("pthread_getattr_np or pthread_attr_getstack failed"
1214 : " for main thread\n", 0);
1215 : # endif
1216 : # ifdef STACKBOTTOM
1217 : result = STACKBOTTOM;
1218 : # else
1219 : # define STACKBOTTOM_ALIGNMENT_M1 ((word)STACK_GRAN - 1)
1220 : # ifdef HEURISTIC1
1221 : # ifdef STACK_GROWS_DOWN
1222 : result = (ptr_t)(((word)GC_approx_sp() + STACKBOTTOM_ALIGNMENT_M1)
1223 : & ~STACKBOTTOM_ALIGNMENT_M1);
1224 : # else
1225 : result = (ptr_t)((word)GC_approx_sp() & ~STACKBOTTOM_ALIGNMENT_M1);
1226 : # endif
1227 : # endif /* HEURISTIC1 */
1228 : # ifdef LINUX_STACKBOTTOM
1229 0 : result = GC_linux_main_stack_base();
1230 : # endif
1231 : # ifdef FREEBSD_STACKBOTTOM
1232 : result = GC_freebsd_main_stack_base();
1233 : # endif
1234 : # ifdef HEURISTIC2
1235 : {
1236 : ptr_t sp = GC_approx_sp();
1237 : # ifdef STACK_GROWS_DOWN
1238 : result = GC_find_limit(sp, TRUE);
1239 : # ifdef HEURISTIC2_LIMIT
1240 : if ((word)result > (word)HEURISTIC2_LIMIT
1241 : && (word)sp < (word)HEURISTIC2_LIMIT) {
1242 : result = HEURISTIC2_LIMIT;
1243 : }
1244 : # endif
1245 : # else
1246 : result = GC_find_limit(sp, FALSE);
1247 : # ifdef HEURISTIC2_LIMIT
1248 : if ((word)result < (word)HEURISTIC2_LIMIT
1249 : && (word)sp > (word)HEURISTIC2_LIMIT) {
1250 : result = HEURISTIC2_LIMIT;
1251 : }
1252 : # endif
1253 : # endif
1254 : }
1255 : # endif /* HEURISTIC2 */
1256 : # ifdef STACK_GROWS_DOWN
1257 0 : if (result == 0)
1258 0 : result = (ptr_t)(signed_word)(-sizeof(ptr_t));
1259 : # endif
1260 : # endif
1261 : GC_ASSERT((word)GC_approx_sp() HOTTER_THAN (word)result);
1262 0 : return(result);
1263 : }
1264 : # define GET_MAIN_STACKBASE_SPECIAL
1265 : #endif /* !AMIGA, !BEOS, !OPENBSD, !OS2, !Windows */
1266 :
1267 : #if (defined(GC_LINUX_THREADS) || defined(PLATFORM_ANDROID)) \
1268 : && !defined(NO_PTHREAD_GETATTR_NP)
1269 :
1270 : # include <pthread.h>
1271 : /* extern int pthread_getattr_np(pthread_t, pthread_attr_t *); */
1272 :
1273 499 : GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *b)
1274 : {
1275 : pthread_attr_t attr;
1276 : size_t size;
1277 : # ifdef IA64
1278 : DCL_LOCK_STATE;
1279 : # endif
1280 :
1281 499 : if (pthread_getattr_np(pthread_self(), &attr) != 0) {
1282 0 : WARN("pthread_getattr_np failed\n", 0);
1283 0 : return GC_UNIMPLEMENTED;
1284 : }
1285 499 : if (pthread_attr_getstack(&attr, &(b -> mem_base), &size) != 0) {
1286 0 : ABORT("pthread_attr_getstack failed");
1287 : }
1288 499 : pthread_attr_destroy(&attr);
1289 : # ifdef STACK_GROWS_DOWN
1290 499 : b -> mem_base = (char *)(b -> mem_base) + size;
1291 : # endif
1292 : # ifdef IA64
1293 : /* We could try backing_store_base_from_proc, but that's safe */
1294 : /* only if no mappings are being asynchronously created. */
1295 : /* Subtracting the size from the stack base doesn't work for at */
1296 : /* least the main thread. */
1297 : LOCK();
1298 : {
1299 : IF_CANCEL(int cancel_state;)
1300 : ptr_t bsp;
1301 : ptr_t next_stack;
1302 :
1303 : DISABLE_CANCEL(cancel_state);
1304 : bsp = GC_save_regs_in_stack();
1305 : next_stack = GC_greatest_stack_base_below(bsp);
1306 : if (0 == next_stack) {
1307 : b -> reg_base = GC_find_limit(bsp, FALSE);
1308 : } else {
1309 : /* Avoid walking backwards into preceding memory stack and */
1310 : /* growing it. */
1311 : b -> reg_base = GC_find_limit_with_bound(bsp, FALSE, next_stack);
1312 : }
1313 : RESTORE_CANCEL(cancel_state);
1314 : }
1315 : UNLOCK();
1316 : # endif
1317 499 : return GC_SUCCESS;
1318 : }
1319 : # define HAVE_GET_STACK_BASE
1320 : #endif /* GC_LINUX_THREADS */
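/* Client-side usage sketch for the public API implemented above: this */
/* is how gc.h intends GC_get_stack_base to be used when registering a */
/* thread that was not created through the collector (illustrative, */
/* assumes GC_allow_register_threads was called at startup): */
#if 0
#include "gc.h"
void attach_current_thread(void)
{
    struct GC_stack_base sb;
    if (GC_get_stack_base(&sb) == GC_SUCCESS)
        (void)GC_register_my_thread(&sb);
}
#endif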
1321 :
1322 : #if defined(GC_DARWIN_THREADS) && !defined(NO_PTHREAD_GET_STACKADDR_NP)
1323 : # include <pthread.h>
1324 :
1325 : GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *b)
1326 : {
1327 : /* pthread_get_stackaddr_np() should return stack bottom (highest */
1328 : /* stack address plus 1). */
1329 : b->mem_base = pthread_get_stackaddr_np(pthread_self());
1330 : GC_ASSERT((word)GC_approx_sp() HOTTER_THAN (word)b->mem_base);
1331 : return GC_SUCCESS;
1332 : }
1333 : # define HAVE_GET_STACK_BASE
1334 : #endif /* GC_DARWIN_THREADS */
1335 :
1336 : #ifdef GC_OPENBSD_THREADS
1337 : # include <sys/signal.h>
1338 : # include <pthread.h>
1339 : # include <pthread_np.h>
1340 :
1341 : /* Find the stack using pthread_stackseg_np(). */
1342 : GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *sb)
1343 : {
1344 : stack_t stack;
1345 : if (pthread_stackseg_np(pthread_self(), &stack))
1346 : ABORT("pthread_stackseg_np(self) failed");
1347 : sb->mem_base = stack.ss_sp;
1348 : return GC_SUCCESS;
1349 : }
1350 : # define HAVE_GET_STACK_BASE
1351 : #endif /* GC_OPENBSD_THREADS */
1352 :
1353 : #if defined(GC_SOLARIS_THREADS) && !defined(_STRICT_STDC)
1354 :
1355 : # include <thread.h>
1356 : # include <signal.h>
1357 : # include <pthread.h>
1358 :
1359 : /* These variables are used to cache the ss_sp value for the primordial */
1360 : /* thread (it's better not to call thr_stksegment() twice for this */
1361 : /* thread - see JDK bug #4352906). */
1362 : static pthread_t stackbase_main_self = 0;
1363 : /* 0 means stackbase_main_ss_sp value is unset. */
1364 : static void *stackbase_main_ss_sp = NULL;
1365 :
1366 : GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *b)
1367 : {
1368 : stack_t s;
1369 : pthread_t self = pthread_self();
1370 :
1371 : if (self == stackbase_main_self)
1372 : {
1373 : /* If the client calls GC_get_stack_base() from the main thread */
1374 : /* then just return the cached value. */
1375 : b -> mem_base = stackbase_main_ss_sp;
1376 : GC_ASSERT(b -> mem_base != NULL);
1377 : return GC_SUCCESS;
1378 : }
1379 :
1380 : if (thr_stksegment(&s)) {
1381 : /* According to the manual, the only failure error code returned */
1382 : /* is EAGAIN, meaning "the information is not available because */
1383 : /* the thread is not yet completely initialized or it is an internal */
1384 : /* thread" - this shouldn't happen here. */
1385 : ABORT("thr_stksegment failed");
1386 : }
1387 : /* s.ss_sp holds the pointer to the stack bottom. */
1388 : GC_ASSERT((word)GC_approx_sp() HOTTER_THAN (word)s.ss_sp);
1389 :
1390 : if (!stackbase_main_self && thr_main() != 0)
1391 : {
1392 : /* Cache the stack base value for the primordial thread (this */
1393 : /* is done during GC_init, so there is no race). */
1394 : stackbase_main_ss_sp = s.ss_sp;
1395 : stackbase_main_self = self;
1396 : }
1397 :
1398 : b -> mem_base = s.ss_sp;
1399 : return GC_SUCCESS;
1400 : }
1401 : # define HAVE_GET_STACK_BASE
1402 : #endif /* GC_SOLARIS_THREADS */
1403 :
1404 : #ifdef GC_RTEMS_PTHREADS
1405 : GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *sb)
1406 : {
1407 : sb->mem_base = rtems_get_stack_bottom();
1408 : return GC_SUCCESS;
1409 : }
1410 : # define HAVE_GET_STACK_BASE
1411 : #endif /* GC_RTEMS_PTHREADS */
1412 :
1413 : #ifndef HAVE_GET_STACK_BASE
1414 : # ifdef NEED_FIND_LIMIT
1415 : /* Retrieve stack base. */
1416 : /* Using the GC_find_limit version is risky. */
1417 : /* On IA64, for example, there is no guard page between the */
1418 : /* stack of one thread and the register backing store of the */
1419 : /* next. Thus this is likely to identify way too large a */
1420 : /* "stack" and thus at least result in disastrous performance. */
1421 : /* FIXME - Implement better strategies here. */
1422 : GC_API int GC_CALL GC_get_stack_base(struct GC_stack_base *b)
1423 : {
1424 : IF_CANCEL(int cancel_state;)
1425 : DCL_LOCK_STATE;
1426 :
1427 : LOCK();
1428 : DISABLE_CANCEL(cancel_state); /* May be unnecessary? */
1429 : # ifdef STACK_GROWS_DOWN
1430 : b -> mem_base = GC_find_limit(GC_approx_sp(), TRUE);
1431 : # ifdef IA64
1432 : b -> reg_base = GC_find_limit(GC_save_regs_in_stack(), FALSE);
1433 : # endif
1434 : # else
1435 : b -> mem_base = GC_find_limit(GC_approx_sp(), FALSE);
1436 : # endif
1437 : RESTORE_CANCEL(cancel_state);
1438 : UNLOCK();
1439 : return GC_SUCCESS;
1440 : }
1441 : # else
1442 : GC_API int GC_CALL GC_get_stack_base(
1443 : struct GC_stack_base *b GC_ATTR_UNUSED)
1444 : {
1445 : # if defined(GET_MAIN_STACKBASE_SPECIAL) && !defined(THREADS) \
1446 : && !defined(IA64)
1447 : b->mem_base = GC_get_main_stack_base();
1448 : return GC_SUCCESS;
1449 : # else
1450 : return GC_UNIMPLEMENTED;
1451 : # endif
1452 : }
1453 : # endif /* !NEED_FIND_LIMIT */
1454 : #endif /* !HAVE_GET_STACK_BASE */
1455 :
1456 : #ifndef GET_MAIN_STACKBASE_SPECIAL
1457 : /* This is always called from the main thread. Default implementation. */
1458 : ptr_t GC_get_main_stack_base(void)
1459 : {
1460 : struct GC_stack_base sb;
1461 :
1462 : if (GC_get_stack_base(&sb) != GC_SUCCESS)
1463 : ABORT("GC_get_stack_base failed");
1464 : GC_ASSERT((word)GC_approx_sp() HOTTER_THAN (word)sb.mem_base);
1465 : return (ptr_t)sb.mem_base;
1466 : }
1467 : #endif /* !GET_MAIN_STACKBASE_SPECIAL */
1468 :
1469 : /* Register static data segment(s) as roots. If more data segments are */
1470 : /* added later, they need to be registered at that point (as we do */
1471 : /* with SunOS dynamic loading), or GC_mark_roots needs to check for */
1472 : /* them (as we do with PCR). Called with allocator lock held. */
1473 : # ifdef OS2
1474 :
1475 : void GC_register_data_segments(void)
1476 : {
1477 : PTIB ptib;
1478 : PPIB ppib;
1479 : HMODULE module_handle;
1480 : # define PBUFSIZ 512
1481 : UCHAR path[PBUFSIZ];
1482 : FILE * myexefile;
1483 : struct exe_hdr hdrdos; /* MSDOS header. */
1484 : struct e32_exe hdr386; /* Real header for my executable */
1485 : struct o32_obj seg; /* Current segment */
1486 : int nsegs;
1487 :
1488 : if (DosGetInfoBlocks(&ptib, &ppib) != NO_ERROR) {
1489 : ABORT("DosGetInfoBlocks failed");
1490 : }
1491 : module_handle = ppib -> pib_hmte;
1492 : if (DosQueryModuleName(module_handle, PBUFSIZ, path) != NO_ERROR) {
1493 : ABORT("DosQueryModuleName failed");
1494 : }
1495 : myexefile = fopen(path, "rb");
1496 : if (myexefile == 0) {
1497 : ABORT_ARG1("Failed to open executable", ": %s", path);
1498 : }
1499 : if (fread((char *)(&hdrdos), 1, sizeof(hdrdos), myexefile)
1500 : < sizeof(hdrdos)) {
1501 : ABORT_ARG1("Could not read MSDOS header", " from: %s", path);
1502 : }
1503 : if (E_MAGIC(hdrdos) != EMAGIC) {
1504 : ABORT_ARG1("Bad DOS magic number", " in file: %s", path);
1505 : }
1506 : if (fseek(myexefile, E_LFANEW(hdrdos), SEEK_SET) != 0) {
1507 : ABORT_ARG1("Bad DOS magic number", " in file: %s", path);
1508 : }
1509 : if (fread((char *)(&hdr386), 1, sizeof(hdr386), myexefile)
1510 : < sizeof(hdr386)) {
1511 : ABORT_ARG1("Could not read OS/2 header", " from: %s", path);
1512 : }
1513 : if (E32_MAGIC1(hdr386) != E32MAGIC1 || E32_MAGIC2(hdr386) != E32MAGIC2) {
1514 : ABORT_ARG1("Bad OS/2 magic number", " in file: %s", path);
1515 : }
1516 : if (E32_BORDER(hdr386) != E32LEBO || E32_WORDER(hdr386) != E32LEWO) {
1517 : ABORT_ARG1("Bad byte order in executable", " file: %s", path);
1518 : }
1519 : if (E32_CPU(hdr386) == E32CPU286) {
1520 : ABORT_ARG1("GC cannot handle 80286 executables", ": %s", path);
1521 : }
1522 : if (fseek(myexefile, E_LFANEW(hdrdos) + E32_OBJTAB(hdr386),
1523 : SEEK_SET) != 0) {
1524 : ABORT_ARG1("Seek to object table failed", " in file: %s", path);
1525 : }
1526 : for (nsegs = E32_OBJCNT(hdr386); nsegs > 0; nsegs--) {
1527 : int flags;
1528 : if (fread((char *)(&seg), 1, sizeof(seg), myexefile) < sizeof(seg)) {
1529 : ABORT_ARG1("Could not read obj table entry", " from file: %s", path);
1530 : }
1531 : flags = O32_FLAGS(seg);
1532 : if (!(flags & OBJWRITE)) continue;
1533 : if (!(flags & OBJREAD)) continue;
1534 : if (flags & OBJINVALID) {
1535 : GC_err_printf("Object with invalid pages?\n");
1536 : continue;
1537 : }
1538 : GC_add_roots_inner((ptr_t)O32_BASE(seg),
1539 : (ptr_t)(O32_BASE(seg)+O32_SIZE(seg)), FALSE);
1540 : }
1541 : }
1542 :
1543 : # else /* !OS2 */
1544 :
1545 : # if defined(GWW_VDB)
1546 : # ifndef MEM_WRITE_WATCH
1547 : # define MEM_WRITE_WATCH 0x200000
1548 : # endif
1549 : # ifndef WRITE_WATCH_FLAG_RESET
1550 : # define WRITE_WATCH_FLAG_RESET 1
1551 : # endif
1552 :
1553 : /* Since we can't easily check whether ULONG_PTR and SIZE_T are */
1554 : /* defined in Win32 basetsd.h, we define our own ULONG_PTR. */
1555 : # define GC_ULONG_PTR word
1556 :
1557 : typedef UINT (WINAPI * GetWriteWatch_type)(
1558 : DWORD, PVOID, GC_ULONG_PTR /* SIZE_T */,
1559 : PVOID *, GC_ULONG_PTR *, PULONG);
1560 : static GetWriteWatch_type GetWriteWatch_func;
1561 : static DWORD GetWriteWatch_alloc_flag;
1562 :
1563 : # define GC_GWW_AVAILABLE() (GetWriteWatch_func != NULL)
1564 :
1565 : static void detect_GetWriteWatch(void)
1566 : {
1567 : static GC_bool done;
1568 : HMODULE hK32;
1569 : if (done)
1570 : return;
1571 :
1572 : # if defined(MPROTECT_VDB)
1573 : {
1574 : char * str = GETENV("GC_USE_GETWRITEWATCH");
1575 : # if defined(GC_PREFER_MPROTECT_VDB)
1576 : if (str == NULL || (*str == '0' && *(str + 1) == '\0')) {
1577 : /* GC_USE_GETWRITEWATCH is unset or set to "0". */
1578 : done = TRUE; /* falling back to MPROTECT_VDB strategy. */
1579 : /* This should work as if GWW_VDB is undefined. */
1580 : return;
1581 : }
1582 : # else
1583 : if (str != NULL && *str == '0' && *(str + 1) == '\0') {
1584 : /* GC_USE_GETWRITEWATCH is set to "0". */
1585 : done = TRUE; /* falling back to MPROTECT_VDB strategy. */
1586 : return;
1587 : }
1588 : # endif
1589 : }
1590 : # endif
1591 :
1592 : hK32 = GetModuleHandle(TEXT("kernel32.dll"));
1593 : if (hK32 != (HMODULE)0 &&
1594 : (GetWriteWatch_func = (GetWriteWatch_type)GetProcAddress(hK32,
1595 : "GetWriteWatch")) != NULL) {
1596 : /* Also check whether VirtualAlloc accepts MEM_WRITE_WATCH, */
1597 : /* as some versions of kernel32.dll have one but not the */
1598 : /* other, making the feature completely broken. */
1599 : void * page = VirtualAlloc(NULL, GC_page_size,
1600 : MEM_WRITE_WATCH | MEM_RESERVE,
1601 : PAGE_READWRITE);
1602 : if (page != NULL) {
1603 : PVOID pages[16];
1604 : GC_ULONG_PTR count = 16;
1605 : DWORD page_size;
1606 : /* Check that it actually works. In spite of some */
1607 : /* documentation it actually seems to exist on W2K. */
1608 : /* This test may be unnecessary, but ... */
1609 : if (GetWriteWatch_func(WRITE_WATCH_FLAG_RESET,
1610 : page, GC_page_size,
1611 : pages,
1612 : &count,
1613 : &page_size) != 0) {
1614 : /* GetWriteWatch always fails. */
1615 : GetWriteWatch_func = NULL;
1616 : } else {
1617 : GetWriteWatch_alloc_flag = MEM_WRITE_WATCH;
1618 : }
1619 : VirtualFree(page, 0 /* dwSize */, MEM_RELEASE);
1620 : } else {
1621 : /* GetWriteWatch will be useless. */
1622 : GetWriteWatch_func = NULL;
1623 : }
1624 : }
1625 : # ifndef SMALL_CONFIG
1626 : if (GetWriteWatch_func == NULL) {
1627 : GC_COND_LOG_PRINTF("Did not find a usable GetWriteWatch()\n");
1628 : } else {
1629 : GC_COND_LOG_PRINTF("Using GetWriteWatch()\n");
1630 : }
1631 : # endif
1632 : done = TRUE;
1633 : }
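/* Sketch of how a write-watch region is typically consumed once the */
/* function pointer has been detected (illustrative only; the */
/* collector's actual use lives in the GWW_VDB dirty-page code): */
#if 0
void *base = VirtualAlloc(NULL, GC_page_size * 16,
                          MEM_WRITE_WATCH | MEM_RESERVE | MEM_COMMIT,
                          PAGE_READWRITE);
PVOID dirty[16];
GC_ULONG_PTR n = 16;
DWORD gran;
((char *)base)[0] = 1; /* touch one page */
if (GetWriteWatch_func(WRITE_WATCH_FLAG_RESET, base, GC_page_size * 16,
                       dirty, &n, &gran) == 0) {
    /* dirty[0..n-1] now hold base addresses of the written pages. */
}
#endif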
1634 :
1635 : # else
1636 : # define GetWriteWatch_alloc_flag 0
1637 : # endif /* !GWW_VDB */
1638 :
1639 : # if defined(MSWIN32) || defined(MSWINCE) || defined(CYGWIN32)
1640 :
1641 : # ifdef MSWIN32
1642 : /* Unfortunately, we have to handle win32s very differently from NT, */
1643 : /* since VirtualQuery has very different semantics. In particular, */
1644 : /* under win32s a VirtualQuery call on an unmapped page returns an */
1645 : /* invalid result. Under NT, GC_register_data_segments is a no-op */
1646 : /* and all real work is done by GC_register_dynamic_libraries. Under */
1647 : /* win32s, we cannot find the data segments associated with dll's. */
1648 : /* We register the main data segment here. */
1649 : GC_INNER GC_bool GC_no_win32_dlls = FALSE;
1650 : /* This used to be set for gcc, to avoid dealing with */
1651 : /* the structured exception handling issues. But we now have */
1652 : /* assembly code to do that right. */
1653 :
1654 : GC_INNER GC_bool GC_wnt = FALSE;
1655 : /* This is a Windows NT derivative, i.e. NT, W2K, XP or later. */
1656 :
1657 : GC_INNER void GC_init_win32(void)
1658 : {
1659 : /* Set GC_wnt. If we're running under win32s, assume that no DLLs */
1660 : /* will be loaded. I doubt anyone still runs win32s, but... */
1661 : DWORD v = GetVersion();
1662 : GC_wnt = !(v & 0x80000000);
1663 : GC_no_win32_dlls |= ((!GC_wnt) && (v & 0xff) <= 3);
1664 : # ifdef USE_MUNMAP
1665 : if (GC_no_win32_dlls) {
1666 : /* Turn off unmapping for safety (since may not work well with */
1667 : /* GlobalAlloc). */
1668 : GC_unmap_threshold = 0;
1669 : }
1670 : # endif
1671 : }
1672 :
1673 : /* Return the smallest address a such that VirtualQuery */
1674 : /* returns correct results for all addresses between a and start. */
1675 : /* Assumes VirtualQuery returns correct information for start. */
1676 : STATIC ptr_t GC_least_described_address(ptr_t start)
1677 : {
1678 : MEMORY_BASIC_INFORMATION buf;
1679 : size_t result;
1680 : LPVOID limit;
1681 : ptr_t p;
1682 : LPVOID q;
1683 :
1684 : limit = GC_sysinfo.lpMinimumApplicationAddress;
1685 : p = (ptr_t)((word)start & ~(GC_page_size - 1));
1686 : for (;;) {
1687 : q = (LPVOID)(p - GC_page_size);
1688 : if ((word)q > (word)p /* underflow */ || (word)q < (word)limit) break;
1689 : result = VirtualQuery(q, &buf, sizeof(buf));
1690 : if (result != sizeof(buf) || buf.AllocationBase == 0) break;
1691 : p = (ptr_t)(buf.AllocationBase);
1692 : }
1693 : return p;
1694 : }
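: 
: /* Illustrative VirtualQuery call pattern relied on above (a sketch, */
: /* not part of the original source):                                 */
: /*   MEMORY_BASIC_INFORMATION info;                                  */
: /*   if (VirtualQuery(q, &info, sizeof(info)) == sizeof(info))       */
: /*     -- info.AllocationBase is the bottom of the VirtualAlloc      */
: /*     -- region containing q; the loop above steps down to it.      */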
1695 : # endif /* MSWIN32 */
1696 :
1697 : # ifndef REDIRECT_MALLOC
1698 : /* We maintain a linked list of AllocationBase values that we know */
1699 : /* correspond to malloc heap sections. Currently this is only called */
1700 : /* during a GC. But there is some hope that for long running */
1701 : /* programs we will eventually see most heap sections. */
1702 :
1703 : /* In the long run, it would be more reliable to occasionally walk */
1704 : /* the malloc heap with HeapWalk on the default heap. But that */
1705 : /* apparently works only for NT-based Windows. */
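: 
: /* A hedged sketch of that HeapWalk-based alternative (illustrative */
: /* only; the collector does not currently do this):                 */
: # if 0
:   STATIC void GC_walk_default_heap(void)
:   {
:     PROCESS_HEAP_ENTRY entry;
:     HANDLE heap = GetProcessHeap();
: 
:     entry.lpData = NULL; /* start the walk at the first entry */
:     while (HeapWalk(heap, &entry)) {
:       /* [entry.lpData, (char *)entry.lpData + entry.cbData) is one */
:       /* heap block; its allocation base could be recorded here.    */
:     }
:   }
: # endif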
1706 :
1707 : STATIC size_t GC_max_root_size = 100000; /* Approx. largest root size. */
1708 :
1709 : # ifdef USE_WINALLOC
1710 : /* In the long run, a better data structure would also be nice ... */
1711 : STATIC struct GC_malloc_heap_list {
1712 : void * allocation_base;
1713 : struct GC_malloc_heap_list *next;
1714 : } *GC_malloc_heap_l = 0;
1715 :
1716 : /* Is p the base of one of the malloc heap sections we already know */
1717 : /* about? */
1718 : STATIC GC_bool GC_is_malloc_heap_base(ptr_t p)
1719 : {
1720 : struct GC_malloc_heap_list *q = GC_malloc_heap_l;
1721 :
1722 : while (0 != q) {
1723 : if (q -> allocation_base == p) return TRUE;
1724 : q = q -> next;
1725 : }
1726 : return FALSE;
1727 : }
1728 :
1729 : STATIC void *GC_get_allocation_base(void *p)
1730 : {
1731 : MEMORY_BASIC_INFORMATION buf;
1732 : size_t result = VirtualQuery(p, &buf, sizeof(buf));
1733 : if (result != sizeof(buf)) {
1734 : ABORT("Weird VirtualQuery result");
1735 : }
1736 : return buf.AllocationBase;
1737 : }
1738 :
1739 : GC_INNER void GC_add_current_malloc_heap(void)
1740 : {
1741 : struct GC_malloc_heap_list *new_l =
1742 : malloc(sizeof(struct GC_malloc_heap_list));
1743 : void * candidate;
1744 :
1745 : if (new_l == 0) return;
: /* Only now is it safe to query the allocation base of new_l. */
: candidate = GC_get_allocation_base(new_l);
1746 : if (GC_is_malloc_heap_base(candidate)) {
1747 : /* Try a little harder to find malloc heap. */
1748 : size_t req_size = 10000;
1749 : do {
1750 : void *p = malloc(req_size);
1751 : if (0 == p) {
1752 : free(new_l);
1753 : return;
1754 : }
1755 : candidate = GC_get_allocation_base(p);
1756 : free(p);
1757 : req_size *= 2;
1758 : } while (GC_is_malloc_heap_base(candidate)
1759 : && req_size < GC_max_root_size/10 && req_size < 500000);
1760 : if (GC_is_malloc_heap_base(candidate)) {
1761 : free(new_l);
1762 : return;
1763 : }
1764 : }
1765 : GC_COND_LOG_PRINTF("Found new system malloc AllocationBase at %p\n",
1766 : candidate);
1767 : new_l -> allocation_base = candidate;
1768 : new_l -> next = GC_malloc_heap_l;
1769 : GC_malloc_heap_l = new_l;
1770 : }
1771 : # endif /* USE_WINALLOC */
1772 :
1773 : # endif /* !REDIRECT_MALLOC */
1774 :
1775 : STATIC word GC_n_heap_bases = 0; /* See GC_heap_bases. */
1776 :
1777 : /* Is p the start of either the malloc heap, or of one of our */
1778 : /* heap sections? */
1779 : GC_INNER GC_bool GC_is_heap_base(ptr_t p)
1780 : {
1781 : unsigned i;
1782 : # ifndef REDIRECT_MALLOC
1783 : if (GC_root_size > GC_max_root_size) GC_max_root_size = GC_root_size;
1784 : # ifdef USE_WINALLOC
1785 : if (GC_is_malloc_heap_base(p)) return TRUE;
1786 : # endif
1787 : # endif
1788 : for (i = 0; i < GC_n_heap_bases; i++) {
1789 : if (GC_heap_bases[i] == p) return TRUE;
1790 : }
1791 : return FALSE;
1792 : }
1793 :
1794 : #ifdef MSWIN32
1795 : STATIC void GC_register_root_section(ptr_t static_root)
1796 : {
1797 : MEMORY_BASIC_INFORMATION buf;
1798 : size_t result;
1799 : DWORD protect;
1800 : LPVOID p;
1801 : char * base;
1802 : char * limit, * new_limit;
1803 :
1804 : if (!GC_no_win32_dlls) return;
1805 : p = base = limit = GC_least_described_address(static_root);
1806 : while ((word)p < (word)GC_sysinfo.lpMaximumApplicationAddress) {
1807 : result = VirtualQuery(p, &buf, sizeof(buf));
1808 : if (result != sizeof(buf) || buf.AllocationBase == 0
1809 : || GC_is_heap_base(buf.AllocationBase)) break;
1810 : new_limit = (char *)p + buf.RegionSize;
1811 : protect = buf.Protect;
1812 : if (buf.State == MEM_COMMIT
1813 : && is_writable(protect)) {
1814 : if ((char *)p == limit) {
1815 : limit = new_limit;
1816 : } else {
1817 : if (base != limit) GC_add_roots_inner(base, limit, FALSE);
1818 : base = p;
1819 : limit = new_limit;
1820 : }
1821 : }
1822 : if ((word)p > (word)new_limit /* overflow */) break;
1823 : p = (LPVOID)new_limit;
1824 : }
1825 : if (base != limit) GC_add_roots_inner(base, limit, FALSE);
1826 : }
1827 : #endif /* MSWIN32 */
1828 :
1829 : void GC_register_data_segments(void)
1830 : {
1831 : # ifdef MSWIN32
1832 : GC_register_root_section((ptr_t)&GC_pages_executable);
1833 : /* any other GC global variable would fit too. */
1834 : # endif
1835 : }
1836 :
1837 : # else /* !OS2 && !Windows */
1838 :
1839 : # if (defined(SVR4) || defined(AUX) || defined(DGUX) \
1840 : || (defined(LINUX) && defined(SPARC))) && !defined(PCR)
1841 : ptr_t GC_SysVGetDataStart(size_t max_page_size, ptr_t etext_addr)
1842 : {
1843 : word text_end = ((word)(etext_addr) + sizeof(word) - 1)
1844 : & ~(sizeof(word) - 1);
1845 : /* etext rounded to word boundary */
1846 : word next_page = ((text_end + (word)max_page_size - 1)
1847 : & ~((word)max_page_size - 1));
1848 : word page_offset = (text_end & ((word)max_page_size - 1));
1849 : char * volatile result = (char *)(next_page + page_offset);
1850 : /* Note that this isn't equivalent to just adding */
1851 : /* max_page_size to &etext if &etext is at a page boundary */
1852 :
1853 : GC_setup_temporary_fault_handler();
1854 : if (SETJMP(GC_jmp_buf) == 0) {
1855 : /* Try writing to the address. */
1856 : *result = *result;
1857 : GC_reset_fault_handler();
1858 : } else {
1859 : GC_reset_fault_handler();
1860 : /* We got here via a longjmp. The address is not readable. */
1861 : /* This is known to happen under Solaris 2.4 + gcc, which places */
1862 : /* string constants in the text segment, but after etext. */
1863 : /* Use plan B. Note that we now know there is a gap between */
1864 : /* text and data segments, so plan A bought us something. */
1865 : result = (char *)GC_find_limit((ptr_t)(DATAEND), FALSE);
1866 : }
1867 : return((ptr_t)result);
1868 : }
1869 : # endif
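: 
: /* Typical use (a sketch; the real parameters vary by platform and  */
: /* live in gcconfig.h):                                             */
: /*   #define DATASTART GC_SysVGetDataStart(0x10000, (ptr_t)_etext)  */
: /* i.e. probe the first page past the text segment and fall back to */
: /* GC_find_limit if that page turns out to be unmapped.             */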
1870 :
1871 : # if defined(FREEBSD) && !defined(PCR) && (defined(I386) || defined(X86_64) \
1872 : || defined(powerpc) || defined(__powerpc__))
1873 :
1874 : /* It's unclear whether this should be identical to the above, or */
1875 : /* whether it should apply to non-X86 architectures. */
1876 : /* For now we don't assume that there is always an empty page after */
1877 : /* etext. But in some cases there actually seems to be slightly more. */
1878 : /* This also deals with holes between read-only data and writable data. */
1879 : ptr_t GC_FreeBSDGetDataStart(size_t max_page_size, ptr_t etext_addr)
1880 : {
1881 : word text_end = ((word)(etext_addr) + sizeof(word) - 1)
1882 : & ~(sizeof(word) - 1);
1883 : /* etext rounded to word boundary */
1884 : volatile word next_page = (text_end + (word)max_page_size - 1)
1885 : & ~((word)max_page_size - 1);
1886 : volatile ptr_t result = (ptr_t)text_end;
1887 : GC_setup_temporary_fault_handler();
1888 : if (SETJMP(GC_jmp_buf) == 0) {
1889 : /* Try reading at the address. */
1890 : /* This should happen before there is another thread. */
1891 : for (; next_page < (word)(DATAEND); next_page += (word)max_page_size)
1892 : *(volatile char *)next_page;
1893 : GC_reset_fault_handler();
1894 : } else {
1895 : GC_reset_fault_handler();
1896 : /* As above, we go to plan B */
1897 : result = GC_find_limit((ptr_t)(DATAEND), FALSE);
1898 : }
1899 : return(result);
1900 : }
1901 :
1902 : # endif /* FREEBSD */
1903 :
1904 :
1905 : #ifdef AMIGA
1906 :
1907 : # define GC_AMIGA_DS
1908 : # include "extra/AmigaOS.c"
1909 : # undef GC_AMIGA_DS
1910 :
1911 : #elif defined(OPENBSD)
1912 :
1913 : /* Depending on arch alignment, there can be multiple holes */
1914 : /* between DATASTART and DATAEND. Scan in DATASTART .. DATAEND */
1915 : /* and register each region. */
1916 : void GC_register_data_segments(void)
1917 : {
1918 : ptr_t region_start = DATASTART;
1919 : ptr_t region_end;
1920 :
1921 : for (;;) {
1922 : region_end = GC_find_limit_openbsd(region_start, DATAEND);
1923 : GC_add_roots_inner(region_start, region_end, FALSE);
1924 : if ((word)region_end >= (word)(DATAEND))
1925 : break;
1926 : region_start = GC_skip_hole_openbsd(region_end, DATAEND);
1927 : }
1928 : }
1929 :
1930 : # else /* !OS2 && !Windows && !AMIGA && !OPENBSD */
1931 :
1932 0 : void GC_register_data_segments(void)
1933 : {
1934 : # if !defined(PCR) && !defined(MACOS)
1935 : # if defined(REDIRECT_MALLOC) && defined(GC_SOLARIS_THREADS)
1936 : /* As of Solaris 2.3, the Solaris threads implementation */
1937 : /* allocates the data structure for the initial thread with */
1938 : /* sbrk at process startup. It needs to be scanned, so that */
1939 : /* we don't lose some malloc allocated data structures */
1940 : /* hanging from it. We're on thin ice here ... */
1941 : extern caddr_t sbrk(int);
1942 :
1943 : GC_ASSERT(DATASTART);
1944 : {
1945 : ptr_t p = (ptr_t)sbrk(0);
1946 : if ((word)(DATASTART) < (word)p)
1947 : GC_add_roots_inner(DATASTART, p, FALSE);
1948 : }
1949 : # else
1950 : GC_ASSERT(DATASTART);
1951 0 : GC_add_roots_inner(DATASTART, (ptr_t)(DATAEND), FALSE);
1952 : # if defined(DATASTART2)
1953 : GC_add_roots_inner(DATASTART2, (ptr_t)(DATAEND2), FALSE);
1954 : # endif
1955 : # endif
1956 : # endif
1957 : # if defined(MACOS)
1958 : {
1959 : # if defined(THINK_C)
1960 : extern void* GC_MacGetDataStart(void);
1961 : /* globals begin above stack and end at a5. */
1962 : GC_add_roots_inner((ptr_t)GC_MacGetDataStart(),
1963 : (ptr_t)LMGetCurrentA5(), FALSE);
1964 : # else
1965 : # if defined(__MWERKS__)
1966 : # if !__POWERPC__
1967 : extern void* GC_MacGetDataStart(void);
1968 : /* MATTHEW: Function to handle Far Globals (CW Pro 3) */
1969 : # if __option(far_data)
1970 : extern void* GC_MacGetDataEnd(void);
1971 : # endif
1972 : /* globals begin above stack and end at a5. */
1973 : GC_add_roots_inner((ptr_t)GC_MacGetDataStart(),
1974 : (ptr_t)LMGetCurrentA5(), FALSE);
1975 : /* MATTHEW: Handle Far Globals */
1976 : # if __option(far_data)
1977 : /* Far globals follow the QD globals: */
1978 : GC_add_roots_inner((ptr_t)LMGetCurrentA5(),
1979 : (ptr_t)GC_MacGetDataEnd(), FALSE);
1980 : # endif
1981 : # else
1982 : extern char __data_start__[], __data_end__[];
1983 : GC_add_roots_inner((ptr_t)&__data_start__,
1984 : (ptr_t)&__data_end__, FALSE);
1985 : # endif /* __POWERPC__ */
1986 : # endif /* __MWERKS__ */
1987 : # endif /* !THINK_C */
1988 : }
1989 : # endif /* MACOS */
1990 :
1991 : /* Dynamic libraries are added at every collection, since they may */
1992 : /* change. */
1993 0 : }
1994 :
1995 : # endif /* !AMIGA */
1996 : # endif /* !MSWIN32 && !MSWINCE */
1997 : # endif /* !OS2 */
1998 :
1999 : /*
2000 : * Auxiliary routines for obtaining memory from the OS.
2001 : */
2002 :
2003 : # if !defined(OS2) && !defined(PCR) && !defined(AMIGA) \
2004 : && !defined(USE_WINALLOC) && !defined(MACOS) && !defined(DOS4GW) \
2005 : && !defined(NONSTOP) && !defined(SN_TARGET_PS3) && !defined(RTEMS) \
2006 : && !defined(__CC_ARM)
2007 :
2008 : # define SBRK_ARG_T ptrdiff_t
2009 :
2010 : #if defined(MMAP_SUPPORTED)
2011 :
2012 : #ifdef USE_MMAP_FIXED
2013 : # define GC_MMAP_FLAGS MAP_FIXED | MAP_PRIVATE
2014 : /* Seems to yield better performance on Solaris 2, but can */
2015 : /* be unreliable if something is already mapped at the address. */
2016 : #else
2017 : # define GC_MMAP_FLAGS MAP_PRIVATE
2018 : #endif
2019 :
2020 : #ifdef USE_MMAP_ANON
2021 : # define zero_fd -1
2022 : # if defined(MAP_ANONYMOUS)
2023 : # define OPT_MAP_ANON MAP_ANONYMOUS
2024 : # else
2025 : # define OPT_MAP_ANON MAP_ANON
2026 : # endif
2027 : #else
2028 : static int zero_fd;
2029 : # define OPT_MAP_ANON 0
2030 : #endif
2031 :
2032 : #ifndef HEAP_START
2033 : # define HEAP_START ((ptr_t)0)
2034 : #endif
2035 :
2036 : #ifdef SYMBIAN
2037 : extern char* GC_get_private_path_and_zero_file(void);
2038 : #endif
2039 :
2040 0 : STATIC ptr_t GC_unix_mmap_get_mem(word bytes)
2041 : {
2042 : void *result;
2043 : static ptr_t last_addr = HEAP_START;
2044 :
2045 : # ifndef USE_MMAP_ANON
2046 : static GC_bool initialized = FALSE;
2047 :
2048 0 : if (!EXPECT(initialized, TRUE)) {
2049 : # ifdef SYMBIAN
2050 : char* path = GC_get_private_path_and_zero_file();
2051 : zero_fd = open(path, O_RDWR | O_CREAT, 0666);
2052 : free(path);
2053 : # else
2054 0 : zero_fd = open("/dev/zero", O_RDONLY);
2055 : # endif
2056 0 : if (zero_fd == -1)
2057 0 : ABORT("Could not open /dev/zero");
2058 :
2059 0 : fcntl(zero_fd, F_SETFD, FD_CLOEXEC);
2060 0 : initialized = TRUE;
2061 : }
2062 : # endif
2063 :
2064 0 : if (bytes & (GC_page_size - 1)) ABORT("Bad GET_MEM arg");
2065 0 : result = mmap(last_addr, bytes, (PROT_READ | PROT_WRITE)
2066 0 : | (GC_pages_executable ? PROT_EXEC : 0),
2067 : GC_MMAP_FLAGS | OPT_MAP_ANON, zero_fd, 0/* offset */);
2068 : # undef IGNORE_PAGES_EXECUTABLE
2069 :
2070 0 : if (result == MAP_FAILED) return(0);
2071 0 : last_addr = (ptr_t)result + bytes + GC_page_size - 1;
2072 0 : last_addr = (ptr_t)((word)last_addr & ~(GC_page_size - 1));
2073 : # if !defined(LINUX)
2074 : if (last_addr == 0) {
2075 : /* Oops. We got the end of the address space. This isn't */
2076 : /* usable by arbitrary C code, since one-past-end pointers */
2077 : /* don't work, so we discard it and try again. */
2078 : munmap(result, (size_t)(-GC_page_size) - (size_t)result);
2079 : /* Leave last page mapped, so we can't repeat. */
2080 : return GC_unix_mmap_get_mem(bytes);
2081 : }
2082 : # else
2083 : GC_ASSERT(last_addr != 0);
2084 : # endif
2085 0 : if (((word)result % HBLKSIZE) != 0)
2086 0 : ABORT(
2087 : "GC_unix_get_mem: Memory returned by mmap is not aligned to HBLKSIZE.");
2088 0 : return((ptr_t)result);
2089 : }
2090 :
2091 : # endif /* MMAP_SUPPORTED */
2092 :
2093 : #if defined(USE_MMAP)
2094 : ptr_t GC_unix_get_mem(word bytes)
2095 : {
2096 : return GC_unix_mmap_get_mem(bytes);
2097 : }
2098 : #else /* !USE_MMAP */
2099 :
2100 1284 : STATIC ptr_t GC_unix_sbrk_get_mem(word bytes)
2101 : {
2102 : ptr_t result;
2103 : # ifdef IRIX5
2104 : /* Bare sbrk isn't thread safe. Play by malloc rules. */
2105 : /* The equivalent may be needed on other systems as well. */
2106 : __LOCK_MALLOC();
2107 : # endif
2108 : {
2109 1284 : ptr_t cur_brk = (ptr_t)sbrk(0);
2110 1284 : SBRK_ARG_T lsbs = (word)cur_brk & (GC_page_size-1);
2111 :
2112 1284 : if ((SBRK_ARG_T)bytes < 0) {
2113 0 : result = 0; /* too big */
2114 0 : goto out;
2115 : }
2116 1284 : if (lsbs != 0) {
2117 0 : if ((ptr_t)sbrk(GC_page_size - lsbs) == (ptr_t)(-1)) {
2118 0 : result = 0;
2119 0 : goto out;
2120 : }
2121 : }
2122 : # ifdef ADD_HEAP_GUARD_PAGES
2123 : /* This is useful for catching severe memory overwrite problems that */
2124 : /* span heap sections. It shouldn't otherwise be turned on. */
2125 : {
2126 : ptr_t guard = (ptr_t)sbrk((SBRK_ARG_T)GC_page_size);
2127 : if (mprotect(guard, GC_page_size, PROT_NONE) != 0)
2128 : ABORT("ADD_HEAP_GUARD_PAGES: mprotect failed");
2129 : }
2130 : # endif /* ADD_HEAP_GUARD_PAGES */
2131 1284 : result = (ptr_t)sbrk((SBRK_ARG_T)bytes);
2132 1284 : if (result == (ptr_t)(-1)) result = 0;
2133 : }
2134 : out:
2135 : # ifdef IRIX5
2136 : __UNLOCK_MALLOC();
2137 : # endif
2138 1284 : return(result);
2139 : }
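: 
: /* Worked example of the alignment fix-up above (illustrative): with */
: /* GC_page_size = 0x1000 and cur_brk = 0x804a123, lsbs = 0x123, so   */
: /* the extra sbrk(0x1000 - 0x123) call advances the break to         */
: /* 0x804b000 before the real allocation is attempted.                */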
2140 :
2141 1284 : ptr_t GC_unix_get_mem(word bytes)
2142 : {
2143 : # if defined(MMAP_SUPPORTED)
2144 : /* By default, we try both sbrk and mmap, in that order. */
2145 : static GC_bool sbrk_failed = FALSE;
2146 1284 : ptr_t result = 0;
2147 :
2148 1284 : if (!sbrk_failed) result = GC_unix_sbrk_get_mem(bytes);
2149 1284 : if (0 == result) {
2150 0 : sbrk_failed = TRUE;
2151 0 : result = GC_unix_mmap_get_mem(bytes);
2152 : }
2153 1284 : if (0 == result) {
2154 : /* Try sbrk again, in case sbrk memory became available. */
2155 0 : result = GC_unix_sbrk_get_mem(bytes);
2156 : }
2157 1284 : return result;
2158 : # else /* !MMAP_SUPPORTED */
2159 : return GC_unix_sbrk_get_mem(bytes);
2160 : # endif
2161 : }
2162 :
2163 : #endif /* !USE_MMAP */
2164 :
2165 : # endif /* UN*X */
2166 :
2167 : # ifdef OS2
2168 :
2169 : void * os2_alloc(size_t bytes)
2170 : {
2171 : void * result;
2172 :
2173 : if (DosAllocMem(&result, bytes, (PAG_READ | PAG_WRITE | PAG_COMMIT)
2174 : | (GC_pages_executable ? PAG_EXECUTE : 0))
2175 : != NO_ERROR) {
2176 : return(0);
2177 : }
2178 : /* FIXME: What's the purpose of this recursion? (Probably: if */
2179 : /* DosAllocMem returns memory at address 0, just retry once.) */
2180 : if (result == 0) return(os2_alloc(bytes));
2181 : return(result);
2182 : }
2183 :
2184 : # endif /* OS2 */
2185 :
2186 : #ifdef MSWINCE
2187 : ptr_t GC_wince_get_mem(word bytes)
2188 : {
2189 : ptr_t result = 0; /* initialized to prevent warning. */
2190 : word i;
2191 :
2192 : /* Round up allocation size to multiple of page size */
2193 : bytes = (bytes + GC_page_size-1) & ~(GC_page_size-1);
2194 :
2195 : /* Try to find reserved, uncommitted pages */
2196 : for (i = 0; i < GC_n_heap_bases; i++) {
2197 : if (((word)(-(signed_word)GC_heap_lengths[i])
2198 : & (GC_sysinfo.dwAllocationGranularity-1))
2199 : >= bytes) {
2200 : result = GC_heap_bases[i] + GC_heap_lengths[i];
2201 : break;
2202 : }
2203 : }
2204 :
2205 : if (i == GC_n_heap_bases) {
2206 : /* Reserve more pages */
2207 : word res_bytes = (bytes + GC_sysinfo.dwAllocationGranularity-1)
2208 : & ~(GC_sysinfo.dwAllocationGranularity-1);
2209 : /* If we ever support MPROTECT_VDB here, we will probably need to */
2210 : /* ensure that res_bytes is strictly > bytes, so that VirtualProtect */
2211 : /* never spans regions. It seems to be OK for a VirtualFree */
2212 : /* argument to span regions, so we should be OK for now. */
2213 : result = (ptr_t) VirtualAlloc(NULL, res_bytes,
2214 : MEM_RESERVE | MEM_TOP_DOWN,
2215 : GC_pages_executable ? PAGE_EXECUTE_READWRITE :
2216 : PAGE_READWRITE);
2217 : if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
2218 : /* If I read the documentation correctly, this can */
2219 : /* only happen if HBLKSIZE > 64k or not a power of 2. */
2220 : if (GC_n_heap_bases >= MAX_HEAP_SECTS) ABORT("Too many heap sections");
2221 : if (result == NULL) return NULL;
2222 : GC_heap_bases[GC_n_heap_bases] = result;
2223 : GC_heap_lengths[GC_n_heap_bases] = 0;
2224 : GC_n_heap_bases++;
2225 : }
2226 :
2227 : /* Commit pages */
2228 : result = (ptr_t) VirtualAlloc(result, bytes, MEM_COMMIT,
2229 : GC_pages_executable ? PAGE_EXECUTE_READWRITE :
2230 : PAGE_READWRITE);
2231 : # undef IGNORE_PAGES_EXECUTABLE
2232 :
2233 : if (result != NULL) {
2234 : if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
2235 : GC_heap_lengths[i] += bytes;
2236 : }
2237 :
2238 : return(result);
2239 : }
2240 :
2241 : #elif defined(USE_WINALLOC) || defined(CYGWIN32)
2242 :
2243 : # ifdef USE_GLOBAL_ALLOC
2244 : # define GLOBAL_ALLOC_TEST 1
2245 : # else
2246 : # define GLOBAL_ALLOC_TEST GC_no_win32_dlls
2247 : # endif
2248 :
2249 : # if defined(GC_USE_MEM_TOP_DOWN) && defined(USE_WINALLOC)
2250 : DWORD GC_mem_top_down = MEM_TOP_DOWN;
2251 : /* Use GC_USE_MEM_TOP_DOWN for better 64-bit */
2252 : /* testing. Otherwise all addresses tend to */
2253 : /* end up in first 4GB, hiding bugs. */
2254 : # else
2255 : # define GC_mem_top_down 0
2256 : # endif /* !GC_USE_MEM_TOP_DOWN */
2257 :
2258 : ptr_t GC_win32_get_mem(word bytes)
2259 : {
2260 : ptr_t result;
2261 :
2262 : # ifndef USE_WINALLOC
2263 : result = GC_unix_get_mem(bytes);
2264 : # else
2265 : # ifdef MSWIN32
2266 : if (GLOBAL_ALLOC_TEST) {
2267 : /* VirtualAlloc doesn't like PAGE_EXECUTE_READWRITE. */
2268 : /* There are also unconfirmed rumors of other */
2269 : /* problems, so we dodge the issue. */
2270 : result = (ptr_t) GlobalAlloc(0, bytes + HBLKSIZE);
2271 : result = (ptr_t)(((word)result + HBLKSIZE - 1) & ~(HBLKSIZE-1));
2272 : } else
2273 : # endif
2274 : /* else */ {
2275 : /* VirtualProtect only works on regions returned by a */
2276 : /* single VirtualAlloc call. Thus we allocate one */
2277 : /* extra page, which will prevent merging of blocks */
2278 : /* in separate regions, and eliminate any temptation */
2279 : /* to call VirtualProtect on a range spanning regions. */
2280 : /* This wastes a small amount of memory, and risks */
2281 : /* increased fragmentation. But better alternatives */
2282 : /* would require effort. */
2283 : # ifdef MPROTECT_VDB
2284 : /* We can't check for GC_incremental here (because */
2285 : /* GC_enable_incremental() might be called some time */
2286 : /* after the GC initialization). */
2287 : # ifdef GWW_VDB
2288 : # define VIRTUAL_ALLOC_PAD (GC_GWW_AVAILABLE() ? 0 : 1)
2289 : # else
2290 : # define VIRTUAL_ALLOC_PAD 1
2291 : # endif
2292 : # else
2293 : # define VIRTUAL_ALLOC_PAD 0
2294 : # endif
2295 : /* Pass the MEM_WRITE_WATCH only if GetWriteWatch-based */
2296 : /* VDBs are enabled and the GetWriteWatch function is */
2297 : /* available. Otherwise we waste resources or possibly */
2298 : /* cause VirtualAlloc to fail (observed in Windows 2000 */
2299 : /* SP2). */
2300 : result = (ptr_t) VirtualAlloc(NULL, bytes + VIRTUAL_ALLOC_PAD,
2301 : GetWriteWatch_alloc_flag
2302 : | (MEM_COMMIT | MEM_RESERVE)
2303 : | GC_mem_top_down,
2304 : GC_pages_executable ? PAGE_EXECUTE_READWRITE :
2305 : PAGE_READWRITE);
2306 : # undef IGNORE_PAGES_EXECUTABLE
2307 : }
2308 : # endif /* USE_WINALLOC */
2309 : if (HBLKDISPL(result) != 0) ABORT("Bad VirtualAlloc result");
2310 : /* If I read the documentation correctly, this can */
2311 : /* only happen if HBLKSIZE > 64k or not a power of 2. */
2312 : if (GC_n_heap_bases >= MAX_HEAP_SECTS) ABORT("Too many heap sections");
2313 : if (0 != result) GC_heap_bases[GC_n_heap_bases++] = result;
2314 : return(result);
2315 : }
2316 :
2317 : GC_API void GC_CALL GC_win32_free_heap(void)
2318 : {
2319 : # ifndef CYGWIN32
2320 : if (GLOBAL_ALLOC_TEST)
2321 : # endif
2322 : {
2323 : while (GC_n_heap_bases > 0) {
: GC_n_heap_bases--; /* decrement before use; avoids leaving */
: /* GC_n_heap_bases at (word)-1 when the loop exits. */
2324 : # ifdef CYGWIN32
2325 : /* FIXME: Is it OK to use non-GC free() here? */
2326 : # else
2327 : GlobalFree(GC_heap_bases[GC_n_heap_bases]);
2328 : # endif
2329 : GC_heap_bases[GC_n_heap_bases] = 0;
2330 : }
2331 : } /* else */
2332 : # ifndef CYGWIN32
2333 : else {
2334 : /* Avoiding VirtualAlloc leak. */
2335 : while (GC_n_heap_bases > 0) {
2336 : VirtualFree(GC_heap_bases[--GC_n_heap_bases], 0, MEM_RELEASE);
2337 : GC_heap_bases[GC_n_heap_bases] = 0;
2338 : }
2339 : }
2340 : # endif
2341 : }
2342 : #endif /* USE_WINALLOC || CYGWIN32 */
2343 :
2344 : #ifdef AMIGA
2345 : # define GC_AMIGA_AM
2346 : # include "extra/AmigaOS.c"
2347 : # undef GC_AMIGA_AM
2348 : #endif
2349 :
2350 : #ifdef USE_MUNMAP
2351 :
2352 : /* For now, this only works on Win32/WinCE and some Unix-like */
2353 : /* systems. If you have something else, don't define */
2354 : /* USE_MUNMAP. */
2355 :
2356 : #if !defined(MSWIN32) && !defined(MSWINCE)
2357 : # include <unistd.h>
2358 : # include <sys/mman.h>
2359 : # include <sys/stat.h>
2360 : # include <sys/types.h>
2361 : #endif
2362 :
2363 : /* Compute a page aligned starting address for the unmap */
2364 : /* operation on a block of size bytes starting at start. */
2365 : /* Return 0 if the block is too small to make this feasible. */
2366 : STATIC ptr_t GC_unmap_start(ptr_t start, size_t bytes)
2367 : {
2368 : ptr_t result;
2369 : /* Round start to next page boundary. */
2370 : result = (ptr_t)((word)(start + GC_page_size - 1) & ~(GC_page_size - 1));
2371 : if ((word)(result + GC_page_size) > (word)(start + bytes)) return 0;
2372 : return result;
2373 : }
2374 :
2375 : /* Compute end address for an unmap operation on the indicated */
2376 : /* block. */
2377 : STATIC ptr_t GC_unmap_end(ptr_t start, size_t bytes)
2378 : {
2379 : return (ptr_t)((word)(start + bytes) & ~(GC_page_size - 1));
2380 : }
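: 
: /* Worked example (illustrative): with GC_page_size = 0x1000,        */
: /* GC_unmap_start(0x20010, 0x3000) = 0x21000 and                     */
: /* GC_unmap_end(0x20010, 0x3000) = 0x23000, so only the 0x2000 bytes */
: /* of whole pages strictly inside the block are ever unmapped.       */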
2381 :
2382 : /* Under Win32/WinCE we commit (map) and decommit (unmap) */
2383 : /* memory using VirtualAlloc and VirtualFree. These functions */
2384 : /* work on individual allocations of virtual memory, made */
2385 : /* previously using VirtualAlloc with the MEM_RESERVE flag. */
2386 : /* The ranges we need to (de)commit may span several of these */
2387 : /* allocations; therefore we use VirtualQuery to check */
2388 : /* allocation lengths, and split up the range as necessary. */
2389 :
2390 : /* We assume that GC_remap is called on exactly the same range */
2391 : /* as a previous call to GC_unmap. It is safe to consistently */
2392 : /* round the endpoints in both places. */
2393 : GC_INNER void GC_unmap(ptr_t start, size_t bytes)
2394 : {
2395 : ptr_t start_addr = GC_unmap_start(start, bytes);
2396 : ptr_t end_addr = GC_unmap_end(start, bytes);
2397 : word len = end_addr - start_addr;
2398 :
2399 : if (0 == start_addr) return;
2400 : # ifdef USE_WINALLOC
2401 : while (len != 0) {
2402 : MEMORY_BASIC_INFORMATION mem_info;
2403 : GC_word free_len;
2404 :
2405 : if (VirtualQuery(start_addr, &mem_info, sizeof(mem_info))
2406 : != sizeof(mem_info))
2407 : ABORT("Weird VirtualQuery result");
2408 : free_len = (len < mem_info.RegionSize) ? len : mem_info.RegionSize;
2409 : if (!VirtualFree(start_addr, free_len, MEM_DECOMMIT))
2410 : ABORT("VirtualFree failed");
2411 : GC_unmapped_bytes += free_len;
2412 : start_addr += free_len;
2413 : len -= free_len;
2414 : }
2415 : # else
2416 : /* We immediately remap it to prevent an intervening mmap from */
2417 : /* accidentally grabbing the same address space. */
2418 : {
2419 : void * result;
2420 :
2421 : result = mmap(start_addr, len, PROT_NONE,
2422 : MAP_PRIVATE | MAP_FIXED | OPT_MAP_ANON,
2423 : zero_fd, 0/* offset */);
2424 : if (result != (void *)start_addr)
2425 : ABORT("mmap(PROT_NONE) failed");
2426 : }
2427 : GC_unmapped_bytes += len;
2428 : # endif
2429 : }
2430 :
2431 : GC_INNER void GC_remap(ptr_t start, size_t bytes)
2432 : {
2433 : ptr_t start_addr = GC_unmap_start(start, bytes);
2434 : ptr_t end_addr = GC_unmap_end(start, bytes);
2435 : word len = end_addr - start_addr;
2436 : if (0 == start_addr) return;
2437 :
2438 : /* FIXME: Handle out-of-memory correctly (at least for Win32) */
2439 : # ifdef USE_WINALLOC
2440 : while (len != 0) {
2441 : MEMORY_BASIC_INFORMATION mem_info;
2442 : GC_word alloc_len;
2443 : ptr_t result;
2444 :
2445 : if (VirtualQuery(start_addr, &mem_info, sizeof(mem_info))
2446 : != sizeof(mem_info))
2447 : ABORT("Weird VirtualQuery result");
2448 : alloc_len = (len < mem_info.RegionSize) ? len : mem_info.RegionSize;
2449 : result = VirtualAlloc(start_addr, alloc_len, MEM_COMMIT,
2450 : GC_pages_executable ? PAGE_EXECUTE_READWRITE :
2451 : PAGE_READWRITE);
2452 : if (result != start_addr) {
2453 : if (GetLastError() == ERROR_NOT_ENOUGH_MEMORY ||
2454 : GetLastError() == ERROR_OUTOFMEMORY) {
2455 : ABORT("Not enough memory to process remapping");
2456 : } else {
2457 : ABORT("VirtualAlloc remapping failed");
2458 : }
2459 : }
2460 : GC_unmapped_bytes -= alloc_len;
2461 : start_addr += alloc_len;
2462 : len -= alloc_len;
2463 : }
2464 : # else
2465 : /* It was already remapped with PROT_NONE. */
2466 : {
2467 : # ifdef NACL
2468 : /* NaCl does not expose mprotect, but mmap should work fine. */
2469 : void *mmap_result = mmap(start_addr, len, (PROT_READ | PROT_WRITE)
2470 : | (GC_pages_executable ? PROT_EXEC : 0),
2471 : MAP_PRIVATE | MAP_FIXED | OPT_MAP_ANON,
2472 : zero_fd, 0 /* offset */);
2473 : if (mmap_result != (void *)start_addr)
2474 : ABORT("mmap as mprotect failed");
2475 : # else
2476 : if (mprotect(start_addr, len, (PROT_READ | PROT_WRITE)
2477 : | (GC_pages_executable ? PROT_EXEC : 0)) != 0) {
2478 : ABORT_ARG3("mprotect remapping failed",
2479 : " at %p (length %lu), errcode= %d",
2480 : start_addr, (unsigned long)len, errno);
2481 : }
2482 : # endif /* !NACL */
2483 : }
2484 : # undef IGNORE_PAGES_EXECUTABLE
2485 : GC_unmapped_bytes -= len;
2486 : # endif
2487 : }
2488 :
2489 : /* Two adjacent blocks have already been unmapped and are about to */
2490 : /* be merged. Unmap the whole block. This typically requires */
2491 : /* that we unmap a small section in the middle that was not previously */
2492 : /* unmapped due to alignment constraints. */
2493 : GC_INNER void GC_unmap_gap(ptr_t start1, size_t bytes1, ptr_t start2,
2494 : size_t bytes2)
2495 : {
2496 : ptr_t start1_addr = GC_unmap_start(start1, bytes1);
2497 : ptr_t end1_addr = GC_unmap_end(start1, bytes1);
2498 : ptr_t start2_addr = GC_unmap_start(start2, bytes2);
2499 : ptr_t start_addr = end1_addr;
2500 : ptr_t end_addr = start2_addr;
2501 : size_t len;
2502 :
2503 : GC_ASSERT(start1 + bytes1 == start2);
2504 : if (0 == start1_addr) start_addr = GC_unmap_start(start1, bytes1 + bytes2);
2505 : if (0 == start2_addr) end_addr = GC_unmap_end(start1, bytes1 + bytes2);
2506 : if (0 == start_addr) return;
2507 : len = end_addr - start_addr;
2508 : # ifdef USE_WINALLOC
2509 : while (len != 0) {
2510 : MEMORY_BASIC_INFORMATION mem_info;
2511 : GC_word free_len;
2512 :
2513 : if (VirtualQuery(start_addr, &mem_info, sizeof(mem_info))
2514 : != sizeof(mem_info))
2515 : ABORT("Weird VirtualQuery result");
2516 : free_len = (len < mem_info.RegionSize) ? len : mem_info.RegionSize;
2517 : if (!VirtualFree(start_addr, free_len, MEM_DECOMMIT))
2518 : ABORT("VirtualFree failed");
2519 : GC_unmapped_bytes += free_len;
2520 : start_addr += free_len;
2521 : len -= free_len;
2522 : }
2523 : # else
2524 : if (len != 0) {
2525 : /* Immediately remap as above. */
2526 : void * result;
2527 : result = mmap(start_addr, len, PROT_NONE,
2528 : MAP_PRIVATE | MAP_FIXED | OPT_MAP_ANON,
2529 : zero_fd, 0/* offset */);
2530 : if (result != (void *)start_addr)
2531 : ABORT("mmap(PROT_NONE) failed");
2532 : }
2533 : GC_unmapped_bytes += len;
2534 : # endif
2535 : }
2536 :
2537 : #endif /* USE_MUNMAP */
2538 :
2539 : /* Routine for pushing any additional roots. In THREADS */
2540 : /* environment, this is also responsible for marking from */
2541 : /* thread stacks. */
2542 : #ifndef THREADS
2543 : GC_push_other_roots_proc GC_push_other_roots = 0;
2544 : #else /* THREADS */
2545 :
2546 : # ifdef PCR
2547 : PCR_ERes GC_push_thread_stack(PCR_Th_T *t, PCR_Any dummy)
2548 : {
2549 : struct PCR_ThCtl_TInfoRep info;
2550 : PCR_ERes result;
2551 :
2552 : info.ti_stkLow = info.ti_stkHi = 0;
2553 : result = PCR_ThCtl_GetInfo(t, &info);
2554 : GC_push_all_stack((ptr_t)(info.ti_stkLow), (ptr_t)(info.ti_stkHi));
2555 : return(result);
2556 : }
2557 :
2558 : /* Push the contents of an old object. We treat this as stack */
2559 : /* data only because that makes it robust against mark stack */
2560 : /* overflow. */
2561 : PCR_ERes GC_push_old_obj(void *p, size_t size, PCR_Any data)
2562 : {
2563 : GC_push_all_stack((ptr_t)p, (ptr_t)p + size);
2564 : return(PCR_ERes_okay);
2565 : }
2566 :
2567 : extern struct PCR_MM_ProcsRep * GC_old_allocator;
2568 : /* defined in pcr_interface.c. */
2569 :
2570 : STATIC void GC_CALLBACK GC_default_push_other_roots(void)
2571 : {
2572 : /* Traverse data allocated by previous memory managers. */
2573 : if ((*(GC_old_allocator->mmp_enumerate))(PCR_Bool_false,
2574 : GC_push_old_obj, 0)
2575 : != PCR_ERes_okay) {
2576 : ABORT("Old object enumeration failed");
2577 : }
2578 : /* Traverse all thread stacks. */
2579 : if (PCR_ERes_IsErr(
2580 : PCR_ThCtl_ApplyToAllOtherThreads(GC_push_thread_stack,0))
2581 : || PCR_ERes_IsErr(GC_push_thread_stack(PCR_Th_CurrThread(), 0))) {
2582 : ABORT("Thread stack marking failed");
2583 : }
2584 : }
2585 :
2586 : # endif /* PCR */
2587 :
2588 : # if defined(GC_PTHREADS) || defined(GC_WIN32_THREADS)
2589 244 : STATIC void GC_CALLBACK GC_default_push_other_roots(void)
2590 : {
2591 244 : GC_push_all_stacks();
2592 244 : }
2593 : # endif /* GC_WIN32_THREADS || GC_PTHREADS */
2594 :
2595 : # ifdef SN_TARGET_PS3
2596 : STATIC void GC_CALLBACK GC_default_push_other_roots(void)
2597 : {
2598 : ABORT("GC_default_push_other_roots is not implemented");
2599 : }
2600 :
2601 : void GC_push_thread_structures(void)
2602 : {
2603 : ABORT("GC_push_thread_structures is not implemented");
2604 : }
2605 : # endif /* SN_TARGET_PS3 */
2606 :
2607 : GC_push_other_roots_proc GC_push_other_roots = GC_default_push_other_roots;
2608 : #endif /* THREADS */
2609 :
2610 0 : GC_API void GC_CALL GC_set_push_other_roots(GC_push_other_roots_proc fn)
2611 : {
2612 0 : GC_push_other_roots = fn;
2613 0 : }
2614 :
2615 0 : GC_API GC_push_other_roots_proc GC_CALL GC_get_push_other_roots(void)
2616 : {
2617 0 : return GC_push_other_roots;
2618 : }
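: 
: /* Hedged client-side sketch (not from the original source): a hook */
: /* that adds custom roots while chaining to the previous one.       */
: # if 0
:   static GC_push_other_roots_proc GC_old_push_proc;
: 
:   static void GC_CALLBACK GC_my_push_other_roots(void)
:   {
:     if (GC_old_push_proc != 0) (*GC_old_push_proc)();
:     /* ... push client-specific root regions here ... */
:   }
: 
:   /* At initialization:                                  */
:   /*   GC_old_push_proc = GC_get_push_other_roots();     */
:   /*   GC_set_push_other_roots(GC_my_push_other_roots);  */
: # endif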
2619 :
2620 : /*
2621 : * Routines for accessing dirty bits on virtual pages.
2622 : * There are six ways to maintain this information:
2623 : * DEFAULT_VDB: A simple dummy implementation that treats every page
2624 : * as possibly dirty. This makes incremental collection
2625 : * useless, but the implementation is still correct.
2626 : * MANUAL_VDB: Stacks and static data are always considered dirty.
2627 : * Heap pages are considered dirty if GC_dirty(p) has been
2628 : * called on some pointer p pointing to somewhere inside
2629 : * an object on that page. A GC_dirty() call on a large
2630 : * object directly dirties only a single page, but for
2631 : * MANUAL_VDB we are careful to treat an object with a dirty
2632 : * page as completely dirty.
2633 : * In order to avoid races, an object must be marked dirty
2634 : * after it is written, and a reference to the object
2635 : * must be kept on a stack or in a register in the interim.
2636 : * With threads enabled, an object directly reachable from the
2637 : * stack at the time of a collection is treated as dirty.
2638 : * In single-threaded mode, it suffices to ensure that no
2639 : * collection can take place between the pointer assignment
2640 : * and the GC_dirty() call.
2641 : * PCR_VDB: Use PPCR's virtual dirty bit facility.
2642 : * PROC_VDB: Use the /proc facility for reading dirty bits. Only
2643 : * works under some SVR4 variants. Even then, it may be
2644 : * too slow to be entirely satisfactory. Requires reading
2645 : * dirty bits for entire address space. Implementations tend
2646 : * to assume that the client is a (slow) debugger.
2647 : * MPROTECT_VDB:Protect pages and then catch the faults to keep track of
2648 : * dirtied pages. The implementation (and implementability)
2649 : * is highly system dependent. This usually fails when system
2650 : * calls write to a protected page. We prevent the read system
2651 : * call from doing so. It is the client's responsibility to
2652 : * make sure that other system calls are similarly protected
2653 : * or write only to the stack.
2654 : * GWW_VDB: Use the Win32 GetWriteWatch functions, if available, to
2655 : * read dirty bits. In case it is not available (because we
2656 : * are running on Windows 95, Windows 2000 or earlier),
2657 : * MPROTECT_VDB may be defined as a fallback strategy.
2658 : */
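: /* Whichever strategy is compiled in, a client turns it on through */
: /* the public API (a sketch; incremental mode must be supported):  */
: /*   GC_INIT();                                                    */
: /*   GC_enable_incremental();  -- selects one of the VDBs above    */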
2659 : #ifndef GC_DISABLE_INCREMENTAL
2660 : GC_INNER GC_bool GC_dirty_maintained = FALSE;
2661 : #endif
2662 :
2663 : #if defined(PROC_VDB) || defined(GWW_VDB)
2664 : /* Add all pages in pht2 to pht1 */
2665 : STATIC void GC_or_pages(page_hash_table pht1, page_hash_table pht2)
2666 : {
2667 : register unsigned i;
2668 : for (i = 0; i < PHT_SIZE; i++) pht1[i] |= pht2[i];
2669 : }
2670 :
2671 : # ifdef MPROTECT_VDB
2672 : STATIC GC_bool GC_gww_page_was_dirty(struct hblk * h)
2673 : # else
2674 : GC_INNER GC_bool GC_page_was_dirty(struct hblk * h)
2675 : # endif
2676 : {
2677 : register word index;
2678 : if (HDR(h) == 0)
2679 : return TRUE;
2680 : index = PHT_HASH(h);
2681 : return get_pht_entry_from_index(GC_grungy_pages, index);
2682 : }
2683 :
2684 : # if defined(CHECKSUMS) || defined(PROC_VDB)
2685 : /* Used only if GWW_VDB. */
2686 : # ifdef MPROTECT_VDB
2687 : STATIC GC_bool GC_gww_page_was_ever_dirty(struct hblk * h)
2688 : # else
2689 : GC_INNER GC_bool GC_page_was_ever_dirty(struct hblk * h)
2690 : # endif
2691 : {
2692 : register word index;
2693 : if (HDR(h) == 0)
2694 : return TRUE;
2695 : index = PHT_HASH(h);
2696 : return get_pht_entry_from_index(GC_written_pages, index);
2697 : }
2698 : # endif /* CHECKSUMS || PROC_VDB */
2699 :
2700 : # ifndef MPROTECT_VDB
2701 : /* Ignore write hints. They don't help us here. */
2702 : GC_INNER void GC_remove_protection(struct hblk * h GC_ATTR_UNUSED,
2703 : word nblocks GC_ATTR_UNUSED,
2704 : GC_bool is_ptrfree GC_ATTR_UNUSED) {}
2705 : # endif
2706 :
2707 : #endif /* PROC_VDB || GWW_VDB */
2708 :
2709 : #ifdef GWW_VDB
2710 :
2711 : # define GC_GWW_BUF_LEN (MAXHINCR * HBLKSIZE / 4096 /* X86 page size */)
2712 : /* Still susceptible to overflow, if there are very large allocations, */
2713 : /* and everything is dirty. */
2714 : static PVOID gww_buf[GC_GWW_BUF_LEN];
2715 :
2716 : # ifdef MPROTECT_VDB
2717 : GC_INNER GC_bool GC_gww_dirty_init(void)
2718 : {
2719 : detect_GetWriteWatch();
2720 : return GC_GWW_AVAILABLE();
2721 : }
2722 : # else
2723 : GC_INNER void GC_dirty_init(void)
2724 : {
2725 : detect_GetWriteWatch();
2726 : GC_dirty_maintained = GC_GWW_AVAILABLE();
2727 : }
2728 : # endif /* !MPROTECT_VDB */
2729 :
2730 : # ifdef MPROTECT_VDB
2731 : STATIC void GC_gww_read_dirty(void)
2732 : # else
2733 : GC_INNER void GC_read_dirty(void)
2734 : # endif
2735 : {
2736 : word i;
2737 :
2738 : BZERO(GC_grungy_pages, sizeof(GC_grungy_pages));
2739 :
2740 : for (i = 0; i != GC_n_heap_sects; ++i) {
2741 : GC_ULONG_PTR count;
2742 :
2743 : do {
2744 : PVOID * pages, * pages_end;
2745 : DWORD page_size;
2746 :
2747 : pages = gww_buf;
2748 : count = GC_GWW_BUF_LEN;
2749 : /* GetWriteWatch is documented as returning non-zero when it */
2750 : /* fails, but the documentation doesn't explicitly say why it */
2751 : /* would fail or what its behaviour will be if it fails. */
2752 : /* It does appear to fail, at least on recent W2K instances, if */
2753 : /* the underlying memory was not allocated with the appropriate */
2754 : /* flag. This is common if GC_enable_incremental is called */
2755 : /* shortly after GC initialization. To avoid modifying the */
2756 : /* interface, we silently work around such a failure, it only */
2757 : /* affects the initial (small) heap allocation. If there are */
2758 : /* more dirty pages than will fit in the buffer, this is not */
2759 : /* treated as a failure; we must check the page count in the */
2760 : /* loop condition. Since each partial call will reset the */
2761 : /* status of some pages, this should eventually terminate even */
2762 : /* in the overflow case. */
2763 : if (GetWriteWatch_func(WRITE_WATCH_FLAG_RESET,
2764 : GC_heap_sects[i].hs_start,
2765 : GC_heap_sects[i].hs_bytes,
2766 : pages,
2767 : &count,
2768 : &page_size) != 0) {
2769 : static int warn_count = 0;
2770 : unsigned j;
2771 : struct hblk * start = (struct hblk *)GC_heap_sects[i].hs_start;
2772 : static struct hblk *last_warned = 0;
2773 : size_t nblocks = divHBLKSZ(GC_heap_sects[i].hs_bytes);
2774 :
2775 : if (i != 0 && last_warned != start && warn_count++ < 5) {
2776 : last_warned = start;
2777 : WARN(
2778 : "GC_gww_read_dirty unexpectedly failed at %p: "
2779 : "Falling back to marking all pages dirty\n", start);
2780 : }
2781 : for (j = 0; j < nblocks; ++j) {
2782 : word hash = PHT_HASH(start + j);
2783 : set_pht_entry_from_index(GC_grungy_pages, hash);
2784 : }
2785 : count = 1; /* Done with this section. */
2786 : } else /* succeeded */ {
2787 : pages_end = pages + count;
2788 : while (pages != pages_end) {
2789 : struct hblk * h = (struct hblk *) *pages++;
2790 : struct hblk * h_end = (struct hblk *) ((char *) h + page_size);
2791 : do {
2792 : set_pht_entry_from_index(GC_grungy_pages, PHT_HASH(h));
2793 : } while ((word)(++h) < (word)h_end);
2794 : }
2795 : }
2796 : } while (count == GC_GWW_BUF_LEN);
2797 : /* FIXME: It's unclear from Microsoft's documentation if this loop */
2798 : /* is useful. We suspect the call just fails if the buffer fills */
2799 : /* up. But that should still be handled correctly. */
2800 : }
2801 :
2802 : GC_or_pages(GC_written_pages, GC_grungy_pages);
2803 : }
2804 : #endif /* GWW_VDB */
2805 :
2806 : #ifdef DEFAULT_VDB
2807 : /* All of the following assume the allocation lock is held. */
2808 :
2809 : /* The client asserts that unallocated pages in the heap are never */
2810 : /* written. */
2811 :
2812 : /* Initialize virtual dirty bit implementation. */
2813 : GC_INNER void GC_dirty_init(void)
2814 : {
2815 : GC_VERBOSE_LOG_PRINTF("Initializing DEFAULT_VDB...\n");
2816 : GC_dirty_maintained = TRUE;
2817 : }
2818 :
2819 : /* Retrieve system dirty bits for heap to a local buffer. */
2820 : /* Restore the system's notion of which pages are dirty. */
2821 : GC_INNER void GC_read_dirty(void) {}
2822 :
2823 : /* Is the HBLKSIZE sized page at h marked dirty in the local buffer? */
2824 : /* If the actual page size is different, this returns TRUE if any */
2825 : /* of the pages overlapping h are dirty. This routine may err on the */
2826 : /* side of labeling pages as dirty (and this implementation does). */
2827 : GC_INNER GC_bool GC_page_was_dirty(struct hblk * h GC_ATTR_UNUSED)
2828 : {
2829 : return(TRUE);
2830 : }
2831 :
2832 : /* The following two routines are typically less crucial. */
2833 : /* They matter most with large dynamic libraries, or if we can't */
2834 : /* accurately identify stacks, e.g. under Solaris 2.X. Otherwise the */
2835 : /* following default versions are adequate. */
2836 : # ifdef CHECKSUMS
2837 : /* Could any valid GC heap pointer ever have been written to this page? */
2838 : GC_INNER GC_bool GC_page_was_ever_dirty(struct hblk * h GC_ATTR_UNUSED)
2839 : {
2840 : return(TRUE);
2841 : }
2842 : # endif /* CHECKSUMS */
2843 :
2844 : /* A call that: */
2845 : /* I) hints that [h, h+nblocks) is about to be written. */
2846 : /* II) guarantees that protection is removed. */
2847 : /* (I) may speed up some dirty bit implementations. */
2848 : /* (II) may be essential if we need to ensure that */
2849 : /* pointer-free system call buffers in the heap are */
2850 : /* not protected. */
2851 : GC_INNER void GC_remove_protection(struct hblk * h GC_ATTR_UNUSED,
2852 : word nblocks GC_ATTR_UNUSED,
2853 : GC_bool is_ptrfree GC_ATTR_UNUSED) {}
2854 : #endif /* DEFAULT_VDB */
2855 :
2856 : #ifdef MANUAL_VDB
2857 : /* Initialize virtual dirty bit implementation. */
2858 : GC_INNER void GC_dirty_init(void)
2859 : {
2860 : GC_VERBOSE_LOG_PRINTF("Initializing MANUAL_VDB...\n");
2861 : /* GC_dirty_pages and GC_grungy_pages are already cleared. */
2862 : GC_dirty_maintained = TRUE;
2863 : }
2864 :
2865 : /* Retrieve system dirty bits for heap to a local buffer. */
2866 : /* Restore the system's notion of which pages are dirty. */
2867 : GC_INNER void GC_read_dirty(void)
2868 : {
2869 : BCOPY((word *)GC_dirty_pages, GC_grungy_pages,
2870 : (sizeof GC_dirty_pages));
2871 : BZERO((word *)GC_dirty_pages, (sizeof GC_dirty_pages));
2872 : }
2873 :
2874 : /* Is the HBLKSIZE sized page at h marked dirty in the local buffer? */
2875 : /* If the actual page size is different, this returns TRUE if any */
2876 : /* of the pages overlapping h are dirty. This routine may err on the */
2877 : /* side of labeling pages as dirty (and this implementation does). */
2878 : GC_INNER GC_bool GC_page_was_dirty(struct hblk *h)
2879 : {
2880 : register word index = PHT_HASH(h);
2881 : return(HDR(h) == 0 || get_pht_entry_from_index(GC_grungy_pages, index));
2882 : }
2883 :
2884 : # define async_set_pht_entry_from_index(db, index) \
2885 : set_pht_entry_from_index(db, index) /* for now */
2886 :
2887 : /* Mark the page containing p as dirty. Logically, this dirties the */
2888 : /* entire object. */
2889 : void GC_dirty(ptr_t p)
2890 : {
2891 : word index = PHT_HASH(p);
2892 : async_set_pht_entry_from_index(GC_dirty_pages, index);
2893 : }
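: 
: /* Hedged mutator-side sketch (not from the original source): per the */
: /* rules above, a pointer store under MANUAL_VDB looks like           */
: /*   obj -> field = q;                                                */
: /*   GC_dirty((ptr_t)obj);  -- obj still live in a register here     */
: /* so that a collection cannot see the store without the dirty bit.   */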
2894 :
2895 : GC_INNER void GC_remove_protection(struct hblk * h GC_ATTR_UNUSED,
2896 : word nblocks GC_ATTR_UNUSED,
2897 : GC_bool is_ptrfree GC_ATTR_UNUSED) {}
2898 :
2899 : # ifdef CHECKSUMS
2900 : /* Could any valid GC heap pointer ever have been written to this page? */
2901 : GC_INNER GC_bool GC_page_was_ever_dirty(struct hblk * h GC_ATTR_UNUSED)
2902 : {
2903 : /* FIXME - implement me. */
2904 : return(TRUE);
2905 : }
2906 : # endif /* CHECKSUMS */
2907 :
2908 : #endif /* MANUAL_VDB */
2909 :
2910 : #ifdef MPROTECT_VDB
2911 : /* See DEFAULT_VDB for interface descriptions. */
2912 :
2913 : /*
2914 : * This implementation maintains dirty bits itself by catching write
2915 : * faults and keeping track of them. We assume nobody else catches
2916 : * SIGBUS or SIGSEGV. We assume no write faults occur in system calls.
2917 : * This means that clients must ensure that system calls don't write
2918 : * to the write-protected heap. Probably the best way to do this is to
2919 : * ensure that system calls write at most to pointer-free objects in the
2920 : * heap, and do even that only if we are on a platform on which those
2921 : * are not protected. Another alternative is to wrap system calls
2922 : * (see the example for read below), but that approach is not taken
2923 : * by the current implementation.
2924 : * We assume the page size is a multiple of HBLKSIZE.
2925 : * We prefer them to be the same. We avoid protecting pointer-free
2926 : * objects only if they are the same.
2927 : */
2928 : # ifdef DARWIN
2929 : /* Using vm_protect (mach syscall) over mprotect (BSD syscall) seems to
2930 : decrease the likelihood of some of the problems described below. */
2931 : # include <mach/vm_map.h>
2932 : STATIC mach_port_t GC_task_self = 0;
2933 : # define PROTECT(addr,len) \
2934 : if (vm_protect(GC_task_self, (vm_address_t)(addr), (vm_size_t)(len), \
2935 : FALSE, VM_PROT_READ \
2936 : | (GC_pages_executable ? VM_PROT_EXECUTE : 0)) \
2937 : == KERN_SUCCESS) {} else ABORT("vm_protect(PROTECT) failed")
2938 : # define UNPROTECT(addr,len) \
2939 : if (vm_protect(GC_task_self, (vm_address_t)(addr), (vm_size_t)(len), \
2940 : FALSE, (VM_PROT_READ | VM_PROT_WRITE) \
2941 : | (GC_pages_executable ? VM_PROT_EXECUTE : 0)) \
2942 : == KERN_SUCCESS) {} else ABORT("vm_protect(UNPROTECT) failed")
2943 :
2944 : # elif !defined(USE_WINALLOC)
2945 : # include <sys/mman.h>
2946 : # include <signal.h>
2947 : # include <sys/syscall.h>
2948 :
2949 : # define PROTECT(addr, len) \
2950 : if (mprotect((caddr_t)(addr), (size_t)(len), \
2951 : PROT_READ \
2952 : | (GC_pages_executable ? PROT_EXEC : 0)) >= 0) { \
2953 : } else ABORT("mprotect failed")
2954 : # define UNPROTECT(addr, len) \
2955 : if (mprotect((caddr_t)(addr), (size_t)(len), \
2956 : (PROT_READ | PROT_WRITE) \
2957 : | (GC_pages_executable ? PROT_EXEC : 0)) >= 0) { \
2958 : } else ABORT(GC_pages_executable ? \
2959 : "un-mprotect executable page failed" \
2960 : " (probably disabled by OS)" : \
2961 : "un-mprotect failed")
2962 : # undef IGNORE_PAGES_EXECUTABLE
2963 :
2964 : # else /* USE_WINALLOC */
2965 : # ifndef MSWINCE
2966 : # include <signal.h>
2967 : # endif
2968 :
2969 : static DWORD protect_junk;
2970 : # define PROTECT(addr, len) \
2971 : if (VirtualProtect((addr), (len), \
2972 : GC_pages_executable ? PAGE_EXECUTE_READ : \
2973 : PAGE_READONLY, \
2974 : &protect_junk)) { \
2975 : } else ABORT_ARG1("VirtualProtect failed", \
2976 : ": errcode= 0x%X", (unsigned)GetLastError())
2977 : # define UNPROTECT(addr, len) \
2978 : if (VirtualProtect((addr), (len), \
2979 : GC_pages_executable ? PAGE_EXECUTE_READWRITE : \
2980 : PAGE_READWRITE, \
2981 : &protect_junk)) { \
2982 : } else ABORT("un-VirtualProtect failed")
2983 : # endif /* USE_WINALLOC */
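: 
: /* Illustrative protect/unprotect cycle (a sketch of how the macros */
: /* above are used): the collector write-protects heap pages, e.g.   */
: /*   PROTECT(h, GC_page_size);                                      */
: /* and the write-fault handler below re-enables writes with         */
: /*   UNPROTECT(h, GC_page_size);                                    */
: /* and then records the page as dirty.                              */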
2984 :
2985 : # if defined(MSWIN32)
2986 : typedef LPTOP_LEVEL_EXCEPTION_FILTER SIG_HNDLR_PTR;
2987 : # undef SIG_DFL
2988 : # define SIG_DFL (LPTOP_LEVEL_EXCEPTION_FILTER)((signed_word)-1)
2989 : # elif defined(MSWINCE)
2990 : typedef LONG (WINAPI *SIG_HNDLR_PTR)(struct _EXCEPTION_POINTERS *);
2991 : # undef SIG_DFL
2992 : # define SIG_DFL (SIG_HNDLR_PTR) (-1)
2993 : # elif defined(DARWIN)
2994 : typedef void (* SIG_HNDLR_PTR)();
2995 : # else
2996 : typedef void (* SIG_HNDLR_PTR)(int, siginfo_t *, void *);
2997 : typedef void (* PLAIN_HNDLR_PTR)(int);
2998 : # endif
2999 :
3000 : # if defined(__GLIBC__)
3001 : # if __GLIBC__ < 2 || __GLIBC__ == 2 && __GLIBC_MINOR__ < 2
3002 : # error glibc too old?
3003 : # endif
3004 : # endif
3005 :
3006 : #ifndef DARWIN
3007 : STATIC SIG_HNDLR_PTR GC_old_segv_handler = 0;
3008 : /* Also old MSWIN32 ACCESS_VIOLATION filter */
3009 : # if !defined(MSWIN32) && !defined(MSWINCE)
3010 : STATIC SIG_HNDLR_PTR GC_old_bus_handler = 0;
3011 : # if defined(FREEBSD) || defined(HURD) || defined(HPUX)
3012 : STATIC GC_bool GC_old_bus_handler_used_si = FALSE;
3013 : # endif
3014 : STATIC GC_bool GC_old_segv_handler_used_si = FALSE;
3015 : # endif /* !MSWIN32 */
3016 : #endif /* !DARWIN */
3017 :
3018 : #if defined(THREADS)
3019 : /* We need to lock around the bitmap update in the write fault handler */
3020 : /* in order to avoid the risk of losing a bit. We do this with a */
3021 : /* test-and-set spin lock if we know how to do that. Otherwise we */
3022 : /* check whether we are already in the handler and use the dumb but */
3023 : /* safe fallback algorithm of setting all bits in the word. */
3024 : /* Contention should be very rare, so we do the minimum to handle it */
3025 : /* correctly. */
3026 : #ifdef AO_HAVE_test_and_set_acquire
3027 : GC_INNER volatile AO_TS_t GC_fault_handler_lock = AO_TS_INITIALIZER;
3028 0 : static void async_set_pht_entry_from_index(volatile page_hash_table db,
3029 : size_t index)
3030 : {
3031 0 : while (AO_test_and_set_acquire(&GC_fault_handler_lock) == AO_TS_SET) {
3032 : /* empty */
3033 : }
3034 : /* Could also revert to set_pht_entry_from_index_safe if initial */
3035 : /* GC_test_and_set fails. */
3036 0 : set_pht_entry_from_index(db, index);
3037 0 : AO_CLEAR(&GC_fault_handler_lock);
3038 0 : }
3039 : #else /* !AO_HAVE_test_and_set_acquire */
3040 : # error No test_and_set operation: Introduces a race.
3041 : /* THIS WOULD BE INCORRECT! */
3042 : /* The dirty bit vector may be temporarily wrong, */
3043 : /* just before we notice the conflict and correct it. We may end up */
3044 : /* looking at it while it's wrong. But this requires contention */
3045 : /* exactly when a GC is triggered, which seems far less likely to */
3046 : /* fail than the old code, which had no reported failures. Thus we */
3047 : /* leave it this way while we think of something better, or support */
3048 : /* GC_test_and_set on the remaining platforms. */
3049 : static int * volatile currently_updating = 0;
3050 : static void async_set_pht_entry_from_index(volatile page_hash_table db,
3051 : size_t index)
3052 : {
3053 : int update_dummy;
3054 : currently_updating = &update_dummy;
3055 : set_pht_entry_from_index(db, index);
3056 : /* If we get contention in the 10 or so instruction window here, */
3057 : /* and we get stopped by a GC between the two updates, we lose! */
3058 : if (currently_updating != &update_dummy) {
3059 : set_pht_entry_from_index_safe(db, index);
3060 : /* We claim that if two threads concurrently try to update the */
3061 : /* dirty bit vector, the first one to execute UPDATE_START */
3062 : /* will see it changed when UPDATE_END is executed. (Note that */
3063 : /* &update_dummy must differ in two distinct threads.) It */
3064 : /* will then execute set_pht_entry_from_index_safe, thus */
3065 : /* returning us to a safe state, though not soon enough. */
3066 : }
3067 : }
3068 : #endif /* !AO_HAVE_test_and_set_acquire */
3069 : #else /* !THREADS */
3070 : # define async_set_pht_entry_from_index(db, index) \
3071 : set_pht_entry_from_index(db, index)
3072 : #endif /* !THREADS */
3073 :
3074 : #ifdef CHECKSUMS
3075 : void GC_record_fault(struct hblk * h); /* from checksums.c */
3076 : #endif
3077 :
3078 : #ifndef DARWIN
3079 :
3080 : # if !defined(MSWIN32) && !defined(MSWINCE)
3081 : # include <errno.h>
3082 : # if defined(FREEBSD) || defined(HURD) || defined(HPUX)
3083 : # define SIG_OK (sig == SIGBUS || sig == SIGSEGV)
3084 : # else
3085 : # define SIG_OK (sig == SIGSEGV)
3086 : /* Catch SIGSEGV but ignore SIGBUS. */
3087 : # endif
3088 : # if defined(FREEBSD)
3089 : # ifndef SEGV_ACCERR
3090 : # define SEGV_ACCERR 2
3091 : # endif
3092 : # if defined(POWERPC)
3093 : # define AIM /* Pretend that we're AIM. */
3094 : # include <machine/trap.h>
3095 : # define CODE_OK (si -> si_code == EXC_DSI \
3096 : || si -> si_code == SEGV_ACCERR)
3097 : # else
3098 : # define CODE_OK (si -> si_code == BUS_PAGE_FAULT \
3099 : || si -> si_code == SEGV_ACCERR)
3100 : # endif
3101 : # elif defined(OSF1)
3102 : # define CODE_OK (si -> si_code == 2 /* experimentally determined */)
3103 : # elif defined(IRIX5)
3104 : # define CODE_OK (si -> si_code == EACCES)
3105 : # elif defined(HURD)
3106 : # define CODE_OK TRUE
3107 : # elif defined(LINUX)
3108 : # define CODE_OK TRUE
3109 : /* Empirically c.trapno == 14 on IA32, but is that useful? */
3110 : /* Should probably consider alignment issues on other */
3111 : /* architectures. */
3112 : # elif defined(HPUX)
3113 : # define CODE_OK (si -> si_code == SEGV_ACCERR \
3114 : || si -> si_code == BUS_ADRERR \
3115 : || si -> si_code == BUS_UNKNOWN \
3116 : || si -> si_code == SEGV_UNKNOWN \
3117 : || si -> si_code == BUS_OBJERR)
3118 : # elif defined(SUNOS5SIGS)
3119 : # define CODE_OK (si -> si_code == SEGV_ACCERR)
3120 : # endif
3121 : # ifndef NO_GETCONTEXT
3122 : # include <ucontext.h>
3123 : # endif
3124 0 : STATIC void GC_write_fault_handler(int sig, siginfo_t *si, void *raw_sc)
3125 : # else
3126 : # define SIG_OK (exc_info -> ExceptionRecord -> ExceptionCode \
3127 : == STATUS_ACCESS_VIOLATION)
3128 : # define CODE_OK (exc_info -> ExceptionRecord -> ExceptionInformation[0] \
3129 : == 1) /* Write fault */
3130 : STATIC LONG WINAPI GC_write_fault_handler(
3131 : struct _EXCEPTION_POINTERS *exc_info)
3132 : # endif /* MSWIN32 || MSWINCE */
3133 : {
3134 : # if !defined(MSWIN32) && !defined(MSWINCE)
3135 0 : char *addr = si -> si_addr;
3136 : # else
3137 : char * addr = (char *) (exc_info -> ExceptionRecord
3138 : -> ExceptionInformation[1]);
3139 : # endif
3140 : unsigned i;
3141 :
3142 0 : if (SIG_OK && CODE_OK) {
3143 : register struct hblk * h =
3144 0 : (struct hblk *)((word)addr & ~(GC_page_size-1));
3145 : GC_bool in_allocd_block;
3146 : # ifdef CHECKSUMS
3147 : GC_record_fault(h);
3148 : # endif
3149 :
3150 : # ifdef SUNOS5SIGS
3151 : /* Address is only within the correct physical page. */
3152 : in_allocd_block = FALSE;
3153 : for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
3154 : if (HDR(h+i) != 0) {
3155 : in_allocd_block = TRUE;
3156 : break;
3157 : }
3158 : }
3159 : # else
3160 0 : in_allocd_block = (HDR(addr) != 0);
3161 : # endif
3162 0 : if (!in_allocd_block) {
3163 : /* FIXME - We should make sure that we invoke the */
3164 : /* old handler with the appropriate calling */
3165 : /* sequence, which often depends on SA_SIGINFO. */
3166 :
3167 : /* Heap blocks now begin and end on page boundaries */
3168 : SIG_HNDLR_PTR old_handler;
3169 :
3170 : # if defined(MSWIN32) || defined(MSWINCE)
3171 : old_handler = GC_old_segv_handler;
3172 : # else
3173 : GC_bool used_si;
3174 :
3175 : # if defined(FREEBSD) || defined(HURD) || defined(HPUX)
3176 : if (sig == SIGBUS) {
3177 : old_handler = GC_old_bus_handler;
3178 : used_si = GC_old_bus_handler_used_si;
3179 : } else
3180 : # endif
3181 : /* else */ {
3182 0 : old_handler = GC_old_segv_handler;
3183 0 : used_si = GC_old_segv_handler_used_si;
3184 : }
3185 : # endif
3186 :
3187 0 : if (old_handler == (SIG_HNDLR_PTR)SIG_DFL) {
3188 : # if !defined(MSWIN32) && !defined(MSWINCE)
3189 0 : ABORT_ARG1("Unexpected bus error or segmentation fault",
3190 : " at %p", addr);
3191 : # else
3192 : return(EXCEPTION_CONTINUE_SEARCH);
3193 : # endif
3194 : } else {
3195 : /*
3196 : * FIXME: This code should probably check if the
3197 : * old signal handler used the traditional style and
3198 : * if so call it using that style.
3199 : */
3200 : # if defined(MSWIN32) || defined(MSWINCE)
3201 : return((*old_handler)(exc_info));
3202 : # else
3203 0 : if (used_si)
3204 0 : ((SIG_HNDLR_PTR)old_handler) (sig, si, raw_sc);
3205 : else
3206 : /* FIXME: should pass nonstandard args as well. */
3207 0 : ((PLAIN_HNDLR_PTR)old_handler) (sig);
3208 0 : return;
3209 : # endif
3210 : }
3211 : }
3212 0 : UNPROTECT(h, GC_page_size);
3213 : /* We need to make sure that no collection occurs between */
3214 : /* the UNPROTECT and the setting of the dirty bit. Otherwise */
3215 : /* a write by a third thread might go unnoticed. Reversing */
3216 : /* the order is just as bad, since we would end up unprotecting */
3217 : /* a page in a GC cycle during which it's not marked. */
3218 : /* Currently we do this by disabling the thread stopping */
3219 : /* signals while this handler is running. An alternative might */
3220 : /* be to record the fact that we're about to unprotect, or */
3221 : /* have just unprotected a page in the GC's thread structure, */
3222 : /* and then to have the thread stopping code set the dirty */
3223 : /* flag, if necessary. */
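     : /* An illustrative bad interleaving (hypothetical thread names), */
     : /* if the handler were not atomic w.r.t. stopping the world: */
     : /*   T1: faults on page h; handler runs UNPROTECT(h, GC_page_size). */
     : /*   T2: writes to h; no fault is taken, no dirty bit is set yet. */
     : /*   GC: stops the world, GC_read_dirty() snapshots and clears the */
     : /*       dirty bits (h looks clean) and re-protects the heap, so */
     : /*       T2's write goes unnoticed during this cycle. */
     : /*   T1: resumes and sets the dirty bit for h, one cycle too late. */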
3224 0 : for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
3225 0 : size_t index = PHT_HASH(h+i);
3226 :
3227 0 : async_set_pht_entry_from_index(GC_dirty_pages, index);
3228 : }
3229 : /* The write may not take place before dirty bits are read. */
3230 : /* But then we'll fault again ... */
3231 : # if defined(MSWIN32) || defined(MSWINCE)
3232 : return(EXCEPTION_CONTINUE_EXECUTION);
3233 : # else
3234 0 : return;
3235 : # endif
3236 : }
3237 : # if defined(MSWIN32) || defined(MSWINCE)
3238 : return EXCEPTION_CONTINUE_SEARCH;
3239 : # else
3240 0 : ABORT_ARG1("Unexpected bus error or segmentation fault",
3241 : " at %p", addr);
3242 : # endif
3243 : }
3244 :
3245 : # ifdef GC_WIN32_THREADS
3246 : GC_INNER void GC_set_write_fault_handler(void)
3247 : {
3248 : SetUnhandledExceptionFilter(GC_write_fault_handler);
3249 : }
3250 : # endif
3251 : #endif /* !DARWIN */
3252 :
3253 : /* We hold the allocation lock. We expect block h to be written */
3254 : /* shortly. Ensure that all pages containing any part of the nblocks */
3255 : /* hblks starting at h are no longer protected. If is_ptrfree is */
3256 : /* false, also ensure that they will subsequently appear to be dirty. */
3257 : /* Not allowed to call GC_printf (and friends) here; see the Win32 */
3258 : /* GC_stop_world() comments for the reason. */
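     : /* A worked example with hypothetical numbers: with GC_page_size */
     : /* = 8192 and HBLKSIZE = 4096, a call with h = 0x11000 and */
     : /* nblocks = 1 yields h_trunc = 0x10000 and h_end = 0x12000, so */
     : /* the single enclosing 8 KB page is unprotected even though the */
     : /* hblk covers only its second half. */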
3259 58567 : GC_INNER void GC_remove_protection(struct hblk *h, word nblocks,
3260 : GC_bool is_ptrfree)
3261 : {
3262 : struct hblk * h_trunc; /* Truncated to page boundary */
3263 : struct hblk * h_end; /* Page boundary following block end */
3264 : struct hblk * current;
3265 :
3266 : # if defined(GWW_VDB)
3267 : if (GC_GWW_AVAILABLE()) return;
3268 : # endif
3269 58567 : if (!GC_dirty_maintained) return;
3270 0 : h_trunc = (struct hblk *)((word)h & ~(GC_page_size-1));
3271 0 : h_end = (struct hblk *)(((word)(h + nblocks) + GC_page_size-1)
3272 0 : & ~(GC_page_size-1));
3273 0 : if (h_end == h_trunc + 1 &&
3274 0 : get_pht_entry_from_index(GC_dirty_pages, PHT_HASH(h_trunc))) {
3275 : /* already marked dirty, and hence unprotected. */
3276 0 : return;
3277 : }
3278 0 : for (current = h_trunc; (word)current < (word)h_end; ++current) {
3279 0 : size_t index = PHT_HASH(current);
3280 0 : if (!is_ptrfree || (word)current < (word)h
3281 0 : || (word)current >= (word)(h + nblocks)) {
3282 0 : async_set_pht_entry_from_index(GC_dirty_pages, index);
3283 : }
3284 : }
3285 0 : UNPROTECT(h_trunc, (ptr_t)h_end - (ptr_t)h_trunc);
3286 : }
3287 :
3288 : #if !defined(DARWIN)
3289 0 : GC_INNER void GC_dirty_init(void)
3290 : {
3291 : # if !defined(MSWIN32) && !defined(MSWINCE)
3292 : struct sigaction act, oldact;
3293 0 : act.sa_flags = SA_RESTART | SA_SIGINFO;
3294 0 : act.sa_sigaction = GC_write_fault_handler;
3295 0 : (void)sigemptyset(&act.sa_mask);
3296 : # if defined(THREADS) && !defined(GC_OPENBSD_UTHREADS) \
3297 : && !defined(GC_WIN32_THREADS) && !defined(NACL)
3298 : /* Arrange to postpone the signal while we are in a write fault */
3299 : /* handler. This effectively makes the handler atomic w.r.t. */
3300 : /* stopping the world for GC. */
3301 0 : (void)sigaddset(&act.sa_mask, GC_get_suspend_signal());
3302 : # endif
3303 : # endif /* !MSWIN32 */
3304 0 : GC_VERBOSE_LOG_PRINTF(
3305 : "Initializing mprotect virtual dirty bit implementation\n");
3306 0 : GC_dirty_maintained = TRUE;
3307 0 : if (GC_page_size % HBLKSIZE != 0) {
3308 0 : ABORT("Page size not multiple of HBLKSIZE");
3309 : }
3310 : # if !defined(MSWIN32) && !defined(MSWINCE)
3311 : /* act.sa_restorer is deprecated and should not be initialized. */
3312 : # if defined(GC_IRIX_THREADS)
3313 : sigaction(SIGSEGV, 0, &oldact);
3314 : sigaction(SIGSEGV, &act, 0);
3315 : # else
3316 : {
3317 0 : int res = sigaction(SIGSEGV, &act, &oldact);
3318 0 : if (res != 0) ABORT("Sigaction failed");
3319 : }
3320 : # endif
3321 0 : if (oldact.sa_flags & SA_SIGINFO) {
3322 0 : GC_old_segv_handler = oldact.sa_sigaction;
3323 0 : GC_old_segv_handler_used_si = TRUE;
3324 : } else {
3325 0 : GC_old_segv_handler = (SIG_HNDLR_PTR)oldact.sa_handler;
3326 0 : GC_old_segv_handler_used_si = FALSE;
3327 : }
3328 0 : if (GC_old_segv_handler == (SIG_HNDLR_PTR)SIG_IGN) {
3329 0 : WARN("Previously ignored segmentation violation!?\n", 0);
3330 0 : GC_old_segv_handler = (SIG_HNDLR_PTR)SIG_DFL;
3331 : }
3332 0 : if (GC_old_segv_handler != (SIG_HNDLR_PTR)SIG_DFL) {
3333 0 : GC_VERBOSE_LOG_PRINTF("Replaced other SIGSEGV handler\n");
3334 : }
3335 : # if defined(HPUX) || defined(LINUX) || defined(HURD) \
3336 : || (defined(FREEBSD) && defined(SUNOS5SIGS))
3337 0 : sigaction(SIGBUS, &act, &oldact);
3338 0 : if ((oldact.sa_flags & SA_SIGINFO) != 0) {
3339 0 : GC_old_bus_handler = oldact.sa_sigaction;
3340 : # if !defined(LINUX)
3341 : GC_old_bus_handler_used_si = TRUE;
3342 : # endif
3343 : } else {
3344 0 : GC_old_bus_handler = (SIG_HNDLR_PTR)oldact.sa_handler;
3345 : # if !defined(LINUX)
3346 : GC_old_bus_handler_used_si = FALSE;
3347 : # endif
3348 : }
3349 0 : if (GC_old_bus_handler == (SIG_HNDLR_PTR)SIG_IGN) {
3350 0 : WARN("Previously ignored bus error!?\n", 0);
3351 : # if !defined(LINUX)
3352 : GC_old_bus_handler = (SIG_HNDLR_PTR)SIG_DFL;
3353 : # else
3354 : /* GC_old_bus_handler is not used by GC_write_fault_handler. */
3355 : # endif
3356 0 : } else if (GC_old_bus_handler != (SIG_HNDLR_PTR)SIG_DFL) {
3357 0 : GC_VERBOSE_LOG_PRINTF("Replaced other SIGBUS handler\n");
3358 : }
3359 : # endif /* HPUX || LINUX || HURD || (FREEBSD && SUNOS5SIGS) */
3360 : # endif /* ! MS windows */
3361 : # if defined(GWW_VDB)
3362 : if (GC_gww_dirty_init())
3363 : return;
3364 : # endif
3365 : # if defined(MSWIN32)
3366 : GC_old_segv_handler = SetUnhandledExceptionFilter(GC_write_fault_handler);
3367 : if (GC_old_segv_handler != NULL) {
3368 : GC_COND_LOG_PRINTF("Replaced other UnhandledExceptionFilter\n");
3369 : } else {
3370 : GC_old_segv_handler = SIG_DFL;
3371 : }
3372 : # elif defined(MSWINCE)
3373 : /* MPROTECT_VDB is unsupported for WinCE at present. */
3374 : /* FIXME: implement it (if possible). */
3375 : # endif
3376 0 : }
3377 : #endif /* !DARWIN */
3378 :
3379 0 : GC_API int GC_CALL GC_incremental_protection_needs(void)
3380 : {
3381 : GC_ASSERT(GC_is_initialized);
3382 :
3383 0 : if (GC_page_size == HBLKSIZE) {
3384 0 : return GC_PROTECTS_POINTER_HEAP;
3385 : } else {
3386 0 : return GC_PROTECTS_POINTER_HEAP | GC_PROTECTS_PTRFREE_HEAP;
3387 : }
3388 : }
3389 : #define HAVE_INCREMENTAL_PROTECTION_NEEDS
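     : /* A client-side sketch (illustrative, not part of the collector): */
     : /* a client doing system-call I/O directly into the heap can check */
     : /* whether pointer-free objects are protected as well: */
     : /*   #include "gc.h" */
     : /*   void client_setup(void) */
     : /*   { */
     : /*     GC_INIT(); */
     : /*     GC_enable_incremental(); */
     : /*     if (GC_incremental_protection_needs() */
     : /*         & GC_PROTECTS_PTRFREE_HEAP) { */
     : /*       ... stage read(2) through an unmanaged buffer instead */
     : /*       ... of reading directly into GC_malloc_atomic objects. */
     : /*     } */
     : /*   } */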
3390 :
3391 : #define IS_PTRFREE(hhdr) ((hhdr)->hb_descr == 0)
3392 : #define PAGE_ALIGNED(x) !((word)(x) & (GC_page_size - 1))
3393 :
3394 0 : STATIC void GC_protect_heap(void)
3395 : {
3396 : ptr_t start;
3397 : size_t len;
3398 : struct hblk * current;
3399 : struct hblk * current_start; /* Start of block to be protected. */
3400 : struct hblk * limit;
3401 : unsigned i;
3402 : GC_bool protect_all =
3403 0 : (0 != (GC_incremental_protection_needs() & GC_PROTECTS_PTRFREE_HEAP));
3404 0 : for (i = 0; i < GC_n_heap_sects; i++) {
3405 0 : start = GC_heap_sects[i].hs_start;
3406 0 : len = GC_heap_sects[i].hs_bytes;
3407 0 : if (protect_all) {
3408 0 : PROTECT(start, len);
3409 : } else {
3410 : GC_ASSERT(PAGE_ALIGNED(len));
3411 : GC_ASSERT(PAGE_ALIGNED(start));
3412 0 : current_start = current = (struct hblk *)start;
3413 0 : limit = (struct hblk *)(start + len);
3414 0 : while ((word)current < (word)limit) {
3415 : hdr * hhdr;
3416 : word nhblks;
3417 : GC_bool is_ptrfree;
3418 :
3419 : GC_ASSERT(PAGE_ALIGNED(current));
3420 0 : GET_HDR(current, hhdr);
3421 0 : if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
3422 : /* This can happen only if we're at the beginning of a */
3423 : /* heap segment, and a block spans heap segments. */
3424 : /* We will handle that block as part of the preceding */
3425 : /* segment. */
3426 : GC_ASSERT(current_start == current);
3427 0 : current_start = ++current;
3428 0 : continue;
3429 : }
3430 0 : if (HBLK_IS_FREE(hhdr)) {
3431 : GC_ASSERT(PAGE_ALIGNED(hhdr -> hb_sz));
3432 0 : nhblks = divHBLKSZ(hhdr -> hb_sz);
3433 0 : is_ptrfree = TRUE; /* dirty on alloc */
3434 : } else {
3435 0 : nhblks = OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
3436 0 : is_ptrfree = IS_PTRFREE(hhdr);
3437 : }
3438 0 : if (is_ptrfree) {
3439 0 : if ((word)current_start < (word)current) {
3440 0 : PROTECT(current_start, (ptr_t)current - (ptr_t)current_start);
3441 : }
3442 0 : current_start = (current += nhblks);
3443 : } else {
3444 0 : current += nhblks;
3445 : }
3446 : }
3447 0 : if ((word)current_start < (word)current) {
3448 0 : PROTECT(current_start, (ptr_t)current - (ptr_t)current_start);
3449 : }
3450 : }
3451 : }
3452 0 : }
3453 :
3454 : /* We assume that either the world is stopped or it's OK to lose dirty */
3455 : /* bits while this is happening (as in GC_enable_incremental). */
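     : /* One mprotect-VDB cycle, schematically (a summary of the code */
     : /* below and of GC_write_fault_handler above): */
     : /*   GC_protect_heap()  -- write-protect the tracked pages; */
     : /*   mutator writes fault; the handler records each faulting page */
     : /*   in GC_dirty_pages and unprotects it; */
     : /*   GC_read_dirty()    -- snapshot the bits into GC_grungy_pages, */
     : /*   clear them, and re-protect for the next cycle. */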
3456 0 : GC_INNER void GC_read_dirty(void)
3457 : {
3458 : # if defined(GWW_VDB)
3459 : if (GC_GWW_AVAILABLE()) {
3460 : GC_gww_read_dirty();
3461 : return;
3462 : }
3463 : # endif
3464 0 : BCOPY((word *)GC_dirty_pages, GC_grungy_pages,
3465 : (sizeof GC_dirty_pages));
3466 0 : BZERO((word *)GC_dirty_pages, (sizeof GC_dirty_pages));
3467 0 : GC_protect_heap();
3468 0 : }
3469 :
3470 0 : GC_INNER GC_bool GC_page_was_dirty(struct hblk *h)
3471 : {
3472 : register word index;
3473 :
3474 : # if defined(GWW_VDB)
3475 : if (GC_GWW_AVAILABLE())
3476 : return GC_gww_page_was_dirty(h);
3477 : # endif
3478 :
3479 0 : index = PHT_HASH(h);
3480 0 : return(HDR(h) == 0 || get_pht_entry_from_index(GC_grungy_pages, index));
3481 : }
3482 :
3483 : /*
3484 : * Acquiring the allocation lock here is dangerous, since this
3485 : * can be called from within GC_call_with_alloc_lock, and the cord
3486 : * package does so. On systems that allow nested lock acquisition, this
3487 : * happens to work.
3488 : * On other systems, SET_LOCK_HOLDER and friends must be suitably defined.
3489 : */
3490 :
3491 : #if 0
3492 : static GC_bool syscall_acquired_lock = FALSE; /* Protected by GC lock. */
3493 :
3494 : void GC_begin_syscall(void)
3495 : {
3496 : /* FIXME: Resurrecting this code would require fixing the */
3497 : /* test, which can spuriously return TRUE. */
3498 : if (!I_HOLD_LOCK()) {
3499 : LOCK();
3500 : syscall_acquired_lock = TRUE;
3501 : }
3502 : }
3503 :
3504 : void GC_end_syscall(void)
3505 : {
3506 : if (syscall_acquired_lock) {
3507 : syscall_acquired_lock = FALSE;
3508 : UNLOCK();
3509 : }
3510 : }
3511 :
3512 : void GC_unprotect_range(ptr_t addr, word len)
3513 : {
3514 : struct hblk * start_block;
3515 : struct hblk * end_block;
3516 : register struct hblk *h;
3517 : ptr_t obj_start;
3518 :
3519 : if (!GC_dirty_maintained) return;
3520 : obj_start = GC_base(addr);
3521 : if (obj_start == 0) return;
3522 : if (GC_base(addr + len - 1) != obj_start) {
3523 : ABORT("GC_unprotect_range(range bigger than object)");
3524 : }
3525 : start_block = (struct hblk *)((word)addr & ~(GC_page_size - 1));
3526 : end_block = (struct hblk *)((word)(addr + len - 1) & ~(GC_page_size - 1));
3527 : end_block += GC_page_size/HBLKSIZE - 1;
3528 : for (h = start_block; (word)h <= (word)end_block; h++) {
3529 : register word index = PHT_HASH(h);
3530 :
3531 : async_set_pht_entry_from_index(GC_dirty_pages, index);
3532 : }
3533 : UNPROTECT(start_block,
3534 : ((ptr_t)end_block - (ptr_t)start_block) + HBLKSIZE);
3535 : }
3536 :
3537 :
3538 : /* We no longer wrap read by default, since that was causing too many */
3539 : /* problems. It is preferred that the client instead avoid writing */
3540 : /* to the write-protected heap with a system call. */
3541 : /* This still serves as sample code if you do want to wrap system calls.*/
3542 :
3543 : #if !defined(MSWIN32) && !defined(MSWINCE) && !defined(GC_USE_LD_WRAP)
3544 : /* Replacement for UNIX system call. */
3545 : /* Other calls that write to the heap should be handled similarly. */
3546 : /* Note that this doesn't work well for blocking reads: It will hold */
3547 : /* the allocation lock for the entire duration of the call. */
3548 : /* Multi-threaded clients should really ensure that it won't block, */
3549 : /* either by setting the descriptor non-blocking, or by calling select */
3550 : /* or poll first, to make sure that input is available. */
3551 : /* Another, preferred alternative is to ensure that system calls never */
3552 : /* write to the protected heap (see above). */
3553 : # include <unistd.h>
3554 : # include <sys/uio.h>
3555 : ssize_t read(int fd, void *buf, size_t nbyte)
3556 : {
3557 : int result;
3558 :
3559 : GC_begin_syscall();
3560 : GC_unprotect_range(buf, (word)nbyte);
3561 : # if defined(IRIX5) || defined(GC_LINUX_THREADS)
3562 : /* Indirect system call may not always be easily available. */
3563 : /* We could call _read, but that would interfere with the */
3564 : /* libpthread interception of read. */
3565 : /* On Linux, we have to be careful with the linuxthreads */
3566 : /* read interception. */
3567 : {
3568 : struct iovec iov;
3569 :
3570 : iov.iov_base = buf;
3571 : iov.iov_len = nbyte;
3572 : result = readv(fd, &iov, 1);
3573 : }
3574 : # else
3575 : # if defined(HURD)
3576 : result = __read(fd, buf, nbyte);
3577 : # else
3578 : /* The two zero args at the end of this list are because one
3579 : IA-64 syscall() implementation actually requires six args
3580 : to be passed, even though they aren't always used. */
3581 : result = syscall(SYS_read, fd, buf, nbyte, 0, 0);
3582 : # endif /* !HURD */
3583 : # endif
3584 : GC_end_syscall();
3585 : return(result);
3586 : }
3587 : #endif /* !MSWIN32 && !MSWINCE && !GC_USE_LD_WRAP */
3588 :
3589 : #if defined(GC_USE_LD_WRAP) && !defined(THREADS)
3590 : /* We use the GNU ld call wrapping facility. */
3591 : /* I'm not sure that this actually wraps whatever version of read */
3592 : /* is called by stdio. That code also mentions __read. */
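     : /* For example (an illustrative link line): building the client with */
     : /*   gcc client.o gc.a -Wl,--wrap,read */
     : /* makes calls to read resolve to __wrap_read below, while */
     : /* __real_read resolves to the original libc read. */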
3593 : # include <unistd.h>
3594 : ssize_t __wrap_read(int fd, void *buf, size_t nbyte)
3595 : {
3596 : int result;
3597 :
3598 : GC_begin_syscall();
3599 : GC_unprotect_range(buf, (word)nbyte);
3600 : result = __real_read(fd, buf, nbyte);
3601 : GC_end_syscall();
3602 : return(result);
3603 : }
3604 :
3605 : /* We should probably also do this for __read, or whatever stdio */
3606 : /* actually calls. */
3607 : #endif
3608 : #endif /* 0 */
3609 :
3610 : # ifdef CHECKSUMS
3611 : GC_INNER GC_bool GC_page_was_ever_dirty(struct hblk * h GC_ATTR_UNUSED)
3612 : {
3613 : # if defined(GWW_VDB)
3614 : if (GC_GWW_AVAILABLE())
3615 : return GC_gww_page_was_ever_dirty(h);
3616 : # endif
3617 : return(TRUE);
3618 : }
3619 : # endif /* CHECKSUMS */
3620 :
3621 : #endif /* MPROTECT_VDB */
3622 :
3623 : #ifdef PROC_VDB
3624 : /* See DEFAULT_VDB for interface descriptions. */
3625 :
3626 : /* This implementation assumes a Solaris-2.X-style /proc */
3627 : /* pseudo-file-system from which we can read page-modified bits. This */
3628 : /* facility is far from optimal (e.g. we would like to get the info for */
3629 : /* only some of the address space), but it avoids intercepting system */
3630 : /* calls. */
3631 :
3632 : # include <errno.h>
3633 : # include <sys/types.h>
3634 : # include <sys/signal.h>
3635 : # include <sys/fault.h>
3636 : # include <sys/syscall.h>
3637 : # include <sys/procfs.h>
3638 : # include <sys/stat.h>
3639 :
3640 : # define INITIAL_BUF_SZ 16384
3641 : STATIC word GC_proc_buf_size = INITIAL_BUF_SZ;
3642 : STATIC char *GC_proc_buf = NULL;
3643 : STATIC int GC_proc_fd = 0;
3644 :
3645 : GC_INNER void GC_dirty_init(void)
3646 : {
3647 : int fd;
3648 : char buf[30];
3649 :
3650 : if (GC_bytes_allocd != 0 || GC_bytes_allocd_before_gc != 0) {
3651 : memset(GC_written_pages, 0xff, sizeof(page_hash_table));
3652 : GC_VERBOSE_LOG_PRINTF(
3653 : "Allocated %lu bytes: all pages may have been written\n",
3654 : (unsigned long)(GC_bytes_allocd + GC_bytes_allocd_before_gc));
3655 : }
3656 :
3657 : (void)snprintf(buf, sizeof(buf), "/proc/%ld", (long)getpid());
3658 : buf[sizeof(buf) - 1] = '\0';
3659 : fd = open(buf, O_RDONLY);
3660 : if (fd < 0) {
3661 : ABORT("/proc open failed");
3662 : }
3663 : GC_proc_fd = syscall(SYS_ioctl, fd, PIOCOPENPD, 0);
3664 : close(fd);
3665 : if (GC_proc_fd < 0) {
3666 : WARN("/proc ioctl(PIOCOPENPD) failed", 0);
3667 : return;
3668 : }
3669 : syscall(SYS_fcntl, GC_proc_fd, F_SETFD, FD_CLOEXEC);
3670 :
3671 : GC_dirty_maintained = TRUE;
3672 : GC_proc_buf = GC_scratch_alloc(GC_proc_buf_size);
3673 : if (GC_proc_buf == NULL)
3674 : ABORT("Insufficient space for /proc read");
3675 : }
3676 :
3677 : # define READ read
3678 :
3679 : GC_INNER void GC_read_dirty(void)
3680 : {
3681 : int nmaps;
3682 : unsigned long npages;
3683 : unsigned pagesize;
3684 : ptr_t vaddr, limit;
3685 : struct prasmap * map;
3686 : char * bufp;
3687 : int i;
3688 :
3689 : BZERO(GC_grungy_pages, sizeof(GC_grungy_pages));
3690 : bufp = GC_proc_buf;
3691 : if (READ(GC_proc_fd, bufp, GC_proc_buf_size) <= 0) {
3692 : /* Retry with larger buffer. */
3693 : word new_size = 2 * GC_proc_buf_size;
3694 : char *new_buf;
3695 :
3696 : WARN("/proc read failed: GC_proc_buf_size = %" WARN_PRIdPTR "\n",
3697 : (signed_word)GC_proc_buf_size);
3698 : new_buf = GC_scratch_alloc(new_size);
3699 : if (new_buf != 0) {
3700 : GC_proc_buf = bufp = new_buf;
3701 : GC_proc_buf_size = new_size;
3702 : }
3703 : if (READ(GC_proc_fd, bufp, GC_proc_buf_size) <= 0) {
3704 : WARN("Insufficient space for /proc read\n", 0);
3705 : /* Punt: */
3706 : memset(GC_grungy_pages, 0xff, sizeof (page_hash_table));
3707 : memset(GC_written_pages, 0xff, sizeof(page_hash_table));
3708 : return;
3709 : }
3710 : }
3711 :
3712 : /* Copy dirty bits into GC_grungy_pages */
3713 : nmaps = ((struct prpageheader *)bufp) -> pr_nmap;
3714 : # ifdef DEBUG_DIRTY_BITS
3715 : GC_log_printf("Proc VDB read: pr_nmap= %u, pr_npage= %lu\n",
3716 : nmaps, ((struct prpageheader *)bufp)->pr_npage);
3717 : # endif
3718 : bufp += sizeof(struct prpageheader);
3719 : for (i = 0; i < nmaps; i++) {
3720 : map = (struct prasmap *)bufp;
3721 : vaddr = (ptr_t)(map -> pr_vaddr);
3722 : npages = map -> pr_npage;
3723 : pagesize = map -> pr_pagesize;
3724 : # ifdef DEBUG_DIRTY_BITS
3725 : GC_log_printf(
3726 : "pr_vaddr= %p, npage= %lu, mflags= 0x%x, pagesize= 0x%x\n",
3727 : vaddr, npages, map->pr_mflags, pagesize);
3728 : # endif
3729 :
3730 : bufp += sizeof(struct prasmap);
3731 : limit = vaddr + pagesize * npages;
3732 : for (; (word)vaddr < (word)limit; vaddr += pagesize) {
3733 : if ((*bufp++) & PG_MODIFIED) {
3734 : register struct hblk * h;
3735 : ptr_t next_vaddr = vaddr + pagesize;
3736 : # ifdef DEBUG_DIRTY_BITS
3737 : GC_log_printf("dirty page at: %p\n", vaddr);
3738 : # endif
3739 : for (h = (struct hblk *)vaddr;
3740 : (word)h < (word)next_vaddr; h++) {
3741 : register word index = PHT_HASH(h);
3742 : set_pht_entry_from_index(GC_grungy_pages, index);
3743 : }
3744 : }
3745 : }
3746 : bufp = (char *)(((word)bufp + (sizeof(long)-1)) & ~(sizeof(long)-1));
3747 : }
3748 : # ifdef DEBUG_DIRTY_BITS
3749 : GC_log_printf("Proc VDB read done.\n");
3750 : # endif
3751 :
3752 : /* Update GC_written_pages. */
3753 : GC_or_pages(GC_written_pages, GC_grungy_pages);
3754 : }
3755 :
3756 : # undef READ
3757 : #endif /* PROC_VDB */
3758 :
3759 : #ifdef PCR_VDB
3760 :
3761 : # include "vd/PCR_VD.h"
3762 :
3763 : # define NPAGES (32*1024) /* 128 MB */
3764 :
3765 : PCR_VD_DB GC_grungy_bits[NPAGES];
3766 :
3767 : STATIC ptr_t GC_vd_base = NULL;
3768 : /* Address corresponding to GC_grungy_bits[0] */
3769 : /* HBLKSIZE aligned. */
3770 :
3771 : GC_INNER void GC_dirty_init(void)
3772 : {
3773 : GC_dirty_maintained = TRUE;
3774 : /* For the time being, we assume the heap generally grows up */
3775 : GC_vd_base = GC_heap_sects[0].hs_start;
3776 : if (GC_vd_base == 0) {
3777 : ABORT("Bad initial heap segment");
3778 : }
3779 : if (PCR_VD_Start(HBLKSIZE, GC_vd_base, NPAGES*HBLKSIZE)
3780 : != PCR_ERes_okay) {
3781 : ABORT("Dirty bit initialization failed");
3782 : }
3783 : }
3784 :
3785 : GC_INNER void GC_read_dirty(void)
3786 : {
3787 : /* lazily enable dirty bits on newly added heap sects */
3788 : {
3789 : static int onhs = 0;
3790 : int nhs = GC_n_heap_sects;
3791 : for(; onhs < nhs; onhs++) {
3792 : PCR_VD_WriteProtectEnable(
3793 : GC_heap_sects[onhs].hs_start,
3794 : GC_heap_sects[onhs].hs_bytes );
3795 : }
3796 : }
3797 :
3798 : if (PCR_VD_Clear(GC_vd_base, NPAGES*HBLKSIZE, GC_grungy_bits)
3799 : != PCR_ERes_okay) {
3800 : ABORT("Dirty bit read failed");
3801 : }
3802 : }
3803 :
3804 : GC_INNER GC_bool GC_page_was_dirty(struct hblk *h)
3805 : {
3806 : if ((word)h < (word)GC_vd_base
3807 : || (word)h >= (word)(GC_vd_base + NPAGES*HBLKSIZE)) {
3808 : return(TRUE);
3809 : }
3810 : return(GC_grungy_bits[h - (struct hblk *)GC_vd_base] & PCR_VD_DB_dirtyBit);
3811 : }
3812 :
3813 : GC_INNER void GC_remove_protection(struct hblk *h, word nblocks,
3814 : GC_bool is_ptrfree GC_ATTR_UNUSED)
3815 : {
3816 : PCR_VD_WriteProtectDisable(h, nblocks*HBLKSIZE);
3817 : PCR_VD_WriteProtectEnable(h, nblocks*HBLKSIZE);
3818 : }
3819 :
3820 : #endif /* PCR_VDB */
3821 :
3822 : #if defined(MPROTECT_VDB) && defined(DARWIN)
3823 : /* The following sources were used as a "reference" for this exception
3824 : handling code:
3825 : 1. Apple's mach/xnu documentation
3826 : 2. Timothy J. Wood's "Mach Exception Handlers 101" post to the
3827 : omnigroup's macosx-dev list.
3828 : www.omnigroup.com/mailman/archive/macosx-dev/2000-June/014178.html
3829 : 3. macosx-nat.c from Apple's GDB source code.
3830 : */
3831 :
3832 : /* The bug that caused all this trouble should now be fixed. This should
3833 : eventually be removed if all goes well. */
3834 :
3835 : /* #define BROKEN_EXCEPTION_HANDLING */
3836 :
3837 : #include <mach/mach.h>
3838 : #include <mach/mach_error.h>
3839 : #include <mach/thread_status.h>
3840 : #include <mach/exception.h>
3841 : #include <mach/task.h>
3842 : #include <pthread.h>
3843 :
3844 : /* These are not defined in any header, although they are documented */
3845 : extern boolean_t
3846 : exc_server(mach_msg_header_t *, mach_msg_header_t *);
3847 :
3848 : extern kern_return_t
3849 : exception_raise(mach_port_t, mach_port_t, mach_port_t, exception_type_t,
3850 : exception_data_t, mach_msg_type_number_t);
3851 :
3852 : extern kern_return_t
3853 : exception_raise_state(mach_port_t, mach_port_t, mach_port_t, exception_type_t,
3854 : exception_data_t, mach_msg_type_number_t,
3855 : thread_state_flavor_t*, thread_state_t,
3856 : mach_msg_type_number_t, thread_state_t,
3857 : mach_msg_type_number_t*);
3858 :
3859 : extern kern_return_t
3860 : exception_raise_state_identity(mach_port_t, mach_port_t, mach_port_t,
3861 : exception_type_t, exception_data_t,
3862 : mach_msg_type_number_t, thread_state_flavor_t*,
3863 : thread_state_t, mach_msg_type_number_t,
3864 : thread_state_t, mach_msg_type_number_t*);
3865 :
3866 : GC_API_OSCALL kern_return_t
3867 : catch_exception_raise(mach_port_t exception_port, mach_port_t thread,
3868 : mach_port_t task, exception_type_t exception,
3869 : exception_data_t code, mach_msg_type_number_t code_count);
3870 :
3871 : /* These should never be called, but just in case... */
3872 : GC_API_OSCALL kern_return_t
3873 : catch_exception_raise_state(mach_port_name_t exception_port GC_ATTR_UNUSED,
3874 : int exception GC_ATTR_UNUSED, exception_data_t code GC_ATTR_UNUSED,
3875 : mach_msg_type_number_t codeCnt GC_ATTR_UNUSED, int flavor GC_ATTR_UNUSED,
3876 : thread_state_t old_state GC_ATTR_UNUSED, int old_stateCnt GC_ATTR_UNUSED,
3877 : thread_state_t new_state GC_ATTR_UNUSED, int new_stateCnt GC_ATTR_UNUSED)
3878 : {
3879 : ABORT_RET("Unexpected catch_exception_raise_state invocation");
3880 : return(KERN_INVALID_ARGUMENT);
3881 : }
3882 :
3883 : GC_API_OSCALL kern_return_t
3884 : catch_exception_raise_state_identity(
3885 : mach_port_name_t exception_port GC_ATTR_UNUSED,
3886 : mach_port_t thread GC_ATTR_UNUSED, mach_port_t task GC_ATTR_UNUSED,
3887 : int exception GC_ATTR_UNUSED, exception_data_t code GC_ATTR_UNUSED,
3888 : mach_msg_type_number_t codeCnt GC_ATTR_UNUSED, int flavor GC_ATTR_UNUSED,
3889 : thread_state_t old_state GC_ATTR_UNUSED, int old_stateCnt GC_ATTR_UNUSED,
3890 : thread_state_t new_state GC_ATTR_UNUSED, int new_stateCnt GC_ATTR_UNUSED)
3891 : {
3892 : ABORT_RET("Unexpected catch_exception_raise_state_identity invocation");
3893 : return(KERN_INVALID_ARGUMENT);
3894 : }
3895 :
3896 : #define MAX_EXCEPTION_PORTS 16
3897 :
3898 : static struct {
3899 : mach_msg_type_number_t count;
3900 : exception_mask_t masks[MAX_EXCEPTION_PORTS];
3901 : exception_handler_t ports[MAX_EXCEPTION_PORTS];
3902 : exception_behavior_t behaviors[MAX_EXCEPTION_PORTS];
3903 : thread_state_flavor_t flavors[MAX_EXCEPTION_PORTS];
3904 : } GC_old_exc_ports;
3905 :
3906 : STATIC struct {
3907 : void (*volatile os_callback[3])(void);
3908 : mach_port_t exception;
3909 : # if defined(THREADS)
3910 : mach_port_t reply;
3911 : # endif
3912 : } GC_ports = {
3913 : {
3914 : /* This is to prevent these routines from being stripped as dead code. */
3915 : (void (*)(void))catch_exception_raise,
3916 : (void (*)(void))catch_exception_raise_state,
3917 : (void (*)(void))catch_exception_raise_state_identity
3918 : },
3919 : # ifdef THREADS
3920 : 0, /* for 'exception' */
3921 : # endif
3922 : 0
3923 : };
3924 :
3925 : typedef struct {
3926 : mach_msg_header_t head;
3927 : } GC_msg_t;
3928 :
3929 : typedef enum {
3930 : GC_MP_NORMAL,
3931 : GC_MP_DISCARDING,
3932 : GC_MP_STOPPED
3933 : } GC_mprotect_state_t;
3934 :
3935 : #ifdef THREADS
3936 : /* FIXME: 1 and 2 seem to be safe to use in the msgh_id field, but */
3937 : /* this is not documented. Check the xnu source to see if they are OK. */
3938 : # define ID_STOP 1
3939 : # define ID_RESUME 2
3940 :
3941 : /* This value is only used on the reply port. */
3942 : # define ID_ACK 3
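     : /* The stop/resume handshake, in outline (a summary of the code */
     : /* below): the thread stopping the world sends ID_STOP to */
     : /* GC_ports.exception and waits on GC_ports.reply; the mprotect */
     : /* thread switches to GC_MP_DISCARDING, drains in-flight exception */
     : /* messages until a receive times out, then sends ID_ACK and enters */
     : /* GC_MP_STOPPED. ID_RESUME reverses the transition, again ACKed. */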
3943 :
3944 : STATIC GC_mprotect_state_t GC_mprotect_state = GC_MP_NORMAL;
3945 :
3946 : /* The following should ONLY be called when the world is stopped. */
3947 : STATIC void GC_mprotect_thread_notify(mach_msg_id_t id)
3948 : {
3949 : struct {
3950 : GC_msg_t msg;
3951 : mach_msg_trailer_t trailer;
3952 : } buf;
3953 : mach_msg_return_t r;
3954 :
3955 : /* MACH_MSGH_BITS(remote, local) */
3956 : buf.msg.head.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND, 0);
3957 : buf.msg.head.msgh_size = sizeof(buf.msg);
3958 : buf.msg.head.msgh_remote_port = GC_ports.exception;
3959 : buf.msg.head.msgh_local_port = MACH_PORT_NULL;
3960 : buf.msg.head.msgh_id = id;
3961 :
3962 : r = mach_msg(&buf.msg.head, MACH_SEND_MSG | MACH_RCV_MSG | MACH_RCV_LARGE,
3963 : sizeof(buf.msg), sizeof(buf), GC_ports.reply,
3964 : MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
3965 : if (r != MACH_MSG_SUCCESS)
3966 : ABORT("mach_msg failed in GC_mprotect_thread_notify");
3967 : if (buf.msg.head.msgh_id != ID_ACK)
3968 : ABORT("Invalid ack in GC_mprotect_thread_notify");
3969 : }
3970 :
3971 : /* Should only be called by the mprotect thread */
3972 : STATIC void GC_mprotect_thread_reply(void)
3973 : {
3974 : GC_msg_t msg;
3975 : mach_msg_return_t r;
3976 :
3977 : /* MACH_MSGH_BITS(remote, local) */
3978 : msg.head.msgh_bits = MACH_MSGH_BITS(MACH_MSG_TYPE_MAKE_SEND, 0);
3979 : msg.head.msgh_size = sizeof(msg);
3980 : msg.head.msgh_remote_port = GC_ports.reply;
3981 : msg.head.msgh_local_port = MACH_PORT_NULL;
3982 : msg.head.msgh_id = ID_ACK;
3983 :
3984 : r = mach_msg(&msg.head, MACH_SEND_MSG, sizeof(msg), 0, MACH_PORT_NULL,
3985 : MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
3986 : if (r != MACH_MSG_SUCCESS)
3987 : ABORT("mach_msg failed in GC_mprotect_thread_reply");
3988 : }
3989 :
3990 : GC_INNER void GC_mprotect_stop(void)
3991 : {
3992 : GC_mprotect_thread_notify(ID_STOP);
3993 : }
3994 :
3995 : GC_INNER void GC_mprotect_resume(void)
3996 : {
3997 : GC_mprotect_thread_notify(ID_RESUME);
3998 : }
3999 :
4000 : # ifndef GC_NO_THREADS_DISCOVERY
4001 : GC_INNER void GC_darwin_register_mach_handler_thread(mach_port_t thread);
4002 : # endif
4003 :
4004 : #else
4005 : /* The compiler should optimize away any GC_mprotect_state computations */
4006 : # define GC_mprotect_state GC_MP_NORMAL
4007 : #endif /* !THREADS */
4008 :
4009 : STATIC void *GC_mprotect_thread(void *arg)
4010 : {
4011 : mach_msg_return_t r;
4012 : /* These two structures contain some private kernel data. We don't */
4013 : /* need to access any of it so we don't bother defining a proper */
4014 : /* struct. The correct definitions are in the xnu source code. */
4015 : struct {
4016 : mach_msg_header_t head;
4017 : char data[256];
4018 : } reply;
4019 : struct {
4020 : mach_msg_header_t head;
4021 : mach_msg_body_t msgh_body;
4022 : char data[1024];
4023 : } msg;
4024 : mach_msg_id_t id;
4025 :
4026 : if ((word)arg == (word)-1) return 0; /* to make compiler happy */
4027 :
4028 : # if defined(THREADS) && !defined(GC_NO_THREADS_DISCOVERY)
4029 : GC_darwin_register_mach_handler_thread(mach_thread_self());
4030 : # endif
4031 :
4032 : for(;;) {
4033 : r = mach_msg(&msg.head, MACH_RCV_MSG | MACH_RCV_LARGE |
4034 : (GC_mprotect_state == GC_MP_DISCARDING ? MACH_RCV_TIMEOUT : 0),
4035 : 0, sizeof(msg), GC_ports.exception,
4036 : GC_mprotect_state == GC_MP_DISCARDING ? 0
4037 : : MACH_MSG_TIMEOUT_NONE, MACH_PORT_NULL);
4038 : id = r == MACH_MSG_SUCCESS ? msg.head.msgh_id : -1;
4039 :
4040 : # if defined(THREADS)
4041 : if(GC_mprotect_state == GC_MP_DISCARDING) {
4042 : if(r == MACH_RCV_TIMED_OUT) {
4043 : GC_mprotect_state = GC_MP_STOPPED;
4044 : GC_mprotect_thread_reply();
4045 : continue;
4046 : }
4047 : if(r == MACH_MSG_SUCCESS && (id == ID_STOP || id == ID_RESUME))
4048 : ABORT("Out of order mprotect thread request");
4049 : }
4050 : # endif /* THREADS */
4051 :
4052 : if (r != MACH_MSG_SUCCESS) {
4053 : ABORT_ARG2("mach_msg failed",
4054 : ": errcode= %d (%s)", (int)r, mach_error_string(r));
4055 : }
4056 :
4057 : switch(id) {
4058 : # if defined(THREADS)
4059 : case ID_STOP:
4060 : if(GC_mprotect_state != GC_MP_NORMAL)
4061 : ABORT("Called mprotect_stop when state wasn't normal");
4062 : GC_mprotect_state = GC_MP_DISCARDING;
4063 : break;
4064 : case ID_RESUME:
4065 : if(GC_mprotect_state != GC_MP_STOPPED)
4066 : ABORT("Called mprotect_resume when state wasn't stopped");
4067 : GC_mprotect_state = GC_MP_NORMAL;
4068 : GC_mprotect_thread_reply();
4069 : break;
4070 : # endif /* THREADS */
4071 : default:
4072 : /* Handle the message (calls catch_exception_raise) */
4073 : if(!exc_server(&msg.head, &reply.head))
4074 : ABORT("exc_server failed");
4075 : /* Send the reply */
4076 : r = mach_msg(&reply.head, MACH_SEND_MSG, reply.head.msgh_size, 0,
4077 : MACH_PORT_NULL, MACH_MSG_TIMEOUT_NONE,
4078 : MACH_PORT_NULL);
4079 : if(r != MACH_MSG_SUCCESS) {
4080 : /* This will fail if the thread dies, but the thread */
4081 : /* shouldn't die... */
4082 : # ifdef BROKEN_EXCEPTION_HANDLING
4083 : GC_err_printf("mach_msg failed with %d %s while sending "
4084 : "exc reply\n", (int)r, mach_error_string(r));
4085 : # else
4086 : ABORT("mach_msg failed while sending exception reply");
4087 : # endif
4088 : }
4089 : } /* switch */
4090 : } /* for(;;) */
4091 : }
4092 :
4093 : /* All this SIGBUS code shouldn't be necessary. All protection faults should
4094 : be going through the mach exception handler. However, it seems a SIGBUS is
4095 : occasionally sent for some unknown reason. Even more odd, it seems to be
4096 : meaningless and safe to ignore. */
4097 : #ifdef BROKEN_EXCEPTION_HANDLING
4098 :
4099 : /* Updates to this aren't atomic, but the SIGBUSes seem pretty rare. */
4100 : /* Even if this doesn't get updated properly, it isn't really a problem. */
4101 : STATIC int GC_sigbus_count = 0;
4102 :
4103 : STATIC void GC_darwin_sigbus(int num, siginfo_t *sip, void *context)
4104 : {
4105 : if (num != SIGBUS)
4106 : ABORT("Got a non-sigbus signal in the sigbus handler");
4107 :
4108 : /* Ugh... some seem safe to ignore, but too many in a row probably means
4109 : trouble. GC_sigbus_count is reset for each mach exception that is
4110 : handled. */
4111 : if (GC_sigbus_count >= 8) {
4112 : ABORT("Got more than 8 SIGBUSs in a row!");
4113 : } else {
4114 : GC_sigbus_count++;
4115 : WARN("Ignoring SIGBUS.\n", 0);
4116 : }
4117 : }
4118 : #endif /* BROKEN_EXCEPTION_HANDLING */
4119 :
4120 : GC_INNER void GC_dirty_init(void)
4121 : {
4122 : kern_return_t r;
4123 : mach_port_t me;
4124 : pthread_t thread;
4125 : pthread_attr_t attr;
4126 : exception_mask_t mask;
4127 :
4128 : # ifdef CAN_HANDLE_FORK
4129 : if (GC_handle_fork) {
4130 : /* To both support GC incremental mode and GC functions usage in */
4131 : /* the forked child, pthread_atfork should be used to install */
4132 : /* handlers that switch off GC_dirty_maintained in the child */
4133 : /* gracefully (unprotecting all pages and clearing */
4134 : /* GC_mach_handler_thread). For now, we just disable incremental */
4135 : /* mode if fork() handling is requested by the client. */
4136 : GC_COND_LOG_PRINTF("GC incremental mode disabled since fork()"
4137 : " handling requested\n");
4138 : return;
4139 : }
4140 : # endif
4141 :
4142 : GC_VERBOSE_LOG_PRINTF("Initializing mach/darwin mprotect"
4143 : " virtual dirty bit implementation\n");
4144 : # ifdef BROKEN_EXCEPTION_HANDLING
4145 : WARN("Enabling workarounds for various darwin "
4146 : "exception handling bugs.\n", 0);
4147 : # endif
4148 : GC_dirty_maintained = TRUE;
4149 : if (GC_page_size % HBLKSIZE != 0) {
4150 : ABORT("Page size not multiple of HBLKSIZE");
4151 : }
4152 :
4153 : GC_task_self = me = mach_task_self();
4154 :
4155 : r = mach_port_allocate(me, MACH_PORT_RIGHT_RECEIVE, &GC_ports.exception);
4156 : if (r != KERN_SUCCESS)
4157 : ABORT("mach_port_allocate failed (exception port)");
4158 :
4159 : r = mach_port_insert_right(me, GC_ports.exception, GC_ports.exception,
4160 : MACH_MSG_TYPE_MAKE_SEND);
4161 : if (r != KERN_SUCCESS)
4162 : ABORT("mach_port_insert_right failed (exception port)");
4163 :
4164 : # if defined(THREADS)
4165 : r = mach_port_allocate(me, MACH_PORT_RIGHT_RECEIVE, &GC_ports.reply);
4166 : if(r != KERN_SUCCESS)
4167 : ABORT("mach_port_allocate failed (reply port)");
4168 : # endif
4169 :
4170 : /* The exceptions we want to catch */
4171 : mask = EXC_MASK_BAD_ACCESS;
4172 :
4173 : r = task_get_exception_ports(me, mask, GC_old_exc_ports.masks,
4174 : &GC_old_exc_ports.count, GC_old_exc_ports.ports,
4175 : GC_old_exc_ports.behaviors,
4176 : GC_old_exc_ports.flavors);
4177 : if (r != KERN_SUCCESS)
4178 : ABORT("task_get_exception_ports failed");
4179 :
4180 : r = task_set_exception_ports(me, mask, GC_ports.exception, EXCEPTION_DEFAULT,
4181 : GC_MACH_THREAD_STATE);
4182 : if (r != KERN_SUCCESS)
4183 : ABORT("task_set_exception_ports failed");
4184 : if (pthread_attr_init(&attr) != 0)
4185 : ABORT("pthread_attr_init failed");
4186 : if (pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED) != 0)
4187 : ABORT("pthread_attr_setdetachedstate failed");
4188 :
4189 : # undef pthread_create
4190 : /* This will call the real pthread function, not our wrapper */
4191 : if (pthread_create(&thread, &attr, GC_mprotect_thread, NULL) != 0)
4192 : ABORT("pthread_create failed");
4193 : pthread_attr_destroy(&attr);
4194 :
4195 : /* Set up the SIGBUS handler for ignoring the meaningless SIGBUSes. */
4196 : # ifdef BROKEN_EXCEPTION_HANDLING
4197 : {
4198 : struct sigaction sa, oldsa;
4199 : sa.sa_handler = (SIG_HNDLR_PTR)GC_darwin_sigbus;
4200 : sigemptyset(&sa.sa_mask);
4201 : sa.sa_flags = SA_RESTART|SA_SIGINFO;
4202 : /* sa.sa_restorer is deprecated and should not be initialized. */
4203 : if (sigaction(SIGBUS, &sa, &oldsa) < 0)
4204 : ABORT("sigaction failed");
4205 : if ((SIG_HNDLR_PTR)oldsa.sa_handler != SIG_DFL) {
4206 : GC_VERBOSE_LOG_PRINTF("Replaced other SIGBUS handler\n");
4207 : }
4208 : }
4209 : # endif /* BROKEN_EXCEPTION_HANDLING */
4210 : }
4211 :
4212 : /* The source code for Apple's GDB was used as a reference for the */
4213 : /* exception forwarding code. This code is similar to the GDB code only */
4214 : /* because there is only one way to do it. */
4215 : STATIC kern_return_t GC_forward_exception(mach_port_t thread, mach_port_t task,
4216 : exception_type_t exception,
4217 : exception_data_t data,
4218 : mach_msg_type_number_t data_count)
4219 : {
4220 : unsigned int i;
4221 : kern_return_t r;
4222 : mach_port_t port;
4223 : exception_behavior_t behavior;
4224 : thread_state_flavor_t flavor;
4225 :
4226 : thread_state_data_t thread_state;
4227 : mach_msg_type_number_t thread_state_count = THREAD_STATE_MAX;
4228 :
4229 : for (i=0; i < GC_old_exc_ports.count; i++)
4230 : if (GC_old_exc_ports.masks[i] & (1 << exception))
4231 : break;
4232 : if (i == GC_old_exc_ports.count)
4233 : ABORT("No handler for exception!");
4234 :
4235 : port = GC_old_exc_ports.ports[i];
4236 : behavior = GC_old_exc_ports.behaviors[i];
4237 : flavor = GC_old_exc_ports.flavors[i];
4238 :
4239 : if (behavior == EXCEPTION_STATE || behavior == EXCEPTION_STATE_IDENTITY) {
4240 : r = thread_get_state(thread, flavor, thread_state, &thread_state_count);
4241 : if(r != KERN_SUCCESS)
4242 : ABORT("thread_get_state failed in forward_exception");
4243 : }
4244 :
4245 : switch(behavior) {
4246 : case EXCEPTION_STATE:
4247 : r = exception_raise_state(port, thread, task, exception, data, data_count,
4248 : &flavor, thread_state, thread_state_count,
4249 : thread_state, &thread_state_count);
4250 : break;
4251 : case EXCEPTION_STATE_IDENTITY:
4252 : r = exception_raise_state_identity(port, thread, task, exception, data,
4253 : data_count, &flavor, thread_state,
4254 : thread_state_count, thread_state,
4255 : &thread_state_count);
4256 : break;
4257 : /* case EXCEPTION_DEFAULT: */ /* default signal handlers */
4258 : default: /* user-supplied signal handlers */
4259 : r = exception_raise(port, thread, task, exception, data, data_count);
4260 : }
4261 :
4262 : if (behavior == EXCEPTION_STATE || behavior == EXCEPTION_STATE_IDENTITY) {
4263 : r = thread_set_state(thread, flavor, thread_state, thread_state_count);
4264 : if (r != KERN_SUCCESS)
4265 : ABORT("thread_set_state failed in forward_exception");
4266 : }
4267 : return r;
4268 : }
4269 :
4270 : #define FWD() GC_forward_exception(thread, task, exception, code, code_count)
4271 :
4272 : #ifdef ARM32
4273 : # define DARWIN_EXC_STATE ARM_EXCEPTION_STATE
4274 : # define DARWIN_EXC_STATE_COUNT ARM_EXCEPTION_STATE_COUNT
4275 : # define DARWIN_EXC_STATE_T arm_exception_state_t
4276 : # define DARWIN_EXC_STATE_DAR THREAD_FLD(far)
4277 : #elif defined(POWERPC)
4278 : # if CPP_WORDSZ == 32
4279 : # define DARWIN_EXC_STATE PPC_EXCEPTION_STATE
4280 : # define DARWIN_EXC_STATE_COUNT PPC_EXCEPTION_STATE_COUNT
4281 : # define DARWIN_EXC_STATE_T ppc_exception_state_t
4282 : # else
4283 : # define DARWIN_EXC_STATE PPC_EXCEPTION_STATE64
4284 : # define DARWIN_EXC_STATE_COUNT PPC_EXCEPTION_STATE64_COUNT
4285 : # define DARWIN_EXC_STATE_T ppc_exception_state64_t
4286 : # endif
4287 : # define DARWIN_EXC_STATE_DAR THREAD_FLD(dar)
4288 : #elif defined(I386) || defined(X86_64)
4289 : # if CPP_WORDSZ == 32
4290 : # if defined(i386_EXCEPTION_STATE_COUNT) \
4291 : && !defined(x86_EXCEPTION_STATE32_COUNT)
4292 : /* Use old naming convention for 32-bit x86. */
4293 : # define DARWIN_EXC_STATE i386_EXCEPTION_STATE
4294 : # define DARWIN_EXC_STATE_COUNT i386_EXCEPTION_STATE_COUNT
4295 : # define DARWIN_EXC_STATE_T i386_exception_state_t
4296 : # else
4297 : # define DARWIN_EXC_STATE x86_EXCEPTION_STATE32
4298 : # define DARWIN_EXC_STATE_COUNT x86_EXCEPTION_STATE32_COUNT
4299 : # define DARWIN_EXC_STATE_T x86_exception_state32_t
4300 : # endif
4301 : # else
4302 : # define DARWIN_EXC_STATE x86_EXCEPTION_STATE64
4303 : # define DARWIN_EXC_STATE_COUNT x86_EXCEPTION_STATE64_COUNT
4304 : # define DARWIN_EXC_STATE_T x86_exception_state64_t
4305 : # endif
4306 : # define DARWIN_EXC_STATE_DAR THREAD_FLD(faultvaddr)
4307 : #else
4308 : # error FIXME for non-arm/ppc/x86 darwin
4309 : #endif
4310 :
4311 : /* This violates the namespace rules but there isn't anything that can */
4312 : /* be done about it. The exception handling stuff is hard coded to */
4313 : /* call this. catch_exception_raise, catch_exception_raise_state and */
4314 : /* catch_exception_raise_state_identity are called from the OS. */
4315 : GC_API_OSCALL kern_return_t
4316 : catch_exception_raise(mach_port_t exception_port GC_ATTR_UNUSED,
4317 : mach_port_t thread, mach_port_t task GC_ATTR_UNUSED,
4318 : exception_type_t exception, exception_data_t code,
4319 : mach_msg_type_number_t code_count GC_ATTR_UNUSED)
4320 : {
4321 : kern_return_t r;
4322 : char *addr;
4323 : struct hblk *h;
4324 : unsigned int i;
4325 : thread_state_flavor_t flavor = DARWIN_EXC_STATE;
4326 : mach_msg_type_number_t exc_state_count = DARWIN_EXC_STATE_COUNT;
4327 : DARWIN_EXC_STATE_T exc_state;
4328 :
4329 : if (exception != EXC_BAD_ACCESS || code[0] != KERN_PROTECTION_FAILURE) {
4330 : # ifdef DEBUG_EXCEPTION_HANDLING
4331 : /* We aren't interested; pass it on to the old handler. */
4332 : GC_log_printf("Exception: 0x%x Code: 0x%x 0x%x in catch...\n",
4333 : exception, code_count > 0 ? code[0] : -1,
4334 : code_count > 1 ? code[1] : -1);
4335 : # endif
4336 : return FWD();
4337 : }
4338 :
4339 : r = thread_get_state(thread, flavor, (natural_t*)&exc_state,
4340 : &exc_state_count);
4341 : if(r != KERN_SUCCESS) {
4342 : /* The thread is supposed to be suspended while the exception */
4343 : /* handler is called. This shouldn't fail. */
4344 : # ifdef BROKEN_EXCEPTION_HANDLING
4345 : GC_err_printf("thread_get_state failed in catch_exception_raise\n");
4346 : return KERN_SUCCESS;
4347 : # else
4348 : ABORT("thread_get_state failed in catch_exception_raise");
4349 : # endif
4350 : }
4351 :
4352 : /* This is the address that caused the fault */
4353 : addr = (char*) exc_state.DARWIN_EXC_STATE_DAR;
4354 : if (HDR(addr) == 0) {
4355 : /* Ugh... just like the SIGBUS problem above, it seems we get */
4356 : /* a bogus KERN_PROTECTION_FAILURE every once in a while. We wait */
4357 : /* till we get a bunch in a row before doing anything about it. */
4358 : /* If a "real" fault ever occurs it'll just keep faulting over and */
4359 : /* over and we'll hit the limit pretty quickly. */
4360 : # ifdef BROKEN_EXCEPTION_HANDLING
4361 : static char *last_fault;
4362 : static int last_fault_count;
4363 :
4364 : if(addr != last_fault) {
4365 : last_fault = addr;
4366 : last_fault_count = 0;
4367 : }
4368 : if(++last_fault_count < 32) {
4369 : if(last_fault_count == 1)
4370 : WARN("Ignoring KERN_PROTECTION_FAILURE at %p\n", addr);
4371 : return KERN_SUCCESS;
4372 : }
4373 :
4374 : GC_err_printf(
4375 : "Unexpected KERN_PROTECTION_FAILURE at %p; aborting...\n", addr);
4376 : /* Can't pass it along to the signal handler because that is */
4377 : /* ignoring SIGBUS signals. We also shouldn't call ABORT here as */
4378 : /* signals don't always work too well from the exception handler. */
4379 : EXIT();
4380 : # else /* BROKEN_EXCEPTION_HANDLING */
4381 : /* Pass it along to the next exception handler
4382 : (which should call SIGBUS/SIGSEGV) */
4383 : return FWD();
4384 : # endif /* !BROKEN_EXCEPTION_HANDLING */
4385 : }
4386 :
4387 : # ifdef BROKEN_EXCEPTION_HANDLING
4388 : /* Reset the number of consecutive SIGBUSs */
4389 : GC_sigbus_count = 0;
4390 : # endif
4391 :
4392 : if (GC_mprotect_state == GC_MP_NORMAL) { /* common case */
4393 : h = (struct hblk*)((word)addr & ~(GC_page_size-1));
4394 : UNPROTECT(h, GC_page_size);
4395 : for (i = 0; i < divHBLKSZ(GC_page_size); i++) {
4396 : register int index = PHT_HASH(h+i);
4397 : async_set_pht_entry_from_index(GC_dirty_pages, index);
4398 : }
4399 : } else if (GC_mprotect_state == GC_MP_DISCARDING) {
4400 : /* Lie to the thread for now. No sense UNPROTECT()ing the memory
4401 : when we're just going to PROTECT() it again later. The thread
4402 : will just fault again once it resumes. */
4403 : } else {
4404 : /* This shouldn't happen, I don't think. */
4405 : GC_err_printf("KERN_PROTECTION_FAILURE while world is stopped\n");
4406 : return FWD();
4407 : }
4408 : return KERN_SUCCESS;
4409 : }
4410 : #undef FWD
4411 :
4412 : #ifndef NO_DESC_CATCH_EXCEPTION_RAISE
4413 : /* These symbols should have the REFERENCED_DYNAMICALLY (0x10) bit set */
4414 : /* to let strip know they are not to be stripped. */
4415 : __asm__(".desc _catch_exception_raise, 0x10");
4416 : __asm__(".desc _catch_exception_raise_state, 0x10");
4417 : __asm__(".desc _catch_exception_raise_state_identity, 0x10");
4418 : #endif
4419 :
4420 : #endif /* DARWIN && MPROTECT_VDB */
4421 :
4422 : #ifndef HAVE_INCREMENTAL_PROTECTION_NEEDS
4423 : GC_API int GC_CALL GC_incremental_protection_needs(void)
4424 : {
4425 : return GC_PROTECTS_NONE;
4426 : }
4427 : #endif /* !HAVE_INCREMENTAL_PROTECTION_NEEDS */
4428 :
4429 : #ifdef ECOS
4430 : /* Undo sbrk() redirection. */
4431 : # undef sbrk
4432 : #endif
4433 :
4434 : /* If value is non-zero then allocate executable memory. */
4435 0 : GC_API void GC_CALL GC_set_pages_executable(int value)
4436 : {
4437 : GC_ASSERT(!GC_is_initialized);
4438 : /* Even if IGNORE_PAGES_EXECUTABLE is defined, GC_pages_executable is */
4439 : /* touched here to prevent a compiler warning. */
4440 0 : GC_pages_executable = (GC_bool)(value != 0);
4441 0 : }
4442 :
4443 : /* Returns non-zero if the GC-allocated memory is executable. */
4444 : /* GC_get_pages_executable is defined here, after all the places */
4445 : /* where the GC_get_pages_executable macro is #undef'd. */
4446 0 : GC_API int GC_CALL GC_get_pages_executable(void)
4447 : {
4448 : # ifdef IGNORE_PAGES_EXECUTABLE
4449 : return 1; /* Always allocate executable memory. */
4450 : # else
4451 0 : return (int)GC_pages_executable;
4452 : # endif
4453 : }
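     : /* Illustrative client usage (a sketch): a client that generates */
     : /* machine code into collected objects must request executable */
     : /* pages before the collector is initialized: */
     : /*   GC_set_pages_executable(1); */
     : /*   GC_INIT(); */
     : /* After this, memory obtained from the collector is mapped with */
     : /* execute permission where the platform honors the request. */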
4454 :
4455 : /* Call stack save code for debugging. Should probably be in */
4456 : /* mach_dep.c, but that requires reorganization. */
4457 :
4458 : /* I suspect the following works for most X86 *nix variants, so */
4459 : /* long as the frame pointer is explicitly stored. In the case of gcc, */
4460 : /* compiler flags (e.g. -fomit-frame-pointer) determine whether it is. */
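     : /* For instance (gcc, illustrative): compiling the client with */
     : /*   gcc -O2 -fno-omit-frame-pointer ... */
     : /* keeps %ebp as the frame pointer, which the frame walk below */
     : /* relies on; -fomit-frame-pointer breaks it. */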
4461 : #if defined(I386) && defined(LINUX) && defined(SAVE_CALL_CHAIN)
4462 : # include <features.h>
4463 :
4464 : struct frame {
4465 : struct frame *fr_savfp;
4466 : long fr_savpc;
4467 : long fr_arg[NARGS]; /* All the arguments go here. */
4468 : };
4469 : #endif
4470 :
4471 : #if defined(SPARC)
4472 : # if defined(LINUX)
4473 : # include <features.h>
4474 :
4475 : struct frame {
4476 : long fr_local[8];
4477 : long fr_arg[6];
4478 : struct frame *fr_savfp;
4479 : long fr_savpc;
4480 : # ifndef __arch64__
4481 : char *fr_stret;
4482 : # endif
4483 : long fr_argd[6];
4484 : long fr_argx[0];
4485 : };
4486 : # elif defined (DRSNX)
4487 : # include <sys/sparc/frame.h>
4488 : # elif defined(OPENBSD)
4489 : # include <frame.h>
4490 : # elif defined(FREEBSD) || defined(NETBSD)
4491 : # include <machine/frame.h>
4492 : # else
4493 : # include <sys/frame.h>
4494 : # endif
4495 : # if NARGS > 6
4496 : # error We only know how to get the first 6 arguments
4497 : # endif
4498 : #endif /* SPARC */
4499 :
4500 : #ifdef NEED_CALLINFO
4501 : /* Fill in the pc and argument information for up to NFRAMES of my */
4502 : /* callers. Ignore my frame and my caller's frame. */
4503 :
4504 : #ifdef LINUX
4505 : # include <unistd.h>
4506 : #endif
4507 :
4508 : #endif /* NEED_CALLINFO */
4509 :
4510 : #if defined(GC_HAVE_BUILTIN_BACKTRACE)
4511 : # ifdef _MSC_VER
4512 : # include "private/msvc_dbg.h"
4513 : # else
4514 : # include <execinfo.h>
4515 : # endif
4516 : #endif
4517 :
4518 : #ifdef SAVE_CALL_CHAIN
4519 :
4520 : #if NARGS == 0 && NFRAMES % 2 == 0 /* No padding */ \
4521 : && defined(GC_HAVE_BUILTIN_BACKTRACE)
4522 :
4523 : #ifdef REDIRECT_MALLOC
4524 : /* Deal with possible malloc calls in backtrace by omitting */
4525 : /* the backtrace that would otherwise recurse infinitely. */
4526 : # ifdef THREADS
4527 : __thread /* If your compiler doesn't understand this */
4528 : /* you could use something like pthread_getspecific. */
4529 : # endif
4530 : GC_bool GC_in_save_callers = FALSE;
4531 : #endif
4532 :
4533 : GC_INNER void GC_save_callers(struct callinfo info[NFRAMES])
4534 : {
4535 : void * tmp_info[NFRAMES + 1];
4536 : int npcs, i;
4537 : # define IGNORE_FRAMES 1
4538 :
4539 : /* We retrieve NFRAMES+1 pc values, but discard the first, since it */
4540 : /* points to our own frame. */
4541 : # ifdef REDIRECT_MALLOC
4542 : if (GC_in_save_callers) {
4543 : info[0].ci_pc = (word)(&GC_save_callers);
4544 : for (i = 1; i < NFRAMES; ++i) info[i].ci_pc = 0;
4545 : return;
4546 : }
4547 : GC_in_save_callers = TRUE;
4548 : # endif
4549 : GC_STATIC_ASSERT(sizeof(struct callinfo) == sizeof(void *));
4550 : npcs = backtrace((void **)tmp_info, NFRAMES + IGNORE_FRAMES);
4551 : BCOPY(tmp_info+IGNORE_FRAMES, info, (npcs - IGNORE_FRAMES) * sizeof(void *));
4552 : for (i = npcs - IGNORE_FRAMES; i < NFRAMES; ++i) info[i].ci_pc = 0;
4553 : # ifdef REDIRECT_MALLOC
4554 : GC_in_save_callers = FALSE;
4555 : # endif
4556 : }
4557 :
4558 : #else /* No builtin backtrace; do it ourselves */
4559 :
4560 : #if (defined(OPENBSD) || defined(NETBSD) || defined(FREEBSD)) && defined(SPARC)
4561 : # define FR_SAVFP fr_fp
4562 : # define FR_SAVPC fr_pc
4563 : #else
4564 : # define FR_SAVFP fr_savfp
4565 : # define FR_SAVPC fr_savpc
4566 : #endif
4567 :
4568 : #if defined(SPARC) && (defined(__arch64__) || defined(__sparcv9))
4569 : # define BIAS 2047
4570 : #else
4571 : # define BIAS 0
4572 : #endif
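     : /* The 64-bit SPARC ABI stores stack and frame pointers biased by */
     : /* 2047 (the "stack bias"); BIAS undoes that when chasing saved */
     : /* frame pointers below. */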
4573 :
4574 : GC_INNER void GC_save_callers(struct callinfo info[NFRAMES])
4575 : {
4576 : struct frame *frame;
4577 : struct frame *fp;
4578 : int nframes = 0;
4579 : # ifdef I386
4580 : /* We assume this is turned on only with gcc as the compiler. */
4581 : asm("movl %%ebp,%0" : "=r"(frame));
4582 : fp = frame;
4583 : # else
4584 : frame = (struct frame *)GC_save_regs_in_stack();
4585 : fp = (struct frame *)((long) frame -> FR_SAVFP + BIAS);
4586 : # endif
4587 :
4588 : for (; !((word)fp HOTTER_THAN (word)frame)
4589 : && !((word)GC_stackbottom HOTTER_THAN (word)fp)
4590 : && nframes < NFRAMES;
4591 : fp = (struct frame *)((long) fp -> FR_SAVFP + BIAS), nframes++) {
4592 : register int i;
4593 :
4594 : info[nframes].ci_pc = fp->FR_SAVPC;
4595 : # if NARGS > 0
4596 : for (i = 0; i < NARGS; i++) {
4597 : info[nframes].ci_arg[i] = ~(fp->fr_arg[i]);
4598 : }
4599 : # endif /* NARGS > 0 */
4600 : }
4601 : if (nframes < NFRAMES) info[nframes].ci_pc = 0;
4602 : }
4603 :
4604 : #endif /* No builtin backtrace */
4605 :
4606 : #endif /* SAVE_CALL_CHAIN */
4607 :
4608 : #ifdef NEED_CALLINFO
4609 :
4610 : /* Print info to stderr. We do NOT hold the allocation lock */
4611 : GC_INNER void GC_print_callers(struct callinfo info[NFRAMES])
4612 : {
4613 : int i;
4614 : static int reentry_count = 0;
4615 : GC_bool stop = FALSE;
4616 : DCL_LOCK_STATE;
4617 :
4618 : /* FIXME: This should probably use a different lock, so that we */
4619 : /* become callable with or without the allocation lock. */
4620 : LOCK();
4621 : ++reentry_count;
4622 : UNLOCK();
4623 :
4624 : # if NFRAMES == 1
4625 : GC_err_printf("\tCaller at allocation:\n");
4626 : # else
4627 : GC_err_printf("\tCall chain at allocation:\n");
4628 : # endif
4629 : for (i = 0; i < NFRAMES && !stop; i++) {
4630 : if (info[i].ci_pc == 0) break;
4631 : # if NARGS > 0
4632 : {
4633 : int j;
4634 :
4635 : GC_err_printf("\t\targs: ");
4636 : for (j = 0; j < NARGS; j++) {
4637 : if (j != 0) GC_err_printf(", ");
4638 : GC_err_printf("%d (0x%X)", ~(info[i].ci_arg[j]),
4639 : ~(info[i].ci_arg[j]));
4640 : }
4641 : GC_err_printf("\n");
4642 : }
4643 : # endif
4644 : if (reentry_count > 1) {
4645 : /* We were called during an allocation during */
4646 : /* a previous GC_print_callers call; punt. */
4647 : GC_err_printf("\t\t##PC##= 0x%lx\n", info[i].ci_pc);
4648 : continue;
4649 : }
4650 : {
4651 : # if defined(GC_HAVE_BUILTIN_BACKTRACE) \
4652 : && !defined(GC_BACKTRACE_SYMBOLS_BROKEN)
4653 : char **sym_name =
4654 : backtrace_symbols((void **)(&(info[i].ci_pc)), 1);
4655 : char *name = sym_name[0];
4656 : # else
4657 : char buf[40];
4658 : char *name = buf;
4659 : (void)snprintf(buf, sizeof(buf), "##PC##= 0x%lx", info[i].ci_pc);
4660 : buf[sizeof(buf) - 1] = '\0';
4661 : # endif
4662 : # if defined(LINUX) && !defined(SMALL_CONFIG)
4663 : /* Try for a line number. */
4664 : {
4665 : FILE *pipe;
4666 : # define EXE_SZ 100
4667 : static char exe_name[EXE_SZ];
4668 : # define CMD_SZ 200
4669 : char cmd_buf[CMD_SZ];
4670 : # define RESULT_SZ 200
4671 : static char result_buf[RESULT_SZ];
4672 : size_t result_len;
4673 : char *old_preload;
4674 : # define PRELOAD_SZ 200
4675 : char preload_buf[PRELOAD_SZ];
4676 : static GC_bool found_exe_name = FALSE;
4677 : static GC_bool will_fail = FALSE;
4678 : int ret_code;
4679 : /* Try to get it via a hairy and expensive scheme. */
4680 : /* First we get the name of the executable: */
4681 : if (will_fail) goto out;
4682 : if (!found_exe_name) {
4683 : ret_code = readlink("/proc/self/exe", exe_name, EXE_SZ);
4684 : if (ret_code < 0 || ret_code >= EXE_SZ
4685 : || exe_name[0] != '/') {
4686 : will_fail = TRUE; /* Don't try again. */
4687 : goto out;
4688 : }
4689 : exe_name[ret_code] = '\0';
4690 : found_exe_name = TRUE;
4691 : }
4692 : /* Then we use popen to start addr2line -e <exe> <addr> */
4693 : /* There are faster ways to do this, but hopefully this */
4694 : /* isn't time critical. */
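     : /* E.g. (hypothetical executable and address) the command */
     : /*   /usr/bin/addr2line -f -e /usr/bin/app 0x8048f2a */
     : /* prints two lines, the function name and then "file:line", */
     : /* which is what the parsing below expects. */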
4695 : (void)snprintf(cmd_buf, sizeof(cmd_buf),
4696 : "/usr/bin/addr2line -f -e %s 0x%lx",
4697 : exe_name, (unsigned long)info[i].ci_pc);
4698 : cmd_buf[sizeof(cmd_buf) - 1] = '\0';
4699 : old_preload = GETENV("LD_PRELOAD");
4700 : if (0 != old_preload) {
4701 : size_t old_len = strlen(old_preload);
4702 : if (old_len >= PRELOAD_SZ) {
4703 : will_fail = TRUE;
4704 : goto out;
4705 : }
4706 : BCOPY(old_preload, preload_buf, old_len + 1);
4707 : unsetenv ("LD_PRELOAD");
4708 : }
4709 : pipe = popen(cmd_buf, "r");
4710 : if (0 != old_preload
4711 : && 0 != setenv ("LD_PRELOAD", preload_buf, 0)) {
4712 : WARN("Failed to reset LD_PRELOAD\n", 0);
4713 : }
4714 : if (pipe == NULL
4715 : || (result_len = fread(result_buf, 1,
4716 : RESULT_SZ - 1, pipe)) == 0) {
4717 : if (pipe != NULL) pclose(pipe);
4718 : will_fail = TRUE;
4719 : goto out;
4720 : }
4721 : if (result_buf[result_len - 1] == '\n') --result_len;
4722 : result_buf[result_len] = 0;
4723 : if (result_buf[0] == '?'
4724 : || (result_buf[result_len-2] == ':'
4725 : && result_buf[result_len-1] == '0')) {
4726 : pclose(pipe);
4727 : goto out;
4728 : }
4729 : /* Get rid of embedded newline, if any. Test for "main" */
4730 : {
4731 : char * nl = strchr(result_buf, '\n');
4732 : if (nl != NULL
4733 : && (word)nl < (word)(result_buf + result_len)) {
4734 : *nl = ':';
4735 : }
4736 : if (strncmp(result_buf, "main", nl != NULL ? (size_t)(nl - result_buf) : result_len) == 0) {
4737 : stop = TRUE;
4738 : }
4739 : }
4740 : if (result_len < RESULT_SZ - 25) {
4741 : /* Add in hex address */
4742 : (void)snprintf(&result_buf[result_len],
4743 : sizeof(result_buf) - result_len,
4744 : " [0x%lx]", (unsigned long)info[i].ci_pc);
4745 : result_buf[sizeof(result_buf) - 1] = '\0';
4746 : }
4747 : name = result_buf;
4748 : pclose(pipe);
4749 : out:;
4750 : }
4751 : # endif /* LINUX */
4752 : GC_err_printf("\t\t%s\n", name);
4753 : # if defined(GC_HAVE_BUILTIN_BACKTRACE) \
4754 : && !defined(GC_BACKTRACE_SYMBOLS_BROKEN)
4755 : free(sym_name); /* May call GC_free; that's OK */
4756 : # endif
4757 : }
4758 : }
4759 : LOCK();
4760 : --reentry_count;
4761 : UNLOCK();
4762 : }
4763 :
4764 : #endif /* NEED_CALLINFO */
4765 :
4766 : #if defined(LINUX) && defined(__ELF__) && !defined(SMALL_CONFIG)
4767 : /* Dump /proc/self/maps to GC_stderr, to enable looking up names for */
4768 : /* addresses in FIND_LEAK output. */
4769 0 : void GC_print_address_map(void)
4770 : {
4771 : char *maps;
4772 :
4773 0 : GC_err_printf("---------- Begin address map ----------\n");
4774 0 : maps = GC_get_maps();
4775 0 : GC_err_puts(maps != NULL ? maps : "Failed to get map!\n");
4776 0 : GC_err_printf("---------- End address map ----------\n");
4777 0 : }
4778 : #endif /* LINUX && ELF */