Line data Source code
1 : /*
2 : * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3 : * Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
4 : * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
5 : *
6 : * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
7 : * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
8 : *
9 : * Permission is hereby granted to use or copy this program
10 : * for any purpose, provided the above notices are retained on all copies.
11 : * Permission to modify the code and to distribute modified code is granted,
12 : * provided the above notices are retained, and a notice that the code was
13 : * modified is included with the above copyright notice.
14 : */
15 :
16 : #include "private/gc_priv.h"
17 :
18 : #include <stdio.h>
19 : #include <string.h>
20 :
21 : /* Allocate reclaim list for kind: */
22 : /* Return TRUE on success */
23 489 : STATIC GC_bool GC_alloc_reclaim_list(struct obj_kind *kind)
24 : {
25 489 : struct hblk ** result = (struct hblk **)
26 489 : GC_scratch_alloc((MAXOBJGRANULES+1) * sizeof(struct hblk *));
27 489 : if (result == 0) return(FALSE);
28 489 : BZERO(result, (MAXOBJGRANULES+1)*sizeof(struct hblk *));
29 489 : kind -> ok_reclaim_list = result;
30 489 : return(TRUE);
31 : }
32 :
33 : GC_INNER GC_bool GC_collect_or_expand(word needed_blocks,
34 : GC_bool ignore_off_page,
35 : GC_bool retry); /* from alloc.c */
36 :
37 : /* Allocate a large block of size lb bytes. */
38 : /* The block is not cleared. */
39 : /* Flags is 0 or IGNORE_OFF_PAGE. */
40 : /* We hold the allocation lock. */
41 : /* EXTRA_BYTES were already added to lb. */
42 2003 : GC_INNER ptr_t GC_alloc_large(size_t lb, int k, unsigned flags)
43 : {
44 : struct hblk * h;
45 : word n_blocks;
46 : ptr_t result;
47 2003 : GC_bool retry = FALSE;
48 :
49 : /* Round up to a multiple of a granule. */
50 2003 : lb = (lb + GRANULE_BYTES - 1) & ~(GRANULE_BYTES - 1);
51 2003 : n_blocks = OBJ_SZ_TO_BLOCKS(lb);
52 2003 : if (!GC_is_initialized) GC_init();
53 : /* Do our share of marking work */
54 2003 : if (GC_incremental && !GC_dont_gc)
55 0 : GC_collect_a_little_inner((int)n_blocks);
56 2003 : h = GC_allochblk(lb, k, flags);
57 : # ifdef USE_MUNMAP
58 : if (0 == h) {
59 : GC_merge_unmapped();
60 : h = GC_allochblk(lb, k, flags);
61 : }
62 : # endif
63 4050 : while (0 == h && GC_collect_or_expand(n_blocks, flags != 0, retry)) {
64 44 : h = GC_allochblk(lb, k, flags);
65 44 : retry = TRUE;
66 : }
67 2003 : if (h == 0) {
68 3 : result = 0;
69 : } else {
70 2000 : size_t total_bytes = n_blocks * HBLKSIZE;
71 2000 : if (n_blocks > 1) {
72 1121 : GC_large_allocd_bytes += total_bytes;
73 1121 : if (GC_large_allocd_bytes > GC_max_large_allocd_bytes)
74 580 : GC_max_large_allocd_bytes = GC_large_allocd_bytes;
75 : }
76 2000 : result = h -> hb_body;
77 : }
78 2003 : return result;
79 : }
80 :
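/* Illustrative sketch (not from this file): the statement              */
/*   lb = (lb + GRANULE_BYTES - 1) & ~(GRANULE_BYTES - 1);              */
/* above rounds lb up to the next multiple of GRANULE_BYTES, relying on */
/* GRANULE_BYTES being a power of two.  A self-contained demo, using a  */
/* hypothetical 16-byte granule:                                        */

#include <assert.h>
#include <stddef.h>

#define DEMO_GRANULE_BYTES 16  /* assumed power-of-two granule size */

static size_t demo_round_up_to_granule(size_t lb)
{
    /* Adding (granule - 1) and masking off the low bits rounds up. */
    return (lb + DEMO_GRANULE_BYTES - 1) & ~(size_t)(DEMO_GRANULE_BYTES - 1);
}

static void demo_round_up_check(void)
{
    assert(demo_round_up_to_granule(1) == 16);
    assert(demo_round_up_to_granule(16) == 16);
    assert(demo_round_up_to_granule(17) == 32);
}
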
81 : /* Allocate a large block of size lb bytes. Clear if appropriate. */
82 : /* We hold the allocation lock. */
83 : /* EXTRA_BYTES were already added to lb. */
84 7 : STATIC ptr_t GC_alloc_large_and_clear(size_t lb, int k, unsigned flags)
85 : {
86 7 : ptr_t result = GC_alloc_large(lb, k, flags);
87 7 : word n_blocks = OBJ_SZ_TO_BLOCKS(lb);
88 :
89 7 : if (0 == result) return 0;
90 7 : if (GC_debugging_started || GC_obj_kinds[k].ok_init) {
91 : /* Clear the whole block, in case of GC_realloc call. */
92 7 : BZERO(result, n_blocks * HBLKSIZE);
93 : }
94 7 : return result;
95 : }
96 :
97 : /* Allocate lb bytes for an object of kind k. */
98 : /* Should not be used directly to allocate */
99 : /* objects such as STUBBORN objects that */
100 : /* require special handling on allocation. */
101 : /* First, a version that assumes we already */
102 : /* hold the lock: */
103 62399 : GC_INNER void * GC_generic_malloc_inner(size_t lb, int k)
104 : {
105 : void *op;
106 :
107 124077 : if(SMALL_OBJ(lb)) {
108 62398 : struct obj_kind * kind = GC_obj_kinds + k;
109 62398 : size_t lg = GC_size_map[lb];
110 62398 : void ** opp = &(kind -> ok_freelist[lg]);
111 :
112 62398 : if( (op = *opp) == 0 ) {
113 18092 : if (GC_size_map[lb] == 0) {
114 720 : if (!GC_is_initialized) GC_init();
115 720 : if (GC_size_map[lb] == 0) GC_extend_size_map(lb);
116 720 : return(GC_generic_malloc_inner(lb, k));
117 : }
118 17372 : if (kind -> ok_reclaim_list == 0) {
119 489 : if (!GC_alloc_reclaim_list(kind)) goto out;
120 : }
121 17372 : op = GC_allocobj(lg, k);
122 17372 : if (op == 0) goto out;
123 : }
124 61678 : *opp = obj_link(op);
125 61678 : obj_link(op) = 0;
126 61678 : GC_bytes_allocd += GRANULES_TO_BYTES(lg);
127 : } else {
128 1 : op = (ptr_t)GC_alloc_large_and_clear(ADD_SLOP(lb), k, 0);
129 1 : GC_bytes_allocd += lb;
130 : }
131 :
132 : out:
133 61679 : return op;
134 : }
135 :
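/* Illustrative sketch (not from this file): the small-object path of   */
/* GC_generic_malloc_inner above pops the head of a per-size free list; */
/* obj_link() is just the "next" pointer stored in the first word of a  */
/* free object.  A minimal stand-alone version with hypothetical names: */

#include <stddef.h>

struct demo_free_obj {
    struct demo_free_obj *next;  /* link kept in the object's first word */
};

static void *demo_freelist_pop(struct demo_free_obj **head)
{
    struct demo_free_obj *op = *head;

    if (op == NULL) return NULL;  /* empty: the real code refills the list */
    *head = op->next;             /* corresponds to *opp = obj_link(op)    */
    op->next = NULL;              /* corresponds to obj_link(op) = 0       */
    return op;
}
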
136 : /* Allocate a composite object of size n bytes. The caller guarantees */
137 : /* that pointers past the first page are not relevant. Caller holds */
138 : /* allocation lock. */
139 803 : GC_INNER void * GC_generic_malloc_inner_ignore_off_page(size_t lb, int k)
140 : {
141 : word lb_adjusted;
142 : void * op;
143 :
144 803 : if (lb <= HBLKSIZE)
145 797 : return(GC_generic_malloc_inner(lb, k));
146 6 : lb_adjusted = ADD_SLOP(lb);
147 6 : op = GC_alloc_large_and_clear(lb_adjusted, k, IGNORE_OFF_PAGE);
148 6 : GC_bytes_allocd += lb_adjusted;
149 6 : return op;
150 : }
151 :
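/* Illustrative sketch (not from this file): this path backs the public */
/* GC_malloc_ignore_off_page entry point (declared in gc.h), meant for  */
/* large objects for which the client promises to keep a pointer near   */
/* the start of the object, so the collector may ignore pointers into   */
/* its interior.  Assumed usage:                                        */

#include "gc.h"  /* public bdwgc header; include path may vary */

static double *demo_big_array(size_t n)
{
    /* Keep (and use) the returned base pointer while the array is live. */
    return (double *)GC_malloc_ignore_off_page(n * sizeof(double));
}
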
152 17858 : GC_API void * GC_CALL GC_generic_malloc(size_t lb, int k)
153 : {
154 : void * result;
155 : DCL_LOCK_STATE;
156 :
157 17858 : if (GC_have_errors) GC_print_all_errors();
158 17858 : GC_INVOKE_FINALIZERS();
159 33720 : if (SMALL_OBJ(lb)) {
160 15862 : LOCK();
161 15862 : result = GC_generic_malloc_inner((word)lb, k);
162 15862 : UNLOCK();
163 : } else {
164 : size_t lg;
165 : size_t lb_rounded;
166 : word n_blocks;
167 : GC_bool init;
168 1996 : lg = ROUNDED_UP_GRANULES(lb);
169 1996 : lb_rounded = GRANULES_TO_BYTES(lg);
170 1996 : if (lb_rounded < lb)
171 0 : return((*GC_get_oom_fn())(lb));
172 1996 : n_blocks = OBJ_SZ_TO_BLOCKS(lb_rounded);
173 1996 : init = GC_obj_kinds[k].ok_init;
174 1996 : LOCK();
175 1996 : result = (ptr_t)GC_alloc_large(lb_rounded, k, 0);
176 1996 : if (0 != result) {
177 1993 : if (GC_debugging_started) {
178 0 : BZERO(result, n_blocks * HBLKSIZE);
179 : } else {
180 : # ifdef THREADS
181 : /* Clear any memory that might be used for GC descriptors */
182 : /* before we release the lock. */
183 1993 : ((word *)result)[0] = 0;
184 1993 : ((word *)result)[1] = 0;
185 1993 : ((word *)result)[GRANULES_TO_WORDS(lg)-1] = 0;
186 1993 : ((word *)result)[GRANULES_TO_WORDS(lg)-2] = 0;
187 : # endif
188 : }
189 : }
190 1996 : GC_bytes_allocd += lb_rounded;
191 1996 : UNLOCK();
192 1996 : if (init && !GC_debugging_started && 0 != result) {
193 630 : BZERO(result, n_blocks * HBLKSIZE);
194 : }
195 : }
196 17858 : if (0 == result) {
197 3 : return((*GC_get_oom_fn())(lb));
198 : } else {
199 17855 : return(result);
200 : }
201 : }
202 :
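/* Illustrative sketch (not from this file): on failure, GC_generic_malloc */
/* above returns (*GC_get_oom_fn())(lb), so a client can install its own   */
/* out-of-memory handler.  Assumed usage with the public GC_set_oom_fn:    */

#include <stdio.h>
#include <stdlib.h>
#include "gc.h"  /* public bdwgc header; include path may vary */

static void * GC_CALLBACK demo_oom_handler(size_t bytes)
{
    /* Invoked with the original request size when allocation fails. */
    fprintf(stderr, "GC: out of memory (%lu bytes requested)\n",
            (unsigned long)bytes);
    abort();  /* returning NULL here would instead make the failed */
              /* allocation call itself return NULL                */
}

static void demo_install_oom_handler(void)
{
    GC_set_oom_fn(demo_oom_handler);
}
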
203 : /* Allocate lb bytes of atomic (pointer-free) data */
204 : #ifdef THREAD_LOCAL_ALLOC
205 71851 : GC_INNER void * GC_core_malloc_atomic(size_t lb)
206 : #else
207 : GC_API void * GC_CALL GC_malloc_atomic(size_t lb)
208 : #endif
209 : {
210 : void *op;
211 : void ** opp;
212 : size_t lg;
213 : DCL_LOCK_STATE;
214 :
215 71851 : if(SMALL_OBJ(lb)) {
216 70486 : lg = GC_size_map[lb];
217 70486 : opp = &(GC_aobjfreelist[lg]);
218 70486 : LOCK();
219 70486 : if (EXPECT((op = *opp) == 0, FALSE)) {
220 8437 : UNLOCK();
221 8437 : return(GENERAL_MALLOC((word)lb, PTRFREE));
222 : }
223 62049 : *opp = obj_link(op);
224 62049 : GC_bytes_allocd += GRANULES_TO_BYTES(lg);
225 62049 : UNLOCK();
226 62049 : return((void *) op);
227 : } else {
228 1365 : return(GENERAL_MALLOC((word)lb, PTRFREE));
229 : }
230 : }
231 :
232 : /* Allocate lb bytes of composite (pointerful) data */
233 : #ifdef THREAD_LOCAL_ALLOC
234 47080 : GC_INNER void * GC_core_malloc(size_t lb)
235 : #else
236 : GC_API void * GC_CALL GC_malloc(size_t lb)
237 : #endif
238 : {
239 : void *op;
240 : void **opp;
241 : size_t lg;
242 : DCL_LOCK_STATE;
243 :
244 47080 : if(SMALL_OBJ(lb)) {
245 46449 : lg = GC_size_map[lb];
246 46449 : opp = (void **)&(GC_objfreelist[lg]);
247 46449 : LOCK();
248 46449 : if (EXPECT((op = *opp) == 0, FALSE)) {
249 3201 : UNLOCK();
250 3201 : return (GENERAL_MALLOC((word)lb, NORMAL));
251 : }
252 : GC_ASSERT(0 == obj_link(op)
253 : || ((word)obj_link(op)
254 : <= (word)GC_greatest_plausible_heap_addr
255 : && (word)obj_link(op)
256 : >= (word)GC_least_plausible_heap_addr));
257 43248 : *opp = obj_link(op);
258 43248 : obj_link(op) = 0;
259 43248 : GC_bytes_allocd += GRANULES_TO_BYTES(lg);
260 43248 : UNLOCK();
261 43248 : return op;
262 : } else {
263 631 : return(GENERAL_MALLOC(lb, NORMAL));
264 : }
265 : }
266 :
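/* Illustrative sketch (not from this file): typical client use of the  */
/* two allocators above.  GC_malloc returns zero-filled memory that the */
/* collector scans for pointers; GC_malloc_atomic returns memory that   */
/* is neither scanned nor cleared, suitable for pointer-free payloads.  */
/* Assumes GC_INIT() has already been called; names are hypothetical.   */

#include <string.h>
#include "gc.h"  /* public bdwgc header; include path may vary */

struct demo_node {
    struct demo_node *next;  /* pointer field: keep it in scanned memory */
    char *name;
};

static struct demo_node *demo_make_node(const char *name)
{
    struct demo_node *n = (struct demo_node *)GC_malloc(sizeof *n);

    if (n == NULL) return NULL;
    /* Pointer-free payload: cheaper for the collector, but not cleared. */
    n->name = (char *)GC_malloc_atomic(strlen(name) + 1);
    if (n->name != NULL) strcpy(n->name, name);
    return n;
}
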
267 : /* Allocate lb bytes of pointerful, traced, but not collectable data */
268 69101 : GC_API void * GC_CALL GC_malloc_uncollectable(size_t lb)
269 : {
270 : void *op;
271 : void **opp;
272 : size_t lg;
273 : DCL_LOCK_STATE;
274 :
275 69101 : if( SMALL_OBJ(lb) ) {
276 69101 : if (EXTRA_BYTES != 0 && lb != 0) lb--;
277 : /* We don't need the extra byte, since this won't be */
278 : /* collected anyway. */
279 69101 : lg = GC_size_map[lb];
280 69101 : opp = &(GC_uobjfreelist[lg]);
281 69101 : LOCK();
282 69101 : if( (op = *opp) != 0 ) {
283 64877 : *opp = obj_link(op);
284 64877 : obj_link(op) = 0;
285 64877 : GC_bytes_allocd += GRANULES_TO_BYTES(lg);
286 : /* Mark bit was already set on free list. It will be */
287 : /* cleared only temporarily during a collection, as a */
288 : /* result of the normal free list mark bit clearing. */
289 64877 : GC_non_gc_bytes += GRANULES_TO_BYTES(lg);
290 64877 : UNLOCK();
291 : } else {
292 4224 : UNLOCK();
293 4224 : op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
294 : /* For small objects, the free lists are completely marked. */
295 : }
296 : GC_ASSERT(0 == op || GC_is_marked(op));
297 69101 : return((void *) op);
298 : } else {
299 : hdr * hhdr;
300 :
301 0 : op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
302 0 : if (0 == op) return(0);
303 :
304 : GC_ASSERT(((word)op & (HBLKSIZE - 1)) == 0); /* large block */
305 0 : hhdr = HDR(op);
306 : /* We don't need the lock here, since we have an undisguised */
307 : /* pointer. We do need to hold the lock while we adjust */
308 : /* mark bits. */
309 0 : LOCK();
310 0 : set_mark_bit_from_hdr(hhdr, 0); /* Only object. */
311 : # ifndef THREADS
312 : GC_ASSERT(hhdr -> hb_n_marks == 0);
313 : /* This is not guaranteed in the multi-threaded case */
314 : /* because the counter could be updated before locking. */
315 : # endif
316 0 : hhdr -> hb_n_marks = 1;
317 0 : UNLOCK();
318 0 : return((void *) op);
319 : }
320 : }
321 :
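/* Illustrative sketch (not from this file): uncollectable objects are  */
/* never reclaimed automatically (their mark bits stay set), so they    */
/* act like malloc'ed memory that is still scanned for pointers and     */
/* must be released explicitly with GC_free.  Assumed usage:            */

#include "gc.h"  /* public bdwgc header; include path may vary */

static void demo_uncollectable_root(void)
{
    /* Behaves as a root: objects reachable from it stay alive. */
    void **slot = (void **)GC_malloc_uncollectable(sizeof(void *));

    if (slot == NULL) return;
    *slot = GC_malloc(128);  /* collectable object kept alive via *slot */
    /* ... use *slot ... */
    GC_free(slot);           /* required: the collector never frees it  */
}
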
322 : #ifdef REDIRECT_MALLOC
323 :
324 : # ifndef MSWINCE
325 : # include <errno.h>
326 : # endif
327 :
328 : /* Avoid unnecessary nested procedure calls here by #defining some */
329 : /* malloc replacements. Otherwise we end up saving a */
330 : /* meaningless return address in the object. It also speeds things up, */
331 : /* but it is admittedly quite ugly. */
332 :
333 : # define GC_debug_malloc_replacement(lb) \
334 : GC_debug_malloc(lb, GC_DBG_RA "unknown", 0)
335 :
336 : void * malloc(size_t lb)
337 : {
338 : /* It might help to manually inline the GC_malloc call here. */
339 : /* But any decent compiler should reduce the extra procedure call */
340 : /* to at most a jump instruction in this case. */
341 : # if defined(I386) && defined(GC_SOLARIS_THREADS)
342 : /*
343 : * Thread initialisation can call malloc before
344 : * we're ready for it.
345 : * It's not clear that this is enough to help matters.
346 : * The thread implementation may well call malloc at other
347 : * inopportune times.
348 : */
349 : if (!GC_is_initialized) return sbrk(lb);
350 : # endif /* I386 && GC_SOLARIS_THREADS */
351 : return((void *)REDIRECT_MALLOC(lb));
352 : }
353 :
354 : #if defined(GC_LINUX_THREADS) /* && !defined(USE_PROC_FOR_LIBRARIES) */
355 : STATIC ptr_t GC_libpthread_start = 0;
356 : STATIC ptr_t GC_libpthread_end = 0;
357 : STATIC ptr_t GC_libld_start = 0;
358 : STATIC ptr_t GC_libld_end = 0;
359 :
360 : STATIC void GC_init_lib_bounds(void)
361 : {
362 : if (GC_libpthread_start != 0) return;
363 : GC_init(); /* if not called yet */
364 : if (!GC_text_mapping("libpthread-",
365 : &GC_libpthread_start, &GC_libpthread_end)) {
366 : WARN("Failed to find libpthread.so text mapping: Expect crash\n", 0);
367 : /* This might still work with some versions of libpthread, */
368 : /* so we don't abort. Perhaps we should. */
369 : /* Generate message only once: */
370 : GC_libpthread_start = (ptr_t)1;
371 : }
372 : if (!GC_text_mapping("ld-", &GC_libld_start, &GC_libld_end)) {
373 : WARN("Failed to find ld.so text mapping: Expect crash\n", 0);
374 : }
375 : }
376 : #endif /* GC_LINUX_THREADS */
377 :
378 : #include <limits.h>
379 : #ifdef SIZE_MAX
380 : # define GC_SIZE_MAX SIZE_MAX
381 : #else
382 : # define GC_SIZE_MAX (~(size_t)0)
383 : #endif
384 :
385 : #define GC_SQRT_SIZE_MAX ((((size_t)1) << (WORDSZ / 2)) - 1)
386 :
387 : void * calloc(size_t n, size_t lb)
388 : {
389 : if ((lb | n) > GC_SQRT_SIZE_MAX /* fast initial test */
390 : && lb && n > GC_SIZE_MAX / lb)
391 : return NULL;
392 : # if defined(GC_LINUX_THREADS) /* && !defined(USE_PROC_FOR_LIBRARIES) */
393 : /* libpthread allocated some memory that is only pointed to by */
394 : /* mmapped thread stacks. Make sure it's not collectable. */
395 : {
396 : static GC_bool lib_bounds_set = FALSE;
397 : ptr_t caller = (ptr_t)__builtin_return_address(0);
398 : /* This test does not need to ensure memory visibility, since */
399 : /* the bounds will be set when/if we create another thread. */
400 : if (!lib_bounds_set) {
401 : GC_init_lib_bounds();
402 : lib_bounds_set = TRUE;
403 : }
404 : if ((caller >= GC_libpthread_start && caller < GC_libpthread_end)
405 : || (caller >= GC_libld_start && caller < GC_libld_end))
406 : return GC_malloc_uncollectable(n*lb);
407 : /* The two ranges are actually usually adjacent, so there may */
408 : /* be a way to speed this up. */
409 : }
410 : # endif
411 : return((void *)REDIRECT_MALLOC(n*lb));
412 : }
413 :
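/* Illustrative sketch (not from this file): the guard at the top of    */
/* calloc() above rejects products n*lb that would overflow size_t.     */
/* The cheap first test (lb | n) > GC_SQRT_SIZE_MAX lets almost all     */
/* requests through, because two factors that each fit in half the bits */
/* of size_t cannot overflow when multiplied; only then is the exact    */
/* (and slower) division test applied.  Stand-alone equivalent:         */

#include <stddef.h>
#include <stdint.h>

static int demo_mul_would_overflow(size_t n, size_t lb)
{
    const size_t sqrt_max = ((size_t)1 << (sizeof(size_t) * 4)) - 1;

    if ((n | lb) <= sqrt_max) return 0;    /* both factors small: safe  */
    return lb != 0 && n > SIZE_MAX / lb;   /* exact overflow check      */
}
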
414 : #ifndef strdup
415 : char *strdup(const char *s)
416 : {
417 : size_t lb = strlen(s) + 1;
418 : char *result = (char *)REDIRECT_MALLOC(lb);
419 : if (result == 0) {
420 : errno = ENOMEM;
421 : return 0;
422 : }
423 : BCOPY(s, result, lb);
424 : return result;
425 : }
426 : #endif /* !defined(strdup) */
427 : /* If strdup is defined as a macro, we assume it actually calls malloc, */
428 : /* and thus the right thing will happen even without overriding it. */
429 : /* This seems to be true on most Linux systems. */
430 :
431 : #ifndef strndup
432 : /* This is similar to strdup(). */
433 : char *strndup(const char *str, size_t size)
434 : {
435 : char *copy;
436 : size_t len = strlen(str);
437 : if (len > size)
438 : len = size;
439 : copy = (char *)REDIRECT_MALLOC(len + 1);
440 : if (copy == NULL) {
441 : errno = ENOMEM;
442 : return NULL;
443 : }
444 : BCOPY(str, copy, len);
445 : copy[len] = '\0';
446 : return copy;
447 : }
448 : #endif /* !strndup */
449 :
450 : #undef GC_debug_malloc_replacement
451 :
452 : #endif /* REDIRECT_MALLOC */
453 :
454 : /* Explicitly deallocate an object p. */
455 0 : GC_API void GC_CALL GC_free(void * p)
456 : {
457 : struct hblk *h;
458 : hdr *hhdr;
459 : size_t sz; /* In bytes */
460 : size_t ngranules; /* sz in granules */
461 : void **flh;
462 : int knd;
463 : struct obj_kind * ok;
464 : DCL_LOCK_STATE;
465 :
466 0 : if (p == 0) return;
467 : /* Required by ANSI. It's not my fault ... */
468 : # ifdef LOG_ALLOCS
469 : GC_err_printf("GC_free(%p): %lu\n", p, (unsigned long)GC_gc_no);
470 : # endif
471 0 : h = HBLKPTR(p);
472 0 : hhdr = HDR(h);
473 : # if defined(REDIRECT_MALLOC) && \
474 : (defined(GC_SOLARIS_THREADS) || defined(GC_LINUX_THREADS) \
475 : || defined(MSWIN32))
476 : /* For Solaris, we have to redirect malloc calls during */
477 : /* initialization. For the others, this seems to happen */
478 : /* implicitly. */
479 : /* Don't try to deallocate that memory. */
480 : if (0 == hhdr) return;
481 : # endif
482 : GC_ASSERT(GC_base(p) == p);
483 0 : sz = hhdr -> hb_sz;
484 0 : ngranules = BYTES_TO_GRANULES(sz);
485 0 : knd = hhdr -> hb_obj_kind;
486 0 : ok = &GC_obj_kinds[knd];
487 0 : if (EXPECT(ngranules <= MAXOBJGRANULES, TRUE)) {
488 0 : LOCK();
489 0 : GC_bytes_freed += sz;
490 0 : if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
491 : /* It's unnecessary to clear the mark bit. If the */
492 : /* object is reallocated, it doesn't matter. Otherwise the */
493 : /* collector will do it, since it's on a free list. */
494 0 : if (ok -> ok_init) {
495 0 : BZERO((word *)p + 1, sz-sizeof(word));
496 : }
497 0 : flh = &(ok -> ok_freelist[ngranules]);
498 0 : obj_link(p) = *flh;
499 0 : *flh = (ptr_t)p;
500 0 : UNLOCK();
501 : } else {
502 0 : size_t nblocks = OBJ_SZ_TO_BLOCKS(sz);
503 0 : LOCK();
504 0 : GC_bytes_freed += sz;
505 0 : if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
506 0 : if (nblocks > 1) {
507 0 : GC_large_allocd_bytes -= nblocks * HBLKSIZE;
508 : }
509 0 : GC_freehblk(h);
510 0 : UNLOCK();
511 : }
512 : }
513 :
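/* Illustrative sketch (not from this file): GC_free asserts that p is  */
/* the base address of a collector-allocated object (GC_base(p) == p).  */
/* If only an interior pointer is available, GC_base can recover the    */
/* object's start first.  Assumed usage:                                */

#include "gc.h"  /* public bdwgc header; include path may vary */

static void demo_free_from_interior(char *interior_ptr)
{
    void *base = GC_base(interior_ptr);  /* NULL if not a GC heap pointer */

    if (base != NULL)
        GC_free(base);  /* explicit deallocation is optional for */
                        /* collectable objects, but allowed      */
}
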
514 : /* Explicitly deallocate an object p when we already hold the lock. */
515 : /* Only used for internally allocated objects, so we can take some */
516 : /* shortcuts. */
517 : #ifdef THREADS
518 650 : GC_INNER void GC_free_inner(void * p)
519 : {
520 : struct hblk *h;
521 : hdr *hhdr;
522 : size_t sz; /* bytes */
523 : size_t ngranules; /* sz in granules */
524 : void ** flh;
525 : int knd;
526 : struct obj_kind * ok;
527 : DCL_LOCK_STATE;
528 :
529 650 : h = HBLKPTR(p);
530 650 : hhdr = HDR(h);
531 650 : knd = hhdr -> hb_obj_kind;
532 650 : sz = hhdr -> hb_sz;
533 650 : ngranules = BYTES_TO_GRANULES(sz);
534 650 : ok = &GC_obj_kinds[knd];
535 650 : if (ngranules <= MAXOBJGRANULES) {
536 650 : GC_bytes_freed += sz;
537 650 : if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
538 650 : if (ok -> ok_init) {
539 650 : BZERO((word *)p + 1, sz-sizeof(word));
540 : }
541 650 : flh = &(ok -> ok_freelist[ngranules]);
542 650 : obj_link(p) = *flh;
543 650 : *flh = (ptr_t)p;
544 : } else {
545 0 : size_t nblocks = OBJ_SZ_TO_BLOCKS(sz);
546 0 : GC_bytes_freed += sz;
547 0 : if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
548 0 : if (nblocks > 1) {
549 0 : GC_large_allocd_bytes -= nblocks * HBLKSIZE;
550 : }
551 0 : GC_freehblk(h);
552 : }
553 650 : }
554 : #endif /* THREADS */
555 :
556 : #if defined(REDIRECT_MALLOC) && !defined(REDIRECT_FREE)
557 : # define REDIRECT_FREE GC_free
558 : #endif
559 :
560 : #ifdef REDIRECT_FREE
561 : void free(void * p)
562 : {
563 : # if defined(GC_LINUX_THREADS) && !defined(USE_PROC_FOR_LIBRARIES)
564 : {
565 : /* Don't bother with initialization checks. If nothing */
566 : /* has been initialized, the check fails, and that's safe, */
567 : /* since we haven't allocated uncollectable objects either. */
568 : ptr_t caller = (ptr_t)__builtin_return_address(0);
569 : /* This test does not need to ensure memory visibility, since */
570 : /* the bounds will be set when/if we create another thread. */
571 : if ((caller >= GC_libpthread_start && caller < GC_libpthread_end)
572 : || (caller >= GC_libld_start && caller < GC_libld_end)) {
573 : GC_free(p);
574 : return;
575 : }
576 : }
577 : # endif
578 : # ifndef IGNORE_FREE
579 : REDIRECT_FREE(p);
580 : # endif
581 : }
582 : #endif /* REDIRECT_FREE */