LCOV - code coverage report
Current view: top level - mm/boehm-gc - malloc.c (source / functions)
Test:         coverage.info
Date:         2017-07-14 10:03:36

                Hit    Total    Coverage
Lines:          151      199     75.9 %
Functions:       10       11     90.9 %

          Line data    Source code
       1             : /*
       2             :  * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
       3             :  * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
       4             :  * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
       5             :  *
       6             :  * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
       7             :  * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
       8             :  *
       9             :  * Permission is hereby granted to use or copy this program
      10             :  * for any purpose,  provided the above notices are retained on all copies.
      11             :  * Permission to modify the code and to distribute modified code is granted,
      12             :  * provided the above notices are retained, and a notice that the code was
      13             :  * modified is included with the above copyright notice.
      14             :  */
      15             : 
      16             : #include "private/gc_priv.h"
      17             : 
      18             : #include <stdio.h>
      19             : #include <string.h>
      20             : 
      21             : /* Allocate reclaim list for kind:      */
      22             : /* Return TRUE on success               */
      23         489 : STATIC GC_bool GC_alloc_reclaim_list(struct obj_kind *kind)
      24             : {
      25         489 :     struct hblk ** result = (struct hblk **)
      26         489 :                 GC_scratch_alloc((MAXOBJGRANULES+1) * sizeof(struct hblk *));
      27         489 :     if (result == 0) return(FALSE);
      28         489 :     BZERO(result, (MAXOBJGRANULES+1)*sizeof(struct hblk *));
      29         489 :     kind -> ok_reclaim_list = result;
      30         489 :     return(TRUE);
      31             : }
      32             : 
      33             : GC_INNER GC_bool GC_collect_or_expand(word needed_blocks,
      34             :                                       GC_bool ignore_off_page,
      35             :                                       GC_bool retry); /* from alloc.c */
      36             : 
      37             : /* Allocate a large block of size lb bytes.     */
      38             : /* The block is not cleared.                    */
      39             : /* Flags is 0 or IGNORE_OFF_PAGE.               */
      40             : /* We hold the allocation lock.                 */
      41             : /* EXTRA_BYTES were already added to lb.        */
      42        2004 : GC_INNER ptr_t GC_alloc_large(size_t lb, int k, unsigned flags)
      43             : {
      44             :     struct hblk * h;
      45             :     word n_blocks;
      46             :     ptr_t result;
      47        2004 :     GC_bool retry = FALSE;
      48             : 
      49             :     /* Round up to a multiple of a granule. */
      50        2004 :       lb = (lb + GRANULE_BYTES - 1) & ~(GRANULE_BYTES - 1);
      51        2004 :     n_blocks = OBJ_SZ_TO_BLOCKS(lb);
      52        2004 :     if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
      53             :     /* Do our share of marking work */
      54        2004 :         if (GC_incremental && !GC_dont_gc)
      55           0 :             GC_collect_a_little_inner((int)n_blocks);
      56        2004 :     h = GC_allochblk(lb, k, flags);
      57             : #   ifdef USE_MUNMAP
      58             :         if (0 == h) {
      59             :             GC_merge_unmapped();
      60             :             h = GC_allochblk(lb, k, flags);
      61             :         }
      62             : #   endif
      63        4050 :     while (0 == h && GC_collect_or_expand(n_blocks, flags != 0, retry)) {
      64          42 :         h = GC_allochblk(lb, k, flags);
      65          42 :         retry = TRUE;
      66             :     }
      67        2004 :     if (h == 0) {
      68           3 :         result = 0;
      69             :     } else {
      70        2001 :         size_t total_bytes = n_blocks * HBLKSIZE;
      71        2001 :         if (n_blocks > 1) {
      72        1122 :             GC_large_allocd_bytes += total_bytes;
      73        1122 :             if (GC_large_allocd_bytes > GC_max_large_allocd_bytes)
      74         589 :                 GC_max_large_allocd_bytes = GC_large_allocd_bytes;
      75             :         }
      76             :         /* FIXME: Do we need some way to reset GC_max_large_allocd_bytes? */
      77        2001 :         result = h -> hb_body;
      78             :     }
      79        2004 :     return result;
      80             : }
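
The rounding step near the top of GC_alloc_large is the standard align-up idiom for power-of-two sizes. A minimal standalone sketch, assuming a hypothetical GRANULE_BYTES of 16 (the real value comes from gc_priv.h and is configuration-dependent):

#include <stdio.h>

#define GRANULE_BYTES 16  /* hypothetical; the real value is set in gc_priv.h */

/* Round lb up to the next multiple of GRANULE_BYTES by adding          */
/* (granule - 1) and masking off the low bits; requires a power of 2.   */
static size_t round_to_granule(size_t lb)
{
    return (lb + GRANULE_BYTES - 1) & ~(size_t)(GRANULE_BYTES - 1);
}

int main(void)
{
    printf("%u\n", (unsigned)round_to_granule(1));   /* prints 16 */
    printf("%u\n", (unsigned)round_to_granule(17));  /* prints 32 */
    return 0;
}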
      81             : 
      82             : /* Allocate a large block of size lb bytes.  Clear if appropriate.      */
      83             : /* We hold the allocation lock.                                         */
      84             : /* EXTRA_BYTES were already added to lb.                                */
      85           8 : STATIC ptr_t GC_alloc_large_and_clear(size_t lb, int k, unsigned flags)
      86             : {
      87           8 :     ptr_t result = GC_alloc_large(lb, k, flags);
      88           8 :     word n_blocks = OBJ_SZ_TO_BLOCKS(lb);
      89             : 
      90           8 :     if (0 == result) return 0;
      91           8 :     if (GC_debugging_started || GC_obj_kinds[k].ok_init) {
      92             :         /* Clear the whole block, in case of GC_realloc call. */
      93           8 :         BZERO(result, n_blocks * HBLKSIZE);
      94             :     }
      95           8 :     return result;
      96             : }
      97             : 
       98             : /* Allocate lb bytes for an object of kind k.   */
       99             : /* Should not be used directly to allocate      */
     100             : /* objects such as STUBBORN objects that        */
     101             : /* require special handling on allocation.      */
     102             : /* First a version that assumes we already      */
     103             : /* hold lock:                                   */
     104       62538 : GC_INNER void * GC_generic_malloc_inner(size_t lb, int k)
     105             : {
     106             :     void *op;
     107             : 
     108      125075 :     if(SMALL_OBJ(lb)) {
     109       62537 :         struct obj_kind * kind = GC_obj_kinds + k;
     110       62537 :         size_t lg = GC_size_map[lb];
     111       62537 :         void ** opp = &(kind -> ok_freelist[lg]);
     112             : 
     113       62537 :         op = *opp;
     114       62537 :         if (EXPECT(0 == op, FALSE)) {
     115       18222 :           if (lg == 0) {
     116         642 :             if (!EXPECT(GC_is_initialized, TRUE)) {
     117           0 :               GC_init();
     118           0 :               lg = GC_size_map[lb];
     119             :             }
     120         642 :             if (0 == lg) {
     121         642 :               GC_extend_size_map(lb);
     122         642 :               lg = GC_size_map[lb];
     123             :               GC_ASSERT(lg != 0);
     124             :             }
     125             :             /* Retry */
     126         642 :             opp = &(kind -> ok_freelist[lg]);
     127         642 :             op = *opp;
     128             :           }
     129       18222 :           if (0 == op) {
     130       18711 :             if (0 == kind -> ok_reclaim_list &&
     131         489 :                 !GC_alloc_reclaim_list(kind))
     132           0 :               return NULL;
     133       18222 :             op = GC_allocobj(lg, k);
     134       18222 :             if (0 == op)
     135           0 :               return NULL;
     136             :           }
     137             :         }
     138       62537 :         *opp = obj_link(op);
     139       62537 :         obj_link(op) = 0;
     140       62537 :         GC_bytes_allocd += GRANULES_TO_BYTES(lg);
     141             :     } else {
     142           1 :         op = (ptr_t)GC_alloc_large_and_clear(ADD_SLOP(lb), k, 0);
     143           1 :         GC_bytes_allocd += lb;
     144             :     }
     145             : 
     146       62538 :     return op;
     147             : }
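
The small-object fast path above pops the head of a per-size free list: the first word of each free object stores the link to the next one, which is what obj_link() accesses. A simplified, single-threaded sketch of that pop, with hypothetical names:

typedef void *obj_t;

/* Pop the head object off a singly linked free list whose link field   */
/* lives in the object's first word, mirroring the *opp = obj_link(op)  */
/* and obj_link(op) = 0 steps above.                                     */
static obj_t freelist_pop(obj_t *head)
{
    obj_t op = *head;
    if (op != NULL) {
        *head = *(obj_t *)op;  /* advance head to the next free object */
        *(obj_t *)op = NULL;   /* clear the link word in the object    */
    }
    return op;
}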
     148             : 
     149             : /* Allocate a composite object of size n bytes.  The caller guarantees  */
     150             : /* that pointers past the first page are not relevant.  Caller holds    */
     151             : /* allocation lock.                                                     */
     152         804 : GC_INNER void * GC_generic_malloc_inner_ignore_off_page(size_t lb, int k)
     153             : {
     154             :     word lb_adjusted;
     155             :     void * op;
     156             : 
     157         804 :     if (lb <= HBLKSIZE)
     158         797 :         return(GC_generic_malloc_inner(lb, k));
     159           7 :     lb_adjusted = ADD_SLOP(lb);
     160           7 :     op = GC_alloc_large_and_clear(lb_adjusted, k, IGNORE_OFF_PAGE);
     161           7 :     GC_bytes_allocd += lb_adjusted;
     162           7 :     return op;
     163             : }
     164             : 
     165             : #ifdef GC_COLLECT_AT_MALLOC
     166             :   /* Parameter to force GC at every malloc of size greater or equal to  */
     167             :   /* the given value.  This might be handy during debugging.            */
     168             :   size_t GC_dbg_collect_at_malloc_min_lb = (GC_COLLECT_AT_MALLOC);
     169             : #endif
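
When the collector is built with this macro defined (e.g. with -DGC_COLLECT_AT_MALLOC=4096; the threshold value is illustrative), the GC_DBG_COLLECT_AT_MALLOC(lb) calls in the entry points below force a collection for sufficiently large requests. A plausible reconstruction of that helper, hedged since the real definition lives in gc_priv.h:

/* Force a collection when the request size reaches the threshold.      */
# define GC_DBG_COLLECT_AT_MALLOC(lb) \
      (void)((lb) >= GC_dbg_collect_at_malloc_min_lb ? (GC_gcollect(), 0) : 0)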
     170             : 
     171       18725 : GC_API GC_ATTR_MALLOC void * GC_CALL GC_generic_malloc(size_t lb, int k)
     172             : {
     173             :     void * result;
     174             :     DCL_LOCK_STATE;
     175             : 
     176       18725 :     if (EXPECT(GC_have_errors, FALSE))
     177           0 :       GC_print_all_errors();
     178       18725 :     GC_INVOKE_FINALIZERS();
     179             :     GC_DBG_COLLECT_AT_MALLOC(lb);
     180       35454 :     if (SMALL_OBJ(lb)) {
     181       16729 :         LOCK();
     182       16729 :         result = GC_generic_malloc_inner((word)lb, k);
     183       16729 :         UNLOCK();
     184             :     } else {
     185             :         size_t lg;
     186             :         size_t lb_rounded;
     187             :         word n_blocks;
     188             :         GC_bool init;
     189             : 
     190        1996 :         lg = ROUNDED_UP_GRANULES(lb);
     191        1996 :         lb_rounded = GRANULES_TO_BYTES(lg);
     192        1996 :         if (lb_rounded < lb)
     193           0 :             return((*GC_get_oom_fn())(lb));
     194        1996 :         n_blocks = OBJ_SZ_TO_BLOCKS(lb_rounded);
     195        1996 :         init = GC_obj_kinds[k].ok_init;
     196        1996 :         LOCK();
     197        1996 :         result = (ptr_t)GC_alloc_large(lb_rounded, k, 0);
     198        1996 :         if (0 != result) {
     199        1993 :           if (GC_debugging_started) {
     200           0 :             BZERO(result, n_blocks * HBLKSIZE);
     201             :           } else {
     202             : #           ifdef THREADS
     203             :               /* Clear any memory that might be used for GC descriptors */
     204             :               /* before we release the lock.                            */
     205        1993 :                 ((word *)result)[0] = 0;
     206        1993 :                 ((word *)result)[1] = 0;
     207        1993 :                 ((word *)result)[GRANULES_TO_WORDS(lg)-1] = 0;
     208        1993 :                 ((word *)result)[GRANULES_TO_WORDS(lg)-2] = 0;
     209             : #           endif
     210             :           }
     211             :         }
     212        1996 :         GC_bytes_allocd += lb_rounded;
     213        1996 :         UNLOCK();
     214        1996 :         if (init && !GC_debugging_started && 0 != result) {
     215         630 :             BZERO(result, n_blocks * HBLKSIZE);
     216             :         }
     217             :     }
     218       18725 :     if (0 == result) {
     219           3 :         return((*GC_get_oom_fn())(lb));
     220             :     } else {
     221       18722 :         return(result);
     222             :     }
     223             : }
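
In the THREADS branch above, only the first and last two words of the fresh block are cleared while the allocation lock is held (those are the words a concurrent marker could misread as GC descriptors); the expensive full BZERO runs after UNLOCK. A simplified pthreads sketch of the same pattern, with hypothetical names:

#include <pthread.h>
#include <string.h>

static pthread_mutex_t alloc_lock = PTHREAD_MUTEX_INITIALIZER;

/* Zero the descriptor-sensitive words under the lock, then clear the   */
/* bulk of the block with the lock released, so that other allocating   */
/* threads are not stalled.  Assumes nwords >= 2.                       */
static void clear_new_block(void *result, size_t nwords)
{
    pthread_mutex_lock(&alloc_lock);
    ((size_t *)result)[0] = 0;
    ((size_t *)result)[1] = 0;
    ((size_t *)result)[nwords - 1] = 0;
    ((size_t *)result)[nwords - 2] = 0;
    pthread_mutex_unlock(&alloc_lock);
    memset(result, 0, nwords * sizeof(size_t));  /* bulk clear, unlocked */
}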
     224             : 
     225             : /* Allocate lb bytes of atomic (pointer-free) data. */
     226             : #ifdef THREAD_LOCAL_ALLOC
     227       71911 :   GC_INNER void * GC_core_malloc_atomic(size_t lb)
     228             : #else
     229             :   GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_atomic(size_t lb)
     230             : #endif
     231             : {
     232             :     void *op;
     233             :     void ** opp;
     234             :     size_t lg;
     235             :     DCL_LOCK_STATE;
     236             : 
     237       71911 :     if(SMALL_OBJ(lb)) {
     238             :         GC_DBG_COLLECT_AT_MALLOC(lb);
     239       70546 :         lg = GC_size_map[lb];
     240       70546 :         opp = &(GC_aobjfreelist[lg]);
     241       70546 :         LOCK();
     242       70546 :         if (EXPECT((op = *opp) == 0, FALSE)) {
     243        9179 :             UNLOCK();
     244        9179 :             return(GENERAL_MALLOC((word)lb, PTRFREE));
     245             :         }
     246       61367 :         *opp = obj_link(op);
     247       61367 :         GC_bytes_allocd += GRANULES_TO_BYTES(lg);
     248       61367 :         UNLOCK();
     249       61367 :         return((void *) op);
     250             :    } else {
     251        1365 :        return(GENERAL_MALLOC((word)lb, PTRFREE));
     252             :    }
     253             : }
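
Client code reaches this path through the public GC_malloc_atomic entry point declared in gc.h. A small usage sketch: atomic (pointer-free) objects are never scanned for pointers, which suits raw byte buffers. Note that, unlike GC_malloc, atomic allocation does not guarantee zeroed memory:

#include <string.h>
#include "gc.h"

char *gc_strdup_example(const char *s)
{
    size_t n = strlen(s) + 1;
    char *p = (char *)GC_malloc_atomic(n);  /* pointer-free object      */
    if (p != NULL)
        memcpy(p, s, n);                    /* contents are never scanned */
    return p;
}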
     254             : 
     255             : /* Allocate lb bytes of composite (pointerful) data */
     256             : #ifdef THREAD_LOCAL_ALLOC
     257       47104 :   GC_INNER void * GC_core_malloc(size_t lb)
     258             : #else
     259             :   GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc(size_t lb)
     260             : #endif
     261             : {
     262             :     void *op;
     263             :     void **opp;
     264             :     size_t lg;
     265             :     DCL_LOCK_STATE;
     266             : 
     267       47104 :     if(SMALL_OBJ(lb)) {
     268             :         GC_DBG_COLLECT_AT_MALLOC(lb);
     269       46473 :         lg = GC_size_map[lb];
     270       46473 :         opp = (void **)&(GC_objfreelist[lg]);
     271       46473 :         LOCK();
     272       46473 :         if (EXPECT((op = *opp) == 0, FALSE)) {
     273        3326 :             UNLOCK();
     274        3326 :             return (GENERAL_MALLOC((word)lb, NORMAL));
     275             :         }
     276             :         GC_ASSERT(0 == obj_link(op)
     277             :                   || ((word)obj_link(op)
     278             :                         <= (word)GC_greatest_plausible_heap_addr
     279             :                      && (word)obj_link(op)
     280             :                         >= (word)GC_least_plausible_heap_addr));
     281       43147 :         *opp = obj_link(op);
     282       43147 :         obj_link(op) = 0;
     283       43147 :         GC_bytes_allocd += GRANULES_TO_BYTES(lg);
     284       43147 :         UNLOCK();
     285       43147 :         return op;
     286             :    } else {
     287         631 :        return(GENERAL_MALLOC(lb, NORMAL));
     288             :    }
     289             : }
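
The corresponding public entry point is GC_malloc, for objects that may contain pointers; for the NORMAL kind ok_init is set, so the memory comes back cleared and link fields start out NULL. A usage sketch:

#include "gc.h"

struct node {
    struct node *next;
    int value;
};

/* Allocate a collectible list node; no matching free is required.      */
struct node *cons(int value, struct node *next)
{
    struct node *n = (struct node *)GC_malloc(sizeof *n);
    if (n != NULL) {
        n->value = value;
        n->next = next;
    }
    return n;
}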
     290             : 
     291             : /* Allocate lb bytes of pointerful, traced, but not collectible data.   */
     292       69101 : GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_uncollectable(size_t lb)
     293             : {
     294             :     void *op;
     295             :     void **opp;
     296             :     size_t lg;
     297             :     DCL_LOCK_STATE;
     298             : 
     299       69101 :     if( SMALL_OBJ(lb) ) {
     300             :         GC_DBG_COLLECT_AT_MALLOC(lb);
     301       69101 :         if (EXTRA_BYTES != 0 && lb != 0) lb--;
     302             :                   /* We don't need the extra byte, since this won't be  */
     303             :                   /* collected anyway.                                  */
     304       69101 :         lg = GC_size_map[lb];
     305       69101 :         opp = &(GC_uobjfreelist[lg]);
     306       69101 :         LOCK();
     307       69101 :         op = *opp;
     308       69101 :         if (EXPECT(0 != op, TRUE)) {
     309       64877 :             *opp = obj_link(op);
     310       64877 :             obj_link(op) = 0;
     311       64877 :             GC_bytes_allocd += GRANULES_TO_BYTES(lg);
      312             :             /* Mark bit was already set on free list.  It will be       */
     313             :             /* cleared only temporarily during a collection, as a       */
     314             :             /* result of the normal free list mark bit clearing.        */
     315       64877 :             GC_non_gc_bytes += GRANULES_TO_BYTES(lg);
     316       64877 :             UNLOCK();
     317             :         } else {
     318        4224 :             UNLOCK();
     319        4224 :             op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
     320             :             /* For small objects, the free lists are completely marked. */
     321             :         }
     322             :         GC_ASSERT(0 == op || GC_is_marked(op));
     323       69101 :         return((void *) op);
     324             :     } else {
     325             :         hdr * hhdr;
     326             : 
     327           0 :         op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
     328           0 :         if (0 == op) return(0);
     329             : 
     330             :         GC_ASSERT(((word)op & (HBLKSIZE - 1)) == 0); /* large block */
     331           0 :         hhdr = HDR(op);
     332             :         /* We don't need the lock here, since we have an undisguised    */
     333             :         /* pointer.  We do need to hold the lock while we adjust        */
     334             :         /* mark bits.                                                   */
     335           0 :         LOCK();
     336           0 :         set_mark_bit_from_hdr(hhdr, 0); /* Only object. */
     337             : #       ifndef THREADS
     338             :           GC_ASSERT(hhdr -> hb_n_marks == 0);
     339             :                 /* This is not guaranteed in the multi-threaded case    */
     340             :                 /* because the counter could be updated before locking. */
     341             : #       endif
     342           0 :         hhdr -> hb_n_marks = 1;
     343           0 :         UNLOCK();
     344           0 :         return((void *) op);
     345             :     }
     346             : }
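
Uncollectible allocation is useful when the only reference to an object lives somewhere the collector cannot see. A usage sketch with a hypothetical foreign-registration callback; such objects must eventually be released explicitly with GC_free:

#include "gc.h"

/* register_cb is hypothetical: some non-GC-aware library that keeps    */
/* the only pointer to the buffer.                                       */
void hand_off_buffer(void (*register_cb)(void *), size_t len)
{
    void *buf = GC_malloc_uncollectable(len);
    if (buf != NULL)
        register_cb(buf);   /* the collector will never reclaim buf */
}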
     347             : 
     348             : #ifdef REDIRECT_MALLOC
     349             : 
     350             : # ifndef MSWINCE
     351             : #  include <errno.h>
     352             : # endif
     353             : 
      354             : /* Avoid unnecessary nested procedure calls here by #defining some      */
     355             : /* malloc replacements.  Otherwise we end up saving a                   */
     356             : /* meaningless return address in the object.  It also speeds things up, */
     357             : /* but it is admittedly quite ugly.                                     */
     358             : # define GC_debug_malloc_replacement(lb) GC_debug_malloc(lb, GC_DBG_EXTRAS)
     359             : 
     360             : void * malloc(size_t lb)
     361             : {
     362             :     /* It might help to manually inline the GC_malloc call here.        */
     363             :     /* But any decent compiler should reduce the extra procedure call   */
     364             :     /* to at most a jump instruction in this case.                      */
     365             : #   if defined(I386) && defined(GC_SOLARIS_THREADS)
      366             :       /* Thread initialization can call malloc before we're ready for it. */
     367             :       /* It's not clear that this is enough to help matters.            */
     368             :       /* The thread implementation may well call malloc at other        */
     369             :       /* inopportune times.                                             */
     370             :       if (!EXPECT(GC_is_initialized, TRUE)) return sbrk(lb);
     371             : #   endif /* I386 && GC_SOLARIS_THREADS */
     372             :     return((void *)REDIRECT_MALLOC(lb));
     373             : }
     374             : 
     375             : #if defined(GC_LINUX_THREADS) /* && !defined(USE_PROC_FOR_LIBRARIES) */
     376             :   STATIC ptr_t GC_libpthread_start = 0;
     377             :   STATIC ptr_t GC_libpthread_end = 0;
     378             :   STATIC ptr_t GC_libld_start = 0;
     379             :   STATIC ptr_t GC_libld_end = 0;
     380             : 
     381             :   STATIC void GC_init_lib_bounds(void)
     382             :   {
     383             :     if (GC_libpthread_start != 0) return;
     384             :     GC_init(); /* if not called yet */
     385             :     if (!GC_text_mapping("libpthread-",
     386             :                          &GC_libpthread_start, &GC_libpthread_end)) {
     387             :         WARN("Failed to find libpthread.so text mapping: Expect crash\n", 0);
     388             :         /* This might still work with some versions of libpthread,      */
     389             :         /* so we don't abort.  Perhaps we should.                       */
     390             :         /* Generate message only once:                                  */
     391             :           GC_libpthread_start = (ptr_t)1;
     392             :     }
     393             :     if (!GC_text_mapping("ld-", &GC_libld_start, &GC_libld_end)) {
     394             :         WARN("Failed to find ld.so text mapping: Expect crash\n", 0);
     395             :     }
     396             :   }
     397             : #endif /* GC_LINUX_THREADS */
     398             : 
     399             : #include <limits.h>
     400             : #ifdef SIZE_MAX
     401             : # define GC_SIZE_MAX SIZE_MAX
     402             : #else
     403             : # define GC_SIZE_MAX (~(size_t)0)
     404             : #endif
     405             : 
     406             : #define GC_SQRT_SIZE_MAX ((1U << (WORDSZ / 2)) - 1)
     407             : 
     408             : void * calloc(size_t n, size_t lb)
     409             : {
     410             :     if ((lb | n) > GC_SQRT_SIZE_MAX /* fast initial test */
     411             :         && lb && n > GC_SIZE_MAX / lb)
     412             :       return NULL;
     413             : #   if defined(GC_LINUX_THREADS) /* && !defined(USE_PROC_FOR_LIBRARIES) */
     414             :         /* libpthread allocated some memory that is only pointed to by  */
     415             :         /* mmapped thread stacks.  Make sure it is not collectible.     */
     416             :         {
     417             :           static GC_bool lib_bounds_set = FALSE;
     418             :           ptr_t caller = (ptr_t)__builtin_return_address(0);
     419             :           /* This test does not need to ensure memory visibility, since */
     420             :           /* the bounds will be set when/if we create another thread.   */
     421             :           if (!EXPECT(lib_bounds_set, TRUE)) {
     422             :             GC_init_lib_bounds();
     423             :             lib_bounds_set = TRUE;
     424             :           }
     425             :           if (((word)caller >= (word)GC_libpthread_start
     426             :                && (word)caller < (word)GC_libpthread_end)
     427             :               || ((word)caller >= (word)GC_libld_start
     428             :                   && (word)caller < (word)GC_libld_end))
     429             :             return GC_malloc_uncollectable(n*lb);
     430             :           /* The two ranges are actually usually adjacent, so there may */
     431             :           /* be a way to speed this up.                                 */
     432             :         }
     433             : #   endif
     434             :     return((void *)REDIRECT_MALLOC(n*lb));
     435             : }
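
The overflow guard at the top of calloc is a two-stage check: if both factors fit in half of size_t's bits, their product cannot overflow, so the (slower) division-based test is needed only in the rare remaining case. A standalone sketch, assuming 8-bit bytes:

#include <stddef.h>
#include <stdint.h>

#define SQRT_SIZE_MAX ((((size_t)1) << (sizeof(size_t) * 4)) - 1)

static int mul_would_overflow(size_t n, size_t lb)
{
    return (lb | n) > SQRT_SIZE_MAX   /* fast path: both factors small? */
        && lb != 0
        && n > SIZE_MAX / lb;         /* exact, division-based check    */
}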
     436             : 
     437             : #ifndef strdup
     438             :   char *strdup(const char *s)
     439             :   {
     440             :     size_t lb = strlen(s) + 1;
     441             :     char *result = (char *)REDIRECT_MALLOC(lb);
     442             :     if (result == 0) {
     443             :       errno = ENOMEM;
     444             :       return 0;
     445             :     }
     446             :     BCOPY(s, result, lb);
     447             :     return result;
     448             :   }
     449             : #endif /* !defined(strdup) */
 /* If strdup is defined as a macro, we assume that it actually calls    */
 /* malloc, and thus the right thing will happen even without            */
 /* overriding it.  This seems to be true on most Linux systems.         */
     453             : 
     454             : #ifndef strndup
     455             :   /* This is similar to strdup().       */
     456             :   char *strndup(const char *str, size_t size)
     457             :   {
     458             :     char *copy;
     459             :     size_t len = strlen(str);
     460             :     if (len > size)
     461             :       len = size;
     462             :     copy = (char *)REDIRECT_MALLOC(len + 1);
     463             :     if (copy == NULL) {
     464             :       errno = ENOMEM;
     465             :       return NULL;
     466             :     }
     467             :     BCOPY(str, copy, len);
     468             :     copy[len] = '\0';
     469             :     return copy;
     470             :   }
     471             : #endif /* !strndup */
     472             : 
     473             : #undef GC_debug_malloc_replacement
     474             : 
     475             : #endif /* REDIRECT_MALLOC */
     476             : 
     477             : /* Explicitly deallocate an object p.                           */
     478           0 : GC_API void GC_CALL GC_free(void * p)
     479             : {
     480             :     struct hblk *h;
     481             :     hdr *hhdr;
     482             :     size_t sz; /* In bytes */
     483             :     size_t ngranules;   /* sz in granules */
     484             :     void **flh;
     485             :     int knd;
     486             :     struct obj_kind * ok;
     487             :     DCL_LOCK_STATE;
     488             : 
     489           0 :     if (p == 0) return;
     490             :         /* Required by ANSI.  It's not my fault ...     */
     491             : #   ifdef LOG_ALLOCS
     492             :       GC_log_printf("GC_free(%p) after GC #%lu\n",
     493             :                     p, (unsigned long)GC_gc_no);
     494             : #   endif
     495           0 :     h = HBLKPTR(p);
     496           0 :     hhdr = HDR(h);
     497             : #   if defined(REDIRECT_MALLOC) && \
     498             :         (defined(GC_SOLARIS_THREADS) || defined(GC_LINUX_THREADS) \
     499             :          || defined(MSWIN32))
     500             :         /* For Solaris, we have to redirect malloc calls during         */
     501             :         /* initialization.  For the others, this seems to happen        */
     502             :         /* implicitly.                                                  */
     503             :         /* Don't try to deallocate that memory.                         */
     504             :         if (0 == hhdr) return;
     505             : #   endif
     506             :     GC_ASSERT(GC_base(p) == p);
     507           0 :     sz = hhdr -> hb_sz;
     508           0 :     ngranules = BYTES_TO_GRANULES(sz);
     509           0 :     knd = hhdr -> hb_obj_kind;
     510           0 :     ok = &GC_obj_kinds[knd];
     511           0 :     if (EXPECT(ngranules <= MAXOBJGRANULES, TRUE)) {
     512           0 :         LOCK();
     513           0 :         GC_bytes_freed += sz;
     514           0 :         if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
      515             :                 /* It's unnecessary to clear the mark bit.  If the      */
      516             :                 /* object is reallocated, it doesn't matter.  Otherwise */
      517             :                 /* the collector will do it, since it's on a free list. */
     518           0 :         if (ok -> ok_init) {
     519           0 :             BZERO((word *)p + 1, sz-sizeof(word));
     520             :         }
     521           0 :         flh = &(ok -> ok_freelist[ngranules]);
     522           0 :         obj_link(p) = *flh;
     523           0 :         *flh = (ptr_t)p;
     524           0 :         UNLOCK();
     525             :     } else {
     526           0 :         size_t nblocks = OBJ_SZ_TO_BLOCKS(sz);
     527           0 :         LOCK();
     528           0 :         GC_bytes_freed += sz;
     529           0 :         if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
     530           0 :         if (nblocks > 1) {
     531           0 :           GC_large_allocd_bytes -= nblocks * HBLKSIZE;
     532             :         }
     533           0 :         GC_freehblk(h);
     534           0 :         UNLOCK();
     535             :     }
     536             : }
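
GC_free is optional for collectible objects (the collector reclaims them anyway) but required for uncollectible ones. A minimal usage sketch:

#include "gc.h"

void release_uncollectable(void *p)
{
    GC_free(p);   /* required for GC_malloc_uncollectable objects;     */
                  /* a NULL argument is a no-op, per the check above   */
}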
     537             : 
     538             : /* Explicitly deallocate an object p when we already hold lock.         */
     539             : /* Only used for internally allocated objects, so we can take some      */
     540             : /* shortcuts.                                                           */
     541             : #ifdef THREADS
     542         650 :   GC_INNER void GC_free_inner(void * p)
     543             :   {
     544             :     struct hblk *h;
     545             :     hdr *hhdr;
     546             :     size_t sz; /* bytes */
     547             :     size_t ngranules;  /* sz in granules */
     548             :     void ** flh;
     549             :     int knd;
     550             :     struct obj_kind * ok;
     551             : 
     552         650 :     h = HBLKPTR(p);
     553         650 :     hhdr = HDR(h);
     554         650 :     knd = hhdr -> hb_obj_kind;
     555         650 :     sz = hhdr -> hb_sz;
     556         650 :     ngranules = BYTES_TO_GRANULES(sz);
     557         650 :     ok = &GC_obj_kinds[knd];
     558         650 :     if (ngranules <= MAXOBJGRANULES) {
     559         650 :         GC_bytes_freed += sz;
     560         650 :         if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
     561         650 :         if (ok -> ok_init) {
     562         650 :             BZERO((word *)p + 1, sz-sizeof(word));
     563             :         }
     564         650 :         flh = &(ok -> ok_freelist[ngranules]);
     565         650 :         obj_link(p) = *flh;
     566         650 :         *flh = (ptr_t)p;
     567             :     } else {
     568           0 :         size_t nblocks = OBJ_SZ_TO_BLOCKS(sz);
     569           0 :         GC_bytes_freed += sz;
     570           0 :         if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
     571           0 :         if (nblocks > 1) {
     572           0 :           GC_large_allocd_bytes -= nblocks * HBLKSIZE;
     573             :         }
     574           0 :         GC_freehblk(h);
     575             :     }
     576         650 :   }
     577             : #endif /* THREADS */
     578             : 
     579             : #if defined(REDIRECT_MALLOC) && !defined(REDIRECT_FREE)
     580             : # define REDIRECT_FREE GC_free
     581             : #endif
     582             : 
     583             : #ifdef REDIRECT_FREE
     584             :   void free(void * p)
     585             :   {
     586             : #   if defined(GC_LINUX_THREADS) && !defined(USE_PROC_FOR_LIBRARIES)
     587             :         {
     588             :           /* Don't bother with initialization checks.  If nothing       */
     589             :           /* has been initialized, the check fails, and that's safe,    */
      590             :           /* since no uncollectible objects have been allocated yet.    */
     591             :           ptr_t caller = (ptr_t)__builtin_return_address(0);
     592             :           /* This test does not need to ensure memory visibility, since */
     593             :           /* the bounds will be set when/if we create another thread.   */
     594             :           if (((word)caller >= (word)GC_libpthread_start
     595             :                && (word)caller < (word)GC_libpthread_end)
     596             :               || ((word)caller >= (word)GC_libld_start
     597             :                   && (word)caller < (word)GC_libld_end)) {
     598             :             GC_free(p);
     599             :             return;
     600             :           }
     601             :         }
     602             : #   endif
     603             : #   ifndef IGNORE_FREE
     604             :       REDIRECT_FREE(p);
     605             : #   endif
     606             :   }
     607             : #endif /* REDIRECT_FREE */

Generated by: LCOV version 1.11