LCOV - code coverage report
Current view: top level - mm/boehm-gc - mallocx.c (source / functions)
Test: coverage.info
Date: 2017-07-14 10:03:36

              Hit   Total   Coverage
Lines:         78     238     32.8 %
Functions:      1      14      7.1 %

          Line data    Source code
       1             : /*
       2             :  * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
       3             :  * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
       4             :  * Copyright (c) 1996 by Silicon Graphics.  All rights reserved.
       5             :  * Copyright (c) 2000 by Hewlett-Packard Company.  All rights reserved.
       6             :  *
       7             :  * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
       8             :  * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
       9             :  *
      10             :  * Permission is hereby granted to use or copy this program
      11             :  * for any purpose,  provided the above notices are retained on all copies.
      12             :  * Permission to modify the code and to distribute modified code is granted,
      13             :  * provided the above notices are retained, and a notice that the code was
      14             :  * modified is included with the above copyright notice.
      15             :  */
      16             : 
      17             : #include "private/gc_priv.h"
      18             : 
      19             : /*
      20             :  * These are extra allocation routines which are likely to be less
      21             :  * frequently used than those in malloc.c.  They are separate in the
      22             :  * hope that the .o file will be excluded from statically linked
      23             :  * executables.  We should probably break this up further.
      24             :  */
      25             : 
      26             : #include <stdio.h>
      27             : #include <string.h>
      28             : 
      29             : #ifdef MSWINCE
      30             : # ifndef WIN32_LEAN_AND_MEAN
      31             : #   define WIN32_LEAN_AND_MEAN 1
      32             : # endif
      33             : # define NOSERVICE
      34             : # include <windows.h>
      35             : #else
      36             : # include <errno.h>
      37             : #endif
      38             : 
      39             : /* Some externally visible but unadvertised variables to allow access to */
      40             : /* free lists from inlined allocators without including gc_priv.h        */
      41             : /* or introducing dependencies on internal data structure layouts.       */
      42             : void ** const GC_objfreelist_ptr = GC_objfreelist;
      43             : void ** const GC_aobjfreelist_ptr = GC_aobjfreelist;
      44             : void ** const GC_uobjfreelist_ptr = GC_uobjfreelist;
      45             : # ifdef ATOMIC_UNCOLLECTABLE
      46             :     void ** const GC_auobjfreelist_ptr = GC_auobjfreelist;
      47             : # endif
      48             : 
      49             : 
      50           0 : STATIC void * GC_generic_or_special_malloc(size_t lb, int knd)
      51             : {
      52           0 :     switch(knd) {
      53             : #     ifdef STUBBORN_ALLOC
      54             :         case STUBBORN:
      55             :             return(GC_malloc_stubborn((size_t)lb));
      56             : #     endif
      57             :         case PTRFREE:
      58           0 :             return(GC_malloc_atomic((size_t)lb));
      59             :         case NORMAL:
      60           0 :             return(GC_malloc((size_t)lb));
      61             :         case UNCOLLECTABLE:
      62           0 :             return(GC_malloc_uncollectable((size_t)lb));
      63             : #       ifdef ATOMIC_UNCOLLECTABLE
      64             :           case AUNCOLLECTABLE:
      65           0 :             return(GC_malloc_atomic_uncollectable((size_t)lb));
      66             : #       endif /* ATOMIC_UNCOLLECTABLE */
      67             :         default:
      68           0 :             return(GC_generic_malloc(lb,knd));
      69             :     }
      70             : }
      71             : 
      72             : /* Change the size of the block pointed to by p to contain at least   */
      73             : /* lb bytes.  The object may be (and quite likely will be) moved.     */
       74             : /* The kind (e.g. atomic) is the same as that of the old object.      */
      75             : /* Shrinking of large blocks is not implemented well.                 */
      76           0 : GC_API void * GC_CALL GC_realloc(void * p, size_t lb)
      77             : {
      78             :     struct hblk * h;
      79             :     hdr * hhdr;
      80             :     size_t sz;   /* Current size in bytes       */
      81             :     size_t orig_sz;      /* Original sz in bytes        */
      82             :     int obj_kind;
      83             : 
      84           0 :     if (p == 0) return(GC_malloc(lb));  /* Required by ANSI */
      85           0 :     h = HBLKPTR(p);
      86           0 :     hhdr = HDR(h);
      87           0 :     sz = hhdr -> hb_sz;
      88           0 :     obj_kind = hhdr -> hb_obj_kind;
      89           0 :     orig_sz = sz;
      90             : 
      91           0 :     if (sz > MAXOBJBYTES) {
      92             :         /* Round it up to the next whole heap block */
      93             :           word descr;
      94             : 
      95           0 :           sz = (sz+HBLKSIZE-1) & (~HBLKMASK);
      96           0 :           hhdr -> hb_sz = sz;
      97           0 :           descr = GC_obj_kinds[obj_kind].ok_descriptor;
      98           0 :           if (GC_obj_kinds[obj_kind].ok_relocate_descr) descr += sz;
      99           0 :           hhdr -> hb_descr = descr;
     100             : #         ifdef MARK_BIT_PER_OBJ
     101             :             GC_ASSERT(hhdr -> hb_inv_sz == LARGE_INV_SZ);
     102             : #         else
     103             :             GC_ASSERT(hhdr -> hb_large_block &&
     104             :                       hhdr -> hb_map[ANY_INDEX] == 1);
     105             : #         endif
     106           0 :           if (IS_UNCOLLECTABLE(obj_kind)) GC_non_gc_bytes += (sz - orig_sz);
     107             :           /* Extra area is already cleared by GC_alloc_large_and_clear. */
     108             :     }
     109           0 :     if (ADD_SLOP(lb) <= sz) {
     110           0 :         if (lb >= (sz >> 1)) {
     111             : #           ifdef STUBBORN_ALLOC
     112             :                 if (obj_kind == STUBBORN) GC_change_stubborn(p);
     113             : #           endif
     114           0 :             if (orig_sz > lb) {
     115             :               /* Clear unneeded part of object to avoid bogus pointer */
     116             :               /* tracing.                                             */
     117             :               /* Safe for stubborn objects.                           */
     118           0 :                 BZERO(((ptr_t)p) + lb, orig_sz - lb);
     119             :             }
     120           0 :             return(p);
     121             :         } else {
     122             :             /* shrink */
     123             :               void * result =
     124           0 :                         GC_generic_or_special_malloc((word)lb, obj_kind);
     125             : 
     126           0 :               if (result == 0) return(0);
      127             :                   /* We could also return the original object, but      */
      128             :                   /* returning NULL warns the client of imminent disaster. */
     129           0 :               BCOPY(p, result, lb);
     130             : #             ifndef IGNORE_FREE
     131           0 :                 GC_free(p);
     132             : #             endif
     133           0 :               return(result);
     134             :         }
     135             :     } else {
     136             :         /* grow */
     137           0 :           void * result = GC_generic_or_special_malloc((word)lb, obj_kind);
     138             : 
     139           0 :           if (result == 0) return(0);
     140           0 :           BCOPY(p, result, sz);
     141             : #         ifndef IGNORE_FREE
     142           0 :             GC_free(p);
     143             : #         endif
     144           0 :           return(result);
     145             :     }
     146             : }
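
A minimal usage sketch of the contract documented above (push_byte is a hypothetical helper, not part of the instrumented source):

    #include "gc.h"

    static int push_byte(char **buf, size_t *cap, size_t len, char c)
    {
        if (len >= *cap) {
            size_t new_cap = *cap ? 2 * *cap : 16;
            /* The object may move: always adopt the returned pointer. */
            /* GC_realloc(NULL, n) behaves like GC_malloc(n).          */
            char *p = GC_realloc(*buf, new_cap);
            if (p == NULL) return 0;   /* old *buf is left intact */
            *buf = p;
            *cap = new_cap;
        }
        (*buf)[len] = c;
        return 1;
    }
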
     147             : 
     148             : # if defined(REDIRECT_MALLOC) && !defined(REDIRECT_REALLOC)
     149             : #   define REDIRECT_REALLOC GC_realloc
     150             : # endif
     151             : 
     152             : # ifdef REDIRECT_REALLOC
     153             : 
     154             : /* As with malloc, avoid two levels of extra calls here.        */
     155             : # define GC_debug_realloc_replacement(p, lb) \
     156             :         GC_debug_realloc(p, lb, GC_DBG_EXTRAS)
     157             : 
     158             : void * realloc(void * p, size_t lb)
     159             :   {
     160             :     return(REDIRECT_REALLOC(p, lb));
     161             :   }
     162             : 
     163             : # undef GC_debug_realloc_replacement
     164             : # endif /* REDIRECT_REALLOC */
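
For context, REDIRECT_MALLOC and REDIRECT_REALLOC are build-time macros; when the collector is compiled with REDIRECT_MALLOC defined and REDIRECT_REALLOC left unset, the block above routes the standard realloc() entry point through GC_realloc. A hedged sketch of the client-visible effect (demo_redirect is hypothetical):

    #include <stdlib.h>

    void demo_redirect(void)
    {
        /* With -DREDIRECT_MALLOC=GC_malloc at collector build time, */
        /* this call is serviced by GC_realloc (see above).          */
        char *p = realloc(NULL, 100);
        (void)p;   /* collector-managed; no explicit free required */
    }
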
     165             : 
      166             : /* Allocate memory such that only pointers to (or near) the     */
      167             : /* beginning of the object are considered by the collector.     */
      168             : /* We avoid holding the allocation lock while clearing memory.  */
     169             : GC_API GC_ATTR_MALLOC void * GC_CALL
     170           0 :     GC_generic_malloc_ignore_off_page(size_t lb, int k)
     171             : {
     172             :     void *result;
     173             :     size_t lg;
     174             :     size_t lb_rounded;
     175             :     word n_blocks;
     176             :     GC_bool init;
     177             :     DCL_LOCK_STATE;
     178             : 
     179           0 :     if (SMALL_OBJ(lb))
     180           0 :         return(GC_generic_malloc((word)lb, k));
     181           0 :     lg = ROUNDED_UP_GRANULES(lb);
     182           0 :     lb_rounded = GRANULES_TO_BYTES(lg);
     183           0 :     if (lb_rounded < lb)
     184           0 :         return((*GC_get_oom_fn())(lb));
     185           0 :     n_blocks = OBJ_SZ_TO_BLOCKS(lb_rounded);
     186           0 :     init = GC_obj_kinds[k].ok_init;
     187           0 :     if (EXPECT(GC_have_errors, FALSE))
     188           0 :       GC_print_all_errors();
     189           0 :     GC_INVOKE_FINALIZERS();
     190             :     GC_DBG_COLLECT_AT_MALLOC(lb);
     191           0 :     LOCK();
     192           0 :     result = (ptr_t)GC_alloc_large(ADD_SLOP(lb), k, IGNORE_OFF_PAGE);
     193           0 :     if (0 != result) {
     194           0 :         if (GC_debugging_started) {
     195           0 :             BZERO(result, n_blocks * HBLKSIZE);
     196             :         } else {
     197             : #           ifdef THREADS
     198             :               /* Clear any memory that might be used for GC descriptors */
     199             :               /* before we release the lock.                          */
     200           0 :                 ((word *)result)[0] = 0;
     201           0 :                 ((word *)result)[1] = 0;
     202           0 :                 ((word *)result)[GRANULES_TO_WORDS(lg)-1] = 0;
     203           0 :                 ((word *)result)[GRANULES_TO_WORDS(lg)-2] = 0;
     204             : #           endif
     205             :         }
     206             :     }
     207           0 :     GC_bytes_allocd += lb_rounded;
     208           0 :     if (0 == result) {
     209           0 :         GC_oom_func oom_fn = GC_oom_fn;
     210           0 :         UNLOCK();
     211           0 :         return((*oom_fn)(lb));
     212             :     } else {
     213           0 :         UNLOCK();
     214           0 :         if (init && !GC_debugging_started) {
     215           0 :             BZERO(result, n_blocks * HBLKSIZE);
     216             :         }
     217           0 :         return(result);
     218             :     }
     219             : }
     220             : 
     221           0 : GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_ignore_off_page(size_t lb)
     222             : {
     223           0 :     return((void *)GC_generic_malloc_ignore_off_page(lb, NORMAL));
     224             : }
     225             : 
     226             : GC_API GC_ATTR_MALLOC void * GC_CALL
     227           0 :     GC_malloc_atomic_ignore_off_page(size_t lb)
     228             : {
     229           0 :     return((void *)GC_generic_malloc_ignore_off_page(lb, PTRFREE));
     230             : }
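
A sketch of the intended use of the *_ignore_off_page variants: they suit large objects for which the client promises to keep a pointer to (or near) the start reachable, which reduces false retention from stray interior pointers (make_big_scratch is a hypothetical helper):

    #include "gc.h"

    /* A large, pointer-free scratch area.  Only pointers near the  */
    /* object's start are recognized, so the caller must keep the   */
    /* returned base pointer reachable for the object's lifetime.   */
    static void *make_big_scratch(size_t n)
    {
        return GC_malloc_atomic_ignore_off_page(n);
    }
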
     231             : 
     232             : /* Increment GC_bytes_allocd from code that doesn't have direct access  */
     233             : /* to GC_arrays.                                                        */
     234           0 : GC_API void GC_CALL GC_incr_bytes_allocd(size_t n)
     235             : {
     236           0 :     GC_bytes_allocd += n;
     237           0 : }
     238             : 
     239             : /* The same for GC_bytes_freed.                         */
     240           0 : GC_API void GC_CALL GC_incr_bytes_freed(size_t n)
     241             : {
     242           0 :     GC_bytes_freed += n;
     243           0 : }
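
A hedged sketch of why these hooks exist: client code that dispenses objects from a privately managed batch can keep the collector's allocation statistics, which drive its collection heuristics, in sync without touching GC internals (note_private_alloc is hypothetical):

    #include "gc.h"

    /* Credit the collector for one 64-byte object handed out from  */
    /* a client-side cache.                                         */
    static void note_private_alloc(void)
    {
        GC_incr_bytes_allocd(64);
    }
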
     244             : 
     245             : # ifdef PARALLEL_MARK
     246             :     STATIC volatile AO_t GC_bytes_allocd_tmp = 0;
     247             :                         /* Number of bytes of memory allocated since    */
     248             :                         /* we released the GC lock.  Instead of         */
     249             :                         /* reacquiring the GC lock just to add this in, */
     250             :                         /* we add it in the next time we reacquire      */
     251             :                         /* the lock.  (Atomically adding it doesn't     */
     252             :                         /* work, since we would have to atomically      */
     253             :                         /* update it in GC_malloc, which is too         */
     254             :                         /* expensive.)                                  */
     255             : # endif /* PARALLEL_MARK */
     256             : 
     257             : /* Return a list of 1 or more objects of the indicated size, linked     */
     258             : /* through the first word in the object.  This has the advantage that   */
     259             : /* it acquires the allocation lock only once, and may greatly reduce    */
     260             : /* time wasted contending for the allocation lock.  Typical usage would */
     261             : /* be in a thread that requires many items of the same size.  It would  */
     262             : /* keep its own free list in thread-local storage, and call             */
     263             : /* GC_malloc_many or friends to replenish it.  (We do not round up      */
     264             : /* object sizes, since a call indicates the intention to consume many   */
     265             : /* objects of exactly this size.)                                       */
     266             : /* We assume that the size is a multiple of GRANULE_BYTES.              */
      267             : /* We return the free list by assigning it to *result, since it is      */
      268             : /* not safe to return, e.g., a linked list of pointer-free objects:     */
      269             : /* the collector would not retain the entire list if it were            */
      270             : /* invoked just as we were returning.                                   */
     271             : /* Note that the client should usually clear the link field.            */
     272       40455 : GC_API void GC_CALL GC_generic_malloc_many(size_t lb, int k, void **result)
     273             : {
     274             :     void *op;
     275             :     void *p;
     276             :     void **opp;
     277             :     size_t lw;      /* Length in words.     */
     278             :     size_t lg;      /* Length in granules.  */
     279       40455 :     signed_word my_bytes_allocd = 0;
     280       40455 :     struct obj_kind * ok = &(GC_obj_kinds[k]);
     281             :     struct hblk ** rlh;
     282             :     DCL_LOCK_STATE;
     283             : 
     284             :     GC_ASSERT(lb != 0 && (lb & (GRANULE_BYTES-1)) == 0);
     285       40455 :     if (!SMALL_OBJ(lb)) {
     286           0 :         op = GC_generic_malloc(lb, k);
     287           0 :         if (EXPECT(0 != op, TRUE))
     288           0 :             obj_link(op) = 0;
     289           0 :         *result = op;
     290           0 :         return;
     291             :     }
     292       40455 :     lw = BYTES_TO_WORDS(lb);
     293       40455 :     lg = BYTES_TO_GRANULES(lb);
     294       40455 :     if (EXPECT(GC_have_errors, FALSE))
     295           0 :       GC_print_all_errors();
     296       40455 :     GC_INVOKE_FINALIZERS();
     297             :     GC_DBG_COLLECT_AT_MALLOC(lb);
     298       40455 :     LOCK();
     299       40455 :     if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
     300             :     /* Do our share of marking work */
     301       40455 :       if (GC_incremental && !GC_dont_gc) {
     302           0 :         ENTER_GC();
     303           0 :         GC_collect_a_little_inner(1);
     304           0 :         EXIT_GC();
     305             :       }
      306             :     /* First, see if we can reclaim a block of objects that is     */
      307             :     /* already waiting on the reclaim list.                        */
     308       40455 :     rlh = ok -> ok_reclaim_list;
     309       40455 :     if (rlh != NULL) {
     310             :         struct hblk * hbp;
     311             :         hdr * hhdr;
     312             : 
     313       40455 :         rlh += lg;
     314       80912 :         while ((hbp = *rlh) != 0) {
     315        4758 :             hhdr = HDR(hbp);
     316        4758 :             *rlh = hhdr -> hb_next;
     317             :             GC_ASSERT(hhdr -> hb_sz == lb);
     318        4758 :             hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no;
     319             : #           ifdef PARALLEL_MARK
     320        4758 :               if (GC_parallel) {
     321             :                   signed_word my_bytes_allocd_tmp =
     322        4758 :                                 (signed_word)AO_load(&GC_bytes_allocd_tmp);
     323             :                   GC_ASSERT(my_bytes_allocd_tmp >= 0);
     324             :                   /* We only decrement it while holding the GC lock.    */
     325             :                   /* Thus we can't accidentally adjust it down in more  */
     326             :                   /* than one thread simultaneously.                    */
     327             : 
     328        4758 :                   if (my_bytes_allocd_tmp != 0) {
     329        4746 :                     (void)AO_fetch_and_add(&GC_bytes_allocd_tmp,
     330        4746 :                                            (AO_t)(-my_bytes_allocd_tmp));
     331        4746 :                     GC_bytes_allocd += my_bytes_allocd_tmp;
     332             :                   }
     333        4758 :                   GC_acquire_mark_lock();
     334        4758 :                   ++ GC_fl_builder_count;
     335        4758 :                   UNLOCK();
     336        4758 :                   GC_release_mark_lock();
     337             :               }
     338             : #           endif
     339        4758 :             op = GC_reclaim_generic(hbp, hhdr, lb,
     340             :                                     ok -> ok_init, 0, &my_bytes_allocd);
     341        4758 :             if (op != 0) {
     342             :               /* We also reclaimed memory, so we need to adjust         */
     343             :               /* that count.                                            */
      344             :               /* This update is not atomic, so the results may be       */
      345             :               /* slightly inaccurate.                                   */
     346        4756 :               GC_bytes_found += my_bytes_allocd;
     347             : #             ifdef PARALLEL_MARK
     348        4756 :                 if (GC_parallel) {
     349        4756 :                   *result = op;
     350        4756 :                   (void)AO_fetch_and_add(&GC_bytes_allocd_tmp,
     351             :                                          (AO_t)my_bytes_allocd);
     352        4756 :                   GC_acquire_mark_lock();
     353        4756 :                   -- GC_fl_builder_count;
     354        4756 :                   if (GC_fl_builder_count == 0) GC_notify_all_builder();
     355        4756 :                   GC_release_mark_lock();
     356        4756 :                   (void) GC_clear_stack(0);
     357        4756 :                   return;
     358             :                 }
     359             : #             endif
     360           0 :               GC_bytes_allocd += my_bytes_allocd;
     361           0 :               goto out;
     362             :             }
     363             : #           ifdef PARALLEL_MARK
     364           2 :               if (GC_parallel) {
     365           2 :                 GC_acquire_mark_lock();
     366           2 :                 -- GC_fl_builder_count;
     367           2 :                 if (GC_fl_builder_count == 0) GC_notify_all_builder();
     368           2 :                 GC_release_mark_lock();
     369           2 :                 LOCK();
     370             :                 /* GC lock is needed for reclaim list access.   We      */
     371             :                 /* must decrement fl_builder_count before reacquiring   */
     372             :                 /* the lock.  Hopefully this path is rare.              */
     373             :               }
     374             : #           endif
     375             :         }
     376             :     }
      377             :     /* Next, try to use a prefix of the global free list, if any.       */
     378             :     /* We don't refill it, but we need to use it up before allocating   */
     379             :     /* a new block ourselves.                                           */
     380       35699 :       opp = &(GC_obj_kinds[k].ok_freelist[lg]);
     381       35699 :       if ( (op = *opp) != 0 ) {
     382        2064 :         *opp = 0;
     383        2064 :         my_bytes_allocd = 0;
     384       79864 :         for (p = op; p != 0; p = obj_link(p)) {
     385       77800 :           my_bytes_allocd += lb;
     386       77800 :           if ((word)my_bytes_allocd >= HBLKSIZE) {
     387           0 :             *opp = obj_link(p);
     388           0 :             obj_link(p) = 0;
     389           0 :             break;
     390             :           }
     391             :         }
     392        2064 :         GC_bytes_allocd += my_bytes_allocd;
     393        2064 :         goto out;
     394             :       }
      395             :     /* Next, try to allocate a new block's worth of objects of this size. */
     396             :     {
     397       33635 :         struct hblk *h = GC_allochblk(lb, k, 0);
     398       33635 :         if (h != 0) {
     399       33580 :           if (IS_UNCOLLECTABLE(k)) GC_set_hdr_marks(HDR(h));
     400       33580 :           GC_bytes_allocd += HBLKSIZE - HBLKSIZE % lb;
     401             : #         ifdef PARALLEL_MARK
     402       33580 :             if (GC_parallel) {
     403       33580 :               GC_acquire_mark_lock();
     404       33580 :               ++ GC_fl_builder_count;
     405       33580 :               UNLOCK();
     406       33580 :               GC_release_mark_lock();
     407             : 
     408       38588 :               op = GC_build_fl(h, lw,
     409       38588 :                         (ok -> ok_init || GC_debugging_started), 0);
     410             : 
     411       33580 :               *result = op;
     412       33580 :               GC_acquire_mark_lock();
     413       33580 :               -- GC_fl_builder_count;
     414       33580 :               if (GC_fl_builder_count == 0) GC_notify_all_builder();
     415       33580 :               GC_release_mark_lock();
     416       33580 :               (void) GC_clear_stack(0);
     417       33580 :               return;
     418             :             }
     419             : #         endif
     420           0 :           op = GC_build_fl(h, lw, (ok -> ok_init || GC_debugging_started), 0);
     421           0 :           goto out;
     422             :         }
     423             :     }
     424             : 
     425             :     /* As a last attempt, try allocating a single object.  Note that    */
     426             :     /* this may trigger a collection or expand the heap.                */
     427          55 :       op = GC_generic_malloc_inner(lb, k);
     428          55 :       if (0 != op) obj_link(op) = 0;
     429             : 
     430             :   out:
     431        2119 :     *result = op;
     432        2119 :     UNLOCK();
     433        2119 :     (void) GC_clear_stack(0);
     434             : }
     435             : 
     436             : /* Note that the "atomic" version of this would be unsafe, since the    */
     437             : /* links would not be seen by the collector.                            */
     438           0 : GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_many(size_t lb)
     439             : {
     440             :     void *result;
     441           0 :     GC_generic_malloc_many((lb + EXTRA_BYTES + GRANULE_BYTES-1)
     442             :                            & ~(GRANULE_BYTES-1),
     443             :                            NORMAL, &result);
     444           0 :     return result;
     445             : }
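
The replenishment pattern sketched in the comment above GC_generic_malloc_many might look as follows; GC_NEXT is the link-field accessor that gc.h pairs with GC_malloc_many, and GCC-style __thread stands in for whatever thread-local-storage mechanism the client actually uses (cache_alloc_32 is hypothetical):

    #include "gc.h"

    static __thread void *obj_cache;   /* per-thread free list */

    static void *cache_alloc_32(void)
    {
        void *p = obj_cache;
        if (p == NULL) {
            /* One lock acquisition refills the whole list;          */
            /* GC_malloc_many rounds the size up itself (see above). */
            p = GC_malloc_many(32);
            if (p == NULL) return NULL;
        }
        obj_cache = GC_NEXT(p);   /* unlink the first object */
        GC_NEXT(p) = NULL;        /* clear the link field, as advised */
        return p;
    }
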
     446             : 
      447             : /* Neither well tested nor well integrated.     */
      448             : /* A debug version is tricky and currently missing.     */
     449             : #include <limits.h>
     450             : 
     451           0 : GC_API GC_ATTR_MALLOC void * GC_CALL GC_memalign(size_t align, size_t lb)
     452             : {
     453             :     size_t new_lb;
     454             :     size_t offset;
     455             :     ptr_t result;
     456             : 
     457           0 :     if (align <= GRANULE_BYTES) return GC_malloc(lb);
     458           0 :     if (align >= HBLKSIZE/2 || lb >= HBLKSIZE/2) {
     459           0 :         if (align > HBLKSIZE) {
     460           0 :           return (*GC_get_oom_fn())(LONG_MAX-1024); /* Fail */
     461             :         }
     462           0 :         return GC_malloc(lb <= HBLKSIZE? HBLKSIZE : lb);
     463             :             /* Will be HBLKSIZE aligned.        */
     464             :     }
     465             :     /* We could also try to make sure that the real rounded-up object size */
     466             :     /* is a multiple of align.  That would be correct up to HBLKSIZE.      */
     467           0 :     new_lb = lb + align - 1;
     468           0 :     result = GC_malloc(new_lb);
      469             :             /* It is OK not to check result for NULL: in that case      */
      470             :             /* GC_memalign returns NULL too, since (0 + 0 % align) is 0. */
     471           0 :     offset = (word)result % align;
     472           0 :     if (offset != 0) {
     473           0 :         offset = align - offset;
     474           0 :         if (!GC_all_interior_pointers) {
     475           0 :             if (offset >= VALID_OFFSET_SZ) return GC_malloc(HBLKSIZE);
     476           0 :             GC_register_displacement(offset);
     477             :         }
     478             :     }
     479           0 :     result = (void *) ((ptr_t)result + offset);
     480             :     GC_ASSERT((word)result % align == 0);
     481           0 :     return result;
     482             : }
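
A usage sketch (aligned_block is hypothetical; an alignment of 64 stays below HBLKSIZE/2, so the displacement path above applies):

    #include "gc.h"

    static void *aligned_block(size_t n)
    {
        /* Returns a 64-byte-aligned, collector-managed block, or   */
        /* NULL on failure.  Alignments above HBLKSIZE are rejected */
        /* through the out-of-memory handler, per the code above.   */
        return GC_memalign(64, n);
    }
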
     483             : 
      484             : /* This one exists largely to redirect posix_memalign for leak finding. */
     485           0 : GC_API int GC_CALL GC_posix_memalign(void **memptr, size_t align, size_t lb)
     486             : {
     487             :   /* Check alignment properly.  */
     488           0 :   if (((align - 1) & align) != 0 || align < sizeof(void *)) {
     489             : #   ifdef MSWINCE
     490             :       return ERROR_INVALID_PARAMETER;
     491             : #   else
     492           0 :       return EINVAL;
     493             : #   endif
     494             :   }
     495             : 
     496           0 :   if ((*memptr = GC_memalign(align, lb)) == NULL) {
     497             : #   ifdef MSWINCE
     498             :       return ERROR_NOT_ENOUGH_MEMORY;
     499             : #   else
     500           0 :       return ENOMEM;
     501             : #   endif
     502             :   }
     503           0 :   return 0;
     504             : }
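
A sketch mirroring the POSIX error contract implemented above (get_aligned is hypothetical):

    #include "gc.h"
    #include <errno.h>   /* EINVAL/ENOMEM on non-WinCE targets */

    static int get_aligned(void **out, size_t n)
    {
        /* Returns 0 on success; EINVAL if the alignment is not a   */
        /* power of two at least sizeof(void *); ENOMEM on failure. */
        return GC_posix_memalign(out, 128, n);
    }
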
     505             : 
     506             : #ifdef ATOMIC_UNCOLLECTABLE
      507             :   /* Allocate lb bytes of pointer-free, untraced, uncollectable data.   */
      508             :   /* This is normally roughly equivalent to the system malloc,          */
      509             :   /* but it may be useful if malloc is redefined.                       */
     510             :   GC_API GC_ATTR_MALLOC void * GC_CALL
     511           0 :         GC_malloc_atomic_uncollectable(size_t lb)
     512             :   {
     513             :     void *op;
     514             :     void **opp;
     515             :     size_t lg;
     516             :     DCL_LOCK_STATE;
     517             : 
     518           0 :     if( SMALL_OBJ(lb) ) {
     519             :         GC_DBG_COLLECT_AT_MALLOC(lb);
     520           0 :         if (EXTRA_BYTES != 0 && lb != 0) lb--;
     521             :                   /* We don't need the extra byte, since this won't be  */
     522             :                   /* collected anyway.                                  */
     523           0 :         lg = GC_size_map[lb];
     524           0 :         opp = &(GC_auobjfreelist[lg]);
     525           0 :         LOCK();
     526           0 :         op = *opp;
     527           0 :         if (EXPECT(0 != op, TRUE)) {
     528           0 :             *opp = obj_link(op);
     529           0 :             obj_link(op) = 0;
     530           0 :             GC_bytes_allocd += GRANULES_TO_BYTES(lg);
     531             :             /* Mark bit was already set while object was on free list. */
     532           0 :             GC_non_gc_bytes += GRANULES_TO_BYTES(lg);
     533           0 :             UNLOCK();
     534             :         } else {
     535           0 :             UNLOCK();
     536           0 :             op = (ptr_t)GC_generic_malloc(lb, AUNCOLLECTABLE);
     537             :         }
     538             :         GC_ASSERT(0 == op || GC_is_marked(op));
     539           0 :         return((void *) op);
     540             :     } else {
     541             :         hdr * hhdr;
     542             : 
     543           0 :         op = (ptr_t)GC_generic_malloc(lb, AUNCOLLECTABLE);
     544           0 :         if (0 == op) return(0);
     545             : 
     546             :         GC_ASSERT(((word)op & (HBLKSIZE - 1)) == 0);
     547           0 :         hhdr = HDR(op);
     548             : 
     549           0 :         LOCK();
     550           0 :         set_mark_bit_from_hdr(hhdr, 0); /* Only object. */
     551             : #       ifndef THREADS
     552             :           GC_ASSERT(hhdr -> hb_n_marks == 0);
     553             : #       endif
     554           0 :         hhdr -> hb_n_marks = 1;
     555           0 :         UNLOCK();
     556           0 :         return((void *) op);
     557             :     }
     558             :   }
     559             : #endif /* ATOMIC_UNCOLLECTABLE */
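
A hedged usage sketch, assuming the collector was built with ATOMIC_UNCOLLECTABLE: the contents are never scanned for pointers and the object is never reclaimed automatically, so it must be released with GC_free (demo_atomic_uncollectable is hypothetical):

    #include "gc.h"

    static void demo_atomic_uncollectable(void)
    {
        void *raw = GC_malloc_atomic_uncollectable(256);
        if (raw != NULL) {
            /* ... use raw as pointer-free, untraced storage ... */
            GC_free(raw);   /* never collected, so free explicitly */
        }
    }
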
     560             : 
      561             : /* Provide a version of strdup() that uses the collector to     */
      562             : /* allocate the copy of the string.                             */
     563           0 : GC_API GC_ATTR_MALLOC char * GC_CALL GC_strdup(const char *s)
     564             : {
     565             :   char *copy;
     566             :   size_t lb;
     567           0 :   if (s == NULL) return NULL;
     568           0 :   lb = strlen(s) + 1;
     569           0 :   if ((copy = GC_malloc_atomic(lb)) == NULL) {
     570             : #   ifndef MSWINCE
     571           0 :       errno = ENOMEM;
     572             : #   endif
     573           0 :     return NULL;
     574             :   }
     575           0 :   BCOPY(s, copy, lb);
     576           0 :   return copy;
     577             : }
     578             : 
     579           0 : GC_API GC_ATTR_MALLOC char * GC_CALL GC_strndup(const char *str, size_t size)
     580             : {
     581             :   char *copy;
     582           0 :   size_t len = strlen(str); /* str is expected to be non-NULL  */
     583           0 :   if (len > size)
     584           0 :     len = size;
     585           0 :   copy = GC_malloc_atomic(len + 1);
     586           0 :   if (copy == NULL) {
     587             : #   ifndef MSWINCE
     588           0 :       errno = ENOMEM;
     589             : #   endif
     590           0 :     return NULL;
     591             :   }
     592           0 :   BCOPY(str, copy, len);
     593           0 :   copy[len] = '\0';
     594           0 :   return copy;
     595             : }
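
A usage sketch (copy_name is hypothetical): the copies live in collector-managed atomic (pointer-free) memory and are reclaimed automatically once unreachable; on allocation failure both routines return NULL and, on non-WinCE targets, set errno to ENOMEM:

    #include "gc.h"

    static char *copy_name(const char *name)
    {
        return GC_strdup(name);   /* NULL input yields NULL */
    }
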
     596             : 
     597             : #ifdef GC_REQUIRE_WCSDUP
     598             : # include <wchar.h> /* for wcslen() */
     599             : 
     600             :   GC_API GC_ATTR_MALLOC wchar_t * GC_CALL GC_wcsdup(const wchar_t *str)
     601             :   {
     602             :     size_t lb = (wcslen(str) + 1) * sizeof(wchar_t);
     603             :     wchar_t *copy = GC_malloc_atomic(lb);
     604             :     if (copy == NULL) {
     605             : #     ifndef MSWINCE
     606             :         errno = ENOMEM;
     607             : #     endif
     608             :       return NULL;
     609             :     }
     610             :     BCOPY(str, copy, lb);
     611             :     return copy;
     612             :   }
     613             : #endif /* GC_REQUIRE_WCSDUP */

Generated by: LCOV version 1.11