LCOV - code coverage report
Current view: top level - mm/boehm-gc - allchblk.c (source / functions) Hit Total Coverage
Test: coverage.info Lines: 212 290 73.1 %
Date: 2017-07-14 10:03:36 Functions: 12 16 75.0 %

          Line data    Source code
       1             : /*
       2             :  * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
       3             :  * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
       4             :  * Copyright (c) 1998-1999 by Silicon Graphics.  All rights reserved.
       5             :  * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
       6             :  *
       7             :  * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
       8             :  * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
       9             :  *
      10             :  * Permission is hereby granted to use or copy this program
      11             :  * for any purpose,  provided the above notices are retained on all copies.
      12             :  * Permission to modify the code and to distribute modified code is granted,
      13             :  * provided the above notices are retained, and a notice that the code was
      14             :  * modified is included with the above copyright notice.
      15             :  */
      16             : 
      17             : #include "private/gc_priv.h"
      18             : 
      19             : #include <stdio.h>
      20             : 
/* Whether the allocator may use the whole heap before growing it.      */
/* TRUE makes large allocations prefer existing free blocks over        */
/* expansion; configured at build time via GC_USE_ENTIRE_HEAP.          */
#ifdef GC_USE_ENTIRE_HEAP
  int GC_use_entire_heap = TRUE;
#else
  int GC_use_entire_heap = FALSE;
#endif

/*
 * Free heap blocks are kept on one of several free lists,
 * depending on the size of the block.  Each free list is doubly linked.
 * Adjacent free blocks are coalesced.
 */


# define MAX_BLACK_LIST_ALLOC (2*HBLKSIZE)
                /* largest block we will allocate starting on a black   */
                /* listed block.  Must be >= HBLKSIZE.                  */


# define UNIQUE_THRESHOLD 32
        /* Sizes up to this many HBLKs each have their own free list    */
# define HUGE_THRESHOLD 256
        /* Sizes of at least this many heap blocks are mapped to a      */
        /* single free list.                                            */
# define FL_COMPRESSION 8
        /* In between sizes map this many distinct sizes to a single    */
        /* bin.                                                         */

/* Total number of free-list bins implied by the three constants above. */
# define N_HBLK_FLS ((HUGE_THRESHOLD - UNIQUE_THRESHOLD) / FL_COMPRESSION \
                     + UNIQUE_THRESHOLD)

#ifndef GC_GCJ_SUPPORT
  STATIC
#endif
  struct hblk * GC_hblkfreelist[N_HBLK_FLS+1] = { 0 };
                                /* List of completely empty heap blocks */
                                /* Linked through hb_next field of      */
                                /* header structure associated with     */
                                /* block.  Remains externally visible   */
                                /* as used by GNU GCJ currently.        */

#ifndef GC_GCJ_SUPPORT
  STATIC
#endif
  word GC_free_bytes[N_HBLK_FLS+1] = { 0 };
        /* Number of free bytes on each list.  Remains visible to GCJ.  */
      66             : 
      67             : /* Return the largest n such that the number of free bytes on lists     */
      68             : /* n .. N_HBLK_FLS is greater or equal to GC_max_large_allocd_bytes     */
      69             : /* minus GC_large_allocd_bytes.  If there is no such n, return 0.       */
      70        5041 : GC_INLINE int GC_enough_large_bytes_left(void)
      71             : {
      72             :     int n;
      73        5041 :     word bytes = GC_large_allocd_bytes;
      74             : 
      75             :     GC_ASSERT(GC_max_large_allocd_bytes <= GC_heapsize);
      76       25854 :     for (n = N_HBLK_FLS; n >= 0; --n) {
      77       25792 :         bytes += GC_free_bytes[n];
      78       25792 :         if (bytes >= GC_max_large_allocd_bytes) return n;
      79             :     }
      80          62 :     return 0;
      81             : }
      82             : 
      83             : /* Map a number of blocks to the appropriate large block free list index. */
      84      165070 : STATIC int GC_hblk_fl_from_blocks(word blocks_needed)
      85             : {
      86      165070 :     if (blocks_needed <= UNIQUE_THRESHOLD) return (int)blocks_needed;
      87       73419 :     if (blocks_needed >= HUGE_THRESHOLD) return N_HBLK_FLS;
      88       23026 :     return (int)(blocks_needed - UNIQUE_THRESHOLD)/FL_COMPRESSION
      89       23026 :                                         + UNIQUE_THRESHOLD;
      90             : 
      91             : }
      92             : 
/* Headers of the previous/next blocks on a doubly-linked free list.    */
# define PHDR(hhdr) HDR((hhdr) -> hb_prev)
# define NHDR(hhdr) HDR((hhdr) -> hb_next)

/* Whether the block's pages are currently mapped; always TRUE unless   */
/* block unmapping (USE_MUNMAP) is compiled in.                         */
# ifdef USE_MUNMAP
#   define IS_MAPPED(hhdr) (((hhdr) -> hb_flags & WAS_UNMAPPED) == 0)
# else
#   define IS_MAPPED(hhdr) TRUE
# endif /* !USE_MUNMAP */
     101             : 
     102             : #if !defined(NO_DEBUGGING) || defined(GC_ASSERTIONS)
     103             :   /* Should return the same value as GC_large_free_bytes.       */
     104           0 :   GC_INNER word GC_compute_large_free_bytes(void)
     105             :   {
     106             :       struct hblk * h;
     107             :       hdr * hhdr;
     108           0 :       word total_free = 0;
     109             :       unsigned i;
     110             : 
     111           0 :       for (i = 0; i <= N_HBLK_FLS; ++i) {
     112           0 :         for (h = GC_hblkfreelist[i]; h != 0; h = hhdr->hb_next) {
     113           0 :           hhdr = HDR(h);
     114           0 :           total_free += hhdr->hb_sz;
     115             :         }
     116             :       }
     117           0 :       return total_free;
     118             :   }
     119             : #endif /* !NO_DEBUGGING || GC_ASSERTIONS */
     120             : 
     121             : # if !defined(NO_DEBUGGING)
     122           0 : void GC_print_hblkfreelist(void)
     123             : {
     124             :     struct hblk * h;
     125             :     hdr * hhdr;
     126             :     unsigned i;
     127             :     word total;
     128             : 
     129           0 :     for (i = 0; i <= N_HBLK_FLS; ++i) {
     130           0 :       h = GC_hblkfreelist[i];
     131           0 :       if (0 != h) GC_printf("Free list %u (total size %lu):\n",
     132             :                             i, (unsigned long)GC_free_bytes[i]);
     133           0 :       while (h != 0) {
     134           0 :         hhdr = HDR(h);
     135           0 :         GC_printf("\t%p size %lu %s black listed\n",
     136             :                 (void *)h, (unsigned long) hhdr -> hb_sz,
     137             :                 GC_is_black_listed(h, HBLKSIZE) != 0 ? "start" :
     138           0 :                 GC_is_black_listed(h, hhdr -> hb_sz) != 0 ? "partially" :
     139             :                                                         "not");
     140           0 :         h = hhdr -> hb_next;
     141             :       }
     142             :     }
     143           0 :     GC_printf("GC_large_free_bytes: %lu\n",
     144             :               (unsigned long)GC_large_free_bytes);
     145             : 
     146           0 :     if ((total = GC_compute_large_free_bytes()) != GC_large_free_bytes)
     147           0 :           GC_err_printf("GC_large_free_bytes INCONSISTENT!! Should be: %lu\n",
     148             :                         (unsigned long)total);
     149           0 : }
     150             : 
     151             : /* Return the free list index on which the block described by the header */
     152             : /* appears, or -1 if it appears nowhere.                                 */
     153           0 : static int free_list_index_of(hdr *wanted)
     154             : {
     155             :     struct hblk * h;
     156             :     hdr * hhdr;
     157             :     int i;
     158             : 
     159           0 :     for (i = 0; i <= N_HBLK_FLS; ++i) {
     160           0 :       h = GC_hblkfreelist[i];
     161           0 :       while (h != 0) {
     162           0 :         hhdr = HDR(h);
     163           0 :         if (hhdr == wanted) return i;
     164           0 :         h = hhdr -> hb_next;
     165             :       }
     166             :     }
     167           0 :     return -1;
     168             : }
     169             : 
/* Dump each heap section (merging sections that are contiguous in      */
/* address space) and, within it, each block: free blocks are checked   */
/* against the free list they should be on; used blocks report their    */
/* object size.  Diagnostic only; never modifies GC state.              */
void GC_dump_regions(void)
{
    unsigned i;
    ptr_t start, end;
    ptr_t p;
    size_t bytes;
    hdr *hhdr;
    for (i = 0; i < GC_n_heap_sects; ++i) {
        start = GC_heap_sects[i].hs_start;
        bytes = GC_heap_sects[i].hs_bytes;
        end = start + bytes;
        /* Merge in contiguous sections.        */
          while (i+1 < GC_n_heap_sects && GC_heap_sects[i+1].hs_start == end) {
            ++i;
            end = GC_heap_sects[i].hs_start + GC_heap_sects[i].hs_bytes;
          }
        GC_printf("***Section from %p to %p\n", start, end);
        for (p = start; (word)p < (word)end; ) {
            hhdr = HDR(p);
            /* A forwarding address or nil here means the block has no  */
            /* proper header; report it and skip one HBLK.              */
            if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
                GC_printf("\t%p Missing header!!(%p)\n", p, (void *)hhdr);
                p += HBLKSIZE;
                continue;
            }
            if (HBLK_IS_FREE(hhdr)) {
                /* Which list this block's size says it belongs on.     */
                int correct_index = GC_hblk_fl_from_blocks(
                                        divHBLKSZ(hhdr -> hb_sz));
                int actual_index;

                GC_printf("\t%p\tfree block of size 0x%lx bytes%s\n", p,
                          (unsigned long)(hhdr -> hb_sz),
                          IS_MAPPED(hhdr) ? "" : " (unmapped)");
                actual_index = free_list_index_of(hhdr);
                if (-1 == actual_index) {
                    GC_printf("\t\tBlock not on free list %d!!\n",
                              correct_index);
                } else if (correct_index != actual_index) {
                    GC_printf("\t\tBlock on list %d, should be on %d!!\n",
                              actual_index, correct_index);
                }
                /* Free blocks store their full byte size in hb_sz.     */
                p += hhdr -> hb_sz;
            } else {
                GC_printf("\t%p\tused for blocks of size 0x%lx bytes\n", p,
                          (unsigned long)(hhdr -> hb_sz));
                /* Used blocks store the object size; convert to the    */
                /* number of heap blocks the allocation spans.          */
                p += HBLKSIZE * OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
            }
        }
    }
}
     219             : 
     220             : # endif /* NO_DEBUGGING */
     221             : 
/* Initialize hdr for a block containing the indicated size and         */
/* kind of objects.                                                     */
/* Return FALSE on failure.                                             */
static GC_bool setup_header(hdr * hhdr, struct hblk *block, size_t byte_sz,
                            int kind, unsigned flags)
{
    word descr;
#   ifndef MARK_BIT_PER_OBJ
      size_t granules;
#   endif

#   ifdef ENABLE_DISCLAIM
      /* Propagate the kind's disclaim/unconditional-mark attributes    */
      /* into the per-block flags.                                      */
      if (GC_obj_kinds[kind].ok_disclaim_proc)
        flags |= HAS_DISCLAIM;
      if (GC_obj_kinds[kind].ok_mark_unconditionally)
        flags |= MARK_UNCONDITIONALLY;
#   endif

    /* Set size, kind and mark proc fields */
      hhdr -> hb_sz = byte_sz;
      hhdr -> hb_obj_kind = (unsigned char)kind;
      hhdr -> hb_flags = (unsigned char)flags;
      hhdr -> hb_block = block;
      descr = GC_obj_kinds[kind].ok_descriptor;
      /* Some kinds encode the object size into the mark descriptor.    */
      if (GC_obj_kinds[kind].ok_relocate_descr) descr += byte_sz;
      hhdr -> hb_descr = descr;

#   ifdef MARK_BIT_PER_OBJ
     /* Set hb_inv_sz as portably as possible.                          */
     /* We set it to the smallest value such that sz * inv_sz > 2**32    */
     /* This may be more precision than necessary.                      */
      if (byte_sz > MAXOBJBYTES) {
         hhdr -> hb_inv_sz = LARGE_INV_SZ;
      } else {
        word inv_sz;

#       if CPP_WORDSZ == 64
          inv_sz = ((word)1 << 32)/byte_sz;
          if (((inv_sz*byte_sz) >> 32) == 0) ++inv_sz;
#       else  /* 32 bit words */
          GC_ASSERT(byte_sz >= 4);
          inv_sz = ((unsigned)1 << 31)/byte_sz;
          inv_sz *= 2;
          /* NOTE(review): appears to rely on 32-bit unsigned wraparound */
          /* of inv_sz*byte_sz to terminate — TODO confirm upstream.     */
          while (inv_sz*byte_sz > byte_sz) ++inv_sz;
#       endif
        hhdr -> hb_inv_sz = inv_sz;
      }
#   else /* MARK_BIT_PER_GRANULE */
      hhdr -> hb_large_block = (unsigned char)(byte_sz > MAXOBJBYTES);
      granules = BYTES_TO_GRANULES(byte_sz);
      if (EXPECT(!GC_add_map_entry(granules), FALSE)) {
        /* Make it look like a valid block. */
        hhdr -> hb_sz = HBLKSIZE;
        hhdr -> hb_descr = 0;
        hhdr -> hb_large_block = TRUE;
        hhdr -> hb_map = 0;
        return FALSE;
      } else {
        /* Large blocks share the map at index 0.                       */
        size_t index = (hhdr -> hb_large_block? 0 : granules);
        hhdr -> hb_map = GC_obj_map[index];
      }
#   endif /* MARK_BIT_PER_GRANULE */

    /* Clear mark bits */
    GC_clear_hdr_marks(hhdr);

    /* Record the current collection number (truncated to 16 bits).     */
    hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
    return(TRUE);
}
     291             : 
     292             : /* Remove hhdr from the free list (it is assumed to specified by index). */
     293       82183 : STATIC void GC_remove_from_fl_at(hdr *hhdr, int index)
     294             : {
     295             :     GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
     296       82183 :     if (hhdr -> hb_prev == 0) {
     297             :         GC_ASSERT(HDR(GC_hblkfreelist[index]) == hhdr);
     298       82128 :         GC_hblkfreelist[index] = hhdr -> hb_next;
     299             :     } else {
     300             :         hdr *phdr;
     301          55 :         GET_HDR(hhdr -> hb_prev, phdr);
     302          55 :         phdr -> hb_next = hhdr -> hb_next;
     303             :     }
     304             :     /* We always need index to maintain free counts.    */
     305             :     GC_ASSERT(GC_free_bytes[index] >= hhdr -> hb_sz);
     306       82183 :     GC_free_bytes[index] -= hhdr -> hb_sz;
     307       82183 :     if (0 != hhdr -> hb_next) {
     308             :         hdr * nhdr;
     309             :         GC_ASSERT(!IS_FORWARDING_ADDR_OR_NIL(NHDR(hhdr)));
     310       19989 :         GET_HDR(hhdr -> hb_next, nhdr);
     311       19989 :         nhdr -> hb_prev = hhdr -> hb_prev;
     312             :     }
     313       82183 : }
     314             : 
     315             : /* Remove hhdr from the appropriate free list (we assume it is on the   */
     316             : /* size-appropriate free list).                                         */
     317       29143 : GC_INLINE void GC_remove_from_fl(hdr *hhdr)
     318             : {
     319       29143 :   GC_remove_from_fl_at(hhdr, GC_hblk_fl_from_blocks(divHBLKSZ(hhdr->hb_sz)));
     320       29143 : }
     321             : 
     322             : /* Return a pointer to the free block ending just before h, if any.     */
     323       32964 : STATIC struct hblk * GC_free_block_ending_at(struct hblk *h)
     324             : {
     325       32964 :     struct hblk * p = h - 1;
     326             :     hdr * phdr;
     327             : 
     328       32964 :     GET_HDR(p, phdr);
     329       66470 :     while (0 != phdr && IS_FORWARDING_ADDR_OR_NIL(phdr)) {
     330         542 :         p = FORWARDED_ADDR(p,phdr);
     331         542 :         phdr = HDR(p);
     332             :     }
     333       32964 :     if (0 != phdr) {
     334       32473 :         if(HBLK_IS_FREE(phdr)) {
     335           1 :             return p;
     336             :         } else {
     337       32472 :             return 0;
     338             :         }
     339             :     }
     340         491 :     p = GC_prev_block(h - 1);
     341         491 :     if (0 != p) {
     342         316 :       phdr = HDR(p);
     343         316 :       if (HBLK_IS_FREE(phdr) && (ptr_t)p + phdr -> hb_sz == (ptr_t)h) {
     344         243 :         return p;
     345             :       }
     346             :     }
     347         248 :     return 0;
     348             : }
     349             : 
     350             : /* Add hhdr to the appropriate free list.               */
     351             : /* We maintain individual free lists sorted by address. */
     352       82729 : STATIC void GC_add_to_fl(struct hblk *h, hdr *hhdr)
     353             : {
     354       82729 :     int index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
     355       82729 :     struct hblk *second = GC_hblkfreelist[index];
     356             :     hdr * second_hdr;
     357             : #   if defined(GC_ASSERTIONS) && !defined(USE_MUNMAP)
     358             :       struct hblk *next = (struct hblk *)((word)h + hhdr -> hb_sz);
     359             :       hdr * nexthdr = HDR(next);
     360             :       struct hblk *prev = GC_free_block_ending_at(h);
     361             :       hdr * prevhdr = HDR(prev);
     362             :       GC_ASSERT(nexthdr == 0 || !HBLK_IS_FREE(nexthdr)
     363             :                 || (signed_word)GC_heapsize < 0);
     364             :                 /* In the last case, blocks may be too large to merge. */
     365             :       GC_ASSERT(prev == 0 || !HBLK_IS_FREE(prevhdr)
     366             :                 || (signed_word)GC_heapsize < 0);
     367             : #   endif
     368             : 
     369             :     GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
     370       82729 :     GC_hblkfreelist[index] = h;
     371       82729 :     GC_free_bytes[index] += hhdr -> hb_sz;
     372             :     GC_ASSERT(GC_free_bytes[index] <= GC_large_free_bytes);
     373       82729 :     hhdr -> hb_next = second;
     374       82729 :     hhdr -> hb_prev = 0;
     375       82729 :     if (0 != second) {
     376       20365 :       GET_HDR(second, second_hdr);
     377       20365 :       second_hdr -> hb_prev = h;
     378             :     }
     379       82729 :     hhdr -> hb_flags |= FREE_BLK;
     380       82729 : }
     381             : 
     382             : #ifdef USE_MUNMAP
     383             : 
     384             : #   ifndef MUNMAP_THRESHOLD
     385             : #     define MUNMAP_THRESHOLD 6
     386             : #   endif
     387             : 
     388             : GC_INNER int GC_unmap_threshold = MUNMAP_THRESHOLD;
     389             : 
     390             : /* Unmap blocks that haven't been recently touched.  This is the only way */
     391             : /* way blocks are ever unmapped.                                          */
     392             : GC_INNER void GC_unmap_old(void)
     393             : {
     394             :     struct hblk * h;
     395             :     hdr * hhdr;
     396             :     int i;
     397             : 
     398             :     if (GC_unmap_threshold == 0)
     399             :       return; /* unmapping disabled */
     400             : 
     401             :     for (i = 0; i <= N_HBLK_FLS; ++i) {
     402             :       for (h = GC_hblkfreelist[i]; 0 != h; h = hhdr -> hb_next) {
     403             :         hhdr = HDR(h);
     404             :         if (!IS_MAPPED(hhdr)) continue;
     405             : 
     406             :         if ((unsigned short)GC_gc_no - hhdr -> hb_last_reclaimed >
     407             :                 (unsigned short)GC_unmap_threshold) {
     408             :           GC_unmap((ptr_t)h, hhdr -> hb_sz);
     409             :           hhdr -> hb_flags |= WAS_UNMAPPED;
     410             :         }
     411             :       }
     412             :     }
     413             : }
     414             : 
/* Merge all unmapped blocks that are adjacent to other free            */
/* blocks.  This may involve remapping, since all blocks are either     */
/* fully mapped or fully unmapped.                                      */
GC_INNER void GC_merge_unmapped(void)
{
    struct hblk * h, *next;
    hdr * hhdr, *nexthdr;
    word size, nextsize;
    int i;

    for (i = 0; i <= N_HBLK_FLS; ++i) {
      h = GC_hblkfreelist[i];
      while (h != 0) {
        GET_HDR(h, hhdr);
        size = hhdr->hb_sz;
        next = (struct hblk *)((word)h + size);
        GET_HDR(next, nexthdr);
        /* Coalesce with successor, if possible */
          if (0 != nexthdr && HBLK_IS_FREE(nexthdr)
              && (signed_word) (size + (nextsize = nexthdr->hb_sz)) > 0
                 /* no pot. overflow */) {
            /* Note that we usually try to avoid adjacent free blocks   */
            /* that are either both mapped or both unmapped.  But that  */
            /* isn't guaranteed to hold since we remap blocks when we   */
            /* split them, and don't merge at that point.  It may also  */
            /* not hold if the merged block would be too big.           */
            if (IS_MAPPED(hhdr) && !IS_MAPPED(nexthdr)) {
              /* make both consistent, so that we can merge */
                if (size > nextsize) {
                  /* Cheaper to remap the smaller successor.            */
                  GC_remap((ptr_t)next, nextsize);
                } else {
                  GC_unmap((ptr_t)h, size);
                  GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nextsize);
                  hhdr -> hb_flags |= WAS_UNMAPPED;
                }
            } else if (IS_MAPPED(nexthdr) && !IS_MAPPED(hhdr)) {
              if (size > nextsize) {
                GC_unmap((ptr_t)next, nextsize);
                GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nextsize);
              } else {
                GC_remap((ptr_t)h, size);
                hhdr -> hb_flags &= ~WAS_UNMAPPED;
                /* The merged block inherits the fresher timestamp.     */
                hhdr -> hb_last_reclaimed = nexthdr -> hb_last_reclaimed;
              }
            } else if (!IS_MAPPED(hhdr) && !IS_MAPPED(nexthdr)) {
              /* Unmap any gap in the middle */
                GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nextsize);
            }
            /* If they are both unmapped, we merge, but leave unmapped. */
            GC_remove_from_fl_at(hhdr, i);
            GC_remove_from_fl(nexthdr);
            hhdr -> hb_sz += nexthdr -> hb_sz;
            GC_remove_header(next);
            GC_add_to_fl(h, hhdr);
            /* Start over at beginning of list */
            h = GC_hblkfreelist[i];
          } else /* not mergable with successor */ {
            h = hhdr -> hb_next;
          }
      } /* while (h != 0) ... */
    } /* for ... */
}
     477             : 
     478             : #endif /* USE_MUNMAP */
     479             : 
     480             : /*
     481             :  * Return a pointer to a block starting at h of length bytes.
     482             :  * Memory for the block is mapped.
     483             :  * Remove the block from its free list, and return the remainder (if any)
     484             :  * to its appropriate free list.
     485             :  * May fail by returning 0.
     486             :  * The header for the returned block must be set up by the caller.
     487             :  * If the return value is not 0, then hhdr is the header for it.
     488             :  */
     489       53038 : STATIC struct hblk * GC_get_first_part(struct hblk *h, hdr *hhdr,
     490             :                                        size_t bytes, int index)
     491             : {
     492       53038 :     word total_size = hhdr -> hb_sz;
     493             :     struct hblk * rest;
     494             :     hdr * rest_hdr;
     495             : 
     496             :     GC_ASSERT((total_size & (HBLKSIZE-1)) == 0);
     497       53038 :     GC_remove_from_fl_at(hhdr, index);
     498       53038 :     if (total_size == bytes) return h;
     499       49750 :     rest = (struct hblk *)((word)h + bytes);
     500       49750 :     rest_hdr = GC_install_header(rest);
     501       49750 :     if (0 == rest_hdr) {
     502             :         /* FIXME: This is likely to be very bad news ... */
     503           0 :         WARN("Header allocation failed: Dropping block.\n", 0);
     504           0 :         return(0);
     505             :     }
     506       49750 :     rest_hdr -> hb_sz = total_size - bytes;
     507       49750 :     rest_hdr -> hb_flags = 0;
     508             : #   ifdef GC_ASSERTIONS
     509             :       /* Mark h not free, to avoid assertion about adjacent free blocks. */
     510             :         hhdr -> hb_flags &= ~FREE_BLK;
     511             : #   endif
     512       49750 :     GC_add_to_fl(rest, rest_hdr);
     513       49750 :     return h;
     514             : }
     515             : 
     516             : /*
     517             :  * H is a free block.  N points at an address inside it.
     518             :  * A new header for n has already been set up.  Fix up h's header
     519             :  * to reflect the fact that it is being split, move it to the
     520             :  * appropriate free list.
     521             :  * N replaces h in the original free list.
     522             :  *
     523             :  * Nhdr is not completely filled in, since it is about to allocated.
     524             :  * It may in fact end up on the wrong free list for its size.
     525             :  * That's not a disaster, since n is about to be allocated
     526             :  * by our caller.
     527             :  * (Hence adding it to a free list is silly.  But this path is hopefully
     528             :  * rare enough that it doesn't matter.  The code is cleaner this way.)
     529             :  */
     530          15 : STATIC void GC_split_block(struct hblk *h, hdr *hhdr, struct hblk *n,
     531             :                            hdr *nhdr, int index /* Index of free list */)
     532             : {
     533          15 :     word total_size = hhdr -> hb_sz;
     534          15 :     word h_size = (word)n - (word)h;
     535          15 :     struct hblk *prev = hhdr -> hb_prev;
     536          15 :     struct hblk *next = hhdr -> hb_next;
     537             : 
     538             :     /* Replace h with n on its freelist */
     539          15 :       nhdr -> hb_prev = prev;
     540          15 :       nhdr -> hb_next = next;
     541          15 :       nhdr -> hb_sz = total_size - h_size;
     542          15 :       nhdr -> hb_flags = 0;
     543          15 :       if (0 != prev) {
     544           0 :         HDR(prev) -> hb_next = n;
     545             :       } else {
     546          15 :         GC_hblkfreelist[index] = n;
     547             :       }
     548          15 :       if (0 != next) {
     549           0 :         HDR(next) -> hb_prev = n;
     550             :       }
     551             :       GC_ASSERT(GC_free_bytes[index] > h_size);
     552          15 :       GC_free_bytes[index] -= h_size;
     553             : #   ifdef USE_MUNMAP
     554             :       hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
     555             : #   endif
     556          15 :     hhdr -> hb_sz = h_size;
     557          15 :     GC_add_to_fl(h, hhdr);
     558          15 :     nhdr -> hb_flags |= FREE_BLK;
     559          15 : }
     560             : 
     561             : STATIC struct hblk *
     562             : GC_allochblk_nth(size_t sz /* bytes */, int kind, unsigned flags, int n,
     563             :                  int may_split);
     564             : #define AVOID_SPLIT_REMAPPED 2
     565             : 
     566             : /*
     567             :  * Allocate (and return pointer to) a heap block
     568             :  *   for objects of size sz bytes, searching the nth free list.
     569             :  *
     570             :  * NOTE: We set obj_map field in header correctly.
     571             :  *       Caller is responsible for building an object freelist in block.
     572             :  *
     573             :  * The client is responsible for clearing the block, if necessary.
     574             :  */
     575             : GC_INNER struct hblk *
     576       53198 : GC_allochblk(size_t sz, int kind, unsigned flags/* IGNORE_OFF_PAGE or 0 */)
     577             : {
     578             :     word blocks;
     579             :     int start_list;
     580             :     struct hblk *result;
     581             :     int may_split;
     582             :     int split_limit; /* Highest index of free list whose blocks we      */
     583             :                      /* split.                                          */
     584             : 
     585             :     GC_ASSERT((sz & (GRANULE_BYTES - 1)) == 0);
     586       53198 :     blocks = OBJ_SZ_TO_BLOCKS(sz);
     587       53198 :     if ((signed_word)(blocks * HBLKSIZE) < 0) {
     588           0 :       return 0;
     589             :     }
     590       53198 :     start_list = GC_hblk_fl_from_blocks(blocks);
     591             :     /* Try for an exact match first. */
     592       53198 :     result = GC_allochblk_nth(sz, kind, flags, start_list, FALSE);
     593       53198 :     if (0 != result) return result;
     594             : 
     595       49910 :     may_split = TRUE;
     596      244867 :     if (GC_use_entire_heap || GC_dont_gc
     597       99820 :         || USED_HEAP_SIZE < GC_requested_heapsize
     598      100178 :         || GC_incremental || !GC_should_collect()) {
     599             :         /* Should use more of the heap, even if it requires splitting. */
     600       44869 :         split_limit = N_HBLK_FLS;
     601        5041 :     } else if (GC_finalizer_bytes_freed > (GC_heapsize >> 4)) {
     602             :           /* If we are deallocating lots of memory from         */
     603             :           /* finalizers, fail and collect sooner rather         */
     604             :           /* than later.                                        */
     605           0 :           split_limit = 0;
     606             :     } else {
     607             :           /* If we have enough large blocks left to cover any   */
     608             :           /* previous request for large blocks, we go ahead     */
     609             :           /* and split.  Assuming a steady state, that should   */
     610             :           /* be safe.  It means that we can use the full        */
     611             :           /* heap if we allocate only small objects.            */
     612        5041 :           split_limit = GC_enough_large_bytes_left();
     613             : #         ifdef USE_MUNMAP
     614             :             if (split_limit > 0)
     615             :               may_split = AVOID_SPLIT_REMAPPED;
     616             : #         endif
     617             :     }
     618       49910 :     if (start_list < UNIQUE_THRESHOLD) {
     619             :       /* No reason to try start_list again, since all blocks are exact  */
     620             :       /* matches.                                                       */
     621       49698 :       ++start_list;
     622             :     }
     623     2307737 :     for (; start_list <= split_limit; ++start_list) {
     624     2307577 :         result = GC_allochblk_nth(sz, kind, flags, start_list, may_split);
     625     2307577 :         if (0 != result)
     626       49750 :             break;
     627             :     }
     628       49910 :     return result;
     629             : }
     630             : 
STATIC long GC_large_alloc_warn_suppressed = 0;
                        /* Number of warnings suppressed so far.        */

/* The same, but with search restricted to nth free list.  Flags is     */
/* IGNORE_OFF_PAGE or zero.  sz is in bytes.  The may_split flag        */
/* indicates whether it is OK to split larger blocks (if set to         */
/* AVOID_SPLIT_REMAPPED then memory remapping followed by splitting     */
/* should be generally avoided).  Returns NULL if no suitable block     */
/* was found on free list n.                                            */
STATIC struct hblk *
GC_allochblk_nth(size_t sz, int kind, unsigned flags, int n, int may_split)
{
    struct hblk *hbp;
    hdr * hhdr;                 /* Header corr. to hbp */
    struct hblk *thishbp;
    hdr * thishdr;              /* Header corr. to thishbp */
    signed_word size_needed;    /* number of bytes in requested objects */
    signed_word size_avail;     /* bytes available in this block        */

    /* Round the request up to a whole number of heap blocks.           */
    size_needed = HBLKSIZE * OBJ_SZ_TO_BLOCKS(sz);

    /* search for a big enough block in free list */
        for (hbp = GC_hblkfreelist[n];; hbp = hhdr -> hb_next) {
            if (NULL == hbp) return NULL;
                /* Free list n is exhausted without a usable block.     */
            GET_HDR(hbp, hhdr); /* set hhdr value */
            size_avail = hhdr->hb_sz;
            if (size_avail < size_needed) continue;
            if (size_avail != size_needed) {
              signed_word next_size;

              /* Block is larger than needed: only usable if we are     */
              /* allowed to split it.                                   */
              if (!may_split) continue;
              /* If the next heap block is obviously better, go on.     */
              /* This prevents us from disassembling a single large     */
              /* block to get tiny blocks.                              */
              thishbp = hhdr -> hb_next;
              if (thishbp != 0) {
                GET_HDR(thishbp, thishdr);
                next_size = (signed_word)(thishdr -> hb_sz);
                if (next_size < size_avail
                    && next_size >= size_needed
                    && !GC_is_black_listed(thishbp, (word)size_needed)) {
                    continue;
                }
              }
            }
            /* For collectable kinds (and pointer-free requests too     */
            /* large for the black-list granularity), skip over any     */
            /* black-listed prefix of the candidate block.              */
            if (!IS_UNCOLLECTABLE(kind) && (kind != PTRFREE
                        || size_needed > (signed_word)MAX_BLACK_LIST_ALLOC)) {
              struct hblk * lasthbp = hbp;
              ptr_t search_end = (ptr_t)hbp + size_avail - size_needed;
              signed_word orig_avail = size_avail;
              signed_word eff_size_needed = (flags & IGNORE_OFF_PAGE) != 0 ?
                                                (signed_word)HBLKSIZE
                                                : size_needed;

              /* Advance lasthbp past every black-listed region; each   */
              /* GC_is_black_listed call returns the next candidate.    */
              while ((word)lasthbp <= (word)search_end
                     && (thishbp = GC_is_black_listed(lasthbp,
                                            (word)eff_size_needed)) != 0) {
                lasthbp = thishbp;
              }
              size_avail -= (ptr_t)lasthbp - (ptr_t)hbp;
              thishbp = lasthbp;
              if (size_avail >= size_needed) {
                if (thishbp != hbp) {
                  /* Usable tail beyond the black-listed prefix: split  */
                  /* there and allocate from the tail.                  */
#                 ifdef USE_MUNMAP
                    /* Avoid remapping followed by splitting.   */
                    if (may_split == AVOID_SPLIT_REMAPPED && !IS_MAPPED(hhdr))
                      continue;
#                 endif
                  thishdr = GC_install_header(thishbp);
                  if (0 != thishdr) {
                  /* Make sure it's mapped before we mangle it. */
#                   ifdef USE_MUNMAP
                      if (!IS_MAPPED(hhdr)) {
                        GC_remap((ptr_t)hbp, hhdr -> hb_sz);
                        hhdr -> hb_flags &= ~WAS_UNMAPPED;
                      }
#                   endif
                  /* Split the block at thishbp */
                      GC_split_block(hbp, hhdr, thishbp, thishdr, n);
                  /* Advance to thishbp */
                      hbp = thishbp;
                      hhdr = thishdr;
                      /* We must now allocate thishbp, since it may     */
                      /* be on the wrong free list.                     */
                  }
                  /* NOTE(review): if GC_install_header failed we fall  */
                  /* through with the original hbp/hhdr still valid.    */
                }
              } else if (size_needed > (signed_word)BL_LIMIT
                         && orig_avail - size_needed
                            > (signed_word)BL_LIMIT) {
                /* Punt, since anything else risks unreasonable heap growth. */
                if (++GC_large_alloc_warn_suppressed
                    >= GC_large_alloc_warn_interval) {
                  WARN("Repeated allocation of very large block "
                       "(appr. size %" WARN_PRIdPTR "):\n"
                       "\tMay lead to memory leak and poor performance.\n",
                       size_needed);
                  GC_large_alloc_warn_suppressed = 0;
                }
                /* Ignore the black list and use the block anyway.      */
                size_avail = orig_avail;
              } else if (size_avail == 0 && size_needed == HBLKSIZE
                         && IS_MAPPED(hhdr)) {
                if (!GC_find_leak) {
                  static unsigned count = 0;

                  /* The block is completely blacklisted.  We need      */
                  /* to drop some such blocks, since otherwise we spend */
                  /* all our time traversing them if pointer-free       */
                  /* blocks are unpopular.                              */
                  /* A dropped block will be reconsidered at next GC.   */
                  if ((++count & 3) == 0) {
                    /* Allocate and drop the block in small chunks, to  */
                    /* maximize the chance that we will recover some    */
                    /* later.                                           */
                      word total_size = hhdr -> hb_sz;
                      struct hblk * limit = hbp + divHBLKSZ(total_size);
                      struct hblk * h;
                      struct hblk * prev = hhdr -> hb_prev;

                      GC_large_free_bytes -= total_size;
                      GC_bytes_dropped += total_size;
                      GC_remove_from_fl_at(hhdr, n);
                      for (h = hbp; (word)h < (word)limit; h++) {
                        if (h != hbp) {
                          hhdr = GC_install_header(h);
                        }
                        if (NULL != hhdr) {
                          (void)setup_header(hhdr, h, HBLKSIZE, PTRFREE, 0);
                                                    /* Can't fail. */
                          if (GC_debugging_started) {
                            BZERO(h, HBLKSIZE);
                          }
                        }
                      }
                    /* Restore hbp to point at free block */
                      hbp = prev;
                      if (0 == hbp) {
                        /* The dropped block headed the free list;      */
                        /* restart the search from scratch.             */
                        return GC_allochblk_nth(sz, kind, flags, n, may_split);
                      }
                      hhdr = HDR(hbp);
                  }
                }
              }
            }
            if( size_avail >= size_needed ) {
#               ifdef USE_MUNMAP
                  if (!IS_MAPPED(hhdr)) {
                    GC_remap((ptr_t)hbp, hhdr -> hb_sz);
                    hhdr -> hb_flags &= ~WAS_UNMAPPED;
                    /* Note: This may leave adjacent, mapped free blocks. */
                  }
#               endif
                /* hbp may be on the wrong freelist; the parameter n    */
                /* is important.                                        */
                hbp = GC_get_first_part(hbp, hhdr, size_needed, n);
                break;
            }
        }

    if (0 == hbp) return 0;

    /* Add it to map of valid blocks */
        if (!GC_install_counts(hbp, (word)size_needed)) return(0);
        /* This leaks memory under very rare conditions. */

    /* Set up header */
        if (!setup_header(hhdr, hbp, sz, kind, flags)) {
            GC_remove_counts(hbp, (word)size_needed);
            return(0); /* ditto */
        }
#   ifndef GC_DISABLE_INCREMENTAL
        /* Notify virtual dirty bit implementation that we are about to */
        /* write.  Ensure that pointer-free objects are not protected   */
        /* if it is avoidable.  This also ensures that newly allocated  */
        /* blocks are treated as dirty.  Necessary since we don't       */
        /* protect free blocks.                                         */
        GC_ASSERT((size_needed & (HBLKSIZE-1)) == 0);
        GC_remove_protection(hbp, divHBLKSZ(size_needed),
                             (hhdr -> hb_descr == 0) /* pointer-free */);
#   endif
    /* We just successfully allocated a block.  Restart count of        */
    /* consecutive failures.                                            */
    GC_fail_count = 0;

    GC_large_free_bytes -= size_needed;
    GC_ASSERT(IS_MAPPED(hhdr));
    return( hbp );
}
     817             : 
/*
 * Free a heap block.
 *
 * Coalesce the block with its neighbors if possible.
 *
 * All mark words are assumed to be cleared.
 */
GC_INNER void GC_freehblk(struct hblk *hbp)
{
    struct hblk *next, *prev;
    hdr *hhdr, *prevhdr, *nexthdr;
    word size;

    GET_HDR(hbp, hhdr);
    /* Round the stored object size up to a whole number of blocks.     */
    size = HBLKSIZE * OBJ_SZ_TO_BLOCKS(hhdr->hb_sz);
    if ((signed_word)size <= 0)
      ABORT("Deallocating excessively large block.  Too large an allocation?");
      /* Probably possible if we try to allocate more than half the address */
      /* space at once.  If we don't catch it here, strange things happen   */
      /* later.                                                             */
    GC_remove_counts(hbp, size);
    /* From here on hb_sz is the block size in bytes, as free-block     */
    /* headers use it.                                                  */
    hhdr->hb_sz = size;
#   ifdef USE_MUNMAP
      hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
#   endif

    /* Check for duplicate deallocation in the easy case */
      if (HBLK_IS_FREE(hhdr)) {
        ABORT_ARG1("Duplicate large block deallocation",
                   " of %p", (void *)hbp);
      }

    GC_ASSERT(IS_MAPPED(hhdr));
    /* Mark the block free before looking at its neighbors.             */
    hhdr -> hb_flags |= FREE_BLK;
    next = (struct hblk *)((ptr_t)hbp + size);
    GET_HDR(next, nexthdr);     /* nexthdr may be 0 (no header there)   */
    prev = GC_free_block_ending_at(hbp);
    /* Coalesce with successor, if possible */
      if(0 != nexthdr && HBLK_IS_FREE(nexthdr) && IS_MAPPED(nexthdr)
         && (signed_word)(hhdr -> hb_sz + nexthdr -> hb_sz) > 0
         /* no overflow */) {
        GC_remove_from_fl(nexthdr);
        hhdr -> hb_sz += nexthdr -> hb_sz;
        GC_remove_header(next);
      }
    /* Coalesce with predecessor, if possible. */
      if (0 != prev) {
        prevhdr = HDR(prev);
        /* Same overflow guard as above for the merged size.            */
        if (IS_MAPPED(prevhdr)
            && (signed_word)(hhdr -> hb_sz + prevhdr -> hb_sz) > 0) {
          GC_remove_from_fl(prevhdr);
          prevhdr -> hb_sz += hhdr -> hb_sz;
#         ifdef USE_MUNMAP
            prevhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
#         endif
          GC_remove_header(hbp);
          /* The merged block is now described by prev/prevhdr.         */
          hbp = prev;
          hhdr = prevhdr;
        }
      }
    /* FIXME: It is not clear we really always want to do these merges  */
    /* with USE_MUNMAP, since it updates ages and hence prevents        */
    /* unmapping.                                                       */

    /* Account only for the newly freed bytes; coalesced neighbors      */
    /* were already counted when they were freed.                       */
    GC_large_free_bytes += size;
    GC_add_to_fl(hbp, hhdr);
}

Generated by: LCOV version 1.11