LCOV - code coverage report
Current view: top level - mm/boehm-gc - allchblk.c
Test: coverage.info
Date: 2015-06-10 18:10:59
                  Hit    Total    Coverage
Lines:            205      280      73.2 %
Functions:         11       14      78.6 %

          Line data    Source code
       1             : /*
       2             :  * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
       3             :  * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
       4             :  * Copyright (c) 1998-1999 by Silicon Graphics.  All rights reserved.
       5             :  * Copyright (c) 1999 by Hewlett-Packard Company. All rights reserved.
       6             :  *
       7             :  * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
       8             :  * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
       9             :  *
      10             :  * Permission is hereby granted to use or copy this program
      11             :  * for any purpose,  provided the above notices are retained on all copies.
      12             :  * Permission to modify the code and to distribute modified code is granted,
      13             :  * provided the above notices are retained, and a notice that the code was
      14             :  * modified is included with the above copyright notice.
      15             :  */
      16             : 
      17             : #include "private/gc_priv.h"
      18             : 
      19             : #include <stdio.h>
      20             : 
      21             : #ifdef GC_USE_ENTIRE_HEAP
      22             :   int GC_use_entire_heap = TRUE;
      23             : #else
      24             :   int GC_use_entire_heap = FALSE;
      25             : #endif
      26             : 
      27             : /*
      28             :  * Free heap blocks are kept on one of several free lists,
      29             :  * depending on the size of the block.  Each free list is doubly linked.
      30             :  * Adjacent free blocks are coalesced.
      31             :  */
      32             : 
      33             : 
      34             : # define MAX_BLACK_LIST_ALLOC (2*HBLKSIZE)
      35             :                 /* largest block we will allocate starting on a black   */
      36             :                 /* listed block.  Must be >= HBLKSIZE.                  */
      37             : 
      38             : 
      39             : # define UNIQUE_THRESHOLD 32
      40             :         /* Sizes up to this many HBLKs each have their own free list    */
      41             : # define HUGE_THRESHOLD 256
      42             :         /* Sizes of at least this many heap blocks are mapped to a      */
      43             :         /* single free list.                                            */
      44             : # define FL_COMPRESSION 8
      45             :         /* In between sizes map this many distinct sizes to a single    */
      46             :         /* bin.                                                         */
      47             : 
       48             : # define N_HBLK_FLS ((HUGE_THRESHOLD - UNIQUE_THRESHOLD)/FL_COMPRESSION \
       49             :                                  + UNIQUE_THRESHOLD)
      50             : 
      51             : #ifndef GC_GCJ_SUPPORT
      52             :   STATIC
      53             : #endif
      54             :   struct hblk * GC_hblkfreelist[N_HBLK_FLS+1] = { 0 };
      55             :                                 /* List of completely empty heap blocks */
      56             :                                 /* Linked through hb_next field of      */
      57             :                                 /* header structure associated with     */
       58             :                                 /* block.  Remains externally visible, */
       59             :                                 /* as GNU GCJ currently uses it.        */
      60             : 
      61             : #ifndef USE_MUNMAP
      62             : 
      63             : #ifndef GC_GCJ_SUPPORT
      64             :   STATIC
      65             : #endif
      66             :   word GC_free_bytes[N_HBLK_FLS+1] = { 0 };
      67             :         /* Number of free bytes on each list.  Remains visible to GCJ.  */
      68             : 
      69             :   /* Return the largest n such that                                     */
       70             :   /* GC_large_allocd_bytes plus the number of free bytes on lists      */
       71             :   /* n .. N_HBLK_FLS is at least GC_max_large_allocd_bytes.             */
      72             :   /* If there is no such n, return 0.                                   */
      73        5621 :   GC_INLINE int GC_enough_large_bytes_left(void)
      74             :   {
      75             :     int n;
      76        5621 :     word bytes = GC_large_allocd_bytes;
      77             : 
      78             :     GC_ASSERT(GC_max_large_allocd_bytes <= GC_heapsize);
      79       25950 :     for (n = N_HBLK_FLS; n >= 0; --n) {
      80       25879 :         bytes += GC_free_bytes[n];
      81       25879 :         if (bytes >= GC_max_large_allocd_bytes) return n;
      82             :     }
      83          71 :     return 0;
      84             :   }
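
A concrete reading of the loop above (numbers invented for illustration):
with GC_large_allocd_bytes = 1 MB, GC_max_large_allocd_bytes = 4 MB, 2 MB
free on list 60 and 1.5 MB free on list 40, the running total reaches 3 MB
at n = 60 and first crosses the 4 MB threshold at n = 40, so 40 is
returned; GC_allochblk then uses 40 as the highest free list it will
search with splitting allowed.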
      85             : 
      86             : # define INCR_FREE_BYTES(n, b) GC_free_bytes[n] += (b);
      87             : 
      88             : # define FREE_ASSERT(e) GC_ASSERT(e)
      89             : 
      90             : #else /* USE_MUNMAP */
      91             : 
      92             : # define INCR_FREE_BYTES(n, b)
      93             : # define FREE_ASSERT(e)
      94             : 
      95             : #endif /* USE_MUNMAP */
      96             : 
      97             : /* Map a number of blocks to the appropriate large block free list index. */
      98      159663 : STATIC int GC_hblk_fl_from_blocks(word blocks_needed)
      99             : {
     100      159663 :     if (blocks_needed <= UNIQUE_THRESHOLD) return (int)blocks_needed;
     101       72765 :     if (blocks_needed >= HUGE_THRESHOLD) return N_HBLK_FLS;
     102       23240 :     return (int)(blocks_needed - UNIQUE_THRESHOLD)/FL_COMPRESSION
     103       23240 :                                         + UNIQUE_THRESHOLD;
     104             : 
     105             : }
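
With the default thresholds this gives N_HBLK_FLS = (256 - 32)/8 + 32 = 60.
The standalone sketch below re-derives the mapping for illustration (it
mirrors the macros and the function above but is not part of the GC
source) and checks a few sample sizes:

    #include <assert.h>

    #define UNIQUE_THRESHOLD 32   /* sizes up to 32 blocks: one list each */
    #define HUGE_THRESHOLD 256    /* sizes >= 256 blocks: one shared list */
    #define FL_COMPRESSION 8      /* in between: 8 sizes share one bin    */
    #define N_HBLK_FLS ((HUGE_THRESHOLD - UNIQUE_THRESHOLD)/FL_COMPRESSION \
                        + UNIQUE_THRESHOLD)

    /* Mirrors GC_hblk_fl_from_blocks above. */
    static int fl_index(unsigned blocks)
    {
        if (blocks <= UNIQUE_THRESHOLD) return (int)blocks;
        if (blocks >= HUGE_THRESHOLD) return N_HBLK_FLS;
        return (int)(blocks - UNIQUE_THRESHOLD)/FL_COMPRESSION
               + UNIQUE_THRESHOLD;
    }

    int main(void)
    {
        assert(N_HBLK_FLS == 60);
        assert(fl_index(1) == 1);      /* unique range: index == size    */
        assert(fl_index(40) == 33);    /* (40 - 32)/8 + 32 = 33          */
        assert(fl_index(255) == 59);   /* (255 - 32)/8 + 32 = 59         */
        assert(fl_index(1000) == 60);  /* huge sizes share the last list */
        return 0;
    }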
     106             : 
     107             : # define PHDR(hhdr) HDR(hhdr -> hb_prev)
     108             : # define NHDR(hhdr) HDR(hhdr -> hb_next)
     109             : 
     110             : # ifdef USE_MUNMAP
     111             : #   define IS_MAPPED(hhdr) (((hhdr) -> hb_flags & WAS_UNMAPPED) == 0)
     112             : # else  /* !USE_MUNMAP */
     113             : #   define IS_MAPPED(hhdr) 1
     114             : # endif /* USE_MUNMAP */
     115             : 
     116             : # if !defined(NO_DEBUGGING)
     117           0 : void GC_print_hblkfreelist(void)
     118             : {
     119             :     struct hblk * h;
     120           0 :     word total_free = 0;
     121             :     hdr * hhdr;
     122             :     word sz;
     123             :     unsigned i;
     124             : 
     125           0 :     for (i = 0; i <= N_HBLK_FLS; ++i) {
     126           0 :       h = GC_hblkfreelist[i];
     127             : #     ifdef USE_MUNMAP
     128             :         if (0 != h) GC_printf("Free list %u:\n", i);
     129             : #     else
     130           0 :         if (0 != h) GC_printf("Free list %u (total size %lu):\n",
     131             :                               i, (unsigned long)GC_free_bytes[i]);
     132             : #     endif
     133           0 :       while (h != 0) {
     134           0 :         hhdr = HDR(h);
     135           0 :         sz = hhdr -> hb_sz;
     136           0 :         total_free += sz;
     137           0 :         GC_printf("\t%p size %lu %s black listed\n", h, (unsigned long)sz,
     138             :                 GC_is_black_listed(h, HBLKSIZE) != 0 ? "start" :
     139           0 :                 GC_is_black_listed(h, hhdr -> hb_sz) != 0 ? "partially" :
     140             :                                                         "not");
     141           0 :         h = hhdr -> hb_next;
     142             :       }
     143             :     }
     144             : #   ifndef USE_MUNMAP
     145           0 :       if (total_free != GC_large_free_bytes) {
     146           0 :         GC_printf("GC_large_free_bytes = %lu (INCONSISTENT!!)\n",
     147             :                   (unsigned long) GC_large_free_bytes);
     148             :       }
     149             : #   endif
     150           0 :     GC_printf("Total of %lu bytes on free list\n", (unsigned long)total_free);
     151           0 : }
     152             : 
     153             : /* Return the free list index on which the block described by the header */
     154             : /* appears, or -1 if it appears nowhere.                                 */
     155           0 : static int free_list_index_of(hdr *wanted)
     156             : {
     157             :     struct hblk * h;
     158             :     hdr * hhdr;
     159             :     int i;
     160             : 
     161           0 :     for (i = 0; i <= N_HBLK_FLS; ++i) {
     162           0 :       h = GC_hblkfreelist[i];
     163           0 :       while (h != 0) {
     164           0 :         hhdr = HDR(h);
     165           0 :         if (hhdr == wanted) return i;
     166           0 :         h = hhdr -> hb_next;
     167             :       }
     168             :     }
     169           0 :     return -1;
     170             : }
     171             : 
     172           0 : void GC_dump_regions(void)
     173             : {
     174             :     unsigned i;
     175             :     ptr_t start, end;
     176             :     ptr_t p;
     177             :     size_t bytes;
     178             :     hdr *hhdr;
     179           0 :     for (i = 0; i < GC_n_heap_sects; ++i) {
     180           0 :         start = GC_heap_sects[i].hs_start;
     181           0 :         bytes = GC_heap_sects[i].hs_bytes;
     182           0 :         end = start + bytes;
     183             :         /* Merge in contiguous sections.        */
     184           0 :           while (i+1 < GC_n_heap_sects && GC_heap_sects[i+1].hs_start == end) {
     185           0 :             ++i;
     186           0 :             end = GC_heap_sects[i].hs_start + GC_heap_sects[i].hs_bytes;
     187             :           }
     188           0 :         GC_printf("***Section from %p to %p\n", start, end);
     189           0 :         for (p = start; p < end;) {
     190           0 :             hhdr = HDR(p);
     191           0 :             if (IS_FORWARDING_ADDR_OR_NIL(hhdr)) {
     192           0 :                 GC_printf("\t%p Missing header!!(%p)\n", p, hhdr);
     193           0 :                 p += HBLKSIZE;
     194           0 :                 continue;
     195             :             }
     196           0 :             if (HBLK_IS_FREE(hhdr)) {
     197           0 :                 int correct_index = GC_hblk_fl_from_blocks(
     198           0 :                                         divHBLKSZ(hhdr -> hb_sz));
     199             :                 int actual_index;
     200             : 
     201           0 :                 GC_printf("\t%p\tfree block of size 0x%lx bytes%s\n", p,
     202             :                           (unsigned long)(hhdr -> hb_sz),
     203             :                           IS_MAPPED(hhdr) ? "" : " (unmapped)");
     204           0 :                 actual_index = free_list_index_of(hhdr);
     205           0 :                 if (-1 == actual_index) {
     206           0 :                     GC_printf("\t\tBlock not on free list %d!!\n",
     207             :                               correct_index);
     208           0 :                 } else if (correct_index != actual_index) {
     209           0 :                     GC_printf("\t\tBlock on list %d, should be on %d!!\n",
     210             :                               actual_index, correct_index);
     211             :                 }
     212           0 :                 p += hhdr -> hb_sz;
     213             :             } else {
     214           0 :                 GC_printf("\t%p\tused for blocks of size 0x%lx bytes\n", p,
     215             :                           (unsigned long)(hhdr -> hb_sz));
     216           0 :                 p += HBLKSIZE * OBJ_SZ_TO_BLOCKS(hhdr -> hb_sz);
     217             :             }
     218             :         }
     219             :     }
     220           0 : }
     221             : 
     222             : # endif /* NO_DEBUGGING */
     223             : 
     224             : /* Initialize hdr for a block containing the indicated size and         */
     225             : /* kind of objects.                                                     */
     226             : /* Return FALSE on failure.                                             */
     227       52105 : static GC_bool setup_header(hdr * hhdr, struct hblk *block, size_t byte_sz,
     228             :                             int kind, unsigned flags)
     229             : {
     230             :     word descr;
     231             : #   ifndef MARK_BIT_PER_OBJ
     232             :       size_t granules;
     233             : #   endif
     234             : 
     235             :     /* Set size, kind and mark proc fields */
     236       52105 :       hhdr -> hb_sz = byte_sz;
     237       52105 :       hhdr -> hb_obj_kind = (unsigned char)kind;
     238       52105 :       hhdr -> hb_flags = (unsigned char)flags;
     239       52105 :       hhdr -> hb_block = block;
     240       52105 :       descr = GC_obj_kinds[kind].ok_descriptor;
     241       52105 :       if (GC_obj_kinds[kind].ok_relocate_descr) descr += byte_sz;
     242       52105 :       hhdr -> hb_descr = descr;
     243             : 
     244             : #   ifdef MARK_BIT_PER_OBJ
     245             :      /* Set hb_inv_sz as portably as possible.                          */
      246             :      /* We set it to the smallest value such that sz * inv_sz >= 2**32. */
     247             :      /* This may be more precision than necessary.                      */
     248             :       if (byte_sz > MAXOBJBYTES) {
     249             :          hhdr -> hb_inv_sz = LARGE_INV_SZ;
     250             :       } else {
     251             :         word inv_sz;
     252             : 
     253             : #       if CPP_WORDSZ == 64
     254             :           inv_sz = ((word)1 << 32)/byte_sz;
     255             :           if (((inv_sz*byte_sz) >> 32) == 0) ++inv_sz;
     256             : #       else  /* 32 bit words */
     257             :           GC_ASSERT(byte_sz >= 4);
     258             :           inv_sz = ((unsigned)1 << 31)/byte_sz;
     259             :           inv_sz *= 2;
     260             :           while (inv_sz*byte_sz > byte_sz) ++inv_sz;
     261             : #       endif
     262             :         hhdr -> hb_inv_sz = inv_sz;
     263             :       }
     264             : #   else /* MARK_BIT_PER_GRANULE */
     265       52105 :       hhdr -> hb_large_block = (unsigned char)(byte_sz > MAXOBJBYTES);
     266       52105 :       granules = BYTES_TO_GRANULES(byte_sz);
     267       52105 :       if (EXPECT(!GC_add_map_entry(granules), FALSE)) {
     268             :         /* Make it look like a valid block. */
     269           0 :         hhdr -> hb_sz = HBLKSIZE;
     270           0 :         hhdr -> hb_descr = 0;
     271           0 :         hhdr -> hb_large_block = TRUE;
     272           0 :         hhdr -> hb_map = 0;
     273           0 :         return FALSE;
     274             :       } else {
     275       52105 :         size_t index = (hhdr -> hb_large_block? 0 : granules);
     276       52105 :         hhdr -> hb_map = GC_obj_map[index];
     277             :       }
     278             : #   endif /* MARK_BIT_PER_GRANULE */
     279             : 
     280             :     /* Clear mark bits */
     281       52105 :     GC_clear_hdr_marks(hhdr);
     282             : 
     283       52105 :     hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
     284       52105 :     return(TRUE);
     285             : }
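
The hb_inv_sz computed in the MARK_BIT_PER_OBJ branch above is a
fixed-point reciprocal: it lets the collector turn a byte displacement
within a block into an object index without a division.  A minimal
standalone check of that property (assuming 64-bit arithmetic; the sample
size 48 is invented, and the in-header fields are not reproduced):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t sz = 48;   /* sample object size in bytes */
        /* Smallest inv_sz with sz * inv_sz >= 2**32, which is what the
           64-bit branch above computes. */
        uint64_t inv_sz = (((uint64_t)1 << 32) + sz - 1) / sz;
        uint64_t k;

        /* For any displacement d == k*sz inside a 4 KB block,
           (d * inv_sz) >> 32 recovers the object index k exactly. */
        for (k = 0; k * sz < 4096; ++k)
            assert(((k * sz * inv_sz) >> 32) == k);
        return 0;
    }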
     286             : 
     287             : #define FL_UNKNOWN -1
     288             : /*
     289             :  * Remove hhdr from the appropriate free list.
     290             :  * We assume it is on the nth free list, or on the size
     291             :  * appropriate free list if n is FL_UNKNOWN.
     292             :  */
     293       79632 : STATIC void GC_remove_from_fl(hdr *hhdr, int n)
     294             : {
     295             :     int index;
     296             : 
     297             :     GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
     298             : #   ifndef USE_MUNMAP
     299             :       /* We always need index to maintain free counts.  */
     300       79632 :       if (FL_UNKNOWN == n) {
     301       27529 :           index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
     302             :       } else {
     303       52103 :           index = n;
     304             :       }
     305             : #   endif
     306       79632 :     if (hhdr -> hb_prev == 0) {
     307             : #       ifdef USE_MUNMAP
     308             :           if (FL_UNKNOWN == n) {
     309             :             index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
     310             :           } else {
     311             :             index = n;
     312             :           }
     313             : #       endif
     314             :         GC_ASSERT(HDR(GC_hblkfreelist[index]) == hhdr);
     315       79562 :         GC_hblkfreelist[index] = hhdr -> hb_next;
     316             :     } else {
     317             :         hdr *phdr;
     318          70 :         GET_HDR(hhdr -> hb_prev, phdr);
     319          70 :         phdr -> hb_next = hhdr -> hb_next;
     320             :     }
     321             :     FREE_ASSERT(GC_free_bytes[index] >= hhdr -> hb_sz);
     322       79632 :     INCR_FREE_BYTES(index, - (signed_word)(hhdr -> hb_sz));
     323       79632 :     if (0 != hhdr -> hb_next) {
     324             :         hdr * nhdr;
     325             :         GC_ASSERT(!IS_FORWARDING_ADDR_OR_NIL(NHDR(hhdr)));
     326       18569 :         GET_HDR(hhdr -> hb_next, nhdr);
     327       18569 :         nhdr -> hb_prev = hhdr -> hb_prev;
     328             :     }
     329       79632 : }
     330             : 
     331             : /*
     332             :  * Return a pointer to the free block ending just before h, if any.
     333             :  */
     334       31050 : STATIC struct hblk * GC_free_block_ending_at(struct hblk *h)
     335             : {
     336       31050 :     struct hblk * p = h - 1;
     337             :     hdr * phdr;
     338             : 
     339       31050 :     GET_HDR(p, phdr);
     340       62657 :     while (0 != phdr && IS_FORWARDING_ADDR_OR_NIL(phdr)) {
     341         557 :         p = FORWARDED_ADDR(p,phdr);
     342         557 :         phdr = HDR(p);
     343             :     }
     344       31050 :     if (0 != phdr) {
     345       30626 :         if(HBLK_IS_FREE(phdr)) {
     346           4 :             return p;
     347             :         } else {
     348       30622 :             return 0;
     349             :         }
     350             :     }
     351         424 :     p = GC_prev_block(h - 1);
     352         424 :     if (0 != p) {
     353         259 :       phdr = HDR(p);
     354         259 :       if (HBLK_IS_FREE(phdr) && (ptr_t)p + phdr -> hb_sz == (ptr_t)h) {
     355         218 :         return p;
     356             :       }
     357             :     }
     358         206 :     return 0;
     359             : }
     360             : 
     361             : /*
     362             :  * Add hhdr to the appropriate free list.
      363             :  * The block is added at the head of the appropriate list.
     364             :  */
     365       79868 : STATIC void GC_add_to_fl(struct hblk *h, hdr *hhdr)
     366             : {
     367       79868 :     int index = GC_hblk_fl_from_blocks(divHBLKSZ(hhdr -> hb_sz));
     368       79868 :     struct hblk *second = GC_hblkfreelist[index];
     369             :     hdr * second_hdr;
     370             : #   if defined(GC_ASSERTIONS) && !defined(USE_MUNMAP)
     371             :       struct hblk *next = (struct hblk *)((word)h + hhdr -> hb_sz);
     372             :       hdr * nexthdr = HDR(next);
     373             :       struct hblk *prev = GC_free_block_ending_at(h);
     374             :       hdr * prevhdr = HDR(prev);
     375             :       GC_ASSERT(nexthdr == 0 || !HBLK_IS_FREE(nexthdr)
     376             :                 || (signed_word)GC_heapsize < 0);
     377             :                 /* In the last case, blocks may be too large to merge. */
     378             :       GC_ASSERT(prev == 0 || !HBLK_IS_FREE(prevhdr)
     379             :                 || (signed_word)GC_heapsize < 0);
     380             : #   endif
     381             :     GC_ASSERT(((hhdr -> hb_sz) & (HBLKSIZE-1)) == 0);
     382       79868 :     GC_hblkfreelist[index] = h;
     383       79868 :     INCR_FREE_BYTES(index, hhdr -> hb_sz);
     384             :     FREE_ASSERT(GC_free_bytes[index] <= GC_large_free_bytes);
     385       79868 :     hhdr -> hb_next = second;
     386       79868 :     hhdr -> hb_prev = 0;
     387       79868 :     if (0 != second) {
     388       18636 :       GET_HDR(second, second_hdr);
     389       18636 :       second_hdr -> hb_prev = h;
     390             :     }
     391       79868 :     hhdr -> hb_flags |= FREE_BLK;
     392       79868 : }
     393             : 
     394             : #ifdef USE_MUNMAP
     395             : 
     396             : #   ifndef MUNMAP_THRESHOLD
     397             : #     define MUNMAP_THRESHOLD 6
     398             : #   endif
     399             : 
     400             : GC_INNER int GC_unmap_threshold = MUNMAP_THRESHOLD;
     401             : 
      402             : /* Unmap blocks that haven't been recently touched.  This is the only */
      403             : /* way blocks are ever unmapped.                                      */
     404             : GC_INNER void GC_unmap_old(void)
     405             : {
     406             :     struct hblk * h;
     407             :     hdr * hhdr;
     408             :     int i;
     409             : 
     410             :     if (GC_unmap_threshold == 0)
     411             :       return; /* unmapping disabled */
     412             : 
     413             :     for (i = 0; i <= N_HBLK_FLS; ++i) {
     414             :       for (h = GC_hblkfreelist[i]; 0 != h; h = hhdr -> hb_next) {
     415             :         hhdr = HDR(h);
     416             :         if (!IS_MAPPED(hhdr)) continue;
     417             : 
     418             :         if ((unsigned short)GC_gc_no - hhdr -> hb_last_reclaimed >
     419             :                 (unsigned short)GC_unmap_threshold) {
     420             :           GC_unmap((ptr_t)h, hhdr -> hb_sz);
     421             :           hhdr -> hb_flags |= WAS_UNMAPPED;
     422             :         }
     423             :       }
     424             :     }
     425             : }
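
Note that hb_last_reclaimed holds only the low 16 bits of GC_gc_no, so
block ages are meaningful modulo 2**16.  A small illustration of how an
age survives counter wraparound when the difference is taken at unsigned
short width (values invented; this sketches the arithmetic rather than
this file's exact expression):

    #include <assert.h>

    int main(void)
    {
        unsigned short gc_no = 3;              /* counter wrapped past 65535 */
        unsigned short last_reclaimed = 65530;
        /* Modular subtraction still yields the true age of 9 collections. */
        assert((unsigned short)(gc_no - last_reclaimed) == 9);
        return 0;
    }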
     426             : 
     427             : /* Merge all unmapped blocks that are adjacent to other free            */
     428             : /* blocks.  This may involve remapping, since all blocks are either     */
     429             : /* fully mapped or fully unmapped.                                      */
     430             : GC_INNER void GC_merge_unmapped(void)
     431             : {
     432             :     struct hblk * h, *next;
     433             :     hdr * hhdr, *nexthdr;
     434             :     word size, nextsize;
     435             :     int i;
     436             : 
     437             :     for (i = 0; i <= N_HBLK_FLS; ++i) {
     438             :       h = GC_hblkfreelist[i];
     439             :       while (h != 0) {
     440             :         GET_HDR(h, hhdr);
     441             :         size = hhdr->hb_sz;
     442             :         next = (struct hblk *)((word)h + size);
     443             :         GET_HDR(next, nexthdr);
     444             :         /* Coalesce with successor, if possible */
     445             :           if (0 != nexthdr && HBLK_IS_FREE(nexthdr)
     446             :               && (signed_word) (size + (nextsize = nexthdr->hb_sz)) > 0
     447             :                  /* no pot. overflow */) {
     448             :             /* Note that we usually try to avoid adjacent free blocks   */
     449             :             /* that are either both mapped or both unmapped.  But that  */
     450             :             /* isn't guaranteed to hold since we remap blocks when we   */
     451             :             /* split them, and don't merge at that point.  It may also  */
     452             :             /* not hold if the merged block would be too big.           */
     453             :             if (IS_MAPPED(hhdr) && !IS_MAPPED(nexthdr)) {
     454             :               /* make both consistent, so that we can merge */
     455             :                 if (size > nextsize) {
     456             :                   GC_remap((ptr_t)next, nextsize);
     457             :                 } else {
     458             :                   GC_unmap((ptr_t)h, size);
     459             :                   GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nextsize);
     460             :                   hhdr -> hb_flags |= WAS_UNMAPPED;
     461             :                 }
     462             :             } else if (IS_MAPPED(nexthdr) && !IS_MAPPED(hhdr)) {
     463             :               if (size > nextsize) {
     464             :                 GC_unmap((ptr_t)next, nextsize);
     465             :                 GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nextsize);
     466             :               } else {
     467             :                 GC_remap((ptr_t)h, size);
     468             :                 hhdr -> hb_flags &= ~WAS_UNMAPPED;
     469             :                 hhdr -> hb_last_reclaimed = nexthdr -> hb_last_reclaimed;
     470             :               }
     471             :             } else if (!IS_MAPPED(hhdr) && !IS_MAPPED(nexthdr)) {
     472             :               /* Unmap any gap in the middle */
     473             :                 GC_unmap_gap((ptr_t)h, size, (ptr_t)next, nextsize);
     474             :             }
     475             :             /* If they are both unmapped, we merge, but leave unmapped. */
     476             :             GC_remove_from_fl(hhdr, i);
     477             :             GC_remove_from_fl(nexthdr, FL_UNKNOWN);
     478             :             hhdr -> hb_sz += nexthdr -> hb_sz;
     479             :             GC_remove_header(next);
     480             :             GC_add_to_fl(h, hhdr);
     481             :             /* Start over at beginning of list */
     482             :             h = GC_hblkfreelist[i];
     483             :           } else /* not mergable with successor */ {
     484             :             h = hhdr -> hb_next;
     485             :           }
     486             :       } /* while (h != 0) ... */
     487             :     } /* for ... */
     488             : }
     489             : 
     490             : #endif /* USE_MUNMAP */
     491             : 
     492             : /*
     493             :  * Return a pointer to a block starting at h of length bytes.
     494             :  * Memory for the block is mapped.
     495             :  * Remove the block from its free list, and return the remainder (if any)
     496             :  * to its appropriate free list.
     497             :  * May fail by returning 0.
     498             :  * The header for the returned block must be set up by the caller.
     499             :  * If the return value is not 0, then hhdr is the header for it.
     500             :  */
     501       52091 : STATIC struct hblk * GC_get_first_part(struct hblk *h, hdr *hhdr,
     502             :                                        size_t bytes, int index)
     503             : {
     504       52091 :     word total_size = hhdr -> hb_sz;
     505             :     struct hblk * rest;
     506             :     hdr * rest_hdr;
     507             : 
     508             :     GC_ASSERT((total_size & (HBLKSIZE-1)) == 0);
     509       52091 :     GC_remove_from_fl(hhdr, index);
     510       52091 :     if (total_size == bytes) return h;
     511       48801 :     rest = (struct hblk *)((word)h + bytes);
     512       48801 :     rest_hdr = GC_install_header(rest);
     513       48801 :     if (0 == rest_hdr) {
     514             :         /* FIXME: This is likely to be very bad news ... */
     515           0 :         WARN("Header allocation failed: Dropping block.\n", 0);
     516           0 :         return(0);
     517             :     }
     518       48801 :     rest_hdr -> hb_sz = total_size - bytes;
     519       48801 :     rest_hdr -> hb_flags = 0;
     520             : #   ifdef GC_ASSERTIONS
     521             :       /* Mark h not free, to avoid assertion about adjacent free blocks. */
     522             :         hhdr -> hb_flags &= ~FREE_BLK;
     523             : #   endif
     524       48801 :     GC_add_to_fl(rest, rest_hdr);
     525       48801 :     return h;
     526             : }
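
For example, satisfying a 2-block request from a 10-block free block
returns the first 2 blocks to the caller and files the trailing 8-block
remainder, under a freshly installed header, on the free list for its new
size.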
     527             : 
     528             : /*
     529             :  * H is a free block.  N points at an address inside it.
     530             :  * A new header for n has already been set up.  Fix up h's header
      531             :  * to reflect the fact that it is being split, and move it to the
     532             :  * appropriate free list.
     533             :  * N replaces h in the original free list.
     534             :  *
      535             :  * Nhdr is not completely filled in, since it is about to be allocated.
     536             :  * It may in fact end up on the wrong free list for its size.
     537             :  * That's not a disaster, since n is about to be allocated
     538             :  * by our caller.
     539             :  * (Hence adding it to a free list is silly.  But this path is hopefully
     540             :  * rare enough that it doesn't matter.  The code is cleaner this way.)
     541             :  */
     542          17 : STATIC void GC_split_block(struct hblk *h, hdr *hhdr, struct hblk *n,
     543             :                            hdr *nhdr, int index /* Index of free list */)
     544             : {
     545          17 :     word total_size = hhdr -> hb_sz;
     546          17 :     word h_size = (word)n - (word)h;
     547          17 :     struct hblk *prev = hhdr -> hb_prev;
     548          17 :     struct hblk *next = hhdr -> hb_next;
     549             : 
     550             :     /* Replace h with n on its freelist */
     551          17 :       nhdr -> hb_prev = prev;
     552          17 :       nhdr -> hb_next = next;
     553          17 :       nhdr -> hb_sz = total_size - h_size;
     554          17 :       nhdr -> hb_flags = 0;
     555          17 :       if (0 != prev) {
     556           0 :         HDR(prev) -> hb_next = n;
     557             :       } else {
     558          17 :         GC_hblkfreelist[index] = n;
     559             :       }
     560          17 :       if (0 != next) {
     561           1 :         HDR(next) -> hb_prev = n;
     562             :       }
     563          17 :       INCR_FREE_BYTES(index, -(signed_word)h_size);
     564             :       FREE_ASSERT(GC_free_bytes[index] > 0);
     565             : #   ifdef USE_MUNMAP
     566             :       hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
     567             : #   endif
     568          17 :     hhdr -> hb_sz = h_size;
     569          17 :     GC_add_to_fl(h, hhdr);
     570          17 :     nhdr -> hb_flags |= FREE_BLK;
     571          17 : }
     572             : 
     573             : STATIC struct hblk *
     574             : GC_allochblk_nth(size_t sz/* bytes */, int kind, unsigned flags, int n,
     575             :                  GC_bool may_split);
     576             : 
     577             : /*
     578             :  * Allocate (and return pointer to) a heap block
     579             :  *   for objects of size sz bytes, searching the nth free list.
     580             :  *
     581             :  * NOTE: We set obj_map field in header correctly.
     582             :  *       Caller is responsible for building an object freelist in block.
     583             :  *
     584             :  * The client is responsible for clearing the block, if necessary.
     585             :  */
     586             : GC_INNER struct hblk *
     587       52266 : GC_allochblk(size_t sz, int kind, unsigned flags/* IGNORE_OFF_PAGE or 0 */)
     588             : {
     589             :     word blocks;
     590             :     int start_list;
     591             :     int i;
     592             :     struct hblk *result;
     593             :     int split_limit; /* Highest index of free list whose blocks we      */
     594             :                      /* split.                                          */
     595             : 
     596             :     GC_ASSERT((sz & (GRANULE_BYTES - 1)) == 0);
     597       52266 :     blocks = OBJ_SZ_TO_BLOCKS(sz);
     598       52266 :     if ((signed_word)(blocks * HBLKSIZE) < 0) {
     599           0 :       return 0;
     600             :     }
     601       52266 :     start_list = GC_hblk_fl_from_blocks(blocks);
     602             :     /* Try for an exact match first. */
     603       52266 :     result = GC_allochblk_nth(sz, kind, flags, start_list, FALSE);
     604       52266 :     if (0 != result) return result;
     605      238477 :     if (GC_use_entire_heap || GC_dont_gc
     606       97952 :         || USED_HEAP_SIZE < GC_requested_heapsize
     607       97170 :         || GC_incremental || !GC_should_collect()) {
     608             :         /* Should use more of the heap, even if it requires splitting. */
     609       43355 :         split_limit = N_HBLK_FLS;
     610             :     } else {
     611             : #     ifdef USE_MUNMAP
     612             :         /* avoid splitting, since that might require remapping */
     613             :         split_limit = 0;
     614             : #     else
     615        5621 :         if (GC_finalizer_bytes_freed > (GC_heapsize >> 4)) {
     616             :           /* If we are deallocating lots of memory from         */
     617             :           /* finalizers, fail and collect sooner rather         */
     618             :           /* than later.                                        */
     619           0 :           split_limit = 0;
     620             :         } else {
     621             :           /* If we have enough large blocks left to cover any   */
     622             :           /* previous request for large blocks, we go ahead     */
     623             :           /* and split.  Assuming a steady state, that should   */
     624             :           /* be safe.  It means that we can use the full        */
     625             :           /* heap if we allocate only small objects.            */
     626        5621 :           split_limit = GC_enough_large_bytes_left();
     627             :         }
     628             : #     endif
     629             :     }
     630       48976 :     if (start_list < UNIQUE_THRESHOLD) {
     631             :       /* No reason to try start_list again, since all blocks are exact  */
     632             :       /* matches.                                                       */
     633       48787 :       ++start_list;
     634             :     }
     635     2219423 :     for (i = start_list; i <= split_limit; ++i) {
     636     2219248 :         struct hblk * result = GC_allochblk_nth(sz, kind, flags, i, TRUE);
     637     2219248 :         if (0 != result) return result;
     638             :     }
     639         175 :     return 0;
     640             : }
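
Concretely: a request for 3 blocks computes start_list = 3 and first asks
GC_allochblk_nth for an exact fit there; if that fails and splitting is
permitted, start_list is bumped past the exact-match list and lists 4
through split_limit are searched with may_split set, so the first
sufficiently large block found may be split.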
     641             : 
     642             : STATIC long GC_large_alloc_warn_suppressed = 0;
     643             :                         /* Number of warnings suppressed so far.        */
     644             : 
     645             : /*
     646             :  * The same, but with search restricted to nth free list.
     647             :  * Flags is IGNORE_OFF_PAGE or zero.
      648             :  * As above, sz is in bytes.
     649             :  * The may_split flag indicates whether it's OK to split larger blocks.
     650             :  */
     651             : STATIC struct hblk *
     652     2271526 : GC_allochblk_nth(size_t sz, int kind, unsigned flags, int n,
     653             :                  GC_bool may_split)
     654             : {
     655             :     struct hblk *hbp;
     656             :     hdr * hhdr;         /* Header corr. to hbp */
     657             :                         /* Initialized after loop if hbp !=0    */
     658             :                         /* Gcc uninitialized use warning is bogus.      */
     659             :     struct hblk *thishbp;
      660             :     hdr * thishdr;              /* Header corr. to thishbp      */
     661             :     signed_word size_needed;    /* number of bytes in requested objects */
     662             :     signed_word size_avail;     /* bytes available in this block        */
     663             : 
     664     2271526 :     size_needed = HBLKSIZE * OBJ_SZ_TO_BLOCKS(sz);
     665             : 
     666             :     /* search for a big enough block in free list */
     667     2271526 :         hbp = GC_hblkfreelist[n];
     668     2271582 :         for(; 0 != hbp; hbp = hhdr -> hb_next) {
     669       52159 :             GET_HDR(hbp, hhdr);
     670       52159 :             size_avail = hhdr->hb_sz;
     671       52159 :             if (size_avail < size_needed) continue;
     672       52146 :             if (size_avail != size_needed) {
     673             :               signed_word next_size;
     674             : 
     675       48808 :               if (!may_split) continue;
     676             :               /* If the next heap block is obviously better, go on.     */
     677             :               /* This prevents us from disassembling a single large block */
     678             :               /* to get tiny blocks.                                    */
     679       48807 :               thishbp = hhdr -> hb_next;
     680       48807 :               if (thishbp != 0) {
     681        1465 :                 GET_HDR(thishbp, thishdr);
     682        1465 :                 next_size = (signed_word)(thishdr -> hb_sz);
     683        1468 :                 if (next_size < size_avail
     684             :                     && next_size >= size_needed
     685           3 :                     && !GC_is_black_listed(thishbp, (word)size_needed)) {
     686           3 :                     continue;
     687             :                 }
     688             :               }
     689             :             }
     690       52142 :             if ( !IS_UNCOLLECTABLE(kind) && (kind != PTRFREE
     691             :                         || size_needed > (signed_word)MAX_BLACK_LIST_ALLOC)) {
     692       34183 :               struct hblk * lasthbp = hbp;
     693       34183 :               ptr_t search_end = (ptr_t)hbp + size_avail - size_needed;
     694       34183 :               signed_word orig_avail = size_avail;
     695       34183 :               signed_word eff_size_needed = (flags & IGNORE_OFF_PAGE) != 0 ?
     696             :                                                 (signed_word)HBLKSIZE
     697       34183 :                                                 : size_needed;
     698             : 
     699             : 
     700      102652 :               while ((ptr_t)lasthbp <= search_end
     701       68469 :                      && (thishbp = GC_is_black_listed(lasthbp,
     702             :                                                       (word)eff_size_needed))
     703             :                         != 0) {
     704          77 :                 lasthbp = thishbp;
     705             :               }
     706       34183 :               size_avail -= (ptr_t)lasthbp - (ptr_t)hbp;
     707       34183 :               thishbp = lasthbp;
     708       34183 :               if (size_avail >= size_needed) {
     709       34149 :                 if (thishbp != hbp &&
     710          17 :                     0 != (thishdr = GC_install_header(thishbp))) {
     711             :                   /* Make sure it's mapped before we mangle it. */
     712             : #                   ifdef USE_MUNMAP
     713             :                       if (!IS_MAPPED(hhdr)) {
     714             :                         GC_remap((ptr_t)hbp, hhdr -> hb_sz);
     715             :                         hhdr -> hb_flags &= ~WAS_UNMAPPED;
     716             :                       }
     717             : #                   endif
     718             :                   /* Split the block at thishbp */
     719          17 :                       GC_split_block(hbp, hhdr, thishbp, thishdr, n);
     720             :                   /* Advance to thishbp */
     721          17 :                       hbp = thishbp;
     722          17 :                       hhdr = thishdr;
     723             :                       /* We must now allocate thishbp, since it may     */
     724             :                       /* be on the wrong free list.                     */
     725             :                 }
     726          51 :               } else if (size_needed > (signed_word)BL_LIMIT
     727          51 :                          && orig_avail - size_needed
     728           0 :                             > (signed_word)BL_LIMIT) {
     729             :                 /* Punt, since anything else risks unreasonable heap growth. */
     730           0 :                 if (++GC_large_alloc_warn_suppressed
     731             :                     >= GC_large_alloc_warn_interval) {
     732           0 :                   WARN("Repeated allocation of very large block "
     733             :                        "(appr. size %" GC_PRIdPTR "):\n"
     734             :                        "\tMay lead to memory leak and poor performance.\n",
     735             :                        size_needed);
     736           0 :                   GC_large_alloc_warn_suppressed = 0;
     737             :                 }
     738           0 :                 size_avail = orig_avail;
     739          51 :               } else if (size_avail == 0 && size_needed == HBLKSIZE
     740             :                          && IS_MAPPED(hhdr)) {
     741          50 :                 if (!GC_find_leak) {
     742             :                   static unsigned count = 0;
     743             : 
     744             :                   /* The block is completely blacklisted.  We need      */
     745             :                   /* to drop some such blocks, since otherwise we spend */
     746             :                   /* all our time traversing them if pointerfree        */
     747             :                   /* blocks are unpopular.                              */
     748             :                   /* A dropped block will be reconsidered at next GC.   */
     749          50 :                   if ((++count & 3) == 0) {
     750             :                     /* Allocate and drop the block in small chunks, to  */
     751             :                     /* maximize the chance that we will recover some    */
     752             :                     /* later.                                           */
     753          12 :                       word total_size = hhdr -> hb_sz;
     754          12 :                       struct hblk * limit = hbp + divHBLKSZ(total_size);
     755             :                       struct hblk * h;
     756          12 :                       struct hblk * prev = hhdr -> hb_prev;
     757             : 
     758          12 :                       GC_large_free_bytes -= total_size;
     759          12 :                       GC_bytes_dropped += total_size;
     760          12 :                       GC_remove_from_fl(hhdr, n);
     761          26 :                       for (h = hbp; h < limit; h++) {
     762          14 :                         if (h == hbp || 0 != (hhdr = GC_install_header(h))) {
     763          14 :                           (void) setup_header(
     764             :                                   hhdr, h,
     765             :                                   HBLKSIZE,
     766             :                                   PTRFREE, 0); /* Can't fail */
     767          14 :                           if (GC_debugging_started) {
     768           0 :                             BZERO(h, HBLKSIZE);
     769             :                           }
     770             :                         }
     771             :                       }
     772             :                     /* Restore hbp to point at free block */
     773          12 :                       hbp = prev;
     774          12 :                       if (0 == hbp) {
     775          12 :                         return GC_allochblk_nth(sz, kind, flags, n, may_split);
     776             :                       }
     777           0 :                       hhdr = HDR(hbp);
     778             :                   }
     779             :                 }
     780             :               }
     781             :             }
     782       52130 :             if( size_avail >= size_needed ) {
     783             : #               ifdef USE_MUNMAP
     784             :                   if (!IS_MAPPED(hhdr)) {
     785             :                     GC_remap((ptr_t)hbp, hhdr -> hb_sz);
     786             :                     hhdr -> hb_flags &= ~WAS_UNMAPPED;
     787             :                     /* Note: This may leave adjacent, mapped free blocks. */
     788             :                   }
     789             : #               endif
     790             :                 /* hbp may be on the wrong freelist; the parameter n    */
     791             :                 /* is important.                                        */
     792       52091 :                 hbp = GC_get_first_part(hbp, hhdr, size_needed, n);
     793       52091 :                 break;
     794             :             }
     795             :         }
     796             : 
     797     2271514 :     if (0 == hbp) return 0;
     798             : 
     799             :     /* Add it to map of valid blocks */
     800       52091 :         if (!GC_install_counts(hbp, (word)size_needed)) return(0);
     801             :         /* This leaks memory under very rare conditions. */
     802             : 
     803             :     /* Set up header */
     804       52091 :         if (!setup_header(hhdr, hbp, sz, kind, flags)) {
     805           0 :             GC_remove_counts(hbp, (word)size_needed);
     806           0 :             return(0); /* ditto */
     807             :         }
     808             : #   ifndef GC_DISABLE_INCREMENTAL
     809             :         /* Notify virtual dirty bit implementation that we are about to */
     810             :         /* write.  Ensure that pointerfree objects are not protected if */
     811             :         /* it's avoidable.  This also ensures that newly allocated      */
     812             :         /* blocks are treated as dirty.  Necessary since we don't       */
     813             :         /* protect free blocks.                                         */
     814             :         GC_ASSERT((size_needed & (HBLKSIZE-1)) == 0);
     815       52091 :         GC_remove_protection(hbp, divHBLKSZ(size_needed),
     816             :                              (hhdr -> hb_descr == 0) /* pointer-free */);
     817             : #   endif
     818             :     /* We just successfully allocated a block.  Restart count of        */
     819             :     /* consecutive failures.                                            */
     820       52091 :     GC_fail_count = 0;
     821             : 
     822       52091 :     GC_large_free_bytes -= size_needed;
     823             : 
     824             :     GC_ASSERT(IS_MAPPED(hhdr));
     825       52091 :     return( hbp );
     826             : }
     827             : 
     828             : /*
     829             :  * Free a heap block.
     830             :  *
     831             :  * Coalesce the block with its neighbors if possible.
     832             :  *
     833             :  * All mark words are assumed to be cleared.
     834             :  */
     835       31050 : GC_INNER void GC_freehblk(struct hblk *hbp)
     836             : {
     837             :     struct hblk *next, *prev;
     838             :     hdr *hhdr, *prevhdr, *nexthdr;
     839             :     signed_word size;
     840             : 
     841       31050 :     GET_HDR(hbp, hhdr);
     842       31050 :     size = hhdr->hb_sz;
     843       31050 :     size = HBLKSIZE * OBJ_SZ_TO_BLOCKS(size);
     844       31050 :     if (size <= 0)
     845           0 :       ABORT("Deallocating excessively large block.  Too large an allocation?");
     846             :       /* Probably possible if we try to allocate more than half the address */
     847             :       /* space at once.  If we don't catch it here, strange things happen   */
     848             :       /* later.                                                             */
     849       31050 :     GC_remove_counts(hbp, (word)size);
     850       31050 :     hhdr->hb_sz = size;
     851             : #   ifdef USE_MUNMAP
     852             :       hhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
     853             : #   endif
     854             : 
     855             :     /* Check for duplicate deallocation in the easy case */
     856       31050 :       if (HBLK_IS_FREE(hhdr)) {
     857           0 :         if (GC_print_stats)
     858           0 :           GC_log_printf("Duplicate large block deallocation of %p\n", hbp);
     859           0 :         ABORT("Duplicate large block deallocation");
     860             :       }
     861             : 
     862             :     GC_ASSERT(IS_MAPPED(hhdr));
     863       31050 :     hhdr -> hb_flags |= FREE_BLK;
     864       31050 :     next = (struct hblk *)((word)hbp + size);
     865       31050 :     GET_HDR(next, nexthdr);
     866       31050 :     prev = GC_free_block_ending_at(hbp);
     867             :     /* Coalesce with successor, if possible */
     868       58357 :       if(0 != nexthdr && HBLK_IS_FREE(nexthdr) && IS_MAPPED(nexthdr)
     869       27307 :          && (signed_word)(hhdr -> hb_sz + nexthdr -> hb_sz) > 0
     870             :          /* no overflow */) {
     871       27307 :         GC_remove_from_fl(nexthdr, FL_UNKNOWN);
     872       27307 :         hhdr -> hb_sz += nexthdr -> hb_sz;
     873       27307 :         GC_remove_header(next);
     874             :       }
     875             :     /* Coalesce with predecessor, if possible. */
     876       31050 :       if (0 != prev) {
     877         222 :         prevhdr = HDR(prev);
     878         222 :         if (IS_MAPPED(prevhdr)
     879         222 :             && (signed_word)(hhdr -> hb_sz + prevhdr -> hb_sz) > 0) {
     880         222 :           GC_remove_from_fl(prevhdr, FL_UNKNOWN);
     881         222 :           prevhdr -> hb_sz += hhdr -> hb_sz;
     882             : #         ifdef USE_MUNMAP
     883             :             prevhdr -> hb_last_reclaimed = (unsigned short)GC_gc_no;
     884             : #         endif
     885         222 :           GC_remove_header(hbp);
     886         222 :           hbp = prev;
     887         222 :           hhdr = prevhdr;
     888             :         }
     889             :       }
     890             :     /* FIXME: It is not clear we really always want to do these merges  */
     891             :     /* with USE_MUNMAP, since it updates ages and hence prevents        */
     892             :     /* unmapping.                                                       */
     893             : 
     894       31050 :     GC_large_free_bytes += size;
     895       31050 :     GC_add_to_fl(hbp, hhdr);
     896       31050 : }
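
To trace the merge order above with an example: if block B is freed while
its higher-address neighbor C and its lower-address neighbor A are already
free, the successor merge first folds C's size into B and removes C's
header; the predecessor merge then folds the combined B+C into A and
removes B's header; the final GC_add_to_fl call files A, now spanning A
through C, on the free list for its combined size.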

Generated by: LCOV version 1.11