Line data Source code
1 : /*
2 : * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3 : * Copyright (c) 1991-1996 by Xerox Corporation. All rights reserved.
4 : * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
5 : * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
6 : *
7 : * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
8 : * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
9 : *
10 : * Permission is hereby granted to use or copy this program
11 : * for any purpose, provided the above notices are retained on all copies.
12 : * Permission to modify the code and to distribute modified code is granted,
13 : * provided the above notices are retained, and a notice that the code was
14 : * modified is included with the above copyright notice.
15 : */
16 :
17 : #include "private/gc_priv.h"
18 :
19 : #ifdef ENABLE_DISCLAIM
20 : # include "gc_disclaim.h"
21 : #endif
22 :
23 : #include <stdio.h>
24 :
25 : GC_INNER signed_word GC_bytes_found = 0;
26 : /* Number of bytes of memory reclaimed */
27 : /* minus the number of bytes originally */
28 : /* on free lists which we had to drop. */
29 :
30 : #if defined(PARALLEL_MARK)
31 : GC_INNER word GC_fl_builder_count = 0;
32 : /* Number of threads currently building free lists without */
33 : /* holding GC lock. It is not safe to collect if this is */
34 : /* nonzero. */
35 : #endif /* PARALLEL_MARK */
36 :
37 : /* We defer printing of leaked objects until we're done with the GC */
38 : /* cycle, since the routine for printing objects needs to run outside */
39 : /* the collector, e.g. without the allocation lock. */
40 : #ifndef MAX_LEAKED
41 : # define MAX_LEAKED 40
42 : #endif
43 : STATIC ptr_t GC_leaked[MAX_LEAKED] = { NULL };
44 : STATIC unsigned GC_n_leaked = 0;
45 :
46 : GC_INNER GC_bool GC_have_errors = FALSE;
47 :
48 : #if !defined(EAGER_SWEEP) && defined(ENABLE_DISCLAIM)
49 : STATIC void GC_reclaim_unconditionally_marked(void);
50 : #endif
51 :
52 0 : GC_INLINE void GC_add_leaked(ptr_t leaked)
53 : {
54 : # ifndef SHORT_DBG_HDRS
55 0 : if (GC_findleak_delay_free && !GC_check_leaked(leaked))
56 0 : return;
57 : # endif
58 :
59 0 : GC_have_errors = TRUE;
60 0 : if (GC_n_leaked < MAX_LEAKED) {
61 0 : GC_leaked[GC_n_leaked++] = leaked;
62 : /* Make sure it's not reclaimed this cycle */
63 0 : GC_set_mark_bit(leaked);
64 : }
65 : }
66 :
/* Print all objects on the list after printing any smashed objects.   */
/* Clear both lists.  Called without the allocation lock held.         */
GC_INNER void GC_print_all_errors(void)
{
    static GC_bool printing_errors = FALSE;
                /* Reentrancy guard: only one thread prints at a time. */
    GC_bool have_errors;
    unsigned i, n_leaked;
    ptr_t leaked[MAX_LEAKED];
    DCL_LOCK_STATE;

    LOCK();
    if (printing_errors) {
        UNLOCK();
        return;
    }
    have_errors = GC_have_errors;
    printing_errors = TRUE;
    n_leaked = GC_n_leaked;
    GC_ASSERT(n_leaked <= MAX_LEAKED);
    /* Snapshot the leaked-object list into a local buffer so the lock */
    /* can be dropped before any printing is done.                     */
    BCOPY(GC_leaked, leaked, n_leaked * sizeof(ptr_t));
    GC_n_leaked = 0;
    BZERO(GC_leaked, n_leaked * sizeof(ptr_t));
    UNLOCK();

    if (GC_debugging_started) {
      GC_print_all_smashed();
    } else {
      have_errors = FALSE;
    }

    if (n_leaked > 0) {
        GC_err_printf("Found %u leaked objects:\n", n_leaked);
        have_errors = TRUE;
    }
    for (i = 0; i < n_leaked; i++) {
        ptr_t p = leaked[i];
        GC_print_heap_obj(p);
        GC_free(p);
    }

    if (have_errors
#       ifndef GC_ABORT_ON_LEAK
          && GETENV("GC_ABORT_ON_LEAK") != NULL
#       endif
        ) {
      ABORT("Leaked or smashed objects encountered");
    }

    LOCK();
    printing_errors = FALSE;
    UNLOCK();
}
119 :
120 :
121 : /*
122 : * reclaim phase
123 : *
124 : */
125 :
126 : /* Test whether a block is completely empty, i.e. contains no marked */
127 : /* objects. This does not require the block to be in physical memory. */
128 80172 : GC_INNER GC_bool GC_block_empty(hdr *hhdr)
129 : {
130 80172 : return (hhdr -> hb_n_marks == 0);
131 : }
132 :
133 44808 : STATIC GC_bool GC_block_nearly_full(hdr *hhdr)
134 : {
135 44808 : return (hhdr -> hb_n_marks > 7 * HBLK_OBJS(hhdr -> hb_sz)/8);
136 : }
137 :
138 : /* FIXME: This should perhaps again be specialized for USE_MARK_BYTES */
139 : /* and USE_MARK_BITS cases. */
140 :
/*
 * Restore unmarked small objects in h of size sz to the object
 * free list.  Returns the new list.
 * Clears unmarked objects.  Sz is in bytes.
 * The number of bytes reclaimed is added to *count.
 */
STATIC ptr_t GC_reclaim_clear(struct hblk *hbp, hdr *hhdr, size_t sz,
                              ptr_t list, signed_word *count)
{
    word bit_no = 0;
    word *p, *q, *plim;
    signed_word n_bytes_found = 0;

    GC_ASSERT(hhdr == GC_find_header((ptr_t)hbp));
    GC_ASSERT(sz == hhdr -> hb_sz);
    GC_ASSERT((sz & (BYTES_PER_WORD-1)) == 0);
    p = (word *)(hbp->hb_body);
    plim = (word *)(hbp->hb_body + HBLKSIZE - sz);

    /* go through all words in block */
        while ((word)p <= (word)plim) {
            if (mark_bit_from_hdr(hhdr, bit_no)) {
                /* Marked: live object, just skip over it. */
                p = (word *)((ptr_t)p + sz);
            } else {
                n_bytes_found += sz;
                /* object is available - put on list */
                    obj_link(p) = list;
                    list = ((ptr_t)p);
                /* Clear object, advance p to next object in the process */
                    q = (word *)((ptr_t)p + sz);
#                   ifdef USE_MARK_BYTES
                      /* Double-word-aligned clearing; the first word   */
                      /* holds the free-list link, so only p[1] onward  */
                      /* of the first pair needs zeroing.               */
                      GC_ASSERT(!(sz & 1)
                                && !((word)p & (2 * sizeof(word) - 1)));
                      p[1] = 0;
                      p += 2;
                      while ((word)p < (word)q) {
                        CLEAR_DOUBLE(p);
                        p += 2;
                      }
#                   else
                      p++; /* Skip link field */
                      while ((word)p < (word)q) {
                        *p++ = 0;
                      }
#                   endif
            }
            /* Advance to the mark bit of the next object slot. */
            bit_no += MARK_BIT_OFFSET(sz);
        }
    *count += n_bytes_found;
    return(list);
}
191 :
192 : /* The same thing, but don't clear objects: */
193 1765 : STATIC ptr_t GC_reclaim_uninit(struct hblk *hbp, hdr *hhdr, size_t sz,
194 : ptr_t list, signed_word *count)
195 : {
196 1765 : word bit_no = 0;
197 : word *p, *plim;
198 1765 : signed_word n_bytes_found = 0;
199 :
200 : GC_ASSERT(sz == hhdr -> hb_sz);
201 1765 : p = (word *)(hbp->hb_body);
202 1765 : plim = (word *)((ptr_t)hbp + HBLKSIZE - sz);
203 :
204 : /* go through all words in block */
205 106673 : while ((word)p <= (word)plim) {
206 103143 : if (!mark_bit_from_hdr(hhdr, bit_no)) {
207 63480 : n_bytes_found += sz;
208 : /* object is available - put on list */
209 63480 : obj_link(p) = list;
210 63480 : list = ((ptr_t)p);
211 : }
212 103143 : p = (word *)((ptr_t)p + sz);
213 103143 : bit_no += MARK_BIT_OFFSET(sz);
214 : }
215 1765 : *count += n_bytes_found;
216 1765 : return(list);
217 : }
218 :
219 : #ifdef ENABLE_DISCLAIM
  /* Call reclaim notifier for block's kind on each unmarked object in  */
  /* block, all within a pair of corresponding enter/leave callbacks.   */
  /* An unmarked object for which the disclaim procedure returns        */
  /* nonzero is resurrected: its mark count is restored and it is not   */
  /* placed on the free list.                                           */
  STATIC ptr_t GC_disclaim_and_reclaim(struct hblk *hbp, hdr *hhdr, size_t sz,
                                       ptr_t list, signed_word *count)
  {
    int bit_no = 0;
    word *p, *q, *plim;
    signed_word n_bytes_found = 0;
    struct obj_kind *ok = &GC_obj_kinds[hhdr->hb_obj_kind];
    int (GC_CALLBACK *disclaim)(void *) = ok->ok_disclaim_proc;

    GC_ASSERT(sz == hhdr -> hb_sz);
    p = (word *)(hbp -> hb_body);
    plim = (word *)((ptr_t)p + HBLKSIZE - sz);

    while ((word)p <= (word)plim) {
        int marked = mark_bit_from_hdr(hhdr, bit_no);
        if (!marked && (*disclaim)(p)) {
            /* The notifier vetoed reclamation: keep the object alive. */
            hhdr -> hb_n_marks++;
            marked = 1;
        }
        if (marked)
            p = (word *)((ptr_t)p + sz);
        else {
            n_bytes_found += sz;
            /* object is available - put on list */
            obj_link(p) = list;
            list = ((ptr_t)p);
            /* Clear object, advance p to next object in the process */
            q = (word *)((ptr_t)p + sz);
#           ifdef USE_MARK_BYTES
              GC_ASSERT((sz & 1) == 0);
              GC_ASSERT(((word)p & (2 * sizeof(word) - 1)) == 0);
              p[1] = 0;
              p += 2;
              while ((word)p < (word)q) {
                CLEAR_DOUBLE(p);
                p += 2;
              }
#           else
              p++; /* Skip link field */
              while ((word)p < (word)q) {
                *p++ = 0;
              }
#           endif
        }
        bit_no += MARK_BIT_OFFSET(sz);
    }
    *count += n_bytes_found;
    return list;
  }
271 : #endif /* ENABLE_DISCLAIM */
272 :
273 : /* Don't really reclaim objects, just check for unmarked ones: */
274 0 : STATIC void GC_reclaim_check(struct hblk *hbp, hdr *hhdr, word sz)
275 : {
276 : word bit_no;
277 : ptr_t p, plim;
278 : GC_ASSERT(sz == hhdr -> hb_sz);
279 :
280 : /* go through all words in block */
281 0 : p = hbp->hb_body;
282 0 : plim = p + HBLKSIZE - sz;
283 0 : for (bit_no = 0; (word)p <= (word)plim;
284 0 : p += sz, bit_no += MARK_BIT_OFFSET(sz)) {
285 0 : if (!mark_bit_from_hdr(hhdr, bit_no)) {
286 0 : GC_add_leaked(p);
287 : }
288 : }
289 0 : }
290 :
/*
 * Generic procedure to rebuild a free list in hbp.
 * Also called directly from GC_malloc_many.
 * Sz is now in bytes.
 * Dispatches to the disclaim-aware, clearing, or non-clearing sweep
 * depending on the block's kind flags and whether debugging is active.
 */
GC_INNER ptr_t GC_reclaim_generic(struct hblk * hbp, hdr *hhdr, size_t sz,
                                  GC_bool init, ptr_t list,
                                  signed_word *count)
{
    ptr_t result;

    GC_ASSERT(GC_find_header((ptr_t)hbp) == hhdr);
#   ifndef GC_DISABLE_INCREMENTAL
      /* We are about to write to the block; drop any write protection. */
      GC_remove_protection(hbp, 1, (hhdr)->hb_descr == 0 /* Pointer-free? */);
#   endif
#   ifdef ENABLE_DISCLAIM
      if ((hhdr -> hb_flags & HAS_DISCLAIM) != 0) {
        result = GC_disclaim_and_reclaim(hbp, hhdr, sz, list, count);
      } else
#   endif
    /* else */ if (init || GC_debugging_started) {
      result = GC_reclaim_clear(hbp, hhdr, sz, list, count);
    } else {
      GC_ASSERT((hhdr)->hb_descr == 0 /* Pointer-free block */);
      result = GC_reclaim_uninit(hbp, hhdr, sz, list, count);
    }
    /* Uncollectible objects must never be reclaimed; restore all of    */
    /* the block's mark bits.                                           */
    if (IS_UNCOLLECTABLE(hhdr -> hb_obj_kind)) GC_set_hdr_marks(hhdr);
    return result;
}
320 :
321 : /*
322 : * Restore unmarked small objects in the block pointed to by hbp
323 : * to the appropriate object free list.
324 : * If entirely empty blocks are to be completely deallocated, then
325 : * caller should perform that check.
326 : */
327 771 : STATIC void GC_reclaim_small_nonempty_block(struct hblk *hbp,
328 : GC_bool report_if_found)
329 : {
330 771 : hdr *hhdr = HDR(hbp);
331 771 : size_t sz = hhdr -> hb_sz;
332 771 : struct obj_kind * ok = &GC_obj_kinds[hhdr -> hb_obj_kind];
333 771 : void **flh = &(ok -> ok_freelist[BYTES_TO_GRANULES(sz)]);
334 :
335 771 : hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no;
336 :
337 771 : if (report_if_found) {
338 0 : GC_reclaim_check(hbp, hhdr, sz);
339 : } else {
340 771 : *flh = GC_reclaim_generic(hbp, hhdr, sz, ok -> ok_init,
341 771 : *flh, &GC_bytes_found);
342 : }
343 771 : }
344 :
345 : #ifdef ENABLE_DISCLAIM
  /* Sweep a small-object block whose kind has a disclaim notifier.     */
  /* If any objects remain marked afterwards (including those kept      */
  /* alive by the notifier), install the rebuilt free list; otherwise   */
  /* the block is entirely empty and is returned to the heap, and the   */
  /* just-built list for it is discarded (*flh keeps its old value).    */
  STATIC void GC_disclaim_and_reclaim_or_free_small_block(struct hblk *hbp)
  {
    hdr *hhdr = HDR(hbp);
    size_t sz = hhdr -> hb_sz;
    struct obj_kind * ok = &GC_obj_kinds[hhdr -> hb_obj_kind];
    void **flh = &(ok -> ok_freelist[BYTES_TO_GRANULES(sz)]);
    void *flh_next;

    hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no;
    flh_next = GC_reclaim_generic(hbp, hhdr, sz, ok -> ok_init,
                                  *flh, &GC_bytes_found);
    if (hhdr -> hb_n_marks)
      *flh = flh_next;
    else {
      /* Nothing survived the disclaim pass: free the whole block. */
      GC_bytes_found += HBLKSIZE;
      GC_freehblk(hbp);
    }
  }
364 : #endif /* ENABLE_DISCLAIM */
365 :
366 : /*
367 : * Restore an unmarked large object or an entirely empty blocks of small objects
368 : * to the heap block free list.
369 : * Otherwise enqueue the block for later processing
370 : * by GC_reclaim_small_nonempty_block.
371 : * If report_if_found is TRUE, then process any block immediately, and
372 : * simply report free objects; do not actually reclaim them.
373 : */
374 78596 : STATIC void GC_reclaim_block(struct hblk *hbp, word report_if_found)
375 : {
376 78596 : hdr * hhdr = HDR(hbp);
377 78596 : size_t sz = hhdr -> hb_sz; /* size of objects in current block */
378 78596 : struct obj_kind * ok = &GC_obj_kinds[hhdr -> hb_obj_kind];
379 : struct hblk ** rlh;
380 :
381 78596 : if( sz > MAXOBJBYTES ) { /* 1 big object */
382 2198 : if( !mark_bit_from_hdr(hhdr, 0) ) {
383 1017 : if (report_if_found) {
384 0 : GC_add_leaked((ptr_t)hbp);
385 : } else {
386 : size_t blocks;
387 :
388 : # ifdef ENABLE_DISCLAIM
389 1017 : if (EXPECT(hhdr->hb_flags & HAS_DISCLAIM, 0)) {
390 0 : struct obj_kind *ok = &GC_obj_kinds[hhdr->hb_obj_kind];
391 0 : if ((*ok->ok_disclaim_proc)(hbp)) {
392 : /* Not disclaimed => resurrect the object. */
393 0 : set_mark_bit_from_hdr(hhdr, 0);
394 0 : goto in_use;
395 : }
396 : }
397 : # endif
398 1017 : blocks = OBJ_SZ_TO_BLOCKS(sz);
399 1017 : if (blocks > 1) {
400 776 : GC_large_allocd_bytes -= blocks * HBLKSIZE;
401 : }
402 1017 : GC_bytes_found += sz;
403 1017 : GC_freehblk(hbp);
404 : }
405 : } else {
406 : # ifdef ENABLE_DISCLAIM
407 : in_use:
408 : # endif
409 1181 : if (hhdr -> hb_descr != 0) {
410 552 : GC_composite_in_use += sz;
411 : } else {
412 629 : GC_atomic_in_use += sz;
413 : }
414 : }
415 : } else {
416 76398 : GC_bool empty = GC_block_empty(hhdr);
417 : # ifdef PARALLEL_MARK
418 : /* Count can be low or one too high because we sometimes */
419 : /* have to ignore decrements. Objects can also potentially */
420 : /* be repeatedly marked by each marker. */
421 : /* Here we assume two markers, but this is extremely */
422 : /* unlikely to fail spuriously with more. And if it does, it */
423 : /* should be looked at. */
424 : GC_ASSERT(hhdr -> hb_n_marks <= 2 * (HBLKSIZE/sz + 1) + 16);
425 : # else
426 : GC_ASSERT(sz * hhdr -> hb_n_marks <= HBLKSIZE);
427 : # endif
428 76398 : if (report_if_found) {
429 0 : GC_reclaim_small_nonempty_block(hbp, TRUE /* report_if_found */);
430 76398 : } else if (empty) {
431 : # ifdef ENABLE_DISCLAIM
432 31590 : if ((hhdr -> hb_flags & HAS_DISCLAIM) != 0) {
433 0 : GC_disclaim_and_reclaim_or_free_small_block(hbp);
434 : } else
435 : # endif
436 : /* else */ {
437 31590 : GC_bytes_found += HBLKSIZE;
438 31590 : GC_freehblk(hbp);
439 : }
440 44808 : } else if (GC_find_leak || !GC_block_nearly_full(hhdr)) {
441 : /* group of smaller objects, enqueue the real work */
442 9932 : rlh = &(ok -> ok_reclaim_list[BYTES_TO_GRANULES(sz)]);
443 9932 : hhdr -> hb_next = *rlh;
444 9932 : *rlh = hbp;
445 : } /* else not worth salvaging. */
446 : /* We used to do the nearly_full check later, but we */
447 : /* already have the right cache context here. Also */
448 : /* doing it here avoids some silly lock contention in */
449 : /* GC_malloc_many. */
450 :
451 76398 : if (hhdr -> hb_descr != 0) {
452 62949 : GC_composite_in_use += sz * hhdr -> hb_n_marks;
453 : } else {
454 13449 : GC_atomic_in_use += sz * hhdr -> hb_n_marks;
455 : }
456 : }
457 78596 : }
458 :
459 : #if !defined(NO_DEBUGGING)
460 : /* Routines to gather and print heap block info */
461 : /* intended for debugging. Otherwise should be called */
462 : /* with lock. */
463 :
464 : struct Print_stats
465 : {
466 : size_t number_of_blocks;
467 : size_t total_bytes;
468 : };
469 :
470 : #ifdef USE_MARK_BYTES
471 :
/* Return the number of set mark bits in the given header.      */
/* Remains externally visible as used by GNU GCJ currently.     */
int GC_n_set_marks(hdr *hhdr)
{
    int result = 0;
    int i;
    size_t sz = hhdr -> hb_sz;
    int offset = (int)MARK_BIT_OFFSET(sz);
    int limit = (int)FINAL_MARK_BIT(sz);

    /* With USE_MARK_BYTES each hb_marks[] entry is a whole byte, so    */
    /* the entries are simply summed.  The loop stops before the final  */
    /* mark bit, which the assertion below shows is always set and is   */
    /* therefore excluded from the count.                               */
    for (i = 0; i < limit; i += offset) {
        result += hhdr -> hb_marks[i];
    }
    GC_ASSERT(hhdr -> hb_marks[limit]);
    return(result);
}
488 :
489 : #else
490 :
491 : /* Number of set bits in a word. Not performance critical. */
492 : static int set_bits(word n)
493 : {
494 : word m = n;
495 : int result = 0;
496 :
497 : while (m > 0) {
498 : if (m & 1) result++;
499 : m >>= 1;
500 : }
501 : return(result);
502 : }
503 :
/* Count the set mark bits in the header's bitmap representation.      */
int GC_n_set_marks(hdr *hhdr)
{
    int result = 0;
    int i;
    int n_mark_words;
#   ifdef MARK_BIT_PER_OBJ
      int n_objs = (int)HBLK_OBJS(hhdr -> hb_sz);

      /* A "large" object has HBLK_OBJS == 0; treat it as one object. */
      if (0 == n_objs) n_objs = 1;
      n_mark_words = divWORDSZ(n_objs + WORDSZ - 1);
#   else /* MARK_BIT_PER_GRANULE */
      n_mark_words = MARK_BITS_SZ;
#   endif
    for (i = 0; i < n_mark_words - 1; i++) {
        result += set_bits(hhdr -> hb_marks[i]);
    }
#   ifdef MARK_BIT_PER_OBJ
      /* Shift out the unused high-order bits of the last mark word so  */
      /* that only bits belonging to real objects are counted.          */
      result += set_bits((hhdr -> hb_marks[n_mark_words - 1])
                         << (n_mark_words * WORDSZ - n_objs));
#   else
      result += set_bits(hhdr -> hb_marks[n_mark_words - 1]);
#   endif
    /* The "- 1" excludes the final mark bit, which is always set       */
    /* (cf. the USE_MARK_BYTES variant, which asserts this and skips    */
    /* that bit in its loop).                                           */
    return(result - 1);
}
528 :
529 : #endif /* !USE_MARK_BYTES */
530 :
531 0 : STATIC void GC_print_block_descr(struct hblk *h,
532 : word /* struct PrintStats */ raw_ps)
533 : {
534 0 : hdr * hhdr = HDR(h);
535 0 : size_t bytes = hhdr -> hb_sz;
536 : struct Print_stats *ps;
537 0 : unsigned n_marks = GC_n_set_marks(hhdr);
538 :
539 0 : if (hhdr -> hb_n_marks != n_marks) {
540 0 : GC_printf("(%u:%u,%u!=%u)\n", hhdr->hb_obj_kind, (unsigned)bytes,
541 : (unsigned)hhdr->hb_n_marks, n_marks);
542 : } else {
543 0 : GC_printf("(%u:%u,%u)\n", hhdr->hb_obj_kind,
544 : (unsigned)bytes, n_marks);
545 : }
546 0 : bytes += HBLKSIZE-1;
547 0 : bytes &= ~(HBLKSIZE-1);
548 :
549 0 : ps = (struct Print_stats *)raw_ps;
550 0 : ps->total_bytes += bytes;
551 0 : ps->number_of_blocks++;
552 0 : }
553 :
554 0 : void GC_print_block_list(void)
555 : {
556 : struct Print_stats pstats;
557 :
558 0 : GC_printf("(kind(0=ptrfree,1=normal,2=unc.):size_in_bytes, #_marks_set)\n");
559 0 : pstats.number_of_blocks = 0;
560 0 : pstats.total_bytes = 0;
561 0 : GC_apply_to_all_blocks(GC_print_block_descr, (word)&pstats);
562 0 : GC_printf("blocks= %lu, bytes= %lu\n",
563 : (unsigned long)pstats.number_of_blocks,
564 : (unsigned long)pstats.total_bytes);
565 0 : }
566 :
567 : /* Currently for debugger use only: */
568 0 : void GC_print_free_list(int kind, size_t sz_in_granules)
569 : {
570 0 : struct obj_kind * ok = &GC_obj_kinds[kind];
571 0 : ptr_t flh = ok -> ok_freelist[sz_in_granules];
572 : int n;
573 :
574 0 : for (n = 0; flh; n++) {
575 0 : struct hblk *block = HBLKPTR(flh);
576 0 : GC_printf("Free object in heap block %p [%d]: %p\n",
577 : (void *)block, n, flh);
578 0 : flh = obj_link(flh);
579 : }
580 0 : }
581 :
582 : #endif /* !NO_DEBUGGING */
583 :
584 : /*
585 : * Clear all obj_link pointers in the list of free objects *flp.
586 : * Clear *flp.
587 : * This must be done before dropping a list of free gcj-style objects,
588 : * since may otherwise end up with dangling "descriptor" pointers.
589 : * It may help for other pointer-containing objects.
590 : */
591 0 : STATIC void GC_clear_fl_links(void **flp)
592 : {
593 0 : void *next = *flp;
594 :
595 0 : while (0 != next) {
596 0 : *flp = 0;
597 0 : flp = &(obj_link(next));
598 0 : next = *flp;
599 : }
600 0 : }
601 :
/*
 * Perform GC_reclaim_block on the entire heap, after first clearing
 * small object free lists (if we are not just looking for leaks).
 */
GC_INNER void GC_start_reclaim(GC_bool report_if_found)
{
    unsigned kind;

#   if defined(PARALLEL_MARK)
      GC_ASSERT(0 == GC_fl_builder_count);
#   endif
    /* Reset in use counters.  GC_reclaim_block recomputes them. */
      GC_composite_in_use = 0;
      GC_atomic_in_use = 0;
    /* Clear reclaim- and free-lists */
      for (kind = 0; kind < GC_n_kinds; kind++) {
        void **fop;
        void **lim;
        struct hblk ** rlist = GC_obj_kinds[kind].ok_reclaim_list;
        GC_bool should_clobber = (GC_obj_kinds[kind].ok_descriptor != 0);

        if (rlist == 0) continue;       /* This kind not used.  */
        if (!report_if_found) {
            lim = &(GC_obj_kinds[kind].ok_freelist[MAXOBJGRANULES+1]);
            for (fop = GC_obj_kinds[kind].ok_freelist;
                 (word)fop < (word)lim; fop++) {
              if (*fop != 0) {
                if (should_clobber) {
                  /* Pointer-containing kinds also get the per-object   */
                  /* free-list links cleared, so no stale "descriptor"  */
                  /* pointers remain (see GC_clear_fl_links).           */
                  GC_clear_fl_links(fop);
                } else {
                  *fop = 0;
                }
              }
            }
        } /* otherwise free list objects are marked,    */
          /* and its safe to leave them                 */
        BZERO(rlist, (MAXOBJGRANULES + 1) * sizeof(void *));
      }


    /* Go through all heap blocks (in hblklist) and reclaim unmarked    */
    /* objects or enqueue the block for later processing.               */
      GC_apply_to_all_blocks(GC_reclaim_block, (word)report_if_found);

#   ifdef EAGER_SWEEP
      /* This is a very stupid thing to do.  We make it possible anyway, */
      /* so that you can convince yourself that it really is very stupid. */
      GC_reclaim_all((GC_stop_func)0, FALSE);
#   elif defined(ENABLE_DISCLAIM)
      /* However, make sure to clear reclaimable objects of kinds with  */
      /* unconditional marking enabled before we do any significant     */
      /* marking work.                                                  */
      GC_reclaim_unconditionally_marked();
#   endif
#   if defined(PARALLEL_MARK)
      GC_ASSERT(0 == GC_fl_builder_count);
#   endif

}
661 :
662 : /*
663 : * Sweep blocks of the indicated object size and kind until either the
664 : * appropriate free list is nonempty, or there are no more blocks to
665 : * sweep.
666 : */
667 18282 : GC_INNER void GC_continue_reclaim(size_t sz /* granules */, int kind)
668 : {
669 : hdr * hhdr;
670 : struct hblk * hbp;
671 18282 : struct obj_kind * ok = &(GC_obj_kinds[kind]);
672 18282 : struct hblk ** rlh = ok -> ok_reclaim_list;
673 18282 : void **flh = &(ok -> ok_freelist[sz]);
674 :
675 18282 : if (rlh == 0) return; /* No blocks of this kind. */
676 18282 : rlh += sz;
677 36570 : while ((hbp = *rlh) != 0) {
678 771 : hhdr = HDR(hbp);
679 771 : *rlh = hhdr -> hb_next;
680 771 : GC_reclaim_small_nonempty_block(hbp, FALSE);
681 771 : if (*flh != 0) break;
682 : }
683 : }
684 :
685 : /*
686 : * Reclaim all small blocks waiting to be reclaimed.
687 : * Abort and return FALSE when/if (*stop_func)() returns TRUE.
688 : * If this returns TRUE, then it's safe to restart the world
689 : * with incorrectly cleared mark bits.
690 : * If ignore_old is TRUE, then reclaim only blocks that have been
691 : * recently reclaimed, and discard the rest.
692 : * Stop_func may be 0.
693 : */
694 0 : GC_INNER GC_bool GC_reclaim_all(GC_stop_func stop_func, GC_bool ignore_old)
695 : {
696 : word sz;
697 : unsigned kind;
698 : hdr * hhdr;
699 : struct hblk * hbp;
700 : struct obj_kind * ok;
701 : struct hblk ** rlp;
702 : struct hblk ** rlh;
703 : # ifndef SMALL_CONFIG
704 0 : CLOCK_TYPE start_time = 0; /* initialized to prevent warning. */
705 : CLOCK_TYPE done_time;
706 :
707 0 : if (GC_print_stats == VERBOSE)
708 0 : GET_TIME(start_time);
709 : # endif
710 :
711 0 : for (kind = 0; kind < GC_n_kinds; kind++) {
712 0 : ok = &(GC_obj_kinds[kind]);
713 0 : rlp = ok -> ok_reclaim_list;
714 0 : if (rlp == 0) continue;
715 0 : for (sz = 1; sz <= MAXOBJGRANULES; sz++) {
716 0 : rlh = rlp + sz;
717 0 : while ((hbp = *rlh) != 0) {
718 0 : if (stop_func != (GC_stop_func)0 && (*stop_func)()) {
719 0 : return(FALSE);
720 : }
721 0 : hhdr = HDR(hbp);
722 0 : *rlh = hhdr -> hb_next;
723 0 : if (!ignore_old || hhdr -> hb_last_reclaimed == GC_gc_no - 1) {
724 : /* It's likely we'll need it this time, too */
725 : /* It's been touched recently, so this */
726 : /* shouldn't trigger paging. */
727 0 : GC_reclaim_small_nonempty_block(hbp, FALSE);
728 : }
729 : }
730 : }
731 : }
732 : # ifndef SMALL_CONFIG
733 0 : if (GC_print_stats == VERBOSE) {
734 0 : GET_TIME(done_time);
735 0 : GC_verbose_log_printf("Disposing of reclaim lists took %lu msecs\n",
736 0 : MS_TIME_DIFF(done_time,start_time));
737 : }
738 : # endif
739 0 : return(TRUE);
740 : }
741 :
742 : #if !defined(EAGER_SWEEP) && defined(ENABLE_DISCLAIM)
743 : /* We do an eager sweep on heap blocks where unconditional marking has */
744 : /* been enabled, so that any reclaimable objects have been reclaimed */
745 : /* before we start marking. This is a simplified GC_reclaim_all */
746 : /* restricted to kinds where ok_mark_unconditionally is true. */
747 244 : STATIC void GC_reclaim_unconditionally_marked(void)
748 : {
749 : word sz;
750 : unsigned kind;
751 : hdr * hhdr;
752 : struct hblk * hbp;
753 : struct obj_kind * ok;
754 : struct hblk ** rlp;
755 : struct hblk ** rlh;
756 :
757 1220 : for (kind = 0; kind < GC_n_kinds; kind++) {
758 976 : ok = &(GC_obj_kinds[kind]);
759 976 : if (!ok->ok_mark_unconditionally)
760 976 : continue;
761 0 : rlp = ok->ok_reclaim_list;
762 0 : if (rlp == 0)
763 0 : continue;
764 0 : for (sz = 1; sz <= MAXOBJGRANULES; sz++) {
765 0 : rlh = rlp + sz;
766 0 : while ((hbp = *rlh) != 0) {
767 0 : hhdr = HDR(hbp);
768 0 : *rlh = hhdr->hb_next;
769 0 : GC_reclaim_small_nonempty_block(hbp, FALSE);
770 : }
771 : }
772 : }
773 244 : }
774 : #endif /* !EAGER_SWEEP && ENABLE_DISCLAIM */
|