/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1996 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
 * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */

#include "private/gc_priv.h"

#include <stdio.h>

GC_INNER signed_word GC_bytes_found = 0;
                        /* Number of bytes of memory reclaimed     */
                        /* minus the number of bytes originally    */
                        /* on free lists which we had to drop.     */

#if defined(PARALLEL_MARK)
  GC_INNER word GC_fl_builder_count = 0;
        /* Number of threads currently building free lists without */
        /* holding GC lock.  It is not safe to collect if this is  */
        /* nonzero.                                                */
#endif /* PARALLEL_MARK */

/* We defer printing of leaked objects until we're done with the GC    */
/* cycle, since the routine for printing objects needs to run outside  */
/* the collector, i.e. without holding the allocation lock.            */
#ifndef MAX_LEAKED
# define MAX_LEAKED 40
#endif
STATIC ptr_t GC_leaked[MAX_LEAKED] = { NULL };
STATIC unsigned GC_n_leaked = 0;

GC_INNER GC_bool GC_have_errors = FALSE;

GC_INLINE void GC_add_leaked(ptr_t leaked)
{
#   ifndef SHORT_DBG_HDRS
      if (GC_findleak_delay_free && !GC_check_leaked(leaked))
        return;
#   endif

    GC_have_errors = TRUE;
    /* FIXME: Prevent adding an object while printing leaked ones. */
    if (GC_n_leaked < MAX_LEAKED) {
        GC_leaked[GC_n_leaked++] = leaked;
        /* Make sure it's not reclaimed this cycle */
        GC_set_mark_bit(leaked);
    }
}

/* Print all objects on the list after printing any smashed objects.   */
/* Clear both lists.  Called without the allocation lock held.         */
GC_INNER void GC_print_all_errors(void)
{
    static GC_bool printing_errors = FALSE;
    GC_bool have_errors;
    unsigned i;
    DCL_LOCK_STATE;

    LOCK();
    if (printing_errors) {
        UNLOCK();
        return;
    }
    have_errors = GC_have_errors;
    printing_errors = TRUE;
    UNLOCK();

    if (GC_debugging_started) {
        GC_print_all_smashed();
    } else {
        have_errors = FALSE;
    }

    for (i = 0; i < GC_n_leaked; ++i) {
        ptr_t p = GC_leaked[i];
        if (HDR(p) -> hb_obj_kind == PTRFREE) {
            GC_err_printf("Leaked atomic object at ");
        } else {
            GC_err_printf("Leaked composite object at ");
        }
        GC_print_heap_obj(p);
        GC_err_printf("\n");
        GC_free(p);
        GC_leaked[i] = 0;
        have_errors = TRUE;
    }
    GC_n_leaked = 0;

    if (have_errors
#       ifndef GC_ABORT_ON_LEAK
          && GETENV("GC_ABORT_ON_LEAK") != NULL
#       endif
       ) {
        ABORT("Leaked or smashed objects encountered");
    }

    printing_errors = FALSE;
}

/*
 * reclaim phase
 */

/* Test whether a block is completely empty, i.e. contains no marked   */
/* objects.  This does not require the block to be in physical memory. */
GC_INNER GC_bool GC_block_empty(hdr *hhdr)
{
    return (hhdr -> hb_n_marks == 0);
}

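/* A block is deemed "nearly full" if more than 7/8 of its object      */
/* slots are marked.  Sweeping such a block would yield few free       */
/* objects, so GC_reclaim_block (below) does not bother to enqueue it. */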
STATIC GC_bool GC_block_nearly_full(hdr *hhdr)
{
    return (hhdr -> hb_n_marks > 7 * HBLK_OBJS(hhdr -> hb_sz)/8);
}

/* FIXME: This should perhaps again be specialized for USE_MARK_BYTES */
/* and USE_MARK_BITS cases. */

/*
 * Restore unmarked small objects in hbp of size sz to the object
 * free list.  Returns the new list.
 * Clears unmarked objects.  sz is in bytes.
 */
STATIC ptr_t GC_reclaim_clear(struct hblk *hbp, hdr *hhdr, size_t sz,
                              ptr_t list, signed_word *count)
{
    word bit_no = 0;
    word *p, *q, *plim;
    signed_word n_bytes_found = 0;

    GC_ASSERT(hhdr == GC_find_header((ptr_t)hbp));
    GC_ASSERT(sz == hhdr -> hb_sz);
    GC_ASSERT((sz & (BYTES_PER_WORD-1)) == 0);
    p = (word *)(hbp->hb_body);
    plim = (word *)(hbp->hb_body + HBLKSIZE - sz);

    /* go through all words in block */
    while (p <= plim) {
        if (mark_bit_from_hdr(hhdr, bit_no)) {
            p = (word *)((ptr_t)p + sz);
        } else {
            n_bytes_found += sz;
            /* object is available - put on list */
            obj_link(p) = list;
            list = ((ptr_t)p);
            /* Clear object, advance p to next object in the process */
            q = (word *)((ptr_t)p + sz);
#           ifdef USE_MARK_BYTES
              GC_ASSERT(!(sz & 1)
                        && !((word)p & (2 * sizeof(word) - 1)));
              p[1] = 0;
              p += 2;
              while (p < q) {
                CLEAR_DOUBLE(p);
                p += 2;
              }
#           else
              p++; /* Skip link field */
              while (p < q) {
                *p++ = 0;
              }
#           endif
        }
        bit_no += MARK_BIT_OFFSET(sz);
    }
    *count += n_bytes_found;
    return(list);
}

/* The same thing, but don't clear objects: */
STATIC ptr_t GC_reclaim_uninit(struct hblk *hbp, hdr *hhdr, size_t sz,
                               ptr_t list, signed_word *count)
{
    word bit_no = 0;
    word *p, *plim;
    signed_word n_bytes_found = 0;

    GC_ASSERT(sz == hhdr -> hb_sz);
    p = (word *)(hbp->hb_body);
    plim = (word *)((ptr_t)hbp + HBLKSIZE - sz);

    /* go through all words in block */
    while (p <= plim) {
        if (!mark_bit_from_hdr(hhdr, bit_no)) {
            n_bytes_found += sz;
            /* object is available - put on list */
            obj_link(p) = list;
            list = ((ptr_t)p);
        }
        p = (word *)((ptr_t)p + sz);
        bit_no += MARK_BIT_OFFSET(sz);
    }
    *count += n_bytes_found;
    return(list);
}

/* Don't really reclaim objects, just check for unmarked ones: */
STATIC void GC_reclaim_check(struct hblk *hbp, hdr *hhdr, word sz)
{
    word bit_no;
    ptr_t p, plim;

    GC_ASSERT(sz == hhdr -> hb_sz);

    /* go through all words in block */
    p = hbp->hb_body;
    plim = p + HBLKSIZE - sz;
    for (bit_no = 0; p <= plim; p += sz, bit_no += MARK_BIT_OFFSET(sz)) {
        if (!mark_bit_from_hdr(hhdr, bit_no)) {
            GC_add_leaked(p);
        }
    }
}

/*
 * Generic procedure to rebuild a free list in hbp.
 * Also called directly from GC_malloc_many.
 * sz is in bytes.
 */
GC_INNER ptr_t GC_reclaim_generic(struct hblk * hbp, hdr *hhdr, size_t sz,
                                  GC_bool init, ptr_t list,
                                  signed_word *count)
{
    ptr_t result;

    GC_ASSERT(GC_find_header((ptr_t)hbp) == hhdr);
#   ifndef GC_DISABLE_INCREMENTAL
      GC_remove_protection(hbp, 1, (hhdr)->hb_descr == 0 /* Pointer-free? */);
#   endif
    if (init || GC_debugging_started) {
        result = GC_reclaim_clear(hbp, hhdr, sz, list, count);
    } else {
        GC_ASSERT((hhdr)->hb_descr == 0 /* Pointer-free block */);
        result = GC_reclaim_uninit(hbp, hhdr, sz, list, count);
    }
    if (IS_UNCOLLECTABLE(hhdr -> hb_obj_kind)) GC_set_hdr_marks(hhdr);
    return result;
}
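
/* A minimal usage sketch (hypothetical caller, for illustration only): */
/* given a block hbp with header hhdr, object size sz, and object kind  */
/* descriptor ok, a sweeper holding the allocation lock can rebuild the */
/* free list and credit the recovered bytes like this:                  */
/*                                                                      */
/*     signed_word n_found = 0;                                         */
/*     void **flh = &(ok -> ok_freelist[BYTES_TO_GRANULES(sz)]);        */
/*                                                                      */
/*     *flh = GC_reclaim_generic(hbp, hhdr, sz, ok -> ok_init,          */
/*                               *flh, &n_found);                       */
/*     GC_bytes_found += n_found;                                       */
/*                                                                      */
/* This mirrors what GC_reclaim_small_nonempty_block does below.        */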

/*
 * Restore unmarked small objects in the block pointed to by hbp
 * to the appropriate object free list.
 * If entirely empty blocks are to be completely deallocated, then the
 * caller should perform that check.
 */
STATIC void GC_reclaim_small_nonempty_block(struct hblk *hbp,
                                            GC_bool report_if_found)
{
    hdr *hhdr = HDR(hbp);
    size_t sz = hhdr -> hb_sz;
    struct obj_kind * ok = &GC_obj_kinds[hhdr -> hb_obj_kind];
    void **flh = &(ok -> ok_freelist[BYTES_TO_GRANULES(sz)]);

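    /* Record the collection number, so GC_reclaim_all can later tell  */
    /* whether this block was swept recently (see ignore_old there).   */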
    hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no;

    if (report_if_found) {
        GC_reclaim_check(hbp, hhdr, sz);
    } else {
        *flh = GC_reclaim_generic(hbp, hhdr, sz, ok -> ok_init,
                                  *flh, &GC_bytes_found);
    }
}

/*
 * Restore an unmarked large object or an entirely empty block of small
 * objects to the heap block free list.
 * Otherwise enqueue the block for later processing
 * by GC_reclaim_small_nonempty_block.
 * If report_if_found is TRUE, then process any block immediately, and
 * simply report free objects; do not actually reclaim them.
 */
STATIC void GC_reclaim_block(struct hblk *hbp, word report_if_found)
{
    hdr * hhdr = HDR(hbp);
    size_t sz = hhdr -> hb_sz; /* size of objects in current block */
    struct obj_kind * ok = &GC_obj_kinds[hhdr -> hb_obj_kind];
    struct hblk ** rlh;

    if (sz > MAXOBJBYTES) { /* 1 big object */
        if (!mark_bit_from_hdr(hhdr, 0)) {
            if (report_if_found) {
                GC_add_leaked((ptr_t)hbp);
            } else {
                size_t blocks = OBJ_SZ_TO_BLOCKS(sz);
                if (blocks > 1) {
                    GC_large_allocd_bytes -= blocks * HBLKSIZE;
                }
                GC_bytes_found += sz;
                GC_freehblk(hbp);
            }
        } else {
            if (hhdr -> hb_descr != 0) {
                GC_composite_in_use += sz;
            } else {
                GC_atomic_in_use += sz;
            }
        }
    } else {
        GC_bool empty = GC_block_empty(hhdr);
#       ifdef PARALLEL_MARK
          /* Count can be low or one too high because we sometimes      */
          /* have to ignore decrements.  Objects can also potentially   */
          /* be repeatedly marked by each marker.                       */
          /* Here we assume two markers, but this is extremely          */
          /* unlikely to fail spuriously with more.  And if it does, it */
          /* should be looked at.                                       */
          GC_ASSERT(hhdr -> hb_n_marks <= 2 * (HBLKSIZE/sz + 1) + 16);
#       else
          GC_ASSERT(sz * hhdr -> hb_n_marks <= HBLKSIZE);
#       endif
        if (hhdr -> hb_descr != 0) {
            GC_composite_in_use += sz * hhdr -> hb_n_marks;
        } else {
            GC_atomic_in_use += sz * hhdr -> hb_n_marks;
        }
        if (report_if_found) {
            GC_reclaim_small_nonempty_block(hbp, TRUE /* report_if_found */);
        } else if (empty) {
            GC_bytes_found += HBLKSIZE;
            GC_freehblk(hbp);
        } else if (GC_find_leak || !GC_block_nearly_full(hhdr)) {
            /* group of smaller objects, enqueue the real work */
            rlh = &(ok -> ok_reclaim_list[BYTES_TO_GRANULES(sz)]);
            hhdr -> hb_next = *rlh;
            *rlh = hbp;
        } /* else not worth salvaging. */
        /* We used to do the nearly_full check later, but we    */
        /* already have the right cache context here.  Also     */
        /* doing it here avoids some silly lock contention in   */
        /* GC_malloc_many.                                      */
    }
}

#if !defined(NO_DEBUGGING)
/* Routines to gather and print heap block info, intended for      */
/* debugging.  If not called from a debugger, they should be       */
/* invoked with the allocation lock held.                          */

struct Print_stats
{
    size_t number_of_blocks;
    size_t total_bytes;
};

#ifdef USE_MARK_BYTES

/* Return the number of set mark bits in the given header.         */
/* Remains externally visible, as it is currently used by GNU GCJ. */
int GC_n_set_marks(hdr *hhdr)
{
    int result = 0;
    int i;
    size_t sz = hhdr -> hb_sz;
    int offset = (int)MARK_BIT_OFFSET(sz);
    int limit = (int)FINAL_MARK_BIT(sz);

    for (i = 0; i < limit; i += offset) {
        result += hhdr -> hb_marks[i];
    }
    GC_ASSERT(hhdr -> hb_marks[limit]);
    return(result);
}

#else

/* Number of set bits in a word.  Not performance critical. */
static int set_bits(word n)
{
    word m = n;
    int result = 0;

    while (m > 0) {
        if (m & 1) result++;
        m >>= 1;
    }
    return(result);
}

int GC_n_set_marks(hdr *hhdr)
{
    int result = 0;
    int i;
    int n_mark_words;
#   ifdef MARK_BIT_PER_OBJ
      int n_objs = (int)HBLK_OBJS(hhdr -> hb_sz);

      if (0 == n_objs) n_objs = 1;
      n_mark_words = divWORDSZ(n_objs + WORDSZ - 1);
#   else /* MARK_BIT_PER_GRANULE */
      n_mark_words = MARK_BITS_SZ;
#   endif
    for (i = 0; i < n_mark_words - 1; i++) {
        result += set_bits(hhdr -> hb_marks[i]);
    }
#   ifdef MARK_BIT_PER_OBJ
      result += set_bits((hhdr -> hb_marks[n_mark_words - 1])
                         << (n_mark_words * WORDSZ - n_objs));
#   else
      result += set_bits(hhdr -> hb_marks[n_mark_words - 1]);
#   endif
412 0 : return(result - 1);
413 : }
414 :
415 : #endif /* !USE_MARK_BYTES */
416 :
STATIC void GC_print_block_descr(struct hblk *h,
                                 word /* struct Print_stats */ raw_ps)
{
    hdr * hhdr = HDR(h);
    size_t bytes = hhdr -> hb_sz;
    struct Print_stats *ps;
    unsigned n_marks = GC_n_set_marks(hhdr);

    if (hhdr -> hb_n_marks != n_marks) {
        GC_printf("(%u:%u,%u!=%u)", hhdr -> hb_obj_kind, (unsigned)bytes,
                  (unsigned)hhdr -> hb_n_marks, n_marks);
    } else {
        GC_printf("(%u:%u,%u)", hhdr -> hb_obj_kind,
                  (unsigned)bytes, n_marks);
    }
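    /* Round up to the number of bytes the object actually occupies,   */
    /* i.e. to a multiple of HBLKSIZE.                                 */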
    bytes += HBLKSIZE-1;
    bytes &= ~(HBLKSIZE-1);

    ps = (struct Print_stats *)raw_ps;
    ps->total_bytes += bytes;
    ps->number_of_blocks++;
}

void GC_print_block_list(void)
{
    struct Print_stats pstats;

    GC_printf("(kind(0=ptrfree,1=normal,2=unc.):size_in_bytes, #_marks_set)\n");
    pstats.number_of_blocks = 0;
    pstats.total_bytes = 0;
    GC_apply_to_all_blocks(GC_print_block_descr, (word)&pstats);
    GC_printf("\nblocks = %lu, bytes = %lu\n",
              (unsigned long)pstats.number_of_blocks,
              (unsigned long)pstats.total_bytes);
}

/* Currently for debugger use only: */
void GC_print_free_list(int kind, size_t sz_in_granules)
{
    struct obj_kind * ok = &GC_obj_kinds[kind];
    ptr_t flh = ok -> ok_freelist[sz_in_granules];
    struct hblk *lastBlock = 0;
    int n;

    for (n = 1; flh; n++) {
        struct hblk *block = HBLKPTR(flh);
        if (block != lastBlock) {
            GC_printf("\nIn heap block at %p:\n\t", block);
            lastBlock = block;
        }
        GC_printf("%d: %p;", n, flh);
        flh = obj_link(flh);
    }
}

#endif /* !NO_DEBUGGING */

/*
 * Clear all obj_link pointers in the list of free objects *flp.
 * Clear *flp.
 * This must be done before dropping a list of free gcj-style objects,
 * since we may otherwise end up with dangling "descriptor" pointers.
 * It may help for other pointer-containing objects.
 */
STATIC void GC_clear_fl_links(void **flp)
{
    void *next = *flp;

    while (0 != next) {
        *flp = 0;
        flp = &(obj_link(next));
        next = *flp;
    }
}

/*
 * Perform GC_reclaim_block on the entire heap, after first clearing
 * small object free lists (if we are not just looking for leaks).
 */
GC_INNER void GC_start_reclaim(GC_bool report_if_found)
{
    unsigned kind;

#   if defined(PARALLEL_MARK)
      GC_ASSERT(0 == GC_fl_builder_count);
#   endif
    /* Reset in use counters.  GC_reclaim_block recomputes them. */
    GC_composite_in_use = 0;
    GC_atomic_in_use = 0;
    /* Clear reclaim- and free-lists */
    for (kind = 0; kind < GC_n_kinds; kind++) {
        void **fop;
        void **lim;
        struct hblk ** rlist = GC_obj_kinds[kind].ok_reclaim_list;
        GC_bool should_clobber = (GC_obj_kinds[kind].ok_descriptor != 0);

        if (rlist == 0) continue;       /* This kind not used. */
        if (!report_if_found) {
            lim = &(GC_obj_kinds[kind].ok_freelist[MAXOBJGRANULES+1]);
            for (fop = GC_obj_kinds[kind].ok_freelist; fop < lim; fop++) {
                if (*fop != 0) {
                    if (should_clobber) {
                        GC_clear_fl_links(fop);
                    } else {
                        *fop = 0;
                    }
                }
            }
        } /* otherwise free list objects are marked,    */
          /* and it's safe to leave them.                */
        BZERO(rlist, (MAXOBJGRANULES + 1) * sizeof(void *));
    }

    /* Go through all heap blocks (in hblklist) and reclaim unmarked   */
    /* objects or enqueue the block for later processing.              */
    GC_apply_to_all_blocks(GC_reclaim_block, (word)report_if_found);

#   ifdef EAGER_SWEEP
      /* This is a very stupid thing to do.  We make it possible anyway, */
      /* so that you can convince yourself that it really is very stupid. */
      GC_reclaim_all((GC_stop_func)0, FALSE);
#   endif
#   if defined(PARALLEL_MARK)
      GC_ASSERT(0 == GC_fl_builder_count);
#   endif
}

/*
 * Sweep blocks of the indicated object size and kind until either the
 * appropriate free list is nonempty, or there are no more blocks to
 * sweep.
 */
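/* Note: typically invoked by the small-object allocator (e.g. from    */
/* GC_allocobj) when the free list for this size and kind is found     */
/* empty; the caller is assumed to hold the allocation lock.           */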
GC_INNER void GC_continue_reclaim(size_t sz /* granules */, int kind)
{
    hdr * hhdr;
    struct hblk * hbp;
    struct obj_kind * ok = &(GC_obj_kinds[kind]);
    struct hblk ** rlh = ok -> ok_reclaim_list;
    void **flh = &(ok -> ok_freelist[sz]);

    if (rlh == 0) return;       /* No blocks of this kind. */
    rlh += sz;
    while ((hbp = *rlh) != 0) {
        hhdr = HDR(hbp);
        *rlh = hhdr -> hb_next;
        GC_reclaim_small_nonempty_block(hbp, FALSE);
        if (*flh != 0) break;
    }
}

/*
 * Reclaim all small blocks waiting to be reclaimed.
 * Abort and return FALSE when/if (*stop_func)() returns TRUE.
 * If this returns TRUE, then it's safe to restart the world
 * with incorrectly cleared mark bits.
 * If ignore_old is TRUE, then reclaim only blocks that have been
 * recently reclaimed, and discard the rest.
 * stop_func may be 0.
 */
GC_INNER GC_bool GC_reclaim_all(GC_stop_func stop_func, GC_bool ignore_old)
{
    word sz;
    unsigned kind;
    hdr * hhdr;
    struct hblk * hbp;
    struct obj_kind * ok;
    struct hblk ** rlp;
    struct hblk ** rlh;
#   ifndef SMALL_CONFIG
      CLOCK_TYPE start_time = 0; /* initialized to prevent warning. */
      CLOCK_TYPE done_time;

      if (GC_print_stats == VERBOSE)
        GET_TIME(start_time);
#   endif

    for (kind = 0; kind < GC_n_kinds; kind++) {
        ok = &(GC_obj_kinds[kind]);
        rlp = ok -> ok_reclaim_list;
        if (rlp == 0) continue;
        for (sz = 1; sz <= MAXOBJGRANULES; sz++) {
            rlh = rlp + sz;
            while ((hbp = *rlh) != 0) {
                if (stop_func != (GC_stop_func)0 && (*stop_func)()) {
                    return(FALSE);
                }
                hhdr = HDR(hbp);
                *rlh = hhdr -> hb_next;
                if (!ignore_old || hhdr -> hb_last_reclaimed == GC_gc_no - 1) {
                    /* It's likely we'll need it this time, too.  */
                    /* It's been touched recently, so this        */
                    /* shouldn't trigger paging.                  */
                    GC_reclaim_small_nonempty_block(hbp, FALSE);
                }
            }
        }
    }
#   ifndef SMALL_CONFIG
      if (GC_print_stats == VERBOSE) {
        GET_TIME(done_time);
        GC_log_printf("Disposing of reclaim lists took %lu msecs\n",
                      MS_TIME_DIFF(done_time, start_time));
      }
#   endif
    return(TRUE);
}