Line data Source code
1 : /*
2 : * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3 : * Copyright (c) 1991-1996 by Xerox Corporation. All rights reserved.
4 : * Copyright (c) 1998 by Silicon Graphics. All rights reserved.
5 : * Copyright (c) 1999-2004 Hewlett-Packard Development Company, L.P.
6 : *
7 : * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
8 : * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
9 : *
10 : * Permission is hereby granted to use or copy this program
11 : * for any purpose, provided the above notices are retained on all copies.
12 : * Permission to modify the code and to distribute modified code is granted,
13 : * provided the above notices are retained, and a notice that the code was
14 : * modified is included with the above copyright notice.
15 : *
16 : */
17 :
18 : #include "private/gc_priv.h"
19 :
20 : #include <stdio.h>
21 : #if !defined(MACOS) && !defined(MSWINCE)
22 : # include <signal.h>
23 : # if !defined(__CC_ARM)
24 : # include <sys/types.h>
25 : # endif
26 : #endif
27 :
28 : /*
29 : * Separate free lists are maintained for different sized objects
30 : * up to MAXOBJBYTES.
31 : * The call GC_allocobj(i,k) ensures that the freelist for
32 : * kind k objects of size i points to a non-empty
33 : * free list. It returns a pointer to the first entry on the free list.
34 : * In a single-threaded world, GC_allocobj may be called to allocate
35 : * an object of (small) size i as follows:
36 : *
37 : * opp = &(GC_objfreelist[i]);
38 : * if (*opp == 0) GC_allocobj(i, NORMAL);
39 : * ptr = *opp;
40 : * *opp = obj_link(ptr);
41 : *
42 : * Note that this is very fast if the free list is non-empty; it should
43 : * only involve the execution of 4 or 5 simple instructions.
44 : * All composite objects on freelists are cleared, except for
45 : * their first word.
46 : */
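/*
 * Illustrative sketch (not part of the original file): the fast path
 * above written out as a compilable helper. GC_objfreelist, obj_link,
 * GC_allocobj and NORMAL are the collector's own names; the wrapper
 * itself is hypothetical, assumes a single-threaded client, and is
 * guarded out of the build.
 */
#if 0
static void *example_alloc_small(size_t i)
{
    void **opp = &(GC_objfreelist[i]);
    void *ptr;

    if (*opp == 0 && GC_allocobj(i, NORMAL) == 0)
        return 0; /* both collection and heap expansion failed */
    ptr = *opp;
    *opp = obj_link(ptr); /* the caller must unlink the first entry */
    return ptr;
}
#endif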
47 :
48 : /*
49 : * The allocator uses GC_allochblk to allocate large chunks of objects.
50 : * These chunks all start on addresses which are multiples of
51 : * HBLKSIZE. Each allocated chunk has an associated header,
52 : * which can be located quickly based on the address of the chunk.
53 : * (See headers.c for details.)
54 : * This makes it possible to check quickly whether an
55 : * arbitrary address corresponds to an object administered by the
56 : * allocator.
57 : */
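/*
 * Illustrative sketch (not part of the original file): because every
 * chunk starts on an HBLKSIZE boundary, the enclosing block of an
 * arbitrary address is found by masking the low-order bits. HBLKPTR
 * in gc_priv.h does essentially this; HDR(h) then locates the header.
 */
#if 0
static struct hblk *example_enclosing_block(void *p)
{
    /* Assumes HBLKSIZE is a power of two, as in gc_priv.h. */
    return (struct hblk *)((word)p & ~(word)(HBLKSIZE - 1));
}
#endif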
58 :
59 : word GC_non_gc_bytes = 0; /* Number of bytes not intended to be collected */
60 :
61 : word GC_gc_no = 0;
62 :
63 : #ifndef GC_DISABLE_INCREMENTAL
64 : GC_INNER int GC_incremental = 0; /* By default, stop the world. */
65 : #endif
66 :
67 : #ifdef THREADS
68 : int GC_parallel = FALSE; /* By default, parallel GC is off. */
69 : #endif
70 :
71 : #ifndef GC_FULL_FREQ
72 : # define GC_FULL_FREQ 19 /* Every 20th collection is a full */
73 : /* collection, whether we need it */
74 : /* or not. */
75 : #endif
76 :
77 : int GC_full_freq = GC_FULL_FREQ;
78 :
79 : STATIC GC_bool GC_need_full_gc = FALSE;
80 : /* Need full GC due to heap growth. */
81 :
82 : #ifdef THREAD_LOCAL_ALLOC
83 : GC_INNER GC_bool GC_world_stopped = FALSE;
84 : #endif
85 :
86 : STATIC word GC_used_heap_size_after_full = 0;
87 :
88 : /* GC_copyright symbol is externally visible. */
89 : char * const GC_copyright[] =
90 : {"Copyright 1988,1989 Hans-J. Boehm and Alan J. Demers ",
91 : "Copyright (c) 1991-1995 by Xerox Corporation. All rights reserved. ",
92 : "Copyright (c) 1996-1998 by Silicon Graphics. All rights reserved. ",
93 : "Copyright (c) 1999-2009 by Hewlett-Packard Company. All rights reserved. ",
94 : "THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY",
95 : " EXPRESSED OR IMPLIED. ANY USE IS AT YOUR OWN RISK.",
96 : "See source code for details." };
97 :
98 : /* Version macros are now defined in gc_version.h, which is included by */
99 : /* gc.h, which is included by gc_priv.h. */
100 : #ifndef GC_NO_VERSION_VAR
101 : const unsigned GC_version = ((GC_VERSION_MAJOR << 16) |
102 : (GC_VERSION_MINOR << 8) | GC_TMP_ALPHA_VERSION);
103 : #endif
104 :
105 0 : GC_API unsigned GC_CALL GC_get_version(void)
106 : {
107 0 : return (GC_VERSION_MAJOR << 16) | (GC_VERSION_MINOR << 8) |
108 : GC_TMP_ALPHA_VERSION;
109 : }
110 :
111 : /* some more variables */
112 :
113 : #ifdef GC_DONT_EXPAND
114 : GC_bool GC_dont_expand = TRUE;
115 : #else
116 : GC_bool GC_dont_expand = FALSE;
117 : #endif
118 :
119 : #ifndef GC_FREE_SPACE_DIVISOR
120 : # define GC_FREE_SPACE_DIVISOR 3 /* must be > 0 */
121 : #endif
122 :
123 : word GC_free_space_divisor = GC_FREE_SPACE_DIVISOR;
124 :
125 74825 : GC_INNER int GC_CALLBACK GC_never_stop_func(void)
126 : {
127 74825 : return(0);
128 : }
129 :
130 : #ifndef GC_TIME_LIMIT
131 : # define GC_TIME_LIMIT 50 /* We try to keep pause times from exceeding */
132 : /* this by much. In milliseconds. */
133 : #endif
134 :
135 : unsigned long GC_time_limit = GC_TIME_LIMIT;
136 :
137 : #ifndef NO_CLOCK
138 : STATIC CLOCK_TYPE GC_start_time = 0;
139 : /* Time at which we stopped the world. */
140 : /* Used only in GC_timeout_stop_func. */
141 : #endif
142 :
143 : STATIC int GC_n_attempts = 0; /* Number of attempts at finishing */
144 : /* collection within GC_time_limit. */
145 :
146 : STATIC GC_stop_func GC_default_stop_func = GC_never_stop_func;
147 : /* accessed holding the lock. */
148 :
149 0 : GC_API void GC_CALL GC_set_stop_func(GC_stop_func stop_func)
150 : {
151 : DCL_LOCK_STATE;
152 : GC_ASSERT(stop_func != 0);
153 0 : LOCK();
154 0 : GC_default_stop_func = stop_func;
155 0 : UNLOCK();
156 0 : }
157 :
158 0 : GC_API GC_stop_func GC_CALL GC_get_stop_func(void)
159 : {
160 : GC_stop_func stop_func;
161 : DCL_LOCK_STATE;
162 0 : LOCK();
163 0 : stop_func = GC_default_stop_func;
164 0 : UNLOCK();
165 0 : return stop_func;
166 : }
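/*
 * Usage sketch (client code; the example_* names are hypothetical):
 * a cooperative abort flag installed through GC_set_stop_func above.
 * The collector polls the stop function during marking; a nonzero
 * result asks it to abandon the current collection.
 */
static volatile int example_abort_requested = 0;

static int GC_CALLBACK example_stop_func(void)
{
    return example_abort_requested;
}
/* At startup: GC_set_stop_func(example_stop_func); */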
167 :
168 : #if defined(GC_DISABLE_INCREMENTAL) || defined(NO_CLOCK)
169 : # define GC_timeout_stop_func GC_default_stop_func
170 : #else
171 0 : STATIC int GC_CALLBACK GC_timeout_stop_func (void)
172 : {
173 : CLOCK_TYPE current_time;
174 : static unsigned count = 0;
175 : unsigned long time_diff;
176 :
177 0 : if ((*GC_default_stop_func)())
178 0 : return(1);
179 :
180 0 : if ((count++ & 3) != 0) return(0);
181 0 : GET_TIME(current_time);
182 0 : time_diff = MS_TIME_DIFF(current_time,GC_start_time);
183 0 : if (time_diff >= GC_time_limit) {
184 0 : if (GC_print_stats) {
185 0 : GC_log_printf(
186 : "Abandoning stopped marking after %lu msecs (attempt %d)\n",
187 : time_diff, GC_n_attempts);
188 : }
189 0 : return(1);
190 : }
191 0 : return(0);
192 : }
193 : #endif /* !GC_DISABLE_INCREMENTAL && !NO_CLOCK */
194 :
195 : #ifdef THREADS
196 : GC_INNER word GC_total_stacksize = 0; /* updated on every push_all_stacks */
197 : #endif
198 :
199 : /* Return the minimum number of bytes that must be allocated between */
200 : /* collections to amortize the collection cost. */
201 442 : static word min_bytes_allocd(void)
202 : {
203 : # ifdef STACK_GROWS_UP
204 : word stack_size = GC_approx_sp() - GC_stackbottom;
205 : /* GC_stackbottom is used only for a single-threaded case. */
206 : # else
207 442 : word stack_size = GC_stackbottom - GC_approx_sp();
208 : # endif
209 :
210 : word total_root_size; /* includes double stack size, */
211 : /* since the stack is expensive */
212 : /* to scan. */
213 : word scan_size; /* Estimate of memory to be scanned */
214 : /* during normal GC. */
215 :
216 : # ifdef THREADS
217 442 : if (GC_need_to_lock) {
218 : /* We are multi-threaded... */
219 116 : stack_size = GC_total_stacksize;
220 : /* For now, we just use the value computed during the latest GC. */
221 : # ifdef DEBUG_THREADS
222 : GC_log_printf("Total stacks size: %lu\n",
223 : (unsigned long)stack_size);
224 : # endif
225 : }
226 : # endif
227 :
228 442 : total_root_size = 2 * stack_size + GC_root_size;
229 884 : scan_size = 2 * GC_composite_in_use + GC_atomic_in_use / 4
230 442 : + total_root_size;
231 442 : if (GC_incremental) {
232 0 : return scan_size / (2 * GC_free_space_divisor);
233 : } else {
234 442 : return scan_size / GC_free_space_divisor;
235 : }
236 : }
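/*
 * Worked example with illustrative numbers: a 1 MB stack and 64 KB of
 * static roots give total_root_size = 2*1024 + 64 = 2112 KB. With
 * 6 MB composite and 4 MB atomic data in use, scan_size = 2*6144
 * + 4096/4 + 2112 = 15424 KB; at GC_free_space_divisor = 3, a
 * non-incremental collection is amortized after roughly 5141 KB of
 * fresh allocation.
 */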
237 :
238 : /* Return the number of bytes allocated, adjusted for explicit storage */
239 : /* management, etc.. This number is used in deciding when to trigger */
240 : /* collections. */
241 24209 : STATIC word GC_adj_bytes_allocd(void)
242 : {
243 : signed_word result;
244 24209 : signed_word expl_managed = (signed_word)GC_non_gc_bytes
245 24209 : - (signed_word)GC_non_gc_bytes_at_gc;
246 :
247 : /* Don't count what was explicitly freed, or newly allocated for */
248 : /* explicit management. Note that deallocating an explicitly */
249 : /* managed object should not alter result, assuming the client */
250 : /* is playing by the rules. */
251 24209 : result = (signed_word)GC_bytes_allocd
252 : + (signed_word)GC_bytes_dropped
253 24209 : - (signed_word)GC_bytes_freed
254 24209 : + (signed_word)GC_finalizer_bytes_freed
255 24209 : - expl_managed;
256 24209 : if (result > (signed_word)GC_bytes_allocd) {
257 17100 : result = GC_bytes_allocd;
258 : /* probably client bug or unfortunate scheduling */
259 : }
260 24209 : result += GC_bytes_finalized;
261 : /* We count objects enqueued for finalization as though they */
262 : /* had been reallocated this round. Finalization is user */
263 : /* visible progress. And if we don't count this, we have */
264 : /* stability problems for programs that finalize all objects. */
265 24209 : if (result < (signed_word)(GC_bytes_allocd >> 3)) {
266 : /* Always count at least 1/8 of the allocations. We don't want */
267 : /* to collect too infrequently, since that would inhibit */
268 : /* coalescing of free storage blocks. */
269 : /* This also makes us partially robust against client bugs. */
270 0 : return(GC_bytes_allocd >> 3);
271 : } else {
272 24209 : return(result);
273 : }
274 : }
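/*
 * Worked example with illustrative numbers: 1000 KB allocated, 100 KB
 * dropped, 300 KB explicitly freed, 50 KB freed by finalizers and no
 * change in GC_non_gc_bytes give result = 1000 + 100 - 300 + 50
 * = 850 KB; with 40 KB newly enqueued for finalization, 890 KB is
 * charged toward triggering the next collection (well above the
 * GC_bytes_allocd/8 floor of 125 KB).
 */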
275 :
276 :
277 : /* Clear up a few frames worth of garbage left at the top of the stack. */
278 : /* This is used to prevent us from accidentally treating garbage left */
279 : /* on the stack by other parts of the collector as roots. This */
280 : /* differs from the code in misc.c, which actually tries to keep the */
281 : /* stack clear of long-lived, client-generated garbage. */
282 252 : STATIC void GC_clear_a_few_frames(void)
283 : {
284 : # ifndef CLEAR_NWORDS
285 : # define CLEAR_NWORDS 64
286 : # endif
287 : volatile word frames[CLEAR_NWORDS];
288 252 : BZERO((word *)frames, CLEAR_NWORDS * sizeof(word));
289 252 : }
290 :
291 : /* Heap size at which we need a collection to avoid expanding past */
292 : /* limits used by blacklisting. */
293 : STATIC word GC_collect_at_heapsize = (word)(-1);
294 :
295 : /* Have we allocated enough to amortize a collection? */
296 24209 : GC_INNER GC_bool GC_should_collect(void)
297 : {
298 : static word last_min_bytes_allocd;
299 : static word last_gc_no;
300 24209 : if (last_gc_no != GC_gc_no) {
301 91 : last_gc_no = GC_gc_no;
302 91 : last_min_bytes_allocd = min_bytes_allocd();
303 : }
304 42713 : return(GC_adj_bytes_allocd() >= last_min_bytes_allocd
305 42713 : || GC_heapsize >= GC_collect_at_heapsize);
306 : }
307 :
308 : /* STATIC */ GC_start_callback_proc GC_start_call_back = 0;
309 : /* Called at start of full collections. */
310 : /* Not called if 0. Called with the allocation */
311 : /* lock held. Not used by GC itself. */
312 :
313 0 : GC_API void GC_CALL GC_set_start_callback(GC_start_callback_proc fn)
314 : {
315 : DCL_LOCK_STATE;
316 0 : LOCK();
317 0 : GC_start_call_back = fn;
318 0 : UNLOCK();
319 0 : }
320 :
321 0 : GC_API GC_start_callback_proc GC_CALL GC_get_start_callback(void)
322 : {
323 : GC_start_callback_proc fn;
324 : DCL_LOCK_STATE;
325 0 : LOCK();
326 0 : fn = GC_start_call_back;
327 0 : UNLOCK();
328 0 : return fn;
329 : }
330 :
331 252 : GC_INLINE void GC_notify_full_gc(void)
332 : {
333 252 : if (GC_start_call_back != 0) {
334 0 : (*GC_start_call_back)();
335 : }
336 252 : }
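/*
 * Usage sketch (client code; the example_* names are hypothetical):
 * a hook registered via GC_set_start_callback above. It runs at the
 * start of full collections with the allocation lock held, so it
 * must not allocate or take locks.
 */
static unsigned example_full_gc_count = 0;

static void GC_CALLBACK example_on_full_gc_start(void)
{
    ++example_full_gc_count; /* cheap bookkeeping only */
}
/* At startup: GC_set_start_callback(example_on_full_gc_start); */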
337 :
338 : STATIC GC_bool GC_is_full_gc = FALSE;
339 :
340 : STATIC GC_bool GC_stopped_mark(GC_stop_func stop_func);
341 : STATIC void GC_finish_collection(void);
342 :
343 : /*
344 : * Initiate a garbage collection if appropriate. Choose
345 : * judiciously between partial, full, and stop-world
346 : * collections.
347 : */
348 0 : STATIC void GC_maybe_gc(void)
349 : {
350 : static int n_partial_gcs = 0;
351 :
352 : GC_ASSERT(I_HOLD_LOCK());
353 : ASSERT_CANCEL_DISABLED();
354 0 : if (GC_should_collect()) {
355 0 : if (!GC_incremental) {
356 : /* FIXME: If possible, GC_default_stop_func should be used here */
357 0 : GC_try_to_collect_inner(GC_never_stop_func);
358 0 : n_partial_gcs = 0;
359 0 : return;
360 : } else {
361 : # ifdef PARALLEL_MARK
362 : if (GC_parallel)
363 : GC_wait_for_reclaim();
364 : # endif
365 0 : if (GC_need_full_gc || n_partial_gcs >= GC_full_freq) {
366 0 : if (GC_print_stats) {
367 0 : GC_log_printf(
368 : "***>Full mark for collection %lu after %ld allocd bytes\n",
369 : (unsigned long)GC_gc_no + 1, (long)GC_bytes_allocd);
370 : }
371 0 : GC_promote_black_lists();
372 0 : (void)GC_reclaim_all((GC_stop_func)0, TRUE);
373 0 : GC_notify_full_gc();
374 0 : GC_clear_marks();
375 0 : n_partial_gcs = 0;
376 0 : GC_is_full_gc = TRUE;
377 : } else {
378 0 : n_partial_gcs++;
379 : }
380 : }
381 : /* We try to mark with the world stopped. */
382 : /* If we run out of time, this turns into */
383 : /* incremental marking. */
384 : # ifndef NO_CLOCK
385 0 : if (GC_time_limit != GC_TIME_UNLIMITED) { GET_TIME(GC_start_time); }
386 : # endif
387 : /* FIXME: If possible, GC_default_stop_func should be */
388 : /* used instead of GC_never_stop_func here. */
389 0 : if (GC_stopped_mark(GC_time_limit == GC_TIME_UNLIMITED?
390 : GC_never_stop_func : GC_timeout_stop_func)) {
391 : # ifdef SAVE_CALL_CHAIN
392 : GC_save_callers(GC_last_stack);
393 : # endif
394 0 : GC_finish_collection();
395 : } else {
396 0 : if (!GC_is_full_gc) {
397 : /* Count this as the first attempt */
398 0 : GC_n_attempts++;
399 : }
400 : }
401 : }
402 : }
403 :
404 :
405 : /*
406 : * Stop-the-world garbage collection. Assumes the lock is held. Unless
407 : * stop_func is GC_never_stop_func, abort if stop_func returns TRUE.
408 : * Return TRUE if we successfully completed the collection.
409 : */
410 252 : GC_INNER GC_bool GC_try_to_collect_inner(GC_stop_func stop_func)
411 : {
412 : # ifndef SMALL_CONFIG
413 252 : CLOCK_TYPE start_time = 0; /* initialized to prevent warning. */
414 : CLOCK_TYPE current_time;
415 : # endif
416 : ASSERT_CANCEL_DISABLED();
417 252 : if (GC_dont_gc || (*stop_func)()) return FALSE;
418 252 : if (GC_incremental && GC_collection_in_progress()) {
419 0 : if (GC_print_stats) {
420 0 : GC_log_printf(
421 : "GC_try_to_collect_inner: finishing collection in progress\n");
422 : }
423 : /* Just finish collection already in progress. */
424 0 : while(GC_collection_in_progress()) {
425 0 : if ((*stop_func)()) return(FALSE);
426 0 : GC_collect_a_little_inner(1);
427 : }
428 : }
429 252 : GC_notify_full_gc();
430 : # ifndef SMALL_CONFIG
431 252 : if (GC_print_stats) {
432 0 : GET_TIME(start_time);
433 0 : GC_log_printf("Initiating full world-stop collection!\n");
434 : }
435 : # endif
436 252 : GC_promote_black_lists();
437 : /* Make sure all blocks have been reclaimed, so sweep routines */
438 : /* don't see cleared mark bits. */
439 : /* If we're guaranteed to finish, then this is unnecessary. */
440 : /* In the find_leak case, we have to finish to guarantee that */
441 : /* previously unmarked objects are not reported as leaks. */
442 : # ifdef PARALLEL_MARK
443 : if (GC_parallel)
444 : GC_wait_for_reclaim();
445 : # endif
446 252 : if ((GC_find_leak || stop_func != GC_never_stop_func)
447 0 : && !GC_reclaim_all(stop_func, FALSE)) {
448 : /* Aborted. So far everything is still consistent. */
449 0 : return(FALSE);
450 : }
451 252 : GC_invalidate_mark_state(); /* Flush mark stack. */
452 252 : GC_clear_marks();
453 : # ifdef SAVE_CALL_CHAIN
454 : GC_save_callers(GC_last_stack);
455 : # endif
456 252 : GC_is_full_gc = TRUE;
457 252 : if (!GC_stopped_mark(stop_func)) {
458 0 : if (!GC_incremental) {
459 : /* We're partially done and have no way to complete or use */
460 : /* current work. Reestablish invariants as cheaply as */
461 : /* possible. */
462 0 : GC_invalidate_mark_state();
463 0 : GC_unpromote_black_lists();
464 : } /* else we claim the world is already still consistent. We'll */
465 : /* finish incrementally. */
466 0 : return(FALSE);
467 : }
468 252 : GC_finish_collection();
469 : # ifndef SMALL_CONFIG
470 252 : if (GC_print_stats) {
471 0 : GET_TIME(current_time);
472 0 : GC_log_printf("Complete collection took %lu msecs\n",
473 0 : MS_TIME_DIFF(current_time,start_time));
474 : }
475 : # endif
476 252 : return(TRUE);
477 : }
478 :
479 : /*
480 : * Perform n units of garbage collection work. A unit is intended to touch
481 : * roughly GC_RATE pages. Every once in a while, we do more than that.
482 : * This needs to be a fairly large number with our current incremental
483 : * GC strategy, since otherwise we allocate too much during GC, and the
484 : * cleanup gets expensive.
485 : */
486 : #ifndef GC_RATE
487 : # define GC_RATE 10
488 : #endif
489 : #ifndef MAX_PRIOR_ATTEMPTS
490 : # define MAX_PRIOR_ATTEMPTS 1
491 : #endif
492 : /* Maximum number of prior attempts at world-stop marking. */
493 : /* A value of 1 means that we finish the second time, no matter */
494 : /* how long it takes. Doesn't count the initial root scan */
495 : /* for a full GC. */
496 :
497 : STATIC int GC_deficit = 0;/* The number of extra calls to GC_mark_some */
498 : /* that we have made. */
499 :
500 0 : GC_INNER void GC_collect_a_little_inner(int n)
501 : {
502 : int i;
503 : IF_CANCEL(int cancel_state;)
504 :
505 0 : if (GC_dont_gc) return;
506 0 : DISABLE_CANCEL(cancel_state);
507 0 : if (GC_incremental && GC_collection_in_progress()) {
508 0 : for (i = GC_deficit; i < GC_RATE*n; i++) {
509 0 : if (GC_mark_some((ptr_t)0)) {
510 : /* Need to finish a collection */
511 : # ifdef SAVE_CALL_CHAIN
512 : GC_save_callers(GC_last_stack);
513 : # endif
514 : # ifdef PARALLEL_MARK
515 : if (GC_parallel)
516 : GC_wait_for_reclaim();
517 : # endif
518 0 : if (GC_n_attempts < MAX_PRIOR_ATTEMPTS
519 0 : && GC_time_limit != GC_TIME_UNLIMITED) {
520 : # ifndef NO_CLOCK
521 0 : GET_TIME(GC_start_time);
522 : # endif
523 0 : if (!GC_stopped_mark(GC_timeout_stop_func)) {
524 0 : GC_n_attempts++;
525 0 : break;
526 : }
527 : } else {
528 : /* FIXME: If possible, GC_default_stop_func should be */
529 : /* used here. */
530 0 : (void)GC_stopped_mark(GC_never_stop_func);
531 : }
532 0 : GC_finish_collection();
533 0 : break;
534 : }
535 : }
536 0 : if (GC_deficit > 0) GC_deficit -= GC_RATE*n;
537 0 : if (GC_deficit < 0) GC_deficit = 0;
538 : } else {
539 0 : GC_maybe_gc();
540 : }
541 0 : RESTORE_CANCEL(cancel_state);
542 : }
543 :
544 : GC_INNER void (*GC_check_heap)(void) = 0;
545 : GC_INNER void (*GC_print_all_smashed)(void) = 0;
546 :
547 0 : GC_API int GC_CALL GC_collect_a_little(void)
548 : {
549 : int result;
550 : DCL_LOCK_STATE;
551 :
552 0 : LOCK();
553 0 : GC_collect_a_little_inner(1);
554 0 : result = (int)GC_collection_in_progress();
555 0 : UNLOCK();
556 0 : if (!result && GC_debugging_started) GC_print_all_smashed();
557 0 : return(result);
558 : }
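/*
 * Usage sketch (client code; example_idle_tick is hypothetical):
 * driving incremental collection from an application idle loop.
 * GC_enable_incremental and GC_collect_a_little are public (gc.h);
 * the calling policy is an assumption for illustration.
 */
static void example_idle_tick(void)
{
    /* Perform one bounded unit of GC work; a nonzero result means */
    /* the collection is still in progress, so call again on the */
    /* next idle tick (or loop while idle time remains). */
    (void)GC_collect_a_little();
}
/* Once at startup: GC_enable_incremental(); */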
559 :
560 : #ifndef SMALL_CONFIG
561 : /* Variables for world-stop average delay time statistic computation. */
562 : /* "divisor" is incremented every world-stop and halved when reached */
563 : /* its maximum (or upon "total_time" oveflow). */
564 : static unsigned world_stopped_total_time = 0;
565 : static unsigned world_stopped_total_divisor = 0;
566 : # ifndef MAX_TOTAL_TIME_DIVISOR
567 : /* We shall not use big values here (so "outdated" delay time */
568 : /* values would have less impact on "average" delay time value than */
569 : /* newer ones). */
570 : # define MAX_TOTAL_TIME_DIVISOR 1000
571 : # endif
572 : #endif
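/*
 * Worked example: after four stops of 10, 20, 30 and 40 msecs,
 * total_time = 100 and divisor = 4, so the reported average is
 * 25 msecs. Halving both values on overflow (or once the divisor
 * reaches MAX_TOTAL_TIME_DIVISOR) preserves the ratio while giving
 * newer pauses more weight than older ones.
 */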
573 :
574 : /*
575 : * Assumes lock is held. We stop the world and mark from all roots.
576 : * If stop_func() ever returns TRUE, we may fail and return FALSE.
577 : * Increment GC_gc_no if we succeed.
578 : */
579 252 : STATIC GC_bool GC_stopped_mark(GC_stop_func stop_func)
580 : {
581 : unsigned i;
582 : # ifndef SMALL_CONFIG
583 252 : CLOCK_TYPE start_time = 0; /* initialized to prevent warning. */
584 : CLOCK_TYPE current_time;
585 : # endif
586 :
587 : # if !defined(REDIRECT_MALLOC) && (defined(MSWIN32) || defined(MSWINCE))
588 : GC_add_current_malloc_heap();
589 : # endif
590 : # if defined(REGISTER_LIBRARIES_EARLY)
591 252 : GC_cond_register_dynamic_libraries();
592 : # endif
593 :
594 : # ifndef SMALL_CONFIG
595 252 : if (GC_print_stats)
596 0 : GET_TIME(start_time);
597 : # endif
598 :
599 252 : STOP_WORLD();
600 : # ifdef THREAD_LOCAL_ALLOC
601 252 : GC_world_stopped = TRUE;
602 : # endif
603 252 : if (GC_print_stats) {
604 : /* Output blank line for convenience here */
605 0 : GC_log_printf(
606 : "\n--> Marking for collection %lu after %lu allocated bytes\n",
607 : (unsigned long)GC_gc_no + 1, (unsigned long) GC_bytes_allocd);
608 : }
609 : # ifdef MAKE_BACK_GRAPH
610 : if (GC_print_back_height) {
611 : GC_build_back_graph();
612 : }
613 : # endif
614 :
615 : /* Mark from all roots. */
616 : /* Minimize junk left in my registers and on the stack */
617 252 : GC_clear_a_few_frames();
618 252 : GC_noop(0,0,0,0,0,0);
619 252 : GC_initiate_gc();
620 74573 : for (i = 0;;i++) {
621 74573 : if ((*stop_func)()) {
622 0 : if (GC_print_stats) {
623 0 : GC_log_printf("Abandoned stopped marking after %u iterations\n",
624 : i);
625 : }
626 0 : GC_deficit = i; /* Give the mutator a chance. */
627 : # ifdef THREAD_LOCAL_ALLOC
628 0 : GC_world_stopped = FALSE;
629 : # endif
630 0 : START_WORLD();
631 0 : return(FALSE);
632 : }
633 74573 : if (GC_mark_some(GC_approx_sp())) break;
634 74321 : }
635 :
636 252 : GC_gc_no++;
637 252 : if (GC_print_stats) {
638 0 : GC_log_printf(
639 : "Collection %lu reclaimed %ld bytes ---> heapsize = %lu bytes\n",
640 : (unsigned long)(GC_gc_no - 1), (long)GC_bytes_found,
641 : (unsigned long)GC_heapsize);
642 : }
643 :
644 : /* Check all debugged objects for consistency */
645 252 : if (GC_debugging_started) {
646 0 : (*GC_check_heap)();
647 : }
648 :
649 : # ifdef THREAD_LOCAL_ALLOC
650 252 : GC_world_stopped = FALSE;
651 : # endif
652 252 : START_WORLD();
653 : # ifndef SMALL_CONFIG
654 252 : if (GC_print_stats) {
655 : unsigned long time_diff;
656 : unsigned total_time, divisor;
657 0 : GET_TIME(current_time);
658 0 : time_diff = MS_TIME_DIFF(current_time,start_time);
659 :
660 : /* Compute new world-stop delay total time */
661 0 : total_time = world_stopped_total_time;
662 0 : divisor = world_stopped_total_divisor;
663 0 : if ((int)total_time < 0 || divisor >= MAX_TOTAL_TIME_DIVISOR) {
664 : /* Halve values if overflow occurs */
665 0 : total_time >>= 1;
666 0 : divisor >>= 1;
667 : }
668 0 : total_time += time_diff < (((unsigned)-1) >> 1) ?
669 : (unsigned)time_diff : ((unsigned)-1) >> 1;
670 : /* Update old world_stopped_total_time and its divisor */
671 0 : world_stopped_total_time = total_time;
672 0 : world_stopped_total_divisor = ++divisor;
673 :
674 : GC_ASSERT(divisor != 0);
675 0 : GC_log_printf(
676 : "World-stopped marking took %lu msecs (%u in average)\n",
677 : time_diff, total_time / divisor);
678 : }
679 : # endif
680 252 : return(TRUE);
681 : }
682 :
683 : /* Set all mark bits for the free list whose first entry is q */
684 2093 : GC_INNER void GC_set_fl_marks(ptr_t q)
685 : {
686 : struct hblk *h, *last_h;
687 : hdr *hhdr;
688 : IF_PER_OBJ(size_t sz;)
689 : unsigned bit_no;
690 :
691 2093 : if (q != NULL) {
692 2093 : h = HBLKPTR(q);
693 2093 : last_h = h;
694 2093 : hhdr = HDR(h);
695 : IF_PER_OBJ(sz = hhdr->hb_sz;)
696 :
697 : for (;;) {
698 39852 : bit_no = MARK_BIT_NO((ptr_t)q - (ptr_t)h, sz);
699 39852 : if (!mark_bit_from_hdr(hhdr, bit_no)) {
700 37044 : set_mark_bit_from_hdr(hhdr, bit_no);
701 37044 : ++hhdr -> hb_n_marks;
702 : }
703 :
704 39852 : q = obj_link(q);
705 39852 : if (q == NULL)
706 2093 : break;
707 :
708 37759 : h = HBLKPTR(q);
709 37759 : if (h != last_h) {
710 0 : last_h = h;
711 0 : hhdr = HDR(h);
712 : IF_PER_OBJ(sz = hhdr->hb_sz;)
713 : }
714 37759 : }
715 : }
716 2093 : }
717 :
718 : #if defined(GC_ASSERTIONS) && defined(THREADS) && defined(THREAD_LOCAL_ALLOC)
719 : /* Check that all mark bits for the free list whose first entry is */
720 : /* (*pfreelist) are set. Check is skipped if it points to a special value. */
721 : void GC_check_fl_marks(void **pfreelist)
722 : {
723 : # ifdef AO_HAVE_load_acquire_read
724 : AO_t *list = (AO_t *)AO_load_acquire_read((AO_t *)pfreelist);
725 : /* Atomic operations are used because the world is running. */
726 : AO_t *prev;
727 : AO_t *p;
728 :
729 : if ((word)list <= HBLKSIZE) return;
730 :
731 : prev = (AO_t *)pfreelist;
732 : for (p = list; p != NULL;) {
733 : AO_t *next;
734 :
735 : if (!GC_is_marked((ptr_t)p)) {
736 : GC_err_printf("Unmarked object %p on list %p\n",
737 : (void *)p, (void *)list);
738 : ABORT("Unmarked local free list entry");
739 : }
740 :
741 : /* While traversing the free-list, it re-reads the pointer to */
742 : /* the current node before accepting its next pointer and */
743 : /* bails out if the latter has changed. That way, it won't */
744 : /* try to follow the pointer which might have been modified */
745 : /* after the object was returned to the client. It might */
746 : /* perform the mark-check on the just allocated object but */
747 : /* that should be harmless. */
748 : next = (AO_t *)AO_load_acquire_read(p);
749 : if (AO_load(prev) != (AO_t)p)
750 : break;
751 : prev = p;
752 : p = next;
753 : }
754 : # else
755 : /* FIXME: Not implemented (just skipped). */
756 : (void)pfreelist;
757 : # endif
758 : }
759 : #endif /* GC_ASSERTIONS && THREAD_LOCAL_ALLOC */
760 :
761 : /* Clear all mark bits for the free list whose first entry is q */
762 : /* Decrement GC_bytes_found by number of bytes on free list. */
763 804 : STATIC void GC_clear_fl_marks(ptr_t q)
764 : {
765 : struct hblk *h, *last_h;
766 : hdr *hhdr;
767 : size_t sz;
768 : unsigned bit_no;
769 :
770 804 : if (q != NULL) {
771 804 : h = HBLKPTR(q);
772 804 : last_h = h;
773 804 : hhdr = HDR(h);
774 804 : sz = hhdr->hb_sz; /* Normally set only once. */
775 :
776 : for (;;) {
777 28429 : bit_no = MARK_BIT_NO((ptr_t)q - (ptr_t)h, sz);
778 28429 : if (mark_bit_from_hdr(hhdr, bit_no)) {
779 5582 : size_t n_marks = hhdr -> hb_n_marks - 1;
780 5582 : clear_mark_bit_from_hdr(hhdr, bit_no);
781 : # ifdef PARALLEL_MARK
782 : /* Appr. count, don't decrement to zero! */
783 : if (0 != n_marks || !GC_parallel) {
784 : hhdr -> hb_n_marks = n_marks;
785 : }
786 : # else
787 5582 : hhdr -> hb_n_marks = n_marks;
788 : # endif
789 : }
790 28429 : GC_bytes_found -= sz;
791 :
792 28429 : q = obj_link(q);
793 28429 : if (q == NULL)
794 804 : break;
795 :
796 27625 : h = HBLKPTR(q);
797 27625 : if (h != last_h) {
798 0 : last_h = h;
799 0 : hhdr = HDR(h);
800 0 : sz = hhdr->hb_sz;
801 : }
802 27625 : }
803 : }
804 804 : }
805 :
806 : #if defined(GC_ASSERTIONS) && defined(THREADS) && defined(THREAD_LOCAL_ALLOC)
807 : void GC_check_tls(void);
808 : #endif
809 :
810 : /* Finish up a collection. Assumes mark bits are consistent, lock is */
811 : /* held, but the world is otherwise running. */
812 252 : STATIC void GC_finish_collection(void)
813 : {
814 : # ifndef SMALL_CONFIG
815 252 : CLOCK_TYPE start_time = 0; /* initialized to prevent warning. */
816 252 : CLOCK_TYPE finalize_time = 0;
817 : CLOCK_TYPE done_time;
818 : # endif
819 :
820 : # if defined(GC_ASSERTIONS) && defined(THREADS) \
821 : && defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
822 : /* Check that we marked some of our own data. */
823 : /* FIXME: Add more checks. */
824 : GC_check_tls();
825 : # endif
826 :
827 : # ifndef SMALL_CONFIG
828 252 : if (GC_print_stats)
829 0 : GET_TIME(start_time);
830 : # endif
831 :
832 252 : GC_bytes_found = 0;
833 : # if defined(LINUX) && defined(__ELF__) && !defined(SMALL_CONFIG)
834 252 : if (GETENV("GC_PRINT_ADDRESS_MAP") != 0) {
835 0 : GC_print_address_map();
836 : }
837 : # endif
838 252 : COND_DUMP;
839 252 : if (GC_find_leak) {
840 : /* Mark all objects on the free list. All objects should be */
841 : /* marked when we're done. */
842 : word size; /* current object size */
843 : unsigned kind;
844 : ptr_t q;
845 :
846 0 : for (kind = 0; kind < GC_n_kinds; kind++) {
847 0 : for (size = 1; size <= MAXOBJGRANULES; size++) {
848 0 : q = GC_obj_kinds[kind].ok_freelist[size];
849 0 : if (q != 0) GC_set_fl_marks(q);
850 : }
851 : }
852 0 : GC_start_reclaim(TRUE);
853 : /* The above just checks; it doesn't really reclaim anything. */
854 : }
855 :
856 252 : GC_finalize();
857 : # ifdef STUBBORN_ALLOC
858 : GC_clean_changing_list();
859 : # endif
860 :
861 : # ifndef SMALL_CONFIG
862 252 : if (GC_print_stats)
863 0 : GET_TIME(finalize_time);
864 : # endif
865 :
866 252 : if (GC_print_back_height) {
867 : # ifdef MAKE_BACK_GRAPH
868 : GC_traverse_back_graph();
869 : # elif !defined(SMALL_CONFIG)
870 0 : GC_err_printf("Back height not available: "
871 : "Rebuild collector with -DMAKE_BACK_GRAPH\n");
872 : # endif
873 : }
874 :
875 : /* Clear free list mark bits, in case they got accidentally marked */
876 : /* (or GC_find_leak is set and they were intentionally marked). */
877 : /* Also subtract memory remaining from GC_bytes_found count. */
878 : /* Note that composite objects on free list are cleared. */
879 : /* Thus accidentally marking a free list is not a problem; only */
880 : /* objects on the list itself will be marked, and that's fixed here. */
881 : {
882 : word size; /* current object size */
883 : ptr_t q; /* pointer to current object */
884 : unsigned kind;
885 :
886 1260 : for (kind = 0; kind < GC_n_kinds; kind++) {
887 130032 : for (size = 1; size <= MAXOBJGRANULES; size++) {
888 129024 : q = GC_obj_kinds[kind].ok_freelist[size];
889 129024 : if (q != 0) GC_clear_fl_marks(q);
890 : }
891 : }
892 : }
893 :
894 252 : if (GC_print_stats == VERBOSE)
895 0 : GC_log_printf("Bytes recovered before sweep - f.l. count = %ld\n",
896 : (long)GC_bytes_found);
897 :
898 : /* Reconstruct free lists to contain everything not marked */
899 252 : GC_start_reclaim(FALSE);
900 252 : if (GC_print_stats) {
901 0 : GC_log_printf("Heap contains %lu pointer-containing "
902 : "+ %lu pointer-free reachable bytes\n",
903 : (unsigned long)GC_composite_in_use,
904 : (unsigned long)GC_atomic_in_use);
905 : }
906 252 : if (GC_is_full_gc) {
907 252 : GC_used_heap_size_after_full = USED_HEAP_SIZE;
908 252 : GC_need_full_gc = FALSE;
909 : } else {
910 0 : GC_need_full_gc = USED_HEAP_SIZE - GC_used_heap_size_after_full
911 0 : > min_bytes_allocd();
912 : }
913 :
914 252 : if (GC_print_stats == VERBOSE) {
915 : # ifdef USE_MUNMAP
916 : GC_log_printf("Immediately reclaimed %ld bytes in heap"
917 : " of size %lu bytes (%lu unmapped)\n",
918 : (long)GC_bytes_found, (unsigned long)GC_heapsize,
919 : (unsigned long)GC_unmapped_bytes);
920 : # else
921 0 : GC_log_printf(
922 : "Immediately reclaimed %ld bytes in heap of size %lu bytes\n",
923 : (long)GC_bytes_found, (unsigned long)GC_heapsize);
924 : # endif
925 : }
926 :
927 : /* Reset or increment counters for next cycle */
928 252 : GC_n_attempts = 0;
929 252 : GC_is_full_gc = FALSE;
930 252 : GC_bytes_allocd_before_gc += GC_bytes_allocd;
931 252 : GC_non_gc_bytes_at_gc = GC_non_gc_bytes;
932 252 : GC_bytes_allocd = 0;
933 252 : GC_bytes_dropped = 0;
934 252 : GC_bytes_freed = 0;
935 252 : GC_finalizer_bytes_freed = 0;
936 :
937 : # ifdef USE_MUNMAP
938 : GC_unmap_old();
939 : # endif
940 :
941 : # ifndef SMALL_CONFIG
942 252 : if (GC_print_stats) {
943 0 : GET_TIME(done_time);
944 :
945 : /* A convenient place to output finalization statistics. */
946 0 : GC_print_finalization_stats();
947 :
948 0 : GC_log_printf("Finalize plus initiate sweep took %lu + %lu msecs\n",
949 0 : MS_TIME_DIFF(finalize_time,start_time),
950 0 : MS_TIME_DIFF(done_time,finalize_time));
951 : }
952 : # endif
953 252 : }
954 :
955 : /* If stop_func == 0 then GC_default_stop_func is used instead. */
956 5 : STATIC GC_bool GC_try_to_collect_general(GC_stop_func stop_func,
957 : GC_bool force_unmap)
958 : {
959 : GC_bool result;
960 : # ifdef USE_MUNMAP
961 : int old_unmap_threshold;
962 : # endif
963 : IF_CANCEL(int cancel_state;)
964 : DCL_LOCK_STATE;
965 :
966 5 : if (!GC_is_initialized) GC_init();
967 5 : if (GC_debugging_started) GC_print_all_smashed();
968 5 : GC_INVOKE_FINALIZERS();
969 5 : LOCK();
970 5 : DISABLE_CANCEL(cancel_state);
971 : # ifdef USE_MUNMAP
972 : old_unmap_threshold = GC_unmap_threshold;
973 : if (force_unmap ||
974 : (GC_force_unmap_on_gcollect && old_unmap_threshold > 0))
975 : GC_unmap_threshold = 1; /* unmap as much as possible */
976 : # endif
977 5 : ENTER_GC();
978 : /* Minimize junk left in my registers */
979 5 : GC_noop(0,0,0,0,0,0);
980 5 : result = GC_try_to_collect_inner(stop_func != 0 ? stop_func :
981 : GC_default_stop_func);
982 5 : EXIT_GC();
983 : # ifdef USE_MUNMAP
984 : GC_unmap_threshold = old_unmap_threshold; /* restore */
985 : # endif
986 5 : RESTORE_CANCEL(cancel_state);
987 5 : UNLOCK();
988 5 : if (result) {
989 5 : if (GC_debugging_started) GC_print_all_smashed();
990 5 : GC_INVOKE_FINALIZERS();
991 : }
992 5 : return(result);
993 : }
994 :
995 : /* Externally callable routines to invoke full, stop-the-world collection. */
996 0 : GC_API int GC_CALL GC_try_to_collect(GC_stop_func stop_func)
997 : {
998 : GC_ASSERT(stop_func != 0);
999 0 : return (int)GC_try_to_collect_general(stop_func, FALSE);
1000 : }
1001 :
1002 5 : GC_API void GC_CALL GC_gcollect(void)
1003 : {
1004 : /* 0 is passed as stop_func to get GC_default_stop_func value */
1005 : /* while holding the allocation lock (to prevent data races). */
1006 5 : (void)GC_try_to_collect_general(0, FALSE);
1007 5 : if (GC_have_errors) GC_print_all_errors();
1008 5 : }
1009 :
1010 0 : GC_API void GC_CALL GC_gcollect_and_unmap(void)
1011 : {
1012 0 : (void)GC_try_to_collect_general(GC_never_stop_func, TRUE);
1013 0 : }
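/*
 * Usage sketch (client code; example_request_full_gc is hypothetical):
 * the public entry points above. GC_gcollect is the common case;
 * GC_try_to_collect bounds the pause with a caller-supplied stop
 * function; GC_gcollect_and_unmap additionally returns unused pages
 * to the OS where unmapping is configured.
 */
static void example_request_full_gc(void)
{
    GC_gcollect(); /* full, stop-the-world collection */
    /* Abortable variant: GC_try_to_collect(my_stop_func) returns 0 */
    /* if my_stop_func (any GC_stop_func) aborted the collection. */
}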
1014 :
1015 : GC_INNER word GC_n_heap_sects = 0;
1016 : /* Number of sections currently in heap. */
1017 :
1018 : #ifdef USE_PROC_FOR_LIBRARIES
1019 : GC_INNER word GC_n_memory = 0;
1020 : /* Number of GET_MEM allocated memory sections. */
1021 : #endif
1022 :
1023 : #ifdef USE_PROC_FOR_LIBRARIES
1024 : /* Add HBLKSIZE aligned, GET_MEM-generated block to GC_our_memory. */
1025 : /* Defined to do nothing if USE_PROC_FOR_LIBRARIES not set. */
1026 : GC_INNER void GC_add_to_our_memory(ptr_t p, size_t bytes)
1027 : {
1028 : if (0 == p) return;
1029 : if (GC_n_memory >= MAX_HEAP_SECTS)
1030 : ABORT("Too many GC-allocated memory sections: Increase MAX_HEAP_SECTS");
1031 : GC_our_memory[GC_n_memory].hs_start = p;
1032 : GC_our_memory[GC_n_memory].hs_bytes = bytes;
1033 : GC_n_memory++;
1034 : }
1035 : #endif
1036 :
1037 : /*
1038 : * Use the chunk of memory starting at p of size bytes as part of the heap.
1039 : * Assumes p is HBLKSIZE aligned, and bytes is a multiple of HBLKSIZE.
1040 : */
1041 357 : GC_INNER void GC_add_to_heap(struct hblk *p, size_t bytes)
1042 : {
1043 : hdr * phdr;
1044 : word endp;
1045 :
1046 357 : if (GC_n_heap_sects >= MAX_HEAP_SECTS) {
1047 0 : ABORT("Too many heap sections: Increase MAXHINCR or MAX_HEAP_SECTS");
1048 : }
1049 714 : while ((word)p <= HBLKSIZE) {
1050 : /* Can't handle memory near address zero. */
1051 0 : ++p;
1052 0 : bytes -= HBLKSIZE;
1053 0 : if (0 == bytes) return;
1054 : }
1055 357 : endp = (word)p + bytes;
1056 357 : if (endp <= (word)p) {
1057 : /* Address wrapped. */
1058 0 : bytes -= HBLKSIZE;
1059 0 : if (0 == bytes) return;
1060 0 : endp -= HBLKSIZE;
1061 : }
1062 357 : phdr = GC_install_header(p);
1063 357 : if (0 == phdr) {
1064 : /* This is extremely unlikely. Can't add it. This will */
1065 : /* almost certainly result in a 0 return from the allocator, */
1066 : /* which is entirely appropriate. */
1067 0 : return;
1068 : }
1069 : GC_ASSERT(endp > (word)p && endp == (word)p + bytes);
1070 357 : GC_heap_sects[GC_n_heap_sects].hs_start = (ptr_t)p;
1071 357 : GC_heap_sects[GC_n_heap_sects].hs_bytes = bytes;
1072 357 : GC_n_heap_sects++;
1073 357 : phdr -> hb_sz = bytes;
1074 357 : phdr -> hb_flags = 0;
1075 357 : GC_freehblk(p);
1076 357 : GC_heapsize += bytes;
1077 549 : if ((ptr_t)p <= (ptr_t)GC_least_plausible_heap_addr
1078 549 : || GC_least_plausible_heap_addr == 0) {
1079 165 : GC_least_plausible_heap_addr = (void *)((ptr_t)p - sizeof(word));
1080 : /* Making it a little smaller than necessary prevents */
1081 : /* us from getting a false hit from the variable */
1082 : /* itself. There's some unintentional reflection */
1083 : /* here. */
1084 : }
1085 357 : if ((ptr_t)p + bytes >= (ptr_t)GC_greatest_plausible_heap_addr) {
1086 0 : GC_greatest_plausible_heap_addr = (void *)endp;
1087 : }
1088 : }
1089 :
1090 : #if !defined(NO_DEBUGGING)
1091 0 : void GC_print_heap_sects(void)
1092 : {
1093 : unsigned i;
1094 :
1095 0 : GC_printf("Total heap size: %lu\n", (unsigned long)GC_heapsize);
1096 0 : for (i = 0; i < GC_n_heap_sects; i++) {
1097 0 : ptr_t start = GC_heap_sects[i].hs_start;
1098 0 : size_t len = GC_heap_sects[i].hs_bytes;
1099 : struct hblk *h;
1100 0 : unsigned nbl = 0;
1101 :
1102 0 : for (h = (struct hblk *)start; h < (struct hblk *)(start + len); h++) {
1103 0 : if (GC_is_black_listed(h, HBLKSIZE)) nbl++;
1104 : }
1105 0 : GC_printf("Section %d from %p to %p %lu/%lu blacklisted\n",
1106 : i, start, start + len,
1107 : (unsigned long)nbl, (unsigned long)(len/HBLKSIZE));
1108 : }
1109 0 : }
1110 : #endif
1111 :
1112 : void * GC_least_plausible_heap_addr = (void *)ONES;
1113 : void * GC_greatest_plausible_heap_addr = 0;
1114 :
1115 351 : GC_INLINE word GC_max(word x, word y)
1116 : {
1117 351 : return(x > y? x : y);
1118 : }
1119 :
1120 0 : GC_INLINE word GC_min(word x, word y)
1121 : {
1122 0 : return(x < y? x : y);
1123 : }
1124 :
1125 163 : GC_API void GC_CALL GC_set_max_heap_size(GC_word n)
1126 : {
1127 163 : GC_max_heapsize = n;
1128 163 : }
1129 :
1130 : GC_word GC_max_retries = 0;
1131 :
1132 : /*
1133 : * This explicitly increases the size of the heap. It is used
1134 : * internally, but may also be invoked from GC_expand_hp by the user.
1135 : * The argument is in units of HBLKSIZE.
1136 : * Tiny values of n are rounded up.
1137 : * Returns FALSE on failure.
1138 : */
1139 357 : GC_INNER GC_bool GC_expand_hp_inner(word n)
1140 : {
1141 : word bytes;
1142 : struct hblk * space;
1143 : word expansion_slop; /* Number of bytes by which we expect the */
1144 : /* heap to expand soon. */
1145 :
1146 357 : if (n < MINHINCR) n = MINHINCR;
1147 357 : bytes = n * HBLKSIZE;
1148 : /* Make sure bytes is a multiple of GC_page_size */
1149 : {
1150 357 : word mask = GC_page_size - 1;
1151 357 : bytes += mask;
1152 357 : bytes &= ~mask;
1153 : }
1154 :
1155 357 : if (GC_max_heapsize != 0 && GC_heapsize + bytes > GC_max_heapsize) {
1156 : /* Exceeded self-imposed limit */
1157 6 : return(FALSE);
1158 : }
1159 351 : space = GET_MEM(bytes);
1160 : GC_add_to_our_memory((ptr_t)space, bytes);
1161 351 : if (space == 0) {
1162 0 : if (GC_print_stats) {
1163 0 : GC_log_printf("Failed to expand heap by %ld bytes\n",
1164 : (unsigned long)bytes);
1165 : }
1166 0 : return(FALSE);
1167 : }
1168 351 : if (GC_print_stats) {
1169 0 : GC_log_printf("Increasing heap size by %lu after %lu allocated bytes\n",
1170 : (unsigned long)bytes, (unsigned long)GC_bytes_allocd);
1171 : }
1172 : /* Adjust heap limits generously for blacklisting to work better. */
1173 : /* GC_add_to_heap performs minimal adjustment needed for */
1174 : /* correctness. */
1175 351 : expansion_slop = min_bytes_allocd() + 4*MAXHINCR*HBLKSIZE;
1176 1078 : if ((GC_last_heap_addr == 0 && !((word)space & SIGNB))
1177 376 : || (GC_last_heap_addr != 0 && GC_last_heap_addr < (ptr_t)space)) {
1178 : /* Assume the heap is growing up */
1179 351 : word new_limit = (word)space + bytes + expansion_slop;
1180 351 : if (new_limit > (word)space) {
1181 351 : GC_greatest_plausible_heap_addr =
1182 351 : (void *)GC_max((word)GC_greatest_plausible_heap_addr,
1183 : (word)new_limit);
1184 : }
1185 : } else {
1186 : /* Heap is growing down */
1187 0 : word new_limit = (word)space - expansion_slop;
1188 0 : if (new_limit < (word)space) {
1189 0 : GC_least_plausible_heap_addr =
1190 0 : (void *)GC_min((word)GC_least_plausible_heap_addr,
1191 : (word)space - expansion_slop);
1192 : }
1193 : }
1194 351 : GC_prev_heap_addr = GC_last_heap_addr;
1195 351 : GC_last_heap_addr = (ptr_t)space;
1196 351 : GC_add_to_heap(space, bytes);
1197 : /* Force GC before we are likely to allocate past expansion_slop */
1198 351 : GC_collect_at_heapsize =
1199 351 : GC_heapsize + expansion_slop - 2*MAXHINCR*HBLKSIZE;
1200 351 : if (GC_collect_at_heapsize < GC_heapsize /* wrapped */)
1201 0 : GC_collect_at_heapsize = (word)(-1);
1202 351 : return(TRUE);
1203 : }
1204 :
1205 : /* Really returns a bool, but it's externally visible, so that's clumsy. */
1206 : /* The argument is in bytes. Includes GC_init() call. */
1207 163 : GC_API int GC_CALL GC_expand_hp(size_t bytes)
1208 : {
1209 : int result;
1210 : DCL_LOCK_STATE;
1211 :
1212 163 : LOCK();
1213 163 : if (!GC_is_initialized) GC_init();
1214 163 : result = (int)GC_expand_hp_inner(divHBLKSZ((word)bytes));
1215 163 : if (result) GC_requested_heapsize += bytes;
1216 163 : UNLOCK();
1217 163 : return(result);
1218 : }
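/*
 * Usage sketch (client code; example_configure_heap and the sizes are
 * illustrative): pre-sizing the heap at startup with the two public
 * calls above.
 */
static void example_configure_heap(void)
{
    GC_set_max_heap_size((GC_word)256 << 20); /* 256 MiB self-imposed cap */
    if (!GC_expand_hp((size_t)16 << 20)) {
        /* Could not pre-allocate 16 MiB; the collector will still */
        /* grow the heap on demand, subject to the cap above. */
    }
}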
1219 :
1220 : GC_INNER unsigned GC_fail_count = 0;
1221 : /* How many consecutive GC/expansion failures? */
1222 : /* Reset by GC_allochblk. */
1223 :
1224 : /* Collect or expand heap in an attempt to make the indicated number */
1225 : /* of free blocks available. Should be called until the blocks are */
1226 : /* available (setting retry value to TRUE unless this is the first */
1227 : /* call in a loop) or until it fails by returning FALSE. */
1228 112 : GC_INNER GC_bool GC_collect_or_expand(word needed_blocks,
1229 : GC_bool ignore_off_page,
1230 : GC_bool retry)
1231 : {
1232 112 : GC_bool gc_not_stopped = TRUE;
1233 : word blocks_to_get;
1234 : IF_CANCEL(int cancel_state;)
1235 :
1236 112 : DISABLE_CANCEL(cancel_state);
1237 336 : if (!GC_incremental && !GC_dont_gc &&
1238 224 : ((GC_dont_expand && GC_bytes_allocd > 0) || GC_should_collect())) {
1239 : /* Try to do a full collection using 'default' stop_func (unless */
1240 : /* nothing has been allocated since the latest collection or heap */
1241 : /* expansion is disabled). */
1242 168 : gc_not_stopped = GC_try_to_collect_inner(
1243 168 : GC_bytes_allocd > 0 && (!GC_dont_expand || !retry) ?
1244 : GC_default_stop_func : GC_never_stop_func);
1245 84 : if (gc_not_stopped == TRUE || !retry) {
1246 : /* Either the collection hasn't been aborted or this is the */
1247 : /* first attempt (in a loop). */
1248 84 : RESTORE_CANCEL(cancel_state);
1249 84 : return(TRUE);
1250 : }
1251 : }
1252 :
1253 56 : blocks_to_get = GC_heapsize/(HBLKSIZE*GC_free_space_divisor)
1254 56 : + needed_blocks;
1255 28 : if (blocks_to_get > MAXHINCR) {
1256 : word slop;
1257 :
1258 : /* Get the minimum required to make it likely that we can satisfy */
1259 : /* the current request in the presence of black-listing. */
1260 : /* This will probably be more than MAXHINCR. */
1261 10 : if (ignore_off_page) {
1262 0 : slop = 4;
1263 : } else {
1264 10 : slop = 2 * divHBLKSZ(BL_LIMIT);
1265 10 : if (slop > needed_blocks) slop = needed_blocks;
1266 : }
1267 10 : if (needed_blocks + slop > MAXHINCR) {
1268 4 : blocks_to_get = needed_blocks + slop;
1269 : } else {
1270 6 : blocks_to_get = MAXHINCR;
1271 : }
1272 : }
1273 :
1274 31 : if (!GC_expand_hp_inner(blocks_to_get)
1275 3 : && !GC_expand_hp_inner(needed_blocks)) {
1276 3 : if (gc_not_stopped == FALSE) {
1277 : /* Don't increment GC_fail_count here (and no warning). */
1278 0 : GC_gcollect_inner();
1279 : GC_ASSERT(GC_bytes_allocd == 0);
1280 3 : } else if (GC_fail_count++ < GC_max_retries) {
1281 0 : WARN("Out of Memory! Trying to continue ...\n", 0);
1282 0 : GC_gcollect_inner();
1283 : } else {
1284 : # if !defined(AMIGA) || !defined(GC_AMIGA_FASTALLOC)
1285 3 : WARN("Out of Memory! Heap size: %" GC_PRIdPTR " MiB."
1286 : " Returning NULL!\n", (GC_heapsize - GC_unmapped_bytes) >> 20);
1287 : # endif
1288 3 : RESTORE_CANCEL(cancel_state);
1289 3 : return(FALSE);
1290 : }
1291 25 : } else if (GC_fail_count && GC_print_stats) {
1292 0 : GC_log_printf("Memory available again...\n");
1293 : }
1294 25 : RESTORE_CANCEL(cancel_state);
1295 25 : return(TRUE);
1296 : }
1297 :
1298 : /*
1299 : * Make sure the object free list for size gran (in granules) is not empty.
1300 : * Return a pointer to the first object on the free list.
1301 : * The object MUST BE REMOVED FROM THE FREE LIST BY THE CALLER.
1302 : * Assumes we hold the allocator lock.
1303 : */
1304 17372 : GC_INNER ptr_t GC_allocobj(size_t gran, int kind)
1305 : {
1306 17372 : void ** flh = &(GC_obj_kinds[kind].ok_freelist[gran]);
1307 17372 : GC_bool tried_minor = FALSE;
1308 17372 : GC_bool retry = FALSE;
1309 :
1310 17372 : if (gran == 0) return(0);
1311 :
1312 52181 : while (*flh == 0) {
1313 17437 : ENTER_GC();
1314 : /* Do our share of marking work */
1315 17437 : if(TRUE_INCREMENTAL) GC_collect_a_little_inner(1);
1316 : /* Sweep blocks for objects of this size */
1317 17437 : GC_continue_reclaim(gran, kind);
1318 17437 : EXIT_GC();
1319 17437 : if (*flh == 0) {
1320 16613 : GC_new_hblk(gran, kind);
1321 : }
1322 17437 : if (*flh == 0) {
1323 65 : ENTER_GC();
1324 65 : if (GC_incremental && GC_time_limit == GC_TIME_UNLIMITED
1325 0 : && !tried_minor) {
1326 0 : GC_collect_a_little_inner(1);
1327 0 : tried_minor = TRUE;
1328 : } else {
1329 65 : if (!GC_collect_or_expand(1, FALSE, retry)) {
1330 0 : EXIT_GC();
1331 0 : return(0);
1332 : }
1333 65 : retry = TRUE;
1334 : }
1335 65 : EXIT_GC();
1336 : }
1337 : }
1338 : /* Successful allocation; reset failure count. */
1339 17372 : GC_fail_count = 0;
1340 :
1341 17372 : return(*flh);
1342 : }
|