Line data Source code
1 : /*
2 : * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
3 : * Copyright (c) 1991-1996 by Xerox Corporation. All rights reserved.
4 : * Copyright (c) 1996-1999 by Silicon Graphics. All rights reserved.
5 : * Copyright (C) 2007 Free Software Foundation, Inc
6 :
7 : * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
8 : * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
9 : *
10 : * Permission is hereby granted to use or copy this program
11 : * for any purpose, provided the above notices are retained on all copies.
12 : * Permission to modify the code and to distribute modified code is granted,
13 : * provided the above notices are retained, and a notice that the code was
14 : * modified is included with the above copyright notice.
15 : */
16 :
17 : #include "private/gc_pmark.h"
18 :
/* If nonzero, finalizers are only run when the client explicitly      */
/* calls GC_invoke_finalizers(); the collector never runs them itself. */
/* Default is chosen at build time via FINALIZE_ON_DEMAND.             */
#ifdef FINALIZE_ON_DEMAND
int GC_finalize_on_demand = 1;
#else
int GC_finalize_on_demand = 0;
#endif

/* If nonzero, use Java-style finalization semantics: objects          */
/* reachable from other finalizable objects are not finalized in the   */
/* same cycle (see the GC_java_finalization handling in GC_finalize).  */
#ifdef JAVA_FINALIZATION
int GC_java_finalization = 1;
#else
int GC_java_finalization = 0;
#endif
30 :
31 : /* Type of mark procedure used for marking from finalizable object. */
32 : /* This procedure normally does not mark the object, only its */
33 : /* descendents. */
34 : typedef void (* finalization_mark_proc)(ptr_t /* finalizable_obj_ptr */);
35 :
/* Hash an address into a table of the given size (a power of two with */
/* the given binary log).  Drops the low 3 alignment bits and folds in */
/* higher bits so nearby addresses spread across buckets.              */
#define HASH3(addr,size,log_size) \
    ((((word)(addr) >> 3) ^ ((word)(addr) >> (3 + (log_size)))) \
    & ((size) - 1))
/* Compute the table size as (word)1 << (log_size): the argument is    */
/* parenthesized so expression arguments expand correctly, and the     */
/* shifted 1 is a word (not int) so the shift cannot overflow/UB when  */
/* log_size reaches the width of int.                                  */
#define HASH2(addr,log_size) HASH3(addr, (word)1 << (log_size), log_size)
40 :
/* Common prefix of entries in both hash tables below: a hidden        */
/* (disguised via GC_HIDE_POINTER) key plus the collision-chain link.  */
struct hash_chain_entry {
    word hidden_key;
    struct hash_chain_entry * next;
};
45 :
/* Entry in the disappearing-link hash table, keyed by the (hidden)    */
/* address of the pointer field to be cleared when the referenced      */
/* object becomes unreachable.                                         */
static struct disappearing_link {
    struct hash_chain_entry prolog;
#   define dl_hidden_link prolog.hidden_key
                                /* Field to be cleared.                 */
#   define dl_next(x) (struct disappearing_link *)((x) -> prolog.next)
#   define dl_set_next(x,y) (x)->prolog.next = (struct hash_chain_entry *)(y)

    word dl_hidden_obj;         /* Hidden pointer to object base.       */
} **dl_head = 0;

static signed_word log_dl_table_size = -1;
                        /* Binary log of                                */
                        /* current size of array pointed to by dl_head. */
                        /* -1 ==> size is 0.                            */

STATIC word GC_dl_entries = 0;
                        /* Number of entries currently in disappearing  */
                        /* link table.                                  */
64 :
/* Entry in the finalizable-object hash table, keyed by the (hidden)   */
/* object base address.                                                */
static struct finalizable_object {
    struct hash_chain_entry prolog;
#   define fo_hidden_base prolog.hidden_key
                        /* Pointer to object base.      */
                        /* No longer hidden once object */
                        /* is on finalize_now queue.    */
#   define fo_next(x) (struct finalizable_object *)((x) -> prolog.next)
#   define fo_set_next(x,y) (x)->prolog.next = (struct hash_chain_entry *)(y)
    GC_finalization_proc fo_fn; /* Finalizer.                   */
    ptr_t fo_client_data;       /* Opaque argument passed to fo_fn. */
    word fo_object_size;        /* In bytes.                    */
    finalization_mark_proc fo_mark_proc;  /* Mark-through procedure */
} **fo_head = 0;

/* Singly-linked (via fo_next) queue of objects whose finalizers are   */
/* ready to run; consumed by GC_invoke_finalizers().                   */
STATIC struct finalizable_object * GC_finalize_now = 0;
                        /* List of objects that should be finalized now. */

static signed_word log_fo_table_size = -1;
                        /* Binary log of fo_head table size; -1 ==> 0.   */

word GC_fo_entries = 0; /* used also in extra/MacOS.c */
85 :
/* Push the root pointers of the finalization machinery (the two table */
/* head pointers and the ready queue head) so the tables themselves    */
/* survive collection.  Each is a single pointer-sized variable.       */
GC_INNER void GC_push_finalizer_structures(void)
{
    GC_push_all((ptr_t)(&dl_head), (ptr_t)(&dl_head) + sizeof(word));
    GC_push_all((ptr_t)(&fo_head), (ptr_t)(&fo_head) + sizeof(word));
    GC_push_all((ptr_t)(&GC_finalize_now),
                (ptr_t)(&GC_finalize_now) + sizeof(word));
}
93 :
94 : /* Double the size of a hash table. *size_ptr is the log of its current */
95 : /* size. May be a no-op. */
96 : /* *table is a pointer to an array of hash headers. If we succeed, we */
97 : /* update both *table and *log_size_ptr. */
98 : /* Lock is held. */
99 803 : STATIC void GC_grow_table(struct hash_chain_entry ***table,
100 : signed_word *log_size_ptr)
101 : {
102 : register word i;
103 : register struct hash_chain_entry *p;
104 803 : signed_word log_old_size = *log_size_ptr;
105 803 : signed_word log_new_size = log_old_size + 1;
106 803 : word old_size = ((log_old_size == -1)? 0: (1 << log_old_size));
107 803 : word new_size = (word)1 << log_new_size;
108 : /* FIXME: Power of 2 size often gets rounded up to one more page. */
109 803 : struct hash_chain_entry **new_table = (struct hash_chain_entry **)
110 : GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE(
111 803 : (size_t)new_size * sizeof(struct hash_chain_entry *), NORMAL);
112 :
113 803 : if (new_table == 0) {
114 0 : if (*table == 0) {
115 0 : ABORT("Insufficient space for initial table allocation");
116 : } else {
117 0 : return;
118 : }
119 : }
120 37132 : for (i = 0; i < old_size; i++) {
121 36329 : p = (*table)[i];
122 109627 : while (p != 0) {
123 36969 : ptr_t real_key = GC_REVEAL_POINTER(p -> hidden_key);
124 36969 : struct hash_chain_entry *next = p -> next;
125 36969 : size_t new_hash = HASH3(real_key, new_size, log_new_size);
126 :
127 36969 : p -> next = new_table[new_hash];
128 36969 : new_table[new_hash] = p;
129 36969 : p = next;
130 : }
131 : }
132 803 : *log_size_ptr = log_new_size;
133 803 : *table = new_table;
134 : }
135 :
/* Register *link to be cleared when the object containing it becomes  */
/* unreachable.  Convenience wrapper: derives the object base from     */
/* link itself, then delegates.  Aborts if link is not inside a heap   */
/* object.                                                             */
GC_API int GC_CALL GC_register_disappearing_link(void * * link)
{
    ptr_t base;

    base = (ptr_t)GC_base((void *)link);
    if (base == 0)
        ABORT("Bad arg to GC_register_disappearing_link");
    return(GC_general_register_disappearing_link(link, base));
}
145 :
/* Register *link to be cleared when obj becomes unreachable.  link    */
/* must be word-aligned and non-NULL.  Returns GC_DUPLICATE if link    */
/* was already registered (its object is updated), GC_NO_MEMORY on     */
/* allocation failure, GC_SUCCESS otherwise.                           */
GC_API int GC_CALL GC_general_register_disappearing_link(void * * link,
                                                         void * obj)
{
    struct disappearing_link *curr_dl;
    size_t index;
    struct disappearing_link * new_dl;
    DCL_LOCK_STATE;

    if (((word)link & (ALIGNMENT-1)) || link == NULL)
        ABORT("Bad arg to GC_general_register_disappearing_link");
    LOCK();
    GC_ASSERT(obj != NULL && GC_base(obj) == obj);
    /* Grow the table when the load factor exceeds 1. */
    if (log_dl_table_size == -1
        || GC_dl_entries > ((word)1 << log_dl_table_size)) {
        GC_grow_table((struct hash_chain_entry ***)(&dl_head),
                      &log_dl_table_size);
        if (GC_print_stats) {
            GC_log_printf("Grew dl table to %u entries\n",
                          (1 << (unsigned)log_dl_table_size));
        }
    }
    index = HASH2(link, log_dl_table_size);
    for (curr_dl = dl_head[index]; curr_dl != 0; curr_dl = dl_next(curr_dl)) {
        if (curr_dl -> dl_hidden_link == GC_HIDE_POINTER(link)) {
            /* Already registered: just retarget it at obj. */
            curr_dl -> dl_hidden_obj = GC_HIDE_POINTER(obj);
            UNLOCK();
            return GC_DUPLICATE;
        }
    }
    new_dl = (struct disappearing_link *)
        GC_INTERNAL_MALLOC(sizeof(struct disappearing_link),NORMAL);
    if (0 == new_dl) {
      /* Internal allocation failed; retry via the client OOM handler, */
      /* which may run without the allocation lock held.               */
      GC_oom_func oom_fn = GC_oom_fn;
      UNLOCK();
      new_dl = (struct disappearing_link *)
                (*oom_fn)(sizeof(struct disappearing_link));
      if (0 == new_dl) {
        return GC_NO_MEMORY;
      }
      /* It's not likely we'll make it here, but ... */
      LOCK();
      /* Recalculate index since the table may grow. */
      index = HASH2(link, log_dl_table_size);
      /* Check again that our disappearing link is not in the table:   */
      /* another thread may have registered it while we were unlocked. */
      for (curr_dl = dl_head[index]; curr_dl != 0;
           curr_dl = dl_next(curr_dl)) {
        if (curr_dl -> dl_hidden_link == GC_HIDE_POINTER(link)) {
          curr_dl -> dl_hidden_obj = GC_HIDE_POINTER(obj);
          UNLOCK();
#         ifndef DBG_HDRS_ALL
            /* Free unused new_dl returned by GC_oom_fn() */
            GC_free((void *)new_dl);
#         endif
          return GC_DUPLICATE;
        }
      }
    }
    new_dl -> dl_hidden_obj = GC_HIDE_POINTER(obj);
    new_dl -> dl_hidden_link = GC_HIDE_POINTER(link);
    dl_set_next(new_dl, dl_head[index]);
    dl_head[index] = new_dl;
    GC_dl_entries++;
    UNLOCK();
    return GC_SUCCESS;
}
211 :
/* Remove link from the disappearing-link table if present.  Returns 1 */
/* if an entry was removed, 0 otherwise (including misaligned link).   */
GC_API int GC_CALL GC_unregister_disappearing_link(void * * link)
{
    struct disappearing_link *curr_dl, *prev_dl;
    size_t index;
    DCL_LOCK_STATE;

    if (((word)link & (ALIGNMENT-1)) != 0) return(0); /* Nothing to do. */

    LOCK();
    index = HASH2(link, log_dl_table_size);
    prev_dl = 0; curr_dl = dl_head[index];
    while (curr_dl != 0) {
        if (curr_dl -> dl_hidden_link == GC_HIDE_POINTER(link)) {
            /* Unchain the entry from its bucket list. */
            if (prev_dl == 0) {
                dl_head[index] = dl_next(curr_dl);
            } else {
                dl_set_next(prev_dl, dl_next(curr_dl));
            }
            GC_dl_entries--;
            UNLOCK();
#           ifdef DBG_HDRS_ALL
              /* With debug headers we cannot free; just detach.        */
              dl_set_next(curr_dl, 0);
#           else
              GC_free((void *)curr_dl);
#           endif
            return(1);
        }
        prev_dl = curr_dl;
        curr_dl = dl_next(curr_dl);
    }
    UNLOCK();
    return(0);
}
245 :
246 : /* Possible finalization_marker procedures. Note that mark stack */
247 : /* overflow is handled by the caller, and is not a disaster. */
/* Standard finalization-ordering mark procedure: push the object so   */
/* everything it references gets marked (the object itself is not      */
/* marked by this).  Mark stack overflow is handled by the caller.     */
STATIC void GC_normal_finalize_mark_proc(ptr_t p)
{
    hdr * hhdr = HDR(p);

    PUSH_OBJ(p, hhdr, GC_mark_stack_top,
             &(GC_mark_stack[GC_mark_stack_size]));
}
255 :
256 : /* This only pays very partial attention to the mark descriptor. */
257 : /* It does the right thing for normal and atomic objects, and treats */
258 : /* most others as normal. */
/* Mark everything p references EXCEPT pointers back into p itself     */
/* (self-references do not keep an object from being finalized).       */
/* Only pays partial attention to the mark descriptor: does the right  */
/* thing for normal and atomic objects, treats most others as normal.  */
STATIC void GC_ignore_self_finalize_mark_proc(ptr_t p)
{
    hdr * hhdr = HDR(p);
    word descr = hhdr -> hb_descr;
    ptr_t q;
    word r;
    ptr_t scan_limit;
    ptr_t target_limit = p + hhdr -> hb_sz - 1;

    if ((descr & GC_DS_TAGS) == GC_DS_LENGTH) {
      /* Simple length descriptor: scan only the stated length. */
      scan_limit = p + descr - sizeof(word);
    } else {
      /* Otherwise conservatively scan the whole object. */
      scan_limit = target_limit + 1 - sizeof(word);
    }
    for (q = p; q <= scan_limit; q += ALIGNMENT) {
        r = *(word *)q;
        /* Push only candidate pointers that land outside [p, target_limit]. */
        if ((ptr_t)r < p || (ptr_t)r > target_limit) {
            GC_PUSH_ONE_HEAP(r, q);
        }
    }
}
280 :
281 : /*ARGSUSED*/
282 12924 : STATIC void GC_null_finalize_mark_proc(ptr_t p) {}
283 :
284 : /* Possible finalization_marker procedures. Note that mark stack */
285 : /* overflow is handled by the caller, and is not a disaster. */
286 :
287 : /* GC_unreachable_finalize_mark_proc is an alias for normal marking, */
288 : /* but it is explicitly tested for, and triggers different */
289 : /* behavior. Objects registered in this way are not finalized */
290 : /* if they are reachable by other finalizable objects, even if those */
291 : /* other objects specify no ordering. */
/* Alias for normal marking, but compared by address elsewhere to      */
/* trigger the finalize-when-unreachable behavior (see GC_finalize).   */
STATIC void GC_unreachable_finalize_mark_proc(ptr_t p)
{
    GC_normal_finalize_mark_proc(p);
}
296 :
297 : /* Register a finalization function. See gc.h for details. */
298 : /* The last parameter is a procedure that determines */
299 : /* marking for finalization ordering. Any objects marked */
300 : /* by that procedure will be guaranteed to not have been */
301 : /* finalized when this finalizer is invoked. */
/* Register (or replace, or with fn == 0 remove) the finalizer for     */
/* obj.  mp selects the ordering semantics.  Previous finalizer and    */
/* client data are returned through *ofn / *ocd when non-NULL.         */
/* Handles allocation failure by retrying through GC_oom_fn with the   */
/* lock dropped, then re-validating the table state.                   */
STATIC void GC_register_finalizer_inner(void * obj,
                                        GC_finalization_proc fn, void *cd,
                                        GC_finalization_proc *ofn, void **ocd,
                                        finalization_mark_proc mp)
{
    ptr_t base;
    struct finalizable_object * curr_fo, * prev_fo;
    size_t index;
    struct finalizable_object *new_fo = 0;
    hdr *hhdr = NULL; /* initialized to prevent warning. */
    GC_oom_func oom_fn;
    DCL_LOCK_STATE;

    LOCK();
    /* Grow the table when the load factor exceeds 1. */
    if (log_fo_table_size == -1
        || GC_fo_entries > ((word)1 << log_fo_table_size)) {
      GC_grow_table((struct hash_chain_entry ***)(&fo_head),
                    &log_fo_table_size);
      if (GC_print_stats) {
        GC_log_printf("Grew fo table to %u entries\n",
                      (1 << (unsigned)log_fo_table_size));
      }
    }
    /* in the THREADS case we hold allocation lock.             */
    base = (ptr_t)obj;
    /* Loop: exits via return on lookup hit/removal, or breaks once a  */
    /* new entry has been allocated and base re-verified as absent.    */
    for (;;) {
      index = HASH2(base, log_fo_table_size);
      prev_fo = 0; curr_fo = fo_head[index];
      while (curr_fo != 0) {
        GC_ASSERT(GC_size(curr_fo) >= sizeof(struct finalizable_object));
        if (curr_fo -> fo_hidden_base == GC_HIDE_POINTER(base)) {
          /* Interruption by a signal in the middle of this     */
          /* should be safe.  The client may see only *ocd      */
          /* updated, but we'll declare that to be his problem. */
          if (ocd) *ocd = (void *) (curr_fo -> fo_client_data);
          if (ofn) *ofn = curr_fo -> fo_fn;
          /* Delete the structure for base. */
          if (prev_fo == 0) {
            fo_head[index] = fo_next(curr_fo);
          } else {
            fo_set_next(prev_fo, fo_next(curr_fo));
          }
          if (fn == 0) {
            GC_fo_entries--;
            /* May not happen if we get a signal.  But a high   */
            /* estimate will only make the table larger than    */
            /* necessary.                                       */
#           if !defined(THREADS) && !defined(DBG_HDRS_ALL)
              GC_free((void *)curr_fo);
#           endif
          } else {
            /* Replacement: reuse the existing entry. */
            curr_fo -> fo_fn = fn;
            curr_fo -> fo_client_data = (ptr_t)cd;
            curr_fo -> fo_mark_proc = mp;
            /* Reinsert it.  We deleted it first to maintain    */
            /* consistency in the event of a signal.            */
            if (prev_fo == 0) {
              fo_head[index] = curr_fo;
            } else {
              fo_set_next(prev_fo, curr_fo);
            }
          }
          UNLOCK();
#         ifndef DBG_HDRS_ALL
            if (EXPECT(new_fo != 0, FALSE)) {
              /* Free unused new_fo returned by GC_oom_fn() */
              GC_free((void *)new_fo);
            }
#         endif
          return;
        }
        prev_fo = curr_fo;
        curr_fo = fo_next(curr_fo);
      }
      if (EXPECT(new_fo != 0, FALSE)) {
        /* new_fo is returned by GC_oom_fn(), so fn != 0 and hhdr != 0. */
        break;
      }
      if (fn == 0) {
        /* Removal requested but no entry existed. */
        if (ocd) *ocd = 0;
        if (ofn) *ofn = 0;
        UNLOCK();
        return;
      }
      GET_HDR(base, hhdr);
      if (EXPECT(0 == hhdr, FALSE)) {
        /* We won't collect it, hence finalizer wouldn't be run. */
        if (ocd) *ocd = 0;
        if (ofn) *ofn = 0;
        UNLOCK();
        return;
      }
      new_fo = (struct finalizable_object *)
                GC_INTERNAL_MALLOC(sizeof(struct finalizable_object),NORMAL);
      if (EXPECT(new_fo != 0, TRUE))
        break;
      /* Allocation failed: retry via the client OOM handler with the  */
      /* lock released.                                                */
      oom_fn = GC_oom_fn;
      UNLOCK();
      new_fo = (struct finalizable_object *)
                (*oom_fn)(sizeof(struct finalizable_object));
      if (0 == new_fo) {
        /* Not enough memory.  *ocd and *ofn remain unchanged. */
        return;
      }
      /* It's not likely we'll make it here, but ... */
      LOCK();
      /* Recalculate index since the table may grow and        */
      /* check again that our finalizer is not in the table.   */
    }
    GC_ASSERT(GC_size(new_fo) >= sizeof(struct finalizable_object));
    if (ocd) *ocd = 0;
    if (ofn) *ofn = 0;
    new_fo -> fo_hidden_base = GC_HIDE_POINTER(base);
    new_fo -> fo_fn = fn;
    new_fo -> fo_client_data = (ptr_t)cd;
    new_fo -> fo_object_size = hhdr -> hb_sz;
    new_fo -> fo_mark_proc = mp;
    fo_set_next(new_fo, fo_head[index]);
    GC_fo_entries++;
    fo_head[index] = new_fo;
    UNLOCK();
}
424 :
/* Public entry: register a finalizer with standard topological        */
/* ordering (objects reachable from obj are finalized after obj).      */
GC_API void GC_CALL GC_register_finalizer(void * obj,
                                  GC_finalization_proc fn, void * cd,
                                  GC_finalization_proc *ofn, void ** ocd)
{
    GC_register_finalizer_inner(obj, fn, cd, ofn,
                                ocd, GC_normal_finalize_mark_proc);
}
432 :
/* Public entry: like GC_register_finalizer, but pointers from obj to  */
/* itself do not delay its finalization.                               */
GC_API void GC_CALL GC_register_finalizer_ignore_self(void * obj,
                                  GC_finalization_proc fn, void * cd,
                                  GC_finalization_proc *ofn, void ** ocd)
{
    GC_register_finalizer_inner(obj, fn, cd, ofn,
                                ocd, GC_ignore_self_finalize_mark_proc);
}
440 :
/* Public entry: register a finalizer with no ordering constraints.    */
GC_API void GC_CALL GC_register_finalizer_no_order(void * obj,
                                  GC_finalization_proc fn, void * cd,
                                  GC_finalization_proc *ofn, void ** ocd)
{
    GC_register_finalizer_inner(obj, fn, cd, ofn,
                                ocd, GC_null_finalize_mark_proc);
}
448 :
static GC_bool need_unreachable_finalization = FALSE;
        /* Avoid the extra revival pass in GC_finalize if this variant  */
        /* of registration was never used.                              */

/* Public entry: register a finalizer that runs only when obj is       */
/* unreachable even from other finalizable objects.  Requires          */
/* Java-style finalization semantics to be enabled.                    */
GC_API void GC_CALL GC_register_finalizer_unreachable(void * obj,
                                  GC_finalization_proc fn, void * cd,
                                  GC_finalization_proc *ofn, void ** ocd)
{
    need_unreachable_finalization = TRUE;
    GC_ASSERT(GC_java_finalization);
    GC_register_finalizer_inner(obj, fn, cd, ofn,
                                ocd, GC_unreachable_finalize_mark_proc);
}
461 :
462 : #ifndef NO_DEBUGGING
/* Debug aid: print every registered disappearing link and every       */
/* finalizable object to the GC log.  Caller is responsible for any    */
/* required locking.                                                   */
void GC_dump_finalization(void)
{
    struct disappearing_link * curr_dl;
    struct finalizable_object * curr_fo;
    ptr_t real_ptr, real_link;
    int dl_size = (log_dl_table_size == -1 ) ? 0 : (1 << log_dl_table_size);
    int fo_size = (log_fo_table_size == -1 ) ? 0 : (1 << log_fo_table_size);
    int i;

    GC_printf("Disappearing links:\n");
    for (i = 0; i < dl_size; i++) {
      for (curr_dl = dl_head[i]; curr_dl != 0; curr_dl = dl_next(curr_dl)) {
        real_ptr = GC_REVEAL_POINTER(curr_dl -> dl_hidden_obj);
        real_link = GC_REVEAL_POINTER(curr_dl -> dl_hidden_link);
        GC_printf("Object: %p, Link:%p\n", real_ptr, real_link);
      }
    }
    GC_printf("Finalizers:\n");
    for (i = 0; i < fo_size; i++) {
      for (curr_fo = fo_head[i]; curr_fo != 0; curr_fo = fo_next(curr_fo)) {
        real_ptr = GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
        GC_printf("Finalizable object: %p\n", real_ptr);
      }
    }
}
488 : #endif /* !NO_DEBUGGING */
489 :
#ifndef SMALL_CONFIG
  /* GC_dl_entries snapshot taken at the start of GC_finalize, used by */
  /* GC_print_finalization_stats to report how many links were cleared. */
  STATIC word GC_old_dl_entries = 0; /* for stats printing */
#endif
493 :
#ifndef THREADS
  /* Global variables to minimize the level of recursion when a client */
  /* finalizer allocates memory.                                       */
  STATIC int GC_finalizer_nested = 0;
                        /* Only the lowest byte is used, the rest is   */
                        /* padding for proper global data alignment    */
                        /* required for some compilers (like Watcom).  */
  STATIC unsigned GC_finalizer_skipped = 0;

  /* Checks and updates the level of finalizers recursion.             */
  /* Returns NULL if GC_invoke_finalizers() should not be called by the */
  /* collector (to minimize the risk of a deep finalizers recursion),  */
  /* otherwise returns a pointer to GC_finalizer_nested.               */
  STATIC unsigned char *GC_check_finalizer_nested(void)
  {
    unsigned nesting_level = *(unsigned char *)&GC_finalizer_nested;
    if (nesting_level) {
      /* We are inside another GC_invoke_finalizers().          */
      /* Skip some implicitly-called GC_invoke_finalizers()     */
      /* depending on the nesting (recursion) level: only every */
      /* 2^nesting_level-th implicit call proceeds.             */
      if (++GC_finalizer_skipped < (1U << nesting_level)) return NULL;
      GC_finalizer_skipped = 0;
    }
    *(char *)&GC_finalizer_nested = (char)(nesting_level + 1);
    return (unsigned char *)&GC_finalizer_nested;
  }
#endif /* !THREADS */
521 :
522 : /* Called with held lock (but the world is running). */
523 : /* Cause disappearing links to disappear and unreachable objects to be */
524 : /* enqueued for finalization. */
/* Called with held lock (but the world is running).                   */
/* Cause disappearing links to disappear and unreachable objects to be */
/* enqueued for finalization.  Runs four passes: clear links to dead   */
/* objects; mark through finalizable objects; move dead finalizable    */
/* objects onto GC_finalize_now; drop links residing in dead objects.  */
GC_INNER void GC_finalize(void)
{
    struct disappearing_link * curr_dl, * prev_dl, * next_dl;
    struct finalizable_object * curr_fo, * prev_fo, * next_fo;
    ptr_t real_ptr, real_link;
    size_t i;
    size_t dl_size = (log_dl_table_size == -1 ) ? 0 : (1 << log_dl_table_size);
    size_t fo_size = (log_fo_table_size == -1 ) ? 0 : (1 << log_fo_table_size);

#   ifndef SMALL_CONFIG
      /* Save current GC_dl_entries value for stats printing */
      GC_old_dl_entries = GC_dl_entries;
#   endif

    /* Make disappearing links disappear: clear the client's pointer   */
    /* field and unchain the entry when the target is unmarked.        */
    for (i = 0; i < dl_size; i++) {
      curr_dl = dl_head[i];
      prev_dl = 0;
      while (curr_dl != 0) {
        real_ptr = GC_REVEAL_POINTER(curr_dl -> dl_hidden_obj);
        real_link = GC_REVEAL_POINTER(curr_dl -> dl_hidden_link);
        if (!GC_is_marked(real_ptr)) {
            *(word *)real_link = 0;
            next_dl = dl_next(curr_dl);
            if (prev_dl == 0) {
                dl_head[i] = next_dl;
            } else {
                dl_set_next(prev_dl, next_dl);
            }
            GC_clear_mark_bit((ptr_t)curr_dl);
            GC_dl_entries--;
            curr_dl = next_dl;
        } else {
            prev_dl = curr_dl;
            curr_dl = dl_next(curr_dl);
        }
      }
    }
    /* Mark all objects reachable via chains of 1 or more pointers     */
    /* from finalizable objects.                                       */
    GC_ASSERT(GC_mark_state == MS_NONE);
    for (i = 0; i < fo_size; i++) {
      for (curr_fo = fo_head[i]; curr_fo != 0; curr_fo = fo_next(curr_fo)) {
        GC_ASSERT(GC_size(curr_fo) >= sizeof(struct finalizable_object));
        real_ptr = GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
        if (!GC_is_marked(real_ptr)) {
            GC_MARKED_FOR_FINALIZATION(real_ptr);
            GC_MARK_FO(real_ptr, curr_fo -> fo_mark_proc);
            if (GC_is_marked(real_ptr)) {
                /* Marking reached back to the object itself.  */
                WARN("Finalization cycle involving %p\n", real_ptr);
            }
        }
      }
    }
    /* Enqueue for finalization all objects that are still             */
    /* unreachable.                                                    */
    GC_bytes_finalized = 0;
    for (i = 0; i < fo_size; i++) {
      curr_fo = fo_head[i];
      prev_fo = 0;
      while (curr_fo != 0) {
        real_ptr = GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
        if (!GC_is_marked(real_ptr)) {
            if (!GC_java_finalization) {
              GC_set_mark_bit(real_ptr);
            }
            /* Delete from hash table */
            next_fo = fo_next(curr_fo);
            if (prev_fo == 0) {
              fo_head[i] = next_fo;
            } else {
              fo_set_next(prev_fo, next_fo);
            }
            GC_fo_entries--;
            /* Add to list of objects awaiting finalization.    */
            fo_set_next(curr_fo, GC_finalize_now);
            GC_finalize_now = curr_fo;
            /* unhide object pointer so any future collections will    */
            /* see it.                                                 */
            curr_fo -> fo_hidden_base =
                      (word)GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
            GC_bytes_finalized +=
                      curr_fo -> fo_object_size
                      + sizeof(struct finalizable_object);
            GC_ASSERT(GC_is_marked(GC_base((ptr_t)curr_fo)));
            curr_fo = next_fo;
        } else {
            prev_fo = curr_fo;
            curr_fo = fo_next(curr_fo);
        }
      }
    }

    if (GC_java_finalization) {
      /* make sure we mark everything reachable from objects finalized
         using the no_order mark_proc */
      for (curr_fo = GC_finalize_now;
           curr_fo != NULL; curr_fo = fo_next(curr_fo)) {
        real_ptr = (ptr_t)curr_fo -> fo_hidden_base;
        if (!GC_is_marked(real_ptr)) {
            if (curr_fo -> fo_mark_proc == GC_null_finalize_mark_proc) {
                GC_MARK_FO(real_ptr, GC_normal_finalize_mark_proc);
            }
            if (curr_fo -> fo_mark_proc != GC_unreachable_finalize_mark_proc) {
                GC_set_mark_bit(real_ptr);
            }
        }
      }

      /* now revive finalize-when-unreachable objects reachable from
         other finalizable objects */
      if (need_unreachable_finalization) {
        curr_fo = GC_finalize_now;
        prev_fo = 0;
        while (curr_fo != 0) {
          next_fo = fo_next(curr_fo);
          if (curr_fo -> fo_mark_proc == GC_unreachable_finalize_mark_proc) {
            real_ptr = (ptr_t)curr_fo -> fo_hidden_base;
            if (!GC_is_marked(real_ptr)) {
              GC_set_mark_bit(real_ptr);
            } else {
              /* Still reachable from another finalizable object:      */
              /* take it back off the queue and re-register it.        */
              if (prev_fo == 0)
                GC_finalize_now = next_fo;
              else
                fo_set_next(prev_fo, next_fo);

              curr_fo -> fo_hidden_base =
                        GC_HIDE_POINTER(curr_fo -> fo_hidden_base);
              GC_bytes_finalized -=
                  curr_fo->fo_object_size + sizeof(struct finalizable_object);

              i = HASH2(real_ptr, log_fo_table_size);
              fo_set_next (curr_fo, fo_head[i]);
              GC_fo_entries++;
              fo_head[i] = curr_fo;
              /* Keep prev_fo unchanged across the removal. */
              curr_fo = prev_fo;
            }
          }
          prev_fo = curr_fo;
          curr_fo = next_fo;
        }
      }
    }

    /* Remove dangling disappearing links: entries whose link field    */
    /* itself lives in an unreachable (about to be reclaimed) object.  */
    for (i = 0; i < dl_size; i++) {
      curr_dl = dl_head[i];
      prev_dl = 0;
      while (curr_dl != 0) {
        real_link = GC_base(GC_REVEAL_POINTER(curr_dl -> dl_hidden_link));
        if (real_link != 0 && !GC_is_marked(real_link)) {
            next_dl = dl_next(curr_dl);
            if (prev_dl == 0) {
                dl_head[i] = next_dl;
            } else {
                dl_set_next(prev_dl, next_dl);
            }
            GC_clear_mark_bit((ptr_t)curr_dl);
            GC_dl_entries--;
            curr_dl = next_dl;
        } else {
            prev_dl = curr_dl;
            curr_dl = dl_next(curr_dl);
        }
      }
    }
    if (GC_fail_count) {
      /* Don't prevent running finalizers if there has been an allocation */
      /* failure recently.                                                */
#     ifdef THREADS
        GC_reset_finalizer_nested();
#     else
        GC_finalizer_nested = 0;
#     endif
    }
}
701 :
702 : #ifndef JAVA_FINALIZATION_NOT_NEEDED
703 :
704 : /* Enqueue all remaining finalizers to be run - Assumes lock is held. */
/* Enqueue all remaining finalizers to be run - Assumes lock is held.  */
/* Unconditionally moves every table entry onto GC_finalize_now,       */
/* marking each object and what it references so nothing is reclaimed  */
/* before its finalizer runs.                                          */
STATIC void GC_enqueue_all_finalizers(void)
{
    struct finalizable_object * curr_fo, * prev_fo, * next_fo;
    ptr_t real_ptr;
    register int i;
    int fo_size;

    fo_size = (log_fo_table_size == -1 ) ? 0 : (1 << log_fo_table_size);
    GC_bytes_finalized = 0;
    for (i = 0; i < fo_size; i++) {
        curr_fo = fo_head[i];
        prev_fo = 0;
      while (curr_fo != 0) {
          real_ptr = GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
          GC_MARK_FO(real_ptr, GC_normal_finalize_mark_proc);
          GC_set_mark_bit(real_ptr);

          /* Delete from hash table */
          next_fo = fo_next(curr_fo);
          if (prev_fo == 0) {
            fo_head[i] = next_fo;
          } else {
            fo_set_next(prev_fo, next_fo);
          }
          GC_fo_entries--;

          /* Add to list of objects awaiting finalization.     */
          fo_set_next(curr_fo, GC_finalize_now);
          GC_finalize_now = curr_fo;

          /* unhide object pointer so any future collections will      */
          /* see it.                                                   */
          curr_fo -> fo_hidden_base =
                        (word)GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
          GC_bytes_finalized +=
                curr_fo -> fo_object_size + sizeof(struct finalizable_object);
          curr_fo = next_fo;
      }
    }
}
745 :
746 : /* Invoke all remaining finalizers that haven't yet been run.
747 : * This is needed for strict compliance with the Java standard,
748 : * which can make the runtime guarantee that all finalizers are run.
749 : * Unfortunately, the Java standard implies we have to keep running
750 : * finalizers until there are no more left, a potential infinite loop.
751 : * YUCK.
752 : * Note that this is even more dangerous than the usual Java
753 : * finalizers, in that objects reachable from static variables
754 : * may have been finalized when these finalizers are run.
755 : * Finalizers run at this point must be prepared to deal with a
756 : * mostly broken world.
757 : * This routine is externally callable, so is called without
758 : * the allocation lock.
759 : */
/* Run every registered finalizer, repeating until none remain (new    */
/* ones may be registered while finalizers run).  See the preceding    */
/* comment block for the Java-compliance rationale and caveats.        */
/* Called without the allocation lock.                                 */
GC_API void GC_CALL GC_finalize_all(void)
{
    DCL_LOCK_STATE;

    LOCK();
    while (GC_fo_entries > 0) {
      GC_enqueue_all_finalizers();
      UNLOCK();
      GC_invoke_finalizers();
      /* Running the finalizers in this thread is arguably not a good  */
      /* idea when we should be notifying another thread to run them.  */
      /* But otherwise we don't have a great way to wait for them to   */
      /* run.                                                          */
      LOCK();
    }
    UNLOCK();
}
777 :
778 : #endif /* !JAVA_FINALIZATION_NOT_NEEDED */
779 :
780 : /* Returns true if it is worth calling GC_invoke_finalizers. (Useful if */
781 : /* finalizers can only be called from some kind of `safe state' and */
782 : /* getting into that safe state is expensive.) */
783 0 : GC_API int GC_CALL GC_should_invoke_finalizers(void)
784 : {
785 0 : return GC_finalize_now != 0;
786 : }
787 :
788 : /* Invoke finalizers for all objects that are ready to be finalized. */
789 : /* Should be called without allocation lock. */
/* Invoke finalizers for all objects that are ready to be finalized.   */
/* Should be called without allocation lock.  Returns the number of    */
/* finalizers run; also accounts bytes freed by the finalizers into    */
/* GC_finalizer_bytes_freed.                                           */
GC_API int GC_CALL GC_invoke_finalizers(void)
{
    struct finalizable_object * curr_fo;
    int count = 0;
    word bytes_freed_before = 0; /* initialized to prevent warning. */
    DCL_LOCK_STATE;

    while (GC_finalize_now != 0) {
#       ifdef THREADS
            LOCK();
#       endif
        if (count == 0) {
            bytes_freed_before = GC_bytes_freed;
            /* Don't do this outside, since we need the lock. */
        }
        /* Pop the head of the ready queue under the lock, then run    */
        /* the finalizer with the lock released.                       */
        curr_fo = GC_finalize_now;
#       ifdef THREADS
            if (curr_fo != 0) GC_finalize_now = fo_next(curr_fo);
            UNLOCK();
            if (curr_fo == 0) break;
#       else
            GC_finalize_now = fo_next(curr_fo);
#       endif
        fo_set_next(curr_fo, 0);
        /* fo_hidden_base was unhidden when the object was enqueued. */
        (*(curr_fo -> fo_fn))((ptr_t)(curr_fo -> fo_hidden_base),
                              curr_fo -> fo_client_data);
        curr_fo -> fo_client_data = 0;
        ++count;
#       ifdef UNDEFINED
            /* This is probably a bad idea.  It throws off accounting if */
            /* nearly all objects are finalizable.  O.w. it shouldn't    */
            /* matter.                                                   */
            GC_free((void *)curr_fo);
#       endif
    }
    /* bytes_freed_before is initialized whenever count != 0 */
    if (count != 0 && bytes_freed_before != GC_bytes_freed) {
        LOCK();
        GC_finalizer_bytes_freed += (GC_bytes_freed - bytes_freed_before);
        UNLOCK();
    }
    return count;
}
833 :
/* Client callback invoked (at most once per collection) when          */
/* finalizers become ready and on-demand finalization is enabled.      */
/* All accesses to it should be synchronized to avoid data races.      */
GC_finalizer_notifier_proc GC_finalizer_notifier =
        (GC_finalizer_notifier_proc)0;

/* GC number at which the notifier was last invoked; prevents          */
/* repeated notification for the same collection.                      */
static GC_word last_finalizer_notification = 0;
839 :
/* If finalizers are on-demand, notify the client (once per GC cycle)  */
/* that there is work; otherwise invoke the pending finalizers here,   */
/* throttled by the recursion-nesting check.  Called without the lock. */
GC_INNER void GC_notify_or_invoke_finalizers(void)
{
    GC_finalizer_notifier_proc notifier_fn = 0;
#   if defined(KEEP_BACK_PTRS) || defined(MAKE_BACK_GRAPH)
      static word last_back_trace_gc_no = 1;    /* Skip first one. */
#   endif
    DCL_LOCK_STATE;

#   if defined(THREADS) && !defined(KEEP_BACK_PTRS) \
       && !defined(MAKE_BACK_GRAPH)
      /* Quick check (while unlocked) for an empty finalization queue.  */
      if (GC_finalize_now == 0) return;
#   endif
    LOCK();

    /* This is a convenient place to generate backtraces if appropriate, */
    /* since that code is not callable with the allocation lock.         */
#   if defined(KEEP_BACK_PTRS) || defined(MAKE_BACK_GRAPH)
      if (GC_gc_no > last_back_trace_gc_no) {
#       ifdef KEEP_BACK_PTRS
          long i;
          /* Stops when GC_gc_no wraps; that's OK.      */
          last_back_trace_gc_no = (word)(-1);  /* disable others. */
          for (i = 0; i < GC_backtraces; ++i) {
              /* FIXME: This tolerates concurrent heap mutation,        */
              /* which may cause occasional mysterious results.         */
              /* We need to release the GC lock, since GC_print_callers */
              /* acquires it.  It probably shouldn't.                   */
              UNLOCK();
              GC_generate_random_backtrace_no_gc();
              LOCK();
          }
          last_back_trace_gc_no = GC_gc_no;
#       endif
#       ifdef MAKE_BACK_GRAPH
          if (GC_print_back_height) {
            UNLOCK();
            GC_print_back_graph_stats();
            LOCK();
          }
#       endif
      }
#   endif
    if (GC_finalize_now == 0) {
      UNLOCK();
      return;
    }

    if (!GC_finalize_on_demand) {
      unsigned char *pnested = GC_check_finalizer_nested();
      UNLOCK();
      /* Skip GC_invoke_finalizers() if nested */
      if (pnested != NULL) {
        (void) GC_invoke_finalizers();
        *pnested = 0; /* Reset since no more finalizers. */
#       ifndef THREADS
          GC_ASSERT(GC_finalize_now == 0);
#       endif   /* Otherwise GC can run concurrently and add more */
      }
      return;
    }

    /* These variables require synchronization to avoid data races. */
    if (last_finalizer_notification != GC_gc_no) {
        last_finalizer_notification = GC_gc_no;
        notifier_fn = GC_finalizer_notifier;
    }
    UNLOCK();
    if (notifier_fn != 0)
        (*notifier_fn)(); /* Invoke the notifier */
}
911 :
/* Run fn(client_data) while holding the allocation lock and return    */
/* its result.  In the single-threaded build this is just the call.    */
GC_API void * GC_CALL GC_call_with_alloc_lock(GC_fn_type fn,
                                              void * client_data)
{
    void * result;
    DCL_LOCK_STATE;

#   ifdef THREADS
      LOCK();
      /* FIXME - This looks wrong!! */
      SET_LOCK_HOLDER();
#   endif
    result = (*fn)(client_data);
#   ifdef THREADS
#     ifndef GC_ASSERTIONS
        UNSET_LOCK_HOLDER();
#     endif /* o.w. UNLOCK() does it implicitly */
      UNLOCK();
#   endif
    return(result);
}
932 :
933 : #ifndef SMALL_CONFIG
/* Log finalization statistics: live table entries, live links,        */
/* objects ready for immediate finalization, and links cleared since   */
/* the snapshot taken at the start of the last GC_finalize pass.       */
GC_INNER void GC_print_finalization_stats(void)
{
    struct finalizable_object *fo = GC_finalize_now;
    unsigned long ready = 0;

    GC_log_printf(
        "%lu finalization table entries; %lu disappearing links alive\n",
        (unsigned long)GC_fo_entries, (unsigned long)GC_dl_entries);
    for (; 0 != fo; fo = fo_next(fo)) ++ready;
    GC_log_printf("%lu objects are eligible for immediate finalization; "
                  "%ld links cleared\n",
                  ready, (long)GC_old_dl_entries - (long)GC_dl_entries);
}
947 : #endif /* !SMALL_CONFIG */
|