Line data Source code
1 : /* src/threads/lock.cpp - lock implementation
2 :
3 : Copyright (C) 1996-2013
4 : CACAOVM - Verein zur Foerderung der freien virtuellen Maschine CACAO
5 :
6 : This file is part of CACAO.
7 :
8 : This program is free software; you can redistribute it and/or
9 : modify it under the terms of the GNU General Public License as
10 : published by the Free Software Foundation; either version 2, or (at
11 : your option) any later version.
12 :
13 : This program is distributed in the hope that it will be useful, but
14 : WITHOUT ANY WARRANTY; without even the implied warranty of
15 : MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 : General Public License for more details.
17 :
18 : You should have received a copy of the GNU General Public License
19 : along with this program; if not, write to the Free Software
20 : Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21 : 02110-1301, USA.
22 :
23 : */
24 :
25 : #include "threads/lock.hpp"
26 : #include <assert.h> // for assert
27 : #include <stdint.h> // for uintptr_t, int32_t
28 : #include <stdio.h> // for NULL
29 : #include <list> // for _List_iterator, etc
30 : #include "arch.hpp" // for CAS_PROVIDES_FULL_BARRIER
31 : #include "config.h" // for ENABLE_GC_BOEHM
32 : #include "lockword.hpp" // for Lockword
33 : #include "mm/gc.hpp" // for heap_hashcode, etc
34 : #include "mm/memory.hpp" // for MNEW, MZERO, FREE, MFREE, etc
35 : #include "native/llni.hpp" // for LLNI_DIRECT, LLNI_class_get
36 : #include "threads/condition.hpp" // for Condition
37 : #include "threads/mutex.hpp" // for Mutex
38 : #include "threads/thread.hpp" // for threadobject, etc
39 : #include "threads/atomic.hpp" // for memory_barrier, etc
40 : #include "threads/threadlist.hpp" // for ThreadList
41 : #include "toolbox/OStream.hpp" // for OStream, nl
42 : #include "toolbox/list.hpp" // for List
43 : #include "toolbox/logging.hpp" // for log_println, LOG
44 : #include "vm/class.hpp" // for operator<<, etc
45 : #include "vm/exceptions.hpp"
46 : #include "vm/finalizer.hpp" // for Finalizer
47 : #include "vm/global.hpp" // for java_handle_t
48 : #include "vm/options.hpp" // for opt_DebugLocks
49 : #include "vm/types.hpp" // for u4, s4, s8
50 :
51 : #if defined(ENABLE_JVMTI)
52 : #include "native/jvmti/cacaodbg.h"
53 : #endif
54 :
55 : #if defined(ENABLE_GC_BOEHM)
56 : # include "mm/boehm-gc/include/gc.h"
57 : #endif
58 :
59 : /* debug **********************************************************************/
60 :
61 : #if !defined(NDEBUG)
62 : # define DEBUGLOCKS(format) \
63 : do { \
64 : if (opt_DebugLocks) { \
65 : log_println format; \
66 : } \
67 : } while (0)
68 : #else
69 : # define DEBUGLOCKS(format)
70 : #endif
71 :
72 : STAT_DECLARE_VAR(int,size_lock_record,0)
73 : STAT_DECLARE_VAR(int,size_lock_hashtable,0)
74 : STAT_DECLARE_VAR(int,size_lock_waiter,0)
75 :
76 : /******************************************************************************/
77 : /* MACROS */
78 : /******************************************************************************/
79 :
80 : /* number of lock records in the first pool allocated for a thread */
81 : #define LOCK_INITIAL_LOCK_RECORDS 8
82 :
83 : #define LOCK_INITIAL_HASHTABLE_SIZE 1613 /* a prime roughly halfway between 1024 and 2048 */
84 :
85 :
86 : /******************************************************************************/
87 : /* MACROS FOR THIN/FAT LOCKS */
88 : /******************************************************************************/
89 :
90 : /* We use a variant of the tasuki locks described in the paper
91 : *
92 : * Tamiya Onodera, Kiyokuni Kawachiya
93 : * A Study of Locking Objects with Bimodal Fields
94 : * Proceedings of the ACM OOPSLA '99, pp. 223-237
95 : * 1999
96 : *
97 : * The underlying thin locks are a variant of the thin locks described in
98 : *
99 : * Bacon, Konuru, Murthy, Serrano
100 : * Thin Locks: Featherweight Synchronization for Java
101 : * Proceedings of the ACM Conference on Programming Language Design and
102 : * Implementation (Montreal, Canada), SIGPLAN Notices volume 33, number 6,
103 : * June 1998
104 : *
105 : * In thin lock mode the lockword looks like this:
106 : *
107 : * ,----------------------,-----------,---,
108 : * | thread ID | count | 0 |
109 : * `----------------------'-----------'---'
110 : *
111 : * thread ID......the 'index' of the owning thread, or 0
112 : * count..........number of times the lock has been entered minus 1
113 : * 0..............the shape bit is 0 in thin lock mode
114 : *
115 : * In fat lock mode it is basically a lock_record_t *:
116 : *
117 : * ,----------------------------------,---,
118 : * | lock_record_t * (without LSB) | 1 |
119 : * `----------------------------------'---'
120 : *
121 : * 1..............the shape bit is 1 in fat lock mode
122 : */
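/* Illustrative sketch: how the two lockword shapes described above are
 * typically inspected through the Lockword wrapper used throughout this
 * file. Only accessors that also appear below are relied upon; the helper
 * name describe_lockword is hypothetical and not part of the locking
 * algorithm itself. */

static void describe_lockword(uintptr_t lw_cache)
{
	Lockword lockword(lw_cache);

	if (lockword.is_fat_lock()) {
		/* shape bit is 1: the remaining bits hold a lock_record_t* */
		lock_record_t *lr = lockword.get_fat_lock();
		DEBUGLOCKS(("fat lock: lr=%p", (void *) lr));
	}
	else {
		/* shape bit is 0: the owning thread's index and the recursion
		   count are encoded directly in the word; index 0 means the
		   lock is currently free */
		DEBUGLOCKS(("thin lock: owner index=%d",
		            lockword.get_thin_lock_thread_index()));
	}
}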
123 :
124 : /* global variables ***********************************************************/
125 :
126 : /* hashtable mapping objects to lock records */
127 : static lock_hashtable_t lock_hashtable;
128 :
129 :
130 : /******************************************************************************/
131 : /* PROTOTYPES */
132 : /******************************************************************************/
133 :
134 : static void lock_hashtable_init(void);
135 :
136 : static inline uintptr_t* lock_lockword_get(java_handle_t* o);
137 : static void lock_record_enter(threadobject *t, lock_record_t *lr);
138 : static void lock_record_exit(threadobject *t, lock_record_t *lr);
139 : static bool lock_record_wait(threadobject *t, lock_record_t *lr, s8 millis, s4 nanos);
140 : static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one);
141 :
142 :
143 : /*============================================================================*/
144 : /* INITIALIZATION OF DATA STRUCTURES */
145 : /*============================================================================*/
146 :
147 :
148 : /* lock_init *******************************************************************
149 :
150 : Initialize global data for locking.
151 :
152 : *******************************************************************************/
153 :
154 163 : void lock_init(void)
155 : {
156 : /* initialize lock hashtable */
157 :
158 163 : lock_hashtable_init();
159 163 : }
160 :
161 :
162 : /* lock_record_new *************************************************************
163 :
164 : Allocate a lock record.
165 :
166 : *******************************************************************************/
167 :
168 20007 : static lock_record_t *lock_record_new(void)
169 : {
170 : lock_record_t *lr;
171 :
172 : /* allocate the data structure on the C heap */
173 :
174 20007 : lr = NEW(lock_record_t);
175 :
176 : STATISTICS(size_lock_record += sizeof(lock_record_t));
177 :
178 : /* initialize the members */
179 :
180 20007 : lr->object = NULL;
181 20007 : lr->owner = NULL;
182 20007 : lr->count = 0;
183 20007 : lr->waiters = new List<threadobject*>();
184 :
185 : #if defined(ENABLE_GC_CACAO)
186 : /* register the lock object as weak reference with the GC */
187 :
188 : gc_weakreference_register(&(lr->object), GC_REFTYPE_LOCKRECORD);
189 : #endif
190 :
191 : // Initialize the mutex.
192 20007 : lr->mutex = new Mutex();
193 :
194 20007 : DEBUGLOCKS(("[lock_record_new : lr=%p]", (void *) lr));
195 :
196 20007 : return lr;
197 : }
198 :
199 :
200 : /* lock_record_free ************************************************************
201 :
202 : Free a lock record.
203 :
204 : IN:
205 : lr....lock record to free
206 :
207 : *******************************************************************************/
208 :
209 0 : static void lock_record_free(lock_record_t *lr)
210 : {
211 0 : DEBUGLOCKS(("[lock_record_free : lr=%p]", (void *) lr));
212 :
213 : // Destroy the mutex.
214 0 : delete lr->mutex;
215 :
216 : #if defined(ENABLE_GC_CACAO)
217 : /* unregister the lock object reference with the GC */
218 :
219 : gc_weakreference_unregister(&(lr->object));
220 : #endif
221 :
222 : // Free the waiters list.
223 0 : delete lr->waiters;
224 :
225 : /* Free the data structure. */
226 :
227 0 : FREE(lr, lock_record_t);
228 :
229 : STATISTICS(size_lock_record -= sizeof(lock_record_t));
230 0 : }
231 :
232 :
233 : /*============================================================================*/
234 : /* HASHTABLE MAPPING OBJECTS TO LOCK RECORDS */
235 : /*============================================================================*/
236 :
237 : /* lock_hashtable_init *********************************************************
238 :
239 : Initialize the global hashtable mapping objects to lock records.
240 :
241 : *******************************************************************************/
242 :
243 163 : static void lock_hashtable_init(void)
244 : {
245 163 : lock_hashtable.mutex = new Mutex();
246 :
247 163 : lock_hashtable.size = LOCK_INITIAL_HASHTABLE_SIZE;
248 163 : lock_hashtable.entries = 0;
249 163 : lock_hashtable.ptr = MNEW(lock_record_t *, lock_hashtable.size);
250 :
251 : STATISTICS(size_lock_hashtable += sizeof(lock_record_t *) * lock_hashtable.size);
252 :
253 163 : MZERO(lock_hashtable.ptr, lock_record_t *, lock_hashtable.size);
254 163 : }
255 :
256 :
257 : /* lock_hashtable_grow *********************************************************
258 :
259 : Grow the lock record hashtable to about twice its current size and
260 : rehash the entries.
261 :
262 : *******************************************************************************/
263 :
264 : /* must be called with hashtable mutex locked */
265 4 : static void lock_hashtable_grow(void)
266 : {
267 : u4 oldsize;
268 : u4 newsize;
269 : lock_record_t **oldtable;
270 : lock_record_t **newtable;
271 : lock_record_t *lr;
272 : lock_record_t *next;
273 : u4 i;
274 : u4 h;
275 : u4 newslot;
276 :
277 : /* allocate a new table */
278 :
279 4 : oldsize = lock_hashtable.size;
280 4 : newsize = oldsize*2 + 1; /* XXX should use prime numbers */
281 :
282 4 : DEBUGLOCKS(("growing lock hashtable to size %d", newsize));
283 :
284 4 : oldtable = lock_hashtable.ptr;
285 4 : newtable = MNEW(lock_record_t *, newsize);
286 :
287 : STATISTICS(size_lock_hashtable += sizeof(lock_record_t *) * newsize);
288 :
289 4 : MZERO(newtable, lock_record_t *, newsize);
290 :
291 : /* rehash the entries */
292 :
293 24210 : for (i = 0; i < oldsize; i++) {
294 24206 : lr = oldtable[i];
295 80688 : while (lr) {
296 32276 : next = lr->hashlink;
297 :
298 32276 : h = heap_hashcode(lr->object);
299 32276 : newslot = h % newsize;
300 :
301 32276 : lr->hashlink = newtable[newslot];
302 32276 : newtable[newslot] = lr;
303 :
304 32276 : lr = next;
305 : }
306 : }
307 :
308 : /* replace the old table */
309 :
310 4 : lock_hashtable.ptr = newtable;
311 4 : lock_hashtable.size = newsize;
312 :
313 4 : MFREE(oldtable, lock_record_t *, oldsize);
314 :
315 : STATISTICS(size_lock_hashtable -= sizeof(lock_record_t *) * oldsize);
316 4 : }
317 :
318 :
319 : /* lock_hashtable_cleanup ******************************************************
320 :
321 : Removes (and frees) lock records whose object reference has been cleared
322 : from the hashtable. The locked object has been reclaimed by the GC.
323 :
324 : *******************************************************************************/
325 :
326 : #if defined(ENABLE_GC_CACAO)
327 : void lock_hashtable_cleanup(void)
328 : {
329 : threadobject *t;
330 : lock_record_t *lr;
331 : lock_record_t *prev;
332 : lock_record_t *next;
333 : int i;
334 :
335 : t = THREADOBJECT;
336 :
337 : /* lock the hashtable */
338 :
339 : Mutex_lock(lock_hashtable.mutex);
340 :
341 : /* search the hashtable for cleared references */
342 :
343 : for (i = 0; i < lock_hashtable.size; i++) {
344 : lr = lock_hashtable.ptr[i];
345 : prev = NULL;
346 :
347 : while (lr) {
348 : next = lr->hashlink;
349 :
350 : /* remove lock records with cleared references */
351 :
352 : if (lr->object == NULL) {
353 :
354 : /* unlink the lock record from the hashtable */
355 :
356 : if (prev == NULL)
357 : lock_hashtable.ptr[i] = next;
358 : else
359 : prev->hashlink = next;
360 :
361 : /* free the lock record */
362 :
363 : lock_record_free(lr);
364 :
365 : } else {
366 : prev = lr;
367 : }
368 :
369 : lr = next;
370 : }
371 : }
372 :
373 : /* unlock the hashtable */
374 :
375 : Mutex_unlock(lock_hashtable.mutex);
376 : }
377 : #endif
378 :
379 :
380 : /* lock_hashtable_get **********************************************************
381 :
382 : Find the lock record for the given object. If it does not exist
383 : yet, create it and enter it in the hashtable.
384 :
385 : IN:
386 : o....the object to look up
387 :
388 : RETURN VALUE:
389 : the lock record to use for this object
390 :
391 : *******************************************************************************/
392 :
393 : #if defined(ENABLE_GC_BOEHM)
394 : static void lock_record_finalizer(java_handle_t *object, void *p);
395 : #endif
396 :
397 20007 : static lock_record_t *lock_hashtable_get(java_handle_t* o)
398 : {
399 : // This function is inside a critical section.
400 20007 : GCCriticalSection cs;
401 :
402 : u4 slot;
403 : lock_record_t *lr;
404 :
405 : // lw_cache is used throughout this file because the lockword can change at
406 : // any time, unless it is absolutely certain that we are holding the lock.
407 : // We don't do deflation, so we would also not expect a fat lockword to
408 : // change, but for the sake of uniformity, lw_cache is used even in this
409 : // case.
410 20007 : uintptr_t lw_cache = *lock_lockword_get(o);
411 20007 : Lockword lockword(lw_cache);
412 :
413 20007 : if (lockword.is_fat_lock())
414 0 : return lockword.get_fat_lock();
415 :
416 : // Lock the hashtable.
417 20007 : lock_hashtable.mutex->lock();
418 :
419 : /* lookup the lock record in the hashtable */
420 :
421 20007 : slot = heap_hashcode(LLNI_DIRECT(o)) % lock_hashtable.size;
422 20007 : lr = lock_hashtable.ptr[slot];
423 :
424 36257 : for (; lr != NULL; lr = lr->hashlink) {
425 16250 : if (lr->object == LLNI_DIRECT(o))
426 0 : break;
427 : }
428 :
429 20007 : if (lr == NULL) {
430 : /* not found, we must create a new one */
431 :
432 20007 : lr = lock_record_new();
433 :
434 20007 : lr->object = LLNI_DIRECT(o);
435 :
436 : #if defined(ENABLE_GC_BOEHM)
437 : /* register new finalizer to clean up the lock record */
438 :
439 20007 : Finalizer::attach_custom_finalizer(o, lock_record_finalizer, 0);
440 : #endif
441 :
442 : /* enter it in the hashtable */
443 :
444 20007 : lr->hashlink = lock_hashtable.ptr[slot];
445 20007 : lock_hashtable.ptr[slot] = lr;
446 20007 : lock_hashtable.entries++;
447 :
448 : /* check whether the hashtable should grow (load factor above 4/3) */
449 :
450 20007 : if (lock_hashtable.entries * 3 > lock_hashtable.size * 4) {
451 4 : lock_hashtable_grow();
452 : }
453 : }
454 :
455 : // Unlock the hashtable.
456 20007 : lock_hashtable.mutex->unlock();
457 :
458 : /* return the new lock record */
459 :
460 20007 : return lr;
461 : }
462 :
463 : /* lock_hashtable_remove *******************************************************
464 :
465 : Remove the lock record for the given object from the hashtable
466 : and free it afterwards.
467 :
468 : IN:
469 : t....the current thread
470 : o....the object to look up
471 :
472 : *******************************************************************************/
473 :
474 0 : static void lock_hashtable_remove(threadobject *t, java_handle_t *o)
475 : {
476 : lock_record_t *lr;
477 : u4 slot;
478 : lock_record_t *tmplr;
479 :
480 : // Lock the hashtable.
481 0 : lock_hashtable.mutex->lock();
482 :
483 : /* get lock record */
484 :
485 0 : uintptr_t lw_cache = *lock_lockword_get(o);
486 0 : Lockword lockword(lw_cache);
487 :
488 : // Sanity check.
489 0 : assert(lockword.is_fat_lock());
490 :
491 0 : lr = lockword.get_fat_lock();
492 :
493 : /* remove the lock-record from the hashtable */
494 :
495 0 : slot = heap_hashcode(LLNI_DIRECT(o)) % lock_hashtable.size;
496 0 : tmplr = lock_hashtable.ptr[slot];
497 :
498 0 : if (tmplr == lr) {
499 : /* special handling if it's the first in the chain */
500 :
501 0 : lock_hashtable.ptr[slot] = lr->hashlink;
502 : }
503 : else {
504 0 : for (; tmplr != NULL; tmplr = tmplr->hashlink) {
505 0 : if (tmplr->hashlink == lr) {
506 0 : tmplr->hashlink = lr->hashlink;
507 0 : break;
508 : }
509 : }
510 :
511 0 : assert(tmplr != NULL);
512 : }
513 :
514 : /* decrease entry count */
515 :
516 0 : lock_hashtable.entries--;
517 :
518 : // Unlock the hashtable.
519 0 : lock_hashtable.mutex->unlock();
520 :
521 : /* free the lock record */
522 :
523 0 : lock_record_free(lr);
524 0 : }
525 :
526 :
527 : /* lock_record_finalizer *******************************************************
528 :
529 : XXX Remove me for exact GC.
530 :
531 : *******************************************************************************/
532 :
533 : #define DEBUG_NAME "finalizer"
534 :
535 0 : static void lock_record_finalizer(java_handle_t *o, void *p)
536 : {
537 : #if defined(ENABLE_LOGGING)
538 : classinfo *c;
539 :
540 : LLNI_class_get(o, c);
541 :
542 : LOG("[finalizer lockrecord:"
543 : << " o=" << o
544 : << " p=" << p
545 : << " class=" << c
546 : << "]" << cacao::nl);
547 : #endif
548 :
549 : /* remove the lock-record entry from the hashtable and free it */
550 :
551 0 : lock_hashtable_remove(THREADOBJECT, o);
552 0 : }
553 :
554 : #undef DEBUG_NAME
555 :
556 : /*============================================================================*/
557 : /* LOCKING ALGORITHM */
558 : /*============================================================================*/
559 :
560 :
561 : /* lock_lockword_get ***********************************************************
562 :
563 : Get the lockword for the given object.
564 :
565 : IN:
566 : o............the object
567 :
568 : *******************************************************************************/
569 :
570 2902565 : static inline uintptr_t* lock_lockword_get(java_handle_t* o)
571 : {
572 : #if defined(ENABLE_GC_CACAO)
573 : // Sanity check.
574 : assert(GCCriticalSection::inside() == true);
575 : #endif
576 :
577 2902565 : return &(LLNI_DIRECT(o)->lockword);
578 : }
579 :
580 :
581 : /* lock_record_enter ***********************************************************
582 :
583 : Enter the lock represented by the given lock record.
584 :
585 : IN:
586 : t.................the current thread
587 : lr................the lock record
588 :
589 : *******************************************************************************/
590 :
591 57205 : static inline void lock_record_enter(threadobject *t, lock_record_t *lr)
592 : {
593 57205 : lr->mutex->lock();
594 57205 : lr->owner = t;
595 57205 : }
596 :
597 :
598 : /* lock_record_exit ************************************************************
599 :
600 : Release the lock represented by the given lock record.
601 :
602 : IN:
603 : t.................the current thread
604 : lr................the lock record
605 :
606 : PRE-CONDITION:
607 : The current thread must own the lock represented by this lock record.
608 : This is NOT checked by this function!
609 :
610 : *******************************************************************************/
611 :
612 57205 : static inline void lock_record_exit(threadobject *t, lock_record_t *lr)
613 : {
614 57205 : lr->owner = NULL;
615 57205 : lr->mutex->unlock();
616 57211 : }
617 :
618 :
619 : /* lock_inflate ****************************************************************
620 :
621 : Inflate the lock of the given object. This may only be called by the
622 : owner of the monitor of the object.
623 :
624 : IN:
625 : o............the object of which to inflate the lock
626 : lr...........the lock record to install. The current thread must
627 : own the lock of this lock record!
628 :
629 : PRE-CONDITION:
630 : The current thread must be the owner of this object's monitor AND
631 : of the lock record's lock!
632 :
633 : *******************************************************************************/
634 :
635 2 : static void lock_inflate(java_handle_t *o, lock_record_t *lr)
636 : {
637 2 : Lockword lockword(*lock_lockword_get(o));
638 2 : lockword.inflate(lr);
639 2 : }
640 :
641 :
642 : /* sable_flc_waiting ***********************************************************
643 :
644 : Enqueue the current thread on another thread's FLC list. The function
645 : blocks until the owning thread has seen the contention and woken us up.
646 :
647 : The algorithm was originally an almost literal copy from SableVM; the
648 : superfluous list traversal in the waiting loop has since been
649 : removed.
650 :
651 : IN:
652 : lockword.....the object's lockword as seen at the first locking attempt
653 : t............the current thread
654 : o............the object of which to enter the monitor
655 :
656 : *******************************************************************************/
657 :
658 5 : static void sable_flc_waiting(uintptr_t lw_cache, threadobject *t, java_handle_t *o)
659 : {
660 : int32_t index;
661 : threadobject *t_other;
662 : int old_flc;
663 :
664 5 : Lockword lockword(lw_cache);
665 5 : index = lockword.get_thin_lock_thread_index();
666 5 : t_other = ThreadList::get()->get_thread_by_index(index);
667 :
668 : // The lockword could have changed on our way here. If the
669 : // thread index is zero, the lock got unlocked and we simply
670 : // return.
671 5 : if (t_other == NULL)
672 : /* failure, TODO: add statistics */
673 1 : return;
674 :
675 4 : t_other->flc_lock->lock();
676 4 : old_flc = t_other->flc_bit;
677 4 : t_other->flc_bit = true;
678 :
679 4 : DEBUGLOCKS(("thread %d set flc bit for lock-holding thread %d", t->index, t_other->index));
680 :
681 : // Set FLC bit first, then read the lockword again.
682 4 : Atomic::memory_barrier();
683 :
684 4 : lw_cache = *lock_lockword_get(o);
685 :
686 : /* Lockword is still the way it was seen before */
687 4 : if (lockword.is_thin_lock() && (lockword.get_thin_lock_thread_index() == index))
688 : {
689 : //threadobject *f;
690 : /* Add tuple (t, o) to the other thread's FLC list */
691 3 : t->flc_object = o;
692 3 : t->flc_next = t_other->flc_list;
693 3 : t_other->flc_list = t;
694 3 : if (t->flc_next == 0)
695 3 : t_other->flc_tail = t;
696 : //f = t_other->flc_tail;
697 :
698 : // The other thread will clear flc_object.
699 9 : while (t->flc_object)
700 : {
701 : // We are not cleared yet -- the other thread cannot have seen
702 : // the FLC bit yet.
703 3 : assert(t_other->flc_bit);
704 :
705 : // Wait until another thread sees the flc bit and notifies
706 : // us of unlocking.
707 3 : t->flc_cond->wait(t_other->flc_lock);
708 : }
709 :
710 3 : t->flc_next = NULL;
711 : }
712 : else
713 1 : t_other->flc_bit = old_flc;
714 :
715 4 : t_other->flc_lock->unlock();
716 : }
717 :
718 : /* notify_flc_waiters **********************************************************
719 :
720 : Traverse the thread's FLC list and inflate all corresponding locks. Notify
721 : the associated threads as well.
722 :
723 : IN:
724 : t............the current thread
725 : o............the object currently being unlocked
726 :
727 : *******************************************************************************/
728 :
729 20009 : static void notify_flc_waiters(threadobject *t, java_handle_t *o)
730 : {
731 : threadobject *current;
732 :
733 20009 : t->flc_lock->lock();
734 :
735 20009 : current = t->flc_list;
736 40021 : while (current)
737 : {
738 3 : if (current->flc_object != o)
739 : {
740 : /* The object has to be inflated so the other threads can properly
741 : block on it. */
742 :
743 : // Only if not already inflated.
744 2 : Lockword lockword(*lock_lockword_get(current->flc_object));
745 2 : if (lockword.is_thin_lock()) {
746 2 : lock_record_t *lr = lock_hashtable_get(current->flc_object);
747 2 : lock_record_enter(t, lr);
748 :
749 2 : DEBUGLOCKS(("thread %d inflating lock of %p to lr %p",
750 : t->index, (void*) current->flc_object, (void*) lr));
751 :
752 2 : lock_inflate(current->flc_object, lr);
753 : }
754 : }
755 :
756 : // Wake the waiting threads.
757 3 : current->flc_cond->broadcast();
758 3 : current->flc_object = NULL;
759 :
760 3 : current = current->flc_next;
761 : }
762 :
763 20009 : t->flc_list = NULL;
764 20009 : t->flc_bit = false;
765 :
766 20009 : t->flc_lock->unlock();
767 20009 : }
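/* In short, the flat-lock-contention (FLC) handshake implemented by the two
 * functions above works as follows: a contender that fails to thin-lock an
 * object sets the owner's flc_bit, enqueues itself on the owner's flc_list
 * and waits on its own flc_cond (sable_flc_waiting). When the owning thread
 * later releases a thin lock and sees its flc_bit set, it inflates every
 * enqueued object other than the one just unlocked, signals the waiters and
 * clears their flc_object fields as well as its own flc_list and flc_bit
 * (notify_flc_waiters). A woken contender then retries lock_monitor_enter. */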
768 :
769 : /* lock_monitor_enter **********************************************************
770 :
771 : Acquire the monitor of the given object. If the current thread already
772 : owns the monitor, the lock counter is simply increased.
773 :
774 : This function blocks until it can acquire the monitor.
775 :
776 : IN:
777 : t............the current thread
778 : o............the object of which to enter the monitor
779 :
780 : RETURN VALUE:
781 : true.........the lock has been successfully acquired
782 : false........an exception has been thrown
783 :
784 : *******************************************************************************/
785 :
786 1422600 : bool lock_monitor_enter(java_handle_t *o)
787 : {
788 : // This function is inside a critical section.
789 1422600 : GCCriticalSection cs;
790 :
791 1422600 : if (o == NULL) {
792 1 : exceptions_throw_nullpointerexception();
793 1 : return false;
794 : }
795 :
796 1422599 : threadobject* t = thread_get_current();
797 :
798 1422598 : uintptr_t thinlock = t->thinlock;
799 :
800 : retry:
801 : // Most common case: try to thin-lock an unlocked object.
802 1422603 : uintptr_t *lw_ptr = lock_lockword_get(o);
803 1422603 : uintptr_t lw_cache = *lw_ptr;
804 1422603 : Lockword lockword(lw_cache);
805 1422595 : bool result = Lockword(*lw_ptr).lock(thinlock);
806 :
807 1422607 : if (result == true) {
808 : // Success, we locked it.
809 : // NOTE: The Java Memory Model requires a memory barrier here.
810 : #if defined(CAS_PROVIDES_FULL_BARRIER) && CAS_PROVIDES_FULL_BARRIER
811 : // On some architectures, the CAS (hidden in the
812 : // lockword.lock call above), already provides this barrier,
813 : // so we only need to inform the compiler.
814 1235132 : Atomic::instruction_barrier();
815 : #else
816 : Atomic::memory_barrier();
817 : #endif
818 1235132 : return true;
819 : }
820 :
821 : // Next common case: recursive lock with small recursion count.
822 : // NOTE: We don't have to worry about stale values here, as any
823 : // stale value will indicate another thread holding the lock (or
824 : // an inflated lock).
825 187475 : if (lockword.get_thin_lock_without_count() == thinlock) {
826 : // We own this monitor. Check the current recursion count.
827 170224 : if (lockword.is_max_thin_lock_count() == false) {
828 : // The recursion count is low enough.
829 170224 : Lockword(*lw_ptr).increase_thin_lock_count();
830 :
831 : // Success, we locked it.
832 170224 : return true;
833 : }
834 : else {
835 : // Recursion count overflow.
836 0 : lock_record_t* lr = lock_hashtable_get(o);
837 0 : lock_record_enter(t, lr);
838 0 : lock_inflate(o, lr);
839 0 : lr->count++;
840 :
841 0 : notify_flc_waiters(t, o);
842 :
843 0 : return true;
844 : }
845 : }
846 :
847 : // The lock is either contended or fat.
848 17251 : if (lockword.is_fat_lock()) {
849 17246 : lock_record_t* lr = lockword.get_fat_lock();
850 :
851 : // Check for recursive entering.
852 17246 : if (lr->owner == t) {
853 40 : lr->count++;
854 40 : return true;
855 : }
856 :
857 : // Acquire the mutex of the lock record.
858 17206 : lock_record_enter(t, lr);
859 :
860 : // Sanity check.
861 17206 : assert(lr->count == 0);
862 17206 : return true;
863 : }
864 :
865 : /****** inflation path ******/
866 :
867 : #if defined(ENABLE_JVMTI)
868 : /* Monitor Contended Enter */
869 : jvmti_MonitorContendedEntering(false, o);
870 : #endif
871 :
872 5 : sable_flc_waiting(lw_cache, t, o);
873 :
874 : #if defined(ENABLE_JVMTI)
875 : /* Monitor Contended Entered */
876 : jvmti_MonitorContendedEntering(true, o);
877 : #endif
878 5 : goto retry;
879 : }
880 :
881 :
882 : /* lock_monitor_exit ***********************************************************
883 :
884 : Decrement the counter of a (currently owned) monitor. If the counter
885 : reaches zero, release the monitor.
886 :
887 : If the current thread is not the owner of the monitor, an
888 : IllegalMonitorStateException is thrown.
889 :
890 : IN:
891 : t............the current thread
892 : o............the object of which to exit the monitor
893 :
894 : RETURN VALUE:
895 : true.........everything ok,
896 : false........an exception has been thrown
897 :
898 : *******************************************************************************/
899 :
900 1422600 : bool lock_monitor_exit(java_handle_t* o)
901 : {
902 : // This function is inside a critical section.
903 1422600 : GCCriticalSection cs;
904 :
905 1422598 : if (o == NULL) {
906 1 : exceptions_throw_nullpointerexception();
907 1 : return false;
908 : }
909 :
910 1422597 : threadobject* t = thread_get_current();
911 :
912 1422599 : uintptr_t thinlock = t->thinlock;
913 :
914 : // We don't have to worry about stale values here, as any stale
915 : // value will indicate that we don't own the lock.
916 1422599 : uintptr_t *lw_ptr = lock_lockword_get(o);
917 1422598 : uintptr_t lw_cache = *lw_ptr;
918 1422598 : Lockword lockword(lw_cache);
919 :
920 : // Most common case: we release a thin lock that we hold once.
921 1422598 : if (lockword.get_thin_lock() == thinlock) {
922 : // Memory barrier for Java Memory Model.
923 1215125 : Atomic::write_memory_barrier();
924 1215125 : Lockword(*lw_ptr).unlock();
925 : // Memory barrier for FLC bit testing.
926 1215125 : Atomic::memory_barrier();
927 :
928 : /* check if there has been a flat lock contention on this object */
929 :
930 1215125 : if (t->flc_bit) {
931 4 : DEBUGLOCKS(("thread %d saw flc bit", t->index));
932 :
933 : /* there has been a contention on this thin lock */
934 4 : notify_flc_waiters(t, o);
935 : }
936 :
937 1215125 : return true;
938 : }
939 :
940 : // Next common case: we release a recursive lock, count > 0.
941 207473 : if (lockword.get_thin_lock_without_count() == thinlock) {
942 170223 : Lockword(*lw_ptr).decrease_thin_lock_count();
943 170223 : return true;
944 : }
945 :
946 : // Either the lock is fat, or we don't hold it at all.
947 37247 : if (lockword.is_fat_lock()) {
948 37247 : lock_record_t* lr = lockword.get_fat_lock();
949 :
950 : // Check if we own this monitor.
951 : // NOTE: We don't have to worry about stale values here, as
952 : // any stale value will be != t and thus fail this check.
953 37248 : if (lr->owner != t) {
954 0 : exceptions_throw_illegalmonitorstateexception();
955 0 : return false;
956 : }
957 :
958 : /* { the current thread `t` owns the lock record `lr` on object `o` } */
959 :
960 37248 : if (lr->count != 0) {
961 : // We had locked this one recursively. Just decrement, it
962 : // will still be locked.
963 41 : lr->count--;
964 41 : return true;
965 : }
966 :
967 : // Unlock this lock record.
968 37207 : lock_record_exit(t, lr);
969 37209 : return true;
970 : }
971 :
972 : // Legal thin lock cases have been handled above, so this is an
973 : // error.
974 0 : exceptions_throw_illegalmonitorstateexception();
975 :
976 0 : return false;
977 : }
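/* A hedged usage sketch of the two entry points above: a caller brackets a
 * critical region with lock_monitor_enter and lock_monitor_exit and checks
 * both return values. The helper name run_synchronized is hypothetical and
 * not part of this file or of the VM interface. */

static inline bool run_synchronized(java_handle_t *o)
{
	if (!lock_monitor_enter(o))      /* throws NullPointerException for o == NULL */
		return false;

	/* ... work on o while holding its monitor ... */

	return lock_monitor_exit(o);     /* false if an exception has been thrown */
}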
978 :
979 :
980 : /* lock_record_add_waiter ******************************************************
981 :
982 : Add a thread to the list of waiting threads of a lock record.
983 :
984 : IN:
985 : lr...........the lock record
986 : thread.......the thread to add
987 :
988 : *******************************************************************************/
989 :
990 20010 : static void lock_record_add_waiter(lock_record_t *lr, threadobject* t)
991 : {
992 : // Add the thread as last entry to waiters list.
993 20010 : lr->waiters->push_back(t);
994 :
995 : STATISTICS(size_lock_waiter += sizeof(threadobject*));
996 20010 : }
997 :
998 :
999 : /* lock_record_remove_waiter ***************************************************
1000 :
1001 : Remove a thread from the list of waiting threads of a lock record.
1002 :
1003 : IN:
1004 : lr...........the lock record
1005 : t............the current thread
1006 :
1007 : PRE-CONDITION:
1008 : The current thread must be the owner of the lock record.
1009 :
1010 : *******************************************************************************/
1011 :
1012 20010 : static void lock_record_remove_waiter(lock_record_t *lr, threadobject* t)
1013 : {
1014 : // Remove the thread from the waiters.
1015 20010 : lr->waiters->remove(t);
1016 :
1017 : STATISTICS(size_lock_waiter -= sizeof(threadobject*));
1018 20010 : }
1019 :
1020 :
1021 : /* lock_record_wait ************************************************************
1022 :
1023 : Wait on a lock record for a given (maximum) amount of time.
1024 :
1025 : IN:
1026 : t............the current thread
1027 : lr...........the lock record
1028 : millis.......milliseconds of timeout
1029 : nanos........nanoseconds of timeout
1030 :
1031 : RETURN VALUE:
1032 : true.........we have been interrupted,
1033 : false........everything ok
1034 :
1035 : PRE-CONDITION:
1036 : The current thread must be the owner of the lock record.
1037 : This is NOT checked by this function!
1038 :
1039 : *******************************************************************************/
1040 :
1041 20010 : static bool lock_record_wait(threadobject *thread, lock_record_t *lr, s8 millis, s4 nanos)
1042 : {
1043 : s4 lockcount;
1044 20010 : bool wasinterrupted = false;
1045 :
1046 20010 : DEBUGLOCKS(("[lock_record_wait : lr=%p, t=%p, millis=%lld, nanos=%d]",
1047 : lr, thread, millis, nanos));
1048 :
1049 : /* { the thread t owns the fat lock record lr on the object o } */
1050 :
1051 : /* register us as waiter for this object */
1052 :
1053 20010 : lock_record_add_waiter(lr, thread);
1054 :
1055 : /* remember the old lock count */
1056 :
1057 20010 : lockcount = lr->count;
1058 :
1059 : /* unlock this record */
1060 :
1061 20010 : lr->count = 0;
1062 20010 : lock_record_exit(thread, lr);
1063 :
1064 : /* wait until notified/interrupted/timed out */
1065 :
1066 20010 : threads_wait_with_timeout_relative(thread, millis, nanos);
1067 :
1068 : /* re-enter the monitor */
1069 :
1070 20010 : lock_record_enter(thread, lr);
1071 :
1072 : /* remove us from the list of waiting threads */
1073 :
1074 20010 : lock_record_remove_waiter(lr, thread);
1075 :
1076 : /* restore the old lock count */
1077 :
1078 20010 : lr->count = lockcount;
1079 :
1080 : /* We can only be signaled OR interrupted, not both. If both flags
1081 : are set, reset only signaled and leave the thread in
1082 : interrupted state. Otherwise, clear both. */
1083 :
1084 20010 : if (!thread->signaled) {
1085 20003 : wasinterrupted = thread->interrupted;
1086 20003 : thread->interrupted = false;
1087 : }
1088 :
1089 20010 : thread->signaled = false;
1090 :
1091 : /* return if we have been interrupted */
1092 :
1093 20010 : return wasinterrupted;
1094 : }
1095 :
1096 :
1097 : /* lock_monitor_wait ***********************************************************
1098 :
1099 : Wait on an object for a given (maximum) amount of time.
1100 :
1101 : IN:
1102 : t............the current thread
1103 : o............the object
1104 : millis.......milliseconds of timeout
1105 : nanos........nanoseconds of timeout
1106 :
1107 : PRE-CONDITION:
1108 : The current thread must be the owner of the object's monitor.
1109 :
1110 : *******************************************************************************/
1111 :
1112 20010 : static void lock_monitor_wait(threadobject *t, java_handle_t *o, s8 millis, s4 nanos)
1113 : {
1114 : lock_record_t *lr;
1115 :
1116 20010 : uintptr_t *lw_ptr = lock_lockword_get(o);
1117 20010 : uintptr_t lw_cache = *lw_ptr;
1118 20010 : Lockword lockword(lw_cache);
1119 :
1120 : // Check if we own this monitor.
1121 : // NOTE: We don't have to worry about stale values here, as any
1122 : // stale value will fail this check.
1123 20010 : if (lockword.is_fat_lock()) {
1124 5 : lr = lockword.get_fat_lock();
1125 :
1126 5 : if (lr->owner != t) {
1127 0 : exceptions_throw_illegalmonitorstateexception();
1128 0 : return;
1129 : }
1130 : }
1131 : else {
1132 : // It's a thin lock.
1133 20005 : if (lockword.get_thin_lock_without_count() != t->thinlock) {
1134 0 : exceptions_throw_illegalmonitorstateexception();
1135 0 : return;
1136 : }
1137 :
1138 : // Get the lock-record.
1139 20005 : lr = lock_hashtable_get(o);
1140 20005 : lock_record_enter(t, lr);
1141 :
1142 : // Inflate this lock.
1143 20005 : Lockword(*lw_ptr).inflate(lr);
1144 :
1145 20005 : notify_flc_waiters(t, o);
1146 : }
1147 :
1148 : /* { the thread t owns the fat lock record lr on the object o } */
1149 :
1150 20010 : if (lock_record_wait(t, lr, millis, nanos))
1151 20000 : exceptions_throw_interruptedexception();
1152 : }
1153 :
1154 :
1155 : /* lock_record_notify **********************************************************
1156 :
1157 : Notify one thread or all threads waiting on the given lock record.
1158 :
1159 : IN:
1160 : t............the current thread
1161 : lr...........the lock record
1162 : one..........if true, only notify one thread
1163 :
1164 : PRE-CONDITION:
1165 : The current thread must be the owner of the lock record.
1166 : This is NOT checked by this function!
1167 :
1168 : *******************************************************************************/
1169 :
1170 17174 : static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one)
1171 : {
1172 : #if defined(ENABLE_GC_CACAO)
1173 : // Sanity check.
1174 : assert(GCCriticalSection::inside() == false);
1175 : #endif
1176 :
1177 : // { The thread t owns the fat lock record lr on the object o }
1178 :
1179 17181 : for (List<threadobject*>::iterator it = lr->waiters->begin(); it != lr->waiters->end(); it++) {
1180 7 : threadobject* waiter = *it;
1181 :
1182 : // We must skip threads which have already been notified. They
1183 : // will remove themselves from the list.
1184 7 : if (waiter->signaled)
1185 0 : continue;
1186 :
1187 : // Enter the wait-mutex.
1188 7 : waiter->waitmutex->lock();
1189 :
1190 7 : DEBUGLOCKS(("[lock_record_notify: lr=%p, t=%p, waitingthread=%p, one=%d]", lr, t, waiter, one));
1191 :
1192 : // Signal the waiter.
1193 7 : waiter->waitcond->signal();
1194 :
1195 : // Mark the thread as signaled.
1196 7 : waiter->signaled = true;
1197 :
1198 : // Leave the wait-mutex.
1199 7 : waiter->waitmutex->unlock();
1200 :
1201 : // If we should only wake one thread, we are done.
1202 7 : if (one)
1203 0 : break;
1204 : }
1205 17174 : }
1206 :
1207 :
1208 : /* lock_monitor_notify *********************************************************
1209 :
1210 : Notify one thread or all threads waiting on the given object.
1211 :
1212 : IN:
1213 : t............the current thread
1214 : o............the object
1215 : one..........if true, only notify one thread
1216 :
1217 : PRE-CONDITION:
1218 : The current thread must be the owner of the object's monitor.
1219 :
1220 : *******************************************************************************/
1221 :
1222 17466 : static void lock_monitor_notify(threadobject *t, java_handle_t *o, bool one)
1223 : {
1224 17466 : lock_record_t* lr = NULL;
1225 :
1226 : {
1227 : // This scope is inside a critical section.
1228 17466 : GCCriticalSection cs;
1229 :
1230 17466 : uintptr_t lw_cache = *lock_lockword_get(o);
1231 17466 : Lockword lockword(lw_cache);
1232 :
1233 : // Check if we own this monitor.
1234 : // NOTE: We don't have to worry about stale values here, as any
1235 : // stale value will fail this check.
1236 :
1237 17466 : if (lockword.is_fat_lock()) {
1238 17174 : lr = lockword.get_fat_lock();
1239 :
1240 17174 : if (lr->owner != t) {
1241 0 : exceptions_throw_illegalmonitorstateexception();
1242 : return;
1243 : }
1244 : }
1245 : else {
1246 : // It's a thin lock.
1247 292 : if (lockword.get_thin_lock_without_count() != t->thinlock) {
1248 0 : exceptions_throw_illegalmonitorstateexception();
1249 : return;
1250 : }
1251 :
1252 : // No thread can wait on a thin lock, so there's nothing to do.
1253 : return;
1254 0 : }
1255 : }
1256 :
1257 : // { The thread t owns the fat lock record lr on the object o }
1258 17174 : lock_record_notify(t, lr, one);
1259 : }
1260 :
1261 :
1262 :
1263 : /*============================================================================*/
1264 : /* INQUIRY FUNCTIONS                                                          */
1265 : /*============================================================================*/
1266 :
1267 :
1268 : /* lock_is_held_by_current_thread **********************************************
1269 :
1270 : Return true if the current thread owns the monitor of the given object.
1271 :
1272 : IN:
1273 : o............the object
1274 :
1275 : RETURN VALUE:
1276 : true, if the current thread holds the lock of this object.
1277 :
1278 : *******************************************************************************/
1279 :
1280 2 : bool lock_is_held_by_current_thread(java_handle_t *o)
1281 : {
1282 : // This function is inside a critical section.
1283 2 : GCCriticalSection cs;
1284 :
1285 : // Check if we own this monitor.
1286 : // NOTE: We don't have to worry about stale values here, as any
1287 : // stale value will fail this check.
1288 2 : threadobject* t = thread_get_current();
1289 2 : uintptr_t lw_cache = *lock_lockword_get(o);
1290 2 : Lockword lockword(lw_cache);
1291 :
1292 2 : if (lockword.is_fat_lock()) {
1293 : // It's a fat lock.
1294 0 : lock_record_t* lr = lockword.get_fat_lock();
1295 0 : return (lr->owner == t);
1296 : }
1297 : else {
1298 : // It's a thin lock.
1299 2 : return (lockword.get_thin_lock_without_count() == t->thinlock);
1300 0 : }
1301 : }
1302 :
1303 :
1304 :
1305 : /*============================================================================*/
1306 : /* WRAPPERS FOR OPERATIONS ON THE CURRENT THREAD */
1307 : /*============================================================================*/
1308 :
1309 :
1310 : /* lock_wait_for_object ********************************************************
1311 :
1312 : Wait for the given object.
1313 :
1314 : IN:
1315 : o............the object
1316 : millis.......milliseconds to wait
1317 : nanos........nanoseconds to wait
1318 :
1319 : *******************************************************************************/
1320 :
1321 20010 : void lock_wait_for_object(java_handle_t *o, s8 millis, s4 nanos)
1322 : {
1323 : threadobject *thread;
1324 :
1325 20010 : thread = THREADOBJECT;
1326 :
1327 20010 : lock_monitor_wait(thread, o, millis, nanos);
1328 20010 : }
1329 :
1330 :
1331 : /* lock_notify_object **********************************************************
1332 :
1333 : Notify one thread waiting on the given object.
1334 :
1335 : IN:
1336 : o............the object
1337 :
1338 : *******************************************************************************/
1339 :
1340 17167 : void lock_notify_object(java_handle_t *o)
1341 : {
1342 : threadobject *thread;
1343 :
1344 17167 : thread = THREADOBJECT;
1345 :
1346 17167 : lock_monitor_notify(thread, o, true);
1347 17167 : }
1348 :
1349 :
1350 : /* lock_notify_all_object ******************************************************
1351 :
1352 : Notify all threads waiting on the given object.
1353 :
1354 : IN:
1355 : o............the object
1356 :
1357 : *******************************************************************************/
1358 :
1359 299 : void lock_notify_all_object(java_handle_t *o)
1360 : {
1361 : threadobject *thread;
1362 :
1363 299 : thread = THREADOBJECT;
1364 :
1365 299 : lock_monitor_notify(thread, o, false);
1366 299 : }
1367 :
1368 :
1369 : /*
1370 : * These are local overrides for various environment variables in Emacs.
1371 : * Please do not remove this and leave it at the end of the file, where
1372 : * Emacs will automagically detect them.
1373 : * ---------------------------------------------------------------------
1374 : * Local variables:
1375 : * mode: c++
1376 : * indent-tabs-mode: t
1377 : * c-basic-offset: 4
1378 : * tab-width: 4
1379 : * End:
1380 : * vim:noexpandtab:sw=4:ts=4:
1381 : */