CACAO
lock.cpp
Go to the documentation of this file.
1 /* src/threads/lock.cpp - lock implementation
2 
3  Copyright (C) 1996-2013
4  CACAOVM - Verein zur Foerderung der freien virtuellen Maschine CACAO
5 
6  This file is part of CACAO.
7 
8  This program is free software; you can redistribute it and/or
9  modify it under the terms of the GNU General Public License as
10  published by the Free Software Foundation; either version 2, or (at
11  your option) any later version.
12 
13  This program is distributed in the hope that it will be useful, but
14  WITHOUT ANY WARRANTY; without even the implied warranty of
15  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16  General Public License for more details.
17 
18  You should have received a copy of the GNU General Public License
19  along with this program; if not, write to the Free Software
20  Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
21  02110-1301, USA.
22 
23 */
24 
25 #include "threads/lock.hpp"
26 #include <assert.h> // for assert
27 #include <stdint.h> // for uintptr_t, int32_t
28 #include <stdio.h> // for NULL
29 #include <list> // for _List_iterator, etc
30 #include "arch.hpp" // for CAS_PROVIDES_FULL_BARRIER
31 #include "config.h" // for ENABLE_GC_BOEHM
32 #include "lockword.hpp" // for Lockword
33 #include "mm/gc.hpp" // for heap_hashcode, etc
34 #include "mm/memory.hpp" // for MNEW, MZERO, FREE, MFREE, etc
35 #include "native/llni.hpp" // for LLNI_DIRECT, LLNI_class_get
36 #include "threads/condition.hpp" // for Condition
37 #include "threads/mutex.hpp" // for Mutex
38 #include "threads/thread.hpp" // for threadobject, etc
39 #include "threads/atomic.hpp" // for memory_barrier, etc
40 #include "threads/threadlist.hpp" // for ThreadList
41 #include "toolbox/OStream.hpp" // for OStream, nl
42 #include "toolbox/list.hpp" // for List
43 #include "toolbox/logging.hpp" // for log_println, LOG
44 #include "vm/class.hpp" // for operator<<, etc
45 #include "vm/exceptions.hpp"
46 #include "vm/finalizer.hpp" // for Finalizer
47 #include "vm/global.hpp" // for java_handle_t
48 #include "vm/options.hpp" // for opt_DebugLocks
49 #include "vm/types.hpp" // for u4, s4, s8
50 
51 #if defined(ENABLE_JVMTI)
52 #include "native/jvmti/cacaodbg.h"
53 #endif
54 
55 #if defined(ENABLE_GC_BOEHM)
56 # include "mm/boehm-gc/include/gc.h"
57 #endif
58 
59 /* debug **********************************************************************/
60 
#if !defined(NDEBUG)
/* Print a lock-debugging message when -XX:+DebugLocks is enabled.
   `format` is a parenthesized printf-style argument list, e.g.
   DEBUGLOCKS(("lr=%p", lr)). Compiles away entirely in NDEBUG builds. */
# define DEBUGLOCKS(format) \
	do { \
		if (opt_DebugLocks) { \
			log_println format; \
		} \
	} while (0)
#else
# define DEBUGLOCKS(format)
#endif
71 
72 STAT_DECLARE_VAR(int,size_lock_record,0)
73 STAT_DECLARE_VAR(int,size_lock_hashtable,0)
74 STAT_DECLARE_VAR(int,size_lock_waiter,0)
75 
76 /******************************************************************************/
77 /* MACROS */
78 /******************************************************************************/
79 
80 /* number of lock records in the first pool allocated for a thread */
81 #define LOCK_INITIAL_LOCK_RECORDS 8
82 
83 #define LOCK_INITIAL_HASHTABLE_SIZE 1613 /* a prime in the middle between 1024 and 2048 */
84 
85 
86 /******************************************************************************/
87 /* MACROS FOR THIN/FAT LOCKS */
88 /******************************************************************************/
89 
90 /* We use a variant of the tasuki locks described in the paper
91  *
92  * Tamiya Onodera, Kiyokuni Kawachiya
93  * A Study of Locking Objects with Bimodal Fields
94  * Proceedings of the ACM OOPSLA '99, pp. 223-237
95  * 1999
96  *
97  * The underlying thin locks are a variant of the thin locks described in
98  *
99  * Bacon, Konuru, Murthy, Serrano
100  * Thin Locks: Featherweight Synchronization for Java
101  * Proceedings of the ACM Conference on Programming Language Design and
102  * Implementation (Montreal, Canada), SIGPLAN Notices volume 33, number 6,
103  * June 1998
104  *
105  * In thin lock mode the lockword looks like this:
106  *
107  * ,----------------------,-----------,---,
108  * | thread ID | count | 0 |
109  * `----------------------'-----------'---'
110  *
111  * thread ID......the 'index' of the owning thread, or 0
112  * count..........number of times the lock has been entered minus 1
113  * 0..............the shape bit is 0 in thin lock mode
114  *
115  * In fat lock mode it is basically a lock_record_t *:
116  *
117  * ,----------------------------------,---,
118  * | lock_record_t * (without LSB) | 1 |
119  * `----------------------------------'---'
120  *
121  * 1..............the shape bit is 1 in fat lock mode
122  */
123 
124 /* global variables ***********************************************************/
125 
126 /* hashtable mapping objects to lock records */
128 
129 
130 /******************************************************************************/
131 /* PROTOTYPES */
132 /******************************************************************************/
133 
134 static void lock_hashtable_init(void);
135 
136 static inline uintptr_t* lock_lockword_get(java_handle_t* o);
139 static bool lock_record_wait(threadobject *t, lock_record_t *lr, s8 millis, s4 nanos);
140 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one);
141 
142 
143 /*============================================================================*/
144 /* INITIALIZATION OF DATA STRUCTURES */
145 /*============================================================================*/
146 
147 
148 /* lock_init *******************************************************************
149 
150  Initialize global data for locking.
151 
152 *******************************************************************************/
153 
154 void lock_init(void)
155 {
156  /* initialize lock hashtable */
157 
159 }
160 
161 
162 /* lock_record_new *************************************************************
163 
164  Allocate a lock record.
165 
166 *******************************************************************************/
167 
169 {
170  lock_record_t *lr;
171 
172  /* allocate the data structure on the C heap */
173 
174  lr = NEW(lock_record_t);
175 
176  STATISTICS(size_lock_record += sizeof(lock_record_t));
177 
178  /* initialize the members */
179 
180  lr->object = NULL;
181  lr->owner = NULL;
182  lr->count = 0;
183  lr->waiters = new List<threadobject*>();
184 
185 #if defined(ENABLE_GC_CACAO)
186  /* register the lock object as weak reference with the GC */
187 
189 #endif
190 
191  // Initialize the mutex.
192  lr->mutex = new Mutex();
193 
194  DEBUGLOCKS(("[lock_record_new : lr=%p]", (void *) lr));
195 
196  return lr;
197 }
198 
199 
200 /* lock_record_free ************************************************************
201 
202  Free a lock record.
203 
204  IN:
205  lr....lock record to free
206 
207 *******************************************************************************/
208 
210 {
211  DEBUGLOCKS(("[lock_record_free : lr=%p]", (void *) lr));
212 
213  // Destroy the mutex.
214  delete lr->mutex;
215 
216 #if defined(ENABLE_GC_CACAO)
217  /* unregister the lock object reference with the GC */
218 
220 #endif
221 
222  // Free the waiters list.
223  delete lr->waiters;
224 
225  /* Free the data structure. */
226 
227  FREE(lr, lock_record_t);
228 
229  STATISTICS(size_lock_record -= sizeof(lock_record_t));
230 }
231 
232 
233 /*============================================================================*/
234 /* HASHTABLE MAPPING OBJECTS TO LOCK RECORDS */
235 /*============================================================================*/
236 
237 /* lock_hashtable_init *********************************************************
238 
239  Initialize the global hashtable mapping objects to lock records.
240 
241 *******************************************************************************/
242 
243 static void lock_hashtable_init(void)
244 {
245  lock_hashtable.mutex = new Mutex();
246 
247  lock_hashtable.size = LOCK_INITIAL_HASHTABLE_SIZE;
248  lock_hashtable.entries = 0;
249  lock_hashtable.ptr = MNEW(lock_record_t *, lock_hashtable.size);
250 
251  STATISTICS(size_lock_hashtable += sizeof(lock_record_t *) * lock_hashtable.size);
252 
253  MZERO(lock_hashtable.ptr, lock_record_t *, lock_hashtable.size);
254 }
255 
256 
257 /* lock_hashtable_grow *********************************************************
258 
259  Grow the lock record hashtable to about twice its current size and
260  rehash the entries.
261 
262 *******************************************************************************/
263 
264 /* must be called with hashtable mutex locked */
265 static void lock_hashtable_grow(void)
266 {
267  u4 oldsize;
268  u4 newsize;
269  lock_record_t **oldtable;
270  lock_record_t **newtable;
271  lock_record_t *lr;
273  u4 i;
274  u4 h;
275  u4 newslot;
276 
277  /* allocate a new table */
278 
279  oldsize = lock_hashtable.size;
280  newsize = oldsize*2 + 1; /* XXX should use prime numbers */
281 
282  DEBUGLOCKS(("growing lock hashtable to size %d", newsize));
283 
284  oldtable = lock_hashtable.ptr;
285  newtable = MNEW(lock_record_t *, newsize);
286 
287  STATISTICS(size_lock_hashtable += sizeof(lock_record_t *) * newsize);
288 
289  MZERO(newtable, lock_record_t *, newsize);
290 
291  /* rehash the entries */
292 
293  for (i = 0; i < oldsize; i++) {
294  lr = oldtable[i];
295  while (lr) {
296  next = lr->hashlink;
297 
298  h = heap_hashcode(lr->object);
299  newslot = h % newsize;
300 
301  lr->hashlink = newtable[newslot];
302  newtable[newslot] = lr;
303 
304  lr = next;
305  }
306  }
307 
308  /* replace the old table */
309 
310  lock_hashtable.ptr = newtable;
311  lock_hashtable.size = newsize;
312 
313  MFREE(oldtable, lock_record_t *, oldsize);
314 
315  STATISTICS(size_lock_hashtable -= sizeof(lock_record_t *) * oldsize);
316 }
317 
318 
/* lock_hashtable_cleanup ******************************************************

   Removes (and frees) lock records which have a cleared object reference
   from the hashtable. The locked object was reclaimed by the GC.

*******************************************************************************/

#if defined(ENABLE_GC_CACAO)
void lock_hashtable_cleanup(void)
{
	threadobject  *t;
	lock_record_t *lr;
	lock_record_t *prev;
	lock_record_t *next;   /* declaration was lost in extraction; `next` is used below */
	int i;

	t = THREADOBJECT;

	/* lock the hashtable */

	/* NOTE(review): C-style Mutex_lock API here vs mutex->lock() elsewhere in
	   this file — confirm this still compiles under ENABLE_GC_CACAO. */
	Mutex_lock(lock_hashtable.mutex);

	/* search the hashtable for cleared references */

	for (i = 0; i < lock_hashtable.size; i++) {
		lr = lock_hashtable.ptr[i];
		prev = NULL;

		while (lr) {
			next = lr->hashlink;

			/* remove lock records with cleared references */

			if (lr->object == NULL) {

				/* unlink the lock record from the hashtable */

				if (prev == NULL)
					lock_hashtable.ptr[i] = next;
				else
					prev->hashlink = next;

				/* free the lock record */

				lock_record_free(lr);

			} else {
				prev = lr;
			}

			lr = next;
		}
	}

	/* unlock the hashtable */

	Mutex_unlock(lock_hashtable.mutex);
}
#endif
378 
379 
380 /* lock_hashtable_get **********************************************************
381 
382  Find the lock record for the given object. If it does not exists,
383  yet, create it and enter it in the hashtable.
384 
385  IN:
386  o....the object to look up
387 
388  RETURN VALUE:
389  the lock record to use for this object
390 
391 *******************************************************************************/
392 
393 #if defined(ENABLE_GC_BOEHM)
394 static void lock_record_finalizer(java_handle_t *object, void *p);
395 #endif
396 
398 {
399  // This function is inside a critical section.
401 
402  u4 slot;
403  lock_record_t *lr;
404 
405  // lw_cache is used throughout this file because the lockword can change at
406  // any time, unless it is absolutely certain that we are holding the lock.
407  // We don't do deflation, so we would also not expect a fat lockword to
408  // change, but for the sake of uniformity, lw_cache is used even in this
409  // case.
410  uintptr_t lw_cache = *lock_lockword_get(o);
411  Lockword lockword(lw_cache);
412 
413  if (lockword.is_fat_lock())
414  return lockword.get_fat_lock();
415 
416  // Lock the hashtable.
417  lock_hashtable.mutex->lock();
418 
419  /* lookup the lock record in the hashtable */
420 
421  slot = heap_hashcode(LLNI_DIRECT(o)) % lock_hashtable.size;
422  lr = lock_hashtable.ptr[slot];
423 
424  for (; lr != NULL; lr = lr->hashlink) {
425  if (lr->object == LLNI_DIRECT(o))
426  break;
427  }
428 
429  if (lr == NULL) {
430  /* not found, we must create a new one */
431 
432  lr = lock_record_new();
433 
434  lr->object = LLNI_DIRECT(o);
435 
436 #if defined(ENABLE_GC_BOEHM)
437  /* register new finalizer to clean up the lock record */
438 
440 #endif
441 
442  /* enter it in the hashtable */
443 
444  lr->hashlink = lock_hashtable.ptr[slot];
445  lock_hashtable.ptr[slot] = lr;
446  lock_hashtable.entries++;
447 
448  /* check whether the hash should grow */
449 
450  if (lock_hashtable.entries * 3 > lock_hashtable.size * 4) {
452  }
453  }
454 
455  // Unlock the hashtable.
456  lock_hashtable.mutex->unlock();
457 
458  /* return the new lock record */
459 
460  return lr;
461 }
462 
463 /* lock_hashtable_remove *******************************************************
464 
465  Remove the lock record for the given object from the hashtable
466  and free it afterwards.
467 
468  IN:
469  t....the current thread
470  o....the object to look up
471 
472 *******************************************************************************/
473 
475 {
476  lock_record_t *lr;
477  u4 slot;
478  lock_record_t *tmplr;
479 
480  // Lock the hashtable.
481  lock_hashtable.mutex->lock();
482 
483  /* get lock record */
484 
485  uintptr_t lw_cache = *lock_lockword_get(o);
486  Lockword lockword(lw_cache);
487 
488  // Sanity check.
489  assert(lockword.is_fat_lock());
490 
491  lr = lockword.get_fat_lock();
492 
493  /* remove the lock-record from the hashtable */
494 
495  slot = heap_hashcode(LLNI_DIRECT(o)) % lock_hashtable.size;
496  tmplr = lock_hashtable.ptr[slot];
497 
498  if (tmplr == lr) {
499  /* special handling if it's the first in the chain */
500 
501  lock_hashtable.ptr[slot] = lr->hashlink;
502  }
503  else {
504  for (; tmplr != NULL; tmplr = tmplr->hashlink) {
505  if (tmplr->hashlink == lr) {
506  tmplr->hashlink = lr->hashlink;
507  break;
508  }
509  }
510 
511  assert(tmplr != NULL);
512  }
513 
514  /* decrease entry count */
515 
516  lock_hashtable.entries--;
517 
518  // Unlock the hashtable.
519  lock_hashtable.mutex->unlock();
520 
521  /* free the lock record */
522 
523  lock_record_free(lr);
524 }
525 
526 
527 /* lock_record_finalizer *******************************************************
528 
529  XXX Remove me for exact GC.
530 
531 *******************************************************************************/
532 
533 #define DEBUG_NAME "finalizer"
534 
535 static void lock_record_finalizer(java_handle_t *o, void *p)
536 {
537 #if defined(ENABLE_LOGGING)
538  classinfo *c;
539 
540  LLNI_class_get(o, c);
541 
542  LOG("[finalizer lockrecord:"
543  << " o=" << o
544  << " p=" << p
545  << " class=" << c
546  << "]" << cacao::nl);
547 #endif
548 
549  /* remove the lock-record entry from the hashtable and free it */
550 
552 }
553 
554 #undef DEBUG_NAME
555 
556 /*============================================================================*/
557 /* LOCKING ALGORITHM */
558 /*============================================================================*/
559 
560 
561 /* lock_lockword_get ***********************************************************
562 
563  Get the lockword for the given object.
564 
565  IN:
566  o............the object
567 
568 *******************************************************************************/
569 
570 static inline uintptr_t* lock_lockword_get(java_handle_t* o)
571 {
572 #if defined(ENABLE_GC_CACAO)
573  // Sanity check.
574  assert(GCCriticalSection::inside() == true);
575 #endif
576 
577  return &(LLNI_DIRECT(o)->lockword);
578 }
579 
580 
581 /* lock_record_enter ***********************************************************
582 
583  Enter the lock represented by the given lock record.
584 
585  IN:
586  t.................the current thread
587  lr................the lock record
588 
589 *******************************************************************************/
590 
591 static inline void lock_record_enter(threadobject *t, lock_record_t *lr)
592 {
593  lr->mutex->lock();
594  lr->owner = t;
595 }
596 
597 
598 /* lock_record_exit ************************************************************
599 
600  Release the lock represented by the given lock record.
601 
602  IN:
603  t.................the current thread
604  lr................the lock record
605 
606  PRE-CONDITION:
607  The current thread must own the lock represented by this lock record.
608  This is NOT checked by this function!
609 
610 *******************************************************************************/
611 
612 static inline void lock_record_exit(threadobject *t, lock_record_t *lr)
613 {
614  lr->owner = NULL;
615  lr->mutex->unlock();
616 }
617 
618 
619 /* lock_inflate ****************************************************************
620 
621  Inflate the lock of the given object. This may only be called by the
622  owner of the monitor of the object.
623 
624  IN:
625  o............the object of which to inflate the lock
626  lr...........the lock record to install. The current thread must
627  own the lock of this lock record!
628 
629  PRE-CONDITION:
630  The current thread must be the owner of this object's monitor AND
631  of the lock record's lock!
632 
633 *******************************************************************************/
634 
636 {
637  Lockword lockword(*lock_lockword_get(o));
638  lockword.inflate(lr);
639 }
640 
641 
642 /* sable_flc_waiting ***********************************************************
643 
644  Enqueue the current thread on another thread's FLC list. The function
645  blocks until the lock has been inflated by the owning thread.
646 
647  The algorithm used to be an almost literal copy from SableVM. The
648  superfluous list traversal in the waiting loop has been removed since,
649  though.
650 
651  IN:
652  lockword.....the object's lockword as seen at the first locking attempt
653  t............the current thread
654  o............the object of which to enter the monitor
655 
656 *******************************************************************************/
657 
658 static void sable_flc_waiting(uintptr_t lw_cache, threadobject *t, java_handle_t *o)
659 {
660  int32_t index;
661  threadobject *t_other;
662  int old_flc;
663 
664  Lockword lockword(lw_cache);
665  index = lockword.get_thin_lock_thread_index();
666  t_other = ThreadList::get()->get_thread_by_index(index);
667 
668  // The lockword could have changed during our way here. If the
669  // thread index is zero, the lock got unlocked and we simply
670  // return.
671  if (t_other == NULL)
672 /* failure, TODO: add statistics */
673  return;
674 
675  t_other->flc_lock->lock();
676  old_flc = t_other->flc_bit;
677  t_other->flc_bit = true;
678 
679  DEBUGLOCKS(("thread %d set flc bit for lock-holding thread %d", t->index, t_other->index));
680 
681  // Set FLC bit first, then read the lockword again.
683 
684  lw_cache = *lock_lockword_get(o);
685 
686  /* Lockword is still the way it was seen before */
687  if (lockword.is_thin_lock() && (lockword.get_thin_lock_thread_index() == index))
688  {
689  //threadobject *f;
690  /* Add tuple (t, o) to the other thread's FLC list */
691  t->flc_object = o;
692  t->flc_next = t_other->flc_list;
693  t_other->flc_list = t;
694  if (t->flc_next == 0)
695  t_other->flc_tail = t;
696  //f = t_other->flc_tail;
697 
698  // The other thread will clear flc_object.
699  while (t->flc_object)
700  {
701  // We are not cleared yet -- the other thread cannot have seen
702  // the FLC bit yet.
703  assert(t_other->flc_bit);
704 
705  // Wait until another thread sees the flc bit and notifies
706  // us of unlocking.
707  t->flc_cond->wait(t_other->flc_lock);
708  }
709 
710  t->flc_next = NULL;
711  }
712  else
713  t_other->flc_bit = old_flc;
714 
715  t_other->flc_lock->unlock();
716 }
717 
718 /* notify_flc_waiters **********************************************************
719 
720  Traverse the thread's FLC list and inflate all corresponding locks. Notify
721  the associated threads as well.
722 
723  IN:
724  t............the current thread
725  o............the object currently being unlocked
726 
727 *******************************************************************************/
728 
730 {
732 
733  t->flc_lock->lock();
734 
735  current = t->flc_list;
736  while (current)
737  {
738  if (current->flc_object != o)
739  {
740  /* The object has to be inflated so the other threads can properly
741  block on it. */
742 
743  // Only if not already inflated.
744  Lockword lockword(*lock_lockword_get(current->flc_object));
745  if (lockword.is_thin_lock()) {
747  lock_record_enter(t, lr);
748 
749  DEBUGLOCKS(("thread %d inflating lock of %p to lr %p",
750  t->index, (void*) current->flc_object, (void*) lr));
751 
752  lock_inflate(current->flc_object, lr);
753  }
754  }
755 
756  // Wake the waiting threads.
757  current->flc_cond->broadcast();
758  current->flc_object = NULL;
759 
760  current = current->flc_next;
761  }
762 
763  t->flc_list = NULL;
764  t->flc_bit = false;
765 
766  t->flc_lock->unlock();
767 }
768 
769 /* lock_monitor_enter **********************************************************
770 
771  Acquire the monitor of the given object. If the current thread already
772  owns the monitor, the lock counter is simply increased.
773 
774  This function blocks until it can acquire the monitor.
775 
776  IN:
777  t............the current thread
778  o............the object of which to enter the monitor
779 
780  RETURN VALUE:
781  true.........the lock has been successfully acquired
782  false........an exception has been thrown
783 
784 *******************************************************************************/
785 
787 {
788  // This function is inside a critical section.
790 
791  if (o == NULL) {
793  return false;
794  }
795 
797 
798  uintptr_t thinlock = t->thinlock;
799 
800 retry:
801  // Most common case: try to thin-lock an unlocked object.
802  uintptr_t *lw_ptr = lock_lockword_get(o);
803  uintptr_t lw_cache = *lw_ptr;
804  Lockword lockword(lw_cache);
805  bool result = Lockword(*lw_ptr).lock(thinlock);
806 
807  if (result == true) {
808  // Success, we locked it.
809  // NOTE: The Java Memory Model requires a memory barrier here.
810 #if defined(CAS_PROVIDES_FULL_BARRIER) && CAS_PROVIDES_FULL_BARRIER
811  // On some architectures, the CAS (hidden in the
812  // lockword.lock call above), already provides this barrier,
813  // so we only need to inform the compiler.
815 #else
817 #endif
818  return true;
819  }
820 
821  // Next common case: recursive lock with small recursion count.
822  // NOTE: We don't have to worry about stale values here, as any
823  // stale value will indicate another thread holding the lock (or
824  // an inflated lock).
825  if (lockword.get_thin_lock_without_count() == thinlock) {
826  // We own this monitor. Check the current recursion count.
827  if (lockword.is_max_thin_lock_count() == false) {
828  // The recursion count is low enough.
830 
831  // Success, we locked it.
832  return true;
833  }
834  else {
835  // Recursion count overflow.
837  lock_record_enter(t, lr);
838  lock_inflate(o, lr);
839  lr->count++;
840 
841  notify_flc_waiters(t, o);
842 
843  return true;
844  }
845  }
846 
847  // The lock is either contented or fat.
848  if (lockword.is_fat_lock()) {
849  lock_record_t* lr = lockword.get_fat_lock();
850 
851  // Check for recursive entering.
852  if (lr->owner == t) {
853  lr->count++;
854  return true;
855  }
856 
857  // Acquire the mutex of the lock record.
858  lock_record_enter(t, lr);
859 
860  // Sanity check.
861  assert(lr->count == 0);
862  return true;
863  }
864 
865  /****** inflation path ******/
866 
867 #if defined(ENABLE_JVMTI)
868  /* Monitor Contended Enter */
870 #endif
871 
872  sable_flc_waiting(lw_cache, t, o);
873 
874 #if defined(ENABLE_JVMTI)
875  /* Monitor Contended Entered */
877 #endif
878  goto retry;
879 }
880 
881 
882 /* lock_monitor_exit ***********************************************************
883 
884  Decrement the counter of a (currently owned) monitor. If the counter
885  reaches zero, release the monitor.
886 
887  If the current thread is not the owner of the monitor, an
888  IllegalMonitorState exception is thrown.
889 
890  IN:
891  t............the current thread
892  o............the object of which to exit the monitor
893 
894  RETURN VALUE:
895  true.........everything ok,
896  false........an exception has been thrown
897 
898 *******************************************************************************/
899 
901 {
902  // This function is inside a critical section.
904 
905  if (o == NULL) {
907  return false;
908  }
909 
911 
912  uintptr_t thinlock = t->thinlock;
913 
914  // We don't have to worry about stale values here, as any stale
915  // value will indicate that we don't own the lock.
916  uintptr_t *lw_ptr = lock_lockword_get(o);
917  uintptr_t lw_cache = *lw_ptr;
918  Lockword lockword(lw_cache);
919 
920  // Most common case: we release a thin lock that we hold once.
921  if (lockword.get_thin_lock() == thinlock) {
922  // Memory barrier for Java Memory Model.
924  Lockword(*lw_ptr).unlock();
925  // Memory barrier for FLC bit testing.
927 
928  /* check if there has been a flat lock contention on this object */
929 
930  if (t->flc_bit) {
931  DEBUGLOCKS(("thread %d saw flc bit", t->index));
932 
933  /* there has been a contention on this thin lock */
934  notify_flc_waiters(t, o);
935  }
936 
937  return true;
938  }
939 
940  // Next common case: we release a recursive lock, count > 0.
941  if (lockword.get_thin_lock_without_count() == thinlock) {
943  return true;
944  }
945 
946  // Either the lock is fat, or we don't hold it at all.
947  if (lockword.is_fat_lock()) {
948  lock_record_t* lr = lockword.get_fat_lock();
949 
950  // Check if we own this monitor.
951  // NOTE: We don't have to worry about stale values here, as
952  // any stale value will be != t and thus fail this check.
953  if (lr->owner != t) {
955  return false;
956  }
957 
958  /* { the current thread `t` owns the lock record `lr` on object `o` } */
959 
960  if (lr->count != 0) {
961  // We had locked this one recursively. Just decrement, it
962  // will still be locked.
963  lr->count--;
964  return true;
965  }
966 
967  // Unlock this lock record.
968  lock_record_exit(t, lr);
969  return true;
970  }
971 
972  // Legal thin lock cases have been handled above, so this is an
973  // error.
975 
976  return false;
977 }
978 
979 
980 /* lock_record_add_waiter ******************************************************
981 
982  Add a thread to the list of waiting threads of a lock record.
983 
984  IN:
985  lr...........the lock record
986  thread.......the thread to add
987 
988 *******************************************************************************/
989 
991 {
992  // Add the thread as last entry to waiters list.
993  lr->waiters->push_back(t);
994 
995  STATISTICS(size_lock_waiter += sizeof(threadobject*));
996 }
997 
998 
999 /* lock_record_remove_waiter ***************************************************
1000 
1001  Remove a thread from the list of waiting threads of a lock record.
1002 
1003  IN:
1004  lr...........the lock record
1005  t............the current thread
1006 
1007  PRE-CONDITION:
1008  The current thread must be the owner of the lock record.
1009 
1010 *******************************************************************************/
1011 
1013 {
1014  // Remove the thread from the waiters.
1015  lr->waiters->remove(t);
1016 
1017  STATISTICS(size_lock_waiter -= sizeof(threadobject*));
1018 }
1019 
1020 
1021 /* lock_record_wait ************************************************************
1022 
1023  Wait on a lock record for a given (maximum) amount of time.
1024 
1025  IN:
1026  t............the current thread
1027  lr...........the lock record
1028  millis.......milliseconds of timeout
1029  nanos........nanoseconds of timeout
1030 
1031  RETURN VALUE:
1032  true.........we have been interrupted,
1033  false........everything ok
1034 
1035  PRE-CONDITION:
1036  The current thread must be the owner of the lock record.
1037  This is NOT checked by this function!
1038 
1039 *******************************************************************************/
1040 
1041 static bool lock_record_wait(threadobject *thread, lock_record_t *lr, s8 millis, s4 nanos)
1042 {
1043  s4 lockcount;
1044  bool wasinterrupted = false;
1045 
1046  DEBUGLOCKS(("[lock_record_wait : lr=%p, t=%p, millis=%lld, nanos=%d]",
1047  lr, thread, millis, nanos));
1048 
1049  /* { the thread t owns the fat lock record lr on the object o } */
1050 
1051  /* register us as waiter for this object */
1052 
1053  lock_record_add_waiter(lr, thread);
1054 
1055  /* remember the old lock count */
1056 
1057  lockcount = lr->count;
1058 
1059  /* unlock this record */
1060 
1061  lr->count = 0;
1062  lock_record_exit(thread, lr);
1063 
1064  /* wait until notified/interrupted/timed out */
1065 
1066  threads_wait_with_timeout_relative(thread, millis, nanos);
1067 
1068  /* re-enter the monitor */
1069 
1070  lock_record_enter(thread, lr);
1071 
1072  /* remove us from the list of waiting threads */
1073 
1074  lock_record_remove_waiter(lr, thread);
1075 
1076  /* restore the old lock count */
1077 
1078  lr->count = lockcount;
1079 
1080  /* We can only be signaled OR interrupted, not both. If both flags
1081  are set, reset only signaled and leave the thread in
1082  interrupted state. Otherwise, clear both. */
1083 
1084  if (!thread->signaled) {
1085  wasinterrupted = thread->interrupted;
1086  thread->interrupted = false;
1087  }
1088 
1089  thread->signaled = false;
1090 
1091  /* return if we have been interrupted */
1092 
1093  return wasinterrupted;
1094 }
1095 
1096 
1097 /* lock_monitor_wait ***********************************************************
1098 
1099  Wait on an object for a given (maximum) amount of time.
1100 
1101  IN:
1102  t............the current thread
1103  o............the object
1104  millis.......milliseconds of timeout
1105  nanos........nanoseconds of timeout
1106 
1107  PRE-CONDITION:
1108  The current thread must be the owner of the object's monitor.
1109 
1110 *******************************************************************************/
1111 
1112 static void lock_monitor_wait(threadobject *t, java_handle_t *o, s8 millis, s4 nanos)
1113 {
1114  lock_record_t *lr;
1115 
1116  uintptr_t *lw_ptr = lock_lockword_get(o);
1117  uintptr_t lw_cache = *lw_ptr;
1118  Lockword lockword(lw_cache);
1119 
1120  // Check if we own this monitor.
1121  // NOTE: We don't have to worry about stale values here, as any
1122  // stale value will fail this check.
1123  if (lockword.is_fat_lock()) {
1124  lr = lockword.get_fat_lock();
1125 
1126  if (lr->owner != t) {
1128  return;
1129  }
1130  }
1131  else {
1132  // It's a thin lock.
1133  if (lockword.get_thin_lock_without_count() != t->thinlock) {
1135  return;
1136  }
1137 
1138  // Get the lock-record.
1139  lr = lock_hashtable_get(o);
1140  lock_record_enter(t, lr);
1141 
1142  // Inflate this lock.
1143  Lockword(*lw_ptr).inflate(lr);
1144 
1145  notify_flc_waiters(t, o);
1146  }
1147 
1148  /* { the thread t owns the fat lock record lr on the object o } */
1149 
1150  if (lock_record_wait(t, lr, millis, nanos))
1152 }
1153 
1154 
1155 /* lock_record_notify **********************************************************
1156 
1157  Notify one thread or all threads waiting on the given lock record.
1158 
1159  IN:
1160  t............the current thread
1161  lr...........the lock record
1162  one..........if true, only notify one thread
1163 
1164  PRE-CONDITION:
1165  The current thread must be the owner of the lock record.
1166  This is NOT checked by this function!
1167 
1168 *******************************************************************************/
1169 
1170 static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one)
1171 {
1172 #if defined(ENABLE_GC_CACAO)
1173  // Sanity check.
1174  assert(GCCriticalSection::inside() == false);
1175 #endif
1176 
1177  // { The thread t owns the fat lock record lr on the object o }
1178 
1179  for (List<threadobject*>::iterator it = lr->waiters->begin(); it != lr->waiters->end(); it++) {
1180  threadobject* waiter = *it;
1181 
1182  // We must skip threads which have already been notified. They
1183  // will remove themselves from the list.
1184  if (waiter->signaled)
1185  continue;
1186 
1187  // Enter the wait-mutex.
1188  waiter->waitmutex->lock();
1189 
1190  DEBUGLOCKS(("[lock_record_notify: lr=%p, t=%p, waitingthread=%p, one=%d]", lr, t, waiter, one));
1191 
1192  // Signal the waiter.
1193  waiter->waitcond->signal();
1194 
1195  // Mark the thread as signaled.
1196  waiter->signaled = true;
1197 
1198  // Leave the wait-mutex.
1199  waiter->waitmutex->unlock();
1200 
1201  // If we should only wake one thread, we are done.
1202  if (one)
1203  break;
1204  }
1205 }
1206 
1207 
1208 /* lock_monitor_notify *********************************************************
1209 
1210  Notify one thread or all threads waiting on the given object.
1211 
1212  IN:
1213  t............the current thread
1214  o............the object
1215  one..........if true, only notify one thread
1216 
1217  PRE-CONDITION:
1218  The current thread must be the owner of the object's monitor.
1219 
1220 *******************************************************************************/
1221 
1222 static void lock_monitor_notify(threadobject *t, java_handle_t *o, bool one)
1223 {
1224  lock_record_t* lr = NULL;
1225 
1226  {
1227  // This scope is inside a critical section.
1228  GCCriticalSection cs;
1229 
1230  uintptr_t lw_cache = *lock_lockword_get(o);
1231  Lockword lockword(lw_cache);
1232 
1233  // Check if we own this monitor.
1234  // NOTE: We don't have to worry about stale values here, as any
1235  // stale value will fail this check.
1236 
1237  if (lockword.is_fat_lock()) {
1238  lr = lockword.get_fat_lock();
1239 
1240  if (lr->owner != t) {
1242  return;
1243  }
1244  }
1245  else {
1246  // It's a thin lock.
1247  if (lockword.get_thin_lock_without_count() != t->thinlock) {
1249  return;
1250  }
1251 
1252  // No thread can wait on a thin lock, so there's nothing to do.
1253  return;
1254  }
1255  }
1256 
1257  // { The thread t owns the fat lock record lr on the object o }
1258  lock_record_notify(t, lr, one);
1259 }
1260 
1261 
1262 
1263 /*============================================================================*/
 1264 /* INQUIRY FUNCTIONS */
1265 /*============================================================================*/
1266 
1267 
1268 /* lock_is_held_by_current_thread **********************************************
1269 
1270  Return true if the current thread owns the monitor of the given object.
1271 
1272  IN:
1273  o............the object
1274 
1275  RETURN VALUE:
1276  true, if the current thread holds the lock of this object.
1277 
1278 *******************************************************************************/
1279 
1281 {
1282  // This function is inside a critical section.
1283  GCCriticalSection cs;
1284 
1285  // Check if we own this monitor.
1286  // NOTE: We don't have to worry about stale values here, as any
1287  // stale value will fail this check.
1289  uintptr_t lw_cache = *lock_lockword_get(o);
1290  Lockword lockword(lw_cache);
1291 
1292  if (lockword.is_fat_lock()) {
1293  // It's a fat lock.
1294  lock_record_t* lr = lockword.get_fat_lock();
1295  return (lr->owner == t);
1296  }
1297  else {
1298  // It's a thin lock.
1299  return (lockword.get_thin_lock_without_count() == t->thinlock);
1300  }
1301 }
1302 
1303 
1304 
1305 /*============================================================================*/
1306 /* WRAPPERS FOR OPERATIONS ON THE CURRENT THREAD */
1307 /*============================================================================*/
1308 
1309 
1310 /* lock_wait_for_object ********************************************************
1311 
1312  Wait for the given object.
1313 
1314  IN:
1315  o............the object
1316  millis.......milliseconds to wait
1317  nanos........nanoseconds to wait
1318 
1319 *******************************************************************************/
1320 
1321 void lock_wait_for_object(java_handle_t *o, s8 millis, s4 nanos)
1322 {
1324 
1325  thread = THREADOBJECT;
1326 
1327  lock_monitor_wait(thread, o, millis, nanos);
1328 }
1329 
1330 
1331 /* lock_notify_object **********************************************************
1332 
1333  Notify one thread waiting on the given object.
1334 
1335  IN:
1336  o............the object
1337 
1338 *******************************************************************************/
1339 
1341 {
1343 
1344  thread = THREADOBJECT;
1345 
1346  lock_monitor_notify(thread, o, true);
1347 }
1348 
1349 
1350 /* lock_notify_all_object ******************************************************
1351 
1352  Notify all threads waiting on the given object.
1353 
1354  IN:
1355  o............the object
1356 
1357 *******************************************************************************/
1358 
1360 {
1362 
1363  thread = THREADOBJECT;
1364 
1365  lock_monitor_notify(thread, o, false);
1366 }
1367 
1368 
1369 /*
1370  * These are local overrides for various environment variables in Emacs.
1371  * Please do not remove this and leave it at the end of the file, where
1372  * Emacs will automagically detect them.
1373  * ---------------------------------------------------------------------
1374  * Local variables:
1375  * mode: c++
1376  * indent-tabs-mode: t
1377  * c-basic-offset: 4
1378  * tab-width: 4
1379  * End:
1380  * vim:noexpandtab:sw=4:ts=4:
1381  */
void write_memory_barrier(void)
Definition: atomic.hpp:97
Mutex * waitmutex
Definition: thread.hpp:113
jlong jlong jlong jlong jint jmethodID jint slot
Definition: jvmti.h:497
std::size_t index
#define STATISTICS(x)
Wrapper for statistics only code.
Definition: statistics.hpp:975
static void lock_hashtable_grow(void)
Definition: lock.cpp:265
void exceptions_throw_illegalmonitorstateexception(void)
void lock_init(void)
Definition: lock.cpp:154
void gc_weakreference_unregister(java_object_t **ref)
Definition: gc.c:214
#define NEW(type)
Definition: memory.hpp:93
void signal()
Restarts one of the threads that are waiting on this condition variable.
static void notify_flc_waiters(threadobject *t, java_handle_t *o)
Definition: lock.cpp:729
#define lr
Definition: md-asm.hpp:80
threadobject * get_thread_by_index(int32_t index)
Return the thread object with the given index.
Definition: threadlist.cpp:224
static void lock_monitor_wait(threadobject *t, java_handle_t *o, s8 millis, s4 nanos)
Definition: lock.cpp:1112
#define FREE(ptr, type)
Definition: memory.hpp:94
void unlock()
Set the lockword to THIN_UNLOCKED.
Definition: lockword.hpp:144
void exceptions_throw_interruptedexception(void)
struct threadobject * flc_list
Definition: thread.hpp:105
bool lock_monitor_exit(java_handle_t *o)
Definition: lock.cpp:900
static void lock_hashtable_init(void)
Definition: lock.cpp:243
void gc_weakreference_register(java_object_t **ref, int32_t reftype)
Definition: gc.c:169
static void lock_record_finalizer(java_handle_t *object, void *p)
Definition: lock.cpp:535
struct threadobject * owner
Definition: lock.hpp:51
void decrease_thin_lock_count()
Definition: lockword.hpp:207
Dummy implementation of a mutex.
Definition: mutex-none.hpp:33
int32_t get_thin_lock_thread_index() const
Definition: lockword.hpp:168
static int32_t heap_hashcode(java_object_t *obj)
Definition: gc.hpp:150
List< threadobject * > * waiters
Definition: lock.hpp:54
static void lock_inflate(java_handle_t *o, lock_record_t *lr)
Definition: lock.cpp:635
static void lock_record_remove_waiter(lock_record_t *lr, threadobject *t)
Definition: lock.cpp:1012
static void * attach_custom_finalizer(java_handle_t *h, FinalizerFunc f, void *data)
Definition: finalizer.cpp:315
static lock_hashtable_t lock_hashtable
Definition: lock.cpp:127
struct threadobject * flc_next
Definition: thread.hpp:107
Mutex * mutex
Definition: lock.hpp:66
int64_t s8
Definition: types.hpp:48
static void lock_record_add_waiter(lock_record_t *lr, threadobject *t)
Definition: lock.cpp:990
#define MZERO(ptr, type, num)
Definition: memory.hpp:105
static lock_record_t * lock_record_new(void)
Definition: lock.cpp:168
static bool inside()
Checks if the calling thread is inside a GC critical section.
Definition: gc.hpp:94
MachineBasicBlock * current
Mutex * flc_lock
Definition: thread.hpp:109
void jvmti_MonitorContendedEntering(bool entered, jobject obj)
Definition: cacaodbg.c:366
ptrint thinlock
Definition: thread.hpp:95
static void lock_record_free(lock_record_t *lr)
Definition: lock.cpp:209
#define LLNI_class_get(obj, variable)
Definition: llni.hpp:60
void lock_notify_all_object(java_handle_t *o)
Definition: lock.cpp:1359
static void lock_record_enter(threadobject *t, lock_record_t *lr)
Definition: lock.cpp:591
Condition * waitcond
Definition: thread.hpp:114
static void lock_record_notify(threadobject *t, lock_record_t *lr, bool one)
Definition: lock.cpp:1170
static ThreadList * get()
Provides access to singleton.
Definition: threadlist.hpp:62
Critical section for the GC.
Definition: gc.hpp:42
uintptr_t get_thin_lock_without_count() const
Definition: lockword.hpp:156
bool lock(uintptr_t thinlock)
Try to lock the lockword with the given thin-lock value.
Definition: lockword.hpp:132
bool is_thin_lock() const
Definition: lockword.hpp:102
void lock_notify_object(java_handle_t *o)
Definition: lock.cpp:1340
void exceptions_throw_nullpointerexception(void)
void increase_thin_lock_count()
Definition: lockword.hpp:198
void threads_wait_with_timeout_relative(threadobject *thread, s8 millis, s4 nanos)
bool interrupted
Definition: thread.hpp:119
bool is_max_thin_lock_count() const
Definition: lockword.hpp:192
JNIEnv jthread thread
Definition: jvmti.h:207
MIIterator i
static uintptr_t * lock_lockword_get(java_handle_t *o)
Definition: lock.cpp:570
void broadcast()
Restarts all the threads that are waiting on the condition variable.
int32_t s4
Definition: types.hpp:45
struct threadobject * flc_tail
Definition: thread.hpp:106
lock_record_t * hashlink
Definition: lock.hpp:55
void lock_wait_for_object(java_handle_t *o, s8 millis, s4 nanos)
Definition: lock.cpp:1321
bool flc_bit
Definition: thread.hpp:104
java_handle_t * flc_object
Definition: thread.hpp:108
#define LOG(STMT)
Analogous to DEBUG.
Definition: logging.hpp:91
uint32_t u4
Definition: types.hpp:46
Lockword.
Definition: lockword.hpp:37
static void lock_record_exit(threadobject *t, lock_record_t *lr)
Definition: lock.cpp:612
static threadobject * thread_get_current()
Return the threadobject for the current thread.
Definition: thread-none.hpp:56
bool is_fat_lock() const
Definition: lockword.hpp:108
void memory_barrier(void)
Definition: atomic.hpp:96
#define LOCK_INITIAL_HASHTABLE_SIZE
Definition: lock.cpp:83
static void lock_monitor_notify(threadobject *t, java_handle_t *o, bool one)
Definition: lock.cpp:1222
#define MNEW(type, num)
Definition: memory.hpp:96
bool lock_is_held_by_current_thread(java_handle_t *o)
Definition: lock.cpp:1280
#define DEBUGLOCKS(format)
Definition: lock.cpp:62
lock_record_t ** ptr
Definition: lock.hpp:69
bool signaled
Definition: thread.hpp:120
void unlock()
Unlocks the given mutex object and checks for errors.
Definition: mutex-none.hpp:36
#define LLNI_DIRECT(hdl)
Definition: llni.hpp:54
static void sable_flc_waiting(uintptr_t lw_cache, threadobject *t, java_handle_t *o)
Definition: lock.cpp:658
static lock_record_t * lock_hashtable_get(java_handle_t *o)
Definition: lock.cpp:397
static java_object_t * next
Definition: copy.c:43
bool lock_monitor_enter(java_handle_t *o)
Definition: lock.cpp:786
Condition * flc_cond
Definition: thread.hpp:110
static bool lock_record_wait(threadobject *t, lock_record_t *lr, s8 millis, s4 nanos)
Definition: lock.cpp:1041
void inflate(struct lock_record_t *lr)
Inflate the lock of the given object.
Definition: lockword.cpp:44
Nl nl
Definition: OStream.cpp:56
#define STAT_DECLARE_VAR(type, var, init)
Declare an external statistics variable.
Definition: statistics.hpp:963
static void lock_hashtable_remove(threadobject *t, java_handle_t *o)
Definition: lock.cpp:474
void instruction_barrier(void)
Definition: atomic.hpp:98
struct lock_record_t * get_fat_lock() const
Definition: lockword.hpp:174
#define MFREE(ptr, type, num)
Definition: memory.hpp:97
Mutex * mutex
Definition: lock.hpp:53
java_object_t * object
Definition: lock.hpp:50
#define THREADOBJECT
Definition: thread-none.hpp:47
void wait(Mutex *mutex)
Waits for the condition variable.
void lock()
Locks the given mutex object and checks for errors.
Definition: mutex-none.hpp:35
uintptr_t get_thin_lock() const
Definition: lockword.hpp:150