// natObject.cc - Implementation of the Object class.

/* Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003, 2005  Free Software Foundation

   This file is part of libgcj.

This software is copyrighted work licensed under the terms of the
Libgcj License.  Please consult the file "LIBGCJ_LICENSE" for
details.  */

#include <config.h>
#include <platform.h>

#include <string.h>	// for memcpy.

#pragma implementation "Object.h"

#include <gcj/cni.h>
#include <jvm.h>
#include <java/lang/Object.h>
#include <java-threads.h>
#include <java-signal.h>
#include <java/lang/CloneNotSupportedException.h>
#include <java/lang/IllegalArgumentException.h>
#include <java/lang/IllegalMonitorStateException.h>
#include <java/lang/InterruptedException.h>
#include <java/lang/NullPointerException.h>
#include <java/lang/Class.h>
#include <java/lang/Cloneable.h>
#include <java/lang/Thread.h>

using namespace java::lang;
// This is used to represent synchronization information.
struct _Jv_SyncInfo
{
#if defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy)
  // We only need to keep track of initialization state if we can
  // possibly finalize this object.
  bool init;
#endif
  _Jv_ConditionVariable_t condition;
  _Jv_Mutex_t mutex;
};
jclass
java::lang::Object::getClass (void)
{
  _Jv_VTable **dt = (_Jv_VTable **) this;
  return (*dt)->clas;
}

jint
java::lang::Object::hashCode (void)
{
  return _Jv_HashCode (this);
}
jobject
java::lang::Object::clone (void)
{
  jclass klass = getClass ();
  jobject r;
  jint size;

  // We also clone arrays here.  If we put the array code into
  // __JArray, then we'd have to figure out a way to find the array
  // vtbl when creating a new array class.  This is easier, if uglier.
  if (klass->isArray())
    {
      __JArray *array = (__JArray *) this;
      jclass comp = getClass()->getComponentType();
      jint eltsize;
      if (comp->isPrimitive())
        {
          r = _Jv_NewPrimArray (comp, array->length);
          eltsize = comp->size();
        }
      else
        {
          r = _Jv_NewObjectArray (array->length, comp, NULL);
          eltsize = sizeof (jobject);
        }
      // We can't use sizeof on __JArray because we must account for
      // alignment of the element type.
      size = (_Jv_GetArrayElementFromElementType (array, comp) - (char *) array
              + array->length * eltsize);
    }
  else
    {
      if (! java::lang::Cloneable::class$.isAssignableFrom(klass))
        throw new CloneNotSupportedException;

      size = klass->size();
      r = _Jv_AllocObject (klass);
    }

  memcpy ((void *) r, (void *) this, size);
#ifndef JV_HASH_SYNCHRONIZATION
  // Guarantee that the locks associated with the two objects are
  // distinct.
  r->sync_info = NULL;
#endif
  return r;
}
void
_Jv_FinalizeObject (jobject obj)
{
  // Ignore exceptions.  From section 12.6 of the Java Language Spec.
  try
    {
      obj->finalize ();
    }
  catch (java::lang::Throwable *t)
    {
      // Ignore.
    }
}
// Synchronization code.

#ifndef JV_HASH_SYNCHRONIZATION
// This global is used to make sure that only one thread sets an
// object's `sync_info' field.
static _Jv_Mutex_t sync_mutex;
// This macro is used to see if synchronization initialization is
// needed.
#if defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy)
# define INIT_NEEDED(Obj) (! (Obj)->sync_info \
                           || ! ((_Jv_SyncInfo *) ((Obj)->sync_info))->init)
#else
# define INIT_NEEDED(Obj) (! (Obj)->sync_info)
#endif
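
// A sketch of the intended use (mirroring the callers below): the flag
// is tested without holding sync_mutex, and sync_init re-tests it under
// the lock, i.e. classic double-checked initialization:
//
//   if (__builtin_expect (INIT_NEEDED (this), false))
//     sync_init ();    // takes sync_mutex and re-checks INIT_NEEDED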
#if defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy)
// If we have to run a destructor for a sync_info member, then this
// function is registered as a finalizer for the sync_info.
static void
finalize_sync_info (jobject obj)
{
  _Jv_SyncInfo *si = (_Jv_SyncInfo *) obj;
#if defined (_Jv_HaveCondDestroy)
  _Jv_CondDestroy (&si->condition);
#endif
#if defined (_Jv_HaveMutexDestroy)
  _Jv_MutexDestroy (&si->mutex);
#endif
  si->init = false;
}
#endif
// This is called to initialize the sync_info element of an object.
void
java::lang::Object::sync_init (void)
{
  _Jv_MutexLock (&sync_mutex);
  // Check again to see if initialization is needed now that we have
  // the lock.
  if (INIT_NEEDED (this))
    {
      // We assume there are no pointers in the sync_info
      // representation.
      _Jv_SyncInfo *si;
      // We always create a new sync_info, even if there is already
      // one available.  Any given object can only be finalized once.
      // If we get here and sync_info is not null, then it has already
      // been finalized.  So if we just reinitialize the old one,
      // we'll never be able to (re-)destroy the mutex and/or
      // condition variable.
      si = (_Jv_SyncInfo *) _Jv_AllocBytes (sizeof (_Jv_SyncInfo));
      _Jv_MutexInit (&si->mutex);
      _Jv_CondInit (&si->condition);
#if defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy)
      // Register a finalizer.
      si->init = true;
      _Jv_RegisterFinalizer (si, finalize_sync_info);
#endif
      sync_info = (jobject) si;
    }
  _Jv_MutexUnlock (&sync_mutex);
}
void
java::lang::Object::notify (void)
{
  if (__builtin_expect (INIT_NEEDED (this), false))
    sync_init ();
  _Jv_SyncInfo *si = (_Jv_SyncInfo *) sync_info;
  if (__builtin_expect (_Jv_CondNotify (&si->condition, &si->mutex), false))
    throw new IllegalMonitorStateException(JvNewStringLatin1
                                           ("current thread not owner"));
}
void
java::lang::Object::notifyAll (void)
{
  if (__builtin_expect (INIT_NEEDED (this), false))
    sync_init ();
  _Jv_SyncInfo *si = (_Jv_SyncInfo *) sync_info;
  if (__builtin_expect (_Jv_CondNotifyAll (&si->condition, &si->mutex), false))
    throw new IllegalMonitorStateException(JvNewStringLatin1
                                           ("current thread not owner"));
}
void
java::lang::Object::wait (jlong timeout, jint nanos)
{
  if (__builtin_expect (INIT_NEEDED (this), false))
    sync_init ();
  if (__builtin_expect (timeout < 0 || nanos < 0 || nanos > 999999, false))
    throw new IllegalArgumentException;
  _Jv_SyncInfo *si = (_Jv_SyncInfo *) sync_info;
  switch (_Jv_CondWait (&si->condition, &si->mutex, timeout, nanos))
    {
      case _JV_NOT_OWNER:
        throw new IllegalMonitorStateException (JvNewStringLatin1
                                                ("current thread not owner"));
      case _JV_INTERRUPTED:
        if (Thread::interrupted ())
          throw new InterruptedException;
    }
}
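
// Illustrative call sequence (a sketch, not taken from this file): a
// Java "synchronized" block around Object.wait() reaches this code
// roughly as
//
//   _Jv_MonitorEnter (obj);   // monitorenter bytecode
//   obj->wait (0, 0);         // _Jv_CondWait releases and reacquires
//                             // si->mutex around the wait
//   _Jv_MonitorExit (obj);    // monitorexit bytecode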
// Some runtime code.

// This function is called at system startup to initialize the
// `sync_mutex'.
void
_Jv_InitializeSyncMutex (void)
{
  _Jv_MutexInit (&sync_mutex);
}
void
_Jv_MonitorEnter (jobject obj)
{
  if (__builtin_expect (! obj, false))
    throw new java::lang::NullPointerException;

  if (__builtin_expect (INIT_NEEDED (obj), false))
    obj->sync_init ();
  _Jv_SyncInfo *si = (_Jv_SyncInfo *) obj->sync_info;
  _Jv_MutexLock (&si->mutex);
  // FIXME: In the Windows case, this can return a nonzero error code.
  // We should turn that into some exception ...
}
void
_Jv_MonitorExit (jobject obj)
{
  JvAssert (! INIT_NEEDED (obj));
  _Jv_SyncInfo *si = (_Jv_SyncInfo *) obj->sync_info;
  if (__builtin_expect (_Jv_MutexUnlock (&si->mutex), false))
    throw new java::lang::IllegalMonitorStateException;
}
bool
_Jv_ObjectCheckMonitor (jobject obj)
{
  if (__builtin_expect (INIT_NEEDED (obj), false))
    obj->sync_init ();
  _Jv_SyncInfo *si = (_Jv_SyncInfo *) obj->sync_info;
  return _Jv_MutexCheckMonitor (&si->mutex);
}
#else /* JV_HASH_SYNCHRONIZATION */

// FIXME: We shouldn't be calling GC_register_finalizer directly.
#ifndef HAVE_BOEHM_GC
# error Hash synchronization currently requires boehm-gc
// That's actually a bit of a lie: It should also work with the null GC,
// probably even better than the alternative.
// To really support alternate GCs here, we would need to widen the
// interface to finalization, since we sometimes have to register a
// second finalizer for an object that already has one.
// We might also want to move the GC interface to a .h file, since
// the number of procedure call levels involved in some of these
// operations is already ridiculous, and would become worse if we
// went through the proper intermediaries.
#endif

# ifdef LIBGCJ_GC_DEBUG
#   include "gc.h"
# endif

// What follows currently assumes a Linux-like platform.
// Some of it specifically assumes X86 or IA64 Linux, though that
// should be easily fixable.
// A Java monitor implementation based on a table of locks.
// Each entry in the table describes
// locks held for objects that hash to that location.
// This started out as a reimplementation of the technique used in SGI's JVM,
// for which we obtained permission from SGI.
// But in fact, this ended up quite different, though some ideas are
// still shared with the original.
// It was also influenced by some of the published IBM work,
// though it also differs in many ways from that.
// We could speed this up if we had a way to atomically update
// an entire cache entry, i.e. 2 contiguous words of memory.
// That would usually be the case with a 32 bit ABI on a 64 bit processor.
// But we don't currently go out of our way to target those.
// I don't know how to do much better with an N bit ABI on a processor
// that can atomically update only N bits at a time.
// Author: Hans-J. Boehm  (Hans_Boehm@hp.com, boehm@acm.org)
#include <unistd.h>	// for usleep, sysconf.
#include <stdio.h>	// for fprintf (LOCK_DEBUG).
#include <limits.h>	// for USHRT_MAX.
#include <gcj/javaprims.h>
#include <sysdep/locks.h>
#include <java/lang/Thread.h>

// Try to determine whether we are on a multiprocessor, i.e. whether
// spinning may be profitable.
// This should really use a suitable autoconf macro.
// False is the conservative answer, though the right one is much better.
static bool
is_mp()
{
#ifdef _SC_NPROCESSORS_ONLN
  long nprocs = sysconf(_SC_NPROCESSORS_ONLN);
  return (nprocs > 1);
#else
  return false;
#endif
}
// A call to keep_live(p) forces p to be accessible to the GC
// at this point.
static inline void
keep_live(obj_addr_t p)
{
  __asm__ __volatile__("" : : "rm"(p) : "memory");
}
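
// For example (a sketch): the monitor code below stores an object's
// address in a table entry that the collector may not trace precisely.
// A trailing keep_live(addr) prevents the compiler from considering the
// object dead, and hence collectible, before the lock operation has
// completely finished with it.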
// Each hash table entry holds a single preallocated "lightweight" lock.
// In addition, it holds a chain of "heavyweight" locks.  Lightweight
// locks do not support Object.wait(), and are converted to heavyweight
// status in response to contention.  Unlike the SGI scheme, both
// lightweight and heavyweight locks in one hash entry can be simultaneously
// in use.  (The SGI scheme requires that we be able to acquire a heavyweight
// lock on behalf of another thread, and can thus convert a lock we don't
// hold to heavyweight status.  Here we don't insist on that, and thus
// let the original holder of the lightweight lock keep it.)

struct heavy_lock {
  void * reserved_for_gc;
  struct heavy_lock *next;	// Hash chain link.
				// Traced by GC.
  void * old_client_data;	// The only other field traced by GC.
  GC_finalization_proc old_finalization_proc;
  obj_addr_t address;		// Object to which this lock corresponds.
				// Should not be traced by GC.
				// Cleared as heavy_lock is destroyed.
				// Together with the rest of the heavy lock
				// chain, this is protected by the lock
				// bit in the hash table entry to which
				// the chain is attached.
  // The remaining fields save prior finalization info for
  // the object, which we needed to replace in order to arrange
  // for cleanup of the lock structure.
  _Jv_SyncInfo si;
};
#ifdef LOCK_DEBUG
void
print_hl_list(heavy_lock *hl)
{
  heavy_lock *p = hl;
  for (; 0 != p; p = p->next)
    fprintf (stderr, "(hl = %p, addr = %p)", p,
             (void *)(p -> address));
}
#endif /* LOCK_DEBUG */
#if defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy)
// If we have to run a destructor for a sync_info member, then this
// function could be registered as a finalizer for the sync_info.
// In fact, we now only invoke it explicitly.
static inline void
heavy_lock_finalization_proc (heavy_lock *hl)
{
#if defined (_Jv_HaveCondDestroy)
  _Jv_CondDestroy (&hl->si.condition);
#endif
#if defined (_Jv_HaveMutexDestroy)
  _Jv_MutexDestroy (&hl->si.mutex);
#endif
}
#endif /* defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy) */
// We convert the lock back to lightweight status when
// we exit, so that a single contention episode doesn't doom the lock
// forever.  But we also need to make sure that lock structures for dead
// objects are eventually reclaimed.  We do that in an additional
// finalizer on the underlying object.
// Note that if the corresponding object is dead, it is safe to drop
// the heavy_lock structure from its list.  It is not necessarily
// safe to deallocate it, since the unlock code could still be running.
struct hash_entry {
  volatile obj_addr_t address;	// Address of object for which lightweight
				// lock is held.
				// We assume the 3 low order bits are zero.
				// With the Boehm collector and bitmap
				// allocation, objects of size 4 bytes are
				// broken anyway.  Thus this is primarily
				// a constraint on statically allocated
				// objects used for synchronization.
				// This allows us to use the low order
				// bits as follows:
# define LOCKED 1		// This hash entry is locked, and its
				// state may be invalid.
				// The lock protects both the hash_entry
				// itself (except for the light_count
				// and light_thr_id fields, which
				// are protected by the lightweight
				// lock itself), and any heavy_monitor
				// structures attached to it.
# define HEAVY 2		// Heavyweight locks associated with this
				// hash entry may be held.
				// The lightweight entry is still valid,
				// if the leading bits of the address
				// field are nonzero.
				// If the LOCKED bit is clear, then this is
				// set exactly when heavy_count is > 0 .
				// Stored redundantly so a single
				// compare-and-swap works in the easy case.
				// If HEAVY is not set, it is safe to use
				// an available lightweight lock entry
				// without checking if there is an existing
				// heavyweight lock for the same object.
				// (There may be one, but it won't be held
				// or waited on.)
# define REQUEST_CONVERSION 4	// The lightweight lock is held.  But
				// one or more other threads have tried
				// to acquire the lock, and hence request
				// conversion to heavyweight status.
				// The heavyweight lock is already allocated.
				// Threads requesting conversion are
				// waiting on the condition variable associated
				// with the heavyweight lock.
				// Not used for conversion due to
				// Object.wait() calls.
# define FLAGS (LOCKED | HEAVY | REQUEST_CONVERSION)
  volatile _Jv_ThreadId_t light_thr_id;
				// Thr_id of holder of lightweight lock.
				// Only updated by lightweight lock holder.
				// Must be recognizably invalid if the
				// lightweight lock is not held.
# define INVALID_THREAD_ID 0	// Works for Linux?
				// If zero doesn't work, we have to
				// initialize lock table.
  volatile unsigned short light_count;
				// Number of times the lightweight lock
				// is held minus one.  Zero if lightweight
				// lock is not held.  Only updated by
				// lightweight lock holder or, in one
				// case, while holding the LOCKED bit in
				// a state in which there can be no
				// lightweight lock holder.
  unsigned short heavy_count;	// Total number of times heavyweight locks
				// associated with this hash entry are held
				// or waiting to be acquired.
				// Threads in wait() are included even though
				// they have temporarily released the lock.
				// Protected by LOCKED bit.
				// Threads requesting conversion to heavyweight
				// status are also included.
  struct heavy_lock * heavy_locks;
				// Chain of heavy locks.  Protected
				// by lock bit for he.  Locks may
				// remain allocated here even if HEAVY
				// is not set and heavy_count is 0.
				// If a lightweight and heavyweight lock
				// correspond to the same address, the
				// lightweight lock is the right one.
};
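
// Pictorially (a summary of the flag bits defined above), the address
// word of a hash_entry packs the object address and three flag bits:
//
//   +--------------------------------+----------+-------+--------+
//   | object address (low bits zero) | REQ_CONV | HEAVY | LOCKED |
//   |          bits 3..N-1           |  bit 2   | bit 1 | bit 0  |
//   +--------------------------------+----------+-------+--------+
//
// so (address & ~FLAGS) recovers the object, and a single
// compare-and-swap can test and update the entire state.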
#ifndef JV_SYNC_TABLE_SZ
# define JV_SYNC_TABLE_SZ 2048	// Must be power of 2.
#endif

hash_entry light_locks[JV_SYNC_TABLE_SZ];

#define JV_SYNC_HASH(p) (((long)p ^ ((long)p >> 10)) & (JV_SYNC_TABLE_SZ-1))
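
// Worked example (a sketch, with JV_SYNC_TABLE_SZ == 2048, so the mask
// is 0x7ff): for p == 0x1000 and p == 0x2000, which would otherwise
// both map to bucket 0,
//
//   0x1000 ^ (0x1000 >> 10) == 0x1004;  0x1004 & 0x7ff == 4
//   0x2000 ^ (0x2000 >> 10) == 0x2008;  0x2008 & 0x7ff == 8
//
// The XOR folds higher address bits into the index so that objects at
// coarsely aligned addresses do not all collide.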
// Note that the light_locks table is scanned conservatively by the
// collector.  It is essential that the heavy_locks field is scanned.
// Currently the address field may or may not cause the associated object
// to be retained, depending on whether flag bits are set.
// This means that we can conceivably get an unexpected deadlock if
// 1) Object at address A is locked.
// 2) The client drops A without unlocking it.
// 3) Flag bits in the address entry are set, so the collector reclaims
//    the object at A.
// 4) A is reallocated, and an attempt is made to lock the result.
// This could be fixed by scanning light_locks in a more customized
// manner that ignores the flag bits.  But it can only happen with hand
// generated semi-illegal .class files, and then it doesn't present a
// security hole.
#ifdef LOCK_DEBUG
void print_he(hash_entry *he)
{
  fprintf(stderr, "lock hash entry = %p, index = %d, address = 0x%lx\n"
                  "\tlight_thr_id = 0x%lx, light_count = %d, "
                  "heavy_count = %d\n\theavy_locks:", he,
          he - light_locks, (unsigned long)(he -> address),
          (unsigned long)(he -> light_thr_id),
          he -> light_count, he -> heavy_count);
  print_hl_list(he -> heavy_locks);
  fprintf(stderr, "\n");
}
#endif /* LOCK_DEBUG */
#ifdef LOCK_LOG
// Log locking operations.  For debugging only.
// Logging is intended to be as unintrusive as possible.
// Log calls are made after an operation completes, and hence
// may not completely reflect actual synchronization ordering.
// The choice of events to log is currently a bit haphazard.
// The intent is that if we have to track down any other bugs
// in this code, we extend the logging as appropriate.
typedef enum
{
  ACQ_LIGHT, ACQ_LIGHT2, ACQ_HEAVY, ACQ_HEAVY2, PROMOTE, REL_LIGHT,
  REL_HEAVY, REQ_CONV, PROMOTE2, WAIT_START, WAIT_END, NOTIFY, NOTIFY_ALL
} event_type;

struct lock_history
{
  event_type tp;
  obj_addr_t addr;	// Often includes flags.
  _Jv_ThreadId_t thr;
};

const int LOG_SIZE = 128;	// Power of 2.

lock_history lock_log[LOG_SIZE];

volatile obj_addr_t log_next = 0;
			// Next location in lock_log.
			// Really an int, but we need compare_and_swap.
static void add_log_entry(event_type t, obj_addr_t a, _Jv_ThreadId_t th)
{
  obj_addr_t my_entry;
  obj_addr_t next_entry;

  do
    {
      my_entry = log_next;
      next_entry = ((my_entry + 1) & (LOG_SIZE - 1));
    }
  while (!compare_and_swap(&log_next, my_entry, next_entry));
  lock_log[my_entry].tp = t;
  lock_log[my_entry].addr = a;
  lock_log[my_entry].thr = th;
}

# define LOG(t, a, th) add_log_entry(t, a, th)
#else /* !LOCK_LOG */
# define LOG(t, a, th)
#endif
static bool mp = false;	// Known multiprocessor.

// Wait for roughly 2^n units, touching as little memory as possible.
static void
spin(unsigned n)
{
  const unsigned MP_SPINS = 10;
  const unsigned YIELDS = 4;
  const unsigned SPINS_PER_UNIT = 30;
  const unsigned MIN_SLEEP_USECS = 2001;	// Shorter times spin under Linux.
  const unsigned MAX_SLEEP_USECS = 200000;
  static unsigned spin_limit = 0;
  static unsigned yield_limit = YIELDS;
  static bool spin_initialized = false;

  if (!spin_initialized)
    {
      mp = is_mp();
      if (mp)
        {
          spin_limit = MP_SPINS;
          yield_limit = MP_SPINS + YIELDS;
        }
      spin_initialized = true;
    }
  if (n < spin_limit)
    {
      unsigned i = SPINS_PER_UNIT << n;
      for (; i > 0; --i)
        __asm__ __volatile__("");
    }
  else if (n < yield_limit)
    {
      _Jv_ThreadYield();
    }
  else
    {
      unsigned duration = MIN_SLEEP_USECS << (n - yield_limit);
      if (n >= 15 + yield_limit || duration > MAX_SLEEP_USECS)
        duration = MAX_SLEEP_USECS;
      _Jv_platform_usleep(duration);
    }
}
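
// Resulting backoff schedule (a sketch, assuming the constants above on
// a multiprocessor, where spin_limit == 10 and yield_limit == 14):
//   n = 0..9  : busy-wait for 30 << n empty iterations
//   n = 10..13: _Jv_ThreadYield()
//   n >= 14   : sleep for 2001 << (n - 14) usecs, capped at 200000
// i.e. roughly exponential backoff that touches no shared memory.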
// Wait for a hash entry to become unlocked.
static void
wait_unlocked (hash_entry *he)
{
  unsigned i = 0;
  while (he -> address & LOCKED)
    spin (i++);
}
// Return the heavy lock for addr if it was already allocated.
// The client passes in the appropriate hash_entry.
// We hold the lock for he.
static inline heavy_lock *
find_heavy (obj_addr_t addr, hash_entry *he)
{
  heavy_lock *hl = he -> heavy_locks;
  while (hl != 0 && hl -> address != addr) hl = hl -> next;
  return hl;
}
// Unlink the heavy lock for the given address from its hash table chain.
// Dies miserably and conspicuously if it's not there, since that should
// be impossible.
static inline void
unlink_heavy (obj_addr_t addr, hash_entry *he)
{
  heavy_lock **currentp = &(he -> heavy_locks);
  while ((*currentp) -> address != addr)
    currentp = &((*currentp) -> next);
  *currentp = (*currentp) -> next;
}
// Finalization procedure for objects that have associated heavy-weight
// locks.  This may replace the real finalization procedure.
static void
heavy_lock_obj_finalization_proc (void *obj, void *cd)
{
  heavy_lock *hl = (heavy_lock *)cd;

  // This only addresses misalignment of statics, not heap objects.  It
  // works only because registering statics for finalization is a noop,
  // no matter what the least significant bits are.
#ifdef JV_LINKER_CANNOT_8BYTE_ALIGN_STATICS
  obj_addr_t addr = (obj_addr_t)obj & ~((obj_addr_t)0x7);
#else
  obj_addr_t addr = (obj_addr_t)obj;
#endif
  hash_entry *he = light_locks + JV_SYNC_HASH(addr);
  obj_addr_t he_address = (he -> address & ~LOCKED);

  // Acquire lock bit immediately.  It's possible that the hl was already
  // destroyed while we were waiting for the finalizer to run.  If it
  // was, the address field was set to zero.  The address field access is
  // protected by the lock bit to ensure that we do this exactly once.
  // The lock bit also protects updates to the object's finalizer.
  while (!compare_and_swap(&(he -> address), he_address, he_address|LOCKED))
    {
      // Hash table entry is currently locked.  We can't safely
      // touch the list of heavy locks.
      wait_unlocked(he);
      he_address = (he -> address & ~LOCKED);
    }
  if (0 == hl -> address)
    {
      // remove_all_heavy destroyed hl, and took care of the real finalizer.
      release_set(&(he -> address), he_address);
      return;
    }
  JvAssert(hl -> address == addr);
  GC_finalization_proc old_finalization_proc = hl -> old_finalization_proc;
  if (old_finalization_proc != 0)
    {
      // We still need to run a real finalizer.  In an idealized
      // world, in which people write thread-safe finalizers, that is
      // likely to require synchronization.  Thus we reregister
      // ourselves as the only finalizer, and simply run the real one.
      // Thus we don't clean up the lock yet, but we're likely to do so
      // on the next GC cycle.
      // It's OK if remove_all_heavy actually destroys the heavy lock,
      // since we've updated old_finalization_proc, and thus the user's
      // finalizer won't be rerun.
      void * old_client_data = hl -> old_client_data;
      hl -> old_finalization_proc = 0;
      hl -> old_client_data = 0;
#     ifdef HAVE_BOEHM_GC
      GC_REGISTER_FINALIZER_NO_ORDER(obj, heavy_lock_obj_finalization_proc, cd, 0, 0);
#     endif
      release_set(&(he -> address), he_address);
      old_finalization_proc(obj, old_client_data);
    }
  else
    {
      // The object is really dead, although it's conceivable that
      // some thread may still be in the process of releasing the
      // heavy lock.  Unlink it and, if necessary, register a finalizer
      // to destroy sync_info.
      unlink_heavy(addr, he);
      hl -> address = 0;	// Don't destroy it again.
      release_set(&(he -> address), he_address);
#     if defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy)
      // Make sure lock is not held and then destroy condvar and mutex.
      _Jv_MutexLock(&(hl->si.mutex));
      _Jv_MutexUnlock(&(hl->si.mutex));
      heavy_lock_finalization_proc (hl);
#     endif
    }
}
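
// Life cycle of the replaced finalizer (a summary of the code above):
//   1. alloc_heavy registers heavy_lock_obj_finalization_proc for the
//      object, saving any preexisting finalizer in
//      old_finalization_proc / old_client_data.
//   2. The first time the object appears dead, the saved finalizer (if
//      any) is run and this proc is reregistered, so the lock survives
//      in case the user's finalizer resurrects the object.
//   3. On a later cycle, with old_finalization_proc now 0, the lock is
//      unlinked and its mutex and condition variable are destroyed.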
// We hold the lock on he, and heavy_count is 0.
// Release the lock by replacing the address with new_address_val.
// Remove all heavy locks on the list.  Note that the only possible way
// in which a lock may still be in use is if it's in the process of
// being unlocked.
// FIXME:  Why does this unlock the hash entry?  I think that
// could now be done more cleanly in MonitorExit.
static void
remove_all_heavy (hash_entry *he, obj_addr_t new_address_val)
{
  JvAssert(he -> heavy_count == 0);
  JvAssert(he -> address & LOCKED);
  heavy_lock *hl = he -> heavy_locks;
  he -> heavy_locks = 0;
  // We would really like to release the lock bit here.  Unfortunately,
  // that creates a race between our finalizer removal, and the potential
  // reinstallation of a new finalizer as a new heavy lock is created.
  // This may need to be revisited.
  for(; 0 != hl; hl = hl->next)
    {
      obj_addr_t obj = hl -> address;
      JvAssert(0 != obj);	// If this was previously finalized, it should no
				// longer appear on our list.
      hl -> address = 0;	// Finalization proc might still see it after we
				// finish.
      GC_finalization_proc old_finalization_proc = hl -> old_finalization_proc;
      void * old_client_data = hl -> old_client_data;
#     ifdef HAVE_BOEHM_GC
      // Remove our finalization procedure.
      // Reregister the clients if applicable.
      GC_REGISTER_FINALIZER_NO_ORDER((GC_PTR)obj, old_finalization_proc,
                                     old_client_data, 0, 0);
      // Note that our old finalization procedure may have been
      // previously determined to be runnable, and may still run.
      // FIXME - direct dependency on boehm GC.
#     endif
#     if defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy)
      // Wait for a possible lock holder to finish unlocking it.
      // This is only an issue if we have to explicitly destroy the mutex
      // or possibly if we have to destroy a condition variable that is
      // still being notified.
      _Jv_MutexLock(&(hl->si.mutex));
      _Jv_MutexUnlock(&(hl->si.mutex));
      heavy_lock_finalization_proc (hl);
#     endif
    }
  release_set(&(he -> address), new_address_val);
}
// We hold the lock on he and heavy_count is 0.
// We release it by replacing the address field with new_address_val.
// Remove all heavy locks on the list if the list is sufficiently long.
// This is called periodically to avoid very long lists of heavy locks.
// This seems to otherwise become an issue with SPECjbb, for example.
static inline void
maybe_remove_all_heavy (hash_entry *he, obj_addr_t new_address_val)
{
  static const int max_len = 5;
  heavy_lock *hl = he -> heavy_locks;

  for (int i = 0; i < max_len; ++i)
    {
      if (0 == hl)
        {
          release_set(&(he -> address), new_address_val);
          return;
        }
      hl = hl -> next;
    }
  remove_all_heavy(he, new_address_val);
}
// Allocate a new heavy lock for addr, returning its address.
// Assumes we already have the hash_entry locked, and there
// is currently no lightweight or allocated lock for addr.
// We register a finalizer for addr, which is responsible for
// removing the heavy lock when addr goes away, in addition
// to the responsibilities of any prior finalizer.
// This unfortunately holds the lock bit for the hash entry while it
// allocates two objects (one for the finalizer).
// It would be nice to avoid that somehow ...
static heavy_lock *
alloc_heavy(obj_addr_t addr, hash_entry *he)
{
  heavy_lock * hl = (heavy_lock *) _Jv_AllocTraceTwo(sizeof (heavy_lock));

  hl -> address = addr;
  _Jv_MutexInit (&(hl -> si.mutex));
  _Jv_CondInit (&(hl -> si.condition));
# if defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy)
  hl->si.init = true;  // needed ?
# endif
  hl -> next = he -> heavy_locks;
  he -> heavy_locks = hl;
  // FIXME: The only call that cheats and goes directly to the GC interface.
# ifdef HAVE_BOEHM_GC
  GC_REGISTER_FINALIZER_NO_ORDER(
                          (void *)addr, heavy_lock_obj_finalization_proc,
                          hl, &hl->old_finalization_proc,
                          &hl->old_client_data);
# endif /* HAVE_BOEHM_GC */
  return hl;
}
// Return the heavy lock for addr, allocating if necessary.
// Assumes we have the cache entry locked, and there is no lightweight
// lock for addr.
static heavy_lock *
get_heavy(obj_addr_t addr, hash_entry *he)
{
  heavy_lock *hl = find_heavy(addr, he);
  if (0 == hl)
    hl = alloc_heavy(addr, he);
  return hl;
}
void
_Jv_MonitorEnter (jobject obj)
{
#ifdef JV_LINKER_CANNOT_8BYTE_ALIGN_STATICS
  obj_addr_t addr = (obj_addr_t)obj & ~((obj_addr_t)FLAGS);
#else
  obj_addr_t addr = (obj_addr_t)obj;
#endif
  obj_addr_t address;
  unsigned hash = JV_SYNC_HASH(addr);
  hash_entry * he = light_locks + hash;
  _Jv_ThreadId_t self = _Jv_ThreadSelf();
  unsigned count;
  const unsigned N_SPINS = 18;

  // We need to somehow check that addr is not NULL on the fast path.
  // A very predictable
  // branch on a register value is probably cheaper than dereferencing addr.
  // We could also permanently lock the NULL entry in the hash table.
  // But it's not clear that's cheaper either.
  if (__builtin_expect(!addr, false))
    throw new java::lang::NullPointerException;

  JvAssert(!(addr & FLAGS));
retry:
  if (__builtin_expect(compare_and_swap(&(he -> address),
                                        0, addr), true))
    {
      JvAssert(he -> light_thr_id == INVALID_THREAD_ID);
      JvAssert(he -> light_count == 0);
      he -> light_thr_id = self;
      // Count fields are set correctly.  Heavy_count was also zero,
      // but can change asynchronously.
      // This path is hopefully both fast and the most common.
      LOG(ACQ_LIGHT, addr, self);
      return;
    }
  address = he -> address;
  if ((address & ~(HEAVY | REQUEST_CONVERSION)) == addr)
    {
      if (he -> light_thr_id == self)
        {
          // We hold the lightweight lock, and it's for the right
          // address.
          count = he -> light_count;
          if (count == USHRT_MAX)
            {
              // I think most JVMs don't check for this.
              // But I'm not convinced I couldn't turn this into a security
              // hole, even with a 32 bit counter.
              throw new java::lang::IllegalMonitorStateException(
                JvNewStringLatin1("maximum monitor nesting level exceeded"));
            }
          he -> light_count = count + 1;
          return;
        }
      else
        {
          JvAssert(!(address & LOCKED));
          // Lightweight lock is held, but by someone else.
          // Spin a few times.  This avoids turning this into a heavyweight
          // lock if the current holder is about to release it.
          // FIXME: Does this make sense on a uniprocessor, where
          // it actually yields?  It's probably cheaper to convert.
          for (unsigned int i = 0; i < N_SPINS; ++i)
            {
              if ((he -> address & ~LOCKED) != address) goto retry;
              spin(i);
            }
          if (!compare_and_swap(&(he -> address), address, address | LOCKED))
            {
              wait_unlocked(he);
              goto retry;
            }
          heavy_lock *hl = get_heavy(addr, he);
          ++ (he -> heavy_count);
          // The hl lock acquisition can't block for long, since it can
          // only be held by other threads waiting for conversion, and
          // they, like us, drop it quickly without blocking.
          _Jv_MutexLock(&(hl->si.mutex));
          JvAssert(he -> address == (address | LOCKED));
          release_set(&(he -> address), (address | REQUEST_CONVERSION | HEAVY));
                                // release lock on he
          LOG(REQ_CONV, (address | REQUEST_CONVERSION | HEAVY), self);
          // If _Jv_CondWait is interrupted, we ignore the interrupt, but
          // restore the thread's interrupt status flag when done.
          jboolean interrupt_flag = false;
          while ((he -> address & ~FLAGS) == (address & ~FLAGS))
            {
              // Once converted, the lock has to retain heavyweight
              // status, since heavy_count > 0.
              int r = _Jv_CondWait (&(hl->si.condition), &(hl->si.mutex), 0, 0);
              if (r == _JV_INTERRUPTED)
                {
                  interrupt_flag = true;
                  Thread::currentThread()->interrupt_flag = false;
                }
            }
          Thread::currentThread()->interrupt_flag = interrupt_flag;
          keep_live(addr);
          // Guarantee that hl doesn't get unlinked by finalizer.
          // This is only an issue if the client fails to release
          // the lock, which is unlikely.
          JvAssert(he -> address & HEAVY);
          // Lock has been converted, we hold the heavyweight lock,
          // heavy_count has been incremented.
          return;
        }
    }
  obj_addr_t was_heavy = (address & HEAVY);
  if ((address & LOCKED) ||
      !compare_and_swap(&(he -> address), address, address | LOCKED))
    {
      wait_unlocked(he);
      goto retry;
    }
  if ((address & ~(HEAVY | REQUEST_CONVERSION)) == 0)
    {
      // Either was_heavy is true, or something changed out from under us,
      // since the initial test for 0 failed.
      JvAssert(!(address & REQUEST_CONVERSION));
      // Can't convert a nonexistent lightweight lock.
      heavy_lock *hl;
      hl = (was_heavy ? find_heavy(addr, he) : 0);
      // The CAS succeeded, so was_heavy is still accurate.
      if (0 == hl)
        {
          // It is OK to use the lightweight lock, since either the
          // heavyweight lock does not exist, or none of the
          // heavyweight locks are currently in use.  Future threads
          // trying to acquire the lock will see the lightweight
          // one first and use that.
          he -> light_thr_id = self;  // OK, since nobody else can hold
                                      // light lock or do this at the same time.
          JvAssert(he -> light_count == 0);
          JvAssert(was_heavy == (he -> address & HEAVY));
          release_set(&(he -> address), (addr | was_heavy));
          LOG(ACQ_LIGHT2, addr | was_heavy, self);
        }
      else
        {
          // Must use heavy lock.
          ++ (he -> heavy_count);
          JvAssert(0 == (address & ~HEAVY));
          release_set(&(he -> address), HEAVY);
          LOG(ACQ_HEAVY, addr | was_heavy, self);
          _Jv_MutexLock(&(hl->si.mutex));
          keep_live(addr);
        }
      return;
    }
  // Lightweight lock is held, but does not correspond to this object.
  // We hold the lock on the hash entry, and he -> address can't
  // change from under us.  Neither can the chain of heavy locks.
    {
      JvAssert(0 == he -> heavy_count || (address & HEAVY));
      heavy_lock *hl = get_heavy(addr, he);
      ++ (he -> heavy_count);
      release_set(&(he -> address), address | HEAVY);
      LOG(ACQ_HEAVY2, address | HEAVY, self);
      _Jv_MutexLock(&(hl->si.mutex));
      keep_live(addr);
    }
}
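
// Summary of the hash entry transitions performed by _Jv_MonitorEnter
// (a sketch; the code above is authoritative):
//
//   entry free (0)       --CAS-->  addr            fast path, lightweight
//   addr, held by self:            light_count++   recursive enter
//   addr, held by other:           spin, then set LOCKED, publish
//                                  REQUEST_CONVERSION|HEAVY, and block
//                                  on the heavy lock's condition
//                                  variable until converted
//   other object hashed here:      take the heavy lock for addr and
//                                  increment heavy_count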
void
_Jv_MonitorExit (jobject obj)
{
#ifdef JV_LINKER_CANNOT_8BYTE_ALIGN_STATICS
  obj_addr_t addr = (obj_addr_t)obj & ~((obj_addr_t)FLAGS);
#else
  obj_addr_t addr = (obj_addr_t)obj;
#endif
  _Jv_ThreadId_t self = _Jv_ThreadSelf();
  unsigned hash = JV_SYNC_HASH(addr);
  hash_entry * he = light_locks + hash;
  _Jv_ThreadId_t light_thr_id;
  unsigned count;
  obj_addr_t address;

retry:
  light_thr_id = he -> light_thr_id;
  // Unfortunately, it turns out we always need to read the address
  // first.  Even if we are going to update it with compare_and_swap,
  // we need to reset light_thr_id, and that's not safe unless we know
  // that we hold the lock.
  address = he -> address;
  // First the (relatively) fast cases:
  if (__builtin_expect(light_thr_id == self, true))
    // Above must fail if addr == 0 .
    {
      count = he -> light_count;
      if (__builtin_expect((address & ~HEAVY) == addr, true))
        {
          if (count != 0)
            {
              // We held the lightweight lock all along.  Thus the values
              // we saw for light_thr_id and light_count must have been valid.
              he -> light_count = count - 1;
              return;
            }
          // We hold the lightweight lock once.
          he -> light_thr_id = INVALID_THREAD_ID;
          if (compare_and_swap_release(&(he -> address), address,
                                       address & HEAVY))
            {
              LOG(REL_LIGHT, address & HEAVY, self);
              return;
            }
          else
            {
              he -> light_thr_id = light_thr_id; // Undo prior damage.
              goto retry;
            }
        }
      // else lock is not for this address, conversion is requested,
      // or the lock bit in the address field is set.
    }
  else
    {
      if (__builtin_expect(!addr, false))
        throw new java::lang::NullPointerException;
      if ((address & ~(HEAVY | REQUEST_CONVERSION)) == addr)
        {
#ifdef LOCK_DEBUG
          fprintf(stderr, "Lightweight lock held by other thread\n\t"
                          "light_thr_id = 0x%lx, self = 0x%lx, "
                          "address = 0x%lx, heavy_count = %d, pid = %d\n",
                  light_thr_id, self, (unsigned long)address,
                  he -> heavy_count, getpid());
          print_he(he);
          for(;;) {}
#endif
          // Someone holds the lightweight lock for this object, and
          // it can't be us.
          throw new java::lang::IllegalMonitorStateException(
                        JvNewStringLatin1("current thread not owner"));
        }
      else
        count = he -> light_count;
    }
  if (address & LOCKED)
    {
      wait_unlocked(he);
      goto retry;
    }
  // Now the unlikely cases.
  // We do know that:
  // - Address is set, and doesn't contain the LOCKED bit.
  // - If address refers to the same object as addr, then he -> light_thr_id
  //   refers to this thread, and count is valid.
  // - The case in which we held the lightweight lock has been
  //   completely handled, except for the REQUEST_CONVERSION case.
  if ((address & ~FLAGS) == addr)
    {
      // The lightweight lock is assigned to this object.
      // Thus we must be in the REQUEST_CONVERSION case.
      if (0 != count)
        {
          // Defer conversion until we exit completely.
          he -> light_count = count - 1;
          return;
        }
      JvAssert(he -> light_thr_id == self);
      JvAssert(address & REQUEST_CONVERSION);
      // Conversion requested.  Convert now.
      if (!compare_and_swap(&(he -> address), address, address | LOCKED))
        goto retry;
      heavy_lock *hl = find_heavy(addr, he);
      JvAssert(0 != hl);
                // Requestor created it.
      he -> light_count = 0;
      JvAssert(he -> heavy_count > 0);
                // was incremented by requestor.
      _Jv_MutexLock(&(hl->si.mutex));
      // Release the he lock after acquiring the mutex.
      // Otherwise we can accidentally
      // notify a thread that has already seen a heavyweight
      // lock.
      he -> light_thr_id = INVALID_THREAD_ID;
      release_set(&(he -> address), HEAVY);
      LOG(PROMOTE, address, self);
                // lightweight lock now unused.
      _Jv_CondNotifyAll(&(hl->si.condition), &(hl->si.mutex));
      _Jv_MutexUnlock(&(hl->si.mutex));
      // heavy_count was already incremented by original requestor.
      keep_live(addr);
      return;
    }
  // lightweight lock not for this object.
  JvAssert(!(address & LOCKED));
  JvAssert((address & ~FLAGS) != addr);
  if (!compare_and_swap(&(he -> address), address, address | LOCKED))
    goto retry;
  heavy_lock *hl = find_heavy(addr, he);
  if (NULL == hl)
    {
#ifdef LOCK_DEBUG
      fprintf(stderr, "Failed to find heavyweight lock for addr 0x%lx"
                      " pid = %d\n", addr, getpid());
      print_he(he);
      for(;;) {}
#endif
      release_set(&(he -> address), address);
      throw new java::lang::IllegalMonitorStateException(
                        JvNewStringLatin1("current thread not owner"));
    }
  JvAssert(address & HEAVY);
  count = he -> heavy_count;
  JvAssert(count > 0);
  --count;
  he -> heavy_count = count;
  if (0 == count)
    {
      const unsigned test_freq = 16;	// Power of 2
      static volatile unsigned counter = 0;
      unsigned my_counter = counter;

      counter = my_counter + 1;
      if (my_counter%test_freq == 0)
        {
          // Randomize the interval length a bit.
          counter = my_counter + (my_counter >> 4) % (test_freq/2);
          // Unlock mutex first, to avoid self-deadlock, or worse.
          _Jv_MutexUnlock(&(hl->si.mutex));
          maybe_remove_all_heavy(he, address &~HEAVY);
                                // release lock bit, preserving
                                // REQUEST_CONVERSION
                                // and object address.
        }
      else
        {
          release_set(&(he -> address), address &~HEAVY);
          _Jv_MutexUnlock(&(hl->si.mutex));
                                // Unlock after releasing the lock bit, so that
                                // we don't switch to another thread prematurely.
        }
    }
  else
    {
      release_set(&(he -> address), address);
      _Jv_MutexUnlock(&(hl->si.mutex));
    }
  LOG(REL_HEAVY, addr, self);
  keep_live(addr);
}
// Return false if obj's monitor is held by the current thread,
// true otherwise.
bool
_Jv_ObjectCheckMonitor (jobject obj)
{
#ifdef JV_LINKER_CANNOT_8BYTE_ALIGN_STATICS
  obj_addr_t addr = (obj_addr_t)obj & ~((obj_addr_t)FLAGS);
#else
  obj_addr_t addr = (obj_addr_t)obj;
#endif
  obj_addr_t address;
  unsigned hash = JV_SYNC_HASH(addr);
  hash_entry * he = light_locks + hash;

  JvAssert(!(addr & FLAGS));
  address = he -> address;
  // Try it the easy way first:
  if (address == 0) return true;
  _Jv_ThreadId_t self = _Jv_ThreadSelf();
  if ((address & ~(HEAVY | REQUEST_CONVERSION)) == addr)
    // Fails if entry is LOCKED.
    // I can't asynchronously become or stop being the holder.
    return he -> light_thr_id != self;
retry:
  // Acquire the hash table entry lock
  address &= ~LOCKED;
  if (!compare_and_swap(&(he -> address), address, address | LOCKED))
    {
      wait_unlocked(he);
      address = he -> address;
      goto retry;
    }

  bool not_mine;

  if ((address & ~FLAGS) == addr)
    not_mine = (he -> light_thr_id != self);
  else
    {
      heavy_lock* hl = find_heavy(addr, he);
      not_mine = hl ? _Jv_MutexCheckMonitor(&hl->si.mutex) : true;
    }

  release_set(&(he -> address), address);	// unlock hash entry
  return not_mine;
}
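
// A typical use (a sketch, not taken from this file): a caller can
// verify monitor ownership before a wait-style operation with
//
//   if (_Jv_ObjectCheckMonitor (obj))
//     throw new java::lang::IllegalMonitorStateException;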
// The rest of these are moderately thin veneers on _Jv_Cond ops.
// The current version of Notify might be able to make the pthread
// call AFTER releasing the lock, thus saving some context switches??

void
java::lang::Object::wait (jlong timeout, jint nanos)
{
#ifdef JV_LINKER_CANNOT_8BYTE_ALIGN_STATICS
  obj_addr_t addr = (obj_addr_t)this & ~((obj_addr_t)FLAGS);
#else
  obj_addr_t addr = (obj_addr_t)this;
#endif
  _Jv_ThreadId_t self = _Jv_ThreadSelf();
  unsigned hash = JV_SYNC_HASH(addr);
  hash_entry * he = light_locks + hash;
  unsigned count;
  obj_addr_t address;
  heavy_lock *hl;

  if (__builtin_expect (timeout < 0 || nanos < 0 || nanos > 999999, false))
    throw new IllegalArgumentException;
retry:
  address = he -> address;
  address &= ~LOCKED;
  if (!compare_and_swap(&(he -> address), address, address | LOCKED))
    {
      wait_unlocked(he);
      goto retry;
    }
  // address did not have the lock bit set.  We now hold the lock on he.
  if ((address & ~FLAGS) == addr)
    {
      // Convert to heavyweight.
      if (he -> light_thr_id != self)
        {
#ifdef LOCK_DEBUG
          fprintf(stderr, "Found wrong lightweight lock owner in wait "
                          "address = 0x%lx pid = %d\n", address, getpid());
          print_he(he);
          for(;;) {}
#endif
          release_set(&(he -> address), address);
          throw new IllegalMonitorStateException (JvNewStringLatin1
                                                  ("current thread not owner"));
        }
      count = he -> light_count;
      hl = get_heavy(addr, he);
      he -> light_count = 0;
      he -> heavy_count += count + 1;
      for (unsigned i = 0; i <= count; ++i)
        _Jv_MutexLock(&(hl->si.mutex));
      // Again release the he lock after acquiring the mutex.
      he -> light_thr_id = INVALID_THREAD_ID;
      release_set(&(he -> address), HEAVY);	// lightweight lock now unused.
      LOG(PROMOTE2, addr, self);
      if (address & REQUEST_CONVERSION)
        _Jv_CondNotifyAll (&(hl->si.condition), &(hl->si.mutex));
      // Since we do this before we do a CondWait, we guarantee that
      // threads waiting on requested conversion are awoken before
      // a real wait on the same condition variable.
      // No other notification can occur in the interim, since
      // we hold the heavy lock, and notifications are made
      // without acquiring it.
    }
  else /* We should hold the heavyweight lock. */
    {
      hl = find_heavy(addr, he);
      release_set(&(he -> address), address);
      if (0 == hl)
        {
#ifdef LOCK_DEBUG
          fprintf(stderr, "Couldn't find heavy lock in wait "
                          "addr = 0x%lx pid = %d\n", addr, getpid());
          print_he(he);
          for(;;) {}
#endif
          throw new IllegalMonitorStateException (JvNewStringLatin1
                                                  ("current thread not owner"));
        }
      JvAssert(address & HEAVY);
    }
  LOG(WAIT_START, addr, self);
  switch (_Jv_CondWait (&(hl->si.condition), &(hl->si.mutex), timeout, nanos))
    {
      case _JV_NOT_OWNER:
        throw new IllegalMonitorStateException (JvNewStringLatin1
                                                ("current thread not owner"));
      case _JV_INTERRUPTED:
        if (Thread::interrupted ())
          throw new InterruptedException;
    }
  LOG(WAIT_END, addr, self);
}
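
// Callers are expected to use the usual guarded-wait idiom (a sketch in
// Java terms):
//
//   synchronized (obj) { while (!condition) obj.wait (); }
//
// since a wait may end early due to notify races or interruption, as
// handled above.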
void
java::lang::Object::notify (void)
{
#ifdef JV_LINKER_CANNOT_8BYTE_ALIGN_STATICS
  obj_addr_t addr = (obj_addr_t)this & ~((obj_addr_t)FLAGS);
#else
  obj_addr_t addr = (obj_addr_t)this;
#endif
  _Jv_ThreadId_t self = _Jv_ThreadSelf();
  unsigned hash = JV_SYNC_HASH(addr);
  hash_entry * he = light_locks + hash;
  heavy_lock *hl;
  obj_addr_t address;
  int result;

retry:
  address = ((he -> address) & ~LOCKED);
  if (!compare_and_swap(&(he -> address), address, address | LOCKED))
    {
      wait_unlocked(he);
      goto retry;
    }
  if ((address & ~FLAGS) == addr && he -> light_thr_id == self)
    {
      // We hold lightweight lock.  Since it has not
      // been inflated, there are no waiters.
      release_set(&(he -> address), address);	// unlock
      return;
    }
  hl = find_heavy(addr, he);
  // Hl can't disappear since we point to the underlying object.
  // It's important that we release the lock bit before the notify, since
  // otherwise we will try to wake up the target while we still hold the
  // bit.  This results in lock bit contention, which we don't handle
  // well.
  release_set(&(he -> address), address);	// unlock
  if (0 == hl)
    throw new IllegalMonitorStateException(JvNewStringLatin1
                                           ("current thread not owner"));
  // We know that we hold the heavyweight lock at this point,
  // and the lightweight lock is not in use.
  result = _Jv_CondNotify(&(hl->si.condition), &(hl->si.mutex));
  LOG(NOTIFY, addr, self);
  keep_live(addr);
  if (__builtin_expect (result, 0))
    throw new IllegalMonitorStateException(JvNewStringLatin1
                                           ("current thread not owner"));
}
void
java::lang::Object::notifyAll (void)
{
#ifdef JV_LINKER_CANNOT_8BYTE_ALIGN_STATICS
  obj_addr_t addr = (obj_addr_t)this & ~((obj_addr_t)FLAGS);
#else
  obj_addr_t addr = (obj_addr_t)this;
#endif
  _Jv_ThreadId_t self = _Jv_ThreadSelf();
  unsigned hash = JV_SYNC_HASH(addr);
  hash_entry * he = light_locks + hash;
  heavy_lock *hl;
  obj_addr_t address;
  int result;

retry:
  address = (he -> address) & ~LOCKED;
  if (!compare_and_swap(&(he -> address), address, address | LOCKED))
    {
      wait_unlocked(he);
      goto retry;
    }
  hl = find_heavy(addr, he);
  if ((address & ~FLAGS) == addr && he -> light_thr_id == self)
    {
      // We hold lightweight lock.  Since it has not
      // been inflated, there are no waiters.
      release_set(&(he -> address), address);	// unlock
      return;
    }
  release_set(&(he -> address), address);	// unlock
  if (0 == hl)
    throw new IllegalMonitorStateException(JvNewStringLatin1
                                           ("current thread not owner"));
  result = _Jv_CondNotifyAll(&(hl->si.condition), &(hl->si.mutex));
  LOG(NOTIFY_ALL, addr, self);
  if (__builtin_expect (result, 0))
    throw new IllegalMonitorStateException(JvNewStringLatin1
                                           ("current thread not owner"));
}
// This is declared in Java code and in Object.h.
// It should never be called with JV_HASH_SYNCHRONIZATION.
void
java::lang::Object::sync_init (void)
{
  throw new IllegalMonitorStateException(JvNewStringLatin1
                                         ("internal error: sync_init"));
}

// This is called on startup and declared in Object.h.
// For now we just make it a no-op.
void
_Jv_InitializeSyncMutex (void)
{
}

#endif /* JV_HASH_SYNCHRONIZATION */