// natObject.cc - Implementation of the Object class.

/* Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003  Free Software Foundation

   This file is part of libgcj.

This software is copyrighted work licensed under the terms of the
Libgcj License.  Please consult the file "LIBGCJ_LICENSE" for
details.  */

#pragma implementation "Object.h"

#include <java/lang/Object.h>
#include <java-threads.h>
#include <java-signal.h>
#include <java/lang/CloneNotSupportedException.h>
#include <java/lang/IllegalArgumentException.h>
#include <java/lang/IllegalMonitorStateException.h>
#include <java/lang/InterruptedException.h>
#include <java/lang/NullPointerException.h>
#include <java/lang/Class.h>
#include <java/lang/Cloneable.h>
#include <java/lang/Thread.h>

// This is used to represent synchronization information.
struct _Jv_SyncInfo
{
#if defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy)
  // We only need to keep track of initialization state if we can
  // possibly finalize this object.
  bool init;
#endif
  _Jv_ConditionVariable_t condition;
  _Jv_Mutex_t mutex;
};
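
// Note: in this non-hash scheme, every object that is ever locked or
// waited on gets its own heap-allocated _Jv_SyncInfo, installed lazily
// by sync_init() below.  The hash-table scheme in the second half of
// this file (JV_HASH_SYNCHRONIZATION) avoids that per-object allocation.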

jclass
java::lang::Object::getClass (void)
{
  _Jv_VTable **dt = (_Jv_VTable **) this;
  return (*dt)->clas;
}

jint
java::lang::Object::hashCode (void)
{
  return _Jv_HashCode (this);
}

jobject
java::lang::Object::clone (void)
{
  jclass klass = getClass ();
  jobject r;
  jint size;

  // We also clone arrays here.  If we put the array code into
  // __JArray, then we'd have to figure out a way to find the array
  // vtbl when creating a new array class.  This is easier, if uglier.
  if (klass->isArray())
    {
      __JArray *array = (__JArray *) this;
      jclass comp = getClass()->getComponentType();
      jint eltsize;
      if (comp->isPrimitive())
        {
          r = _Jv_NewPrimArray (comp, array->length);
          eltsize = comp->size();
        }
      else
        {
          r = _Jv_NewObjectArray (array->length, comp, NULL);
          eltsize = sizeof (jobject);
        }
      // We can't use sizeof on __JArray because we must account for
      // alignment of the element type.
      size = (_Jv_GetArrayElementFromElementType (array, comp) - (char *) array
              + array->length * eltsize);
    }
  else
    {
      if (! java::lang::Cloneable::class$.isAssignableFrom(klass))
        throw new CloneNotSupportedException;
      size = klass->size();
      r = _Jv_AllocObject (klass);
    }

  memcpy ((void *) r, (void *) this, size);
  return r;
}
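
// Illustrative arithmetic for the array-size computation above (the
// header size here is hypothetical; the real offset comes from
// _Jv_GetArrayElementFromElementType and depends on the target ABI):
// for a 10-element jint array with a 16-byte header,
//   size = 16 + 10 * sizeof(jint) = 56
// bytes are copied by the memcpy above.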

void
_Jv_FinalizeObject (jobject obj)
{
  // Ignore exceptions.  From section 12.6 of the Java Language Spec.
  try
    {
      obj->finalize ();
    }
  catch (java::lang::Throwable *t)
    {
      // Ignore.
    }
}

// Synchronization code.

#ifndef JV_HASH_SYNCHRONIZATION
// This global is used to make sure that only one thread sets an
// object's `sync_info' field.
static _Jv_Mutex_t sync_mutex;

// This macro is used to see if synchronization initialization is
// needed.
#if defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy)
# define INIT_NEEDED(Obj) (! (Obj)->sync_info \
                           || ! ((_Jv_SyncInfo *) ((Obj)->sync_info))->init)
#else
# define INIT_NEEDED(Obj) (! (Obj)->sync_info)
#endif
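
// For example, a freshly allocated object has a null sync_info, so
// INIT_NEEDED is true and the first wait/notify/_Jv_MonitorEnter on
// the object runs sync_init() before touching the mutex.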

#if defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy)
// If we have to run a destructor for a sync_info member, then this
// function is registered as a finalizer for the sync_info.
static void
finalize_sync_info (jobject obj)
{
  _Jv_SyncInfo *si = (_Jv_SyncInfo *) obj;
#if defined (_Jv_HaveCondDestroy)
  _Jv_CondDestroy (&si->condition);
#endif
#if defined (_Jv_HaveMutexDestroy)
  _Jv_MutexDestroy (&si->mutex);
#endif
  si->init = false;
}
#endif

// This is called to initialize the sync_info element of an object.
void
java::lang::Object::sync_init (void)
{
  _Jv_MutexLock (&sync_mutex);
  // Check again to see if initialization is needed now that we have
  // the lock.
  if (INIT_NEEDED (this))
    {
      // We assume there are no pointers in the sync_info
      // representation.
      _Jv_SyncInfo *si;
      // We always create a new sync_info, even if there is already
      // one available.  Any given object can only be finalized once.
      // If we get here and sync_info is not null, then it has already
      // been finalized.  So if we just reinitialize the old one,
      // we'll never be able to (re-)destroy the mutex and/or
      // condition variable.
      si = (_Jv_SyncInfo *) _Jv_AllocBytes (sizeof (_Jv_SyncInfo));
      _Jv_MutexInit (&si->mutex);
      _Jv_CondInit (&si->condition);
#if defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy)
      // Register a finalizer.
      si->init = true;
      _Jv_RegisterFinalizer (si, finalize_sync_info);
#endif
      sync_info = (jobject) si;
    }
  _Jv_MutexUnlock (&sync_mutex);
}

void
java::lang::Object::notify (void)
{
  if (__builtin_expect (INIT_NEEDED (this), false))
    sync_init ();
  _Jv_SyncInfo *si = (_Jv_SyncInfo *) sync_info;
  if (__builtin_expect (_Jv_CondNotify (&si->condition, &si->mutex), false))
    throw new IllegalMonitorStateException(JvNewStringLatin1
                                           ("current thread not owner"));
}

void
java::lang::Object::notifyAll (void)
{
  if (__builtin_expect (INIT_NEEDED (this), false))
    sync_init ();
  _Jv_SyncInfo *si = (_Jv_SyncInfo *) sync_info;
  if (__builtin_expect (_Jv_CondNotifyAll (&si->condition, &si->mutex), false))
    throw new IllegalMonitorStateException(JvNewStringLatin1
                                           ("current thread not owner"));
}

void
java::lang::Object::wait (jlong timeout, jint nanos)
{
  if (__builtin_expect (INIT_NEEDED (this), false))
    sync_init ();
  if (__builtin_expect (timeout < 0 || nanos < 0 || nanos > 999999, false))
    throw new IllegalArgumentException;
  _Jv_SyncInfo *si = (_Jv_SyncInfo *) sync_info;
  switch (_Jv_CondWait (&si->condition, &si->mutex, timeout, nanos))
    {
      case _JV_NOT_OWNER:
        throw new IllegalMonitorStateException (JvNewStringLatin1
                                                ("current thread not owner"));
      case _JV_INTERRUPTED:
        if (Thread::interrupted ())
          throw new InterruptedException;
    }
}

// Some runtime code.

// This function is called at system startup to initialize the
// `sync_mutex'.
void
_Jv_InitializeSyncMutex (void)
{
  _Jv_MutexInit (&sync_mutex);
}

void
_Jv_MonitorEnter (jobject obj)
{
  if (__builtin_expect (! obj, false))
    throw new java::lang::NullPointerException;

  if (__builtin_expect (INIT_NEEDED (obj), false))
    obj->sync_init ();

  _Jv_SyncInfo *si = (_Jv_SyncInfo *) obj->sync_info;
  _Jv_MutexLock (&si->mutex);
  // FIXME: In the Windows case, this can return a nonzero error code.
  // We should turn that into some exception ...
}

void
_Jv_MonitorExit (jobject obj)
{
  JvAssert (obj);
  JvAssert (! INIT_NEEDED (obj));
  _Jv_SyncInfo *si = (_Jv_SyncInfo *) obj->sync_info;
  if (__builtin_expect (_Jv_MutexUnlock (&si->mutex), false))
    throw new java::lang::IllegalMonitorStateException;
}

bool
_Jv_ObjectCheckMonitor (jobject obj)
{
  _Jv_SyncInfo *si = (_Jv_SyncInfo *) obj->sync_info;
  return _Jv_MutexCheckMonitor (&si->mutex);
}

#else /* JV_HASH_SYNCHRONIZATION */

// FIXME: We shouldn't be calling GC_register_finalizer directly.
#ifndef HAVE_BOEHM_GC
# error Hash synchronization currently requires boehm-gc
// That's actually a bit of a lie: It should also work with the null GC,
// probably even better than the alternative.
// To really support alternate GCs here, we would need to widen the
// interface to finalization, since we sometimes have to register a
// second finalizer for an object that already has one.
// We might also want to move the GC interface to a .h file, since
// the number of procedure call levels involved in some of these
// operations is already ridiculous, and would become worse if we
// went through the proper intermediaries.
#endif

# ifdef LIBGCJ_GC_DEBUG
# endif

// What follows currently assumes a Linux-like platform.
// Some of it specifically assumes X86 or IA64 Linux, though that
// should be easily fixable.

// A Java monitor implementation based on a table of locks.
// Each entry in the table describes
// locks held for objects that hash to that location.
// This started out as a reimplementation of the technique used in SGI's JVM,
// for which we obtained permission from SGI.
// But in fact, this ended up quite different, though some ideas are
// still shared with the original.
// It was also influenced by some of the published IBM work,
// though it also differs in many ways from that.
// We could speed this up if we had a way to atomically update
// an entire cache entry, i.e. 2 contiguous words of memory.
// That would usually be the case with a 32 bit ABI on a 64 bit processor.
// But we don't currently go out of our way to target those.
// I don't know how to do much better with an N bit ABI on a processor
// that can atomically update only N bits at a time.
// Author: Hans-J. Boehm (Hans_Boehm@hp.com, boehm@acm.org)

#include <unistd.h>	// for usleep, sysconf.
#include <gcj/javaprims.h>
#include <sysdep/locks.h>
#include <java/lang/Thread.h>

// Try to determine whether we are on a multiprocessor, i.e. whether
// spinning may be profitable.
// This should really use a suitable autoconf macro.
// False is the conservative answer, though the right one is much better.
static bool
is_mp()
{
#ifdef _SC_NPROCESSORS_ONLN
  long nprocs = sysconf(_SC_NPROCESSORS_ONLN);
  return (nprocs > 1);
#else
  return false;
#endif
}

// A call to keep_live(p) forces p to be accessible to the GC
// at this point.
inline static void
keep_live(obj_addr_t p)
{
  __asm__ __volatile__("" : : "rm"(p) : "memory");
}

// Each hash table entry holds a single preallocated "lightweight" lock.
// In addition, it holds a chain of "heavyweight" locks.  Lightweight
// locks do not support Object.wait(), and are converted to heavyweight
// status in response to contention.  Unlike the SGI scheme, both
// lightweight and heavyweight locks in one hash entry can be simultaneously
// in use.  (The SGI scheme requires that we be able to acquire a heavyweight
// lock on behalf of another thread, and can thus convert a lock we don't
// hold to heavyweight status.  Here we don't insist on that, and thus
// let the original holder of the lightweight lock keep it.)

struct heavy_lock {
  void * reserved_for_gc;
  struct heavy_lock *next;	// Hash chain link.
				// Traced by GC.
  void * old_client_data;	// The only other field traced by GC.
  GC_finalization_proc old_finalization_proc;
  obj_addr_t address;		// Object to which this lock corresponds.
				// Should not be traced by GC.
				// Cleared as heavy_lock is destroyed.
				// Together with the rest of the heavy lock
				// chain, this is protected by the lock
				// bit in the hash table entry to which
				// the chain is attached.
  _Jv_SyncInfo si;
  // The remaining fields save prior finalization info for
  // the object, which we needed to replace in order to arrange
  // for cleanup of the lock structure.
};

#ifdef LOCK_DEBUG
static void
print_hl_list(heavy_lock *hl)
{
  heavy_lock *p = hl;
  for (; 0 != p; p = p->next)
    fprintf (stderr, "(hl = %p, addr = %p)", p, (void *)(p -> address));
}
#endif /* LOCK_DEBUG */

#if defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy)
// If we have to run a destructor for a sync_info member, then this
// function could be registered as a finalizer for the sync_info.
// In fact, we now only invoke it explicitly.
static void
heavy_lock_finalization_proc (heavy_lock *hl)
{
#if defined (_Jv_HaveCondDestroy)
  _Jv_CondDestroy (&hl->si.condition);
#endif
#if defined (_Jv_HaveMutexDestroy)
  _Jv_MutexDestroy (&hl->si.mutex);
#endif
  hl->si.init = false;
}
#endif /* defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy) */

// We convert the lock back to lightweight status when
// we exit, so that a single contention episode doesn't doom the lock
// forever.  But we also need to make sure that lock structures for dead
// objects are eventually reclaimed.  We do that in an additional
// finalizer on the underlying object.
// Note that if the corresponding object is dead, it is safe to drop
// the heavy_lock structure from its list.  It is not necessarily
// safe to deallocate it, since the unlock code could still be running.

struct hash_entry {
  volatile obj_addr_t address;	// Address of object for which lightweight
				// lock is held.
				// We assume the 3 low order bits are zero.
				// With the Boehm collector and bitmap
				// allocation, objects of size 4 bytes are
				// broken anyway.  Thus this is primarily
				// a constraint on statically allocated
				// objects used for synchronization.
				// This allows us to use the low order
				// bits as follows:
#   define LOCKED	1	// This hash entry is locked, and its
				// state may be invalid.
				// The lock protects both the hash_entry
				// itself (except for the light_count
				// and light_thr_id fields, which
				// are protected by the lightweight
				// lock itself), and any heavy_monitor
				// structures attached to it.
#   define HEAVY	2	// Heavyweight locks associated with this
				// hash entry may be held.
				// The lightweight entry is still valid,
				// if the leading bits of the address
				// field are nonzero.
				// If the LOCKED bit is clear, then this is
				// set exactly when heavy_count is > 0 .
				// Stored redundantly so a single
				// compare-and-swap works in the easy case.
				// If HEAVY is not set, it is safe to use
				// an available lightweight lock entry
				// without checking if there is an existing
				// heavyweight lock for the same object.
				// (There may be one, but it won't be held
				// or waited on.)
#   define REQUEST_CONVERSION 4	// The lightweight lock is held.  But
				// one or more other threads have tried
				// to acquire the lock, and hence request
				// conversion to heavyweight status.
				// The heavyweight lock is already allocated.
				// Threads requesting conversion are
				// waiting on the condition variable associated
				// with the heavyweight lock.
				// Not used for conversion due to
				// Object.wait() calls.
#   define FLAGS (LOCKED | HEAVY | REQUEST_CONVERSION)
  volatile _Jv_ThreadId_t light_thr_id;
				// Thr_id of holder of lightweight lock.
				// Only updated by lightweight lock holder.
				// Must be recognizably invalid if the
				// lightweight lock is not held.
#   define INVALID_THREAD_ID 0	// Works for Linux?
				// If zero doesn't work, we have to
				// initialize lock table.
  volatile unsigned short light_count;
				// Number of times the lightweight lock
				// is held minus one.  Zero if lightweight
				// lock is not held.  Only updated by
				// lightweight lock holder or, in one
				// case, while holding the LOCKED bit in
				// a state in which there can be no
				// lightweight lock holder.
  unsigned short heavy_count;	// Total number of times heavyweight locks
				// associated with this hash entry are held
				// or waiting to be acquired.
				// Threads in wait() are included even though
				// they have temporarily released the lock.
				// Protected by LOCKED bit.
				// Threads requesting conversion to heavyweight
				// status are also included.
  struct heavy_lock * heavy_locks;
				// Chain of heavy locks.  Protected
				// by the lock bit for this entry.  Locks may
				// remain allocated here even if HEAVY
				// is not set and heavy_count is 0.
				// If a lightweight and heavyweight lock
				// correspond to the same address, the
				// lightweight lock is the right one.
};
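
// Rough summary of the states of the address field (low bits):
//   0                                 -- hash entry unused.
//   addr                              -- lightweight lock held for addr.
//   addr | HEAVY                      -- lightweight lock held; heavy locks
//                                        also allocated for this entry.
//   HEAVY                             -- no lightweight holder; only
//                                        heavyweight locks in use.
//   addr | REQUEST_CONVERSION | HEAVY -- lightweight holder asked to
//                                        convert the lock on exit.
//   anything | LOCKED                 -- entry locked, possibly mid-update.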

#ifndef JV_SYNC_TABLE_SZ
# define JV_SYNC_TABLE_SZ 2048	// Must be power of 2.
#endif

hash_entry light_locks[JV_SYNC_TABLE_SZ];

#define JV_SYNC_HASH(p) (((long)p ^ ((long)p >> 10)) & (JV_SYNC_TABLE_SZ-1))
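
// Worked example, assuming the default table size of 2048: an object at
// address 0x1000 gives 0x1000 ^ (0x1000 >> 10) = 0x1004, and
// 0x1004 & 2047 = 4, so its locks live in light_locks[4].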

// Note that the light_locks table is scanned conservatively by the
// collector.  It is essential that the heavy_locks field is scanned.
// Currently the address field may or may not cause the associated object
// to be retained, depending on whether flag bits are set.
// This means that we can conceivably get an unexpected deadlock if
// 1) Object at address A is locked.
// 2) The client drops A without unlocking it.
// 3) Flag bits in the address entry are set, so the collector reclaims
//    the object at A.
// 4) A is reallocated, and an attempt is made to lock the result.
// This could be fixed by scanning light_locks in a more customized
// manner that ignores the flag bits.  But it can only happen with hand
// generated semi-illegal .class files, and then it doesn't present a
// security hole.

#ifdef LOCK_DEBUG
void print_he(hash_entry *he)
{
  fprintf(stderr, "lock hash entry = %p, index = %d, address = 0x%lx\n"
	  "\tlight_thr_id = 0x%lx, light_count = %d, "
	  "heavy_count = %d\n\theavy_locks:", he,
	  he - light_locks, (unsigned long)(he -> address),
	  (unsigned long)(he -> light_thr_id),
	  he -> light_count, he -> heavy_count);
  print_hl_list(he -> heavy_locks);
  fprintf(stderr, "\n");
}
#endif /* LOCK_DEBUG */

// Log locking operations.  For debugging only.
// Logging is intended to be as unintrusive as possible.
// Log calls are made after an operation completes, and hence
// may not completely reflect actual synchronization ordering.
// The choice of events to log is currently a bit haphazard.
// The intent is that if we have to track down any other bugs
// in this code, we extend the logging as appropriate.
#ifdef LOCK_LOG
enum event_type {
  ACQ_LIGHT, ACQ_LIGHT2, ACQ_HEAVY, ACQ_HEAVY2, PROMOTE, REL_LIGHT,
  REL_HEAVY, REQ_CONV, PROMOTE2, WAIT_START, WAIT_END, NOTIFY, NOTIFY_ALL
};

struct lock_history {
  event_type tp;
  obj_addr_t addr;	// Often includes flags.
  _Jv_ThreadId_t thr;
};

const int LOG_SIZE = 128;	// Power of 2.

lock_history lock_log[LOG_SIZE];

volatile obj_addr_t log_next = 0;
			// Next location in lock_log.
			// Really an int, but we need compare_and_swap.

static void add_log_entry(event_type t, obj_addr_t a, _Jv_ThreadId_t th)
{
  obj_addr_t my_entry;
  obj_addr_t next_entry;

  do
    {
      my_entry = log_next;
      next_entry = ((my_entry + 1) & (LOG_SIZE - 1));
    }
  while (!compare_and_swap(&log_next, my_entry, next_entry));
  lock_log[my_entry].tp = t;
  lock_log[my_entry].addr = a;
  lock_log[my_entry].thr = th;
}
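
// The log is a fixed-size ring: once LOG_SIZE events have been recorded,
// new entries silently overwrite the oldest ones.  The compare_and_swap
// above lets concurrent threads claim distinct slots without a lock.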

# define LOG(t, a, th) add_log_entry(t, a, th)
#else /* !LOCK_LOG */
# define LOG(t, a, th)
#endif

static bool mp = false;	// Known multiprocessor.

// Wait for roughly 2^n units, touching as little memory as possible.
static void
spin(unsigned n)
{
  const unsigned MP_SPINS = 10;
  const unsigned YIELDS = 4;
  const unsigned SPINS_PER_UNIT = 30;
  const unsigned MIN_SLEEP_USECS = 2001;	// Shorter times spin under Linux.
  const unsigned MAX_SLEEP_USECS = 200000;
  static unsigned spin_limit = 0;
  static unsigned yield_limit = YIELDS;
  static bool spin_initialized = false;

  if (!spin_initialized)
    {
      mp = is_mp();
      if (mp)
        {
          spin_limit = MP_SPINS;
          yield_limit = MP_SPINS + YIELDS;
        }
      spin_initialized = true;
    }
  if (n < spin_limit)
    {
      unsigned i = SPINS_PER_UNIT << n;
      for (; i > 0; --i)
        __asm__ __volatile__("");
    }
  else if (n < yield_limit)
    {
      _Jv_ThreadYield();
    }
  else
    {
      unsigned duration = MIN_SLEEP_USECS << (n - yield_limit);
      if (n >= 15 + yield_limit || duration > MAX_SLEEP_USECS)
        duration = MAX_SLEEP_USECS;
      _Jv_platform_usleep(duration);
    }
}
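
// The resulting back-off schedule, in rough terms: the first few calls
// (n < spin_limit, nonzero only on a multiprocessor) busy-spin for
// SPINS_PER_UNIT << n empty iterations; the next few yield the
// processor; beyond that we sleep, doubling the interval from
// MIN_SLEEP_USECS up to a cap of MAX_SLEEP_USECS.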

// Wait for a hash entry to become unlocked.
static void
wait_unlocked (hash_entry *he)
{
  unsigned i = 0;
  while (he -> address & LOCKED)
    spin (i++);
}

// Return the heavy lock for addr if it was already allocated.
// The client passes in the appropriate hash_entry.
// We hold the lock for he.
static inline heavy_lock *
find_heavy (obj_addr_t addr, hash_entry *he)
{
  heavy_lock *hl = he -> heavy_locks;
  while (hl != 0 && hl -> address != addr) hl = hl -> next;
  return hl;
}

// Unlink the heavy lock for the given address from its hash table chain.
// Dies miserably and conspicuously if it's not there, since that should
// be impossible.
static inline void
unlink_heavy (obj_addr_t addr, hash_entry *he)
{
  heavy_lock **currentp = &(he -> heavy_locks);
  while ((*currentp) -> address != addr)
    currentp = &((*currentp) -> next);
  *currentp = (*currentp) -> next;
}

// Finalization procedure for objects that have associated heavy-weight
// locks.  This may replace the real finalization procedure.
static void
heavy_lock_obj_finalization_proc (void *obj, void *cd)
{
  heavy_lock *hl = (heavy_lock *)cd;

  // This only addresses misalignment of statics, not heap objects.  It
  // works only because registering statics for finalization is a noop,
  // no matter what the least significant bits are.
#ifdef JV_LINKER_CANNOT_8BYTE_ALIGN_STATICS
  obj_addr_t addr = (obj_addr_t)obj & ~((obj_addr_t)0x7);
#else
  obj_addr_t addr = (obj_addr_t)obj;
#endif
  hash_entry *he = light_locks + JV_SYNC_HASH(addr);
  obj_addr_t he_address = (he -> address & ~LOCKED);

  // Acquire lock bit immediately.  It's possible that the hl was already
  // destroyed while we were waiting for the finalizer to run.  If it
  // was, the address field was set to zero.  The address field access is
  // protected by the lock bit to ensure that we do this exactly once.
  // The lock bit also protects updates to the object's finalizer.
  while (!compare_and_swap(&(he -> address), he_address, he_address|LOCKED))
    {
      // Hash table entry is currently locked.  We can't safely
      // touch the list of heavy locks.
      wait_unlocked(he);
      he_address = (he -> address & ~LOCKED);
    }
  if (0 == hl -> address)
    {
      // remove_all_heavy destroyed hl, and took care of the real finalizer.
      release_set(&(he -> address), he_address);
      return;
    }
  JvAssert(hl -> address == addr);
  GC_finalization_proc old_finalization_proc = hl -> old_finalization_proc;
  if (old_finalization_proc != 0)
    {
      // We still need to run a real finalizer.  In an idealized
      // world, in which people write thread-safe finalizers, that is
      // likely to require synchronization.  Thus we reregister
      // ourselves as the only finalizer, and simply run the real one.
      // Thus we don't clean up the lock yet, but we're likely to do so
      // on the next GC cycle.
      // It's OK if remove_all_heavy actually destroys the heavy lock,
      // since we've updated old_finalization_proc, and thus the user's
      // finalizer won't be rerun.
      void * old_client_data = hl -> old_client_data;
      hl -> old_finalization_proc = 0;
      hl -> old_client_data = 0;
#     ifdef HAVE_BOEHM_GC
	GC_REGISTER_FINALIZER_NO_ORDER(obj, heavy_lock_obj_finalization_proc,
				       cd, 0, 0);
#     endif
      release_set(&(he -> address), he_address);
      old_finalization_proc(obj, old_client_data);
    }
  else
    {
      // The object is really dead, although it's conceivable that
      // some thread may still be in the process of releasing the
      // heavy lock.  Unlink it and, if necessary, register a finalizer
      // to destroy sync_info.
      unlink_heavy(addr, he);
      hl -> address = 0;	// Don't destroy it again.
      release_set(&(he -> address), he_address);
#     if defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy)
	// Make sure lock is not held and then destroy condvar and mutex.
	_Jv_MutexLock(&(hl->si.mutex));
	_Jv_MutexUnlock(&(hl->si.mutex));
	heavy_lock_finalization_proc (hl);
#     endif
    }
}

// We hold the lock on he, and heavy_count is 0.
// Release the lock by replacing the address with new_address_val.
// Remove all heavy locks on the list.  Note that the only possible way
// in which a lock may still be in use is if it's in the process of
// being unlocked.
// FIXME:  Why does this unlock the hash entry?  I think that
// could now be done more cleanly in MonitorExit.
static void
remove_all_heavy (hash_entry *he, obj_addr_t new_address_val)
{
  JvAssert(he -> heavy_count == 0);
  JvAssert(he -> address & LOCKED);
  heavy_lock *hl = he -> heavy_locks;
  he -> heavy_locks = 0;
  // We would really like to release the lock bit here.  Unfortunately, that
  // creates a race between our finalizer removal, and the potential
  // reinstallation of a new finalizer as a new heavy lock is created.
  // This may need to be revisited.
  for(; 0 != hl; hl = hl->next)
    {
      obj_addr_t obj = hl -> address;
      JvAssert(0 != obj);	// If this was previously finalized, it should no
				// longer appear on our list.
      hl -> address = 0;	// Finalization proc might still see it after we
				// finish.
      GC_finalization_proc old_finalization_proc = hl -> old_finalization_proc;
      void * old_client_data = hl -> old_client_data;
#     ifdef HAVE_BOEHM_GC
	// Remove our finalization procedure.
	// Reregister the clients if applicable.
	GC_REGISTER_FINALIZER_NO_ORDER((GC_PTR)obj, old_finalization_proc,
				       old_client_data, 0, 0);
	// Note that our old finalization procedure may have been
	// previously determined to be runnable, and may still run.
	// FIXME - direct dependency on boehm GC.
#     endif
#     if defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy)
	// Wait for a possible lock holder to finish unlocking it.
	// This is only an issue if we have to explicitly destroy the mutex
	// or possibly if we have to destroy a condition variable that is
	// still being notified.
	_Jv_MutexLock(&(hl->si.mutex));
	_Jv_MutexUnlock(&(hl->si.mutex));
	heavy_lock_finalization_proc (hl);
#     endif
    }
  release_set(&(he -> address), new_address_val);
}

// We hold the lock on he and heavy_count is 0.
// We release it by replacing the address field with new_address_val.
// Remove all heavy locks on the list if the list is sufficiently long.
// This is called periodically to avoid very long lists of heavy locks.
// This seems to otherwise become an issue with SPECjbb, for example.
static inline void
maybe_remove_all_heavy (hash_entry *he, obj_addr_t new_address_val)
{
  static const int max_len = 5;
  heavy_lock *hl = he -> heavy_locks;

  for (int i = 0; i < max_len; ++i)
    {
      if (0 == hl)
	{
	  release_set(&(he -> address), new_address_val);
	  return;
	}
      hl = hl -> next;
    }
  remove_all_heavy(he, new_address_val);
}

// Allocate a new heavy lock for addr, returning its address.
// Assumes we already have the hash_entry locked, and there
// is currently no lightweight or allocated lock for addr.
// We register a finalizer for addr, which is responsible for
// removing the heavy lock when addr goes away, in addition
// to the responsibilities of any prior finalizer.
// This unfortunately holds the lock bit for the hash entry while it
// allocates two objects (one for the finalizer).
// It would be nice to avoid that somehow ...
static heavy_lock *
alloc_heavy(obj_addr_t addr, hash_entry *he)
{
  heavy_lock * hl = (heavy_lock *) _Jv_AllocTraceTwo(sizeof (heavy_lock));

  hl -> address = addr;
  _Jv_MutexInit (&(hl -> si.mutex));
  _Jv_CondInit (&(hl -> si.condition));
# if defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy)
    hl -> si.init = true;	// needed ?
# endif
  hl -> next = he -> heavy_locks;
  he -> heavy_locks = hl;
  // FIXME: The only call that cheats and goes directly to the GC interface.
# ifdef HAVE_BOEHM_GC
    GC_REGISTER_FINALIZER_NO_ORDER(
			  (void *)addr, heavy_lock_obj_finalization_proc,
			  hl, &hl->old_finalization_proc,
			  &hl->old_client_data);
# endif /* HAVE_BOEHM_GC */
  return hl;
}

// Return the heavy lock for addr, allocating if necessary.
// Assumes we have the cache entry locked, and there is no lightweight
// lock for addr.
static heavy_lock *
get_heavy(obj_addr_t addr, hash_entry *he)
{
  heavy_lock *hl = find_heavy(addr, he);
  if (0 == hl)
    hl = alloc_heavy(addr, he);
  return hl;
}

void
_Jv_MonitorEnter (jobject obj)
{
#ifdef JV_LINKER_CANNOT_8BYTE_ALIGN_STATICS
  obj_addr_t addr = (obj_addr_t)obj & ~((obj_addr_t)FLAGS);
#else
  obj_addr_t addr = (obj_addr_t)obj;
#endif
  obj_addr_t address;
  unsigned count;
  unsigned hash = JV_SYNC_HASH(addr);
  hash_entry * he = light_locks + hash;
  _Jv_ThreadId_t self = _Jv_ThreadSelf();
  const unsigned N_SPINS = 18;

  // We need to somehow check that addr is not NULL on the fast path.
  // A very predictable
  // branch on a register value is probably cheaper than dereferencing addr.
  // We could also permanently lock the NULL entry in the hash table.
  // But it's not clear that's cheaper either.
  if (__builtin_expect(!addr, false))
    throw new java::lang::NullPointerException;

  JvAssert(!(addr & FLAGS));
retry:
  if (__builtin_expect(compare_and_swap(&(he -> address),
					0, addr), true))
    {
      JvAssert(he -> light_thr_id == INVALID_THREAD_ID);
      JvAssert(he -> light_count == 0);
      he -> light_thr_id = self;
      // Count fields are set correctly.  Heavy_count was also zero,
      // but can change asynchronously.
      // This path is hopefully both fast and the most common.
      LOG(ACQ_LIGHT, addr, self);
      return;
    }
  address = he -> address;
  if ((address & ~(HEAVY | REQUEST_CONVERSION)) == addr)
    {
      if (he -> light_thr_id == self)
	{
	  // We hold the lightweight lock, and it's for the right
	  // object.
	  count = he -> light_count;
	  if (count == USHRT_MAX)
	    {
	      // I think most JVMs don't check for this.
	      // But I'm not convinced I couldn't turn this into a security
	      // hole, even with a 32 bit counter.
	      throw new java::lang::IllegalMonitorStateException(
		JvNewStringLatin1("maximum monitor nesting level exceeded"));
	    }
	  he -> light_count = count + 1;
	  return;
	}
      else
	{
	  JvAssert(!(address & LOCKED));
	  // Lightweight lock is held, but by someone else.
	  // Spin a few times.  This avoids turning this into a heavyweight
	  // lock if the current holder is about to release it.
	  // FIXME: Does this make sense on a uniprocessor, where
	  // it actually yields?  It's probably cheaper to convert.
	  for (unsigned int i = 0; i < N_SPINS; ++i)
	    {
	      if ((he -> address & ~LOCKED) != address) goto retry;
	      spin(i);
	    }
	  if (!compare_and_swap(&(he -> address), address, address | LOCKED))
	    {
	      wait_unlocked(he);
	      goto retry;
	    }
	  heavy_lock *hl = get_heavy(addr, he);
	  ++ (he -> heavy_count);
	  // The hl lock acquisition can't block for long, since it can
	  // only be held by other threads waiting for conversion, and
	  // they, like us, drop it quickly without blocking.
	  _Jv_MutexLock(&(hl->si.mutex));
	  JvAssert(he -> address == (address | LOCKED));
	  release_set(&(he -> address), (address | REQUEST_CONVERSION | HEAVY));
				// release lock on he
	  LOG(REQ_CONV, (address | REQUEST_CONVERSION | HEAVY), self);
	  while ((he -> address & ~FLAGS) == (address & ~FLAGS))
	    {
	      // Once converted, the lock has to retain heavyweight
	      // status, since heavy_count > 0 .
	      _Jv_CondWait (&(hl->si.condition), &(hl->si.mutex), 0, 0);
	    }
	  keep_live(addr);
	  // Guarantee that hl doesn't get unlinked by finalizer.
	  // This is only an issue if the client fails to release
	  // the lock, which is unlikely.
	  JvAssert(he -> address & HEAVY);
	  // Lock has been converted, we hold the heavyweight lock,
	  // heavy_count has been incremented.
	  return;
	}
    }
  obj_addr_t was_heavy = (address & HEAVY);
  if ((address & LOCKED) ||
      !compare_and_swap(&(he -> address), address, (address | LOCKED)))
    {
      wait_unlocked(he);
      goto retry;
    }
  if ((address & ~(HEAVY | REQUEST_CONVERSION)) == 0)
    {
      // Either was_heavy is true, or something changed out from under us,
      // since the initial test for 0 failed.
      JvAssert(!(address & REQUEST_CONVERSION));
      // Can't convert a nonexistent lightweight lock.
      heavy_lock *hl;
      hl = (was_heavy ? find_heavy(addr, he) : 0);
      // The CAS succeeded, so was_heavy is still accurate.
      if (0 == hl)
	{
	  // It is OK to use the lightweight lock, since either the
	  // heavyweight lock does not exist, or none of the
	  // heavyweight locks are currently in use.  Future threads
	  // trying to acquire the lock will see the lightweight
	  // one first and use that.
	  he -> light_thr_id = self;	// OK, since nobody else can hold
					// light lock or do this at the same time.
	  JvAssert(he -> light_count == 0);
	  JvAssert(was_heavy == (he -> address & HEAVY));
	  release_set(&(he -> address), (addr | was_heavy));
	  LOG(ACQ_LIGHT2, addr | was_heavy, self);
	}
      else
	{
	  // Must use heavy lock.
	  ++ (he -> heavy_count);
	  JvAssert(0 == (address & ~HEAVY));
	  release_set(&(he -> address), HEAVY);
	  LOG(ACQ_HEAVY, addr | was_heavy, self);
	  _Jv_MutexLock(&(hl->si.mutex));
	  keep_live(addr);
	}
      return;
    }
  // Lightweight lock is held, but does not correspond to this object.
  // We hold the lock on the hash entry, and he -> address can't
  // change from under us.  Neither can the chain of heavy locks.
  JvAssert(0 == he -> heavy_count || (address & HEAVY));
  heavy_lock *hl = get_heavy(addr, he);
  ++ (he -> heavy_count);
  release_set(&(he -> address), address | HEAVY);
  LOG(ACQ_HEAVY2, address | HEAVY, self);
  _Jv_MutexLock(&(hl->si.mutex));
  keep_live(addr);
}
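
// Illustrative only: a Java block "synchronized (o) { ... }" is compiled
// by gcj into the moral equivalent of
//   _Jv_MonitorEnter (o);
//   ... body ...
//   _Jv_MonitorExit (o);	// also run on the exception path
// so every path through the body pairs the two calls above.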

void
_Jv_MonitorExit (jobject obj)
{
#ifdef JV_LINKER_CANNOT_8BYTE_ALIGN_STATICS
  obj_addr_t addr = (obj_addr_t)obj & ~((obj_addr_t)FLAGS);
#else
  obj_addr_t addr = (obj_addr_t)obj;
#endif
  _Jv_ThreadId_t self = _Jv_ThreadSelf();
  unsigned hash = JV_SYNC_HASH(addr);
  hash_entry * he = light_locks + hash;
  _Jv_ThreadId_t light_thr_id;
  unsigned count;
  obj_addr_t address;

retry:
  light_thr_id = he -> light_thr_id;
  // Unfortunately, it turns out we always need to read the address
  // first.  Even if we are going to update it with compare_and_swap,
  // we need to reset light_thr_id, and that's not safe unless we know
  // that we hold the lock.
  address = he -> address;
  // First the (relatively) fast cases:
  if (__builtin_expect(light_thr_id == self, true))
    // Above must fail if addr == 0 .
    {
      count = he -> light_count;
      if (__builtin_expect((address & ~HEAVY) == addr, true))
	{
	  if (count != 0)
	    {
	      // We held the lightweight lock all along.  Thus the values
	      // we saw for light_thr_id and light_count must have been valid.
	      he -> light_count = count - 1;
	      return;
	    }
	  // We hold the lightweight lock once.
	  he -> light_thr_id = INVALID_THREAD_ID;
	  if (compare_and_swap_release(&(he -> address), address,
				       address & HEAVY))
	    {
	      LOG(REL_LIGHT, address & HEAVY, self);
	      return;
	    }
	  else
	    {
	      he -> light_thr_id = light_thr_id;	// Undo prior damage.
	      goto retry;
	    }
	}
      // else lock is not for this address, conversion is requested,
      // or the lock bit in the address field is set.
    }
  else
    {
      if (__builtin_expect(!addr, false))
	throw new java::lang::NullPointerException;
      if ((address & ~(HEAVY | REQUEST_CONVERSION)) == addr)
	{
#	  ifdef LOCK_DEBUG
	    fprintf(stderr, "Lightweight lock held by other thread\n\t"
			    "light_thr_id = 0x%lx, self = 0x%lx, "
			    "address = 0x%lx, heavy_count = %d, pid = %d\n",
			    light_thr_id, self, (unsigned long)address,
			    he -> heavy_count, getpid());
	    print_he(he);
	    for(;;) {}
#	  endif
	  // Someone holds the lightweight lock for this object, and
	  // it's not us.
	  throw new java::lang::IllegalMonitorStateException(
			JvNewStringLatin1("current thread not owner"));
	}
      count = he -> light_count;
    }
  if (address & LOCKED)
    {
      wait_unlocked(he);
      goto retry;
    }
  // Now the unlikely cases.
  // We do know that:
  // - Address is set, and doesn't contain the LOCKED bit.
  // - If address refers to the same object as addr, then he -> light_thr_id
  //   refers to this thread, and count is valid.
  // - The case in which we held the lightweight lock has been
  //   completely handled, except for the REQUEST_CONVERSION case.
  if ((address & ~FLAGS) == addr)
    {
      // The lightweight lock is assigned to this object.
      // Thus we must be in the REQUEST_CONVERSION case.
      if (0 != count)
	{
	  // Defer conversion until we exit completely.
	  he -> light_count = count - 1;
	  return;
	}
      JvAssert(he -> light_thr_id == self);
      JvAssert(address & REQUEST_CONVERSION);
      // Conversion requested.  Convert now.
      if (!compare_and_swap(&(he -> address), address, address | LOCKED))
	goto retry;
      heavy_lock *hl = find_heavy(addr, he);
      JvAssert(0 != hl);
		// Requestor created it.
      he -> light_count = 0;
      JvAssert(he -> heavy_count > 0);
		// was incremented by requestor.
      _Jv_MutexLock(&(hl->si.mutex));
      // Release the he lock after acquiring the mutex.
      // Otherwise we can accidentally
      // notify a thread that has already seen a heavyweight
      // lock.
      he -> light_thr_id = INVALID_THREAD_ID;
      release_set(&(he -> address), HEAVY);
      LOG(PROMOTE, address, self);
		// lightweight lock now unused.
      _Jv_CondNotifyAll(&(hl->si.condition), &(hl->si.mutex));
      _Jv_MutexUnlock(&(hl->si.mutex));
      // heavy_count was already incremented by original requestor.
      keep_live(addr);
      return;
    }
  // lightweight lock not for this object.
  JvAssert(!(address & LOCKED));
  JvAssert((address & ~FLAGS) != addr);
  if (!compare_and_swap(&(he -> address), address, address | LOCKED))
    goto retry;
  heavy_lock *hl = find_heavy(addr, he);
  if (0 == hl)
    {
#     ifdef LOCK_DEBUG
	fprintf(stderr, "Failed to find heavyweight lock for addr 0x%lx"
			" pid = %d\n", addr, getpid());
	print_he(he);
	for(;;) {}
#     endif
      release_set(&(he -> address), address);
      throw new java::lang::IllegalMonitorStateException(
		    JvNewStringLatin1("current thread not owner"));
    }
  JvAssert(address & HEAVY);
  count = he -> heavy_count;
  JvAssert(count > 0);
  --count;
  he -> heavy_count = count;
  if (0 == count)
    {
      const unsigned test_freq = 16;	// Power of 2
      static volatile unsigned counter = 0;
      unsigned my_counter = counter;

      counter = my_counter + 1;
      if (my_counter%test_freq == 0)
	{
	  // Randomize the interval length a bit.
	  counter = my_counter + (my_counter >> 4) % (test_freq/2);
	  // Unlock mutex first, to avoid self-deadlock, or worse.
	  _Jv_MutexUnlock(&(hl->si.mutex));
	  maybe_remove_all_heavy(he, address&~HEAVY);
				// release lock bit, preserving
				// REQUEST_CONVERSION
				// and object address.
	}
      else
	{
	  release_set(&(he -> address), address&~HEAVY);
	  _Jv_MutexUnlock(&(hl->si.mutex));
			// Unlock after releasing the lock bit, so that
			// we don't switch to another thread prematurely.
	}
    }
  else
    {
      release_set(&(he -> address), address);
      _Jv_MutexUnlock(&(hl->si.mutex));
    }
  LOG(REL_HEAVY, addr, self);
  keep_live(addr);
}

// Return false if obj's monitor is held by the current thread.
bool
_Jv_ObjectCheckMonitor (jobject obj)
{
#ifdef JV_LINKER_CANNOT_8BYTE_ALIGN_STATICS
  obj_addr_t addr = (obj_addr_t)obj & ~((obj_addr_t)FLAGS);
#else
  obj_addr_t addr = (obj_addr_t)obj;
#endif
  obj_addr_t address;
  unsigned hash = JV_SYNC_HASH(addr);
  hash_entry * he = light_locks + hash;

  JvAssert(!(addr & FLAGS));
retry:
  address = he -> address;
  // Try it the easy way first:
  if (address == 0) return true;
  _Jv_ThreadId_t self = _Jv_ThreadSelf();
  if ((address & ~(HEAVY | REQUEST_CONVERSION)) == addr)
    // Fails if entry is LOCKED.
    // I can't asynchronously become or stop being the holder.
    return he -> light_thr_id != self;
  // Acquire the hash table entry lock
  address &= ~LOCKED;
  if (!compare_and_swap(&(he -> address), address, address | LOCKED))
    {
      wait_unlocked(he);
      goto retry;
    }

  bool not_mine;
  if ((address & ~FLAGS) == addr)
    not_mine = (he -> light_thr_id != self);
  else
    {
      heavy_lock* hl = find_heavy(addr, he);
      not_mine = hl ? _Jv_MutexCheckMonitor(&hl->si.mutex) : true;
    }

  release_set(&(he -> address), address);	// unlock hash entry
  return not_mine;
}
1233 // The current version of Notify might be able to make the pthread
1234 // call AFTER releasing the lock, thus saving some context switches??

void
java::lang::Object::wait (jlong timeout, jint nanos)
{
#ifdef JV_LINKER_CANNOT_8BYTE_ALIGN_STATICS
  obj_addr_t addr = (obj_addr_t)this & ~((obj_addr_t)FLAGS);
#else
  obj_addr_t addr = (obj_addr_t)this;
#endif
  _Jv_ThreadId_t self = _Jv_ThreadSelf();
  unsigned hash = JV_SYNC_HASH(addr);
  hash_entry * he = light_locks + hash;
  unsigned count;
  obj_addr_t address;
  heavy_lock *hl;

  if (__builtin_expect (timeout < 0 || nanos < 0 || nanos > 999999, false))
    throw new IllegalArgumentException;
retry:
  address = he -> address;
  address &= ~LOCKED;
  if (!compare_and_swap(&(he -> address), address, address | LOCKED))
    {
      wait_unlocked(he);
      goto retry;
    }
  // address did not have the lock bit set.  We now hold the lock on he.
  if ((address & ~FLAGS) == addr)
    {
      // Convert to heavyweight.
      if (he -> light_thr_id != self)
	{
#	  ifdef LOCK_DEBUG
	    fprintf(stderr, "Found wrong lightweight lock owner in wait "
			    "address = 0x%lx pid = %d\n", address, getpid());
	    print_he(he);
	    for(;;) {}
#	  endif
	  release_set(&(he -> address), address);
	  throw new IllegalMonitorStateException (JvNewStringLatin1
			  ("current thread not owner"));
	}
      count = he -> light_count;
      hl = get_heavy(addr, he);
      he -> light_count = 0;
      he -> heavy_count += count + 1;
      for (unsigned i = 0; i <= count; ++i)
	_Jv_MutexLock(&(hl->si.mutex));
      // Again release the he lock after acquiring the mutex.
      he -> light_thr_id = INVALID_THREAD_ID;
      release_set(&(he -> address), HEAVY);	// lightweight lock now unused.
      LOG(PROMOTE2, addr, self);
      if (address & REQUEST_CONVERSION)
	_Jv_CondNotifyAll (&(hl->si.condition), &(hl->si.mutex));
	// Since we do this before we do a CondWait, we guarantee that
	// threads waiting on requested conversion are awoken before
	// a real wait on the same condition variable.
	// No other notification can occur in the interim, since
	// we hold the heavy lock, and notifications are made
	// without acquiring it.
    }
  else /* We should hold the heavyweight lock. */
    {
      hl = find_heavy(addr, he);
      release_set(&(he -> address), address);
      if (0 == hl)
	{
#	  ifdef LOCK_DEBUG
	    fprintf(stderr, "Couldn't find heavy lock in wait "
			    "addr = 0x%lx pid = %d\n", addr, getpid());
	    print_he(he);
	    for(;;) {}
#	  endif
	  throw new IllegalMonitorStateException (JvNewStringLatin1
			  ("current thread not owner"));
	}
      JvAssert(address & HEAVY);
    }
  LOG(WAIT_START, addr, self);
  switch (_Jv_CondWait (&(hl->si.condition), &(hl->si.mutex), timeout, nanos))
    {
      case _JV_NOT_OWNER:
	throw new IllegalMonitorStateException (JvNewStringLatin1
			("current thread not owner"));
      case _JV_INTERRUPTED:
	if (Thread::interrupted ())
	  throw new InterruptedException;
    }
  LOG(WAIT_END, addr, self);
}

void
java::lang::Object::notify (void)
{
#ifdef JV_LINKER_CANNOT_8BYTE_ALIGN_STATICS
  obj_addr_t addr = (obj_addr_t)this & ~((obj_addr_t)FLAGS);
#else
  obj_addr_t addr = (obj_addr_t)this;
#endif
  _Jv_ThreadId_t self = _Jv_ThreadSelf();
  unsigned hash = JV_SYNC_HASH(addr);
  hash_entry * he = light_locks + hash;
  heavy_lock *hl;
  obj_addr_t address;
  int result;

retry:
  address = ((he -> address) & ~LOCKED);
  if (!compare_and_swap(&(he -> address), address, address | LOCKED))
    {
      wait_unlocked(he);
      goto retry;
    }
  if ((address & ~FLAGS) == addr && he -> light_thr_id == self)
    {
      // We hold lightweight lock.  Since it has not
      // been inflated, there are no waiters.
      release_set(&(he -> address), address);	// unlock
      return;
    }
  hl = find_heavy(addr, he);
  // Hl can't disappear since we point to the underlying object.
  // It's important that we release the lock bit before the notify, since
  // otherwise we will try to wake up the target while we still hold the
  // bit.  This results in lock bit contention, which we don't handle
  // well.
  release_set(&(he -> address), address);	// unlock
  if (0 == hl)
    throw new IllegalMonitorStateException(JvNewStringLatin1
					   ("current thread not owner"));
  // We know that we hold the heavyweight lock at this point,
  // and the lightweight lock is not in use.
  result = _Jv_CondNotify(&(hl->si.condition), &(hl->si.mutex));
  LOG(NOTIFY, addr, self);
  keep_live(addr);
  if (__builtin_expect (result, 0))
    throw new IllegalMonitorStateException(JvNewStringLatin1
					   ("current thread not owner"));
}

void
java::lang::Object::notifyAll (void)
{
#ifdef JV_LINKER_CANNOT_8BYTE_ALIGN_STATICS
  obj_addr_t addr = (obj_addr_t)this & ~((obj_addr_t)FLAGS);
#else
  obj_addr_t addr = (obj_addr_t)this;
#endif
  _Jv_ThreadId_t self = _Jv_ThreadSelf();
  unsigned hash = JV_SYNC_HASH(addr);
  hash_entry * he = light_locks + hash;
  heavy_lock *hl;
  obj_addr_t address;
  int result;

retry:
  address = (he -> address) & ~LOCKED;
  if (!compare_and_swap(&(he -> address), address, address | LOCKED))
    {
      wait_unlocked(he);
      goto retry;
    }
  hl = find_heavy(addr, he);
  if ((address & ~FLAGS) == addr && he -> light_thr_id == self)
    {
      // We hold lightweight lock.  Since it has not
      // been inflated, there are no waiters.
      release_set(&(he -> address), address);	// unlock
      return;
    }
  release_set(&(he -> address), address);	// unlock
  if (0 == hl)
    throw new IllegalMonitorStateException(JvNewStringLatin1
					   ("current thread not owner"));
  result = _Jv_CondNotifyAll(&(hl->si.condition), &(hl->si.mutex));
  LOG(NOTIFY_ALL, addr, self);
  if (__builtin_expect (result, 0))
    throw new IllegalMonitorStateException(JvNewStringLatin1
					   ("current thread not owner"));
}

// This is declared in Java code and in Object.h.
// It should never be called with JV_HASH_SYNCHRONIZATION
void
java::lang::Object::sync_init (void)
{
  throw new IllegalMonitorStateException(JvNewStringLatin1
					 ("internal error: sync_init"));
}

// This is called on startup and declared in Object.h.
// For now we just make it a no-op.
void
_Jv_InitializeSyncMutex (void)
{
}

#endif /* JV_HASH_SYNCHRONIZATION */