// natObject.cc - Implementation of the Object class.

/* Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003  Free Software Foundation

   This file is part of libgcj.

This software is copyrighted work licensed under the terms of the
Libgcj License.  Please consult the file "LIBGCJ_LICENSE" for
details.  */

#include <config.h>
#include <platform.h>

#include <string.h>

#pragma implementation "Object.h"

#include <gcj/cni.h>
#include <jvm.h>
#include <java/lang/Object.h>
#include <java-threads.h>
#include <java-signal.h>
#include <java/lang/CloneNotSupportedException.h>
#include <java/lang/IllegalArgumentException.h>
#include <java/lang/IllegalMonitorStateException.h>
#include <java/lang/InterruptedException.h>
#include <java/lang/NullPointerException.h>
#include <java/lang/Class.h>
#include <java/lang/Cloneable.h>
#include <java/lang/Thread.h>

#ifdef LOCK_DEBUG
#  include <stdio.h>
#endif
// This is used to represent synchronization information.
struct _Jv_SyncInfo
{
#if defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy)
  // We only need to keep track of initialization state if we can
  // possibly finalize this object.
  bool init;
#endif
  _Jv_ConditionVariable_t condition;
  _Jv_Mutex_t mutex;
};
jclass
java::lang::Object::getClass (void)
{
  _Jv_VTable **dt = (_Jv_VTable **) this;
  return (*dt)->clas;
}

jint
java::lang::Object::hashCode (void)
{
  return _Jv_HashCode (this);
}
jobject
java::lang::Object::clone (void)
{
  jclass klass = getClass ();
  jobject r;
  jint size;

  // We also clone arrays here.  If we put the array code into
  // __JArray, then we'd have to figure out a way to find the array
  // vtbl when creating a new array class.  This is easier, if uglier.
  if (klass->isArray())
    {
      __JArray *array = (__JArray *) this;
      jclass comp = getClass()->getComponentType();
      jint eltsize;
      if (comp->isPrimitive())
        {
          r = _Jv_NewPrimArray (comp, array->length);
          eltsize = comp->size();
        }
      else
        {
          r = _Jv_NewObjectArray (array->length, comp, NULL);
          eltsize = sizeof (jobject);
        }
      // We can't use sizeof on __JArray because we must account for
      // alignment of the element type.
      size = (_Jv_GetArrayElementFromElementType (array, comp) - (char *) array
              + array->length * eltsize);
    }
  else
    {
      if (! java::lang::Cloneable::class$.isAssignableFrom(klass))
        throw new CloneNotSupportedException;

      size = klass->size();
      r = JvAllocObject (klass, size);
    }

  memcpy ((void *) r, (void *) this, size);
  return r;
}
void
_Jv_FinalizeObject (jobject obj)
{
  // Ignore exceptions.  From section 12.6 of the Java Language Spec.
  try
    {
      obj->finalize ();
    }
  catch (java::lang::Throwable *t)
    {
      // Ignore.
    }
}
//
// Synchronization code.
//

#ifndef JV_HASH_SYNCHRONIZATION

// This global is used to make sure that only one thread sets an
// object's `sync_info' field.
static _Jv_Mutex_t sync_mutex;

// This macro is used to see if synchronization initialization is
// needed.
#if defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy)
#  define INIT_NEEDED(Obj) (! (Obj)->sync_info \
     || ! ((_Jv_SyncInfo *) ((Obj)->sync_info))->init)
#else
#  define INIT_NEEDED(Obj) (! (Obj)->sync_info)
#endif
#if defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy)
// If we have to run a destructor for a sync_info member, then this
// function is registered as a finalizer for the sync_info.
static void
finalize_sync_info (jobject obj)
{
  _Jv_SyncInfo *si = (_Jv_SyncInfo *) obj;
#if defined (_Jv_HaveCondDestroy)
  _Jv_CondDestroy (&si->condition);
#endif
#if defined (_Jv_HaveMutexDestroy)
  _Jv_MutexDestroy (&si->mutex);
#endif
  si->init = false;
}
#endif
// This is called to initialize the sync_info element of an object.
void
java::lang::Object::sync_init (void)
{
  _Jv_MutexLock (&sync_mutex);
  // Check again to see if initialization is needed now that we have
  // the lock.
  if (INIT_NEEDED (this))
    {
      // We assume there are no pointers in the sync_info
      // representation.
      _Jv_SyncInfo *si;
      // We always create a new sync_info, even if there is already
      // one available.  Any given object can only be finalized once.
      // If we get here and sync_info is not null, then it has already
      // been finalized.  So if we just reinitialize the old one,
      // we'll never be able to (re-)destroy the mutex and/or
      // condition variable.
      si = (_Jv_SyncInfo *) _Jv_AllocBytes (sizeof (_Jv_SyncInfo));
      _Jv_MutexInit (&si->mutex);
      _Jv_CondInit (&si->condition);
#if defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy)
      // Register a finalizer.
      si->init = true;
      _Jv_RegisterFinalizer (si, finalize_sync_info);
#endif
      sync_info = (jobject) si;
    }
  _Jv_MutexUnlock (&sync_mutex);
}
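
// A note on this (non-hash) configuration: every object that is ever
// locked or waited on lazily acquires its own mutex/condition pair
// through sync_init above, so the Object methods that follow reduce to
// thin wrappers around the _Jv_Mutex and _Jv_Cond primitives.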
void
java::lang::Object::notify (void)
{
  if (__builtin_expect (INIT_NEEDED (this), false))
    sync_init ();
  _Jv_SyncInfo *si = (_Jv_SyncInfo *) sync_info;
  if (__builtin_expect (_Jv_CondNotify (&si->condition, &si->mutex), false))
    throw new IllegalMonitorStateException(JvNewStringLatin1
                                           ("current thread not owner"));
}

void
java::lang::Object::notifyAll (void)
{
  if (__builtin_expect (INIT_NEEDED (this), false))
    sync_init ();
  _Jv_SyncInfo *si = (_Jv_SyncInfo *) sync_info;
  if (__builtin_expect (_Jv_CondNotifyAll (&si->condition, &si->mutex), false))
    throw new IllegalMonitorStateException(JvNewStringLatin1
                                           ("current thread not owner"));
}

void
java::lang::Object::wait (jlong timeout, jint nanos)
{
  if (__builtin_expect (INIT_NEEDED (this), false))
    sync_init ();
  if (__builtin_expect (timeout < 0 || nanos < 0 || nanos > 999999, false))
    throw new IllegalArgumentException;
  _Jv_SyncInfo *si = (_Jv_SyncInfo *) sync_info;
  switch (_Jv_CondWait (&si->condition, &si->mutex, timeout, nanos))
    {
      case _JV_NOT_OWNER:
        throw new IllegalMonitorStateException (JvNewStringLatin1
                                                ("current thread not owner"));
      case _JV_INTERRUPTED:
        if (Thread::interrupted ())
          throw new InterruptedException;
    }
}
//
// Some runtime code.
//

// This function is called at system startup to initialize the
// `sync_mutex'.
void
_Jv_InitializeSyncMutex (void)
{
  _Jv_MutexInit (&sync_mutex);
}
void
_Jv_MonitorEnter (jobject obj)
{
#ifndef HANDLE_SEGV
  if (__builtin_expect (! obj, false))
    throw new java::lang::NullPointerException;
#endif
  if (__builtin_expect (INIT_NEEDED (obj), false))
    obj->sync_init ();
  _Jv_SyncInfo *si = (_Jv_SyncInfo *) obj->sync_info;
  _Jv_MutexLock (&si->mutex);
  // FIXME: In the Windows case, this can return a nonzero error code.
  // We should turn that into some exception ...
}
void
_Jv_MonitorExit (jobject obj)
{
  JvAssert (obj);
  JvAssert (! INIT_NEEDED (obj));
  _Jv_SyncInfo *si = (_Jv_SyncInfo *) obj->sync_info;
  if (__builtin_expect (_Jv_MutexUnlock (&si->mutex), false))
    throw new java::lang::IllegalMonitorStateException;
}

bool
_Jv_ObjectCheckMonitor (jobject obj)
{
  _Jv_SyncInfo *si = (_Jv_SyncInfo *) obj->sync_info;
  return _Jv_MutexCheckMonitor (&si->mutex);
}
#else /* JV_HASH_SYNCHRONIZATION */

// FIXME: We shouldn't be calling GC_register_finalizer directly.
#ifndef HAVE_BOEHM_GC
# error Hash synchronization currently requires boehm-gc
// That's actually a bit of a lie: It should also work with the null GC,
// probably even better than the alternative.
// To really support alternate GCs here, we would need to widen the
// interface to finalization, since we sometimes have to register a
// second finalizer for an object that already has one.
// We might also want to move the GC interface to a .h file, since
// the number of procedure call levels involved in some of these
// operations is already ridiculous, and would become worse if we
// went through the proper intermediaries.
#else
# include "gc.h"
#endif
// What follows currently assumes a Linux-like platform.
// Some of it specifically assumes X86 or IA64 Linux, though that
// should be easily fixable.

// A Java monitor implementation based on a table of locks.
// Each entry in the table describes
// locks held for objects that hash to that location.
// This started out as a reimplementation of the technique used in SGI's JVM,
// for which we obtained permission from SGI.
// But in fact, this ended up quite different, though some ideas are
// still shared with the original.
// It was also influenced by some of the published IBM work,
// though it also differs in many ways from that.
// We could speed this up if we had a way to atomically update
// an entire cache entry, i.e. 2 contiguous words of memory.
// That would usually be the case with a 32 bit ABI on a 64 bit processor.
// But we don't currently go out of our way to target those.
// I don't know how to do much better with an N bit ABI on a processor
// that can atomically update only N bits at a time.
// Author: Hans-J. Boehm  (Hans_Boehm@hp.com, boehm@acm.org)
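
// For orientation: a Java-level construct such as
//
//     synchronized (obj) { ... }
//
// is compiled by gcj to a matching _Jv_MonitorEnter (obj) /
// _Jv_MonitorExit (obj) pair, so the routines in this file implement
// all object locking for the runtime.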
#include <limits.h>
#include <unistd.h>     // for usleep, sysconf.
#include <gcj/javaprims.h>
#include <sysdep/locks.h>
#include <java/lang/Thread.h>
// Try to determine whether we are on a multiprocessor, i.e. whether
// spinning may be profitable.
// This should really use a suitable autoconf macro.
// False is the conservative answer, though the right one is much better.
static bool
is_mp()
{
#ifdef _SC_NPROCESSORS_ONLN
  long nprocs = sysconf(_SC_NPROCESSORS_ONLN);
  return (nprocs > 1);
#else
  return false;
#endif
}
// A call to keep_live(p) forces p to be accessible to the GC
// at this point.
inline static void
keep_live(obj_addr_t p)
{
  __asm__ __volatile__("" : : "rm"(p) : "memory");
}
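
// For example, the locking routines below end heavyweight acquisition
// with
//
//     _Jv_MutexLock (&(hl->si.mutex));
//     keep_live (addr);
//
// so the object (and hence its heavy_lock) cannot be collected between
// the last real use of addr and the end of the operation.  The empty
// asm reads p and clobbers memory, so the compiler must keep the value
// live up to this point.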
// Each hash table entry holds a single preallocated "lightweight" lock.
// In addition, it holds a chain of "heavyweight" locks.  Lightweight
// locks do not support Object.wait(), and are converted to heavyweight
// status in response to contention.  Unlike the SGI scheme, both
// lightweight and heavyweight locks in one hash entry can be simultaneously
// in use.  (The SGI scheme requires that we be able to acquire a heavyweight
// lock on behalf of another thread, and can thus convert a lock we don't
// hold to heavyweight status.  Here we don't insist on that, and thus
// let the original holder of the lightweight lock keep it.)
struct heavy_lock {
  void * reserved_for_gc;
  struct heavy_lock *next;      // Hash chain link.
                                // Traced by GC.
  void * old_client_data;       // The only other field traced by GC.
  GC_finalization_proc old_finalization_proc;
  obj_addr_t address;           // Object to which this lock corresponds.
                                // Should not be traced by GC.
                                // Cleared as heavy_lock is destroyed.
                                // Together with the rest of the heavy lock
                                // chain, this is protected by the lock
                                // bit in the hash table entry to which
                                // the chain is attached.
  _Jv_SyncInfo si;
  // The remaining fields save prior finalization info for
  // the object, which we needed to replace in order to arrange
  // for cleanup of the lock structure.
};
#ifdef LOCK_DEBUG
void
print_hl_list(heavy_lock *hl)
{
  heavy_lock *p = hl;
  for (; 0 != p; p = p->next)
    fprintf (stderr, "(hl = %p, addr = %p)", p, (void *)(p -> address));
}
#endif /* LOCK_DEBUG */
#if defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy)
// If we have to run a destructor for a sync_info member, then this
// function could be registered as a finalizer for the sync_info.
// In fact, we now only invoke it explicitly.
static inline void
heavy_lock_finalization_proc (heavy_lock *hl)
{
#if defined (_Jv_HaveCondDestroy)
  _Jv_CondDestroy (&hl->si.condition);
#endif
#if defined (_Jv_HaveMutexDestroy)
  _Jv_MutexDestroy (&hl->si.mutex);
#endif
  hl->si.init = false;
}
#endif /* defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy) */
// We convert the lock back to lightweight status when
// we exit, so that a single contention episode doesn't doom the lock
// forever.  But we also need to make sure that lock structures for dead
// objects are eventually reclaimed.  We do that in an additional
// finalizer on the underlying object.
// Note that if the corresponding object is dead, it is safe to drop
// the heavy_lock structure from its list.  It is not necessarily
// safe to deallocate it, since the unlock code could still be running.
struct hash_entry {
  volatile obj_addr_t address;  // Address of object for which lightweight
                                // lock is held.
                                // We assume the 3 low order bits are zero.
                                // With the Boehm collector and bitmap
                                // allocation, objects of size 4 bytes are
                                // broken anyway.  Thus this is primarily
                                // a constraint on statically allocated
                                // objects used for synchronization.
                                // This allows us to use the low order
                                // bits as follows:
#   define LOCKED       1       // This hash entry is locked, and its
                                // state may be invalid.
                                // The lock protects both the hash_entry
                                // itself (except for the light_count
                                // and light_thr_id fields, which
                                // are protected by the lightweight
                                // lock itself), and any heavy_monitor
                                // structures attached to it.
#   define HEAVY        2       // There may be heavyweight locks
                                // associated with this cache entry.
                                // The lightweight entry is still valid,
                                // if the leading bits of the address
                                // field are nonzero.
                                // Set if heavy_count is > 0.
                                // Stored redundantly so a single
                                // compare-and-swap works in the easy case.
#   define REQUEST_CONVERSION 4 // The lightweight lock is held.  But
                                // one or more other threads have tried
                                // to acquire the lock, and hence request
                                // conversion to heavyweight status.
#   define FLAGS (LOCKED | HEAVY | REQUEST_CONVERSION)
  volatile _Jv_ThreadId_t light_thr_id;
                                // Thr_id of holder of lightweight lock.
                                // Only updated by lightweight lock holder.
                                // Must be recognizably invalid if the
                                // lightweight lock is not held.
#   define INVALID_THREAD_ID 0  // Works for Linux?
                                // If zero doesn't work, we have to
                                // initialize lock table.
  volatile unsigned short light_count;
                                // Number of times the lightweight lock
                                // is held minus one.  Zero if lightweight
                                // lock is not held.
  unsigned short heavy_count;   // Total number of times heavyweight locks
                                // associated with this hash entry are held
                                // or waiting to be acquired.
                                // Threads in wait() are included even though
                                // they have temporarily released the lock.
  struct heavy_lock * heavy_locks;
                                // Chain of heavy locks.  Protected
                                // by lockbit for he.  Locks may
                                // remain allocated here even if HEAVY
                                // is not set and heavy_count is 0.
                                // If a lightweight and heavyweight lock
                                // correspond to the same address, the
                                // lightweight lock is the right one.
};
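
// In summary, the address word of an entry encodes the following states
// (LOCKED may additionally be set in any of them while the entry is
// being updated):
//   0                                  -- entry unused.
//   addr                               -- lightweight lock on addr held.
//   addr | HEAVY                       -- lightweight lock held; heavy
//                                         locks also chained here.
//   addr | HEAVY | REQUEST_CONVERSION  -- lightweight lock held; another
//                                         thread waits for conversion.
//   HEAVY                              -- no lightweight lock; only heavy
//                                         locks in use.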
#ifndef JV_SYNC_TABLE_SZ
# define JV_SYNC_TABLE_SZ 2048  // Must be power of 2.
#endif

hash_entry light_locks[JV_SYNC_TABLE_SZ];

#define JV_SYNC_HASH(p) (((long)p ^ ((long)p >> 10)) & (JV_SYNC_TABLE_SZ-1))
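
// For illustration: with the default table of 2048 entries, the index
// is the low 11 bits of (p ^ (p >> 10)).  The xor folds higher address
// bits down into the index, so e.g. two objects whose addresses differ
// only in bit 11 still hash to different entries (bit 11 lands on bit 1
// after the shift).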
// Note that the light_locks table is scanned conservatively by the
// collector.  It is essential that the heavy_locks field is scanned.
// Currently the address field may or may not cause the associated object
// to be retained, depending on whether flag bits are set.
// This means that we can conceivably get an unexpected deadlock if
// 1) Object at address A is locked.
// 2) The client drops A without unlocking it.
// 3) Flag bits in the address entry are set, so the collector reclaims
//    the object at A.
// 4) A is reallocated, and an attempt is made to lock the result.
// This could be fixed by scanning light_locks in a more customized
// manner that ignores the flag bits.  But it can only happen with hand
// generated semi-illegal .class files, and then it doesn't present a
// security hole.
#ifdef LOCK_DEBUG
void print_he(hash_entry *he)
{
  fprintf(stderr, "lock hash entry = %p, index = %d, address = 0x%lx\n"
          "\tlight_thr_id = 0x%lx, light_count = %d, "
          "heavy_count = %d\n\theavy_locks:", he,
          he - light_locks, he -> address, he -> light_thr_id,
          he -> light_count, he -> heavy_count);
  print_hl_list(he -> heavy_locks);
  fprintf(stderr, "\n");
}
#endif /* LOCK_DEBUG */
static bool mp = false; // Known multiprocessor.
// Wait for roughly 2^n units, touching as little memory as possible.
static void
spin(unsigned n)
{
  const unsigned MP_SPINS = 10;
  const unsigned YIELDS = 4;
  const unsigned SPINS_PER_UNIT = 30;
  const unsigned MIN_SLEEP_USECS = 2001; // Shorter times spin under Linux.
  const unsigned MAX_SLEEP_USECS = 200000;
  static unsigned spin_limit = 0;
  static unsigned yield_limit = YIELDS;
  static bool spin_initialized = false;

  if (!spin_initialized)
    {
      mp = is_mp();
      if (mp)
        {
          spin_limit = MP_SPINS;
          yield_limit = MP_SPINS + YIELDS;
        }
      spin_initialized = true;
    }
  if (n < spin_limit)
    {
      unsigned i = SPINS_PER_UNIT << n;
      for (; i > 0; --i)
        __asm__ __volatile__("");
    }
  else if (n < yield_limit)
    {
      _Jv_ThreadYield();
    }
  else
    {
      unsigned duration = MIN_SLEEP_USECS << (n - yield_limit);
      if (n >= 15 + yield_limit || duration > MAX_SLEEP_USECS)
        duration = MAX_SLEEP_USECS;
      _Jv_platform_usleep(duration);
    }
}
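
// Illustrative backoff schedule on a multiprocessor (spin_limit == 10,
// yield_limit == 14):
//   n in [0, 9]   -- busy-wait for 30 << n empty iterations.
//   n in [10, 13] -- _Jv_ThreadYield().
//   n >= 14       -- _Jv_platform_usleep(2001 << (n - 14)), capped at
//                    200 ms.
// On a uniprocessor spin_limit stays 0, so we never busy-wait.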
// Wait for a hash entry to become unlocked.
static void
wait_unlocked (hash_entry *he)
{
  unsigned i = 0;
  while (he -> address & LOCKED)
    spin (i++);
}
// Return the heavy lock for addr if it was already allocated.
// The client passes in the appropriate hash_entry.
// We hold the lock for he.
static inline heavy_lock *
find_heavy (obj_addr_t addr, hash_entry *he)
{
  heavy_lock *hl = he -> heavy_locks;
  while (hl != 0 && hl -> address != addr) hl = hl -> next;
  return hl;
}
// Unlink the heavy lock for the given address from its hash table chain.
// Dies miserably and conspicuously if it's not there, since that should
// be impossible.
static inline void
unlink_heavy (obj_addr_t addr, hash_entry *he)
{
  heavy_lock **currentp = &(he -> heavy_locks);
  while ((*currentp) -> address != addr)
    currentp = &((*currentp) -> next);
  *currentp = (*currentp) -> next;
}
// Finalization procedure for objects that have associated heavy-weight
// locks.  This may replace the real finalization procedure.
static void
heavy_lock_obj_finalization_proc (void *obj, void *cd)
{
  heavy_lock *hl = (heavy_lock *)cd;

  // This only addresses misalignment of statics, not heap objects.  It
  // works only because registering statics for finalization is a noop,
  // no matter what the least significant bits are.
#ifdef JV_LINKER_CANNOT_8BYTE_ALIGN_STATICS
  obj_addr_t addr = (obj_addr_t)obj & ~((obj_addr_t)0x7);
#else
  obj_addr_t addr = (obj_addr_t)obj;
#endif
  hash_entry *he = light_locks + JV_SYNC_HASH(addr);
  obj_addr_t he_address = (he -> address & ~LOCKED);

  // Acquire lock bit immediately.  It's possible that the hl was already
  // destroyed while we were waiting for the finalizer to run.  If it
  // was, the address field was set to zero.  The address field access is
  // protected by the lock bit to ensure that we do this exactly once.
  // The lock bit also protects updates to the object's finalizer.
  while (!compare_and_swap(&(he -> address), he_address, he_address|LOCKED ))
    {
      // Hash table entry is currently locked.  We can't safely
      // touch the list of heavy locks.
      wait_unlocked(he);
      he_address = (he -> address & ~LOCKED);
    }
  if (0 == hl -> address)
    {
      // remove_all_heavy destroyed hl, and took care of the real finalizer.
      release_set(&(he -> address), he_address);
      return;
    }
  JvAssert(hl -> address == addr);
  GC_finalization_proc old_finalization_proc = hl -> old_finalization_proc;
  if (old_finalization_proc != 0)
    {
      // We still need to run a real finalizer.  In an idealized
      // world, in which people write thread-safe finalizers, that is
      // likely to require synchronization.  Thus we reregister
      // ourselves as the only finalizer, and simply run the real one.
      // Thus we don't clean up the lock yet, but we're likely to do so
      // on the next GC cycle.
      // It's OK if remove_all_heavy actually destroys the heavy lock,
      // since we've updated old_finalization_proc, and thus the user's
      // finalizer won't be rerun.
      void * old_client_data = hl -> old_client_data;
      hl -> old_finalization_proc = 0;
      hl -> old_client_data = 0;
#     ifdef HAVE_BOEHM_GC
        GC_REGISTER_FINALIZER_NO_ORDER(obj, heavy_lock_obj_finalization_proc,
                                       cd, 0, 0);
#     endif
      release_set(&(he -> address), he_address);
      old_finalization_proc(obj, old_client_data);
    }
  else
    {
      // The object is really dead, although it's conceivable that
      // some thread may still be in the process of releasing the
      // heavy lock.  Unlink it and, if necessary, register a finalizer
      // to destroy sync_info.
      unlink_heavy(addr, he);
      hl -> address = 0;        // Don't destroy it again.
      release_set(&(he -> address), he_address);
#     if defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy)
        // Make sure lock is not held and then destroy condvar and mutex.
        _Jv_MutexLock(&(hl->si.mutex));
        _Jv_MutexUnlock(&(hl->si.mutex));
        heavy_lock_finalization_proc (hl);
#     endif
    }
}
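
// Net effect of the chaining above: if the object originally had
// finalizer F with client data D, the first GC cycle that finds the
// object dead runs heavy_lock_obj_finalization_proc, which re-registers
// itself and invokes F(obj, D); a later cycle then finds
// old_finalization_proc zero and tears the heavy lock down.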
// We hold the lock on he, and heavy_count is 0.
// Release the lock by replacing the address with new_address_val.
// Remove all heavy locks on the list.  Note that the only possible way
// in which a lock may still be in use is if it's in the process of
// being unlocked.
static void
remove_all_heavy (hash_entry *he, obj_addr_t new_address_val)
{
  JvAssert(he -> heavy_count == 0);
  JvAssert(he -> address & LOCKED);
  heavy_lock *hl = he -> heavy_locks;
  he -> heavy_locks = 0;
  // We would really like to release the lock bit here.  Unfortunately,
  // that creates a race between our finalizer removal, and the potential
  // reinstallation of a new finalizer as a new heavy lock is created.
  // This may need to be revisited.
  for(; 0 != hl; hl = hl->next)
    {
      obj_addr_t obj = hl -> address;
      JvAssert(0 != obj);       // If this was previously finalized, it should
                                // no longer appear on our list.
      hl -> address = 0;        // Finalization proc might still see it after
                                // we finish.
      GC_finalization_proc old_finalization_proc = hl -> old_finalization_proc;
      void * old_client_data = hl -> old_client_data;
#     ifdef HAVE_BOEHM_GC
        // Remove our finalization procedure.
        // Reregister the clients if applicable.
        GC_REGISTER_FINALIZER_NO_ORDER((GC_PTR)obj, old_finalization_proc,
                                       old_client_data, 0, 0);
        // Note that our old finalization procedure may have been
        // previously determined to be runnable, and may still run.
        // FIXME - direct dependency on boehm GC.
#     endif
#     if defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy)
        // Wait for a possible lock holder to finish unlocking it.
        // This is only an issue if we have to explicitly destroy the mutex
        // or possibly if we have to destroy a condition variable that is
        // still being notified.
        _Jv_MutexLock(&(hl->si.mutex));
        _Jv_MutexUnlock(&(hl->si.mutex));
        heavy_lock_finalization_proc (hl);
#     endif
    }
  release_set(&(he -> address), new_address_val);
}
// We hold the lock on he and heavy_count is 0.
// We release it by replacing the address field with new_address_val.
// Remove all heavy locks on the list if the list is sufficiently long.
// This is called periodically to avoid very long lists of heavy locks.
// This seems to otherwise become an issue with SPECjbb, for example.
static inline void
maybe_remove_all_heavy (hash_entry *he, obj_addr_t new_address_val)
{
  static const int max_len = 5;
  heavy_lock *hl = he -> heavy_locks;

  for (int i = 0; i < max_len; ++i)
    {
      if (0 == hl)
        {
          release_set(&(he -> address), new_address_val);
          return;
        }
      hl = hl -> next;
    }
  remove_all_heavy(he, new_address_val);
}
// Allocate a new heavy lock for addr, returning its address.
// Assumes we already have the hash_entry locked, and there
// is currently no lightweight or allocated lock for addr.
// We register a finalizer for addr, which is responsible for
// removing the heavy lock when addr goes away, in addition
// to the responsibilities of any prior finalizer.
// This unfortunately holds the lock bit for the hash entry while it
// allocates two objects (one for the finalizer).
// It would be nice to avoid that somehow ...
static heavy_lock *
alloc_heavy(obj_addr_t addr, hash_entry *he)
{
  heavy_lock * hl = (heavy_lock *) _Jv_AllocTraceTwo(sizeof (heavy_lock));

  hl -> address = addr;
  _Jv_MutexInit (&(hl -> si.mutex));
  _Jv_CondInit (&(hl -> si.condition));
# if defined (_Jv_HaveCondDestroy) || defined (_Jv_HaveMutexDestroy)
    hl->si.init = true;  // needed?
# endif
  hl -> next = he -> heavy_locks;
  he -> heavy_locks = hl;
  // FIXME: The only call that cheats and goes directly to the GC interface.
# ifdef HAVE_BOEHM_GC
    GC_REGISTER_FINALIZER_NO_ORDER(
                          (void *)addr, heavy_lock_obj_finalization_proc,
                          hl, &hl->old_finalization_proc,
                          &hl->old_client_data);
# endif /* HAVE_BOEHM_GC */
  return hl;
}
// Return the heavy lock for addr, allocating if necessary.
// Assumes we have the cache entry locked, and there is no lightweight
// lock for addr.
static heavy_lock *
get_heavy(obj_addr_t addr, hash_entry *he)
{
  heavy_lock *hl = find_heavy(addr, he);
  if (0 == hl)
    hl = alloc_heavy(addr, he);
  return hl;
}
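
// The lock acquisition below distinguishes four cases:
//   1) The hash entry is free: install addr with a single
//      compare_and_swap and record ourselves as lightweight holder.
//      This is the expected fast path.
//   2) We already hold the lightweight lock: just bump light_count.
//   3) Another thread holds the lightweight lock for this object:
//      spin briefly, then request conversion to a heavyweight lock
//      and wait on its condition variable.
//   4) The entry is in use for a different address: acquire (possibly
//      allocating) a heavyweight lock chained from the same entry.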
void
_Jv_MonitorEnter (jobject obj)
{
#ifdef JV_LINKER_CANNOT_8BYTE_ALIGN_STATICS
  obj_addr_t addr = (obj_addr_t)obj & ~((obj_addr_t)FLAGS);
#else
  obj_addr_t addr = (obj_addr_t)obj;
#endif
  obj_addr_t address;
  unsigned hash = JV_SYNC_HASH(addr);
  hash_entry * he = light_locks + hash;
  _Jv_ThreadId_t self = _Jv_ThreadSelf();
  unsigned count;
  const unsigned N_SPINS = 18;

  // We need to somehow check that addr is not NULL on the fast path.
  // A very predictable
  // branch on a register value is probably cheaper than dereferencing addr.
  // We could also permanently lock the NULL entry in the hash table.
  // But it's not clear that's cheaper either.
  if (__builtin_expect(!addr, false))
    throw new java::lang::NullPointerException;

  JvAssert(!(addr & FLAGS));
retry:
  if (__builtin_expect(compare_and_swap(&(he -> address),
                                        0, addr), true))
    {
      JvAssert(he -> light_thr_id == INVALID_THREAD_ID);
      JvAssert(he -> light_count == 0);
      he -> light_thr_id = self;
      // Count fields are set correctly.  Heavy_count was also zero,
      // but can change asynchronously.
      // This path is hopefully both fast and the most common.
      return;
    }
  address = he -> address;
  if ((address & ~(HEAVY | REQUEST_CONVERSION)) == addr)
    {
      if (he -> light_thr_id == self)
        {
          // We hold the lightweight lock, and it's for the right
          // address.
          count = he -> light_count;
          if (count == USHRT_MAX)
            {
              // I think most JVMs don't check for this.
              // But I'm not convinced I couldn't turn this into a security
              // hole, even with a 32 bit counter.
              throw new java::lang::IllegalMonitorStateException(
                JvNewStringLatin1("maximum monitor nesting level exceeded"));
            }
          he -> light_count = count + 1;
          return;
        }
      else
        {
          // Lightweight lock is held, but by someone else.
          // Spin a few times.  This avoids turning this into a heavyweight
          // lock if the current holder is about to release it.
          for (unsigned int i = 0; i < N_SPINS; ++i)
            {
              if ((he -> address & ~LOCKED) != (address & ~LOCKED)) goto retry;
              spin(i);
            }
          address &= ~LOCKED;
          if (!compare_and_swap(&(he -> address), address, address | LOCKED ))
            {
              wait_unlocked(he);
              goto retry;
            }
          heavy_lock *hl = get_heavy(addr, he);
          ++ (he -> heavy_count);
          // The hl lock acquisition can't block for long, since it can
          // only be held by other threads waiting for conversion, and
          // they, like us, drop it quickly without blocking.
          _Jv_MutexLock(&(hl->si.mutex));
          JvAssert(he -> address == (address | LOCKED));
          release_set(&(he -> address), (address | REQUEST_CONVERSION | HEAVY));
                                // release lock on he
          while ((he -> address & ~FLAGS) == (address & ~FLAGS))
            {
              // Once converted, the lock has to retain heavyweight
              // status, since heavy_count > 0.
              _Jv_CondWait (&(hl->si.condition), &(hl->si.mutex), 0, 0);
            }
          keep_live(addr);
                // Guarantee that hl doesn't get unlinked by finalizer.
                // This is only an issue if the client fails to release
                // the lock, which is unlikely.
          JvAssert(he -> address & HEAVY);
          // Lock has been converted, we hold the heavyweight lock,
          // heavy_count has been incremented.
          return;
        }
    }
  obj_addr_t was_heavy = (address & HEAVY);
  address &= ~LOCKED;
  if (!compare_and_swap(&(he -> address), address, (address | LOCKED)))
    {
      wait_unlocked(he);
      goto retry;
    }
  if ((address & ~(HEAVY | REQUEST_CONVERSION)) == 0)
    {
      // Either was_heavy is true, or something changed out from under us,
      // since the initial test for 0 failed.
      JvAssert(!(address & REQUEST_CONVERSION));
        // Can't convert a nonexistent lightweight lock.
      heavy_lock *hl;
      hl = (was_heavy? find_heavy(addr, he) : 0);
      if (0 == hl)
        {
          // It is OK to use the lightweight lock, since either the
          // heavyweight lock does not exist, or none of the
          // heavyweight locks currently exist.  Future threads
          // trying to acquire the lock will see the lightweight
          // one first and use that.
          he -> light_thr_id = self;  // OK, since nobody else can hold
                                      // light lock or do this at the
                                      // same time.
          JvAssert(he -> light_count == 0);
          JvAssert(was_heavy == (he -> address & HEAVY));
          release_set(&(he -> address), (addr | was_heavy));
        }
      else
        {
          // Must use heavy lock.
          ++ (he -> heavy_count);
          JvAssert(0 == (address & ~HEAVY));
          release_set(&(he -> address), HEAVY);
          _Jv_MutexLock(&(hl->si.mutex));
          keep_live(addr);
        }
      return;
    }
  // Lightweight lock is held, but does not correspond to this object.
  // We hold the lock on the hash entry, and he -> address can't
  // change from under us.  Neither can the chain of heavy locks.
  {
    JvAssert(0 == he -> heavy_count || (address & HEAVY));
    heavy_lock *hl = get_heavy(addr, he);
    ++ (he -> heavy_count);
    release_set(&(he -> address), address | HEAVY);
    _Jv_MutexLock(&(hl->si.mutex));
    keep_live(addr);
  }
}
void
_Jv_MonitorExit (jobject obj)
{
#ifdef JV_LINKER_CANNOT_8BYTE_ALIGN_STATICS
  obj_addr_t addr = (obj_addr_t)obj & ~((obj_addr_t)FLAGS);
#else
  obj_addr_t addr = (obj_addr_t)obj;
#endif
  _Jv_ThreadId_t self = _Jv_ThreadSelf();
  unsigned hash = JV_SYNC_HASH(addr);
  hash_entry * he = light_locks + hash;
  _Jv_ThreadId_t light_thr_id;
  unsigned count;
  obj_addr_t address;

retry:
  light_thr_id = he -> light_thr_id;
  // Unfortunately, it turns out we always need to read the address
  // first.  Even if we are going to update it with compare_and_swap,
  // we need to reset light_thr_id, and that's not safe unless we know
  // that we hold the lock.
  address = he -> address;
  // First the (relatively) fast cases:
  if (__builtin_expect(light_thr_id == self, true))
    // Above must fail if addr == 0 .
    {
      count = he -> light_count;
      if (__builtin_expect((address & ~HEAVY) == addr, true))
        {
          if (count != 0)
            {
              // We held the lightweight lock all along.  Thus the values
              // we saw for light_thr_id and light_count must have been valid.
              he -> light_count = count - 1;
              return;
            }
          else
            {
              // We hold the lightweight lock once.
              he -> light_thr_id = INVALID_THREAD_ID;
              if (compare_and_swap_release(&(he -> address), address,
                                           address & HEAVY))
                return;
              else
                {
                  he -> light_thr_id = light_thr_id; // Undo prior damage.
                  goto retry;
                }
            }
        }
      // else lock is not for this address, conversion is requested,
      // or the lock bit in the address field is set.
    }
  else
    {
      if (__builtin_expect(!addr, false))
        throw new java::lang::NullPointerException;
      if ((address & ~(HEAVY | REQUEST_CONVERSION)) == addr)
        {
#         ifdef LOCK_DEBUG
            fprintf(stderr, "Lightweight lock held by other thread\n\t"
                            "light_thr_id = 0x%lx, self = 0x%lx, "
                            "address = 0x%lx, pid = %d\n",
                            light_thr_id, self, address, getpid());
            print_he(he);
            for(;;) {}
#         endif
          // Someone holds the lightweight lock for this object, and
          // it can't be us.
          throw new java::lang::IllegalMonitorStateException(
                        JvNewStringLatin1("current thread not owner"));
        }
      else
        count = he -> light_count;
    }
  if (address & LOCKED)
    {
      wait_unlocked(he);
      goto retry;
    }
  // Now the unlikely cases.
  // We do know that:
  // - Address is set, and doesn't contain the LOCKED bit.
  // - If address refers to the same object as addr, then he -> light_thr_id
  //   refers to this thread, and count is valid.
  // - The case in which we held the lightweight lock has been
  //   completely handled, except for the REQUEST_CONVERSION case.
  if ((address & ~FLAGS) == addr)
    {
      // The lightweight lock is assigned to this object.
      // Thus we must be in the REQUEST_CONVERSION case.
      if (0 != count)
        {
          // Defer conversion until we exit completely.
          he -> light_count = count - 1;
          return;
        }
      JvAssert(he -> light_thr_id == self);
      JvAssert(address & REQUEST_CONVERSION);
      // Conversion requested
      // Convert now.
      if (!compare_and_swap(&(he -> address), address, address | LOCKED))
        goto retry;
      heavy_lock *hl = find_heavy(addr, he);
      JvAssert (0 != hl);
                // Requestor created it.
      he -> light_count = 0;
      JvAssert(he -> heavy_count > 0);
                // was incremented by requestor.
      _Jv_MutexLock(&(hl->si.mutex));
        // Release the he lock after acquiring the mutex.
        // Otherwise we can accidentally
        // notify a thread that has already seen a heavyweight
        // lock.
      he -> light_thr_id = INVALID_THREAD_ID;
      release_set(&(he -> address), HEAVY);
                // lightweight lock now unused.
      _Jv_CondNotifyAll(&(hl->si.condition), &(hl->si.mutex));
      _Jv_MutexUnlock(&(hl->si.mutex));
      // heavy_count was already incremented by original requestor.
      keep_live(addr);
      return;
    }
  // lightweight lock not for this object.
  JvAssert(!(address & LOCKED));
  JvAssert((address & ~FLAGS) != addr);
  if (!compare_and_swap(&(he -> address), address, address | LOCKED))
    goto retry;
  heavy_lock *hl = find_heavy(addr, he);
  if (NULL == hl)
    {
#     ifdef LOCK_DEBUG
        fprintf(stderr, "Failed to find heavyweight lock for addr 0x%lx"
                        " pid = %d\n", addr, getpid());
        print_he(he);
        for(;;) {}
#     endif
      throw new java::lang::IllegalMonitorStateException(
                        JvNewStringLatin1("current thread not owner"));
    }
  JvAssert(address & HEAVY);
  count = he -> heavy_count;
  JvAssert(count > 0);
  --count;
  he -> heavy_count = count;
  if (0 == count)
    {
      const unsigned test_freq = 16;  // Power of 2
      static volatile unsigned counter = 0;
      unsigned my_counter = counter;

      counter = my_counter + 1;
      if (my_counter%test_freq == 0)
        {
          // Randomize the interval length a bit.
          counter = my_counter + (my_counter >> 4) % (test_freq/2);
          // Unlock mutex first, to avoid self-deadlock, or worse.
          _Jv_MutexUnlock(&(hl->si.mutex));
          maybe_remove_all_heavy(he, address &~HEAVY);
                                // release lock bit, preserving
                                // REQUEST_CONVERSION
                                // and object address.
        }
      else
        {
          release_set(&(he -> address), address &~HEAVY);
          _Jv_MutexUnlock(&(hl->si.mutex));
                        // Unlock after releasing the lock bit, so that
                        // we don't switch to another thread prematurely.
        }
    }
  else
    {
      release_set(&(he -> address), address);
      _Jv_MutexUnlock(&(hl->si.mutex));
    }
  keep_live(addr);
}
// Return false if obj's monitor is held by the current thread
bool
_Jv_ObjectCheckMonitor (jobject obj)
{
#ifdef JV_LINKER_CANNOT_8BYTE_ALIGN_STATICS
  obj_addr_t addr = (obj_addr_t)obj & ~((obj_addr_t)FLAGS);
#else
  obj_addr_t addr = (obj_addr_t)obj;
#endif
  obj_addr_t address;
  unsigned hash = JV_SYNC_HASH(addr);
  hash_entry * he = light_locks + hash;
  _Jv_ThreadId_t self = _Jv_ThreadSelf();

  JvAssert(!(addr & FLAGS));
retry:
  // Acquire the hash table entry lock
  address = ((he -> address) & ~LOCKED);
  if (!compare_and_swap(&(he -> address), address, address | LOCKED))
    {
      wait_unlocked(he);
      goto retry;
    }

  bool not_mine;

  if (!(address & ~FLAGS))
    not_mine = true;
  else if ((address & ~FLAGS) == addr)
    not_mine = (he -> light_thr_id != self);
  else
    {
      heavy_lock* hl = find_heavy(addr, he);
      not_mine = hl ? _Jv_MutexCheckMonitor(&hl->si.mutex) : true;
    }

  release_set(&(he -> address), address);       // unlock hash entry
  return not_mine;
}
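
// For example (an illustrative caller, not from this file), a
// Thread.holdsLock-style test can be phrased as:
//
//     bool holds_lock = ! _Jv_ObjectCheckMonitor (obj);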
// The rest of these are moderately thin veneers on _Jv_Cond ops.
// The current version of Notify might be able to make the pthread
// call AFTER releasing the lock, thus saving some context switches??

void
java::lang::Object::wait (jlong timeout, jint nanos)
{
#ifdef JV_LINKER_CANNOT_8BYTE_ALIGN_STATICS
  obj_addr_t addr = (obj_addr_t)this & ~((obj_addr_t)FLAGS);
#else
  obj_addr_t addr = (obj_addr_t)this;
#endif
  _Jv_ThreadId_t self = _Jv_ThreadSelf();
  unsigned hash = JV_SYNC_HASH(addr);
  hash_entry * he = light_locks + hash;
  unsigned count;
  obj_addr_t address;
  heavy_lock *hl;

  if (__builtin_expect (timeout < 0 || nanos < 0 || nanos > 999999, false))
    throw new IllegalArgumentException;
retry:
  address = he -> address;
  address &= ~LOCKED;
  if (!compare_and_swap(&(he -> address), address, address | LOCKED))
    {
      wait_unlocked(he);
      goto retry;
    }
  // address does not have the lock bit set.  We hold the lock on he.
  if ((address & ~FLAGS) == addr)
    {
      // Convert to heavyweight.
      if (he -> light_thr_id != self)
        {
#         ifdef LOCK_DEBUG
            fprintf(stderr, "Found wrong lightweight lock owner in wait "
                            "address = 0x%lx pid = %d\n", address, getpid());
            print_he(he);
            for(;;) {}
#         endif
          release_set(&(he -> address), address);
          throw new IllegalMonitorStateException (JvNewStringLatin1
                          ("current thread not owner"));
        }
      count = he -> light_count;
      hl = get_heavy(addr, he);
      he -> light_count = 0;
      he -> heavy_count += count + 1;
      for (unsigned i = 0; i <= count; ++i)
        _Jv_MutexLock(&(hl->si.mutex));
      // Again release the he lock after acquiring the mutex.
      he -> light_thr_id = INVALID_THREAD_ID;
      release_set(&(he -> address), HEAVY);     // lightweight lock now unused.
      if (address & REQUEST_CONVERSION)
        _Jv_CondNotify (&(hl->si.condition), &(hl->si.mutex));
    }
  else /* We should hold the heavyweight lock. */
    {
      hl = find_heavy(addr, he);
      release_set(&(he -> address), address);
      if (0 == hl)
        {
#         ifdef LOCK_DEBUG
            fprintf(stderr, "Couldn't find heavy lock in wait "
                            "addr = 0x%lx pid = %d\n", addr, getpid());
            print_he(he);
            for(;;) {}
#         endif
          throw new IllegalMonitorStateException (JvNewStringLatin1
                          ("current thread not owner"));
        }
      JvAssert(address & HEAVY);
    }
  switch (_Jv_CondWait (&(hl->si.condition), &(hl->si.mutex), timeout, nanos))
    {
      case _JV_NOT_OWNER:
        throw new IllegalMonitorStateException (JvNewStringLatin1
                        ("current thread not owner"));
      case _JV_INTERRUPTED:
        if (Thread::interrupted ())
          throw new InterruptedException;
    }
}
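
// Note on the conversion path above: a lightweight lock held recursively
// count+1 times is converted by acquiring the heavyweight mutex count+1
// times (and adding count+1 to heavy_count), so the recursion depth
// carries over to _Jv_CondWait and is restored when the wait returns.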
void
java::lang::Object::notify (void)
{
#ifdef JV_LINKER_CANNOT_8BYTE_ALIGN_STATICS
  obj_addr_t addr = (obj_addr_t)this & ~((obj_addr_t)FLAGS);
#else
  obj_addr_t addr = (obj_addr_t)this;
#endif
  _Jv_ThreadId_t self = _Jv_ThreadSelf();
  unsigned hash = JV_SYNC_HASH(addr);
  hash_entry * he = light_locks + hash;
  heavy_lock *hl;
  obj_addr_t address;
  int result;

retry:
  address = ((he -> address) & ~LOCKED);
  if (!compare_and_swap(&(he -> address), address, address | LOCKED))
    {
      wait_unlocked(he);
      goto retry;
    }
  if ((address & ~FLAGS) == addr && he -> light_thr_id == self)
    {
      // We hold lightweight lock.  Since it has not
      // been inflated, there are no waiters.
      release_set(&(he -> address), address);   // unlock
      return;
    }
  hl = find_heavy(addr, he);
  // Hl can't disappear since we point to the underlying object.
  // It's important that we release the lock bit before the notify, since
  // otherwise we will try to wake up the target while we still hold the
  // bit.  This results in lock bit contention, which we don't handle
  // terribly well.
  release_set(&(he -> address), address);       // unlock
  if (0 == hl)
    {
      throw new IllegalMonitorStateException(JvNewStringLatin1
                                             ("current thread not owner"));
      return;
    }
  result = _Jv_CondNotify(&(hl->si.condition), &(hl->si.mutex));
  keep_live(addr);
  if (__builtin_expect (result, 0))
    throw new IllegalMonitorStateException(JvNewStringLatin1
                                           ("current thread not owner"));
}
void
java::lang::Object::notifyAll (void)
{
#ifdef JV_LINKER_CANNOT_8BYTE_ALIGN_STATICS
  obj_addr_t addr = (obj_addr_t)this & ~((obj_addr_t)FLAGS);
#else
  obj_addr_t addr = (obj_addr_t)this;
#endif
  _Jv_ThreadId_t self = _Jv_ThreadSelf();
  unsigned hash = JV_SYNC_HASH(addr);
  hash_entry * he = light_locks + hash;
  heavy_lock *hl;
  obj_addr_t address;
  int result;

retry:
  address = (he -> address) & ~LOCKED;
  if (!compare_and_swap(&(he -> address), address, address | LOCKED))
    {
      wait_unlocked(he);
      goto retry;
    }
  hl = find_heavy(addr, he);
  if ((address & ~FLAGS) == addr && he -> light_thr_id == self)
    {
      // We hold lightweight lock.  Since it has not
      // been inflated, there are no waiters.
      release_set(&(he -> address), address);   // unlock
      return;
    }
  release_set(&(he -> address), address);       // unlock
  if (0 == hl)
    {
      throw new IllegalMonitorStateException(JvNewStringLatin1
                                             ("current thread not owner"));
    }
  result = _Jv_CondNotifyAll(&(hl->si.condition), &(hl->si.mutex));
  if (__builtin_expect (result, 0))
    throw new IllegalMonitorStateException(JvNewStringLatin1
                                           ("current thread not owner"));
}
// This is declared in Java code and in Object.h.
// It should never be called with JV_HASH_SYNCHRONIZATION
void
java::lang::Object::sync_init (void)
{
  throw new IllegalMonitorStateException(JvNewStringLatin1
                                         ("internal error: sync_init"));
}
// This is called on startup and declared in Object.h.
// For now we just make it a no-op.
void
_Jv_InitializeSyncMutex (void)
{
}

#endif /* JV_HASH_SYNCHRONIZATION */