/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */
27 #pragma ident "%Z%%M% %I% %E% SMI"
/*
 * This file contains most of the functionality
 * required to support the threads portion of libc_db.
 */
35 #include "thr_uberdata.h"
/*
 * Deliberately-empty event stub functions.  A debugger (via libc_db)
 * places breakpoints on these to be notified of the corresponding
 * thread events; libc calls them at the matching points of interest.
 * They must remain distinct, non-inlined functions with no bodies.
 */
static void
tdb_event_ready(void) {}

static void
tdb_event_sleep(void) {}

static void
tdb_event_switchto(void) {}

static void
tdb_event_switchfrom(void) {}

static void
tdb_event_lock_try(void) {}

static void
tdb_event_catchsig(void) {}

static void
tdb_event_idle(void) {}

static void
tdb_event_create(void) {}

static void
tdb_event_death(void) {}

static void
tdb_event_preempt(void) {}

static void
tdb_event_pri_inherit(void) {}

static void
tdb_event_reap(void) {}

static void
tdb_event_concurrency(void) {}

static void
tdb_event_timeout(void) {}
80 * uberflags.uf_tdb_register_sync is set to REGISTER_SYNC_ENABLE by a debugger
81 * to empty the table and then enable synchronization object registration.
83 * uberflags.uf_tdb_register_sync is set to REGISTER_SYNC_DISABLE by a debugger
84 * to empty the table and then disable synchronization object registration.
87 const tdb_ev_func_t tdb_events
[TD_MAX_EVENT_NUM
- TD_MIN_EVENT_NUM
+ 1] = {
98 tdb_event_pri_inherit
,
100 tdb_event_concurrency
,
104 #if TDB_HASH_SHIFT != 15
105 #error "this is all broken because TDB_HASH_SHIFT is not 15"
109 tdb_addr_hash(void *addr
)
112 * This knows for a fact that the hash table has
113 * 32K entries; that is, that TDB_HASH_SHIFT is 15.
116 uint64_t value60
= ((uintptr_t)addr
>> 4); /* 60 bits */
117 uint32_t value30
= (value60
>> 30) ^ (value60
& 0x3fffffff);
119 uint32_t value30
= ((uintptr_t)addr
>> 2); /* 30 bits */
121 return ((value30
>> 15) ^ (value30
& 0x7fff));
124 static tdb_sync_stats_t
*
125 alloc_sync_addr(void *addr
)
127 uberdata_t
*udp
= curthread
->ul_uberdata
;
128 tdb_t
*tdbp
= &udp
->tdb
;
129 tdb_sync_stats_t
*sap
;
131 ASSERT(MUTEX_OWNED(&udp
->tdb_hash_lock
, curthread
));
133 if ((sap
= tdbp
->tdb_sync_addr_free
) == NULL
) {
138 * Don't keep trying after mmap() has already failed.
140 if (tdbp
->tdb_hash_alloc_failed
)
143 /* double the allocation each time */
144 tdbp
->tdb_sync_alloc
*= 2;
145 if ((vaddr
= mmap(NULL
,
146 tdbp
->tdb_sync_alloc
* sizeof (tdb_sync_stats_t
),
147 PROT_READ
|PROT_WRITE
, MAP_PRIVATE
|MAP_ANON
,
148 -1, (off_t
)0)) == MAP_FAILED
) {
149 tdbp
->tdb_hash_alloc_failed
= 1;
152 sap
= tdbp
->tdb_sync_addr_free
= vaddr
;
153 for (i
= 1; i
< tdbp
->tdb_sync_alloc
; sap
++, i
++)
154 sap
->next
= (uintptr_t)(sap
+ 1);
155 sap
->next
= (uintptr_t)0;
156 tdbp
->tdb_sync_addr_last
= sap
;
158 sap
= tdbp
->tdb_sync_addr_free
;
161 tdbp
->tdb_sync_addr_free
= (tdb_sync_stats_t
*)(uintptr_t)sap
->next
;
162 sap
->next
= (uintptr_t)0;
163 sap
->sync_addr
= (uintptr_t)addr
;
164 (void) memset(&sap
->un
, 0, sizeof (sap
->un
));
169 initialize_sync_hash()
171 uberdata_t
*udp
= curthread
->ul_uberdata
;
172 tdb_t
*tdbp
= &udp
->tdb
;
174 tdb_sync_stats_t
*sap
;
178 if (tdbp
->tdb_hash_alloc_failed
)
180 lmutex_lock(&udp
->tdb_hash_lock
);
181 if (udp
->uberflags
.uf_tdb_register_sync
== REGISTER_SYNC_DISABLE
) {
183 * There is no point allocating the hash table
184 * if we are disabling registration.
186 udp
->uberflags
.uf_tdb_register_sync
= REGISTER_SYNC_OFF
;
187 lmutex_unlock(&udp
->tdb_hash_lock
);
190 if (tdbp
->tdb_sync_addr_hash
!= NULL
|| tdbp
->tdb_hash_alloc_failed
) {
191 lmutex_unlock(&udp
->tdb_hash_lock
);
194 /* start with a free list of 2k elements */
195 tdbp
->tdb_sync_alloc
= 2*1024;
196 if ((vaddr
= mmap(NULL
, TDB_HASH_SIZE
* sizeof (uint64_t) +
197 tdbp
->tdb_sync_alloc
* sizeof (tdb_sync_stats_t
),
198 PROT_READ
|PROT_WRITE
, MAP_PRIVATE
|MAP_ANON
,
199 -1, (off_t
)0)) == MAP_FAILED
) {
200 tdbp
->tdb_hash_alloc_failed
= 1;
205 /* initialize the free list */
206 tdbp
->tdb_sync_addr_free
= sap
=
207 (tdb_sync_stats_t
*)&addr_hash
[TDB_HASH_SIZE
];
208 for (i
= 1; i
< tdbp
->tdb_sync_alloc
; sap
++, i
++)
209 sap
->next
= (uintptr_t)(sap
+ 1);
210 sap
->next
= (uintptr_t)0;
211 tdbp
->tdb_sync_addr_last
= sap
;
213 /* insert &udp->tdb_hash_lock itself into the new (empty) table */
214 udp
->tdb_hash_lock_stats
.next
= (uintptr_t)0;
215 udp
->tdb_hash_lock_stats
.sync_addr
= (uintptr_t)&udp
->tdb_hash_lock
;
216 addr_hash
[tdb_addr_hash(&udp
->tdb_hash_lock
)] =
217 (uintptr_t)&udp
->tdb_hash_lock_stats
;
219 tdbp
->tdb_register_count
= 1;
220 /* assign to tdb_sync_addr_hash only after fully initialized */
222 tdbp
->tdb_sync_addr_hash
= addr_hash
;
223 lmutex_unlock(&udp
->tdb_hash_lock
);
227 tdb_sync_obj_register(void *addr
, int *new)
229 ulwp_t
*self
= curthread
;
230 uberdata_t
*udp
= self
->ul_uberdata
;
231 tdb_t
*tdbp
= &udp
->tdb
;
233 tdb_sync_stats_t
*sap
= NULL
;
238 * Don't start statistics collection until
239 * we have initialized the primary link map.
241 if (!self
->ul_primarymap
)
247 * To avoid recursion problems, we must do two things:
248 * 1. Make a special case for tdb_hash_lock (we use it internally).
249 * 2. Deal with the dynamic linker's lock interface:
250 * When calling any external function, we may invoke the
251 * dynamic linker. It grabs a lock, which calls back here.
252 * This only happens on the first call to the external
253 * function, so we can just return NULL if we are called
254 * recursively (and miss the first count).
256 if (addr
== (void *)&udp
->tdb_hash_lock
)
257 return (&udp
->tdb_hash_lock_stats
);
258 if (self
->ul_sync_obj_reg
) /* recursive call */
260 self
->ul_sync_obj_reg
= 1;
263 * On the first time through, initialize the hash table and free list.
265 if (tdbp
->tdb_sync_addr_hash
== NULL
) {
266 initialize_sync_hash();
267 if (tdbp
->tdb_sync_addr_hash
== NULL
) { /* utter failure */
268 udp
->uberflags
.uf_tdb_register_sync
= REGISTER_SYNC_OFF
;
274 sapp
= &tdbp
->tdb_sync_addr_hash
[tdb_addr_hash(addr
)];
275 if (udp
->uberflags
.uf_tdb_register_sync
== REGISTER_SYNC_ON
) {
277 * Look up an address in the synchronization object hash table.
278 * No lock is required since it can only deliver a false
279 * negative, in which case we fall into the locked case below.
281 for (sap
= (tdb_sync_stats_t
*)(uintptr_t)*sapp
; sap
!= NULL
;
282 sap
= (tdb_sync_stats_t
*)(uintptr_t)sap
->next
) {
283 if (sap
->sync_addr
== (uintptr_t)addr
)
289 * The search with no lock held failed or a special action is required.
290 * Grab tdb_hash_lock to do special actions and/or get a precise result.
292 lmutex_lock(&udp
->tdb_hash_lock
);
295 switch (udp
->uberflags
.uf_tdb_register_sync
) {
296 case REGISTER_SYNC_ON
:
298 case REGISTER_SYNC_OFF
:
302 * For all debugger actions, first zero out the
303 * statistics block of every element in the hash table.
305 for (i
= 0; i
< TDB_HASH_SIZE
; i
++)
306 for (sap
= (tdb_sync_stats_t
*)
307 (uintptr_t)tdbp
->tdb_sync_addr_hash
[i
];
309 sap
= (tdb_sync_stats_t
*)(uintptr_t)sap
->next
)
310 (void) memset(&sap
->un
, 0, sizeof (sap
->un
));
312 switch (udp
->uberflags
.uf_tdb_register_sync
) {
313 case REGISTER_SYNC_ENABLE
:
314 udp
->uberflags
.uf_tdb_register_sync
= REGISTER_SYNC_ON
;
316 case REGISTER_SYNC_DISABLE
:
318 udp
->uberflags
.uf_tdb_register_sync
= REGISTER_SYNC_OFF
;
325 * Perform the search while holding tdb_hash_lock.
326 * Keep track of the insertion point.
328 while ((sap
= (tdb_sync_stats_t
*)(uintptr_t)*sapp
) != NULL
) {
329 if (sap
->sync_addr
== (uintptr_t)addr
)
335 * Insert a new element if necessary.
337 if (sap
== NULL
&& (sap
= alloc_sync_addr(addr
)) != NULL
) {
338 *sapp
= (uintptr_t)sap
;
339 tdbp
->tdb_register_count
++;
346 lmutex_unlock(&udp
->tdb_hash_lock
);
347 self
->ul_sync_obj_reg
= 0;
352 tdb_sync_obj_deregister(void *addr
)
354 uberdata_t
*udp
= curthread
->ul_uberdata
;
355 tdb_t
*tdbp
= &udp
->tdb
;
357 tdb_sync_stats_t
*sap
;
361 * tdb_hash_lock is never destroyed.
363 ASSERT(addr
!= &udp
->tdb_hash_lock
);
366 * Avoid acquiring tdb_hash_lock if lock statistics gathering has
367 * never been initiated or there is nothing in the hash bucket.
368 * (Once the hash table is allocated, it is never deallocated.)
370 if (tdbp
->tdb_sync_addr_hash
== NULL
||
371 tdbp
->tdb_sync_addr_hash
[hash
= tdb_addr_hash(addr
)] == NULL
)
374 lmutex_lock(&udp
->tdb_hash_lock
);
375 sapp
= &tdbp
->tdb_sync_addr_hash
[hash
];
376 while ((sap
= (tdb_sync_stats_t
*)(uintptr_t)*sapp
) != NULL
) {
377 if (sap
->sync_addr
== (uintptr_t)addr
) {
378 /* remove it from the hash table */
380 tdbp
->tdb_register_count
--;
382 sap
->next
= (uintptr_t)0;
383 sap
->sync_addr
= (uintptr_t)0;
384 /* insert it on the tail of the free list */
385 if (tdbp
->tdb_sync_addr_free
== NULL
) {
386 tdbp
->tdb_sync_addr_free
= sap
;
387 tdbp
->tdb_sync_addr_last
= sap
;
389 tdbp
->tdb_sync_addr_last
->next
= (uintptr_t)sap
;
390 tdbp
->tdb_sync_addr_last
= sap
;
396 lmutex_unlock(&udp
->tdb_hash_lock
);
400 * Return a mutex statistics block for the given mutex.
403 tdb_mutex_stats(mutex_t
*mp
)
405 tdb_sync_stats_t
*tssp
;
407 /* avoid stealing the cache line unnecessarily */
408 if (mp
->mutex_magic
!= MUTEX_MAGIC
)
409 mp
->mutex_magic
= MUTEX_MAGIC
;
410 if ((tssp
= tdb_sync_obj_register(mp
, NULL
)) == NULL
)
412 tssp
->un
.type
= TDB_MUTEX
;
413 return (&tssp
->un
.mutex
);
417 * Return a condvar statistics block for the given condvar.
420 tdb_cond_stats(cond_t
*cvp
)
422 tdb_sync_stats_t
*tssp
;
424 /* avoid stealing the cache line unnecessarily */
425 if (cvp
->cond_magic
!= COND_MAGIC
)
426 cvp
->cond_magic
= COND_MAGIC
;
427 if ((tssp
= tdb_sync_obj_register(cvp
, NULL
)) == NULL
)
429 tssp
->un
.type
= TDB_COND
;
430 return (&tssp
->un
.cond
);
434 * Return an rwlock statistics block for the given rwlock.
437 tdb_rwlock_stats(rwlock_t
*rwlp
)
439 tdb_sync_stats_t
*tssp
;
441 /* avoid stealing the cache line unnecessarily */
442 if (rwlp
->magic
!= RWL_MAGIC
)
443 rwlp
->magic
= RWL_MAGIC
;
444 if ((tssp
= tdb_sync_obj_register(rwlp
, NULL
)) == NULL
)
446 tssp
->un
.type
= TDB_RWLOCK
;
447 return (&tssp
->un
.rwlock
);
451 * Return a semaphore statistics block for the given semaphore.
454 tdb_sema_stats(sema_t
*sp
)
456 tdb_sync_stats_t
*tssp
;
459 /* avoid stealing the cache line unnecessarily */
460 if (sp
->magic
!= SEMA_MAGIC
)
461 sp
->magic
= SEMA_MAGIC
;
462 if ((tssp
= tdb_sync_obj_register(sp
, &new)) == NULL
)
464 tssp
->un
.type
= TDB_SEMA
;
466 tssp
->un
.sema
.sema_max_count
= sp
->count
;
467 tssp
->un
.sema
.sema_min_count
= sp
->count
;
469 return (&tssp
->un
.sema
);