usr/src/lib/libc_db/common/thread_db.c
1 /*
2 * CDDL HEADER START
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
19 * CDDL HEADER END
23 * Copyright 2008 Sun Microsystems, Inc. All rights reserved.
24 * Use is subject to license terms.
28 * Copyright (c) 2014, Joyent, Inc. All rights reserved.
31 #include <stdio.h>
32 #include <stdlib.h>
33 #include <stddef.h>
34 #include <unistd.h>
35 #include <thr_uberdata.h>
36 #include <thread_db.h>
37 #include <libc_int.h>
40 * Private structures.
43 typedef union {
44 mutex_t lock;
45 rwlock_t rwlock;
46 sema_t semaphore;
47 cond_t condition;
48 } td_so_un_t;
50 struct td_thragent {
51 rwlock_t rwlock;
52 struct ps_prochandle *ph_p;
53 int initialized;
54 int sync_tracking;
55 int model;
56 int primary_map;
57 psaddr_t bootstrap_addr;
58 psaddr_t uberdata_addr;
59 psaddr_t tdb_eventmask_addr;
60 psaddr_t tdb_register_sync_addr;
61 psaddr_t tdb_events[TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1];
62 psaddr_t hash_table_addr;
63 int hash_size;
64 lwpid_t single_lwpid;
65 psaddr_t single_ulwp_addr;
69 * This is the name of the variable in libc that contains
70 * the uberdata address that we will need.
72 #define TD_BOOTSTRAP_NAME "_tdb_bootstrap"
74 * This is the actual name of uberdata, used in the event
75 * that tdb_bootstrap has not yet been initialized.
77 #define TD_UBERDATA_NAME "_uberdata"
79 * The library name should end with ".so.1", but older versions of
80 * dbx expect the unadorned name and malfunction if ".1" is specified.
81 * Unfortunately, if ".1" is not specified, mdb malfunctions when it
82 * is applied to another instance of itself (due to the presence of
83 * /usr/lib/mdb/proc/libc.so). So we try it both ways.
85 #define TD_LIBRARY_NAME "libc.so"
86 #define TD_LIBRARY_NAME_1 "libc.so.1"
88 td_err_e __td_thr_get_info(td_thrhandle_t *th_p, td_thrinfo_t *ti_p);
90 td_err_e __td_ta_thr_iter(td_thragent_t *ta_p, td_thr_iter_f *cb,
91 void *cbdata_p, td_thr_state_e state, int ti_pri,
92 sigset_t *ti_sigmask_p, unsigned ti_user_flags);
95 * Initialize threads debugging interface.
97 #pragma weak td_init = __td_init
98 td_err_e
99 __td_init()
101 return (TD_OK);
105 * This function does nothing, and never did.
106 * But the symbol is in the ABI, so we can't delete it.
108 #pragma weak td_log = __td_log
109 void
110 __td_log()
115 * Short-cut to read just the hash table size from the process,
116 * to avoid repeatedly reading the full uberdata structure when
117 * dealing with a single-threaded process.
119 static uint_t
120 td_read_hash_size(td_thragent_t *ta_p)
122 psaddr_t addr;
123 uint_t hash_size;
125 switch (ta_p->initialized) {
126 default: /* uninitialized */
127 return (0);
128 case 1: /* partially initialized */
129 break;
130 case 2: /* fully initialized */
131 return (ta_p->hash_size);
134 if (ta_p->model == PR_MODEL_NATIVE) {
135 addr = ta_p->uberdata_addr + offsetof(uberdata_t, hash_size);
136 } else {
137 #if defined(_LP64) && defined(_SYSCALL32)
138 addr = ta_p->uberdata_addr + offsetof(uberdata32_t, hash_size);
139 #else
140 addr = 0;
141 #endif
143 if (ps_pdread(ta_p->ph_p, addr, &hash_size, sizeof (hash_size))
144 != PS_OK)
145 return (0);
146 return (hash_size);
149 static td_err_e
150 td_read_uberdata(td_thragent_t *ta_p)
152 struct ps_prochandle *ph_p = ta_p->ph_p;
153 int i;
155 if (ta_p->model == PR_MODEL_NATIVE) {
156 uberdata_t uberdata;
158 if (ps_pdread(ph_p, ta_p->uberdata_addr,
159 &uberdata, sizeof (uberdata)) != PS_OK)
160 return (TD_DBERR);
161 ta_p->primary_map = uberdata.primary_map;
162 ta_p->tdb_eventmask_addr = ta_p->uberdata_addr +
163 offsetof(uberdata_t, tdb.tdb_ev_global_mask);
164 ta_p->tdb_register_sync_addr = ta_p->uberdata_addr +
165 offsetof(uberdata_t, uberflags.uf_tdb_register_sync);
166 ta_p->hash_table_addr = (psaddr_t)uberdata.thr_hash_table;
167 ta_p->hash_size = uberdata.hash_size;
168 if (ps_pdread(ph_p, (psaddr_t)uberdata.tdb.tdb_events,
169 ta_p->tdb_events, sizeof (ta_p->tdb_events)) != PS_OK)
170 return (TD_DBERR);
171 } else {
172 #if defined(_LP64) && defined(_SYSCALL32)
173 uberdata32_t uberdata;
174 caddr32_t tdb_events[TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1];
176 if (ps_pdread(ph_p, ta_p->uberdata_addr,
177 &uberdata, sizeof (uberdata)) != PS_OK)
178 return (TD_DBERR);
179 ta_p->primary_map = uberdata.primary_map;
180 ta_p->tdb_eventmask_addr = ta_p->uberdata_addr +
181 offsetof(uberdata32_t, tdb.tdb_ev_global_mask);
182 ta_p->tdb_register_sync_addr = ta_p->uberdata_addr +
183 offsetof(uberdata32_t, uberflags.uf_tdb_register_sync);
184 ta_p->hash_table_addr = (psaddr_t)uberdata.thr_hash_table;
185 ta_p->hash_size = uberdata.hash_size;
186 if (ps_pdread(ph_p, (psaddr_t)uberdata.tdb.tdb_events,
187 tdb_events, sizeof (tdb_events)) != PS_OK)
188 return (TD_DBERR);
189 for (i = 0; i < TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1; i++)
190 ta_p->tdb_events[i] = tdb_events[i];
191 #else
192 return (TD_DBERR);
193 #endif
197 * Unfortunately, we are (implicitly) assuming that our uberdata
198 * definition precisely matches that of our target. If this is not
199 * true (that is, if we're examining a core file from a foreign
200 * system that has a different definition of uberdata), the failure
201 * modes can be frustratingly non-explicit. In an effort to catch
202 * this upon initialization (when the debugger may still be able to
203 * opt for another thread model or may be able to fail explicitly), we
204 * check that each of our tdb_events points to valid memory (these are
205 * putatively text upon which a breakpoint can be issued), with the
206 * hope that this is enough of a self-consistency check to lead to
207 * explicit failure on a mismatch.
209 for (i = 0; i < TD_MAX_EVENT_NUM - TD_MIN_EVENT_NUM + 1; i++) {
210 uint8_t check;
212 if (ps_pdread(ph_p, (psaddr_t)ta_p->tdb_events[i],
213 &check, sizeof (check)) != PS_OK) {
214 return (TD_DBERR);
218 if (ta_p->hash_size != 1) { /* multi-threaded */
219 ta_p->initialized = 2;
220 ta_p->single_lwpid = 0;
221 ta_p->single_ulwp_addr = 0;
222 } else { /* single-threaded */
223 ta_p->initialized = 1;
225 * Get the address and lwpid of the single thread/LWP.
226 * It may not be ulwp_one if this is a child of fork1().
228 if (ta_p->model == PR_MODEL_NATIVE) {
229 thr_hash_table_t head;
230 lwpid_t lwpid = 0;
232 if (ps_pdread(ph_p, ta_p->hash_table_addr,
233 &head, sizeof (head)) != PS_OK)
234 return (TD_DBERR);
235 if ((psaddr_t)head.hash_bucket == 0)
236 ta_p->initialized = 0;
237 else if (ps_pdread(ph_p, (psaddr_t)head.hash_bucket +
238 offsetof(ulwp_t, ul_lwpid),
239 &lwpid, sizeof (lwpid)) != PS_OK)
240 return (TD_DBERR);
241 ta_p->single_lwpid = lwpid;
242 ta_p->single_ulwp_addr = (psaddr_t)head.hash_bucket;
243 } else {
244 #if defined(_LP64) && defined(_SYSCALL32)
245 thr_hash_table32_t head;
246 lwpid_t lwpid = 0;
248 if (ps_pdread(ph_p, ta_p->hash_table_addr,
249 &head, sizeof (head)) != PS_OK)
250 return (TD_DBERR);
251 if ((psaddr_t)head.hash_bucket == 0)
252 ta_p->initialized = 0;
253 else if (ps_pdread(ph_p, (psaddr_t)head.hash_bucket +
254 offsetof(ulwp32_t, ul_lwpid),
255 &lwpid, sizeof (lwpid)) != PS_OK)
256 return (TD_DBERR);
257 ta_p->single_lwpid = lwpid;
258 ta_p->single_ulwp_addr = (psaddr_t)head.hash_bucket;
259 #else
260 return (TD_DBERR);
261 #endif
264 if (!ta_p->primary_map)
265 ta_p->initialized = 0;
266 return (TD_OK);
269 static td_err_e
270 td_read_bootstrap_data(td_thragent_t *ta_p)
272 struct ps_prochandle *ph_p = ta_p->ph_p;
273 psaddr_t bootstrap_addr;
274 psaddr_t uberdata_addr;
275 ps_err_e db_return;
276 td_err_e return_val;
277 int do_1;
279 switch (ta_p->initialized) {
280 case 2: /* fully initialized */
281 return (TD_OK);
282 case 1: /* partially initialized */
283 if (td_read_hash_size(ta_p) == 1)
284 return (TD_OK);
285 return (td_read_uberdata(ta_p));
289 * Uninitialized -- do the startup work.
290 * We set ta_p->initialized to -1 to cut off recursive calls
291 * into libc_db by code in the provider of ps_pglobal_lookup().
293 do_1 = 0;
294 ta_p->initialized = -1;
295 db_return = ps_pglobal_lookup(ph_p, TD_LIBRARY_NAME,
296 TD_BOOTSTRAP_NAME, &bootstrap_addr);
297 if (db_return == PS_NOSYM) {
298 do_1 = 1;
299 db_return = ps_pglobal_lookup(ph_p, TD_LIBRARY_NAME_1,
300 TD_BOOTSTRAP_NAME, &bootstrap_addr);
302 if (db_return == PS_NOSYM) /* libc is not linked yet */
303 return (TD_NOLIBTHREAD);
304 if (db_return != PS_OK)
305 return (TD_ERR);
306 db_return = ps_pglobal_lookup(ph_p,
307 do_1? TD_LIBRARY_NAME_1 : TD_LIBRARY_NAME,
308 TD_UBERDATA_NAME, &uberdata_addr);
309 if (db_return == PS_NOSYM) /* libc is not linked yet */
310 return (TD_NOLIBTHREAD);
311 if (db_return != PS_OK)
312 return (TD_ERR);
315 * Read the uberdata address into the thread agent structure.
317 if (ta_p->model == PR_MODEL_NATIVE) {
318 psaddr_t psaddr;
319 if (ps_pdread(ph_p, bootstrap_addr,
320 &psaddr, sizeof (psaddr)) != PS_OK)
321 return (TD_DBERR);
322 if ((ta_p->bootstrap_addr = psaddr) == 0)
323 psaddr = uberdata_addr;
324 else if (ps_pdread(ph_p, psaddr,
325 &psaddr, sizeof (psaddr)) != PS_OK)
326 return (TD_DBERR);
327 if (psaddr == 0) {
328 /* primary linkmap in the tgt is not initialized */
329 ta_p->bootstrap_addr = 0;
330 psaddr = uberdata_addr;
332 ta_p->uberdata_addr = psaddr;
333 } else {
334 #if defined(_LP64) && defined(_SYSCALL32)
335 caddr32_t psaddr;
336 if (ps_pdread(ph_p, bootstrap_addr,
337 &psaddr, sizeof (psaddr)) != PS_OK)
338 return (TD_DBERR);
339 if ((ta_p->bootstrap_addr = (psaddr_t)psaddr) == 0)
340 psaddr = (caddr32_t)uberdata_addr;
341 else if (ps_pdread(ph_p, (psaddr_t)psaddr,
342 &psaddr, sizeof (psaddr)) != PS_OK)
343 return (TD_DBERR);
344 if (psaddr == 0) {
345 /* primary linkmap in the tgt is not initialized */
346 ta_p->bootstrap_addr = 0;
347 psaddr = (caddr32_t)uberdata_addr;
349 ta_p->uberdata_addr = (psaddr_t)psaddr;
350 #else
351 return (TD_DBERR);
352 #endif /* _SYSCALL32 */
355 if ((return_val = td_read_uberdata(ta_p)) != TD_OK)
356 return (return_val);
357 if (ta_p->bootstrap_addr == 0)
358 ta_p->initialized = 0;
359 return (TD_OK);
362 #pragma weak ps_kill
363 #pragma weak ps_lrolltoaddr
366 * Allocate a new agent process handle ("thread agent").
368 #pragma weak td_ta_new = __td_ta_new
369 td_err_e
370 __td_ta_new(struct ps_prochandle *ph_p, td_thragent_t **ta_pp)
372 td_thragent_t *ta_p;
373 int model;
374 td_err_e return_val = TD_OK;
376 if (ph_p == NULL)
377 return (TD_BADPH);
378 if (ta_pp == NULL)
379 return (TD_ERR);
380 *ta_pp = NULL;
381 if (ps_pstop(ph_p) != PS_OK)
382 return (TD_DBERR);
384 * ps_pdmodel might not be defined if this is an older client.
385 * Make it a weak symbol and test if it exists before calling.
387 #pragma weak ps_pdmodel
388 if (ps_pdmodel == NULL) {
389 model = PR_MODEL_NATIVE;
390 } else if (ps_pdmodel(ph_p, &model) != PS_OK) {
391 (void) ps_pcontinue(ph_p);
392 return (TD_ERR);
394 if ((ta_p = malloc(sizeof (*ta_p))) == NULL) {
395 (void) ps_pcontinue(ph_p);
396 return (TD_MALLOC);
400 * Initialize the agent process handle.
401 * Pick up the symbol value we need from the target process.
403 (void) memset(ta_p, 0, sizeof (*ta_p));
404 ta_p->ph_p = ph_p;
405 (void) rwlock_init(&ta_p->rwlock, USYNC_THREAD, NULL);
406 ta_p->model = model;
407 return_val = td_read_bootstrap_data(ta_p);
410 * Because the old libthread_db enabled lock tracking by default,
411 * we must also do it. However, we do it only if the application
412 * provides the ps_kill() and ps_lrolltoaddr() interfaces.
413 * (dbx provides the ps_kill() and ps_lrolltoaddr() interfaces.)
415 if (return_val == TD_OK && ps_kill != NULL && ps_lrolltoaddr != NULL) {
416 register_sync_t oldenable;
417 register_sync_t enable = REGISTER_SYNC_ENABLE;
418 psaddr_t psaddr = ta_p->tdb_register_sync_addr;
420 if (ps_pdread(ph_p, psaddr,
421 &oldenable, sizeof (oldenable)) != PS_OK)
422 return_val = TD_DBERR;
423 else if (oldenable != REGISTER_SYNC_OFF ||
424 ps_pdwrite(ph_p, psaddr,
425 &enable, sizeof (enable)) != PS_OK) {
427 * Lock tracking was already enabled or we
428 * failed to enable it, probably because we
429 * are examining a core file. In either case
430 * set the sync_tracking flag non-zero to
431 * indicate that we should not attempt to
432 * disable lock tracking when we delete the
433 * agent process handle in td_ta_delete().
435 ta_p->sync_tracking = 1;
439 if (return_val == TD_OK)
440 *ta_pp = ta_p;
441 else
442 free(ta_p);
444 (void) ps_pcontinue(ph_p);
445 return (return_val);
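/*
 * Usage sketch (not part of this library): a debugger that has built a
 * struct ps_prochandle and exports the required ps_* callbacks would
 * typically create one agent per target and delete it on detach.  The
 * prochandle "ph" and the surrounding function are hypothetical.
 *
 *	td_thragent_t *ta;
 *	td_err_e err;
 *
 *	if (td_init() != TD_OK)
 *		return (-1);
 *	if ((err = td_ta_new(ph, &ta)) == TD_NOLIBTHREAD) {
 *		// libc is not mapped in the target yet; retry later
 *		return (0);
 *	}
 *	if (err != TD_OK)
 *		return (-1);
 *	... use the td_ta_*() and td_thr_*() interfaces with ta ...
 *	(void) td_ta_delete(ta);
 */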
449 * Utility function to grab the readers lock and return the prochandle,
450 * given an agent process handle. Performs standard error checking.
451 * Returns non-NULL with the lock held, or NULL with the lock not held.
453 static struct ps_prochandle *
454 ph_lock_ta(td_thragent_t *ta_p, td_err_e *err)
456 struct ps_prochandle *ph_p = NULL;
457 td_err_e error;
459 if (ta_p == NULL || ta_p->initialized == -1) {
460 *err = TD_BADTA;
461 } else if (rw_rdlock(&ta_p->rwlock) != 0) { /* can't happen? */
462 *err = TD_BADTA;
463 } else if ((ph_p = ta_p->ph_p) == NULL) {
464 (void) rw_unlock(&ta_p->rwlock);
465 *err = TD_BADPH;
466 } else if (ta_p->initialized != 2 &&
467 (error = td_read_bootstrap_data(ta_p)) != TD_OK) {
468 (void) rw_unlock(&ta_p->rwlock);
469 ph_p = NULL;
470 *err = error;
471 } else {
472 *err = TD_OK;
475 return (ph_p);
479 * Utility function to grab the readers lock and return the prochandle,
480 * given an agent thread handle. Performs standard error checking.
481 * Returns non-NULL with the lock held, or NULL with the lock not held.
483 static struct ps_prochandle *
484 ph_lock_th(const td_thrhandle_t *th_p, td_err_e *err)
486 if (th_p == NULL || th_p->th_unique == 0) {
487 *err = TD_BADTH;
488 return (NULL);
490 return (ph_lock_ta(th_p->th_ta_p, err));
494 * Utility function to grab the readers lock and return the prochandle,
495 * given a synchronization object handle. Performs standard error checking.
496 * Returns non-NULL with the lock held, or NULL with the lock not held.
498 static struct ps_prochandle *
499 ph_lock_sh(const td_synchandle_t *sh_p, td_err_e *err)
501 if (sh_p == NULL || sh_p->sh_unique == 0) {
502 *err = TD_BADSH;
503 return (NULL);
505 return (ph_lock_ta(sh_p->sh_ta_p, err));
509 * Unlock the agent process handle obtained from ph_lock_*().
511 static void
512 ph_unlock(td_thragent_t *ta_p)
514 (void) rw_unlock(&ta_p->rwlock);
518 * De-allocate an agent process handle,
519 * releasing all related resources.
521 * XXX -- This is hopelessly broken ---
522 * Storage for thread agent is not deallocated. The prochandle
523 * in the thread agent is set to NULL so that future uses of
524 * the thread agent can be detected and an error value returned.
525 * All functions in the external user interface that make
526 * use of the thread agent are expected
527 * to check for a NULL prochandle in the thread agent.
528 * All such functions are also expected to obtain a
529 * reader lock on the thread agent while it is using it.
531 #pragma weak td_ta_delete = __td_ta_delete
532 td_err_e
533 __td_ta_delete(td_thragent_t *ta_p)
535 struct ps_prochandle *ph_p;
538 * This is the only place we grab the writer lock.
539 * We are going to NULL out the prochandle.
541 if (ta_p == NULL || rw_wrlock(&ta_p->rwlock) != 0)
542 return (TD_BADTA);
543 if ((ph_p = ta_p->ph_p) == NULL) {
544 (void) rw_unlock(&ta_p->rwlock);
545 return (TD_BADPH);
548 * If synch. tracking was disabled when td_ta_new() was called and
549 * if td_ta_sync_tracking_enable() was never called, then disable
550 * synch. tracking (it was enabled by default in td_ta_new()).
552 if (ta_p->sync_tracking == 0 &&
553 ps_kill != NULL && ps_lrolltoaddr != NULL) {
554 register_sync_t enable = REGISTER_SYNC_DISABLE;
556 (void) ps_pdwrite(ph_p, ta_p->tdb_register_sync_addr,
557 &enable, sizeof (enable));
559 ta_p->ph_p = NULL;
560 (void) rw_unlock(&ta_p->rwlock);
561 return (TD_OK);
565 * Map an agent process handle to a client prochandle.
566 * Currently unused by dbx.
568 #pragma weak td_ta_get_ph = __td_ta_get_ph
569 td_err_e
570 __td_ta_get_ph(td_thragent_t *ta_p, struct ps_prochandle **ph_pp)
572 td_err_e return_val;
574 if (ph_pp != NULL) /* protect stupid callers */
575 *ph_pp = NULL;
576 if (ph_pp == NULL)
577 return (TD_ERR);
578 if ((*ph_pp = ph_lock_ta(ta_p, &return_val)) == NULL)
579 return (return_val);
580 ph_unlock(ta_p);
581 return (TD_OK);
585 * Set the process's suggested concurrency level.
586 * This is a no-op in a one-level model.
587 * Currently unused by dbx.
589 #pragma weak td_ta_setconcurrency = __td_ta_setconcurrency
590 /* ARGSUSED1 */
591 td_err_e
592 __td_ta_setconcurrency(const td_thragent_t *ta_p, int level)
594 if (ta_p == NULL)
595 return (TD_BADTA);
596 if (ta_p->ph_p == NULL)
597 return (TD_BADPH);
598 return (TD_OK);
602 * Get the number of threads in the process.
604 #pragma weak td_ta_get_nthreads = __td_ta_get_nthreads
605 td_err_e
606 __td_ta_get_nthreads(td_thragent_t *ta_p, int *nthread_p)
608 struct ps_prochandle *ph_p;
609 td_err_e return_val;
610 int nthreads;
611 int nzombies;
612 psaddr_t nthreads_addr;
613 psaddr_t nzombies_addr;
615 if (ta_p->model == PR_MODEL_NATIVE) {
616 nthreads_addr = ta_p->uberdata_addr +
617 offsetof(uberdata_t, nthreads);
618 nzombies_addr = ta_p->uberdata_addr +
619 offsetof(uberdata_t, nzombies);
620 } else {
621 #if defined(_LP64) && defined(_SYSCALL32)
622 nthreads_addr = ta_p->uberdata_addr +
623 offsetof(uberdata32_t, nthreads);
624 nzombies_addr = ta_p->uberdata_addr +
625 offsetof(uberdata32_t, nzombies);
626 #else
627 nthreads_addr = 0;
628 nzombies_addr = 0;
629 #endif /* _SYSCALL32 */
632 if (nthread_p == NULL)
633 return (TD_ERR);
634 if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
635 return (return_val);
636 if (ps_pdread(ph_p, nthreads_addr, &nthreads, sizeof (int)) != PS_OK)
637 return_val = TD_DBERR;
638 if (ps_pdread(ph_p, nzombies_addr, &nzombies, sizeof (int)) != PS_OK)
639 return_val = TD_DBERR;
640 ph_unlock(ta_p);
641 if (return_val == TD_OK)
642 *nthread_p = nthreads + nzombies;
643 return (return_val);
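/*
 * Usage sketch (hypothetical caller): the count returned above includes
 * zombies, so a client that wants only live threads still has to iterate.
 *
 *	int nthreads;
 *
 *	if (td_ta_get_nthreads(ta, &nthreads) == TD_OK)
 *		(void) printf("%d threads (including zombies)\n", nthreads);
 */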
646 typedef struct {
647 thread_t tid;
648 int found;
649 td_thrhandle_t th;
650 } td_mapper_param_t;
653 * Check the value in data against the thread id.
654 * If it matches, return 1 to terminate iterations.
655 * This function is used by td_ta_map_id2thr() to map a tid to a thread handle.
657 static int
658 td_mapper_id2thr(td_thrhandle_t *th_p, td_mapper_param_t *data)
660 td_thrinfo_t ti;
662 if (__td_thr_get_info(th_p, &ti) == TD_OK &&
663 data->tid == ti.ti_tid) {
664 data->found = 1;
665 data->th = *th_p;
666 return (1);
668 return (0);
672 * Given a thread identifier, return the corresponding thread handle.
674 #pragma weak td_ta_map_id2thr = __td_ta_map_id2thr
675 td_err_e
676 __td_ta_map_id2thr(td_thragent_t *ta_p, thread_t tid,
677 td_thrhandle_t *th_p)
679 td_err_e return_val;
680 td_mapper_param_t data;
682 if (th_p != NULL && /* optimize for a single thread */
683 ta_p != NULL &&
684 ta_p->initialized == 1 &&
685 (td_read_hash_size(ta_p) == 1 ||
686 td_read_uberdata(ta_p) == TD_OK) &&
687 ta_p->initialized == 1 &&
688 ta_p->single_lwpid == tid) {
689 th_p->th_ta_p = ta_p;
690 if ((th_p->th_unique = ta_p->single_ulwp_addr) == 0)
691 return (TD_NOTHR);
692 return (TD_OK);
696 * LOCKING EXCEPTION - Locking is not required here because
697 * the locking and checking will be done in __td_ta_thr_iter.
700 if (ta_p == NULL)
701 return (TD_BADTA);
702 if (th_p == NULL)
703 return (TD_BADTH);
704 if (tid == 0)
705 return (TD_NOTHR);
707 data.tid = tid;
708 data.found = 0;
709 return_val = __td_ta_thr_iter(ta_p,
710 (td_thr_iter_f *)td_mapper_id2thr, (void *)&data,
711 TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY,
712 TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
713 if (return_val == TD_OK) {
714 if (data.found == 0)
715 return_val = TD_NOTHR;
716 else
717 *th_p = data.th;
720 return (return_val);
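/*
 * Usage sketch: map a libc thread id reported by the target to a handle,
 * then fetch its information.  "ta" and "tid" are supplied by the
 * hypothetical caller; the cast mirrors the non-const prototype above.
 *
 *	td_thrhandle_t th;
 *	td_thrinfo_t ti;
 *
 *	if (td_ta_map_id2thr(ta, tid, &th) == TD_OK &&
 *	    td_thr_get_info(&th, &ti) == TD_OK)
 *		(void) printf("thread %d state %d\n",
 *		    (int)ti.ti_tid, (int)ti.ti_state);
 */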
724 * Map the address of a synchronization object to a sync. object handle.
726 #pragma weak td_ta_map_addr2sync = __td_ta_map_addr2sync
727 td_err_e
728 __td_ta_map_addr2sync(td_thragent_t *ta_p, psaddr_t addr, td_synchandle_t *sh_p)
730 struct ps_prochandle *ph_p;
731 td_err_e return_val;
732 uint16_t sync_magic;
734 if (sh_p == NULL)
735 return (TD_BADSH);
736 if (addr == 0)
737 return (TD_ERR);
738 if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
739 return (return_val);
741 * Check the magic number of the sync. object to make sure it's valid.
742 * The magic number is at the same offset for all sync. objects.
744 if (ps_pdread(ph_p, (psaddr_t)&((mutex_t *)addr)->mutex_magic,
745 &sync_magic, sizeof (sync_magic)) != PS_OK) {
746 ph_unlock(ta_p);
747 return (TD_BADSH);
749 ph_unlock(ta_p);
750 if (sync_magic != MUTEX_MAGIC && sync_magic != COND_MAGIC &&
751 sync_magic != SEMA_MAGIC && sync_magic != RWL_MAGIC)
752 return (TD_BADSH);
754 * Just fill in the appropriate fields of the sync. handle.
756 sh_p->sh_ta_p = (td_thragent_t *)ta_p;
757 sh_p->sh_unique = addr;
758 return (TD_OK);
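/*
 * Usage sketch: given the address of a mutex_t in the target (for
 * example, one the debugger located by symbol lookup), obtain a sync
 * handle for the td_sync_*() interfaces.  "ta" and "mutex_addr" are
 * hypothetical.
 *
 *	td_synchandle_t sh;
 *
 *	if (td_ta_map_addr2sync(ta, mutex_addr, &sh) == TD_OK)
 *		... pass &sh to td_sync_get_info() or similar ...
 */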
762 * Iterate over the set of global TSD keys.
763 * The call back function is called with three arguments,
764 * a key, a pointer to the destructor function, and the cbdata pointer.
765 * Currently unused by dbx.
767 #pragma weak td_ta_tsd_iter = __td_ta_tsd_iter
768 td_err_e
769 __td_ta_tsd_iter(td_thragent_t *ta_p, td_key_iter_f *cb, void *cbdata_p)
771 struct ps_prochandle *ph_p;
772 td_err_e return_val;
773 int key;
774 int numkeys;
775 psaddr_t dest_addr;
776 psaddr_t *destructors = NULL;
777 PFrV destructor;
779 if (cb == NULL)
780 return (TD_ERR);
781 if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
782 return (return_val);
783 if (ps_pstop(ph_p) != PS_OK) {
784 ph_unlock(ta_p);
785 return (TD_DBERR);
788 if (ta_p->model == PR_MODEL_NATIVE) {
789 tsd_metadata_t tsdm;
791 if (ps_pdread(ph_p,
792 ta_p->uberdata_addr + offsetof(uberdata_t, tsd_metadata),
793 &tsdm, sizeof (tsdm)) != PS_OK)
794 return_val = TD_DBERR;
795 else {
796 numkeys = tsdm.tsdm_nused;
797 dest_addr = (psaddr_t)tsdm.tsdm_destro;
798 if (numkeys > 0)
799 destructors =
800 malloc(numkeys * sizeof (psaddr_t));
802 } else {
803 #if defined(_LP64) && defined(_SYSCALL32)
804 tsd_metadata32_t tsdm;
806 if (ps_pdread(ph_p,
807 ta_p->uberdata_addr + offsetof(uberdata32_t, tsd_metadata),
808 &tsdm, sizeof (tsdm)) != PS_OK)
809 return_val = TD_DBERR;
810 else {
811 numkeys = tsdm.tsdm_nused;
812 dest_addr = (psaddr_t)tsdm.tsdm_destro;
813 if (numkeys > 0)
814 destructors =
815 malloc(numkeys * sizeof (caddr32_t));
817 #else
818 return_val = TD_DBERR;
819 #endif /* _SYSCALL32 */
822 if (return_val != TD_OK || numkeys <= 0) {
823 (void) ps_pcontinue(ph_p);
824 ph_unlock(ta_p);
825 return (return_val);
828 if (destructors == NULL)
829 return_val = TD_MALLOC;
830 else if (ta_p->model == PR_MODEL_NATIVE) {
831 if (ps_pdread(ph_p, dest_addr,
832 destructors, numkeys * sizeof (psaddr_t)) != PS_OK)
833 return_val = TD_DBERR;
834 else {
835 for (key = 1; key < numkeys; key++) {
836 destructor = (PFrV)destructors[key];
837 if (destructor != TSD_UNALLOCATED &&
838 (*cb)(key, destructor, cbdata_p))
839 break;
842 #if defined(_LP64) && defined(_SYSCALL32)
843 } else {
844 caddr32_t *destructors32 = (caddr32_t *)destructors;
845 caddr32_t destruct32;
847 if (ps_pdread(ph_p, dest_addr,
848 destructors32, numkeys * sizeof (caddr32_t)) != PS_OK)
849 return_val = TD_DBERR;
850 else {
851 for (key = 1; key < numkeys; key++) {
852 destruct32 = destructors32[key];
853 if ((destruct32 !=
854 (caddr32_t)(uintptr_t)TSD_UNALLOCATED) &&
855 (*cb)(key, (PFrV)(uintptr_t)destruct32,
856 cbdata_p))
857 break;
860 #endif /* _SYSCALL32 */
863 free(destructors);
864 (void) ps_pcontinue(ph_p);
865 ph_unlock(ta_p);
866 return (return_val);
870 sigequalset(const sigset_t *s1, const sigset_t *s2)
872 return (
873 s1->__sigbits[0] == s2->__sigbits[0] &&
874 s1->__sigbits[1] == s2->__sigbits[1] &&
875 s1->__sigbits[2] == s2->__sigbits[2] &&
876 s1->__sigbits[3] == s2->__sigbits[3]);
880 * Description:
881 * Iterate over all threads. For each thread call
882 * the function pointed to by "cb" with a pointer
883 * to a thread handle, and a pointer to data which
884 * can be NULL. Only call td_thr_iter_f() on threads
885 * which match the properties of state, ti_pri,
886 * ti_sigmask_p, and ti_user_flags. If cb returns
887 * a non-zero value, terminate iterations.
889 * Input:
890 * *ta_p - thread agent
891 * *cb - call back function defined by user.
892 * td_thr_iter_f() takes a thread handle and
893 * cbdata_p as a parameter.
894 * cbdata_p - parameter for td_thr_iter_f().
896 * state - state of threads of interest. A value of
897 * TD_THR_ANY_STATE from enum td_thr_state_e
898 * does not restrict iterations by state.
899 * ti_pri - lower bound of priorities of threads of
900 * interest. A value of TD_THR_LOWEST_PRIORITY
901 * defined in thread_db.h does not restrict
902 * iterations by priority. A thread with priority
903 * less than ti_pri will NOT be passed to the callback
904 * function.
905 * ti_sigmask_p - signal mask of threads of interest.
906 * A value of TD_SIGNO_MASK defined in thread_db.h
907 * does not restrict iterations by signal mask.
908 * ti_user_flags - user flags of threads of interest. A
909 * value of TD_THR_ANY_USER_FLAGS defined in thread_db.h
910 * does not restrict iterations by user flags.
912 #pragma weak td_ta_thr_iter = __td_ta_thr_iter
913 td_err_e
914 __td_ta_thr_iter(td_thragent_t *ta_p, td_thr_iter_f *cb,
915 void *cbdata_p, td_thr_state_e state, int ti_pri,
916 sigset_t *ti_sigmask_p, unsigned ti_user_flags)
918 struct ps_prochandle *ph_p;
919 psaddr_t first_lwp_addr;
920 psaddr_t first_zombie_addr;
921 psaddr_t curr_lwp_addr;
922 psaddr_t next_lwp_addr;
923 td_thrhandle_t th;
924 ps_err_e db_return;
925 ps_err_e db_return2;
926 td_err_e return_val;
928 if (cb == NULL)
929 return (TD_ERR);
931 * If state is not within bounds, short circuit.
933 if (state < TD_THR_ANY_STATE || state > TD_THR_STOPPED_ASLEEP)
934 return (TD_OK);
936 if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
937 return (return_val);
938 if (ps_pstop(ph_p) != PS_OK) {
939 ph_unlock(ta_p);
940 return (TD_DBERR);
944 * For each ulwp_t in the circular linked lists pointed
945 * to by "all_lwps" and "all_zombies":
946 * (1) Filter each thread.
947 * (2) Create the thread_object for each thread that passes.
948 * (3) Call the call back function on each thread.
951 if (ta_p->model == PR_MODEL_NATIVE) {
952 db_return = ps_pdread(ph_p,
953 ta_p->uberdata_addr + offsetof(uberdata_t, all_lwps),
954 &first_lwp_addr, sizeof (first_lwp_addr));
955 db_return2 = ps_pdread(ph_p,
956 ta_p->uberdata_addr + offsetof(uberdata_t, all_zombies),
957 &first_zombie_addr, sizeof (first_zombie_addr));
958 } else {
959 #if defined(_LP64) && defined(_SYSCALL32)
960 caddr32_t addr32;
962 db_return = ps_pdread(ph_p,
963 ta_p->uberdata_addr + offsetof(uberdata32_t, all_lwps),
964 &addr32, sizeof (addr32));
965 first_lwp_addr = addr32;
966 db_return2 = ps_pdread(ph_p,
967 ta_p->uberdata_addr + offsetof(uberdata32_t, all_zombies),
968 &addr32, sizeof (addr32));
969 first_zombie_addr = addr32;
970 #else /* _SYSCALL32 */
971 db_return = PS_ERR;
972 db_return2 = PS_ERR;
973 #endif /* _SYSCALL32 */
975 if (db_return == PS_OK)
976 db_return = db_return2;
979 * If first_lwp_addr and first_zombie_addr are both NULL,
980 * libc must not yet be initialized or all threads have
981 * exited. Return TD_NOTHR and all will be well.
983 if (db_return == PS_OK &&
984 first_lwp_addr == 0 && first_zombie_addr == 0) {
985 (void) ps_pcontinue(ph_p);
986 ph_unlock(ta_p);
987 return (TD_NOTHR);
989 if (db_return != PS_OK) {
990 (void) ps_pcontinue(ph_p);
991 ph_unlock(ta_p);
992 return (TD_DBERR);
996 * Run down the lists of all living and dead lwps.
998 if (first_lwp_addr == 0)
999 first_lwp_addr = first_zombie_addr;
1000 curr_lwp_addr = first_lwp_addr;
1001 for (;;) {
1002 td_thr_state_e ts_state;
1003 int userpri;
1004 unsigned userflags;
1005 sigset_t mask;
1008 * Read the ulwp struct.
1010 if (ta_p->model == PR_MODEL_NATIVE) {
1011 ulwp_t ulwp;
1013 if (ps_pdread(ph_p, curr_lwp_addr,
1014 &ulwp, sizeof (ulwp)) != PS_OK &&
1015 ((void) memset(&ulwp, 0, sizeof (ulwp)),
1016 ps_pdread(ph_p, curr_lwp_addr,
1017 &ulwp, REPLACEMENT_SIZE)) != PS_OK) {
1018 return_val = TD_DBERR;
1019 break;
1021 next_lwp_addr = (psaddr_t)ulwp.ul_forw;
1023 ts_state = ulwp.ul_dead? TD_THR_ZOMBIE :
1024 ulwp.ul_stop? TD_THR_STOPPED :
1025 ulwp.ul_wchan? TD_THR_SLEEP :
1026 TD_THR_ACTIVE;
1027 userpri = ulwp.ul_pri;
1028 userflags = ulwp.ul_usropts;
1029 if (ulwp.ul_dead)
1030 (void) sigemptyset(&mask);
1031 else
1032 mask = *(sigset_t *)&ulwp.ul_sigmask;
1033 } else {
1034 #if defined(_LP64) && defined(_SYSCALL32)
1035 ulwp32_t ulwp;
1037 if (ps_pdread(ph_p, curr_lwp_addr,
1038 &ulwp, sizeof (ulwp)) != PS_OK &&
1039 ((void) memset(&ulwp, 0, sizeof (ulwp)),
1040 ps_pdread(ph_p, curr_lwp_addr,
1041 &ulwp, REPLACEMENT_SIZE32)) != PS_OK) {
1042 return_val = TD_DBERR;
1043 break;
1045 next_lwp_addr = (psaddr_t)ulwp.ul_forw;
1047 ts_state = ulwp.ul_dead? TD_THR_ZOMBIE :
1048 ulwp.ul_stop? TD_THR_STOPPED :
1049 ulwp.ul_wchan? TD_THR_SLEEP :
1050 TD_THR_ACTIVE;
1051 userpri = ulwp.ul_pri;
1052 userflags = ulwp.ul_usropts;
1053 if (ulwp.ul_dead)
1054 (void) sigemptyset(&mask);
1055 else
1056 mask = *(sigset_t *)&ulwp.ul_sigmask;
1057 #else /* _SYSCALL32 */
1058 return_val = TD_ERR;
1059 break;
1060 #endif /* _SYSCALL32 */
1064 * Filter on state, priority, sigmask, and user flags.
1067 if ((state != ts_state) &&
1068 (state != TD_THR_ANY_STATE))
1069 goto advance;
1071 if (ti_pri > userpri)
1072 goto advance;
1074 if (ti_sigmask_p != TD_SIGNO_MASK &&
1075 !sigequalset(ti_sigmask_p, &mask))
1076 goto advance;
1078 if (ti_user_flags != userflags &&
1079 ti_user_flags != (unsigned)TD_THR_ANY_USER_FLAGS)
1080 goto advance;
1083 * Call back - break if the return
1084 * from the call back is non-zero.
1086 th.th_ta_p = (td_thragent_t *)ta_p;
1087 th.th_unique = curr_lwp_addr;
1088 if ((*cb)(&th, cbdata_p))
1089 break;
1091 advance:
1092 if ((curr_lwp_addr = next_lwp_addr) == first_lwp_addr) {
1094 * Switch to the zombie list, unless it is NULL
1095 * or we have already been doing the zombie list,
1096 * in which case terminate the loop.
1098 if (first_zombie_addr == 0 ||
1099 first_lwp_addr == first_zombie_addr)
1100 break;
1101 curr_lwp_addr = first_lwp_addr = first_zombie_addr;
1105 (void) ps_pcontinue(ph_p);
1106 ph_unlock(ta_p);
1107 return (return_val);
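/*
 * Usage sketch: a callback that counts live threads.  As the comment in
 * event_msg_cb() below notes, the handle passed to the callback points
 * to a local variable in __td_ta_thr_iter(), so it must be copied if it
 * is to be kept past the call.  Names here are hypothetical.
 *
 *	static int
 *	count_live_cb(const td_thrhandle_t *th, void *arg)
 *	{
 *		td_thrinfo_t ti;
 *		int *countp = arg;
 *
 *		if (td_thr_get_info((td_thrhandle_t *)th, &ti) == TD_OK &&
 *		    ti.ti_state != TD_THR_ZOMBIE)
 *			(*countp)++;
 *		return (0);		// keep iterating
 *	}
 *
 *	int live = 0;
 *	(void) td_ta_thr_iter(ta, count_live_cb, &live, TD_THR_ANY_STATE,
 *	    TD_THR_LOWEST_PRIORITY, TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
 */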
1111 * Enable or disable process synchronization object tracking.
1112 * Currently unused by dbx.
1114 #pragma weak td_ta_sync_tracking_enable = __td_ta_sync_tracking_enable
1115 td_err_e
1116 __td_ta_sync_tracking_enable(td_thragent_t *ta_p, int onoff)
1118 struct ps_prochandle *ph_p;
1119 td_err_e return_val;
1120 register_sync_t enable;
1122 if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
1123 return (return_val);
1125 * Values of tdb_register_sync in the victim process:
1126 * REGISTER_SYNC_ENABLE enables registration of synch objects
1127 * REGISTER_SYNC_DISABLE disables registration of synch objects
1128 * These cause the table to be cleared and tdb_register_sync set to:
1129 * REGISTER_SYNC_ON registration in effect
1130 * REGISTER_SYNC_OFF registration not in effect
1132 enable = onoff? REGISTER_SYNC_ENABLE : REGISTER_SYNC_DISABLE;
1133 if (ps_pdwrite(ph_p, ta_p->tdb_register_sync_addr,
1134 &enable, sizeof (enable)) != PS_OK)
1135 return_val = TD_DBERR;
1137 * Remember that this interface was called (see td_ta_delete()).
1139 ta_p->sync_tracking = 1;
1140 ph_unlock(ta_p);
1141 return (return_val);
1145 * Iterate over all known synchronization variables.
1146 * It is very possible that the list generated is incomplete,
1147 * because the iterator can only find synchronization variables
1148 * that have been registered by the process since synchronization
1149 * object registration was enabled.
1150 * The call back function cb is called for each synchronization
1151 * variable with two arguments: a pointer to the synchronization
1152 * handle and the passed-in argument cbdata.
1153 * If cb returns a non-zero value, iterations are terminated.
1155 #pragma weak td_ta_sync_iter = __td_ta_sync_iter
1156 td_err_e
1157 __td_ta_sync_iter(td_thragent_t *ta_p, td_sync_iter_f *cb, void *cbdata)
1159 struct ps_prochandle *ph_p;
1160 td_err_e return_val;
1161 int i;
1162 register_sync_t enable;
1163 psaddr_t next_desc;
1164 tdb_sync_stats_t sync_stats;
1165 td_synchandle_t synchandle;
1166 psaddr_t psaddr;
1167 void *vaddr;
1168 uint64_t *sync_addr_hash = NULL;
1170 if (cb == NULL)
1171 return (TD_ERR);
1172 if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
1173 return (return_val);
1174 if (ps_pstop(ph_p) != PS_OK) {
1175 ph_unlock(ta_p);
1176 return (TD_DBERR);
1178 if (ps_pdread(ph_p, ta_p->tdb_register_sync_addr,
1179 &enable, sizeof (enable)) != PS_OK) {
1180 return_val = TD_DBERR;
1181 goto out;
1183 if (enable != REGISTER_SYNC_ON)
1184 goto out;
1187 * First read the hash table.
1188 * The hash table is large; allocate with mmap().
1190 if ((vaddr = mmap(NULL, TDB_HASH_SIZE * sizeof (uint64_t),
1191 PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, (off_t)0))
1192 == MAP_FAILED) {
1193 return_val = TD_MALLOC;
1194 goto out;
1196 sync_addr_hash = vaddr;
1198 if (ta_p->model == PR_MODEL_NATIVE) {
1199 if (ps_pdread(ph_p, ta_p->uberdata_addr +
1200 offsetof(uberdata_t, tdb.tdb_sync_addr_hash),
1201 		    &psaddr, sizeof (psaddr)) != PS_OK) {
1202 return_val = TD_DBERR;
1203 goto out;
1205 } else {
1206 #ifdef _SYSCALL32
1207 caddr32_t addr;
1209 if (ps_pdread(ph_p, ta_p->uberdata_addr +
1210 offsetof(uberdata32_t, tdb.tdb_sync_addr_hash),
1211 &addr, sizeof (addr)) != PS_OK) {
1212 return_val = TD_DBERR;
1213 goto out;
1215 psaddr = addr;
1216 #else
1217 return_val = TD_ERR;
1218 goto out;
1219 #endif /* _SYSCALL32 */
1222 if (psaddr == 0)
1223 goto out;
1224 if (ps_pdread(ph_p, psaddr, sync_addr_hash,
1225 TDB_HASH_SIZE * sizeof (uint64_t)) != PS_OK) {
1226 return_val = TD_DBERR;
1227 goto out;
1231 * Now scan the hash table.
1233 for (i = 0; i < TDB_HASH_SIZE; i++) {
1234 for (next_desc = (psaddr_t)sync_addr_hash[i];
1235 next_desc != 0;
1236 next_desc = (psaddr_t)sync_stats.next) {
1237 if (ps_pdread(ph_p, next_desc,
1238 &sync_stats, sizeof (sync_stats)) != PS_OK) {
1239 return_val = TD_DBERR;
1240 goto out;
1242 if (sync_stats.un.type == TDB_NONE) {
1243 /* not registered since registration enabled */
1244 continue;
1246 synchandle.sh_ta_p = ta_p;
1247 synchandle.sh_unique = (psaddr_t)sync_stats.sync_addr;
1248 if ((*cb)(&synchandle, cbdata) != 0)
1249 goto out;
1253 out:
1254 if (sync_addr_hash != NULL)
1255 (void) munmap((void *)sync_addr_hash,
1256 TDB_HASH_SIZE * sizeof (uint64_t));
1257 (void) ps_pcontinue(ph_p);
1258 ph_unlock(ta_p);
1259 return (return_val);
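/*
 * Usage sketch: enumerate the registered synchronization objects.  The
 * walk only sees objects registered after tracking was enabled (see
 * td_ta_sync_tracking_enable() above).  Names are hypothetical.
 *
 *	static int
 *	print_sync_cb(const td_synchandle_t *sh, void *arg)
 *	{
 *		(void) printf("sync object at 0x%lx\n",
 *		    (unsigned long)sh->sh_unique);
 *		return (0);
 *	}
 *
 *	(void) td_ta_sync_iter(ta, print_sync_cb, NULL);
 */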
1263 * Enable process statistics collection.
1265 #pragma weak td_ta_enable_stats = __td_ta_enable_stats
1266 /* ARGSUSED */
1267 td_err_e
1268 __td_ta_enable_stats(const td_thragent_t *ta_p, int onoff)
1270 return (TD_NOCAPAB);
1274 * Reset process statistics.
1276 #pragma weak td_ta_reset_stats = __td_ta_reset_stats
1277 /* ARGSUSED */
1278 td_err_e
1279 __td_ta_reset_stats(const td_thragent_t *ta_p)
1281 return (TD_NOCAPAB);
1285 * Read process statistics.
1287 #pragma weak td_ta_get_stats = __td_ta_get_stats
1288 /* ARGSUSED */
1289 td_err_e
1290 __td_ta_get_stats(const td_thragent_t *ta_p, td_ta_stats_t *tstats)
1292 return (TD_NOCAPAB);
1296 * Transfer information from lwp struct to thread information struct.
1297 * XXX -- lots of this needs cleaning up.
1299 static void
1300 td_thr2to(td_thragent_t *ta_p, psaddr_t ts_addr,
1301 ulwp_t *ulwp, td_thrinfo_t *ti_p)
1303 lwpid_t lwpid;
1305 if ((lwpid = ulwp->ul_lwpid) == 0)
1306 lwpid = 1;
1307 (void) memset(ti_p, 0, sizeof (*ti_p));
1308 ti_p->ti_ta_p = ta_p;
1309 ti_p->ti_user_flags = ulwp->ul_usropts;
1310 ti_p->ti_tid = lwpid;
1311 ti_p->ti_exitval = ulwp->ul_rval;
1312 ti_p->ti_startfunc = (psaddr_t)ulwp->ul_startpc;
1313 if (!ulwp->ul_dead) {
1315 * The bloody fools got this backwards!
1317 ti_p->ti_stkbase = (psaddr_t)ulwp->ul_stktop;
1318 ti_p->ti_stksize = ulwp->ul_stksiz;
1320 ti_p->ti_ro_area = ts_addr;
1321 ti_p->ti_ro_size = ulwp->ul_replace?
1322 REPLACEMENT_SIZE : sizeof (ulwp_t);
1323 ti_p->ti_state = ulwp->ul_dead? TD_THR_ZOMBIE :
1324 ulwp->ul_stop? TD_THR_STOPPED :
1325 ulwp->ul_wchan? TD_THR_SLEEP :
1326 TD_THR_ACTIVE;
1327 ti_p->ti_db_suspended = 0;
1328 ti_p->ti_type = TD_THR_USER;
1329 ti_p->ti_sp = ulwp->ul_sp;
1330 ti_p->ti_flags = 0;
1331 ti_p->ti_pri = ulwp->ul_pri;
1332 ti_p->ti_lid = lwpid;
1333 if (!ulwp->ul_dead)
1334 ti_p->ti_sigmask = ulwp->ul_sigmask;
1335 ti_p->ti_traceme = 0;
1336 ti_p->ti_preemptflag = 0;
1337 ti_p->ti_pirecflag = 0;
1338 (void) sigemptyset(&ti_p->ti_pending);
1339 ti_p->ti_events = ulwp->ul_td_evbuf.eventmask;
1342 #if defined(_LP64) && defined(_SYSCALL32)
1343 static void
1344 td_thr2to32(td_thragent_t *ta_p, psaddr_t ts_addr,
1345 ulwp32_t *ulwp, td_thrinfo_t *ti_p)
1347 lwpid_t lwpid;
1349 if ((lwpid = ulwp->ul_lwpid) == 0)
1350 lwpid = 1;
1351 (void) memset(ti_p, 0, sizeof (*ti_p));
1352 ti_p->ti_ta_p = ta_p;
1353 ti_p->ti_user_flags = ulwp->ul_usropts;
1354 ti_p->ti_tid = lwpid;
1355 ti_p->ti_exitval = (void *)(uintptr_t)ulwp->ul_rval;
1356 ti_p->ti_startfunc = (psaddr_t)ulwp->ul_startpc;
1357 if (!ulwp->ul_dead) {
1359 * The bloody fools got this backwards!
1361 ti_p->ti_stkbase = (psaddr_t)ulwp->ul_stktop;
1362 ti_p->ti_stksize = ulwp->ul_stksiz;
1364 ti_p->ti_ro_area = ts_addr;
1365 ti_p->ti_ro_size = ulwp->ul_replace?
1366 REPLACEMENT_SIZE32 : sizeof (ulwp32_t);
1367 ti_p->ti_state = ulwp->ul_dead? TD_THR_ZOMBIE :
1368 ulwp->ul_stop? TD_THR_STOPPED :
1369 ulwp->ul_wchan? TD_THR_SLEEP :
1370 TD_THR_ACTIVE;
1371 ti_p->ti_db_suspended = 0;
1372 ti_p->ti_type = TD_THR_USER;
1373 ti_p->ti_sp = (uint32_t)ulwp->ul_sp;
1374 ti_p->ti_flags = 0;
1375 ti_p->ti_pri = ulwp->ul_pri;
1376 ti_p->ti_lid = lwpid;
1377 if (!ulwp->ul_dead)
1378 ti_p->ti_sigmask = *(sigset_t *)&ulwp->ul_sigmask;
1379 ti_p->ti_traceme = 0;
1380 ti_p->ti_preemptflag = 0;
1381 ti_p->ti_pirecflag = 0;
1382 (void) sigemptyset(&ti_p->ti_pending);
1383 ti_p->ti_events = ulwp->ul_td_evbuf.eventmask;
1385 #endif /* _SYSCALL32 */
1388 * Get thread information.
1390 #pragma weak td_thr_get_info = __td_thr_get_info
1391 td_err_e
1392 __td_thr_get_info(td_thrhandle_t *th_p, td_thrinfo_t *ti_p)
1394 struct ps_prochandle *ph_p;
1395 td_thragent_t *ta_p;
1396 td_err_e return_val;
1397 psaddr_t psaddr;
1399 if (ti_p == NULL)
1400 return (TD_ERR);
1401 (void) memset(ti_p, 0, sizeof (*ti_p));
1403 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1404 return (return_val);
1405 ta_p = th_p->th_ta_p;
1406 if (ps_pstop(ph_p) != PS_OK) {
1407 ph_unlock(ta_p);
1408 return (TD_DBERR);
1412 * Read the ulwp struct from the process.
1413 * Transfer the ulwp struct to the thread information struct.
1415 psaddr = th_p->th_unique;
1416 if (ta_p->model == PR_MODEL_NATIVE) {
1417 ulwp_t ulwp;
1419 if (ps_pdread(ph_p, psaddr, &ulwp, sizeof (ulwp)) != PS_OK &&
1420 ((void) memset(&ulwp, 0, sizeof (ulwp)),
1421 ps_pdread(ph_p, psaddr, &ulwp, REPLACEMENT_SIZE)) != PS_OK)
1422 return_val = TD_DBERR;
1423 else
1424 td_thr2to(ta_p, psaddr, &ulwp, ti_p);
1425 } else {
1426 #if defined(_LP64) && defined(_SYSCALL32)
1427 ulwp32_t ulwp;
1429 if (ps_pdread(ph_p, psaddr, &ulwp, sizeof (ulwp)) != PS_OK &&
1430 ((void) memset(&ulwp, 0, sizeof (ulwp)),
1431 ps_pdread(ph_p, psaddr, &ulwp, REPLACEMENT_SIZE32)) !=
1432 PS_OK)
1433 return_val = TD_DBERR;
1434 else
1435 td_thr2to32(ta_p, psaddr, &ulwp, ti_p);
1436 #else
1437 return_val = TD_ERR;
1438 #endif /* _SYSCALL32 */
1441 (void) ps_pcontinue(ph_p);
1442 ph_unlock(ta_p);
1443 return (return_val);
1447 * Given a process and an event number, return information about
1448 * an address in the process at which a breakpoint can be set
1449 * to monitor the event.
1451 #pragma weak td_ta_event_addr = __td_ta_event_addr
1452 td_err_e
1453 __td_ta_event_addr(td_thragent_t *ta_p, td_event_e event, td_notify_t *notify_p)
1455 if (ta_p == NULL)
1456 return (TD_BADTA);
1457 if (event < TD_MIN_EVENT_NUM || event > TD_MAX_EVENT_NUM)
1458 return (TD_NOEVENT);
1459 if (notify_p == NULL)
1460 return (TD_ERR);
1462 notify_p->type = NOTIFY_BPT;
1463 notify_p->u.bptaddr = ta_p->tdb_events[event - TD_MIN_EVENT_NUM];
1465 return (TD_OK);
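/*
 * Usage sketch: fetch the notification address for thread creation so the
 * debugger can plant a breakpoint there.  How the breakpoint is actually
 * set is up to the client and is not shown; "ta" is hypothetical.
 *
 *	td_notify_t notify;
 *
 *	if (td_ta_event_addr(ta, TD_CREATE, &notify) == TD_OK &&
 *	    notify.type == NOTIFY_BPT)
 *		... set a breakpoint at notify.u.bptaddr ...
 */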
1469 * Add the events in eventset 2 to eventset 1.
1471 static void
1472 eventsetaddset(td_thr_events_t *event1_p, td_thr_events_t *event2_p)
1474 int i;
1476 for (i = 0; i < TD_EVENTSIZE; i++)
1477 event1_p->event_bits[i] |= event2_p->event_bits[i];
1481 * Delete the events in eventset 2 from eventset 1.
1483 static void
1484 eventsetdelset(td_thr_events_t *event1_p, td_thr_events_t *event2_p)
1486 int i;
1488 for (i = 0; i < TD_EVENTSIZE; i++)
1489 event1_p->event_bits[i] &= ~event2_p->event_bits[i];
1493 * Either add or delete the given event set from a thread's event mask.
1495 static td_err_e
1496 mod_eventset(td_thrhandle_t *th_p, td_thr_events_t *events, int onoff)
1498 struct ps_prochandle *ph_p;
1499 td_err_e return_val = TD_OK;
1500 char enable;
1501 td_thr_events_t evset;
1502 psaddr_t psaddr_evset;
1503 psaddr_t psaddr_enab;
1505 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1506 return (return_val);
1507 if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
1508 ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
1509 psaddr_evset = (psaddr_t)&ulwp->ul_td_evbuf.eventmask;
1510 psaddr_enab = (psaddr_t)&ulwp->ul_td_events_enable;
1511 } else {
1512 #if defined(_LP64) && defined(_SYSCALL32)
1513 ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
1514 psaddr_evset = (psaddr_t)&ulwp->ul_td_evbuf.eventmask;
1515 psaddr_enab = (psaddr_t)&ulwp->ul_td_events_enable;
1516 #else
1517 ph_unlock(th_p->th_ta_p);
1518 return (TD_ERR);
1519 #endif /* _SYSCALL32 */
1521 if (ps_pstop(ph_p) != PS_OK) {
1522 ph_unlock(th_p->th_ta_p);
1523 return (TD_DBERR);
1526 if (ps_pdread(ph_p, psaddr_evset, &evset, sizeof (evset)) != PS_OK)
1527 return_val = TD_DBERR;
1528 else {
1529 if (onoff)
1530 eventsetaddset(&evset, events);
1531 else
1532 eventsetdelset(&evset, events);
1533 if (ps_pdwrite(ph_p, psaddr_evset, &evset, sizeof (evset))
1534 != PS_OK)
1535 return_val = TD_DBERR;
1536 else {
1537 enable = 0;
1538 if (td_eventismember(&evset, TD_EVENTS_ENABLE))
1539 enable = 1;
1540 if (ps_pdwrite(ph_p, psaddr_enab,
1541 &enable, sizeof (enable)) != PS_OK)
1542 return_val = TD_DBERR;
1546 (void) ps_pcontinue(ph_p);
1547 ph_unlock(th_p->th_ta_p);
1548 return (return_val);
1552 * Enable or disable tracing for a given thread. Tracing
1553 * is filtered based on the event mask of each thread. Tracing
1554 * can be turned on/off for the thread without changing the thread's
1555 * event mask.
1556 * Currently unused by dbx.
1558 #pragma weak td_thr_event_enable = __td_thr_event_enable
1559 td_err_e
1560 __td_thr_event_enable(td_thrhandle_t *th_p, int onoff)
1562 td_thr_events_t evset;
1564 td_event_emptyset(&evset);
1565 td_event_addset(&evset, TD_EVENTS_ENABLE);
1566 return (mod_eventset(th_p, &evset, onoff));
1570 * Set the event mask to enable the given events; the events are turned
1571 * on in the thread's event mask. If a thread encounters an event
1572 * for which its event mask is on, notification will be sent
1573 * to the debugger.
1574 * Addresses for each event are provided to the
1575 * debugger. It is assumed that a breakpoint of some type will
1576 * be placed at that address. If the event mask for the thread
1577 * is on, the instruction at the address will be executed.
1578 * Otherwise, the instruction will be skipped.
1580 #pragma weak td_thr_set_event = __td_thr_set_event
1581 td_err_e
1582 __td_thr_set_event(td_thrhandle_t *th_p, td_thr_events_t *events)
1584 return (mod_eventset(th_p, events, 1));
1588 * Enable or disable a set of events in the process-global event mask,
1589 * depending on the value of onoff.
1591 static td_err_e
1592 td_ta_mod_event(td_thragent_t *ta_p, td_thr_events_t *events, int onoff)
1594 struct ps_prochandle *ph_p;
1595 td_thr_events_t targ_eventset;
1596 td_err_e return_val;
1598 if ((ph_p = ph_lock_ta(ta_p, &return_val)) == NULL)
1599 return (return_val);
1600 if (ps_pstop(ph_p) != PS_OK) {
1601 ph_unlock(ta_p);
1602 return (TD_DBERR);
1604 if (ps_pdread(ph_p, ta_p->tdb_eventmask_addr,
1605 &targ_eventset, sizeof (targ_eventset)) != PS_OK)
1606 return_val = TD_DBERR;
1607 else {
1608 if (onoff)
1609 eventsetaddset(&targ_eventset, events);
1610 else
1611 eventsetdelset(&targ_eventset, events);
1612 if (ps_pdwrite(ph_p, ta_p->tdb_eventmask_addr,
1613 &targ_eventset, sizeof (targ_eventset)) != PS_OK)
1614 return_val = TD_DBERR;
1616 (void) ps_pcontinue(ph_p);
1617 ph_unlock(ta_p);
1618 return (return_val);
1622 * Enable a set of events in the process-global event mask.
1624 #pragma weak td_ta_set_event = __td_ta_set_event
1625 td_err_e
1626 __td_ta_set_event(td_thragent_t *ta_p, td_thr_events_t *events)
1628 return (td_ta_mod_event(ta_p, events, 1));
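/*
 * Usage sketch: enable thread-creation and thread-death reporting in the
 * process-global event mask.  td_event_emptyset() and td_event_addset()
 * are macros from thread_db.h; "ta" is hypothetical.
 *
 *	td_thr_events_t events;
 *
 *	td_event_emptyset(&events);
 *	td_event_addset(&events, TD_CREATE);
 *	td_event_addset(&events, TD_DEATH);
 *	if (td_ta_set_event(ta, &events) != TD_OK)
 *		... report the failure ...
 */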
1632 * Set event mask to disable the given event set; these events are cleared
1633 * from the event mask of the thread. Events that occur for a thread
1634 * with the event masked off will not cause notification to be
1635 * sent to the debugger (see td_thr_set_event for fuller description).
1637 #pragma weak td_thr_clear_event = __td_thr_clear_event
1638 td_err_e
1639 __td_thr_clear_event(td_thrhandle_t *th_p, td_thr_events_t *events)
1641 return (mod_eventset(th_p, events, 0));
1645 * Disable a set of events in the process-global event mask.
1647 #pragma weak td_ta_clear_event = __td_ta_clear_event
1648 td_err_e
1649 __td_ta_clear_event(td_thragent_t *ta_p, td_thr_events_t *events)
1651 return (td_ta_mod_event(ta_p, events, 0));
1655 * This function returns the most recent event message, if any,
1656 * associated with a thread. Given a thread handle, return the message
1657 * corresponding to the event encountered by the thread. Only one
1658 * message per thread is saved. Messages from earlier events are lost
1659 * when later events occur.
1661 #pragma weak td_thr_event_getmsg = __td_thr_event_getmsg
1662 td_err_e
1663 __td_thr_event_getmsg(td_thrhandle_t *th_p, td_event_msg_t *msg)
1665 struct ps_prochandle *ph_p;
1666 td_err_e return_val = TD_OK;
1667 psaddr_t psaddr;
1669 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1670 return (return_val);
1671 if (ps_pstop(ph_p) != PS_OK) {
1672 ph_unlock(th_p->th_ta_p);
1673 return (TD_BADTA);
1675 if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
1676 ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
1677 td_evbuf_t evbuf;
1679 psaddr = (psaddr_t)&ulwp->ul_td_evbuf;
1680 if (ps_pdread(ph_p, psaddr, &evbuf, sizeof (evbuf)) != PS_OK) {
1681 return_val = TD_DBERR;
1682 } else if (evbuf.eventnum == TD_EVENT_NONE) {
1683 return_val = TD_NOEVENT;
1684 } else {
1685 msg->event = evbuf.eventnum;
1686 msg->th_p = (td_thrhandle_t *)th_p;
1687 msg->msg.data = (uintptr_t)evbuf.eventdata;
1688 /* "Consume" the message */
1689 evbuf.eventnum = TD_EVENT_NONE;
1690 evbuf.eventdata = 0;
1691 if (ps_pdwrite(ph_p, psaddr, &evbuf, sizeof (evbuf))
1692 != PS_OK)
1693 return_val = TD_DBERR;
1695 } else {
1696 #if defined(_LP64) && defined(_SYSCALL32)
1697 ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
1698 td_evbuf32_t evbuf;
1700 psaddr = (psaddr_t)&ulwp->ul_td_evbuf;
1701 if (ps_pdread(ph_p, psaddr, &evbuf, sizeof (evbuf)) != PS_OK) {
1702 return_val = TD_DBERR;
1703 } else if (evbuf.eventnum == TD_EVENT_NONE) {
1704 return_val = TD_NOEVENT;
1705 } else {
1706 msg->event = evbuf.eventnum;
1707 msg->th_p = (td_thrhandle_t *)th_p;
1708 msg->msg.data = (uintptr_t)evbuf.eventdata;
1709 /* "Consume" the message */
1710 evbuf.eventnum = TD_EVENT_NONE;
1711 evbuf.eventdata = (uintptr_t)NULL;
1712 if (ps_pdwrite(ph_p, psaddr, &evbuf, sizeof (evbuf))
1713 != PS_OK)
1714 return_val = TD_DBERR;
1716 #else
1717 return_val = TD_ERR;
1718 #endif /* _SYSCALL32 */
1721 (void) ps_pcontinue(ph_p);
1722 ph_unlock(th_p->th_ta_p);
1723 return (return_val);
1727 * The callback function td_ta_event_getmsg uses when looking for
1728 * a thread with an event. A thin wrapper around td_thr_event_getmsg.
1730 static int
1731 event_msg_cb(const td_thrhandle_t *th_p, void *arg)
1733 static td_thrhandle_t th;
1734 td_event_msg_t *msg = arg;
1736 if (__td_thr_event_getmsg((td_thrhandle_t *)th_p, msg) == TD_OK) {
1738 * Got an event, stop iterating.
1740 * Because of past mistakes in interface definition,
1741 * we are forced to pass back a static local variable
1742 * for the thread handle because th_p is a pointer
1743 * to a local variable in __td_ta_thr_iter().
1744 * Grr...
1746 th = *th_p;
1747 msg->th_p = &th;
1748 return (1);
1750 return (0);
1754 * This function is just like td_thr_event_getmsg, except that it is
1755 * passed a process handle rather than a thread handle, and returns
1756 * an event message for some thread in the process that has an event
1757 * message pending. If no thread has an event message pending, this
1758 * routine returns TD_NOEVENT. Thus, all pending event messages may
1759 * be collected from a process by repeatedly calling this routine
1760 * until it returns TD_NOEVENT.
1762 #pragma weak td_ta_event_getmsg = __td_ta_event_getmsg
1763 td_err_e
1764 __td_ta_event_getmsg(td_thragent_t *ta_p, td_event_msg_t *msg)
1766 td_err_e return_val;
1768 if (ta_p == NULL)
1769 return (TD_BADTA);
1770 if (ta_p->ph_p == NULL)
1771 return (TD_BADPH);
1772 if (msg == NULL)
1773 return (TD_ERR);
1774 msg->event = TD_EVENT_NONE;
1775 if ((return_val = __td_ta_thr_iter(ta_p, event_msg_cb, msg,
1776 TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY, TD_SIGNO_MASK,
1777 TD_THR_ANY_USER_FLAGS)) != TD_OK)
1778 return (return_val);
1779 if (msg->event == TD_EVENT_NONE)
1780 return (TD_NOEVENT);
1781 return (TD_OK);
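/*
 * Usage sketch: drain all pending event messages after the target stops,
 * as the comment above suggests.  "ta" is hypothetical.
 *
 *	td_event_msg_t msg;
 *	td_err_e err;
 *
 *	while ((err = td_ta_event_getmsg(ta, &msg)) == TD_OK) {
 *		if (msg.event == TD_CREATE)
 *			... note the new thread via msg.th_p ...
 *	}
 *	if (err != TD_NOEVENT)
 *		... report the error ...
 */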
1784 static lwpid_t
1785 thr_to_lwpid(const td_thrhandle_t *th_p)
1787 struct ps_prochandle *ph_p = th_p->th_ta_p->ph_p;
1788 lwpid_t lwpid;
1791 * The caller holds the prochandle lock
1792 * and has already verified everything.
1794 if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
1795 ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
1797 if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_lwpid,
1798 &lwpid, sizeof (lwpid)) != PS_OK)
1799 lwpid = 0;
1800 else if (lwpid == 0)
1801 lwpid = 1;
1802 } else {
1803 #if defined(_LP64) && defined(_SYSCALL32)
1804 ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
1806 if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_lwpid,
1807 &lwpid, sizeof (lwpid)) != PS_OK)
1808 lwpid = 0;
1809 else if (lwpid == 0)
1810 lwpid = 1;
1811 #else
1812 lwpid = 0;
1813 #endif /* _SYSCALL32 */
1816 return (lwpid);
1820 * Suspend a thread.
1821 * XXX: What does this mean in a one-level model?
1823 #pragma weak td_thr_dbsuspend = __td_thr_dbsuspend
1824 td_err_e
1825 __td_thr_dbsuspend(const td_thrhandle_t *th_p)
1827 struct ps_prochandle *ph_p;
1828 td_err_e return_val;
1830 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1831 return (return_val);
1832 if (ps_lstop(ph_p, thr_to_lwpid(th_p)) != PS_OK)
1833 return_val = TD_DBERR;
1834 ph_unlock(th_p->th_ta_p);
1835 return (return_val);
1839 * Resume a suspended thread.
1840 * XXX: What does this mean in a one-level model?
1842 #pragma weak td_thr_dbresume = __td_thr_dbresume
1843 td_err_e
1844 __td_thr_dbresume(const td_thrhandle_t *th_p)
1846 struct ps_prochandle *ph_p;
1847 td_err_e return_val;
1849 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1850 return (return_val);
1851 if (ps_lcontinue(ph_p, thr_to_lwpid(th_p)) != PS_OK)
1852 return_val = TD_DBERR;
1853 ph_unlock(th_p->th_ta_p);
1854 return (return_val);
1858 * Set a thread's signal mask.
1859 * Currently unused by dbx.
1861 #pragma weak td_thr_sigsetmask = __td_thr_sigsetmask
1862 /* ARGSUSED */
1863 td_err_e
1864 __td_thr_sigsetmask(const td_thrhandle_t *th_p, const sigset_t ti_sigmask)
1866 return (TD_NOCAPAB);
1870 * Set a thread's "signals-pending" set.
1871 * Currently unused by dbx.
1873 #pragma weak td_thr_setsigpending = __td_thr_setsigpending
1874 /* ARGSUSED */
1875 td_err_e
1876 __td_thr_setsigpending(const td_thrhandle_t *th_p,
1877 uchar_t ti_pending_flag, const sigset_t ti_pending)
1879 return (TD_NOCAPAB);
1883 * Get a thread's general register set.
1885 #pragma weak td_thr_getgregs = __td_thr_getgregs
1886 td_err_e
1887 __td_thr_getgregs(td_thrhandle_t *th_p, prgregset_t regset)
1889 struct ps_prochandle *ph_p;
1890 td_err_e return_val;
1892 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1893 return (return_val);
1894 if (ps_pstop(ph_p) != PS_OK) {
1895 ph_unlock(th_p->th_ta_p);
1896 return (TD_DBERR);
1899 if (ps_lgetregs(ph_p, thr_to_lwpid(th_p), regset) != PS_OK)
1900 return_val = TD_DBERR;
1902 (void) ps_pcontinue(ph_p);
1903 ph_unlock(th_p->th_ta_p);
1904 return (return_val);
1908 * Set a thread's general register set.
1910 #pragma weak td_thr_setgregs = __td_thr_setgregs
1911 td_err_e
1912 __td_thr_setgregs(td_thrhandle_t *th_p, const prgregset_t regset)
1914 struct ps_prochandle *ph_p;
1915 td_err_e return_val;
1917 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1918 return (return_val);
1919 if (ps_pstop(ph_p) != PS_OK) {
1920 ph_unlock(th_p->th_ta_p);
1921 return (TD_DBERR);
1924 if (ps_lsetregs(ph_p, thr_to_lwpid(th_p), regset) != PS_OK)
1925 return_val = TD_DBERR;
1927 (void) ps_pcontinue(ph_p);
1928 ph_unlock(th_p->th_ta_p);
1929 return (return_val);
1933 * Get a thread's floating-point register set.
1935 #pragma weak td_thr_getfpregs = __td_thr_getfpregs
1936 td_err_e
1937 __td_thr_getfpregs(td_thrhandle_t *th_p, prfpregset_t *fpregset)
1939 struct ps_prochandle *ph_p;
1940 td_err_e return_val;
1942 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1943 return (return_val);
1944 if (ps_pstop(ph_p) != PS_OK) {
1945 ph_unlock(th_p->th_ta_p);
1946 return (TD_DBERR);
1949 if (ps_lgetfpregs(ph_p, thr_to_lwpid(th_p), fpregset) != PS_OK)
1950 return_val = TD_DBERR;
1952 (void) ps_pcontinue(ph_p);
1953 ph_unlock(th_p->th_ta_p);
1954 return (return_val);
1958 * Set a thread's floating-point register set.
1960 #pragma weak td_thr_setfpregs = __td_thr_setfpregs
1961 td_err_e
1962 __td_thr_setfpregs(td_thrhandle_t *th_p, const prfpregset_t *fpregset)
1964 struct ps_prochandle *ph_p;
1965 td_err_e return_val;
1967 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
1968 return (return_val);
1969 if (ps_pstop(ph_p) != PS_OK) {
1970 ph_unlock(th_p->th_ta_p);
1971 return (TD_DBERR);
1974 if (ps_lsetfpregs(ph_p, thr_to_lwpid(th_p), fpregset) != PS_OK)
1975 return_val = TD_DBERR;
1977 (void) ps_pcontinue(ph_p);
1978 ph_unlock(th_p->th_ta_p);
1979 return (return_val);
1983 * Get the size of the extra state register set for this architecture.
1984 * Currently unused by dbx.
1986 #pragma weak td_thr_getxregsize = __td_thr_getxregsize
1987 /* ARGSUSED */
1988 td_err_e
1989 __td_thr_getxregsize(td_thrhandle_t *th_p, int *xregsize)
1991 return (TD_NOXREGS);
1995 * Get a thread's extra state register set.
1997 #pragma weak td_thr_getxregs = __td_thr_getxregs
1998 /* ARGSUSED */
1999 td_err_e
2000 __td_thr_getxregs(td_thrhandle_t *th_p, void *xregset)
2002 return (TD_NOXREGS);
2006 * Set a thread's extra state register set.
2008 #pragma weak td_thr_setxregs = __td_thr_setxregs
2009 /* ARGSUSED */
2010 td_err_e
2011 __td_thr_setxregs(td_thrhandle_t *th_p, const void *xregset)
2013 return (TD_NOXREGS);
2016 struct searcher {
2017 psaddr_t addr;
2018 int status;
2022 * Check the struct thread address in *th_p against the first
2023 * value in "data". If the value is found, set the second value
2024 * in "data" to 1 and return 1 to terminate iterations.
2025 * This function is used by td_thr_validate() to verify that
2026 * a thread handle is valid.
2028 static int
2029 td_searcher(const td_thrhandle_t *th_p, void *data)
2031 struct searcher *searcher_data = (struct searcher *)data;
2033 if (searcher_data->addr == th_p->th_unique) {
2034 searcher_data->status = 1;
2035 return (1);
2037 return (0);
2041 * Validate the thread handle. Check that
2042 * a thread exists in the thread agent/process that
2043 * corresponds to thread with handle *th_p.
2044 * Currently unused by dbx.
2046 #pragma weak td_thr_validate = __td_thr_validate
2047 td_err_e
2048 __td_thr_validate(const td_thrhandle_t *th_p)
2050 td_err_e return_val;
2051 struct searcher searcher_data = {0, 0};
2053 if (th_p == NULL)
2054 return (TD_BADTH);
2055 if (th_p->th_unique == 0 || th_p->th_ta_p == NULL)
2056 return (TD_BADTH);
2059 * LOCKING EXCEPTION - Locking is not required
2060 * here because no use of the thread agent is made (other
2061 * than the sanity check) and checking of the thread
2062 * agent will be done in __td_ta_thr_iter.
2065 searcher_data.addr = th_p->th_unique;
2066 return_val = __td_ta_thr_iter(th_p->th_ta_p,
2067 td_searcher, &searcher_data,
2068 TD_THR_ANY_STATE, TD_THR_LOWEST_PRIORITY,
2069 TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
2071 if (return_val == TD_OK && searcher_data.status == 0)
2072 return_val = TD_NOTHR;
2074 return (return_val);
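/*
 * Example (a hedged sketch, not part of this library): a client that
 * caches thread handles across stops can revalidate a handle before
 * using it and, on TD_NOTHR, re-derive it with td_ta_map_id2thr() or
 * drop it from the cache.  cached_handle_is_stale is just an
 * illustrative flag.
 *
 *	td_err_e err = td_thr_validate(&th);
 *
 *	if (err == TD_NOTHR)
 *		cached_handle_is_stale = 1;
 */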
2078 * Get a thread's private binding to a given thread-specific
2079 * data (TSD) key (see thr_getspecific(3T)). If the thread doesn't
2080 * have a binding for a particular key, then NULL is returned.
2082 #pragma weak td_thr_tsd = __td_thr_tsd
2083 td_err_e
2084 __td_thr_tsd(td_thrhandle_t *th_p, thread_key_t key, void **data_pp)
2086 struct ps_prochandle *ph_p;
2087 td_thragent_t *ta_p;
2088 td_err_e return_val;
2089 int maxkey;
2090 int nkey;
2091 psaddr_t tsd_paddr;
2093 if (data_pp == NULL)
2094 return (TD_ERR);
2095 *data_pp = NULL;
2096 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
2097 return (return_val);
2098 ta_p = th_p->th_ta_p;
2099 if (ps_pstop(ph_p) != PS_OK) {
2100 ph_unlock(ta_p);
2101 return (TD_DBERR);
2104 if (ta_p->model == PR_MODEL_NATIVE) {
2105 ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
2106 tsd_metadata_t tsdm;
2107 tsd_t stsd;
2109 if (ps_pdread(ph_p,
2110 ta_p->uberdata_addr + offsetof(uberdata_t, tsd_metadata),
2111 &tsdm, sizeof (tsdm)) != PS_OK)
2112 return_val = TD_DBERR;
2113 else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_stsd,
2114 &tsd_paddr, sizeof (tsd_paddr)) != PS_OK)
2115 return_val = TD_DBERR;
2116 else if (tsd_paddr != 0 &&
2117 ps_pdread(ph_p, tsd_paddr, &stsd, sizeof (stsd)) != PS_OK)
2118 return_val = TD_DBERR;
2119 else {
2120 maxkey = tsdm.tsdm_nused;
2121 nkey = tsd_paddr == 0 ? TSD_NFAST : stsd.tsd_nalloc;
2123 if (key < TSD_NFAST)
2124 tsd_paddr = (psaddr_t)&ulwp->ul_ftsd[0];
2126 } else {
2127 #if defined(_LP64) && defined(_SYSCALL32)
2128 ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
2129 tsd_metadata32_t tsdm;
2130 tsd32_t stsd;
2131 caddr32_t addr;
2133 if (ps_pdread(ph_p,
2134 ta_p->uberdata_addr + offsetof(uberdata32_t, tsd_metadata),
2135 &tsdm, sizeof (tsdm)) != PS_OK)
2136 return_val = TD_DBERR;
2137 else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_stsd,
2138 &addr, sizeof (addr)) != PS_OK)
2139 return_val = TD_DBERR;
2140 else if (addr != 0 &&
2141 ps_pdread(ph_p, addr, &stsd, sizeof (stsd)) != PS_OK)
2142 return_val = TD_DBERR;
2143 else {
2144 maxkey = tsdm.tsdm_nused;
2145 nkey = addr == 0 ? TSD_NFAST : stsd.tsd_nalloc;
2147 if (key < TSD_NFAST) {
2148 tsd_paddr = (psaddr_t)&ulwp->ul_ftsd[0];
2149 } else {
2150 tsd_paddr = addr;
2153 #else
2154 return_val = TD_ERR;
2155 #endif /* _SYSCALL32 */
2158 if (return_val == TD_OK && (key < 1 || key >= maxkey))
2159 return_val = TD_NOTSD;
2160 if (return_val != TD_OK || key >= nkey) {
2161 /* NULL has already been stored in data_pp */
2162 (void) ps_pcontinue(ph_p);
2163 ph_unlock(ta_p);
2164 return (return_val);
2168 * Read the value from the thread's tsd array.
2170 if (ta_p->model == PR_MODEL_NATIVE) {
2171 void *value;
2173 if (ps_pdread(ph_p, tsd_paddr + key * sizeof (void *),
2174 &value, sizeof (value)) != PS_OK)
2175 return_val = TD_DBERR;
2176 else
2177 *data_pp = value;
2178 #if defined(_LP64) && defined(_SYSCALL32)
2179 } else {
2180 caddr32_t value32;
2182 if (ps_pdread(ph_p, tsd_paddr + key * sizeof (caddr32_t),
2183 &value32, sizeof (value32)) != PS_OK)
2184 return_val = TD_DBERR;
2185 else
2186 *data_pp = (void *)(uintptr_t)value32;
2187 #endif /* _SYSCALL32 */
2190 (void) ps_pcontinue(ph_p);
2191 ph_unlock(ta_p);
2192 return (return_val);
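/*
 * Example (sketch, not part of this library): reading the value a
 * target thread has bound to a TSD key.  The handle th and the key
 * are assumed to have been obtained already (e.g. the key from
 * td_ta_tsd_iter()).
 *
 *	void *value;
 *
 *	if (td_thr_tsd(&th, key, &value) == TD_OK && value != NULL)
 *		(void) printf("key %d -> %p\n", (int)key, value);
 */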
2196 * Get the base address of a thread's thread local storage (TLS) block
2197 * for the module (executable or shared object) identified by 'moduleid'.
2199 #pragma weak td_thr_tlsbase = __td_thr_tlsbase
2200 td_err_e
2201 __td_thr_tlsbase(td_thrhandle_t *th_p, ulong_t moduleid, psaddr_t *base)
2203 struct ps_prochandle *ph_p;
2204 td_thragent_t *ta_p;
2205 td_err_e return_val;
2207 if (base == NULL)
2208 return (TD_ERR);
2209 *base = 0;
2210 if ((ph_p = ph_lock_th(th_p, &return_val)) == NULL)
2211 return (return_val);
2212 ta_p = th_p->th_ta_p;
2213 if (ps_pstop(ph_p) != PS_OK) {
2214 ph_unlock(ta_p);
2215 return (TD_DBERR);
2218 if (ta_p->model == PR_MODEL_NATIVE) {
2219 ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
2220 tls_metadata_t tls_metadata;
2221 TLS_modinfo tlsmod;
2222 tls_t tls;
2224 if (ps_pdread(ph_p,
2225 ta_p->uberdata_addr + offsetof(uberdata_t, tls_metadata),
2226 &tls_metadata, sizeof (tls_metadata)) != PS_OK)
2227 return_val = TD_DBERR;
2228 else if (moduleid >= tls_metadata.tls_modinfo.tls_size)
2229 return_val = TD_NOTLS;
2230 else if (ps_pdread(ph_p,
2231 (psaddr_t)((TLS_modinfo *)
2232 tls_metadata.tls_modinfo.tls_data + moduleid),
2233 &tlsmod, sizeof (tlsmod)) != PS_OK)
2234 return_val = TD_DBERR;
2235 else if (tlsmod.tm_memsz == 0)
2236 return_val = TD_NOTLS;
2237 else if (tlsmod.tm_flags & TM_FLG_STATICTLS)
2238 *base = (psaddr_t)ulwp - tlsmod.tm_stattlsoffset;
2239 else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_tls,
2240 &tls, sizeof (tls)) != PS_OK)
2241 return_val = TD_DBERR;
2242 else if (moduleid >= tls.tls_size)
2243 return_val = TD_TLSDEFER;
2244 else if (ps_pdread(ph_p,
2245 (psaddr_t)((tls_t *)tls.tls_data + moduleid),
2246 &tls, sizeof (tls)) != PS_OK)
2247 return_val = TD_DBERR;
2248 else if (tls.tls_size == 0)
2249 return_val = TD_TLSDEFER;
2250 else
2251 *base = (psaddr_t)tls.tls_data;
2252 } else {
2253 #if defined(_LP64) && defined(_SYSCALL32)
2254 ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
2255 tls_metadata32_t tls_metadata;
2256 TLS_modinfo32 tlsmod;
2257 tls32_t tls;
2259 if (ps_pdread(ph_p,
2260 ta_p->uberdata_addr + offsetof(uberdata32_t, tls_metadata),
2261 &tls_metadata, sizeof (tls_metadata)) != PS_OK)
2262 return_val = TD_DBERR;
2263 else if (moduleid >= tls_metadata.tls_modinfo.tls_size)
2264 return_val = TD_NOTLS;
2265 else if (ps_pdread(ph_p,
2266 (psaddr_t)((TLS_modinfo32 *)
2267 (uintptr_t)tls_metadata.tls_modinfo.tls_data + moduleid),
2268 &tlsmod, sizeof (tlsmod)) != PS_OK)
2269 return_val = TD_DBERR;
2270 else if (tlsmod.tm_memsz == 0)
2271 return_val = TD_NOTLS;
2272 else if (tlsmod.tm_flags & TM_FLG_STATICTLS)
2273 *base = (psaddr_t)ulwp - tlsmod.tm_stattlsoffset;
2274 else if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_tls,
2275 &tls, sizeof (tls)) != PS_OK)
2276 return_val = TD_DBERR;
2277 else if (moduleid >= tls.tls_size)
2278 return_val = TD_TLSDEFER;
2279 else if (ps_pdread(ph_p,
2280 (psaddr_t)((tls32_t *)(uintptr_t)tls.tls_data + moduleid),
2281 &tls, sizeof (tls)) != PS_OK)
2282 return_val = TD_DBERR;
2283 else if (tls.tls_size == 0)
2284 return_val = TD_TLSDEFER;
2285 else
2286 *base = (psaddr_t)tls.tls_data;
2287 #else
2288 return_val = TD_ERR;
2289 #endif /* _SYSCALL32 */
2292 (void) ps_pcontinue(ph_p);
2293 ph_unlock(ta_p);
2294 return (return_val);
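/*
 * Example (sketch, not part of this library): given a module id and
 * the offset of a __thread variable within that module's TLS block
 * (both assumed to have been obtained elsewhere, e.g. from the
 * run-time linker), the variable's address in the target is the block
 * base plus the offset.  TD_TLSDEFER means the thread has not yet
 * allocated that module's block.
 *
 *	psaddr_t base;
 *
 *	if (td_thr_tlsbase(&th, moduleid, &base) == TD_OK)
 *		var_addr = base + var_offset;
 */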
2298 * Change a thread's priority to the value specified by ti_pri.
2299 * Currently unused by dbx.
2301 #pragma weak td_thr_setprio = __td_thr_setprio
2302 /* ARGSUSED */
2303 td_err_e
2304 __td_thr_setprio(td_thrhandle_t *th_p, int ti_pri)
2306 return (TD_NOCAPAB);
2310 * This structure links td_thr_lockowner and the lowner_cb callback function.
2312 typedef struct {
2313 td_sync_iter_f *owner_cb;
2314 void *owner_cb_arg;
2315 td_thrhandle_t *th_p;
2316 } lowner_cb_ctl_t;
2318 static int
2319 lowner_cb(const td_synchandle_t *sh_p, void *arg)
2321 lowner_cb_ctl_t *ocb = arg;
2322 int trunc = 0;
2323 union {
2324 rwlock_t rwl;
2325 mutex_t mx;
2326 } rw_m;
2328 if (ps_pdread(sh_p->sh_ta_p->ph_p, sh_p->sh_unique,
2329 &rw_m, sizeof (rw_m)) != PS_OK) {
2330 trunc = 1;
2331 if (ps_pdread(sh_p->sh_ta_p->ph_p, sh_p->sh_unique,
2332 &rw_m.mx, sizeof (rw_m.mx)) != PS_OK)
2333 return (0);
2335 if (rw_m.mx.mutex_magic == MUTEX_MAGIC &&
2336 rw_m.mx.mutex_owner == ocb->th_p->th_unique)
2337 return ((ocb->owner_cb)(sh_p, ocb->owner_cb_arg));
2338 if (!trunc && rw_m.rwl.magic == RWL_MAGIC) {
2339 mutex_t *rwlock = &rw_m.rwl.mutex;
2340 if (rwlock->mutex_owner == ocb->th_p->th_unique)
2341 return ((ocb->owner_cb)(sh_p, ocb->owner_cb_arg));
2343 return (0);
2347 * Iterate over the set of locks owned by a specified thread.
2348 * If cb returns a non-zero value, terminate iterations.
2350 #pragma weak td_thr_lockowner = __td_thr_lockowner
2351 td_err_e
2352 __td_thr_lockowner(const td_thrhandle_t *th_p, td_sync_iter_f *cb,
2353 void *cb_data)
2355 td_thragent_t *ta_p;
2356 td_err_e return_val;
2357 lowner_cb_ctl_t lcb;
2360 * Just sanity checks.
2362 if (ph_lock_th((td_thrhandle_t *)th_p, &return_val) == NULL)
2363 return (return_val);
2364 ta_p = th_p->th_ta_p;
2365 ph_unlock(ta_p);
2367 lcb.owner_cb = cb;
2368 lcb.owner_cb_arg = cb_data;
2369 lcb.th_p = (td_thrhandle_t *)th_p;
2370 return (__td_ta_sync_iter(ta_p, lowner_cb, &lcb));
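/*
 * Example (sketch, not part of this library): a callback suitable for
 * td_thr_lockowner() that prints the address of each lock the thread
 * owns; returning 0 keeps the iteration going.
 *
 *	static int
 *	print_owned(const td_synchandle_t *sh, void *arg)
 *	{
 *		(void) printf("owns sync object at 0x%lx\n",
 *		    (ulong_t)sh->sh_unique);
 *		return (0);
 *	}
 *
 *	(void) td_thr_lockowner(&th, print_owned, NULL);
 */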
2374 * If a thread is asleep on a synchronization variable,
2375 * then get the synchronization handle.
2377 #pragma weak td_thr_sleepinfo = __td_thr_sleepinfo
2378 td_err_e
2379 __td_thr_sleepinfo(const td_thrhandle_t *th_p, td_synchandle_t *sh_p)
2381 struct ps_prochandle *ph_p;
2382 td_err_e return_val = TD_OK;
2383 uintptr_t wchan;
2385 if (sh_p == NULL)
2386 return (TD_ERR);
2387 if ((ph_p = ph_lock_th((td_thrhandle_t *)th_p, &return_val)) == NULL)
2388 return (return_val);
2391 * No need to stop the process for a simple read.
2393 if (th_p->th_ta_p->model == PR_MODEL_NATIVE) {
2394 ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
2396 if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
2397 &wchan, sizeof (wchan)) != PS_OK)
2398 return_val = TD_DBERR;
2399 } else {
2400 #if defined(_LP64) && defined(_SYSCALL32)
2401 ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
2402 caddr32_t wchan32;
2404 if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
2405 &wchan32, sizeof (wchan32)) != PS_OK)
2406 return_val = TD_DBERR;
2407 wchan = wchan32;
2408 #else
2409 return_val = TD_ERR;
2410 #endif /* _SYSCALL32 */
2413 if (return_val != TD_OK || wchan == 0) {
2414 sh_p->sh_ta_p = NULL;
2415 sh_p->sh_unique = 0;
2416 if (return_val == TD_OK)
2417 return_val = TD_ERR;
2418 } else {
2419 sh_p->sh_ta_p = th_p->th_ta_p;
2420 sh_p->sh_unique = (psaddr_t)wchan;
2423 ph_unlock(th_p->th_ta_p);
2424 return (return_val);
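/*
 * Example (sketch, not part of this library): for a thread reported in
 * state TD_THR_SLEEP, fetch the object it is blocked on and describe
 * it with td_sync_get_info() (defined below).
 *
 *	td_synchandle_t sh;
 *	td_syncinfo_t si;
 *
 *	if (td_thr_sleepinfo(&th, &sh) == TD_OK &&
 *	    td_sync_get_info(&sh, &si) == TD_OK)
 *		(void) printf("sleeping on type %d at 0x%lx\n",
 *		    (int)si.si_type, (ulong_t)si.si_sv_addr);
 */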
2428 * Which thread is running on an lwp?
2430 #pragma weak td_ta_map_lwp2thr = __td_ta_map_lwp2thr
2431 td_err_e
2432 __td_ta_map_lwp2thr(td_thragent_t *ta_p, lwpid_t lwpid,
2433 td_thrhandle_t *th_p)
2435 return (__td_ta_map_id2thr(ta_p, lwpid, th_p));
2439 * Common code for td_sync_get_info() and td_sync_get_stats()
2441 static td_err_e
2442 sync_get_info_common(const td_synchandle_t *sh_p, struct ps_prochandle *ph_p,
2443 td_syncinfo_t *si_p)
2445 int trunc = 0;
2446 td_so_un_t generic_so;
2449 * Determine the sync. object type; a little type fudgery here.
2450 * First attempt to read the whole union. If that fails, attempt
2451 * to read just the condvar. A condvar is the smallest sync. object.
2453 if (ps_pdread(ph_p, sh_p->sh_unique,
2454 &generic_so, sizeof (generic_so)) != PS_OK) {
2455 trunc = 1;
2456 if (ps_pdread(ph_p, sh_p->sh_unique, &generic_so.condition,
2457 sizeof (generic_so.condition)) != PS_OK)
2458 return (TD_DBERR);
2461 switch (generic_so.condition.cond_magic) {
2462 case MUTEX_MAGIC:
2463 if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2464 &generic_so.lock, sizeof (generic_so.lock)) != PS_OK)
2465 return (TD_DBERR);
2466 si_p->si_type = TD_SYNC_MUTEX;
2467 si_p->si_shared_type =
2468 (generic_so.lock.mutex_type & USYNC_PROCESS);
2469 (void) memcpy(si_p->si_flags, &generic_so.lock.mutex_flag,
2470 sizeof (generic_so.lock.mutex_flag));
2471 si_p->si_state.mutex_locked =
2472 (generic_so.lock.mutex_lockw != 0);
2473 si_p->si_size = sizeof (generic_so.lock);
2474 si_p->si_has_waiters = generic_so.lock.mutex_waiters;
2475 si_p->si_rcount = generic_so.lock.mutex_rcount;
2476 si_p->si_prioceiling = generic_so.lock.mutex_ceiling;
2477 if (si_p->si_state.mutex_locked) {
2478 if (si_p->si_shared_type & USYNC_PROCESS)
2479 si_p->si_ownerpid =
2480 generic_so.lock.mutex_ownerpid;
2481 si_p->si_owner.th_ta_p = sh_p->sh_ta_p;
2482 si_p->si_owner.th_unique = generic_so.lock.mutex_owner;
2484 break;
2485 case COND_MAGIC:
2486 si_p->si_type = TD_SYNC_COND;
2487 si_p->si_shared_type =
2488 (generic_so.condition.cond_type & USYNC_PROCESS);
2489 (void) memcpy(si_p->si_flags, generic_so.condition.flags.flag,
2490 sizeof (generic_so.condition.flags.flag));
2491 si_p->si_size = sizeof (generic_so.condition);
2492 si_p->si_has_waiters =
2493 (generic_so.condition.cond_waiters_user |
2494 generic_so.condition.cond_waiters_kernel)? 1 : 0;
2495 break;
2496 case SEMA_MAGIC:
2497 if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2498 &generic_so.semaphore, sizeof (generic_so.semaphore))
2499 != PS_OK)
2500 return (TD_DBERR);
2501 si_p->si_type = TD_SYNC_SEMA;
2502 si_p->si_shared_type =
2503 (generic_so.semaphore.type & USYNC_PROCESS);
2504 si_p->si_state.sem_count = generic_so.semaphore.count;
2505 si_p->si_size = sizeof (generic_so.semaphore);
2506 si_p->si_has_waiters =
2507 ((lwp_sema_t *)&generic_so.semaphore)->flags[7];
2508 /* this is useless but the old interface provided it */
2509 si_p->si_data = (psaddr_t)generic_so.semaphore.count;
2510 break;
2511 case RWL_MAGIC:
2513 uint32_t rwstate;
2515 if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2516 &generic_so.rwlock, sizeof (generic_so.rwlock)) != PS_OK)
2517 return (TD_DBERR);
2518 si_p->si_type = TD_SYNC_RWLOCK;
2519 si_p->si_shared_type =
2520 (generic_so.rwlock.rwlock_type & USYNC_PROCESS);
2521 si_p->si_size = sizeof (generic_so.rwlock);
2523 rwstate = (uint32_t)generic_so.rwlock.rwlock_readers;
2524 if (rwstate & URW_WRITE_LOCKED) {
2525 si_p->si_state.nreaders = -1;
2526 si_p->si_is_wlock = 1;
2527 si_p->si_owner.th_ta_p = sh_p->sh_ta_p;
2528 si_p->si_owner.th_unique =
2529 generic_so.rwlock.rwlock_owner;
2530 if (si_p->si_shared_type & USYNC_PROCESS)
2531 si_p->si_ownerpid =
2532 generic_so.rwlock.rwlock_ownerpid;
2533 } else {
2534 si_p->si_state.nreaders = (rwstate & URW_READERS_MASK);
2536 si_p->si_has_waiters = ((rwstate & URW_HAS_WAITERS) != 0);
2538 /* this is useless but the old interface provided it */
2539 si_p->si_data = (psaddr_t)generic_so.rwlock.readers;
2540 break;
2542 default:
2543 return (TD_BADSH);
2546 si_p->si_ta_p = sh_p->sh_ta_p;
2547 si_p->si_sv_addr = sh_p->sh_unique;
2548 return (TD_OK);
2552 * Given a synchronization handle, fill in the
2553 * information for the synchronization variable into *si_p.
2555 #pragma weak td_sync_get_info = __td_sync_get_info
2556 td_err_e
2557 __td_sync_get_info(const td_synchandle_t *sh_p, td_syncinfo_t *si_p)
2559 struct ps_prochandle *ph_p;
2560 td_err_e return_val;
2562 if (si_p == NULL)
2563 return (TD_ERR);
2564 (void) memset(si_p, 0, sizeof (*si_p));
2565 if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
2566 return (return_val);
2567 if (ps_pstop(ph_p) != PS_OK) {
2568 ph_unlock(sh_p->sh_ta_p);
2569 return (TD_DBERR);
2572 return_val = sync_get_info_common(sh_p, ph_p, si_p);
2574 (void) ps_pcontinue(ph_p);
2575 ph_unlock(sh_p->sh_ta_p);
2576 return (return_val);
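/*
 * Example (sketch, not part of this library): td_sync_get_info() is
 * typically driven from a td_ta_sync_iter() callback to summarize
 * every registered synchronization object in the process, e.g. to
 * report held mutexes and their owners.
 *
 *	static int
 *	show_sync(const td_synchandle_t *sh, void *arg)
 *	{
 *		td_syncinfo_t si;
 *
 *		if (td_sync_get_info(sh, &si) == TD_OK &&
 *		    si.si_type == TD_SYNC_MUTEX &&
 *		    si.si_state.mutex_locked)
 *			(void) printf("mutex 0x%lx held by 0x%lx\n",
 *			    (ulong_t)si.si_sv_addr,
 *			    (ulong_t)si.si_owner.th_unique);
 *		return (0);
 *	}
 */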
2579 static uint_t
2580 tdb_addr_hash64(uint64_t addr)
2582 uint64_t value60 = (addr >> 4);
2583 uint32_t value30 = (value60 >> 30) ^ (value60 & 0x3fffffff);
2584 return ((value30 >> 15) ^ (value30 & 0x7fff));
2587 static uint_t
2588 tdb_addr_hash32(uint64_t addr)
2590 uint32_t value30 = (addr >> 2); /* 30 bits */
2591 return ((value30 >> 15) ^ (value30 & 0x7fff));
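/*
 * Both hash functions above drop the synch object's low (always-zero)
 * alignment bits and then repeatedly fold the remaining address in
 * half with XOR until 15 bits are left, so the result can index a
 * table of up to 2^15 buckets of tdb_sync_stats_t chains.
 */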
2594 static td_err_e
2595 read_sync_stats(td_thragent_t *ta_p, psaddr_t hash_table,
2596 psaddr_t sync_obj_addr, tdb_sync_stats_t *sync_stats)
2598 psaddr_t next_desc;
2599 uint64_t first;
2600 uint_t ix;
2603 * Compute the hash table index from the synch object's address.
2605 if (ta_p->model == PR_MODEL_LP64)
2606 ix = tdb_addr_hash64(sync_obj_addr);
2607 else
2608 ix = tdb_addr_hash32(sync_obj_addr);
2611 * Get the address of the first element in the linked list.
2613 if (ps_pdread(ta_p->ph_p, hash_table + ix * sizeof (uint64_t),
2614 &first, sizeof (first)) != PS_OK)
2615 return (TD_DBERR);
2618 * Search the linked list for an entry for the synch object.
2620 for (next_desc = (psaddr_t)first; next_desc != 0;
2621 next_desc = (psaddr_t)sync_stats->next) {
2622 if (ps_pdread(ta_p->ph_p, next_desc,
2623 sync_stats, sizeof (*sync_stats)) != PS_OK)
2624 return (TD_DBERR);
2625 if (sync_stats->sync_addr == sync_obj_addr)
2626 return (TD_OK);
2629 (void) memset(sync_stats, 0, sizeof (*sync_stats));
2630 return (TD_OK);
2634 * Given a synchronization handle, fill in the
2635 * statistics for the synchronization variable into *ss_p.
2637 #pragma weak td_sync_get_stats = __td_sync_get_stats
2638 td_err_e
2639 __td_sync_get_stats(const td_synchandle_t *sh_p, td_syncstats_t *ss_p)
2641 struct ps_prochandle *ph_p;
2642 td_thragent_t *ta_p;
2643 td_err_e return_val;
2644 register_sync_t enable;
2645 psaddr_t hashaddr;
2646 tdb_sync_stats_t sync_stats;
2647 size_t ix;
2649 if (ss_p == NULL)
2650 return (TD_ERR);
2651 (void) memset(ss_p, 0, sizeof (*ss_p));
2652 if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
2653 return (return_val);
2654 ta_p = sh_p->sh_ta_p;
2655 if (ps_pstop(ph_p) != PS_OK) {
2656 ph_unlock(ta_p);
2657 return (TD_DBERR);
2660 if ((return_val = sync_get_info_common(sh_p, ph_p, &ss_p->ss_info))
2661 != TD_OK) {
2662 if (return_val != TD_BADSH)
2663 goto out;
2664 /* we can correct TD_BADSH */
2665 (void) memset(&ss_p->ss_info, 0, sizeof (ss_p->ss_info));
2666 ss_p->ss_info.si_ta_p = sh_p->sh_ta_p;
2667 ss_p->ss_info.si_sv_addr = sh_p->sh_unique;
2668 /* we correct si_type and si_size below */
2669 return_val = TD_OK;
2671 if (ps_pdread(ph_p, ta_p->tdb_register_sync_addr,
2672 &enable, sizeof (enable)) != PS_OK) {
2673 return_val = TD_DBERR;
2674 goto out;
2676 if (enable != REGISTER_SYNC_ON)
2677 goto out;
2680 * Get the address of the hash table in the target process.
2682 if (ta_p->model == PR_MODEL_NATIVE) {
2683 if (ps_pdread(ph_p, ta_p->uberdata_addr +
2684 offsetof(uberdata_t, tdb.tdb_sync_addr_hash),
2685 &hashaddr, sizeof (hashaddr)) != PS_OK) {
2686 return_val = TD_DBERR;
2687 goto out;
2689 } else {
2690 #if defined(_LP64) && defined(_SYSCALL32)
2691 caddr32_t addr;
2693 if (ps_pdread(ph_p, ta_p->uberdata_addr +
2694 offsetof(uberdata32_t, tdb.tdb_sync_addr_hash),
2695 &addr, sizeof (addr)) != PS_OK) {
2696 return_val = TD_DBERR;
2697 goto out;
2699 hashaddr = addr;
2700 #else
2701 return_val = TD_ERR;
2702 goto out;
2703 #endif /* _SYSCALL32 */
2706 if (hashaddr == 0)
2707 return_val = TD_BADSH;
2708 else
2709 return_val = read_sync_stats(ta_p, hashaddr,
2710 sh_p->sh_unique, &sync_stats);
2711 if (return_val != TD_OK)
2712 goto out;
2715 * We have the hash table entry. Transfer the data to
2716 * the td_syncstats_t structure provided by the caller.
2718 switch (sync_stats.un.type) {
2719 case TDB_MUTEX:
2721 td_mutex_stats_t *msp = &ss_p->ss_un.mutex;
2723 ss_p->ss_info.si_type = TD_SYNC_MUTEX;
2724 ss_p->ss_info.si_size = sizeof (mutex_t);
2725 msp->mutex_lock =
2726 sync_stats.un.mutex.mutex_lock;
2727 msp->mutex_sleep =
2728 sync_stats.un.mutex.mutex_sleep;
2729 msp->mutex_sleep_time =
2730 sync_stats.un.mutex.mutex_sleep_time;
2731 msp->mutex_hold_time =
2732 sync_stats.un.mutex.mutex_hold_time;
2733 msp->mutex_try =
2734 sync_stats.un.mutex.mutex_try;
2735 msp->mutex_try_fail =
2736 sync_stats.un.mutex.mutex_try_fail;
2737 if (sync_stats.sync_addr >= ta_p->hash_table_addr &&
2738 (ix = sync_stats.sync_addr - ta_p->hash_table_addr)
2739 < ta_p->hash_size * sizeof (thr_hash_table_t))
2740 msp->mutex_internal =
2741 ix / sizeof (thr_hash_table_t) + 1;
2742 break;
2744 case TDB_COND:
2746 td_cond_stats_t *csp = &ss_p->ss_un.cond;
2748 ss_p->ss_info.si_type = TD_SYNC_COND;
2749 ss_p->ss_info.si_size = sizeof (cond_t);
2750 csp->cond_wait =
2751 sync_stats.un.cond.cond_wait;
2752 csp->cond_timedwait =
2753 sync_stats.un.cond.cond_timedwait;
2754 csp->cond_wait_sleep_time =
2755 sync_stats.un.cond.cond_wait_sleep_time;
2756 csp->cond_timedwait_sleep_time =
2757 sync_stats.un.cond.cond_timedwait_sleep_time;
2758 csp->cond_timedwait_timeout =
2759 sync_stats.un.cond.cond_timedwait_timeout;
2760 csp->cond_signal =
2761 sync_stats.un.cond.cond_signal;
2762 csp->cond_broadcast =
2763 sync_stats.un.cond.cond_broadcast;
2764 if (sync_stats.sync_addr >= ta_p->hash_table_addr &&
2765 (ix = sync_stats.sync_addr - ta_p->hash_table_addr)
2766 < ta_p->hash_size * sizeof (thr_hash_table_t))
2767 csp->cond_internal =
2768 ix / sizeof (thr_hash_table_t) + 1;
2769 break;
2771 case TDB_RWLOCK:
2773 td_rwlock_stats_t *rwsp = &ss_p->ss_un.rwlock;
2775 ss_p->ss_info.si_type = TD_SYNC_RWLOCK;
2776 ss_p->ss_info.si_size = sizeof (rwlock_t);
2777 rwsp->rw_rdlock =
2778 sync_stats.un.rwlock.rw_rdlock;
2779 rwsp->rw_rdlock_try =
2780 sync_stats.un.rwlock.rw_rdlock_try;
2781 rwsp->rw_rdlock_try_fail =
2782 sync_stats.un.rwlock.rw_rdlock_try_fail;
2783 rwsp->rw_wrlock =
2784 sync_stats.un.rwlock.rw_wrlock;
2785 rwsp->rw_wrlock_hold_time =
2786 sync_stats.un.rwlock.rw_wrlock_hold_time;
2787 rwsp->rw_wrlock_try =
2788 sync_stats.un.rwlock.rw_wrlock_try;
2789 rwsp->rw_wrlock_try_fail =
2790 sync_stats.un.rwlock.rw_wrlock_try_fail;
2791 break;
2793 case TDB_SEMA:
2795 td_sema_stats_t *ssp = &ss_p->ss_un.sema;
2797 ss_p->ss_info.si_type = TD_SYNC_SEMA;
2798 ss_p->ss_info.si_size = sizeof (sema_t);
2799 ssp->sema_wait =
2800 sync_stats.un.sema.sema_wait;
2801 ssp->sema_wait_sleep =
2802 sync_stats.un.sema.sema_wait_sleep;
2803 ssp->sema_wait_sleep_time =
2804 sync_stats.un.sema.sema_wait_sleep_time;
2805 ssp->sema_trywait =
2806 sync_stats.un.sema.sema_trywait;
2807 ssp->sema_trywait_fail =
2808 sync_stats.un.sema.sema_trywait_fail;
2809 ssp->sema_post =
2810 sync_stats.un.sema.sema_post;
2811 ssp->sema_max_count =
2812 sync_stats.un.sema.sema_max_count;
2813 ssp->sema_min_count =
2814 sync_stats.un.sema.sema_min_count;
2815 break;
2817 default:
2818 return_val = TD_BADSH;
2819 break;
2822 out:
2823 (void) ps_pcontinue(ph_p);
2824 ph_unlock(ta_p);
2825 return (return_val);
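/*
 * Example (sketch, not part of this library; ta is assumed to be the
 * agent handle from td_ta_new()): statistics gathering must first be
 * enabled in the target with td_ta_enable_stats(), after which the
 * per-object counters can be read back.
 *
 *	td_syncstats_t ss;
 *
 *	(void) td_ta_enable_stats(ta, 1);
 *	... let the target run for a while, then stop it ...
 *	if (td_sync_get_stats(&sh, &ss) == TD_OK &&
 *	    ss.ss_info.si_type == TD_SYNC_MUTEX)
 *		(void) printf("%llu sleeps\n",
 *		    (u_longlong_t)ss.ss_un.mutex.mutex_sleep);
 */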
2829 * Change the state of a synchronization variable:
2830 * 1) a mutex's lock state is set to value
2831 * 2) a semaphore's count is set to value
2832 * 3) a rwlock is write-locked when value < 0
2833 * 4) a rwlock's number of readers is set to value >= 0
2834 * Currently unused by dbx.
2836 #pragma weak td_sync_setstate = __td_sync_setstate
2837 td_err_e
2838 __td_sync_setstate(const td_synchandle_t *sh_p, long lvalue)
2840 struct ps_prochandle *ph_p;
2841 int trunc = 0;
2842 td_err_e return_val;
2843 td_so_un_t generic_so;
2844 uint32_t *rwstate;
2845 int value = (int)lvalue;
2847 if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
2848 return (return_val);
2849 if (ps_pstop(ph_p) != PS_OK) {
2850 ph_unlock(sh_p->sh_ta_p);
2851 return (TD_DBERR);
2855 * Read the synch. variable information.
2856 * First attempt to read the whole union and if that fails
2857 * fall back to reading only the smallest member, the condvar.
2859 if (ps_pdread(ph_p, sh_p->sh_unique, &generic_so,
2860 sizeof (generic_so)) != PS_OK) {
2861 trunc = 1;
2862 if (ps_pdread(ph_p, sh_p->sh_unique, &generic_so.condition,
2863 sizeof (generic_so.condition)) != PS_OK) {
2864 (void) ps_pcontinue(ph_p);
2865 ph_unlock(sh_p->sh_ta_p);
2866 return (TD_DBERR);
2871 * Read the synch. variable information from the process,
2872 * set the new value in it, and write it back.
2874 switch (generic_so.condition.mutex_magic) {
2875 case MUTEX_MAGIC:
2876 if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2877 &generic_so.lock, sizeof (generic_so.lock)) != PS_OK) {
2878 return_val = TD_DBERR;
2879 break;
2881 generic_so.lock.mutex_lockw = (uint8_t)value;
2882 if (ps_pdwrite(ph_p, sh_p->sh_unique, &generic_so.lock,
2883 sizeof (generic_so.lock)) != PS_OK)
2884 return_val = TD_DBERR;
2885 break;
2886 case SEMA_MAGIC:
2887 if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2888 &generic_so.semaphore, sizeof (generic_so.semaphore))
2889 != PS_OK) {
2890 return_val = TD_DBERR;
2891 break;
2893 generic_so.semaphore.count = value;
2894 if (ps_pdwrite(ph_p, sh_p->sh_unique, &generic_so.semaphore,
2895 sizeof (generic_so.semaphore)) != PS_OK)
2896 return_val = TD_DBERR;
2897 break;
2898 case COND_MAGIC:
2899 /* Operation not supported on a condition variable */
2900 return_val = TD_ERR;
2901 break;
2902 case RWL_MAGIC:
2903 if (trunc && ps_pdread(ph_p, sh_p->sh_unique,
2904 &generic_so.rwlock, sizeof (generic_so.rwlock)) != PS_OK) {
2905 return_val = TD_DBERR;
2906 break;
2908 rwstate = (uint32_t *)&generic_so.rwlock.readers;
2909 *rwstate &= URW_HAS_WAITERS;
2910 if (value < 0)
2911 *rwstate |= URW_WRITE_LOCKED;
2912 else
2913 *rwstate |= (value & URW_READERS_MASK);
2914 if (ps_pdwrite(ph_p, sh_p->sh_unique, &generic_so.rwlock,
2915 sizeof (generic_so.rwlock)) != PS_OK)
2916 return_val = TD_DBERR;
2917 break;
2918 default:
2919 /* Bad sync. object type */
2920 return_val = TD_BADSH;
2921 break;
2924 (void) ps_pcontinue(ph_p);
2925 ph_unlock(sh_p->sh_ta_p);
2926 return (return_val);
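/*
 * Example (sketch, not part of this library; mutex_sh and sema_sh are
 * assumed handles): a debugger can mark a wedged mutex unlocked or
 * reset a semaphore's count.  No waiters are woken by this; only the
 * user-level state words examined above are rewritten.
 *
 *	(void) td_sync_setstate(&mutex_sh, 0);
 *	(void) td_sync_setstate(&sema_sh, 3);
 */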
2929 typedef struct {
2930 td_thr_iter_f *waiter_cb;
2931 psaddr_t sync_obj_addr;
2932 uint16_t sync_magic;
2933 void *waiter_cb_arg;
2934 td_err_e errcode;
2935 } waiter_cb_ctl_t;
2937 static int
2938 waiters_cb(const td_thrhandle_t *th_p, void *arg)
2940 td_thragent_t *ta_p = th_p->th_ta_p;
2941 struct ps_prochandle *ph_p = ta_p->ph_p;
2942 waiter_cb_ctl_t *wcb = arg;
2943 caddr_t wchan;
2945 if (ta_p->model == PR_MODEL_NATIVE) {
2946 ulwp_t *ulwp = (ulwp_t *)th_p->th_unique;
2948 if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
2949 &wchan, sizeof (wchan)) != PS_OK) {
2950 wcb->errcode = TD_DBERR;
2951 return (1);
2953 } else {
2954 #if defined(_LP64) && defined(_SYSCALL32)
2955 ulwp32_t *ulwp = (ulwp32_t *)th_p->th_unique;
2956 caddr32_t wchan32;
2958 if (ps_pdread(ph_p, (psaddr_t)&ulwp->ul_wchan,
2959 &wchan32, sizeof (wchan32)) != PS_OK) {
2960 wcb->errcode = TD_DBERR;
2961 return (1);
2963 wchan = (caddr_t)(uintptr_t)wchan32;
2964 #else
2965 wcb->errcode = TD_ERR;
2966 return (1);
2967 #endif /* _SYSCALL32 */
2970 if (wchan == NULL)
2971 return (0);
2973 if (wchan == (caddr_t)wcb->sync_obj_addr)
2974 return ((*wcb->waiter_cb)(th_p, wcb->waiter_cb_arg));
2976 return (0);
2980 * For a given synchronization variable, iterate over the
2981 * set of waiting threads. The callback function is passed
2982 * two parameters: a pointer to a thread handle and a pointer
2983 * to extra callback data.
2985 #pragma weak td_sync_waiters = __td_sync_waiters
2986 td_err_e
2987 __td_sync_waiters(const td_synchandle_t *sh_p, td_thr_iter_f *cb, void *cb_data)
2989 struct ps_prochandle *ph_p;
2990 waiter_cb_ctl_t wcb;
2991 td_err_e return_val;
2993 if ((ph_p = ph_lock_sh(sh_p, &return_val)) == NULL)
2994 return (return_val);
2995 if (ps_pdread(ph_p,
2996 (psaddr_t)&((mutex_t *)sh_p->sh_unique)->mutex_magic,
2997 (caddr_t)&wcb.sync_magic, sizeof (wcb.sync_magic)) != PS_OK) {
2998 ph_unlock(sh_p->sh_ta_p);
2999 return (TD_DBERR);
3001 ph_unlock(sh_p->sh_ta_p);
3003 switch (wcb.sync_magic) {
3004 case MUTEX_MAGIC:
3005 case COND_MAGIC:
3006 case SEMA_MAGIC:
3007 case RWL_MAGIC:
3008 break;
3009 default:
3010 return (TD_BADSH);
3013 wcb.waiter_cb = cb;
3014 wcb.sync_obj_addr = sh_p->sh_unique;
3015 wcb.waiter_cb_arg = cb_data;
3016 wcb.errcode = TD_OK;
3017 return_val = __td_ta_thr_iter(sh_p->sh_ta_p, waiters_cb, &wcb,
3018 TD_THR_SLEEP, TD_THR_LOWEST_PRIORITY,
3019 TD_SIGNO_MASK, TD_THR_ANY_USER_FLAGS);
3021 if (return_val != TD_OK)
3022 return (return_val);
3024 return (wcb.errcode);
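/*
 * Example (sketch, not part of this library): counting the threads
 * blocked on a synchronization object with td_sync_waiters().
 *
 *	static int
 *	count_waiter(const td_thrhandle_t *th, void *arg)
 *	{
 *		(*(int *)arg)++;
 *		return (0);
 *	}
 *
 *	int nwaiters = 0;
 *	(void) td_sync_waiters(&sh, count_waiter, &nwaiters);
 */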