4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
22 * Copyright 2009 Sun Microsystems, Inc. All rights reserved.
23 * Use is subject to license terms.
25 * Copyright 2018 Joyent, Inc.
31 * The *target* is the program being inspected by the debugger. The MDB target
32 * layer provides a set of functions that insulate common debugger code,
33 * including the MDB Module API, from the implementation details of how the
34 * debugger accesses information from a given target. Each target exports a
35 * standard set of properties, including one or more address spaces, one or
36 * more symbol tables, a set of load objects, and a set of threads that can be
37 * examined using the interfaces in <mdb/mdb_target.h>. This technique has
38 * been employed successfully in other debuggers, including [1], primarily
39 * to improve portability, although the term "target" often refers to the
40 * encapsulation of architectural or operating system-specific details. The
41 * target abstraction is useful for MDB because it allows us to easily extend
42 * the debugger to examine a variety of different program forms. Primarily,
43 * the target functions validate input arguments and then call an appropriate
44 * function in the target ops vector, defined in <mdb/mdb_target_impl.h>.
45 * However, this interface layer provides a very high level of flexibility for
46 * separating the debugger interface from instrumentation details. Experience
47 * has shown this kind of design can facilitate separating out debugger
48 * instrumentation into an external agent [2] and enable the development of
49 * advanced instrumentation frameworks [3]. We want MDB to be an ideal
50 * extensible framework for the development of such applications.
52 * Aside from a set of wrapper functions, the target layer also provides event
53 * management for targets that represent live executing programs. Our model of
54 * events is also extensible, and is based upon work in [3] and [4]. We define
55 * a *software event* as a state transition in the target program (for example,
56 * the transition of the program counter to a location of interest) that is
57 * observed by the debugger or its agent. A *software event specifier* is a
58 * description of a class of software events that is used by the debugger to
59 * instrument the target so that the corresponding software events can be
60 * observed. In MDB, software event specifiers are represented by the
61 * mdb_sespec_t structure, defined in <mdb/mdb_target_impl.h>. As the user,
62 * the internal debugger code, and MDB modules may all wish to observe software
63 * events and receive appropriate notification and callbacks, we do not expose
64 * software event specifiers directly as part of the user interface. Instead,
65 * clients of the target layer request that events be observed by creating
66 * new *virtual event specifiers*. Each virtual specifier is named by a unique
67 * non-zero integer (the VID), and is represented by a mdb_vespec_t structure.
68 * One or more virtual specifiers are then associated with each underlying
69 * software event specifier. This design enforces the constraint that the
70 * target must only insert one set of instrumentation, regardless of how many
71 * times the target layer was asked to trace a given event. For example, if
72 * multiple clients request a breakpoint at a particular address, the virtual
73 * specifiers will map to the same sespec, ensuring that only one breakpoint
74 * trap instruction is actually planted at the given target address. When no
75 * virtual specifiers refer to an sespec, it is no longer needed and can be
76 * removed, along with the corresponding instrumentation.
78 * The following state transition diagram illustrates the life cycle of a
79 * software event specifier and example transitions:
82 * +--------+ delete +--------+ stop +-------+
83 * (|( DEAD )|) <------- ( ACTIVE ) <------> ( ARMED )
84 * +--------+ +--------+ +-------+
85 * ^ load/unload ^ ^ failure/ |
86 * delete | object / \ reset | failure
88 * | +--------+ +-------+ |
89 * +---- ( IDLE ) ( ERR ) <----+
90 * | +--------+ +-------+
92 * +------------------------------+
94 * The MDB execution control model is based upon the synchronous debugging
95 * model exported by Solaris proc(4). A target program is set running or the
96 * debugger is attached to a running target. On ISTOP (stop on event of
97 * interest), one target thread is selected as the representative. The
98 * algorithm for selecting the representative is target-specific, but we assume
99 * that if an observed software event has occurred, the target will select the
100 * thread that triggered the state transition of interest. The other threads
101 * are stopped in sympathy with the representative as soon as possible. Prior
102 * to continuing the target, we plant our instrumentation, transitioning event
103 * specifiers from the ACTIVE to the ARMED state, and then back again when the
104 * target stops. We then query each active event specifier to learn which ones
105 * are matched, and then invoke the callbacks associated with their vespecs.
106 * If an OS error occurs while attempting to arm or disarm a specifier, the
107 * specifier is transitioned to the ERROR state; we will attempt to arm it
108 * again at the next continue. If no target process is under our control or
109 * if an event is not currently applicable (e.g. a deferred breakpoint on an
110 * object that is not yet loaded), it remains in the IDLE state. The target
111 * implementation should intercept object load events and then transition the
112 * specifier to the ACTIVE state when the corresponding object is loaded.
114 * To simplify the debugger implementation and allow targets to easily provide
115 * new types of observable events, most of the event specifier management is
116 * done by the target layer. Each software event specifier provides an ops
117 * vector of subroutines that the target layer can call to perform the
118 * various state transitions described above. The target maintains two lists
119 * of mdb_sespec_t's: the t_idle list (IDLE state) and the t_active list
120 * (ACTIVE, ARMED, and ERROR states). Each mdb_sespec_t maintains a list of
121 * associated mdb_vespec_t's. If an sespec is IDLE or ERROR, its se_errno
122 * field will have an errno value specifying the reason for its inactivity.
123 * The vespec stores the client's callback function and private data, and the
124 * arguments used to construct the sespec. All objects are reference counted
125 * so we can destroy an object when it is no longer needed. The mdb_sespec_t
126 * invariants for the respective states are as follows:
128 * IDLE: on t_idle list, se_data == NULL, se_errno != 0, se_ctor not called
129 * ACTIVE: on t_active list, se_data valid, se_errno == 0, se_ctor called
130 * ARMED: on t_active list, se_data valid, se_errno == 0, se_ctor called
131 * ERROR: on t_active list, se_data valid, se_errno != 0, se_ctor called
133 * Additional commentary on specific state transitions and issues involving
134 * event management can be found below near the target layer functions.
138 * [1] John Gilmore, "Working in GDB", Technical Report, Cygnus Support,
139 * 1.84 edition, 1994.
141 * [2] David R. Hanson and Mukund Raghavachari, "A Machine-Independent
142 * Debugger", Software--Practice and Experience, 26(11), 1277-1299(1996).
144 * [3] Michael W. Shapiro, "RDB: A System for Incremental Replay Debugging",
145 * Technical Report CS-97-12, Department of Computer Science, Brown University.
148 * [4] Daniel B. Price, "New Techniques for Replay Debugging", Technical
149 * Report CS-98-05, Department of Computer Science, Brown University.
152 #include <mdb/mdb_target_impl.h>
153 #include <mdb/mdb_debug.h>
154 #include <mdb/mdb_modapi.h>
155 #include <mdb/mdb_err.h>
156 #include <mdb/mdb_callb.h>
157 #include <mdb/mdb_gelf.h>
158 #include <mdb/mdb_io_impl.h>
159 #include <mdb/mdb_string.h>
160 #include <mdb/mdb_signal.h>
161 #include <mdb/mdb_frame.h>
164 #include <sys/stat.h>
165 #include <sys/param.h>
166 #include <sys/signal.h>
172 * Define convenience macros for referencing the set of vespec flag bits that
173 * are preserved by the target implementation, and the set of bits that
174 * determine automatic ve_hits == ve_limit behavior.
176 #define T_IMPL_BITS \
177 (MDB_TGT_SPEC_INTERNAL | MDB_TGT_SPEC_SILENT | MDB_TGT_SPEC_MATCHED | \
178 MDB_TGT_SPEC_DELETED)
180 #define T_AUTO_BITS \
181 (MDB_TGT_SPEC_AUTOSTOP | MDB_TGT_SPEC_AUTODEL | MDB_TGT_SPEC_AUTODIS)
184 * Define convenience macro for referencing target flag pending continue bits.
186 #define T_CONT_BITS \
187 (MDB_TGT_F_STEP | MDB_TGT_F_STEP_OUT | MDB_TGT_F_NEXT | MDB_TGT_F_CONT)
190 mdb_tgt_create(mdb_tgt_ctor_f
*ctor
, int flags
, int argc
, const char *argv
[])
195 if (flags
& ~MDB_TGT_F_ALL
) {
196 (void) set_errno(EINVAL
);
200 t
= mdb_zalloc(sizeof (mdb_tgt_t
), UM_SLEEP
);
201 mdb_list_append(&mdb
.m_tgtlist
, t
);
203 t
->t_module
= &mdb
.m_rmod
;
204 t
->t_matched
= T_SE_END
;
209 for (mp
= mdb
.m_mhead
; mp
!= NULL
; mp
= mp
->mod_next
) {
210 if (ctor
== mp
->mod_tgt_ctor
) {
216 if (ctor(t
, argc
, argv
) != 0) {
217 mdb_list_delete(&mdb
.m_tgtlist
, t
);
218 mdb_free(t
, sizeof (mdb_tgt_t
));
222 mdb_dprintf(MDB_DBG_TGT
, "t_create %s (%p)\n",
223 t
->t_module
->mod_name
, (void *)t
);
225 (void) t
->t_ops
->t_status(t
, &t
->t_status
);
230 mdb_tgt_getflags(mdb_tgt_t
*t
)
236 mdb_tgt_setflags(mdb_tgt_t
*t
, int flags
)
238 if (flags
& ~MDB_TGT_F_ALL
)
239 return (set_errno(EINVAL
));
241 return (t
->t_ops
->t_setflags(t
, flags
));
245 mdb_tgt_setcontext(mdb_tgt_t
*t
, void *context
)
247 return (t
->t_ops
->t_setcontext(t
, context
));
252 tgt_delete_vespec(mdb_tgt_t
*t
, void *private, int vid
, void *data
)
254 (void) mdb_tgt_vespec_delete(t
, vid
);
259 mdb_tgt_destroy(mdb_tgt_t
*t
)
261 mdb_xdata_t
*xdp
, *nxdp
;
263 if (mdb
.m_target
== t
) {
264 mdb_dprintf(MDB_DBG_TGT
, "t_deactivate %s (%p)\n",
265 t
->t_module
->mod_name
, (void *)t
);
266 t
->t_ops
->t_deactivate(t
);
270 mdb_dprintf(MDB_DBG_TGT
, "t_destroy %s (%p)\n",
271 t
->t_module
->mod_name
, (void *)t
);
273 for (xdp
= mdb_list_next(&t
->t_xdlist
); xdp
!= NULL
; xdp
= nxdp
) {
274 nxdp
= mdb_list_next(xdp
);
275 mdb_list_delete(&t
->t_xdlist
, xdp
);
276 mdb_free(xdp
, sizeof (mdb_xdata_t
));
279 mdb_tgt_sespec_idle_all(t
, EBUSY
, TRUE
);
280 (void) mdb_tgt_vespec_iter(t
, tgt_delete_vespec
, NULL
);
281 t
->t_ops
->t_destroy(t
);
283 mdb_list_delete(&mdb
.m_tgtlist
, t
);
284 mdb_free(t
, sizeof (mdb_tgt_t
));
286 if (mdb
.m_target
== NULL
)
287 mdb_tgt_activate(mdb_list_prev(&mdb
.m_tgtlist
));
291 mdb_tgt_activate(mdb_tgt_t
*t
)
293 mdb_tgt_t
*otgt
= mdb
.m_target
;
295 if (mdb
.m_target
!= NULL
) {
296 mdb_dprintf(MDB_DBG_TGT
, "t_deactivate %s (%p)\n",
297 mdb
.m_target
->t_module
->mod_name
, (void *)mdb
.m_target
);
298 mdb
.m_target
->t_ops
->t_deactivate(mdb
.m_target
);
301 if ((mdb
.m_target
= t
) != NULL
) {
302 const char *v
= strstr(mdb
.m_root
, "%V");
304 mdb_dprintf(MDB_DBG_TGT
, "t_activate %s (%p)\n",
305 t
->t_module
->mod_name
, (void *)t
);
308 * If the root was explicitly set with -R and contains %V,
309 * expand it like a path. If the resulting directory is
310 * not present, then replace %V with "latest" and re-evaluate.
313 char old_root
[MAXPATHLEN
];
320 p
= mdb_path_alloc(mdb
.m_root
, &len
);
321 (void) strcpy(old_root
, mdb
.m_root
);
322 (void) strncpy(mdb
.m_root
, p
[0], MAXPATHLEN
);
323 mdb
.m_root
[MAXPATHLEN
- 1] = '\0';
324 mdb_path_free(p
, len
);
327 if (stat(mdb
.m_root
, &s
) == -1 && errno
== ENOENT
) {
328 mdb
.m_flags
|= MDB_FL_LATEST
;
329 p
= mdb_path_alloc(old_root
, &len
);
330 (void) strncpy(mdb
.m_root
, p
[0], MAXPATHLEN
);
331 mdb
.m_root
[MAXPATHLEN
- 1] = '\0';
332 mdb_path_free(p
, len
);
338 * Re-evaluate the macro and dmod paths now that we have the
339 * new target set and m_root figured out.
342 mdb_set_ipath(mdb
.m_ipathstr
);
343 mdb_set_lpath(mdb
.m_lpathstr
);
346 t
->t_ops
->t_activate(t
);
351 mdb_tgt_periodic(mdb_tgt_t
*t
)
353 t
->t_ops
->t_periodic(t
);
357 mdb_tgt_name(mdb_tgt_t
*t
)
359 return (t
->t_ops
->t_name(t
));
363 mdb_tgt_isa(mdb_tgt_t
*t
)
365 return (t
->t_ops
->t_isa(t
));
369 mdb_tgt_platform(mdb_tgt_t
*t
)
371 return (t
->t_ops
->t_platform(t
));
375 mdb_tgt_uname(mdb_tgt_t
*t
, struct utsname
*utsp
)
377 return (t
->t_ops
->t_uname(t
, utsp
));
381 mdb_tgt_dmodel(mdb_tgt_t
*t
)
383 return (t
->t_ops
->t_dmodel(t
));
387 mdb_tgt_auxv(mdb_tgt_t
*t
, const auxv_t
**auxvp
)
389 return (t
->t_ops
->t_auxv(t
, auxvp
));
393 mdb_tgt_aread(mdb_tgt_t
*t
, mdb_tgt_as_t as
,
394 void *buf
, size_t n
, mdb_tgt_addr_t addr
)
396 if (t
->t_flags
& MDB_TGT_F_ASIO
)
397 return (t
->t_ops
->t_aread(t
, as
, buf
, n
, addr
));
399 switch ((uintptr_t)as
) {
400 case (uintptr_t)MDB_TGT_AS_VIRT
:
401 return (t
->t_ops
->t_vread(t
, buf
, n
, addr
));
402 case (uintptr_t)MDB_TGT_AS_PHYS
:
403 return (t
->t_ops
->t_pread(t
, buf
, n
, addr
));
404 case (uintptr_t)MDB_TGT_AS_FILE
:
405 return (t
->t_ops
->t_fread(t
, buf
, n
, addr
));
406 case (uintptr_t)MDB_TGT_AS_IO
:
407 return (t
->t_ops
->t_ioread(t
, buf
, n
, addr
));
409 return (t
->t_ops
->t_aread(t
, as
, buf
, n
, addr
));
413 mdb_tgt_awrite(mdb_tgt_t
*t
, mdb_tgt_as_t as
,
414 const void *buf
, size_t n
, mdb_tgt_addr_t addr
)
416 if (!(t
->t_flags
& MDB_TGT_F_RDWR
))
417 return (set_errno(EMDB_TGTRDONLY
));
419 if (t
->t_flags
& MDB_TGT_F_ASIO
)
420 return (t
->t_ops
->t_awrite(t
, as
, buf
, n
, addr
));
422 switch ((uintptr_t)as
) {
423 case (uintptr_t)MDB_TGT_AS_VIRT
:
424 return (t
->t_ops
->t_vwrite(t
, buf
, n
, addr
));
425 case (uintptr_t)MDB_TGT_AS_PHYS
:
426 return (t
->t_ops
->t_pwrite(t
, buf
, n
, addr
));
427 case (uintptr_t)MDB_TGT_AS_FILE
:
428 return (t
->t_ops
->t_fwrite(t
, buf
, n
, addr
));
429 case (uintptr_t)MDB_TGT_AS_IO
:
430 return (t
->t_ops
->t_iowrite(t
, buf
, n
, addr
));
432 return (t
->t_ops
->t_awrite(t
, as
, buf
, n
, addr
));
436 mdb_tgt_vread(mdb_tgt_t
*t
, void *buf
, size_t n
, uintptr_t addr
)
438 return (t
->t_ops
->t_vread(t
, buf
, n
, addr
));
442 mdb_tgt_vwrite(mdb_tgt_t
*t
, const void *buf
, size_t n
, uintptr_t addr
)
444 if (t
->t_flags
& MDB_TGT_F_RDWR
)
445 return (t
->t_ops
->t_vwrite(t
, buf
, n
, addr
));
447 return (set_errno(EMDB_TGTRDONLY
));
451 mdb_tgt_pread(mdb_tgt_t
*t
, void *buf
, size_t n
, physaddr_t addr
)
453 return (t
->t_ops
->t_pread(t
, buf
, n
, addr
));
457 mdb_tgt_pwrite(mdb_tgt_t
*t
, const void *buf
, size_t n
, physaddr_t addr
)
459 if (t
->t_flags
& MDB_TGT_F_RDWR
)
460 return (t
->t_ops
->t_pwrite(t
, buf
, n
, addr
));
462 return (set_errno(EMDB_TGTRDONLY
));
466 mdb_tgt_fread(mdb_tgt_t
*t
, void *buf
, size_t n
, uintptr_t addr
)
468 return (t
->t_ops
->t_fread(t
, buf
, n
, addr
));
472 mdb_tgt_fwrite(mdb_tgt_t
*t
, const void *buf
, size_t n
, uintptr_t addr
)
474 if (t
->t_flags
& MDB_TGT_F_RDWR
)
475 return (t
->t_ops
->t_fwrite(t
, buf
, n
, addr
));
477 return (set_errno(EMDB_TGTRDONLY
));
481 mdb_tgt_ioread(mdb_tgt_t
*t
, void *buf
, size_t n
, uintptr_t addr
)
483 return (t
->t_ops
->t_ioread(t
, buf
, n
, addr
));
487 mdb_tgt_iowrite(mdb_tgt_t
*t
, const void *buf
, size_t n
, uintptr_t addr
)
489 if (t
->t_flags
& MDB_TGT_F_RDWR
)
490 return (t
->t_ops
->t_iowrite(t
, buf
, n
, addr
));
492 return (set_errno(EMDB_TGTRDONLY
));
496 mdb_tgt_vtop(mdb_tgt_t
*t
, mdb_tgt_as_t as
, uintptr_t va
, physaddr_t
*pap
)
498 return (t
->t_ops
->t_vtop(t
, as
, va
, pap
));
502 mdb_tgt_readstr(mdb_tgt_t
*t
, mdb_tgt_as_t as
, char *buf
,
503 size_t nbytes
, mdb_tgt_addr_t addr
)
505 ssize_t n
, nread
= mdb_tgt_aread(t
, as
, buf
, nbytes
, addr
);
509 if ((p
= memchr(buf
, '\0', nread
)) != NULL
)
510 nread
= (size_t)(p
- buf
);
517 while (nread
< nbytes
&& (n
= mdb_tgt_aread(t
, as
, p
, 1, addr
)) == 1) {
525 if (nread
== 0 && n
== -1)
526 return (-1); /* If we can't even read a byte, return -1 */
530 buf
[MIN(nread
, nbytes
- 1)] = '\0';
536 mdb_tgt_writestr(mdb_tgt_t
*t
, mdb_tgt_as_t as
,
537 const char *buf
, mdb_tgt_addr_t addr
)
539 ssize_t nwritten
= mdb_tgt_awrite(t
, as
, buf
, strlen(buf
) + 1, addr
);
540 return (nwritten
> 0 ? nwritten
- 1 : nwritten
);
544 mdb_tgt_lookup_by_name(mdb_tgt_t
*t
, const char *obj
,
545 const char *name
, GElf_Sym
*symp
, mdb_syminfo_t
*sip
)
551 if (name
== NULL
|| t
== NULL
)
552 return (set_errno(EINVAL
));
554 if (obj
== MDB_TGT_OBJ_EVERY
&&
555 mdb_gelf_symtab_lookup_by_name(mdb
.m_prsym
, name
, &sym
, &id
) == 0) {
556 info
.sym_table
= MDB_TGT_PRVSYM
;
561 if (t
->t_ops
->t_lookup_by_name(t
, obj
, name
, &sym
, &info
) == 0)
575 mdb_tgt_lookup_by_addr(mdb_tgt_t
*t
, uintptr_t addr
, uint_t flags
,
576 char *buf
, size_t len
, GElf_Sym
*symp
, mdb_syminfo_t
*sip
)
582 return (set_errno(EINVAL
));
584 if (t
->t_ops
->t_lookup_by_addr(t
, addr
, flags
,
585 buf
, len
, &sym
, &info
) == 0) {
597 * The mdb_tgt_lookup_by_scope function is a convenience routine for code that
598 * wants to look up a scoped symbol name such as "object`symbol". It is
599 * implemented as a simple wrapper around mdb_tgt_lookup_by_name. Note that
600 * we split on the *last* occurrence of "`", so the object name itself may
601 * contain additional scopes whose evaluation is left to the target. This
602 * allows targets to implement additional scopes, such as source files,
603 * function names, link map identifiers, etc.
606 mdb_tgt_lookup_by_scope(mdb_tgt_t
*t
, const char *s
, GElf_Sym
*symp
,
609 const char *object
= MDB_TGT_OBJ_EVERY
;
610 const char *name
= s
;
611 char buf
[MDB_TGT_SYM_NAMLEN
];
614 return (set_errno(EINVAL
));
616 if (strchr(name
, '`') != NULL
) {
618 (void) strncpy(buf
, s
, sizeof (buf
));
619 buf
[sizeof (buf
) - 1] = '\0';
622 if ((s
= strrsplit(buf
, '`')) != NULL
) {
626 return (set_errno(EMDB_NOOBJ
));
628 return (set_errno(EMDB_NOSYM
));
632 return (mdb_tgt_lookup_by_name(t
, object
, name
, symp
, sip
));
636 mdb_tgt_symbol_iter(mdb_tgt_t
*t
, const char *obj
, uint_t which
,
637 uint_t type
, mdb_tgt_sym_f
*cb
, void *p
)
639 if ((which
!= MDB_TGT_SYMTAB
&& which
!= MDB_TGT_DYNSYM
) ||
640 (type
& ~(MDB_TGT_BIND_ANY
| MDB_TGT_TYPE_ANY
)) != 0)
641 return (set_errno(EINVAL
));
643 return (t
->t_ops
->t_symbol_iter(t
, obj
, which
, type
, cb
, p
));
647 mdb_tgt_readsym(mdb_tgt_t
*t
, mdb_tgt_as_t as
, void *buf
, size_t nbytes
,
648 const char *obj
, const char *name
)
652 if (mdb_tgt_lookup_by_name(t
, obj
, name
, &sym
, NULL
) == 0)
653 return (mdb_tgt_aread(t
, as
, buf
, nbytes
, sym
.st_value
));
659 mdb_tgt_writesym(mdb_tgt_t
*t
, mdb_tgt_as_t as
, const void *buf
,
660 size_t nbytes
, const char *obj
, const char *name
)
664 if (mdb_tgt_lookup_by_name(t
, obj
, name
, &sym
, NULL
) == 0)
665 return (mdb_tgt_awrite(t
, as
, buf
, nbytes
, sym
.st_value
));
671 mdb_tgt_mapping_iter(mdb_tgt_t
*t
, mdb_tgt_map_f
*cb
, void *p
)
673 return (t
->t_ops
->t_mapping_iter(t
, cb
, p
));
677 mdb_tgt_object_iter(mdb_tgt_t
*t
, mdb_tgt_map_f
*cb
, void *p
)
679 return (t
->t_ops
->t_object_iter(t
, cb
, p
));
683 mdb_tgt_addr_to_map(mdb_tgt_t
*t
, uintptr_t addr
)
685 return (t
->t_ops
->t_addr_to_map(t
, addr
));
689 mdb_tgt_name_to_map(mdb_tgt_t
*t
, const char *name
)
691 return (t
->t_ops
->t_name_to_map(t
, name
));
695 mdb_tgt_addr_to_ctf(mdb_tgt_t
*t
, uintptr_t addr
)
697 return (t
->t_ops
->t_addr_to_ctf(t
, addr
));
701 mdb_tgt_name_to_ctf(mdb_tgt_t
*t
, const char *name
)
703 return (t
->t_ops
->t_name_to_ctf(t
, name
));
707 * Return the latest target status. We just copy out our cached copy. The
708 * status only needs to change when the target is run, stepped, or continued.
711 mdb_tgt_status(mdb_tgt_t
*t
, mdb_tgt_status_t
*tsp
)
713 uint_t dstop
= (t
->t_status
.st_flags
& MDB_TGT_DSTOP
);
714 uint_t istop
= (t
->t_status
.st_flags
& MDB_TGT_ISTOP
);
715 uint_t state
= t
->t_status
.st_state
;
718 return (set_errno(EINVAL
));
721 * If we're called with the address of the target's internal status,
722 * then call down to update it; otherwise copy out the saved status.
724 if (tsp
== &t
->t_status
&& t
->t_ops
->t_status(t
, &t
->t_status
) != 0)
725 return (-1); /* errno is set for us */
728 * Assert that our state is valid before returning it. The state must
729 * be valid, and DSTOP and ISTOP cannot be set simultaneously. ISTOP
730 * is only valid when stopped. DSTOP is only valid when running or
731 * stopped. If any test fails, abort the debugger.
733 if (state
> MDB_TGT_LOST
)
734 fail("invalid target state (%u)\n", state
);
735 if (state
!= MDB_TGT_STOPPED
&& istop
)
736 fail("target state is (%u) and ISTOP is set\n", state
);
737 if (state
!= MDB_TGT_STOPPED
&& state
!= MDB_TGT_RUNNING
&& dstop
)
738 fail("target state is (%u) and DSTOP is set\n", state
);
740 fail("target has ISTOP and DSTOP set simultaneously\n");
742 if (tsp
!= &t
->t_status
)
743 bcopy(&t
->t_status
, tsp
, sizeof (mdb_tgt_status_t
));
749 * For the given sespec, scan its list of vespecs for ones that are marked
750 * temporary and delete them. We use the same method as vespec_delete below.
754 mdb_tgt_sespec_prune_one(mdb_tgt_t
*t
, mdb_sespec_t
*sep
)
756 mdb_vespec_t
*vep
, *nvep
;
758 for (vep
= mdb_list_next(&sep
->se_velist
); vep
; vep
= nvep
) {
759 nvep
= mdb_list_next(vep
);
761 if ((vep
->ve_flags
& (MDB_TGT_SPEC_DELETED
|
762 MDB_TGT_SPEC_TEMPORARY
)) == MDB_TGT_SPEC_TEMPORARY
) {
763 vep
->ve_flags
|= MDB_TGT_SPEC_DELETED
;
764 mdb_tgt_vespec_rele(t
, vep
);
770 * Prune each sespec on the active list of temporary vespecs. This function
771 * is called, for example, after the target finishes a continue operation.
774 mdb_tgt_sespec_prune_all(mdb_tgt_t
*t
)
776 mdb_sespec_t
*sep
, *nsep
;
778 for (sep
= mdb_list_next(&t
->t_active
); sep
!= NULL
; sep
= nsep
) {
779 nsep
= mdb_list_next(sep
);
780 mdb_tgt_sespec_prune_one(t
, sep
);
785 * Transition the given sespec to the IDLE state. We invoke the destructor,
786 * and then move the sespec from the active list to the idle list.
789 mdb_tgt_sespec_idle_one(mdb_tgt_t
*t
, mdb_sespec_t
*sep
, int reason
)
791 ASSERT(sep
->se_state
!= MDB_TGT_SPEC_IDLE
);
793 if (sep
->se_state
== MDB_TGT_SPEC_ARMED
)
794 (void) sep
->se_ops
->se_disarm(t
, sep
);
796 sep
->se_ops
->se_dtor(t
, sep
);
799 sep
->se_state
= MDB_TGT_SPEC_IDLE
;
800 sep
->se_errno
= reason
;
802 mdb_list_delete(&t
->t_active
, sep
);
803 mdb_list_append(&t
->t_idle
, sep
);
805 mdb_tgt_sespec_prune_one(t
, sep
);
809 * Transition each sespec on the active list to the IDLE state. This function
810 * is called, for example, after the target terminates execution.
813 mdb_tgt_sespec_idle_all(mdb_tgt_t
*t
, int reason
, int clear_matched
)
815 mdb_sespec_t
*sep
, *nsep
;
818 while ((sep
= t
->t_matched
) != T_SE_END
&& clear_matched
) {
819 for (vep
= mdb_list_next(&sep
->se_velist
); vep
!= NULL
; ) {
820 vep
->ve_flags
&= ~MDB_TGT_SPEC_MATCHED
;
821 vep
= mdb_list_next(vep
);
824 t
->t_matched
= sep
->se_matched
;
825 sep
->se_matched
= NULL
;
826 mdb_tgt_sespec_rele(t
, sep
);
829 for (sep
= mdb_list_next(&t
->t_active
); sep
!= NULL
; sep
= nsep
) {
830 nsep
= mdb_list_next(sep
);
831 mdb_tgt_sespec_idle_one(t
, sep
, reason
);
836 * Attempt to transition the given sespec from the IDLE to ACTIVE state. We
837 * do this by invoking se_ctor -- if this fails, we save the reason in se_errno
838 * and return -1 with errno set. One strange case we need to deal with here is
839 * the possibility that a given vespec is sitting on the idle list with its
840 * corresponding sespec, but it is actually a duplicate of another sespec on the
841 * active list. This can happen if the sespec is associated with a
842 * MDB_TGT_SPEC_DISABLED vespec that was just enabled, and is now ready to be
843 * activated. A more interesting reason this situation might arise is the case
844 * where a virtual address breakpoint is set at an address just mmap'ed by
845 * dlmopen. Since no symbol table information is available for this mapping
846 * yet, a pre-existing deferred symbolic breakpoint may already exist for this
847 * address, but it is on the idle list. When the symbol table is ready and the
848 * DLACTIVITY event occurs, we now discover that the virtual address obtained by
849 * evaluating the symbolic breakpoint matches the explicit virtual address of
850 * the active virtual breakpoint. To resolve this conflict in either case, we
851 * destroy the idle sespec, and attach its list of vespecs to the existing
855 mdb_tgt_sespec_activate_one(mdb_tgt_t
*t
, mdb_sespec_t
*sep
)
857 mdb_vespec_t
*vep
= mdb_list_next(&sep
->se_velist
);
862 ASSERT(sep
->se_state
== MDB_TGT_SPEC_IDLE
);
865 if (vep
->ve_flags
& MDB_TGT_SPEC_DISABLED
)
866 return (0); /* cannot be activated while disabled bit set */
869 * First search the active list for an existing, duplicate sespec to
870 * handle the special case described above.
872 for (dup
= mdb_list_next(&t
->t_active
); dup
; dup
= mdb_list_next(dup
)) {
873 if (dup
->se_ops
== sep
->se_ops
&&
874 dup
->se_ops
->se_secmp(t
, dup
, vep
->ve_args
)) {
881 * If a duplicate is found, destroy the existing, idle sespec, and
882 * attach all of its vespecs to the duplicate sespec.
885 for (vep
= mdb_list_next(&sep
->se_velist
); vep
; vep
= nvep
) {
886 mdb_dprintf(MDB_DBG_TGT
, "merge [ %d ] to sespec %p\n",
887 vep
->ve_id
, (void *)dup
);
889 if (dup
->se_matched
!= NULL
)
890 vep
->ve_flags
|= MDB_TGT_SPEC_MATCHED
;
892 nvep
= mdb_list_next(vep
);
895 mdb_list_delete(&sep
->se_velist
, vep
);
896 mdb_tgt_sespec_rele(t
, sep
);
898 mdb_list_append(&dup
->se_velist
, vep
);
899 mdb_tgt_sespec_hold(t
, dup
);
903 mdb_dprintf(MDB_DBG_TGT
, "merged idle sespec %p with %p\n",
904 (void *)sep
, (void *)dup
);
909 * If no duplicate is found, call the sespec's constructor. If this
910 * is successful, move the sespec to the active list.
912 if (sep
->se_ops
->se_ctor(t
, sep
, vep
->ve_args
) < 0) {
913 sep
->se_errno
= errno
;
919 for (vep
= mdb_list_next(&sep
->se_velist
); vep
; vep
= nvep
) {
920 nvep
= mdb_list_next(vep
);
923 mdb_list_delete(&t
->t_idle
, sep
);
924 mdb_list_append(&t
->t_active
, sep
);
925 sep
->se_state
= MDB_TGT_SPEC_ACTIVE
;
932 * Transition each sespec on the idle list to the ACTIVE state. This function
933 * is called, for example, after the target's t_run() function returns. If
934 * the se_ctor() function fails, the specifier is not yet applicable; it will
935 * remain on the idle list and can be activated later.
937 * Returns 1 if there weren't any unexpected activation failures; 0 if there
941 mdb_tgt_sespec_activate_all(mdb_tgt_t
*t
)
943 mdb_sespec_t
*sep
, *nsep
;
946 for (sep
= mdb_list_next(&t
->t_idle
); sep
!= NULL
; sep
= nsep
) {
947 nsep
= mdb_list_next(sep
);
949 if (mdb_tgt_sespec_activate_one(t
, sep
) < 0 &&
950 sep
->se_errno
!= EMDB_NOOBJ
)
958 * Transition the given sespec to the ARMED state. Note that we attempt to
959 * re-arm sespecs previously in the ERROR state. If se_arm() fails the sespec
960 * transitions to the ERROR state but stays on the active list.
963 mdb_tgt_sespec_arm_one(mdb_tgt_t
*t
, mdb_sespec_t
*sep
)
965 ASSERT(sep
->se_state
!= MDB_TGT_SPEC_IDLE
);
967 if (sep
->se_state
== MDB_TGT_SPEC_ARMED
)
968 return; /* do not arm sespecs more than once */
970 if (sep
->se_ops
->se_arm(t
, sep
) == -1) {
971 sep
->se_state
= MDB_TGT_SPEC_ERROR
;
972 sep
->se_errno
= errno
;
974 sep
->se_state
= MDB_TGT_SPEC_ARMED
;
980 * Transition each sespec on the active list (except matched specs) to the
981 * ARMED state. This function is called prior to continuing the target.
984 mdb_tgt_sespec_arm_all(mdb_tgt_t
*t
)
986 mdb_sespec_t
*sep
, *nsep
;
988 for (sep
= mdb_list_next(&t
->t_active
); sep
!= NULL
; sep
= nsep
) {
989 nsep
= mdb_list_next(sep
);
990 if (sep
->se_matched
== NULL
)
991 mdb_tgt_sespec_arm_one(t
, sep
);
996 * Transition each sespec on the active list that is in the ARMED state to
997 * the ACTIVE state. If se_disarm() fails, the sespec is transitioned to
998 * the ERROR state instead, but left on the active list.
1001 tgt_disarm_sespecs(mdb_tgt_t
*t
)
1005 for (sep
= mdb_list_next(&t
->t_active
); sep
; sep
= mdb_list_next(sep
)) {
1006 if (sep
->se_state
!= MDB_TGT_SPEC_ARMED
)
1007 continue; /* do not disarm if in ERROR state */
1009 if (sep
->se_ops
->se_disarm(t
, sep
) == -1) {
1010 sep
->se_state
= MDB_TGT_SPEC_ERROR
;
1011 sep
->se_errno
= errno
;
1013 sep
->se_state
= MDB_TGT_SPEC_ACTIVE
;
1020 * Determine if the software event that triggered the most recent stop matches
1021 * any of the active event specifiers. If 'all' is TRUE, we consider all
1022 * sespecs in our search. If 'all' is FALSE, we only consider ARMED sespecs.
1023 * If we successfully match an event, we add it to the t_matched list and
1024 * place an additional hold on it.
1026 static mdb_sespec_t
*
1027 tgt_match_sespecs(mdb_tgt_t
*t
, int all
)
1031 for (sep
= mdb_list_next(&t
->t_active
); sep
; sep
= mdb_list_next(sep
)) {
1032 if (all
== FALSE
&& sep
->se_state
!= MDB_TGT_SPEC_ARMED
)
1033 continue; /* restrict search to ARMED sespecs */
1035 if (sep
->se_state
!= MDB_TGT_SPEC_ERROR
&&
1036 sep
->se_ops
->se_match(t
, sep
, &t
->t_status
)) {
1037 mdb_dprintf(MDB_DBG_TGT
, "match se %p\n", (void *)sep
);
1038 mdb_tgt_sespec_hold(t
, sep
);
1039 sep
->se_matched
= t
->t_matched
;
1044 return (t
->t_matched
);
1048 * This function provides the low-level target continue algorithm. We proceed
1049 * in three phases: (1) we arm the active sespecs, except the specs matched at
1050 * the time we last stopped, (2) we call se_cont() on any matched sespecs to
1051 * step over these event transitions, and then arm the corresponding sespecs,
1052 * and (3) we call the appropriate low-level continue routine. Once the
1053 * target stops again, we determine which sespecs were matched, and invoke the
1054 * appropriate vespec callbacks and perform other vespec maintenance.
1057 tgt_continue(mdb_tgt_t
*t
, mdb_tgt_status_t
*tsp
,
1058 int (*t_cont
)(mdb_tgt_t
*, mdb_tgt_status_t
*))
1060 mdb_var_t
*hitv
= mdb_nv_lookup(&mdb
.m_nv
, "hits");
1061 uintptr_t pc
= t
->t_status
.st_pc
;
1064 mdb_sespec_t
*sep
, *nsep
, *matched
;
1065 mdb_vespec_t
*vep
, *nvep
;
1068 uint_t cbits
= 0; /* union of pending continue bits */
1069 uint_t ncont
= 0; /* # of callbacks that requested cont */
1070 uint_t n
= 0; /* # of callbacks */
1073 * If the target is undead, dead, or lost, we no longer allow continue.
1074 * This effectively forces the user to use ::kill or ::run after death.
1076 if (t
->t_status
.st_state
== MDB_TGT_UNDEAD
)
1077 return (set_errno(EMDB_TGTZOMB
));
1078 if (t
->t_status
.st_state
== MDB_TGT_DEAD
)
1079 return (set_errno(EMDB_TGTCORE
));
1080 if (t
->t_status
.st_state
== MDB_TGT_LOST
)
1081 return (set_errno(EMDB_TGTLOST
));
1084 * If any of single-step, step-over, or step-out is pending, it takes
1085 * precedence over an explicit or pending continue, because these are
1086 * all different specialized forms of continue.
1088 if (t
->t_flags
& MDB_TGT_F_STEP
)
1089 t_cont
= t
->t_ops
->t_step
;
1090 else if (t
->t_flags
& MDB_TGT_F_NEXT
)
1091 t_cont
= t
->t_ops
->t_step
;
1092 else if (t
->t_flags
& MDB_TGT_F_STEP_OUT
)
1093 t_cont
= t
->t_ops
->t_cont
;
1096 * To handle step-over, we ask the target to find the address past the
1097 * next control transfer instruction. If an address is found, we plant
1098 * a temporary breakpoint there and continue; otherwise just step.
1100 if ((t
->t_flags
& MDB_TGT_F_NEXT
) && !(t
->t_flags
& MDB_TGT_F_STEP
)) {
1101 if (t
->t_ops
->t_next(t
, &addr
) == -1 || mdb_tgt_add_vbrkpt(t
,
1102 addr
, MDB_TGT_SPEC_HIDDEN
| MDB_TGT_SPEC_TEMPORARY
,
1103 no_se_f
, NULL
) == 0) {
1104 mdb_dprintf(MDB_DBG_TGT
, "next falling back to step: "
1105 "%s\n", mdb_strerror(errno
));
1107 t_cont
= t
->t_ops
->t_cont
;
1111 * To handle step-out, we ask the target to find the return address of
1112 * the current frame, plant a temporary breakpoint there, and continue.
1114 if (t
->t_flags
& MDB_TGT_F_STEP_OUT
) {
1115 if (t
->t_ops
->t_step_out(t
, &addr
) == -1)
1116 return (-1); /* errno is set for us */
1118 if (mdb_tgt_add_vbrkpt(t
, addr
, MDB_TGT_SPEC_HIDDEN
|
1119 MDB_TGT_SPEC_TEMPORARY
, no_se_f
, NULL
) == 0)
1120 return (-1); /* errno is set for us */
1123 (void) mdb_signal_block(SIGHUP
);
1124 (void) mdb_signal_block(SIGTERM
);
1127 t
->t_flags
&= ~T_CONT_BITS
;
1128 t
->t_flags
|= MDB_TGT_F_BUSY
;
1129 mdb_tgt_sespec_arm_all(t
);
1131 ASSERT(t
->t_matched
!= NULL
);
1132 matched
= t
->t_matched
;
1133 t
->t_matched
= T_SE_END
;
1135 if (mdb
.m_term
!= NULL
)
1136 IOP_SUSPEND(mdb
.m_term
);
1139 * Iterate over the matched sespec list, performing autostop processing
1140 * and clearing the matched bit for each associated vespec. We then
1141 * invoke each sespec's se_cont callback in order to continue past
1142 * the corresponding event. If the matched list has more than one
1143 * sespec, we assume that the se_cont callbacks are non-interfering.
1145 for (sep
= matched
; sep
!= T_SE_END
; sep
= sep
->se_matched
) {
1146 for (vep
= mdb_list_next(&sep
->se_velist
); vep
!= NULL
; ) {
1147 if ((vep
->ve_flags
& MDB_TGT_SPEC_AUTOSTOP
) &&
1148 (vep
->ve_limit
&& vep
->ve_hits
== vep
->ve_limit
))
1151 vep
->ve_flags
&= ~MDB_TGT_SPEC_MATCHED
;
1152 vep
= mdb_list_next(vep
);
1155 if (sep
->se_ops
->se_cont(t
, sep
, &t
->t_status
) == -1) {
1156 error
= errno
? errno
: -1;
1157 tgt_disarm_sespecs(t
);
1161 if (!(t
->t_status
.st_flags
& MDB_TGT_ISTOP
)) {
1162 tgt_disarm_sespecs(t
);
1163 if (t
->t_status
.st_state
== MDB_TGT_UNDEAD
)
1164 mdb_tgt_sespec_idle_all(t
, EMDB_TGTZOMB
, TRUE
);
1165 else if (t
->t_status
.st_state
== MDB_TGT_LOST
)
1166 mdb_tgt_sespec_idle_all(t
, EMDB_TGTLOST
, TRUE
);
1172 * Clear the se_matched field for each matched sespec, and drop the
1173 * reference count since the sespec is no longer on the matched list.
1175 for (sep
= matched
; sep
!= T_SE_END
; sep
= nsep
) {
1176 nsep
= sep
->se_matched
;
1177 sep
->se_matched
= NULL
;
1178 mdb_tgt_sespec_rele(t
, sep
);
1182 * If the matched list was non-empty, see if we hit another event while
1183 * performing se_cont() processing. If so, don't bother continuing any
1184 * further. If not, arm the sespecs on the old matched list by calling
1185 * mdb_tgt_sespec_arm_all() again and then continue by calling t_cont.
1187 if (matched
!= T_SE_END
) {
1188 if (error
!= 0 || !(t
->t_status
.st_flags
& MDB_TGT_ISTOP
))
1189 goto out
; /* abort now if se_cont() failed */
1191 if ((t
->t_matched
= tgt_match_sespecs(t
, FALSE
)) != T_SE_END
) {
1192 tgt_disarm_sespecs(t
);
1196 mdb_tgt_sespec_arm_all(t
);
1199 if (t_cont
!= t
->t_ops
->t_step
|| pc
== t
->t_status
.st_pc
) {
1200 if (t_cont(t
, &t
->t_status
) != 0)
1201 error
= errno
? errno
: -1;
1204 tgt_disarm_sespecs(t
);
1206 if (t
->t_flags
& MDB_TGT_F_UNLOAD
)
1207 longjmp(mdb
.m_frame
->f_pcb
, MDB_ERR_QUIT
);
1209 if (t
->t_status
.st_state
== MDB_TGT_UNDEAD
)
1210 mdb_tgt_sespec_idle_all(t
, EMDB_TGTZOMB
, TRUE
);
1211 else if (t
->t_status
.st_state
== MDB_TGT_LOST
)
1212 mdb_tgt_sespec_idle_all(t
, EMDB_TGTLOST
, TRUE
);
1213 else if (t
->t_status
.st_flags
& MDB_TGT_ISTOP
)
1214 t
->t_matched
= tgt_match_sespecs(t
, TRUE
);
1216 if (mdb
.m_term
!= NULL
)
1217 IOP_RESUME(mdb
.m_term
);
1219 (void) mdb_signal_unblock(SIGTERM
);
1220 (void) mdb_signal_unblock(SIGHUP
);
1223 for (sep
= t
->t_matched
; sep
!= T_SE_END
; sep
= sep
->se_matched
) {
1225 * When we invoke a ve_callback, it may in turn request that the
1226 * target continue immediately after callback processing is
1227 * complete. We only allow this to occur if *all* callbacks
1228 * agree to continue. To implement this behavior, we keep a
1229 * count (ncont) of such requests, and only apply the cumulative
1230 * continue bits (cbits) to the target if ncont is equal to the
1231 * total number of callbacks that are invoked (n).
1233 for (vep
= mdb_list_next(&sep
->se_velist
);
1234 vep
!= NULL
; vep
= nvep
, n
++) {
1236 * Place an extra hold on the current vespec and pick
1237 * up the next pointer before invoking the callback: we
1238 * must be prepared for the vespec to be deleted or
1239 * moved to a different list by the callback.
1241 mdb_tgt_vespec_hold(t
, vep
);
1242 nvep
= mdb_list_next(vep
);
1244 vep
->ve_flags
|= MDB_TGT_SPEC_MATCHED
;
1247 mdb_nv_set_value(mdb
.m_dot
, t
->t_status
.st_pc
);
1248 mdb_nv_set_value(hitv
, vep
->ve_hits
);
1250 ASSERT((t
->t_flags
& T_CONT_BITS
) == 0);
1251 vep
->ve_callback(t
, vep
->ve_id
, vep
->ve_data
);
1253 ncont
+= (t
->t_flags
& T_CONT_BITS
) != 0;
1254 cbits
|= (t
->t_flags
& T_CONT_BITS
);
1255 t
->t_flags
&= ~T_CONT_BITS
;
1257 if (vep
->ve_limit
&& vep
->ve_hits
== vep
->ve_limit
) {
1258 if (vep
->ve_flags
& MDB_TGT_SPEC_AUTODEL
)
1259 (void) mdb_tgt_vespec_delete(t
,
1261 else if (vep
->ve_flags
& MDB_TGT_SPEC_AUTODIS
)
1262 (void) mdb_tgt_vespec_disable(t
,
1266 if (vep
->ve_limit
&& vep
->ve_hits
< vep
->ve_limit
) {
1267 if (vep
->ve_flags
& MDB_TGT_SPEC_AUTOSTOP
)
1268 (void) mdb_tgt_continue(t
, NULL
);
1271 mdb_tgt_vespec_rele(t
, vep
);
1275 if (t
->t_matched
!= T_SE_END
&& ncont
== n
)
1276 t
->t_flags
|= cbits
; /* apply continues (see above) */
1278 mdb_tgt_sespec_prune_all(t
);
1280 t
->t_status
.st_flags
&= ~MDB_TGT_BUSY
;
1281 t
->t_flags
&= ~MDB_TGT_F_BUSY
;
1284 bcopy(&t
->t_status
, tsp
, sizeof (mdb_tgt_status_t
));
1287 return (set_errno(error
));
1293 * This function is the common glue that connects the high-level target layer
1294 * continue functions (e.g. step and cont below) with the low-level
1295 * tgt_continue() function above. Since vespec callbacks may perform any
1296 * actions, including attempting to continue the target itself, we must be
1297 * prepared to be called while the target is still marked F_BUSY. In this
1298 * case, we just set a pending bit and return. When we return from the call
1299 * to tgt_continue() that made us busy into the tgt_request_continue() call
1300 * that is still on the stack, we will loop around and call tgt_continue()
1301 * again. This allows vespecs to continue the target without recursion.
1304 tgt_request_continue(mdb_tgt_t
*t
, mdb_tgt_status_t
*tsp
, uint_t tflag
,
1305 int (*t_cont
)(mdb_tgt_t
*, mdb_tgt_status_t
*))
1307 mdb_tgt_spec_desc_t desc
;
1312 if (t
->t_flags
& MDB_TGT_F_BUSY
) {
1313 t
->t_flags
|= tflag
;
1318 status
= tgt_continue(t
, tsp
, t_cont
);
1319 } while (status
== 0 && (t
->t_flags
& T_CONT_BITS
));
1322 for (sep
= t
->t_matched
; sep
!= T_SE_END
;
1323 sep
= sep
->se_matched
) {
1326 for (vep
= mdb_list_next(&sep
->se_velist
); vep
;
1327 vep
= mdb_list_next(vep
)) {
1328 if (vep
->ve_flags
& MDB_TGT_SPEC_SILENT
)
1330 warn("%s\n", sep
->se_ops
->se_info(t
, sep
,
1331 vep
, &desc
, buf
, sizeof (buf
)));
1335 mdb_callb_fire(MDB_CALLB_STCHG
);
1338 t
->t_flags
&= ~T_CONT_BITS
;
1343 * Restart target execution: we rely upon the underlying target implementation
1344 * to do most of the work for us. In particular, we assume it will properly
1345 * preserve the state of our event lists if the run fails for some reason,
1346 * and that it will reset all events to the IDLE state if the run succeeds.
1347 * If it is successful, we attempt to activate all of the idle sespecs. The
1348 * t_run() operation is defined to leave the target stopped at the earliest
1349 * possible point in execution, and then return control to the debugger,
1350 * awaiting a step or continue operation to set it running again.
1353 mdb_tgt_run(mdb_tgt_t
*t
, int argc
, const mdb_arg_t
*argv
)
1357 for (i
= 0; i
< argc
; i
++) {
1358 if (argv
->a_type
!= MDB_TYPE_STRING
)
1359 return (set_errno(EINVAL
));
1362 if (t
->t_ops
->t_run(t
, argc
, argv
) == -1)
1363 return (-1); /* errno is set for us */
1365 t
->t_flags
&= ~T_CONT_BITS
;
1366 (void) mdb_tgt_sespec_activate_all(t
);
1368 if (mdb
.m_term
!= NULL
)
1369 IOP_CTL(mdb
.m_term
, MDB_IOC_CTTY
, NULL
);
1375 mdb_tgt_step(mdb_tgt_t
*t
, mdb_tgt_status_t
*tsp
)
1377 return (tgt_request_continue(t
, tsp
, MDB_TGT_F_STEP
, t
->t_ops
->t_step
));
1381 mdb_tgt_step_out(mdb_tgt_t
*t
, mdb_tgt_status_t
*tsp
)
1383 t
->t_flags
|= MDB_TGT_F_STEP_OUT
; /* set flag even if tgt not busy */
1384 return (tgt_request_continue(t
, tsp
, 0, t
->t_ops
->t_cont
));
1388 mdb_tgt_next(mdb_tgt_t
*t
, mdb_tgt_status_t
*tsp
)
1390 t
->t_flags
|= MDB_TGT_F_NEXT
; /* set flag even if tgt not busy */
1391 return (tgt_request_continue(t
, tsp
, 0, t
->t_ops
->t_step
));
1395 mdb_tgt_continue(mdb_tgt_t
*t
, mdb_tgt_status_t
*tsp
)
1397 return (tgt_request_continue(t
, tsp
, MDB_TGT_F_CONT
, t
->t_ops
->t_cont
));
1401 mdb_tgt_signal(mdb_tgt_t
*t
, int sig
)
1403 return (t
->t_ops
->t_signal(t
, sig
));
1407 mdb_tgt_vespec_data(mdb_tgt_t
*t
, int vid
)
1409 mdb_vespec_t
*vep
= mdb_tgt_vespec_lookup(t
, vid
);
1412 (void) set_errno(EMDB_NOSESPEC
);
1416 return (vep
->ve_data
);
1420 * Return a structured description and comment string for the given vespec.
1421 * We fill in the common information from the vespec, and then call down to
1422 * the underlying sespec to provide the comment string and modify any
1423 * event type-specific information.
1426 mdb_tgt_vespec_info(mdb_tgt_t
*t
, int vid
, mdb_tgt_spec_desc_t
*sp
,
1427 char *buf
, size_t nbytes
)
1429 mdb_vespec_t
*vep
= mdb_tgt_vespec_lookup(t
, vid
);
1431 mdb_tgt_spec_desc_t desc
;
1436 bzero(sp
, sizeof (mdb_tgt_spec_desc_t
));
1437 (void) set_errno(EMDB_NOSESPEC
);
1446 sp
->spec_id
= vep
->ve_id
;
1447 sp
->spec_flags
= vep
->ve_flags
;
1448 sp
->spec_hits
= vep
->ve_hits
;
1449 sp
->spec_limit
= vep
->ve_limit
;
1450 sp
->spec_state
= sep
->se_state
;
1451 sp
->spec_errno
= sep
->se_errno
;
1452 sp
->spec_base
= NULL
;
1454 sp
->spec_data
= vep
->ve_data
;
1456 return (sep
->se_ops
->se_info(t
, sep
, vep
, sp
, buf
, nbytes
));
1460 * Qsort callback for sorting vespecs by VID, used below.
1463 tgt_vespec_compare(const mdb_vespec_t
**lp
, const mdb_vespec_t
**rp
)
1465 return ((*lp
)->ve_id
- (*rp
)->ve_id
);
1469 * Iterate over all vespecs and call the specified callback function with the
1470 * corresponding VID and caller data pointer. We want the callback function
1471 * to see a consistent, sorted snapshot of the vespecs, and allow the callback
1472 * to take actions such as deleting the vespec itself, so we cannot simply
1473 * iterate over the lists. Instead, we pre-allocate an array of vespec
1474 * pointers, fill it in and place an additional hold on each vespec, and then
1475 * sort it. After the callback has been executed on each vespec in the
1476 * sorted array, we remove our hold and free the temporary array.
1479 mdb_tgt_vespec_iter(mdb_tgt_t
*t
, mdb_tgt_vespec_f
*func
, void *p
)
1481 mdb_vespec_t
**veps
, **vepp
, **vend
;
1482 mdb_vespec_t
*vep
, *nvep
;
1485 uint_t vecnt
= t
->t_vecnt
;
1487 veps
= mdb_alloc(sizeof (mdb_vespec_t
*) * vecnt
, UM_SLEEP
);
1488 vend
= veps
+ vecnt
;
1491 for (sep
= mdb_list_next(&t
->t_active
); sep
; sep
= mdb_list_next(sep
)) {
1492 for (vep
= mdb_list_next(&sep
->se_velist
); vep
; vep
= nvep
) {
1493 mdb_tgt_vespec_hold(t
, vep
);
1494 nvep
= mdb_list_next(vep
);
1499 for (sep
= mdb_list_next(&t
->t_idle
); sep
; sep
= mdb_list_next(sep
)) {
1500 for (vep
= mdb_list_next(&sep
->se_velist
); vep
; vep
= nvep
) {
1501 mdb_tgt_vespec_hold(t
, vep
);
1502 nvep
= mdb_list_next(vep
);
1508 fail("target has %u vespecs on list but vecnt shows %u\n",
1509 (uint_t
)(vepp
- veps
), vecnt
);
1512 qsort(veps
, vecnt
, sizeof (mdb_vespec_t
*),
1513 (int (*)(const void *, const void *))tgt_vespec_compare
);
1515 for (vepp
= veps
; vepp
< vend
; vepp
++) {
1516 if (func(t
, p
, (*vepp
)->ve_id
, (*vepp
)->ve_data
) != 0)
1520 for (vepp
= veps
; vepp
< vend
; vepp
++)
1521 mdb_tgt_vespec_rele(t
, *vepp
);
1523 mdb_free(veps
, sizeof (mdb_vespec_t
*) * vecnt
);
1528 * Reset the vespec flags, match limit, and callback data to the specified
1529 * values. We silently correct invalid parameters, except for the VID.
1530 * The caller is required to query the existing properties and pass back
1531 * the existing values for any properties that should not be modified.
1532 * If the callback data is modified, the caller is responsible for cleaning
1533 * up any state associated with the previous value.
1536 mdb_tgt_vespec_modify(mdb_tgt_t
*t
, int id
, uint_t flags
,
1537 uint_t limit
, void *data
)
1539 mdb_vespec_t
*vep
= mdb_tgt_vespec_lookup(t
, id
);
1542 return (set_errno(EMDB_NOSESPEC
));
1545 * If the value of the MDB_TGT_SPEC_DISABLED bit is changing, call the
1546 * appropriate vespec function to do the enable/disable work.
1548 if ((flags
& MDB_TGT_SPEC_DISABLED
) !=
1549 (vep
->ve_flags
& MDB_TGT_SPEC_DISABLED
)) {
1550 if (flags
& MDB_TGT_SPEC_DISABLED
)
1551 (void) mdb_tgt_vespec_disable(t
, id
);
1553 (void) mdb_tgt_vespec_enable(t
, id
);
1557 * Make that only one MDB_TGT_SPEC_AUTO* bit is set in the new flags
1558 * value: extra bits are cleared according to order of precedence.
1560 if (flags
& MDB_TGT_SPEC_AUTOSTOP
)
1561 flags
&= ~(MDB_TGT_SPEC_AUTODEL
| MDB_TGT_SPEC_AUTODIS
);
1562 else if (flags
& MDB_TGT_SPEC_AUTODEL
)
1563 flags
&= ~MDB_TGT_SPEC_AUTODIS
;
1566 * The TEMPORARY property always takes precedence over STICKY.
1568 if (flags
& MDB_TGT_SPEC_TEMPORARY
)
1569 flags
&= ~MDB_TGT_SPEC_STICKY
;
1572 * If any MDB_TGT_SPEC_AUTO* bits are changing, reset the hit count
1573 * back to zero and clear all of the old auto bits.
1575 if ((flags
& T_AUTO_BITS
) != (vep
->ve_flags
& T_AUTO_BITS
)) {
1576 vep
->ve_flags
&= ~T_AUTO_BITS
;
1580 vep
->ve_flags
= (vep
->ve_flags
& T_IMPL_BITS
) | (flags
& ~T_IMPL_BITS
);
1581 vep
->ve_data
= data
;
1584 * If any MDB_TGT_SPEC_AUTO* flags are set, make sure the limit is at
1585 * least one. If none are set, reset it back to zero.
1587 if (vep
->ve_flags
& T_AUTO_BITS
)
1588 vep
->ve_limit
= MAX(limit
, 1);
1593 * As a convenience, we allow the caller to specify SPEC_DELETED in
1594 * the flags field as indication that the event should be deleted.
1596 if (flags
& MDB_TGT_SPEC_DELETED
)
1597 (void) mdb_tgt_vespec_delete(t
, id
);
1603 * Remove the user disabled bit from the specified vespec, and attempt to
1604 * activate the underlying sespec and move it to the active list if possible.
1607 mdb_tgt_vespec_enable(mdb_tgt_t
*t
, int id
)
1609 mdb_vespec_t
*vep
= mdb_tgt_vespec_lookup(t
, id
);
1612 return (set_errno(EMDB_NOSESPEC
));
1614 if (vep
->ve_flags
& MDB_TGT_SPEC_DISABLED
) {
1615 ASSERT(mdb_list_next(vep
) == NULL
);
1616 vep
->ve_flags
&= ~MDB_TGT_SPEC_DISABLED
;
1617 if (mdb_tgt_sespec_activate_one(t
, vep
->ve_se
) < 0)
1618 return (-1); /* errno is set for us */
1625 * Set the user disabled bit on the specified vespec, and move it to the idle
1626 * list. If the vespec is not alone with its sespec or if it is a currently
1627 * matched event, we must always create a new idle sespec and move the vespec
1628 * there. If the vespec was alone and active, we can simply idle the sespec.
1631 mdb_tgt_vespec_disable(mdb_tgt_t
*t
, int id
)
1633 mdb_vespec_t
*vep
= mdb_tgt_vespec_lookup(t
, id
);
1637 return (set_errno(EMDB_NOSESPEC
));
1639 if (vep
->ve_flags
& MDB_TGT_SPEC_DISABLED
)
1640 return (0); /* already disabled */
1642 if (mdb_list_prev(vep
) != NULL
|| mdb_list_next(vep
) != NULL
||
1643 vep
->ve_se
->se_matched
!= NULL
) {
1645 sep
= mdb_tgt_sespec_insert(t
, vep
->ve_se
->se_ops
, &t
->t_idle
);
1647 mdb_list_delete(&vep
->ve_se
->se_velist
, vep
);
1648 mdb_tgt_sespec_rele(t
, vep
->ve_se
);
1650 mdb_list_append(&sep
->se_velist
, vep
);
1651 mdb_tgt_sespec_hold(t
, sep
);
1653 vep
->ve_flags
&= ~MDB_TGT_SPEC_MATCHED
;
1656 } else if (vep
->ve_se
->se_state
!= MDB_TGT_SPEC_IDLE
)
1657 mdb_tgt_sespec_idle_one(t
, vep
->ve_se
, EMDB_SPECDIS
);
1659 vep
->ve_flags
|= MDB_TGT_SPEC_DISABLED
;
1664 * Delete the given vespec. We use the MDB_TGT_SPEC_DELETED flag to ensure that
1665 * multiple calls to mdb_tgt_vespec_delete to not attempt to decrement the
1666 * reference count on the vespec more than once. This is because the vespec
1667 * may remain referenced if it is currently held by another routine (e.g.
1668 * vespec_iter), and so the user could attempt to delete it more than once
1669 * since it reference count will be >= 2 prior to the first delete call.
1672 mdb_tgt_vespec_delete(mdb_tgt_t
*t
, int id
)
1674 mdb_vespec_t
*vep
= mdb_tgt_vespec_lookup(t
, id
);
1677 return (set_errno(EMDB_NOSESPEC
));
1679 if (vep
->ve_flags
& MDB_TGT_SPEC_DELETED
)
1680 return (set_errno(EBUSY
));
1682 vep
->ve_flags
|= MDB_TGT_SPEC_DELETED
;
1683 mdb_tgt_vespec_rele(t
, vep
);
1688 mdb_tgt_add_vbrkpt(mdb_tgt_t
*t
, uintptr_t addr
,
1689 int spec_flags
, mdb_tgt_se_f
*func
, void *p
)
1691 return (t
->t_ops
->t_add_vbrkpt(t
, addr
, spec_flags
, func
, p
));
1695 mdb_tgt_add_sbrkpt(mdb_tgt_t
*t
, const char *symbol
,
1696 int spec_flags
, mdb_tgt_se_f
*func
, void *p
)
1698 return (t
->t_ops
->t_add_sbrkpt(t
, symbol
, spec_flags
, func
, p
));
1702 mdb_tgt_add_pwapt(mdb_tgt_t
*t
, physaddr_t pa
, size_t n
, uint_t flags
,
1703 int spec_flags
, mdb_tgt_se_f
*func
, void *p
)
1705 if ((flags
& ~MDB_TGT_WA_RWX
) || flags
== 0) {
1706 (void) set_errno(EINVAL
);
1711 (void) set_errno(EMDB_WPRANGE
);
1715 return (t
->t_ops
->t_add_pwapt(t
, pa
, n
, flags
, spec_flags
, func
, p
));
1719 mdb_tgt_add_vwapt(mdb_tgt_t
*t
, uintptr_t va
, size_t n
, uint_t flags
,
1720 int spec_flags
, mdb_tgt_se_f
*func
, void *p
)
1722 if ((flags
& ~MDB_TGT_WA_RWX
) || flags
== 0) {
1723 (void) set_errno(EINVAL
);
1728 (void) set_errno(EMDB_WPRANGE
);
1732 return (t
->t_ops
->t_add_vwapt(t
, va
, n
, flags
, spec_flags
, func
, p
));
1736 mdb_tgt_add_iowapt(mdb_tgt_t
*t
, uintptr_t addr
, size_t n
, uint_t flags
,
1737 int spec_flags
, mdb_tgt_se_f
*func
, void *p
)
1739 if ((flags
& ~MDB_TGT_WA_RWX
) || flags
== 0) {
1740 (void) set_errno(EINVAL
);
1744 if (addr
+ n
< addr
) {
1745 (void) set_errno(EMDB_WPRANGE
);
1749 return (t
->t_ops
->t_add_iowapt(t
, addr
, n
, flags
, spec_flags
, func
, p
));
1753 mdb_tgt_add_sysenter(mdb_tgt_t
*t
, int sysnum
,
1754 int spec_flags
, mdb_tgt_se_f
*func
, void *p
)
1756 return (t
->t_ops
->t_add_sysenter(t
, sysnum
, spec_flags
, func
, p
));
1760 mdb_tgt_add_sysexit(mdb_tgt_t
*t
, int sysnum
,
1761 int spec_flags
, mdb_tgt_se_f
*func
, void *p
)
1763 return (t
->t_ops
->t_add_sysexit(t
, sysnum
, spec_flags
, func
, p
));
1767 mdb_tgt_add_signal(mdb_tgt_t
*t
, int sig
,
1768 int spec_flags
, mdb_tgt_se_f
*func
, void *p
)
1770 return (t
->t_ops
->t_add_signal(t
, sig
, spec_flags
, func
, p
));
1774 mdb_tgt_add_fault(mdb_tgt_t
*t
, int flt
,
1775 int spec_flags
, mdb_tgt_se_f
*func
, void *p
)
1777 return (t
->t_ops
->t_add_fault(t
, flt
, spec_flags
, func
, p
));
1781 mdb_tgt_getareg(mdb_tgt_t
*t
, mdb_tgt_tid_t tid
,
1782 const char *rname
, mdb_tgt_reg_t
*rp
)
1784 return (t
->t_ops
->t_getareg(t
, tid
, rname
, rp
));
1788 mdb_tgt_putareg(mdb_tgt_t
*t
, mdb_tgt_tid_t tid
,
1789 const char *rname
, mdb_tgt_reg_t r
)
1791 return (t
->t_ops
->t_putareg(t
, tid
, rname
, r
));
1795 mdb_tgt_stack_iter(mdb_tgt_t
*t
, const mdb_tgt_gregset_t
*gregs
,
1796 mdb_tgt_stack_f
*cb
, void *p
)
1798 return (t
->t_ops
->t_stack_iter(t
, gregs
, cb
, p
));
1802 mdb_tgt_xdata_iter(mdb_tgt_t
*t
, mdb_tgt_xdata_f
*func
, void *private)
1806 for (xdp
= mdb_list_next(&t
->t_xdlist
); xdp
; xdp
= mdb_list_next(xdp
)) {
1807 if (func(private, xdp
->xd_name
, xdp
->xd_desc
,
1808 xdp
->xd_copy(t
, NULL
, 0)) != 0)
1816 mdb_tgt_getxdata(mdb_tgt_t
*t
, const char *name
, void *buf
, size_t nbytes
)
1820 for (xdp
= mdb_list_next(&t
->t_xdlist
); xdp
; xdp
= mdb_list_next(xdp
)) {
1821 if (strcmp(xdp
->xd_name
, name
) == 0)
1822 return (xdp
->xd_copy(t
, buf
, nbytes
));
1825 return (set_errno(ENODATA
));
1831 return (set_errno(EMDB_TGTNOTSUP
));
1837 (void) set_errno(EMDB_TGTNOTSUP
);
1848 mdb_tgt_xdata_insert(mdb_tgt_t
*t
, const char *name
, const char *desc
,
1849 ssize_t (*copy
)(mdb_tgt_t
*, void *, size_t))
1853 for (xdp
= mdb_list_next(&t
->t_xdlist
); xdp
; xdp
= mdb_list_next(xdp
)) {
1854 if (strcmp(xdp
->xd_name
, name
) == 0)
1855 return (set_errno(EMDB_XDEXISTS
));
1858 xdp
= mdb_alloc(sizeof (mdb_xdata_t
), UM_SLEEP
);
1859 mdb_list_append(&t
->t_xdlist
, xdp
);
1861 xdp
->xd_name
= name
;
1862 xdp
->xd_desc
= desc
;
1863 xdp
->xd_copy
= copy
;
1869 mdb_tgt_xdata_delete(mdb_tgt_t
*t
, const char *name
)
1873 for (xdp
= mdb_list_next(&t
->t_xdlist
); xdp
; xdp
= mdb_list_next(xdp
)) {
1874 if (strcmp(xdp
->xd_name
, name
) == 0) {
1875 mdb_list_delete(&t
->t_xdlist
, xdp
);
1876 mdb_free(xdp
, sizeof (mdb_xdata_t
));
1881 return (set_errno(EMDB_NOXD
));
1885 mdb_tgt_sym_match(const GElf_Sym
*sym
, uint_t mask
)
1887 #if STT_NUM != (STT_TLS + 1)
1888 #error "STT_NUM has grown. update mdb_tgt_sym_match()"
1891 uchar_t s_bind
= GELF_ST_BIND(sym
->st_info
);
1892 uchar_t s_type
= GELF_ST_TYPE(sym
->st_info
);
1895 * In case you haven't already guessed, this relies on the bitmask
1896 * used by <mdb/mdb_target.h> and <libproc.h> for encoding symbol
1897 * type and binding matching the order of STB and STT constants
1898 * in <sys/elf.h>. Changes to ELF must maintain binary
1899 * compatibility, so I think this is reasonably fair game.
1901 if (s_bind
< STB_NUM
&& s_type
< STT_NUM
) {
1902 uint_t type
= (1 << (s_type
+ 8)) | (1 << s_bind
);
1903 return ((type
& ~mask
) == 0);
1906 return (0); /* Unknown binding or type; fail to match */
1910 mdb_tgt_elf_export(mdb_gelf_file_t
*gf
)
1912 GElf_Xword d
= 0, t
= 0;
1913 GElf_Addr b
= 0, e
= 0;
1918 * Reset legacy adb variables based on the specified ELF object file
1919 * provided by the target. We define these variables:
1921 * b - the address of the data segment (first writeable Phdr)
1922 * d - the size of the data segment
1923 * e - the address of the entry point
1924 * m - the magic number identifying the file
1925 * t - the address of the text segment (first executable Phdr)
1928 const GElf_Phdr
*text
= NULL
, *data
= NULL
;
1931 e
= gf
->gf_ehdr
.e_entry
;
1932 bcopy(&gf
->gf_ehdr
.e_ident
[EI_MAG0
], &m
, sizeof (m
));
1934 for (i
= 0; i
< gf
->gf_npload
; i
++) {
1935 if (text
== NULL
&& (gf
->gf_phdrs
[i
].p_flags
& PF_X
))
1936 text
= &gf
->gf_phdrs
[i
];
1937 if (data
== NULL
&& (gf
->gf_phdrs
[i
].p_flags
& PF_W
))
1938 data
= &gf
->gf_phdrs
[i
];
1949 if ((v
= mdb_nv_lookup(&mdb
.m_nv
, "b")) != NULL
)
1950 mdb_nv_set_value(v
, b
);
1951 if ((v
= mdb_nv_lookup(&mdb
.m_nv
, "d")) != NULL
)
1952 mdb_nv_set_value(v
, d
);
1953 if ((v
= mdb_nv_lookup(&mdb
.m_nv
, "e")) != NULL
)
1954 mdb_nv_set_value(v
, e
);
1955 if ((v
= mdb_nv_lookup(&mdb
.m_nv
, "m")) != NULL
)
1956 mdb_nv_set_value(v
, m
);
1957 if ((v
= mdb_nv_lookup(&mdb
.m_nv
, "t")) != NULL
)
1958 mdb_nv_set_value(v
, t
);
1963 mdb_tgt_sespec_hold(mdb_tgt_t
*t
, mdb_sespec_t
*sep
)
1966 ASSERT(sep
->se_refs
!= 0);
1970 mdb_tgt_sespec_rele(mdb_tgt_t
*t
, mdb_sespec_t
*sep
)
1972 ASSERT(sep
->se_refs
!= 0);
1974 if (--sep
->se_refs
== 0) {
1975 mdb_dprintf(MDB_DBG_TGT
, "destroying sespec %p\n", (void *)sep
);
1976 ASSERT(mdb_list_next(&sep
->se_velist
) == NULL
);
1978 if (sep
->se_state
!= MDB_TGT_SPEC_IDLE
) {
1979 sep
->se_ops
->se_dtor(t
, sep
);
1980 mdb_list_delete(&t
->t_active
, sep
);
1982 mdb_list_delete(&t
->t_idle
, sep
);
1984 mdb_free(sep
, sizeof (mdb_sespec_t
));
1989 mdb_tgt_sespec_insert(mdb_tgt_t
*t
, const mdb_se_ops_t
*ops
, mdb_list_t
*list
)
1991 mdb_sespec_t
*sep
= mdb_zalloc(sizeof (mdb_sespec_t
), UM_SLEEP
);
1993 if (list
== &t
->t_active
)
1994 sep
->se_state
= MDB_TGT_SPEC_ACTIVE
;
1996 sep
->se_state
= MDB_TGT_SPEC_IDLE
;
1998 mdb_list_append(list
, sep
);
2004 mdb_tgt_sespec_lookup_active(mdb_tgt_t
*t
, const mdb_se_ops_t
*ops
, void *args
)
2008 for (sep
= mdb_list_next(&t
->t_active
); sep
; sep
= mdb_list_next(sep
)) {
2009 if (sep
->se_ops
== ops
&& sep
->se_ops
->se_secmp(t
, sep
, args
))
2017 mdb_tgt_sespec_lookup_idle(mdb_tgt_t
*t
, const mdb_se_ops_t
*ops
, void *args
)
2021 for (sep
= mdb_list_next(&t
->t_idle
); sep
; sep
= mdb_list_next(sep
)) {
2022 if (sep
->se_ops
== ops
&& sep
->se_ops
->se_vecmp(t
,
2023 mdb_list_next(&sep
->se_velist
), args
))
2032 mdb_tgt_vespec_hold(mdb_tgt_t
*t
, mdb_vespec_t
*vep
)
2035 ASSERT(vep
->ve_refs
!= 0);
2039 mdb_tgt_vespec_rele(mdb_tgt_t
*t
, mdb_vespec_t
*vep
)
2041 ASSERT(vep
->ve_refs
!= 0);
2043 if (--vep
->ve_refs
== 0) {
2045 * Remove this vespec from the sespec's velist and decrement
2046 * the reference count on the sespec.
2048 mdb_list_delete(&vep
->ve_se
->se_velist
, vep
);
2049 mdb_tgt_sespec_rele(t
, vep
->ve_se
);
2052 * If we are deleting the most recently assigned VID, reset
2053 * t_vepos or t_veneg as appropriate to re-use that number.
2054 * This could be enhanced to re-use any free number by
2055 * maintaining a bitmap or hash of the allocated IDs.
2057 if (vep
->ve_id
> 0 && t
->t_vepos
== vep
->ve_id
+ 1)
2058 t
->t_vepos
= vep
->ve_id
;
2059 else if (vep
->ve_id
< 0 && t
->t_veneg
== -vep
->ve_id
+ 1)
2060 t
->t_veneg
= -vep
->ve_id
;
2063 * Call the destructor to clean up ve_args, and then free
2064 * the actual vespec structure.
2067 mdb_free(vep
, sizeof (mdb_vespec_t
));
2069 ASSERT(t
->t_vecnt
!= 0);
2075 mdb_tgt_vespec_insert(mdb_tgt_t
*t
, const mdb_se_ops_t
*ops
, int flags
,
2076 mdb_tgt_se_f
*func
, void *data
, void *args
, void (*dtor
)(mdb_vespec_t
*))
2078 mdb_vespec_t
*vep
= mdb_zalloc(sizeof (mdb_vespec_t
), UM_SLEEP
);
2080 int id
, mult
, *seqp
;
2084 * Make that only one MDB_TGT_SPEC_AUTO* bit is set in the new flags
2085 * value: extra bits are cleared according to order of precedence.
2087 if (flags
& MDB_TGT_SPEC_AUTOSTOP
)
2088 flags
&= ~(MDB_TGT_SPEC_AUTODEL
| MDB_TGT_SPEC_AUTODIS
);
2089 else if (flags
& MDB_TGT_SPEC_AUTODEL
)
2090 flags
&= ~MDB_TGT_SPEC_AUTODIS
;
2093 * The TEMPORARY property always takes precedence over STICKY.
2095 if (flags
& MDB_TGT_SPEC_TEMPORARY
)
2096 flags
&= ~MDB_TGT_SPEC_STICKY
;
2099 * Find a matching sespec or create a new one on the appropriate list.
2100 * We always create a new sespec if the vespec is created disabled.
2102 if (flags
& MDB_TGT_SPEC_DISABLED
)
2103 sep
= mdb_tgt_sespec_insert(t
, ops
, &t
->t_idle
);
2104 else if ((sep
= mdb_tgt_sespec_lookup_active(t
, ops
, args
)) == NULL
&&
2105 (sep
= mdb_tgt_sespec_lookup_idle(t
, ops
, args
)) == NULL
)
2106 sep
= mdb_tgt_sespec_insert(t
, ops
, &t
->t_active
);
2109 * Generate a new ID for the vespec. Increasing positive integers are
2110 * assigned to visible vespecs; decreasing negative integers are
2111 * assigned to hidden vespecs. The target saves our most recent choice.
2113 if (flags
& MDB_TGT_SPEC_INTERNAL
) {
2123 while (mdb_tgt_vespec_lookup(t
, id
* mult
) != NULL
)
2124 id
= MAX(id
+ 1, 1);
2126 *seqp
= MAX(id
+ 1, 1);
2128 vep
->ve_id
= id
* mult
;
2129 vep
->ve_flags
= flags
& ~(MDB_TGT_SPEC_MATCHED
| MDB_TGT_SPEC_DELETED
);
2131 vep
->ve_callback
= func
;
2132 vep
->ve_data
= data
;
2133 vep
->ve_args
= args
;
2134 vep
->ve_dtor
= dtor
;
2136 mdb_list_append(&sep
->se_velist
, vep
);
2137 mdb_tgt_sespec_hold(t
, sep
);
2139 mdb_tgt_vespec_hold(t
, vep
);
2143 * If this vespec is the first reference to the sespec and it's active,
2144 * then it is newly created and we should attempt to initialize it.
2145 * If se_ctor fails, then move the sespec back to the idle list.
2147 if (sep
->se_refs
== 1 && sep
->se_state
== MDB_TGT_SPEC_ACTIVE
&&
2148 sep
->se_ops
->se_ctor(t
, sep
, vep
->ve_args
) == -1) {
2150 mdb_list_delete(&t
->t_active
, sep
);
2151 mdb_list_append(&t
->t_idle
, sep
);
2153 sep
->se_state
= MDB_TGT_SPEC_IDLE
;
2154 sep
->se_errno
= errno
;
2155 sep
->se_data
= NULL
;
2159 * If the sespec is active and the target is currently running (because
2160 * we grabbed it using PGRAB_NOSTOP), then go ahead and attempt to arm
2161 * the sespec so it will take effect immediately.
2163 if (sep
->se_state
== MDB_TGT_SPEC_ACTIVE
&&
2164 t
->t_status
.st_state
== MDB_TGT_RUNNING
)
2165 mdb_tgt_sespec_arm_one(t
, sep
);
2167 mdb_dprintf(MDB_DBG_TGT
, "inserted [ %d ] sep=%p refs=%u state=%d\n",
2168 vep
->ve_id
, (void *)sep
, sep
->se_refs
, sep
->se_state
);
2170 return (vep
->ve_id
);
2174 * Search the target's active, idle, and disabled lists for the vespec matching
2175 * the specified VID, and return a pointer to it, or NULL if no match is found.
2178 mdb_tgt_vespec_lookup(mdb_tgt_t
*t
, int vid
)
2184 return (NULL
); /* 0 is never a valid VID */
2186 for (sep
= mdb_list_next(&t
->t_active
); sep
; sep
= mdb_list_next(sep
)) {
2187 for (vep
= mdb_list_next(&sep
->se_velist
); vep
;
2188 vep
= mdb_list_next(vep
)) {
2189 if (vep
->ve_id
== vid
)
2194 for (sep
= mdb_list_next(&t
->t_idle
); sep
; sep
= mdb_list_next(sep
)) {
2195 for (vep
= mdb_list_next(&sep
->se_velist
); vep
;
2196 vep
= mdb_list_next(vep
)) {
2197 if (vep
->ve_id
== vid
)
2207 no_ve_dtor(mdb_vespec_t
*vep
)
2209 /* default destructor does nothing */
2214 no_se_f(mdb_tgt_t
*t
, int vid
, void *data
)
2216 /* default callback does nothing */
2221 no_se_dtor(mdb_tgt_t
*t
, mdb_sespec_t
*sep
)
2223 /* default destructor does nothing */
2228 no_se_secmp(mdb_tgt_t
*t
, mdb_sespec_t
*sep
, void *args
)
2230 return (sep
->se_data
== args
);
2235 no_se_vecmp(mdb_tgt_t
*t
, mdb_vespec_t
*vep
, void *args
)
2237 return (vep
->ve_args
== args
);
2242 no_se_arm(mdb_tgt_t
*t
, mdb_sespec_t
*sep
)
2244 return (0); /* return success */
2249 no_se_disarm(mdb_tgt_t
*t
, mdb_sespec_t
*sep
)
2251 return (0); /* return success */
2256 no_se_cont(mdb_tgt_t
*t
, mdb_sespec_t
*sep
, mdb_tgt_status_t
*tsp
)
2258 if (tsp
!= &t
->t_status
)
2259 bcopy(&t
->t_status
, tsp
, sizeof (mdb_tgt_status_t
));
2261 return (0); /* return success */
2265 mdb_tgt_register_dcmds(mdb_tgt_t
*t
, const mdb_dcmd_t
*dcp
, int flags
)
2269 for (; dcp
->dc_name
!= NULL
; dcp
++) {
2270 if (mdb_module_add_dcmd(t
->t_module
, dcp
, flags
) == -1) {
2271 warn("failed to add dcmd %s", dcp
->dc_name
);
2276 return (fail
> 0 ? -1 : 0);
2280 mdb_tgt_register_walkers(mdb_tgt_t
*t
, const mdb_walker_t
*wp
, int flags
)
2284 for (; wp
->walk_name
!= NULL
; wp
++) {
2285 if (mdb_module_add_walker(t
->t_module
, wp
, flags
) == -1) {
2286 warn("failed to add walk %s", wp
->walk_name
);
2291 return (fail
> 0 ? -1 : 0);
2295 mdb_tgt_register_regvars(mdb_tgt_t
*t
, const mdb_tgt_regdesc_t
*rdp
,
2296 const mdb_nv_disc_t
*disc
, int flags
)
2298 for (; rdp
->rd_name
!= NULL
; rdp
++) {
2299 if (!(rdp
->rd_flags
& MDB_TGT_R_EXPORT
))
2300 continue; /* Don't export register as a variable */
2302 if (rdp
->rd_flags
& MDB_TGT_R_RDONLY
)
2303 flags
|= MDB_NV_RDONLY
;
2305 (void) mdb_nv_insert(&mdb
.m_nv
, rdp
->rd_name
, disc
,
2306 (uintptr_t)t
, MDB_NV_PERSIST
| flags
);