2 * Copyright (c) 1989, 1993
3 * The Regents of the University of California. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. Neither the name of the University nor the names of its contributors
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * @(#)kern_ktrace.c 8.2 (Berkeley) 9/23/93
30 * $FreeBSD: src/sys/kern/kern_ktrace.c,v 1.35.2.6 2002/07/05 22:36:38 darrenr Exp $
33 #include "opt_ktrace.h"
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/sysproto.h>
38 #include <sys/kernel.h>
40 #include <sys/fcntl.h>
42 #include <sys/nlookup.h>
43 #include <sys/vnode.h>
44 #include <sys/ktrace.h>
45 #include <sys/malloc.h>
46 #include <sys/syslog.h>
47 #include <sys/sysent.h>
49 #include <vm/vm_zone.h>
/* Private malloc zone used for all ktrace allocations (syscall records,
 * tracenodes, user-supplied utrace buffers). */
51 static MALLOC_DEFINE(M_KTRACE
, "KTRACE", "KTRACE");
/* Forward declarations for the file-local ktrace helpers defined below. */
54 static void ktrgetheader (struct ktr_header
*kth
, int type
);
55 static struct ktr_syscall
*ktrgetsyscall(struct ktr_header
*kth
,
56 struct ktr_syscall
*ktp_cache
, int narg
);
57 static void ktrputsyscall(struct ktr_syscall
*ktp_cache
,
58 struct ktr_syscall
*ktp
);
59 static void ktrwrite (struct lwp
*, struct ktr_header
*, struct uio
*);
60 static int ktrcanset (struct thread
*,struct proc
*);
61 static int ktrsetchildren (struct thread
*, struct proc
*,
62 int, int, ktrace_node_t
);
63 static int ktrops (struct thread
*,struct proc
*,int,int, ktrace_node_t
);
/*
 * ktrgetheader - fill in a ktrace record header for the current thread:
 * pid, tid, command name, threaded flag and originating cpu.  The
 * timestamp is deliberately NOT set here; ktrwrite() sets it while
 * holding the vnode lock so timestamps never go backwards in the file.
 *
 * NOTE(review): no visible line stores 'type' into the header
 * (kth->ktr_type) in this fragment -- confirm against the full file.
 */
70 ktrgetheader(struct ktr_header
*kth
, int type
)
72 thread_t td
= curthread
;
73 struct proc
*p
= td
->td_proc
;
74 struct lwp
*lp
= td
->td_lwp
;
77 /* XXX threaded flag is a hack at the moment */
78 kth
->ktr_flags
= (p
->p_nthreads
> 1) ? KTRH_THREADED
: 0;
/* Encode the cpu that generated the event into the header flags. */
79 kth
->ktr_flags
|= KTRH_CPUID_ENCODE(td
->td_gd
->gd_cpuid
);
80 /*microtime(&kth->ktr_time); set in ktrwrite */
81 kth
->ktr_pid
= p
->p_pid
;
82 kth
->ktr_tid
= lp
->lwp_tid
;
/* Copy the process name including the terminating NUL. */
83 bcopy(p
->p_comm
, kth
->ktr_comm
, MAXCOMLEN
+ 1);
/*
 * ktrgetsyscall - obtain a ktr_syscall record big enough for 'narg'
 * arguments and attach it to *kth as the record payload.  The caller's
 * ktp_cache is used when it is large enough; otherwise a record is
 * kmalloc'd (M_WAITOK: may block, never fails) and must be released
 * with ktrputsyscall().
 *
 * NOTE(review): the 'len' declaration and the return statement
 * (presumably returning ktp_cache, per the prototype's return type)
 * are elided from this fragment -- confirm.
 */
88 ktrgetsyscall(struct ktr_header
*kth
, struct ktr_syscall
*ktp_cache
, int narg
)
/* Size of the record including the variable-length argument array. */
92 len
= offsetof(struct ktr_syscall
, ktr_args
[narg
]);
93 if (len
> sizeof(*ktp_cache
))
94 ktp_cache
= kmalloc(len
, M_KTRACE
, M_WAITOK
);
95 kth
->ktr_buf
= (caddr_t
)ktp_cache
;
96 kth
->ktr_len
= (int)len
;
102 ktrputsyscall(struct ktr_syscall
*ktp_cache
, struct ktr_syscall
*ktp
)
104 if (ktp
!= ktp_cache
)
105 kfree(ktp
, M_KTRACE
);
/*
 * ktrsyscall - emit a KTR_SYSCALL trace record for system call 'code'
 * with 'narg' arguments.  KTRFAC_ACTIVE is set for the duration so the
 * trace write itself cannot be recursively ktraced.
 *
 * NOTE(review): the for-loop body that copies args[] into the record
 * (presumably *argp++ = args[i]) and the 'argp'/'i' declarations are
 * elided from this fragment -- confirm against the full file.
 */
109 ktrsyscall(struct lwp
*lp
, int code
, int narg
, register_t args
[])
111 struct ktr_header kth
;
112 struct ktr_syscall ktp_cache
;
113 struct ktr_syscall
*ktp
;
118 * Setting the active bit prevents a ktrace recursion from the
119 * ktracing op itself.
121 lp
->lwp_traceflag
|= KTRFAC_ACTIVE
;
122 ktrgetheader(&kth
, KTR_SYSCALL
);
124 ktp
= ktrgetsyscall(&kth
, &ktp_cache
, narg
);
125 ktp
->ktr_code
= code
;
126 ktp
->ktr_narg
= narg
;
127 argp
= &ktp
->ktr_args
[0];
128 for (i
= 0; i
< narg
; i
++)
130 ktrwrite(lp
, &kth
, NULL
);
/* Release the record if it was heap allocated. */
132 ktrputsyscall(&ktp_cache
, ktp
);
133 lp
->lwp_traceflag
&= ~KTRFAC_ACTIVE
;
/*
 * ktrsysret - emit a KTR_SYSRET trace record describing a system call
 * return: error status plus the first return value (the second return
 * value is not logged, per the XXX below).
 *
 * NOTE(review): the line recording 'code' into the record (presumably
 * ktp.ktr_code) is elided from this fragment -- confirm.
 */
137 ktrsysret(struct lwp
*lp
, int code
, int error
, register_t retval
)
139 struct ktr_header kth
;
140 struct ktr_sysret ktp
;
142 lp
->lwp_traceflag
|= KTRFAC_ACTIVE
;
143 ktrgetheader(&kth
, KTR_SYSRET
);
146 ktp
.ktr_error
= error
;
148 ktp
.ktr_retval
= retval
; /* what about val2 ? */
/* Fixed-size payload: the ktr_sysret structure on our stack. */
152 kth
.ktr_buf
= (caddr_t
)&ktp
;
153 kth
.ktr_len
= (int)sizeof(struct ktr_sysret
);
155 ktrwrite(lp
, &kth
, NULL
);
156 lp
->lwp_traceflag
&= ~KTRFAC_ACTIVE
;
/*
 * ktrnamei - emit a KTR_NAMEI trace record for a path-name lookup.
 * The record payload is the path string (length from strlen, no NUL).
 *
 * NOTE(review): the line attaching 'path' to kth.ktr_buf is elided
 * from this fragment -- confirm against the full file.
 */
160 ktrnamei(struct lwp
*lp
, char *path
)
162 struct ktr_header kth
;
164 lp
->lwp_traceflag
|= KTRFAC_ACTIVE
;
165 ktrgetheader(&kth
, KTR_NAMEI
);
167 kth
.ktr_len
= (int)strlen(path
);
170 ktrwrite(lp
, &kth
, NULL
);
171 lp
->lwp_traceflag
&= ~KTRFAC_ACTIVE
;
/*
 * ktrgenio - emit a KTR_GENIO trace record for a generic I/O operation
 * on descriptor 'fd'.  The fixed header carries a ktr_genio structure;
 * the transferred data itself is passed through via 'uio', which is
 * redirected to UIO_WRITE so ktrwrite() can append it to the trace
 * file after the header.
 *
 * NOTE(review): the lines filling ktg (fd/rw) and any uio rewind are
 * elided from this fragment -- confirm against the full file.
 */
175 ktrgenio(struct lwp
*lp
, int fd
, enum uio_rw rw
, struct uio
*uio
, int error
)
177 struct ktr_header kth
;
178 struct ktr_genio ktg
;
182 lp
->lwp_traceflag
|= KTRFAC_ACTIVE
;
183 ktrgetheader(&kth
, KTR_GENIO
);
187 kth
.ktr_buf
= (caddr_t
)&ktg
;
188 kth
.ktr_len
= (int)sizeof(struct ktr_genio
);
/* Re-point the uio at the trace file as a write. */
190 uio
->uio_rw
= UIO_WRITE
;
192 ktrwrite(lp
, &kth
, uio
);
193 lp
->lwp_traceflag
&= ~KTRFAC_ACTIVE
;
/*
 * ktrpsig - emit a KTR_PSIG trace record describing delivery of signal
 * 'sig' to the lwp.
 *
 * NOTE(review): the declaration of 'kp' (struct ktr_psig) and the
 * lines recording action/mask/code are elided from this fragment --
 * only the signo assignment is visible.  Confirm against the full
 * file.
 */
197 ktrpsig(struct lwp
*lp
, int sig
, sig_t action
, sigset_t
*mask
, int code
)
199 struct ktr_header kth
;
202 lp
->lwp_traceflag
|= KTRFAC_ACTIVE
;
203 ktrgetheader(&kth
, KTR_PSIG
);
205 kp
.signo
= (char)sig
;
209 kth
.ktr_buf
= (caddr_t
)&kp
;
210 kth
.ktr_len
= (int)sizeof(struct ktr_psig
);
212 ktrwrite(lp
, &kth
, NULL
);
213 lp
->lwp_traceflag
&= ~KTRFAC_ACTIVE
;
/*
 * ktrcsw - emit a KTR_CSW (context switch) trace record.
 *
 * NOTE(review): the declaration of 'kc' (struct ktr_csw) and the
 * lines recording 'out'/'user' into it are elided from this fragment
 * -- confirm against the full file.
 */
217 ktrcsw(struct lwp
*lp
, int out
, int user
)
219 struct ktr_header kth
;
222 lp
->lwp_traceflag
|= KTRFAC_ACTIVE
;
223 ktrgetheader(&kth
, KTR_CSW
);
227 kth
.ktr_buf
= (caddr_t
)&kc
;
228 kth
.ktr_len
= (int)sizeof(struct ktr_csw
);
230 ktrwrite(lp
, &kth
, NULL
);
231 lp
->lwp_traceflag
&= ~KTRFAC_ACTIVE
;
235 /* Interface and common routines */
/*
 * Argument bundle handed to ktrace_clear_callback() through
 * allproc_scan() when clearing every use of a trace file.
 *
 * NOTE(review): the struct's remaining members (a 'rootclear' flag is
 * read by the callback below) and closing brace are elided from this
 * fragment.
 */
241 struct ktrace_clear_info
{
242 ktrace_node_t tracenode
;
/* allproc_scan() callback, defined below. */
247 static int ktrace_clear_callback(struct proc
*p
, void *data
);
/*
 * sys_ktrace - the ktrace(2) system call.  Non-CLEAR ops first open
 * the trace file (regular files only, O_NOFOLLOW) and wrap its vnode
 * in a reference-counted tracenode.  KTROP_CLEARFILE then detaches
 * every process from that file via allproc_scan(); otherwise the
 * requested facilities are applied to a single process or, for a
 * negative pid, to each member of a process group, descending to
 * children when KTRFLAG_DESCEND is set.  The local tracenode ref is
 * dropped on the way out (processes hold their own refs).
 *
 * NOTE(review): many control-flow lines (error returns, braces,
 * else-branches, permission checks) are elided from this fragment.
 */
255 sys_ktrace(struct ktrace_args
*uap
)
258 struct ktrace_clear_info info
;
259 struct thread
*td
= curthread
;
260 struct proc
*curp
= td
->td_proc
;
/* Userland may not set the root-only marker bit directly. */
263 int facs
= uap
->facs
& ~KTRFAC_ROOT
;
264 int ops
= KTROP(uap
->ops
);
265 int descend
= uap
->ops
& KTRFLAG_DESCEND
;
268 struct nlookupdata nd
;
269 ktrace_node_t tracenode
= NULL
;
271 lwkt_gettoken(&curp
->p_token
);
272 curp
->p_traceflag
|= KTRFAC_ACTIVE
;
274 if (ops
!= KTROP_CLEAR
) {
276 * an operation which requires a file argument.
278 error
= nlookup_init(&nd
, uap
->fname
,
279 UIO_USERSPACE
, NLC_LOCKVP
);
/* Open read/write; refuse to follow symlinks. */
281 error
= vn_open(&nd
, NULL
, FREAD
|FWRITE
|O_NOFOLLOW
, 0);
/* Only regular files may be trace targets. */
282 if (error
== 0 && nd
.nl_open_vp
->v_type
!= VREG
)
285 curp
->p_traceflag
&= ~KTRFAC_ACTIVE
;
/* Wrap the opened vnode in a refcounted tracenode. */
289 tracenode
= kmalloc(sizeof(struct ktrace_node
), M_KTRACE
,
291 tracenode
->kn_vp
= nd
.nl_open_vp
;
292 tracenode
->kn_refs
= 1;
/* Ownership of the vnode moved into the tracenode. */
293 nd
.nl_open_vp
= NULL
;
295 vn_unlock(tracenode
->kn_vp
);
298 * Clear all uses of the tracefile. Not the most efficient operation
301 if (ops
== KTROP_CLEARFILE
) {
302 info
.tracenode
= tracenode
;
305 allproc_scan(ktrace_clear_callback
, &info
, 0);
310 * need something to (un)trace (XXX - why is this here?)
321 * By process group. Process group is referenced, preventing
/* Negative pid selects a whole process group. */
324 pg
= pgfind(-uap
->pid
);
329 lwkt_gettoken(&pg
->pg_token
);
330 LIST_FOREACH(p
, &pg
->pg_members
, p_pglist
) {
333 ret
|= ktrsetchildren(td
, p
, ops
, facs
, tracenode
);
335 ret
|= ktrops(td
, p
, ops
, facs
, tracenode
);
338 lwkt_reltoken(&pg
->pg_token
);
/* Single-process case: descend to children or just this proc. */
350 ret
|= ktrsetchildren(td
, p
, ops
, facs
, tracenode
);
352 ret
|= ktrops(td
, p
, ops
, facs
, tracenode
);
/* Drop our temporary ref; traced processes hold their own. */
359 ktrdestroy(&tracenode
);
360 curp
->p_traceflag
&= ~KTRFAC_ACTIVE
;
361 lwkt_reltoken(&curp
->p_token
);
/*
 * ktrace_clear_callback - allproc_scan() helper used when clearing a
 * trace file: detach process 'p' from its tracenode, either
 * unconditionally on a root-initiated clear (info->rootclear, exact
 * node match) or, on a vnode match, only when the caller has
 * permission over the target (ktrcanset()).
 */
371 * NOTE: NOT MPSAFE (yet)
374 ktrace_clear_callback(struct proc
*p
, void *data
)
376 struct ktrace_clear_info
*info
= data
;
378 if (p
->p_tracenode
) {
379 if (info
->rootclear
) {
380 if (p
->p_tracenode
== info
->tracenode
) {
381 ktrdestroy(&p
->p_tracenode
);
/* Non-root clear: match by vnode, honor permissions. */
385 if (p
->p_tracenode
->kn_vp
== info
->tracenode
->kn_vp
) {
386 if (ktrcanset(curthread
, p
)) {
387 ktrdestroy(&p
->p_tracenode
);
/*
 * sys_utrace - the utrace(2) system call: copy in at most
 * KTR_USER_MAXLEN bytes of caller-supplied data and emit it as a
 * KTR_USER trace record.  Small payloads use an on-stack cache;
 * larger ones are kmalloc'd.
 *
 * NOTE(review): the 'cp'/'cp_cache' declarations, the line attaching
 * cp to kth.ktr_buf, the error returns, and the kfree of an oversized
 * buffer are elided from this fragment -- confirm.
 */
406 sys_utrace(struct utrace_args
*uap
)
409 struct ktr_header kth
;
410 struct thread
*td
= curthread
; /* XXX */
/* Silently ignore unless KTR_USER tracing is enabled. */
414 if (!KTRPOINT(td
, KTR_USER
))
416 if (uap
->len
> KTR_USER_MAXLEN
)
418 td
->td_lwp
->lwp_traceflag
|= KTRFAC_ACTIVE
;
419 ktrgetheader(&kth
, KTR_USER
);
420 if (uap
->len
<= sizeof(cp_cache
))
423 cp
= kmalloc(uap
->len
, M_KTRACE
, M_WAITOK
);
/* Only emit the record if the copyin succeeded. */
425 if (!copyin(uap
->addr
, cp
, uap
->len
)) {
427 kth
.ktr_len
= uap
->len
;
428 ktrwrite(td
->td_lwp
, &kth
, NULL
);
432 td
->td_lwp
->lwp_traceflag
&= ~KTRFAC_ACTIVE
;
/*
 * ktrdestroy - drop one reference on *tracenodep (no-op when NULL).
 * The refcount is decremented atomically; the holder of the final
 * reference closes the trace vnode and frees the node.
 *
 * NOTE(review): an elided line presumably NULLs out *tracenodep after
 * it is loaded -- confirm against the full file.
 */
441 ktrdestroy(struct ktrace_node
**tracenodep
)
443 ktrace_node_t tracenode
;
445 if ((tracenode
= *tracenodep
) != NULL
) {
447 KKASSERT(tracenode
->kn_refs
> 0);
/* fetchadd returns the pre-decrement value; 1 means last ref. */
448 if (atomic_fetchadd_int(&tracenode
->kn_refs
, -1) == 1) {
449 vn_close(tracenode
->kn_vp
, FREAD
|FWRITE
, NULL
);
450 tracenode
->kn_vp
= NULL
;
451 kfree(tracenode
, M_KTRACE
);
/*
 * ktrinherit - acquire an additional reference on a tracenode.
 *
 * NOTE(review): callers use the return value (p->p_tracenode =
 * ktrinherit(tracenode)), so this presumably returns 'tracenode';
 * the return type and statement are elided from this fragment.
 */
457 * This allows a process to inherit a ref on a tracenode and is also used
458 * as a temporary ref to prevent a tracenode from being destroyed out from
459 * under an active operation.
462 ktrinherit(ktrace_node_t tracenode
)
465 KKASSERT(tracenode
->kn_refs
> 0);
466 atomic_add_int(&tracenode
->kn_refs
, 1);
/*
 * ktrops - apply one ktrace operation to a single process after a
 * ktrcanset() permission check.  KTROP_SET installs a (newly ref'd)
 * tracenode if it differs from the current one and enables facility
 * bits 'facs', tagging KTRFAC_ROOT when the caller is uid 0 so only
 * root can later change it.  The clearing path removes 'facs' and
 * tears down the tracenode once no facility bits remain.
 *
 * NOTE(review): the else-branch structure and the return statements
 * are elided from this fragment.
 */
473 ktrops(struct thread
*td
, struct proc
*p
, int ops
, int facs
,
474 ktrace_node_t tracenode
)
476 ktrace_node_t oldnode
;
478 if (!ktrcanset(td
, p
))
480 if (ops
== KTROP_SET
) {
481 if ((oldnode
= p
->p_tracenode
) != tracenode
) {
/* Take a new ref for the process, drop the old node's ref. */
482 p
->p_tracenode
= ktrinherit(tracenode
);
483 ktrdestroy(&oldnode
);
485 p
->p_traceflag
|= facs
;
486 if (td
->td_ucred
->cr_uid
== 0)
487 p
->p_traceflag
|= KTRFAC_ROOT
;
/* Clearing path: strip 'facs'; tear down when nothing is left. */
490 if (((p
->p_traceflag
&= ~facs
) & KTRFAC_MASK
) == 0) {
491 /* no more tracing */
493 ktrdestroy(&p
->p_tracenode
);
/*
 * ktrsetchildren - apply ktrops() to 'top' and all of its descendants,
 * using an iterative first-child / next-sibling walk that backs up the
 * tree (never past 'top') when a subtree is exhausted.  Per-process
 * tokens are taken and released as the walk moves between processes.
 *
 * NOTE(review): the loop skeleton (for/while headers, 'p'/'np'
 * handoffs, the return) is largely elided from this fragment.
 */
501 ktrsetchildren(struct thread
*td
, struct proc
*top
, int ops
, int facs
,
502 ktrace_node_t tracenode
)
510 lwkt_gettoken(&p
->p_token
);
513 ret
|= ktrops(td
, p
, ops
, facs
, tracenode
);
516 * If this process has children, descend to them next,
517 * otherwise do any siblings, and if done with this level,
518 * follow back up the tree (but not past top).
520 if ((np
= LIST_FIRST(&p
->p_children
)) != NULL
) {
526 if ((np
= LIST_NEXT(p
, p_sibling
)) != NULL
) {
532 * recurse up to parent, set p in our inner
533 * loop when doing this. np can be NULL if
534 * we race a reparenting to init (thus 'top'
535 * is skipped past and never encountered).
541 lwkt_reltoken(&p
->p_token
);
544 lwkt_gettoken(&p
->p_token
);
547 lwkt_reltoken(&p
->p_token
);
552 /* Already held, but we need the token too */
553 lwkt_gettoken(&p
->p_token
);
/*
 * ktrwrite - append one trace record to the process' trace file: the
 * fixed header, then either the kth->ktr_buf payload or the passthru
 * 'uio' data.  A temporary tracenode reference is taken up front so a
 * concurrent write error in another process cannot destroy the node
 * under us.  The timestamp is taken only after the vnode lock is held,
 * keeping timestamps monotonic in the output file.  On any write
 * error, tracing is shut down for every process using this tracenode
 * via allproc_scan().
 *
 * NOTE(review): local declarations (auio, error) and parts of the
 * error path (log() call opening, info.rootclear setup) are elided
 * from this fragment.
 */
559 ktrwrite(struct lwp
*lp
, struct ktr_header
*kth
, struct uio
*uio
)
561 struct ktrace_clear_info info
;
563 struct iovec aiov
[2];
565 ktrace_node_t tracenode
;
568 * We have to ref our tracenode to prevent it from being ripped out
569 * from under us while we are trying to use it. p_tracenode can
570 * go away at any time if another process gets a write error.
574 if (lp
->lwp_proc
->p_tracenode
== NULL
)
576 tracenode
= ktrinherit(lp
->lwp_proc
->p_tracenode
);
/* Build a kernel-space write: iov[0] = header, iov[1] = payload. */
577 auio
.uio_iov
= &aiov
[0];
579 auio
.uio_segflg
= UIO_SYSSPACE
;
580 auio
.uio_rw
= UIO_WRITE
;
581 aiov
[0].iov_base
= (caddr_t
)kth
;
582 aiov
[0].iov_len
= sizeof(struct ktr_header
);
583 auio
.uio_resid
= sizeof(struct ktr_header
);
585 auio
.uio_td
= curthread
;
586 if (kth
->ktr_len
> 0) {
588 aiov
[1].iov_base
= kth
->ktr_buf
;
589 aiov
[1].iov_len
= kth
->ktr_len
;
590 auio
.uio_resid
+= kth
->ktr_len
;
/* Passthru case: account the uio data in the header length. */
592 kth
->ktr_len
+= uio
->uio_resid
;
596 * NOTE: Must set timestamp after obtaining lock to ensure no
597 * timestamp reversals in the output file.
599 vn_lock(tracenode
->kn_vp
, LK_EXCLUSIVE
| LK_RETRY
);
600 microtime(&kth
->ktr_time
);
601 error
= VOP_WRITE(tracenode
->kn_vp
, &auio
,
602 IO_UNIT
| IO_APPEND
, lp
->lwp_thread
->td_ucred
);
603 if (error
== 0 && uio
!= NULL
) {
604 error
= VOP_WRITE(tracenode
->kn_vp
, uio
,
605 IO_UNIT
| IO_APPEND
, lp
->lwp_thread
->td_ucred
);
607 vn_unlock(tracenode
->kn_vp
);
610 * If an error occured, give up tracing on all processes
611 * using this tracenode. This is not MP safe but is
615 "ktrace write failed, errno %d, tracing stopped\n", error
);
616 info
.tracenode
= tracenode
;
619 allproc_scan(ktrace_clear_callback
, &info
, 0);
/* Drop the temporary ref taken at entry. */
621 ktrdestroy(&tracenode
);
625 * Return true if caller has permission to set the ktracing state
626 * of target. Essentially, the target can't possess any
627 * more permissions than the caller. KTRFAC_ROOT signifies that
628 * root previously set the tracing status on the target process, and
629 * so, only root may further change it.
631 * TODO: check groups. use caller effective gid.
634 ktrcanset(struct thread
*calltd
, struct proc
*targetp
)
636 struct ucred
*caller
= calltd
->td_ucred
;
637 struct ucred
*target
= targetp
->p_ucred
;
639 if (!PRISON_CHECK(caller
, target
))
641 if ((caller
->cr_uid
== target
->cr_ruid
&&
642 target
->cr_ruid
== target
->cr_svuid
&&
643 caller
->cr_rgid
== target
->cr_rgid
&& /* XXX */
644 target
->cr_rgid
== target
->cr_svgid
&&
645 (targetp
->p_traceflag
& KTRFAC_ROOT
) == 0 &&
646 (targetp
->p_flags
& P_SUGID
) == 0) ||