/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_ktrace.c	8.2 (Berkeley) 9/23/93
 * $FreeBSD: src/sys/kern/kern_ktrace.c,v 1.35.2.6 2002/07/05 22:36:38 darrenr Exp $
 */
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/fcntl.h>
#include <sys/nlookup.h>
#include <sys/vnode.h>
#include <sys/ktrace.h>
#include <sys/malloc.h>
#include <sys/syslog.h>
#include <sys/sysent.h>

#include <vm/vm_zone.h>

#include <sys/mplock2.h>
static MALLOC_DEFINE(M_KTRACE, "KTRACE", "KTRACE");

static void ktrgetheader (struct ktr_header *kth, int type);
static struct ktr_syscall *ktrgetsyscall(struct ktr_header *kth,
				struct ktr_syscall *ktp_cache, int narg);
static void ktrputsyscall(struct ktr_syscall *ktp_cache,
				struct ktr_syscall *ktp);
static void ktrwrite (struct lwp *, struct ktr_header *, struct uio *);
static int ktrcanset (struct thread *, struct proc *);
static int ktrsetchildren (struct thread *, struct proc *,
				int, int, ktrace_node_t);
static int ktrops (struct thread *, struct proc *, int, int, ktrace_node_t);
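
/*
 * Fill in the common ktrace record header from the current thread: pid,
 * tid, command name, cpu id and the threaded flag.  The timestamp is
 * deliberately left for ktrwrite(), where it is taken under the trace
 * vnode lock.
 */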
ktrgetheader(struct ktr_header *kth, int type)
	thread_t td = curthread;
	struct proc *p = td->td_proc;
	struct lwp *lp = td->td_lwp;

	/* XXX threaded flag is a hack at the moment */
	kth->ktr_flags = (p->p_nthreads > 1) ? KTRH_THREADED : 0;
	kth->ktr_flags |= KTRH_CPUID_ENCODE(td->td_gd->gd_cpuid);
	/*microtime(&kth->ktr_time);  set in ktrwrite */
	kth->ktr_pid = p->p_pid;
	kth->ktr_tid = lp->lwp_tid;
	bcopy(p->p_comm, kth->ktr_comm, MAXCOMLEN + 1);
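
/*
 * Return a ktr_syscall record large enough to hold narg arguments.  The
 * caller supplies a small on-stack record (ktp_cache); it is used directly
 * when the arguments fit, otherwise a larger record is kmalloc()'d and
 * later released by ktrputsyscall().
 */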
ktrgetsyscall(struct ktr_header *kth, struct ktr_syscall *ktp_cache, int narg)
	len = offsetof(struct ktr_syscall, ktr_args[narg]);
	if (len > sizeof(*ktp_cache))
		ktp_cache = kmalloc(len, M_KTRACE, M_WAITOK);
	kth->ktr_buf = (caddr_t)ktp_cache;
	kth->ktr_len = (int)len;
ktrputsyscall(struct ktr_syscall *ktp_cache, struct ktr_syscall *ktp)
	if (ktp != ktp_cache)
		kfree(ktp, M_KTRACE);
ktrsyscall(struct lwp *lp, int code, int narg, register_t args[])
	struct ktr_header kth;
	struct ktr_syscall ktp_cache;
	struct ktr_syscall *ktp;
	register_t *argp;
	int i;

	/*
	 * Setting the active bit prevents a ktrace recursion from the
	 * ktracing op itself.
	 */
	lp->lwp_traceflag |= KTRFAC_ACTIVE;
	ktrgetheader(&kth, KTR_SYSCALL);

	ktp = ktrgetsyscall(&kth, &ktp_cache, narg);
	ktp->ktr_code = code;
	ktp->ktr_narg = narg;
	argp = &ktp->ktr_args[0];
	for (i = 0; i < narg; i++)
		*argp++ = args[i];
	ktrwrite(lp, &kth, NULL);

	ktrputsyscall(&ktp_cache, ktp);
	lp->lwp_traceflag &= ~KTRFAC_ACTIVE;
ktrsysret(struct lwp *lp, int code, int error, register_t retval)
	struct ktr_header kth;
	struct ktr_sysret ktp;

	lp->lwp_traceflag |= KTRFAC_ACTIVE;
	ktrgetheader(&kth, KTR_SYSRET);

	ktp.ktr_error = error;
	ktp.ktr_retval = retval;		/* what about val2 ? */

	kth.ktr_buf = (caddr_t)&ktp;
	kth.ktr_len = (int)sizeof(struct ktr_sysret);

	ktrwrite(lp, &kth, NULL);
	lp->lwp_traceflag &= ~KTRFAC_ACTIVE;
ktrnamei(struct lwp *lp, char *path)
	struct ktr_header kth;

	lp->lwp_traceflag |= KTRFAC_ACTIVE;
	ktrgetheader(&kth, KTR_NAMEI);

	kth.ktr_len = (int)strlen(path);

	ktrwrite(lp, &kth, NULL);
	lp->lwp_traceflag &= ~KTRFAC_ACTIVE;
ktrgenio(struct lwp *lp, int fd, enum uio_rw rw, struct uio *uio, int error)
	struct ktr_header kth;
	struct ktr_genio ktg;

	lp->lwp_traceflag |= KTRFAC_ACTIVE;
	ktrgetheader(&kth, KTR_GENIO);

	kth.ktr_buf = (caddr_t)&ktg;
	kth.ktr_len = (int)sizeof(struct ktr_genio);

	uio->uio_rw = UIO_WRITE;
	ktrwrite(lp, &kth, uio);
	lp->lwp_traceflag &= ~KTRFAC_ACTIVE;
ktrpsig(struct lwp *lp, int sig, sig_t action, sigset_t *mask, int code)
	struct ktr_header kth;
	struct ktr_psig kp;

	lp->lwp_traceflag |= KTRFAC_ACTIVE;
	ktrgetheader(&kth, KTR_PSIG);

	kp.signo = (char)sig;

	kth.ktr_buf = (caddr_t)&kp;
	kth.ktr_len = (int)sizeof(struct ktr_psig);

	ktrwrite(lp, &kth, NULL);
	lp->lwp_traceflag &= ~KTRFAC_ACTIVE;
ktrcsw(struct lwp *lp, int out, int user)
	struct ktr_header kth;
	struct ktr_csw kc;

	lp->lwp_traceflag |= KTRFAC_ACTIVE;
	ktrgetheader(&kth, KTR_CSW);

	kth.ktr_buf = (caddr_t)&kc;
	kth.ktr_len = (int)sizeof(struct ktr_csw);

	ktrwrite(lp, &kth, NULL);
	lp->lwp_traceflag &= ~KTRFAC_ACTIVE;
/* Interface and common routines */
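
/*
 * Context handed to ktrace_clear_callback() via allproc_scan() when
 * tracing has to be shut off for every process referencing a tracenode:
 * from KTROP_CLEARFILE in sys_ktrace() and from the write-error path in
 * ktrwrite().
 */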
struct ktrace_clear_info {
	ktrace_node_t tracenode;
	int rootclear;
};

static int ktrace_clear_callback(struct proc *p, void *data);
sys_ktrace(struct ktrace_args *uap)
	struct ktrace_clear_info info;
	struct thread *td = curthread;
	struct proc *curp = td->td_proc;
	int facs = uap->facs & ~KTRFAC_ROOT;
	int ops = KTROP(uap->ops);
	int descend = uap->ops & KTRFLAG_DESCEND;
	struct nlookupdata nd;
	ktrace_node_t tracenode = NULL;

	curp->p_traceflag |= KTRFAC_ACTIVE;
	if (ops != KTROP_CLEAR) {
		/*
		 * an operation which requires a file argument.
		 */
		error = nlookup_init(&nd, uap->fname,
					UIO_USERSPACE, NLC_LOCKVP);
		error = vn_open(&nd, NULL, FREAD|FWRITE|O_NOFOLLOW, 0);
		if (error == 0 && nd.nl_open_vp->v_type != VREG)
			error = EACCES;
		curp->p_traceflag &= ~KTRFAC_ACTIVE;
		tracenode = kmalloc(sizeof(struct ktrace_node), M_KTRACE,
					M_WAITOK | M_ZERO);
		tracenode->kn_vp = nd.nl_open_vp;
		tracenode->kn_refs = 1;
		nd.nl_open_vp = NULL;
		vn_unlock(tracenode->kn_vp);
	/*
	 * Clear all uses of the tracefile.  Not the most efficient operation
	 */
	if (ops == KTROP_CLEARFILE) {
		info.tracenode = tracenode;
		allproc_scan(ktrace_clear_callback, &info);
	/*
	 * need something to (un)trace (XXX - why is this here?)
	 */

		/*
		 * By process group.  Process group is referenced, preventing
		 */
		pg = pgfind(-uap->pid);
		lwkt_gettoken(&pg->pg_token);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			if (descend)
				ret |= ktrsetchildren(td, p, ops, facs, tracenode);
			else
				ret |= ktrops(td, p, ops, facs, tracenode);
		}
		lwkt_reltoken(&pg->pg_token);

		if (descend)
			ret |= ktrsetchildren(td, p, ops, facs, tracenode);
		else
			ret |= ktrops(td, p, ops, facs, tracenode);

	ktrdestroy(&tracenode);
	curp->p_traceflag &= ~KTRFAC_ACTIVE;
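
/*
 * Illustrative userland sketch (not part of this file; prototype and
 * constants per the BSD ktrace(2) manual page and <sys/ktrace.h>):
 * how the syscall above is typically driven.  The trace-point mask is
 * only an example.
 *
 *	#include <sys/param.h>
 *	#include <sys/ktrace.h>
 *
 *	static int
 *	trace_pid_to_file(const char *file, pid_t pid)
 *	{
 *		int points = KTRFAC_SYSCALL | KTRFAC_SYSRET | KTRFAC_NAMEI;
 *
 *		return (ktrace(file, KTROP_SET | KTRFLAG_DESCEND,
 *		    points, pid));
 *	}
 */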
/*
 * NOTE: NOT MPSAFE (yet)
 */
ktrace_clear_callback(struct proc *p, void *data)
	struct ktrace_clear_info *info = data;

	if (p->p_tracenode) {
		if (info->rootclear) {
			if (p->p_tracenode == info->tracenode) {
				ktrdestroy(&p->p_tracenode);
			}
		} else {
			if (p->p_tracenode->kn_vp == info->tracenode->kn_vp) {
				if (ktrcanset(curthread, p)) {
					ktrdestroy(&p->p_tracenode);
sys_utrace(struct utrace_args *uap)
	struct ktr_header kth;
	struct thread *td = curthread;	/* XXX */

	if (!KTRPOINT(td, KTR_USER))
		return (0);
	if (uap->len > KTR_USER_MAXLEN)
		return (EINVAL);
	td->td_lwp->lwp_traceflag |= KTRFAC_ACTIVE;
	ktrgetheader(&kth, KTR_USER);
	if (uap->len <= sizeof(cp_cache))
		cp = cp_cache;
	else
		cp = kmalloc(uap->len, M_KTRACE, M_WAITOK);

	if (!copyin(uap->addr, cp, uap->len)) {
		kth.ktr_len = uap->len;
		ktrwrite(td->td_lwp, &kth, NULL);
	}
	td->td_lwp->lwp_traceflag &= ~KTRFAC_ACTIVE;
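
/*
 * Illustrative userland sketch (not part of this file; prototype per the
 * BSD utrace(2) manual page): a traced process can inject its own opaque
 * marker records, up to KTR_USER_MAXLEN bytes, which appear as KTR_USER
 * entries in the trace.
 *
 *	#include <sys/param.h>
 *	#include <sys/uio.h>
 *	#include <sys/ktrace.h>
 *	#include <string.h>
 *
 *	static void
 *	trace_mark(const char *tag)
 *	{
 *		(void)utrace(tag, strlen(tag) + 1);
 *	}
 */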
ktrdestroy(struct ktrace_node **tracenodep)
	ktrace_node_t tracenode;

	if ((tracenode = *tracenodep) != NULL) {
		KKASSERT(tracenode->kn_refs > 0);
		if (atomic_fetchadd_int(&tracenode->kn_refs, -1) == 1) {
			vn_close(tracenode->kn_vp, FREAD|FWRITE);
			tracenode->kn_vp = NULL;
			kfree(tracenode, M_KTRACE);
/*
 * This allows a process to inherit a ref on a tracenode and is also used
 * as a temporary ref to prevent a tracenode from being destroyed out from
 * under an active operation.
 */
ktrinherit(ktrace_node_t tracenode)
	KKASSERT(tracenode->kn_refs > 0);
	atomic_add_int(&tracenode->kn_refs, 1);
	return (tracenode);
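
/*
 * Every reference taken with ktrinherit() is released with ktrdestroy(),
 * which closes the trace vnode and frees the node once the last reference
 * is dropped.
 */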
ktrops(struct thread *td, struct proc *p, int ops, int facs,
	ktrace_node_t tracenode)
	ktrace_node_t oldnode;

	if (!ktrcanset(td, p))
		return (0);
	if (ops == KTROP_SET) {
		if ((oldnode = p->p_tracenode) != tracenode) {
			p->p_tracenode = ktrinherit(tracenode);
			ktrdestroy(&oldnode);
		}
		p->p_traceflag |= facs;
		if (td->td_ucred->cr_uid == 0)
			p->p_traceflag |= KTRFAC_ROOT;
	} else {
		if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0) {
			/* no more tracing */
			ktrdestroy(&p->p_tracenode);
ktrsetchildren(struct thread *td, struct proc *top, int ops, int facs,
	ktrace_node_t tracenode)
	lwkt_gettoken(&p->p_token);
	ret |= ktrops(td, p, ops, facs, tracenode);

	/*
	 * If this process has children, descend to them next,
	 * otherwise do any siblings, and if done with this level,
	 * follow back up the tree (but not past top).
	 */
	if ((np = LIST_FIRST(&p->p_children)) != NULL) {
	if ((np = LIST_NEXT(p, p_sibling)) != NULL) {
	/*
	 * recurse up to parent, set p in our inner
	 * loop when doing this.  np can be NULL if
	 * we race a reparenting to init (thus 'top'
	 * is skipped past and never encountered).
	 */
	lwkt_reltoken(&p->p_token);
	lwkt_gettoken(&p->p_token);
	lwkt_reltoken(&p->p_token);
	/* Already held, but we need the token too */
	lwkt_gettoken(&p->p_token);
ktrwrite(struct lwp *lp, struct ktr_header *kth, struct uio *uio)
	struct ktrace_clear_info info;
	struct uio auio;
	struct iovec aiov[2];
	ktrace_node_t tracenode;

	/*
	 * We have to ref our tracenode to prevent it from being ripped out
	 * from under us while we are trying to use it.   p_tracenode can
	 * go away at any time if another process gets a write error.
	 */
	if (lp->lwp_proc->p_tracenode == NULL)
		return;
	tracenode = ktrinherit(lp->lwp_proc->p_tracenode);
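
	/*
	 * Each record goes out as a two-element iovec: the fixed-size
	 * ktr_header first, then the optional variable-length payload in
	 * kth->ktr_buf (a KTR_GENIO caller's uio is written separately
	 * below).
	 */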
	auio.uio_iov = &aiov[0];
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	aiov[0].iov_base = (caddr_t)kth;
	aiov[0].iov_len = sizeof(struct ktr_header);
	auio.uio_resid = sizeof(struct ktr_header);
	auio.uio_td = curthread;
	if (kth->ktr_len > 0) {
		aiov[1].iov_base = kth->ktr_buf;
		aiov[1].iov_len = kth->ktr_len;
		auio.uio_resid += kth->ktr_len;
		if (uio != NULL)
			kth->ktr_len += uio->uio_resid;
	}
	/*
	 * NOTE: Must set timestamp after obtaining lock to ensure no
	 *	 timestamp reversals in the output file.
	 */
	vn_lock(tracenode->kn_vp, LK_EXCLUSIVE | LK_RETRY);
	microtime(&kth->ktr_time);
	error = VOP_WRITE(tracenode->kn_vp, &auio,
			  IO_UNIT | IO_APPEND, lp->lwp_thread->td_ucred);
	if (error == 0 && uio != NULL) {
		error = VOP_WRITE(tracenode->kn_vp, uio,
				  IO_UNIT | IO_APPEND, lp->lwp_thread->td_ucred);
	}
	vn_unlock(tracenode->kn_vp);
	/*
	 * If an error occurred, give up tracing on all processes
	 * using this tracenode.  This is not MP safe but is
	 */
		log(LOG_NOTICE,
		    "ktrace write failed, errno %d, tracing stopped\n", error);
		info.tracenode = tracenode;
		allproc_scan(ktrace_clear_callback, &info);
	ktrdestroy(&tracenode);
/*
 * Return true if caller has permission to set the ktracing state
 * of target.  Essentially, the target can't possess any
 * more permissions than the caller.  KTRFAC_ROOT signifies that
 * root previously set the tracing status on the target process, and
 * so, only root may further change it.
 *
 * TODO: check groups.  use caller effective gid.
 */
ktrcanset(struct thread *calltd, struct proc *targetp)
	struct ucred *caller = calltd->td_ucred;
	struct ucred *target = targetp->p_ucred;

	if (!PRISON_CHECK(caller, target))
		return (0);
	if ((caller->cr_uid == target->cr_ruid &&
	     target->cr_ruid == target->cr_svuid &&
	     caller->cr_rgid == target->cr_rgid &&	/* XXX */
	     target->cr_rgid == target->cr_svgid &&
	     (targetp->p_traceflag & KTRFAC_ROOT) == 0 &&
	     (targetp->p_flags & P_SUGID) == 0) ||