/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_ktrace.c	8.2 (Berkeley) 9/23/93
 * $FreeBSD: src/sys/kern/kern_ktrace.c,v 1.35.2.6 2002/07/05 22:36:38 darrenr Exp $
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/fcntl.h>
#include <sys/lock.h>
#include <sys/nlookup.h>
#include <sys/vnode.h>
#include <sys/ktrace.h>
#include <sys/malloc.h>
#include <sys/syslog.h>
#include <sys/sysent.h>

#include <vm/vm_zone.h>

#include <sys/mplock2.h>

static MALLOC_DEFINE(M_KTRACE, "KTRACE", "KTRACE");

static void ktrgetheader (struct ktr_header *kth, int type);
static struct ktr_syscall *ktrgetsyscall(struct ktr_header *kth,
			struct ktr_syscall *ktp_cache, int narg);
static void ktrputsyscall(struct ktr_syscall *ktp_cache,
			struct ktr_syscall *ktp);
static void ktrwrite (struct lwp *, struct ktr_header *, struct uio *);
static int ktrcanset (struct thread *, struct proc *);
static int ktrsetchildren (struct thread *, struct proc *,
			int, int, ktrace_node_t);
static int ktrops (struct thread *, struct proc *, int, int, ktrace_node_t);
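
/*
 * Fill in the common fields of a ktrace record header for the current
 * thread: pid, tid, command name, cpu id and the threaded flag.  The
 * timestamp is deliberately left to ktrwrite(), which stamps records
 * while holding the vnode lock so they appear in order in the file.
 */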
static void
ktrgetheader(struct ktr_header *kth, int type)
{
	thread_t td = curthread;
	struct proc *p = td->td_proc;
	struct lwp *lp = td->td_lwp;

	kth->ktr_type = type;
	/* XXX threaded flag is a hack at the moment */
	kth->ktr_flags = (p->p_nthreads > 1) ? KTRH_THREADED : 0;
	kth->ktr_flags |= KTRH_CPUID_ENCODE(td->td_gd->gd_cpuid);
	/*microtime(&kth->ktr_time); set in ktrwrite */
	kth->ktr_pid = p->p_pid;
	kth->ktr_tid = lp->lwp_tid;
	bcopy(p->p_comm, kth->ktr_comm, MAXCOMLEN + 1);
}
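
/*
 * Return a ktr_syscall buffer large enough to hold 'narg' arguments.
 * The caller's on-stack cache is used when it is big enough, otherwise
 * a buffer is allocated from M_KTRACE; ktrputsyscall() releases it.
 */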
static struct ktr_syscall *
ktrgetsyscall(struct ktr_header *kth, struct ktr_syscall *ktp_cache, int narg)
{
	size_t len;

	len = offsetof(struct ktr_syscall, ktr_args[narg]);
	if (len > sizeof(*ktp_cache))
		ktp_cache = kmalloc(len, M_KTRACE, M_WAITOK);
	kth->ktr_buf = (caddr_t)ktp_cache;
	kth->ktr_len = (int)len;
	return (ktp_cache);
}

static void
ktrputsyscall(struct ktr_syscall *ktp_cache, struct ktr_syscall *ktp)
{
	if (ktp != ktp_cache)
		kfree(ktp, M_KTRACE);
}
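
/*
 * Trace a system call entry: the syscall code and its arguments.
 */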
void
ktrsyscall(struct lwp *lp, int code, int narg, register_t args[])
{
	struct ktr_header kth;
	struct ktr_syscall ktp_cache;
	struct ktr_syscall *ktp;
	register_t *argp;
	int i;

	/*
	 * Setting the active bit prevents a ktrace recursion from the
	 * ktracing op itself.
	 */
	lp->lwp_traceflag |= KTRFAC_ACTIVE;
	ktrgetheader(&kth, KTR_SYSCALL);

	ktp = ktrgetsyscall(&kth, &ktp_cache, narg);
	ktp->ktr_code = code;
	ktp->ktr_narg = narg;
	argp = &ktp->ktr_args[0];
	for (i = 0; i < narg; i++)
		*argp++ = args[i];
	ktrwrite(lp, &kth, NULL);

	ktrputsyscall(&ktp_cache, ktp);
	lp->lwp_traceflag &= ~KTRFAC_ACTIVE;
}
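
/*
 * Trace a system call return: the syscall code, error and return value.
 */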
void
ktrsysret(struct lwp *lp, int code, int error, register_t retval)
{
	struct ktr_header kth;
	struct ktr_sysret ktp;

	lp->lwp_traceflag |= KTRFAC_ACTIVE;
	ktrgetheader(&kth, KTR_SYSRET);

	ktp.ktr_code = code;
	ktp.ktr_error = error;
	ktp.ktr_retval = retval;		/* what about val2 ? */

	kth.ktr_buf = (caddr_t)&ktp;
	kth.ktr_len = (int)sizeof(struct ktr_sysret);

	ktrwrite(lp, &kth, NULL);
	lp->lwp_traceflag &= ~KTRFAC_ACTIVE;
}
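
/*
 * Trace a pathname lookup; the path string itself is the record payload.
 */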
void
ktrnamei(struct lwp *lp, char *path)
{
	struct ktr_header kth;

	lp->lwp_traceflag |= KTRFAC_ACTIVE;
	ktrgetheader(&kth, KTR_NAMEI);

	kth.ktr_len = (int)strlen(path);
	kth.ktr_buf = path;

	ktrwrite(lp, &kth, NULL);
	lp->lwp_traceflag &= ~KTRFAC_ACTIVE;
}
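
/*
 * Trace a read or write on a file descriptor.  The ktr_genio record
 * describes the fd and direction; the transferred data itself is
 * appended by ktrwrite() from the supplied uio.
 */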
void
ktrgenio(struct lwp *lp, int fd, enum uio_rw rw, struct uio *uio, int error)
{
	struct ktr_header kth;
	struct ktr_genio ktg;

	if (error)
		return;
	lp->lwp_traceflag |= KTRFAC_ACTIVE;
	ktrgetheader(&kth, KTR_GENIO);

	ktg.ktr_fd = fd;
	ktg.ktr_rw = rw;
	kth.ktr_buf = (caddr_t)&ktg;
	kth.ktr_len = (int)sizeof(struct ktr_genio);
	uio->uio_offset = 0;
	uio->uio_rw = UIO_WRITE;

	ktrwrite(lp, &kth, uio);
	lp->lwp_traceflag &= ~KTRFAC_ACTIVE;
}
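
/*
 * Trace a posted signal: signal number, handler, mask and code.
 */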
void
ktrpsig(struct lwp *lp, int sig, sig_t action, sigset_t *mask, int code)
{
	struct ktr_header kth;
	struct ktr_psig kp;

	lp->lwp_traceflag |= KTRFAC_ACTIVE;
	ktrgetheader(&kth, KTR_PSIG);

	kp.signo = (char)sig;
	kp.action = action;
	kp.mask = *mask;
	kp.code = code;
	kth.ktr_buf = (caddr_t)&kp;
	kth.ktr_len = (int)sizeof(struct ktr_psig);

	ktrwrite(lp, &kth, NULL);
	lp->lwp_traceflag &= ~KTRFAC_ACTIVE;
}
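
/*
 * Trace a context switch event.
 */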
void
ktrcsw(struct lwp *lp, int out, int user)
{
	struct ktr_header kth;
	struct ktr_csw kc;

	lp->lwp_traceflag |= KTRFAC_ACTIVE;
	ktrgetheader(&kth, KTR_CSW);

	kc.out = out;
	kc.user = user;
	kth.ktr_buf = (caddr_t)&kc;
	kth.ktr_len = (int)sizeof(struct ktr_csw);

	ktrwrite(lp, &kth, NULL);
	lp->lwp_traceflag &= ~KTRFAC_ACTIVE;
}

/* Interface and common routines */

struct ktrace_clear_info {
	ktrace_node_t tracenode;
	int rootclear;
};

static int ktrace_clear_callback(struct proc *p, void *data);
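
/*
 * ktrace(2) system call.  Unless the op is KTROP_CLEAR the named trace
 * file is opened and wrapped in a reference-counted tracenode, which is
 * then attached to (or detached from) the requested process, process
 * group, or, with KTRFLAG_DESCEND, an entire subtree of descendants.
 */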
int
sys_ktrace(struct ktrace_args *uap)
{
	struct ktrace_clear_info info;
	struct thread *td = curthread;
	struct proc *curp = td->td_proc;
	struct proc *p;
	struct pgrp *pg;
	int facs = uap->facs & ~KTRFAC_ROOT;
	int ops = KTROP(uap->ops);
	int descend = uap->ops & KTRFLAG_DESCEND;
	int ret = 0;
	int error = 0;
	struct nlookupdata nd;
	ktrace_node_t tracenode = NULL;

	curp->p_traceflag |= KTRFAC_ACTIVE;
	if (ops != KTROP_CLEAR) {
		/*
		 * an operation which requires a file argument.
		 */
		error = nlookup_init(&nd, uap->fname,
				     UIO_USERSPACE, NLC_LOCKVP);
		if (error == 0)
			error = vn_open(&nd, NULL, FREAD|FWRITE|O_NOFOLLOW, 0);
		if (error == 0 && nd.nl_open_vp->v_type != VREG)
			error = EACCES;
		if (error) {
			curp->p_traceflag &= ~KTRFAC_ACTIVE;
			nlookup_done(&nd);
			return (error);
		}
		tracenode = kmalloc(sizeof(struct ktrace_node), M_KTRACE,
				    M_WAITOK | M_ZERO);
		tracenode->kn_vp = nd.nl_open_vp;
		tracenode->kn_refs = 1;
		nd.nl_open_vp = NULL;
		nlookup_done(&nd);
		vn_unlock(tracenode->kn_vp);
	}

	/*
	 * Clear all uses of the tracefile.  Not the most efficient operation
	 * in the world.
	 */
	if (ops == KTROP_CLEARFILE) {
		info.tracenode = tracenode;
		info.rootclear = 0;
		allproc_scan(ktrace_clear_callback, &info);
		goto done;
	}

	/*
	 * need something to (un)trace (XXX - why is this here?)
	 */
	if (!facs) {
		error = EINVAL;
		goto done;
	}

	if (uap->pid < 0) {
		/*
		 * By process group.  Process group is referenced, preventing
		 * disposal.
		 */
		pg = pgfind(-uap->pid);
		if (pg == NULL) {
			error = ESRCH;
			goto done;
		}
		lwkt_gettoken(&pg->pg_token);
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			if (descend)
				ret |= ktrsetchildren(td, p, ops, facs, tracenode);
			else
				ret |= ktrops(td, p, ops, facs, tracenode);
		}
		lwkt_reltoken(&pg->pg_token);
	} else {
		/*
		 * By pid.
		 */
		p = pfind(uap->pid);
		if (p == NULL) {
			error = ESRCH;
			goto done;
		}
		if (descend)
			ret |= ktrsetchildren(td, p, ops, facs, tracenode);
		else
			ret |= ktrops(td, p, ops, facs, tracenode);
	}
	if (!ret)
		error = EPERM;
done:
	ktrdestroy(&tracenode);
	curp->p_traceflag &= ~KTRFAC_ACTIVE;
	return (error);
}

/*
 * NOTE: NOT MPSAFE (yet)
 */
static int
ktrace_clear_callback(struct proc *p, void *data)
{
	struct ktrace_clear_info *info = data;

	if (p->p_tracenode) {
		if (info->rootclear) {
			if (p->p_tracenode == info->tracenode) {
				ktrdestroy(&p->p_tracenode);
				p->p_traceflag = 0;
			}
		} else {
			if (p->p_tracenode->kn_vp == info->tracenode->kn_vp) {
				if (ktrcanset(curthread, p)) {
					ktrdestroy(&p->p_tracenode);
					p->p_traceflag = 0;
				}
			}
		}
	}
	return (0);
}
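
/*
 * utrace(2) system call.  Lets a user process append its own KTR_USER
 * record (at most KTR_USER_MAXLEN bytes copied in from userspace) to
 * the current trace file.
 */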
int
sys_utrace(struct utrace_args *uap)
{
	struct ktr_header kth;
	struct thread *td = curthread;	/* XXX */
	caddr_t cp;
	char cp_cache[64];		/* small on-stack buffer; size assumed */

	if (!KTRPOINT(td, KTR_USER))
		return (0);
	if (uap->len > KTR_USER_MAXLEN)
		return (EINVAL);
	td->td_lwp->lwp_traceflag |= KTRFAC_ACTIVE;
	ktrgetheader(&kth, KTR_USER);
	if (uap->len <= sizeof(cp_cache))
		cp = cp_cache;
	else
		cp = kmalloc(uap->len, M_KTRACE, M_WAITOK);

	if (!copyin(uap->addr, cp, uap->len)) {
		kth.ktr_buf = cp;
		kth.ktr_len = uap->len;
		ktrwrite(td->td_lwp, &kth, NULL);
	}
	if (cp != cp_cache)
		kfree(cp, M_KTRACE);
	td->td_lwp->lwp_traceflag &= ~KTRFAC_ACTIVE;

	return (0);
}
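
/*
 * Release a reference on a tracenode.  When the last reference is
 * dropped the trace vnode is closed and the node is freed.
 */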
void
ktrdestroy(struct ktrace_node **tracenodep)
{
	ktrace_node_t tracenode;

	if ((tracenode = *tracenodep) != NULL) {
		*tracenodep = NULL;
		KKASSERT(tracenode->kn_refs > 0);
		if (atomic_fetchadd_int(&tracenode->kn_refs, -1) == 1) {
			vn_close(tracenode->kn_vp, FREAD|FWRITE, NULL);
			tracenode->kn_vp = NULL;
			kfree(tracenode, M_KTRACE);
		}
	}
}

/*
 * This allows a process to inherit a ref on a tracenode and is also used
 * as a temporary ref to prevent a tracenode from being destroyed out from
 * under an active operation.
 */
ktrace_node_t
ktrinherit(ktrace_node_t tracenode)
{
	KKASSERT(tracenode->kn_refs > 0);
	atomic_add_int(&tracenode->kn_refs, 1);
	return (tracenode);
}
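
/*
 * Apply a single set or clear operation to one process, after checking
 * that the caller is allowed to modify the target's trace state.
 */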
static int
ktrops(struct thread *td, struct proc *p, int ops, int facs,
       ktrace_node_t tracenode)
{
	ktrace_node_t oldnode;

	if (!ktrcanset(td, p))
		return (0);
	if (ops == KTROP_SET) {
		if ((oldnode = p->p_tracenode) != tracenode) {
			p->p_tracenode = ktrinherit(tracenode);
			ktrdestroy(&oldnode);
		}
		p->p_traceflag |= facs;
		if (td->td_ucred->cr_uid == 0)
			p->p_traceflag |= KTRFAC_ROOT;
	} else {
		/* KTROP_CLEAR */
		if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0) {
			/* no more tracing */
			p->p_traceflag = 0;
			ktrdestroy(&p->p_tracenode);
		}
	}
	return (1);
}
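
/*
 * Apply the tracing operation to 'top' and to every descendant of 'top',
 * walking the process tree iteratively: children first, then siblings,
 * then back up toward 'top'.
 */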
static int
ktrsetchildren(struct thread *td, struct proc *top, int ops, int facs,
	       ktrace_node_t tracenode)
{
	struct proc *p;
	struct proc *np;
	int ret = 0;

	p = top;
	PHOLD(p);
	lwkt_gettoken(&p->p_token);

	for (;;) {
		ret |= ktrops(td, p, ops, facs, tracenode);

		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if ((np = LIST_FIRST(&p->p_children)) != NULL) {
			PHOLD(np);
		}
		while (np == NULL) {
			if (p == top)
				break;
			if ((np = LIST_NEXT(p, p_sibling)) != NULL) {
				PHOLD(np);
				break;
			}

			/*
			 * recurse up to parent, set p in our inner
			 * loop when doing this.  np can be NULL if
			 * we race a reparenting to init (thus 'top'
			 * is skipped past and never encountered).
			 */
			np = p->p_pptr;
			if (np == NULL)
				break;
			PHOLD(np);
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			p = np;
			lwkt_gettoken(&p->p_token);
			np = NULL;
		}
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		if (np == NULL)
			break;
		p = np;
		/* Already held, but we need the token too */
		lwkt_gettoken(&p->p_token);
	}
	return (ret);
}
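
/*
 * Write one trace record (header plus optional payload and caller
 * supplied uio data) to the trace vnode.  On a write error, tracing is
 * shut down for every process still using this tracenode.
 */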
static void
ktrwrite(struct lwp *lp, struct ktr_header *kth, struct uio *uio)
{
	struct ktrace_clear_info info;
	struct uio auio;
	struct iovec aiov[2];
	int error;
	ktrace_node_t tracenode;

	/*
	 * We have to ref our tracenode to prevent it from being ripped out
	 * from under us while we are trying to use it.  p_tracenode can
	 * go away at any time if another process gets a write error.
	 */
	if (lp->lwp_proc->p_tracenode == NULL)
		return;
	tracenode = ktrinherit(lp->lwp_proc->p_tracenode);
	auio.uio_iov = &aiov[0];
	auio.uio_offset = 0;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	aiov[0].iov_base = (caddr_t)kth;
	aiov[0].iov_len = sizeof(struct ktr_header);
	auio.uio_resid = sizeof(struct ktr_header);
	auio.uio_iovcnt = 1;
	auio.uio_td = curthread;
	if (kth->ktr_len > 0) {
		auio.uio_iovcnt++;
		aiov[1].iov_base = kth->ktr_buf;
		aiov[1].iov_len = kth->ktr_len;
		auio.uio_resid += kth->ktr_len;
		if (uio != NULL)
			kth->ktr_len += uio->uio_resid;
	}

	/*
	 * NOTE: Must set timestamp after obtaining lock to ensure no
	 *	 timestamp reversals in the output file.
	 */
	vn_lock(tracenode->kn_vp, LK_EXCLUSIVE | LK_RETRY);
	microtime(&kth->ktr_time);
	error = VOP_WRITE(tracenode->kn_vp, &auio,
			  IO_UNIT | IO_APPEND, lp->lwp_thread->td_ucred);
	if (error == 0 && uio != NULL) {
		error = VOP_WRITE(tracenode->kn_vp, uio,
				  IO_UNIT | IO_APPEND, lp->lwp_thread->td_ucred);
	}
	vn_unlock(tracenode->kn_vp);
	if (error) {
		/*
		 * If an error occurred, give up tracing on all processes
		 * using this tracenode.  This is not MP safe but is
		 * sufficient for this situation.
		 */
		log(LOG_NOTICE,
		    "ktrace write failed, errno %d, tracing stopped\n", error);
		info.tracenode = tracenode;
		info.rootclear = 1;
		allproc_scan(ktrace_clear_callback, &info);
	}
	ktrdestroy(&tracenode);
}

/*
 * Return true if caller has permission to set the ktracing state
 * of target.  Essentially, the target can't possess any
 * more permissions than the caller.  KTRFAC_ROOT signifies that
 * root previously set the tracing status on the target process, and
 * so, only root may further change it.
 *
 * TODO: check groups.  use caller effective gid.
 */
static int
ktrcanset(struct thread *calltd, struct proc *targetp)
{
	struct ucred *caller = calltd->td_ucred;
	struct ucred *target = targetp->p_ucred;

	if (!PRISON_CHECK(caller, target))
		return (0);
	if ((caller->cr_uid == target->cr_ruid &&
	     target->cr_ruid == target->cr_svuid &&
	     caller->cr_rgid == target->cr_rgid &&	/* XXX */
	     target->cr_rgid == target->cr_svgid &&
	     (targetp->p_traceflag & KTRFAC_ROOT) == 0 &&
	     (targetp->p_flags & P_SUGID) == 0) ||
	    caller->cr_uid == 0)
		return (1);

	return (0);
}