/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 * Copyright (c) 2005 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_ktrace.c	8.2 (Berkeley) 9/23/93
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/socket.h>
#include <sys/ktrace.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/sysproto.h>
#include <sys/uio.h>

#include <security/mac/mac_framework.h>
/*
 * The ktrace facility allows the tracing of certain key events in user space
 * processes, such as system calls, signal delivery, context switches, and
 * user-generated events using utrace(2).  It works by streaming event
 * records and data to a vnode associated with the process using the
 * ktrace(2) system call.  In general, records can be written directly from
 * the context that generates the event.  One important exception to this is
 * during a context switch, where sleeping is not permitted.  To handle this
 * case, trace events are generated using in-kernel ktr_request records, and
 * then delivered to disk at a convenient moment -- either immediately, the
 * next traceable event, at system call return, or at process exit.
 *
 * When dealing with multiple threads or processes writing to the same event
 * log, ordering guarantees are weak: specifically, if an event has multiple
 * records (i.e., system call enter and return), they may be interlaced with
 * records from another event.  Process and thread ID information is provided
 * in the record, and user applications can de-interlace events if required.
 */
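/*
 * Illustrative sketch (not part of this file): from user space the facility
 * is driven through the ktrace(2) and utrace(2) system calls, and trace
 * files are normally decoded with kdump(1).  The file name and trace points
 * below are arbitrary examples.
 *
 *	#include <sys/param.h>
 *	#include <sys/time.h>
 *	#include <sys/uio.h>
 *	#include <sys/ktrace.h>
 *	#include <unistd.h>
 *
 *	void
 *	trace_self_example(void)
 *	{
 *		(void)ktrace("ktrace.out", KTROP_SET,
 *		    KTRFAC_SYSCALL | KTRFAC_SYSRET | KTRFAC_NAMEI, getpid());
 *		(void)utrace("hello", sizeof("hello"));
 *		(void)ktrace("ktrace.out", KTROP_CLEAR, 0, getpid());
 *	}
 *
 * The utrace(2) payload shows up as a KTR_USER record; because records from
 * concurrent threads may be interlaced, consumers de-interlace them using
 * the pid/tid stored in each ktr_header.
 */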
static MALLOC_DEFINE(M_KTRACE, "KTRACE", "KTRACE");
#ifndef KTRACE_REQUEST_POOL
#define	KTRACE_REQUEST_POOL	100
#endif
struct ktr_request {
	struct	ktr_header ktr_header;
	void	*ktr_buffer;
	union {
		struct	ktr_syscall ktr_syscall;
		struct	ktr_sysret ktr_sysret;
		struct	ktr_genio ktr_genio;
		struct	ktr_psig ktr_psig;
		struct	ktr_csw ktr_csw;
	} ktr_data;
	STAILQ_ENTRY(ktr_request) ktr_list;
};
static int data_lengths[] = {
	0,					/* none */
	offsetof(struct ktr_syscall, ktr_args),	/* KTR_SYSCALL */
	sizeof(struct ktr_sysret),		/* KTR_SYSRET */
	0,					/* KTR_NAMEI */
	sizeof(struct ktr_genio),		/* KTR_GENIO */
	sizeof(struct ktr_psig),		/* KTR_PSIG */
	sizeof(struct ktr_csw),			/* KTR_CSW */
	0,					/* KTR_USER */
	0					/* KTR_STRUCT */
};
static STAILQ_HEAD(, ktr_request) ktr_free;

static SYSCTL_NODE(_kern, OID_AUTO, ktrace, CTLFLAG_RD, 0, "KTRACE options");
static u_int ktr_requestpool = KTRACE_REQUEST_POOL;
TUNABLE_INT("kern.ktrace.request_pool", &ktr_requestpool);

static u_int ktr_geniosize = PAGE_SIZE;
TUNABLE_INT("kern.ktrace.genio_size", &ktr_geniosize);
SYSCTL_UINT(_kern_ktrace, OID_AUTO, genio_size, CTLFLAG_RW, &ktr_geniosize,
    0, "Maximum size of genio event payload");
static int print_message = 1;
struct mtx ktrace_mtx;
static struct sx ktrace_sx;
static void ktrace_init(void *dummy);
static int sysctl_kern_ktrace_request_pool(SYSCTL_HANDLER_ARGS);
static u_int ktrace_resize_pool(u_int newsize);
static struct ktr_request *ktr_getrequest(int type);
static void ktr_submitrequest(struct thread *td, struct ktr_request *req);
static void ktr_freerequest(struct ktr_request *req);
static void ktr_writerequest(struct thread *td, struct ktr_request *req);
static int ktrcanset(struct thread *, struct proc *);
static int ktrsetchildren(struct thread *, struct proc *, int, int, struct vnode *);
static int ktrops(struct thread *, struct proc *, int, int, struct vnode *);
/*
 * ktrace itself generates events, such as context switches, which we do not
 * wish to trace.  Maintain a flag, TDP_INKTRACE, on each thread to determine
 * whether or not it is in a region where tracing of events should be
 * suspended.
 */
static void
ktrace_enter(struct thread *td)
{

	KASSERT(!(td->td_pflags & TDP_INKTRACE), ("ktrace_enter: flag set"));
	td->td_pflags |= TDP_INKTRACE;
}

static void
ktrace_exit(struct thread *td)
{

	KASSERT(td->td_pflags & TDP_INKTRACE, ("ktrace_exit: flag not set"));
	td->td_pflags &= ~TDP_INKTRACE;
}

static void
ktrace_assert(struct thread *td)
{

	KASSERT(td->td_pflags & TDP_INKTRACE, ("ktrace_assert: flag not set"));
}
static void
ktrace_init(void *dummy)
{
	struct ktr_request *req;
	int i;

	mtx_init(&ktrace_mtx, "ktrace", NULL, MTX_DEF | MTX_QUIET);
	sx_init(&ktrace_sx, "ktrace_sx");
	STAILQ_INIT(&ktr_free);
	for (i = 0; i < ktr_requestpool; i++) {
		req = malloc(sizeof(struct ktr_request), M_KTRACE, M_WAITOK);
		STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
	}
}
SYSINIT(ktrace_init, SI_SUB_KTRACE, SI_ORDER_ANY, ktrace_init, NULL);
static int
sysctl_kern_ktrace_request_pool(SYSCTL_HANDLER_ARGS)
{
	struct thread *td = curthread;
	u_int newsize, oldsize, wantsize;
	int error;

	/* Handle easy read-only case first to avoid warnings from GCC. */
	if (!req->newptr) {
		mtx_lock(&ktrace_mtx);
		oldsize = ktr_requestpool;
		mtx_unlock(&ktrace_mtx);
		return (SYSCTL_OUT(req, &oldsize, sizeof(u_int)));
	}

	error = SYSCTL_IN(req, &wantsize, sizeof(u_int));
	if (error)
		return (error);
	ktrace_enter(td);
	mtx_lock(&ktrace_mtx);
	oldsize = ktr_requestpool;
	newsize = ktrace_resize_pool(wantsize);
	mtx_unlock(&ktrace_mtx);
	ktrace_exit(td);
	error = SYSCTL_OUT(req, &oldsize, sizeof(u_int));
	if (error)
		return (error);
	if (wantsize > oldsize && newsize < wantsize)
		return (ENOSPC);
	return (0);
}
SYSCTL_PROC(_kern_ktrace, OID_AUTO, request_pool, CTLTYPE_UINT|CTLFLAG_RW,
    &ktr_requestpool, 0, sysctl_kern_ktrace_request_pool, "IU", "");
static u_int
ktrace_resize_pool(u_int newsize)
{
	struct ktr_request *req;
	int bound;

	mtx_assert(&ktrace_mtx, MA_OWNED);
	bound = newsize - ktr_requestpool;
	if (bound == 0)
		return (ktr_requestpool);
	if (bound < 0)
		/* Shrink pool down to newsize if possible. */
		while (bound++ < 0) {
			req = STAILQ_FIRST(&ktr_free);
			if (req == NULL)
				return (ktr_requestpool);
			STAILQ_REMOVE_HEAD(&ktr_free, ktr_list);
			ktr_requestpool--;
			mtx_unlock(&ktrace_mtx);
			free(req, M_KTRACE);
			mtx_lock(&ktrace_mtx);
		}
	else
		/* Grow pool up to newsize. */
		while (bound-- > 0) {
			mtx_unlock(&ktrace_mtx);
			req = malloc(sizeof(struct ktr_request), M_KTRACE,
			    M_WAITOK);
			mtx_lock(&ktrace_mtx);
			STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
			ktr_requestpool++;
		}
	return (ktr_requestpool);
}
static struct ktr_request *
ktr_getrequest(int type)
{
	struct ktr_request *req;
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int pm;

	ktrace_enter(td);	/* XXX: In caller instead? */
	mtx_lock(&ktrace_mtx);
	if (!KTRCHECK(td, type)) {
		mtx_unlock(&ktrace_mtx);
		ktrace_exit(td);
		return (NULL);
	}
	req = STAILQ_FIRST(&ktr_free);
	if (req != NULL) {
		STAILQ_REMOVE_HEAD(&ktr_free, ktr_list);
		req->ktr_header.ktr_type = type;
		if (p->p_traceflag & KTRFAC_DROP) {
			req->ktr_header.ktr_type |= KTR_DROP;
			p->p_traceflag &= ~KTRFAC_DROP;
		}
		mtx_unlock(&ktrace_mtx);
		microtime(&req->ktr_header.ktr_time);
		req->ktr_header.ktr_pid = p->p_pid;
		req->ktr_header.ktr_tid = td->td_tid;
		bcopy(td->td_name, req->ktr_header.ktr_comm, MAXCOMLEN + 1);
		req->ktr_buffer = NULL;
		req->ktr_header.ktr_len = 0;
	} else {
		p->p_traceflag |= KTRFAC_DROP;
		pm = print_message;
		print_message = 0;
		mtx_unlock(&ktrace_mtx);
		if (pm)
			printf("Out of ktrace request objects.\n");
		ktrace_exit(td);
	}
	return (req);
}
/*
 * Some trace generation environments don't permit direct access to VFS,
 * such as during a context switch where sleeping is not allowed.  Under these
 * circumstances, queue a request to the thread to be written asynchronously
 * later.
 */
static void
ktr_enqueuerequest(struct thread *td, struct ktr_request *req)
{

	mtx_lock(&ktrace_mtx);
	STAILQ_INSERT_TAIL(&td->td_proc->p_ktr, req, ktr_list);
	mtx_unlock(&ktrace_mtx);
	ktrace_exit(td);
}
/*
 * Drain any pending ktrace records from the per-thread queue to disk.  This
 * is used both internally before committing other records, and also on
 * system call return.  We drain all the ones we can find at the time when
 * drain is requested, but don't keep draining after that as those events
 * may be approximately "after" the current event.
 */
static void
ktr_drain(struct thread *td)
{
	struct ktr_request *queued_req;
	STAILQ_HEAD(, ktr_request) local_queue;

	ktrace_assert(td);
	sx_assert(&ktrace_sx, SX_XLOCKED);

	STAILQ_INIT(&local_queue);	/* XXXRW: needed? */

	if (!STAILQ_EMPTY(&td->td_proc->p_ktr)) {
		mtx_lock(&ktrace_mtx);
		STAILQ_CONCAT(&local_queue, &td->td_proc->p_ktr);
		mtx_unlock(&ktrace_mtx);

		while ((queued_req = STAILQ_FIRST(&local_queue))) {
			STAILQ_REMOVE_HEAD(&local_queue, ktr_list);
			ktr_writerequest(td, queued_req);
			ktr_freerequest(queued_req);
		}
	}
}
/*
 * Submit a trace record for immediate commit to disk -- to be used only
 * where entering VFS is OK.  First drain any pending records that may have
 * been cached in the thread.
 */
static void
ktr_submitrequest(struct thread *td, struct ktr_request *req)
{

	ktrace_assert(td);

	sx_xlock(&ktrace_sx);
	ktr_drain(td);
	ktr_writerequest(td, req);
	ktr_freerequest(req);
	sx_xunlock(&ktrace_sx);

	ktrace_exit(td);
}
static void
ktr_freerequest(struct ktr_request *req)
{

	if (req->ktr_buffer != NULL)
		free(req->ktr_buffer, M_KTRACE);
	mtx_lock(&ktrace_mtx);
	STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
	mtx_unlock(&ktrace_mtx);
}
void
ktrsyscall(int code, int narg, register_t args[])
{
	struct ktr_request *req;
	struct ktr_syscall *ktp;
	size_t buflen;
	char *buf = NULL;

	buflen = sizeof(register_t) * narg;
	if (buflen > 0) {
		buf = malloc(buflen, M_KTRACE, M_WAITOK);
		bcopy(args, buf, buflen);
	}
	req = ktr_getrequest(KTR_SYSCALL);
	if (req == NULL) {
		if (buf != NULL)
			free(buf, M_KTRACE);
		return;
	}
	ktp = &req->ktr_data.ktr_syscall;
	ktp->ktr_code = code;
	ktp->ktr_narg = narg;
	if (buflen > 0) {
		req->ktr_header.ktr_len = buflen;
		req->ktr_buffer = buf;
	}
	ktr_submitrequest(curthread, req);
}
void
ktrsysret(int code, int error, register_t retval)
{
	struct ktr_request *req;
	struct ktr_sysret *ktp;

	req = ktr_getrequest(KTR_SYSRET);
	if (req == NULL)
		return;
	ktp = &req->ktr_data.ktr_sysret;
	ktp->ktr_code = code;
	ktp->ktr_error = error;
	ktp->ktr_retval = retval;		/* what about val2 ? */
	ktr_submitrequest(curthread, req);
}
/*
 * When a process exits, drain per-process asynchronous trace records.
 */
void
ktrprocexit(struct thread *td)
{

	ktrace_enter(td);
	sx_xlock(&ktrace_sx);
	ktr_drain(td);
	sx_xunlock(&ktrace_sx);
	ktrace_exit(td);
}
/*
 * When a thread returns, drain any asynchronous records generated by the
 * system call.
 */
void
ktruserret(struct thread *td)
{

	ktrace_enter(td);
	sx_xlock(&ktrace_sx);
	ktr_drain(td);
	sx_xunlock(&ktrace_sx);
	ktrace_exit(td);
}
void
ktrnamei(char *path)
{
	struct ktr_request *req;
	int namelen;
	char *buf = NULL;

	namelen = strlen(path);
	if (namelen > 0) {
		buf = malloc(namelen, M_KTRACE, M_WAITOK);
		bcopy(path, buf, namelen);
	}
	req = ktr_getrequest(KTR_NAMEI);
	if (req == NULL) {
		if (buf != NULL)
			free(buf, M_KTRACE);
		return;
	}
	if (namelen > 0) {
		req->ktr_header.ktr_len = namelen;
		req->ktr_buffer = buf;
	}
	ktr_submitrequest(curthread, req);
}
void
ktrgenio(int fd, enum uio_rw rw, struct uio *uio, int error)
{
	struct ktr_request *req;
	struct ktr_genio *ktg;
	int datalen;
	char *buf;

	if (error) {
		free(uio, M_IOV);
		return;
	}
	uio->uio_offset = 0;
	uio->uio_rw = UIO_WRITE;
	datalen = imin(uio->uio_resid, ktr_geniosize);
	buf = malloc(datalen, M_KTRACE, M_WAITOK);
	error = uiomove(buf, datalen, uio);
	free(uio, M_IOV);
	if (error) {
		free(buf, M_KTRACE);
		return;
	}
	req = ktr_getrequest(KTR_GENIO);
	if (req == NULL) {
		free(buf, M_KTRACE);
		return;
	}
	ktg = &req->ktr_data.ktr_genio;
	ktg->ktr_fd = fd;
	ktg->ktr_rw = rw;
	req->ktr_header.ktr_len = datalen;
	req->ktr_buffer = buf;
	ktr_submitrequest(curthread, req);
}
void
ktrpsig(int sig, sig_t action, sigset_t *mask, int code)
{
	struct ktr_request *req;
	struct ktr_psig	*kp;

	req = ktr_getrequest(KTR_PSIG);
	if (req == NULL)
		return;
	kp = &req->ktr_data.ktr_psig;
	kp->signo = (char)sig;
	kp->action = action;
	kp->mask = *mask;
	kp->code = code;
	ktr_enqueuerequest(curthread, req);
}
void
ktrcsw(int out, int user)
{
	struct ktr_request *req;
	struct ktr_csw *kc;

	req = ktr_getrequest(KTR_CSW);
	if (req == NULL)
		return;
	kc = &req->ktr_data.ktr_csw;
	kc->out = out;
	kc->user = user;
	ktr_enqueuerequest(curthread, req);
}
void
ktrstruct(const char *name, size_t namelen, void *data, size_t datalen)
{
	struct ktr_request *req;
	char *buf = NULL;
	size_t buflen;

	if (!data)
		datalen = 0;
	buflen = namelen + 1 + datalen;
	buf = malloc(buflen, M_KTRACE, M_WAITOK);
	bcopy(name, buf, namelen);
	buf[namelen] = '\0';
	bcopy(data, buf + namelen + 1, datalen);
	if ((req = ktr_getrequest(KTR_STRUCT)) == NULL) {
		free(buf, M_KTRACE);
		return;
	}
	req->ktr_buffer = buf;
	req->ktr_header.ktr_len = buflen;
	ktr_submitrequest(curthread, req);
}
/* Interface and common routines */

#ifndef _SYS_SYSPROTO_H_
struct ktrace_args {
	char	*fname;
	int	ops;
	int	facs;
	int	pid;
};
#endif
/* ARGSUSED */
int
ktrace(struct thread *td, struct ktrace_args *uap)
{
#ifdef KTRACE
	struct vnode *vp = NULL;
	struct proc *p;
	struct pgrp *pg;
	int facs = uap->facs & ~KTRFAC_ROOT;
	int ops = KTROP(uap->ops);
	int descend = uap->ops & KTRFLAG_DESCEND;
	int nfound, ret = 0;
	int flags, error = 0, vfslocked;
	struct nameidata nd;
	struct ucred *cred;

	/*
	 * Need something to (un)trace.
	 */
	if (ops != KTROP_CLEARFILE && facs == 0)
		return (EINVAL);

	ktrace_enter(td);
	if (ops != KTROP_CLEAR) {
		/*
		 * an operation which requires a file argument.
		 */
		NDINIT(&nd, LOOKUP, NOFOLLOW | MPSAFE, UIO_USERSPACE,
		    uap->fname, td);
		flags = FREAD | FWRITE | O_NOFOLLOW;
		error = vn_open(&nd, &flags, 0, NULL);
		if (error) {
			ktrace_exit(td);
			return (error);
		}
		vfslocked = NDHASGIANT(&nd);
		NDFREE(&nd, NDF_ONLY_PNBUF);
		vp = nd.ni_vp;
		VOP_UNLOCK(vp, 0);
		if (vp->v_type != VREG) {
			(void) vn_close(vp, FREAD|FWRITE, td->td_ucred, td);
			VFS_UNLOCK_GIANT(vfslocked);
			ktrace_exit(td);
			return (EACCES);
		}
		VFS_UNLOCK_GIANT(vfslocked);
	}
	/*
	 * Clear all uses of the tracefile.
	 */
	if (ops == KTROP_CLEARFILE) {
		int vrele_count;

		vrele_count = 0;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			if (p->p_tracevp == vp) {
				if (ktrcanset(td, p)) {
					mtx_lock(&ktrace_mtx);
					cred = p->p_tracecred;
					p->p_tracecred = NULL;
					p->p_tracevp = NULL;
					p->p_traceflag = 0;
					mtx_unlock(&ktrace_mtx);
					vrele_count++;
					crfree(cred);
				} else
					error = EPERM;
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		if (vrele_count > 0) {
			vfslocked = VFS_LOCK_GIANT(vp->v_mount);
			while (vrele_count-- > 0)
				vrele(vp);
			VFS_UNLOCK_GIANT(vfslocked);
		}
		goto done;
	}
	/*
	 * do it
	 */
	sx_slock(&proctree_lock);
	if (uap->pid < 0) {
		/*
		 * by process group
		 */
		pg = pgfind(-uap->pid);
		if (pg == NULL) {
			sx_sunlock(&proctree_lock);
			error = ESRCH;
			goto done;
		}
		/*
		 * ktrops() may call vrele(). Lock pg_members
		 * by the proctree_lock rather than pg_mtx.
		 */
		PGRP_UNLOCK(pg);
		nfound = 0;
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p_cansee(td, p) != 0) {
				PROC_UNLOCK(p);
				continue;
			}
			PROC_UNLOCK(p);
			nfound++;
			if (descend)
				ret |= ktrsetchildren(td, p, ops, facs, vp);
			else
				ret |= ktrops(td, p, ops, facs, vp);
		}
		if (nfound == 0) {
			sx_sunlock(&proctree_lock);
			error = ESRCH;
			goto done;
		}
	} else {
		/*
		 * by pid
		 */
		p = pfind(uap->pid);
		if (p == NULL) {
			sx_sunlock(&proctree_lock);
			error = ESRCH;
			goto done;
		}
		error = p_cansee(td, p);
		/*
		 * The slock of the proctree lock will keep this process
		 * from going away, so unlocking the proc here is ok.
		 */
		PROC_UNLOCK(p);
		if (error) {
			sx_sunlock(&proctree_lock);
			goto done;
		}
		if (descend)
			ret |= ktrsetchildren(td, p, ops, facs, vp);
		else
			ret |= ktrops(td, p, ops, facs, vp);
	}
	sx_sunlock(&proctree_lock);
	if (!ret)
		error = EPERM;
done:
	if (vp != NULL) {
		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
		(void) vn_close(vp, FWRITE, td->td_ucred, td);
		VFS_UNLOCK_GIANT(vfslocked);
	}
	ktrace_exit(td);
	return (error);
#else /* !KTRACE */
	return (ENOSYS);
#endif /* KTRACE */
}
/* ARGSUSED */
int
utrace(struct thread *td, struct utrace_args *uap)
{
#ifdef KTRACE
	struct ktr_request *req;
	void *cp;
	int error;

	if (!KTRPOINT(td, KTR_USER))
		return (0);
	if (uap->len > KTR_USER_MAXLEN)
		return (EINVAL);
	cp = malloc(uap->len, M_KTRACE, M_WAITOK);
	error = copyin(uap->addr, cp, uap->len);
	if (error) {
		free(cp, M_KTRACE);
		return (error);
	}
	req = ktr_getrequest(KTR_USER);
	if (req == NULL) {
		free(cp, M_KTRACE);
		return (ENOMEM);
	}
	req->ktr_buffer = cp;
	req->ktr_header.ktr_len = uap->len;
	ktr_submitrequest(td, req);
	return (0);
#else /* !KTRACE */
	return (ENOSYS);
#endif /* KTRACE */
}
static int
ktrops(struct thread *td, struct proc *p, int ops, int facs, struct vnode *vp)
{
	struct vnode *tracevp = NULL;
	struct ucred *tracecred = NULL;

	PROC_LOCK(p);
	if (!ktrcanset(td, p)) {
		PROC_UNLOCK(p);
		return (0);
	}
	mtx_lock(&ktrace_mtx);
	if (ops == KTROP_SET) {
		if (p->p_tracevp != vp) {
			/*
			 * if trace file already in use, relinquish below
			 */
			tracevp = p->p_tracevp;
			VREF(vp);
			p->p_tracevp = vp;
		}
		if (p->p_tracecred != td->td_ucred) {
			tracecred = p->p_tracecred;
			p->p_tracecred = crhold(td->td_ucred);
		}
		p->p_traceflag |= facs;
		if (priv_check(td, PRIV_KTRACE) == 0)
			p->p_traceflag |= KTRFAC_ROOT;
	} else {
		/* KTROP_CLEAR */
		if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0) {
			/* no more tracing */
			p->p_traceflag = 0;
			tracevp = p->p_tracevp;
			p->p_tracevp = NULL;
			tracecred = p->p_tracecred;
			p->p_tracecred = NULL;
		}
	}
	mtx_unlock(&ktrace_mtx);
	PROC_UNLOCK(p);
	if (tracevp != NULL) {
		int vfslocked;

		vfslocked = VFS_LOCK_GIANT(tracevp->v_mount);
		vrele(tracevp);
		VFS_UNLOCK_GIANT(vfslocked);
	}
	if (tracecred != NULL)
		crfree(tracecred);

	return (1);
}
static int
ktrsetchildren(struct thread *td, struct proc *top, int ops, int facs,
    struct vnode *vp)
{
	struct proc *p;
	int ret = 0;

	p = top;
	sx_assert(&proctree_lock, SX_LOCKED);
	for (;;) {
		ret |= ktrops(td, p, ops, facs, vp);
		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if (!LIST_EMPTY(&p->p_children))
			p = LIST_FIRST(&p->p_children);
		else for (;;) {
			if (p == top)
				return (ret);
			if (LIST_NEXT(p, p_sibling)) {
				p = LIST_NEXT(p, p_sibling);
				break;
			}
			p = p->p_pptr;
		}
	}
	/*NOTREACHED*/
}
static void
ktr_writerequest(struct thread *td, struct ktr_request *req)
{
	struct ktr_header *kth;
	struct vnode *vp;
	struct proc *p;
	struct ucred *cred;
	struct uio auio;
	struct iovec aiov[3];
	struct mount *mp;
	int datalen, buflen, vrele_count;
	int error, vfslocked;

	/*
	 * We hold the vnode and credential for use in I/O in case ktrace is
	 * disabled on the process as we write out the request.
	 *
	 * XXXRW: This is not ideal: we could end up performing a write after
	 * the vnode has been closed.
	 */
	mtx_lock(&ktrace_mtx);
	vp = td->td_proc->p_tracevp;
	if (vp != NULL)
		VREF(vp);
	cred = td->td_proc->p_tracecred;
	if (cred != NULL)
		crhold(cred);
	mtx_unlock(&ktrace_mtx);

	/*
	 * If vp is NULL, the vp has been cleared out from under this
	 * request, so just drop it.  Make sure the credential and vnode are
	 * in sync: we should have both or neither.
	 */
	if (vp == NULL) {
		KASSERT(cred == NULL, ("ktr_writerequest: cred != NULL"));
		return;
	}
	KASSERT(cred != NULL, ("ktr_writerequest: cred == NULL"));

	kth = &req->ktr_header;
	datalen = data_lengths[(u_short)kth->ktr_type & ~KTR_DROP];
	buflen = kth->ktr_len;
	auio.uio_iov = &aiov[0];
	auio.uio_offset = 0;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	aiov[0].iov_base = (caddr_t)kth;
	aiov[0].iov_len = sizeof(struct ktr_header);
	auio.uio_resid = sizeof(struct ktr_header);
	auio.uio_iovcnt = 1;
	auio.uio_td = td;
	if (datalen != 0) {
		aiov[1].iov_base = (caddr_t)&req->ktr_data;
		aiov[1].iov_len = datalen;
		auio.uio_resid += datalen;
		auio.uio_iovcnt++;
		kth->ktr_len += datalen;
	}
	if (buflen != 0) {
		KASSERT(req->ktr_buffer != NULL, ("ktrace: nothing to write"));
		aiov[auio.uio_iovcnt].iov_base = req->ktr_buffer;
		aiov[auio.uio_iovcnt].iov_len = buflen;
		auio.uio_resid += buflen;
		auio.uio_iovcnt++;
	}

	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	vn_start_write(vp, &mp, V_WAIT);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	(void)VOP_LEASE(vp, td, cred, LEASE_WRITE);
#ifdef MAC
	error = mac_vnode_check_write(cred, NOCRED, vp);
	if (error == 0)
#endif
		error = VOP_WRITE(vp, &auio, IO_UNIT | IO_APPEND, cred);
	VOP_UNLOCK(vp, 0);
	vn_finished_write(mp);
	crfree(cred);
	if (!error) {
		vrele(vp);
		VFS_UNLOCK_GIANT(vfslocked);
		return;
	}
	VFS_UNLOCK_GIANT(vfslocked);

	/*
	 * If error encountered, give up tracing on this vnode.  We defer
	 * all the vrele()'s on the vnode until after we are finished walking
	 * the various lists to avoid needlessly holding locks.
	 */
	log(LOG_NOTICE, "ktrace write failed, errno %d, tracing stopped\n",
	    error);
	vrele_count = 1;
	/*
	 * First, clear this vnode from being used by any processes in the
	 * system.
	 * XXX - If one process gets an EPERM writing to the vnode, should
	 * we really do this?  Other processes might have suitable
	 * credentials for the operation.
	 */
	cred = NULL;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		if (p->p_tracevp == vp) {
			mtx_lock(&ktrace_mtx);
			p->p_tracevp = NULL;
			p->p_traceflag = 0;
			cred = p->p_tracecred;
			p->p_tracecred = NULL;
			mtx_unlock(&ktrace_mtx);
			vrele_count++;
		}
		PROC_UNLOCK(p);
		if (cred != NULL) {
			crfree(cred);
			cred = NULL;
		}
	}
	sx_sunlock(&allproc_lock);

	/*
	 * We can't clear any pending requests in threads that have cached
	 * them but not yet committed them, as those are per-thread.  The
	 * thread will have to clear it itself on system call return.
	 */
	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	while (vrele_count-- > 0)
		vrele(vp);
	VFS_UNLOCK_GIANT(vfslocked);
}
/*
 * Return true if caller has permission to set the ktracing state
 * of target.  Essentially, the target can't possess any
 * more permissions than the caller.  KTRFAC_ROOT signifies that
 * root previously set the tracing status on the target process, and
 * so, only root may further change it.
 */
static int
ktrcanset(struct thread *td, struct proc *targetp)
{

	PROC_LOCK_ASSERT(targetp, MA_OWNED);
	if (targetp->p_traceflag & KTRFAC_ROOT &&
	    priv_check(td, PRIV_KTRACE))
		return (0);

	if (p_candebug(td, targetp) != 0)
		return (0);

	return (1);
}