/*-
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.
 * Copyright (c) 2005 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_ktrace.c	8.2 (Berkeley) 9/23/93
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ktrace.h"
#include "opt_mac.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/fcntl.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/mount.h>
#include <sys/namei.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/unistd.h>
#include <sys/vnode.h>
#include <sys/socket.h>
#include <sys/stat.h>
#include <sys/ktrace.h>
#include <sys/sx.h>
#include <sys/sysctl.h>
#include <sys/syslog.h>
#include <sys/sysproto.h>

#include <security/mac/mac_framework.h>

/*
 * The ktrace facility allows the tracing of certain key events in user space
 * processes, such as system calls, signal delivery, context switches, and
 * user generated events using utrace(2).  It works by streaming event
 * records and data to a vnode associated with the process using the
 * ktrace(2) system call.  In general, records can be written directly from
 * the context that generates the event.  One important exception to this is
 * during a context switch, where sleeping is not permitted.  To handle this
 * case, trace events are generated using in-kernel ktr_request records, and
 * then delivered to disk at a convenient moment -- either immediately, the
 * next traceable event, at system call return, or at process exit.
 *
 * When dealing with multiple threads or processes writing to the same event
 * log, ordering guarantees are weak: specifically, if an event has multiple
 * records (i.e., system call enter and return), they may be interlaced with
 * records from another event.  Process and thread ID information is provided
 * in the record, and user applications can de-interlace events if required.
 */
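/*
 * As a minimal userspace sketch (illustrative only, not part of the
 * original source), a process can arrange for its own system call
 * enter/return events to be streamed to a trace file via ktrace(2); the
 * constants come from <sys/ktrace.h> and the file name is arbitrary:
 *
 *	#include <sys/param.h>
 *	#include <sys/time.h>
 *	#include <sys/uio.h>
 *	#include <sys/ktrace.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void)
 *	{
 *		// Trace syscall entry/return for this pid into ktrace.out.
 *		if (ktrace("ktrace.out", KTROP_SET,
 *		    KTRFAC_SYSCALL | KTRFAC_SYSRET, getpid()) == -1)
 *			return (1);
 *		getpid();	// Generates KTR_SYSCALL/KTR_SYSRET records.
 *		// Detach the trace points again (fname may be NULL here).
 *		ktrace(NULL, KTROP_CLEAR,
 *		    KTRFAC_SYSCALL | KTRFAC_SYSRET, getpid());
 *		return (0);
 *	}
 */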
static MALLOC_DEFINE(M_KTRACE, "KTRACE", "KTRACE");

#ifdef KTRACE

#ifndef KTRACE_REQUEST_POOL
#define KTRACE_REQUEST_POOL	100
#endif

struct ktr_request {
	struct ktr_header ktr_header;
	void *ktr_buffer;
	union {
		struct ktr_syscall ktr_syscall;
		struct ktr_sysret ktr_sysret;
		struct ktr_genio ktr_genio;
		struct ktr_psig ktr_psig;
		struct ktr_csw ktr_csw;
	} ktr_data;
	STAILQ_ENTRY(ktr_request) ktr_list;
};

static int data_lengths[] = {
	0,					/* none */
	offsetof(struct ktr_syscall, ktr_args),	/* KTR_SYSCALL */
	sizeof(struct ktr_sysret),		/* KTR_SYSRET */
	0,					/* KTR_NAMEI */
	sizeof(struct ktr_genio),		/* KTR_GENIO */
	sizeof(struct ktr_psig),		/* KTR_PSIG */
	sizeof(struct ktr_csw),			/* KTR_CSW */
	0,					/* KTR_USER */
	0,					/* KTR_STRUCT */
};

static STAILQ_HEAD(, ktr_request) ktr_free;

static SYSCTL_NODE(_kern, OID_AUTO, ktrace, CTLFLAG_RD, 0, "KTRACE options");

static u_int ktr_requestpool = KTRACE_REQUEST_POOL;
TUNABLE_INT("kern.ktrace.request_pool", &ktr_requestpool);

static u_int ktr_geniosize = PAGE_SIZE;
TUNABLE_INT("kern.ktrace.genio_size", &ktr_geniosize);
SYSCTL_UINT(_kern_ktrace, OID_AUTO, genio_size, CTLFLAG_RW, &ktr_geniosize,
    0, "Maximum size of genio event payload");

static int print_message = 1;
struct mtx ktrace_mtx;
static struct sx ktrace_sx;

static void ktrace_init(void *dummy);
static int sysctl_kern_ktrace_request_pool(SYSCTL_HANDLER_ARGS);
static u_int ktrace_resize_pool(u_int newsize);
static struct ktr_request *ktr_getrequest(int type);
static void ktr_submitrequest(struct thread *td, struct ktr_request *req);
static void ktr_freerequest(struct ktr_request *req);
static void ktr_writerequest(struct thread *td, struct ktr_request *req);
static int ktrcanset(struct thread *,struct proc *);
static int ktrsetchildren(struct thread *,struct proc *,int,int,struct vnode *);
static int ktrops(struct thread *,struct proc *,int,int,struct vnode *);

/*
 * ktrace itself generates events, such as context switches, which we do not
 * wish to trace.  Maintain a flag, TDP_INKTRACE, on each thread to determine
 * whether or not it is in a region where tracing of events should be
 * suppressed.
 */
static void
ktrace_enter(struct thread *td)
{

	KASSERT(!(td->td_pflags & TDP_INKTRACE), ("ktrace_enter: flag set"));
	td->td_pflags |= TDP_INKTRACE;
}

static void
ktrace_exit(struct thread *td)
{

	KASSERT(td->td_pflags & TDP_INKTRACE, ("ktrace_exit: flag not set"));
	td->td_pflags &= ~TDP_INKTRACE;
}

static void
ktrace_assert(struct thread *td)
{

	KASSERT(td->td_pflags & TDP_INKTRACE, ("ktrace_assert: flag not set"));
}

static void
ktrace_init(void *dummy)
{
	struct ktr_request *req;
	int i;

	mtx_init(&ktrace_mtx, "ktrace", NULL, MTX_DEF | MTX_QUIET);
	sx_init(&ktrace_sx, "ktrace_sx");
	STAILQ_INIT(&ktr_free);
	for (i = 0; i < ktr_requestpool; i++) {
		req = malloc(sizeof(struct ktr_request), M_KTRACE, M_WAITOK);
		STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
	}
}
SYSINIT(ktrace_init, SI_SUB_KTRACE, SI_ORDER_ANY, ktrace_init, NULL);

static int
sysctl_kern_ktrace_request_pool(SYSCTL_HANDLER_ARGS)
{
	struct thread *td;
	u_int newsize, oldsize, wantsize;
	int error;

	/* Handle easy read-only case first to avoid warnings from GCC. */
	if (!req->newptr) {
		mtx_lock(&ktrace_mtx);
		oldsize = ktr_requestpool;
		mtx_unlock(&ktrace_mtx);
		return (SYSCTL_OUT(req, &oldsize, sizeof(u_int)));
	}

	error = SYSCTL_IN(req, &wantsize, sizeof(u_int));
	if (error)
		return (error);
	td = curthread;
	ktrace_enter(td);
	mtx_lock(&ktrace_mtx);
	oldsize = ktr_requestpool;
	newsize = ktrace_resize_pool(wantsize);
	mtx_unlock(&ktrace_mtx);
	ktrace_exit(td);
	error = SYSCTL_OUT(req, &oldsize, sizeof(u_int));
	if (error)
		return (error);
	if (wantsize > oldsize && newsize < wantsize)
		return (ENOSPC);
	return (0);
}
SYSCTL_PROC(_kern_ktrace, OID_AUTO, request_pool, CTLTYPE_UINT|CTLFLAG_RW,
    &ktr_requestpool, 0, sysctl_kern_ktrace_request_pool, "IU", "");
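/*
 * The pool size is also tunable at runtime through this sysctl.  A minimal
 * userspace sketch (illustrative only, not part of the original source) of
 * resizing it; sysctl(8) would do the same with
 * "sysctl kern.ktrace.request_pool=200":
 *
 *	#include <sys/types.h>
 *	#include <sys/sysctl.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		u_int newsize = 200, oldsize;
 *		size_t oldlen = sizeof(oldsize);
 *
 *		// The handler returns the previous size, and fails with
 *		// ENOSPC if the pool could not grow to the requested size.
 *		if (sysctlbyname("kern.ktrace.request_pool", &oldsize,
 *		    &oldlen, &newsize, sizeof(newsize)) == -1) {
 *			perror("sysctlbyname");
 *			return (1);
 *		}
 *		printf("pool was %u\n", oldsize);
 *		return (0);
 *	}
 */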
static u_int
ktrace_resize_pool(u_int newsize)
{
	struct ktr_request *req;
	int bound;

	mtx_assert(&ktrace_mtx, MA_OWNED);
	print_message = 1;
	bound = newsize - ktr_requestpool;
	if (bound == 0)
		return (ktr_requestpool);
	if (bound < 0)
		/* Shrink pool down to newsize if possible. */
		while (bound++ < 0) {
			req = STAILQ_FIRST(&ktr_free);
			if (req == NULL)
				return (ktr_requestpool);
			STAILQ_REMOVE_HEAD(&ktr_free, ktr_list);
			ktr_requestpool--;
			mtx_unlock(&ktrace_mtx);
			free(req, M_KTRACE);
			mtx_lock(&ktrace_mtx);
		}
	else
		/* Grow pool up to newsize. */
		while (bound-- > 0) {
			mtx_unlock(&ktrace_mtx);
			req = malloc(sizeof(struct ktr_request), M_KTRACE,
			    M_WAITOK);
			mtx_lock(&ktrace_mtx);
			STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
			ktr_requestpool++;
		}
	return (ktr_requestpool);
}

static struct ktr_request *
ktr_getrequest(int type)
{
	struct ktr_request *req;
	struct thread *td = curthread;
	struct proc *p = td->td_proc;
	int pm;

	ktrace_enter(td);	/* XXX: In caller instead? */
	mtx_lock(&ktrace_mtx);
	if (!KTRCHECK(td, type)) {
		mtx_unlock(&ktrace_mtx);
		ktrace_exit(td);
		return (NULL);
	}
	req = STAILQ_FIRST(&ktr_free);
	if (req != NULL) {
		STAILQ_REMOVE_HEAD(&ktr_free, ktr_list);
		req->ktr_header.ktr_type = type;
		if (p->p_traceflag & KTRFAC_DROP) {
			req->ktr_header.ktr_type |= KTR_DROP;
			p->p_traceflag &= ~KTRFAC_DROP;
		}
		mtx_unlock(&ktrace_mtx);
		microtime(&req->ktr_header.ktr_time);
		req->ktr_header.ktr_pid = p->p_pid;
		req->ktr_header.ktr_tid = td->td_tid;
		bcopy(td->td_name, req->ktr_header.ktr_comm, MAXCOMLEN + 1);
		req->ktr_buffer = NULL;
		req->ktr_header.ktr_len = 0;
	} else {
		p->p_traceflag |= KTRFAC_DROP;
		pm = print_message;
		print_message = 0;
		mtx_unlock(&ktrace_mtx);
		if (pm)
			printf("Out of ktrace request objects.\n");
		ktrace_exit(td);
	}
	return (req);
}

/*
 * Some trace generation environments don't permit direct access to VFS,
 * such as during a context switch where sleeping is not allowed.  Under
 * these circumstances, queue a request to the thread to be written
 * asynchronously later.
 */
static void
ktr_enqueuerequest(struct thread *td, struct ktr_request *req)
{

	mtx_lock(&ktrace_mtx);
	STAILQ_INSERT_TAIL(&td->td_proc->p_ktr, req, ktr_list);
	mtx_unlock(&ktrace_mtx);
	ktrace_exit(td);
}
/*
 * Drain any pending ktrace records from the per-thread queue to disk.  This
 * is used both internally before committing other records, and also on
 * system call return.  We drain all the ones we can find at the time when
 * drain is requested, but don't keep draining after that as those events
 * may be approximately "after" the current event.
 */
static void
ktr_drain(struct thread *td)
{
	struct ktr_request *queued_req;
	STAILQ_HEAD(, ktr_request) local_queue;

	ktrace_assert(td);
	sx_assert(&ktrace_sx, SX_XLOCKED);

	STAILQ_INIT(&local_queue);	/* XXXRW: needed? */

	if (!STAILQ_EMPTY(&td->td_proc->p_ktr)) {
		mtx_lock(&ktrace_mtx);
		STAILQ_CONCAT(&local_queue, &td->td_proc->p_ktr);
		mtx_unlock(&ktrace_mtx);

		while ((queued_req = STAILQ_FIRST(&local_queue))) {
			STAILQ_REMOVE_HEAD(&local_queue, ktr_list);
			ktr_writerequest(td, queued_req);
			ktr_freerequest(queued_req);
		}
	}
}

/*
 * Submit a trace record for immediate commit to disk -- to be used only
 * where entering VFS is OK.  First drain any pending records that may have
 * been cached in the thread.
 */
static void
ktr_submitrequest(struct thread *td, struct ktr_request *req)
{

	ktrace_assert(td);

	sx_xlock(&ktrace_sx);
	ktr_drain(td);
	ktr_writerequest(td, req);
	ktr_freerequest(req);
	sx_xunlock(&ktrace_sx);

	ktrace_exit(td);
}

static void
ktr_freerequest(struct ktr_request *req)
{

	if (req->ktr_buffer != NULL)
		free(req->ktr_buffer, M_KTRACE);
	mtx_lock(&ktrace_mtx);
	STAILQ_INSERT_HEAD(&ktr_free, req, ktr_list);
	mtx_unlock(&ktrace_mtx);
}

void
ktrsyscall(code, narg, args)
	int code, narg;
	register_t args[];
{
	struct ktr_request *req;
	struct ktr_syscall *ktp;
	size_t buflen;
	char *buf = NULL;

	buflen = sizeof(register_t) * narg;
	if (buflen > 0) {
		buf = malloc(buflen, M_KTRACE, M_WAITOK);
		bcopy(args, buf, buflen);
	}
	req = ktr_getrequest(KTR_SYSCALL);
	if (req == NULL) {
		if (buf != NULL)
			free(buf, M_KTRACE);
		return;
	}
	ktp = &req->ktr_data.ktr_syscall;
	ktp->ktr_code = code;
	ktp->ktr_narg = narg;
	if (buflen > 0) {
		req->ktr_header.ktr_len = buflen;
		req->ktr_buffer = buf;
	}
	ktr_submitrequest(curthread, req);
}

void
ktrsysret(code, error, retval)
	int code, error;
	register_t retval;
{
	struct ktr_request *req;
	struct ktr_sysret *ktp;

	req = ktr_getrequest(KTR_SYSRET);
	if (req == NULL)
		return;
	ktp = &req->ktr_data.ktr_sysret;
	ktp->ktr_code = code;
	ktp->ktr_error = error;
	ktp->ktr_retval = retval;		/* what about val2 ? */
	ktr_submitrequest(curthread, req);
}

/*
 * When a process exits, drain per-process asynchronous trace records.
 */
void
ktrprocexit(struct thread *td)
{

	ktrace_enter(td);
	sx_xlock(&ktrace_sx);
	ktr_drain(td);
	sx_xunlock(&ktrace_sx);
	ktrace_exit(td);
}

/*
 * When a thread returns, drain any asynchronous records generated by the
 * system call.
 */
void
ktruserret(struct thread *td)
{

	ktrace_enter(td);
	sx_xlock(&ktrace_sx);
	ktr_drain(td);
	sx_xunlock(&ktrace_sx);
	ktrace_exit(td);
}

void
ktrnamei(path)
	char *path;
{
	struct ktr_request *req;
	int namelen;
	char *buf = NULL;

	namelen = strlen(path);
	if (namelen > 0) {
		buf = malloc(namelen, M_KTRACE, M_WAITOK);
		bcopy(path, buf, namelen);
	}
	req = ktr_getrequest(KTR_NAMEI);
	if (req == NULL) {
		if (buf != NULL)
			free(buf, M_KTRACE);
		return;
	}
	if (namelen > 0) {
		req->ktr_header.ktr_len = namelen;
		req->ktr_buffer = buf;
	}
	ktr_submitrequest(curthread, req);
}

void
ktrgenio(fd, rw, uio, error)
	int fd;
	enum uio_rw rw;
	struct uio *uio;
	int error;
{
	struct ktr_request *req;
	struct ktr_genio *ktg;
	int datalen;
	char *buf;

	if (error) {
		free(uio, M_IOV);
		return;
	}
	uio->uio_offset = 0;
	uio->uio_rw = UIO_WRITE;
	datalen = imin(uio->uio_resid, ktr_geniosize);
	buf = malloc(datalen, M_KTRACE, M_WAITOK);
	error = uiomove(buf, datalen, uio);
	free(uio, M_IOV);
	if (error) {
		free(buf, M_KTRACE);
		return;
	}
	req = ktr_getrequest(KTR_GENIO);
	if (req == NULL) {
		free(buf, M_KTRACE);
		return;
	}
	ktg = &req->ktr_data.ktr_genio;
	ktg->ktr_fd = fd;
	ktg->ktr_rw = rw;
	req->ktr_header.ktr_len = datalen;
	req->ktr_buffer = buf;
	ktr_submitrequest(curthread, req);
}

void
ktrpsig(sig, action, mask, code)
	int sig;
	sig_t action;
	sigset_t *mask;
	int code;
{
	struct ktr_request *req;
	struct ktr_psig *kp;

	req = ktr_getrequest(KTR_PSIG);
	if (req == NULL)
		return;
	kp = &req->ktr_data.ktr_psig;
	kp->signo = (char)sig;
	kp->action = action;
	kp->mask = *mask;
	kp->code = code;
	ktr_enqueuerequest(curthread, req);
}

void
ktrcsw(out, user)
	int out, user;
{
	struct ktr_request *req;
	struct ktr_csw *kc;

	req = ktr_getrequest(KTR_CSW);
	if (req == NULL)
		return;
	kc = &req->ktr_data.ktr_csw;
	kc->out = out;
	kc->user = user;
	ktr_enqueuerequest(curthread, req);
}

void
ktrstruct(name, namelen, data, datalen)
	const char *name;
	size_t namelen;
	void *data;
	size_t datalen;
{
	struct ktr_request *req;
	char *buf = NULL;
	size_t buflen;

	if (!data)
		datalen = 0;
	buflen = namelen + 1 + datalen;
	buf = malloc(buflen, M_KTRACE, M_WAITOK);
	bcopy(name, buf, namelen);
	buf[namelen] = '\0';
	bcopy(data, buf + namelen + 1, datalen);
	if ((req = ktr_getrequest(KTR_STRUCT)) == NULL) {
		free(buf, M_KTRACE);
		return;
	}
	req->ktr_buffer = buf;
	req->ktr_header.ktr_len = buflen;
	ktr_submitrequest(curthread, req);
}
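/*
 * A hypothetical in-kernel caller (illustrative sketch only): a subsystem
 * that wants a struct stat recorded in the trace stream would pass the
 * name, its length, and the payload, producing a KTR_STRUCT record whose
 * buffer is "stat\0" followed by the raw structure bytes:
 *
 *	struct stat sb;
 *
 *	...fill in sb...
 *	ktrstruct("stat", 4, &sb, sizeof(sb));
 */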
#endif /* KTRACE */

/* Interface and common routines */

#ifndef _SYS_SYSPROTO_H_
struct ktrace_args {
	char	*fname;
	int	ops;
	int	facs;
	int	pid;
};
#endif
/* ARGSUSED */
int
ktrace(td, uap)
	struct thread *td;
	register struct ktrace_args *uap;
{
#ifdef KTRACE
	register struct vnode *vp = NULL;
	register struct proc *p;
	struct pgrp *pg;
	int facs = uap->facs & ~KTRFAC_ROOT;
	int ops = KTROP(uap->ops);
	int descend = uap->ops & KTRFLAG_DESCEND;
	int nfound, ret = 0;
	int flags, error = 0, vfslocked;
	struct nameidata nd;
	struct ucred *cred;

	/*
	 * Need something to (un)trace.
	 */
	if (ops != KTROP_CLEARFILE && facs == 0)
		return (EINVAL);

	ktrace_enter(td);
	if (ops != KTROP_CLEAR) {
		/*
		 * an operation which requires a file argument.
		 */
		NDINIT(&nd, LOOKUP, NOFOLLOW | MPSAFE, UIO_USERSPACE,
		    uap->fname, td);
		flags = FREAD | FWRITE | O_NOFOLLOW;
		error = vn_open(&nd, &flags, 0, NULL);
		if (error) {
			ktrace_exit(td);
			return (error);
		}
		vfslocked = NDHASGIANT(&nd);
		NDFREE(&nd, NDF_ONLY_PNBUF);
		vp = nd.ni_vp;
		VOP_UNLOCK(vp, 0);
		if (vp->v_type != VREG) {
			(void) vn_close(vp, FREAD|FWRITE, td->td_ucred, td);
			VFS_UNLOCK_GIANT(vfslocked);
			ktrace_exit(td);
			return (EACCES);
		}
		VFS_UNLOCK_GIANT(vfslocked);
	}
	/*
	 * Clear all uses of the tracefile.
	 */
	if (ops == KTROP_CLEARFILE) {
		int vrele_count;

		vrele_count = 0;
		sx_slock(&allproc_lock);
		FOREACH_PROC_IN_SYSTEM(p) {
			PROC_LOCK(p);
			if (p->p_tracevp == vp) {
				if (ktrcanset(td, p)) {
					mtx_lock(&ktrace_mtx);
					cred = p->p_tracecred;
					p->p_tracecred = NULL;
					p->p_tracevp = NULL;
					p->p_traceflag = 0;
					mtx_unlock(&ktrace_mtx);
					vrele_count++;
					crfree(cred);
				} else
					error = EPERM;
			}
			PROC_UNLOCK(p);
		}
		sx_sunlock(&allproc_lock);
		if (vrele_count > 0) {
			vfslocked = VFS_LOCK_GIANT(vp->v_mount);
			while (vrele_count-- > 0)
				vrele(vp);
			VFS_UNLOCK_GIANT(vfslocked);
		}
		goto done;
	}
	/*
	 * do it
	 */
	sx_slock(&proctree_lock);
	if (uap->pid < 0) {
		/*
		 * by process group
		 */
		pg = pgfind(-uap->pid);
		if (pg == NULL) {
			sx_sunlock(&proctree_lock);
			error = ESRCH;
			goto done;
		}
		/*
		 * ktrops() may call vrele().  Lock pg_members
		 * by the proctree_lock rather than pg_mtx.
		 */
		PGRP_UNLOCK(pg);
		nfound = 0;
		LIST_FOREACH(p, &pg->pg_members, p_pglist) {
			PROC_LOCK(p);
			if (p_cansee(td, p) != 0) {
				PROC_UNLOCK(p);
				continue;
			}
			PROC_UNLOCK(p);
			nfound++;
			if (descend)
				ret |= ktrsetchildren(td, p, ops, facs, vp);
			else
				ret |= ktrops(td, p, ops, facs, vp);
		}
		if (nfound == 0) {
			sx_sunlock(&proctree_lock);
			error = ESRCH;
			goto done;
		}
	} else {
		/*
		 * by pid
		 */
		p = pfind(uap->pid);
		if (p == NULL) {
			sx_sunlock(&proctree_lock);
			error = ESRCH;
			goto done;
		}
		error = p_cansee(td, p);
		/*
		 * The slock of the proctree lock will keep this process
		 * from going away, so unlocking the proc here is ok.
		 */
		PROC_UNLOCK(p);
		if (error) {
			sx_sunlock(&proctree_lock);
			goto done;
		}
		if (descend)
			ret |= ktrsetchildren(td, p, ops, facs, vp);
		else
			ret |= ktrops(td, p, ops, facs, vp);
	}
	sx_sunlock(&proctree_lock);
	if (!ret)
		error = EPERM;
done:
	if (vp != NULL) {
		vfslocked = VFS_LOCK_GIANT(vp->v_mount);
		(void) vn_close(vp, FWRITE, td->td_ucred, td);
		VFS_UNLOCK_GIANT(vfslocked);
	}
	ktrace_exit(td);
	return (error);
#else /* !KTRACE */
	return (ENOSYS);
#endif /* KTRACE */
}

/* ARGSUSED */
int
utrace(td, uap)
	struct thread *td;
	register struct utrace_args *uap;
{

#ifdef KTRACE
	struct ktr_request *req;
	void *cp;
	int error;

	if (!KTRPOINT(td, KTR_USER))
		return (0);
	if (uap->len > KTR_USER_MAXLEN)
		return (EINVAL);
	cp = malloc(uap->len, M_KTRACE, M_WAITOK);
	error = copyin(uap->addr, cp, uap->len);
	if (error) {
		free(cp, M_KTRACE);
		return (error);
	}
	req = ktr_getrequest(KTR_USER);
	if (req == NULL) {
		free(cp, M_KTRACE);
		return (ENOMEM);
	}
	req->ktr_buffer = cp;
	req->ktr_header.ktr_len = uap->len;
	ktr_submitrequest(td, req);
	return (0);
#else /* !KTRACE */
	return (ENOSYS);
#endif /* KTRACE */
}
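/*
 * A minimal userspace sketch of emitting a KTR_USER record (illustrative
 * only, not part of the original source): any application traced with the
 * KTRFAC_USER facility can inject arbitrary bytes into its own trace
 * stream, which kdump(1) then displays:
 *
 *	#include <sys/param.h>
 *	#include <sys/time.h>
 *	#include <sys/uio.h>
 *	#include <sys/ktrace.h>
 *
 *	int
 *	main(void)
 *	{
 *		const char msg[] = "checkpoint reached";
 *
 *		// No-op (returns 0) unless KTR_USER tracing is active.
 *		utrace(msg, sizeof(msg));
 *		return (0);
 *	}
 */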
#ifdef KTRACE
static int
ktrops(td, p, ops, facs, vp)
	struct thread *td;
	struct proc *p;
	int ops, facs;
	struct vnode *vp;
{
	struct vnode *tracevp = NULL;
	struct ucred *tracecred = NULL;

	PROC_LOCK(p);
	if (!ktrcanset(td, p)) {
		PROC_UNLOCK(p);
		return (0);
	}
	mtx_lock(&ktrace_mtx);
	if (ops == KTROP_SET) {
		if (p->p_tracevp != vp) {
			/*
			 * if trace file already in use, relinquish below
			 */
			tracevp = p->p_tracevp;
			VREF(vp);
			p->p_tracevp = vp;
		}
		if (p->p_tracecred != td->td_ucred) {
			tracecred = p->p_tracecred;
			p->p_tracecred = crhold(td->td_ucred);
		}
		p->p_traceflag |= facs;
		if (priv_check(td, PRIV_KTRACE) == 0)
			p->p_traceflag |= KTRFAC_ROOT;
	} else {
		/* KTROP_CLEAR */
		if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0) {
			/* no more tracing */
			p->p_traceflag = 0;
			tracevp = p->p_tracevp;
			p->p_tracevp = NULL;
			tracecred = p->p_tracecred;
			p->p_tracecred = NULL;
		}
	}
	mtx_unlock(&ktrace_mtx);
	PROC_UNLOCK(p);
	if (tracevp != NULL) {
		int vfslocked;

		vfslocked = VFS_LOCK_GIANT(tracevp->v_mount);
		vrele(tracevp);
		VFS_UNLOCK_GIANT(vfslocked);
	}
	if (tracecred != NULL)
		crfree(tracecred);

	return (1);
}

static int
ktrsetchildren(td, top, ops, facs, vp)
	struct thread *td;
	struct proc *top;
	int ops, facs;
	struct vnode *vp;
{
	register struct proc *p;
	register int ret = 0;

	p = top;
	sx_assert(&proctree_lock, SX_LOCKED);
	for (;;) {
		ret |= ktrops(td, p, ops, facs, vp);
		/*
		 * If this process has children, descend to them next,
		 * otherwise do any siblings, and if done with this level,
		 * follow back up the tree (but not past top).
		 */
		if (!LIST_EMPTY(&p->p_children))
			p = LIST_FIRST(&p->p_children);
		else for (;;) {
			if (p == top)
				return (ret);
			if (LIST_NEXT(p, p_sibling)) {
				p = LIST_NEXT(p, p_sibling);
				break;
			}
			p = p->p_pptr;
		}
	}
	/*NOTREACHED*/
}

static void
ktr_writerequest(struct thread *td, struct ktr_request *req)
{
	struct ktr_header *kth;
	struct vnode *vp;
	struct proc *p;
	struct ucred *cred;
	struct uio auio;
	struct iovec aiov[3];
	struct mount *mp;
	int datalen, buflen, vrele_count;
	int error, vfslocked;

	/*
	 * We hold the vnode and credential for use in I/O in case ktrace is
	 * disabled on the process as we write out the request.
	 *
	 * XXXRW: This is not ideal: we could end up performing a write after
	 * the vnode has been closed.
	 */
	mtx_lock(&ktrace_mtx);
	vp = td->td_proc->p_tracevp;
	if (vp != NULL)
		VREF(vp);
	cred = td->td_proc->p_tracecred;
	if (cred != NULL)
		crhold(cred);
	mtx_unlock(&ktrace_mtx);

	/*
	 * If vp is NULL, the vp has been cleared out from under this
	 * request, so just drop it.  Make sure the credential and vnode are
	 * in sync: we should have both or neither.
	 */
	if (vp == NULL) {
		KASSERT(cred == NULL, ("ktr_writerequest: cred != NULL"));
		return;
	}
	KASSERT(cred != NULL, ("ktr_writerequest: cred == NULL"));

	kth = &req->ktr_header;
	datalen = data_lengths[(u_short)kth->ktr_type & ~KTR_DROP];
	buflen = kth->ktr_len;
	auio.uio_iov = &aiov[0];
	auio.uio_offset = 0;
	auio.uio_segflg = UIO_SYSSPACE;
	auio.uio_rw = UIO_WRITE;
	aiov[0].iov_base = (caddr_t)kth;
	aiov[0].iov_len = sizeof(struct ktr_header);
	auio.uio_resid = sizeof(struct ktr_header);
	auio.uio_iovcnt = 1;
	auio.uio_td = td;
	if (datalen != 0) {
		aiov[1].iov_base = (caddr_t)&req->ktr_data;
		aiov[1].iov_len = datalen;
		auio.uio_resid += datalen;
		auio.uio_iovcnt++;
		kth->ktr_len += datalen;
	}
	if (buflen != 0) {
		KASSERT(req->ktr_buffer != NULL, ("ktrace: nothing to write"));
		aiov[auio.uio_iovcnt].iov_base = req->ktr_buffer;
		aiov[auio.uio_iovcnt].iov_len = buflen;
		auio.uio_resid += buflen;
		auio.uio_iovcnt++;
	}

	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	vn_start_write(vp, &mp, V_WAIT);
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	(void)VOP_LEASE(vp, td, cred, LEASE_WRITE);
#ifdef MAC
	error = mac_vnode_check_write(cred, NOCRED, vp);
	if (error == 0)
#endif
		error = VOP_WRITE(vp, &auio, IO_UNIT | IO_APPEND, cred);
	VOP_UNLOCK(vp, 0);
	vn_finished_write(mp);
	vrele(vp);
	VFS_UNLOCK_GIANT(vfslocked);
	if (!error)
		return;
	/*
	 * If error encountered, give up tracing on this vnode.  We defer
	 * all the vrele()'s on the vnode until after we are finished walking
	 * the various lists to avoid needlessly holding locks.
	 */
	log(LOG_NOTICE, "ktrace write failed, errno %d, tracing stopped\n",
	    error);
	vrele_count = 0;
	/*
	 * First, clear this vnode from being used by any processes in the
	 * system.
	 * XXX - If one process gets an EPERM writing to the vnode, should
	 *	 we really do this?  Other processes might have suitable
	 *	 credentials for the operation.
	 */
	cred = NULL;
	sx_slock(&allproc_lock);
	FOREACH_PROC_IN_SYSTEM(p) {
		PROC_LOCK(p);
		if (p->p_tracevp == vp) {
			mtx_lock(&ktrace_mtx);
			p->p_tracevp = NULL;
			p->p_traceflag = 0;
			cred = p->p_tracecred;
			p->p_tracecred = NULL;
			mtx_unlock(&ktrace_mtx);
			vrele_count++;
		}
		PROC_UNLOCK(p);
		if (cred != NULL) {
			crfree(cred);
			cred = NULL;
		}
	}
	sx_sunlock(&allproc_lock);

	/*
	 * We can't clear any pending requests in threads that have cached
	 * them but not yet committed them, as those are per-thread.  The
	 * thread will have to clear it itself on system call return.
	 */
	vfslocked = VFS_LOCK_GIANT(vp->v_mount);
	while (vrele_count-- > 0)
		vrele(vp);
	VFS_UNLOCK_GIANT(vfslocked);
}
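/*
 * The resulting on-disk format is a stream of variable-length records:
 * each struct ktr_header is followed by ktr_len payload bytes (the
 * fixed-size ktr_data part, if any, then the out-of-line buffer).  A
 * minimal userspace sketch (illustrative only, not part of the original
 * source) of walking such a file, roughly as kdump(1) does:
 *
 *	#include <sys/param.h>
 *	#include <sys/time.h>
 *	#include <sys/uio.h>
 *	#include <sys/ktrace.h>
 *	#include <stdio.h>
 *
 *	int
 *	main(void)
 *	{
 *		struct ktr_header kth;
 *		FILE *fp = fopen("ktrace.out", "rb");
 *
 *		while (fp != NULL && fread(&kth, sizeof(kth), 1, fp) == 1) {
 *			printf("pid %d type %d len %d\n",
 *			    (int)kth.ktr_pid, kth.ktr_type, kth.ktr_len);
 *			// Skip the payload to reach the next header.
 *			fseek(fp, kth.ktr_len, SEEK_CUR);
 *		}
 *		return (0);
 *	}
 */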
/*
 * Return true if caller has permission to set the ktracing state
 * of target.  Essentially, the target can't possess any
 * more permissions than the caller.  KTRFAC_ROOT signifies that
 * root previously set the tracing status on the target process, and
 * so, only root may further change it.
 */
static int
ktrcanset(td, targetp)
	struct thread *td;
	struct proc *targetp;
{

	PROC_LOCK_ASSERT(targetp, MA_OWNED);
	if (targetp->p_traceflag & KTRFAC_ROOT &&
	    priv_check(td, PRIV_KTRACE))
		return (0);

	if (p_candebug(td, targetp) != 0)
		return (0);

	return (1);
}

#endif /* KTRACE */