kernel - Incidental MPLOCK removal
dragonfly.git: sys/kern/kern_ktrace.c
/*
 * Copyright (c) 1989, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)kern_ktrace.c	8.2 (Berkeley) 9/23/93
 * $FreeBSD: src/sys/kern/kern_ktrace.c,v 1.35.2.6 2002/07/05 22:36:38 darrenr Exp $
 */

#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/kernel.h>
#include <sys/proc.h>
#include <sys/fcntl.h>
#include <sys/lock.h>
#include <sys/nlookup.h>
#include <sys/vnode.h>
#include <sys/ktrace.h>
#include <sys/malloc.h>
#include <sys/syslog.h>
#include <sys/sysent.h>

#include <vm/vm_zone.h>

static MALLOC_DEFINE(M_KTRACE, "KTRACE", "KTRACE");

#ifdef KTRACE
static void ktrgetheader (struct ktr_header *kth, int type);
static struct ktr_syscall *ktrgetsyscall(struct ktr_header *kth,
                                struct ktr_syscall *ktp_cache, int narg);
static void ktrputsyscall(struct ktr_syscall *ktp_cache,
                                struct ktr_syscall *ktp);
static void ktrwrite (struct lwp *, struct ktr_header *, struct uio *);
static int ktrcanset (struct thread *,struct proc *);
static int ktrsetchildren (struct thread *, struct proc *,
                                int, int, ktrace_node_t);
static int ktrops (struct thread *,struct proc *,int,int, ktrace_node_t);
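
/*
 * Descriptive comment (added; behavior taken from the function body):
 * fill in the common ktr_header fields (type, flags, cpu, pid, tid,
 * command name) for a new trace record on the current thread.
 */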
/*
 * MPSAFE
 */
static
void
ktrgetheader(struct ktr_header *kth, int type)
{
        thread_t td = curthread;
        struct proc *p = td->td_proc;
        struct lwp *lp = td->td_lwp;

        kth->ktr_type = type;
        /* XXX threaded flag is a hack at the moment */
        kth->ktr_flags = (p->p_nthreads > 1) ? KTRH_THREADED : 0;
        kth->ktr_flags |= KTRH_CPUID_ENCODE(td->td_gd->gd_cpuid);
        /*microtime(&kth->ktr_time); set in ktrwrite */
        kth->ktr_pid = p->p_pid;
        kth->ktr_tid = lp->lwp_tid;
        bcopy(p->p_comm, kth->ktr_comm, MAXCOMLEN + 1);
}
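
/*
 * Descriptive comment (added): return a buffer large enough for a
 * KTR_SYSCALL record with narg arguments, using the caller-supplied
 * cache buffer when the record fits and allocating a larger one
 * otherwise.  The header is pointed at the returned buffer.
 */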
static
struct ktr_syscall *
ktrgetsyscall(struct ktr_header *kth, struct ktr_syscall *ktp_cache, int narg)
{
        size_t len;

        len = offsetof(struct ktr_syscall, ktr_args[narg]);
        if (len > sizeof(*ktp_cache))
                ktp_cache = kmalloc(len, M_KTRACE, M_WAITOK);
        kth->ktr_buf = (caddr_t)ktp_cache;
        kth->ktr_len = (int)len;
        return (ktp_cache);
}
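
/*
 * Descriptive comment (added): release a record obtained from
 * ktrgetsyscall(), freeing it only if it was dynamically allocated.
 */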
static
void
ktrputsyscall(struct ktr_syscall *ktp_cache, struct ktr_syscall *ktp)
{
        if (ktp != ktp_cache)
                kfree(ktp, M_KTRACE);
}
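
/*
 * Descriptive comment (added): generate a KTR_SYSCALL trace record for
 * a system call entry; the syscall code and its arguments are copied
 * into the record and written out.
 */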
void
ktrsyscall(struct lwp *lp, int code, int narg, register_t args[])
{
        struct ktr_header kth;
        struct ktr_syscall ktp_cache;
        struct ktr_syscall *ktp;
        register_t *argp;
        int i;

        /*
         * Setting the active bit prevents a ktrace recursion from the
         * ktracing op itself.
         */
        lp->lwp_traceflag |= KTRFAC_ACTIVE;
        ktrgetheader(&kth, KTR_SYSCALL);

        ktp = ktrgetsyscall(&kth, &ktp_cache, narg);
        ktp->ktr_code = code;
        ktp->ktr_narg = narg;
        argp = &ktp->ktr_args[0];
        for (i = 0; i < narg; i++)
                *argp++ = args[i];
        ktrwrite(lp, &kth, NULL);

        ktrputsyscall(&ktp_cache, ktp);
        lp->lwp_traceflag &= ~KTRFAC_ACTIVE;
}
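
/*
 * Descriptive comment (added): generate a KTR_SYSRET trace record for
 * a system call return.  Only the first return value is recorded, and
 * only on success.
 */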
void
ktrsysret(struct lwp *lp, int code, int error, register_t retval)
{
        struct ktr_header kth;
        struct ktr_sysret ktp;

        lp->lwp_traceflag |= KTRFAC_ACTIVE;
        ktrgetheader(&kth, KTR_SYSRET);

        ktp.ktr_code = code;
        ktp.ktr_error = error;
        if (error == 0)
                ktp.ktr_retval = retval;        /* what about val2 ? */
        else
                ktp.ktr_retval = 0;

        kth.ktr_buf = (caddr_t)&ktp;
        kth.ktr_len = (int)sizeof(struct ktr_sysret);

        ktrwrite(lp, &kth, NULL);
        lp->lwp_traceflag &= ~KTRFAC_ACTIVE;
}
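
/*
 * Descriptive comment (added): generate a KTR_NAMEI trace record
 * containing the path being translated.
 */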
void
ktrnamei(struct lwp *lp, char *path)
{
        struct ktr_header kth;

        lp->lwp_traceflag |= KTRFAC_ACTIVE;
        ktrgetheader(&kth, KTR_NAMEI);

        kth.ktr_len = (int)strlen(path);
        kth.ktr_buf = path;

        ktrwrite(lp, &kth, NULL);
        lp->lwp_traceflag &= ~KTRFAC_ACTIVE;
}
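
/*
 * Descriptive comment (added): generate a KTR_GENIO trace record for
 * completed I/O on a file descriptor.  The supplied uio describing the
 * transferred data is appended to the record by ktrwrite(); failed I/O
 * is not traced.
 */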
void
ktrgenio(struct lwp *lp, int fd, enum uio_rw rw, struct uio *uio, int error)
{
        struct ktr_header kth;
        struct ktr_genio ktg;

        if (error)
                return;
        lp->lwp_traceflag |= KTRFAC_ACTIVE;
        ktrgetheader(&kth, KTR_GENIO);

        ktg.ktr_fd = fd;
        ktg.ktr_rw = rw;
        kth.ktr_buf = (caddr_t)&ktg;
        kth.ktr_len = (int)sizeof(struct ktr_genio);
        uio->uio_offset = 0;
        uio->uio_rw = UIO_WRITE;

        ktrwrite(lp, &kth, uio);
        lp->lwp_traceflag &= ~KTRFAC_ACTIVE;
}
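
/*
 * Descriptive comment (added): generate a KTR_PSIG trace record for
 * delivery of a signal.
 */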
void
ktrpsig(struct lwp *lp, int sig, sig_t action, sigset_t *mask, int code)
{
        struct ktr_header kth;
        struct ktr_psig kp;

        lp->lwp_traceflag |= KTRFAC_ACTIVE;
        ktrgetheader(&kth, KTR_PSIG);

        kp.signo = (char)sig;
        kp.action = action;
        kp.mask = *mask;
        kp.code = code;
        kth.ktr_buf = (caddr_t)&kp;
        kth.ktr_len = (int)sizeof(struct ktr_psig);

        ktrwrite(lp, &kth, NULL);
        lp->lwp_traceflag &= ~KTRFAC_ACTIVE;
}
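
/*
 * Descriptive comment (added): generate a KTR_CSW trace record for a
 * context switch, recording the direction of the switch and whether it
 * occurred in user mode.
 */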
void
ktrcsw(struct lwp *lp, int out, int user)
{
        struct ktr_header kth;
        struct ktr_csw kc;

        lp->lwp_traceflag |= KTRFAC_ACTIVE;
        ktrgetheader(&kth, KTR_CSW);

        kc.out = out;
        kc.user = user;
        kth.ktr_buf = (caddr_t)&kc;
        kth.ktr_len = (int)sizeof(struct ktr_csw);

        ktrwrite(lp, &kth, NULL);
        lp->lwp_traceflag &= ~KTRFAC_ACTIVE;
}
#endif

/* Interface and common routines */

#ifdef KTRACE
/*
 * ktrace system call
 */
struct ktrace_clear_info {
        ktrace_node_t tracenode;
        int rootclear;
        int error;
};

static int ktrace_clear_callback(struct proc *p, void *data);

#endif

/*
 * MPALMOSTSAFE
 */
int
sys_ktrace(struct ktrace_args *uap)
{
#ifdef KTRACE
        struct ktrace_clear_info info;
        struct thread *td = curthread;
        struct proc *curp = td->td_proc;
        struct proc *p;
        struct pgrp *pg;
        int facs = uap->facs & ~KTRFAC_ROOT;
        int ops = KTROP(uap->ops);
        int descend = uap->ops & KTRFLAG_DESCEND;
        int ret = 0;
        int error = 0;
        struct nlookupdata nd;
        ktrace_node_t tracenode = NULL;

        lwkt_gettoken(&curp->p_token);
        curp->p_traceflag |= KTRFAC_ACTIVE;

        if (ops != KTROP_CLEAR) {
                /*
                 * an operation which requires a file argument.
                 */
                error = nlookup_init(&nd, uap->fname,
                                     UIO_USERSPACE, NLC_LOCKVP);
                if (error == 0)
                        error = vn_open(&nd, NULL, FREAD|FWRITE|O_NOFOLLOW, 0);
                if (error == 0 && nd.nl_open_vp->v_type != VREG)
                        error = EACCES;
                if (error) {
                        curp->p_traceflag &= ~KTRFAC_ACTIVE;
                        nlookup_done(&nd);
                        goto done;
                }
                tracenode = kmalloc(sizeof(struct ktrace_node), M_KTRACE,
                                    M_WAITOK | M_ZERO);
                tracenode->kn_vp = nd.nl_open_vp;
                tracenode->kn_refs = 1;
                nd.nl_open_vp = NULL;
                nlookup_done(&nd);
                vn_unlock(tracenode->kn_vp);
        }

        /*
         * Clear all uses of the tracefile.  Not the most efficient operation
         * in the world.
         */
        if (ops == KTROP_CLEARFILE) {
                info.tracenode = tracenode;
                info.error = 0;
                info.rootclear = 0;
                allproc_scan(ktrace_clear_callback, &info);
                error = info.error;
                goto done;
        }

        /*
         * need something to (un)trace (XXX - why is this here?)
         */
        if (!facs) {
                error = EINVAL;
                goto done;
        }

        /*
         * do it
         */
        if (uap->pid < 0) {
                /*
                 * By process group.  Process group is referenced, preventing
                 * disposal.
                 */
                pg = pgfind(-uap->pid);
                if (pg == NULL) {
                        error = ESRCH;
                        goto done;
                }
                lwkt_gettoken(&pg->pg_token);
                LIST_FOREACH(p, &pg->pg_members, p_pglist) {
                        PHOLD(p);
                        if (descend)
                                ret |= ktrsetchildren(td, p, ops, facs, tracenode);
                        else
                                ret |= ktrops(td, p, ops, facs, tracenode);
                        PRELE(p);
                }
                lwkt_reltoken(&pg->pg_token);
                pgrel(pg);
        } else {
                /*
                 * by pid
                 */
                p = pfind(uap->pid);
                if (p == NULL) {
                        error = ESRCH;
                        goto done;
                }
                if (descend)
                        ret |= ktrsetchildren(td, p, ops, facs, tracenode);
                else
                        ret |= ktrops(td, p, ops, facs, tracenode);
                PRELE(p);
        }
        if (!ret)
                error = EPERM;
done:
        if (tracenode)
                ktrdestroy(&tracenode);
        curp->p_traceflag &= ~KTRFAC_ACTIVE;
        lwkt_reltoken(&curp->p_token);
        return (error);
#else
        return ENOSYS;
#endif
}

#ifdef KTRACE
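
/*
 * Descriptive comment (added): allproc_scan() callback used to disable
 * tracing.  On rootclear, tracing is dropped for processes using this
 * exact tracenode; on a CLEARFILE operation, for any process tracing
 * to the same vnode, subject to a permission check.
 */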
/*
 * NOTE: NOT MPSAFE (yet)
 */
static int
ktrace_clear_callback(struct proc *p, void *data)
{
        struct ktrace_clear_info *info = data;

        if (p->p_tracenode) {
                if (info->rootclear) {
                        if (p->p_tracenode == info->tracenode) {
                                ktrdestroy(&p->p_tracenode);
                                p->p_traceflag = 0;
                        }
                } else {
                        if (p->p_tracenode->kn_vp == info->tracenode->kn_vp) {
                                if (ktrcanset(curthread, p)) {
                                        ktrdestroy(&p->p_tracenode);
                                        p->p_traceflag = 0;
                                } else {
                                        info->error = EPERM;
                                }
                        }
                }
        }
        return(0);
}

#endif

/*
 * utrace system call
 *
 * MPALMOSTSAFE
 */
int
sys_utrace(struct utrace_args *uap)
{
#ifdef KTRACE
        struct ktr_header kth;
        struct thread *td = curthread;  /* XXX */
        char cp_cache[64];
        caddr_t cp;

        if (!KTRPOINT(td, KTR_USER))
                return (0);
        if (uap->len > KTR_USER_MAXLEN)
                return (EINVAL);
        td->td_lwp->lwp_traceflag |= KTRFAC_ACTIVE;
        ktrgetheader(&kth, KTR_USER);
        if (uap->len <= sizeof(cp_cache))
                cp = cp_cache;
        else
                cp = kmalloc(uap->len, M_KTRACE, M_WAITOK);

        if (!copyin(uap->addr, cp, uap->len)) {
                kth.ktr_buf = cp;
                kth.ktr_len = uap->len;
                ktrwrite(td->td_lwp, &kth, NULL);
        }
        if (cp != cp_cache)
                kfree(cp, M_KTRACE);
        td->td_lwp->lwp_traceflag &= ~KTRFAC_ACTIVE;

        return (0);
#else
        return (ENOSYS);
#endif
}
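
/*
 * Descriptive comment (added): drop a reference on a tracenode and
 * NULL out the caller's pointer.  When the last reference goes away
 * the trace vnode is closed and the node is freed.
 */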
void
ktrdestroy(struct ktrace_node **tracenodep)
{
        ktrace_node_t tracenode;

        if ((tracenode = *tracenodep) != NULL) {
                *tracenodep = NULL;
                KKASSERT(tracenode->kn_refs > 0);
                if (atomic_fetchadd_int(&tracenode->kn_refs, -1) == 1) {
                        vn_close(tracenode->kn_vp, FREAD|FWRITE, NULL);
                        tracenode->kn_vp = NULL;
                        kfree(tracenode, M_KTRACE);
                }
        }
}

/*
 * This allows a process to inherit a ref on a tracenode and is also used
 * as a temporary ref to prevent a tracenode from being destroyed out from
 * under an active operation.
 */
ktrace_node_t
ktrinherit(ktrace_node_t tracenode)
{
        if (tracenode) {
                KKASSERT(tracenode->kn_refs > 0);
                atomic_add_int(&tracenode->kn_refs, 1);
        }
        return(tracenode);
}

#ifdef KTRACE
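/*
 * Descriptive comment (added): apply a single trace set/clear
 * operation to one process, replacing or dropping its tracenode
 * reference as needed.  Returns 1 on success, 0 if the caller lacks
 * permission to change the target.
 */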
static int
ktrops(struct thread *td, struct proc *p, int ops, int facs,
       ktrace_node_t tracenode)
{
        ktrace_node_t oldnode;

        if (!ktrcanset(td, p))
                return (0);
        if (ops == KTROP_SET) {
                if ((oldnode = p->p_tracenode) != tracenode) {
                        p->p_tracenode = ktrinherit(tracenode);
                        ktrdestroy(&oldnode);
                }
                p->p_traceflag |= facs;
                if (td->td_ucred->cr_uid == 0)
                        p->p_traceflag |= KTRFAC_ROOT;
        } else {
                /* KTROP_CLEAR */
                if (((p->p_traceflag &= ~facs) & KTRFAC_MASK) == 0) {
                        /* no more tracing */
                        p->p_traceflag = 0;
                        ktrdestroy(&p->p_tracenode);
                }
        }

        return (1);
}
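
/*
 * Descriptive comment (added): apply a trace operation to 'top' and
 * all of its descendants using an iterative depth-first walk of the
 * process tree.
 */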
static int
ktrsetchildren(struct thread *td, struct proc *top, int ops, int facs,
               ktrace_node_t tracenode)
{
        struct proc *p;
        struct proc *np;
        int ret = 0;

        p = top;
        PHOLD(p);
        lwkt_gettoken(&p->p_token);

        for (;;) {
                ret |= ktrops(td, p, ops, facs, tracenode);

                /*
                 * If this process has children, descend to them next,
                 * otherwise do any siblings, and if done with this level,
                 * follow back up the tree (but not past top).
                 */
                if ((np = LIST_FIRST(&p->p_children)) != NULL) {
                        PHOLD(np);
                }
                while (np == NULL) {
                        if (p == top)
                                break;
                        if ((np = LIST_NEXT(p, p_sibling)) != NULL) {
                                PHOLD(np);
                                break;
                        }

                        /*
                         * recurse up to parent, set p in our inner
                         * loop when doing this.  np can be NULL if
                         * we race a reparenting to init (thus 'top'
                         * is skipped past and never encountered).
                         */
                        np = p->p_pptr;
                        if (np == NULL)
                                break;
                        PHOLD(np);
                        lwkt_reltoken(&p->p_token);
                        PRELE(p);
                        p = np;
                        lwkt_gettoken(&p->p_token);
                        np = NULL;
                }
                lwkt_reltoken(&p->p_token);
                PRELE(p);
                p = np;
                if (p == NULL)
                        break;
                /* Already held, but we need the token too */
                lwkt_gettoken(&p->p_token);
        }
        return (ret);
}
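
/*
 * Descriptive comment (added): write a trace record (header plus
 * optional payload and uio data) to the process's trace vnode.  On a
 * write error, tracing is stopped for every process sharing this
 * tracenode.
 */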
static void
ktrwrite(struct lwp *lp, struct ktr_header *kth, struct uio *uio)
{
        struct ktrace_clear_info info;
        struct uio auio;
        struct iovec aiov[2];
        int error;
        ktrace_node_t tracenode;

        /*
         * We have to ref our tracenode to prevent it from being ripped out
         * from under us while we are trying to use it.  p_tracenode can
         * go away at any time if another process gets a write error.
         *
         * XXX not MP safe
         */
        if (lp->lwp_proc->p_tracenode == NULL)
                return;
        tracenode = ktrinherit(lp->lwp_proc->p_tracenode);
        auio.uio_iov = &aiov[0];
        auio.uio_offset = 0;
        auio.uio_segflg = UIO_SYSSPACE;
        auio.uio_rw = UIO_WRITE;
        aiov[0].iov_base = (caddr_t)kth;
        aiov[0].iov_len = sizeof(struct ktr_header);
        auio.uio_resid = sizeof(struct ktr_header);
        auio.uio_iovcnt = 1;
        auio.uio_td = curthread;
        if (kth->ktr_len > 0) {
                auio.uio_iovcnt++;
                aiov[1].iov_base = kth->ktr_buf;
                aiov[1].iov_len = kth->ktr_len;
                auio.uio_resid += kth->ktr_len;
                if (uio != NULL)
                        kth->ktr_len += uio->uio_resid;
        }

        /*
         * NOTE: Must set timestamp after obtaining lock to ensure no
         *       timestamp reversals in the output file.
         */
        vn_lock(tracenode->kn_vp, LK_EXCLUSIVE | LK_RETRY);
        microtime(&kth->ktr_time);
        error = VOP_WRITE(tracenode->kn_vp, &auio,
                          IO_UNIT | IO_APPEND, lp->lwp_thread->td_ucred);
        if (error == 0 && uio != NULL) {
                error = VOP_WRITE(tracenode->kn_vp, uio,
                                  IO_UNIT | IO_APPEND, lp->lwp_thread->td_ucred);
        }
        vn_unlock(tracenode->kn_vp);
        if (error) {
                /*
                 * If an error occurred, give up tracing on all processes
                 * using this tracenode.  This is not MP safe but is
                 * blocking-safe.
                 */
                log(LOG_NOTICE,
                    "ktrace write failed, errno %d, tracing stopped\n", error);
                info.tracenode = tracenode;
                info.error = 0;
                info.rootclear = 1;
                allproc_scan(ktrace_clear_callback, &info);
        }
        ktrdestroy(&tracenode);
}

/*
 * Return true if caller has permission to set the ktracing state
 * of target.  Essentially, the target can't possess any
 * more permissions than the caller.  KTRFAC_ROOT signifies that
 * root previously set the tracing status on the target process, and
 * so, only root may further change it.
 *
 * TODO: check groups.  use caller effective gid.
 */
static int
ktrcanset(struct thread *calltd, struct proc *targetp)
{
        struct ucred *caller = calltd->td_ucred;
        struct ucred *target = targetp->p_ucred;

        if (!PRISON_CHECK(caller, target))
                return (0);
        if ((caller->cr_uid == target->cr_ruid &&
             target->cr_ruid == target->cr_svuid &&
             caller->cr_rgid == target->cr_rgid &&      /* XXX */
             target->cr_rgid == target->cr_svgid &&
             (targetp->p_traceflag & KTRFAC_ROOT) == 0 &&
             (targetp->p_flags & P_SUGID) == 0) ||
             caller->cr_uid == 0)
                return (1);

        return (0);
}

#endif /* KTRACE */