/*-
 * Copyright (c) 1994, Sean Eric Fagan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Sean Eric Fagan.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_compat.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/syscallsubr.h>
#include <sys/sysproto.h>
#include <sys/proc.h>
#include <sys/vnode.h>
#include <sys/ptrace.h>
#include <sys/sx.h>
#include <sys/malloc.h>
#include <sys/signalvar.h>

#include <machine/reg.h>

#include <security/audit/audit.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>

#ifdef COMPAT_IA32
#include <sys/procfs.h>
#include <machine/fpu.h>
#include <compat/ia32/ia32_reg.h>

extern struct sysentvec ia32_freebsd_sysvec;

struct ptrace_io_desc32 {
	int		piod_op;
	u_int32_t	piod_offs;
	u_int32_t	piod_addr;
	u_int32_t	piod_len;
};
#endif

/*
 * Functions implemented using PROC_ACTION():
 *
 * proc_read_regs(proc, regs)
 *	Get the current user-visible register set from the process
 *	and copy it into the regs structure (<machine/reg.h>).
 *	The process is stopped at the time read_regs is called.
 *
 * proc_write_regs(proc, regs)
 *	Update the current register set from the passed in regs
 *	structure.  Take care to avoid clobbering special CPU
 *	registers or privileged bits in the PSL.
 *	Depending on the architecture this may have fix-up work to do,
 *	especially if the IAR or PCW are modified.
 *	The process is stopped at the time write_regs is called.
 *
 * proc_read_fpregs, proc_write_fpregs
 *	deal with the floating point register set, otherwise as above.
 *
 * proc_read_dbregs, proc_write_dbregs
 *	deal with the processor debug register set, otherwise as above.
 *
 * proc_sstep(proc)
 *	Arrange for the process to trap after executing a single instruction.
 */

#define	PROC_ACTION(action) do {				\
	int error;						\
								\
	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);		\
	if ((td->td_proc->p_flag & P_INMEM) == 0)		\
		error = EIO;					\
	else							\
		error = (action);				\
	return (error);						\
} while(0)
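
/*
 * For illustration, PROC_ACTION(fill_regs(td, regs)) in proc_read_regs()
 * below expands (inside the do/while wrapper) to roughly:
 *
 *	int error;
 *
 *	PROC_LOCK_ASSERT(td->td_proc, MA_OWNED);
 *	if ((td->td_proc->p_flag & P_INMEM) == 0)
 *		error = EIO;
 *	else
 *		error = fill_regs(td, regs);
 *	return (error);
 *
 * i.e. every accessor returns EIO if the target is swapped out and
 * otherwise returns whatever the machine-dependent helper returns.
 */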

int
proc_read_regs(struct thread *td, struct reg *regs)
{

	PROC_ACTION(fill_regs(td, regs));
}

int
proc_write_regs(struct thread *td, struct reg *regs)
{

	PROC_ACTION(set_regs(td, regs));
}

int
proc_read_dbregs(struct thread *td, struct dbreg *dbregs)
{

	PROC_ACTION(fill_dbregs(td, dbregs));
}

int
proc_write_dbregs(struct thread *td, struct dbreg *dbregs)
{

	PROC_ACTION(set_dbregs(td, dbregs));
}

/*
 * Ptrace doesn't support fpregs at all, and there are no security holes
 * or translations for fpregs, so we can just copy them.
 */
int
proc_read_fpregs(struct thread *td, struct fpreg *fpregs)
{

	PROC_ACTION(fill_fpregs(td, fpregs));
}

int
proc_write_fpregs(struct thread *td, struct fpreg *fpregs)
{

	PROC_ACTION(set_fpregs(td, fpregs));
}

#ifdef COMPAT_IA32
/* For 32 bit binaries, we need to expose the 32 bit regs layouts. */
int
proc_read_regs32(struct thread *td, struct reg32 *regs32)
{

	PROC_ACTION(fill_regs32(td, regs32));
}

int
proc_write_regs32(struct thread *td, struct reg32 *regs32)
{

	PROC_ACTION(set_regs32(td, regs32));
}

int
proc_read_dbregs32(struct thread *td, struct dbreg32 *dbregs32)
{

	PROC_ACTION(fill_dbregs32(td, dbregs32));
}

int
proc_write_dbregs32(struct thread *td, struct dbreg32 *dbregs32)
{

	PROC_ACTION(set_dbregs32(td, dbregs32));
}

int
proc_read_fpregs32(struct thread *td, struct fpreg32 *fpregs32)
{

	PROC_ACTION(fill_fpregs32(td, fpregs32));
}

int
proc_write_fpregs32(struct thread *td, struct fpreg32 *fpregs32)
{

	PROC_ACTION(set_fpregs32(td, fpregs32));
}
#endif

int
proc_sstep(struct thread *td)
{

	PROC_ACTION(ptrace_single_step(td));
}

int
proc_rwmem(struct proc *p, struct uio *uio)
{
	vm_map_t map;
	vm_object_t backing_object, object = NULL;
	vm_offset_t pageno = 0;		/* page number */
	vm_prot_t reqprot;
	int error, fault_flags, writing;

	/*
	 * Assert that someone has locked this vmspace.  (Should be
	 * curthread but we can't assert that.)  This keeps the process
	 * from exiting out from under us until this operation completes.
	 */
	KASSERT(p->p_lock >= 1, ("%s: process %p (pid %d) not held", __func__,
	    p, p->p_pid));

	/*
	 * The map we want...
	 */
	map = &p->p_vmspace->vm_map;

	writing = uio->uio_rw == UIO_WRITE;
	reqprot = writing ? (VM_PROT_WRITE | VM_PROT_OVERRIDE_WRITE) :
	    VM_PROT_READ;
	fault_flags = writing ? VM_FAULT_DIRTY : VM_FAULT_NORMAL;

	/*
	 * Only map in one page at a time.  We don't have to, but it
	 * makes things easier.  This way is trivial - right?
	 */
	do {
		vm_map_t tmap;
		vm_offset_t uva;
		int page_offset;		/* offset into page */
		vm_map_entry_t out_entry;
		vm_prot_t out_prot;
		boolean_t wired;
		vm_pindex_t pindex;
		u_int len;
		vm_page_t m;

		object = NULL;

		uva = (vm_offset_t)uio->uio_offset;

		/*
		 * Get the page number of this segment.
		 */
		pageno = trunc_page(uva);
		page_offset = uva - pageno;

		/*
		 * How many bytes to copy
		 */
		len = min(PAGE_SIZE - page_offset, uio->uio_resid);

		/*
		 * Fault the page on behalf of the process
		 */
		error = vm_fault(map, pageno, reqprot, fault_flags);
		if (error) {
			error = EFAULT;
			break;
		}

		/*
		 * Now we need to get the page.  out_entry, out_prot, wired,
		 * and single_use aren't used.  One would think the vm code
		 * would be a *bit* nicer...  We use tmap because
		 * vm_map_lookup() can change the map argument.
		 */
		tmap = map;
		error = vm_map_lookup(&tmap, pageno, reqprot, &out_entry,
		    &object, &pindex, &out_prot, &wired);
		if (error) {
			error = EFAULT;
			break;
		}
		VM_OBJECT_LOCK(object);
		while ((m = vm_page_lookup(object, pindex)) == NULL &&
		    !writing &&
		    (backing_object = object->backing_object) != NULL) {
			/*
			 * Allow fallback to backing objects if we are reading.
			 */
			VM_OBJECT_LOCK(backing_object);
			pindex += OFF_TO_IDX(object->backing_object_offset);
			VM_OBJECT_UNLOCK(object);
			object = backing_object;
		}
		VM_OBJECT_UNLOCK(object);
		if (m == NULL) {
			vm_map_lookup_done(tmap, out_entry);
			error = EFAULT;
			break;
		}

		/*
		 * Hold the page in memory.
		 */
		vm_page_lock_queues();
		vm_page_hold(m);
		vm_page_unlock_queues();

		/*
		 * We're done with tmap now.
		 */
		vm_map_lookup_done(tmap, out_entry);

		/*
		 * Now do the i/o move.
		 */
		error = uiomove_fromphys(&m, page_offset, len, uio);

		/*
		 * Release the page.
		 */
		vm_page_lock_queues();
		vm_page_unhold(m);
		vm_page_unlock_queues();

	} while (error == 0 && uio->uio_resid > 0);

	return (error);
}
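
/*
 * A minimal sketch of how proc_rwmem() is driven (this mirrors the
 * PT_READ_D/PT_WRITE_D handling in kern_ptrace() below).  The caller
 * must hold the process (see the p_lock KASSERT above) and supply a
 * uio describing the transfer; target_va (the address in the traced
 * process), val, td and p are placeholders for the caller's own state:
 *
 *	struct iovec iov;
 *	struct uio uio;
 *	int val, error;
 *
 *	iov.iov_base = (caddr_t)&val;
 *	iov.iov_len = sizeof(int);
 *	uio.uio_iov = &iov;
 *	uio.uio_iovcnt = 1;
 *	uio.uio_offset = (off_t)target_va;
 *	uio.uio_resid = sizeof(int);
 *	uio.uio_segflg = UIO_SYSSPACE;
 *	uio.uio_rw = UIO_READ;
 *	uio.uio_td = td;
 *	error = proc_rwmem(p, &uio);
 */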

/*
 * Process debugging system call.
 */
#ifndef _SYS_SYSPROTO_H_
struct ptrace_args {
	int	req;
	pid_t	pid;
	caddr_t	addr;
	int	data;
};
#endif
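
/*
 * Rough userland view of this interface (illustrative only; pid is the
 * target process and error handling is omitted): a debugger attaches,
 * waits for the stop, fetches registers and detaches with
 *
 *	#include <sys/types.h>
 *	#include <sys/ptrace.h>
 *	#include <sys/wait.h>
 *	#include <machine/reg.h>
 *
 *	struct reg regs;
 *	int status;
 *
 *	ptrace(PT_ATTACH, pid, (caddr_t)0, 0);
 *	waitpid(pid, &status, 0);
 *	ptrace(PT_GETREGS, pid, (caddr_t)&regs, 0);
 *	ptrace(PT_DETACH, pid, (caddr_t)1, 0);
 *
 * Every request enters the kernel through ptrace() below and is then
 * handled by kern_ptrace().
 */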

#ifdef COMPAT_IA32
/*
 * This CPP subterfuge is to try and reduce the number of ifdefs in
 * the body of the code.
 *   COPYIN(uap->addr, &r.reg, sizeof r.reg);
 * becomes either:
 *   copyin(uap->addr, &r.reg, sizeof r.reg);
 * or
 *   copyin(uap->addr, &r.reg32, sizeof r.reg32);
 * .. except this is done at runtime.
 */
#define	COPYIN(u, k, s)		wrap32 ? \
	copyin(u, k ## 32, s ## 32) : \
	copyin(u, k, s)
#define	COPYOUT(k, u, s)	wrap32 ? \
	copyout(k ## 32, u, s ## 32) : \
	copyout(k, u, s)
#else
#define	COPYIN(u, k, s)		copyin(u, k, s)
#define	COPYOUT(k, u, s)	copyout(k, u, s)
#endif
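
/*
 * For example, with COMPAT_IA32 the PT_SETREGS line below,
 *
 *	error = COPYIN(uap->addr, &r.reg, sizeof r.reg);
 *
 * expands to roughly
 *
 *	error = wrap32 ?
 *	    copyin(uap->addr, &r.reg32, sizeof r.reg32) :
 *	    copyin(uap->addr, &r.reg, sizeof r.reg);
 *
 * so a 32 bit client transparently uses the 32 bit register layout.
 */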

int
ptrace(struct thread *td, struct ptrace_args *uap)
{
	/*
	 * XXX this obfuscation is to reduce stack usage, but the register
	 * structs may be too large to put on the stack anyway.
	 */
	union {
		struct ptrace_io_desc piod;
		struct ptrace_lwpinfo pl;
		struct dbreg dbreg;
		struct fpreg fpreg;
		struct reg reg;
#ifdef COMPAT_IA32
		struct dbreg32 dbreg32;
		struct fpreg32 fpreg32;
		struct reg32 reg32;
		struct ptrace_io_desc32 piod32;
#endif
	} r;
	void *addr;
	int error = 0;
#ifdef COMPAT_IA32
	int wrap32 = 0;

	if (td->td_proc->p_sysent == &ia32_freebsd_sysvec)
		wrap32 = 1;
#endif
	AUDIT_ARG(pid, uap->pid);
	AUDIT_ARG(cmd, uap->req);
	AUDIT_ARG(addr, uap->addr);
	AUDIT_ARG(value, uap->data);
	addr = &r;
	switch (uap->req) {
	case PT_GETREGS:
	case PT_GETFPREGS:
	case PT_GETDBREGS:
	case PT_LWPINFO:
		break;
	case PT_SETREGS:
		error = COPYIN(uap->addr, &r.reg, sizeof r.reg);
		break;
	case PT_SETFPREGS:
		error = COPYIN(uap->addr, &r.fpreg, sizeof r.fpreg);
		break;
	case PT_SETDBREGS:
		error = COPYIN(uap->addr, &r.dbreg, sizeof r.dbreg);
		break;
	case PT_IO:
		error = COPYIN(uap->addr, &r.piod, sizeof r.piod);
		break;
	default:
		addr = uap->addr;
		break;
	}
	if (error)
		return (error);

	error = kern_ptrace(td, uap->req, uap->pid, addr, uap->data);
	if (error)
		return (error);

	switch (uap->req) {
	case PT_IO:
		error = COPYOUT(&r.piod, uap->addr, sizeof r.piod);
		break;
	case PT_GETREGS:
		error = COPYOUT(&r.reg, uap->addr, sizeof r.reg);
		break;
	case PT_GETFPREGS:
		error = COPYOUT(&r.fpreg, uap->addr, sizeof r.fpreg);
		break;
	case PT_GETDBREGS:
		error = COPYOUT(&r.dbreg, uap->addr, sizeof r.dbreg);
		break;
	case PT_LWPINFO:
		error = copyout(&r.pl, uap->addr, uap->data);
		break;
	}

	return (error);
}
#undef COPYIN
#undef COPYOUT

#ifdef COMPAT_IA32
/*
 *   PROC_READ(regs, td2, addr);
 * becomes either:
 *   proc_read_regs(td2, addr);
 * or
 *   proc_read_regs32(td2, addr);
 * .. except this is done at runtime.  There is an additional
 * complication in that PROC_WRITE disallows 32 bit consumers
 * from writing to 64 bit address space targets.
 */
#define	PROC_READ(w, t, a)	wrap32 ? \
	proc_read_ ## w ## 32(t, a) : \
	proc_read_ ## w (t, a)
#define	PROC_WRITE(w, t, a)	wrap32 ? \
	(safe ? proc_write_ ## w ## 32(t, a) : EINVAL ) : \
	proc_write_ ## w (t, a)
#else
#define	PROC_READ(w, t, a)	proc_read_ ## w (t, a)
#define	PROC_WRITE(w, t, a)	proc_write_ ## w (t, a)
#endif
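
/*
 * Likewise, under COMPAT_IA32 the PT_SETREGS case below,
 *
 *	error = PROC_WRITE(regs, td2, addr);
 *
 * expands to roughly
 *
 *	error = wrap32 ?
 *	    (safe ? proc_write_regs32(td2, addr) : EINVAL) :
 *	    proc_write_regs(td2, addr);
 *
 * which is how a 32 bit debugger is kept from writing the register set
 * of a 64 bit target.
 */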

int
kern_ptrace(struct thread *td, int req, pid_t pid, void *addr, int data)
{
	struct iovec iov;
	struct uio uio;
	struct proc *curp, *p, *pp;
	struct thread *td2 = NULL;
	struct ptrace_io_desc *piod = NULL;
	struct ptrace_lwpinfo *pl;
	int error, write, tmp, num;
	int proctree_locked = 0;
	lwpid_t tid = 0, *buf;
#ifdef COMPAT_IA32
	int wrap32 = 0, safe = 0;
	struct ptrace_io_desc32 *piod32 = NULL;
#endif

	curp = td->td_proc;

	/* Lock proctree before locking the process. */
	switch (req) {
	case PT_TRACE_ME:
	case PT_ATTACH:
	case PT_STEP:
	case PT_CONTINUE:
	case PT_TO_SCE:
	case PT_TO_SCX:
	case PT_SYSCALL:
	case PT_DETACH:
		sx_xlock(&proctree_lock);
		proctree_locked = 1;
		break;
	default:
		break;
	}

	write = 0;
	if (req == PT_TRACE_ME) {
		p = td->td_proc;
		PROC_LOCK(p);
	} else {
		if (pid <= PID_MAX) {
			if ((p = pfind(pid)) == NULL) {
				if (proctree_locked)
					sx_xunlock(&proctree_lock);
				return (ESRCH);
			}
		} else {
			/* this is slow, should be optimized */
			sx_slock(&allproc_lock);
			FOREACH_PROC_IN_SYSTEM(p) {
				PROC_LOCK(p);
				FOREACH_THREAD_IN_PROC(p, td2) {
					if (td2->td_tid == pid)
						break;
				}
				if (td2 != NULL)
					break; /* proc lock held */
				PROC_UNLOCK(p);
			}
			sx_sunlock(&allproc_lock);
			if (p == NULL) {
				if (proctree_locked)
					sx_xunlock(&proctree_lock);
				return (ESRCH);
			}
			tid = pid;
			pid = p->p_pid;
		}
	}
	AUDIT_ARG(process, p);

	if ((p->p_flag & P_WEXIT) != 0) {
		error = ESRCH;
		goto fail;
	}
	if ((error = p_cansee(td, p)) != 0)
		goto fail;

	if ((error = p_candebug(td, p)) != 0)
		goto fail;

	/*
	 * System processes can't be debugged.
	 */
	if ((p->p_flag & P_SYSTEM) != 0) {
		error = EINVAL;
		goto fail;
	}

	if (tid == 0) {
		if ((p->p_flag & P_STOPPED_TRACE) != 0) {
			KASSERT(p->p_xthread != NULL, ("NULL p_xthread"));
			td2 = p->p_xthread;
		} else {
			td2 = FIRST_THREAD_IN_PROC(p);
		}
		tid = td2->td_tid;
	}

#ifdef COMPAT_IA32
	/*
	 * Test if we're a 32 bit client and what the target is.
	 * Set the wrap controls accordingly.
	 */
	if (td->td_proc->p_sysent == &ia32_freebsd_sysvec) {
		if (td2->td_proc->p_sysent == &ia32_freebsd_sysvec)
			safe = 1;
		wrap32 = 1;
	}
#endif
	/*
	 * Permissions check
	 */
	switch (req) {
	case PT_TRACE_ME:
		/* Always legal. */
		break;

	case PT_ATTACH:
		/* Self */
		if (p->p_pid == td->td_proc->p_pid) {
			error = EINVAL;
			goto fail;
		}

		/* Already traced */
		if (p->p_flag & P_TRACED) {
			error = EBUSY;
			goto fail;
		}

		/* Can't trace an ancestor if you're being traced. */
		if (curp->p_flag & P_TRACED) {
			for (pp = curp->p_pptr; pp != NULL; pp = pp->p_pptr) {
				if (pp == p) {
					error = EINVAL;
					goto fail;
				}
			}
		}

		/* OK */
		break;

	case PT_CLEARSTEP:
		/* Allow thread to clear single step for itself */
		if (td->td_tid == tid)
			break;

		/* FALLTHROUGH */
	default:
		/* not being traced... */
		if ((p->p_flag & P_TRACED) == 0) {
			error = EPERM;
			goto fail;
		}

		/* not being traced by YOU */
		if (p->p_pptr != td->td_proc) {
			error = EBUSY;
			goto fail;
		}

		/* not currently stopped */
		if ((p->p_flag & (P_STOPPED_SIG | P_STOPPED_TRACE)) == 0 ||
		    p->p_suspcount != p->p_numthreads ||
		    (p->p_flag & P_WAITED) == 0) {
			error = EBUSY;
			goto fail;
		}
		if ((p->p_flag & P_STOPPED_TRACE) == 0) {
			static int count = 0;
			if (count++ == 0)
				printf("P_STOPPED_TRACE not set.\n");
		}

		/* OK */
		break;
	}

	/* Keep this process around until we finish this request. */
	_PHOLD(p);

#ifdef FIX_SSTEP
	/*
	 * Single step fixup ala procfs
	 */
	FIX_SSTEP(td2);
#endif

	/*
	 * Actually do the requests
	 */

	td->td_retval[0] = 0;

	switch (req) {
	case PT_TRACE_ME:
		/* set my trace flag and "owner" so it can read/write me */
		p->p_flag |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		break;

	case PT_ATTACH:
		/* security check done above */
		p->p_flag |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		if (p->p_pptr != td->td_proc)
			proc_reparent(p, td->td_proc);
		data = SIGSTOP;
		goto sendsig;	/* in PT_CONTINUE below */

	case PT_CLEARSTEP:
		error = ptrace_clear_single_step(td2);
		break;

	case PT_SETSTEP:
		error = ptrace_single_step(td2);
		break;

	case PT_SUSPEND:
		thread_lock(td2);
		td2->td_flags |= TDF_DBSUSPEND;
		thread_unlock(td2);
		break;

	case PT_RESUME:
		thread_lock(td2);
		td2->td_flags &= ~TDF_DBSUSPEND;
		thread_unlock(td2);
		break;

	case PT_STEP:
	case PT_CONTINUE:
	case PT_TO_SCE:
	case PT_TO_SCX:
	case PT_SYSCALL:
	case PT_DETACH:
		/* Zero means do not send any signal */
		if (data < 0 || data > _SIG_MAXSIG) {
			error = EINVAL;
			break;
		}

		switch (req) {
		case PT_STEP:
			error = ptrace_single_step(td2);
			if (error)
				goto out;
			break;
		case PT_TO_SCE:
			p->p_stops |= S_PT_SCE;
			break;
		case PT_TO_SCX:
			p->p_stops |= S_PT_SCX;
			break;
		case PT_SYSCALL:
			p->p_stops |= S_PT_SCE | S_PT_SCX;
			break;
		}

		if (addr != (void *)1) {
			error = ptrace_set_pc(td2, (u_long)(uintfptr_t)addr);
			if (error)
				break;
		}

		if (req == PT_DETACH) {
			/* reset process parent */
			if (p->p_oppid != p->p_pptr->p_pid) {
				struct proc *pp;

				PROC_LOCK(p->p_pptr);
				sigqueue_take(p->p_ksi);
				PROC_UNLOCK(p->p_pptr);

				PROC_UNLOCK(p);
				pp = pfind(p->p_oppid);
				if (pp == NULL)
					pp = initproc;
				else
					PROC_UNLOCK(pp);
				PROC_LOCK(p);
				proc_reparent(p, pp);
				if (pp == initproc)
					p->p_sigparent = SIGCHLD;
			}
			p->p_flag &= ~(P_TRACED | P_WAITED);
			p->p_oppid = 0;

			/* should we send SIGCHLD? */
			/* childproc_continued(p); */
		}

	sendsig:
		if (proctree_locked) {
			sx_xunlock(&proctree_lock);
			proctree_locked = 0;
		}
		p->p_xstat = data;
		p->p_xthread = NULL;
		if ((p->p_flag & (P_STOPPED_SIG | P_STOPPED_TRACE)) != 0) {
			/* deliver or queue signal */
			thread_lock(td2);
			td2->td_flags &= ~TDF_XSIG;
			thread_unlock(td2);
			td2->td_xsig = data;

			if (req == PT_DETACH) {
				struct thread *td3;
				FOREACH_THREAD_IN_PROC(p, td3) {
					thread_lock(td3);
					td3->td_flags &= ~TDF_DBSUSPEND;
					thread_unlock(td3);
				}
			}
			/*
			 * Unsuspend all threads.  To keep a thread from
			 * running, use PT_SUSPEND to suspend it before
			 * continuing the process.
			 */
			PROC_SLOCK(p);
			p->p_flag &= ~(P_STOPPED_TRACE|P_STOPPED_SIG|P_WAITED);
			thread_unsuspend(p);
			PROC_SUNLOCK(p);
		} else {
			if (data)
				psignal(p, data);
		}
		break;

	case PT_WRITE_I:
	case PT_WRITE_D:
		write = 1;
		/* FALLTHROUGH */
	case PT_READ_I:
	case PT_READ_D:
		PROC_UNLOCK(p);
		tmp = 0;
		/* write = 0 set above */
		iov.iov_base = write ? (caddr_t)&data : (caddr_t)&tmp;
		iov.iov_len = sizeof(int);
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(uintptr_t)addr;
		uio.uio_resid = sizeof(int);
		uio.uio_segflg = UIO_SYSSPACE;	/* i.e.: the uap */
		uio.uio_rw = write ? UIO_WRITE : UIO_READ;
		uio.uio_td = td;
		error = proc_rwmem(p, &uio);
		if (uio.uio_resid != 0) {
			/*
			 * XXX proc_rwmem() doesn't currently return ENOSPC,
			 * so I think write() can bogusly return 0.
			 * XXX what happens for short writes?  We don't want
			 * to write partial data.
			 * XXX proc_rwmem() returns EPERM for other invalid
			 * addresses.  Convert this to EINVAL.  Does this
			 * clobber returns of EPERM for other reasons?
			 */
			if (error == 0 || error == ENOSPC || error == EPERM)
				error = EINVAL;	/* EOF */
		}
		if (!write)
			td->td_retval[0] = tmp;
		PROC_LOCK(p);
		break;

	case PT_IO:
#ifdef COMPAT_IA32
		if (wrap32) {
			piod32 = addr;
			iov.iov_base = (void *)(uintptr_t)piod32->piod_addr;
			iov.iov_len = piod32->piod_len;
			uio.uio_offset = (off_t)(uintptr_t)piod32->piod_offs;
			uio.uio_resid = piod32->piod_len;
		} else
#endif
		{
			piod = addr;
			iov.iov_base = piod->piod_addr;
			iov.iov_len = piod->piod_len;
			uio.uio_offset = (off_t)(uintptr_t)piod->piod_offs;
			uio.uio_resid = piod->piod_len;
		}
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_segflg = UIO_USERSPACE;
		uio.uio_td = td;
#ifdef COMPAT_IA32
		tmp = wrap32 ? piod32->piod_op : piod->piod_op;
#else
		tmp = piod->piod_op;
#endif
		switch (tmp) {
		case PIOD_READ_D:
		case PIOD_READ_I:
			uio.uio_rw = UIO_READ;
			break;
		case PIOD_WRITE_D:
		case PIOD_WRITE_I:
			uio.uio_rw = UIO_WRITE;
			break;
		default:
			error = EINVAL;
			goto out;
		}
		PROC_UNLOCK(p);
		error = proc_rwmem(p, &uio);
#ifdef COMPAT_IA32
		if (wrap32)
			piod32->piod_len -= uio.uio_resid;
		else
#endif
			piod->piod_len -= uio.uio_resid;
		PROC_LOCK(p);
		break;
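
	/*
	 * Illustrative PT_IO usage from a debugger (va, buf, len and pid
	 * are placeholders): read len bytes at target address va into a
	 * local buffer buf, where piod_offs is the address in the target
	 * and piod_addr the buffer in the debugger:
	 *
	 *	struct ptrace_io_desc piod;
	 *
	 *	piod.piod_op = PIOD_READ_D;
	 *	piod.piod_offs = (void *)va;
	 *	piod.piod_addr = buf;
	 *	piod.piod_len = len;
	 *	ptrace(PT_IO, pid, (caddr_t)&piod, 0);
	 *
	 * On return piod_len holds the number of bytes actually
	 * transferred, as adjusted just above.
	 */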

	case PT_KILL:
		data = SIGKILL;
		goto sendsig;	/* in PT_CONTINUE above */

	case PT_SETREGS:
		error = PROC_WRITE(regs, td2, addr);
		break;

	case PT_GETREGS:
		error = PROC_READ(regs, td2, addr);
		break;

	case PT_SETFPREGS:
		error = PROC_WRITE(fpregs, td2, addr);
		break;

	case PT_GETFPREGS:
		error = PROC_READ(fpregs, td2, addr);
		break;

	case PT_SETDBREGS:
		error = PROC_WRITE(dbregs, td2, addr);
		break;

	case PT_GETDBREGS:
		error = PROC_READ(dbregs, td2, addr);
		break;

	case PT_LWPINFO:
		if (data <= 0 || data > sizeof(*pl)) {
			error = EINVAL;
			break;
		}
		pl = addr;
		pl->pl_lwpid = td2->td_tid;
		if (td2->td_flags & TDF_XSIG)
			pl->pl_event = PL_EVENT_SIGNAL;
		else
			pl->pl_event = 0;
		pl->pl_flags = 0;
		pl->pl_sigmask = td2->td_sigmask;
		pl->pl_siglist = td2->td_siglist;
		break;

	case PT_GETNUMLWPS:
		td->td_retval[0] = p->p_numthreads;
		break;

	case PT_GETLWPLIST:
		if (data <= 0) {
			error = EINVAL;
			break;
		}
		num = imin(p->p_numthreads, data);
		PROC_UNLOCK(p);
		buf = malloc(num * sizeof(lwpid_t), M_TEMP, M_WAITOK);
		tmp = 0;
		PROC_LOCK(p);
		FOREACH_THREAD_IN_PROC(p, td2) {
			if (tmp >= num)
				break;
			buf[tmp++] = td2->td_tid;
		}
		PROC_UNLOCK(p);
		error = copyout(buf, addr, tmp * sizeof(lwpid_t));
		free(buf, M_TEMP);
		if (!error)
			td->td_retval[0] = tmp;
		PROC_LOCK(p);
		break;

	default:
#ifdef __HAVE_PTRACE_MACHDEP
		if (req >= PT_FIRSTMACH) {
			PROC_UNLOCK(p);
			error = cpu_ptrace(td2, req, addr, data);
			PROC_LOCK(p);
		} else
#endif
			/* Unknown request. */
			error = EINVAL;
		break;
	}

out:
	/* Drop our hold on this process now that the request has completed. */
	_PRELE(p);
fail:
	PROC_UNLOCK(p);
	if (proctree_locked)
		sx_xunlock(&proctree_lock);
	return (error);
}
#undef PROC_READ
#undef PROC_WRITE

/*
 * Stop a process because of a debugging event;
 * stay stopped until p->p_step is cleared
 * (cleared by PIOCCONT in procfs).
 */
void
stopevent(struct proc *p, unsigned int event, unsigned int val)
{

	PROC_LOCK_ASSERT(p, MA_OWNED);
	p->p_step = 1;
	do {
		p->p_xstat = val;
		p->p_xthread = NULL;
		p->p_stype = event;	/* Which event caused the stop? */
		wakeup(&p->p_stype);	/* Wake up any PIOCWAIT'ing procs */
		msleep(&p->p_step, &p->p_mtx, PWAIT, "stopevent", 0);
	} while (p->p_step);
}
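
/*
 * stopevent() is normally reached through the STOPEVENT() macro
 * (sys/signalvar.h), which only drops in here when the matching bit is
 * set in p->p_stops; for example the procfs syscall-entry stop is
 * posted with roughly
 *
 *	STOPEVENT(p, S_SCE, narg);
 *
 * and the stopped process then sleeps in the loop above until PIOCCONT
 * clears p_step.
 */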