sys/kern/sys_process.c

/*
 * Copyright (c) 1994, Sean Eric Fagan
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by Sean Eric Fagan.
 * 4. The name of the author may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD: src/sys/kern/sys_process.c,v 1.51.2.6 2003/01/08 03:06:45 kan Exp $
 */

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/sysproto.h>
#include <sys/uio.h>
#include <sys/proc.h>
#include <sys/priv.h>
#include <sys/vnode.h>
#include <sys/ptrace.h>
#include <sys/reg.h>
#include <sys/lock.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>

#include <vfs/procfs/procfs.h>

#include <sys/thread2.h>
#include <sys/spinlock2.h>

/* use the equivalent procfs code */
#if 0
static int
pread (struct proc *procp, unsigned int addr, unsigned int *retval)
{
	int rv;
	vm_map_t map, tmap;
	vm_object_t object;
	vm_map_backing_t ba;
	vm_offset_t kva = 0;
	int page_offset;	/* offset into page */
	vm_offset_t pageno;	/* page number */
	vm_map_entry_t out_entry;
	vm_prot_t out_prot;
	int wflags;
	vm_pindex_t pindex;
	vm_pindex_t pcount;

	/* Map page into kernel space */

	map = &procp->p_vmspace->vm_map;

	page_offset = addr - trunc_page(addr);
	pageno = trunc_page(addr);

	tmap = map;
	rv = vm_map_lookup(&tmap, pageno, VM_PROT_READ, &out_entry,
			   &ba, &pindex, &pcount, &out_prot, &wflags);
	if (ba)
		object = ba->object;
	else
		object = NULL;

	if (rv != KERN_SUCCESS)
		return EINVAL;

	vm_map_lookup_done (tmap, out_entry, 0);

	/* Find space in kernel_map for the page we're interested in */
	rv = vm_map_find (&kernel_map, object, NULL,
			  IDX_TO_OFF(pindex), &kva, PAGE_SIZE,
			  PAGE_SIZE, FALSE,
			  VM_MAPTYPE_NORMAL, VM_SUBSYS_PROC,
			  VM_PROT_ALL, VM_PROT_ALL, 0);

	if (!rv) {
		vm_object_reference XXX (object);

		rv = vm_map_wire (&kernel_map, kva, kva + PAGE_SIZE, 0);
		if (!rv) {
			*retval = 0;
			bcopy ((caddr_t)kva + page_offset,
			       retval, sizeof *retval);
		}
		vm_map_remove (&kernel_map, kva, kva + PAGE_SIZE);
	}

	return rv;
}

static int
pwrite (struct proc *procp, unsigned int addr, unsigned int datum)
{
	int rv;
	vm_map_t map, tmap;
	vm_object_t object;
	vm_map_backing_t ba;
	vm_offset_t kva = 0;
	int page_offset;	/* offset into page */
	vm_offset_t pageno;	/* page number */
	vm_map_entry_t out_entry;
	vm_prot_t out_prot;
	int wflags;
	vm_pindex_t pindex;
	vm_pindex_t pcount;
	boolean_t fix_prot = 0;

	/* Map page into kernel space */

	map = &procp->p_vmspace->vm_map;

	page_offset = addr - trunc_page(addr);
	pageno = trunc_page(addr);

	/*
	 * Check the permissions for the area we're interested in.
	 */
	if (vm_map_check_protection (map, pageno, pageno + PAGE_SIZE,
				     VM_PROT_WRITE, FALSE) == FALSE) {
		/*
		 * If the page was not writable, we make it so.
		 * XXX It is possible a page may *not* be read/executable,
		 * if a process changes that!
		 */
		fix_prot = 1;
		/* The page isn't writable, so let's try making it so... */
		if ((rv = vm_map_protect (map, pageno, pageno + PAGE_SIZE,
			VM_PROT_ALL, 0)) != KERN_SUCCESS)
			return EFAULT;	/* I guess... */
	}

	/*
	 * Now we need to get the page.  out_entry, out_prot, wflags, and
	 * single_use aren't used.  One would think the vm code would be
	 * a *bit* nicer...  We use tmap because vm_map_lookup() can
	 * change the map argument.
	 */
	tmap = map;
	rv = vm_map_lookup(&tmap, pageno, VM_PROT_WRITE, &out_entry,
			   &ba, &pindex, &pcount, &out_prot, &wflags);
	if (ba)
		object = ba->object;
	else
		object = NULL;

	if (rv != KERN_SUCCESS)
		return EINVAL;

	/*
	 * Okay, we've got the page.  Let's release tmap.
	 */
	vm_map_lookup_done (tmap, out_entry, 0);

	/*
	 * Fault the page in...
	 */
	rv = vm_fault(map, pageno, VM_PROT_WRITE|VM_PROT_READ, FALSE);
	if (rv != KERN_SUCCESS)
		return EFAULT;

	/* Find space in kernel_map for the page we're interested in */
	rv = vm_map_find (&kernel_map, object, NULL,
			  IDX_TO_OFF(pindex), &kva, PAGE_SIZE,
			  PAGE_SIZE, FALSE,
			  VM_MAPTYPE_NORMAL, VM_SUBSYS_PROC,
			  VM_PROT_ALL, VM_PROT_ALL, 0);
	if (!rv) {
		vm_object_reference XXX (object);

		rv = vm_map_wire (&kernel_map, kva, kva + PAGE_SIZE, 0);
		if (!rv) {
			bcopy (&datum, (caddr_t)kva + page_offset, sizeof datum);
		}
		vm_map_remove (&kernel_map, kva, kva + PAGE_SIZE);
	}

	if (fix_prot)
		vm_map_protect (map, pageno, pageno + PAGE_SIZE,
			VM_PROT_READ|VM_PROT_EXECUTE, 0);
	return rv;
}
#endif

/*
 * Process debugging system call.
 *
 * MPALMOSTSAFE
 */
int
sys_ptrace(struct ptrace_args *uap)
{
	struct proc *p = curproc;

	/*
	 * XXX this obfuscation is to reduce stack usage, but the register
	 * structs may be too large to put on the stack anyway.
	 */
	union {
		struct ptrace_io_desc piod;
		struct dbreg dbreg;
		struct fpreg fpreg;
		struct reg reg;
	} r;
	void *addr;
	int error = 0;

	addr = &r;
	switch (uap->req) {
	case PT_GETREGS:
	case PT_GETFPREGS:
#ifdef PT_GETDBREGS
	case PT_GETDBREGS:
#endif
		break;
	case PT_SETREGS:
		error = copyin(uap->addr, &r.reg, sizeof r.reg);
		break;
	case PT_SETFPREGS:
		error = copyin(uap->addr, &r.fpreg, sizeof r.fpreg);
		break;
#ifdef PT_SETDBREGS
	case PT_SETDBREGS:
		error = copyin(uap->addr, &r.dbreg, sizeof r.dbreg);
		break;
#endif
	case PT_IO:
		error = copyin(uap->addr, &r.piod, sizeof r.piod);
		break;
	default:
		addr = uap->addr;
	}
	if (error)
		return (error);

	error = kern_ptrace(p, uap->req, uap->pid, addr, uap->data,
			    &uap->sysmsg_result);
	if (error)
		return (error);

	switch (uap->req) {
	case PT_IO:
		(void)copyout(&r.piod, uap->addr, sizeof r.piod);
		break;
	case PT_GETREGS:
		error = copyout(&r.reg, uap->addr, sizeof r.reg);
		break;
	case PT_GETFPREGS:
		error = copyout(&r.fpreg, uap->addr, sizeof r.fpreg);
		break;
#ifdef PT_GETDBREGS
	case PT_GETDBREGS:
		error = copyout(&r.dbreg, uap->addr, sizeof r.dbreg);
		break;
#endif
	}

	return (error);
}

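/*
 * kern_ptrace()
 *
 *	Back-end for the ptrace() system call: check that the request is
 *	permitted against the target process, then carry it out.  For the
 *	register and PT_IO requests 'addr' points at an in-kernel copy of
 *	the argument structure; for the other requests it carries the raw
 *	address argument.  The word read by PT_READ_I/PT_READ_D is returned
 *	through *res.  Returns 0 on success or an errno.
 */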
int
kern_ptrace(struct proc *curp, int req, pid_t pid, void *addr,
	    int data, int *res)
{
	struct proc *p, *pp;
	struct lwp *lp;
	struct iovec iov;
	struct uio uio;
	struct ptrace_io_desc *piod;
	int error = 0;
	int write, tmp;
	int t;

	write = 0;
	if (req == PT_TRACE_ME) {
		p = curp;
		PHOLD(p);
	} else {
		if ((p = pfind(pid)) == NULL)
			return ESRCH;
	}

	if (!PRISON_CHECK(curp->p_ucred, p->p_ucred)) {
		PRELE(p);
		return (ESRCH);
	}
	if (p->p_flags & P_SYSTEM) {
		PRELE(p);
		return EINVAL;
	}

	lwkt_gettoken(&p->p_token);
	/* Can't trace a process that's currently exec'ing. */
	if ((p->p_flags & P_INEXEC) != 0) {
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return EAGAIN;
	}

	/*
	 * Permissions check
	 */
	switch (req) {
	case PT_TRACE_ME:
		/* Always legal. */
		break;

	case PT_ATTACH:
		/* Self */
		if (p->p_pid == curp->p_pid) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EINVAL;
		}

		/* Already traced */
		if (p->p_flags & P_TRACED) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EBUSY;
		}

		if (curp->p_flags & P_TRACED)
			for (pp = curp->p_pptr; pp != NULL; pp = pp->p_pptr)
				if (pp == p) {
					lwkt_reltoken(&p->p_token);
					PRELE(p);
					return (EINVAL);
				}

		/* not owned by you, has done setuid (unless you're root) */
		if ((p->p_ucred->cr_ruid != curp->p_ucred->cr_ruid) ||
		     (p->p_flags & P_SUGID)) {
			if ((error = priv_check_cred(curp->p_ucred, PRIV_ROOT, 0)) != 0) {
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				return error;
			}
		}

		/* can't trace init when securelevel > 0 */
		if (securelevel > 0 && p->p_pid == 1) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EPERM;
		}

		/* OK */
		break;

	case PT_READ_I:
	case PT_READ_D:
	case PT_WRITE_I:
	case PT_WRITE_D:
	case PT_IO:
	case PT_CONTINUE:
	case PT_KILL:
	case PT_STEP:
	case PT_DETACH:
#ifdef PT_GETREGS
	case PT_GETREGS:
#endif
#ifdef PT_SETREGS
	case PT_SETREGS:
#endif
#ifdef PT_GETFPREGS
	case PT_GETFPREGS:
#endif
#ifdef PT_SETFPREGS
	case PT_SETFPREGS:
#endif
#ifdef PT_GETDBREGS
	case PT_GETDBREGS:
#endif
#ifdef PT_SETDBREGS
	case PT_SETDBREGS:
#endif
		/* not being traced... */
		if ((p->p_flags & P_TRACED) == 0) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EPERM;
		}

		/* not being traced by YOU */
		if (p->p_pptr != curp) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EBUSY;
		}

		/* not currently stopped */
		if (p->p_stat != SSTOP ||
		    (p->p_flags & P_WAITED) == 0) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EBUSY;
		}

		/* OK */
		break;

	default:
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return EINVAL;
	}

	/* XXX lwp */
	lp = FIRST_LWP_IN_PROC(p);
	if (lp == NULL) {
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return EINVAL;
	}

#ifdef FIX_SSTEP
	/*
	 * Single step fixup ala procfs
	 */
	FIX_SSTEP(lp);
#endif

	/*
	 * Actually do the requests
	 */

	*res = 0;

	switch (req) {
	case PT_TRACE_ME:
		/* set my trace flag and "owner" so it can read/write me */
		p->p_flags |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return 0;

	case PT_ATTACH:
		/* security check done above */
		p->p_flags |= P_TRACED;
		p->p_oppid = p->p_pptr->p_pid;
		proc_reparent(p, curp);
		data = SIGSTOP;
		goto sendsig;	/* in PT_CONTINUE below */

	case PT_STEP:
	case PT_CONTINUE:
	case PT_DETACH:
		/* Zero means do not send any signal */
		if (data < 0 || data > _SIG_MAXSIG) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EINVAL;
		}

		LWPHOLD(lp);

		if (req == PT_STEP) {
			if ((error = ptrace_single_step (lp))) {
				LWPRELE(lp);
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				return error;
			}
		}

		if (addr != (void *)1) {
			if ((error = ptrace_set_pc (lp, (u_long)addr))) {
				LWPRELE(lp);
				lwkt_reltoken(&p->p_token);
				PRELE(p);
				return error;
			}
		}
		LWPRELE(lp);

		if (req == PT_DETACH) {
			/* reset process parent */
			if (p->p_oppid != p->p_pptr->p_pid) {
				struct proc *pp;

				pp = pfind(p->p_oppid);
				if (pp) {
					proc_reparent(p, pp);
					PRELE(pp);
				}
			}

			p->p_flags &= ~(P_TRACED | P_WAITED);
			p->p_oppid = 0;

			/* should we send SIGCHLD? */
		}

	sendsig:
		/*
		 * Deliver or queue signal.  If the process is stopped
		 * force it to be SACTIVE again.
		 */
		crit_enter();
		if (p->p_stat == SSTOP) {
			p->p_xstat = data;
			proc_unstop(p, SSTOP);
		} else if (data) {
			ksignal(p, data);
		}
		crit_exit();
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return 0;

	case PT_WRITE_I:
	case PT_WRITE_D:
		write = 1;
		/* fallthrough */
	case PT_READ_I:
	case PT_READ_D:
		/*
		 * NOTE! uio_offset represents the offset in the target
		 * process.  The iov is in the current process (the guy
		 * making the ptrace call) so uio_td must be the current
		 * process (though for a SYSSPACE transfer it doesn't
		 * really matter).
		 */
		tmp = 0;
		/* write = 0 set above */
		iov.iov_base = write ? (caddr_t)&data : (caddr_t)&tmp;
		iov.iov_len = sizeof(int);
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(uintptr_t)addr;
		uio.uio_resid = sizeof(int);
		uio.uio_segflg = UIO_SYSSPACE;
		uio.uio_rw = write ? UIO_WRITE : UIO_READ;
		uio.uio_td = curthread;
		error = procfs_domem(curp, lp, NULL, &uio);
		if (uio.uio_resid != 0) {
			/*
			 * XXX procfs_domem() doesn't currently return ENOSPC,
			 * so I think write() can bogusly return 0.
			 * XXX what happens for short writes?  We don't want
			 * to write partial data.
			 * XXX procfs_domem() returns EPERM for other invalid
			 * addresses.  Convert this to EINVAL.  Does this
			 * clobber returns of EPERM for other reasons?
			 */
			if (error == 0 || error == ENOSPC || error == EPERM)
				error = EINVAL;	/* EOF */
		}
		if (!write)
			*res = tmp;
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return (error);

	case PT_IO:
		/*
		 * NOTE! uio_offset represents the offset in the target
		 * process.  The iov is in the current process (the guy
		 * making the ptrace call) so uio_td must be the current
		 * process.
		 */
		piod = addr;
		iov.iov_base = piod->piod_addr;
		iov.iov_len = piod->piod_len;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(uintptr_t)piod->piod_offs;
		uio.uio_resid = piod->piod_len;
		uio.uio_segflg = UIO_USERSPACE;
		uio.uio_td = curthread;
		switch (piod->piod_op) {
		case PIOD_READ_D:
		case PIOD_READ_I:
			uio.uio_rw = UIO_READ;
			break;
		case PIOD_WRITE_D:
		case PIOD_WRITE_I:
			uio.uio_rw = UIO_WRITE;
			break;
		default:
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return (EINVAL);
		}
		error = procfs_domem(curp, lp, NULL, &uio);
		piod->piod_len -= uio.uio_resid;
		lwkt_reltoken(&p->p_token);
		PRELE(p);
		return (error);

	case PT_KILL:
		data = SIGKILL;
		goto sendsig;	/* in PT_CONTINUE above */

#ifdef PT_SETREGS
	case PT_SETREGS:
		write = 1;
		/* fallthrough */
#endif /* PT_SETREGS */
#ifdef PT_GETREGS
	case PT_GETREGS:
		/* write = 0 above */
#endif /* PT_GETREGS */
#if defined(PT_SETREGS) || defined(PT_GETREGS)
		if (!procfs_validregs(lp)) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EINVAL;
		} else {
			iov.iov_base = addr;
			iov.iov_len = sizeof(struct reg);
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = 0;
			uio.uio_resid = sizeof(struct reg);
			uio.uio_segflg = UIO_SYSSPACE;
			uio.uio_rw = write ? UIO_WRITE : UIO_READ;
			uio.uio_td = curthread;
			t = procfs_doregs(curp, lp, NULL, &uio);
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return t;
		}
#endif /* defined(PT_SETREGS) || defined(PT_GETREGS) */

#ifdef PT_SETFPREGS
	case PT_SETFPREGS:
		write = 1;
		/* fallthrough */
#endif /* PT_SETFPREGS */
#ifdef PT_GETFPREGS
	case PT_GETFPREGS:
		/* write = 0 above */
#endif /* PT_GETFPREGS */
#if defined(PT_SETFPREGS) || defined(PT_GETFPREGS)
		if (!procfs_validfpregs(lp)) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EINVAL;
		} else {
			iov.iov_base = addr;
			iov.iov_len = sizeof(struct fpreg);
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = 0;
			uio.uio_resid = sizeof(struct fpreg);
			uio.uio_segflg = UIO_SYSSPACE;
			uio.uio_rw = write ? UIO_WRITE : UIO_READ;
			uio.uio_td = curthread;
			t = procfs_dofpregs(curp, lp, NULL, &uio);
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return t;
		}
#endif /* defined(PT_SETFPREGS) || defined(PT_GETFPREGS) */

#ifdef PT_SETDBREGS
	case PT_SETDBREGS:
		write = 1;
		/* fallthrough */
#endif /* PT_SETDBREGS */
#ifdef PT_GETDBREGS
	case PT_GETDBREGS:
		/* write = 0 above */
#endif /* PT_GETDBREGS */
#if defined(PT_SETDBREGS) || defined(PT_GETDBREGS)
		if (!procfs_validdbregs(lp)) {
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return EINVAL;
		} else {
			iov.iov_base = addr;
			iov.iov_len = sizeof(struct dbreg);
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = 0;
			uio.uio_resid = sizeof(struct dbreg);
			uio.uio_segflg = UIO_SYSSPACE;
			uio.uio_rw = write ? UIO_WRITE : UIO_READ;
			uio.uio_td = curthread;
			t = procfs_dodbregs(curp, lp, NULL, &uio);
			lwkt_reltoken(&p->p_token);
			PRELE(p);
			return t;
		}
#endif /* defined(PT_SETDBREGS) || defined(PT_GETDBREGS) */

	default:
		break;
	}

	lwkt_reltoken(&p->p_token);
	PRELE(p);

	return 0;
}

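/*
 * trace_req() - currently a stub which always returns 1, i.e. trace
 * requests are always allowed here.
 */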
int
trace_req(struct proc *p)
{
	return 1;
}

/*
 * stopevent()
 *
 *	Stop a process because of a procfs event.  Stay stopped until p->p_step
 *	is cleared (cleared by PIOCCONT in procfs).
 *
 * MPSAFE
 */
void
stopevent(struct proc *p, unsigned int event, unsigned int val)
{
	/*
	 * Set event info.  Recheck p_stops in case we are
	 * racing a close() on procfs.
	 */
	spin_lock(&p->p_spin);
	if ((p->p_stops & event) == 0) {
		spin_unlock(&p->p_spin);
		return;
	}
	p->p_xstat = val;
	p->p_stype = event;
	p->p_step = 1;
	tsleep_interlock(&p->p_step, 0);
	spin_unlock(&p->p_spin);

	/*
	 * Wakeup any PIOCWAITing procs and wait for p_step to
	 * be cleared.
	 */
	for (;;) {
		wakeup(&p->p_stype);
		tsleep(&p->p_step, PINTERLOCKED, "stopevent", 0);
		spin_lock(&p->p_spin);
		if (p->p_step == 0) {
			spin_unlock(&p->p_spin);
			break;
		}
		tsleep_interlock(&p->p_step, 0);
		spin_unlock(&p->p_spin);
	}
}