/* $Id: ptrace.c,v 1.13 1999/06/17 13:25:46 ralf Exp $
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1992 Ross Biro
 * Copyright (C) Linus Torvalds
 * Copyright (C) 1994, 1995, 1996, 1997, 1998 Ralf Baechle
 * Copyright (C) 1996 David S. Miller
 */
12 #include <linux/kernel.h>
13 #include <linux/sched.h>
15 #include <linux/errno.h>
16 #include <linux/ptrace.h>
17 #include <linux/smp.h>
18 #include <linux/smp_lock.h>
19 #include <linux/user.h>
22 #include <asm/mipsregs.h>
23 #include <asm/pgtable.h>
25 #include <asm/system.h>
26 #include <asm/uaccess.h>
/*
 * This routine gets a long from any process space by following the page
 * tables. NOTE! You should check that the long isn't on a page boundary,
 * and that it is in the task area before calling this: this routine does
 * no checking.
 */
/*
 * get_long - read one word from a traced task's address space by
 * manually walking its page tables (pgd -> pmd -> pte).
 *
 * NOTE(review): this copy of the file is a garbled extraction: the
 * pgd/pmd/pte local declarations, the retry jumps after each
 * handle_mm_fault() call, the error-path returns and several closing
 * braces are not visible here.  The code below is kept byte-identical;
 * only comments were added.  Restore from pristine source to build.
 */
34 static unsigned long get_long(struct task_struct
* tsk
,
35 struct vm_area_struct
* vma
, unsigned long addr
)
40 unsigned long page
, retval
;
/* Level 1: page directory entry for 'addr' in the child's mm. */
43 pgdir
= pgd_offset(vma
->vm_mm
, addr
);
/* Entry missing: fault it in read-only (write flag 0); presumably the
   walk is then retried -- the retry jump is missing from this copy. */
44 if (pgd_none(*pgdir
)) {
45 handle_mm_fault(tsk
, vma
, addr
, 0);
48 if (pgd_bad(*pgdir
)) {
49 printk("ptrace: bad page directory %08lx\n", pgd_val(*pgdir
));
/* Level 2: page middle directory. */
53 pgmiddle
= pmd_offset(pgdir
, addr
);
54 if (pmd_none(*pgmiddle
)) {
55 handle_mm_fault(tsk
, vma
, addr
, 0);
58 if (pmd_bad(*pgmiddle
)) {
59 printk("ptrace: bad page middle %08lx\n", pmd_val(*pgmiddle
));
/* Level 3: page table entry; fault in if the page is not resident. */
63 pgtable
= pte_offset(pgmiddle
, addr
);
64 if (!pte_present(*pgtable
)) {
65 handle_mm_fault(tsk
, vma
, addr
, 0);
/* Kernel-mapped address of the page frame holding 'addr'. */
68 page
= pte_page(*pgtable
);
69 /* This is a hack for non-kernel-mapped video buffers and similar */
70 if (MAP_NR(page
) >= MAP_NR(high_memory
))
/* Add the offset within the page, then read the word directly. */
72 page
+= addr
& ~PAGE_MASK
;
73 /* We can't use flush_page_to_ram() since we're running in
77 retval
= *(unsigned long *) page
;
78 flush_cache_all(); /* VCED avoidance */
/*
 * This routine puts a long into any process space by following the page
 * tables. NOTE! You should check that the long isn't on a page boundary,
 * and that it is in the task area before calling this: this routine does
 * no checking.
 *
 * Now keeps R/W state of page so that a text page stays readonly
 * even if a debugger scribbles breakpoints into it. -M.U-
 */
/*
 * put_long - write one word into a traced task's address space by
 * manually walking its page tables, then mark the page dirty by hand.
 *
 * NOTE(review): garbled extraction -- the 'data' parameter line, the
 * pgd/pmd/pte local declarations, the retry jumps after each
 * handle_mm_fault() call and several closing braces are not visible
 * here.  Code kept byte-identical; only comments added.
 */
91 static void put_long(struct task_struct
*tsk
,
92 struct vm_area_struct
* vma
, unsigned long addr
,
/* Level 1: page directory; fault in with write intent (flag 1). */
101 pgdir
= pgd_offset(vma
->vm_mm
, addr
);
102 if (!pgd_present(*pgdir
)) {
103 handle_mm_fault(tsk
, vma
, addr
, 1);
106 if (pgd_bad(*pgdir
)) {
107 printk("ptrace: bad page directory %08lx\n", pgd_val(*pgdir
));
/* Level 2: page middle directory. */
111 pgmiddle
= pmd_offset(pgdir
, addr
);
112 if (pmd_none(*pgmiddle
)) {
113 handle_mm_fault(tsk
, vma
, addr
, 1);
116 if (pmd_bad(*pgmiddle
)) {
117 printk("ptrace: bad page middle %08lx\n", pmd_val(*pgmiddle
));
/* Level 3: pte; fault on non-present AND on write-protected pages --
   the latter presumably forces a copy-on-write so shared text pages
   are not scribbled on directly (see header comment). */
121 pgtable
= pte_offset(pgmiddle
, addr
);
122 if (!pte_present(*pgtable
)) {
123 handle_mm_fault(tsk
, vma
, addr
, 1);
126 page
= pte_page(*pgtable
);
127 if (!pte_write(*pgtable
)) {
128 handle_mm_fault(tsk
, vma
, addr
, 1);
131 /* This is a hack for non-kernel-mapped video buffers and similar */
132 if (MAP_NR(page
) < MAP_NR(high_memory
))
/* Store the word directly through the kernel mapping of the page. */
134 *(unsigned long *) (page
+ (addr
& ~PAGE_MASK
)) = data
;
135 if (MAP_NR(page
) < MAP_NR(high_memory
))
138 * We're bypassing pagetables, so we have to set the dirty bit
139 * ourselves this should also re-instate whatever read-only mode
/* Rebuild the pte with the dirty bit set, then shoot down the TLB
   entry so the hardware picks up the new mapping. */
142 set_pte(pgtable
, pte_mkdirty(mk_pte(page
, vma
->vm_page_prot
)));
143 flush_tlb_page(vma
, addr
);
/*
 * This routine checks the page boundaries, and that the offset is
 * within the task area. It then calls get_long() to read a long.
 */
/*
 * read_long - boundary-aware wrapper around get_long(): a read that
 * straddles a page boundary is done as two aligned reads which are
 * then recombined (the combining switch cases are missing here).
 *
 * NOTE(review): garbled extraction -- the vma NULL check, the switch
 * case bodies and the return statements are not visible in this copy.
 * Code kept byte-identical; only comments added.
 */
150 static int read_long(struct task_struct
* tsk
, unsigned long addr
,
151 unsigned long * result
)
/* Find (and possibly grow, e.g. the stack) the VMA holding 'addr'. */
153 struct vm_area_struct
* vma
= find_extend_vma(tsk
, addr
);
/* Does the long cross into the next page? */
157 if ((addr
& ~PAGE_MASK
) > PAGE_SIZE
-sizeof(long)) {
158 unsigned long low
,high
;
159 struct vm_area_struct
* vma_high
= vma
;
/* The tail may live in the next VMA; it must be contiguous. */
161 if (addr
+ sizeof(long) >= vma
->vm_end
) {
162 vma_high
= vma
->vm_next
;
163 if (!vma_high
|| vma_high
->vm_start
!= vma
->vm_end
)
/* Read the two aligned words on either side of the boundary. */
166 low
= get_long(tsk
, vma
, addr
& ~(sizeof(long)-1));
167 high
= get_long(tsk
, vma_high
, (addr
+sizeof(long)) & ~(sizeof(long)-1));
/* Recombine by misalignment amount (case bodies missing here). */
168 switch (addr
& (sizeof(long)-1)) {
/* Aligned, single-page fast path: one direct read. */
184 *result
= get_long(tsk
, vma
, addr
);
/*
 * This routine checks the page boundaries, and that the offset is
 * within the task area. It then calls put_long() to write a long.
 */
/*
 * write_long - boundary-aware wrapper around put_long(): a write that
 * straddles a page boundary is done as a read-modify-write of the two
 * aligned words on either side.
 *
 * NOTE(review): garbled extraction -- the 'data' parameter line, the
 * switch case bodies that splice 'data' into low/high, and the return
 * statements are not visible in this copy.  Code kept byte-identical;
 * only comments added.
 */
192 static int write_long(struct task_struct
* tsk
, unsigned long addr
,
/* Find (and possibly grow) the VMA holding 'addr'. */
195 struct vm_area_struct
* vma
= find_extend_vma(tsk
, addr
);
/* Does the long cross into the next page? */
199 if ((addr
& ~PAGE_MASK
) > PAGE_SIZE
-sizeof(long)) {
200 unsigned long low
,high
;
201 struct vm_area_struct
* vma_high
= vma
;
/* The tail may live in the next VMA; it must be contiguous. */
203 if (addr
+ sizeof(long) >= vma
->vm_end
) {
204 vma_high
= vma
->vm_next
;
205 if (!vma_high
|| vma_high
->vm_start
!= vma
->vm_end
)
/* Read both aligned words, merge 'data' in (cases missing here),
   then write both halves back. */
208 low
= get_long(tsk
, vma
, addr
& ~(sizeof(long)-1));
209 high
= get_long(tsk
, vma_high
, (addr
+sizeof(long)) & ~(sizeof(long)-1));
210 switch (addr
& (sizeof(long)-1)) {
211 case 0: /* shouldn't happen, but safety first */
233 put_long(tsk
, vma
, addr
& ~(sizeof(long)-1),low
);
234 put_long(tsk
, vma_high
, (addr
+sizeof(long)) & ~(sizeof(long)-1),high
);
/* Aligned, single-page fast path: one direct write. */
236 put_long(tsk
, vma
, addr
, data
);
/*
 * sys_ptrace - the MIPS ptrace(2) system call entry point.
 *
 * NOTE(review): garbled extraction -- local declarations (flags, tmp,
 * res), the 'switch (request)' header, many case labels, goto targets,
 * error returns, unlock paths and closing braces are not visible in
 * this copy.  Code kept byte-identical; only comments added.  Restore
 * from the pristine source before building.
 */
240 asmlinkage
int sys_ptrace(long request
, long pid
, long addr
, long data
)
242 struct task_struct
*child
;
/* Debug trace of every ptrace call -- presumably compiled out in
   production builds (the surrounding #if is not visible here). */
248 printk("ptrace(r=%d,pid=%d,addr=%08lx,data=%08lx)\n",
249 (int) request
, (int) pid
, (unsigned long) addr
,
250 (unsigned long) data
);
/* PTRACE_TRACEME: mark the *calling* process as traced. */
252 if (request
== PTRACE_TRACEME
) {
253 /* are we already being traced? */
254 if (current
->flags
& PF_PTRACED
) {
258 /* set the ptrace bit in the process flags. */
259 current
->flags
|= PF_PTRACED
;
263 if (pid
== 1) { /* you may not mess with init */
/* Look up the target task; fail if no such pid. */
267 if (!(child
= find_task_by_pid(pid
))) {
/* PTRACE_ATTACH: permission checks, then adopt and stop the child. */
271 if (request
== PTRACE_ATTACH
) {
272 if (child
== current
) {
/* Caller must match the child's uids/gids and already hold all of
   the child's permitted capabilities, unless it has CAP_SYS_PTRACE.
   NOTE(review): (current->gid != child->gid) is tested twice in this
   condition -- the second occurrence looks like a duplicate; confirm
   against the pristine source. */
276 if ((!child
->dumpable
||
277 (current
->uid
!= child
->euid
) ||
278 (current
->uid
!= child
->suid
) ||
279 (current
->uid
!= child
->uid
) ||
280 (current
->gid
!= child
->egid
) ||
281 (current
->gid
!= child
->sgid
) ||
282 (current
->gid
!= child
->gid
) ||
283 (!cap_issubset(child
->cap_permitted
,
284 current
->cap_permitted
)) ||
285 (current
->gid
!= child
->gid
)) && !capable(CAP_SYS_PTRACE
)){
289 /* the same process cannot be attached many times */
290 if (child
->flags
& PF_PTRACED
)
292 child
->flags
|= PF_PTRACED
;
/* Re-parent the child to the tracer under the tasklist lock. */
294 write_lock_irqsave(&tasklist_lock
, flags
);
295 if (child
->p_pptr
!= current
) {
297 child
->p_pptr
= current
;
300 write_unlock_irqrestore(&tasklist_lock
, flags
);
302 send_sig(SIGSTOP
, child
, 1);
/* All remaining requests need an already-traced, stopped child whose
   ptrace parent is the caller (PTRACE_KILL exempt from stop check). */
306 if (!(child
->flags
& PF_PTRACED
)) {
310 if (child
->state
!= TASK_STOPPED
) {
311 if (request
!= PTRACE_KILL
) {
316 if (child
->p_pptr
!= current
) {
/* Read a word from the child's text or data space. */
322 case PTRACE_PEEKTEXT
: /* read word at location addr. */
323 case PTRACE_PEEKDATA
: {
326 down(&child
->mm
->mmap_sem
);
327 res
= read_long(child
, addr
, &tmp
);
328 up(&child
->mm
->mmap_sem
);
331 res
= put_user(tmp
,(unsigned long *) data
);
335 /* Read the word at location addr in the USER area. */
336 case PTRACE_PEEKUSR
: {
337 struct pt_regs
*regs
;
/* Child's saved user registers sit near the top of its kernel stack,
   32 bytes below the end. */
340 regs
= (struct pt_regs
*) ((unsigned long) child
+
341 KERNEL_STACK_SIZE
- 32 - sizeof(struct pt_regs
));
342 tmp
= 0; /* Default return value. */
346 tmp
= regs
->regs
[addr
];
348 case FPR_BASE
... FPR_BASE
+ 31:
349 if (child
->used_math
) {
350 unsigned long long *fregs
;
/* If the child still owns the FPU, flush its live FP state back to
   the thread struct before reading from it. */
352 if (last_task_used_math
== child
) {
354 r4xx0_save_fp(child
);
356 last_task_used_math
= NULL
;
358 fregs
= (unsigned long long *)
359 &child
->tss
.fpu
.hard
.fp_regs
[0];
360 tmp
= (unsigned long) fregs
[(addr
- 32)];
362 tmp
= -1; /* FP not yet used */
369 tmp
= regs
->cp0_cause
;
372 tmp
= regs
->cp0_badvaddr
;
381 tmp
= child
->tss
.fpu
.hard
.control
;
383 case FPC_EIR
: /* implementation / version register */
/* Copy the fetched value out to the tracer's buffer. */
391 res
= put_user(tmp
, (unsigned long *) data
);
/* Write a word into the child's text or data space. */
395 case PTRACE_POKETEXT
: /* write the word at location addr. */
396 case PTRACE_POKEDATA
:
397 down(&child
->mm
->mmap_sem
);
398 res
= write_long(child
,addr
,data
);
399 up(&child
->mm
->mmap_sem
);
/* Write a word into the child's USER area (cpu/fpu registers). */
402 case PTRACE_POKEUSR
: {
403 unsigned long long *fregs
;
404 struct pt_regs
*regs
;
409 regs
= (struct pt_regs
*) ((unsigned long) child
+
410 KERNEL_STACK_SIZE
- 32 - sizeof(struct pt_regs
));
412 case FPR_BASE
... FPR_BASE
+ 31:
413 if (child
->used_math
) {
/* Flush live FPU state before modifying the saved copy. */
414 if (last_task_used_math
== child
) {
416 r4xx0_save_fp(child
);
418 last_task_used_math
= NULL
;
421 /* FP not yet used */
/* First FP touch via ptrace: initialize the saved FP area. */
422 memset(&child
->tss
.fpu
.hard
, ~0,
423 sizeof(child
->tss
.fpu
.hard
));
424 child
->tss
.fpu
.hard
.control
= 0;
426 fregs
= (unsigned long long *)
427 &child
->tss
.fpu
.hard
.fp_regs
[0];
428 fregs
[(addr
- 32)] = (unsigned long long) data
;
431 regs
->cp0_epc
= data
;
440 child
->tss
.fpu
.hard
.control
= data
;
443 /* The rest are not allowed. */
/* Resume the child, optionally stopping at syscall boundaries. */
450 case PTRACE_SYSCALL
: /* continue and stop at next (return from) syscall */
451 case PTRACE_CONT
: { /* restart after signal. */
452 if ((unsigned long) data
> _NSIG
) {
456 if (request
== PTRACE_SYSCALL
)
457 child
->flags
|= PF_TRACESYS
;
459 child
->flags
&= ~PF_TRACESYS
;
460 child
->exit_code
= data
;
461 wake_up_process(child
);
467 * make the child exit. Best I can do is send it a sigkill.
468 * perhaps it should be put in the status that it wants to
/* PTRACE_KILL (case label missing in this copy): force the child to
   exit with SIGKILL unless it is already a zombie. */
472 if (child
->state
!= TASK_ZOMBIE
) {
473 child
->exit_code
= SIGKILL
;
474 wake_up_process(child
);
480 case PTRACE_DETACH
: { /* detach a process that was attached. */
481 if ((unsigned long) data
> _NSIG
) {
/* Clear trace flags, hand the child back to its original parent,
   and let it run again. */
485 child
->flags
&= ~(PF_PTRACED
|PF_TRACESYS
);
486 child
->exit_code
= data
;
488 child
->p_pptr
= child
->p_opptr
;
490 wake_up_process(child
);
/*
 * syscall_trace - called for traced tasks at syscall entry/exit:
 * stops the current task with SIGTRAP so the tracer can inspect it,
 * then delivers any signal the tracer left in exit_code.
 *
 * NOTE(review): garbled extraction -- the early return for untraced
 * tasks, the schedule() call after stopping, and the closing braces
 * are not visible in this copy.  Code kept byte-identical.
 */
504 asmlinkage
void syscall_trace(void)
/* Only act when both PF_PTRACED and PF_TRACESYS are set. */
506 if ((current
->flags
& (PF_PTRACED
|PF_TRACESYS
))
507 != (PF_PTRACED
|PF_TRACESYS
))
/* Stop ourselves and notify the tracing parent via SIGCHLD. */
509 current
->exit_code
= SIGTRAP
;
510 current
->state
= TASK_STOPPED
;
511 notify_parent(current
, SIGCHLD
);
514 * this isn't the same as continuing with a signal, but it will do
515 * for normal use. strace only continues with a signal if the
516 * stopping signal is not SIGTRAP. -brl
/* If the tracer stored a signal number in exit_code while we were
   stopped, deliver it to ourselves now. */
518 if (current
->exit_code
) {
519 send_sig(current
->exit_code
, current
, 1);
520 current
->exit_code
= 0;