/* $Id: ptrace.c,v 1.11 1998/10/19 16:26:31 ralf Exp $
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1992 Ross Biro
 * Copyright (C) Linus Torvalds
 * Copyright (C) 1994, 1995, 1996, 1997, 1998 Ralf Baechle
 * Copyright (C) 1996 David S. Miller
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/user.h>

#include <asm/fp.h>
#include <asm/mipsregs.h>
#include <asm/pgtable.h>
#include <asm/page.h>
#include <asm/system.h>
#include <asm/uaccess.h>

/*
 * This routine gets a long from any process space by following the page
 * tables. NOTE! You should check that the long isn't on a page boundary,
 * and that it is in the task area before calling this: this routine does
 * no checking.
 */
static unsigned long get_long(struct task_struct * tsk,
        struct vm_area_struct * vma, unsigned long addr)
{
        pgd_t *pgdir;
        pmd_t *pgmiddle;
        pte_t *pgtable;
        unsigned long page, retval;

repeat:
        pgdir = pgd_offset(vma->vm_mm, addr);
        if (pgd_none(*pgdir)) {
                handle_mm_fault(tsk, vma, addr, 0);
                goto repeat;
        }
        if (pgd_bad(*pgdir)) {
                printk("ptrace: bad page directory %08lx\n", pgd_val(*pgdir));
                pgd_clear(pgdir);
                return 0;
        }
        pgmiddle = pmd_offset(pgdir, addr);
        if (pmd_none(*pgmiddle)) {
                handle_mm_fault(tsk, vma, addr, 0);
                goto repeat;
        }
        if (pmd_bad(*pgmiddle)) {
                printk("ptrace: bad page middle %08lx\n", pmd_val(*pgmiddle));
                pmd_clear(pgmiddle);
                return 0;
        }
        pgtable = pte_offset(pgmiddle, addr);
        if (!pte_present(*pgtable)) {
                handle_mm_fault(tsk, vma, addr, 0);
                goto repeat;
        }
        page = pte_page(*pgtable);
        /* This is a hack for non-kernel-mapped video buffers and similar */
        if (MAP_NR(page) >= MAP_NR(high_memory))
                return 0;
        page += addr & ~PAGE_MASK;
        /* We can't use flush_page_to_ram() since we're running in
         * another context ...
         */
        flush_cache_all();
        retval = *(unsigned long *) page;
        flush_cache_all();      /* VCED avoidance */
        return retval;
}

/*
 * This routine puts a long into any process space by following the page
 * tables. NOTE! You should check that the long isn't on a page boundary,
 * and that it is in the task area before calling this: this routine does
 * no checking.
 *
 * Now keeps R/W state of page so that a text page stays readonly
 * even if a debugger scribbles breakpoints into it.  -M.U-
 */
static void put_long(struct task_struct *tsk,
        struct vm_area_struct * vma, unsigned long addr,
        unsigned long data)
{
        pgd_t *pgdir;
        pmd_t *pgmiddle;
        pte_t *pgtable;
        unsigned long page;

repeat:
        pgdir = pgd_offset(vma->vm_mm, addr);
        if (!pgd_present(*pgdir)) {
                handle_mm_fault(tsk, vma, addr, 1);
                goto repeat;
        }
        if (pgd_bad(*pgdir)) {
                printk("ptrace: bad page directory %08lx\n", pgd_val(*pgdir));
                pgd_clear(pgdir);
                return;
        }
        pgmiddle = pmd_offset(pgdir, addr);
        if (pmd_none(*pgmiddle)) {
                handle_mm_fault(tsk, vma, addr, 1);
                goto repeat;
        }
        if (pmd_bad(*pgmiddle)) {
                printk("ptrace: bad page middle %08lx\n", pmd_val(*pgmiddle));
                pmd_clear(pgmiddle);
                return;
        }
        pgtable = pte_offset(pgmiddle, addr);
        if (!pte_present(*pgtable)) {
                handle_mm_fault(tsk, vma, addr, 1);
                goto repeat;
        }
        page = pte_page(*pgtable);
        if (!pte_write(*pgtable)) {
                handle_mm_fault(tsk, vma, addr, 1);
                goto repeat;
        }
        /* This is a hack for non-kernel-mapped video buffers and similar */
        if (MAP_NR(page) < MAP_NR(high_memory))
                flush_cache_all();
        *(unsigned long *) (page + (addr & ~PAGE_MASK)) = data;
        if (MAP_NR(page) < MAP_NR(high_memory))
                flush_cache_all();
        /*
         * We're bypassing pagetables, so we have to set the dirty bit
         * ourselves.  This should also re-instate whatever read-only
         * mode there was before.
         */
        set_pte(pgtable, pte_mkdirty(mk_pte(page, vma->vm_page_prot)));
        flush_tlb_page(vma, addr);
}
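
/*
 * Find the VMA containing addr, extending a stack (VM_GROWSDOWN) VMA
 * downwards to cover it when necessary, within RLIMIT_STACK.  Returns
 * NULL when addr is not (and cannot be made) part of the task's
 * address space.
 */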
static struct vm_area_struct * find_extend_vma(struct task_struct * tsk, unsigned long addr)
{
        struct vm_area_struct * vma;

        addr &= PAGE_MASK;
        vma = find_vma(tsk->mm, addr);
        if (!vma)
                return NULL;
        if (vma->vm_start <= addr)
                return vma;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                return NULL;
        if (vma->vm_end - addr > tsk->rlim[RLIMIT_STACK].rlim_cur)
                return NULL;
        vma->vm_offset -= vma->vm_start - addr;
        vma->vm_start = addr;
        return vma;
}

/*
 * This routine checks the page boundaries, and that the offset is
 * within the task area. It then calls get_long() to read a long.
 */
static int read_long(struct task_struct * tsk, unsigned long addr,
        unsigned long * result)
{
        struct vm_area_struct * vma = find_extend_vma(tsk, addr);

        if (!vma)
                return -EIO;
        if ((addr & ~PAGE_MASK) > PAGE_SIZE-sizeof(long)) {
                unsigned long low,high;
                struct vm_area_struct * vma_high = vma;

                if (addr + sizeof(long) >= vma->vm_end) {
                        vma_high = vma->vm_next;
                        if (!vma_high || vma_high->vm_start != vma->vm_end)
                                return -EIO;
                }
                low = get_long(tsk, vma, addr & ~(sizeof(long)-1));
                high = get_long(tsk, vma_high, (addr+sizeof(long)) & ~(sizeof(long)-1));
                switch (addr & (sizeof(long)-1)) {
                case 1:
                        low >>= 8;
                        low |= high << 24;
                        break;
                case 2:
                        low >>= 16;
                        low |= high << 16;
                        break;
                case 3:
                        low >>= 24;
                        low |= high << 8;
                        break;
                }
                *result = low;
        } else
                *result = get_long(tsk, vma, addr);
        return 0;
}
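
/*
 * Worked example for the unaligned case handled above and in
 * write_long() below (a sketch assuming 32-bit longs and little-endian
 * byte order): for (addr & 3) == 1 the wanted word spans bytes 1..3 of
 * the aligned long at addr & ~3 ("low") and byte 0 of the next long
 * ("high").  "low >>= 8" moves bytes 1..3 down into bits 0..23 and
 * "low |= high << 24" supplies the missing top byte, so *result holds
 * the four bytes starting at addr.  write_long() uses the inverse
 * masks to scatter a word back across the two longs.
 */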

/*
 * This routine checks the page boundaries, and that the offset is
 * within the task area. It then calls put_long() to write a long.
 */
static int write_long(struct task_struct * tsk, unsigned long addr,
        unsigned long data)
{
        struct vm_area_struct * vma = find_extend_vma(tsk, addr);

        if (!vma)
                return -EIO;
        if ((addr & ~PAGE_MASK) > PAGE_SIZE-sizeof(long)) {
                unsigned long low,high;
                struct vm_area_struct * vma_high = vma;

                if (addr + sizeof(long) >= vma->vm_end) {
                        vma_high = vma->vm_next;
                        if (!vma_high || vma_high->vm_start != vma->vm_end)
                                return -EIO;
                }
                low = get_long(tsk, vma, addr & ~(sizeof(long)-1));
                high = get_long(tsk, vma_high, (addr+sizeof(long)) & ~(sizeof(long)-1));
                switch (addr & (sizeof(long)-1)) {
                case 0: /* shouldn't happen, but safety first */
                        low = data;
                        break;
                case 1:
                        low &= 0x000000ff;
                        low |= data << 8;
                        high &= ~0xff;
                        high |= data >> 24;
                        break;
                case 2:
                        low &= 0x0000ffff;
                        low |= data << 16;
                        high &= ~0xffff;
                        high |= data >> 16;
                        break;
                case 3:
                        low &= 0x00ffffff;
                        low |= data << 24;
                        high &= ~0xffffff;
                        high |= data >> 8;
                        break;
                }
                put_long(tsk, vma, addr & ~(sizeof(long)-1),low);
                put_long(tsk, vma_high, (addr+sizeof(long)) & ~(sizeof(long)-1),high);
        } else
                put_long(tsk, vma, addr, data);
        return 0;
}

asmlinkage int sys_ptrace(long request, long pid, long addr, long data)
{
        struct task_struct *child;
        int res;

        lock_kernel();
#if 0
        printk("ptrace(r=%d,pid=%d,addr=%08lx,data=%08lx)\n",
               (int) request, (int) pid, (unsigned long) addr,
               (unsigned long) data);
#endif
        if (request == PTRACE_TRACEME) {
                /* are we already being traced? */
                if (current->flags & PF_PTRACED) {
                        res = -EPERM;
                        goto out;
                }
                /* set the ptrace bit in the process flags. */
                current->flags |= PF_PTRACED;
                res = 0;
                goto out;
        }
        if (pid == 1) {         /* you may not mess with init */
                res = -EPERM;
                goto out;
        }
        if (!(child = find_task_by_pid(pid))) {
                res = -ESRCH;
                goto out;
        }
        if (request == PTRACE_ATTACH) {
                if (child == current) {
                        res = -EPERM;
                        goto out;
                }
                if ((!child->dumpable ||
                    (current->uid != child->euid) ||
                    (current->uid != child->suid) ||
                    (current->uid != child->uid) ||
                    (current->gid != child->egid) ||
                    (current->gid != child->sgid) ||
                    (!cap_issubset(child->cap_permitted, current->cap_permitted)) ||
                    (current->gid != child->gid)) &&
                    !capable(CAP_SYS_PTRACE)) {
                        res = -EPERM;
                        goto out;
                }
                /* the same process cannot be attached many times */
                if (child->flags & PF_PTRACED) {
                        res = -EPERM;
                        goto out;
                }
                child->flags |= PF_PTRACED;
                if (child->p_pptr != current) {
                        REMOVE_LINKS(child);
                        child->p_pptr = current;
                        SET_LINKS(child);
                }
                send_sig(SIGSTOP, child, 1);
                res = 0;
                goto out;
        }
        if (!(child->flags & PF_PTRACED)) {
                res = -ESRCH;
                goto out;
        }
        if (child->state != TASK_STOPPED) {
                if (request != PTRACE_KILL) {
                        res = -ESRCH;
                        goto out;
                }
        }
        if (child->p_pptr != current) {
                res = -ESRCH;
                goto out;
        }

        switch (request) {
        case PTRACE_PEEKTEXT: /* read word at location addr. */
        case PTRACE_PEEKDATA: {
                unsigned long tmp;

                res = read_long(child, addr, &tmp);
                if (res < 0)
                        goto out;
                res = put_user(tmp,(unsigned long *) data);
                goto out;
                }

        /* read the word at location addr in the USER area. */
/* #define DEBUG_PEEKUSR */
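        /*
         * USER-area layout used by PTRACE_PEEKUSR/PTRACE_POKEUSR:
         *   0-31   general purpose registers (regs->regs[])
         *   32-63  floating point registers (tss.fpu.hard.fp_regs[])
         *   64 cp0_epc   65 cp0_cause   66 cp0_badvaddr
         *   67 lo        68 hi          69 FPU control/status
         *   70 FP implementation/version register (reads as 0)
         * POKEUSR accepts only the GPRs, FP registers, epc, lo, hi and
         * the FPU control word; anything else yields -EIO.
         */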
        case PTRACE_PEEKUSR: {
                struct pt_regs *regs;
                unsigned long tmp;

                regs = (struct pt_regs *) ((unsigned long) child +
                       KERNEL_STACK_SIZE - 32 - sizeof(struct pt_regs));
                tmp = 0;  /* Default return value. */
                if (addr < 32 && addr >= 0)
                        tmp = regs->regs[addr];
                else if (addr >= 32 && addr < 64) {
                        unsigned long long *fregs;

                        if (child->used_math) {
                                if (last_task_used_math == child) {
                                        enable_cp1();
                                        r4xx0_save_fp(child);
                                        disable_cp1();
                                        last_task_used_math = NULL;
                                }
                                fregs = (unsigned long long *)
                                        &child->tss.fpu.hard.fp_regs[0];
                                tmp = (unsigned long) fregs[(addr - 32)];
                        } else {
                                tmp = -1;       /* FP not yet used */
                        }
                } else {
                        addr -= 64;
                        switch(addr) {
                        case 0:
                                tmp = regs->cp0_epc;
                                break;
                        case 1:
                                tmp = regs->cp0_cause;
                                break;
                        case 2:
                                tmp = regs->cp0_badvaddr;
                                break;
                        case 3:
                                tmp = regs->lo;
                                break;
                        case 4:
                                tmp = regs->hi;
                                break;
                        case 5:
                                tmp = child->tss.fpu.hard.control;
                                break;
                        case 6: /* implementation / version register */
                                tmp = 0; /* XXX */
                                break;
                        default:
                                tmp = 0;
                                res = -EIO;
                                goto out;
                        }
                }
                res = put_user(tmp, (unsigned long *) data);
                goto out;
                }

        case PTRACE_POKETEXT: /* write the word at location addr. */
        case PTRACE_POKEDATA:
                res = write_long(child,addr,data);
                goto out;

        case PTRACE_POKEUSR: {
                struct pt_regs *regs;

                /* errors from the switch below are reported through the
                 * function-level res */
                res = 0;
                regs = (struct pt_regs *) ((unsigned long) child +
                       KERNEL_STACK_SIZE - 32 - sizeof(struct pt_regs));
                if (addr < 32 && addr >= 0)
                        regs->regs[addr] = data;
                else if (addr >= 32 && addr < 64) {
                        unsigned long long *fregs;

                        if (child->used_math) {
                                if (last_task_used_math == child) {
                                        enable_cp1();
                                        r4xx0_save_fp(child);
                                        disable_cp1();
                                        last_task_used_math = NULL;
                                }
                        } else {
                                /* FP not yet used */
                                memset(&child->tss.fpu.hard, ~0,
                                       sizeof(child->tss.fpu.hard));
                                child->tss.fpu.hard.control = 0;
                        }
                        fregs = (unsigned long long *)
                                &child->tss.fpu.hard.fp_regs[0];
                        fregs[(addr - 32)] = (unsigned long long) data;
                } else {
                        addr -= 64;
                        switch (addr) {
                        case 0:
                                regs->cp0_epc = data;
                                break;
                        case 3:
                                regs->lo = data;
                                break;
                        case 4:
                                regs->hi = data;
                                break;
                        case 5:
                                child->tss.fpu.hard.control = data;
                                break;
                        default:
                                /* The rest are not allowed. */
                                res = -EIO;
                                break;
                        }
                }
                goto out;
                }

        case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
        case PTRACE_CONT: { /* restart after signal. */
                if ((unsigned long) data > _NSIG) {
                        res = -EIO;
                        goto out;
                }
                if (request == PTRACE_SYSCALL)
                        child->flags |= PF_TRACESYS;
                else
                        child->flags &= ~PF_TRACESYS;
                child->exit_code = data;
                wake_up_process(child);
                res = data;
                goto out;
                }

        /*
         * make the child exit.  Best I can do is send it a sigkill.
         * perhaps it should be put in the status that it wants to
         * exit.
         */
        case PTRACE_KILL: {
                if (child->state != TASK_ZOMBIE) {
                        child->exit_code = SIGKILL;
                        wake_up_process(child);
                }
                res = 0;
                goto out;
                }

        case PTRACE_DETACH: { /* detach a process that was attached. */
                if ((unsigned long) data > _NSIG) {
                        res = -EIO;
                        goto out;
                }
                child->flags &= ~(PF_PTRACED|PF_TRACESYS);
                child->exit_code = data;
                REMOVE_LINKS(child);
                child->p_pptr = child->p_opptr;
                SET_LINKS(child);
                wake_up_process(child);
                res = 0;
                goto out;
                }

        default:
                res = -EIO;
                goto out;
        }
out:
        unlock_kernel();
        return res;
}
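
/*
 * Sketch of typical use from userspace (hypothetical tracer code, not
 * part of this file).  With the raw system call as implemented above,
 * PTRACE_PEEKUSR stores the word at the user address passed in "data"
 * (a C library wrapper may instead return the value); USER-area
 * offset 64 is cp0_epc.
 *
 *      pid_t pid = fork();
 *      if (pid == 0) {
 *              ptrace(PTRACE_TRACEME, 0, 0, 0);
 *              execl("/bin/ls", "ls", (char *) 0);
 *      } else {
 *              unsigned long epc;
 *              int status;
 *              waitpid(pid, &status, 0);
 *              ptrace(PTRACE_PEEKUSR, pid, 64, (long) &epc);
 *              ptrace(PTRACE_CONT, pid, 0, 0);
 *      }
 */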

asmlinkage void syscall_trace(void)
{
        if ((current->flags & (PF_PTRACED|PF_TRACESYS))
                        != (PF_PTRACED|PF_TRACESYS))
                return;
        current->exit_code = SIGTRAP;
        current->state = TASK_STOPPED;
        notify_parent(current, SIGCHLD);
        schedule();
        /*
         * this isn't the same as continuing with a signal, but it will do
         * for normal use.  strace only continues with a signal if the
         * stopping signal is not SIGTRAP.  -brl
         */
        if (current->exit_code) {
                send_sig(current->exit_code, current, 1);
                current->exit_code = 0;
        }
}
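
/*
 * syscall_trace() only stops the current task when both PF_PTRACED and
 * PF_TRACESYS are set; PF_TRACESYS is exactly what PTRACE_SYSCALL turns
 * on above, so a tracer can walk a child from one syscall boundary to
 * the next.  A minimal loop might look like this (hypothetical
 * userspace code, not part of this file):
 *
 *      for (;;) {
 *              if (ptrace(PTRACE_SYSCALL, pid, 0, 0) < 0)
 *                      break;
 *              if (waitpid(pid, &status, 0) < 0 || WIFEXITED(status))
 *                      break;
 *              ... inspect registers with PTRACE_PEEKUSR ...
 *      }
 */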