USB: usbtmc: fix switch statement
[linux-2.6/mini2440.git] / kernel / ptrace.c
blob893c2c7615d5a045a8b07a0fc6b21040599146bb
1 /*
2 * linux/kernel/ptrace.c
4 * (C) Copyright 1999 Linus Torvalds
6 * Common interfaces for "ptrace()" which we do not want
7 * to continually duplicate across every architecture.
8 */
10 #include <linux/capability.h>
11 #include <linux/module.h>
12 #include <linux/sched.h>
13 #include <linux/errno.h>
14 #include <linux/mm.h>
15 #include <linux/highmem.h>
16 #include <linux/pagemap.h>
17 #include <linux/smp_lock.h>
18 #include <linux/ptrace.h>
19 #include <linux/security.h>
20 #include <linux/signal.h>
21 #include <linux/audit.h>
22 #include <linux/pid_namespace.h>
23 #include <linux/syscalls.h>
25 #include <asm/pgtable.h>
26 #include <asm/uaccess.h>
/*
 * Initialize a new task whose father had been ptraced.
 *
 * Called from copy_process().
 */
void ptrace_fork(struct task_struct *child, unsigned long clone_flags)
{
	/* Delegate entirely to the architecture hook. */
	arch_ptrace_fork(child, clone_flags);
}
40 * ptrace a task: make the debugger its new parent and
41 * move it to the ptrace list.
43 * Must be called with the tasklist lock write-held.
45 void __ptrace_link(struct task_struct *child, struct task_struct *new_parent)
47 BUG_ON(!list_empty(&child->ptrace_entry));
48 list_add(&child->ptrace_entry, &new_parent->ptraced);
49 child->parent = new_parent;
53 * Turn a tracing stop into a normal stop now, since with no tracer there
54 * would be no way to wake it up with SIGCONT or SIGKILL. If there was a
55 * signal sent that would resume the child, but didn't because it was in
56 * TASK_TRACED, resume it now.
57 * Requires that irqs be disabled.
59 static void ptrace_untrace(struct task_struct *child)
61 spin_lock(&child->sighand->siglock);
62 if (task_is_traced(child)) {
63 if (child->signal->flags & SIGNAL_STOP_STOPPED) {
64 __set_task_state(child, TASK_STOPPED);
65 } else {
66 signal_wake_up(child, 1);
69 spin_unlock(&child->sighand->siglock);
73 * unptrace a task: move it back to its original parent and
74 * remove it from the ptrace list.
76 * Must be called with the tasklist lock write-held.
78 void __ptrace_unlink(struct task_struct *child)
80 BUG_ON(!child->ptrace);
82 child->ptrace = 0;
83 child->parent = child->real_parent;
84 list_del_init(&child->ptrace_entry);
86 arch_ptrace_untrace(child);
87 if (task_is_traced(child))
88 ptrace_untrace(child);
92 * Check that we have indeed attached to the thing..
94 int ptrace_check_attach(struct task_struct *child, int kill)
96 int ret = -ESRCH;
99 * We take the read lock around doing both checks to close a
100 * possible race where someone else was tracing our child and
101 * detached between these two checks. After this locked check,
102 * we are sure that this is our traced child and that can only
103 * be changed by us so it's not changing right after this.
105 read_lock(&tasklist_lock);
106 if ((child->ptrace & PT_PTRACED) && child->parent == current) {
107 ret = 0;
109 * child->sighand can't be NULL, release_task()
110 * does ptrace_unlink() before __exit_signal().
112 spin_lock_irq(&child->sighand->siglock);
113 if (task_is_stopped(child))
114 child->state = TASK_TRACED;
115 else if (!task_is_traced(child) && !kill)
116 ret = -ESRCH;
117 spin_unlock_irq(&child->sighand->siglock);
119 read_unlock(&tasklist_lock);
121 if (!ret && !kill)
122 ret = wait_task_inactive(child, TASK_TRACED) ? 0 : -ESRCH;
124 /* All systems go.. */
125 return ret;
128 int __ptrace_may_access(struct task_struct *task, unsigned int mode)
130 const struct cred *cred = current_cred(), *tcred;
132 /* May we inspect the given task?
133 * This check is used both for attaching with ptrace
134 * and for allowing access to sensitive information in /proc.
136 * ptrace_attach denies several cases that /proc allows
137 * because setting up the necessary parent/child relationship
138 * or halting the specified task is impossible.
140 int dumpable = 0;
141 /* Don't let security modules deny introspection */
142 if (task == current)
143 return 0;
144 rcu_read_lock();
145 tcred = __task_cred(task);
146 if ((cred->uid != tcred->euid ||
147 cred->uid != tcred->suid ||
148 cred->uid != tcred->uid ||
149 cred->gid != tcred->egid ||
150 cred->gid != tcred->sgid ||
151 cred->gid != tcred->gid) &&
152 !capable(CAP_SYS_PTRACE)) {
153 rcu_read_unlock();
154 return -EPERM;
156 rcu_read_unlock();
157 smp_rmb();
158 if (task->mm)
159 dumpable = get_dumpable(task->mm);
160 if (!dumpable && !capable(CAP_SYS_PTRACE))
161 return -EPERM;
163 return security_ptrace_may_access(task, mode);
166 bool ptrace_may_access(struct task_struct *task, unsigned int mode)
168 int err;
169 task_lock(task);
170 err = __ptrace_may_access(task, mode);
171 task_unlock(task);
172 return (!err ? true : false);
175 int ptrace_attach(struct task_struct *task)
177 int retval;
178 unsigned long flags;
180 audit_ptrace(task);
182 retval = -EPERM;
183 if (same_thread_group(task, current))
184 goto out;
186 /* Protect exec's credential calculations against our interference;
187 * SUID, SGID and LSM creds get determined differently under ptrace.
189 retval = mutex_lock_interruptible(&task->cred_exec_mutex);
190 if (retval < 0)
191 goto out;
193 retval = -EPERM;
194 repeat:
196 * Nasty, nasty.
198 * We want to hold both the task-lock and the
199 * tasklist_lock for writing at the same time.
200 * But that's against the rules (tasklist_lock
201 * is taken for reading by interrupts on other
202 * cpu's that may have task_lock).
204 task_lock(task);
205 if (!write_trylock_irqsave(&tasklist_lock, flags)) {
206 task_unlock(task);
207 do {
208 cpu_relax();
209 } while (!write_can_lock(&tasklist_lock));
210 goto repeat;
213 if (!task->mm)
214 goto bad;
215 /* the same process cannot be attached many times */
216 if (task->ptrace & PT_PTRACED)
217 goto bad;
218 retval = __ptrace_may_access(task, PTRACE_MODE_ATTACH);
219 if (retval)
220 goto bad;
222 /* Go */
223 task->ptrace |= PT_PTRACED;
224 if (capable(CAP_SYS_PTRACE))
225 task->ptrace |= PT_PTRACE_CAP;
227 __ptrace_link(task, current);
229 send_sig_info(SIGSTOP, SEND_SIG_FORCED, task);
230 bad:
231 write_unlock_irqrestore(&tasklist_lock, flags);
232 task_unlock(task);
233 mutex_unlock(&task->cred_exec_mutex);
234 out:
235 return retval;
238 int ptrace_detach(struct task_struct *child, unsigned int data)
240 int dead = 0;
242 if (!valid_signal(data))
243 return -EIO;
245 /* Architecture-specific hardware disable .. */
246 ptrace_disable(child);
247 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
249 write_lock_irq(&tasklist_lock);
250 /* protect against de_thread()->release_task() */
251 if (child->ptrace) {
252 child->exit_code = data;
254 dead = __ptrace_detach(current, child);
256 if (!child->exit_state)
257 wake_up_process(child);
259 write_unlock_irq(&tasklist_lock);
261 if (unlikely(dead))
262 release_task(child);
264 return 0;
267 int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
269 int copied = 0;
271 while (len > 0) {
272 char buf[128];
273 int this_len, retval;
275 this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
276 retval = access_process_vm(tsk, src, buf, this_len, 0);
277 if (!retval) {
278 if (copied)
279 break;
280 return -EIO;
282 if (copy_to_user(dst, buf, retval))
283 return -EFAULT;
284 copied += retval;
285 src += retval;
286 dst += retval;
287 len -= retval;
289 return copied;
292 int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
294 int copied = 0;
296 while (len > 0) {
297 char buf[128];
298 int this_len, retval;
300 this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
301 if (copy_from_user(buf, src, this_len))
302 return -EFAULT;
303 retval = access_process_vm(tsk, dst, buf, this_len, 1);
304 if (!retval) {
305 if (copied)
306 break;
307 return -EIO;
309 copied += retval;
310 src += retval;
311 dst += retval;
312 len -= retval;
314 return copied;
317 static int ptrace_setoptions(struct task_struct *child, long data)
319 child->ptrace &= ~PT_TRACE_MASK;
321 if (data & PTRACE_O_TRACESYSGOOD)
322 child->ptrace |= PT_TRACESYSGOOD;
324 if (data & PTRACE_O_TRACEFORK)
325 child->ptrace |= PT_TRACE_FORK;
327 if (data & PTRACE_O_TRACEVFORK)
328 child->ptrace |= PT_TRACE_VFORK;
330 if (data & PTRACE_O_TRACECLONE)
331 child->ptrace |= PT_TRACE_CLONE;
333 if (data & PTRACE_O_TRACEEXEC)
334 child->ptrace |= PT_TRACE_EXEC;
336 if (data & PTRACE_O_TRACEVFORKDONE)
337 child->ptrace |= PT_TRACE_VFORK_DONE;
339 if (data & PTRACE_O_TRACEEXIT)
340 child->ptrace |= PT_TRACE_EXIT;
342 return (data & ~PTRACE_O_MASK) ? -EINVAL : 0;
345 static int ptrace_getsiginfo(struct task_struct *child, siginfo_t *info)
347 int error = -ESRCH;
349 read_lock(&tasklist_lock);
350 if (likely(child->sighand != NULL)) {
351 error = -EINVAL;
352 spin_lock_irq(&child->sighand->siglock);
353 if (likely(child->last_siginfo != NULL)) {
354 *info = *child->last_siginfo;
355 error = 0;
357 spin_unlock_irq(&child->sighand->siglock);
359 read_unlock(&tasklist_lock);
360 return error;
363 static int ptrace_setsiginfo(struct task_struct *child, const siginfo_t *info)
365 int error = -ESRCH;
367 read_lock(&tasklist_lock);
368 if (likely(child->sighand != NULL)) {
369 error = -EINVAL;
370 spin_lock_irq(&child->sighand->siglock);
371 if (likely(child->last_siginfo != NULL)) {
372 *child->last_siginfo = *info;
373 error = 0;
375 spin_unlock_irq(&child->sighand->siglock);
377 read_unlock(&tasklist_lock);
378 return error;
/*
 * Helper predicates for ptrace_resume(): each compiles to 0 on
 * architectures that do not define the corresponding request, so the
 * dead branches drop out at compile time.
 */
#ifdef PTRACE_SINGLESTEP
#define is_singlestep(request)		((request) == PTRACE_SINGLESTEP)
#else
#define is_singlestep(request)		0
#endif

#ifdef PTRACE_SINGLEBLOCK
#define is_singleblock(request)		((request) == PTRACE_SINGLEBLOCK)
#else
#define is_singleblock(request)		0
#endif

#ifdef PTRACE_SYSEMU
#define is_sysemu_singlestep(request)	((request) == PTRACE_SYSEMU_SINGLESTEP)
#else
#define is_sysemu_singlestep(request)	0
#endif
400 static int ptrace_resume(struct task_struct *child, long request, long data)
402 if (!valid_signal(data))
403 return -EIO;
405 if (request == PTRACE_SYSCALL)
406 set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
407 else
408 clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
410 #ifdef TIF_SYSCALL_EMU
411 if (request == PTRACE_SYSEMU || request == PTRACE_SYSEMU_SINGLESTEP)
412 set_tsk_thread_flag(child, TIF_SYSCALL_EMU);
413 else
414 clear_tsk_thread_flag(child, TIF_SYSCALL_EMU);
415 #endif
417 if (is_singleblock(request)) {
418 if (unlikely(!arch_has_block_step()))
419 return -EIO;
420 user_enable_block_step(child);
421 } else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
422 if (unlikely(!arch_has_single_step()))
423 return -EIO;
424 user_enable_single_step(child);
426 else
427 user_disable_single_step(child);
429 child->exit_code = data;
430 wake_up_process(child);
432 return 0;
435 int ptrace_request(struct task_struct *child, long request,
436 long addr, long data)
438 int ret = -EIO;
439 siginfo_t siginfo;
441 switch (request) {
442 case PTRACE_PEEKTEXT:
443 case PTRACE_PEEKDATA:
444 return generic_ptrace_peekdata(child, addr, data);
445 case PTRACE_POKETEXT:
446 case PTRACE_POKEDATA:
447 return generic_ptrace_pokedata(child, addr, data);
449 #ifdef PTRACE_OLDSETOPTIONS
450 case PTRACE_OLDSETOPTIONS:
451 #endif
452 case PTRACE_SETOPTIONS:
453 ret = ptrace_setoptions(child, data);
454 break;
455 case PTRACE_GETEVENTMSG:
456 ret = put_user(child->ptrace_message, (unsigned long __user *) data);
457 break;
459 case PTRACE_GETSIGINFO:
460 ret = ptrace_getsiginfo(child, &siginfo);
461 if (!ret)
462 ret = copy_siginfo_to_user((siginfo_t __user *) data,
463 &siginfo);
464 break;
466 case PTRACE_SETSIGINFO:
467 if (copy_from_user(&siginfo, (siginfo_t __user *) data,
468 sizeof siginfo))
469 ret = -EFAULT;
470 else
471 ret = ptrace_setsiginfo(child, &siginfo);
472 break;
474 case PTRACE_DETACH: /* detach a process that was attached. */
475 ret = ptrace_detach(child, data);
476 break;
478 #ifdef PTRACE_SINGLESTEP
479 case PTRACE_SINGLESTEP:
480 #endif
481 #ifdef PTRACE_SINGLEBLOCK
482 case PTRACE_SINGLEBLOCK:
483 #endif
484 #ifdef PTRACE_SYSEMU
485 case PTRACE_SYSEMU:
486 case PTRACE_SYSEMU_SINGLESTEP:
487 #endif
488 case PTRACE_SYSCALL:
489 case PTRACE_CONT:
490 return ptrace_resume(child, request, data);
492 case PTRACE_KILL:
493 if (child->exit_state) /* already dead */
494 return 0;
495 return ptrace_resume(child, request, SIGKILL);
497 default:
498 break;
501 return ret;
505 * ptrace_traceme -- helper for PTRACE_TRACEME
507 * Performs checks and sets PT_PTRACED.
508 * Should be used by all ptrace implementations for PTRACE_TRACEME.
510 int ptrace_traceme(void)
512 int ret = -EPERM;
515 * Are we already being traced?
517 repeat:
518 task_lock(current);
519 if (!(current->ptrace & PT_PTRACED)) {
521 * See ptrace_attach() comments about the locking here.
523 unsigned long flags;
524 if (!write_trylock_irqsave(&tasklist_lock, flags)) {
525 task_unlock(current);
526 do {
527 cpu_relax();
528 } while (!write_can_lock(&tasklist_lock));
529 goto repeat;
532 ret = security_ptrace_traceme(current->parent);
535 * Set the ptrace bit in the process ptrace flags.
536 * Then link us on our parent's ptraced list.
538 if (!ret) {
539 current->ptrace |= PT_PTRACED;
540 __ptrace_link(current, current->real_parent);
543 write_unlock_irqrestore(&tasklist_lock, flags);
545 task_unlock(current);
546 return ret;
550 * ptrace_get_task_struct -- grab a task struct reference for ptrace
551 * @pid: process id to grab a task_struct reference of
553 * This function is a helper for ptrace implementations. It checks
554 * permissions and then grabs a task struct for use of the actual
555 * ptrace implementation.
557 * Returns the task_struct for @pid or an ERR_PTR() on failure.
559 struct task_struct *ptrace_get_task_struct(pid_t pid)
561 struct task_struct *child;
563 read_lock(&tasklist_lock);
564 child = find_task_by_vpid(pid);
565 if (child)
566 get_task_struct(child);
568 read_unlock(&tasklist_lock);
569 if (!child)
570 return ERR_PTR(-ESRCH);
571 return child;
/* Architectures without post-attach book-keeping get a no-op. */
#ifndef arch_ptrace_attach
#define arch_ptrace_attach(child)	do { } while (0)
#endif
578 SYSCALL_DEFINE4(ptrace, long, request, long, pid, long, addr, long, data)
580 struct task_struct *child;
581 long ret;
584 * This lock_kernel fixes a subtle race with suid exec
586 lock_kernel();
587 if (request == PTRACE_TRACEME) {
588 ret = ptrace_traceme();
589 if (!ret)
590 arch_ptrace_attach(current);
591 goto out;
594 child = ptrace_get_task_struct(pid);
595 if (IS_ERR(child)) {
596 ret = PTR_ERR(child);
597 goto out;
600 if (request == PTRACE_ATTACH) {
601 ret = ptrace_attach(child);
603 * Some architectures need to do book-keeping after
604 * a ptrace attach.
606 if (!ret)
607 arch_ptrace_attach(child);
608 goto out_put_task_struct;
611 ret = ptrace_check_attach(child, request == PTRACE_KILL);
612 if (ret < 0)
613 goto out_put_task_struct;
615 ret = arch_ptrace(child, request, addr, data);
616 if (ret < 0)
617 goto out_put_task_struct;
619 out_put_task_struct:
620 put_task_struct(child);
621 out:
622 unlock_kernel();
623 return ret;
626 int generic_ptrace_peekdata(struct task_struct *tsk, long addr, long data)
628 unsigned long tmp;
629 int copied;
631 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
632 if (copied != sizeof(tmp))
633 return -EIO;
634 return put_user(tmp, (unsigned long __user *)data);
637 int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data)
639 int copied;
641 copied = access_process_vm(tsk, addr, &data, sizeof(data), 1);
642 return (copied == sizeof(data)) ? 0 : -EIO;
645 #if defined CONFIG_COMPAT
646 #include <linux/compat.h>
648 int compat_ptrace_request(struct task_struct *child, compat_long_t request,
649 compat_ulong_t addr, compat_ulong_t data)
651 compat_ulong_t __user *datap = compat_ptr(data);
652 compat_ulong_t word;
653 siginfo_t siginfo;
654 int ret;
656 switch (request) {
657 case PTRACE_PEEKTEXT:
658 case PTRACE_PEEKDATA:
659 ret = access_process_vm(child, addr, &word, sizeof(word), 0);
660 if (ret != sizeof(word))
661 ret = -EIO;
662 else
663 ret = put_user(word, datap);
664 break;
666 case PTRACE_POKETEXT:
667 case PTRACE_POKEDATA:
668 ret = access_process_vm(child, addr, &data, sizeof(data), 1);
669 ret = (ret != sizeof(data) ? -EIO : 0);
670 break;
672 case PTRACE_GETEVENTMSG:
673 ret = put_user((compat_ulong_t) child->ptrace_message, datap);
674 break;
676 case PTRACE_GETSIGINFO:
677 ret = ptrace_getsiginfo(child, &siginfo);
678 if (!ret)
679 ret = copy_siginfo_to_user32(
680 (struct compat_siginfo __user *) datap,
681 &siginfo);
682 break;
684 case PTRACE_SETSIGINFO:
685 memset(&siginfo, 0, sizeof siginfo);
686 if (copy_siginfo_from_user32(
687 &siginfo, (struct compat_siginfo __user *) datap))
688 ret = -EFAULT;
689 else
690 ret = ptrace_setsiginfo(child, &siginfo);
691 break;
693 default:
694 ret = ptrace_request(child, request, addr, data);
697 return ret;
700 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
701 compat_long_t addr, compat_long_t data)
703 struct task_struct *child;
704 long ret;
707 * This lock_kernel fixes a subtle race with suid exec
709 lock_kernel();
710 if (request == PTRACE_TRACEME) {
711 ret = ptrace_traceme();
712 goto out;
715 child = ptrace_get_task_struct(pid);
716 if (IS_ERR(child)) {
717 ret = PTR_ERR(child);
718 goto out;
721 if (request == PTRACE_ATTACH) {
722 ret = ptrace_attach(child);
724 * Some architectures need to do book-keeping after
725 * a ptrace attach.
727 if (!ret)
728 arch_ptrace_attach(child);
729 goto out_put_task_struct;
732 ret = ptrace_check_attach(child, request == PTRACE_KILL);
733 if (!ret)
734 ret = compat_arch_ptrace(child, request, addr, data);
736 out_put_task_struct:
737 put_task_struct(child);
738 out:
739 unlock_kernel();
740 return ret;
742 #endif /* CONFIG_COMPAT */