/*
 * linux/kernel/ptrace.c
 *
 * (C) Copyright 1999 Linus Torvalds
 *
 * Common interfaces for "ptrace()" which we do not want
 * to continually duplicate across every architecture.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/smp_lock.h>
#include <linux/ptrace.h>
#include <linux/security.h>
#include <linux/signal.h>

#include <asm/pgtable.h>
#include <asm/uaccess.h>

/*
 * ptrace a task: make the debugger its new parent and
 * move it to the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_link(task_t *child, task_t *new_parent)
{
	if (!list_empty(&child->ptrace_list))
		BUG();
	if (child->parent == new_parent)
		return;
	list_add(&child->ptrace_list, &child->parent->ptrace_children);
	REMOVE_LINKS(child);
	child->parent = new_parent;
	SET_LINKS(child);
}
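
/*
 * Illustrative sketch (added commentary, not from the original file):
 * callers are expected to wrap __ptrace_link() in the tasklist writer
 * lock, exactly as ptrace_attach() does later in this file:
 *
 *	write_lock_irq(&tasklist_lock);
 *	__ptrace_link(task, current);
 *	write_unlock_irq(&tasklist_lock);
 */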

/*
 * Turn a tracing stop into a normal stop now, since with no tracer there
 * would be no way to wake it up with SIGCONT or SIGKILL.  If there was a
 * signal sent that would resume the child, but didn't because it was in
 * TASK_TRACED, resume it now.
 * Requires that irqs be disabled.
 */
void ptrace_untrace(task_t *child)
{
	spin_lock(&child->sighand->siglock);
	if (child->state == TASK_TRACED) {
		if (child->signal->flags & SIGNAL_STOP_STOPPED) {
			child->state = TASK_STOPPED;
		} else {
			signal_wake_up(child, 1);
		}
	}
	if (child->signal->flags & SIGNAL_GROUP_EXIT) {
		sigaddset(&child->pending.signal, SIGKILL);
		signal_wake_up(child, 1);
	}
	spin_unlock(&child->sighand->siglock);
}
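
/*
 * Added note: SIGNAL_STOP_STOPPED means the whole thread group is in an
 * ordinary job-control stop, so once untraced the child should simply
 * look TASK_STOPPED; otherwise it was stopped only for the tracer's
 * benefit and is woken up again.
 */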

/*
 * unptrace a task: move it back to its original parent and
 * remove it from the ptrace list.
 *
 * Must be called with the tasklist lock write-held.
 */
void __ptrace_unlink(task_t *child)
{
	BUG_ON(!child->ptrace);

	child->ptrace = 0;
	if (!list_empty(&child->ptrace_list)) {
		list_del_init(&child->ptrace_list);
		REMOVE_LINKS(child);
		child->parent = child->real_parent;
		SET_LINKS(child);
	}

	ptrace_untrace(child);
}

/*
 * Check that we have indeed attached to the thing..
 */
int ptrace_check_attach(struct task_struct *child, int kill)
{
	int ret = -ESRCH;

	/*
	 * We take the read lock around doing both checks to close a
	 * possible race where someone else was tracing our child and
	 * detached between these two checks.  After this locked check,
	 * we are sure that this is our traced child and that can only
	 * be changed by us so it's not changing right after this.
	 */
	read_lock(&tasklist_lock);
	if ((child->ptrace & PT_PTRACED) && child->parent == current &&
	    (!(child->ptrace & PT_ATTACHED) || child->real_parent != current)
	    && child->signal != NULL) {
		ret = 0;
		spin_lock_irq(&child->sighand->siglock);
		if (child->state == TASK_STOPPED) {
			child->state = TASK_TRACED;
		} else if (child->state != TASK_TRACED && !kill) {
			ret = -ESRCH;
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);

	if (!ret && !kill) {
		wait_task_inactive(child);
	}

	/* All systems go.. */
	return ret;
}
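
/*
 * Added note: "kill" is set for PTRACE_KILL (see sys_ptrace below, which
 * passes request == PTRACE_KILL), where the child need not be stopped.
 * Every other request requires the child to be in TASK_TRACED, or just
 * moved there from TASK_STOPPED, before the tracer may poke at it; the
 * wait_task_inactive() lets it finish getting off the runqueue first.
 */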

static int may_attach(struct task_struct *task)
{
	if (!task->mm)
		return -EPERM;
	if (((current->uid != task->euid) ||
	     (current->uid != task->suid) ||
	     (current->uid != task->uid) ||
	     (current->gid != task->egid) ||
	     (current->gid != task->sgid) ||
	     (current->gid != task->gid)) && !capable(CAP_SYS_PTRACE))
		return -EPERM;
	smp_rmb();
	if (!task->mm->dumpable && !capable(CAP_SYS_PTRACE))
		return -EPERM;

	return security_ptrace(current, task);
}
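
/*
 * Example (illustrative, not from the original source): these checks
 * mean an unprivileged user cannot attach to a process whose uid, euid
 * or suid differs from the tracer's uid unless the tracer holds
 * CAP_SYS_PTRACE. A failed check surfaces in userspace as:
 *
 *	if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) < 0)
 *		perror("ptrace");	// typically EPERM here
 */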

int ptrace_may_attach(struct task_struct *task)
{
	int err;
	task_lock(task);
	err = may_attach(task);
	task_unlock(task);
	return !err;
}

int ptrace_attach(struct task_struct *task)
{
	int retval;
	task_lock(task);
	retval = -EPERM;
	if (task->pid <= 1)
		goto bad;
	if (task->tgid == current->tgid)
		goto bad;
	/* the same process cannot be attached many times */
	if (task->ptrace & PT_PTRACED)
		goto bad;
	retval = may_attach(task);
	if (retval)
		goto bad;

	/* Go */
	task->ptrace |= PT_PTRACED | ((task->real_parent != current)
				      ? PT_ATTACHED : 0);
	if (capable(CAP_SYS_PTRACE))
		task->ptrace |= PT_PTRACE_CAP;
	task_unlock(task);

	write_lock_irq(&tasklist_lock);
	__ptrace_link(task, current);
	write_unlock_irq(&tasklist_lock);

	force_sig_specific(SIGSTOP, task);
	return 0;

bad:
	task_unlock(task);
	return retval;
}
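
/*
 * Usage sketch (illustrative, userspace): since attach ends by queueing
 * SIGSTOP, a tracer normally waits for the stop before issuing further
 * requests:
 *
 *	ptrace(PTRACE_ATTACH, pid, NULL, NULL);
 *	waitpid(pid, &status, 0);	// child enters the traced stop
 *	...				// now further requests are valid
 */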

int ptrace_detach(struct task_struct *child, unsigned int data)
{
	if (!valid_signal(data))
		return -EIO;

	/* Architecture-specific hardware disable .. */
	ptrace_disable(child);

	/* .. re-parent .. */
	child->exit_code = data;

	write_lock_irq(&tasklist_lock);
	__ptrace_unlink(child);
	/* .. and wake it up. */
	if (child->exit_state != EXIT_ZOMBIE)
		wake_up_process(child);
	write_unlock_irq(&tasklist_lock);

	return 0;
}
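
/*
 * Added note: "data" is the signal number the detached child should see
 * on resumption (0 for none), which is why it is range-checked with
 * valid_signal() and stashed in ->exit_code for the woken child to pick
 * up. From userspace: ptrace(PTRACE_DETACH, pid, NULL, SIGCONT);
 */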

/*
 * Access another process' address space.
 * Source/target buffer must be kernel space,
 * Do not walk the page table directly, use get_user_pages
 */
int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
{
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	struct page *page;
	void *old_buf = buf;

	mm = get_task_mm(tsk);
	if (!mm)
		return 0;

	down_read(&mm->mmap_sem);
	/* ignore errors, just check how much was successfully transferred */
	while (len) {
		int bytes, ret, offset;
		void *maddr;

		ret = get_user_pages(tsk, mm, addr, 1,
				write, 1, &page, &vma);
		if (ret <= 0)
			break;

		bytes = len;
		offset = addr & (PAGE_SIZE-1);
		if (bytes > PAGE_SIZE-offset)
			bytes = PAGE_SIZE-offset;

		maddr = kmap(page);
		if (write) {
			copy_to_user_page(vma, page, addr,
					  maddr + offset, buf, bytes);
			set_page_dirty_lock(page);
		} else {
			copy_from_user_page(vma, page, addr,
					    buf, maddr + offset, bytes);
		}
		kunmap(page);
		page_cache_release(page);
		len -= bytes;
		buf += bytes;
		addr += bytes;
	}
	up_read(&mm->mmap_sem);
	mmput(mm);

	return buf - old_buf;
}
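
/*
 * Example (illustrative): an arch PTRACE_PEEKDATA handler typically does
 * the equivalent of the following with this routine:
 *
 *	unsigned long word;
 *	if (access_process_vm(child, addr, &word, sizeof(word), 0)
 *			!= sizeof(word))
 *		return -EIO;
 *	return put_user(word, (unsigned long __user *)data);
 */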

int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		retval = access_process_vm(tsk, src, buf, this_len, 0);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		if (copy_to_user(dst, buf, retval))
			return -EFAULT;
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}

int ptrace_writedata(struct task_struct *tsk, char __user *src, unsigned long dst, int len)
{
	int copied = 0;

	while (len > 0) {
		char buf[128];
		int this_len, retval;

		this_len = (len > sizeof(buf)) ? sizeof(buf) : len;
		if (copy_from_user(buf, src, this_len))
			return -EFAULT;
		retval = access_process_vm(tsk, dst, buf, this_len, 1);
		if (!retval) {
			if (copied)
				break;
			return -EIO;
		}
		copied += retval;
		src += retval;
		dst += retval;
		len -= retval;
	}
	return copied;
}
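
/*
 * Added note: both helpers bounce the transfer through a 128-byte kernel
 * buffer because access_process_vm() requires a kernel-space buffer
 * while the tracer's buffer is user memory: copy_to_user() and
 * copy_from_user() bridge the user side, access_process_vm() the remote
 * side, chunk by chunk.
 */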

static int ptrace_setoptions(struct task_struct *child, long data)
{
	child->ptrace &= ~PT_TRACE_MASK;

	if (data & PTRACE_O_TRACESYSGOOD)
		child->ptrace |= PT_TRACESYSGOOD;

	if (data & PTRACE_O_TRACEFORK)
		child->ptrace |= PT_TRACE_FORK;

	if (data & PTRACE_O_TRACEVFORK)
		child->ptrace |= PT_TRACE_VFORK;

	if (data & PTRACE_O_TRACECLONE)
		child->ptrace |= PT_TRACE_CLONE;

	if (data & PTRACE_O_TRACEEXEC)
		child->ptrace |= PT_TRACE_EXEC;

	if (data & PTRACE_O_TRACEVFORKDONE)
		child->ptrace |= PT_TRACE_VFORK_DONE;

	if (data & PTRACE_O_TRACEEXIT)
		child->ptrace |= PT_TRACE_EXIT;

	return (data & ~PTRACE_O_MASK) ? -EINVAL : 0;
}
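
/*
 * Usage sketch (illustrative): a tracer enables the events it cares
 * about in one call; unknown bits make the whole request fail with
 * -EINVAL:
 *
 *	ptrace(PTRACE_SETOPTIONS, pid, 0,
 *	       PTRACE_O_TRACESYSGOOD | PTRACE_O_TRACEFORK);
 *
 * Note the PT_TRACE_* event bits are reset up front, so event options
 * omitted from a later call are dropped.
 */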

static int ptrace_getsiginfo(struct task_struct *child, siginfo_t __user * data)
{
	siginfo_t lastinfo;
	int error = -ESRCH;

	read_lock(&tasklist_lock);
	if (likely(child->sighand != NULL)) {
		error = -EINVAL;
		spin_lock_irq(&child->sighand->siglock);
		if (likely(child->last_siginfo != NULL)) {
			lastinfo = *child->last_siginfo;
			error = 0;
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);
	if (!error)
		return copy_siginfo_to_user(data, &lastinfo);
	return error;
}

static int ptrace_setsiginfo(struct task_struct *child, siginfo_t __user * data)
{
	siginfo_t newinfo;
	int error = -ESRCH;

	if (copy_from_user(&newinfo, data, sizeof (siginfo_t)))
		return -EFAULT;

	read_lock(&tasklist_lock);
	if (likely(child->sighand != NULL)) {
		error = -EINVAL;
		spin_lock_irq(&child->sighand->siglock);
		if (likely(child->last_siginfo != NULL)) {
			*child->last_siginfo = newinfo;
			error = 0;
		}
		spin_unlock_irq(&child->sighand->siglock);
	}
	read_unlock(&tasklist_lock);
	return error;
}
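
/*
 * Example (illustrative, userspace): together these let a tracer inspect
 * and doctor the signal the child stopped on before letting delivery
 * proceed:
 *
 *	siginfo_t si;
 *	ptrace(PTRACE_GETSIGINFO, pid, 0, &si);
 *	si.si_signo = SIGTERM;			// rewrite in place
 *	ptrace(PTRACE_SETSIGINFO, pid, 0, &si);
 *	ptrace(PTRACE_CONT, pid, 0, SIGTERM);
 */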

int ptrace_request(struct task_struct *child, long request,
		   long addr, long data)
{
	int ret = -EIO;

	switch (request) {
#ifdef PTRACE_OLDSETOPTIONS
	case PTRACE_OLDSETOPTIONS:
#endif
	case PTRACE_SETOPTIONS:
		ret = ptrace_setoptions(child, data);
		break;
	case PTRACE_GETEVENTMSG:
		ret = put_user(child->ptrace_message, (unsigned long __user *) data);
		break;
	case PTRACE_GETSIGINFO:
		ret = ptrace_getsiginfo(child, (siginfo_t __user *) data);
		break;
	case PTRACE_SETSIGINFO:
		ret = ptrace_setsiginfo(child, (siginfo_t __user *) data);
		break;
	default:
		break;
	}

	return ret;
}
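
/*
 * Added note: this is the arch-independent fallback. An architecture's
 * arch_ptrace() handles register and memory requests itself and hands
 * anything it does not recognize to this function, roughly:
 *
 *	default:
 *		ret = ptrace_request(child, request, addr, data);
 *		break;
 */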

#ifndef __ARCH_SYS_PTRACE
static int ptrace_get_task_struct(long request, long pid,
		struct task_struct **childp)
{
	struct task_struct *child;
	int ret;

	/*
	 * Callers use child == NULL as an indication to exit early even
	 * when the return value is 0, so make sure it is non-NULL here.
	 */
	*childp = NULL;

	if (request == PTRACE_TRACEME) {
		/*
		 * Are we already being traced?
		 */
		if (current->ptrace & PT_PTRACED)
			return -EPERM;
		ret = security_ptrace(current->parent, current);
		if (ret)
			return -EPERM;
		/*
		 * Set the ptrace bit in the process ptrace flags.
		 */
		current->ptrace |= PT_PTRACED;
		return 0;
	}

	/*
	 * You may not mess with init
	 */
	if (pid == 1)
		return -EPERM;

	read_lock(&tasklist_lock);
	child = find_task_by_pid(pid);
	if (child)
		get_task_struct(child);
	read_unlock(&tasklist_lock);
	if (!child)
		return -ESRCH;

	*childp = child;
	return 0;
}

asmlinkage long sys_ptrace(long request, long pid, long addr, long data)
{
	struct task_struct *child;
	long ret;

	/*
	 * This lock_kernel fixes a subtle race with suid exec
	 */
	lock_kernel();
	ret = ptrace_get_task_struct(request, pid, &child);
	if (!child)
		goto out;

	if (request == PTRACE_ATTACH) {
		ret = ptrace_attach(child);
		goto out_put_task_struct;
	}

	ret = ptrace_check_attach(child, request == PTRACE_KILL);
	if (ret < 0)
		goto out_put_task_struct;

	ret = arch_ptrace(child, request, addr, data);
	if (ret < 0)
		goto out_put_task_struct;

 out_put_task_struct:
	put_task_struct(child);
 out:
	unlock_kernel();
	return ret;
}
#endif /* __ARCH_SYS_PTRACE */
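
/*
 * End-to-end sketch (illustrative, userspace): the request flow above
 * corresponds to the classic tracer loop:
 *
 *	pid_t pid = fork();
 *	if (pid == 0) {
 *		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
 *		execl("/bin/true", "true", NULL);
 *	}
 *	waitpid(pid, &status, 0);	// child stops with SIGTRAP at exec
 *	ptrace(PTRACE_CONT, pid, NULL, 0);
 */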