[PATCH] x86_64: allow setting RF in EFLAGS
arch/x86_64/kernel/ptrace.c [linux-2.6/linux-2.6-openrd.git]
/* ptrace.c */
/* By Ross Biro 1/23/92 */
/*
 * Pentium III FXSR, SSE support
 *	Gareth Hughes <gareth@valinux.com>, May 2000
 *
 * x86-64 port 2000-2002 Andi Kleen
 */

#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/security.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/signal.h>

#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/i387.h>
#include <asm/debugreg.h>
#include <asm/ldt.h>
#include <asm/desc.h>
#include <asm/proto.h>
#include <asm/ia32.h>
/*
 * does not yet catch signals sent when the child dies.
 * in exit.c or in signal.c.
 */

/*
 * Determines which flags the user has access to [1 = access, 0 = no access].
 * Prohibits changing ID(21), VIP(20), VIF(19), VM(17), IOPL(12-13), IF(9).
 * Also masks reserved bits (63-22, 15, 5, 3, 1).
 */
#define FLAG_MASK 0x54dd5UL
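/*
 * Spelled out, 0x54dd5 leaves these flags writable: CF(0), PF(2), AF(4),
 * ZF(6), SF(7), TF(8), DF(10), OF(11), NT(14), RF(16) and AC(18).  RF(16)
 * is the bit this patch newly permits, which lets a debugger resume past an
 * instruction breakpoint without it refiring.  A minimal user-space sketch
 * (illustrative only, not part of this file):
 *
 *	unsigned long efl = ptrace(PTRACE_PEEKUSER, pid,
 *			offsetof(struct user_regs_struct, eflags), 0);
 *	ptrace(PTRACE_POKEUSER, pid,
 *			offsetof(struct user_regs_struct, eflags),
 *			efl | (1UL << 16));		(bit 16 = RF)
 */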
/* Sets the trap flag. */
#define TRAP_FLAG 0x100UL

/*
 * eflags and offset of eflags on child stack..
 */
#define EFLAGS offsetof(struct pt_regs, eflags)
#define EFL_OFFSET ((int)(EFLAGS-sizeof(struct pt_regs)))
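/*
 * The saved user registers (struct pt_regs) sit at the very top of the
 * kernel stack, ending at thread.rsp0, so EFL_OFFSET is a small negative
 * offset from rsp0: get_stack_long(child, EFL_OFFSET) below reads the
 * child's saved eflags.
 */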
/*
 * This routine will get a word off of the process's privileged stack.
 * The offset is how far from the base addr as stored in the TSS.
 * This routine assumes that all the privileged stacks are in our
 * data space.
 */
static inline unsigned long get_stack_long(struct task_struct *task, int offset)
{
	unsigned char *stack;

	stack = (unsigned char *)task->thread.rsp0;
	stack += offset;
	return (*((unsigned long *)stack));
}
static inline struct pt_regs *get_child_regs(struct task_struct *task)
{
	struct pt_regs *regs = (void *)task->thread.rsp0;
	return regs - 1;
}
/*
 * This routine will put a word on the process's privileged stack.
 * The offset is how far from the base addr as stored in the TSS.
 * This routine assumes that all the privileged stacks are in our
 * data space.
 */
static inline long put_stack_long(struct task_struct *task, int offset,
	unsigned long data)
{
	unsigned char *stack;

	stack = (unsigned char *)task->thread.rsp0;
	stack += offset;
	*(unsigned long *) stack = data;
	return 0;
}
#define LDT_SEGMENT 4
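/* Bit 2 of a segment selector is the table indicator: set = LDT, clear = GDT. */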
unsigned long convert_rip_to_linear(struct task_struct *child, struct pt_regs *regs)
{
	unsigned long addr, seg;

	addr = regs->rip;
	seg = regs->cs & 0xffff;

	/*
	 * We'll assume that the code segments in the GDT
	 * are all zero-based. That is largely true: the
	 * TLS segments are used for data, and the PNPBIOS
	 * and APM bios ones we just ignore here.
	 */
	if (seg & LDT_SEGMENT) {
		u32 *desc;
		unsigned long base;

		down(&child->mm->context.sem);
		desc = child->mm->context.ldt + (seg & ~7);
		base = (desc[0] >> 16) | ((desc[1] & 0xff) << 16) | (desc[1] & 0xff000000);

		/* 16-bit code segment? */
		if (!((desc[1] >> 22) & 1))
			addr &= 0xffff;
		addr += base;
		up(&child->mm->context.sem);
	}
	return addr;
}
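/*
 * The next routine scans the instruction bytes at the traced task's current
 * RIP, skipping recognized prefix bytes, and reports whether the instruction
 * about to execute is popf (opcode 0x9d), the case where the program itself
 * may rewrite TF.  For example, the byte sequence 66 9d (operand-size prefix
 * followed by popf) counts as a popf, while 9c (pushf) does not.
 */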
static int is_at_popf(struct task_struct *child, struct pt_regs *regs)
{
	int i, copied;
	unsigned char opcode[16];
	unsigned long addr = convert_rip_to_linear(child, regs);

	copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
	for (i = 0; i < copied; i++) {
		switch (opcode[i]) {
		/* popf */
		case 0x9d:
			return 1;

			/* CHECKME: 64 65 */

			/* opcode and address size prefixes */
		case 0x66: case 0x67:
			continue;
			/* irrelevant prefixes (segment overrides and repeats) */
		case 0x26: case 0x2e:
		case 0x36: case 0x3e:
		case 0x64: case 0x65:
		case 0xf0: case 0xf2: case 0xf3:
			continue;

			/* REX prefixes */
		case 0x40 ... 0x4f:
			continue;

			/* CHECKME: f0, f2, f3 */

			/*
			 * pushf: NOTE! We should probably not let
			 * the user see the TF bit being set. But
			 * it's more pain than it's worth to avoid
			 * it, and a debugger could emulate this
			 * all in user space if it _really_ cares.
			 */
		case 0x9c:
		default:
			return 0;
		}
	}
	return 0;
}
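/*
 * Single-stepping is driven by two pieces of state: TIF_SINGLESTEP guarantees
 * that system calls and the return to user mode are single-stepped as well,
 * while PT_DTRACE records that it was the kernel, not the traced program,
 * that set TF, so clear_singlestep() only clears a TF it set itself.
 */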
static void set_singlestep(struct task_struct *child)
{
	struct pt_regs *regs = get_child_regs(child);

	/*
	 * Always set TIF_SINGLESTEP - this guarantees that
	 * we single-step system calls etc..  This will also
	 * cause us to set TF when returning to user mode.
	 */
	set_tsk_thread_flag(child, TIF_SINGLESTEP);

	/*
	 * If TF was already set, don't do anything else
	 */
	if (regs->eflags & TRAP_FLAG)
		return;

	/* Set TF on the kernel stack.. */
	regs->eflags |= TRAP_FLAG;

	/*
	 * ..but if TF is changed by the instruction we will trace,
	 * don't mark it as being "us" that set it, so that we
	 * won't clear it by hand later.
	 *
	 * AK: this is not enough, LAHF and IRET can change TF in user space too.
	 */
	if (is_at_popf(child, regs))
		return;

	child->ptrace |= PT_DTRACE;
}
static void clear_singlestep(struct task_struct *child)
{
	/* Always clear TIF_SINGLESTEP... */
	clear_tsk_thread_flag(child, TIF_SINGLESTEP);

	/* But touch TF only if it was set by us.. */
	if (child->ptrace & PT_DTRACE) {
		struct pt_regs *regs = get_child_regs(child);
		regs->eflags &= ~TRAP_FLAG;
		child->ptrace &= ~PT_DTRACE;
	}
}
/*
 * Called by kernel/ptrace.c when detaching..
 *
 * Make sure the single step bit is not set.
 */
void ptrace_disable(struct task_struct *child)
{
	clear_singlestep(child);
}
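/*
 * putreg()/getreg() identify a register by its byte offset into
 * struct user_regs_struct.  Offsets below sizeof(struct pt_regs) map onto the
 * register frame saved at the top of the kernel stack; the segment selectors
 * and the fs/gs bases are kept in thread_struct and handled explicitly.
 */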
static int putreg(struct task_struct *child,
	unsigned long regno, unsigned long value)
{
	unsigned long tmp;

	/* Some code in the 64bit emulation may not be 64bit clean.
	   Don't take any chances. */
	if (test_tsk_thread_flag(child, TIF_IA32))
		value &= 0xffffffff;
	switch (regno) {
		case offsetof(struct user_regs_struct,fs):
			if (value && (value & 3) != 3)
				return -EIO;
			child->thread.fsindex = value & 0xffff;
			return 0;
		case offsetof(struct user_regs_struct,gs):
			if (value && (value & 3) != 3)
				return -EIO;
			child->thread.gsindex = value & 0xffff;
			return 0;
		case offsetof(struct user_regs_struct,ds):
			if (value && (value & 3) != 3)
				return -EIO;
			child->thread.ds = value & 0xffff;
			return 0;
		case offsetof(struct user_regs_struct,es):
			if (value && (value & 3) != 3)
				return -EIO;
			child->thread.es = value & 0xffff;
			return 0;
		case offsetof(struct user_regs_struct,ss):
			if ((value & 3) != 3)
				return -EIO;
			value &= 0xffff;
			return 0;
		case offsetof(struct user_regs_struct,fs_base):
			if (value >= TASK_SIZE_OF(child))
				return -EIO;
			child->thread.fs = value;
			return 0;
		case offsetof(struct user_regs_struct,gs_base):
			if (value >= TASK_SIZE_OF(child))
				return -EIO;
			child->thread.gs = value;
			return 0;
		case offsetof(struct user_regs_struct, eflags):
			value &= FLAG_MASK;
			tmp = get_stack_long(child, EFL_OFFSET);
			tmp &= ~FLAG_MASK;
			value |= tmp;
			break;
		case offsetof(struct user_regs_struct,cs):
			if ((value & 3) != 3)
				return -EIO;
			value &= 0xffff;
			break;
		case offsetof(struct user_regs_struct, rip):
			/* Check if the new RIP address is canonical */
			if (value >= TASK_SIZE_OF(child))
				return -EIO;
			break;
	}
	put_stack_long(child, regno - sizeof(struct pt_regs), value);
	return 0;
}
static unsigned long getreg(struct task_struct *child, unsigned long regno)
{
	unsigned long val;
	switch (regno) {
		case offsetof(struct user_regs_struct, fs):
			return child->thread.fsindex;
		case offsetof(struct user_regs_struct, gs):
			return child->thread.gsindex;
		case offsetof(struct user_regs_struct, ds):
			return child->thread.ds;
		case offsetof(struct user_regs_struct, es):
			return child->thread.es;
		case offsetof(struct user_regs_struct, fs_base):
			return child->thread.fs;
		case offsetof(struct user_regs_struct, gs_base):
			return child->thread.gs;
		default:
			regno = regno - sizeof(struct pt_regs);
			val = get_stack_long(child, regno);
			if (test_tsk_thread_flag(child, TIF_IA32))
				val &= 0xffffffff;
			return val;
	}
}
long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
	long i, ret;
	unsigned ui;

	switch (request) {
	/* when I and D space are separate, these will need to be fixed. */
	case PTRACE_PEEKTEXT: /* read word at location addr. */
	case PTRACE_PEEKDATA: {
		unsigned long tmp;
		int copied;

		copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
		ret = -EIO;
		if (copied != sizeof(tmp))
			break;
		ret = put_user(tmp,(unsigned long __user *) data);
		break;
	}

	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long tmp;

		ret = -EIO;
		if ((addr & 7) ||
		    addr > sizeof(struct user) - 7)
			break;

		switch (addr) {
		case 0 ... sizeof(struct user_regs_struct) - sizeof(long):
			tmp = getreg(child, addr);
			break;
		case offsetof(struct user, u_debugreg[0]):
			tmp = child->thread.debugreg0;
			break;
		case offsetof(struct user, u_debugreg[1]):
			tmp = child->thread.debugreg1;
			break;
		case offsetof(struct user, u_debugreg[2]):
			tmp = child->thread.debugreg2;
			break;
		case offsetof(struct user, u_debugreg[3]):
			tmp = child->thread.debugreg3;
			break;
		case offsetof(struct user, u_debugreg[6]):
			tmp = child->thread.debugreg6;
			break;
		case offsetof(struct user, u_debugreg[7]):
			tmp = child->thread.debugreg7;
			break;
		default:
			tmp = 0;
			break;
		}
		ret = put_user(tmp,(unsigned long __user *) data);
		break;
	}

	/* when I and D space are separate, this will have to be fixed. */
	case PTRACE_POKETEXT: /* write the word at location addr. */
	case PTRACE_POKEDATA:
		ret = 0;
		if (access_process_vm(child, addr, &data, sizeof(data), 1) == sizeof(data))
			break;
		ret = -EIO;
		break;

	case PTRACE_POKEUSR: /* write the word at location addr in the USER area */
	{
		int dsize = test_tsk_thread_flag(child, TIF_IA32) ? 3 : 7;
		ret = -EIO;
		if ((addr & 7) ||
		    addr > sizeof(struct user) - 7)
			break;

		switch (addr) {
		case 0 ... sizeof(struct user_regs_struct) - sizeof(long):
			ret = putreg(child, addr, data);
			break;
		/* Disallow setting a breakpoint into the vsyscall. */
		case offsetof(struct user, u_debugreg[0]):
			if (data >= TASK_SIZE_OF(child) - dsize) break;
			child->thread.debugreg0 = data;
			ret = 0;
			break;
		case offsetof(struct user, u_debugreg[1]):
			if (data >= TASK_SIZE_OF(child) - dsize) break;
			child->thread.debugreg1 = data;
			ret = 0;
			break;
		case offsetof(struct user, u_debugreg[2]):
			if (data >= TASK_SIZE_OF(child) - dsize) break;
			child->thread.debugreg2 = data;
			ret = 0;
			break;
		case offsetof(struct user, u_debugreg[3]):
			if (data >= TASK_SIZE_OF(child) - dsize) break;
			child->thread.debugreg3 = data;
			ret = 0;
			break;
		case offsetof(struct user, u_debugreg[6]):
			if (data >> 32)
				break;
			child->thread.debugreg6 = data;
			ret = 0;
			break;
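		/*
		 * Each 4-bit field at DR7 bits 16-31 holds the R/Wn (low two
		 * bits) and LENn (high two bits) settings for one breakpoint.
		 * A set bit at position n of the 0x5454 constant below marks
		 * field value n as invalid; for instance R/W = 10b (an I/O
		 * breakpoint) is always refused.
		 */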
		case offsetof(struct user, u_debugreg[7]):
			/* See arch/i386/kernel/ptrace.c for an explanation of
			 * this awkward check.*/
			data &= ~DR_CONTROL_RESERVED;
			for(i=0; i<4; i++)
				if ((0x5454 >> ((data >> (16 + 4*i)) & 0xf)) & 1)
					break;
			if (i == 4) {
				child->thread.debugreg7 = data;
				ret = 0;
			}
			break;
		}
		break;
	}
	case PTRACE_SYSCALL: /* continue and stop at next (return from) syscall */
	case PTRACE_CONT:    /* restart after signal. */

		ret = -EIO;
		if (!valid_signal(data))
			break;
		if (request == PTRACE_SYSCALL)
			set_tsk_thread_flag(child,TIF_SYSCALL_TRACE);
		else
			clear_tsk_thread_flag(child,TIF_SYSCALL_TRACE);
		clear_tsk_thread_flag(child, TIF_SINGLESTEP);
		child->exit_code = data;
		/* make sure the single step bit is not set. */
		clear_singlestep(child);
		wake_up_process(child);
		ret = 0;
		break;
#ifdef CONFIG_IA32_EMULATION
		/* This only makes sense for 32bit programs.  Allow a
		   64bit debugger to fully examine them too.  Better
		   not to use it against 64bit processes; use
		   PTRACE_ARCH_PRCTL instead. */
	case PTRACE_SET_THREAD_AREA: {
		struct user_desc __user *p;
		int old;
		p = (struct user_desc __user *)data;
		get_user(old, &p->entry_number);
		put_user(addr, &p->entry_number);
		ret = do_set_thread_area(&child->thread, p);
		put_user(old, &p->entry_number);
		break;
	case PTRACE_GET_THREAD_AREA:
		p = (struct user_desc __user *)data;
		get_user(old, &p->entry_number);
		put_user(addr, &p->entry_number);
		ret = do_get_thread_area(&child->thread, p);
		put_user(old, &p->entry_number);
		break;
	}
#endif
		/* normal 64bit interface to access TLS data.
		   Works just like arch_prctl, except that the arguments
		   are reversed. */
	case PTRACE_ARCH_PRCTL:
		ret = do_arch_prctl(child, data, addr);
		break;
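		/*
		 * For example (user-space sketch, illustrative only): a
		 * debugger could read the traced task's fs base with
		 *
		 *	unsigned long fsbase;
		 *	ptrace(PTRACE_ARCH_PRCTL, pid, &fsbase, ARCH_GET_FS);
		 *
		 * i.e. the address goes in ptrace's addr slot and the
		 * arch_prctl code in its data slot, the reverse of a direct
		 * arch_prctl() call.
		 */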
/*
 * make the child exit.  Best I can do is send it a sigkill.
 * perhaps it should be put in the status that it wants to
 * exit.
 */
	case PTRACE_KILL:
		ret = 0;
		if (child->exit_state == EXIT_ZOMBIE)	/* already dead */
			break;
		clear_tsk_thread_flag(child, TIF_SINGLESTEP);
		child->exit_code = SIGKILL;
		/* make sure the single step bit is not set. */
		clear_singlestep(child);
		wake_up_process(child);
		break;
	case PTRACE_SINGLESTEP: /* set the trap flag. */
		ret = -EIO;
		if (!valid_signal(data))
			break;
		clear_tsk_thread_flag(child,TIF_SYSCALL_TRACE);
		set_singlestep(child);
		child->exit_code = data;
		/* give it a chance to run. */
		wake_up_process(child);
		ret = 0;
		break;

	case PTRACE_DETACH:
		/* detach a process that was attached. */
		ret = ptrace_detach(child, data);
		break;
	case PTRACE_GETREGS: { /* Get all gp regs from the child. */
		if (!access_ok(VERIFY_WRITE, (unsigned __user *)data,
			       sizeof(struct user_regs_struct))) {
			ret = -EIO;
			break;
		}
		ret = 0;
		for (ui = 0; ui < sizeof(struct user_regs_struct); ui += sizeof(long)) {
			ret |= __put_user(getreg(child, ui),(unsigned long __user *) data);
			data += sizeof(long);
		}
		break;
	}

	case PTRACE_SETREGS: { /* Set all gp regs in the child. */
		unsigned long tmp;
		if (!access_ok(VERIFY_READ, (unsigned __user *)data,
			       sizeof(struct user_regs_struct))) {
			ret = -EIO;
			break;
		}
		ret = 0;
		for (ui = 0; ui < sizeof(struct user_regs_struct); ui += sizeof(long)) {
			ret |= __get_user(tmp, (unsigned long __user *) data);
			putreg(child, ui, tmp);
			data += sizeof(long);
		}
		break;
	}
	case PTRACE_GETFPREGS: { /* Get the child extended FPU state. */
		if (!access_ok(VERIFY_WRITE, (unsigned __user *)data,
			       sizeof(struct user_i387_struct))) {
			ret = -EIO;
			break;
		}
		ret = get_fpregs((struct user_i387_struct __user *)data, child);
		break;
	}

	case PTRACE_SETFPREGS: { /* Set the child extended FPU state. */
		if (!access_ok(VERIFY_READ, (unsigned __user *)data,
			       sizeof(struct user_i387_struct))) {
			ret = -EIO;
			break;
		}
		set_stopped_child_used_math(child);
		ret = set_fpregs(child, (struct user_i387_struct __user *)data);
		break;
	}

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}
	return ret;
}
static void syscall_trace(struct pt_regs *regs)
{

#if 0
	printk("trace %s rip %lx rsp %lx rax %d origrax %d caller %lx tiflags %x ptrace %x\n",
	       current->comm,
	       regs->rip, regs->rsp, regs->rax, regs->orig_rax, __builtin_return_address(0),
	       current_thread_info()->flags, current->ptrace);
#endif
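	/*
	 * If the tracer asked for PTRACE_O_TRACESYSGOOD, report SIGTRAP | 0x80
	 * so it can tell syscall stops apart from genuine SIGTRAPs.
	 */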
	ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
				? 0x80 : 0));

	/*
	 * this isn't the same as continuing with a signal, but it will do
	 * for normal use.  strace only continues with a signal if the
	 * stopping signal is not SIGTRAP.  -brl
	 */
	if (current->exit_code) {
		send_sig(current->exit_code, current, 1);
		current->exit_code = 0;
	}
}
asmlinkage void syscall_trace_enter(struct pt_regs *regs)
{
	/* do the secure computing check first */
	secure_computing(regs->orig_rax);

	if (test_thread_flag(TIF_SYSCALL_TRACE)
	    && (current->ptrace & PT_PTRACED))
		syscall_trace(regs);
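	/*
	 * The first four syscall arguments live in different registers for
	 * the two ABIs (ebx/ecx/edx/esi for ia32 tasks, rdi/rsi/rdx/r10 for
	 * native 64-bit), hence the two audit_syscall_entry() calls below.
	 */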
	if (unlikely(current->audit_context)) {
		if (test_thread_flag(TIF_IA32)) {
			audit_syscall_entry(current, AUDIT_ARCH_I386,
					    regs->orig_rax,
					    regs->rbx, regs->rcx,
					    regs->rdx, regs->rsi);
		} else {
			audit_syscall_entry(current, AUDIT_ARCH_X86_64,
					    regs->orig_rax,
					    regs->rdi, regs->rsi,
					    regs->rdx, regs->r10);
		}
	}
}
asmlinkage void syscall_trace_leave(struct pt_regs *regs)
{
	if (unlikely(current->audit_context))
		audit_syscall_exit(current, AUDITSC_RESULT(regs->rax), regs->rax);

	if ((test_thread_flag(TIF_SYSCALL_TRACE)
	     || test_thread_flag(TIF_SINGLESTEP))
	    && (current->ptrace & PT_PTRACED))
		syscall_trace(regs);
}